// SPDX-License-Identifier: GPL-2.0
/*
 * main.c - Multi purpose firmware loading support
 *
 * Copyright (c) 2003 Manuel Estrada Sainz
 *
 * Please see Documentation/driver-api/firmware/ for more information.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/device.h>
#include <linux/kernel_read_file.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/highmem.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/async.h>
#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/reboot.h>
#include <linux/security.h>
#include <linux/xz.h>

#include <generated/utsrelease.h>

#include "../base.h"
#include "firmware.h"
#include "fallback.h"

MODULE_AUTHOR("Manuel Estrada Sainz");
MODULE_DESCRIPTION("Multi purpose firmware loading support");
MODULE_LICENSE("GPL");

/*
 * Global cache of in-flight and cached firmware images.  @lock protects
 * @head (the list of struct fw_priv instances); @name_lock protects
 * @fw_names (the list of names cached for suspend/resume).
 */
struct firmware_cache {
	/* firmware_buf instance will be added into the below list */
	spinlock_t lock;
	struct list_head head;
	int state;

#ifdef CONFIG_FW_CACHE
	/*
	 * Names of firmware images which have been cached successfully
	 * will be added into the below list so that device uncache
	 * helper can trace which firmware images have been cached
	 * before.
	 */
	spinlock_t name_lock;
	struct list_head fw_names;

	struct delayed_work work;

	struct notifier_block pm_notify;
#endif
};

/* One cached-firmware name, linked on firmware_cache.fw_names. */
struct fw_cache_entry {
	struct list_head list;
	const char *name;
};

/* devres payload recording a firmware name attached to a device. */
struct fw_name_devm {
	unsigned long magic;	/* set to &fw_cache to identify our devres */
	const char *name;
};

static inline struct fw_priv *to_fw_priv(struct kref *ref)
{
	return container_of(ref, struct fw_priv, ref);
}

#define FW_LOADER_NO_CACHE	0
#define FW_LOADER_START_CACHE	1

/* fw_lock could be moved to 'struct fw_sysfs' but since it is just
 * guarding for corner cases a global lock should be OK */
DEFINE_MUTEX(fw_lock);

static struct firmware_cache fw_cache;

/* Reset a fw_priv's load state to "unknown" with a fresh completion. */
static void fw_state_init(struct fw_priv *fw_priv)
{
	struct fw_state *fw_st = &fw_priv->fw_st;

	init_completion(&fw_st->completion);
	fw_st->status = FW_STATUS_UNKNOWN;
}

/* Wait (without timeout) for a batched request to finish loading. */
static inline int fw_state_wait(struct fw_priv *fw_priv)
{
	return __fw_state_wait_common(fw_priv, MAX_SCHEDULE_TIMEOUT);
}

static void fw_cache_piggyback_on_request(struct fw_priv *fw_priv);

/*
 * Allocate and initialize a new fw_priv.  Called with fwc->lock held by
 * alloc_lookup_fw_priv(), hence the GFP_ATOMIC allocations.  Returns NULL
 * on invalid option combinations or allocation failure.
 */
static struct fw_priv *__allocate_fw_priv(const char *fw_name,
					  struct firmware_cache *fwc,
					  void *dbuf,
					  size_t size,
					  size_t offset,
					  u32 opt_flags)
{
	struct fw_priv *fw_priv;

	/* For a partial read, the buffer must be preallocated. */
	if ((opt_flags & FW_OPT_PARTIAL) && !dbuf)
		return NULL;

	/* Only partial reads are allowed to use an offset. */
	if (offset != 0 && !(opt_flags & FW_OPT_PARTIAL))
		return NULL;

	fw_priv = kzalloc(sizeof(*fw_priv), GFP_ATOMIC);
	if (!fw_priv)
		return NULL;

	fw_priv->fw_name = kstrdup_const(fw_name, GFP_ATOMIC);
	if (!fw_priv->fw_name) {
		kfree(fw_priv);
		return NULL;
	}

	kref_init(&fw_priv->ref);
	fw_priv->fwc = fwc;
	fw_priv->data = dbuf;
	fw_priv->allocated_size = size;
	fw_priv->offset = offset;
	fw_priv->opt_flags = opt_flags;
	fw_state_init(fw_priv);
#ifdef CONFIG_FW_LOADER_USER_HELPER
	INIT_LIST_HEAD(&fw_priv->pending_list);
#endif

	pr_debug("%s: fw-%s fw_priv=%p\n", __func__, fw_name, fw_priv);

	return fw_priv;
}

/* Find an existing fw_priv by name; caller must hold fw_cache.lock. */
static struct fw_priv *__lookup_fw_priv(const char *fw_name)
{
	struct fw_priv *tmp;
	struct firmware_cache *fwc = &fw_cache;

	list_for_each_entry(tmp, &fwc->head, list)
		if (!strcmp(tmp->fw_name, fw_name))
			return tmp;
	return NULL;
}

/* Returns 1 for batching firmware requests with the same name */
static int alloc_lookup_fw_priv(const char *fw_name,
				struct firmware_cache *fwc,
				struct fw_priv **fw_priv,
				void *dbuf,
				size_t size,
				size_t offset,
				u32 opt_flags)
{
	struct fw_priv *tmp;

	spin_lock(&fwc->lock);
	/*
	 * Do not merge requests that are marked to be non-cached or
	 * are performing partial reads.
	 */
	if (!(opt_flags & (FW_OPT_NOCACHE | FW_OPT_PARTIAL))) {
		tmp = __lookup_fw_priv(fw_name);
		if (tmp) {
			kref_get(&tmp->ref);
			spin_unlock(&fwc->lock);
			*fw_priv = tmp;
			pr_debug("batched request - sharing the same struct fw_priv and lookup for multiple requests\n");
			return 1;
		}
	}

	tmp = __allocate_fw_priv(fw_name, fwc, dbuf, size, offset, opt_flags);
	if (tmp) {
		INIT_LIST_HEAD(&tmp->list);
		if (!(opt_flags & FW_OPT_NOCACHE))
			list_add(&tmp->list, &fwc->head);
	}
	spin_unlock(&fwc->lock);

	*fw_priv = tmp;

	return tmp ? 0 : -ENOMEM;
}

/*
 * kref release callback: entered with fwc->lock held (taken by
 * free_fw_priv()) and drops it after unlinking the entry.
 */
static void __free_fw_priv(struct kref *ref)
	__releases(&fwc->lock)
{
	struct fw_priv *fw_priv = to_fw_priv(ref);
	struct firmware_cache *fwc = fw_priv->fwc;

	pr_debug("%s: fw-%s fw_priv=%p data=%p size=%u\n",
		 __func__, fw_priv->fw_name, fw_priv, fw_priv->data,
		 (unsigned int)fw_priv->size);

	list_del(&fw_priv->list);
	spin_unlock(&fwc->lock);

	if (fw_is_paged_buf(fw_priv))
		fw_free_paged_buf(fw_priv);
	else if (!fw_priv->allocated_size)
		vfree(fw_priv->data);

	kfree_const(fw_priv->fw_name);
	kfree(fw_priv);
}

/* Drop one reference; the last put frees via __free_fw_priv(). */
static void free_fw_priv(struct fw_priv *fw_priv)
{
	struct firmware_cache *fwc = fw_priv->fwc;
	spin_lock(&fwc->lock);
	if (!kref_put(&fw_priv->ref, __free_fw_priv))
		spin_unlock(&fwc->lock);
}

#ifdef CONFIG_FW_LOADER_PAGED_BUF
bool fw_is_paged_buf(struct fw_priv *fw_priv)
{
	return fw_priv->is_paged_buf;
}

/* Unmap and release all pages backing a paged firmware buffer. */
void fw_free_paged_buf(struct fw_priv *fw_priv)
{
	int i;

	if (!fw_priv->pages)
		return;

	vunmap(fw_priv->data);

	for (i = 0; i < fw_priv->nr_pages; i++)
		__free_page(fw_priv->pages[i]);
	kvfree(fw_priv->pages);
	fw_priv->pages = NULL;
	fw_priv->page_array_size = 0;
	fw_priv->nr_pages = 0;
}

/*
 * Ensure the paged buffer holds at least @pages_needed pages, growing
 * both the page-pointer array and the page allocations as required.
 * Returns 0 on success or -ENOMEM.
 */
int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed)
{
	/* If the array of pages is too small, grow it */
	if (fw_priv->page_array_size < pages_needed) {
		int new_array_size = max(pages_needed,
					 fw_priv->page_array_size * 2);
		struct page **new_pages;

		new_pages = kvmalloc_array(new_array_size, sizeof(void *),
					   GFP_KERNEL);
		if (!new_pages)
			return -ENOMEM;
		memcpy(new_pages, fw_priv->pages,
		       fw_priv->page_array_size * sizeof(void *));
		memset(&new_pages[fw_priv->page_array_size], 0, sizeof(void *) *
		       (new_array_size - fw_priv->page_array_size));
		kvfree(fw_priv->pages);
		fw_priv->pages = new_pages;
		fw_priv->page_array_size = new_array_size;
	}

	while (fw_priv->nr_pages < pages_needed) {
		fw_priv->pages[fw_priv->nr_pages] =
			alloc_page(GFP_KERNEL | __GFP_HIGHMEM);

		if (!fw_priv->pages[fw_priv->nr_pages])
			return -ENOMEM;
		fw_priv->nr_pages++;
	}

	return 0;
}

/* Map the accumulated pages into a contiguous read-only vmalloc area. */
int fw_map_paged_buf(struct fw_priv *fw_priv)
{
	/* one pages buffer should be mapped/unmapped only once */
	if (!fw_priv->pages)
		return 0;

	vunmap(fw_priv->data);
	fw_priv->data = vmap(fw_priv->pages, fw_priv->nr_pages, 0,
			     PAGE_KERNEL_RO);
	if (!fw_priv->data)
		return -ENOMEM;

	return 0;
}
#endif

/*
 * XZ-compressed firmware support
 */
#ifdef CONFIG_FW_LOADER_COMPRESS
/* show an error and return the standard error code */
static int fw_decompress_xz_error(struct device *dev, enum xz_ret xz_ret)
{
	if (xz_ret != XZ_STREAM_END) {
		dev_warn(dev, "xz decompression failed (xz_ret=%d)\n", xz_ret);
		return xz_ret == XZ_MEM_ERROR ? -ENOMEM : -EINVAL;
	}
	return 0;
}

/* single-shot decompression onto the pre-allocated buffer */
static int fw_decompress_xz_single(struct device *dev, struct fw_priv *fw_priv,
				   size_t in_size, const void *in_buffer)
{
	struct xz_dec *xz_dec;
	struct xz_buf xz_buf;
	enum xz_ret xz_ret;

	xz_dec = xz_dec_init(XZ_SINGLE, (u32)-1);
	if (!xz_dec)
		return -ENOMEM;

	xz_buf.in_size = in_size;
	xz_buf.in = in_buffer;
	xz_buf.in_pos = 0;
	xz_buf.out_size = fw_priv->allocated_size;
	xz_buf.out = fw_priv->data;
	xz_buf.out_pos = 0;

	xz_ret = xz_dec_run(xz_dec, &xz_buf);
	xz_dec_end(xz_dec);

	fw_priv->size = xz_buf.out_pos;
	return fw_decompress_xz_error(dev, xz_ret);
}

/* decompression on paged buffer and map it */
static int fw_decompress_xz_pages(struct device *dev, struct fw_priv *fw_priv,
				  size_t in_size, const void *in_buffer)
{
	struct xz_dec *xz_dec;
	struct xz_buf xz_buf;
	enum xz_ret xz_ret;
	struct page *page;
	int err = 0;

	xz_dec = xz_dec_init(XZ_DYNALLOC, (u32)-1);
	if (!xz_dec)
		return -ENOMEM;

	xz_buf.in_size = in_size;
	xz_buf.in = in_buffer;
	xz_buf.in_pos = 0;

	fw_priv->is_paged_buf = true;
	fw_priv->size = 0;
	do {
		/* grow the buffer one page at a time as output arrives */
		if (fw_grow_paged_buf(fw_priv, fw_priv->nr_pages + 1)) {
			err = -ENOMEM;
			goto out;
		}

		/* decompress onto the new allocated page */
		page = fw_priv->pages[fw_priv->nr_pages - 1];
		xz_buf.out = kmap(page);
		xz_buf.out_pos = 0;
		xz_buf.out_size = PAGE_SIZE;
		xz_ret = xz_dec_run(xz_dec, &xz_buf);
		kunmap(page);
		fw_priv->size += xz_buf.out_pos;
		/* partial decompression means either end or error */
		if (xz_buf.out_pos != PAGE_SIZE)
			break;
	} while (xz_ret == XZ_OK);

	err = fw_decompress_xz_error(dev, xz_ret);
	if (!err)
		err = fw_map_paged_buf(fw_priv);

 out:
	xz_dec_end(xz_dec);
	return err;
}

395 static int fw_decompress_xz(struct device *dev, struct fw_priv *fw_priv, 396 size_t in_size, const void *in_buffer) 397 { 398 /* if the buffer is pre-allocated, we can perform in single-shot mode */ 399 if (fw_priv->data) 400 return fw_decompress_xz_single(dev, fw_priv, in_size, in_buffer); 401 else 402 return fw_decompress_xz_pages(dev, fw_priv, in_size, in_buffer); 403 } 404 #endif /* CONFIG_FW_LOADER_COMPRESS */ 405 406 /* direct firmware loading support */ 407 static char fw_path_para[256]; 408 static const char * const fw_path[] = { 409 fw_path_para, 410 "/lib/firmware/updates/" UTS_RELEASE, 411 "/lib/firmware/updates", 412 "/lib/firmware/" UTS_RELEASE, 413 "/lib/firmware" 414 }; 415 416 /* 417 * Typical usage is that passing 'firmware_class.path=$CUSTOMIZED_PATH' 418 * from kernel command line because firmware_class is generally built in 419 * kernel instead of module. 420 */ 421 module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644); 422 MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path"); 423 424 static int 425 fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv, 426 const char *suffix, 427 int (*decompress)(struct device *dev, 428 struct fw_priv *fw_priv, 429 size_t in_size, 430 const void *in_buffer)) 431 { 432 size_t size; 433 int i, len; 434 int rc = -ENOENT; 435 char *path; 436 size_t msize = INT_MAX; 437 void *buffer = NULL; 438 439 /* Already populated data member means we're loading into a buffer */ 440 if (!decompress && fw_priv->data) { 441 buffer = fw_priv->data; 442 msize = fw_priv->allocated_size; 443 } 444 445 path = __getname(); 446 if (!path) 447 return -ENOMEM; 448 449 wait_for_initramfs(); 450 for (i = 0; i < ARRAY_SIZE(fw_path); i++) { 451 size_t file_size = 0; 452 size_t *file_size_ptr = NULL; 453 454 /* skip the unset customized path */ 455 if (!fw_path[i][0]) 456 continue; 457 458 len = snprintf(path, PATH_MAX, "%s/%s%s", 459 fw_path[i], 
fw_priv->fw_name, suffix); 460 if (len >= PATH_MAX) { 461 rc = -ENAMETOOLONG; 462 break; 463 } 464 465 fw_priv->size = 0; 466 467 /* 468 * The total file size is only examined when doing a partial 469 * read; the "full read" case needs to fail if the whole 470 * firmware was not completely loaded. 471 */ 472 if ((fw_priv->opt_flags & FW_OPT_PARTIAL) && buffer) 473 file_size_ptr = &file_size; 474 475 /* load firmware files from the mount namespace of init */ 476 rc = kernel_read_file_from_path_initns(path, fw_priv->offset, 477 &buffer, msize, 478 file_size_ptr, 479 READING_FIRMWARE); 480 if (rc < 0) { 481 if (rc != -ENOENT) 482 dev_warn(device, "loading %s failed with error %d\n", 483 path, rc); 484 else 485 dev_dbg(device, "loading %s failed for no such file or directory.\n", 486 path); 487 continue; 488 } 489 size = rc; 490 rc = 0; 491 492 dev_dbg(device, "Loading firmware from %s\n", path); 493 if (decompress) { 494 dev_dbg(device, "f/w decompressing %s\n", 495 fw_priv->fw_name); 496 rc = decompress(device, fw_priv, size, buffer); 497 /* discard the superfluous original content */ 498 vfree(buffer); 499 buffer = NULL; 500 if (rc) { 501 fw_free_paged_buf(fw_priv); 502 continue; 503 } 504 } else { 505 dev_dbg(device, "direct-loading %s\n", 506 fw_priv->fw_name); 507 if (!fw_priv->data) 508 fw_priv->data = buffer; 509 fw_priv->size = size; 510 } 511 fw_state_done(fw_priv); 512 break; 513 } 514 __putname(path); 515 516 return rc; 517 } 518 519 /* firmware holds the ownership of pages */ 520 static void firmware_free_data(const struct firmware *fw) 521 { 522 /* Loaded directly? 
*/ 523 if (!fw->priv) { 524 vfree(fw->data); 525 return; 526 } 527 free_fw_priv(fw->priv); 528 } 529 530 /* store the pages buffer info firmware from buf */ 531 static void fw_set_page_data(struct fw_priv *fw_priv, struct firmware *fw) 532 { 533 fw->priv = fw_priv; 534 fw->size = fw_priv->size; 535 fw->data = fw_priv->data; 536 537 pr_debug("%s: fw-%s fw_priv=%p data=%p size=%u\n", 538 __func__, fw_priv->fw_name, fw_priv, fw_priv->data, 539 (unsigned int)fw_priv->size); 540 } 541 542 #ifdef CONFIG_FW_CACHE 543 static void fw_name_devm_release(struct device *dev, void *res) 544 { 545 struct fw_name_devm *fwn = res; 546 547 if (fwn->magic == (unsigned long)&fw_cache) 548 pr_debug("%s: fw_name-%s devm-%p released\n", 549 __func__, fwn->name, res); 550 kfree_const(fwn->name); 551 } 552 553 static int fw_devm_match(struct device *dev, void *res, 554 void *match_data) 555 { 556 struct fw_name_devm *fwn = res; 557 558 return (fwn->magic == (unsigned long)&fw_cache) && 559 !strcmp(fwn->name, match_data); 560 } 561 562 static struct fw_name_devm *fw_find_devm_name(struct device *dev, 563 const char *name) 564 { 565 struct fw_name_devm *fwn; 566 567 fwn = devres_find(dev, fw_name_devm_release, 568 fw_devm_match, (void *)name); 569 return fwn; 570 } 571 572 static bool fw_cache_is_setup(struct device *dev, const char *name) 573 { 574 struct fw_name_devm *fwn; 575 576 fwn = fw_find_devm_name(dev, name); 577 if (fwn) 578 return true; 579 580 return false; 581 } 582 583 /* add firmware name into devres list */ 584 static int fw_add_devm_name(struct device *dev, const char *name) 585 { 586 struct fw_name_devm *fwn; 587 588 if (fw_cache_is_setup(dev, name)) 589 return 0; 590 591 fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm), 592 GFP_KERNEL); 593 if (!fwn) 594 return -ENOMEM; 595 fwn->name = kstrdup_const(name, GFP_KERNEL); 596 if (!fwn->name) { 597 devres_free(fwn); 598 return -ENOMEM; 599 } 600 601 fwn->magic = (unsigned long)&fw_cache; 602 devres_add(dev, 
fwn); 603 604 return 0; 605 } 606 #else 607 static bool fw_cache_is_setup(struct device *dev, const char *name) 608 { 609 return false; 610 } 611 612 static int fw_add_devm_name(struct device *dev, const char *name) 613 { 614 return 0; 615 } 616 #endif 617 618 int assign_fw(struct firmware *fw, struct device *device) 619 { 620 struct fw_priv *fw_priv = fw->priv; 621 int ret; 622 623 mutex_lock(&fw_lock); 624 if (!fw_priv->size || fw_state_is_aborted(fw_priv)) { 625 mutex_unlock(&fw_lock); 626 return -ENOENT; 627 } 628 629 /* 630 * add firmware name into devres list so that we can auto cache 631 * and uncache firmware for device. 632 * 633 * device may has been deleted already, but the problem 634 * should be fixed in devres or driver core. 635 */ 636 /* don't cache firmware handled without uevent */ 637 if (device && (fw_priv->opt_flags & FW_OPT_UEVENT) && 638 !(fw_priv->opt_flags & FW_OPT_NOCACHE)) { 639 ret = fw_add_devm_name(device, fw_priv->fw_name); 640 if (ret) { 641 mutex_unlock(&fw_lock); 642 return ret; 643 } 644 } 645 646 /* 647 * After caching firmware image is started, let it piggyback 648 * on request firmware. 
649 */ 650 if (!(fw_priv->opt_flags & FW_OPT_NOCACHE) && 651 fw_priv->fwc->state == FW_LOADER_START_CACHE) 652 fw_cache_piggyback_on_request(fw_priv); 653 654 /* pass the pages buffer to driver at the last minute */ 655 fw_set_page_data(fw_priv, fw); 656 mutex_unlock(&fw_lock); 657 return 0; 658 } 659 660 /* prepare firmware and firmware_buf structs; 661 * return 0 if a firmware is already assigned, 1 if need to load one, 662 * or a negative error code 663 */ 664 static int 665 _request_firmware_prepare(struct firmware **firmware_p, const char *name, 666 struct device *device, void *dbuf, size_t size, 667 size_t offset, u32 opt_flags) 668 { 669 struct firmware *firmware; 670 struct fw_priv *fw_priv; 671 int ret; 672 673 *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL); 674 if (!firmware) { 675 dev_err(device, "%s: kmalloc(struct firmware) failed\n", 676 __func__); 677 return -ENOMEM; 678 } 679 680 if (firmware_request_builtin_buf(firmware, name, dbuf, size)) { 681 dev_dbg(device, "using built-in %s\n", name); 682 return 0; /* assigned */ 683 } 684 685 ret = alloc_lookup_fw_priv(name, &fw_cache, &fw_priv, dbuf, size, 686 offset, opt_flags); 687 688 /* 689 * bind with 'priv' now to avoid warning in failure path 690 * of requesting firmware. 691 */ 692 firmware->priv = fw_priv; 693 694 if (ret > 0) { 695 ret = fw_state_wait(fw_priv); 696 if (!ret) { 697 fw_set_page_data(fw_priv, firmware); 698 return 0; /* assigned */ 699 } 700 } 701 702 if (ret < 0) 703 return ret; 704 return 1; /* need to load */ 705 } 706 707 /* 708 * Batched requests need only one wake, we need to do this step last due to the 709 * fallback mechanism. The buf is protected with kref_get(), and it won't be 710 * released until the last user calls release_firmware(). 711 * 712 * Failed batched requests are possible as well, in such cases we just share 713 * the struct fw_priv and won't release it until all requests are woken 714 * and have gone through this same path. 
715 */ 716 static void fw_abort_batch_reqs(struct firmware *fw) 717 { 718 struct fw_priv *fw_priv; 719 720 /* Loaded directly? */ 721 if (!fw || !fw->priv) 722 return; 723 724 fw_priv = fw->priv; 725 mutex_lock(&fw_lock); 726 if (!fw_state_is_aborted(fw_priv)) 727 fw_state_aborted(fw_priv); 728 mutex_unlock(&fw_lock); 729 } 730 731 /* called from request_firmware() and request_firmware_work_func() */ 732 static int 733 _request_firmware(const struct firmware **firmware_p, const char *name, 734 struct device *device, void *buf, size_t size, 735 size_t offset, u32 opt_flags) 736 { 737 struct firmware *fw = NULL; 738 struct cred *kern_cred = NULL; 739 const struct cred *old_cred; 740 bool nondirect = false; 741 int ret; 742 743 if (!firmware_p) 744 return -EINVAL; 745 746 if (!name || name[0] == '\0') { 747 ret = -EINVAL; 748 goto out; 749 } 750 751 ret = _request_firmware_prepare(&fw, name, device, buf, size, 752 offset, opt_flags); 753 if (ret <= 0) /* error or already assigned */ 754 goto out; 755 756 /* 757 * We are about to try to access the firmware file. Because we may have been 758 * called by a driver when serving an unrelated request from userland, we use 759 * the kernel credentials to read the file. 760 */ 761 kern_cred = prepare_kernel_cred(NULL); 762 if (!kern_cred) { 763 ret = -ENOMEM; 764 goto out; 765 } 766 old_cred = override_creds(kern_cred); 767 768 ret = fw_get_filesystem_firmware(device, fw->priv, "", NULL); 769 770 /* Only full reads can support decompression, platform, and sysfs. 
*/ 771 if (!(opt_flags & FW_OPT_PARTIAL)) 772 nondirect = true; 773 774 #ifdef CONFIG_FW_LOADER_COMPRESS 775 if (ret == -ENOENT && nondirect) 776 ret = fw_get_filesystem_firmware(device, fw->priv, ".xz", 777 fw_decompress_xz); 778 #endif 779 if (ret == -ENOENT && nondirect) 780 ret = firmware_fallback_platform(fw->priv); 781 782 if (ret) { 783 if (!(opt_flags & FW_OPT_NO_WARN)) 784 dev_warn(device, 785 "Direct firmware load for %s failed with error %d\n", 786 name, ret); 787 if (nondirect) 788 ret = firmware_fallback_sysfs(fw, name, device, 789 opt_flags, ret); 790 } else 791 ret = assign_fw(fw, device); 792 793 revert_creds(old_cred); 794 put_cred(kern_cred); 795 796 out: 797 if (ret < 0) { 798 fw_abort_batch_reqs(fw); 799 release_firmware(fw); 800 fw = NULL; 801 } 802 803 *firmware_p = fw; 804 return ret; 805 } 806 807 /** 808 * request_firmware() - send firmware request and wait for it 809 * @firmware_p: pointer to firmware image 810 * @name: name of firmware file 811 * @device: device for which firmware is being loaded 812 * 813 * @firmware_p will be used to return a firmware image by the name 814 * of @name for device @device. 815 * 816 * Should be called from user context where sleeping is allowed. 817 * 818 * @name will be used as $FIRMWARE in the uevent environment and 819 * should be distinctive enough not to be confused with any other 820 * firmware image for this or any other device. 821 * 822 * Caller must hold the reference count of @device. 823 * 824 * The function can be called safely inside device's suspend and 825 * resume callback. 
826 **/ 827 int 828 request_firmware(const struct firmware **firmware_p, const char *name, 829 struct device *device) 830 { 831 int ret; 832 833 /* Need to pin this module until return */ 834 __module_get(THIS_MODULE); 835 ret = _request_firmware(firmware_p, name, device, NULL, 0, 0, 836 FW_OPT_UEVENT); 837 module_put(THIS_MODULE); 838 return ret; 839 } 840 EXPORT_SYMBOL(request_firmware); 841 842 /** 843 * firmware_request_nowarn() - request for an optional fw module 844 * @firmware: pointer to firmware image 845 * @name: name of firmware file 846 * @device: device for which firmware is being loaded 847 * 848 * This function is similar in behaviour to request_firmware(), except it 849 * doesn't produce warning messages when the file is not found. The sysfs 850 * fallback mechanism is enabled if direct filesystem lookup fails. However, 851 * failures to find the firmware file with it are still suppressed. It is 852 * therefore up to the driver to check for the return value of this call and to 853 * decide when to inform the users of errors. 854 **/ 855 int firmware_request_nowarn(const struct firmware **firmware, const char *name, 856 struct device *device) 857 { 858 int ret; 859 860 /* Need to pin this module until return */ 861 __module_get(THIS_MODULE); 862 ret = _request_firmware(firmware, name, device, NULL, 0, 0, 863 FW_OPT_UEVENT | FW_OPT_NO_WARN); 864 module_put(THIS_MODULE); 865 return ret; 866 } 867 EXPORT_SYMBOL_GPL(firmware_request_nowarn); 868 869 /** 870 * request_firmware_direct() - load firmware directly without usermode helper 871 * @firmware_p: pointer to firmware image 872 * @name: name of firmware file 873 * @device: device for which firmware is being loaded 874 * 875 * This function works pretty much like request_firmware(), but this doesn't 876 * fall back to usermode helper even if the firmware couldn't be loaded 877 * directly from fs. 
Hence it's useful for loading optional firmwares, which 878 * aren't always present, without extra long timeouts of udev. 879 **/ 880 int request_firmware_direct(const struct firmware **firmware_p, 881 const char *name, struct device *device) 882 { 883 int ret; 884 885 __module_get(THIS_MODULE); 886 ret = _request_firmware(firmware_p, name, device, NULL, 0, 0, 887 FW_OPT_UEVENT | FW_OPT_NO_WARN | 888 FW_OPT_NOFALLBACK_SYSFS); 889 module_put(THIS_MODULE); 890 return ret; 891 } 892 EXPORT_SYMBOL_GPL(request_firmware_direct); 893 894 /** 895 * firmware_request_platform() - request firmware with platform-fw fallback 896 * @firmware: pointer to firmware image 897 * @name: name of firmware file 898 * @device: device for which firmware is being loaded 899 * 900 * This function is similar in behaviour to request_firmware, except that if 901 * direct filesystem lookup fails, it will fallback to looking for a copy of the 902 * requested firmware embedded in the platform's main (e.g. UEFI) firmware. 903 **/ 904 int firmware_request_platform(const struct firmware **firmware, 905 const char *name, struct device *device) 906 { 907 int ret; 908 909 /* Need to pin this module until return */ 910 __module_get(THIS_MODULE); 911 ret = _request_firmware(firmware, name, device, NULL, 0, 0, 912 FW_OPT_UEVENT | FW_OPT_FALLBACK_PLATFORM); 913 module_put(THIS_MODULE); 914 return ret; 915 } 916 EXPORT_SYMBOL_GPL(firmware_request_platform); 917 918 /** 919 * firmware_request_cache() - cache firmware for suspend so resume can use it 920 * @name: name of firmware file 921 * @device: device for which firmware should be cached for 922 * 923 * There are some devices with an optimization that enables the device to not 924 * require loading firmware on system reboot. This optimization may still 925 * require the firmware present on resume from suspend. This routine can be 926 * used to ensure the firmware is present on resume from suspend in these 927 * situations. 
This helper is not compatible with drivers which use 928 * request_firmware_into_buf() or request_firmware_nowait() with no uevent set. 929 **/ 930 int firmware_request_cache(struct device *device, const char *name) 931 { 932 int ret; 933 934 mutex_lock(&fw_lock); 935 ret = fw_add_devm_name(device, name); 936 mutex_unlock(&fw_lock); 937 938 return ret; 939 } 940 EXPORT_SYMBOL_GPL(firmware_request_cache); 941 942 /** 943 * request_firmware_into_buf() - load firmware into a previously allocated buffer 944 * @firmware_p: pointer to firmware image 945 * @name: name of firmware file 946 * @device: device for which firmware is being loaded and DMA region allocated 947 * @buf: address of buffer to load firmware into 948 * @size: size of buffer 949 * 950 * This function works pretty much like request_firmware(), but it doesn't 951 * allocate a buffer to hold the firmware data. Instead, the firmware 952 * is loaded directly into the buffer pointed to by @buf and the @firmware_p 953 * data member is pointed at @buf. 954 * 955 * This function doesn't cache firmware either. 
956 */ 957 int 958 request_firmware_into_buf(const struct firmware **firmware_p, const char *name, 959 struct device *device, void *buf, size_t size) 960 { 961 int ret; 962 963 if (fw_cache_is_setup(device, name)) 964 return -EOPNOTSUPP; 965 966 __module_get(THIS_MODULE); 967 ret = _request_firmware(firmware_p, name, device, buf, size, 0, 968 FW_OPT_UEVENT | FW_OPT_NOCACHE); 969 module_put(THIS_MODULE); 970 return ret; 971 } 972 EXPORT_SYMBOL(request_firmware_into_buf); 973 974 /** 975 * request_partial_firmware_into_buf() - load partial firmware into a previously allocated buffer 976 * @firmware_p: pointer to firmware image 977 * @name: name of firmware file 978 * @device: device for which firmware is being loaded and DMA region allocated 979 * @buf: address of buffer to load firmware into 980 * @size: size of buffer 981 * @offset: offset into file to read 982 * 983 * This function works pretty much like request_firmware_into_buf except 984 * it allows a partial read of the file. 985 */ 986 int 987 request_partial_firmware_into_buf(const struct firmware **firmware_p, 988 const char *name, struct device *device, 989 void *buf, size_t size, size_t offset) 990 { 991 int ret; 992 993 if (fw_cache_is_setup(device, name)) 994 return -EOPNOTSUPP; 995 996 __module_get(THIS_MODULE); 997 ret = _request_firmware(firmware_p, name, device, buf, size, offset, 998 FW_OPT_UEVENT | FW_OPT_NOCACHE | 999 FW_OPT_PARTIAL); 1000 module_put(THIS_MODULE); 1001 return ret; 1002 } 1003 EXPORT_SYMBOL(request_partial_firmware_into_buf); 1004 1005 /** 1006 * release_firmware() - release the resource associated with a firmware image 1007 * @fw: firmware resource to release 1008 **/ 1009 void release_firmware(const struct firmware *fw) 1010 { 1011 if (fw) { 1012 if (!firmware_is_builtin(fw)) 1013 firmware_free_data(fw); 1014 kfree(fw); 1015 } 1016 } 1017 EXPORT_SYMBOL(release_firmware); 1018 1019 /* Async support */ 1020 struct firmware_work { 1021 struct work_struct work; 1022 struct module 
*module; 1023 const char *name; 1024 struct device *device; 1025 void *context; 1026 void (*cont)(const struct firmware *fw, void *context); 1027 u32 opt_flags; 1028 }; 1029 1030 static void request_firmware_work_func(struct work_struct *work) 1031 { 1032 struct firmware_work *fw_work; 1033 const struct firmware *fw; 1034 1035 fw_work = container_of(work, struct firmware_work, work); 1036 1037 _request_firmware(&fw, fw_work->name, fw_work->device, NULL, 0, 0, 1038 fw_work->opt_flags); 1039 fw_work->cont(fw, fw_work->context); 1040 put_device(fw_work->device); /* taken in request_firmware_nowait() */ 1041 1042 module_put(fw_work->module); 1043 kfree_const(fw_work->name); 1044 kfree(fw_work); 1045 } 1046 1047 /** 1048 * request_firmware_nowait() - asynchronous version of request_firmware 1049 * @module: module requesting the firmware 1050 * @uevent: sends uevent to copy the firmware image if this flag 1051 * is non-zero else the firmware copy must be done manually. 1052 * @name: name of firmware file 1053 * @device: device for which firmware is being loaded 1054 * @gfp: allocation flags 1055 * @context: will be passed over to @cont, and 1056 * @fw may be %NULL if firmware request fails. 1057 * @cont: function will be called asynchronously when the firmware 1058 * request is over. 1059 * 1060 * Caller must hold the reference count of @device. 1061 * 1062 * Asynchronous variant of request_firmware() for user contexts: 1063 * - sleep for as small periods as possible since it may 1064 * increase kernel boot time of built-in device drivers 1065 * requesting firmware in their ->probe() methods, if 1066 * @gfp is GFP_KERNEL. 1067 * 1068 * - can't sleep at all if @gfp is GFP_ATOMIC. 
1069 **/ 1070 int 1071 request_firmware_nowait( 1072 struct module *module, bool uevent, 1073 const char *name, struct device *device, gfp_t gfp, void *context, 1074 void (*cont)(const struct firmware *fw, void *context)) 1075 { 1076 struct firmware_work *fw_work; 1077 1078 fw_work = kzalloc(sizeof(struct firmware_work), gfp); 1079 if (!fw_work) 1080 return -ENOMEM; 1081 1082 fw_work->module = module; 1083 fw_work->name = kstrdup_const(name, gfp); 1084 if (!fw_work->name) { 1085 kfree(fw_work); 1086 return -ENOMEM; 1087 } 1088 fw_work->device = device; 1089 fw_work->context = context; 1090 fw_work->cont = cont; 1091 fw_work->opt_flags = FW_OPT_NOWAIT | 1092 (uevent ? FW_OPT_UEVENT : FW_OPT_USERHELPER); 1093 1094 if (!uevent && fw_cache_is_setup(device, name)) { 1095 kfree_const(fw_work->name); 1096 kfree(fw_work); 1097 return -EOPNOTSUPP; 1098 } 1099 1100 if (!try_module_get(module)) { 1101 kfree_const(fw_work->name); 1102 kfree(fw_work); 1103 return -EFAULT; 1104 } 1105 1106 get_device(fw_work->device); 1107 INIT_WORK(&fw_work->work, request_firmware_work_func); 1108 schedule_work(&fw_work->work); 1109 return 0; 1110 } 1111 EXPORT_SYMBOL(request_firmware_nowait); 1112 1113 #ifdef CONFIG_FW_CACHE 1114 static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain); 1115 1116 /** 1117 * cache_firmware() - cache one firmware image in kernel memory space 1118 * @fw_name: the firmware image name 1119 * 1120 * Cache firmware in kernel memory so that drivers can use it when 1121 * system isn't ready for them to request firmware image from userspace. 
1122 * Once it returns successfully, driver can use request_firmware or its 1123 * nowait version to get the cached firmware without any interacting 1124 * with userspace 1125 * 1126 * Return 0 if the firmware image has been cached successfully 1127 * Return !0 otherwise 1128 * 1129 */ 1130 static int cache_firmware(const char *fw_name) 1131 { 1132 int ret; 1133 const struct firmware *fw; 1134 1135 pr_debug("%s: %s\n", __func__, fw_name); 1136 1137 ret = request_firmware(&fw, fw_name, NULL); 1138 if (!ret) 1139 kfree(fw); 1140 1141 pr_debug("%s: %s ret=%d\n", __func__, fw_name, ret); 1142 1143 return ret; 1144 } 1145 1146 static struct fw_priv *lookup_fw_priv(const char *fw_name) 1147 { 1148 struct fw_priv *tmp; 1149 struct firmware_cache *fwc = &fw_cache; 1150 1151 spin_lock(&fwc->lock); 1152 tmp = __lookup_fw_priv(fw_name); 1153 spin_unlock(&fwc->lock); 1154 1155 return tmp; 1156 } 1157 1158 /** 1159 * uncache_firmware() - remove one cached firmware image 1160 * @fw_name: the firmware image name 1161 * 1162 * Uncache one firmware image which has been cached successfully 1163 * before. 
 *
 * Return 0 if the firmware cache has been removed successfully
 * Return !0 otherwise
 *
 */
static int uncache_firmware(const char *fw_name)
{
	struct fw_priv *fw_priv;
	struct firmware fw;

	pr_debug("%s: %s\n", __func__, fw_name);

	/* Built-in images are never in the cache; nothing to drop. */
	if (firmware_request_builtin(&fw, fw_name))
		return 0;

	fw_priv = lookup_fw_priv(fw_name);
	if (fw_priv) {
		/*
		 * Presumably drops the reference taken by
		 * fw_cache_piggyback_on_request() — confirm in
		 * free_fw_priv() (defined elsewhere in this file).
		 */
		free_fw_priv(fw_priv);
		return 0;
	}

	return -EINVAL;
}

/*
 * Allocate a cache-entry record for @name. GFP_ATOMIC because callers
 * (fw_cache_piggyback_on_request()) hold fwc->name_lock, a spinlock.
 * Returns NULL on allocation failure.
 */
static struct fw_cache_entry *alloc_fw_cache_entry(const char *name)
{
	struct fw_cache_entry *fce;

	fce = kzalloc(sizeof(*fce), GFP_ATOMIC);
	if (!fce)
		goto exit;

	fce->name = kstrdup_const(name, GFP_ATOMIC);
	if (!fce->name) {
		kfree(fce);
		fce = NULL;
		goto exit;
	}
exit:
	return fce;
}

/* Caller must hold fw_cache.name_lock. Returns 1 if @name is already
 * on the fw_names list, 0 otherwise. */
static int __fw_entry_found(const char *name)
{
	struct firmware_cache *fwc = &fw_cache;
	struct fw_cache_entry *fce;

	list_for_each_entry(fce, &fwc->fw_names, list) {
		if (!strcmp(fce->name, name))
			return 1;
	}
	return 0;
}

/*
 * Record @fw_priv's name on the cache list and take an extra kref so
 * the image survives until the uncache pass. No-op when the name is
 * already recorded; silently skipped if the entry allocation fails.
 */
static void fw_cache_piggyback_on_request(struct fw_priv *fw_priv)
{
	const char *name = fw_priv->fw_name;
	struct firmware_cache *fwc = fw_priv->fwc;
	struct fw_cache_entry *fce;

	spin_lock(&fwc->name_lock);
	if (__fw_entry_found(name))
		goto found;

	fce = alloc_fw_cache_entry(name);
	if (fce) {
		list_add(&fce->list, &fwc->fw_names);
		kref_get(&fw_priv->ref);
		pr_debug("%s: fw: %s\n", __func__, name);
	}
found:
	spin_unlock(&fwc->name_lock);
}

static void free_fw_cache_entry(struct fw_cache_entry *fce)
{
	kfree_const(fce->name);
	kfree(fce);
}

/* Async worker: cache one image; on failure unlink and free its entry
 * so the uncache pass never sees a name that was not cached. */
static void __async_dev_cache_fw_image(void *fw_entry,
				       async_cookie_t cookie)
{
	struct fw_cache_entry *fce = fw_entry;
	struct firmware_cache *fwc = &fw_cache;
	int ret;

	ret = cache_firmware(fce->name);
	if (ret) {
		spin_lock(&fwc->name_lock);
		list_del(&fce->list);
		spin_unlock(&fwc->name_lock);

		free_fw_cache_entry(fce);
	}
}

/* called with dev->devres_lock held */
static void dev_create_fw_entry(struct device *dev, void *res,
				void *data)
{
	struct fw_name_devm *fwn = res;
	const char *fw_name = fwn->name;
	struct list_head *head = data;
	struct fw_cache_entry *fce;

	fce = alloc_fw_cache_entry(fw_name);
	if (fce)
		list_add(&fce->list, head);
}

/* devres match callback: select fw_name_devm resources by magic. */
static int devm_name_match(struct device *dev, void *res,
			   void *match_data)
{
	struct fw_name_devm *fwn = res;
	return (fwn->magic == (unsigned long)match_data);
}

/*
 * Collect every firmware name recorded in @dev's devres list onto a
 * private "todo" list, then move each name not already being cached
 * onto fw_cache.fw_names and kick off an async cache job for it.
 */
static void dev_cache_fw_image(struct device *dev, void *data)
{
	LIST_HEAD(todo);
	struct fw_cache_entry *fce;
	struct fw_cache_entry *fce_next;
	struct firmware_cache *fwc = &fw_cache;

	devres_for_each_res(dev, fw_name_devm_release,
			    devm_name_match, &fw_cache,
			    dev_create_fw_entry, &todo);

	list_for_each_entry_safe(fce, fce_next, &todo, list) {
		list_del(&fce->list);

		spin_lock(&fwc->name_lock);
		/* only one cache entry for one firmware */
		if (!__fw_entry_found(fce->name)) {
			list_add(&fce->list, &fwc->fw_names);
		} else {
			free_fw_cache_entry(fce);
			fce = NULL;	/* duplicate: nothing to schedule */
		}
		spin_unlock(&fwc->name_lock);

		if (fce)
			async_schedule_domain(__async_dev_cache_fw_image,
					      (void *)fce,
					      &fw_cache_domain);
	}
}

/*
 * Drain fw_cache.fw_names, uncaching each image. name_lock is dropped
 * around the per-entry work because uncache_firmware() takes other
 * locks (fwc->lock via lookup_fw_priv()) and frees memory; the entry
 * is already unlinked, so dropping the lock is safe.
 */
static void __device_uncache_fw_images(void)
{
	struct firmware_cache *fwc = &fw_cache;
	struct fw_cache_entry *fce;

	spin_lock(&fwc->name_lock);
	while (!list_empty(&fwc->fw_names)) {
		fce = list_entry(fwc->fw_names.next,
				 struct fw_cache_entry, list);
		list_del(&fce->list);

		spin_unlock(&fwc->name_lock);

		uncache_firmware(fce->name);
		free_fw_cache_entry(fce);

		spin_lock(&fwc->name_lock);
	}
	spin_unlock(&fwc->name_lock);
}

/**
 * device_cache_fw_images() - cache devices' firmware
 *
 * If one device called request_firmware or its nowait version
 * successfully before, the firmware names are recorded into the
 * device's devres link list, so device_cache_fw_images can call
 * cache_firmware() to cache these firmwares for the device,
 * then the device driver can load its firmwares easily at
 * time when system is not ready to complete loading firmware.
 */
static void device_cache_fw_images(void)
{
	struct firmware_cache *fwc = &fw_cache;
	/* NOTE(review): 'wait' appears unused — candidate for removal. */
	DEFINE_WAIT(wait);

	pr_debug("%s\n", __func__);

	/* cancel uncache work */
	cancel_delayed_work_sync(&fwc->work);

	fw_fallback_set_cache_timeout();

	mutex_lock(&fw_lock);
	fwc->state = FW_LOADER_START_CACHE;
	dpm_for_each_dev(NULL, dev_cache_fw_image);
	mutex_unlock(&fw_lock);

	/* wait for completion of caching firmware for all devices */
	async_synchronize_full_domain(&fw_cache_domain);

	fw_fallback_set_default_timeout();
}

/**
 * device_uncache_fw_images() - uncache devices' firmware
 *
 * uncache all firmwares which have been cached successfully
 * by device_cache_fw_images earlier
 */
static void device_uncache_fw_images(void)
{
	pr_debug("%s\n", __func__);
	__device_uncache_fw_images();
}

/* Delayed-work callback, wired up in register_fw_pm_ops(). */
static void device_uncache_fw_images_work(struct work_struct *work)
{
	device_uncache_fw_images();
}

/**
 * device_uncache_fw_images_delay() - uncache devices firmwares
 * @delay: number of milliseconds to delay uncache device firmwares
 *
 * uncache all devices' firmwares which have been cached successfully
 * by
device_cache_fw_images after @delay milliseconds. 1389 */ 1390 static void device_uncache_fw_images_delay(unsigned long delay) 1391 { 1392 queue_delayed_work(system_power_efficient_wq, &fw_cache.work, 1393 msecs_to_jiffies(delay)); 1394 } 1395 1396 static int fw_pm_notify(struct notifier_block *notify_block, 1397 unsigned long mode, void *unused) 1398 { 1399 switch (mode) { 1400 case PM_HIBERNATION_PREPARE: 1401 case PM_SUSPEND_PREPARE: 1402 case PM_RESTORE_PREPARE: 1403 /* 1404 * kill pending fallback requests with a custom fallback 1405 * to avoid stalling suspend. 1406 */ 1407 kill_pending_fw_fallback_reqs(true); 1408 device_cache_fw_images(); 1409 break; 1410 1411 case PM_POST_SUSPEND: 1412 case PM_POST_HIBERNATION: 1413 case PM_POST_RESTORE: 1414 /* 1415 * In case that system sleep failed and syscore_suspend is 1416 * not called. 1417 */ 1418 mutex_lock(&fw_lock); 1419 fw_cache.state = FW_LOADER_NO_CACHE; 1420 mutex_unlock(&fw_lock); 1421 1422 device_uncache_fw_images_delay(10 * MSEC_PER_SEC); 1423 break; 1424 } 1425 1426 return 0; 1427 } 1428 1429 /* stop caching firmware once syscore_suspend is reached */ 1430 static int fw_suspend(void) 1431 { 1432 fw_cache.state = FW_LOADER_NO_CACHE; 1433 return 0; 1434 } 1435 1436 static struct syscore_ops fw_syscore_ops = { 1437 .suspend = fw_suspend, 1438 }; 1439 1440 static int __init register_fw_pm_ops(void) 1441 { 1442 int ret; 1443 1444 spin_lock_init(&fw_cache.name_lock); 1445 INIT_LIST_HEAD(&fw_cache.fw_names); 1446 1447 INIT_DELAYED_WORK(&fw_cache.work, 1448 device_uncache_fw_images_work); 1449 1450 fw_cache.pm_notify.notifier_call = fw_pm_notify; 1451 ret = register_pm_notifier(&fw_cache.pm_notify); 1452 if (ret) 1453 return ret; 1454 1455 register_syscore_ops(&fw_syscore_ops); 1456 1457 return ret; 1458 } 1459 1460 static inline void unregister_fw_pm_ops(void) 1461 { 1462 unregister_syscore_ops(&fw_syscore_ops); 1463 unregister_pm_notifier(&fw_cache.pm_notify); 1464 } 1465 #else 1466 static void 
fw_cache_piggyback_on_request(struct fw_priv *fw_priv) 1467 { 1468 } 1469 static inline int register_fw_pm_ops(void) 1470 { 1471 return 0; 1472 } 1473 static inline void unregister_fw_pm_ops(void) 1474 { 1475 } 1476 #endif 1477 1478 static void __init fw_cache_init(void) 1479 { 1480 spin_lock_init(&fw_cache.lock); 1481 INIT_LIST_HEAD(&fw_cache.head); 1482 fw_cache.state = FW_LOADER_NO_CACHE; 1483 } 1484 1485 static int fw_shutdown_notify(struct notifier_block *unused1, 1486 unsigned long unused2, void *unused3) 1487 { 1488 /* 1489 * Kill all pending fallback requests to avoid both stalling shutdown, 1490 * and avoid a deadlock with the usermode_lock. 1491 */ 1492 kill_pending_fw_fallback_reqs(false); 1493 1494 return NOTIFY_DONE; 1495 } 1496 1497 static struct notifier_block fw_shutdown_nb = { 1498 .notifier_call = fw_shutdown_notify, 1499 }; 1500 1501 static int __init firmware_class_init(void) 1502 { 1503 int ret; 1504 1505 /* No need to unfold these on exit */ 1506 fw_cache_init(); 1507 1508 ret = register_fw_pm_ops(); 1509 if (ret) 1510 return ret; 1511 1512 ret = register_reboot_notifier(&fw_shutdown_nb); 1513 if (ret) 1514 goto out; 1515 1516 return register_sysfs_loader(); 1517 1518 out: 1519 unregister_fw_pm_ops(); 1520 return ret; 1521 } 1522 1523 static void __exit firmware_class_exit(void) 1524 { 1525 unregister_fw_pm_ops(); 1526 unregister_reboot_notifier(&fw_shutdown_nb); 1527 unregister_sysfs_loader(); 1528 } 1529 1530 fs_initcall(firmware_class_init); 1531 module_exit(firmware_class_exit); 1532