// SPDX-License-Identifier: GPL-2.0

#include <linux/types.h>
#include <linux/kconfig.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/highmem.h>
#include <linux/umh.h>
#include <linux/sysctl.h>
#include <linux/vmalloc.h>
#include <linux/module.h>

#include "fallback.h"
#include "firmware.h"

/*
 * firmware fallback mechanism
 */

MODULE_IMPORT_NS(FIRMWARE_LOADER_PRIVATE);

extern struct firmware_fallback_config fw_fallback_config;

/* These getters are vetted to use int properly */
static inline int __firmware_loading_timeout(void)
{
        return fw_fallback_config.loading_timeout;
}

/* These setters are vetted to use int properly */
static void __fw_fallback_set_timeout(int timeout)
{
        fw_fallback_config.loading_timeout = timeout;
}

/*
 * Use a small loading timeout for caching devices' firmware because all of
 * these firmware images have been loaded successfully at least once, and the
 * system is now ready to complete firmware loading. The maximum firmware size
 * in current distributions is about 2 MB, so 10 seconds should be enough.
 */
void fw_fallback_set_cache_timeout(void)
{
        fw_fallback_config.old_timeout = __firmware_loading_timeout();
        __fw_fallback_set_timeout(10);
}

/* Restores the timeout to the value last configured during normal operation */
void fw_fallback_set_default_timeout(void)
{
        __fw_fallback_set_timeout(fw_fallback_config.old_timeout);
}

static long firmware_loading_timeout(void)
{
        return __firmware_loading_timeout() > 0 ?
                __firmware_loading_timeout() * HZ : MAX_JIFFY_OFFSET;
}

static inline bool fw_sysfs_done(struct fw_priv *fw_priv)
{
        return __fw_state_check(fw_priv, FW_STATUS_DONE);
}

static inline bool fw_sysfs_loading(struct fw_priv *fw_priv)
{
        return __fw_state_check(fw_priv, FW_STATUS_LOADING);
}

static inline int fw_sysfs_wait_timeout(struct fw_priv *fw_priv, long timeout)
{
        return __fw_state_wait_common(fw_priv, timeout);
}

struct fw_sysfs {
        bool nowait;
        struct device dev;
        struct fw_priv *fw_priv;
        struct firmware *fw;
};

static struct fw_sysfs *to_fw_sysfs(struct device *dev)
{
        return container_of(dev, struct fw_sysfs, dev);
}

static void __fw_load_abort(struct fw_priv *fw_priv)
{
        /*
         * There is a small window in which the user can write to 'loading'
         * between loading done/aborted and disappearance of 'loading'.
         */
        if (fw_state_is_aborted(fw_priv) || fw_sysfs_done(fw_priv))
                return;

        fw_state_aborted(fw_priv);
}

static void fw_load_abort(struct fw_sysfs *fw_sysfs)
{
        struct fw_priv *fw_priv = fw_sysfs->fw_priv;

        __fw_load_abort(fw_priv);
}

static LIST_HEAD(pending_fw_head);

void kill_pending_fw_fallback_reqs(bool only_kill_custom)
{
        struct fw_priv *fw_priv;
        struct fw_priv *next;

        mutex_lock(&fw_lock);
        list_for_each_entry_safe(fw_priv, next, &pending_fw_head,
                                 pending_list) {
                if (!fw_priv->need_uevent || !only_kill_custom)
                        __fw_load_abort(fw_priv);
        }
        mutex_unlock(&fw_lock);
}

static ssize_t timeout_show(struct class *class, struct class_attribute *attr,
                            char *buf)
{
        return sysfs_emit(buf, "%d\n", __firmware_loading_timeout());
}
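
/*
 * Note: 'timeout' is a class attribute, visible as /sys/class/firmware/timeout.
 * For example, "echo 60 > /sys/class/firmware/timeout" gives userspace 60
 * seconds to supply an image before a pending fallback request fails.
 */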
/**
 * timeout_store() - set number of seconds to wait for firmware
 * @class: device class pointer
 * @attr: device attribute pointer
 * @buf: buffer to scan for timeout value
 * @count: number of bytes in @buf
 *
 * Sets the number of seconds to wait for the firmware. Once
 * this expires an error will be returned to the driver and no
 * firmware will be provided.
 *
 * Note: zero means 'wait forever'.
 **/
static ssize_t timeout_store(struct class *class, struct class_attribute *attr,
                             const char *buf, size_t count)
{
        int tmp_loading_timeout = simple_strtol(buf, NULL, 10);

        if (tmp_loading_timeout < 0)
                tmp_loading_timeout = 0;

        __fw_fallback_set_timeout(tmp_loading_timeout);

        return count;
}
static CLASS_ATTR_RW(timeout);

static struct attribute *firmware_class_attrs[] = {
        &class_attr_timeout.attr,
        NULL,
};
ATTRIBUTE_GROUPS(firmware_class);

static void fw_dev_release(struct device *dev)
{
        struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);

        kfree(fw_sysfs);
}

static int do_firmware_uevent(struct fw_sysfs *fw_sysfs, struct kobj_uevent_env *env)
{
        if (add_uevent_var(env, "FIRMWARE=%s", fw_sysfs->fw_priv->fw_name))
                return -ENOMEM;
        if (add_uevent_var(env, "TIMEOUT=%i", __firmware_loading_timeout()))
                return -ENOMEM;
        if (add_uevent_var(env, "ASYNC=%d", fw_sysfs->nowait))
                return -ENOMEM;

        return 0;
}

static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
        int err = 0;

        mutex_lock(&fw_lock);
        if (fw_sysfs->fw_priv)
                err = do_firmware_uevent(fw_sysfs, env);
        mutex_unlock(&fw_lock);
        return err;
}

static struct class firmware_class = {
        .name           = "firmware",
        .class_groups   = firmware_class_groups,
        .dev_uevent     = firmware_uevent,
        .dev_release    = fw_dev_release,
};

int register_sysfs_loader(void)
{
        int ret = class_register(&firmware_class);

        if (ret != 0)
                return ret;
        return register_firmware_config_sysctl();
}

void unregister_sysfs_loader(void)
{
        unregister_firmware_config_sysctl();
        class_unregister(&firmware_class);
}

static ssize_t firmware_loading_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
        int loading = 0;

        mutex_lock(&fw_lock);
        if (fw_sysfs->fw_priv)
                loading = fw_sysfs_loading(fw_sysfs->fw_priv);
        mutex_unlock(&fw_lock);

        return sysfs_emit(buf, "%d\n", loading);
}
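
/*
 * Typical userspace sequence for feeding an image through this interface,
 * using the $DEVPATH and $FIRMWARE values reported by the firmware uevent
 * (shown as shell, for illustration only):
 *
 *      echo 1 > /sys/$DEVPATH/loading
 *      cat /lib/firmware/$FIRMWARE > /sys/$DEVPATH/data
 *      echo 0 > /sys/$DEVPATH/loading
 *
 * Writing -1 to 'loading' at any point aborts the request with an error.
 */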
/**
 * firmware_loading_store() - set value in the 'loading' control file
 * @dev: device pointer
 * @attr: device attribute pointer
 * @buf: buffer to scan for loading control value
 * @count: number of bytes in @buf
 *
 * The relevant values are:
 *
 *  1: Start a load, discarding any previous partial load.
 *  0: Conclude the load and hand the data to the driver code.
 * -1: Conclude the load with an error and discard any written data.
 **/
static ssize_t firmware_loading_store(struct device *dev,
                                      struct device_attribute *attr,
                                      const char *buf, size_t count)
{
        struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
        struct fw_priv *fw_priv;
        ssize_t written = count;
        int loading = simple_strtol(buf, NULL, 10);

        mutex_lock(&fw_lock);
        fw_priv = fw_sysfs->fw_priv;
        if (fw_state_is_aborted(fw_priv))
                goto out;

        switch (loading) {
        case 1:
                /* discarding any previous partial load */
                if (!fw_sysfs_done(fw_priv)) {
                        fw_free_paged_buf(fw_priv);
                        fw_state_start(fw_priv);
                }
                break;
        case 0:
                if (fw_sysfs_loading(fw_priv)) {
                        int rc;

                        /*
                         * Several loading requests may be pending on
                         * one same firmware buf, so let all requests
                         * see the mapped 'buf->data' once the loading
                         * is completed.
                         */
                        rc = fw_map_paged_buf(fw_priv);
                        if (rc)
                                dev_err(dev, "%s: map pages failed\n",
                                        __func__);
                        else
                                rc = security_kernel_post_load_data(fw_priv->data,
                                                                    fw_priv->size,
                                                                    LOADING_FIRMWARE,
                                                                    "blob");

                        /*
                         * Same logic as fw_load_abort, only the DONE bit
                         * is ignored and we set ABORT only on failure.
                         */
                        if (rc) {
                                fw_state_aborted(fw_priv);
                                written = rc;
                        } else {
                                fw_state_done(fw_priv);
                        }
                        break;
                }
                fallthrough;
        default:
                dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
                fallthrough;
        case -1:
                fw_load_abort(fw_sysfs);
                break;
        }
out:
        mutex_unlock(&fw_lock);
        return written;
}

static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);

static void firmware_rw_data(struct fw_priv *fw_priv, char *buffer,
                             loff_t offset, size_t count, bool read)
{
        if (read)
                memcpy(buffer, fw_priv->data + offset, count);
        else
                memcpy(fw_priv->data + offset, buffer, count);
}

static void firmware_rw(struct fw_priv *fw_priv, char *buffer,
                        loff_t offset, size_t count, bool read)
{
        while (count) {
                void *page_data;
                int page_nr = offset >> PAGE_SHIFT;
                int page_ofs = offset & (PAGE_SIZE - 1);
                int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);

                page_data = kmap(fw_priv->pages[page_nr]);

                if (read)
                        memcpy(buffer, page_data + page_ofs, page_cnt);
                else
                        memcpy(page_data + page_ofs, buffer, page_cnt);

                kunmap(fw_priv->pages[page_nr]);
                buffer += page_cnt;
                offset += page_cnt;
                count -= page_cnt;
        }
}

static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
                                  struct bin_attribute *bin_attr,
                                  char *buffer, loff_t offset, size_t count)
{
        struct device *dev = kobj_to_dev(kobj);
        struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
        struct fw_priv *fw_priv;
        ssize_t ret_count;

        mutex_lock(&fw_lock);
        fw_priv = fw_sysfs->fw_priv;
        if (!fw_priv || fw_sysfs_done(fw_priv)) {
                ret_count = -ENODEV;
                goto out;
        }
        if (offset > fw_priv->size) {
                ret_count = 0;
                goto out;
        }
        if (count > fw_priv->size - offset)
                count = fw_priv->size - offset;

        ret_count = count;

        if (fw_priv->data)
                firmware_rw_data(fw_priv, buffer, offset, count, true);
        else
                firmware_rw(fw_priv, buffer, offset, count, true);

out:
        mutex_unlock(&fw_lock);
        return ret_count;
}
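
/*
 * Writes into 'data' that land beyond the currently allocated paged buffer
 * grow the buffer on demand; if growing fails, the pending request is
 * aborted so userspace is not left writing into a buffer that can never be
 * completed.
 */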
static int fw_realloc_pages(struct fw_sysfs *fw_sysfs, int min_size)
{
        int err;

        err = fw_grow_paged_buf(fw_sysfs->fw_priv,
                                PAGE_ALIGN(min_size) >> PAGE_SHIFT);
        if (err)
                fw_load_abort(fw_sysfs);
        return err;
}

/**
 * firmware_data_write() - write method for firmware
 * @filp: open sysfs file
 * @kobj: kobject for the device
 * @bin_attr: bin_attr structure
 * @buffer: buffer being written
 * @offset: buffer offset for write in total data store area
 * @count: buffer size
 *
 * Data written to the 'data' attribute will be later handed to
 * the driver as a firmware image.
 **/
static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
                                   struct bin_attribute *bin_attr,
                                   char *buffer, loff_t offset, size_t count)
{
        struct device *dev = kobj_to_dev(kobj);
        struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
        struct fw_priv *fw_priv;
        ssize_t retval;

        if (!capable(CAP_SYS_RAWIO))
                return -EPERM;

        mutex_lock(&fw_lock);
        fw_priv = fw_sysfs->fw_priv;
        if (!fw_priv || fw_sysfs_done(fw_priv)) {
                retval = -ENODEV;
                goto out;
        }

        if (fw_priv->data) {
                if (offset + count > fw_priv->allocated_size) {
                        retval = -ENOMEM;
                        goto out;
                }
                firmware_rw_data(fw_priv, buffer, offset, count, false);
                retval = count;
        } else {
                retval = fw_realloc_pages(fw_sysfs, offset + count);
                if (retval)
                        goto out;

                retval = count;
                firmware_rw(fw_priv, buffer, offset, count, false);
        }

        fw_priv->size = max_t(size_t, offset + count, fw_priv->size);
out:
        mutex_unlock(&fw_lock);
        return retval;
}

static struct bin_attribute firmware_attr_data = {
        .attr = { .name = "data", .mode = 0644 },
        .size = 0,
        .read = firmware_data_read,
        .write = firmware_data_write,
};

static struct attribute *fw_dev_attrs[] = {
        &dev_attr_loading.attr,
        NULL
};

static struct bin_attribute *fw_dev_bin_attrs[] = {
        &firmware_attr_data,
        NULL
};

static const struct attribute_group fw_dev_attr_group = {
        .attrs = fw_dev_attrs,
        .bin_attrs = fw_dev_bin_attrs,
};

static const struct attribute_group *fw_dev_attr_groups[] = {
        &fw_dev_attr_group,
        NULL
};

static struct fw_sysfs *
fw_create_instance(struct firmware *firmware, const char *fw_name,
                   struct device *device, u32 opt_flags)
{
        struct fw_sysfs *fw_sysfs;
        struct device *f_dev;

        fw_sysfs = kzalloc(sizeof(*fw_sysfs), GFP_KERNEL);
        if (!fw_sysfs) {
                fw_sysfs = ERR_PTR(-ENOMEM);
                goto exit;
        }

        fw_sysfs->nowait = !!(opt_flags & FW_OPT_NOWAIT);
        fw_sysfs->fw = firmware;
        f_dev = &fw_sysfs->dev;

        device_initialize(f_dev);
        dev_set_name(f_dev, "%s", fw_name);
        f_dev->parent = device;
        f_dev->class = &firmware_class;
        f_dev->groups = fw_dev_attr_groups;
exit:
        return fw_sysfs;
}
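
/*
 * Note: the fallback device created above is registered in the 'firmware'
 * class, so its 'loading' and 'data' attributes show up under
 * /sys/class/firmware/<fw_name>/. When FW_OPT_UEVENT is set, a KOBJ_ADD
 * uevent announces it to userspace; otherwise the kernel waits (effectively
 * forever) for userspace to drive 'loading'/'data' by hand.
 */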
/**
 * fw_load_sysfs_fallback() - load a firmware via the sysfs fallback mechanism
 * @fw_sysfs: firmware sysfs information for the firmware to load
 * @timeout: timeout to wait for the load
 *
 * In charge of constructing a sysfs fallback interface for firmware loading.
 **/
static int fw_load_sysfs_fallback(struct fw_sysfs *fw_sysfs, long timeout)
{
        int retval = 0;
        struct device *f_dev = &fw_sysfs->dev;
        struct fw_priv *fw_priv = fw_sysfs->fw_priv;

        /* fall back on userspace loading */
        if (!fw_priv->data)
                fw_priv->is_paged_buf = true;

        dev_set_uevent_suppress(f_dev, true);

        retval = device_add(f_dev);
        if (retval) {
                dev_err(f_dev, "%s: device_register failed\n", __func__);
                goto err_put_dev;
        }

        mutex_lock(&fw_lock);
        if (fw_state_is_aborted(fw_priv)) {
                mutex_unlock(&fw_lock);
                retval = -EINTR;
                goto out;
        }
        list_add(&fw_priv->pending_list, &pending_fw_head);
        mutex_unlock(&fw_lock);

        if (fw_priv->opt_flags & FW_OPT_UEVENT) {
                fw_priv->need_uevent = true;
                dev_set_uevent_suppress(f_dev, false);
                dev_dbg(f_dev, "firmware: requesting %s\n", fw_priv->fw_name);
                kobject_uevent(&fw_sysfs->dev.kobj, KOBJ_ADD);
        } else {
                timeout = MAX_JIFFY_OFFSET;
        }

        retval = fw_sysfs_wait_timeout(fw_priv, timeout);
        if (retval < 0 && retval != -ENOENT) {
                mutex_lock(&fw_lock);
                fw_load_abort(fw_sysfs);
                mutex_unlock(&fw_lock);
        }

        if (fw_state_is_aborted(fw_priv)) {
                if (retval == -ERESTARTSYS)
                        retval = -EINTR;
        } else if (fw_priv->is_paged_buf && !fw_priv->data)
                retval = -ENOMEM;

out:
        device_del(f_dev);
err_put_dev:
        put_device(f_dev);
        return retval;
}

static int fw_load_from_user_helper(struct firmware *firmware,
                                    const char *name, struct device *device,
                                    u32 opt_flags)
{
        struct fw_sysfs *fw_sysfs;
        long timeout;
        int ret;

        timeout = firmware_loading_timeout();
        if (opt_flags & FW_OPT_NOWAIT) {
                timeout = usermodehelper_read_lock_wait(timeout);
                if (!timeout) {
                        dev_dbg(device, "firmware: %s loading timed out\n",
                                name);
                        return -EBUSY;
                }
        } else {
                ret = usermodehelper_read_trylock();
                if (WARN_ON(ret)) {
                        dev_err(device, "firmware: %s will not be loaded\n",
                                name);
                        return ret;
                }
        }

        fw_sysfs = fw_create_instance(firmware, name, device, opt_flags);
        if (IS_ERR(fw_sysfs)) {
                ret = PTR_ERR(fw_sysfs);
                goto out_unlock;
        }

        fw_sysfs->fw_priv = firmware->priv;
        ret = fw_load_sysfs_fallback(fw_sysfs, timeout);

        if (!ret)
                ret = assign_fw(firmware, device);

out_unlock:
        usermodehelper_read_unlock();

        return ret;
}

static bool fw_force_sysfs_fallback(u32 opt_flags)
{
        if (fw_fallback_config.force_sysfs_fallback)
                return true;
        if (!(opt_flags & FW_OPT_USERHELPER))
                return false;
        return true;
}

static bool fw_run_sysfs_fallback(u32 opt_flags)
{
        int ret;

        if (fw_fallback_config.ignore_sysfs_fallback) {
                pr_info_once("Ignoring firmware sysfs fallback due to sysctl knob\n");
                return false;
        }

        if ((opt_flags & FW_OPT_NOFALLBACK_SYSFS))
                return false;

        /* Also permit LSMs and IMA to fail firmware sysfs fallback */
        ret = security_kernel_load_data(LOADING_FIRMWARE, true);
        if (ret < 0)
                return false;

        return fw_force_sysfs_fallback(opt_flags);
}
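
/*
 * The force_sysfs_fallback and ignore_sysfs_fallback knobs checked above are
 * the sysctls registered by register_firmware_config_sysctl(), typically
 * exposed as /proc/sys/kernel/firmware_config/force_sysfs_fallback and
 * /proc/sys/kernel/firmware_config/ignore_sysfs_fallback.
 */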
/**
 * firmware_fallback_sysfs() - use the fallback mechanism to find firmware
 * @fw: pointer to firmware image
 * @name: name of firmware file to look for
 * @device: device for which firmware is being loaded
 * @opt_flags: options to control firmware loading behaviour, as defined by
 *	       &enum fw_opt
 * @ret: return value from direct lookup which triggered the fallback mechanism
 *
 * This function is called if direct lookup for the firmware failed; it enables
 * a fallback mechanism through userspace by exposing a sysfs loading
 * interface. Userspace is in charge of loading the firmware through that
 * interface. This sysfs fallback mechanism may be disabled completely on a
 * system by setting the proc sysctl value ignore_sysfs_fallback to true. If
 * that is false, we also check whether the internal API caller set the
 * @FW_OPT_NOFALLBACK_SYSFS flag, which likewise disables the fallback
 * mechanism. A system that wants to enforce the sysfs fallback mechanism at
 * all times can do so by setting ignore_sysfs_fallback to false and
 * force_sysfs_fallback to true.
 * Enabling force_sysfs_fallback is functionally equivalent to building a
 * kernel with CONFIG_FW_LOADER_USER_HELPER_FALLBACK.
 **/
int firmware_fallback_sysfs(struct firmware *fw, const char *name,
                            struct device *device,
                            u32 opt_flags,
                            int ret)
{
        if (!fw_run_sysfs_fallback(opt_flags))
                return ret;

        if (!(opt_flags & FW_OPT_NO_WARN))
                dev_warn(device, "Falling back to sysfs fallback for: %s\n",
                         name);
        else
                dev_dbg(device, "Falling back to sysfs fallback for: %s\n",
                        name);
        return fw_load_from_user_helper(fw, name, device, opt_flags);
}