// SPDX-License-Identifier: GPL-2.0

#include <linux/types.h>
#include <linux/kconfig.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/highmem.h>
#include <linux/umh.h>
#include <linux/sysctl.h>
#include <linux/vmalloc.h>
#include <linux/module.h>

#include "fallback.h"
#include "firmware.h"

/*
 * firmware fallback mechanism
 */

MODULE_IMPORT_NS(FIRMWARE_LOADER_PRIVATE);

extern struct firmware_fallback_config fw_fallback_config;

/* These getters are vetted to use int properly */
static inline int __firmware_loading_timeout(void)
{
        return fw_fallback_config.loading_timeout;
}

/* These setters are vetted to use int properly */
static void __fw_fallback_set_timeout(int timeout)
{
        fw_fallback_config.loading_timeout = timeout;
}

/*
 * Use a small loading timeout for caching devices' firmware because all of
 * these firmware images have been loaded successfully at least once, and the
 * system is now ready to complete firmware loading. The maximum size of
 * firmware in current distributions is about 2M bytes, so 10 secs should be
 * enough.
 */
void fw_fallback_set_cache_timeout(void)
{
        fw_fallback_config.old_timeout = __firmware_loading_timeout();
        __fw_fallback_set_timeout(10);
}

/* Restores the timeout to the value last configured during normal operation */
void fw_fallback_set_default_timeout(void)
{
        __fw_fallback_set_timeout(fw_fallback_config.old_timeout);
}

static long firmware_loading_timeout(void)
{
        return __firmware_loading_timeout() > 0 ?
                __firmware_loading_timeout() * HZ : MAX_JIFFY_OFFSET;
}

static inline bool fw_sysfs_done(struct fw_priv *fw_priv)
{
        return __fw_state_check(fw_priv, FW_STATUS_DONE);
}

static inline bool fw_sysfs_loading(struct fw_priv *fw_priv)
{
        return __fw_state_check(fw_priv, FW_STATUS_LOADING);
}

static inline int fw_sysfs_wait_timeout(struct fw_priv *fw_priv, long timeout)
{
        return __fw_state_wait_common(fw_priv, timeout);
}

struct fw_sysfs {
        bool nowait;
        struct device dev;
        struct fw_priv *fw_priv;
        struct firmware *fw;
};

static struct fw_sysfs *to_fw_sysfs(struct device *dev)
{
        return container_of(dev, struct fw_sysfs, dev);
}

static void __fw_load_abort(struct fw_priv *fw_priv)
{
        /*
         * There is a small window in which user can write to 'loading'
         * between loading done/aborted and disappearance of 'loading'
         */
        if (fw_state_is_aborted(fw_priv) || fw_sysfs_done(fw_priv))
                return;

        fw_state_aborted(fw_priv);
}

static void fw_load_abort(struct fw_sysfs *fw_sysfs)
{
        struct fw_priv *fw_priv = fw_sysfs->fw_priv;

        __fw_load_abort(fw_priv);
}

static LIST_HEAD(pending_fw_head);

void kill_pending_fw_fallback_reqs(bool only_kill_custom)
{
        struct fw_priv *fw_priv;
        struct fw_priv *next;

        mutex_lock(&fw_lock);
        list_for_each_entry_safe(fw_priv, next, &pending_fw_head,
                                 pending_list) {
                if (!fw_priv->need_uevent || !only_kill_custom)
                        __fw_load_abort(fw_priv);
        }
        mutex_unlock(&fw_lock);
}

static ssize_t timeout_show(struct class *class, struct class_attribute *attr,
                            char *buf)
{
        return sysfs_emit(buf, "%d\n", __firmware_loading_timeout());
}

/**
 * timeout_store() - set number of seconds to wait for firmware
 * @class: device class pointer
 * @attr: device attribute pointer
 * @buf: buffer to scan for timeout value
 * @count: number of bytes in @buf
 *
 * Sets the number of seconds to wait for the firmware. Once
 * this expires an error will be returned to the driver and no
 * firmware will be provided.
 *
 * Note: zero means 'wait forever'.
 **/
static ssize_t timeout_store(struct class *class, struct class_attribute *attr,
                             const char *buf, size_t count)
{
        int tmp_loading_timeout = simple_strtol(buf, NULL, 10);

        if (tmp_loading_timeout < 0)
                tmp_loading_timeout = 0;

        __fw_fallback_set_timeout(tmp_loading_timeout);

        return count;
}
static CLASS_ATTR_RW(timeout);
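/*
 * Illustrative only, not part of the original file: the class attribute
 * defined above is exposed to userspace as /sys/class/firmware/timeout,
 * so the fallback wait can be tuned from a shell, e.g.:
 *
 *      echo 10 > /sys/class/firmware/timeout
 *
 * A value of 0 is treated as "wait forever" by firmware_loading_timeout().
 */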
static struct attribute *firmware_class_attrs[] = {
        &class_attr_timeout.attr,
        NULL,
};
ATTRIBUTE_GROUPS(firmware_class);

static void fw_dev_release(struct device *dev)
{
        struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);

        kfree(fw_sysfs);
}

static int do_firmware_uevent(struct fw_sysfs *fw_sysfs, struct kobj_uevent_env *env)
{
        if (add_uevent_var(env, "FIRMWARE=%s", fw_sysfs->fw_priv->fw_name))
                return -ENOMEM;
        if (add_uevent_var(env, "TIMEOUT=%i", __firmware_loading_timeout()))
                return -ENOMEM;
        if (add_uevent_var(env, "ASYNC=%d", fw_sysfs->nowait))
                return -ENOMEM;

        return 0;
}

static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
        int err = 0;

        mutex_lock(&fw_lock);
        if (fw_sysfs->fw_priv)
                err = do_firmware_uevent(fw_sysfs, env);
        mutex_unlock(&fw_lock);
        return err;
}

static struct class firmware_class = {
        .name = "firmware",
        .class_groups = firmware_class_groups,
        .dev_uevent = firmware_uevent,
        .dev_release = fw_dev_release,
};

int register_sysfs_loader(void)
{
        return class_register(&firmware_class);
}

void unregister_sysfs_loader(void)
{
        class_unregister(&firmware_class);
}

static ssize_t firmware_loading_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
        int loading = 0;

        mutex_lock(&fw_lock);
        if (fw_sysfs->fw_priv)
                loading = fw_sysfs_loading(fw_sysfs->fw_priv);
        mutex_unlock(&fw_lock);

        return sysfs_emit(buf, "%d\n", loading);
}

/**
 * firmware_loading_store() - set value in the 'loading' control file
 * @dev: device pointer
 * @attr: device attribute pointer
 * @buf: buffer to scan for loading control value
 * @count: number of bytes in @buf
 *
 * The relevant values are:
 *
 *       1: Start a load, discarding any previous partial load.
 *       0: Conclude the load and hand the data to the driver code.
 *      -1: Conclude the load with an error and discard any written data.
 **/
static ssize_t firmware_loading_store(struct device *dev,
                                      struct device_attribute *attr,
                                      const char *buf, size_t count)
{
        struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
        struct fw_priv *fw_priv;
        ssize_t written = count;
        int loading = simple_strtol(buf, NULL, 10);

        mutex_lock(&fw_lock);
        fw_priv = fw_sysfs->fw_priv;
        if (fw_state_is_aborted(fw_priv))
                goto out;

        switch (loading) {
        case 1:
                /* discarding any previous partial load */
                if (!fw_sysfs_done(fw_priv)) {
                        fw_free_paged_buf(fw_priv);
                        fw_state_start(fw_priv);
                }
                break;
        case 0:
                if (fw_sysfs_loading(fw_priv)) {
                        int rc;

                        /*
                         * Several loading requests may be pending on
                         * the same firmware buf, so let all requests
                         * see the mapped 'buf->data' once the loading
                         * is completed.
                         */
                        rc = fw_map_paged_buf(fw_priv);
                        if (rc)
                                dev_err(dev, "%s: map pages failed\n",
                                        __func__);
                        else
                                rc = security_kernel_post_load_data(fw_priv->data,
                                                fw_priv->size,
                                                LOADING_FIRMWARE, "blob");

                        /*
                         * Same logic as fw_load_abort, only the DONE bit
                         * is ignored and we set ABORT only on failure.
                         */
                        if (rc) {
                                fw_state_aborted(fw_priv);
                                written = rc;
                        } else {
                                fw_state_done(fw_priv);
                        }
                        break;
                }
                fallthrough;
        default:
                dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
                fallthrough;
        case -1:
                fw_load_abort(fw_sysfs);
                break;
        }
out:
        mutex_unlock(&fw_lock);
        return written;
}

static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);
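/*
 * Illustrative sketch of the userspace half of the protocol handled above
 * (not code from this file); $DEVPATH comes from the firmware uevent and
 * $FIRMWARE names the requested image:
 *
 *      echo 1 > /sys/$DEVPATH/loading
 *      cat /lib/firmware/$FIRMWARE > /sys/$DEVPATH/data
 *      echo 0 > /sys/$DEVPATH/loading
 *
 * Writing -1 to 'loading' aborts the load instead, and the waiting driver
 * receives an error.
 */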
static void firmware_rw_data(struct fw_priv *fw_priv, char *buffer,
                             loff_t offset, size_t count, bool read)
{
        if (read)
                memcpy(buffer, fw_priv->data + offset, count);
        else
                memcpy(fw_priv->data + offset, buffer, count);
}

static void firmware_rw(struct fw_priv *fw_priv, char *buffer,
                        loff_t offset, size_t count, bool read)
{
        while (count) {
                void *page_data;
                int page_nr = offset >> PAGE_SHIFT;
                int page_ofs = offset & (PAGE_SIZE-1);
                int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);

                page_data = kmap(fw_priv->pages[page_nr]);

                if (read)
                        memcpy(buffer, page_data + page_ofs, page_cnt);
                else
                        memcpy(page_data + page_ofs, buffer, page_cnt);

                kunmap(fw_priv->pages[page_nr]);
                buffer += page_cnt;
                offset += page_cnt;
                count -= page_cnt;
        }
}

static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
                                  struct bin_attribute *bin_attr,
                                  char *buffer, loff_t offset, size_t count)
{
        struct device *dev = kobj_to_dev(kobj);
        struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
        struct fw_priv *fw_priv;
        ssize_t ret_count;

        mutex_lock(&fw_lock);
        fw_priv = fw_sysfs->fw_priv;
        if (!fw_priv || fw_sysfs_done(fw_priv)) {
                ret_count = -ENODEV;
                goto out;
        }
        if (offset > fw_priv->size) {
                ret_count = 0;
                goto out;
        }
        if (count > fw_priv->size - offset)
                count = fw_priv->size - offset;

        ret_count = count;

        if (fw_priv->data)
                firmware_rw_data(fw_priv, buffer, offset, count, true);
        else
                firmware_rw(fw_priv, buffer, offset, count, true);

out:
        mutex_unlock(&fw_lock);
        return ret_count;
}

static int fw_realloc_pages(struct fw_sysfs *fw_sysfs, int min_size)
{
        int err;

        err = fw_grow_paged_buf(fw_sysfs->fw_priv,
                                PAGE_ALIGN(min_size) >> PAGE_SHIFT);
        if (err)
                fw_load_abort(fw_sysfs);
        return err;
}
/**
 * firmware_data_write() - write method for firmware
 * @filp: open sysfs file
 * @kobj: kobject for the device
 * @bin_attr: bin_attr structure
 * @buffer: buffer being written
 * @offset: buffer offset for write in total data store area
 * @count: buffer size
 *
 * Data written to the 'data' attribute will be later handed to
 * the driver as a firmware image.
 **/
static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
                                   struct bin_attribute *bin_attr,
                                   char *buffer, loff_t offset, size_t count)
{
        struct device *dev = kobj_to_dev(kobj);
        struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
        struct fw_priv *fw_priv;
        ssize_t retval;

        if (!capable(CAP_SYS_RAWIO))
                return -EPERM;

        mutex_lock(&fw_lock);
        fw_priv = fw_sysfs->fw_priv;
        if (!fw_priv || fw_sysfs_done(fw_priv)) {
                retval = -ENODEV;
                goto out;
        }

        if (fw_priv->data) {
                if (offset + count > fw_priv->allocated_size) {
                        retval = -ENOMEM;
                        goto out;
                }
                firmware_rw_data(fw_priv, buffer, offset, count, false);
                retval = count;
        } else {
                retval = fw_realloc_pages(fw_sysfs, offset + count);
                if (retval)
                        goto out;

                retval = count;
                firmware_rw(fw_priv, buffer, offset, count, false);
        }

        fw_priv->size = max_t(size_t, offset + count, fw_priv->size);
out:
        mutex_unlock(&fw_lock);
        return retval;
}

static struct bin_attribute firmware_attr_data = {
        .attr = { .name = "data", .mode = 0644 },
        .size = 0,
        .read = firmware_data_read,
        .write = firmware_data_write,
};

static struct attribute *fw_dev_attrs[] = {
        &dev_attr_loading.attr,
        NULL
};

static struct bin_attribute *fw_dev_bin_attrs[] = {
        &firmware_attr_data,
        NULL
};

static const struct attribute_group fw_dev_attr_group = {
        .attrs = fw_dev_attrs,
        .bin_attrs = fw_dev_bin_attrs,
};

static const struct attribute_group *fw_dev_attr_groups[] = {
        &fw_dev_attr_group,
        NULL
};

static struct fw_sysfs *
fw_create_instance(struct firmware *firmware, const char *fw_name,
                   struct device *device, u32 opt_flags)
{
        struct fw_sysfs *fw_sysfs;
        struct device *f_dev;

        fw_sysfs = kzalloc(sizeof(*fw_sysfs), GFP_KERNEL);
        if (!fw_sysfs) {
                fw_sysfs = ERR_PTR(-ENOMEM);
                goto exit;
        }

        fw_sysfs->nowait = !!(opt_flags & FW_OPT_NOWAIT);
        fw_sysfs->fw = firmware;
        f_dev = &fw_sysfs->dev;

        device_initialize(f_dev);
        dev_set_name(f_dev, "%s", fw_name);
        f_dev->parent = device;
        f_dev->class = &firmware_class;
        f_dev->groups = fw_dev_attr_groups;
exit:
        return fw_sysfs;
}
/**
 * fw_load_sysfs_fallback() - load a firmware via the sysfs fallback mechanism
 * @fw_sysfs: firmware sysfs information for the firmware to load
 * @timeout: timeout to wait for the load
 *
 * In charge of constructing a sysfs fallback interface for firmware loading.
 **/
static int fw_load_sysfs_fallback(struct fw_sysfs *fw_sysfs, long timeout)
{
        int retval = 0;
        struct device *f_dev = &fw_sysfs->dev;
        struct fw_priv *fw_priv = fw_sysfs->fw_priv;

        /* fall back on userspace loading */
        if (!fw_priv->data)
                fw_priv->is_paged_buf = true;

        dev_set_uevent_suppress(f_dev, true);

        retval = device_add(f_dev);
        if (retval) {
                dev_err(f_dev, "%s: device_register failed\n", __func__);
                goto err_put_dev;
        }

        mutex_lock(&fw_lock);
        if (fw_state_is_aborted(fw_priv)) {
                mutex_unlock(&fw_lock);
                retval = -EINTR;
                goto out;
        }
        list_add(&fw_priv->pending_list, &pending_fw_head);
        mutex_unlock(&fw_lock);

        if (fw_priv->opt_flags & FW_OPT_UEVENT) {
                fw_priv->need_uevent = true;
                dev_set_uevent_suppress(f_dev, false);
                dev_dbg(f_dev, "firmware: requesting %s\n", fw_priv->fw_name);
                kobject_uevent(&fw_sysfs->dev.kobj, KOBJ_ADD);
        } else {
                timeout = MAX_JIFFY_OFFSET;
        }

        retval = fw_sysfs_wait_timeout(fw_priv, timeout);
        if (retval < 0 && retval != -ENOENT) {
                mutex_lock(&fw_lock);
                fw_load_abort(fw_sysfs);
                mutex_unlock(&fw_lock);
        }

        if (fw_state_is_aborted(fw_priv)) {
                if (retval == -ERESTARTSYS)
                        retval = -EINTR;
        } else if (fw_priv->is_paged_buf && !fw_priv->data)
                retval = -ENOMEM;

out:
        device_del(f_dev);
err_put_dev:
        put_device(f_dev);
        return retval;
}

static int fw_load_from_user_helper(struct firmware *firmware,
                                    const char *name, struct device *device,
                                    u32 opt_flags)
{
        struct fw_sysfs *fw_sysfs;
        long timeout;
        int ret;

        timeout = firmware_loading_timeout();
        if (opt_flags & FW_OPT_NOWAIT) {
                timeout = usermodehelper_read_lock_wait(timeout);
                if (!timeout) {
                        dev_dbg(device, "firmware: %s loading timed out\n",
                                name);
                        return -EBUSY;
                }
        } else {
                ret = usermodehelper_read_trylock();
                if (WARN_ON(ret)) {
                        dev_err(device, "firmware: %s will not be loaded\n",
                                name);
                        return ret;
                }
        }

        fw_sysfs = fw_create_instance(firmware, name, device, opt_flags);
        if (IS_ERR(fw_sysfs)) {
                ret = PTR_ERR(fw_sysfs);
                goto out_unlock;
        }

        fw_sysfs->fw_priv = firmware->priv;
        ret = fw_load_sysfs_fallback(fw_sysfs, timeout);

        if (!ret)
                ret = assign_fw(firmware, device);

out_unlock:
        usermodehelper_read_unlock();

        return ret;
}

static bool fw_force_sysfs_fallback(u32 opt_flags)
{
        if (fw_fallback_config.force_sysfs_fallback)
                return true;
        if (!(opt_flags & FW_OPT_USERHELPER))
                return false;
        return true;
}

static bool fw_run_sysfs_fallback(u32 opt_flags)
{
        int ret;

        if (fw_fallback_config.ignore_sysfs_fallback) {
                pr_info_once("Ignoring firmware sysfs fallback due to sysctl knob\n");
                return false;
        }

        if ((opt_flags & FW_OPT_NOFALLBACK_SYSFS))
                return false;

        /* Also permit LSMs and IMA to fail firmware sysfs fallback */
        ret = security_kernel_load_data(LOADING_FIRMWARE, true);
        if (ret < 0)
                return false;

        return fw_force_sysfs_fallback(opt_flags);
}
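/*
 * Illustrative only, not part of the original file: the two knobs consulted
 * above come from struct firmware_fallback_config and are exported as
 * sysctls (see fallback_table.c). Assuming the standard sysctl paths, an
 * admin could force or suppress the fallback with:
 *
 *      sysctl kernel.firmware_config.force_sysfs_fallback=1
 *      sysctl kernel.firmware_config.ignore_sysfs_fallback=0
 */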
/**
 * firmware_fallback_sysfs() - use the fallback mechanism to find firmware
 * @fw: pointer to firmware image
 * @name: name of firmware file to look for
 * @device: device for which firmware is being loaded
 * @opt_flags: options to control firmware loading behaviour, as defined by
 *      &enum fw_opt
 * @ret: return value from direct lookup which triggered the fallback mechanism
 *
 * This function is called if the direct lookup for the firmware failed. It
 * enables a fallback mechanism through userspace by exposing a sysfs loading
 * interface; userspace is in charge of loading the firmware through that
 * interface. The sysfs fallback mechanism may be disabled completely on a
 * system by setting the proc sysctl value ignore_sysfs_fallback to true. If
 * that is false, we also check whether the internal API caller set the
 * @FW_OPT_NOFALLBACK_SYSFS flag, which likewise disables the fallback
 * mechanism. A system may want to enforce the sysfs fallback mechanism at all
 * times; it can do this by setting ignore_sysfs_fallback to false and
 * force_sysfs_fallback to true.
 * Enabling force_sysfs_fallback is functionally equivalent to building a
 * kernel with CONFIG_FW_LOADER_USER_HELPER_FALLBACK.
 **/
int firmware_fallback_sysfs(struct firmware *fw, const char *name,
                            struct device *device,
                            u32 opt_flags,
                            int ret)
{
        if (!fw_run_sysfs_fallback(opt_flags))
                return ret;

        if (!(opt_flags & FW_OPT_NO_WARN))
                dev_warn(device, "Falling back to sysfs fallback for: %s\n",
                         name);
        else
                dev_dbg(device, "Falling back to sysfs fallback for: %s\n",
                        name);
        return fw_load_from_user_helper(fw, name, device, opt_flags);
}