// SPDX-License-Identifier: GPL-2.0

#include <linux/types.h>
#include <linux/kconfig.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/highmem.h>
#include <linux/umh.h>
#include <linux/sysctl.h>
#include <linux/vmalloc.h>
#include <linux/module.h>

#include "fallback.h"
#include "firmware.h"

/*
 * firmware fallback mechanism
 */

MODULE_IMPORT_NS(FIRMWARE_LOADER_PRIVATE);

extern struct firmware_fallback_config fw_fallback_config;

/* These getters are vetted to use int properly */
static inline int __firmware_loading_timeout(void)
{
	return fw_fallback_config.loading_timeout;
}

/* These setters are vetted to use int properly */
static void __fw_fallback_set_timeout(int timeout)
{
	fw_fallback_config.loading_timeout = timeout;
}

/*
 * Use a small loading timeout when caching devices' firmware: all of these
 * firmware images have been loaded successfully at least once, and the system
 * is ready to complete firmware loading now. The maximum firmware size in
 * current distributions is about 2 MB, so 10 seconds should be enough.
 */
void fw_fallback_set_cache_timeout(void)
{
	fw_fallback_config.old_timeout = __firmware_loading_timeout();
	__fw_fallback_set_timeout(10);
}

/* Restores the timeout to the value last configured during normal operation */
void fw_fallback_set_default_timeout(void)
{
	__fw_fallback_set_timeout(fw_fallback_config.old_timeout);
}

static long firmware_loading_timeout(void)
{
	return __firmware_loading_timeout() > 0 ?
		__firmware_loading_timeout() * HZ : MAX_JIFFY_OFFSET;
}

static inline bool fw_sysfs_done(struct fw_priv *fw_priv)
{
	return __fw_state_check(fw_priv, FW_STATUS_DONE);
}

static inline bool fw_sysfs_loading(struct fw_priv *fw_priv)
{
	return __fw_state_check(fw_priv, FW_STATUS_LOADING);
}

static inline int fw_sysfs_wait_timeout(struct fw_priv *fw_priv, long timeout)
{
	return __fw_state_wait_common(fw_priv, timeout);
}

struct fw_sysfs {
	bool nowait;
	struct device dev;
	struct fw_priv *fw_priv;
	struct firmware *fw;
};

static struct fw_sysfs *to_fw_sysfs(struct device *dev)
{
	return container_of(dev, struct fw_sysfs, dev);
}

static void __fw_load_abort(struct fw_priv *fw_priv)
{
	/*
	 * There is a small window in which the user can write to 'loading'
	 * between loading done and the disappearance of 'loading'.
	 */
	if (fw_sysfs_done(fw_priv))
		return;

	list_del_init(&fw_priv->pending_list);
	fw_state_aborted(fw_priv);
}

static void fw_load_abort(struct fw_sysfs *fw_sysfs)
{
	struct fw_priv *fw_priv = fw_sysfs->fw_priv;

	__fw_load_abort(fw_priv);
}

static LIST_HEAD(pending_fw_head);

void kill_pending_fw_fallback_reqs(bool only_kill_custom)
{
	struct fw_priv *fw_priv;
	struct fw_priv *next;

	mutex_lock(&fw_lock);
	list_for_each_entry_safe(fw_priv, next, &pending_fw_head,
				 pending_list) {
		if (!fw_priv->need_uevent || !only_kill_custom)
			__fw_load_abort(fw_priv);
	}
	mutex_unlock(&fw_lock);
}

static ssize_t timeout_show(struct class *class, struct class_attribute *attr,
			    char *buf)
{
	return sysfs_emit(buf, "%d\n", __firmware_loading_timeout());
}

/**
 * timeout_store() - set number of seconds to wait for firmware
 * @class: device class pointer
 * @attr: device attribute pointer
 * @buf: buffer to scan for timeout value
 * @count: number of bytes in @buf
 *
 * Sets the number of seconds to wait for the firmware. Once
 * this expires an error will be returned to the driver and no
 * firmware will be provided.
 *
 * Note: zero means 'wait forever'.
 **/
static ssize_t timeout_store(struct class *class, struct class_attribute *attr,
			     const char *buf, size_t count)
{
	int tmp_loading_timeout = simple_strtol(buf, NULL, 10);

	if (tmp_loading_timeout < 0)
		tmp_loading_timeout = 0;

	__fw_fallback_set_timeout(tmp_loading_timeout);

	return count;
}
static CLASS_ATTR_RW(timeout);

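/*
 * Example (illustrative): the "timeout" class attribute above is normally
 * exposed as /sys/class/firmware/timeout, so the wait can be tuned from
 * userspace, e.g.:
 *
 *   cat /sys/class/firmware/timeout          # current timeout in seconds
 *   echo 30 > /sys/class/firmware/timeout    # wait up to 30 seconds
 *   echo 0  > /sys/class/firmware/timeout    # wait forever
 *
 * The exact sysfs path depends on how the firmware class is set up on the
 * running system.
 */
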
static struct attribute *firmware_class_attrs[] = {
	&class_attr_timeout.attr,
	NULL,
};
ATTRIBUTE_GROUPS(firmware_class);

static void fw_dev_release(struct device *dev)
{
	struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);

	kfree(fw_sysfs);
}

static int do_firmware_uevent(struct fw_sysfs *fw_sysfs, struct kobj_uevent_env *env)
{
	if (add_uevent_var(env, "FIRMWARE=%s", fw_sysfs->fw_priv->fw_name))
		return -ENOMEM;
	if (add_uevent_var(env, "TIMEOUT=%i", __firmware_loading_timeout()))
		return -ENOMEM;
	if (add_uevent_var(env, "ASYNC=%d", fw_sysfs->nowait))
		return -ENOMEM;

	return 0;
}

static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
	int err = 0;

	mutex_lock(&fw_lock);
	if (fw_sysfs->fw_priv)
		err = do_firmware_uevent(fw_sysfs, env);
	mutex_unlock(&fw_lock);
	return err;
}

static struct class firmware_class = {
	.name		= "firmware",
	.class_groups	= firmware_class_groups,
	.dev_uevent	= firmware_uevent,
	.dev_release	= fw_dev_release,
};

int register_sysfs_loader(void)
{
	return class_register(&firmware_class);
}

void unregister_sysfs_loader(void)
{
	class_unregister(&firmware_class);
}

static ssize_t firmware_loading_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
	int loading = 0;

	mutex_lock(&fw_lock);
	if (fw_sysfs->fw_priv)
		loading = fw_sysfs_loading(fw_sysfs->fw_priv);
	mutex_unlock(&fw_lock);

	return sysfs_emit(buf, "%d\n", loading);
}

/**
 * firmware_loading_store() - set value in the 'loading' control file
 * @dev: device pointer
 * @attr: device attribute pointer
 * @buf: buffer to scan for loading control value
 * @count: number of bytes in @buf
 *
 * The relevant values are:
 *
 *  1: Start a load, discarding any previous partial load.
 *  0: Conclude the load and hand the data to the driver code.
 * -1: Conclude the load with an error and discard any written data.
 **/
static ssize_t firmware_loading_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
	struct fw_priv *fw_priv;
	ssize_t written = count;
	int loading = simple_strtol(buf, NULL, 10);

	mutex_lock(&fw_lock);
	fw_priv = fw_sysfs->fw_priv;
	if (fw_state_is_aborted(fw_priv))
		goto out;

	switch (loading) {
	case 1:
		/* discarding any previous partial load */
		if (!fw_sysfs_done(fw_priv)) {
			fw_free_paged_buf(fw_priv);
			fw_state_start(fw_priv);
		}
		break;
	case 0:
		if (fw_sysfs_loading(fw_priv)) {
			int rc;

			/*
			 * Several loading requests may be pending on
			 * the same firmware buffer, so let all requests
			 * see the mapped 'buf->data' once the loading
			 * is completed.
			 */
			rc = fw_map_paged_buf(fw_priv);
			if (rc)
				dev_err(dev, "%s: map pages failed\n",
					__func__);
			else
				rc = security_kernel_post_load_data(fw_priv->data,
								    fw_priv->size,
								    LOADING_FIRMWARE,
								    "blob");

			/*
			 * Same logic as fw_load_abort, only the DONE bit
			 * is ignored and we set ABORT only on failure.
			 */
			list_del_init(&fw_priv->pending_list);
			if (rc) {
				fw_state_aborted(fw_priv);
				written = rc;
			} else {
				fw_state_done(fw_priv);
			}
			break;
		}
		fallthrough;
	default:
		dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
		fallthrough;
	case -1:
		fw_load_abort(fw_sysfs);
		break;
	}
out:
	mutex_unlock(&fw_lock);
	return written;
}

static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);

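/*
 * For reference, a typical userspace fallback sequence against the above
 * interface looks roughly like this (illustrative only; $DEVPATH is the
 * device path reported by the firmware uevent):
 *
 *   echo 1 > /sys/$DEVPATH/loading
 *   cat /lib/firmware/$FIRMWARE > /sys/$DEVPATH/data
 *   echo 0 > /sys/$DEVPATH/loading
 *
 * Writing -1 to 'loading' instead aborts the request and discards any data
 * written so far.
 */
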
static void firmware_rw_data(struct fw_priv *fw_priv, char *buffer,
			     loff_t offset, size_t count, bool read)
{
	if (read)
		memcpy(buffer, fw_priv->data + offset, count);
	else
		memcpy(fw_priv->data + offset, buffer, count);
}

static void firmware_rw(struct fw_priv *fw_priv, char *buffer,
			loff_t offset, size_t count, bool read)
{
	while (count) {
		void *page_data;
		int page_nr = offset >> PAGE_SHIFT;
		int page_ofs = offset & (PAGE_SIZE - 1);
		int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);

		page_data = kmap(fw_priv->pages[page_nr]);

		if (read)
			memcpy(buffer, page_data + page_ofs, page_cnt);
		else
			memcpy(page_data + page_ofs, buffer, page_cnt);

		kunmap(fw_priv->pages[page_nr]);
		buffer += page_cnt;
		offset += page_cnt;
		count -= page_cnt;
	}
}

static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
				  struct bin_attribute *bin_attr,
				  char *buffer, loff_t offset, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
	struct fw_priv *fw_priv;
	ssize_t ret_count;

	mutex_lock(&fw_lock);
	fw_priv = fw_sysfs->fw_priv;
	if (!fw_priv || fw_sysfs_done(fw_priv)) {
		ret_count = -ENODEV;
		goto out;
	}
	if (offset > fw_priv->size) {
		ret_count = 0;
		goto out;
	}
	if (count > fw_priv->size - offset)
		count = fw_priv->size - offset;

	ret_count = count;

	if (fw_priv->data)
		firmware_rw_data(fw_priv, buffer, offset, count, true);
	else
		firmware_rw(fw_priv, buffer, offset, count, true);

out:
	mutex_unlock(&fw_lock);
	return ret_count;
}

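/*
 * Grow the paged buffer so it can hold at least @min_size bytes. If the
 * allocation fails, the pending sysfs request is aborted, since userspace
 * cannot make further progress writing 'data'.
 */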
static int fw_realloc_pages(struct fw_sysfs *fw_sysfs, int min_size)
{
	int err;

	err = fw_grow_paged_buf(fw_sysfs->fw_priv,
				PAGE_ALIGN(min_size) >> PAGE_SHIFT);
	if (err)
		fw_load_abort(fw_sysfs);
	return err;
}

/**
 * firmware_data_write() - write method for firmware
 * @filp: open sysfs file
 * @kobj: kobject for the device
 * @bin_attr: bin_attr structure
 * @buffer: buffer being written
 * @offset: buffer offset for write in total data store area
 * @count: buffer size
 *
 * Data written to the 'data' attribute will be later handed to
 * the driver as a firmware image.
 **/
static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *bin_attr,
				   char *buffer, loff_t offset, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
	struct fw_priv *fw_priv;
	ssize_t retval;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	mutex_lock(&fw_lock);
	fw_priv = fw_sysfs->fw_priv;
	if (!fw_priv || fw_sysfs_done(fw_priv)) {
		retval = -ENODEV;
		goto out;
	}

	if (fw_priv->data) {
		if (offset + count > fw_priv->allocated_size) {
			retval = -ENOMEM;
			goto out;
		}
		firmware_rw_data(fw_priv, buffer, offset, count, false);
		retval = count;
	} else {
		retval = fw_realloc_pages(fw_sysfs, offset + count);
		if (retval)
			goto out;

		retval = count;
		firmware_rw(fw_priv, buffer, offset, count, false);
	}

	fw_priv->size = max_t(size_t, offset + count, fw_priv->size);
out:
	mutex_unlock(&fw_lock);
	return retval;
}

static struct bin_attribute firmware_attr_data = {
	.attr = { .name = "data", .mode = 0644 },
	.size = 0,
	.read = firmware_data_read,
	.write = firmware_data_write,
};

static struct attribute *fw_dev_attrs[] = {
	&dev_attr_loading.attr,
	NULL
};

static struct bin_attribute *fw_dev_bin_attrs[] = {
	&firmware_attr_data,
	NULL
};

static const struct attribute_group fw_dev_attr_group = {
	.attrs = fw_dev_attrs,
	.bin_attrs = fw_dev_bin_attrs,
};

static const struct attribute_group *fw_dev_attr_groups[] = {
	&fw_dev_attr_group,
	NULL
};

static struct fw_sysfs *
fw_create_instance(struct firmware *firmware, const char *fw_name,
		   struct device *device, u32 opt_flags)
{
	struct fw_sysfs *fw_sysfs;
	struct device *f_dev;

	fw_sysfs = kzalloc(sizeof(*fw_sysfs), GFP_KERNEL);
	if (!fw_sysfs) {
		fw_sysfs = ERR_PTR(-ENOMEM);
		goto exit;
	}

	fw_sysfs->nowait = !!(opt_flags & FW_OPT_NOWAIT);
	fw_sysfs->fw = firmware;
	f_dev = &fw_sysfs->dev;

	device_initialize(f_dev);
	dev_set_name(f_dev, "%s", fw_name);
	f_dev->parent = device;
	f_dev->class = &firmware_class;
	f_dev->groups = fw_dev_attr_groups;
exit:
	return fw_sysfs;
}

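/*
 * Note that fw_create_instance() only initializes the device; the caller is
 * expected to add it with device_add() and to drop the final reference with
 * put_device(), which frees the struct fw_sysfs through fw_dev_release().
 */
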
/**
 * fw_load_sysfs_fallback() - load a firmware via the sysfs fallback mechanism
 * @fw_sysfs: firmware sysfs information for the firmware to load
 * @timeout: timeout to wait for the load
 *
 * In charge of constructing a sysfs fallback interface for firmware loading.
 **/
static int fw_load_sysfs_fallback(struct fw_sysfs *fw_sysfs, long timeout)
{
	int retval = 0;
	struct device *f_dev = &fw_sysfs->dev;
	struct fw_priv *fw_priv = fw_sysfs->fw_priv;

	/* fall back on userspace loading */
	if (!fw_priv->data)
		fw_priv->is_paged_buf = true;

	dev_set_uevent_suppress(f_dev, true);

	retval = device_add(f_dev);
	if (retval) {
		dev_err(f_dev, "%s: device_register failed\n", __func__);
		goto err_put_dev;
	}

	mutex_lock(&fw_lock);
	list_add(&fw_priv->pending_list, &pending_fw_head);
	mutex_unlock(&fw_lock);

	if (fw_priv->opt_flags & FW_OPT_UEVENT) {
		fw_priv->need_uevent = true;
		dev_set_uevent_suppress(f_dev, false);
		dev_dbg(f_dev, "firmware: requesting %s\n", fw_priv->fw_name);
		kobject_uevent(&fw_sysfs->dev.kobj, KOBJ_ADD);
	} else {
		timeout = MAX_JIFFY_OFFSET;
	}

	retval = fw_sysfs_wait_timeout(fw_priv, timeout);
	if (retval < 0 && retval != -ENOENT) {
		mutex_lock(&fw_lock);
		fw_load_abort(fw_sysfs);
		mutex_unlock(&fw_lock);
	}

	if (fw_state_is_aborted(fw_priv)) {
		if (retval == -ERESTARTSYS)
			retval = -EINTR;
		else
			retval = -EAGAIN;
	} else if (fw_priv->is_paged_buf && !fw_priv->data)
		retval = -ENOMEM;

	device_del(f_dev);
err_put_dev:
	put_device(f_dev);
	return retval;
}

static int fw_load_from_user_helper(struct firmware *firmware,
				    const char *name, struct device *device,
				    u32 opt_flags)
{
	struct fw_sysfs *fw_sysfs;
	long timeout;
	int ret;

	timeout = firmware_loading_timeout();
	if (opt_flags & FW_OPT_NOWAIT) {
		timeout = usermodehelper_read_lock_wait(timeout);
		if (!timeout) {
			dev_dbg(device, "firmware: %s loading timed out\n",
				name);
			return -EBUSY;
		}
	} else {
		ret = usermodehelper_read_trylock();
		if (WARN_ON(ret)) {
			dev_err(device, "firmware: %s will not be loaded\n",
				name);
			return ret;
		}
	}

	fw_sysfs = fw_create_instance(firmware, name, device, opt_flags);
	if (IS_ERR(fw_sysfs)) {
		ret = PTR_ERR(fw_sysfs);
		goto out_unlock;
	}

	fw_sysfs->fw_priv = firmware->priv;
	ret = fw_load_sysfs_fallback(fw_sysfs, timeout);

	if (!ret)
		ret = assign_fw(firmware, device);

out_unlock:
	usermodehelper_read_unlock();

	return ret;
}

static bool fw_force_sysfs_fallback(u32 opt_flags)
{
	if (fw_fallback_config.force_sysfs_fallback)
		return true;
	if (!(opt_flags & FW_OPT_USERHELPER))
		return false;
	return true;
}

static bool fw_run_sysfs_fallback(u32 opt_flags)
{
	int ret;

	if (fw_fallback_config.ignore_sysfs_fallback) {
		pr_info_once("Ignoring firmware sysfs fallback due to sysctl knob\n");
		return false;
	}

	if ((opt_flags & FW_OPT_NOFALLBACK_SYSFS))
		return false;

	/* Also permit LSMs and IMA to fail firmware sysfs fallback */
	ret = security_kernel_load_data(LOADING_FIRMWARE, true);
	if (ret < 0)
		return false;

	return fw_force_sysfs_fallback(opt_flags);
}

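/*
 * Note: force_sysfs_fallback and ignore_sysfs_fallback are normally
 * adjustable at run time via sysctl (typically under
 * /proc/sys/kernel/firmware_config/) in addition to their build-time
 * defaults; the exact paths depend on how fw_fallback_config is registered
 * elsewhere in the firmware loader.
 */
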
/**
 * firmware_fallback_sysfs() - use the fallback mechanism to find firmware
 * @fw: pointer to firmware image
 * @name: name of firmware file to look for
 * @device: device for which firmware is being loaded
 * @opt_flags: options to control firmware loading behaviour, as defined by
 *	       &enum fw_opt
 * @ret: return value from direct lookup which triggered the fallback
 *	 mechanism
 *
 * This function is called if the direct lookup for the firmware failed. It
 * enables a fallback mechanism through userspace by exposing a sysfs loading
 * interface. Userspace is in charge of loading the firmware through the sysfs
 * loading interface. This sysfs fallback mechanism may be disabled completely
 * on a system by setting the proc sysctl value ignore_sysfs_fallback to true.
 * If this is false, we check whether the internal API caller set the
 * @FW_OPT_NOFALLBACK_SYSFS flag; if so, the fallback mechanism is likewise
 * disabled. A system may want to enforce the sysfs fallback mechanism at all
 * times; it can do so by setting ignore_sysfs_fallback to false and
 * force_sysfs_fallback to true.
 * Enabling force_sysfs_fallback is functionally equivalent to building a
 * kernel with CONFIG_FW_LOADER_USER_HELPER_FALLBACK.
 **/
int firmware_fallback_sysfs(struct firmware *fw, const char *name,
			    struct device *device,
			    u32 opt_flags,
			    int ret)
{
	if (!fw_run_sysfs_fallback(opt_flags))
		return ret;

	if (!(opt_flags & FW_OPT_NO_WARN))
		dev_warn(device, "Falling back to sysfs fallback for: %s\n",
			 name);
	else
		dev_dbg(device, "Falling back to sysfs fallback for: %s\n",
			name);
	return fw_load_from_user_helper(fw, name, device, opt_flags);
}