1 // SPDX-License-Identifier: GPL-2.0
2 
#include <linux/types.h>
#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/highmem.h>
#include <linux/umh.h>
#include <linux/sysctl.h>
#include <linux/vmalloc.h>

#include "fallback.h"
#include "firmware.h"
15 
16 /*
17  * firmware fallback mechanism
18  */
19 
20 extern struct firmware_fallback_config fw_fallback_config;
21 
/* These getters are vetted to use int properly */
static inline int __firmware_loading_timeout(void)
{
	/* Timeout in seconds; values <= 0 mean "wait forever" to callers. */
	return fw_fallback_config.loading_timeout;
}
27 
/* These setters are vetted to use int properly */
static void __fw_fallback_set_timeout(int timeout)
{
	/* Store the firmware loading timeout (seconds) in the global config. */
	fw_fallback_config.loading_timeout = timeout;
}
33 
/*
 * Use a small loading timeout for caching devices' firmware because all these
 * firmware images have been loaded successfully at least once, and the system
 * is ready for completing firmware loading now. The maximum size of firmware
 * in current distributions is about 2M bytes, so 10 secs should be enough.
 */
void fw_fallback_set_cache_timeout(void)
{
	/* Remember the user-configured timeout so it can be restored later. */
	fw_fallback_config.old_timeout = __firmware_loading_timeout();
	__fw_fallback_set_timeout(10);
}
45 
/* Restores the timeout to the value last configured during normal operation */
void fw_fallback_set_default_timeout(void)
{
	/* Counterpart of fw_fallback_set_cache_timeout(). */
	__fw_fallback_set_timeout(fw_fallback_config.old_timeout);
}
51 
52 static long firmware_loading_timeout(void)
53 {
54 	return __firmware_loading_timeout() > 0 ?
55 		__firmware_loading_timeout() * HZ : MAX_JIFFY_OFFSET;
56 }
57 
/* True once the firmware image has been fully loaded (FW_STATUS_DONE). */
static inline bool fw_sysfs_done(struct fw_priv *fw_priv)
{
	return __fw_state_check(fw_priv, FW_STATUS_DONE);
}
62 
/* True while a load started via the 'loading' attribute is in progress. */
static inline bool fw_sysfs_loading(struct fw_priv *fw_priv)
{
	return __fw_state_check(fw_priv, FW_STATUS_LOADING);
}
67 
/* Wait for the request to complete or abort, for up to @timeout jiffies. */
static inline int fw_sysfs_wait_timeout(struct fw_priv *fw_priv,  long timeout)
{
	return __fw_state_wait_common(fw_priv, timeout);
}
72 
/* Per-request state backing one sysfs fallback device */
struct fw_sysfs {
	bool nowait;		/* request was asynchronous (FW_OPT_NOWAIT) */
	struct device dev;	/* embedded device exposed under the class */
	struct fw_priv *fw_priv;	/* firmware buffer being filled */
	struct firmware *fw;	/* firmware handle returned to the caller */
};
79 
/* Map an embedded struct device back to its owning struct fw_sysfs. */
static struct fw_sysfs *to_fw_sysfs(struct device *dev)
{
	return container_of(dev, struct fw_sysfs, dev);
}
84 
/*
 * Abort a pending fallback request: unlink it from the pending list and
 * mark its state aborted.  All callers hold fw_lock.
 */
static void __fw_load_abort(struct fw_priv *fw_priv)
{
	/*
	 * There is a small window in which user can write to 'loading'
	 * between loading done and disappearance of 'loading'
	 */
	if (fw_sysfs_done(fw_priv))
		return;

	list_del_init(&fw_priv->pending_list);
	fw_state_aborted(fw_priv);
}
97 
98 static void fw_load_abort(struct fw_sysfs *fw_sysfs)
99 {
100 	struct fw_priv *fw_priv = fw_sysfs->fw_priv;
101 
102 	__fw_load_abort(fw_priv);
103 }
104 
/* Fallback requests still waiting for userspace to provide the firmware */
static LIST_HEAD(pending_fw_head);
106 
/*
 * Abort firmware requests still waiting on the sysfs fallback.
 *
 * @only_kill_custom: when true, only abort requests that never emitted a
 *	uevent (custom fallback helpers); when false, abort them all.
 */
void kill_pending_fw_fallback_reqs(bool only_kill_custom)
{
	struct fw_priv *fw_priv;
	struct fw_priv *next;

	mutex_lock(&fw_lock);
	/* _safe iteration: __fw_load_abort() unlinks the current entry. */
	list_for_each_entry_safe(fw_priv, next, &pending_fw_head,
				 pending_list) {
		if (!fw_priv->need_uevent || !only_kill_custom)
			 __fw_load_abort(fw_priv);
	}
	mutex_unlock(&fw_lock);
}
120 
/* 'timeout' class attribute: show the current loading timeout in seconds. */
static ssize_t timeout_show(struct class *class, struct class_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%d\n", __firmware_loading_timeout());
}
126 
127 /**
128  * firmware_timeout_store() - set number of seconds to wait for firmware
129  * @class: device class pointer
130  * @attr: device attribute pointer
131  * @buf: buffer to scan for timeout value
132  * @count: number of bytes in @buf
133  *
134  *	Sets the number of seconds to wait for the firmware.  Once
135  *	this expires an error will be returned to the driver and no
136  *	firmware will be provided.
137  *
138  *	Note: zero means 'wait forever'.
139  **/
140 static ssize_t timeout_store(struct class *class, struct class_attribute *attr,
141 			     const char *buf, size_t count)
142 {
143 	int tmp_loading_timeout = simple_strtol(buf, NULL, 10);
144 
145 	if (tmp_loading_timeout < 0)
146 		tmp_loading_timeout = 0;
147 
148 	__fw_fallback_set_timeout(tmp_loading_timeout);
149 
150 	return count;
151 }
152 static CLASS_ATTR_RW(timeout);
153 
/* Class-level attributes exposed under /sys/class/firmware/ */
static struct attribute *firmware_class_attrs[] = {
	&class_attr_timeout.attr,
	NULL,
};
ATTRIBUTE_GROUPS(firmware_class);
159 
/* Device release callback: frees the containing struct fw_sysfs. */
static void fw_dev_release(struct device *dev)
{
	kfree(to_fw_sysfs(dev));
}
166 
/* Add the FIRMWARE/TIMEOUT/ASYNC variables to a fallback device uevent. */
static int do_firmware_uevent(struct fw_sysfs *fw_sysfs, struct kobj_uevent_env *env)
{
	if (add_uevent_var(env, "FIRMWARE=%s", fw_sysfs->fw_priv->fw_name))
		return -ENOMEM;
	if (add_uevent_var(env, "TIMEOUT=%i", __firmware_loading_timeout()))
		return -ENOMEM;
	if (add_uevent_var(env, "ASYNC=%d", fw_sysfs->nowait))
		return -ENOMEM;

	return 0;
}
178 
/* Class uevent callback; fw_lock guards fw_priv against concurrent teardown. */
static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
	int err = 0;

	mutex_lock(&fw_lock);
	if (fw_sysfs->fw_priv)
		err = do_firmware_uevent(fw_sysfs, env);
	mutex_unlock(&fw_lock);
	return err;
}
190 
/* The "firmware" device class backing the sysfs fallback interface */
static struct class firmware_class = {
	.name		= "firmware",
	.class_groups	= firmware_class_groups,
	.dev_uevent	= firmware_uevent,
	.dev_release	= fw_dev_release,
};
197 
/* Register the "firmware" class; called during firmware loader setup. */
int register_sysfs_loader(void)
{
	return class_register(&firmware_class);
}
202 
/* Tear down the "firmware" class; counterpart of register_sysfs_loader(). */
void unregister_sysfs_loader(void)
{
	class_unregister(&firmware_class);
}
207 
208 static ssize_t firmware_loading_show(struct device *dev,
209 				     struct device_attribute *attr, char *buf)
210 {
211 	struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
212 	int loading = 0;
213 
214 	mutex_lock(&fw_lock);
215 	if (fw_sysfs->fw_priv)
216 		loading = fw_sysfs_loading(fw_sysfs->fw_priv);
217 	mutex_unlock(&fw_lock);
218 
219 	return sprintf(buf, "%d\n", loading);
220 }
221 
/* Some architectures don't have PAGE_KERNEL_RO; fall back to PAGE_KERNEL. */
#ifndef PAGE_KERNEL_RO
#define PAGE_KERNEL_RO PAGE_KERNEL
#endif
226 
/* one pages buffer should be mapped/unmapped only once */
static int map_fw_priv_pages(struct fw_priv *fw_priv)
{
	/* Nothing to map when the caller supplied a preallocated buffer. */
	if (!fw_priv->is_paged_buf)
		return 0;

	/* Drop any stale mapping before creating the final read-only one. */
	vunmap(fw_priv->data);
	fw_priv->data = vmap(fw_priv->pages, fw_priv->nr_pages, 0,
			     PAGE_KERNEL_RO);
	if (!fw_priv->data)
		return -ENOMEM;
	return 0;
}
240 
/**
 * firmware_loading_store() - set value in the 'loading' control file
 * @dev: device pointer
 * @attr: device attribute pointer
 * @buf: buffer to scan for loading control value
 * @count: number of bytes in @buf
 *
 *	The relevant values are:
 *
 *	 1: Start a load, discarding any previous partial load.
 *	 0: Conclude the load and hand the data to the driver code.
 *	-1: Conclude the load with an error and discard any written data.
 **/
static ssize_t firmware_loading_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
	struct fw_priv *fw_priv;
	ssize_t written = count;
	int loading = simple_strtol(buf, NULL, 10);
	int i;

	mutex_lock(&fw_lock);
	fw_priv = fw_sysfs->fw_priv;
	/* An aborted request consumes the write without acting on it. */
	if (fw_state_is_aborted(fw_priv))
		goto out;

	switch (loading) {
	case 1:
		/* discarding any previous partial load */
		if (!fw_sysfs_done(fw_priv)) {
			/* Free every page written so far and restart. */
			for (i = 0; i < fw_priv->nr_pages; i++)
				__free_page(fw_priv->pages[i]);
			vfree(fw_priv->pages);
			fw_priv->pages = NULL;
			fw_priv->page_array_size = 0;
			fw_priv->nr_pages = 0;
			fw_state_start(fw_priv);
		}
		break;
	case 0:
		if (fw_sysfs_loading(fw_priv)) {
			int rc;

			/*
			 * Several loading requests may be pending on
			 * one same firmware buf, so let all requests
			 * see the mapped 'buf->data' once the loading
			 * is completed.
			 */
			rc = map_fw_priv_pages(fw_priv);
			if (rc)
				dev_err(dev, "%s: map pages failed\n",
					__func__);
			else
				/* Give the LSM a chance to veto the image. */
				rc = security_kernel_post_read_file(NULL,
						fw_priv->data, fw_priv->size,
						READING_FIRMWARE);

			/*
			 * Same logic as fw_load_abort, only the DONE bit
			 * is ignored and we set ABORT only on failure.
			 */
			list_del_init(&fw_priv->pending_list);
			if (rc) {
				fw_state_aborted(fw_priv);
				written = rc;
			} else {
				fw_state_done(fw_priv);
			}
			break;
		}
		/* fallthrough */
	default:
		dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
		/* fallthrough */
	case -1:
		fw_load_abort(fw_sysfs);
		break;
	}
out:
	mutex_unlock(&fw_lock);
	return written;
}

static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);
328 
329 static void firmware_rw_data(struct fw_priv *fw_priv, char *buffer,
330 			   loff_t offset, size_t count, bool read)
331 {
332 	if (read)
333 		memcpy(buffer, fw_priv->data + offset, count);
334 	else
335 		memcpy(fw_priv->data + offset, buffer, count);
336 }
337 
/*
 * Copy @count bytes between @buffer and the paged firmware buffer at
 * @offset, one page at a time, using kmap()/kunmap() so highmem pages can
 * be accessed; @read selects the direction.
 */
static void firmware_rw(struct fw_priv *fw_priv, char *buffer,
			loff_t offset, size_t count, bool read)
{
	while (count) {
		void *page_data;
		int page_nr = offset >> PAGE_SHIFT;
		int page_ofs = offset & (PAGE_SIZE-1);
		/* Clamp the copy to what remains of the current page. */
		int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);

		page_data = kmap(fw_priv->pages[page_nr]);

		if (read)
			memcpy(buffer, page_data + page_ofs, page_cnt);
		else
			memcpy(page_data + page_ofs, buffer, page_cnt);

		kunmap(fw_priv->pages[page_nr]);
		buffer += page_cnt;
		offset += page_cnt;
		count -= page_cnt;
	}
}
360 
/*
 * sysfs 'data' read: copy up to @count bytes of the partially loaded
 * firmware image at @offset into @buffer.  Returns -ENODEV once the load
 * has completed (or no request is bound), since the data then belongs to
 * the driver.
 */
static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
				  struct bin_attribute *bin_attr,
				  char *buffer, loff_t offset, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
	struct fw_priv *fw_priv;
	ssize_t ret_count;

	mutex_lock(&fw_lock);
	fw_priv = fw_sysfs->fw_priv;
	if (!fw_priv || fw_sysfs_done(fw_priv)) {
		ret_count = -ENODEV;
		goto out;
	}
	/* Reads past the end return 0 (EOF). */
	if (offset > fw_priv->size) {
		ret_count = 0;
		goto out;
	}
	/* Clamp the read to the bytes actually available. */
	if (count > fw_priv->size - offset)
		count = fw_priv->size - offset;

	ret_count = count;

	/* Preallocated buffers are contiguous; otherwise copy page-wise. */
	if (fw_priv->data)
		firmware_rw_data(fw_priv, buffer, offset, count, true);
	else
		firmware_rw(fw_priv, buffer, offset, count, true);

out:
	mutex_unlock(&fw_lock);
	return ret_count;
}
394 
/*
 * Ensure the paged firmware buffer can hold at least @min_size bytes,
 * growing both the page-pointer array and the page allocations on demand.
 * Aborts the load and returns -ENOMEM on allocation failure.
 */
static int fw_realloc_pages(struct fw_sysfs *fw_sysfs, int min_size)
{
	struct fw_priv *fw_priv= fw_sysfs->fw_priv;
	int pages_needed = PAGE_ALIGN(min_size) >> PAGE_SHIFT;

	/* If the array of pages is too small, grow it... */
	if (fw_priv->page_array_size < pages_needed) {
		/* At least double the array to amortize repeated growth. */
		int new_array_size = max(pages_needed,
					 fw_priv->page_array_size * 2);
		struct page **new_pages;

		new_pages = vmalloc(array_size(new_array_size, sizeof(void *)));
		if (!new_pages) {
			fw_load_abort(fw_sysfs);
			return -ENOMEM;
		}
		/* Copy the old pointers, zero the newly added tail. */
		memcpy(new_pages, fw_priv->pages,
		       fw_priv->page_array_size * sizeof(void *));
		memset(&new_pages[fw_priv->page_array_size], 0, sizeof(void *) *
		       (new_array_size - fw_priv->page_array_size));
		vfree(fw_priv->pages);
		fw_priv->pages = new_pages;
		fw_priv->page_array_size = new_array_size;
	}

	/* Allocate any pages not yet backing the buffer. */
	while (fw_priv->nr_pages < pages_needed) {
		fw_priv->pages[fw_priv->nr_pages] =
			alloc_page(GFP_KERNEL | __GFP_HIGHMEM);

		if (!fw_priv->pages[fw_priv->nr_pages]) {
			fw_load_abort(fw_sysfs);
			return -ENOMEM;
		}
		fw_priv->nr_pages++;
	}
	return 0;
}
432 
/**
 * firmware_data_write() - write method for firmware
 * @filp: open sysfs file
 * @kobj: kobject for the device
 * @bin_attr: bin_attr structure
 * @buffer: buffer being written
 * @offset: buffer offset for write in total data store area
 * @count: buffer size
 *
 *	Data written to the 'data' attribute will be later handed to
 *	the driver as a firmware image.
 **/
static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *bin_attr,
				   char *buffer, loff_t offset, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
	struct fw_priv *fw_priv;
	ssize_t retval;

	/* Injecting firmware into the kernel requires raw I/O capability. */
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	mutex_lock(&fw_lock);
	fw_priv = fw_sysfs->fw_priv;
	if (!fw_priv || fw_sysfs_done(fw_priv)) {
		retval = -ENODEV;
		goto out;
	}

	if (fw_priv->data) {
		/* Preallocated buffer: the write must fit the allocation. */
		if (offset + count > fw_priv->allocated_size) {
			retval = -ENOMEM;
			goto out;
		}
		firmware_rw_data(fw_priv, buffer, offset, count, false);
		retval = count;
	} else {
		/* Paged buffer: grow the page array to cover the write. */
		retval = fw_realloc_pages(fw_sysfs, offset + count);
		if (retval)
			goto out;

		retval = count;
		firmware_rw(fw_priv, buffer, offset, count, false);
	}

	/* Track the highest byte written so far. */
	fw_priv->size = max_t(size_t, offset + count, fw_priv->size);
out:
	mutex_unlock(&fw_lock);
	return retval;
}
485 
/* Binary 'data' attribute through which userspace transfers the image */
static struct bin_attribute firmware_attr_data = {
	.attr = { .name = "data", .mode = 0644 },
	.size = 0,
	.read = firmware_data_read,
	.write = firmware_data_write,
};

/* Plain attributes of a fallback device ('loading') */
static struct attribute *fw_dev_attrs[] = {
	&dev_attr_loading.attr,
	NULL
};

/* Binary attributes of a fallback device ('data') */
static struct bin_attribute *fw_dev_bin_attrs[] = {
	&firmware_attr_data,
	NULL
};

static const struct attribute_group fw_dev_attr_group = {
	.attrs = fw_dev_attrs,
	.bin_attrs = fw_dev_bin_attrs,
};

static const struct attribute_group *fw_dev_attr_groups[] = {
	&fw_dev_attr_group,
	NULL
};
512 
/*
 * Allocate and initialize (but do not register) the device that exposes
 * the sysfs loading interface for @fw_name.  Returns ERR_PTR(-ENOMEM) on
 * allocation failure; the caller registers it with device_add() and drops
 * the reference with put_device().
 */
static struct fw_sysfs *
fw_create_instance(struct firmware *firmware, const char *fw_name,
		   struct device *device, enum fw_opt opt_flags)
{
	struct fw_sysfs *fw_sysfs;
	struct device *f_dev;

	fw_sysfs = kzalloc(sizeof(*fw_sysfs), GFP_KERNEL);
	if (!fw_sysfs) {
		fw_sysfs = ERR_PTR(-ENOMEM);
		goto exit;
	}

	fw_sysfs->nowait = !!(opt_flags & FW_OPT_NOWAIT);
	fw_sysfs->fw = firmware;
	f_dev = &fw_sysfs->dev;

	device_initialize(f_dev);
	/* NOTE(review): dev_set_name() failure is ignored here — confirm
	 * whether an OOM is acceptable to surface later at device_add(). */
	dev_set_name(f_dev, "%s", fw_name);
	f_dev->parent = device;
	f_dev->class = &firmware_class;
	f_dev->groups = fw_dev_attr_groups;
exit:
	return fw_sysfs;
}
538 
/**
 * fw_load_sysfs_fallback() - load a firmware via the sysfs fallback mechanism
 * @fw_sysfs: firmware sysfs information for the firmware to load
 * @opt_flags: flags of options, FW_OPT_*
 * @timeout: timeout to wait for the load, in jiffies
 *
 * In charge of constructing a sysfs fallback interface for firmware loading.
 **/
static int fw_load_sysfs_fallback(struct fw_sysfs *fw_sysfs,
				  enum fw_opt opt_flags, long timeout)
{
	int retval = 0;
	struct device *f_dev = &fw_sysfs->dev;
	struct fw_priv *fw_priv = fw_sysfs->fw_priv;

	/* fall back on userspace loading */
	if (!fw_priv->data)
		fw_priv->is_paged_buf = true;

	/* Suppress the ADD uevent until we know whether one is wanted. */
	dev_set_uevent_suppress(f_dev, true);

	retval = device_add(f_dev);
	if (retval) {
		dev_err(f_dev, "%s: device_register failed\n", __func__);
		goto err_put_dev;
	}

	mutex_lock(&fw_lock);
	list_add(&fw_priv->pending_list, &pending_fw_head);
	mutex_unlock(&fw_lock);

	if (opt_flags & FW_OPT_UEVENT) {
		fw_priv->need_uevent = true;
		dev_set_uevent_suppress(f_dev, false);
		dev_dbg(f_dev, "firmware: requesting %s\n", fw_priv->fw_name);
		kobject_uevent(&fw_sysfs->dev.kobj, KOBJ_ADD);
	} else {
		/* No uevent: a custom helper drives the load; wait forever. */
		timeout = MAX_JIFFY_OFFSET;
	}

	retval = fw_sysfs_wait_timeout(fw_priv, timeout);
	if (retval < 0) {
		/* Timeout or signal: abort the request under fw_lock. */
		mutex_lock(&fw_lock);
		fw_load_abort(fw_sysfs);
		mutex_unlock(&fw_lock);
	}

	if (fw_state_is_aborted(fw_priv)) {
		if (retval == -ERESTARTSYS)
			retval = -EINTR;
		else
			retval = -EAGAIN;
	} else if (fw_priv->is_paged_buf && !fw_priv->data)
		/* Pages were written but the final vmap() failed. */
		retval = -ENOMEM;

	device_del(f_dev);
err_put_dev:
	put_device(f_dev);
	return retval;
}
599 
/*
 * Set up the sysfs fallback device and wait for userspace to provide the
 * firmware, holding the usermodehelper read lock for the duration.
 */
static int fw_load_from_user_helper(struct firmware *firmware,
				    const char *name, struct device *device,
				    enum fw_opt opt_flags)
{
	struct fw_sysfs *fw_sysfs;
	long timeout;
	int ret;

	timeout = firmware_loading_timeout();
	if (opt_flags & FW_OPT_NOWAIT) {
		/*
		 * Async requests may sleep until the usermodehelper is
		 * usable again; 0 means the wait itself timed out.
		 */
		timeout = usermodehelper_read_lock_wait(timeout);
		if (!timeout) {
			dev_dbg(device, "firmware: %s loading timed out\n",
				name);
			return -EBUSY;
		}
	} else {
		/* Sync requests must not sleep waiting for the helper. */
		ret = usermodehelper_read_trylock();
		if (WARN_ON(ret)) {
			dev_err(device, "firmware: %s will not be loaded\n",
				name);
			return ret;
		}
	}

	fw_sysfs = fw_create_instance(firmware, name, device, opt_flags);
	if (IS_ERR(fw_sysfs)) {
		ret = PTR_ERR(fw_sysfs);
		goto out_unlock;
	}

	fw_sysfs->fw_priv = firmware->priv;
	ret = fw_load_sysfs_fallback(fw_sysfs, opt_flags, timeout);

	if (!ret)
		/* Hand the completed image over to the requesting driver. */
		ret = assign_fw(firmware, device, opt_flags);

out_unlock:
	usermodehelper_read_unlock();

	return ret;
}
642 
643 static bool fw_force_sysfs_fallback(enum fw_opt opt_flags)
644 {
645 	if (fw_fallback_config.force_sysfs_fallback)
646 		return true;
647 	if (!(opt_flags & FW_OPT_USERHELPER))
648 		return false;
649 	return true;
650 }
651 
/*
 * Decide whether the sysfs fallback should run for this request.  The
 * ignore_sysfs_fallback sysctl knob wins over everything, then the
 * caller's FW_OPT_NOFALLBACK flag, then the force/USERHELPER policy.
 */
static bool fw_run_sysfs_fallback(enum fw_opt opt_flags)
{
	if (fw_fallback_config.ignore_sysfs_fallback) {
		pr_info_once("Ignoring firmware sysfs fallback due to sysctl knob\n");
		return false;
	}

	if ((opt_flags & FW_OPT_NOFALLBACK))
		return false;

	return fw_force_sysfs_fallback(opt_flags);
}
664 
665 /**
666  * firmware_fallback_sysfs() - use the fallback mechanism to find firmware
667  * @fw: pointer to firmware image
668  * @name: name of firmware file to look for
669  * @device: device for which firmware is being loaded
670  * @opt_flags: options to control firmware loading behaviour
671  * @ret: return value from direct lookup which triggered the fallback mechanism
672  *
673  * This function is called if direct lookup for the firmware failed, it enables
674  * a fallback mechanism through userspace by exposing a sysfs loading
675  * interface. Userspace is in charge of loading the firmware through the syfs
676  * loading interface. This syfs fallback mechanism may be disabled completely
677  * on a system by setting the proc sysctl value ignore_sysfs_fallback to true.
678  * If this false we check if the internal API caller set the @FW_OPT_NOFALLBACK
679  * flag, if so it would also disable the fallback mechanism. A system may want
680  * to enfoce the sysfs fallback mechanism at all times, it can do this by
681  * setting ignore_sysfs_fallback to false and force_sysfs_fallback to true.
682  * Enabling force_sysfs_fallback is functionally equivalent to build a kernel
683  * with CONFIG_FW_LOADER_USER_HELPER_FALLBACK.
684  **/
685 int firmware_fallback_sysfs(struct firmware *fw, const char *name,
686 			    struct device *device,
687 			    enum fw_opt opt_flags,
688 			    int ret)
689 {
690 	if (!fw_run_sysfs_fallback(opt_flags))
691 		return ret;
692 
693 	if (!(opt_flags & FW_OPT_NO_WARN))
694 		dev_warn(device, "Falling back to syfs fallback for: %s\n",
695 				 name);
696 	else
697 		dev_dbg(device, "Falling back to sysfs fallback for: %s\n",
698 				name);
699 	return fw_load_from_user_helper(fw, name, device, opt_flags);
700 }
701