xref: /openbmc/linux/drivers/base/firmware_loader/main.c (revision 1830dad34c070161fda2ff1db77b39ffa78aa380)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * main.c - Multi purpose firmware loading support
4  *
5  * Copyright (c) 2003 Manuel Estrada Sainz
6  *
7  * Please see Documentation/firmware_class/ for more information.
8  *
9  */
10 
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 
13 #include <linux/capability.h>
14 #include <linux/device.h>
15 #include <linux/module.h>
16 #include <linux/init.h>
17 #include <linux/timer.h>
18 #include <linux/vmalloc.h>
19 #include <linux/interrupt.h>
20 #include <linux/bitops.h>
21 #include <linux/mutex.h>
22 #include <linux/workqueue.h>
23 #include <linux/highmem.h>
24 #include <linux/firmware.h>
25 #include <linux/slab.h>
26 #include <linux/sched.h>
27 #include <linux/file.h>
28 #include <linux/list.h>
29 #include <linux/fs.h>
30 #include <linux/async.h>
31 #include <linux/pm.h>
32 #include <linux/suspend.h>
33 #include <linux/syscore_ops.h>
34 #include <linux/reboot.h>
35 #include <linux/security.h>
36 
37 #include <generated/utsrelease.h>
38 
39 #include "../base.h"
40 #include "firmware.h"
41 #include "fallback.h"
42 
43 MODULE_AUTHOR("Manuel Estrada Sainz");
44 MODULE_DESCRIPTION("Multi purpose firmware loading support");
45 MODULE_LICENSE("GPL");
46 
47 struct firmware_cache {
48 	/* fw_priv instances will be added to the list below */
49 	spinlock_t lock;
50 	struct list_head head;
51 	int state;
52 
53 #ifdef CONFIG_PM_SLEEP
54 	/*
55 	 * Names of firmware images which have been cached successfully
56 	 * are added to the list below so that the device uncache
57 	 * helper can track which firmware images have been cached
58 	 * before.
59 	 */
60 	spinlock_t name_lock;
61 	struct list_head fw_names;
62 
63 	struct delayed_work work;
64 
65 	struct notifier_block   pm_notify;
66 #endif
67 };
68 
69 struct fw_cache_entry {
70 	struct list_head list;
71 	const char *name;
72 };
73 
74 struct fw_name_devm {
75 	unsigned long magic;
76 	const char *name;
77 };
78 
79 static inline struct fw_priv *to_fw_priv(struct kref *ref)
80 {
81 	return container_of(ref, struct fw_priv, ref);
82 }
83 
84 #define	FW_LOADER_NO_CACHE	0
85 #define	FW_LOADER_START_CACHE	1
86 
87 /* fw_lock could be moved to 'struct fw_sysfs' but since it is just
88  * guarding against corner cases a global lock should be OK */
89 DEFINE_MUTEX(fw_lock);
90 
91 static struct firmware_cache fw_cache;
92 
93 /* Builtin firmware support */
94 
95 #ifdef CONFIG_FW_LOADER
96 
97 extern struct builtin_fw __start_builtin_fw[];
98 extern struct builtin_fw __end_builtin_fw[];
99 
100 static void fw_copy_to_prealloc_buf(struct firmware *fw,
101 				    void *buf, size_t size)
102 {
103 	if (!buf || size < fw->size)
104 		return;
105 	memcpy(buf, fw->data, fw->size);
106 }
107 
108 static bool fw_get_builtin_firmware(struct firmware *fw, const char *name,
109 				    void *buf, size_t size)
110 {
111 	struct builtin_fw *b_fw;
112 
113 	for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
114 		if (strcmp(name, b_fw->name) == 0) {
115 			fw->size = b_fw->size;
116 			fw->data = b_fw->data;
117 			fw_copy_to_prealloc_buf(fw, buf, size);
118 
119 			return true;
120 		}
121 	}
122 
123 	return false;
124 }
125 
126 static bool fw_is_builtin_firmware(const struct firmware *fw)
127 {
128 	struct builtin_fw *b_fw;
129 
130 	for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++)
131 		if (fw->data == b_fw->data)
132 			return true;
133 
134 	return false;
135 }
136 
137 #else /* Module case - no builtin firmware support */
138 
139 static inline bool fw_get_builtin_firmware(struct firmware *fw,
140 					   const char *name, void *buf,
141 					   size_t size)
142 {
143 	return false;
144 }
145 
146 static inline bool fw_is_builtin_firmware(const struct firmware *fw)
147 {
148 	return false;
149 }
150 #endif
151 
152 static void fw_state_init(struct fw_priv *fw_priv)
153 {
154 	struct fw_state *fw_st = &fw_priv->fw_st;
155 
156 	init_completion(&fw_st->completion);
157 	fw_st->status = FW_STATUS_UNKNOWN;
158 }
159 
160 static inline int fw_state_wait(struct fw_priv *fw_priv)
161 {
162 	return __fw_state_wait_common(fw_priv, MAX_SCHEDULE_TIMEOUT);
163 }
164 
165 static int fw_cache_piggyback_on_request(const char *name);
166 
167 static struct fw_priv *__allocate_fw_priv(const char *fw_name,
168 					  struct firmware_cache *fwc,
169 					  void *dbuf, size_t size)
170 {
171 	struct fw_priv *fw_priv;
172 
173 	fw_priv = kzalloc(sizeof(*fw_priv), GFP_ATOMIC);
174 	if (!fw_priv)
175 		return NULL;
176 
177 	fw_priv->fw_name = kstrdup_const(fw_name, GFP_ATOMIC);
178 	if (!fw_priv->fw_name) {
179 		kfree(fw_priv);
180 		return NULL;
181 	}
182 
183 	kref_init(&fw_priv->ref);
184 	fw_priv->fwc = fwc;
185 	fw_priv->data = dbuf;
186 	fw_priv->allocated_size = size;
187 	fw_state_init(fw_priv);
188 #ifdef CONFIG_FW_LOADER_USER_HELPER
189 	INIT_LIST_HEAD(&fw_priv->pending_list);
190 #endif
191 
192 	pr_debug("%s: fw-%s fw_priv=%p\n", __func__, fw_name, fw_priv);
193 
194 	return fw_priv;
195 }
196 
197 static struct fw_priv *__lookup_fw_priv(const char *fw_name)
198 {
199 	struct fw_priv *tmp;
200 	struct firmware_cache *fwc = &fw_cache;
201 
202 	list_for_each_entry(tmp, &fwc->head, list)
203 		if (!strcmp(tmp->fw_name, fw_name))
204 			return tmp;
205 	return NULL;
206 }
207 
208 /* Returns 1 for batching firmware requests with the same name */
209 static int alloc_lookup_fw_priv(const char *fw_name,
210 				struct firmware_cache *fwc,
211 				struct fw_priv **fw_priv, void *dbuf,
212 				size_t size, enum fw_opt opt_flags)
213 {
214 	struct fw_priv *tmp;
215 
216 	spin_lock(&fwc->lock);
217 	if (!(opt_flags & FW_OPT_NOCACHE)) {
218 		tmp = __lookup_fw_priv(fw_name);
219 		if (tmp) {
220 			kref_get(&tmp->ref);
221 			spin_unlock(&fwc->lock);
222 			*fw_priv = tmp;
223 			pr_debug("batched request - sharing the same struct fw_priv and lookup for multiple requests\n");
224 			return 1;
225 		}
226 	}
227 
228 	tmp = __allocate_fw_priv(fw_name, fwc, dbuf, size);
229 	if (tmp && !(opt_flags & FW_OPT_NOCACHE))
230 		list_add(&tmp->list, &fwc->head);
231 	spin_unlock(&fwc->lock);
232 
233 	*fw_priv = tmp;
234 
235 	return tmp ? 0 : -ENOMEM;
236 }
237 
238 static void __free_fw_priv(struct kref *ref)
239 	__releases(&fwc->lock)
240 {
241 	struct fw_priv *fw_priv = to_fw_priv(ref);
242 	struct firmware_cache *fwc = fw_priv->fwc;
243 
244 	pr_debug("%s: fw-%s fw_priv=%p data=%p size=%u\n",
245 		 __func__, fw_priv->fw_name, fw_priv, fw_priv->data,
246 		 (unsigned int)fw_priv->size);
247 
248 	list_del(&fw_priv->list);
249 	spin_unlock(&fwc->lock);
250 
251 #ifdef CONFIG_FW_LOADER_USER_HELPER
252 	if (fw_priv->is_paged_buf) {
253 		int i;
254 		vunmap(fw_priv->data);
255 		for (i = 0; i < fw_priv->nr_pages; i++)
256 			__free_page(fw_priv->pages[i]);
257 		vfree(fw_priv->pages);
258 	} else
259 #endif
260 	if (!fw_priv->allocated_size)
261 		vfree(fw_priv->data);
262 	kfree_const(fw_priv->fw_name);
263 	kfree(fw_priv);
264 }
265 
266 static void free_fw_priv(struct fw_priv *fw_priv)
267 {
268 	struct firmware_cache *fwc = fw_priv->fwc;
269 	spin_lock(&fwc->lock);
270 	if (!kref_put(&fw_priv->ref, __free_fw_priv))
271 		spin_unlock(&fwc->lock);
272 }
273 
274 /* direct firmware loading support */
275 static char fw_path_para[256];
276 static const char * const fw_path[] = {
277 	fw_path_para,
278 	"/lib/firmware/updates/" UTS_RELEASE,
279 	"/lib/firmware/updates",
280 	"/lib/firmware/" UTS_RELEASE,
281 	"/lib/firmware"
282 };
283 
284 /*
285  * Typical usage is to pass 'firmware_class.path=$CUSTOMIZED_PATH' on the
286  * kernel command line, because firmware_class is generally built into the
287  * kernel rather than built as a module.
288  */
289 module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644);
290 MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path");
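/*
 * Illustrative note (not part of the original file): a hedged sketch of how
 * the 'path' parameter above is typically used; the directory name is a
 * made-up example.
 *
 *   on the kernel command line:
 *	firmware_class.path=/opt/vendor/firmware
 *
 *   or at runtime, via the writable (0644) module parameter:
 *	echo -n /opt/vendor/firmware > /sys/module/firmware_class/parameters/path
 *
 * When set, this path is tried before the default /lib/firmware* entries in
 * fw_path[] above.
 */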
291 
292 static int
293 fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv)
294 {
295 	loff_t size;
296 	int i, len;
297 	int rc = -ENOENT;
298 	char *path;
299 	enum kernel_read_file_id id = READING_FIRMWARE;
300 	size_t msize = INT_MAX;
301 
302 	/* Already populated data member means we're loading into a buffer */
303 	if (fw_priv->data) {
304 		id = READING_FIRMWARE_PREALLOC_BUFFER;
305 		msize = fw_priv->allocated_size;
306 	}
307 
308 	path = __getname();
309 	if (!path)
310 		return -ENOMEM;
311 
312 	for (i = 0; i < ARRAY_SIZE(fw_path); i++) {
313 		/* skip the unset customized path */
314 		if (!fw_path[i][0])
315 			continue;
316 
317 		len = snprintf(path, PATH_MAX, "%s/%s",
318 			       fw_path[i], fw_priv->fw_name);
319 		if (len >= PATH_MAX) {
320 			rc = -ENAMETOOLONG;
321 			break;
322 		}
323 
324 		fw_priv->size = 0;
325 		rc = kernel_read_file_from_path(path, &fw_priv->data, &size,
326 						msize, id);
327 		if (rc) {
328 			if (rc == -ENOENT)
329 				dev_dbg(device, "loading %s failed with error %d\n",
330 					 path, rc);
331 			else
332 				dev_warn(device, "loading %s failed with error %d\n",
333 					 path, rc);
334 			continue;
335 		}
336 		dev_dbg(device, "direct-loading %s\n", fw_priv->fw_name);
337 		fw_priv->size = size;
338 		fw_state_done(fw_priv);
339 		break;
340 	}
341 	__putname(path);
342 
343 	return rc;
344 }
345 
346 /* firmware holds the ownership of pages */
347 static void firmware_free_data(const struct firmware *fw)
348 {
349 	/* Loaded directly? */
350 	if (!fw->priv) {
351 		vfree(fw->data);
352 		return;
353 	}
354 	free_fw_priv(fw->priv);
355 }
356 
357 /* store the pages buffer info from fw_priv into the firmware struct */
358 static void fw_set_page_data(struct fw_priv *fw_priv, struct firmware *fw)
359 {
360 	fw->priv = fw_priv;
361 #ifdef CONFIG_FW_LOADER_USER_HELPER
362 	fw->pages = fw_priv->pages;
363 #endif
364 	fw->size = fw_priv->size;
365 	fw->data = fw_priv->data;
366 
367 	pr_debug("%s: fw-%s fw_priv=%p data=%p size=%u\n",
368 		 __func__, fw_priv->fw_name, fw_priv, fw_priv->data,
369 		 (unsigned int)fw_priv->size);
370 }
371 
372 #ifdef CONFIG_PM_SLEEP
373 static void fw_name_devm_release(struct device *dev, void *res)
374 {
375 	struct fw_name_devm *fwn = res;
376 
377 	if (fwn->magic == (unsigned long)&fw_cache)
378 		pr_debug("%s: fw_name-%s devm-%p released\n",
379 				__func__, fwn->name, res);
380 	kfree_const(fwn->name);
381 }
382 
383 static int fw_devm_match(struct device *dev, void *res,
384 		void *match_data)
385 {
386 	struct fw_name_devm *fwn = res;
387 
388 	return (fwn->magic == (unsigned long)&fw_cache) &&
389 		!strcmp(fwn->name, match_data);
390 }
391 
392 static struct fw_name_devm *fw_find_devm_name(struct device *dev,
393 		const char *name)
394 {
395 	struct fw_name_devm *fwn;
396 
397 	fwn = devres_find(dev, fw_name_devm_release,
398 			  fw_devm_match, (void *)name);
399 	return fwn;
400 }
401 
402 static bool fw_cache_is_setup(struct device *dev, const char *name)
403 {
404 	struct fw_name_devm *fwn;
405 
406 	fwn = fw_find_devm_name(dev, name);
407 	if (fwn)
408 		return true;
409 
410 	return false;
411 }
412 
413 /* add firmware name into devres list */
414 static int fw_add_devm_name(struct device *dev, const char *name)
415 {
416 	struct fw_name_devm *fwn;
417 
418 	if (fw_cache_is_setup(dev, name))
419 		return 0;
420 
421 	fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm),
422 			   GFP_KERNEL);
423 	if (!fwn)
424 		return -ENOMEM;
425 	fwn->name = kstrdup_const(name, GFP_KERNEL);
426 	if (!fwn->name) {
427 		devres_free(fwn);
428 		return -ENOMEM;
429 	}
430 
431 	fwn->magic = (unsigned long)&fw_cache;
432 	devres_add(dev, fwn);
433 
434 	return 0;
435 }
436 #else
437 static bool fw_cache_is_setup(struct device *dev, const char *name)
438 {
439 	return false;
440 }
441 
442 static int fw_add_devm_name(struct device *dev, const char *name)
443 {
444 	return 0;
445 }
446 #endif
447 
448 int assign_fw(struct firmware *fw, struct device *device,
449 	      enum fw_opt opt_flags)
450 {
451 	struct fw_priv *fw_priv = fw->priv;
452 	int ret;
453 
454 	mutex_lock(&fw_lock);
455 	if (!fw_priv->size || fw_state_is_aborted(fw_priv)) {
456 		mutex_unlock(&fw_lock);
457 		return -ENOENT;
458 	}
459 
460 	/*
461 	 * add the firmware name to the devres list so that we can auto-cache
462 	 * and uncache firmware for the device.
463 	 *
464 	 * device may have been deleted already, but the problem
465 	 * should be fixed in devres or driver core.
466 	 */
467 	/* don't cache firmware handled without uevent */
468 	if (device && (opt_flags & FW_OPT_UEVENT) &&
469 	    !(opt_flags & FW_OPT_NOCACHE)) {
470 		ret = fw_add_devm_name(device, fw_priv->fw_name);
471 		if (ret) {
472 			mutex_unlock(&fw_lock);
473 			return ret;
474 		}
475 	}
476 
477 	/*
478 	 * After caching of firmware images has started, let this request
479 	 * piggyback on the cache as well.
480 	 */
481 	if (!(opt_flags & FW_OPT_NOCACHE) &&
482 	    fw_priv->fwc->state == FW_LOADER_START_CACHE) {
483 		if (fw_cache_piggyback_on_request(fw_priv->fw_name))
484 			kref_get(&fw_priv->ref);
485 	}
486 
487 	/* pass the pages buffer to driver at the last minute */
488 	fw_set_page_data(fw_priv, fw);
489 	mutex_unlock(&fw_lock);
490 	return 0;
491 }
492 
493 /* prepare the firmware and fw_priv structs;
494  * return 0 if a firmware image is already assigned, 1 if one needs to be loaded,
495  * or a negative error code
496  */
497 static int
498 _request_firmware_prepare(struct firmware **firmware_p, const char *name,
499 			  struct device *device, void *dbuf, size_t size,
500 			  enum fw_opt opt_flags)
501 {
502 	struct firmware *firmware;
503 	struct fw_priv *fw_priv;
504 	int ret;
505 
506 	*firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
507 	if (!firmware) {
508 		dev_err(device, "%s: kmalloc(struct firmware) failed\n",
509 			__func__);
510 		return -ENOMEM;
511 	}
512 
513 	if (fw_get_builtin_firmware(firmware, name, dbuf, size)) {
514 		dev_dbg(device, "using built-in %s\n", name);
515 		return 0; /* assigned */
516 	}
517 
518 	ret = alloc_lookup_fw_priv(name, &fw_cache, &fw_priv, dbuf, size,
519 				  opt_flags);
520 
521 	/*
522 	 * bind with 'priv' now to avoid a warning in the failure path
523 	 * of the firmware request.
524 	 */
525 	firmware->priv = fw_priv;
526 
527 	if (ret > 0) {
528 		ret = fw_state_wait(fw_priv);
529 		if (!ret) {
530 			fw_set_page_data(fw_priv, firmware);
531 			return 0; /* assigned */
532 		}
533 	}
534 
535 	if (ret < 0)
536 		return ret;
537 	return 1; /* need to load */
538 }
539 
540 /*
541  * Batched requests need only one wake; we need to do this step last due to
542  * the fallback mechanism. The buffer is protected with kref_get(), and it
543  * won't be released until the last user calls release_firmware().
544  *
545  * Failed batched requests are possible as well; in such cases we just share
546  * the struct fw_priv and won't release it until all requests are woken
547  * and have gone through this same path.
548  */
549 static void fw_abort_batch_reqs(struct firmware *fw)
550 {
551 	struct fw_priv *fw_priv;
552 
553 	/* Loaded directly? */
554 	if (!fw || !fw->priv)
555 		return;
556 
557 	fw_priv = fw->priv;
558 	if (!fw_state_is_aborted(fw_priv))
559 		fw_state_aborted(fw_priv);
560 }
561 
562 /* called from request_firmware() and request_firmware_work_func() */
563 static int
564 _request_firmware(const struct firmware **firmware_p, const char *name,
565 		  struct device *device, void *buf, size_t size,
566 		  enum fw_opt opt_flags)
567 {
568 	struct firmware *fw = NULL;
569 	int ret;
570 
571 	if (!firmware_p)
572 		return -EINVAL;
573 
574 	if (!name || name[0] == '\0') {
575 		ret = -EINVAL;
576 		goto out;
577 	}
578 
579 	ret = _request_firmware_prepare(&fw, name, device, buf, size,
580 					opt_flags);
581 	if (ret <= 0) /* error or already assigned */
582 		goto out;
583 
584 	ret = fw_get_filesystem_firmware(device, fw->priv);
585 	if (ret) {
586 		if (!(opt_flags & FW_OPT_NO_WARN))
587 			dev_warn(device,
588 				 "Direct firmware load for %s failed with error %d\n",
589 				 name, ret);
590 		ret = firmware_fallback_sysfs(fw, name, device, opt_flags, ret);
591 	} else
592 		ret = assign_fw(fw, device, opt_flags);
593 
594  out:
595 	if (ret < 0) {
596 		fw_abort_batch_reqs(fw);
597 		release_firmware(fw);
598 		fw = NULL;
599 	}
600 
601 	*firmware_p = fw;
602 	return ret;
603 }
604 
605 /**
606  * request_firmware() - send firmware request and wait for it
607  * @firmware_p: pointer to firmware image
608  * @name: name of firmware file
609  * @device: device for which firmware is being loaded
610  *
611  *      @firmware_p will be used to return a firmware image by the name
612  *      of @name for device @device.
613  *
614  *      Should be called from user context where sleeping is allowed.
615  *
616  *      @name will be used as $FIRMWARE in the uevent environment and
617  *      should be distinctive enough not to be confused with any other
618  *      firmware image for this or any other device.
619  *
620  *	Caller must hold the reference count of @device.
621  *
622  *	The function can be called safely inside the device's suspend and
623  *	resume callbacks.
624  **/
625 int
626 request_firmware(const struct firmware **firmware_p, const char *name,
627 		 struct device *device)
628 {
629 	int ret;
630 
631 	/* Need to pin this module until return */
632 	__module_get(THIS_MODULE);
633 	ret = _request_firmware(firmware_p, name, device, NULL, 0,
634 				FW_OPT_UEVENT);
635 	module_put(THIS_MODULE);
636 	return ret;
637 }
638 EXPORT_SYMBOL(request_firmware);
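/*
 * Illustrative only (not part of the original file): a minimal sketch of the
 * synchronous API from a driver's probe path.  The driver, the firmware name
 * and the example_load_blob() helper are hypothetical.
 *
 *	static int example_probe(struct platform_device *pdev)
 *	{
 *		const struct firmware *fw;
 *		int ret;
 *
 *		ret = request_firmware(&fw, "example/blob.bin", &pdev->dev);
 *		if (ret)
 *			return ret;
 *
 *		ret = example_load_blob(pdev, fw->data, fw->size);
 *		release_firmware(fw);
 *		return ret;
 *	}
 */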
639 
640 /**
641  * firmware_request_nowarn() - request for an optional fw module
642  * @firmware: pointer to firmware image
643  * @name: name of firmware file
644  * @device: device for which firmware is being loaded
645  *
646  * This function is similar in behaviour to request_firmware(), except
647  * it doesn't produce warning messages when the file is not found.
648  * The sysfs fallback mechanism is enabled if direct filesystem lookup fails,
649  * however, failures to find the firmware file with it are still
650  * suppressed. It is therefore up to the driver to check the return value
651  * of this call and to decide when to inform the users of errors.
652  **/
653 int firmware_request_nowarn(const struct firmware **firmware, const char *name,
654 			    struct device *device)
655 {
656 	int ret;
657 
658 	/* Need to pin this module until return */
659 	__module_get(THIS_MODULE);
660 	ret = _request_firmware(firmware, name, device, NULL, 0,
661 				FW_OPT_UEVENT | FW_OPT_NO_WARN);
662 	module_put(THIS_MODULE);
663 	return ret;
664 }
665 EXPORT_SYMBOL_GPL(firmware_request_nowarn);
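/*
 * Illustrative only: a hedged sketch of requesting an optional image with
 * firmware_request_nowarn(); the "example" names are hypothetical and a
 * missing file is simply treated as "no patch to apply".
 *
 *	const struct firmware *patch;
 *
 *	if (!firmware_request_nowarn(&patch, "example/patch.bin", dev)) {
 *		example_apply_patch(dev, patch->data, patch->size);
 *		release_firmware(patch);
 *	}
 */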
666 
667 /**
668  * request_firmware_direct() - load firmware directly without usermode helper
669  * @firmware_p: pointer to firmware image
670  * @name: name of firmware file
671  * @device: device for which firmware is being loaded
672  *
673  * This function works pretty much like request_firmware(), but it doesn't
674  * fall back to the usermode helper even if the firmware couldn't be loaded
675  * directly from the filesystem.  Hence it's useful for loading optional
676  * firmware images, which aren't always present, without the long udev timeouts.
677  **/
678 int request_firmware_direct(const struct firmware **firmware_p,
679 			    const char *name, struct device *device)
680 {
681 	int ret;
682 
683 	__module_get(THIS_MODULE);
684 	ret = _request_firmware(firmware_p, name, device, NULL, 0,
685 				FW_OPT_UEVENT | FW_OPT_NO_WARN |
686 				FW_OPT_NOFALLBACK);
687 	module_put(THIS_MODULE);
688 	return ret;
689 }
690 EXPORT_SYMBOL_GPL(request_firmware_direct);
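/*
 * Illustrative only: request_firmware_direct() is called exactly like
 * request_firmware(); the visible difference is that a missing file fails
 * quickly instead of waiting for the usermode fallback.  The firmware name
 * below is a made-up example.
 *
 *	ret = request_firmware_direct(&fw, "example/optional.bin", dev);
 */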
691 
692 /**
693  * firmware_request_cache() - cache firmware for suspend so resume can use it
694  * @name: name of firmware file
695  * @device: device for which the firmware should be cached
696  *
697  * Some devices have an optimization that allows them to avoid reloading
698  * firmware on system reboot. This optimization may still require the
699  * firmware to be present on resume from suspend. This routine can be
700  * used to ensure the firmware is present on resume from suspend in these
701  * situations. This helper is not compatible with drivers which use
702  * request_firmware_into_buf() or request_firmware_nowait() with no uevent set.
703  **/
704 int firmware_request_cache(struct device *device, const char *name)
705 {
706 	int ret;
707 
708 	mutex_lock(&fw_lock);
709 	ret = fw_add_devm_name(device, name);
710 	mutex_unlock(&fw_lock);
711 
712 	return ret;
713 }
714 EXPORT_SYMBOL_GPL(firmware_request_cache);
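/*
 * Illustrative only: a hedged sketch of a driver asking the loader to keep
 * its firmware cached across suspend, for example from probe; the firmware
 * name is hypothetical.
 *
 *	ret = firmware_request_cache(&pdev->dev, "example/blob.bin");
 *	if (ret)
 *		dev_warn(&pdev->dev, "firmware won't be cached for resume\n");
 */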
715 
716 /**
717  * request_firmware_into_buf() - load firmware into a previously allocated buffer
718  * @firmware_p: pointer to firmware image
719  * @name: name of firmware file
720  * @device: device for which firmware is being loaded and DMA region allocated
721  * @buf: address of buffer to load firmware into
722  * @size: size of buffer
723  *
724  * This function works pretty much like request_firmware(), but it doesn't
725  * allocate a buffer to hold the firmware data. Instead, the firmware
726  * is loaded directly into the buffer pointed to by @buf and the @firmware_p
727  * data member is pointed at @buf.
728  *
729  * This function doesn't cache firmware either.
730  */
731 int
732 request_firmware_into_buf(const struct firmware **firmware_p, const char *name,
733 			  struct device *device, void *buf, size_t size)
734 {
735 	int ret;
736 
737 	if (fw_cache_is_setup(device, name))
738 		return -EOPNOTSUPP;
739 
740 	__module_get(THIS_MODULE);
741 	ret = _request_firmware(firmware_p, name, device, buf, size,
742 				FW_OPT_UEVENT | FW_OPT_NOCACHE);
743 	module_put(THIS_MODULE);
744 	return ret;
745 }
746 EXPORT_SYMBOL(request_firmware_into_buf);
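/*
 * Illustrative only: a minimal sketch of loading into a caller-owned buffer;
 * EXAMPLE_FW_MAX, the firmware name and example_start_device() are
 * hypothetical, and a real driver may want a DMA-capable allocation instead.
 *
 *	void *buf = kmalloc(EXAMPLE_FW_MAX, GFP_KERNEL);
 *	const struct firmware *fw;
 *	int ret;
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	ret = request_firmware_into_buf(&fw, "example/blob.bin", dev,
 *					buf, EXAMPLE_FW_MAX);
 *	if (!ret) {
 *		example_start_device(dev, buf, fw->size);
 *		release_firmware(fw);
 *	}
 *	kfree(buf);
 *
 * On success, fw->data points at @buf and fw->size is the number of bytes read.
 */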
747 
748 /**
749  * release_firmware() - release the resource associated with a firmware image
750  * @fw: firmware resource to release
751  **/
752 void release_firmware(const struct firmware *fw)
753 {
754 	if (fw) {
755 		if (!fw_is_builtin_firmware(fw))
756 			firmware_free_data(fw);
757 		kfree(fw);
758 	}
759 }
760 EXPORT_SYMBOL(release_firmware);
761 
762 /* Async support */
763 struct firmware_work {
764 	struct work_struct work;
765 	struct module *module;
766 	const char *name;
767 	struct device *device;
768 	void *context;
769 	void (*cont)(const struct firmware *fw, void *context);
770 	enum fw_opt opt_flags;
771 };
772 
773 static void request_firmware_work_func(struct work_struct *work)
774 {
775 	struct firmware_work *fw_work;
776 	const struct firmware *fw;
777 
778 	fw_work = container_of(work, struct firmware_work, work);
779 
780 	_request_firmware(&fw, fw_work->name, fw_work->device, NULL, 0,
781 			  fw_work->opt_flags);
782 	fw_work->cont(fw, fw_work->context);
783 	put_device(fw_work->device); /* taken in request_firmware_nowait() */
784 
785 	module_put(fw_work->module);
786 	kfree_const(fw_work->name);
787 	kfree(fw_work);
788 }
789 
790 /**
791  * request_firmware_nowait() - asynchronous version of request_firmware
792  * @module: module requesting the firmware
793  * @uevent: send a uevent to copy the firmware image if this flag
794  *	is non-zero, otherwise the firmware copy must be done manually.
795  * @name: name of firmware file
796  * @device: device for which firmware is being loaded
797  * @gfp: allocation flags
798  * @context: will be passed over to @cont, and
799  *	@fw may be %NULL if firmware request fails.
800  * @cont: function will be called asynchronously when the firmware
801  *	request is over.
802  *
803  *	Caller must hold the reference count of @device.
804  *
805  *	Asynchronous variant of request_firmware() for user contexts:
806  *		- sleeps for as short a period as possible, since it may
807  *		  increase the kernel boot time of built-in device drivers
808  *		  requesting firmware in their ->probe() methods, if
809  *		  @gfp is GFP_KERNEL.
810  *
811  *		- can't sleep at all if @gfp is GFP_ATOMIC.
812  **/
813 int
814 request_firmware_nowait(
815 	struct module *module, bool uevent,
816 	const char *name, struct device *device, gfp_t gfp, void *context,
817 	void (*cont)(const struct firmware *fw, void *context))
818 {
819 	struct firmware_work *fw_work;
820 
821 	fw_work = kzalloc(sizeof(struct firmware_work), gfp);
822 	if (!fw_work)
823 		return -ENOMEM;
824 
825 	fw_work->module = module;
826 	fw_work->name = kstrdup_const(name, gfp);
827 	if (!fw_work->name) {
828 		kfree(fw_work);
829 		return -ENOMEM;
830 	}
831 	fw_work->device = device;
832 	fw_work->context = context;
833 	fw_work->cont = cont;
834 	fw_work->opt_flags = FW_OPT_NOWAIT |
835 		(uevent ? FW_OPT_UEVENT : FW_OPT_USERHELPER);
836 
837 	if (!uevent && fw_cache_is_setup(device, name)) {
838 		kfree_const(fw_work->name);
839 		kfree(fw_work);
840 		return -EOPNOTSUPP;
841 	}
842 
843 	if (!try_module_get(module)) {
844 		kfree_const(fw_work->name);
845 		kfree(fw_work);
846 		return -EFAULT;
847 	}
848 
849 	get_device(fw_work->device);
850 	INIT_WORK(&fw_work->work, request_firmware_work_func);
851 	schedule_work(&fw_work->work);
852 	return 0;
853 }
854 EXPORT_SYMBOL(request_firmware_nowait);
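/*
 * Illustrative only: a hedged sketch of the asynchronous API; the callback,
 * the example_dev context and example_boot() are hypothetical.
 *
 *	static void example_fw_ready(const struct firmware *fw, void *context)
 *	{
 *		struct example_dev *edev = context;
 *
 *		if (!fw) {
 *			dev_err(edev->dev, "firmware request failed\n");
 *			return;
 *		}
 *		example_boot(edev, fw->data, fw->size);
 *		release_firmware(fw);
 *	}
 *
 *	ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
 *				      "example/blob.bin", edev->dev, GFP_KERNEL,
 *				      edev, example_fw_ready);
 */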
855 
856 #ifdef CONFIG_PM_SLEEP
857 static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain);
858 
859 /**
860  * cache_firmware() - cache one firmware image in kernel memory space
861  * @fw_name: the firmware image name
862  *
863  * Cache firmware in kernel memory so that drivers can use it when the
864  * system isn't ready for them to request a firmware image from userspace.
865  * Once it returns successfully, a driver can use request_firmware or its
866  * nowait version to get the cached firmware without interacting
867  * with userspace.
868  *
869  * Return 0 if the firmware image has been cached successfully
870  * Return !0 otherwise
871  *
872  */
873 static int cache_firmware(const char *fw_name)
874 {
875 	int ret;
876 	const struct firmware *fw;
877 
878 	pr_debug("%s: %s\n", __func__, fw_name);
879 
880 	ret = request_firmware(&fw, fw_name, NULL);
881 	if (!ret)
882 		kfree(fw);
883 
884 	pr_debug("%s: %s ret=%d\n", __func__, fw_name, ret);
885 
886 	return ret;
887 }
888 
889 static struct fw_priv *lookup_fw_priv(const char *fw_name)
890 {
891 	struct fw_priv *tmp;
892 	struct firmware_cache *fwc = &fw_cache;
893 
894 	spin_lock(&fwc->lock);
895 	tmp = __lookup_fw_priv(fw_name);
896 	spin_unlock(&fwc->lock);
897 
898 	return tmp;
899 }
900 
901 /**
902  * uncache_firmware() - remove one cached firmware image
903  * @fw_name: the firmware image name
904  *
905  * Uncache one firmware image which has been cached successfully
906  * before.
907  *
908  * Return 0 if the firmware cache has been removed successfully
909  * Return !0 otherwise
910  *
911  */
912 static int uncache_firmware(const char *fw_name)
913 {
914 	struct fw_priv *fw_priv;
915 	struct firmware fw;
916 
917 	pr_debug("%s: %s\n", __func__, fw_name);
918 
919 	if (fw_get_builtin_firmware(&fw, fw_name, NULL, 0))
920 		return 0;
921 
922 	fw_priv = lookup_fw_priv(fw_name);
923 	if (fw_priv) {
924 		free_fw_priv(fw_priv);
925 		return 0;
926 	}
927 
928 	return -EINVAL;
929 }
930 
931 static struct fw_cache_entry *alloc_fw_cache_entry(const char *name)
932 {
933 	struct fw_cache_entry *fce;
934 
935 	fce = kzalloc(sizeof(*fce), GFP_ATOMIC);
936 	if (!fce)
937 		goto exit;
938 
939 	fce->name = kstrdup_const(name, GFP_ATOMIC);
940 	if (!fce->name) {
941 		kfree(fce);
942 		fce = NULL;
943 		goto exit;
944 	}
945 exit:
946 	return fce;
947 }
948 
949 static int __fw_entry_found(const char *name)
950 {
951 	struct firmware_cache *fwc = &fw_cache;
952 	struct fw_cache_entry *fce;
953 
954 	list_for_each_entry(fce, &fwc->fw_names, list) {
955 		if (!strcmp(fce->name, name))
956 			return 1;
957 	}
958 	return 0;
959 }
960 
961 static int fw_cache_piggyback_on_request(const char *name)
962 {
963 	struct firmware_cache *fwc = &fw_cache;
964 	struct fw_cache_entry *fce;
965 	int ret = 0;
966 
967 	spin_lock(&fwc->name_lock);
968 	if (__fw_entry_found(name))
969 		goto found;
970 
971 	fce = alloc_fw_cache_entry(name);
972 	if (fce) {
973 		ret = 1;
974 		list_add(&fce->list, &fwc->fw_names);
975 		pr_debug("%s: fw: %s\n", __func__, name);
976 	}
977 found:
978 	spin_unlock(&fwc->name_lock);
979 	return ret;
980 }
981 
982 static void free_fw_cache_entry(struct fw_cache_entry *fce)
983 {
984 	kfree_const(fce->name);
985 	kfree(fce);
986 }
987 
988 static void __async_dev_cache_fw_image(void *fw_entry,
989 				       async_cookie_t cookie)
990 {
991 	struct fw_cache_entry *fce = fw_entry;
992 	struct firmware_cache *fwc = &fw_cache;
993 	int ret;
994 
995 	ret = cache_firmware(fce->name);
996 	if (ret) {
997 		spin_lock(&fwc->name_lock);
998 		list_del(&fce->list);
999 		spin_unlock(&fwc->name_lock);
1000 
1001 		free_fw_cache_entry(fce);
1002 	}
1003 }
1004 
1005 /* called with dev->devres_lock held */
1006 static void dev_create_fw_entry(struct device *dev, void *res,
1007 				void *data)
1008 {
1009 	struct fw_name_devm *fwn = res;
1010 	const char *fw_name = fwn->name;
1011 	struct list_head *head = data;
1012 	struct fw_cache_entry *fce;
1013 
1014 	fce = alloc_fw_cache_entry(fw_name);
1015 	if (fce)
1016 		list_add(&fce->list, head);
1017 }
1018 
1019 static int devm_name_match(struct device *dev, void *res,
1020 			   void *match_data)
1021 {
1022 	struct fw_name_devm *fwn = res;
1023 	return (fwn->magic == (unsigned long)match_data);
1024 }
1025 
1026 static void dev_cache_fw_image(struct device *dev, void *data)
1027 {
1028 	LIST_HEAD(todo);
1029 	struct fw_cache_entry *fce;
1030 	struct fw_cache_entry *fce_next;
1031 	struct firmware_cache *fwc = &fw_cache;
1032 
1033 	devres_for_each_res(dev, fw_name_devm_release,
1034 			    devm_name_match, &fw_cache,
1035 			    dev_create_fw_entry, &todo);
1036 
1037 	list_for_each_entry_safe(fce, fce_next, &todo, list) {
1038 		list_del(&fce->list);
1039 
1040 		spin_lock(&fwc->name_lock);
1041 		/* only one cache entry for one firmware */
1042 		if (!__fw_entry_found(fce->name)) {
1043 			list_add(&fce->list, &fwc->fw_names);
1044 		} else {
1045 			free_fw_cache_entry(fce);
1046 			fce = NULL;
1047 		}
1048 		spin_unlock(&fwc->name_lock);
1049 
1050 		if (fce)
1051 			async_schedule_domain(__async_dev_cache_fw_image,
1052 					      (void *)fce,
1053 					      &fw_cache_domain);
1054 	}
1055 }
1056 
1057 static void __device_uncache_fw_images(void)
1058 {
1059 	struct firmware_cache *fwc = &fw_cache;
1060 	struct fw_cache_entry *fce;
1061 
1062 	spin_lock(&fwc->name_lock);
1063 	while (!list_empty(&fwc->fw_names)) {
1064 		fce = list_entry(fwc->fw_names.next,
1065 				struct fw_cache_entry, list);
1066 		list_del(&fce->list);
1067 		spin_unlock(&fwc->name_lock);
1068 
1069 		uncache_firmware(fce->name);
1070 		free_fw_cache_entry(fce);
1071 
1072 		spin_lock(&fwc->name_lock);
1073 	}
1074 	spin_unlock(&fwc->name_lock);
1075 }
1076 
1077 /**
1078  * device_cache_fw_images() - cache devices' firmware
1079  *
1080  * If a device has called request_firmware or its nowait version
1081  * successfully before, the firmware names are recorded in the
1082  * device's devres list, so device_cache_fw_images can call
1083  * cache_firmware() to cache these firmware images for the device,
1084  * and the device driver can then load its firmware easily at a
1085  * time when the system is not ready to complete firmware loading.
1086  */
1087 static void device_cache_fw_images(void)
1088 {
1089 	struct firmware_cache *fwc = &fw_cache;
1090 	DEFINE_WAIT(wait);
1091 
1092 	pr_debug("%s\n", __func__);
1093 
1094 	/* cancel uncache work */
1095 	cancel_delayed_work_sync(&fwc->work);
1096 
1097 	fw_fallback_set_cache_timeout();
1098 
1099 	mutex_lock(&fw_lock);
1100 	fwc->state = FW_LOADER_START_CACHE;
1101 	dpm_for_each_dev(NULL, dev_cache_fw_image);
1102 	mutex_unlock(&fw_lock);
1103 
1104 	/* wait for completion of caching firmware for all devices */
1105 	async_synchronize_full_domain(&fw_cache_domain);
1106 
1107 	fw_fallback_set_default_timeout();
1108 }
1109 
1110 /**
1111  * device_uncache_fw_images() - uncache devices' firmware
1112  *
1113  * uncache all firmware images which have been cached successfully
1114  * by device_cache_fw_images earlier
1115  */
1116 static void device_uncache_fw_images(void)
1117 {
1118 	pr_debug("%s\n", __func__);
1119 	__device_uncache_fw_images();
1120 }
1121 
1122 static void device_uncache_fw_images_work(struct work_struct *work)
1123 {
1124 	device_uncache_fw_images();
1125 }
1126 
1127 /**
1128  * device_uncache_fw_images_delay() - uncache devices firmwares
1129  * device_uncache_fw_images_delay() - uncache devices' firmware
1130  * @delay: number of milliseconds to wait before uncaching device firmware
1131  *
1132  * uncache all devices' firmware images which have been cached successfully
1133  */
1134 static void device_uncache_fw_images_delay(unsigned long delay)
1135 {
1136 	queue_delayed_work(system_power_efficient_wq, &fw_cache.work,
1137 			   msecs_to_jiffies(delay));
1138 }
1139 
1140 static int fw_pm_notify(struct notifier_block *notify_block,
1141 			unsigned long mode, void *unused)
1142 {
1143 	switch (mode) {
1144 	case PM_HIBERNATION_PREPARE:
1145 	case PM_SUSPEND_PREPARE:
1146 	case PM_RESTORE_PREPARE:
1147 		/*
1148 		 * kill pending fallback requests with a custom fallback
1149 		 * to avoid stalling suspend.
1150 		 */
1151 		kill_pending_fw_fallback_reqs(true);
1152 		device_cache_fw_images();
1153 		break;
1154 
1155 	case PM_POST_SUSPEND:
1156 	case PM_POST_HIBERNATION:
1157 	case PM_POST_RESTORE:
1158 		/*
1159 		 * In case system sleep failed and syscore_suspend was
1160 		 * not called.
1161 		 */
1162 		mutex_lock(&fw_lock);
1163 		fw_cache.state = FW_LOADER_NO_CACHE;
1164 		mutex_unlock(&fw_lock);
1165 
1166 		device_uncache_fw_images_delay(10 * MSEC_PER_SEC);
1167 		break;
1168 	}
1169 
1170 	return 0;
1171 }
1172 
1173 /* stop caching firmware once syscore_suspend is reached */
1174 static int fw_suspend(void)
1175 {
1176 	fw_cache.state = FW_LOADER_NO_CACHE;
1177 	return 0;
1178 }
1179 
1180 static struct syscore_ops fw_syscore_ops = {
1181 	.suspend = fw_suspend,
1182 };
1183 
1184 static int __init register_fw_pm_ops(void)
1185 {
1186 	int ret;
1187 
1188 	spin_lock_init(&fw_cache.name_lock);
1189 	INIT_LIST_HEAD(&fw_cache.fw_names);
1190 
1191 	INIT_DELAYED_WORK(&fw_cache.work,
1192 			  device_uncache_fw_images_work);
1193 
1194 	fw_cache.pm_notify.notifier_call = fw_pm_notify;
1195 	ret = register_pm_notifier(&fw_cache.pm_notify);
1196 	if (ret)
1197 		return ret;
1198 
1199 	register_syscore_ops(&fw_syscore_ops);
1200 
1201 	return ret;
1202 }
1203 
1204 static inline void unregister_fw_pm_ops(void)
1205 {
1206 	unregister_syscore_ops(&fw_syscore_ops);
1207 	unregister_pm_notifier(&fw_cache.pm_notify);
1208 }
1209 #else
1210 static int fw_cache_piggyback_on_request(const char *name)
1211 {
1212 	return 0;
1213 }
1214 static inline int register_fw_pm_ops(void)
1215 {
1216 	return 0;
1217 }
1218 static inline void unregister_fw_pm_ops(void)
1219 {
1220 }
1221 #endif
1222 
1223 static void __init fw_cache_init(void)
1224 {
1225 	spin_lock_init(&fw_cache.lock);
1226 	INIT_LIST_HEAD(&fw_cache.head);
1227 	fw_cache.state = FW_LOADER_NO_CACHE;
1228 }
1229 
1230 static int fw_shutdown_notify(struct notifier_block *unused1,
1231 			      unsigned long unused2, void *unused3)
1232 {
1233 	/*
1234 	 * Kill all pending fallback requests to avoid both stalling shutdown
1235 	 * and deadlocking on the usermode_lock.
1236 	 */
1237 	kill_pending_fw_fallback_reqs(false);
1238 
1239 	return NOTIFY_DONE;
1240 }
1241 
1242 static struct notifier_block fw_shutdown_nb = {
1243 	.notifier_call = fw_shutdown_notify,
1244 };
1245 
1246 static int __init firmware_class_init(void)
1247 {
1248 	int ret;
1249 
1250 	/* No need to unfold these on exit */
1251 	fw_cache_init();
1252 
1253 	ret = register_fw_pm_ops();
1254 	if (ret)
1255 		return ret;
1256 
1257 	ret = register_reboot_notifier(&fw_shutdown_nb);
1258 	if (ret)
1259 		goto out;
1260 
1261 	return register_sysfs_loader();
1262 
1263 out:
1264 	unregister_fw_pm_ops();
1265 	return ret;
1266 }
1267 
1268 static void __exit firmware_class_exit(void)
1269 {
1270 	unregister_fw_pm_ops();
1271 	unregister_reboot_notifier(&fw_shutdown_nb);
1272 	unregister_sysfs_loader();
1273 }
1274 
1275 fs_initcall(firmware_class_init);
1276 module_exit(firmware_class_exit);
1277