1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * main.c - Multi purpose firmware loading support
4  *
5  * Copyright (c) 2003 Manuel Estrada Sainz
6  *
7  * Please see Documentation/driver-api/firmware/ for more information.
8  *
9  */
10 
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 
13 #include <linux/capability.h>
14 #include <linux/device.h>
15 #include <linux/kernel_read_file.h>
16 #include <linux/module.h>
17 #include <linux/init.h>
18 #include <linux/initrd.h>
19 #include <linux/timer.h>
20 #include <linux/vmalloc.h>
21 #include <linux/interrupt.h>
22 #include <linux/bitops.h>
23 #include <linux/mutex.h>
24 #include <linux/workqueue.h>
25 #include <linux/highmem.h>
26 #include <linux/firmware.h>
27 #include <linux/slab.h>
28 #include <linux/sched.h>
29 #include <linux/file.h>
30 #include <linux/list.h>
31 #include <linux/fs.h>
32 #include <linux/async.h>
33 #include <linux/pm.h>
34 #include <linux/suspend.h>
35 #include <linux/syscore_ops.h>
36 #include <linux/reboot.h>
37 #include <linux/security.h>
38 #include <linux/xz.h>
39 
40 #include <generated/utsrelease.h>
41 
42 #include "../base.h"
43 #include "firmware.h"
44 #include "fallback.h"
45 
46 MODULE_AUTHOR("Manuel Estrada Sainz");
47 MODULE_DESCRIPTION("Multi purpose firmware loading support");
48 MODULE_LICENSE("GPL");
49 
/*
 * Tracks every in-flight and cached firmware image.  A single global
 * instance (fw_cache below) is shared by all requests.
 */
struct firmware_cache {
	/* firmware_buf instance will be added into the below list */
	spinlock_t lock;	/* protects 'head' */
	struct list_head head;
	int state;	/* FW_LOADER_NO_CACHE or FW_LOADER_START_CACHE */

#ifdef CONFIG_FW_CACHE
	/*
	 * Names of firmware images which have been cached successfully
	 * will be added into the below list so that device uncache
	 * helper can trace which firmware images have been cached
	 * before.
	 */
	spinlock_t name_lock;	/* protects 'fw_names' */
	struct list_head fw_names;

	struct delayed_work work;

	struct notifier_block   pm_notify;
#endif
};
71 
/* One firmware image name tracked by the cache (linked on fw_names) */
struct fw_cache_entry {
	struct list_head list;
	const char *name;	/* firmware image name */
};
76 
/* devres payload remembering a firmware name for per-device caching */
struct fw_name_devm {
	unsigned long magic;	/* set to &fw_cache to mark our devres entries */
	const char *name;	/* kstrdup_const()'ed firmware name */
};
81 
/* Map an embedded kref back to its containing fw_priv */
static inline struct fw_priv *to_fw_priv(struct kref *ref)
{
	return container_of(ref, struct fw_priv, ref);
}
86 
/* values for firmware_cache.state */
#define	FW_LOADER_NO_CACHE	0
#define	FW_LOADER_START_CACHE	1

/* fw_lock could be moved to 'struct fw_sysfs' but since it is just
 * guarding for corner cases a global lock should be OK */
DEFINE_MUTEX(fw_lock);

/* the single global cache instance shared by all firmware requests */
static struct firmware_cache fw_cache;
95 
96 /* Builtin firmware support */
97 
98 #ifdef CONFIG_FW_LOADER
99 
100 extern struct builtin_fw __start_builtin_fw[];
101 extern struct builtin_fw __end_builtin_fw[];
102 
103 static bool fw_copy_to_prealloc_buf(struct firmware *fw,
104 				    void *buf, size_t size)
105 {
106 	if (!buf)
107 		return true;
108 	if (size < fw->size)
109 		return false;
110 	memcpy(buf, fw->data, fw->size);
111 	return true;
112 }
113 
114 static bool firmware_request_builtin(struct firmware *fw, const char *name)
115 {
116 	struct builtin_fw *b_fw;
117 
118 	for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
119 		if (strcmp(name, b_fw->name) == 0) {
120 			fw->size = b_fw->size;
121 			fw->data = b_fw->data;
122 			return true;
123 		}
124 	}
125 
126 	return false;
127 }
128 
129 static bool firmware_request_builtin_buf(struct firmware *fw, const char *name,
130 					 void *buf, size_t size)
131 {
132 	if (!firmware_request_builtin(fw, name))
133 		return false;
134 	return fw_copy_to_prealloc_buf(fw, buf, size);
135 }
136 
137 static bool fw_is_builtin_firmware(const struct firmware *fw)
138 {
139 	struct builtin_fw *b_fw;
140 
141 	for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++)
142 		if (fw->data == b_fw->data)
143 			return true;
144 
145 	return false;
146 }
147 
148 #else /* Module case - no builtin firmware support */
149 
/* No built-in firmware table when the loader is built as a module */
static inline bool firmware_request_builtin(struct firmware *fw,
					    const char *name)
{
	return false;
}
155 
/* Module build: built-in lookup always misses */
static inline bool firmware_request_builtin_buf(struct firmware *fw,
						const char *name, void *buf,
						size_t size)
{
	return false;
}
162 
/* Module build: nothing is ever a built-in image */
static inline bool fw_is_builtin_firmware(const struct firmware *fw)
{
	return false;
}
167 #endif
168 
169 static void fw_state_init(struct fw_priv *fw_priv)
170 {
171 	struct fw_state *fw_st = &fw_priv->fw_st;
172 
173 	init_completion(&fw_st->completion);
174 	fw_st->status = FW_STATUS_UNKNOWN;
175 }
176 
/* Block (without timeout) until the firmware load completes or aborts */
static inline int fw_state_wait(struct fw_priv *fw_priv)
{
	return __fw_state_wait_common(fw_priv, MAX_SCHEDULE_TIMEOUT);
}
181 
182 static void fw_cache_piggyback_on_request(struct fw_priv *fw_priv);
183 
184 static struct fw_priv *__allocate_fw_priv(const char *fw_name,
185 					  struct firmware_cache *fwc,
186 					  void *dbuf,
187 					  size_t size,
188 					  size_t offset,
189 					  u32 opt_flags)
190 {
191 	struct fw_priv *fw_priv;
192 
193 	/* For a partial read, the buffer must be preallocated. */
194 	if ((opt_flags & FW_OPT_PARTIAL) && !dbuf)
195 		return NULL;
196 
197 	/* Only partial reads are allowed to use an offset. */
198 	if (offset != 0 && !(opt_flags & FW_OPT_PARTIAL))
199 		return NULL;
200 
201 	fw_priv = kzalloc(sizeof(*fw_priv), GFP_ATOMIC);
202 	if (!fw_priv)
203 		return NULL;
204 
205 	fw_priv->fw_name = kstrdup_const(fw_name, GFP_ATOMIC);
206 	if (!fw_priv->fw_name) {
207 		kfree(fw_priv);
208 		return NULL;
209 	}
210 
211 	kref_init(&fw_priv->ref);
212 	fw_priv->fwc = fwc;
213 	fw_priv->data = dbuf;
214 	fw_priv->allocated_size = size;
215 	fw_priv->offset = offset;
216 	fw_priv->opt_flags = opt_flags;
217 	fw_state_init(fw_priv);
218 #ifdef CONFIG_FW_LOADER_USER_HELPER
219 	INIT_LIST_HEAD(&fw_priv->pending_list);
220 #endif
221 
222 	pr_debug("%s: fw-%s fw_priv=%p\n", __func__, fw_name, fw_priv);
223 
224 	return fw_priv;
225 }
226 
227 static struct fw_priv *__lookup_fw_priv(const char *fw_name)
228 {
229 	struct fw_priv *tmp;
230 	struct firmware_cache *fwc = &fw_cache;
231 
232 	list_for_each_entry(tmp, &fwc->head, list)
233 		if (!strcmp(tmp->fw_name, fw_name))
234 			return tmp;
235 	return NULL;
236 }
237 
/*
 * Look up an in-flight/cached fw_priv by name, or allocate a new one.
 *
 * Returns 1 when the request was batched onto an existing fw_priv of
 * the same name (an extra reference is taken on it), 0 when a new
 * fw_priv was allocated, or -ENOMEM on allocation failure.
 */
static int alloc_lookup_fw_priv(const char *fw_name,
				struct firmware_cache *fwc,
				struct fw_priv **fw_priv,
				void *dbuf,
				size_t size,
				size_t offset,
				u32 opt_flags)
{
	struct fw_priv *tmp;

	spin_lock(&fwc->lock);
	/*
	 * Do not merge requests that are marked to be non-cached or
	 * are performing partial reads.
	 */
	if (!(opt_flags & (FW_OPT_NOCACHE | FW_OPT_PARTIAL))) {
		tmp = __lookup_fw_priv(fw_name);
		if (tmp) {
			kref_get(&tmp->ref);
			spin_unlock(&fwc->lock);
			*fw_priv = tmp;
			pr_debug("batched request - sharing the same struct fw_priv and lookup for multiple requests\n");
			return 1;
		}
	}

	/* allocated under the lock, hence GFP_ATOMIC inside */
	tmp = __allocate_fw_priv(fw_name, fwc, dbuf, size, offset, opt_flags);
	if (tmp) {
		INIT_LIST_HEAD(&tmp->list);
		/* non-cached requests are never listed, so never found */
		if (!(opt_flags & FW_OPT_NOCACHE))
			list_add(&tmp->list, &fwc->head);
	}
	spin_unlock(&fwc->lock);

	*fw_priv = tmp;

	return tmp ? 0 : -ENOMEM;
}
277 
/*
 * kref release callback: unlink the fw_priv from the cache list and
 * free it.  Invoked via kref_put() with fwc->lock held; drops that
 * lock itself before freeing (hence the __releases annotation).
 */
static void __free_fw_priv(struct kref *ref)
	__releases(&fwc->lock)
{
	struct fw_priv *fw_priv = to_fw_priv(ref);
	struct firmware_cache *fwc = fw_priv->fwc;

	pr_debug("%s: fw-%s fw_priv=%p data=%p size=%u\n",
		 __func__, fw_priv->fw_name, fw_priv, fw_priv->data,
		 (unsigned int)fw_priv->size);

	list_del(&fw_priv->list);
	spin_unlock(&fwc->lock);

	/* paged buffers own their pages; preallocated buffers belong to the caller */
	if (fw_is_paged_buf(fw_priv))
		fw_free_paged_buf(fw_priv);
	else if (!fw_priv->allocated_size)
		vfree(fw_priv->data);

	kfree_const(fw_priv->fw_name);
	kfree(fw_priv);
}
299 
/*
 * Drop one reference on @fw_priv.  If it was the last, __free_fw_priv()
 * runs and releases fwc->lock itself; otherwise unlock here.
 */
static void free_fw_priv(struct fw_priv *fw_priv)
{
	struct firmware_cache *fwc = fw_priv->fwc;
	spin_lock(&fwc->lock);
	if (!kref_put(&fw_priv->ref, __free_fw_priv))
		spin_unlock(&fwc->lock);
}
307 
308 #ifdef CONFIG_FW_LOADER_PAGED_BUF
/* True when the firmware data lives in loader-owned pages */
bool fw_is_paged_buf(struct fw_priv *fw_priv)
{
	return fw_priv->is_paged_buf;
}
313 
314 void fw_free_paged_buf(struct fw_priv *fw_priv)
315 {
316 	int i;
317 
318 	if (!fw_priv->pages)
319 		return;
320 
321 	vunmap(fw_priv->data);
322 
323 	for (i = 0; i < fw_priv->nr_pages; i++)
324 		__free_page(fw_priv->pages[i]);
325 	kvfree(fw_priv->pages);
326 	fw_priv->pages = NULL;
327 	fw_priv->page_array_size = 0;
328 	fw_priv->nr_pages = 0;
329 }
330 
331 int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed)
332 {
333 	/* If the array of pages is too small, grow it */
334 	if (fw_priv->page_array_size < pages_needed) {
335 		int new_array_size = max(pages_needed,
336 					 fw_priv->page_array_size * 2);
337 		struct page **new_pages;
338 
339 		new_pages = kvmalloc_array(new_array_size, sizeof(void *),
340 					   GFP_KERNEL);
341 		if (!new_pages)
342 			return -ENOMEM;
343 		memcpy(new_pages, fw_priv->pages,
344 		       fw_priv->page_array_size * sizeof(void *));
345 		memset(&new_pages[fw_priv->page_array_size], 0, sizeof(void *) *
346 		       (new_array_size - fw_priv->page_array_size));
347 		kvfree(fw_priv->pages);
348 		fw_priv->pages = new_pages;
349 		fw_priv->page_array_size = new_array_size;
350 	}
351 
352 	while (fw_priv->nr_pages < pages_needed) {
353 		fw_priv->pages[fw_priv->nr_pages] =
354 			alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
355 
356 		if (!fw_priv->pages[fw_priv->nr_pages])
357 			return -ENOMEM;
358 		fw_priv->nr_pages++;
359 	}
360 
361 	return 0;
362 }
363 
/*
 * Map the collected pages into one contiguous, read-only kernel
 * virtual area and expose it via fw_priv->data.  No-op when no pages
 * were allocated.  Returns 0 or -ENOMEM.
 */
int fw_map_paged_buf(struct fw_priv *fw_priv)
{
	/* one pages buffer should be mapped/unmapped only once */
	if (!fw_priv->pages)
		return 0;

	/* drop any previous mapping before creating the final RO one */
	vunmap(fw_priv->data);
	fw_priv->data = vmap(fw_priv->pages, fw_priv->nr_pages, 0,
			     PAGE_KERNEL_RO);
	if (!fw_priv->data)
		return -ENOMEM;

	return 0;
}
378 #endif
379 
380 /*
381  * XZ-compressed firmware support
382  */
383 #ifdef CONFIG_FW_LOADER_COMPRESS
384 /* show an error and return the standard error code */
385 static int fw_decompress_xz_error(struct device *dev, enum xz_ret xz_ret)
386 {
387 	if (xz_ret != XZ_STREAM_END) {
388 		dev_warn(dev, "xz decompression failed (xz_ret=%d)\n", xz_ret);
389 		return xz_ret == XZ_MEM_ERROR ? -ENOMEM : -EINVAL;
390 	}
391 	return 0;
392 }
393 
394 /* single-shot decompression onto the pre-allocated buffer */
395 static int fw_decompress_xz_single(struct device *dev, struct fw_priv *fw_priv,
396 				   size_t in_size, const void *in_buffer)
397 {
398 	struct xz_dec *xz_dec;
399 	struct xz_buf xz_buf;
400 	enum xz_ret xz_ret;
401 
402 	xz_dec = xz_dec_init(XZ_SINGLE, (u32)-1);
403 	if (!xz_dec)
404 		return -ENOMEM;
405 
406 	xz_buf.in_size = in_size;
407 	xz_buf.in = in_buffer;
408 	xz_buf.in_pos = 0;
409 	xz_buf.out_size = fw_priv->allocated_size;
410 	xz_buf.out = fw_priv->data;
411 	xz_buf.out_pos = 0;
412 
413 	xz_ret = xz_dec_run(xz_dec, &xz_buf);
414 	xz_dec_end(xz_dec);
415 
416 	fw_priv->size = xz_buf.out_pos;
417 	return fw_decompress_xz_error(dev, xz_ret);
418 }
419 
/*
 * Decompress into a dynamically grown paged buffer (used when no
 * preallocated buffer exists), then map the pages read-only via
 * fw_map_paged_buf().  fw_priv->size accumulates the output length.
 */
static int fw_decompress_xz_pages(struct device *dev, struct fw_priv *fw_priv,
				  size_t in_size, const void *in_buffer)
{
	struct xz_dec *xz_dec;
	struct xz_buf xz_buf;
	enum xz_ret xz_ret;
	struct page *page;
	int err = 0;

	xz_dec = xz_dec_init(XZ_DYNALLOC, (u32)-1);
	if (!xz_dec)
		return -ENOMEM;

	xz_buf.in_size = in_size;
	xz_buf.in = in_buffer;
	xz_buf.in_pos = 0;

	fw_priv->is_paged_buf = true;
	fw_priv->size = 0;
	/* grow the buffer one page at a time as output is produced */
	do {
		if (fw_grow_paged_buf(fw_priv, fw_priv->nr_pages + 1)) {
			err = -ENOMEM;
			goto out;
		}

		/* decompress onto the new allocated page */
		page = fw_priv->pages[fw_priv->nr_pages - 1];
		xz_buf.out = kmap(page);
		xz_buf.out_pos = 0;
		xz_buf.out_size = PAGE_SIZE;
		xz_ret = xz_dec_run(xz_dec, &xz_buf);
		kunmap(page);
		fw_priv->size += xz_buf.out_pos;
		/* partial decompression means either end or error */
		if (xz_buf.out_pos != PAGE_SIZE)
			break;
	} while (xz_ret == XZ_OK);

	err = fw_decompress_xz_error(dev, xz_ret);
	if (!err)
		err = fw_map_paged_buf(fw_priv);

 out:
	xz_dec_end(xz_dec);
	return err;
}
467 
468 static int fw_decompress_xz(struct device *dev, struct fw_priv *fw_priv,
469 			    size_t in_size, const void *in_buffer)
470 {
471 	/* if the buffer is pre-allocated, we can perform in single-shot mode */
472 	if (fw_priv->data)
473 		return fw_decompress_xz_single(dev, fw_priv, in_size, in_buffer);
474 	else
475 		return fw_decompress_xz_pages(dev, fw_priv, in_size, in_buffer);
476 }
477 #endif /* CONFIG_FW_LOADER_COMPRESS */
478 
/* direct firmware loading support */
static char fw_path_para[256];	/* optional user-supplied search path */
/* search order: the customized path first, then the default locations */
static const char * const fw_path[] = {
	fw_path_para,
	"/lib/firmware/updates/" UTS_RELEASE,
	"/lib/firmware/updates",
	"/lib/firmware/" UTS_RELEASE,
	"/lib/firmware"
};

/*
 * Typically 'firmware_class.path=$CUSTOMIZED_PATH' is passed on the
 * kernel command line, because firmware_class is generally built into
 * the kernel rather than as a module.
 */
module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644);
MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path");
496 
/*
 * Walk the fw_path[] search directories and load "<dir>/<name><suffix>"
 * into @fw_priv.  With a @decompress callback the raw file is read into
 * a temporary vmalloc buffer and decompressed; without one the data is
 * loaded directly (into fw_priv->data when it was preallocated).
 *
 * Returns 0 on success, -ENOENT when no path had the file, or another
 * negative errno from the failed read or decompression.
 */
static int
fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv,
			   const char *suffix,
			   int (*decompress)(struct device *dev,
					     struct fw_priv *fw_priv,
					     size_t in_size,
					     const void *in_buffer))
{
	size_t size;
	int i, len;
	int rc = -ENOENT;
	char *path;
	size_t msize = INT_MAX;
	void *buffer = NULL;

	/* Already populated data member means we're loading into a buffer */
	if (!decompress && fw_priv->data) {
		buffer = fw_priv->data;
		msize = fw_priv->allocated_size;
	}

	path = __getname();
	if (!path)
		return -ENOMEM;

	wait_for_initramfs();
	for (i = 0; i < ARRAY_SIZE(fw_path); i++) {
		size_t file_size = 0;
		size_t *file_size_ptr = NULL;

		/* skip the unset customized path */
		if (!fw_path[i][0])
			continue;

		len = snprintf(path, PATH_MAX, "%s/%s%s",
			       fw_path[i], fw_priv->fw_name, suffix);
		if (len >= PATH_MAX) {
			rc = -ENAMETOOLONG;
			break;
		}

		fw_priv->size = 0;

		/*
		 * The total file size is only examined when doing a partial
		 * read; the "full read" case needs to fail if the whole
		 * firmware was not completely loaded.
		 */
		if ((fw_priv->opt_flags & FW_OPT_PARTIAL) && buffer)
			file_size_ptr = &file_size;

		/* load firmware files from the mount namespace of init */
		rc = kernel_read_file_from_path_initns(path, fw_priv->offset,
						       &buffer, msize,
						       file_size_ptr,
						       READING_FIRMWARE);
		if (rc < 0) {
			if (rc != -ENOENT)
				dev_warn(device, "loading %s failed with error %d\n",
					 path, rc);
			else
				dev_dbg(device, "loading %s failed for no such file or directory.\n",
					 path);
			continue;	/* try the next search path */
		}
		size = rc;	/* non-negative return is the byte count read */
		rc = 0;

		dev_dbg(device, "Loading firmware from %s\n", path);
		if (decompress) {
			dev_dbg(device, "f/w decompressing %s\n",
				fw_priv->fw_name);
			rc = decompress(device, fw_priv, size, buffer);
			/* discard the superfluous original content */
			vfree(buffer);
			buffer = NULL;
			if (rc) {
				fw_free_paged_buf(fw_priv);
				continue;
			}
		} else {
			dev_dbg(device, "direct-loading %s\n",
				fw_priv->fw_name);
			if (!fw_priv->data)
				fw_priv->data = buffer;
			fw_priv->size = size;
		}
		/* wake anyone batched onto this request */
		fw_state_done(fw_priv);
		break;
	}
	__putname(path);

	return rc;
}
591 
592 /* firmware holds the ownership of pages */
593 static void firmware_free_data(const struct firmware *fw)
594 {
595 	/* Loaded directly? */
596 	if (!fw->priv) {
597 		vfree(fw->data);
598 		return;
599 	}
600 	free_fw_priv(fw->priv);
601 }
602 
603 /* store the pages buffer info firmware from buf */
604 static void fw_set_page_data(struct fw_priv *fw_priv, struct firmware *fw)
605 {
606 	fw->priv = fw_priv;
607 	fw->size = fw_priv->size;
608 	fw->data = fw_priv->data;
609 
610 	pr_debug("%s: fw-%s fw_priv=%p data=%p size=%u\n",
611 		 __func__, fw_priv->fw_name, fw_priv, fw_priv->data,
612 		 (unsigned int)fw_priv->size);
613 }
614 
615 #ifdef CONFIG_FW_CACHE
616 static void fw_name_devm_release(struct device *dev, void *res)
617 {
618 	struct fw_name_devm *fwn = res;
619 
620 	if (fwn->magic == (unsigned long)&fw_cache)
621 		pr_debug("%s: fw_name-%s devm-%p released\n",
622 				__func__, fwn->name, res);
623 	kfree_const(fwn->name);
624 }
625 
626 static int fw_devm_match(struct device *dev, void *res,
627 		void *match_data)
628 {
629 	struct fw_name_devm *fwn = res;
630 
631 	return (fwn->magic == (unsigned long)&fw_cache) &&
632 		!strcmp(fwn->name, match_data);
633 }
634 
635 static struct fw_name_devm *fw_find_devm_name(struct device *dev,
636 		const char *name)
637 {
638 	struct fw_name_devm *fwn;
639 
640 	fwn = devres_find(dev, fw_name_devm_release,
641 			  fw_devm_match, (void *)name);
642 	return fwn;
643 }
644 
645 static bool fw_cache_is_setup(struct device *dev, const char *name)
646 {
647 	struct fw_name_devm *fwn;
648 
649 	fwn = fw_find_devm_name(dev, name);
650 	if (fwn)
651 		return true;
652 
653 	return false;
654 }
655 
656 /* add firmware name into devres list */
657 static int fw_add_devm_name(struct device *dev, const char *name)
658 {
659 	struct fw_name_devm *fwn;
660 
661 	if (fw_cache_is_setup(dev, name))
662 		return 0;
663 
664 	fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm),
665 			   GFP_KERNEL);
666 	if (!fwn)
667 		return -ENOMEM;
668 	fwn->name = kstrdup_const(name, GFP_KERNEL);
669 	if (!fwn->name) {
670 		devres_free(fwn);
671 		return -ENOMEM;
672 	}
673 
674 	fwn->magic = (unsigned long)&fw_cache;
675 	devres_add(dev, fwn);
676 
677 	return 0;
678 }
679 #else
/* No CONFIG_FW_CACHE: caching is never set up */
static bool fw_cache_is_setup(struct device *dev, const char *name)
{
	return false;
}
684 
/* No CONFIG_FW_CACHE: nothing to register, always succeeds */
static int fw_add_devm_name(struct device *dev, const char *name)
{
	return 0;
}
689 #endif
690 
/*
 * Hand the loaded image in fw->priv over to the driver-visible struct
 * firmware, registering the name for devres-based caching when
 * applicable.  Returns -ENOENT when the load produced no data or was
 * aborted, or the error from fw_add_devm_name().
 */
int assign_fw(struct firmware *fw, struct device *device)
{
	struct fw_priv *fw_priv = fw->priv;
	int ret;

	mutex_lock(&fw_lock);
	if (!fw_priv->size || fw_state_is_aborted(fw_priv)) {
		mutex_unlock(&fw_lock);
		return -ENOENT;
	}

	/*
	 * Add the firmware name into the devres list so that we can auto
	 * cache and uncache firmware for the device.
	 *
	 * The device may have been deleted already, but that problem
	 * should be fixed in devres or the driver core.
	 */
	/* don't cache firmware handled without uevent */
	if (device && (fw_priv->opt_flags & FW_OPT_UEVENT) &&
	    !(fw_priv->opt_flags & FW_OPT_NOCACHE)) {
		ret = fw_add_devm_name(device, fw_priv->fw_name);
		if (ret) {
			mutex_unlock(&fw_lock);
			return ret;
		}
	}

	/*
	 * After caching firmware image is started, let it piggyback
	 * on request firmware.
	 */
	if (!(fw_priv->opt_flags & FW_OPT_NOCACHE) &&
	    fw_priv->fwc->state == FW_LOADER_START_CACHE)
		fw_cache_piggyback_on_request(fw_priv);

	/* pass the pages buffer to driver at the last minute */
	fw_set_page_data(fw_priv, fw);
	mutex_unlock(&fw_lock);
	return 0;
}
732 
/*
 * Prepare the firmware and firmware_buf structs.
 * Return 0 if a firmware is already assigned, 1 if the caller still
 * needs to load one, or a negative error code.
 */
static int
_request_firmware_prepare(struct firmware **firmware_p, const char *name,
			  struct device *device, void *dbuf, size_t size,
			  size_t offset, u32 opt_flags)
{
	struct firmware *firmware;
	struct fw_priv *fw_priv;
	int ret;

	*firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
	if (!firmware) {
		dev_err(device, "%s: kmalloc(struct firmware) failed\n",
			__func__);
		return -ENOMEM;
	}

	/* a matching built-in image short-circuits any filesystem lookup */
	if (firmware_request_builtin_buf(firmware, name, dbuf, size)) {
		dev_dbg(device, "using built-in %s\n", name);
		return 0; /* assigned */
	}

	ret = alloc_lookup_fw_priv(name, &fw_cache, &fw_priv, dbuf, size,
				   offset, opt_flags);

	/*
	 * bind with 'priv' now to avoid warning in failure path
	 * of requesting firmware.
	 */
	firmware->priv = fw_priv;

	/* ret > 0: batched onto an in-flight request - wait for its result */
	if (ret > 0) {
		ret = fw_state_wait(fw_priv);
		if (!ret) {
			fw_set_page_data(fw_priv, firmware);
			return 0; /* assigned */
		}
	}

	if (ret < 0)
		return ret;
	return 1; /* need to load */
}
779 
/*
 * Batched requests need only one wake, we need to do this step last due to the
 * fallback mechanism. The buf is protected with kref_get(), and it won't be
 * released until the last user calls release_firmware().
 *
 * Failed batched requests are possible as well, in such cases we just share
 * the struct fw_priv and won't release it until all requests are woken
 * and have gone through this same path.
 */
static void fw_abort_batch_reqs(struct firmware *fw)
{
	struct fw_priv *fw_priv;

	/* Loaded directly? (built-in or no fw_priv ever attached) */
	if (!fw || !fw->priv)
		return;

	fw_priv = fw->priv;
	mutex_lock(&fw_lock);
	/* abort exactly once; waiters see the aborted state on wake-up */
	if (!fw_state_is_aborted(fw_priv))
		fw_state_aborted(fw_priv);
	mutex_unlock(&fw_lock);
}
803 
/*
 * Common worker for all request_firmware*() entry points: prepare the
 * structs, try the direct filesystem paths (plain, then .xz, then the
 * platform and sysfs fallbacks for full reads), and assign the result.
 * Called from request_firmware() and request_firmware_work_func().
 */
static int
_request_firmware(const struct firmware **firmware_p, const char *name,
		  struct device *device, void *buf, size_t size,
		  size_t offset, u32 opt_flags)
{
	struct firmware *fw = NULL;
	bool nondirect = false;
	int ret;

	if (!firmware_p)
		return -EINVAL;

	if (!name || name[0] == '\0') {
		ret = -EINVAL;
		goto out;
	}

	ret = _request_firmware_prepare(&fw, name, device, buf, size,
					offset, opt_flags);
	if (ret <= 0) /* error or already assigned */
		goto out;

	/* first try the uncompressed image via direct filesystem lookup */
	ret = fw_get_filesystem_firmware(device, fw->priv, "", NULL);

	/* Only full reads can support decompression, platform, and sysfs. */
	if (!(opt_flags & FW_OPT_PARTIAL))
		nondirect = true;

#ifdef CONFIG_FW_LOADER_COMPRESS
	if (ret == -ENOENT && nondirect)
		ret = fw_get_filesystem_firmware(device, fw->priv, ".xz",
						 fw_decompress_xz);
#endif
	if (ret == -ENOENT && nondirect)
		ret = firmware_fallback_platform(fw->priv);

	if (ret) {
		if (!(opt_flags & FW_OPT_NO_WARN))
			dev_warn(device,
				 "Direct firmware load for %s failed with error %d\n",
				 name, ret);
		if (nondirect)
			ret = firmware_fallback_sysfs(fw, name, device,
						      opt_flags, ret);
	} else
		ret = assign_fw(fw, device);

 out:
	/* on any failure: wake batched waiters, then drop our reference */
	if (ret < 0) {
		fw_abort_batch_reqs(fw);
		release_firmware(fw);
		fw = NULL;
	}

	*firmware_p = fw;
	return ret;
}
862 
863 /**
864  * request_firmware() - send firmware request and wait for it
865  * @firmware_p: pointer to firmware image
866  * @name: name of firmware file
867  * @device: device for which firmware is being loaded
868  *
869  *      @firmware_p will be used to return a firmware image by the name
870  *      of @name for device @device.
871  *
872  *      Should be called from user context where sleeping is allowed.
873  *
874  *      @name will be used as $FIRMWARE in the uevent environment and
875  *      should be distinctive enough not to be confused with any other
876  *      firmware image for this or any other device.
877  *
878  *	Caller must hold the reference count of @device.
879  *
880  *	The function can be called safely inside device's suspend and
881  *	resume callback.
882  **/
883 int
884 request_firmware(const struct firmware **firmware_p, const char *name,
885 		 struct device *device)
886 {
887 	int ret;
888 
889 	/* Need to pin this module until return */
890 	__module_get(THIS_MODULE);
891 	ret = _request_firmware(firmware_p, name, device, NULL, 0, 0,
892 				FW_OPT_UEVENT);
893 	module_put(THIS_MODULE);
894 	return ret;
895 }
896 EXPORT_SYMBOL(request_firmware);
897 
898 /**
899  * firmware_request_nowarn() - request for an optional fw module
900  * @firmware: pointer to firmware image
901  * @name: name of firmware file
902  * @device: device for which firmware is being loaded
903  *
904  * This function is similar in behaviour to request_firmware(), except it
905  * doesn't produce warning messages when the file is not found. The sysfs
906  * fallback mechanism is enabled if direct filesystem lookup fails. However,
907  * failures to find the firmware file with it are still suppressed. It is
908  * therefore up to the driver to check for the return value of this call and to
909  * decide when to inform the users of errors.
910  **/
911 int firmware_request_nowarn(const struct firmware **firmware, const char *name,
912 			    struct device *device)
913 {
914 	int ret;
915 
916 	/* Need to pin this module until return */
917 	__module_get(THIS_MODULE);
918 	ret = _request_firmware(firmware, name, device, NULL, 0, 0,
919 				FW_OPT_UEVENT | FW_OPT_NO_WARN);
920 	module_put(THIS_MODULE);
921 	return ret;
922 }
923 EXPORT_SYMBOL_GPL(firmware_request_nowarn);
924 
925 /**
926  * request_firmware_direct() - load firmware directly without usermode helper
927  * @firmware_p: pointer to firmware image
928  * @name: name of firmware file
929  * @device: device for which firmware is being loaded
930  *
931  * This function works pretty much like request_firmware(), but this doesn't
932  * fall back to usermode helper even if the firmware couldn't be loaded
933  * directly from fs.  Hence it's useful for loading optional firmwares, which
934  * aren't always present, without extra long timeouts of udev.
935  **/
936 int request_firmware_direct(const struct firmware **firmware_p,
937 			    const char *name, struct device *device)
938 {
939 	int ret;
940 
941 	__module_get(THIS_MODULE);
942 	ret = _request_firmware(firmware_p, name, device, NULL, 0, 0,
943 				FW_OPT_UEVENT | FW_OPT_NO_WARN |
944 				FW_OPT_NOFALLBACK_SYSFS);
945 	module_put(THIS_MODULE);
946 	return ret;
947 }
948 EXPORT_SYMBOL_GPL(request_firmware_direct);
949 
950 /**
951  * firmware_request_platform() - request firmware with platform-fw fallback
952  * @firmware: pointer to firmware image
953  * @name: name of firmware file
954  * @device: device for which firmware is being loaded
955  *
956  * This function is similar in behaviour to request_firmware, except that if
957  * direct filesystem lookup fails, it will fallback to looking for a copy of the
958  * requested firmware embedded in the platform's main (e.g. UEFI) firmware.
959  **/
960 int firmware_request_platform(const struct firmware **firmware,
961 			      const char *name, struct device *device)
962 {
963 	int ret;
964 
965 	/* Need to pin this module until return */
966 	__module_get(THIS_MODULE);
967 	ret = _request_firmware(firmware, name, device, NULL, 0, 0,
968 				FW_OPT_UEVENT | FW_OPT_FALLBACK_PLATFORM);
969 	module_put(THIS_MODULE);
970 	return ret;
971 }
972 EXPORT_SYMBOL_GPL(firmware_request_platform);
973 
974 /**
975  * firmware_request_cache() - cache firmware for suspend so resume can use it
976  * @name: name of firmware file
977  * @device: device for which firmware should be cached for
978  *
979  * There are some devices with an optimization that enables the device to not
980  * require loading firmware on system reboot. This optimization may still
981  * require the firmware present on resume from suspend. This routine can be
982  * used to ensure the firmware is present on resume from suspend in these
983  * situations. This helper is not compatible with drivers which use
984  * request_firmware_into_buf() or request_firmware_nowait() with no uevent set.
985  **/
986 int firmware_request_cache(struct device *device, const char *name)
987 {
988 	int ret;
989 
990 	mutex_lock(&fw_lock);
991 	ret = fw_add_devm_name(device, name);
992 	mutex_unlock(&fw_lock);
993 
994 	return ret;
995 }
996 EXPORT_SYMBOL_GPL(firmware_request_cache);
997 
998 /**
999  * request_firmware_into_buf() - load firmware into a previously allocated buffer
1000  * @firmware_p: pointer to firmware image
1001  * @name: name of firmware file
1002  * @device: device for which firmware is being loaded and DMA region allocated
1003  * @buf: address of buffer to load firmware into
1004  * @size: size of buffer
1005  *
1006  * This function works pretty much like request_firmware(), but it doesn't
1007  * allocate a buffer to hold the firmware data. Instead, the firmware
1008  * is loaded directly into the buffer pointed to by @buf and the @firmware_p
1009  * data member is pointed at @buf.
1010  *
1011  * This function doesn't cache firmware either.
1012  */
1013 int
1014 request_firmware_into_buf(const struct firmware **firmware_p, const char *name,
1015 			  struct device *device, void *buf, size_t size)
1016 {
1017 	int ret;
1018 
1019 	if (fw_cache_is_setup(device, name))
1020 		return -EOPNOTSUPP;
1021 
1022 	__module_get(THIS_MODULE);
1023 	ret = _request_firmware(firmware_p, name, device, buf, size, 0,
1024 				FW_OPT_UEVENT | FW_OPT_NOCACHE);
1025 	module_put(THIS_MODULE);
1026 	return ret;
1027 }
1028 EXPORT_SYMBOL(request_firmware_into_buf);
1029 
1030 /**
1031  * request_partial_firmware_into_buf() - load partial firmware into a previously allocated buffer
1032  * @firmware_p: pointer to firmware image
1033  * @name: name of firmware file
1034  * @device: device for which firmware is being loaded and DMA region allocated
1035  * @buf: address of buffer to load firmware into
1036  * @size: size of buffer
1037  * @offset: offset into file to read
1038  *
1039  * This function works pretty much like request_firmware_into_buf except
1040  * it allows a partial read of the file.
1041  */
1042 int
1043 request_partial_firmware_into_buf(const struct firmware **firmware_p,
1044 				  const char *name, struct device *device,
1045 				  void *buf, size_t size, size_t offset)
1046 {
1047 	int ret;
1048 
1049 	if (fw_cache_is_setup(device, name))
1050 		return -EOPNOTSUPP;
1051 
1052 	__module_get(THIS_MODULE);
1053 	ret = _request_firmware(firmware_p, name, device, buf, size, offset,
1054 				FW_OPT_UEVENT | FW_OPT_NOCACHE |
1055 				FW_OPT_PARTIAL);
1056 	module_put(THIS_MODULE);
1057 	return ret;
1058 }
1059 EXPORT_SYMBOL(request_partial_firmware_into_buf);
1060 
/**
 * release_firmware() - release the resource associated with a firmware image
 * @fw: firmware resource to release
 **/
void release_firmware(const struct firmware *fw)
{
	if (!fw)
		return;

	/* built-in images have no data of their own to free */
	if (!fw_is_builtin_firmware(fw))
		firmware_free_data(fw);
	kfree(fw);
}
EXPORT_SYMBOL(release_firmware);
1074 
/* Async support */
/* State carried from request_firmware_nowait() to its worker */
struct firmware_work {
	struct work_struct work;
	struct module *module;	/* put again once the worker is done */
	const char *name;	/* kstrdup_const()'ed firmware name */
	struct device *device;
	void *context;		/* opaque cookie handed back to @cont */
	void (*cont)(const struct firmware *fw, void *context);
	u32 opt_flags;
};
1085 
/*
 * Workqueue callback backing request_firmware_nowait(): perform the
 * request, invoke the completion callback (with fw == NULL on failure),
 * then release everything the async path acquired.
 */
static void request_firmware_work_func(struct work_struct *work)
{
	struct firmware_work *fw_work;
	const struct firmware *fw;

	fw_work = container_of(work, struct firmware_work, work);

	_request_firmware(&fw, fw_work->name, fw_work->device, NULL, 0, 0,
			  fw_work->opt_flags);
	/* fw may be NULL here; the callback must cope with that */
	fw_work->cont(fw, fw_work->context);
	put_device(fw_work->device); /* taken in request_firmware_nowait() */

	module_put(fw_work->module);
	kfree_const(fw_work->name);
	kfree(fw_work);
}
1102 
1103 /**
1104  * request_firmware_nowait() - asynchronous version of request_firmware
1105  * @module: module requesting the firmware
1106  * @uevent: sends uevent to copy the firmware image if this flag
1107  *	is non-zero else the firmware copy must be done manually.
1108  * @name: name of firmware file
1109  * @device: device for which firmware is being loaded
1110  * @gfp: allocation flags
1111  * @context: will be passed over to @cont, and
1112  *	@fw may be %NULL if firmware request fails.
1113  * @cont: function will be called asynchronously when the firmware
1114  *	request is over.
1115  *
1116  *	Caller must hold the reference count of @device.
1117  *
1118  *	Asynchronous variant of request_firmware() for user contexts:
1119  *		- sleep for as small periods as possible since it may
1120  *		  increase kernel boot time of built-in device drivers
1121  *		  requesting firmware in their ->probe() methods, if
1122  *		  @gfp is GFP_KERNEL.
1123  *
1124  *		- can't sleep at all if @gfp is GFP_ATOMIC.
1125  **/
1126 int
1127 request_firmware_nowait(
1128 	struct module *module, bool uevent,
1129 	const char *name, struct device *device, gfp_t gfp, void *context,
1130 	void (*cont)(const struct firmware *fw, void *context))
1131 {
1132 	struct firmware_work *fw_work;
1133 
1134 	fw_work = kzalloc(sizeof(struct firmware_work), gfp);
1135 	if (!fw_work)
1136 		return -ENOMEM;
1137 
1138 	fw_work->module = module;
1139 	fw_work->name = kstrdup_const(name, gfp);
1140 	if (!fw_work->name) {
1141 		kfree(fw_work);
1142 		return -ENOMEM;
1143 	}
1144 	fw_work->device = device;
1145 	fw_work->context = context;
1146 	fw_work->cont = cont;
1147 	fw_work->opt_flags = FW_OPT_NOWAIT |
1148 		(uevent ? FW_OPT_UEVENT : FW_OPT_USERHELPER);
1149 
1150 	if (!uevent && fw_cache_is_setup(device, name)) {
1151 		kfree_const(fw_work->name);
1152 		kfree(fw_work);
1153 		return -EOPNOTSUPP;
1154 	}
1155 
1156 	if (!try_module_get(module)) {
1157 		kfree_const(fw_work->name);
1158 		kfree(fw_work);
1159 		return -EFAULT;
1160 	}
1161 
1162 	get_device(fw_work->device);
1163 	INIT_WORK(&fw_work->work, request_firmware_work_func);
1164 	schedule_work(&fw_work->work);
1165 	return 0;
1166 }
1167 EXPORT_SYMBOL(request_firmware_nowait);
1168 
#ifdef CONFIG_FW_CACHE
/* Groups async caching jobs so device_cache_fw_images() can wait on them. */
static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain);
1171 
1172 /**
1173  * cache_firmware() - cache one firmware image in kernel memory space
1174  * @fw_name: the firmware image name
1175  *
1176  * Cache firmware in kernel memory so that drivers can use it when
1177  * system isn't ready for them to request firmware image from userspace.
1178  * Once it returns successfully, driver can use request_firmware or its
1179  * nowait version to get the cached firmware without any interacting
1180  * with userspace
1181  *
1182  * Return 0 if the firmware image has been cached successfully
1183  * Return !0 otherwise
1184  *
1185  */
1186 static int cache_firmware(const char *fw_name)
1187 {
1188 	int ret;
1189 	const struct firmware *fw;
1190 
1191 	pr_debug("%s: %s\n", __func__, fw_name);
1192 
1193 	ret = request_firmware(&fw, fw_name, NULL);
1194 	if (!ret)
1195 		kfree(fw);
1196 
1197 	pr_debug("%s: %s ret=%d\n", __func__, fw_name, ret);
1198 
1199 	return ret;
1200 }
1201 
1202 static struct fw_priv *lookup_fw_priv(const char *fw_name)
1203 {
1204 	struct fw_priv *tmp;
1205 	struct firmware_cache *fwc = &fw_cache;
1206 
1207 	spin_lock(&fwc->lock);
1208 	tmp = __lookup_fw_priv(fw_name);
1209 	spin_unlock(&fwc->lock);
1210 
1211 	return tmp;
1212 }
1213 
1214 /**
1215  * uncache_firmware() - remove one cached firmware image
1216  * @fw_name: the firmware image name
1217  *
1218  * Uncache one firmware image which has been cached successfully
1219  * before.
1220  *
1221  * Return 0 if the firmware cache has been removed successfully
1222  * Return !0 otherwise
1223  *
1224  */
1225 static int uncache_firmware(const char *fw_name)
1226 {
1227 	struct fw_priv *fw_priv;
1228 	struct firmware fw;
1229 
1230 	pr_debug("%s: %s\n", __func__, fw_name);
1231 
1232 	if (firmware_request_builtin(&fw, fw_name))
1233 		return 0;
1234 
1235 	fw_priv = lookup_fw_priv(fw_name);
1236 	if (fw_priv) {
1237 		free_fw_priv(fw_priv);
1238 		return 0;
1239 	}
1240 
1241 	return -EINVAL;
1242 }
1243 
1244 static struct fw_cache_entry *alloc_fw_cache_entry(const char *name)
1245 {
1246 	struct fw_cache_entry *fce;
1247 
1248 	fce = kzalloc(sizeof(*fce), GFP_ATOMIC);
1249 	if (!fce)
1250 		goto exit;
1251 
1252 	fce->name = kstrdup_const(name, GFP_ATOMIC);
1253 	if (!fce->name) {
1254 		kfree(fce);
1255 		fce = NULL;
1256 		goto exit;
1257 	}
1258 exit:
1259 	return fce;
1260 }
1261 
/*
 * Return 1 if @name is already on fw_cache.fw_names, 0 otherwise.
 * Caller must hold fw_cache.name_lock.
 */
static int __fw_entry_found(const char *name)
{
	struct firmware_cache *fwc = &fw_cache;
	struct fw_cache_entry *fce;

	list_for_each_entry(fce, &fwc->fw_names, list) {
		if (!strcmp(fce->name, name))
			return 1;
	}
	return 0;
}
1273 
/*
 * Record @fw_priv's name on the cache name list (at most once per name)
 * and take an extra reference on the image so it stays resident.
 */
static void fw_cache_piggyback_on_request(struct fw_priv *fw_priv)
{
	const char *name = fw_priv->fw_name;
	struct firmware_cache *fwc = fw_priv->fwc;
	struct fw_cache_entry *fce;

	spin_lock(&fwc->name_lock);
	if (__fw_entry_found(name))
		goto found;

	/* alloc_fw_cache_entry() uses GFP_ATOMIC — safe under the spinlock. */
	fce = alloc_fw_cache_entry(name);
	if (fce) {
		list_add(&fce->list, &fwc->fw_names);
		/* presumably dropped again when the entry is uncached */
		kref_get(&fw_priv->ref);
		pr_debug("%s: fw: %s\n", __func__, name);
	}
found:
	spin_unlock(&fwc->name_lock);
}
1293 
/* Release a name-list entry and its const-duped name string. */
static void free_fw_cache_entry(struct fw_cache_entry *fce)
{
	kfree_const(fce->name);
	kfree(fce);
}
1299 
/*
 * Async worker: cache one image by name.  On failure unlink and free the
 * name entry so it does not linger for an image that was never cached.
 */
static void __async_dev_cache_fw_image(void *fw_entry,
				       async_cookie_t cookie)
{
	struct fw_cache_entry *fce = fw_entry;
	struct firmware_cache *fwc = &fw_cache;
	int ret;

	ret = cache_firmware(fce->name);
	if (ret) {
		spin_lock(&fwc->name_lock);
		list_del(&fce->list);
		spin_unlock(&fwc->name_lock);

		free_fw_cache_entry(fce);
	}
}
1316 
1317 /* called with dev->devres_lock held */
1318 static void dev_create_fw_entry(struct device *dev, void *res,
1319 				void *data)
1320 {
1321 	struct fw_name_devm *fwn = res;
1322 	const char *fw_name = fwn->name;
1323 	struct list_head *head = data;
1324 	struct fw_cache_entry *fce;
1325 
1326 	fce = alloc_fw_cache_entry(fw_name);
1327 	if (fce)
1328 		list_add(&fce->list, head);
1329 }
1330 
/* devres match callback: select entries whose magic equals @match_data. */
static int devm_name_match(struct device *dev, void *res,
			   void *match_data)
{
	struct fw_name_devm *fwn = res;
	return (fwn->magic == (unsigned long)match_data);
}
1337 
/*
 * Collect the firmware names recorded in @dev's devres list and schedule
 * an async caching job for each name not already on the global list.
 */
static void dev_cache_fw_image(struct device *dev, void *data)
{
	LIST_HEAD(todo);
	struct fw_cache_entry *fce;
	struct fw_cache_entry *fce_next;
	struct firmware_cache *fwc = &fw_cache;

	/* Snapshot each matching devres firmware name onto the local list. */
	devres_for_each_res(dev, fw_name_devm_release,
			    devm_name_match, &fw_cache,
			    dev_create_fw_entry, &todo);

	list_for_each_entry_safe(fce, fce_next, &todo, list) {
		list_del(&fce->list);

		spin_lock(&fwc->name_lock);
		/* only one cache entry for one firmware */
		if (!__fw_entry_found(fce->name)) {
			list_add(&fce->list, &fwc->fw_names);
		} else {
			/* duplicate: drop it and skip the async job below */
			free_fw_cache_entry(fce);
			fce = NULL;
		}
		spin_unlock(&fwc->name_lock);

		if (fce)
			async_schedule_domain(__async_dev_cache_fw_image,
					      (void *)fce,
					      &fw_cache_domain);
	}
}
1368 
/*
 * Drain fw_cache.fw_names, uncaching every image named on it.  The
 * name_lock is dropped before uncache_firmware()/free_fw_cache_entry()
 * so the lock is not held across the heavier free path, and re-taken to
 * re-check the list head each iteration.
 */
static void __device_uncache_fw_images(void)
{
	struct firmware_cache *fwc = &fw_cache;
	struct fw_cache_entry *fce;

	spin_lock(&fwc->name_lock);
	while (!list_empty(&fwc->fw_names)) {
		fce = list_entry(fwc->fw_names.next,
				struct fw_cache_entry, list);
		list_del(&fce->list);
		spin_unlock(&fwc->name_lock);

		uncache_firmware(fce->name);
		free_fw_cache_entry(fce);

		spin_lock(&fwc->name_lock);
	}
	spin_unlock(&fwc->name_lock);
}
1388 
1389 /**
1390  * device_cache_fw_images() - cache devices' firmware
1391  *
1392  * If one device called request_firmware or its nowait version
1393  * successfully before, the firmware names are recored into the
1394  * device's devres link list, so device_cache_fw_images can call
1395  * cache_firmware() to cache these firmwares for the device,
1396  * then the device driver can load its firmwares easily at
1397  * time when system is not ready to complete loading firmware.
1398  */
1399 static void device_cache_fw_images(void)
1400 {
1401 	struct firmware_cache *fwc = &fw_cache;
1402 	DEFINE_WAIT(wait);
1403 
1404 	pr_debug("%s\n", __func__);
1405 
1406 	/* cancel uncache work */
1407 	cancel_delayed_work_sync(&fwc->work);
1408 
1409 	fw_fallback_set_cache_timeout();
1410 
1411 	mutex_lock(&fw_lock);
1412 	fwc->state = FW_LOADER_START_CACHE;
1413 	dpm_for_each_dev(NULL, dev_cache_fw_image);
1414 	mutex_unlock(&fw_lock);
1415 
1416 	/* wait for completion of caching firmware for all devices */
1417 	async_synchronize_full_domain(&fw_cache_domain);
1418 
1419 	fw_fallback_set_default_timeout();
1420 }
1421 
1422 /**
1423  * device_uncache_fw_images() - uncache devices' firmware
1424  *
1425  * uncache all firmwares which have been cached successfully
1426  * by device_uncache_fw_images earlier
1427  */
1428 static void device_uncache_fw_images(void)
1429 {
1430 	pr_debug("%s\n", __func__);
1431 	__device_uncache_fw_images();
1432 }
1433 
/* Delayed-work wrapper scheduled by device_uncache_fw_images_delay(). */
static void device_uncache_fw_images_work(struct work_struct *work)
{
	device_uncache_fw_images();
}
1438 
1439 /**
1440  * device_uncache_fw_images_delay() - uncache devices firmwares
1441  * @delay: number of milliseconds to delay uncache device firmwares
1442  *
1443  * uncache all devices's firmwares which has been cached successfully
1444  * by device_cache_fw_images after @delay milliseconds.
1445  */
1446 static void device_uncache_fw_images_delay(unsigned long delay)
1447 {
1448 	queue_delayed_work(system_power_efficient_wq, &fw_cache.work,
1449 			   msecs_to_jiffies(delay));
1450 }
1451 
/*
 * PM notifier: cache firmware before entering suspend/hibernation and
 * schedule an uncache once the system is back up.  Returns 0, which the
 * notifier core treats as NOTIFY_DONE.
 */
static int fw_pm_notify(struct notifier_block *notify_block,
			unsigned long mode, void *unused)
{
	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
	case PM_RESTORE_PREPARE:
		/*
		 * kill pending fallback requests with a custom fallback
		 * to avoid stalling suspend.
		 */
		kill_pending_fw_fallback_reqs(true);
		device_cache_fw_images();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		/*
		 * In case that system sleep failed and syscore_suspend is
		 * not called.
		 */
		mutex_lock(&fw_lock);
		fw_cache.state = FW_LOADER_NO_CACHE;
		mutex_unlock(&fw_lock);

		/* keep images around for 10s in case resume needs them */
		device_uncache_fw_images_delay(10 * MSEC_PER_SEC);
		break;
	}

	return 0;
}
1484 
/* stop caching firmware once syscore_suspend is reached */
static int fw_suspend(void)
{
	fw_cache.state = FW_LOADER_NO_CACHE;
	return 0;
}
1491 
/* Syscore hook registered by register_fw_pm_ops(); suspend-only. */
static struct syscore_ops fw_syscore_ops = {
	.suspend = fw_suspend,
};
1495 
1496 static int __init register_fw_pm_ops(void)
1497 {
1498 	int ret;
1499 
1500 	spin_lock_init(&fw_cache.name_lock);
1501 	INIT_LIST_HEAD(&fw_cache.fw_names);
1502 
1503 	INIT_DELAYED_WORK(&fw_cache.work,
1504 			  device_uncache_fw_images_work);
1505 
1506 	fw_cache.pm_notify.notifier_call = fw_pm_notify;
1507 	ret = register_pm_notifier(&fw_cache.pm_notify);
1508 	if (ret)
1509 		return ret;
1510 
1511 	register_syscore_ops(&fw_syscore_ops);
1512 
1513 	return ret;
1514 }
1515 
/* Undo register_fw_pm_ops() in reverse order. */
static inline void unregister_fw_pm_ops(void)
{
	unregister_syscore_ops(&fw_syscore_ops);
	unregister_pm_notifier(&fw_cache.pm_notify);
}
#else
/* CONFIG_FW_CACHE disabled: the caching hooks become no-ops. */
static void fw_cache_piggyback_on_request(struct fw_priv *fw_priv)
{
}
static inline int register_fw_pm_ops(void)
{
	return 0;
}
static inline void unregister_fw_pm_ops(void)
{
}
#endif
1533 
/* Initialise the parts of fw_cache used regardless of CONFIG_FW_CACHE. */
static void __init fw_cache_init(void)
{
	spin_lock_init(&fw_cache.lock);
	INIT_LIST_HEAD(&fw_cache.head);
	fw_cache.state = FW_LOADER_NO_CACHE;
}
1540 
/* Reboot notifier callback; arguments are unused by design. */
static int fw_shutdown_notify(struct notifier_block *unused1,
			      unsigned long unused2, void *unused3)
{
	/*
	 * Kill all pending fallback requests to avoid both stalling shutdown,
	 * and avoid a deadlock with the usermode_lock.
	 */
	kill_pending_fw_fallback_reqs(false);

	return NOTIFY_DONE;
}
1552 
/* Registered on the reboot chain in firmware_class_init(). */
static struct notifier_block fw_shutdown_nb = {
	.notifier_call = fw_shutdown_notify,
};
1556 
1557 static int __init firmware_class_init(void)
1558 {
1559 	int ret;
1560 
1561 	/* No need to unfold these on exit */
1562 	fw_cache_init();
1563 
1564 	ret = register_fw_pm_ops();
1565 	if (ret)
1566 		return ret;
1567 
1568 	ret = register_reboot_notifier(&fw_shutdown_nb);
1569 	if (ret)
1570 		goto out;
1571 
1572 	return register_sysfs_loader();
1573 
1574 out:
1575 	unregister_fw_pm_ops();
1576 	return ret;
1577 }
1578 
/* Module exit: tear down everything firmware_class_init() registered. */
static void __exit firmware_class_exit(void)
{
	unregister_fw_pm_ops();
	unregister_reboot_notifier(&fw_shutdown_nb);
	unregister_sysfs_loader();
}
1585 
/* fs_initcall: register before device initcalls so drivers can use us. */
fs_initcall(firmware_class_init);
module_exit(firmware_class_exit);
1588