xref: /openbmc/linux/lib/test_firmware.c (revision f9a82c48)
/*
 * This module provides an interface to trigger and test firmware loading.
 *
 * It is designed to be used for basic evaluation of the firmware loading
 * subsystem (for example when validating firmware verification). It lacks
 * any extra dependencies, and will not normally be loaded by the system
 * unless explicitly requested by name.
 */
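
/*
 * Rough usage sketch (paths are the usual defaults and may differ on your
 * system; the sysfs directory below assumes the miscdevice is registered
 * as "test_firmware"):
 *
 *   cp test-firmware.bin /lib/firmware/
 *   echo -n "test-firmware.bin" > \
 *	/sys/devices/virtual/misc/test_firmware/trigger_request
 *   cat /dev/test_firmware
 *
 * The scripts under tools/testing/selftests/firmware/ drive this interface
 * and are the reference for how these triggers are expected to be used.
 */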

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/completion.h>
#include <linux/firmware.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/vmalloc.h>

#define TEST_FIRMWARE_NAME	"test-firmware.bin"
#define TEST_FIRMWARE_NUM_REQS	4

static DEFINE_MUTEX(test_fw_mutex);
static const struct firmware *test_firmware;

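/*
 * One instance per batched request, stored in test_fw_config->reqs. @idx is
 * the request number, @rc the request_firmware*() return value, @fw the
 * firmware that came back (if any), and @sent/@task/@completion let the
 * trigger know whether and how to wait for the request to finish.
 */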
struct test_batched_req {
	u8 idx;
	int rc;
	bool sent;
	const struct firmware *fw;
	const char *name;
	struct completion completion;
	struct task_struct *task;
	struct device *dev;
};

/**
 * test_config - represents configuration for the test for different triggers
 *
 * @name: the name of the firmware file to look for
 * @sync_direct: when the sync trigger is used, if this is true
 *	request_firmware_direct() will be used instead of request_firmware()
 * @send_uevent: whether or not to send a uevent for async requests
 * @num_requests: number of requests to try per test case. This is trigger
 *	specific.
 * @reqs: stores all requests information
 * @read_fw_idx: index of the batched request whose firmware is read back
 *	through the read_firmware trigger
 * @test_result: a test may use this to collect the result from the
 *	request_firmware*() calls used in its tests. In order of priority we
 *	always keep any setup error first. If no setup errors were found then
 *	we move on to the first error encountered while running the API. Note
 *	that for async calls this will typically be a successful result (0)
 *	unless bogus parameters were used or the system is out of memory. In
 *	the async case the callback has to do a bit more homework to figure
 *	out what happened; unfortunately the only information passed on error
 *	today is the fact that no firmware was found, so we can only assume
 *	-ENOENT on async calls if the firmware is NULL.
 *
 *	Errors you can expect:
 *
 *	API specific:
 *
 *	0:		success for sync, for async it means the request was sent
 *	-EINVAL:	invalid parameters or request
 *	-ENOENT:	file not found
 *
 *	System environment:
 *
 *	-ENOMEM:	memory pressure on the system
 *	-ENODEV:	out of devices to test with
 *	-EINVAL:	an unexpected error has occurred
 * @req_firmware: if @sync_direct is true this is set to
 *	request_firmware_direct(), otherwise request_firmware()
 */
struct test_config {
	char *name;
	bool sync_direct;
	bool send_uevent;
	u8 num_requests;
	u8 read_fw_idx;

	/*
	 * These below don't belong here, but we'll move them once we create
	 * a struct fw_test_device and stuff the misc_dev under there later.
	 */
	struct test_batched_req *reqs;
	int test_result;
	int (*req_firmware)(const struct firmware **fw, const char *name,
			    struct device *device);
};

static struct test_config *test_fw_config;

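/*
 * Back the /dev/test_firmware read(): dump whatever firmware the last
 * (non-batched) trigger loaded into test_firmware.
 */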
static ssize_t test_fw_misc_read(struct file *f, char __user *buf,
				 size_t size, loff_t *offset)
{
	ssize_t rc = 0;

	mutex_lock(&test_fw_mutex);
	if (test_firmware)
		rc = simple_read_from_buffer(buf, size, offset,
					     test_firmware->data,
					     test_firmware->size);
	mutex_unlock(&test_fw_mutex);
	return rc;
}

static const struct file_operations test_fw_fops = {
	.owner          = THIS_MODULE,
	.read           = test_fw_misc_read,
};

static void __test_release_all_firmware(void)
{
	struct test_batched_req *req;
	u8 i;

	if (!test_fw_config->reqs)
		return;

	for (i = 0; i < test_fw_config->num_requests; i++) {
		req = &test_fw_config->reqs[i];
		if (req->fw)
			release_firmware(req->fw);
	}

	vfree(test_fw_config->reqs);
	test_fw_config->reqs = NULL;
}

static void test_release_all_firmware(void)
{
	mutex_lock(&test_fw_mutex);
	__test_release_all_firmware();
	mutex_unlock(&test_fw_mutex);
}

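/* Release all batched firmware and free the current config name. */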
static void __test_firmware_config_free(void)
{
	__test_release_all_firmware();
	kfree_const(test_fw_config->name);
	test_fw_config->name = NULL;
}

/*
 * XXX: move to kstrncpy() once merged.
 *
 * Users should use kfree_const() when freeing these.
 */
static int __kstrncpy(char **dst, const char *name, size_t count, gfp_t gfp)
{
	*dst = kstrndup(name, count, gfp);
	if (!*dst)
		return -ENOSPC;
	return count;
}

static int __test_firmware_config_init(void)
{
	int ret;

	ret = __kstrncpy(&test_fw_config->name, TEST_FIRMWARE_NAME,
			 strlen(TEST_FIRMWARE_NAME), GFP_KERNEL);
	if (ret < 0)
		goto out;

	test_fw_config->num_requests = TEST_FIRMWARE_NUM_REQS;
	test_fw_config->send_uevent = true;
	test_fw_config->sync_direct = false;
	test_fw_config->req_firmware = request_firmware;
	test_fw_config->test_result = 0;
	test_fw_config->reqs = NULL;

	return 0;

out:
	__test_firmware_config_free();
	return ret;
}

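/* The "reset" trigger: drop all test state and reinitialize the defaults. */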
static ssize_t reset_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t count)
{
	int ret;

	mutex_lock(&test_fw_mutex);

	__test_firmware_config_free();

	ret = __test_firmware_config_init();
	if (ret < 0) {
		ret = -ENOMEM;
		pr_err("could not alloc settings for config trigger: %d\n",
		       ret);
		goto out;
	}

	pr_info("reset\n");
	ret = count;

out:
	mutex_unlock(&test_fw_mutex);

	return ret;
}
static DEVICE_ATTR_WO(reset);

static ssize_t config_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	int len = 0;

	mutex_lock(&test_fw_mutex);

	len += snprintf(buf, PAGE_SIZE,
			"Custom trigger configuration for: %s\n",
			dev_name(dev));

	if (test_fw_config->name)
		len += snprintf(buf+len, PAGE_SIZE - len,
				"name:\t%s\n",
				test_fw_config->name);
	else
		len += snprintf(buf+len, PAGE_SIZE - len,
				"name:\tEMPTY\n");

	len += snprintf(buf+len, PAGE_SIZE - len,
			"num_requests:\t%u\n", test_fw_config->num_requests);

	len += snprintf(buf+len, PAGE_SIZE - len,
			"send_uevent:\t\t%s\n",
			test_fw_config->send_uevent ?
			"FW_ACTION_HOTPLUG" :
			"FW_ACTION_NOHOTPLUG");
	len += snprintf(buf+len, PAGE_SIZE - len,
			"sync_direct:\t\t%s\n",
			test_fw_config->sync_direct ? "true" : "false");
	len += snprintf(buf+len, PAGE_SIZE - len,
			"read_fw_idx:\t%u\n", test_fw_config->read_fw_idx);

	mutex_unlock(&test_fw_mutex);

	return len;
}
static DEVICE_ATTR_RO(config);

static ssize_t config_name_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	int ret;

	mutex_lock(&test_fw_mutex);
	kfree_const(test_fw_config->name);
	ret = __kstrncpy(&test_fw_config->name, buf, count, GFP_KERNEL);
	mutex_unlock(&test_fw_mutex);

	return ret;
}

/*
 * As per sysfs_kf_seq_show() the buf is max PAGE_SIZE.
 */
static ssize_t config_test_show_str(char *dst,
				    char *src)
{
	int len;

	mutex_lock(&test_fw_mutex);
	len = snprintf(dst, PAGE_SIZE, "%s\n", src);
	mutex_unlock(&test_fw_mutex);

	return len;
}

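/*
 * The helpers below read and update individual config fields; each takes
 * test_fw_mutex so the sysfs knobs stay consistent with the triggers.
 */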
static int test_dev_config_update_bool(const char *buf, size_t size,
				       bool *cfg)
{
	int ret;

	mutex_lock(&test_fw_mutex);
	if (strtobool(buf, cfg) < 0)
		ret = -EINVAL;
	else
		ret = size;
	mutex_unlock(&test_fw_mutex);

	return ret;
}

static ssize_t
test_dev_config_show_bool(char *buf,
			  bool config)
{
	bool val;

	mutex_lock(&test_fw_mutex);
	val = config;
	mutex_unlock(&test_fw_mutex);

	return snprintf(buf, PAGE_SIZE, "%d\n", val);
}

static ssize_t test_dev_config_show_int(char *buf, int cfg)
{
	int val;

	mutex_lock(&test_fw_mutex);
	val = cfg;
	mutex_unlock(&test_fw_mutex);

	return snprintf(buf, PAGE_SIZE, "%d\n", val);
}

static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
{
	int ret;
	long new;

	ret = kstrtol(buf, 10, &new);
	if (ret)
		return ret;

	if (new < 0 || new > U8_MAX)
		return -EINVAL;

	mutex_lock(&test_fw_mutex);
	*(u8 *)cfg = new;
	mutex_unlock(&test_fw_mutex);

	/* Always return full write size even if we didn't consume all */
	return size;
}

static ssize_t test_dev_config_show_u8(char *buf, u8 cfg)
{
	u8 val;

	mutex_lock(&test_fw_mutex);
	val = cfg;
	mutex_unlock(&test_fw_mutex);

	return snprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t config_name_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return config_test_show_str(buf, test_fw_config->name);
}
static DEVICE_ATTR_RW(config_name);

static ssize_t config_num_requests_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int rc;

	mutex_lock(&test_fw_mutex);
	if (test_fw_config->reqs) {
		pr_err("Must call release_all_firmware prior to changing config\n");
		rc = -EINVAL;
		mutex_unlock(&test_fw_mutex);
		goto out;
	}
	mutex_unlock(&test_fw_mutex);

	rc = test_dev_config_update_u8(buf, count,
				       &test_fw_config->num_requests);

out:
	return rc;
}

static ssize_t config_num_requests_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	return test_dev_config_show_u8(buf, test_fw_config->num_requests);
}
static DEVICE_ATTR_RW(config_num_requests);

static ssize_t config_sync_direct_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	int rc = test_dev_config_update_bool(buf, count,
					     &test_fw_config->sync_direct);

	if (rc == count)
		test_fw_config->req_firmware = test_fw_config->sync_direct ?
				       request_firmware_direct :
				       request_firmware;
	return rc;
}

static ssize_t config_sync_direct_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	return test_dev_config_show_bool(buf, test_fw_config->sync_direct);
}
static DEVICE_ATTR_RW(config_sync_direct);

static ssize_t config_send_uevent_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	return test_dev_config_update_bool(buf, count,
					   &test_fw_config->send_uevent);
}

static ssize_t config_send_uevent_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	return test_dev_config_show_bool(buf, test_fw_config->send_uevent);
}
static DEVICE_ATTR_RW(config_send_uevent);

static ssize_t config_read_fw_idx_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	return test_dev_config_update_u8(buf, count,
					 &test_fw_config->read_fw_idx);
}

static ssize_t config_read_fw_idx_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	return test_dev_config_show_u8(buf, test_fw_config->read_fw_idx);
}
static DEVICE_ATTR_RW(config_read_fw_idx);

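/*
 * The three triggers below exercise request_firmware() and
 * request_firmware_nowait() directly and ignore the config above; only the
 * batched triggers further down honor it.
 */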
static ssize_t trigger_request_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	int rc;
	char *name;

	name = kstrndup(buf, count, GFP_KERNEL);
	if (!name)
		return -ENOSPC;

	pr_info("loading '%s'\n", name);

	mutex_lock(&test_fw_mutex);
	release_firmware(test_firmware);
	test_firmware = NULL;
	rc = request_firmware(&test_firmware, name, dev);
	if (rc) {
		pr_info("load of '%s' failed: %d\n", name, rc);
		goto out;
	}
	pr_info("loaded: %zu\n", test_firmware->size);
	rc = count;

out:
	mutex_unlock(&test_fw_mutex);

	kfree(name);

	return rc;
}
static DEVICE_ATTR_WO(trigger_request);

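/* Signalled by trigger_async_request_cb() once an async request finishes. */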
static DECLARE_COMPLETION(async_fw_done);

static void trigger_async_request_cb(const struct firmware *fw, void *context)
{
	test_firmware = fw;
	complete(&async_fw_done);
}

static ssize_t trigger_async_request_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	int rc;
	char *name;

	name = kstrndup(buf, count, GFP_KERNEL);
	if (!name)
		return -ENOSPC;

	pr_info("loading '%s'\n", name);

	mutex_lock(&test_fw_mutex);
	release_firmware(test_firmware);
	test_firmware = NULL;
	rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG, name,
				     dev, GFP_KERNEL, NULL,
				     trigger_async_request_cb);
	if (rc) {
		pr_info("async load of '%s' failed: %d\n", name, rc);
		kfree(name);
		goto out;
	}
	/* Free 'name' ASAP, to test for race conditions */
	kfree(name);

	wait_for_completion(&async_fw_done);

	if (test_firmware) {
		pr_info("loaded: %zu\n", test_firmware->size);
		rc = count;
	} else {
		pr_err("failed to async load firmware\n");
		rc = -ENODEV;
	}

out:
	mutex_unlock(&test_fw_mutex);

	return rc;
}
static DEVICE_ATTR_WO(trigger_async_request);

static ssize_t trigger_custom_fallback_store(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t count)
{
	int rc;
	char *name;

	name = kstrndup(buf, count, GFP_KERNEL);
	if (!name)
		return -ENOSPC;

	pr_info("loading '%s' using custom fallback mechanism\n", name);

	mutex_lock(&test_fw_mutex);
	release_firmware(test_firmware);
	test_firmware = NULL;
	rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG, name,
				     dev, GFP_KERNEL, NULL,
				     trigger_async_request_cb);
	if (rc) {
		pr_info("async load of '%s' failed: %d\n", name, rc);
		kfree(name);
		goto out;
	}
	/* Free 'name' ASAP, to test for race conditions */
	kfree(name);

	wait_for_completion(&async_fw_done);

	if (test_firmware) {
		pr_info("loaded: %zu\n", test_firmware->size);
		rc = count;
	} else {
		pr_err("failed to async load firmware\n");
		rc = -ENODEV;
	}

out:
	mutex_unlock(&test_fw_mutex);

	return rc;
}
static DEVICE_ATTR_WO(trigger_custom_fallback);

static int test_fw_run_batch_request(void *data)
{
	struct test_batched_req *req = data;

	if (!req) {
		test_fw_config->test_result = -EINVAL;
		return -EINVAL;
	}

	req->rc = test_fw_config->req_firmware(&req->fw, req->name, req->dev);
	if (req->rc) {
		pr_info("#%u: batched sync load failed: %d\n",
			req->idx, req->rc);
		if (!test_fw_config->test_result)
			test_fw_config->test_result = req->rc;
	} else if (req->fw) {
		req->sent = true;
		pr_info("#%u: batched sync loaded %zu\n",
			req->idx, req->fw->size);
	}
	complete(&req->completion);

	req->task = NULL;

	return 0;
}

/*
 * We use a kthread as otherwise the kernel serializes all our sync requests
 * and we would not be able to mimic batched requests on a sync call. Batched
 * requests on a sync call can for instance happen in a device driver when
 * multiple cards are used and firmware loading happens outside of probe.
 */
static ssize_t trigger_batched_requests_store(struct device *dev,
					      struct device_attribute *attr,
					      const char *buf, size_t count)
{
	struct test_batched_req *req;
	int rc;
	u8 i;

	mutex_lock(&test_fw_mutex);

	test_fw_config->reqs =
		vzalloc(array3_size(sizeof(struct test_batched_req),
				    test_fw_config->num_requests, 2));
	if (!test_fw_config->reqs) {
		rc = -ENOMEM;
		goto out_unlock;
	}

	pr_info("batched sync firmware loading '%s' %u times\n",
		test_fw_config->name, test_fw_config->num_requests);

	for (i = 0; i < test_fw_config->num_requests; i++) {
		req = &test_fw_config->reqs[i];
		req->fw = NULL;
		req->idx = i;
		req->name = test_fw_config->name;
		req->dev = dev;
		init_completion(&req->completion);
		req->task = kthread_run(test_fw_run_batch_request, req,
					"%s-%u", KBUILD_MODNAME, req->idx);
		if (!req->task || IS_ERR(req->task)) {
			pr_err("Setting up thread %u failed\n", req->idx);
			req->task = NULL;
			rc = -ENOMEM;
			goto out_bail;
		}
	}

	rc = count;

	/*
	 * We require an explicit release via the release_all_firmware
	 * trigger: delaying release_firmware() buys us more time and
	 * improves our chances of forcing a batched request. If we called
	 * release_firmware() right away, a successful firmware request
	 * might miss its opportunity to become a batched request.
	 */

out_bail:
	for (i = 0; i < test_fw_config->num_requests; i++) {
		req = &test_fw_config->reqs[i];
		if (req->task || req->sent)
			wait_for_completion(&req->completion);
	}

	/* Override any worker error if we had a general setup error */
	if (rc < 0)
		test_fw_config->test_result = rc;

out_unlock:
	mutex_unlock(&test_fw_mutex);

	return rc;
}
static DEVICE_ATTR_WO(trigger_batched_requests);
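
/*
 * Sketch of a batched run driven from userspace (attribute names match the
 * DEVICE_ATTR definitions in this file; the sysfs directory is assumed to
 * be the default location for the "test_firmware" misc device):
 *
 *   cd /sys/devices/virtual/misc/test_firmware
 *   echo -n "test-firmware.bin" > config_name
 *   echo 4 > config_num_requests
 *   echo 1 > trigger_batched_requests
 *   cat test_result
 *   echo 0 > config_read_fw_idx
 *   cat read_firmware
 *   echo 1 > release_all_firmware
 */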

/*
 * We wait for each callback to return with the lock held, no need to lock here
 */
static void trigger_batched_cb(const struct firmware *fw, void *context)
{
	struct test_batched_req *req = context;

	if (!req) {
		test_fw_config->test_result = -EINVAL;
		return;
	}

	/* forces *some* batched requests to queue up */
	if (!req->idx)
		ssleep(2);

	req->fw = fw;

	/*
	 * Unfortunately the firmware API gives us nothing other than a null FW
	 * if the firmware was not found on async requests.  Best we can do is
	 * just assume -ENOENT. A better API would pass the actual return
	 * value to the callback.
	 */
	if (!fw && !test_fw_config->test_result)
		test_fw_config->test_result = -ENOENT;

	complete(&req->completion);
}

static
ssize_t trigger_batched_requests_async_store(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t count)
{
	struct test_batched_req *req;
	bool send_uevent;
	int rc;
	u8 i;

	mutex_lock(&test_fw_mutex);

	test_fw_config->reqs =
		vzalloc(array3_size(sizeof(struct test_batched_req),
				    test_fw_config->num_requests, 2));
	if (!test_fw_config->reqs) {
		rc = -ENOMEM;
		goto out;
	}

	pr_info("batched loading '%s' custom fallback mechanism %u times\n",
		test_fw_config->name, test_fw_config->num_requests);

	send_uevent = test_fw_config->send_uevent ? FW_ACTION_HOTPLUG :
		FW_ACTION_NOHOTPLUG;

	for (i = 0; i < test_fw_config->num_requests; i++) {
		req = &test_fw_config->reqs[i];
		req->name = test_fw_config->name;
		req->fw = NULL;
		req->idx = i;
		init_completion(&req->completion);
		rc = request_firmware_nowait(THIS_MODULE, send_uevent,
					     req->name,
					     dev, GFP_KERNEL, req,
					     trigger_batched_cb);
		if (rc) {
			pr_info("#%u: batched async load failed setup: %d\n",
				i, rc);
			req->rc = rc;
			goto out_bail;
		} else
			req->sent = true;
	}

	rc = count;

out_bail:

	/*
	 * We require an explicit release via the release_all_firmware
	 * trigger: delaying release_firmware() buys us more time and
	 * improves our chances of forcing a batched request. If we called
	 * release_firmware() right away, a successful firmware request
	 * might miss its opportunity to become a batched request.
	 */

	for (i = 0; i < test_fw_config->num_requests; i++) {
		req = &test_fw_config->reqs[i];
		if (req->sent)
			wait_for_completion(&req->completion);
	}

	/* Override any worker error if we had a general setup error */
	if (rc < 0)
		test_fw_config->test_result = rc;

out:
	mutex_unlock(&test_fw_mutex);

	return rc;
}
static DEVICE_ATTR_WO(trigger_batched_requests_async);

static ssize_t test_result_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return test_dev_config_show_int(buf, test_fw_config->test_result);
}
static DEVICE_ATTR_RO(test_result);

static ssize_t release_all_firmware_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	test_release_all_firmware();
	return count;
}
static DEVICE_ATTR_WO(release_all_firmware);

static ssize_t read_firmware_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct test_batched_req *req;
	u8 idx;
	ssize_t rc = 0;

	mutex_lock(&test_fw_mutex);

	idx = test_fw_config->read_fw_idx;
	if (idx >= test_fw_config->num_requests) {
		rc = -ERANGE;
		goto out;
	}

	if (!test_fw_config->reqs) {
		rc = -EINVAL;
		goto out;
	}

	req = &test_fw_config->reqs[idx];
	if (!req->fw) {
		pr_err("#%u: failed to async load firmware\n", idx);
		rc = -ENOENT;
		goto out;
	}

	pr_info("#%u: loaded %zu\n", idx, req->fw->size);

	if (req->fw->size > PAGE_SIZE) {
		pr_err("Testing interface must use PAGE_SIZE firmware for now\n");
		rc = -EINVAL;
		goto out;
	}
	memcpy(buf, req->fw->data, req->fw->size);

	rc = req->fw->size;
out:
	mutex_unlock(&test_fw_mutex);

	return rc;
}
static DEVICE_ATTR_RO(read_firmware);

#define TEST_FW_DEV_ATTR(name)          &dev_attr_##name.attr

static struct attribute *test_dev_attrs[] = {
	TEST_FW_DEV_ATTR(reset),

	TEST_FW_DEV_ATTR(config),
	TEST_FW_DEV_ATTR(config_name),
	TEST_FW_DEV_ATTR(config_num_requests),
	TEST_FW_DEV_ATTR(config_sync_direct),
	TEST_FW_DEV_ATTR(config_send_uevent),
	TEST_FW_DEV_ATTR(config_read_fw_idx),

	/* These don't use the config at all - they could be ported! */
	TEST_FW_DEV_ATTR(trigger_request),
	TEST_FW_DEV_ATTR(trigger_async_request),
	TEST_FW_DEV_ATTR(trigger_custom_fallback),

	/* These use the config and can use the test_result */
	TEST_FW_DEV_ATTR(trigger_batched_requests),
	TEST_FW_DEV_ATTR(trigger_batched_requests_async),

	TEST_FW_DEV_ATTR(release_all_firmware),
	TEST_FW_DEV_ATTR(test_result),
	TEST_FW_DEV_ATTR(read_firmware),
	NULL,
};

ATTRIBUTE_GROUPS(test_dev);

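/*
 * The attributes above are attached to the miscdevice via .groups, so they
 * show up in the misc device's sysfs directory next to /dev/test_firmware.
 */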
static struct miscdevice test_fw_misc_device = {
	.minor          = MISC_DYNAMIC_MINOR,
	.name           = "test_firmware",
	.fops           = &test_fw_fops,
	.groups         = test_dev_groups,
};

static int __init test_firmware_init(void)
{
	int rc;

	test_fw_config = kzalloc(sizeof(struct test_config), GFP_KERNEL);
	if (!test_fw_config)
		return -ENOMEM;

	rc = __test_firmware_config_init();
	if (rc) {
		kfree(test_fw_config);
		return rc;
	}

	rc = misc_register(&test_fw_misc_device);
	if (rc) {
		__test_firmware_config_free();
		kfree(test_fw_config);
		pr_err("could not register misc device: %d\n", rc);
		return rc;
	}

	pr_warn("interface ready\n");

	return 0;
}

module_init(test_firmware_init);

static void __exit test_firmware_exit(void)
{
	mutex_lock(&test_fw_mutex);
	release_firmware(test_firmware);
	misc_deregister(&test_fw_misc_device);
	__test_firmware_config_free();
	kfree(test_fw_config);
	mutex_unlock(&test_fw_mutex);

	pr_warn("removed interface\n");
}

module_exit(test_firmware_exit);

MODULE_AUTHOR("Kees Cook <keescook@chromium.org>");
MODULE_LICENSE("GPL");