// SPDX-License-Identifier: GPL-2.0-only
/*
 * Persistent Storage - platform driver interface parts.
 *
 * Copyright (C) 2007-2008 Google, Inc.
 * Copyright (C) 2010 Intel Corporation <tony.luck@intel.com>
 */

#define pr_fmt(fmt) "pstore: " fmt

#include <linux/atomic.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kmsg_dump.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/pstore.h>
#if IS_ENABLED(CONFIG_PSTORE_LZO_COMPRESS)
#include <linux/lzo.h>
#endif
#if IS_ENABLED(CONFIG_PSTORE_LZ4_COMPRESS) || IS_ENABLED(CONFIG_PSTORE_LZ4HC_COMPRESS)
#include <linux/lz4.h>
#endif
#if IS_ENABLED(CONFIG_PSTORE_ZSTD_COMPRESS)
#include <linux/zstd.h>
#endif
#include <linux/crypto.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

#include <crypto/acompress.h>

#include "internal.h"

/*
 * We defer making "oops" entries appear in pstore - we want to see
 * whether the system is actually still running well enough for
 * someone to eventually read the entry.
 */
static int pstore_update_ms = -1;
module_param_named(update_ms, pstore_update_ms, int, 0600);
MODULE_PARM_DESC(update_ms, "milliseconds before pstore updates its content "
		 "(default is -1, which means runtime updates are disabled; "
		 "enabling this option may not be safe; it may lead to further "
		 "corruption on Oopses)");
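/*
 * For example, booting with "pstore.update_ms=1000" (an illustrative
 * value, not a recommendation) re-arms the timer below once per second
 * so newly written records get picked up; see pstore_timer_kick() and
 * pstore_timefunc().
 */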

/* Names should be in the same order as the enum pstore_type_id */
static const char * const pstore_type_names[] = {
	"dmesg",
	"mce",
	"console",
	"ftrace",
	"rtas",
	"powerpc-ofw",
	"powerpc-common",
	"pmsg",
	"powerpc-opal",
};

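/* Set after a new dmesg record is written; the timer path clears it and schedules a rescan. */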
static int pstore_new_entry;

static void pstore_timefunc(struct timer_list *);
static DEFINE_TIMER(pstore_timer, pstore_timefunc);

static void pstore_dowork(struct work_struct *);
static DECLARE_WORK(pstore_work, pstore_dowork);

/*
 * psinfo_lock protects "psinfo" during calls to
 * pstore_register(), pstore_unregister(), and
 * the filesystem mount/unmount routines.
 */
static DEFINE_MUTEX(psinfo_lock);
struct pstore_info *psinfo;

static char *backend;
module_param(backend, charp, 0444);
MODULE_PARM_DESC(backend, "specific backend to use");
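/* For example, "pstore.backend=ramoops" restricts registration to the ramoops backend. */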

static char *compress =
#ifdef CONFIG_PSTORE_COMPRESS_DEFAULT
		CONFIG_PSTORE_COMPRESS_DEFAULT;
#else
		NULL;
#endif
module_param(compress, charp, 0444);
MODULE_PARM_DESC(compress, "compression to use");
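/*
 * For example, "pstore.compress=deflate" selects the deflate entry in
 * zbackends[] below; the name must match a compiled-in compressor.
 */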

/* Compression parameters */
static struct crypto_acomp *tfm;
static struct acomp_req *creq;

struct pstore_zbackend {
	int (*zbufsize)(size_t size);
	const char *name;
};

static char *big_oops_buf;
static size_t big_oops_buf_sz;

/* How much of the console log to snapshot */
unsigned long kmsg_bytes = CONFIG_PSTORE_DEFAULT_KMSG_BYTES;

void pstore_set_kmsg_bytes(int bytes)
{
	kmsg_bytes = bytes;
}

/* Tag each group of saved records with a sequence number */
static int	oopscount;

const char *pstore_type_to_name(enum pstore_type_id type)
{
	BUILD_BUG_ON(ARRAY_SIZE(pstore_type_names) != PSTORE_TYPE_MAX);

	if (WARN_ON_ONCE(type >= PSTORE_TYPE_MAX))
		return "unknown";

	return pstore_type_names[type];
}
EXPORT_SYMBOL_GPL(pstore_type_to_name);

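/*
 * Map a record name (e.g. "dmesg") back to its pstore_type_id.
 * Returns PSTORE_TYPE_MAX when the name matches no known type.
 */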
enum pstore_type_id pstore_name_to_type(const char *name)
{
	int i;

	for (i = 0; i < PSTORE_TYPE_MAX; i++) {
		if (!strcmp(pstore_type_names[i], name))
			return i;
	}

	return PSTORE_TYPE_MAX;
}
EXPORT_SYMBOL_GPL(pstore_name_to_type);

static void pstore_timer_kick(void)
{
	if (pstore_update_ms < 0)
		return;

	mod_timer(&pstore_timer, jiffies + msecs_to_jiffies(pstore_update_ms));
}

static bool pstore_cannot_block_path(enum kmsg_dump_reason reason)
{
	/*
	 * In case of NMI path, pstore shouldn't be blocked
	 * regardless of reason.
	 */
	if (in_nmi())
		return true;

	switch (reason) {
	/* In panic case, other cpus are stopped by smp_send_stop(). */
	case KMSG_DUMP_PANIC:
	/*
	 * Emergency restart shouldn't be blocked by spinning on
	 * pstore_info::buf_lock.
	 */
	case KMSG_DUMP_EMERG:
		return true;
	default:
		return false;
	}
}

#if IS_ENABLED(CONFIG_PSTORE_DEFLATE_COMPRESS)
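/*
 * Estimate how much uncompressed data fits in a backend buffer of the
 * given size. "cmpr" is an empirically assumed compressed-to-original
 * ratio in percent: e.g. a 4096-byte record, assumed to compress to
 * roughly 45% of its size, gets a 4096 * 100 / 45 = 9102 byte scratch
 * buffer.
 */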
static int zbufsize_deflate(size_t size)
{
	size_t cmpr;

	switch (size) {
	/* buffer range for efivars */
	case 1000 ... 2000:
		cmpr = 56;
		break;
	case 2001 ... 3000:
		cmpr = 54;
		break;
	case 3001 ... 3999:
		cmpr = 52;
		break;
	/* buffer range for nvram, erst */
	case 4000 ... 10000:
		cmpr = 45;
		break;
	default:
		cmpr = 60;
		break;
	}

	return (size * 100) / cmpr;
}
#endif

#if IS_ENABLED(CONFIG_PSTORE_LZO_COMPRESS)
static int zbufsize_lzo(size_t size)
{
	return lzo1x_worst_compress(size);
}
#endif

#if IS_ENABLED(CONFIG_PSTORE_LZ4_COMPRESS) || IS_ENABLED(CONFIG_PSTORE_LZ4HC_COMPRESS)
static int zbufsize_lz4(size_t size)
{
	return LZ4_compressBound(size);
}
#endif

#if IS_ENABLED(CONFIG_PSTORE_842_COMPRESS)
static int zbufsize_842(size_t size)
{
	return size;
}
#endif

#if IS_ENABLED(CONFIG_PSTORE_ZSTD_COMPRESS)
static int zbufsize_zstd(size_t size)
{
	return zstd_compress_bound(size);
}
#endif

static const struct pstore_zbackend *zbackend __ro_after_init;

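/* All compiled-in compressors; pstore_choose_compression() scans this table. */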
static const struct pstore_zbackend zbackends[] = {
#if IS_ENABLED(CONFIG_PSTORE_DEFLATE_COMPRESS)
	{
		.zbufsize	= zbufsize_deflate,
		.name		= "deflate",
	},
#endif
#if IS_ENABLED(CONFIG_PSTORE_LZO_COMPRESS)
	{
		.zbufsize	= zbufsize_lzo,
		.name		= "lzo",
	},
#endif
#if IS_ENABLED(CONFIG_PSTORE_LZ4_COMPRESS)
	{
		.zbufsize	= zbufsize_lz4,
		.name		= "lz4",
	},
#endif
#if IS_ENABLED(CONFIG_PSTORE_LZ4HC_COMPRESS)
	{
		.zbufsize	= zbufsize_lz4,
		.name		= "lz4hc",
	},
#endif
#if IS_ENABLED(CONFIG_PSTORE_842_COMPRESS)
	{
		.zbufsize	= zbufsize_842,
		.name		= "842",
	},
#endif
#if IS_ENABLED(CONFIG_PSTORE_ZSTD_COMPRESS)
	{
		.zbufsize	= zbufsize_zstd,
		.name		= "zstd",
	},
#endif
	{ }
};

static int pstore_compress(const void *in, void *out,
			   unsigned int inlen, unsigned int outlen)
{
	struct scatterlist src, dst;
	int ret;

	if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS))
		return -EINVAL;

	sg_init_table(&src, 1);
	sg_set_buf(&src, in, inlen);

	sg_init_table(&dst, 1);
	sg_set_buf(&dst, out, outlen);

	acomp_request_set_params(creq, &src, &dst, inlen, outlen);

	ret = crypto_acomp_compress(creq);
	if (ret) {
		pr_err("crypto_acomp_compress failed, ret = %d!\n", ret);
		return ret;
	}

	/* On success the request's dlen holds the produced length. */
	return creq->dlen;
}

static void allocate_buf_for_compression(void)
{
	struct crypto_acomp *acomp;
	int size;
	char *buf;

	/* Skip if not built-in or compression backend not selected yet. */
	if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS) || !zbackend)
		return;

	/* Skip if no pstore backend yet or compression init already done. */
	if (!psinfo || tfm)
		return;

	if (!crypto_has_acomp(zbackend->name, 0, CRYPTO_ALG_ASYNC)) {
		pr_err("Unknown compression: %s\n", zbackend->name);
		return;
	}

	size = zbackend->zbufsize(psinfo->bufsize);
	if (size <= 0) {
		pr_err("Invalid compression size for %s: %d\n",
		       zbackend->name, size);
		return;
	}

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf) {
		pr_err("Failed %d byte compression buffer allocation for: %s\n",
		       size, zbackend->name);
		return;
	}

	acomp = crypto_alloc_acomp(zbackend->name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR_OR_NULL(acomp)) {
		kfree(buf);
		pr_err("crypto_alloc_acomp('%s') failed: %ld\n", zbackend->name,
		       PTR_ERR(acomp));
		return;
	}

	creq = acomp_request_alloc(acomp);
	if (!creq) {
		crypto_free_acomp(acomp);
		kfree(buf);
		pr_err("acomp_request_alloc('%s') failed\n", zbackend->name);
		return;
	}

	/* A non-NULL big_oops_buf indicates compression is available. */
	tfm = acomp;
	big_oops_buf_sz = size;
	big_oops_buf = buf;

	pr_info("Using crash dump compression: %s\n", zbackend->name);
}

static void free_buf_for_compression(void)
{
	if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && tfm) {
		acomp_request_free(creq);
		crypto_free_acomp(tfm);
		tfm = NULL;
	}
	kfree(big_oops_buf);
	big_oops_buf = NULL;
	big_oops_buf_sz = 0;
}

/*
 * Called when compression fails. The printk buffer has already been
 * fetched (for compression); fetching it again would advance the kmsg
 * iterator and return older contents. Instead, copy the messages from
 * big_oops_buf into psinfo->buf, keeping the header plus the most
 * recent (psinfo->bufsize - hsize) bytes when the record is too large.
 */
static size_t copy_kmsg_to_buffer(int hsize, size_t len)
{
	size_t total_len;
	size_t diff;

	total_len = hsize + len;

	if (total_len > psinfo->bufsize) {
		diff = total_len - psinfo->bufsize + hsize;
		memcpy(psinfo->buf, big_oops_buf, hsize);
		memcpy(psinfo->buf + hsize, big_oops_buf + diff,
					psinfo->bufsize - hsize);
		total_len = psinfo->bufsize;
	} else
		memcpy(psinfo->buf, big_oops_buf, total_len);

	return total_len;
}

void pstore_record_init(struct pstore_record *record,
			struct pstore_info *psinfo)
{
	memset(record, 0, sizeof(*record));

	record->psi = psinfo;

	/* Report zeroed timestamp if called before timekeeping has resumed. */
	record->time = ns_to_timespec64(ktime_get_real_fast_ns());
}

/*
 * Callback from kmsg_dump. Save as much as we can (up to kmsg_bytes)
 * from the end of the kernel log buffer, splitting the dump into
 * "Part" chunks no larger than the backend's record size.
 */
static void pstore_dump(struct kmsg_dumper *dumper,
			enum kmsg_dump_reason reason)
{
	struct kmsg_dump_iter iter;
	unsigned long	total = 0;
	const char	*why;
	unsigned int	part = 1;
	unsigned long	flags = 0;
	int		ret;

	why = kmsg_dump_reason_str(reason);

	if (pstore_cannot_block_path(reason)) {
		if (!spin_trylock_irqsave(&psinfo->buf_lock, flags)) {
			pr_err("dump skipped in %s path because of concurrent dump\n",
					in_nmi() ? "NMI" : why);
			return;
		}
	} else {
		spin_lock_irqsave(&psinfo->buf_lock, flags);
	}

	kmsg_dump_rewind(&iter);

	oopscount++;
	while (total < kmsg_bytes) {
		char *dst;
		size_t dst_size;
		int header_size;
		int zipped_len = -1;
		size_t dump_size;
		struct pstore_record record;

		pstore_record_init(&record, psinfo);
		record.type = PSTORE_TYPE_DMESG;
		record.count = oopscount;
		record.reason = reason;
		record.part = part;
		record.buf = psinfo->buf;

		if (big_oops_buf) {
			dst = big_oops_buf;
			dst_size = big_oops_buf_sz;
		} else {
			dst = psinfo->buf;
			dst_size = psinfo->bufsize;
		}

		/* Write dump header, e.g. "Oops#1 Part1\n". */
		header_size = snprintf(dst, dst_size, "%s#%d Part%u\n", why,
				 oopscount, part);
		dst_size -= header_size;

		/* Write dump contents. */
		if (!kmsg_dump_get_buffer(&iter, true, dst + header_size,
					  dst_size, &dump_size))
			break;

		if (big_oops_buf) {
			zipped_len = pstore_compress(dst, psinfo->buf,
						header_size + dump_size,
						psinfo->bufsize);

			if (zipped_len > 0) {
				record.compressed = true;
				record.size = zipped_len;
			} else {
				record.size = copy_kmsg_to_buffer(header_size,
								  dump_size);
			}
		} else {
			record.size = header_size + dump_size;
		}

		ret = psinfo->write(&record);
		if (ret == 0 && reason == KMSG_DUMP_OOPS) {
			pstore_new_entry = 1;
			pstore_timer_kick();
		}

		total += record.size;
		part++;
	}
	spin_unlock_irqrestore(&psinfo->buf_lock, flags);
}

static struct kmsg_dumper pstore_dumper = {
	.dump = pstore_dump,
};

/*
 * Register with kmsg_dump to save last part of console log on panic.
 */
static void pstore_register_kmsg(void)
{
	kmsg_dump_register(&pstore_dumper);
}

static void pstore_unregister_kmsg(void)
{
	kmsg_dump_unregister(&pstore_dumper);
}

#ifdef CONFIG_PSTORE_CONSOLE
static void pstore_console_write(struct console *con, const char *s, unsigned c)
{
	struct pstore_record record;

	if (!c)
		return;

	pstore_record_init(&record, psinfo);
	record.type = PSTORE_TYPE_CONSOLE;

	record.buf = (char *)s;
	record.size = c;
	psinfo->write(&record);
}

static struct console pstore_console = {
	.write	= pstore_console_write,
	.index	= -1,
};

static void pstore_register_console(void)
{
	/* Show which backend is going to get console writes. */
	strscpy(pstore_console.name, psinfo->name,
		sizeof(pstore_console.name));
	/*
	 * Always initialize flags here since prior unregister_console()
	 * calls may have changed settings (specifically CON_ENABLED).
	 */
	pstore_console.flags = CON_PRINTBUFFER | CON_ENABLED | CON_ANYTIME;
	register_console(&pstore_console);
}

static void pstore_unregister_console(void)
{
	unregister_console(&pstore_console);
}
#else
static void pstore_register_console(void) {}
static void pstore_unregister_console(void) {}
#endif

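/*
 * Fallback for backends that do not provide a write_user() method:
 * copy the userspace buffer into the kernel and feed it to the
 * regular write() callback.
 */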
static int pstore_write_user_compat(struct pstore_record *record,
				    const char __user *buf)
{
	int ret = 0;

	if (record->buf)
		return -EINVAL;

	record->buf = memdup_user(buf, record->size);
	if (IS_ERR(record->buf)) {
		ret = PTR_ERR(record->buf);
		goto out;
	}

	ret = record->psi->write(record);

	kfree(record->buf);
out:
	record->buf = NULL;

	return unlikely(ret < 0) ? ret : record->size;
}

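/*
 * Example backend registration for pstore_register() below. This is a
 * minimal sketch: "my_buf", "my_read" and "my_write" are hypothetical
 * names, while the fields and flags come from <linux/pstore.h>.
 *
 *	static char my_buf[4096];
 *
 *	static struct pstore_info my_psi = {
 *		.owner		= THIS_MODULE,
 *		.name		= "mybackend",
 *		.buf		= my_buf,
 *		.bufsize	= sizeof(my_buf),
 *		.flags		= PSTORE_FLAGS_DMESG,
 *		.max_reason	= KMSG_DUMP_OOPS,
 *		.read		= my_read,
 *		.write		= my_write,
 *	};
 *
 *	ret = pstore_register(&my_psi);
 */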
/*
 * platform specific persistent storage driver registers with
 * us here. If pstore is already mounted, call the platform
 * read function right away to populate the file system. If not
 * then the pstore mount code will call us later to fill out
 * the file system.
 */
int pstore_register(struct pstore_info *psi)
{
	if (backend && strcmp(backend, psi->name)) {
		pr_warn("ignoring unexpected backend '%s'\n", psi->name);
		return -EPERM;
	}

	/* Sanity check flags. */
	if (!psi->flags) {
		pr_warn("backend '%s' must support at least one frontend\n",
			psi->name);
		return -EINVAL;
	}

	/* Check for required functions. */
	if (!psi->read || !psi->write) {
		pr_warn("backend '%s' must implement read() and write()\n",
			psi->name);
		return -EINVAL;
	}

	mutex_lock(&psinfo_lock);
	if (psinfo) {
		pr_warn("backend '%s' already loaded: ignoring '%s'\n",
			psinfo->name, psi->name);
		mutex_unlock(&psinfo_lock);
		return -EBUSY;
	}

	if (!psi->write_user)
		psi->write_user = pstore_write_user_compat;
	psinfo = psi;
	mutex_init(&psinfo->read_mutex);
	spin_lock_init(&psinfo->buf_lock);

	if (psi->flags & PSTORE_FLAGS_DMESG)
		allocate_buf_for_compression();

	pstore_get_records(0);

	if (psi->flags & PSTORE_FLAGS_DMESG) {
		pstore_dumper.max_reason = psinfo->max_reason;
		pstore_register_kmsg();
	}
	if (psi->flags & PSTORE_FLAGS_CONSOLE)
		pstore_register_console();
	if (psi->flags & PSTORE_FLAGS_FTRACE)
		pstore_register_ftrace();
	if (psi->flags & PSTORE_FLAGS_PMSG)
		pstore_register_pmsg();

	/* Start watching for new records, if desired. */
	pstore_timer_kick();

	/*
	 * Update the module parameter backend, so it is visible
	 * through /sys/module/pstore/parameters/backend
	 */
	backend = kstrdup(psi->name, GFP_KERNEL);

	pr_info("Registered %s as persistent store backend\n", psi->name);

	mutex_unlock(&psinfo_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(pstore_register);

void pstore_unregister(struct pstore_info *psi)
{
	/* It's okay to unregister nothing. */
	if (!psi)
		return;

	mutex_lock(&psinfo_lock);

	/* Only one backend can be registered at a time. */
	if (WARN_ON(psi != psinfo)) {
		mutex_unlock(&psinfo_lock);
		return;
	}

	/* Unregister all callbacks. */
	if (psi->flags & PSTORE_FLAGS_PMSG)
		pstore_unregister_pmsg();
	if (psi->flags & PSTORE_FLAGS_FTRACE)
		pstore_unregister_ftrace();
	if (psi->flags & PSTORE_FLAGS_CONSOLE)
		pstore_unregister_console();
	if (psi->flags & PSTORE_FLAGS_DMESG)
		pstore_unregister_kmsg();

	/* Stop timer and make sure all work has finished. */
	del_timer_sync(&pstore_timer);
	flush_work(&pstore_work);

	/* Remove all backend records from filesystem tree. */
	pstore_put_backend_records(psi);

	free_buf_for_compression();

	psinfo = NULL;
	kfree(backend);
	backend = NULL;
	mutex_unlock(&psinfo_lock);
}
EXPORT_SYMBOL_GPL(pstore_unregister);

static void decompress_record(struct pstore_record *record)
{
	int ret;
	int unzipped_len;
	char *unzipped, *workspace;
	struct acomp_req *dreq;
	struct scatterlist src, dst;

	if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS) || !record->compressed)
		return;

	/* Only PSTORE_TYPE_DMESG supports compression. */
	if (record->type != PSTORE_TYPE_DMESG) {
		pr_warn("ignored compressed record type %d\n", record->type);
		return;
	}

	/* Missing compression buffer means compression was not initialized. */
	if (!big_oops_buf) {
		pr_warn("no decompression method initialized!\n");
		return;
	}

	/* Allocate enough space to hold max decompression and ECC. */
	unzipped_len = big_oops_buf_sz;
	workspace = kmalloc(unzipped_len + record->ecc_notice_size,
			    GFP_KERNEL);
	if (!workspace)
		return;

	dreq = acomp_request_alloc(tfm);
	if (!dreq) {
		kfree(workspace);
		return;
	}

	sg_init_table(&src, 1);
	sg_set_buf(&src, record->buf, record->size);

	sg_init_table(&dst, 1);
	sg_set_buf(&dst, workspace, unzipped_len);

	acomp_request_set_params(dreq, &src, &dst, record->size, unzipped_len);

	/* After decompression "unzipped_len" is almost certainly smaller. */
	ret = crypto_acomp_decompress(dreq);
	if (ret) {
		pr_err("crypto_acomp_decompress failed, ret = %d!\n", ret);
		acomp_request_free(dreq);
		kfree(workspace);
		return;
	}

	/* Append ECC notice to decompressed buffer. */
	unzipped_len = dreq->dlen;
	memcpy(workspace + unzipped_len, record->buf + record->size,
	       record->ecc_notice_size);

	/* Copy decompressed contents into a minimum-sized allocation. */
	unzipped = kmemdup(workspace, unzipped_len + record->ecc_notice_size,
			   GFP_KERNEL);
	kfree(workspace);
	acomp_request_free(dreq);
	if (!unzipped)
		return;

	/* Swap out compressed contents with decompressed contents. */
	kfree(record->buf);
	record->buf = unzipped;
	record->size = unzipped_len;
	record->compressed = false;
}

/*
 * Read all the records from one persistent store backend. Create
 * files in our filesystem.  Don't warn about -EEXIST errors
 * when we are re-scanning the backing store looking to add new
 * error records.
 */
void pstore_get_backend_records(struct pstore_info *psi,
				struct dentry *root, int quiet)
{
	int failed = 0;
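	/* Guard against backends that never stop returning records. */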
	unsigned int stop_loop = 65536;

	if (!psi || !root)
		return;

	mutex_lock(&psi->read_mutex);
	if (psi->open && psi->open(psi))
		goto out;

	/*
	 * Backend callback read() allocates record.buf. decompress_record()
	 * may reallocate record.buf. On success, pstore_mkfile() will keep
	 * the record.buf, so free it only on failure.
	 */
	for (; stop_loop; stop_loop--) {
		struct pstore_record *record;
		int rc;

		record = kzalloc(sizeof(*record), GFP_KERNEL);
		if (!record) {
			pr_err("out of memory creating record\n");
			break;
		}
		pstore_record_init(record, psi);

		record->size = psi->read(record);

		/* No more records left in backend? */
		if (record->size <= 0) {
			kfree(record);
			break;
		}

		decompress_record(record);
		rc = pstore_mkfile(root, record);
		if (rc) {
			/* pstore_mkfile() did not take record, so free it. */
			kfree(record->buf);
			kfree(record->priv);
			kfree(record);
			if (rc != -EEXIST || !quiet)
				failed++;
		}
	}
	if (psi->close)
		psi->close(psi);
out:
	mutex_unlock(&psi->read_mutex);

	if (failed)
		pr_warn("failed to create %d record(s) from '%s'\n",
			failed, psi->name);
	if (!stop_loop)
		pr_err("looping? Too many records seen from '%s'\n",
			psi->name);
}

static void pstore_dowork(struct work_struct *work)
{
	pstore_get_records(1);
}

static void pstore_timefunc(struct timer_list *unused)
{
	if (pstore_new_entry) {
		pstore_new_entry = 0;
		schedule_work(&pstore_work);
	}

	pstore_timer_kick();
}

static void __init pstore_choose_compression(void)
{
	const struct pstore_zbackend *step;

	if (!compress)
		return;

	for (step = zbackends; step->name; step++) {
		if (!strcmp(compress, step->name)) {
			zbackend = step;
			return;
		}
	}
}

static int __init pstore_init(void)
{
	int ret;

	pstore_choose_compression();

	/*
	 * Check if any pstore backends registered earlier but did not
	 * initialize compression because crypto was not ready. If so,
	 * initialize compression now.
	 */
	allocate_buf_for_compression();

	ret = pstore_init_fs();
	if (ret)
		free_buf_for_compression();

	return ret;
}
late_initcall(pstore_init);

static void __exit pstore_exit(void)
{
	pstore_exit_fs();
}
module_exit(pstore_exit);

MODULE_AUTHOR("Tony Luck <tony.luck@intel.com>");
MODULE_LICENSE("GPL");