// SPDX-License-Identifier: GPL-2.0-only
/*
 * Persistent Storage - platform driver interface parts.
 *
 * Copyright (C) 2007-2008 Google, Inc.
 * Copyright (C) 2010 Intel Corporation <tony.luck@intel.com>
 */

#define pr_fmt(fmt) "pstore: " fmt

#include <linux/atomic.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kmsg_dump.h>
#include <linux/console.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pstore.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/jiffies.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/zlib.h>

#include "internal.h"

/*
 * We defer making "oops" entries appear in pstore until we can
 * see whether the system is actually still running well enough
 * for someone to read the entry.
 */
static int pstore_update_ms = -1;
module_param_named(update_ms, pstore_update_ms, int, 0600);
MODULE_PARM_DESC(update_ms, "milliseconds before pstore updates its content "
		 "(default is -1, which means runtime updates are disabled; "
		 "enabling this option may not be safe; it may lead to further "
		 "corruption on Oopses)");

/* Names should be in the same order as the enum pstore_type_id */
static const char * const pstore_type_names[] = {
	"dmesg",
	"mce",
	"console",
	"ftrace",
	"rtas",
	"powerpc-ofw",
	"powerpc-common",
	"pmsg",
	"powerpc-opal",
};

static int pstore_new_entry;

static void pstore_timefunc(struct timer_list *);
static DEFINE_TIMER(pstore_timer, pstore_timefunc);

static void pstore_dowork(struct work_struct *);
static DECLARE_WORK(pstore_work, pstore_dowork);

/*
 * psinfo_lock protects "psinfo" during calls to
 * pstore_register(), pstore_unregister(), and
 * the filesystem mount/unmount routines.
 */
static DEFINE_MUTEX(psinfo_lock);
struct pstore_info *psinfo;

static char *backend;
module_param(backend, charp, 0444);
MODULE_PARM_DESC(backend, "specific backend to use");

/*
 * pstore no longer implements compression via the crypto API, and only
 * supports zlib deflate compression implemented using the zlib library
 * interface. This removes additional complexity which is hard to justify for a
 * diagnostic facility that has to operate in conditions where the system may
 * have become unstable. Zlib deflate is comparatively small in terms of code
 * size, and compresses ASCII text comparatively well. In terms of compression
 * speed, deflate is not the best performer but for recording the log output on
 * a kernel panic, this is not considered critical.
 *
 * The only remaining arguments supported by the compress= module parameter are
 * 'deflate' and 'none'. To retain compatibility with existing installations,
 * all other values are logged and replaced with 'deflate'.
 */
static char *compress = "deflate";
module_param(compress, charp, 0444);
MODULE_PARM_DESC(compress, "compression to use");
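
/*
 * For example, booting with "pstore.compress=none" stores dmesg records
 * uncompressed; any other unsupported value is logged and replaced with
 * "deflate" in allocate_buf_for_compression() below.
 */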

/* How much of the kernel log to snapshot */
unsigned int kmsg_bytes = CONFIG_PSTORE_DEFAULT_KMSG_BYTES;
module_param(kmsg_bytes, uint, 0444);
MODULE_PARM_DESC(kmsg_bytes, "amount of kernel log to snapshot (in bytes)");
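
/*
 * kmsg_bytes is read-only through sysfs (0444) but can be set at boot
 * with "pstore.kmsg_bytes=<n>"; the pstore filesystem's kmsg_bytes=
 * mount option updates it via pstore_set_kmsg_bytes() below.
 */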

static void *compress_workspace;

/*
 * Compression is only used for dmesg output, which consists of low-entropy
 * ASCII text, and so we can assume a worst-case compression ratio of 60%.
 */
#define DMESG_COMP_PERCENT	60
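
/*
 * e.g. with a 64 KiB backend record buffer, allocate_buf_for_compression()
 * below sizes the uncompressed staging buffer as
 * 65536 * 100 / 60 = 109226 bytes (~107 KiB).
 */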

static char *big_oops_buf;
static size_t max_compressed_size;

void pstore_set_kmsg_bytes(unsigned int bytes)
{
	WRITE_ONCE(kmsg_bytes, bytes);
}

/* Tag each group of saved records with a sequence number */
static int oopscount;

const char *pstore_type_to_name(enum pstore_type_id type)
{
	BUILD_BUG_ON(ARRAY_SIZE(pstore_type_names) != PSTORE_TYPE_MAX);

	if (WARN_ON_ONCE(type >= PSTORE_TYPE_MAX))
		return "unknown";

	return pstore_type_names[type];
}
EXPORT_SYMBOL_GPL(pstore_type_to_name);

enum pstore_type_id pstore_name_to_type(const char *name)
{
	int i;

	for (i = 0; i < PSTORE_TYPE_MAX; i++) {
		if (!strcmp(pstore_type_names[i], name))
			return i;
	}

	return PSTORE_TYPE_MAX;
}
EXPORT_SYMBOL_GPL(pstore_name_to_type);

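/* (Re)arm the update timer, unless runtime updates are disabled (-1). */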
static void pstore_timer_kick(void)
{
	if (pstore_update_ms < 0)
		return;

	mod_timer(&pstore_timer, jiffies + msecs_to_jiffies(pstore_update_ms));
}

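/*
 * Decide whether this dump may sleep or spin waiting for buf_lock, or
 * whether it must give up immediately if the lock cannot be taken.
 */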
static bool pstore_cannot_block_path(enum kmsg_dump_reason reason)
{
	/*
	 * In case of NMI path, pstore shouldn't be blocked
	 * regardless of reason.
	 */
	if (in_nmi())
		return true;

	switch (reason) {
	/* In panic case, other cpus are stopped by smp_send_stop(). */
	case KMSG_DUMP_PANIC:
	/*
	 * Emergency restart shouldn't be blocked by spinning on
	 * pstore_info::buf_lock.
	 */
	case KMSG_DUMP_EMERG:
		return true;
	default:
		return false;
	}
}

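/*
 * Compress @inlen bytes from @in into @out (capacity @outlen) using a raw
 * zlib deflate stream (negative windowBits, so no zlib header/trailer).
 * Returns the compressed size, or -EINVAL on failure.
 */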
static int pstore_compress(const void *in, void *out,
			   unsigned int inlen, unsigned int outlen)
{
	struct z_stream_s zstream = {
		.next_in	= in,
		.avail_in	= inlen,
		.next_out	= out,
		.avail_out	= outlen,
		.workspace	= compress_workspace,
	};
	int ret;

	if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS))
		return -EINVAL;

	ret = zlib_deflateInit2(&zstream, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
				-MAX_WBITS, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY);
	if (ret != Z_OK)
		return -EINVAL;

	ret = zlib_deflate(&zstream, Z_FINISH);
	if (ret != Z_STREAM_END)
		return -EINVAL;

	ret = zlib_deflateEnd(&zstream);
	if (ret != Z_OK)
		pr_warn_once("zlib_deflateEnd() failed: %d\n", ret);

	return zstream.total_out;
}

static void allocate_buf_for_compression(void)
{
	size_t compressed_size;
	char *buf;

	/* Skip if not built-in or compression disabled. */
	if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS) || !compress ||
	    !strcmp(compress, "none")) {
		compress = NULL;
		return;
	}

	if (strcmp(compress, "deflate")) {
		pr_err("Unsupported compression '%s', falling back to deflate\n",
		       compress);
		compress = "deflate";
	}

	/*
	 * The compression buffer only needs to be as large as the maximum
	 * uncompressed record size, since any record that would be expanded by
	 * compression is just stored uncompressed.
	 */
	compressed_size = (psinfo->bufsize * 100) / DMESG_COMP_PERCENT;
	buf = kvzalloc(compressed_size, GFP_KERNEL);
	if (!buf) {
		pr_err("Failed %zu byte compression buffer allocation for: %s\n",
		       compressed_size, compress);
		return;
	}

	compress_workspace =
		vmalloc(zlib_deflate_workspacesize(MAX_WBITS, DEF_MEM_LEVEL));
	if (!compress_workspace) {
		pr_err("Failed to allocate zlib deflate workspace\n");
		kvfree(buf);
		return;
	}

	/* A non-NULL big_oops_buf indicates compression is available. */
	big_oops_buf = buf;
	max_compressed_size = compressed_size;

	pr_info("Using crash dump compression: %s\n", compress);
}

static void free_buf_for_compression(void)
{
	if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && compress_workspace) {
		vfree(compress_workspace);
		compress_workspace = NULL;
	}

	kvfree(big_oops_buf);
	big_oops_buf = NULL;
	max_compressed_size = 0;
}

void pstore_record_init(struct pstore_record *record,
			struct pstore_info *psinfo)
{
	memset(record, 0, sizeof(*record));

	record->psi = psinfo;

	/* Report zeroed timestamp if called before timekeeping has resumed. */
	record->time = ns_to_timespec64(ktime_get_real_fast_ns());
}

/*
 * Callback from kmsg_dump. Save as much as we can (up to kmsg_bytes) from
 * the end of the buffer.
 */
static void pstore_dump(struct kmsg_dumper *dumper,
			enum kmsg_dump_reason reason)
{
	struct kmsg_dump_iter iter;
	unsigned int remaining = READ_ONCE(kmsg_bytes);
	unsigned long total = 0;
	const char *why;
	unsigned int part = 1;
	unsigned long flags = 0;
	int saved_ret = 0;
	int ret;

	why = kmsg_dump_reason_str(reason);

	if (pstore_cannot_block_path(reason)) {
		if (!spin_trylock_irqsave(&psinfo->buf_lock, flags)) {
			pr_err("dump skipped in %s path because of concurrent dump\n",
			       in_nmi() ? "NMI" : why);
			return;
		}
	} else {
		spin_lock_irqsave(&psinfo->buf_lock, flags);
	}

	kmsg_dump_rewind(&iter);

	oopscount++;
	while (total < remaining) {
		char *dst;
		size_t dst_size;
		int header_size;
		int zipped_len = -1;
		size_t dump_size;
		struct pstore_record record;

		pstore_record_init(&record, psinfo);
		record.type = PSTORE_TYPE_DMESG;
		record.count = oopscount;
		record.reason = reason;
		record.part = part;
		record.buf = psinfo->buf;

		dst = big_oops_buf ?: psinfo->buf;
		dst_size = max_compressed_size ?: psinfo->bufsize;

		/* Write dump header. */
		header_size = snprintf(dst, dst_size, "%s#%d Part%u\n", why,
				       oopscount, part);
		dst_size -= header_size;

		/* Write dump contents. */
		if (!kmsg_dump_get_buffer(&iter, true, dst + header_size,
					  dst_size, &dump_size))
			break;

		if (big_oops_buf) {
			zipped_len = pstore_compress(dst, psinfo->buf,
						     header_size + dump_size,
						     psinfo->bufsize);

			if (zipped_len > 0) {
				record.compressed = true;
				record.size = zipped_len;
			} else {
				/*
				 * Compression failed, so the buffer is most
				 * likely filled with binary data that does not
				 * compress as well as ASCII text. Copy as much
				 * of the uncompressed data as possible into
				 * the pstore record, and discard the rest.
				 */
				record.size = psinfo->bufsize;
				memcpy(psinfo->buf, dst, psinfo->bufsize);
			}
		} else {
			record.size = header_size + dump_size;
		}

		ret = psinfo->write(&record);
		if (ret == 0 && reason == KMSG_DUMP_OOPS) {
			pstore_new_entry = 1;
			pstore_timer_kick();
		} else {
			/* Preserve only the first non-zero returned value. */
			if (!saved_ret)
				saved_ret = ret;
		}

		total += record.size;
		part++;
	}
	spin_unlock_irqrestore(&psinfo->buf_lock, flags);

	if (saved_ret) {
		pr_err_once("backend (%s) writing error (%d)\n", psinfo->name,
			    saved_ret);
	}
}

static struct kmsg_dumper pstore_dumper = {
	.dump = pstore_dump,
};

/*
 * Register with kmsg_dump to save last part of console log on panic.
 */
static void pstore_register_kmsg(void)
{
	kmsg_dump_register(&pstore_dumper);
}

static void pstore_unregister_kmsg(void)
{
	kmsg_dump_unregister(&pstore_dumper);
}

#ifdef CONFIG_PSTORE_CONSOLE
static void pstore_console_write(struct console *con, const char *s, unsigned c)
{
	struct pstore_record record;

	if (!c)
		return;

	pstore_record_init(&record, psinfo);
	record.type = PSTORE_TYPE_CONSOLE;

	record.buf = (char *)s;
	record.size = c;
	psinfo->write(&record);
}

static struct console pstore_console = {
	.write	= pstore_console_write,
	.index	= -1,
};

static void pstore_register_console(void)
{
	/* Show which backend is going to get console writes. */
	strscpy(pstore_console.name, psinfo->name,
		sizeof(pstore_console.name));
	/*
	 * Always initialize flags here since prior unregister_console()
	 * calls may have changed settings (specifically CON_ENABLED).
	 */
	pstore_console.flags = CON_PRINTBUFFER | CON_ENABLED | CON_ANYTIME;
	register_console(&pstore_console);
}

static void pstore_unregister_console(void)
{
	unregister_console(&pstore_console);
}
#else
static void pstore_register_console(void) {}
static void pstore_unregister_console(void) {}
#endif

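/*
 * Fallback for backends that do not supply a write_user() method: copy
 * the userspace buffer into a kernel allocation and route it through the
 * regular write() callback.
 */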
static int pstore_write_user_compat(struct pstore_record *record,
				    const char __user *buf)
{
	int ret = 0;

	if (record->buf)
		return -EINVAL;

	record->buf = vmemdup_user(buf, record->size);
	if (IS_ERR(record->buf)) {
		ret = PTR_ERR(record->buf);
		goto out;
	}

	ret = record->psi->write(record);

	kvfree(record->buf);
out:
	record->buf = NULL;

	return unlikely(ret < 0) ? ret : record->size;
}


/*
 * Platform-specific persistent storage driver registers with
 * us here. If pstore is already mounted, call the platform
 * read function right away to populate the file system. If not,
 * then the pstore mount code will call us later to fill out
 * the file system.
 */
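/*
 * A minimal backend might register itself roughly like this (an
 * illustrative sketch, not an in-tree driver; a PSTORE_FLAGS_DMESG
 * backend must also supply a record buffer via .buf and .bufsize):
 *
 *	static struct pstore_info my_psi = {
 *		.owner	= THIS_MODULE,
 *		.name	= "mybackend",
 *		.flags	= PSTORE_FLAGS_DMESG,
 *		.read	= my_read,
 *		.write	= my_write,
 *	};
 *
 *	ret = pstore_register(&my_psi);
 */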
int pstore_register(struct pstore_info *psi)
{
	char *new_backend;

	if (backend && strcmp(backend, psi->name)) {
		pr_warn("backend '%s' already in use: ignoring '%s'\n",
			backend, psi->name);
		return -EBUSY;
	}

	/* Sanity check flags. */
	if (!psi->flags) {
		pr_warn("backend '%s' must support at least one frontend\n",
			psi->name);
		return -EINVAL;
	}

	/* Check for required functions. */
	if (!psi->read || !psi->write) {
		pr_warn("backend '%s' must implement read() and write()\n",
			psi->name);
		return -EINVAL;
	}

	new_backend = kstrdup(psi->name, GFP_KERNEL);
	if (!new_backend)
		return -ENOMEM;

	mutex_lock(&psinfo_lock);
	if (psinfo) {
		pr_warn("backend '%s' already loaded: ignoring '%s'\n",
			psinfo->name, psi->name);
		mutex_unlock(&psinfo_lock);
		kfree(new_backend);
		return -EBUSY;
	}

	if (!psi->write_user)
		psi->write_user = pstore_write_user_compat;
	psinfo = psi;
	mutex_init(&psinfo->read_mutex);
	spin_lock_init(&psinfo->buf_lock);

	if (psi->flags & PSTORE_FLAGS_DMESG)
		allocate_buf_for_compression();

	pstore_get_records(0);

	if (psi->flags & PSTORE_FLAGS_DMESG) {
		pstore_dumper.max_reason = psinfo->max_reason;
		pstore_register_kmsg();
	}
	if (psi->flags & PSTORE_FLAGS_CONSOLE)
		pstore_register_console();
	if (psi->flags & PSTORE_FLAGS_FTRACE)
		pstore_register_ftrace();
	if (psi->flags & PSTORE_FLAGS_PMSG)
		pstore_register_pmsg();

	/* Start watching for new records, if desired. */
	pstore_timer_kick();

	/*
	 * Update the module parameter backend, so it is visible
	 * through /sys/module/pstore/parameters/backend.
	 */
	backend = new_backend;

	pr_info("Registered %s as persistent store backend\n", psi->name);

	mutex_unlock(&psinfo_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(pstore_register);

void pstore_unregister(struct pstore_info *psi)
{
	/* It's okay to unregister nothing. */
	if (!psi)
		return;

	mutex_lock(&psinfo_lock);

	/* Only one backend can be registered at a time. */
	if (WARN_ON(psi != psinfo)) {
		mutex_unlock(&psinfo_lock);
		return;
	}

	/* Unregister all callbacks. */
	if (psi->flags & PSTORE_FLAGS_PMSG)
		pstore_unregister_pmsg();
	if (psi->flags & PSTORE_FLAGS_FTRACE)
		pstore_unregister_ftrace();
	if (psi->flags & PSTORE_FLAGS_CONSOLE)
		pstore_unregister_console();
	if (psi->flags & PSTORE_FLAGS_DMESG)
		pstore_unregister_kmsg();

	/* Stop timer and make sure all work has finished. */
	del_timer_sync(&pstore_timer);
	flush_work(&pstore_work);

	/* Remove all backend records from filesystem tree. */
	pstore_put_backend_records(psi);

	free_buf_for_compression();

	psinfo = NULL;
	kfree(backend);
	backend = NULL;

	pr_info("Unregistered %s as persistent store backend\n", psi->name);
	mutex_unlock(&psinfo_lock);
}
EXPORT_SYMBOL_GPL(pstore_unregister);

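/*
 * Inflate a compressed PSTORE_TYPE_DMESG record in place: on success,
 * record->buf and record->size are replaced with the decompressed
 * contents (plus any trailing ECC notice) and record->compressed is
 * cleared.
 */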
static void decompress_record(struct pstore_record *record,
			      struct z_stream_s *zstream)
{
	int ret;
	int unzipped_len;
	char *unzipped, *workspace;
	size_t max_uncompressed_size;

	if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS) || !record->compressed)
		return;

	/* Only PSTORE_TYPE_DMESG records support compression. */
	if (record->type != PSTORE_TYPE_DMESG) {
		pr_warn("ignored compressed record type %d\n", record->type);
		return;
	}

	/* Missing compression buffer means compression was not initialized. */
	if (!zstream->workspace) {
		pr_warn("no decompression method initialized!\n");
		return;
	}

	ret = zlib_inflateReset(zstream);
	if (ret != Z_OK) {
		pr_err("zlib_inflateReset() failed, ret = %d!\n", ret);
		return;
	}

	/* Allocate enough space to hold max decompression and ECC. */
	max_uncompressed_size = 3 * psinfo->bufsize;
	workspace = kvzalloc(max_uncompressed_size + record->ecc_notice_size,
			     GFP_KERNEL);
	if (!workspace)
		return;

	zstream->next_in	= record->buf;
	zstream->avail_in	= record->size;
	zstream->next_out	= workspace;
	zstream->avail_out	= max_uncompressed_size;

	ret = zlib_inflate(zstream, Z_FINISH);
	if (ret != Z_STREAM_END) {
		pr_err_ratelimited("zlib_inflate() failed, ret = %d!\n", ret);
		kvfree(workspace);
		return;
	}

	unzipped_len = zstream->total_out;

	/* Append ECC notice to decompressed buffer. */
	memcpy(workspace + unzipped_len, record->buf + record->size,
	       record->ecc_notice_size);

	/* Copy decompressed contents into a minimum-sized allocation. */
	unzipped = kvmemdup(workspace, unzipped_len + record->ecc_notice_size,
			    GFP_KERNEL);
	kvfree(workspace);
	if (!unzipped)
		return;

	/* Swap out compressed contents with decompressed contents. */
	kvfree(record->buf);
	record->buf = unzipped;
	record->size = unzipped_len;
	record->compressed = false;
}

/*
 * Read all the records from one persistent store backend. Create
 * files in our filesystem. Don't warn about -EEXIST errors
 * when we are re-scanning the backing store looking to add new
 * error records.
 */
void pstore_get_backend_records(struct pstore_info *psi,
				struct dentry *root, int quiet)
{
	int failed = 0;
	unsigned int stop_loop = 65536;
	struct z_stream_s zstream = {};

	if (!psi || !root)
		return;

	if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && compress) {
		zstream.workspace = kvmalloc(zlib_inflate_workspacesize(),
					     GFP_KERNEL);
		zlib_inflateInit2(&zstream, -DEF_WBITS);
	}

	mutex_lock(&psi->read_mutex);
	if (psi->open && psi->open(psi))
		goto out;

	/*
	 * Backend callback read() allocates record.buf. decompress_record()
	 * may reallocate record.buf. On success, pstore_mkfile() will keep
	 * the record.buf, so free it only on failure.
	 */
	for (; stop_loop; stop_loop--) {
		struct pstore_record *record;
		int rc;

		record = kzalloc(sizeof(*record), GFP_KERNEL);
		if (!record) {
			pr_err("out of memory creating record\n");
			break;
		}
		pstore_record_init(record, psi);

		record->size = psi->read(record);

		/* No more records left in backend? */
		if (record->size <= 0) {
			kfree(record);
			break;
		}

		decompress_record(record, &zstream);
		rc = pstore_mkfile(root, record);
		if (rc) {
			/* pstore_mkfile() did not take record, so free it. */
			kvfree(record->buf);
			kfree(record->priv);
			kfree(record);
			if (rc != -EEXIST || !quiet)
				failed++;
		}
	}
	if (psi->close)
		psi->close(psi);
out:
	mutex_unlock(&psi->read_mutex);

	if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && compress) {
		if (zlib_inflateEnd(&zstream) != Z_OK)
			pr_warn("zlib_inflateEnd() failed\n");
		kvfree(zstream.workspace);
	}

	if (failed)
		pr_warn("failed to create %d record(s) from '%s'\n",
			failed, psi->name);
	if (!stop_loop)
		pr_err("looping? Too many records seen from '%s'\n",
		       psi->name);
}

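/*
 * Process-context worker that rescans the backend for new records; it is
 * scheduled from pstore_timefunc() whenever pstore_new_entry has been set
 * (e.g. after an Oops dump).
 */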
static void pstore_dowork(struct work_struct *work)
{
	pstore_get_records(1);
}

static void pstore_timefunc(struct timer_list *unused)
{
	if (pstore_new_entry) {
		pstore_new_entry = 0;
		schedule_work(&pstore_work);
	}

	pstore_timer_kick();
}

static int __init pstore_init(void)
{
	int ret;

	ret = pstore_init_fs();
	if (ret)
		free_buf_for_compression();

	return ret;
}
late_initcall(pstore_init);

static void __exit pstore_exit(void)
{
	pstore_exit_fs();
}
module_exit(pstore_exit)

MODULE_AUTHOR("Tony Luck <tony.luck@intel.com>");
MODULE_LICENSE("GPL");