1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Persistent Storage - platform driver interface parts.
4 *
5 * Copyright (C) 2007-2008 Google, Inc.
6 * Copyright (C) 2010 Intel Corporation <tony.luck@intel.com>
7 */
8
9#define pr_fmt(fmt) "pstore: " fmt
10
11#include <linux/atomic.h>
12#include <linux/types.h>
13#include <linux/errno.h>
14#include <linux/init.h>
15#include <linux/kmsg_dump.h>
16#include <linux/console.h>
17#include <linux/module.h>
18#include <linux/pstore.h>
19#if IS_ENABLED(CONFIG_PSTORE_LZO_COMPRESS)
20#include <linux/lzo.h>
21#endif
22#if IS_ENABLED(CONFIG_PSTORE_LZ4_COMPRESS) || IS_ENABLED(CONFIG_PSTORE_LZ4HC_COMPRESS)
23#include <linux/lz4.h>
24#endif
25#if IS_ENABLED(CONFIG_PSTORE_ZSTD_COMPRESS)
26#include <linux/zstd.h>
27#endif
28#include <linux/crypto.h>
29#include <linux/string.h>
30#include <linux/timer.h>
31#include <linux/slab.h>
32#include <linux/uaccess.h>
33#include <linux/jiffies.h>
34#include <linux/workqueue.h>
35
36#include "internal.h"
37
/*
 * We defer making "oops" entries appear in pstore until we can see
 * whether the system is still running well enough for someone to
 * read the entry.
 */
43static int pstore_update_ms = -1;
44module_param_named(update_ms, pstore_update_ms, int, 0600);
45MODULE_PARM_DESC(update_ms, "milliseconds before pstore updates its content "
46 "(default is -1, which means runtime updates are disabled; "
47 "enabling this option may not be safe; it may lead to further "
48 "corruption on Oopses)");
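
/*
 * Illustrative usage (not part of this file): with pstore built in,
 * runtime updates can be enabled at boot with e.g. "pstore.update_ms=5000",
 * or later by root via /sys/module/pstore/parameters/update_ms (mode 0600
 * above); the timer below then periodically checks for new backend records.
 */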
49
50/* Names should be in the same order as the enum pstore_type_id */
51static const char * const pstore_type_names[] = {
52 "dmesg",
53 "mce",
54 "console",
55 "ftrace",
56 "rtas",
57 "powerpc-ofw",
58 "powerpc-common",
59 "pmsg",
60 "powerpc-opal",
61};
62
63static int pstore_new_entry;
64
65static void pstore_timefunc(struct timer_list *);
66static DEFINE_TIMER(pstore_timer, pstore_timefunc);
67
68static void pstore_dowork(struct work_struct *);
69static DECLARE_WORK(pstore_work, pstore_dowork);
70
71/*
72 * psinfo_lock protects "psinfo" during calls to
73 * pstore_register(), pstore_unregister(), and
74 * the filesystem mount/unmount routines.
75 */
76static DEFINE_MUTEX(psinfo_lock);
77struct pstore_info *psinfo;
78
79static char *backend;
80module_param(backend, charp, 0444);
81MODULE_PARM_DESC(backend, "specific backend to use");
82
83static char *compress =
84#ifdef CONFIG_PSTORE_COMPRESS_DEFAULT
85 CONFIG_PSTORE_COMPRESS_DEFAULT;
86#else
87 NULL;
88#endif
89module_param(compress, charp, 0444);
90MODULE_PARM_DESC(compress, "compression to use");
91
92/* How much of the kernel log to snapshot */
93unsigned long kmsg_bytes = CONFIG_PSTORE_DEFAULT_KMSG_BYTES;
94module_param(kmsg_bytes, ulong, 0444);
95MODULE_PARM_DESC(kmsg_bytes, "amount of kernel log to snapshot (in bytes)");
96
97/* Compression parameters */
98static struct crypto_comp *tfm;
99
100struct pstore_zbackend {
101 int (*zbufsize)(size_t size);
102 const char *name;
103};
104
105static char *big_oops_buf;
106static size_t big_oops_buf_sz;
107
108void pstore_set_kmsg_bytes(int bytes)
109{
110 kmsg_bytes = bytes;
111}
112
113/* Tag each group of saved records with a sequence number */
114static int oopscount;
115
116const char *pstore_type_to_name(enum pstore_type_id type)
117{
118 BUILD_BUG_ON(ARRAY_SIZE(pstore_type_names) != PSTORE_TYPE_MAX);
119
120 if (WARN_ON_ONCE(type >= PSTORE_TYPE_MAX))
121 return "unknown";
122
123 return pstore_type_names[type];
124}
125EXPORT_SYMBOL_GPL(pstore_type_to_name);
126
127enum pstore_type_id pstore_name_to_type(const char *name)
128{
129 int i;
130
131 for (i = 0; i < PSTORE_TYPE_MAX; i++) {
132 if (!strcmp(pstore_type_names[i], name))
133 return i;
134 }
135
136 return PSTORE_TYPE_MAX;
137}
138EXPORT_SYMBOL_GPL(pstore_name_to_type);
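
/*
 * Example (illustrative): the two helpers above are inverses, so
 * pstore_name_to_type("dmesg") returns PSTORE_TYPE_DMESG and
 * pstore_type_to_name(PSTORE_TYPE_DMESG) returns "dmesg"; unknown
 * names map to PSTORE_TYPE_MAX and out-of-range ids to "unknown".
 */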
139
140static void pstore_timer_kick(void)
141{
142 if (pstore_update_ms < 0)
143 return;
144
145 mod_timer(&pstore_timer, jiffies + msecs_to_jiffies(pstore_update_ms));
146}
147
148static bool pstore_cannot_block_path(enum kmsg_dump_reason reason)
149{
150 /*
151 * In case of NMI path, pstore shouldn't be blocked
152 * regardless of reason.
153 */
154 if (in_nmi())
155 return true;
156
157 switch (reason) {
	/* In the panic case, other CPUs are stopped by smp_send_stop(). */
159 case KMSG_DUMP_PANIC:
160 /*
161 * Emergency restart shouldn't be blocked by spinning on
162 * pstore_info::buf_lock.
163 */
164 case KMSG_DUMP_EMERG:
165 return true;
166 default:
167 return false;
168 }
169}
170
171#if IS_ENABLED(CONFIG_PSTORE_DEFLATE_COMPRESS)
172static int zbufsize_deflate(size_t size)
173{
174 size_t cmpr;
175
176 switch (size) {
177 /* buffer range for efivars */
178 case 1000 ... 2000:
179 cmpr = 56;
180 break;
181 case 2001 ... 3000:
182 cmpr = 54;
183 break;
184 case 3001 ... 3999:
185 cmpr = 52;
186 break;
187 /* buffer range for nvram, erst */
188 case 4000 ... 10000:
189 cmpr = 45;
190 break;
191 default:
192 cmpr = 60;
193 break;
194 }
195
196 return (size * 100) / cmpr;
197}
198#endif
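
/*
 * Worked example (hypothetical size): for a 4096-byte record, which falls
 * in the nvram/erst range above, cmpr = 45 and zbufsize_deflate() returns
 * 4096 * 100 / 45 = 9102, i.e. roughly 9 KiB of log text is staged on the
 * assumption that it deflates to at most 45% of its original size.
 */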
199
200#if IS_ENABLED(CONFIG_PSTORE_LZO_COMPRESS)
201static int zbufsize_lzo(size_t size)
202{
203 return lzo1x_worst_compress(size);
204}
205#endif
206
207#if IS_ENABLED(CONFIG_PSTORE_LZ4_COMPRESS) || IS_ENABLED(CONFIG_PSTORE_LZ4HC_COMPRESS)
208static int zbufsize_lz4(size_t size)
209{
210 return LZ4_compressBound(size);
211}
212#endif
213
214#if IS_ENABLED(CONFIG_PSTORE_842_COMPRESS)
215static int zbufsize_842(size_t size)
216{
217 return size;
218}
219#endif
220
221#if IS_ENABLED(CONFIG_PSTORE_ZSTD_COMPRESS)
222static int zbufsize_zstd(size_t size)
223{
224 return zstd_compress_bound(size);
225}
226#endif
227
228static const struct pstore_zbackend *zbackend __ro_after_init;
229
230static const struct pstore_zbackend zbackends[] = {
231#if IS_ENABLED(CONFIG_PSTORE_DEFLATE_COMPRESS)
232 {
233 .zbufsize = zbufsize_deflate,
234 .name = "deflate",
235 },
236#endif
237#if IS_ENABLED(CONFIG_PSTORE_LZO_COMPRESS)
238 {
239 .zbufsize = zbufsize_lzo,
240 .name = "lzo",
241 },
242#endif
243#if IS_ENABLED(CONFIG_PSTORE_LZ4_COMPRESS)
244 {
245 .zbufsize = zbufsize_lz4,
246 .name = "lz4",
247 },
248#endif
249#if IS_ENABLED(CONFIG_PSTORE_LZ4HC_COMPRESS)
250 {
251 .zbufsize = zbufsize_lz4,
252 .name = "lz4hc",
253 },
254#endif
255#if IS_ENABLED(CONFIG_PSTORE_842_COMPRESS)
256 {
257 .zbufsize = zbufsize_842,
258 .name = "842",
259 },
260#endif
261#if IS_ENABLED(CONFIG_PSTORE_ZSTD_COMPRESS)
262 {
263 .zbufsize = zbufsize_zstd,
264 .name = "zstd",
265 },
266#endif
267 { }
268};
269
270static int pstore_compress(const void *in, void *out,
271 unsigned int inlen, unsigned int outlen)
272{
273 int ret;
274
275 if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS))
276 return -EINVAL;
277
278 ret = crypto_comp_compress(tfm, in, inlen, out, &outlen);
279 if (ret) {
280 pr_err("crypto_comp_compress failed, ret = %d!\n", ret);
281 return ret;
282 }
283
284 return outlen;
285}
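
/*
 * pstore_compress() returns the compressed length on success and a negative
 * value on failure (or when CONFIG_PSTORE_COMPRESS is disabled); the dump
 * path below treats any result <= 0 as "store the data uncompressed".
 */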
286
287static void allocate_buf_for_compression(void)
288{
289 struct crypto_comp *ctx;
290 int size;
291 char *buf;
292
293 /* Skip if not built-in or compression backend not selected yet. */
294 if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS) || !zbackend)
295 return;
296
297 /* Skip if no pstore backend yet or compression init already done. */
298 if (!psinfo || tfm)
299 return;
300
301 if (!crypto_has_comp(zbackend->name, 0, 0)) {
302 pr_err("Unknown compression: %s\n", zbackend->name);
303 return;
304 }
305
306 size = zbackend->zbufsize(psinfo->bufsize);
307 if (size <= 0) {
308 pr_err("Invalid compression size for %s: %d\n",
309 zbackend->name, size);
310 return;
311 }
312
313 buf = kmalloc(size, GFP_KERNEL);
314 if (!buf) {
315 pr_err("Failed %d byte compression buffer allocation for: %s\n",
316 size, zbackend->name);
317 return;
318 }
319
320 ctx = crypto_alloc_comp(zbackend->name, 0, 0);
321 if (IS_ERR_OR_NULL(ctx)) {
322 kfree(buf);
323 pr_err("crypto_alloc_comp('%s') failed: %ld\n", zbackend->name,
324 PTR_ERR(ctx));
325 return;
326 }
327
328 /* A non-NULL big_oops_buf indicates compression is available. */
329 tfm = ctx;
330 big_oops_buf_sz = size;
331 big_oops_buf = buf;
332
333 pr_info("Using crash dump compression: %s\n", zbackend->name);
334}
335
336static void free_buf_for_compression(void)
337{
338 if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && tfm) {
339 crypto_free_comp(tfm);
340 tfm = NULL;
341 }
342 kfree(big_oops_buf);
343 big_oops_buf = NULL;
344 big_oops_buf_sz = 0;
345}
346
/*
 * Called when compression fails. The printk buffer has already been
 * fetched for compression, and fetching it again would advance the
 * printk iterator and return stale contents. Instead, copy the most
 * recent messages from big_oops_buf into psinfo->buf, truncating from
 * the front if they do not fit.
 */
354static size_t copy_kmsg_to_buffer(int hsize, size_t len)
355{
356 size_t total_len;
357 size_t diff;
358
359 total_len = hsize + len;
360
361 if (total_len > psinfo->bufsize) {
362 diff = total_len - psinfo->bufsize + hsize;
363 memcpy(psinfo->buf, big_oops_buf, hsize);
364 memcpy(psinfo->buf + hsize, big_oops_buf + diff,
365 psinfo->bufsize - hsize);
366 total_len = psinfo->bufsize;
367 } else
368 memcpy(psinfo->buf, big_oops_buf, total_len);
369
370 return total_len;
371}
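
/*
 * Worked example (hypothetical sizes): with psinfo->bufsize = 4096,
 * hsize = 50 and len = 8000, total_len = 8050 does not fit, so
 * diff = 8050 - 4096 + 50 = 4004: the 50-byte header is kept and the last
 * 4046 bytes of the message text (big_oops_buf + 4004 onward) fill the
 * rest of psinfo->buf, discarding the oldest lines.
 */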
372
373void pstore_record_init(struct pstore_record *record,
374 struct pstore_info *psinfo)
375{
376 memset(record, 0, sizeof(*record));
377
378 record->psi = psinfo;
379
380 /* Report zeroed timestamp if called before timekeeping has resumed. */
381 record->time = ns_to_timespec64(ktime_get_real_fast_ns());
382}
383
384/*
385 * callback from kmsg_dump. Save as much as we can (up to kmsg_bytes) from the
386 * end of the buffer.
387 */
388static void pstore_dump(struct kmsg_dumper *dumper,
389 enum kmsg_dump_reason reason)
390{
391 struct kmsg_dump_iter iter;
392 unsigned long total = 0;
393 const char *why;
394 unsigned int part = 1;
395 unsigned long flags = 0;
396 int saved_ret = 0;
397 int ret;
398
399 why = kmsg_dump_reason_str(reason);
400
401 if (pstore_cannot_block_path(reason)) {
402 if (!spin_trylock_irqsave(&psinfo->buf_lock, flags)) {
403 pr_err("dump skipped in %s path because of concurrent dump\n",
404 in_nmi() ? "NMI" : why);
405 return;
406 }
407 } else {
408 spin_lock_irqsave(&psinfo->buf_lock, flags);
409 }
410
411 kmsg_dump_rewind(&iter);
412
413 oopscount++;
414 while (total < kmsg_bytes) {
415 char *dst;
416 size_t dst_size;
417 int header_size;
418 int zipped_len = -1;
419 size_t dump_size;
420 struct pstore_record record;
421
422 pstore_record_init(&record, psinfo);
423 record.type = PSTORE_TYPE_DMESG;
424 record.count = oopscount;
425 record.reason = reason;
426 record.part = part;
427 record.buf = psinfo->buf;
428
429 if (big_oops_buf) {
430 dst = big_oops_buf;
431 dst_size = big_oops_buf_sz;
432 } else {
433 dst = psinfo->buf;
434 dst_size = psinfo->bufsize;
435 }
436
437 /* Write dump header. */
438 header_size = snprintf(dst, dst_size, "%s#%d Part%u\n", why,
439 oopscount, part);
440 dst_size -= header_size;
441
442 /* Write dump contents. */
443 if (!kmsg_dump_get_buffer(&iter, true, dst + header_size,
444 dst_size, &dump_size))
445 break;
446
447 if (big_oops_buf) {
448 zipped_len = pstore_compress(dst, psinfo->buf,
449 header_size + dump_size,
450 psinfo->bufsize);
451
452 if (zipped_len > 0) {
453 record.compressed = true;
454 record.size = zipped_len;
455 } else {
456 record.size = copy_kmsg_to_buffer(header_size,
457 dump_size);
458 }
459 } else {
460 record.size = header_size + dump_size;
461 }
462
463 ret = psinfo->write(&record);
464 if (ret == 0 && reason == KMSG_DUMP_OOPS) {
465 pstore_new_entry = 1;
466 pstore_timer_kick();
467 } else {
468 /* Preserve only the first non-zero returned value. */
469 if (!saved_ret)
470 saved_ret = ret;
471 }
472
473 total += record.size;
474 part++;
475 }
476 spin_unlock_irqrestore(&psinfo->buf_lock, flags);
477
478 if (saved_ret) {
479 pr_err_once("backend (%s) writing error (%d)\n", psinfo->name,
480 saved_ret);
481 }
482}
483
484static struct kmsg_dumper pstore_dumper = {
485 .dump = pstore_dump,
486};
487
488/*
489 * Register with kmsg_dump to save last part of console log on panic.
490 */
491static void pstore_register_kmsg(void)
492{
493 kmsg_dump_register(&pstore_dumper);
494}
495
496static void pstore_unregister_kmsg(void)
497{
498 kmsg_dump_unregister(&pstore_dumper);
499}
500
501#ifdef CONFIG_PSTORE_CONSOLE
502static void pstore_console_write(struct console *con, const char *s, unsigned c)
503{
504 struct pstore_record record;
505
506 if (!c)
507 return;
508
509 pstore_record_init(&record, psinfo);
510 record.type = PSTORE_TYPE_CONSOLE;
511
512 record.buf = (char *)s;
513 record.size = c;
514 psinfo->write(&record);
515}
516
517static struct console pstore_console = {
518 .write = pstore_console_write,
519 .index = -1,
520};
521
522static void pstore_register_console(void)
523{
524 /* Show which backend is going to get console writes. */
525 strscpy(pstore_console.name, psinfo->name,
526 sizeof(pstore_console.name));
527 /*
528 * Always initialize flags here since prior unregister_console()
529 * calls may have changed settings (specifically CON_ENABLED).
530 */
531 pstore_console.flags = CON_PRINTBUFFER | CON_ENABLED | CON_ANYTIME;
532 register_console(&pstore_console);
533}
534
535static void pstore_unregister_console(void)
536{
537 unregister_console(&pstore_console);
538}
539#else
540static void pstore_register_console(void) {}
541static void pstore_unregister_console(void) {}
542#endif
543
544static int pstore_write_user_compat(struct pstore_record *record,
545 const char __user *buf)
546{
547 int ret = 0;
548
549 if (record->buf)
550 return -EINVAL;
551
552 record->buf = memdup_user(buf, record->size);
553 if (IS_ERR(record->buf)) {
554 ret = PTR_ERR(record->buf);
555 goto out;
556 }
557
558 ret = record->psi->write(record);
559
560 kfree(record->buf);
561out:
562 record->buf = NULL;
563
564 return unlikely(ret < 0) ? ret : record->size;
565}
566
567/*
568 * platform specific persistent storage driver registers with
569 * us here. If pstore is already mounted, call the platform
570 * read function right away to populate the file system. If not
571 * then the pstore mount code will call us later to fill out
572 * the file system.
573 */
574int pstore_register(struct pstore_info *psi)
575{
576 if (backend && strcmp(backend, psi->name)) {
577 pr_warn("backend '%s' already in use: ignoring '%s'\n",
578 backend, psi->name);
579 return -EBUSY;
580 }
581
582 /* Sanity check flags. */
583 if (!psi->flags) {
584 pr_warn("backend '%s' must support at least one frontend\n",
585 psi->name);
586 return -EINVAL;
587 }
588
589 /* Check for required functions. */
590 if (!psi->read || !psi->write) {
591 pr_warn("backend '%s' must implement read() and write()\n",
592 psi->name);
593 return -EINVAL;
594 }
595
596 mutex_lock(&psinfo_lock);
597 if (psinfo) {
598 pr_warn("backend '%s' already loaded: ignoring '%s'\n",
599 psinfo->name, psi->name);
600 mutex_unlock(&psinfo_lock);
601 return -EBUSY;
602 }
603
604 if (!psi->write_user)
605 psi->write_user = pstore_write_user_compat;
606 psinfo = psi;
607 mutex_init(&psinfo->read_mutex);
608 spin_lock_init(&psinfo->buf_lock);
609
610 if (psi->flags & PSTORE_FLAGS_DMESG)
611 allocate_buf_for_compression();
612
613 pstore_get_records(0);
614
615 if (psi->flags & PSTORE_FLAGS_DMESG) {
616 pstore_dumper.max_reason = psinfo->max_reason;
617 pstore_register_kmsg();
618 }
619 if (psi->flags & PSTORE_FLAGS_CONSOLE)
620 pstore_register_console();
621 if (psi->flags & PSTORE_FLAGS_FTRACE)
622 pstore_register_ftrace();
623 if (psi->flags & PSTORE_FLAGS_PMSG)
624 pstore_register_pmsg();
625
626 /* Start watching for new records, if desired. */
627 pstore_timer_kick();
628
629 /*
630 * Update the module parameter backend, so it is visible
631 * through /sys/module/pstore/parameters/backend
632 */
633 backend = kstrdup(psi->name, GFP_KERNEL);
634
635 pr_info("Registered %s as persistent store backend\n", psi->name);
636
637 mutex_unlock(&psinfo_lock);
638 return 0;
639}
640EXPORT_SYMBOL_GPL(pstore_register);
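
/*
 * Minimal registration sketch (illustrative only, not a real backend):
 *
 *	static struct pstore_info example_psi = {
 *		.owner		= THIS_MODULE,
 *		.name		= "example",
 *		.flags		= PSTORE_FLAGS_DMESG,
 *		.max_reason	= KMSG_DUMP_OOPS,
 *		.read		= example_read,
 *		.write		= example_write,
 *	};
 *
 * The backend also supplies a bounce buffer through .buf/.bufsize before
 * calling pstore_register(&example_psi); pstore_unregister() reverses it.
 */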
641
642void pstore_unregister(struct pstore_info *psi)
643{
644 /* It's okay to unregister nothing. */
645 if (!psi)
646 return;
647
648 mutex_lock(&psinfo_lock);
649
650 /* Only one backend can be registered at a time. */
651 if (WARN_ON(psi != psinfo)) {
652 mutex_unlock(&psinfo_lock);
653 return;
654 }
655
656 /* Unregister all callbacks. */
657 if (psi->flags & PSTORE_FLAGS_PMSG)
658 pstore_unregister_pmsg();
659 if (psi->flags & PSTORE_FLAGS_FTRACE)
660 pstore_unregister_ftrace();
661 if (psi->flags & PSTORE_FLAGS_CONSOLE)
662 pstore_unregister_console();
663 if (psi->flags & PSTORE_FLAGS_DMESG)
664 pstore_unregister_kmsg();
665
666 /* Stop timer and make sure all work has finished. */
667 del_timer_sync(&pstore_timer);
668 flush_work(&pstore_work);
669
670 /* Remove all backend records from filesystem tree. */
671 pstore_put_backend_records(psi);
672
673 free_buf_for_compression();
674
675 psinfo = NULL;
676 kfree(backend);
677 backend = NULL;
678
679 pr_info("Unregistered %s as persistent store backend\n", psi->name);
680 mutex_unlock(&psinfo_lock);
681}
682EXPORT_SYMBOL_GPL(pstore_unregister);
683
684static void decompress_record(struct pstore_record *record)
685{
686 int ret;
687 int unzipped_len;
688 char *unzipped, *workspace;
689
690 if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS) || !record->compressed)
691 return;
692
	/* Only PSTORE_TYPE_DMESG supports compression. */
694 if (record->type != PSTORE_TYPE_DMESG) {
695 pr_warn("ignored compressed record type %d\n", record->type);
696 return;
697 }
698
699 /* Missing compression buffer means compression was not initialized. */
700 if (!big_oops_buf) {
701 pr_warn("no decompression method initialized!\n");
702 return;
703 }
704
705 /* Allocate enough space to hold max decompression and ECC. */
706 unzipped_len = big_oops_buf_sz;
707 workspace = kmalloc(unzipped_len + record->ecc_notice_size,
708 GFP_KERNEL);
709 if (!workspace)
710 return;
711
712 /* After decompression "unzipped_len" is almost certainly smaller. */
713 ret = crypto_comp_decompress(tfm, record->buf, record->size,
714 workspace, &unzipped_len);
715 if (ret) {
716 pr_err("crypto_comp_decompress failed, ret = %d!\n", ret);
717 kfree(workspace);
718 return;
719 }
720
721 /* Append ECC notice to decompressed buffer. */
722 memcpy(workspace + unzipped_len, record->buf + record->size,
723 record->ecc_notice_size);
724
	/* Copy decompressed contents into a minimum-sized allocation. */
726 unzipped = kmemdup(workspace, unzipped_len + record->ecc_notice_size,
727 GFP_KERNEL);
728 kfree(workspace);
729 if (!unzipped)
730 return;
731
732 /* Swap out compressed contents with decompressed contents. */
733 kfree(record->buf);
734 record->buf = unzipped;
735 record->size = unzipped_len;
736 record->compressed = false;
737}
738
739/*
740 * Read all the records from one persistent store backend. Create
741 * files in our filesystem. Don't warn about -EEXIST errors
742 * when we are re-scanning the backing store looking to add new
743 * error records.
744 */
745void pstore_get_backend_records(struct pstore_info *psi,
746 struct dentry *root, int quiet)
747{
748 int failed = 0;
749 unsigned int stop_loop = 65536;
750
751 if (!psi || !root)
752 return;
753
754 mutex_lock(&psi->read_mutex);
755 if (psi->open && psi->open(psi))
756 goto out;
757
758 /*
759 * Backend callback read() allocates record.buf. decompress_record()
760 * may reallocate record.buf. On success, pstore_mkfile() will keep
761 * the record.buf, so free it only on failure.
762 */
763 for (; stop_loop; stop_loop--) {
764 struct pstore_record *record;
765 int rc;
766
767 record = kzalloc(sizeof(*record), GFP_KERNEL);
768 if (!record) {
769 pr_err("out of memory creating record\n");
770 break;
771 }
772 pstore_record_init(record, psi);
773
774 record->size = psi->read(record);
775
776 /* No more records left in backend? */
777 if (record->size <= 0) {
778 kfree(record);
779 break;
780 }
781
782 decompress_record(record);
783 rc = pstore_mkfile(root, record);
784 if (rc) {
785 /* pstore_mkfile() did not take record, so free it. */
786 kfree(record->buf);
787 kfree(record->priv);
788 kfree(record);
789 if (rc != -EEXIST || !quiet)
790 failed++;
791 }
792 }
793 if (psi->close)
794 psi->close(psi);
795out:
796 mutex_unlock(&psi->read_mutex);
797
798 if (failed)
799 pr_warn("failed to create %d record(s) from '%s'\n",
800 failed, psi->name);
801 if (!stop_loop)
802 pr_err("looping? Too many records seen from '%s'\n",
803 psi->name);
804}
805
806static void pstore_dowork(struct work_struct *work)
807{
808 pstore_get_records(1);
809}
810
811static void pstore_timefunc(struct timer_list *unused)
812{
813 if (pstore_new_entry) {
814 pstore_new_entry = 0;
815 schedule_work(&pstore_work);
816 }
817
818 pstore_timer_kick();
819}
820
821static void __init pstore_choose_compression(void)
822{
823 const struct pstore_zbackend *step;
824
825 if (!compress)
826 return;
827
828 for (step = zbackends; step->name; step++) {
829 if (!strcmp(compress, step->name)) {
830 zbackend = step;
831 return;
832 }
833 }
834}
835
836static int __init pstore_init(void)
837{
838 int ret;
839
840 pstore_choose_compression();
841
842 /*
843 * Check if any pstore backends registered earlier but did not
844 * initialize compression because crypto was not ready. If so,
845 * initialize compression now.
846 */
847 allocate_buf_for_compression();
848
849 ret = pstore_init_fs();
850 if (ret)
851 free_buf_for_compression();
852
853 return ret;
854}
855late_initcall(pstore_init);
856
857static void __exit pstore_exit(void)
858{
859 pstore_exit_fs();
860}
861module_exit(pstore_exit)
862
863MODULE_AUTHOR("Tony Luck <tony.luck@intel.com>");
864MODULE_LICENSE("GPL");
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Persistent Storage - platform driver interface parts.
4 *
5 * Copyright (C) 2007-2008 Google, Inc.
6 * Copyright (C) 2010 Intel Corporation <tony.luck@intel.com>
7 */
8
9#define pr_fmt(fmt) "pstore: " fmt
10
11#include <linux/atomic.h>
12#include <linux/types.h>
13#include <linux/errno.h>
14#include <linux/init.h>
15#include <linux/kmsg_dump.h>
16#include <linux/console.h>
17#include <linux/mm.h>
18#include <linux/module.h>
19#include <linux/pstore.h>
20#include <linux/string.h>
21#include <linux/timer.h>
22#include <linux/slab.h>
23#include <linux/uaccess.h>
24#include <linux/jiffies.h>
25#include <linux/vmalloc.h>
26#include <linux/workqueue.h>
27#include <linux/zlib.h>
28
29#include "internal.h"
30
/*
 * We defer making "oops" entries appear in pstore until we can see
 * whether the system is still running well enough for someone to
 * read the entry.
 */
36static int pstore_update_ms = -1;
37module_param_named(update_ms, pstore_update_ms, int, 0600);
38MODULE_PARM_DESC(update_ms, "milliseconds before pstore updates its content "
39 "(default is -1, which means runtime updates are disabled; "
40 "enabling this option may not be safe; it may lead to further "
41 "corruption on Oopses)");
42
43/* Names should be in the same order as the enum pstore_type_id */
44static const char * const pstore_type_names[] = {
45 "dmesg",
46 "mce",
47 "console",
48 "ftrace",
49 "rtas",
50 "powerpc-ofw",
51 "powerpc-common",
52 "pmsg",
53 "powerpc-opal",
54};
55
56static int pstore_new_entry;
57
58static void pstore_timefunc(struct timer_list *);
59static DEFINE_TIMER(pstore_timer, pstore_timefunc);
60
61static void pstore_dowork(struct work_struct *);
62static DECLARE_WORK(pstore_work, pstore_dowork);
63
64/*
65 * psinfo_lock protects "psinfo" during calls to
66 * pstore_register(), pstore_unregister(), and
67 * the filesystem mount/unmount routines.
68 */
69static DEFINE_MUTEX(psinfo_lock);
70struct pstore_info *psinfo;
71
72static char *backend;
73module_param(backend, charp, 0444);
74MODULE_PARM_DESC(backend, "specific backend to use");
75
76/*
77 * pstore no longer implements compression via the crypto API, and only
78 * supports zlib deflate compression implemented using the zlib library
79 * interface. This removes additional complexity which is hard to justify for a
80 * diagnostic facility that has to operate in conditions where the system may
81 * have become unstable. Zlib deflate is comparatively small in terms of code
82 * size, and compresses ASCII text comparatively well. In terms of compression
83 * speed, deflate is not the best performer but for recording the log output on
84 * a kernel panic, this is not considered critical.
85 *
86 * The only remaining arguments supported by the compress= module parameter are
87 * 'deflate' and 'none'. To retain compatibility with existing installations,
88 * all other values are logged and replaced with 'deflate'.
89 */
90static char *compress = "deflate";
91module_param(compress, charp, 0444);
92MODULE_PARM_DESC(compress, "compression to use");
93
94/* How much of the kernel log to snapshot */
95unsigned long kmsg_bytes = CONFIG_PSTORE_DEFAULT_KMSG_BYTES;
96module_param(kmsg_bytes, ulong, 0444);
97MODULE_PARM_DESC(kmsg_bytes, "amount of kernel log to snapshot (in bytes)");
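
/*
 * Illustrative usage: kmsg_bytes caps how much of the kernel log one dump
 * may save across all of its parts, e.g. booting with
 * "pstore.kmsg_bytes=16384" limits each crash dump to 16 KiB (the parameter
 * is read-only at runtime, mode 0444 above).
 */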
98
99static void *compress_workspace;
100
101/*
102 * Compression is only used for dmesg output, which consists of low-entropy
103 * ASCII text, and so we can assume worst-case 60%.
104 */
105#define DMESG_COMP_PERCENT 60
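
/*
 * Example with a hypothetical psinfo->bufsize of 4096 bytes:
 * allocate_buf_for_compression() below sizes big_oops_buf at
 * 4096 * 100 / 60 = 6826 bytes, i.e. up to ~6.7 KiB of dmesg text is staged
 * per record on the assumption that it deflates to at most 60% of its size
 * and therefore fits the 4 KiB backend record.
 */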
106
107static char *big_oops_buf;
108static size_t max_compressed_size;
109
110void pstore_set_kmsg_bytes(int bytes)
111{
112 kmsg_bytes = bytes;
113}
114
115/* Tag each group of saved records with a sequence number */
116static int oopscount;
117
118const char *pstore_type_to_name(enum pstore_type_id type)
119{
120 BUILD_BUG_ON(ARRAY_SIZE(pstore_type_names) != PSTORE_TYPE_MAX);
121
122 if (WARN_ON_ONCE(type >= PSTORE_TYPE_MAX))
123 return "unknown";
124
125 return pstore_type_names[type];
126}
127EXPORT_SYMBOL_GPL(pstore_type_to_name);
128
129enum pstore_type_id pstore_name_to_type(const char *name)
130{
131 int i;
132
133 for (i = 0; i < PSTORE_TYPE_MAX; i++) {
134 if (!strcmp(pstore_type_names[i], name))
135 return i;
136 }
137
138 return PSTORE_TYPE_MAX;
139}
140EXPORT_SYMBOL_GPL(pstore_name_to_type);
141
142static void pstore_timer_kick(void)
143{
144 if (pstore_update_ms < 0)
145 return;
146
147 mod_timer(&pstore_timer, jiffies + msecs_to_jiffies(pstore_update_ms));
148}
149
150static bool pstore_cannot_block_path(enum kmsg_dump_reason reason)
151{
152 /*
153 * In case of NMI path, pstore shouldn't be blocked
154 * regardless of reason.
155 */
156 if (in_nmi())
157 return true;
158
159 switch (reason) {
	/* In the panic case, other CPUs are stopped by smp_send_stop(). */
161 case KMSG_DUMP_PANIC:
162 /*
163 * Emergency restart shouldn't be blocked by spinning on
164 * pstore_info::buf_lock.
165 */
166 case KMSG_DUMP_EMERG:
167 return true;
168 default:
169 return false;
170 }
171}
172
173static int pstore_compress(const void *in, void *out,
174 unsigned int inlen, unsigned int outlen)
175{
176 struct z_stream_s zstream = {
177 .next_in = in,
178 .avail_in = inlen,
179 .next_out = out,
180 .avail_out = outlen,
181 .workspace = compress_workspace,
182 };
183 int ret;
184
185 if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS))
186 return -EINVAL;
187
188 ret = zlib_deflateInit2(&zstream, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
189 -MAX_WBITS, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY);
190 if (ret != Z_OK)
191 return -EINVAL;
192
193 ret = zlib_deflate(&zstream, Z_FINISH);
194 if (ret != Z_STREAM_END)
195 return -EINVAL;
196
197 ret = zlib_deflateEnd(&zstream);
198 if (ret != Z_OK)
199 pr_warn_once("zlib_deflateEnd() failed: %d\n", ret);
200
201 return zstream.total_out;
202}
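
/*
 * Note: the negative windowBits value (-MAX_WBITS) requests a raw deflate
 * stream with no zlib header or checksum; the matching reader in
 * pstore_get_backend_records() initializes inflate with -DEF_WBITS. On
 * success the compressed length (zstream.total_out) is returned, otherwise
 * -EINVAL, and the caller falls back to storing the data uncompressed.
 */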
203
204static void allocate_buf_for_compression(void)
205{
206 size_t compressed_size;
207 char *buf;
208
209 /* Skip if not built-in or compression disabled. */
210 if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS) || !compress ||
211 !strcmp(compress, "none")) {
212 compress = NULL;
213 return;
214 }
215
216 if (strcmp(compress, "deflate")) {
217 pr_err("Unsupported compression '%s', falling back to deflate\n",
218 compress);
219 compress = "deflate";
220 }
221
222 /*
223 * The compression buffer only needs to be as large as the maximum
224 * uncompressed record size, since any record that would be expanded by
225 * compression is just stored uncompressed.
226 */
227 compressed_size = (psinfo->bufsize * 100) / DMESG_COMP_PERCENT;
228 buf = kvzalloc(compressed_size, GFP_KERNEL);
229 if (!buf) {
230 pr_err("Failed %zu byte compression buffer allocation for: %s\n",
231 psinfo->bufsize, compress);
232 return;
233 }
234
235 compress_workspace =
236 vmalloc(zlib_deflate_workspacesize(MAX_WBITS, DEF_MEM_LEVEL));
237 if (!compress_workspace) {
238 pr_err("Failed to allocate zlib deflate workspace\n");
239 kvfree(buf);
240 return;
241 }
242
243 /* A non-NULL big_oops_buf indicates compression is available. */
244 big_oops_buf = buf;
245 max_compressed_size = compressed_size;
246
247 pr_info("Using crash dump compression: %s\n", compress);
248}
249
250static void free_buf_for_compression(void)
251{
252 if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && compress_workspace) {
253 vfree(compress_workspace);
254 compress_workspace = NULL;
255 }
256
257 kvfree(big_oops_buf);
258 big_oops_buf = NULL;
259 max_compressed_size = 0;
260}
261
262void pstore_record_init(struct pstore_record *record,
263 struct pstore_info *psinfo)
264{
265 memset(record, 0, sizeof(*record));
266
267 record->psi = psinfo;
268
269 /* Report zeroed timestamp if called before timekeeping has resumed. */
270 record->time = ns_to_timespec64(ktime_get_real_fast_ns());
271}
272
273/*
274 * callback from kmsg_dump. Save as much as we can (up to kmsg_bytes) from the
275 * end of the buffer.
276 */
277static void pstore_dump(struct kmsg_dumper *dumper,
278 struct kmsg_dump_detail *detail)
279{
280 struct kmsg_dump_iter iter;
281 unsigned long total = 0;
282 const char *why;
283 unsigned int part = 1;
284 unsigned long flags = 0;
285 int saved_ret = 0;
286 int ret;
287
288 why = kmsg_dump_reason_str(detail->reason);
289
290 if (pstore_cannot_block_path(detail->reason)) {
291 if (!raw_spin_trylock_irqsave(&psinfo->buf_lock, flags)) {
292 pr_err("dump skipped in %s path because of concurrent dump\n",
293 in_nmi() ? "NMI" : why);
294 return;
295 }
296 } else {
297 raw_spin_lock_irqsave(&psinfo->buf_lock, flags);
298 }
299
300 kmsg_dump_rewind(&iter);
301
302 oopscount++;
303 while (total < kmsg_bytes) {
304 char *dst;
305 size_t dst_size;
306 int header_size;
307 int zipped_len = -1;
308 size_t dump_size;
309 struct pstore_record record;
310
311 pstore_record_init(&record, psinfo);
312 record.type = PSTORE_TYPE_DMESG;
313 record.count = oopscount;
314 record.reason = detail->reason;
315 record.part = part;
316 record.buf = psinfo->buf;
317
318 dst = big_oops_buf ?: psinfo->buf;
319 dst_size = max_compressed_size ?: psinfo->bufsize;
320
321 /* Write dump header. */
322 header_size = snprintf(dst, dst_size, "%s#%d Part%u\n", why,
323 oopscount, part);
324 dst_size -= header_size;
325
326 /* Write dump contents. */
327 if (!kmsg_dump_get_buffer(&iter, true, dst + header_size,
328 dst_size, &dump_size))
329 break;
330
331 if (big_oops_buf) {
332 zipped_len = pstore_compress(dst, psinfo->buf,
333 header_size + dump_size,
334 psinfo->bufsize);
335
336 if (zipped_len > 0) {
337 record.compressed = true;
338 record.size = zipped_len;
339 } else {
340 /*
341 * Compression failed, so the buffer is most
342 * likely filled with binary data that does not
343 * compress as well as ASCII text. Copy as much
344 * of the uncompressed data as possible into
345 * the pstore record, and discard the rest.
346 */
347 record.size = psinfo->bufsize;
348 memcpy(psinfo->buf, dst, psinfo->bufsize);
349 }
350 } else {
351 record.size = header_size + dump_size;
352 }
353
354 ret = psinfo->write(&record);
355 if (ret == 0 && detail->reason == KMSG_DUMP_OOPS) {
356 pstore_new_entry = 1;
357 pstore_timer_kick();
358 } else {
359 /* Preserve only the first non-zero returned value. */
360 if (!saved_ret)
361 saved_ret = ret;
362 }
363
364 total += record.size;
365 part++;
366 }
367 raw_spin_unlock_irqrestore(&psinfo->buf_lock, flags);
368
369 if (saved_ret) {
370 pr_err_once("backend (%s) writing error (%d)\n", psinfo->name,
371 saved_ret);
372 }
373}
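
/*
 * Dumps larger than one backend record are split into consecutive records
 * tagged "Part1", "Part2", and so on; Part1 holds the newest messages
 * because the iterator walks backwards from the end of the log. The loop
 * stops once kmsg_bytes of text have been saved or the log is exhausted.
 */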
374
375static struct kmsg_dumper pstore_dumper = {
376 .dump = pstore_dump,
377};
378
379/*
380 * Register with kmsg_dump to save last part of console log on panic.
381 */
382static void pstore_register_kmsg(void)
383{
384 kmsg_dump_register(&pstore_dumper);
385}
386
387static void pstore_unregister_kmsg(void)
388{
389 kmsg_dump_unregister(&pstore_dumper);
390}
391
392#ifdef CONFIG_PSTORE_CONSOLE
393static void pstore_console_write(struct console *con, const char *s, unsigned c)
394{
395 struct pstore_record record;
396
397 if (!c)
398 return;
399
400 pstore_record_init(&record, psinfo);
401 record.type = PSTORE_TYPE_CONSOLE;
402
403 record.buf = (char *)s;
404 record.size = c;
405 psinfo->write(&record);
406}
407
408static struct console pstore_console = {
409 .write = pstore_console_write,
410 .index = -1,
411};
412
413static void pstore_register_console(void)
414{
415 /* Show which backend is going to get console writes. */
416 strscpy(pstore_console.name, psinfo->name,
417 sizeof(pstore_console.name));
418 /*
419 * Always initialize flags here since prior unregister_console()
420 * calls may have changed settings (specifically CON_ENABLED).
421 */
422 pstore_console.flags = CON_PRINTBUFFER | CON_ENABLED | CON_ANYTIME;
423 register_console(&pstore_console);
424}
425
426static void pstore_unregister_console(void)
427{
428 unregister_console(&pstore_console);
429}
430#else
431static void pstore_register_console(void) {}
432static void pstore_unregister_console(void) {}
433#endif
434
435static int pstore_write_user_compat(struct pstore_record *record,
436 const char __user *buf)
437{
438 int ret = 0;
439
440 if (record->buf)
441 return -EINVAL;
442
443 record->buf = vmemdup_user(buf, record->size);
444 if (IS_ERR(record->buf)) {
445 ret = PTR_ERR(record->buf);
446 goto out;
447 }
448
449 ret = record->psi->write(record);
450
451 kvfree(record->buf);
452out:
453 record->buf = NULL;
454
455 return unlikely(ret < 0) ? ret : record->size;
456}
457
458/*
459 * platform specific persistent storage driver registers with
460 * us here. If pstore is already mounted, call the platform
461 * read function right away to populate the file system. If not
462 * then the pstore mount code will call us later to fill out
463 * the file system.
464 */
465int pstore_register(struct pstore_info *psi)
466{
467 char *new_backend;
468
469 if (backend && strcmp(backend, psi->name)) {
470 pr_warn("backend '%s' already in use: ignoring '%s'\n",
471 backend, psi->name);
472 return -EBUSY;
473 }
474
475 /* Sanity check flags. */
476 if (!psi->flags) {
477 pr_warn("backend '%s' must support at least one frontend\n",
478 psi->name);
479 return -EINVAL;
480 }
481
482 /* Check for required functions. */
483 if (!psi->read || !psi->write) {
484 pr_warn("backend '%s' must implement read() and write()\n",
485 psi->name);
486 return -EINVAL;
487 }
488
489 new_backend = kstrdup(psi->name, GFP_KERNEL);
490 if (!new_backend)
491 return -ENOMEM;
492
493 mutex_lock(&psinfo_lock);
494 if (psinfo) {
495 pr_warn("backend '%s' already loaded: ignoring '%s'\n",
496 psinfo->name, psi->name);
497 mutex_unlock(&psinfo_lock);
498 kfree(new_backend);
499 return -EBUSY;
500 }
501
502 if (!psi->write_user)
503 psi->write_user = pstore_write_user_compat;
504 psinfo = psi;
505 mutex_init(&psinfo->read_mutex);
506 raw_spin_lock_init(&psinfo->buf_lock);
507
508 if (psi->flags & PSTORE_FLAGS_DMESG)
509 allocate_buf_for_compression();
510
511 pstore_get_records(0);
512
513 if (psi->flags & PSTORE_FLAGS_DMESG) {
514 pstore_dumper.max_reason = psinfo->max_reason;
515 pstore_register_kmsg();
516 }
517 if (psi->flags & PSTORE_FLAGS_CONSOLE)
518 pstore_register_console();
519 if (psi->flags & PSTORE_FLAGS_FTRACE)
520 pstore_register_ftrace();
521 if (psi->flags & PSTORE_FLAGS_PMSG)
522 pstore_register_pmsg();
523
524 /* Start watching for new records, if desired. */
525 pstore_timer_kick();
526
527 /*
528 * Update the module parameter backend, so it is visible
529 * through /sys/module/pstore/parameters/backend
530 */
531 backend = new_backend;
532
533 pr_info("Registered %s as persistent store backend\n", psi->name);
534
535 mutex_unlock(&psinfo_lock);
536 return 0;
537}
538EXPORT_SYMBOL_GPL(pstore_register);
539
540void pstore_unregister(struct pstore_info *psi)
541{
542 /* It's okay to unregister nothing. */
543 if (!psi)
544 return;
545
546 mutex_lock(&psinfo_lock);
547
548 /* Only one backend can be registered at a time. */
549 if (WARN_ON(psi != psinfo)) {
550 mutex_unlock(&psinfo_lock);
551 return;
552 }
553
554 /* Unregister all callbacks. */
555 if (psi->flags & PSTORE_FLAGS_PMSG)
556 pstore_unregister_pmsg();
557 if (psi->flags & PSTORE_FLAGS_FTRACE)
558 pstore_unregister_ftrace();
559 if (psi->flags & PSTORE_FLAGS_CONSOLE)
560 pstore_unregister_console();
561 if (psi->flags & PSTORE_FLAGS_DMESG)
562 pstore_unregister_kmsg();
563
564 /* Stop timer and make sure all work has finished. */
565 del_timer_sync(&pstore_timer);
566 flush_work(&pstore_work);
567
568 /* Remove all backend records from filesystem tree. */
569 pstore_put_backend_records(psi);
570
571 free_buf_for_compression();
572
573 psinfo = NULL;
574 kfree(backend);
575 backend = NULL;
576
577 pr_info("Unregistered %s as persistent store backend\n", psi->name);
578 mutex_unlock(&psinfo_lock);
579}
580EXPORT_SYMBOL_GPL(pstore_unregister);
581
582static void decompress_record(struct pstore_record *record,
583 struct z_stream_s *zstream)
584{
585 int ret;
586 int unzipped_len;
587 char *unzipped, *workspace;
588 size_t max_uncompressed_size;
589
590 if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS) || !record->compressed)
591 return;
592
	/* Only PSTORE_TYPE_DMESG supports compression. */
594 if (record->type != PSTORE_TYPE_DMESG) {
595 pr_warn("ignored compressed record type %d\n", record->type);
596 return;
597 }
598
599 /* Missing compression buffer means compression was not initialized. */
600 if (!zstream->workspace) {
601 pr_warn("no decompression method initialized!\n");
602 return;
603 }
604
605 ret = zlib_inflateReset(zstream);
606 if (ret != Z_OK) {
607 pr_err("zlib_inflateReset() failed, ret = %d!\n", ret);
608 return;
609 }
610
611 /* Allocate enough space to hold max decompression and ECC. */
612 max_uncompressed_size = 3 * psinfo->bufsize;
613 workspace = kvzalloc(max_uncompressed_size + record->ecc_notice_size,
614 GFP_KERNEL);
615 if (!workspace)
616 return;
617
618 zstream->next_in = record->buf;
619 zstream->avail_in = record->size;
620 zstream->next_out = workspace;
621 zstream->avail_out = max_uncompressed_size;
622
623 ret = zlib_inflate(zstream, Z_FINISH);
624 if (ret != Z_STREAM_END) {
625 pr_err_ratelimited("zlib_inflate() failed, ret = %d!\n", ret);
626 kvfree(workspace);
627 return;
628 }
629
630 unzipped_len = zstream->total_out;
631
632 /* Append ECC notice to decompressed buffer. */
633 memcpy(workspace + unzipped_len, record->buf + record->size,
634 record->ecc_notice_size);
635
	/* Copy decompressed contents into a minimum-sized allocation. */
637 unzipped = kvmemdup(workspace, unzipped_len + record->ecc_notice_size,
638 GFP_KERNEL);
639 kvfree(workspace);
640 if (!unzipped)
641 return;
642
643 /* Swap out compressed contents with decompressed contents. */
644 kvfree(record->buf);
645 record->buf = unzipped;
646 record->size = unzipped_len;
647 record->compressed = false;
648}
649
650/*
651 * Read all the records from one persistent store backend. Create
652 * files in our filesystem. Don't warn about -EEXIST errors
653 * when we are re-scanning the backing store looking to add new
654 * error records.
655 */
656void pstore_get_backend_records(struct pstore_info *psi,
657 struct dentry *root, int quiet)
658{
659 int failed = 0;
660 unsigned int stop_loop = 65536;
661 struct z_stream_s zstream = {};
662
663 if (!psi || !root)
664 return;
665
666 if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && compress) {
667 zstream.workspace = kvmalloc(zlib_inflate_workspacesize(),
668 GFP_KERNEL);
669 zlib_inflateInit2(&zstream, -DEF_WBITS);
670 }
671
672 mutex_lock(&psi->read_mutex);
673 if (psi->open && psi->open(psi))
674 goto out;
675
676 /*
677 * Backend callback read() allocates record.buf. decompress_record()
678 * may reallocate record.buf. On success, pstore_mkfile() will keep
679 * the record.buf, so free it only on failure.
680 */
681 for (; stop_loop; stop_loop--) {
682 struct pstore_record *record;
683 int rc;
684
685 record = kzalloc(sizeof(*record), GFP_KERNEL);
686 if (!record) {
687 pr_err("out of memory creating record\n");
688 break;
689 }
690 pstore_record_init(record, psi);
691
692 record->size = psi->read(record);
693
694 /* No more records left in backend? */
695 if (record->size <= 0) {
696 kfree(record);
697 break;
698 }
699
700 decompress_record(record, &zstream);
701 rc = pstore_mkfile(root, record);
702 if (rc) {
703 /* pstore_mkfile() did not take record, so free it. */
704 kvfree(record->buf);
705 kfree(record->priv);
706 kfree(record);
707 if (rc != -EEXIST || !quiet)
708 failed++;
709 }
710 }
711 if (psi->close)
712 psi->close(psi);
713out:
714 mutex_unlock(&psi->read_mutex);
715
716 if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && compress) {
717 if (zlib_inflateEnd(&zstream) != Z_OK)
718 pr_warn("zlib_inflateEnd() failed\n");
719 kvfree(zstream.workspace);
720 }
721
722 if (failed)
723 pr_warn("failed to create %d record(s) from '%s'\n",
724 failed, psi->name);
725 if (!stop_loop)
726 pr_err("looping? Too many records seen from '%s'\n",
727 psi->name);
728}
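
/*
 * Note: a single inflate workspace is allocated per scan and reused for
 * every compressed record via zlib_inflateReset() in decompress_record(),
 * so the setup cost is paid once rather than per record.
 */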
729
730static void pstore_dowork(struct work_struct *work)
731{
732 pstore_get_records(1);
733}
734
735static void pstore_timefunc(struct timer_list *unused)
736{
737 if (pstore_new_entry) {
738 pstore_new_entry = 0;
739 schedule_work(&pstore_work);
740 }
741
742 pstore_timer_kick();
743}
744
745static int __init pstore_init(void)
746{
747 int ret;
748
749 ret = pstore_init_fs();
750 if (ret)
751 free_buf_for_compression();
752
753 return ret;
754}
755late_initcall(pstore_init);
756
757static void __exit pstore_exit(void)
758{
759 pstore_exit_fs();
760}
761module_exit(pstore_exit)
762
763MODULE_AUTHOR("Tony Luck <tony.luck@intel.com>");
764MODULE_DESCRIPTION("Persistent Storage - platform driver interface");
765MODULE_LICENSE("GPL");