// SPDX-License-Identifier: GPL-2.0
/*
 * Memory preserving reboot related code.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 * Copyright (C) IBM Corporation, 2004. All rights reserved
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>

#include <linux/uaccess.h>

static void *kdump_buf_page;

static inline bool is_crashed_pfn_valid(unsigned long pfn)
{
#ifndef CONFIG_X86_PAE
	/*
	 * A non-PAE kdump kernel executed from a PAE one would crop the
	 * high pte bits and poke unwanted space, counting again from
	 * address 0; we don't want that.  The pte must fit into an
	 * unsigned long, so in effect the test checks that the high 12
	 * bits of the pfn are zero (the pfn will be shifted left by
	 * PAGE_SHIFT).
	 */
	return pte_pfn(pfn_pte(pfn, __pgprot(0))) == pfn;
#else
	return true;
#endif
}
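
/*
 * Worked example of the check above (illustrative values only): with
 * 32-bit non-PAE page tables and PAGE_SHIFT == 12, a pfn must fit in
 * 20 bits.  pfn 0x00012345 shifts to 0x12345000, survives the round
 * trip through pfn_pte()/pte_pfn(), and is accepted.  pfn 0x00112345
 * (a physical address above 4 GiB) would be cropped to 0x12345000 in
 * the 32-bit pte and come back from pte_pfn() as 0x00012345, which
 * does not equal 0x00112345, so it is rejected.
 */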

/**
 * copy_oldmem_page - copy one page from "oldmem"
 * @pfn: page frame number to be copied
 * @buf: target memory address for the copy; this can be in kernel address
 *	space or user address space (see @userbuf)
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page (based on pfn) to begin the copy
 * @userbuf: if set, @buf is in user address space and copy_to_user() is
 *	used; otherwise @buf is in kernel address space and memcpy() is used
 *
 * Copy a page from "oldmem".  For this page, there is no pte mapped in
 * the current kernel; we stitch one up, similar to kmap_atomic().
 *
 * Calling copy_to_user() in atomic context is not desirable, so we first
 * copy the data to the pre-allocated kernel page and then copy it to user
 * space from non-atomic context.
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
			 size_t csize, unsigned long offset, int userbuf)
{
	void *vaddr;

	if (!csize)
		return 0;

	if (!is_crashed_pfn_valid(pfn))
		return -EFAULT;

	vaddr = kmap_atomic_pfn(pfn);

	if (!userbuf) {
		memcpy(buf, (vaddr + offset), csize);
		kunmap_atomic(vaddr);
	} else {
		if (!kdump_buf_page) {
			printk(KERN_WARNING "Kdump: Kdump buffer page not allocated\n");
			kunmap_atomic(vaddr);
			return -EFAULT;
		}
		copy_page(kdump_buf_page, vaddr);
		kunmap_atomic(vaddr);
		if (copy_to_user(buf, (kdump_buf_page + offset), csize))
			return -EFAULT;
	}

	return csize;
}
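
/*
 * Illustrative only -- not part of this file: a reader such as
 * /proc/vmcore consumes the old kernel's memory through
 * copy_oldmem_page() one page at a time.  A minimal sketch of such a
 * caller (the helper name is hypothetical, and range validation is
 * assumed to have happened already) might look like:
 *
 *	static ssize_t read_oldmem_range(char *buf, size_t count,
 *					 u64 paddr, int userbuf)
 *	{
 *		size_t done = 0;
 *
 *		while (count) {
 *			unsigned long pfn = paddr >> PAGE_SHIFT;
 *			unsigned long off = paddr & (PAGE_SIZE - 1);
 *			size_t nr = min_t(size_t, count, PAGE_SIZE - off);
 *			ssize_t ret = copy_oldmem_page(pfn, buf + done,
 *						       nr, off, userbuf);
 *
 *			if (ret < 0)
 *				return ret;
 *			paddr += nr;
 *			done += nr;
 *			count -= nr;
 *		}
 *		return done;
 *	}
 */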
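
/*
 * Pre-allocate the bounce page used above for copies to user space.
 * arch_initcall() runs this early during boot of the kdump kernel,
 * so the page exists long before user space can start reading the
 * old kernel's memory (e.g. via /proc/vmcore).
 */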
static int __init kdump_buf_page_init(void)
{
	int ret = 0;

	kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!kdump_buf_page) {
		printk(KERN_WARNING "Kdump: Failed to allocate kdump buffer page\n");
		ret = -ENOMEM;
	}

	return ret;
}
arch_initcall(kdump_buf_page_init);