/* (stray "Loading..." web-viewer artifact removed — not part of the source) */
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Memory preserving reboot related code.
4 *
5 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
6 * Copyright (C) IBM Corporation, 2004. All rights reserved
7 */
8
9#include <linux/errno.h>
10#include <linux/crash_dump.h>
11#include <linux/uaccess.h>
12#include <linux/io.h>
13
14static ssize_t __copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
15 unsigned long offset, int userbuf,
16 bool encrypted)
17{
18 void *vaddr;
19
20 if (!csize)
21 return 0;
22
23 if (encrypted)
24 vaddr = (__force void *)ioremap_encrypted(pfn << PAGE_SHIFT, PAGE_SIZE);
25 else
26 vaddr = (__force void *)ioremap_cache(pfn << PAGE_SHIFT, PAGE_SIZE);
27
28 if (!vaddr)
29 return -ENOMEM;
30
31 if (userbuf) {
32 if (copy_to_user((void __user *)buf, vaddr + offset, csize)) {
33 iounmap((void __iomem *)vaddr);
34 return -EFAULT;
35 }
36 } else
37 memcpy(buf, vaddr + offset, csize);
38
39 set_iounmap_nonlazy();
40 iounmap((void __iomem *)vaddr);
41 return csize;
42}
43
44/**
45 * copy_oldmem_page - copy one page of memory
46 * @pfn: page frame number to be copied
47 * @buf: target memory address for the copy; this can be in kernel address
48 * space or user address space (see @userbuf)
49 * @csize: number of bytes to copy
50 * @offset: offset in bytes into the page (based on pfn) to begin the copy
51 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
52 * otherwise @buf is in kernel address space, use memcpy().
53 *
54 * Copy a page from the old kernel's memory. For this page, there is no pte
55 * mapped in the current kernel. We stitch up a pte, similar to kmap_atomic.
56 */
57ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
58 unsigned long offset, int userbuf)
59{
60 return __copy_oldmem_page(pfn, buf, csize, offset, userbuf, false);
61}
62
63/**
64 * copy_oldmem_page_encrypted - same as copy_oldmem_page() above but ioremap the
65 * memory with the encryption mask set to accommodate kdump on SME-enabled
66 * machines.
67 */
68ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
69 unsigned long offset, int userbuf)
70{
71 return __copy_oldmem_page(pfn, buf, csize, offset, userbuf, true);
72}
73
74ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
75{
76 return read_from_oldmem(buf, count, ppos, 0, sev_active());
77}
1/*
2 * Memory preserving reboot related code.
3 *
4 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
5 * Copyright (C) IBM Corporation, 2004. All rights reserved
6 */
7
8#include <linux/errno.h>
9#include <linux/crash_dump.h>
10#include <linux/uaccess.h>
11#include <linux/io.h>
12
13/**
14 * copy_oldmem_page - copy one page from "oldmem"
15 * @pfn: page frame number to be copied
16 * @buf: target memory address for the copy; this can be in kernel address
17 * space or user address space (see @userbuf)
18 * @csize: number of bytes to copy
19 * @offset: offset in bytes into the page (based on pfn) to begin the copy
20 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
21 * otherwise @buf is in kernel address space, use memcpy().
22 *
23 * Copy a page from "oldmem". For this page, there is no pte mapped
24 * in the current kernel. We stitch up a pte, similar to kmap_atomic.
25 */
26ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
27 size_t csize, unsigned long offset, int userbuf)
28{
29 void *vaddr;
30
31 if (!csize)
32 return 0;
33
34 vaddr = ioremap_cache(pfn << PAGE_SHIFT, PAGE_SIZE);
35 if (!vaddr)
36 return -ENOMEM;
37
38 if (userbuf) {
39 if (copy_to_user(buf, vaddr + offset, csize)) {
40 iounmap(vaddr);
41 return -EFAULT;
42 }
43 } else
44 memcpy(buf, vaddr + offset, csize);
45
46 set_iounmap_nonlazy();
47 iounmap(vaddr);
48 return csize;
49}