1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Memory preserving reboot related code.
4 *
5 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
6 * Copyright (C) IBM Corporation, 2004. All rights reserved
7 */
8
9#include <linux/errno.h>
10#include <linux/crash_dump.h>
11#include <linux/uaccess.h>
12#include <linux/io.h>
13
14/**
15 * copy_oldmem_page - copy one page from "oldmem"
16 * @pfn: page frame number to be copied
17 * @buf: target memory address for the copy; this can be in kernel address
18 * space or user address space (see @userbuf)
19 * @csize: number of bytes to copy
20 * @offset: offset in bytes into the page (based on pfn) to begin the copy
21 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
22 * otherwise @buf is in kernel address space, use memcpy().
23 *
24 * Copy a page from "oldmem". For this page, there is no pte mapped
25 * in the current kernel. We stitch up a pte, similar to kmap_atomic.
26 */
27ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
28 size_t csize, unsigned long offset, int userbuf)
29{
30 void *vaddr;
31
32 if (!csize)
33 return 0;
34
35 vaddr = ioremap_cache(pfn << PAGE_SHIFT, PAGE_SIZE);
36 if (!vaddr)
37 return -ENOMEM;
38
39 if (userbuf) {
40 if (copy_to_user(buf, vaddr + offset, csize)) {
41 iounmap(vaddr);
42 return -EFAULT;
43 }
44 } else
45 memcpy(buf, vaddr + offset, csize);
46
47 set_iounmap_nonlazy();
48 iounmap(vaddr);
49 return csize;
50}
1/*
2 * Memory preserving reboot related code.
3 *
4 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
5 * Copyright (C) IBM Corporation, 2004. All rights reserved
6 */
7
8#include <linux/errno.h>
9#include <linux/crash_dump.h>
10#include <linux/uaccess.h>
11#include <linux/io.h>
12
13/**
14 * copy_oldmem_page - copy one page from "oldmem"
15 * @pfn: page frame number to be copied
16 * @buf: target memory address for the copy; this can be in kernel address
17 * space or user address space (see @userbuf)
18 * @csize: number of bytes to copy
19 * @offset: offset in bytes into the page (based on pfn) to begin the copy
20 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
21 * otherwise @buf is in kernel address space, use memcpy().
22 *
23 * Copy a page from "oldmem". For this page, there is no pte mapped
24 * in the current kernel. We stitch up a pte, similar to kmap_atomic.
25 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
		size_t csize, unsigned long offset, int userbuf)
{
	void *vaddr;

	/* Nothing requested — report zero bytes copied. */
	if (!csize)
		return 0;

	/*
	 * The old kernel's page has no pte in this kernel; create a
	 * temporary cacheable mapping of the single page holding the data.
	 */
	vaddr = ioremap_cache(pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!vaddr)
		return -ENOMEM;

	if (userbuf) {
		/* @buf is in user address space; copy_to_user() may fault. */
		if (copy_to_user(buf, vaddr + offset, csize)) {
			iounmap(vaddr);
			return -EFAULT;
		}
	} else
		/* @buf is in kernel address space. */
		memcpy(buf, vaddr + offset, csize);

	/*
	 * NOTE(review): set_iounmap_nonlazy() presumably makes the unmap
	 * below tear down the vmap area synchronously rather than lazily —
	 * confirm. It is skipped on the -EFAULT path above.
	 */
	set_iounmap_nonlazy();
	iounmap(vaddr);
	return csize;
}