// SPDX-License-Identifier: GPL-2.0-only
/*
 * Routines for doing kexec-based kdump.
 *
 * Copyright (C) 2005, IBM Corp.
 *
 * Created by: Michael Ellerman
 */

#undef DEBUG

#include <linux/crash_dump.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <asm/code-patching.h>
#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/firmware.h>
#include <linux/uaccess.h>
#include <asm/rtas.h>
#include <asm/inst.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

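/*
 * A non-relocatable capture kernel runs from PHYSICAL_START rather than
 * from address zero, but exceptions still vector to the original
 * low-memory handler addresses. The trampolines patched below occupy
 * those low addresses and redirect execution up into the capture
 * kernel's copy of the vectors at addr + PHYSICAL_START.
 */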
#ifndef CONFIG_NONSTATIC_KERNEL
void __init reserve_kdump_trampoline(void)
{
	memblock_reserve(0, KDUMP_RESERVE_LIMIT);
}

static void __init create_trampoline(unsigned long addr)
{
	struct ppc_inst *p = (struct ppc_inst *)addr;

	/* The maximum range of a single instruction branch is the current
	 * instruction's address + (32 MB - 4) bytes. For the trampoline we
	 * need to branch to current address + 32 MB. So we insert a nop at
	 * the trampoline address, then the next instruction (+ 4 bytes)
	 * does a branch to (32 MB - 4). The net effect is that when we
	 * branch to "addr" we jump to ("addr" + 32 MB). Although it requires
	 * two instructions it doesn't require any registers.
	 */
	patch_instruction(p, ppc_inst(PPC_INST_NOP));
	patch_branch((void *)p + 4, addr + PHYSICAL_START, 0);
}

void __init setup_kdump_trampoline(void)
{
	unsigned long i;

	DBG(" -> setup_kdump_trampoline()\n");

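	/*
	 * Each trampoline is two 4-byte instructions (a nop followed by a
	 * branch), so one is laid down every 8 bytes across the whole
	 * trampoline region.
	 */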
	for (i = KDUMP_TRAMPOLINE_START; i < KDUMP_TRAMPOLINE_END; i += 8) {
		create_trampoline(i);
	}

#ifdef CONFIG_PPC_PSERIES
	create_trampoline(__pa(system_reset_fwnmi) - PHYSICAL_START);
	create_trampoline(__pa(machine_check_fwnmi) - PHYSICAL_START);
#endif /* CONFIG_PPC_PSERIES */

	DBG(" <- setup_kdump_trampoline()\n");
}
#endif /* CONFIG_NONSTATIC_KERNEL */

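/*
 * Copy @csize bytes starting at @offset within the already-mapped old-kernel
 * page at @vaddr into @buf. @buf is treated as a user-space pointer when
 * @userbuf is set, otherwise as a kernel buffer. Returns the number of bytes
 * copied, or -EFAULT if the copy to user space fails.
 */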
static size_t copy_oldmem_vaddr(void *vaddr, char *buf, size_t csize,
				unsigned long offset, int userbuf)
{
	if (userbuf) {
		if (copy_to_user((char __user *)buf, (vaddr + offset), csize))
			return -EFAULT;
	} else
		memcpy(buf, (vaddr + offset), csize);

	return csize;
}

/**
 * copy_oldmem_page - copy one page from "oldmem"
 * @pfn: page frame number to be copied
 * @buf: target memory address for the copy; this can be in kernel address
 *	space or user address space (see @userbuf)
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page (based on pfn) to begin the copy
 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
 *	otherwise @buf is in kernel address space, use memcpy().
 *
 * Copy a page from "oldmem". For this page, there is no pte mapped
 * in the current kernel. We stitch up a pte, similar to kmap_atomic.
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
			size_t csize, unsigned long offset, int userbuf)
{
	void *vaddr;
	phys_addr_t paddr;

	if (!csize)
		return 0;

	csize = min_t(size_t, csize, PAGE_SIZE);
	paddr = pfn << PAGE_SHIFT;

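	/*
	 * Old-kernel pages that lie inside a memblock memory region are
	 * covered by the linear mapping and can be reached via __va().
	 * Anything else has to be mapped temporarily with ioremap_cache()
	 * and unmapped again once the copy is done.
	 */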
	if (memblock_is_region_memory(paddr, csize)) {
		vaddr = __va(paddr);
		csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
	} else {
		vaddr = ioremap_cache(paddr, PAGE_SIZE);
		csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
		iounmap(vaddr);
	}

	return csize;
}

#ifdef CONFIG_PPC_RTAS
/*
 * The crashkernel region will almost always overlap the RTAS region, so
 * we have to be careful when shrinking the crashkernel region.
 */
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
{
	unsigned long addr;
	const __be32 *basep, *sizep;
	unsigned int rtas_start = 0, rtas_end = 0;

	basep = of_get_property(rtas.dev, "linux,rtas-base", NULL);
	sizep = of_get_property(rtas.dev, "rtas-size", NULL);

	if (basep && sizep) {
		rtas_start = be32_to_cpup(basep);
		rtas_end = rtas_start + be32_to_cpup(sizep);
	}

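	/*
	 * Hand the requested range back to the page allocator page by page,
	 * skipping any page that overlaps the RTAS region so that RTAS
	 * stays reserved.
	 */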
	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		/* Does this page overlap with the RTAS region? */
		if (addr <= rtas_end && ((addr + PAGE_SIZE) > rtas_start))
			continue;

		free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT));
	}
}
#endif