// SPDX-License-Identifier: GPL-2.0-only
/*
 * Routines for doing kexec-based kdump.
 *
 * Copyright (C) 2005, IBM Corp.
 *
 * Created by: Michael Ellerman
 */

#undef DEBUG

#include <linux/crash_dump.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <asm/code-patching.h>
#include <asm/kdump.h>
#include <asm/firmware.h>
#include <linux/uio.h>
#include <asm/rtas.h>
#include <asm/inst.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifndef CONFIG_NONSTATIC_KERNEL
void __init reserve_kdump_trampoline(void)
{
	memblock_reserve(0, KDUMP_RESERVE_LIMIT);
}

static void __init create_trampoline(unsigned long addr)
{
	u32 *p = (u32 *)addr;

	/* The maximum range of a single instruction branch is the current
	 * instruction's address + (32 MB - 4) bytes. For the trampoline we
	 * need to branch to current address + 32 MB. So we insert a nop at
	 * the trampoline address, then the next instruction (+ 4 bytes)
	 * does a branch of (32 MB - 4) bytes. The net effect is that when we
	 * branch to "addr" we jump to ("addr" + 32 MB). Although it requires
	 * two instructions it doesn't require any registers.
	 */
	patch_instruction(p, ppc_inst(PPC_RAW_NOP()));
	patch_branch(p + 1, addr + PHYSICAL_START, 0);
}
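
/*
 * Worked example (a sketch; the numbers are illustrative): with
 * PHYSICAL_START = 32 MB and a trampoline at addr = 0x100, the two
 * patched words are
 *
 *	0x100:	nop
 *	0x104:	b 0x2000100	(a branch of 0x2000100 - 0x104 = 32 MB - 4)
 *
 * so an exception that vectors to 0x100 falls through the nop and lands
 * at 0x100 + 32 MB, the matching vector in the relocated kdump kernel.
 */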

void __init setup_kdump_trampoline(void)
{
	unsigned long i;

	DBG(" -> setup_kdump_trampoline()\n");

	/* Each trampoline is two instructions (8 bytes) long, hence the
	 * stride of 8.
	 */
	for (i = KDUMP_TRAMPOLINE_START; i < KDUMP_TRAMPOLINE_END; i += 8) {
		create_trampoline(i);
	}

#ifdef CONFIG_PPC_PSERIES
	/* Firmware delivers fwnmi exceptions to the addresses the crashed
	 * kernel registered for them, so those low addresses get
	 * trampolines as well.
	 */
	create_trampoline(__pa(system_reset_fwnmi) - PHYSICAL_START);
	create_trampoline(__pa(machine_check_fwnmi) - PHYSICAL_START);
#endif /* CONFIG_PPC_PSERIES */

	DBG(" <- setup_kdump_trampoline()\n");
}
#endif /* CONFIG_NONSTATIC_KERNEL */

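/**
 * copy_oldmem_page - copy one page from "oldmem"
 * @iter: target iov_iter for the copy
 * @pfn: page frame number to be copied
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page (based on pfn) to begin the copy
 *
 * Copy a page from the memory of the crashed kernel. Pages inside the
 * kdump kernel's own memory are reachable through the linear mapping;
 * anything else is mapped temporarily with ioremap_cache().
 */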
ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
			size_t csize, unsigned long offset)
{
	void *vaddr;
	phys_addr_t paddr;

	if (!csize)
		return 0;

	csize = min_t(size_t, csize, PAGE_SIZE);
	paddr = pfn << PAGE_SHIFT;

	if (memblock_is_region_memory(paddr, csize)) {
		vaddr = __va(paddr);
		csize = copy_to_iter(vaddr + offset, csize, iter);
	} else {
		vaddr = ioremap_cache(paddr, PAGE_SIZE);
		csize = copy_to_iter(vaddr + offset, csize, iter);
		iounmap(vaddr);
	}

	return csize;
}
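
/*
 * For context, a sketch (not part of this file) of how this routine is
 * reached: copy_oldmem_page() is the arch hook behind /proc/vmcore, so
 * a capture tool running in the kdump kernel drives it with plain reads:
 *
 *	int fd = open("/proc/vmcore", O_RDONLY);
 *	char buf[4096];
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *
 * The vmcore code splits each read into per-page calls to this routine,
 * passing an iov_iter that points at the caller's buffer.
 */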

#ifdef CONFIG_PPC_RTAS
/*
 * The crashkernel region will almost always overlap the RTAS region, so
 * we have to be careful when shrinking the crashkernel region.
 */
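/*
 * Worked example (illustrative numbers, not from a real device tree):
 * with rtas_start = 0x2000000 and a 16 MB rtas-size, rtas_end is
 * 0x3000000. The page at 0x2fff000 overlaps (0x2fff000 <= 0x3000000 and
 * 0x2fff000 + PAGE_SIZE > 0x2000000) and is skipped; the page at
 * 0x3001000 fails the first test and is freed.
 */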
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
{
	unsigned long addr;
	const __be32 *basep, *sizep;
	unsigned int rtas_start = 0, rtas_end = 0;

	basep = of_get_property(rtas.dev, "linux,rtas-base", NULL);
	sizep = of_get_property(rtas.dev, "rtas-size", NULL);

	if (basep && sizep) {
		rtas_start = be32_to_cpup(basep);
		rtas_end = rtas_start + be32_to_cpup(sizep);
	}

	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		/* Does this page overlap with the RTAS region? */
		if (addr <= rtas_end && ((addr + PAGE_SIZE) > rtas_start))
			continue;

		free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT));
	}
}
#endif /* CONFIG_PPC_RTAS */