// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 SiFive
 */

#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/stop_machine.h>
#include <asm/kprobes.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/ftrace.h>
#include <asm/text-patching.h>
#include <asm/sections.h>

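/*
 * Payload handed to the stop_machine() callback: the target address, the
 * instruction words to write, their length in bytes, and a counter used
 * to rendezvous all online CPUs.
 */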
struct patch_insn {
        void *addr;
        u32 *insns;
        size_t len;
        atomic_t cpu_count;
};

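/*
 * Set while patch_text() runs under stop_machine() so that
 * __patch_insn_write() skips its lockdep assertion on text_mutex: the
 * mutex is held, but by the thread that invoked stop_machine(), not by
 * the CPU performing the write.
 */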
int riscv_patch_in_stop_machine = false;

#ifdef CONFIG_MMU

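/*
 * Code in .exit.text may still need patching early in boot (for instance
 * by ftrace) before the section is discarded, but core_kernel_text() does
 * not cover it, so check for it explicitly while the system is not yet
 * fully up.
 */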
static inline bool is_kernel_exittext(uintptr_t addr)
{
        return system_state < SYSTEM_RUNNING &&
                addr >= (uintptr_t)__exittext_begin &&
                addr < (uintptr_t)__exittext_end;
}

/*
 * fix_to_virt() requires a compile-time constant index, otherwise the
 * BUILD_BUG_ON on "idx >= __end_of_fixed_addresses" fires. Hence the
 * '__always_inline' and the 'const unsigned int fixmap' parameter.
 */
static __always_inline void *patch_map(void *addr, const unsigned int fixmap)
{
        uintptr_t uintaddr = (uintptr_t) addr;
        struct page *page;

        if (core_kernel_text(uintaddr) || is_kernel_exittext(uintaddr))
                page = phys_to_page(__pa_symbol(addr));
        else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
                page = vmalloc_to_page(addr);
        else
                return addr;

        BUG_ON(!page);

        return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
                                         offset_in_page(addr));
}

static void patch_unmap(int fixmap)
{
        clear_fixmap(fixmap);
}
NOKPROBE_SYMBOL(patch_unmap);

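/*
 * Fill @len bytes at @addr with the byte @c through a temporary fixmap
 * mapping, so the write succeeds even where kernel text is mapped
 * read-only. FIX_TEXT_POKE1 sits directly after FIX_TEXT_POKE0 in the
 * fixmap layout, so a write through @waddr that crosses a page boundary
 * lands in the second mapping.
 */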
static int __patch_insn_set(void *addr, u8 c, size_t len)
{
        bool across_pages = (offset_in_page(addr) + len) > PAGE_SIZE;
        void *waddr = addr;

        /*
         * Only two pages can be mapped at a time for writing.
         */
        if (len + offset_in_page(addr) > 2 * PAGE_SIZE)
                return -EINVAL;
        /*
         * The caller is expected to hold text_mutex already, so no further
         * locking is needed here to keep the write safe across cores.
         */
        lockdep_assert_held(&text_mutex);

        preempt_disable();

        if (across_pages)
                patch_map(addr + PAGE_SIZE, FIX_TEXT_POKE1);

        waddr = patch_map(addr, FIX_TEXT_POKE0);

        memset(waddr, c, len);

        /*
         * We could have just patched a function that is about to be
         * called so make sure we don't execute partially patched
         * instructions by flushing the icache as soon as possible.
         */
        local_flush_icache_range((unsigned long)waddr,
                                 (unsigned long)waddr + len);

        patch_unmap(FIX_TEXT_POKE0);

        if (across_pages)
                patch_unmap(FIX_TEXT_POKE1);

        preempt_enable();

        return 0;
}
NOKPROBE_SYMBOL(__patch_insn_set);

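/*
 * Write @len bytes of instruction memory at @addr through a temporary
 * fixmap mapping. Like __patch_insn_set(), this handles at most two pages
 * per call; patch_insn_write() below loops over larger ranges.
 */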
static int __patch_insn_write(void *addr, const void *insn, size_t len)
{
        bool across_pages = (offset_in_page(addr) + len) > PAGE_SIZE;
        void *waddr = addr;
        int ret;

        /*
         * Only two pages can be mapped at a time for writing.
         */
        if (len + offset_in_page(addr) > 2 * PAGE_SIZE)
                return -EINVAL;

        /*
         * The caller is expected to hold text_mutex already, so no further
         * locking is needed here to keep the write safe across cores.
         *
         * We're currently using stop_machine() for ftrace & kprobes, and while
         * that ensures text_mutex is held before installing the mappings it
         * does not ensure text_mutex is held by the calling thread. That's
         * safe but triggers a lockdep failure, so just elide it for that
         * specific case.
         */
        if (!riscv_patch_in_stop_machine)
                lockdep_assert_held(&text_mutex);

        preempt_disable();

        if (across_pages)
                patch_map(addr + PAGE_SIZE, FIX_TEXT_POKE1);

        waddr = patch_map(addr, FIX_TEXT_POKE0);

        ret = copy_to_kernel_nofault(waddr, insn, len);

        /*
         * We could have just patched a function that is about to be
         * called so make sure we don't execute partially patched
         * instructions by flushing the icache as soon as possible.
         */
        local_flush_icache_range((unsigned long)waddr,
                                 (unsigned long)waddr + len);

        patch_unmap(FIX_TEXT_POKE0);

        if (across_pages)
                patch_unmap(FIX_TEXT_POKE1);

        preempt_enable();

        return ret;
}
NOKPROBE_SYMBOL(__patch_insn_write);
#else
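/*
 * Without an MMU there is no read-only text mapping to work around, so
 * patch in place.
 */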
static int __patch_insn_set(void *addr, u8 c, size_t len)
{
        memset(addr, c, len);

        return 0;
}
NOKPROBE_SYMBOL(__patch_insn_set);

static int __patch_insn_write(void *addr, const void *insn, size_t len)
{
        return copy_to_kernel_nofault(addr, insn, len);
}
NOKPROBE_SYMBOL(__patch_insn_write);
#endif /* CONFIG_MMU */

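/*
 * Chunked front end for __patch_insn_set(): capping each chunk at
 * 2 * PAGE_SIZE minus the offset of @addr within its page keeps every
 * call within the two fixmap slots.
 */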
static int patch_insn_set(void *addr, u8 c, size_t len)
{
        size_t size;
        int ret;

        /*
         * __patch_insn_set() can only work on 2 pages at a time so call it in a
         * loop with len <= 2 * PAGE_SIZE.
         */
        while (len) {
                size = min(len, PAGE_SIZE * 2 - offset_in_page(addr));
                ret = __patch_insn_set(addr, c, size);
                if (ret)
                        return ret;

                addr += size;
                len -= size;
        }

        return 0;
}
NOKPROBE_SYMBOL(patch_insn_set);

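/*
 * The "nosync" variants do not stop the other CPUs: the caller must
 * guarantee that nothing can be executing the patched range concurrently.
 * The icache flush afterwards makes the new bytes visible to instruction
 * fetch on all harts.
 */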
int patch_text_set_nosync(void *addr, u8 c, size_t len)
{
        int ret;

        ret = patch_insn_set(addr, c, len);
        if (!ret)
                flush_icache_range((uintptr_t)addr, (uintptr_t)addr + len);

        return ret;
}
NOKPROBE_SYMBOL(patch_text_set_nosync);

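/*
 * Write an arbitrary number of instruction bytes, two pages at a time.
 * Callers must hold text_mutex (or run under stop_machine(), see above)
 * and are responsible for any icache maintenance beyond the per-chunk
 * local flush done by __patch_insn_write().
 */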
int patch_insn_write(void *addr, const void *insn, size_t len)
{
        size_t size;
        int ret;

        /*
         * Copy the instructions to the destination address, two pages at a time
         * because __patch_insn_write() can only handle len <= 2 * PAGE_SIZE.
         */
        while (len) {
                size = min(len, PAGE_SIZE * 2 - offset_in_page(addr));
                ret = __patch_insn_write(addr, insn, size);
                if (ret)
                        return ret;

                addr += size;
                insn += size;
                len -= size;
        }

        return 0;
}
NOKPROBE_SYMBOL(patch_insn_write);

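/*
 * Like patch_text_set_nosync(), but copying instruction words instead of
 * setting a byte pattern: no stop_machine() rendezvous is performed, so
 * this is only safe when no CPU can be executing the patched range.
 */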
int patch_text_nosync(void *addr, const void *insns, size_t len)
{
        int ret;

        ret = patch_insn_write(addr, insns, len);
        if (!ret)
                flush_icache_range((uintptr_t)addr, (uintptr_t)addr + len);

        return ret;
}
NOKPROBE_SYMBOL(patch_text_nosync);

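/*
 * stop_machine() callback, run on every online CPU. The last CPU to
 * arrive performs the write and then bumps the counter past
 * num_online_cpus(), releasing the others; each waiter flushes its own
 * icache before resuming execution.
 */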
static int patch_text_cb(void *data)
{
        struct patch_insn *patch = data;
        int ret = 0;

        if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {
                ret = patch_insn_write(patch->addr, patch->insns, patch->len);
                /*
                 * Make sure the patching store is effective *before* we
                 * increment the counter which releases all waiting CPUs
                 * by using the release variant of atomic increment. The
                 * release pairs with the call to local_flush_icache_all()
                 * on the waiting CPU.
                 */
                atomic_inc_return_release(&patch->cpu_count);
        } else {
                while (atomic_read(&patch->cpu_count) <= num_online_cpus())
                        cpu_relax();

                local_flush_icache_all();
        }

        return ret;
}
NOKPROBE_SYMBOL(patch_text_cb);

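/*
 * Patch @len bytes of text at @addr with every other CPU parked in
 * stop_machine(). @len is in bytes, so a caller replacing one 32-bit
 * instruction might do (hypothetical sketch; __BUG_INSN_32 is the ebreak
 * encoding from <asm/bug.h>):
 *
 *        u32 insn = __BUG_INSN_32;
 *
 *        mutex_lock(&text_mutex);
 *        patch_text(addr, &insn, sizeof(insn));
 *        mutex_unlock(&text_mutex);
 */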
int patch_text(void *addr, u32 *insns, size_t len)
{
        int ret;
        struct patch_insn patch = {
                .addr = addr,
                .insns = insns,
                .len = len,
                .cpu_count = ATOMIC_INIT(0),
        };

        /*
         * kprobes takes text_mutex before calling patch_text(), but since we
         * then call stop_machine(), the lockdep assertion in
         * __patch_insn_write() gets confused by the context in which the lock
         * is taken. Instead, assert the lock is held before calling
         * stop_machine(), and set riscv_patch_in_stop_machine so
         * __patch_insn_write() skips the check.
         */
        lockdep_assert_held(&text_mutex);
        riscv_patch_in_stop_machine = true;
        ret = stop_machine_cpuslocked(patch_text_cb, &patch, cpu_online_mask);
        riscv_patch_in_stop_machine = false;
        return ret;
}
NOKPROBE_SYMBOL(patch_text);