// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) "SMP alternatives: " fmt

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <linux/slab.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/mmu_context.h>
#include <linux/bsearch.h>
#include <asm/text-patching.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/fixmap.h>

int __read_mostly alternatives_patched;

EXPORT_SYMBOL_GPL(alternatives_patched);

#define MAX_PATCH_LEN (255-1)

static int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);

static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);

#define DPRINTK(fmt, args...)						\
do {									\
	if (debug_alternative)						\
		printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##args);	\
} while (0)

#define DUMP_BYTES(buf, len, fmt, args...)				\
do {									\
	if (unlikely(debug_alternative)) {				\
		int j;							\
									\
		if (!(len))						\
			break;						\
									\
		printk(KERN_DEBUG fmt, ##args);				\
		for (j = 0; j < (len) - 1; j++)				\
			printk(KERN_CONT "%02hhx ", buf[j]);		\
		printk(KERN_CONT "%02hhx\n", buf[j]);			\
	}								\
} while (0)

/*
 * Each GENERIC_NOPX is X bytes long and is defined as an array of bytes
 * corresponding to that nop.  To get from one nop to the next, we add
 * to the array base an offset equal to the sum of the sizes of all the
 * nops preceding the one we are after.
 *
 * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the
 * nice symmetry of sizes of the previous nops.
 */
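/*
 * Worked example of the indexing (illustrative only): with the layout
 * below, intel_nops[3] == intelnops + 1 + 2, i.e. it points just past
 * GENERIC_NOP1 and GENERIC_NOP2 and therefore at the start of the
 * three-byte GENERIC_NOP3.  So ideal_nops[n] always yields an n-byte
 * nop, for 1 <= n <= ASM_NOP_MAX.
 */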
#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char intelnops[] =
{
	GENERIC_NOP1,
	GENERIC_NOP2,
	GENERIC_NOP3,
	GENERIC_NOP4,
	GENERIC_NOP5,
	GENERIC_NOP6,
	GENERIC_NOP7,
	GENERIC_NOP8,
	GENERIC_NOP5_ATOMIC
};
static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
{
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef K8_NOP1
static const unsigned char k8nops[] =
{
	K8_NOP1,
	K8_NOP2,
	K8_NOP3,
	K8_NOP4,
	K8_NOP5,
	K8_NOP6,
	K8_NOP7,
	K8_NOP8,
	K8_NOP5_ATOMIC
};
static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char k7nops[] =
{
	K7_NOP1,
	K7_NOP2,
	K7_NOP3,
	K7_NOP4,
	K7_NOP5,
	K7_NOP6,
	K7_NOP7,
	K7_NOP8,
	K7_NOP5_ATOMIC
};
static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef P6_NOP1
static const unsigned char p6nops[] =
{
	P6_NOP1,
	P6_NOP2,
	P6_NOP3,
	P6_NOP4,
	P6_NOP5,
	P6_NOP6,
	P6_NOP7,
	P6_NOP8,
	P6_NOP5_ATOMIC
};
static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
{
	NULL,
	p6nops,
	p6nops + 1,
	p6nops + 1 + 2,
	p6nops + 1 + 2 + 3,
	p6nops + 1 + 2 + 3 + 4,
	p6nops + 1 + 2 + 3 + 4 + 5,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

/* Initialize these to a safe default */
#ifdef CONFIG_X86_64
const unsigned char * const *ideal_nops = p6_nops;
#else
const unsigned char * const *ideal_nops = intel_nops;
#endif

void __init arch_init_ideal_nops(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		/*
		 * Due to a decoder implementation quirk, some
		 * specific Intel CPUs actually perform better with
		 * the "k8_nops" than with the SDM-recommended NOPs.
		 */
		if (boot_cpu_data.x86 == 6 &&
		    boot_cpu_data.x86_model >= 0x0f &&
		    boot_cpu_data.x86_model != 0x1c &&
		    boot_cpu_data.x86_model != 0x26 &&
		    boot_cpu_data.x86_model != 0x27 &&
		    boot_cpu_data.x86_model < 0x30) {
			ideal_nops = k8_nops;
		} else if (boot_cpu_has(X86_FEATURE_NOPL)) {
			ideal_nops = p6_nops;
		} else {
#ifdef CONFIG_X86_64
			ideal_nops = k8_nops;
#else
			ideal_nops = intel_nops;
#endif
		}
		break;

	case X86_VENDOR_HYGON:
		ideal_nops = p6_nops;
		return;

	case X86_VENDOR_AMD:
		if (boot_cpu_data.x86 > 0xf) {
			ideal_nops = p6_nops;
			return;
		}

		/* fall through */

	default:
#ifdef CONFIG_X86_64
		ideal_nops = k8_nops;
#else
		if (boot_cpu_has(X86_FEATURE_K8))
			ideal_nops = k8_nops;
		else if (boot_cpu_has(X86_FEATURE_K7))
			ideal_nops = k7_nops;
		else
			ideal_nops = intel_nops;
#endif
	}
}

/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
{
	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, ideal_nops[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}
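/*
 * Example use (illustrative only, not called like this anywhere in this
 * file): pad a 16-byte patch buffer whose first 11 bytes hold a real
 * instruction sequence.  The remaining 5 bytes become one 5-byte nop
 * rather than five 1-byte ones.  "replacement" and "addr" are
 * hypothetical names.
 *
 *	u8 buf[16];
 *
 *	memcpy(buf, replacement, 11);
 *	add_nops(buf + 11, 5);
 *	text_poke_early(addr, buf, 16);
 */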

extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
void text_poke_early(void *addr, const void *opcode, size_t len);

/*
 * Are we looking at a near JMP with a 1 or 4-byte displacement?
 */
static inline bool is_jmp(const u8 opcode)
{
	return opcode == 0xeb || opcode == 0xe9;
}

static void __init_or_module
recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insn_buff)
{
	u8 *next_rip, *tgt_rip;
	s32 n_dspl, o_dspl;
	int repl_len;

	if (a->replacementlen != 5)
		return;

	o_dspl = *(s32 *)(insn_buff + 1);

	/* next_rip of the replacement JMP */
	next_rip = repl_insn + a->replacementlen;
	/* target rip of the replacement JMP */
	tgt_rip = next_rip + o_dspl;
	n_dspl = tgt_rip - orig_insn;

	DPRINTK("target RIP: %px, new_displ: 0x%x", tgt_rip, n_dspl);

	if (tgt_rip - orig_insn >= 0) {
		if (n_dspl - 2 <= 127)
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	/* negative offset */
	} else {
		if (((n_dspl - 2) & 0xff) == (n_dspl - 2))
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	}

two_byte_jmp:
	n_dspl -= 2;

	insn_buff[0] = 0xeb;
	insn_buff[1] = (s8)n_dspl;
	add_nops(insn_buff + 2, 3);

	repl_len = 2;
	goto done;

five_byte_jmp:
	n_dspl -= 5;

	insn_buff[0] = 0xe9;
	*(s32 *)&insn_buff[1] = n_dspl;

	repl_len = 5;

done:

	DPRINTK("final displ: 0x%08x, JMP 0x%lx",
		n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
}
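/*
 * Encoding background for the above (illustrative): a short JMP is
 * "eb <disp8>" (2 bytes) and a near JMP is "e9 <disp32>" (5 bytes);
 * both displacements are relative to the end of the instruction.  A
 * replacement jump to orig_insn + 0x40, say, is "e9 3b 00 00 00" in
 * the 5-byte form (0x40 - 5) and "eb 3e" (0x40 - 2) plus a 3-byte nop
 * in the 2-byte form produced by the two_byte_jmp path above.
 */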

/*
 * "noinline" to cause control flow change and thus invalidate I$ and
 * cause refetch after modification.
 */
static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr)
{
	unsigned long flags;
	int i;

	for (i = 0; i < a->padlen; i++) {
		if (instr[i] != 0x90)
			return;
	}

	local_irq_save(flags);
	add_nops(instr + (a->instrlen - a->padlen), a->padlen);
	local_irq_restore(flags);

	DUMP_BYTES(instr, a->instrlen, "%px: [%d:%d) optimized NOPs: ",
		   instr, a->instrlen - a->padlen, a->padlen);
}

/*
 * Replace instructions with better alternatives for this CPU type. This runs
 * before SMP is initialized to avoid SMP problems with self modifying code.
 * This implies that asymmetric systems where APs have less capabilities than
 * the boot processor are not handled. Tough. Make sure you disable such
 * features by hand.
 *
 * Marked "noinline" to cause control flow change and thus insn cache
 * to refetch changed I$ lines.
 */
void __init_or_module noinline apply_alternatives(struct alt_instr *start,
						  struct alt_instr *end)
{
	struct alt_instr *a;
	u8 *instr, *replacement;
	u8 insn_buff[MAX_PATCH_LEN];

	DPRINTK("alt table %px, -> %px", start, end);
	/*
	 * The scan order should be from start to end. A later scanned
	 * alternative code can overwrite previously scanned alternative code.
	 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
	 * patch code.
	 *
	 * So be careful if you want to change the scan order to any other
	 * order.
	 */
	for (a = start; a < end; a++) {
		int insn_buff_sz = 0;

		instr = (u8 *)&a->instr_offset + a->instr_offset;
		replacement = (u8 *)&a->repl_offset + a->repl_offset;
		BUG_ON(a->instrlen > sizeof(insn_buff));
		BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
		if (!boot_cpu_has(a->cpuid)) {
			if (a->padlen > 1)
				optimize_nops(a, instr);

			continue;
		}

		DPRINTK("feat: %d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d), pad: %d",
			a->cpuid >> 5,
			a->cpuid & 0x1f,
			instr, instr, a->instrlen,
			replacement, a->replacementlen, a->padlen);

		DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr);
		DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement);

		memcpy(insn_buff, replacement, a->replacementlen);
		insn_buff_sz = a->replacementlen;

		/*
		 * 0xe8 is a relative CALL; fix the offset.
		 *
		 * Instruction length is checked before the opcode to avoid
		 * accessing uninitialized bytes for zero-length replacements.
		 */
		if (a->replacementlen == 5 && *insn_buff == 0xe8) {
			*(s32 *)(insn_buff + 1) += replacement - instr;
			DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
				*(s32 *)(insn_buff + 1),
				(unsigned long)instr + *(s32 *)(insn_buff + 1) + 5);
		}

		if (a->replacementlen && is_jmp(replacement[0]))
			recompute_jump(a, instr, replacement, insn_buff);

		if (a->instrlen > a->replacementlen) {
			add_nops(insn_buff + a->replacementlen,
				 a->instrlen - a->replacementlen);
			insn_buff_sz += a->instrlen - a->replacementlen;
		}
		DUMP_BYTES(insn_buff, insn_buff_sz, "%px: final_insn: ", instr);

		text_poke_early(instr, insn_buff, insn_buff_sz);
	}
}
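/*
 * For reference, entries in __alt_instructions are normally generated
 * by the ALTERNATIVE() macro from <asm/alternative.h>.  A sketch of a
 * typical use (the X86_FEATURE_XMM2 gating is just an example):
 *
 *	asm volatile (ALTERNATIVE("lfence", "mfence", X86_FEATURE_XMM2));
 *
 * The macro emits the old instruction inline and records the
 * replacement bytes plus an alt_instr descriptor in separate sections,
 * which apply_alternatives() above consumes at boot.
 */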

#ifdef CONFIG_SMP
static void alternatives_smp_lock(const s32 *start, const s32 *end,
				  u8 *text, u8 *text_end)
{
	const s32 *poff;

	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn DS segment override prefix into lock prefix */
		if (*ptr == 0x3e)
			text_poke(ptr, ((unsigned char []){0xf0}), 1);
	}
}

static void alternatives_smp_unlock(const s32 *start, const s32 *end,
				    u8 *text, u8 *text_end)
{
	const s32 *poff;

	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn lock prefix into DS segment override prefix */
		if (*ptr == 0xf0)
			text_poke(ptr, ((unsigned char []){0x3E}), 1);
	}
}
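/*
 * Background sketch: the .smp_locks entries walked above are emitted by
 * the LOCK_PREFIX machinery in <asm/alternative.h>, roughly:
 *
 *	.pushsection .smp_locks, "a"
 *	.balign 4
 *	.long 671f - .		# 32-bit offset to the prefix byte
 *	.popsection
 *	671:	lock; ...
 *
 * so each s32 in [start, end) locates one 0xf0 (or patched-out 0x3e)
 * prefix byte.  Exact macro text varies by kernel version; treat this
 * as illustrative.
 */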

struct smp_alt_module {
	/* what is this ??? */
	struct module	*mod;
	char		*name;

	/* ptrs to lock prefixes */
	const s32	*locks;
	const s32	*locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8		*text;
	u8		*text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static bool uniproc_patched = false;	/* protected by text_mutex */

void __init_or_module alternatives_smp_module_add(struct module *mod,
						  char *name,
						  void *locks, void *locks_end,
						  void *text,  void *text_end)
{
	struct smp_alt_module *smp;

	mutex_lock(&text_mutex);
	if (!uniproc_patched)
		goto unlock;

	if (num_possible_cpus() == 1)
		/* Don't bother remembering, we'll never have to undo it. */
		goto smp_unlock;

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (!smp)
		/* we'll run the (safe but slow) SMP code then ... */
		goto unlock;

	smp->mod	= mod;
	smp->name	= name;
	smp->locks	= locks;
	smp->locks_end	= locks_end;
	smp->text	= text;
	smp->text_end	= text_end;
	DPRINTK("locks %p -> %p, text %p -> %p, name %s",
		smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	list_add_tail(&smp->next, &smp_alt_modules);
smp_unlock:
	alternatives_smp_unlock(locks, locks_end, text, text_end);
unlock:
	mutex_unlock(&text_mutex);
}

void __init_or_module alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;

	mutex_lock(&text_mutex);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		kfree(item);
		break;
	}
	mutex_unlock(&text_mutex);
}

void alternatives_enable_smp(void)
{
	struct smp_alt_module *mod;

	/* Why bother if there are no other CPUs? */
	BUG_ON(num_possible_cpus() == 1);

	mutex_lock(&text_mutex);

	if (uniproc_patched) {
		pr_info("switching to SMP code\n");
		BUG_ON(num_online_cpus() != 1);
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
		uniproc_patched = false;
	}
	mutex_unlock(&text_mutex);
}

/*
 * Return 1 if the address range is reserved for SMP-alternatives.
 * Must hold text_mutex.
 */
int alternatives_text_reserved(void *start, void *end)
{
	struct smp_alt_module *mod;
	const s32 *poff;
	u8 *text_start = start;
	u8 *text_end = end;

	lockdep_assert_held(&text_mutex);

	list_for_each_entry(mod, &smp_alt_modules, next) {
		if (mod->text > text_end || mod->text_end < text_start)
			continue;
		for (poff = mod->locks; poff < mod->locks_end; poff++) {
			const u8 *ptr = (const u8 *)poff + *poff;

			if (text_start <= ptr && text_end > ptr)
				return 1;
		}
	}

	return 0;
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PARAVIRT
void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
				     struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;
	char insn_buff[MAX_PATCH_LEN];

	for (p = start; p < end; p++) {
		unsigned int used;

		BUG_ON(p->len > MAX_PATCH_LEN);
		/* prep the buffer with the original instructions */
		memcpy(insn_buff, p->instr, p->len);
		used = pv_ops.init.patch(p->type, insn_buff, (unsigned long)p->instr, p->len);

		BUG_ON(used > p->len);

		/* Pad the rest with nops */
		add_nops(insn_buff + used, p->len - used);
		text_poke_early(p->instr, insn_buff, p->len);
	}
}
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif	/* CONFIG_PARAVIRT */

/*
 * Self-test for the INT3 based CALL emulation code.
 *
 * This exercises int3_emulate_call() to make sure INT3 pt_regs are set up
 * properly and that there is a stack gap between the INT3 frame and the
 * previous context. Without this gap doing a virtual PUSH on the interrupted
 * stack would corrupt the INT3 IRET frame.
 *
 * See entry_{32,64}.S for more details.
 */

/*
 * We define the int3_magic() function in assembly to control the calling
 * convention such that we can 'call' it from assembly.
 */

extern void int3_magic(unsigned int *ptr); /* defined in asm */

asm (
"	.pushsection	.init.text, \"ax\", @progbits\n"
"	.type		int3_magic, @function\n"
"int3_magic:\n"
"	movl	$1, (%" _ASM_ARG1 ")\n"
"	ret\n"
"	.size		int3_magic, .-int3_magic\n"
"	.popsection\n"
);

extern __initdata unsigned long int3_selftest_ip; /* defined in asm below */

static int __init
int3_exception_notify(struct notifier_block *self, unsigned long val, void *data)
{
	struct die_args *args = data;
	struct pt_regs *regs = args->regs;

	if (!regs || user_mode(regs))
		return NOTIFY_DONE;

	if (val != DIE_INT3)
		return NOTIFY_DONE;

	if (regs->ip - INT3_INSN_SIZE != int3_selftest_ip)
		return NOTIFY_DONE;

	int3_emulate_call(regs, (unsigned long)&int3_magic);
	return NOTIFY_STOP;
}

static void __init int3_selftest(void)
{
	static __initdata struct notifier_block int3_exception_nb = {
		.notifier_call	= int3_exception_notify,
		.priority	= INT_MAX-1, /* last */
	};
	unsigned int val = 0;

	BUG_ON(register_die_notifier(&int3_exception_nb));

	/*
	 * Basically: int3_magic(&val); but really complicated :-)
	 *
	 * Stick the address of the INT3 instruction into int3_selftest_ip,
	 * then trigger the INT3, padded with NOPs to match a CALL instruction
	 * length.
	 */
	asm volatile ("1: int3; nop; nop; nop; nop\n\t"
		      ".pushsection .init.data,\"aw\"\n\t"
		      ".align " __ASM_SEL(4, 8) "\n\t"
		      ".type int3_selftest_ip, @object\n\t"
		      ".size int3_selftest_ip, " __ASM_SEL(4, 8) "\n\t"
		      "int3_selftest_ip:\n\t"
		      __ASM_SEL(.long, .quad) " 1b\n\t"
		      ".popsection\n\t"
		      : ASM_CALL_CONSTRAINT
		      : __ASM_SEL_RAW(a, D) (&val)
		      : "memory");

	BUG_ON(val != 1);

	unregister_die_notifier(&int3_exception_nb);
}

void __init alternative_instructions(void)
{
	int3_selftest();

	/*
	 * The patching is not fully atomic, so try to avoid local
	 * interruptions that might execute the to-be-patched code.
	 * Other CPUs are not running.
	 */
	stop_nmi();

	/*
	 * Don't stop machine check exceptions while patching.
	 * MCEs only happen when something got corrupted and in this
	 * case we must do something about the corruption.
	 * Ignoring it is worse than an unlikely patching race.
	 * Also machine checks tend to be broadcast and if one CPU
	 * goes into machine check the others follow quickly, so we don't
	 * expect a machine check to cause undue problems during code
	 * patching.
	 */

	apply_alternatives(__alt_instructions, __alt_instructions_end);

#ifdef CONFIG_SMP
	/* Patch to UP if other cpus not imminent. */
	if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
		uniproc_patched = true;
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
	}

	if (!uniproc_patched || num_possible_cpus() == 1) {
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);
	}
#endif

	apply_paravirt(__parainstructions, __parainstructions_end);

	restart_nmi();
	alternatives_patched = 1;
}

/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these
 * instructions. And on the local CPU you need to be protected against NMI or
 * MCE handlers seeing an inconsistent instruction while you patch.
 */
void __init_or_module text_poke_early(void *addr, const void *opcode,
				      size_t len)
{
	unsigned long flags;

	if (boot_cpu_has(X86_FEATURE_NX) &&
	    is_module_text_address((unsigned long)addr)) {
		/*
		 * Modules text is marked initially as non-executable, so the
		 * code cannot be running and speculative code-fetches are
		 * prevented. Just change the code.
		 */
		memcpy(addr, opcode, len);
	} else {
		local_irq_save(flags);
		memcpy(addr, opcode, len);
		local_irq_restore(flags);
		sync_core();

		/*
		 * Could also do a CLFLUSH here to speed up CPU recovery; but
		 * that causes hangs on some VIA CPUs.
		 */
	}
}
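/*
 * Illustrative use (a sketch; "site" and "target" are hypothetical
 * addresses of a build-time-emitted 5-byte patchable call and its new
 * destination):
 *
 *	u8 call_insn[5] = { 0xe8, 0, 0, 0, 0 };
 *
 *	*(s32 *)&call_insn[1] = (s32)(target - (site + 5));
 *	text_poke_early(site, call_insn, 5);
 *
 * Safe only while a single CPU runs, which is why apply_alternatives()
 * and apply_paravirt() above use it at boot/module-load time.
 */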

__ro_after_init struct mm_struct *poking_mm;
__ro_after_init unsigned long poking_addr;

static void *__text_poke(void *addr, const void *opcode, size_t len)
{
	bool cross_page_boundary = offset_in_page(addr) + len > PAGE_SIZE;
	struct page *pages[2] = {NULL};
	temp_mm_state_t prev;
	unsigned long flags;
	pte_t pte, *ptep;
	spinlock_t *ptl;
	pgprot_t pgprot;

	/*
	 * While boot memory allocator is running we cannot use struct pages as
	 * they are not yet initialized. There is no way to recover.
	 */
	BUG_ON(!after_bootmem);

	if (!core_kernel_text((unsigned long)addr)) {
		pages[0] = vmalloc_to_page(addr);
		if (cross_page_boundary)
			pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
	} else {
		pages[0] = virt_to_page(addr);
		WARN_ON(!PageReserved(pages[0]));
		if (cross_page_boundary)
			pages[1] = virt_to_page(addr + PAGE_SIZE);
	}
	/*
	 * If something went wrong, crash and burn since recovery paths are not
	 * implemented.
	 */
	BUG_ON(!pages[0] || (cross_page_boundary && !pages[1]));

	local_irq_save(flags);

	/*
	 * Map the page without the global bit, as TLB flushing is done with
	 * flush_tlb_mm_range(), which is intended for non-global PTEs.
	 */
	pgprot = __pgprot(pgprot_val(PAGE_KERNEL) & ~_PAGE_GLOBAL);

	/*
	 * The lock is not really needed, but this allows us to avoid
	 * open-coding.
	 */
	ptep = get_locked_pte(poking_mm, poking_addr, &ptl);

	/*
	 * This must not fail; preallocated in poking_init().
	 */
	VM_BUG_ON(!ptep);

	pte = mk_pte(pages[0], pgprot);
	set_pte_at(poking_mm, poking_addr, ptep, pte);

	if (cross_page_boundary) {
		pte = mk_pte(pages[1], pgprot);
		set_pte_at(poking_mm, poking_addr + PAGE_SIZE, ptep + 1, pte);
	}

	/*
	 * Loading the temporary mm behaves as a compiler barrier, which
	 * guarantees that the PTE will be set at the time memcpy() is done.
	 */
	prev = use_temporary_mm(poking_mm);

	kasan_disable_current();
	memcpy((u8 *)poking_addr + offset_in_page(addr), opcode, len);
	kasan_enable_current();

	/*
	 * Ensure that the PTE is only cleared after the instructions of memcpy
	 * were issued by using a compiler barrier.
	 */
	barrier();

	pte_clear(poking_mm, poking_addr, ptep);
	if (cross_page_boundary)
		pte_clear(poking_mm, poking_addr + PAGE_SIZE, ptep + 1);

	/*
	 * Loading the previous page-table hierarchy requires a serializing
	 * instruction that already allows the core to see the updated version.
	 * Xen-PV is assumed to serialize execution in a similar manner.
	 */
	unuse_temporary_mm(prev);

	/*
	 * Flushing the TLB might involve IPIs, which would require enabled
	 * IRQs, but not if the mm is not used, as is the case at this point.
	 */
	flush_tlb_mm_range(poking_mm, poking_addr, poking_addr +
			   (cross_page_boundary ? 2 : 1) * PAGE_SIZE,
			   PAGE_SHIFT, false);

	/*
	 * If the text does not match what we just wrote then something is
	 * fundamentally screwy; there's nothing we can really do about that.
	 */
	BUG_ON(memcmp(addr, opcode, len));

	pte_unmap_unlock(ptep, ptl);
	local_irq_restore(flags);
	return addr;
}

/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be aligned
 * in a way that permits an atomic write. It also makes sure we fit on a single
 * page.
 *
 * Note that the caller must ensure that if the modified code is part of a
 * module, the module would not be removed during poking. This can be achieved
 * by registering a module notifier, and ordering module removal and patching
 * through a mutex.
 */
void *text_poke(void *addr, const void *opcode, size_t len)
{
	lockdep_assert_held(&text_mutex);

	return __text_poke(addr, opcode, len);
}
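/*
 * Typical call pattern (illustrative): replace a single byte while
 * holding text_mutex, as the SMP-alternatives code above does when it
 * swaps a lock prefix:
 *
 *	mutex_lock(&text_mutex);
 *	text_poke(ptr, ((unsigned char []){0xf0}), 1);
 *	mutex_unlock(&text_mutex);
 */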

/**
 * text_poke_kgdb - Update instructions on a live kernel by kgdb
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be aligned
 * in a way that permits an atomic write. It also makes sure we fit on a single
 * page.
 *
 * Context: should only be used by kgdb, which ensures no other core is running,
 *	    despite the fact it does not hold the text_mutex.
 */
void *text_poke_kgdb(void *addr, const void *opcode, size_t len)
{
	return __text_poke(addr, opcode, len);
}

static void do_sync_core(void *info)
{
	sync_core();
}

static struct bp_patching_desc {
	struct text_poke_loc *vec;
	int nr_entries;
} bp_patching;

static int patch_cmp(const void *key, const void *elt)
{
	struct text_poke_loc *tp = (struct text_poke_loc *) elt;

	if (key < tp->addr)
		return -1;
	if (key > tp->addr)
		return 1;
	return 0;
}
NOKPROBE_SYMBOL(patch_cmp);

int poke_int3_handler(struct pt_regs *regs)
{
	struct text_poke_loc *tp;
	unsigned char int3 = 0xcc;
	void *ip;

	/*
	 * Having observed our INT3 instruction, we now must observe
	 * bp_patching.nr_entries.
	 *
	 *	nr_entries != 0			INT3
	 *	WMB				RMB
	 *	write INT3			if (nr_entries)
	 *
	 * Idem for other elements in bp_patching.
	 */
	smp_rmb();

	if (likely(!bp_patching.nr_entries))
		return 0;

	if (user_mode(regs))
		return 0;

	/*
	 * Discount the sizeof(int3). See text_poke_bp_batch().
	 */
	ip = (void *) regs->ip - sizeof(int3);

	/*
	 * Skip the binary search if there is a single member in the vector.
	 */
	if (unlikely(bp_patching.nr_entries > 1)) {
		tp = bsearch(ip, bp_patching.vec, bp_patching.nr_entries,
			     sizeof(struct text_poke_loc),
			     patch_cmp);
		if (!tp)
			return 0;
	} else {
		tp = bp_patching.vec;
		if (tp->addr != ip)
			return 0;
	}

	/* set up the specified breakpoint detour */
	regs->ip = (unsigned long) tp->detour;

	return 1;
}
NOKPROBE_SYMBOL(poke_int3_handler);

/**
 * text_poke_bp_batch() -- update instructions on live kernel on SMP
 * @tp:			vector of instructions to patch
 * @nr_entries:		number of entries in the vector
 *
 * Modify multi-byte instructions by using int3 breakpoints on SMP.
 * We completely avoid stop_machine() here, and achieve the
 * synchronization using int3 breakpoints.
 *
 * The way it is done:
 *	- For each entry in the vector:
 *		- add an int3 trap to the address that will be patched
 *	- sync cores
 *	- For each entry in the vector:
 *		- update all but the first byte of the patched range
 *	- sync cores
 *	- For each entry in the vector:
 *		- replace the first byte (int3) by the first byte of the
 *		  replacing opcode
 *	- sync cores
 */
void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
{
	int patched_all_but_first = 0;
	unsigned char int3 = 0xcc;
	unsigned int i;

	lockdep_assert_held(&text_mutex);

	bp_patching.vec = tp;
	bp_patching.nr_entries = nr_entries;

	/*
	 * Corresponding read barrier in int3 notifier for making sure the
	 * nr_entries and handler are correctly ordered wrt. patching.
	 */
	smp_wmb();

	/*
	 * First step: add an int3 trap to the address that will be patched.
	 */
	for (i = 0; i < nr_entries; i++)
		text_poke(tp[i].addr, &int3, sizeof(int3));

	on_each_cpu(do_sync_core, NULL, 1);

	/*
	 * Second step: update all but the first byte of the patched range.
	 */
	for (i = 0; i < nr_entries; i++) {
		if (tp[i].len - sizeof(int3) > 0) {
			text_poke((char *)tp[i].addr + sizeof(int3),
				  (const char *)tp[i].opcode + sizeof(int3),
				  tp[i].len - sizeof(int3));
			patched_all_but_first++;
		}
	}

	if (patched_all_but_first) {
		/*
		 * According to Intel, this core syncing is very likely
		 * not necessary and we'd be safe even without it. But
		 * better safe than sorry (plus there's not only Intel).
		 */
		on_each_cpu(do_sync_core, NULL, 1);
	}

	/*
	 * Third step: replace the first byte (int3) by the first byte of the
	 * replacing opcode.
	 */
	for (i = 0; i < nr_entries; i++)
		text_poke(tp[i].addr, tp[i].opcode, sizeof(int3));

	on_each_cpu(do_sync_core, NULL, 1);
	/*
	 * sync_core() implies an smp_mb() and orders this store against
	 * the writing of the new instruction.
	 */
	bp_patching.vec = NULL;
	bp_patching.nr_entries = 0;
}

/**
 * text_poke_bp() -- update instructions on live kernel on SMP
 * @addr:	address to patch
 * @opcode:	opcode of new instruction
 * @len:	length to copy
 * @handler:	address to jump to when the temporary breakpoint is hit
 *
 * Update a single instruction with the vector on the stack, avoiding
 * dynamically allocated memory. This function should be used when it is
 * not possible to allocate memory.
 */
void text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
{
	struct text_poke_loc tp = {
		.detour = handler,
		.addr = addr,
		.len = len,
	};

	if (len > POKE_MAX_OPCODE_SIZE) {
		WARN_ONCE(1, "len is larger than %d\n", POKE_MAX_OPCODE_SIZE);
		return;
	}

	memcpy((void *)tp.opcode, opcode, len);

	text_poke_bp_batch(&tp, 1);
}
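/*
 * Illustrative caller (a sketch; "addr", "new_insn" and "resume_here"
 * are hypothetical names): swap a live 5-byte instruction for another,
 * with concurrent hits on the temporary int3 detoured to resume_here
 * until the new bytes are fully in place:
 *
 *	mutex_lock(&text_mutex);
 *	text_poke_bp(addr, new_insn, 5, resume_here);
 *	mutex_unlock(&text_mutex);
 */
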
// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) "SMP alternatives: " fmt

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <linux/slab.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/mmu_context.h>
#include <linux/bsearch.h>
#include <linux/sync_core.h>
#include <asm/text-patching.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/insn.h>
#include <asm/io.h>
#include <asm/fixmap.h>
#include <asm/paravirt.h>

int __read_mostly alternatives_patched;

EXPORT_SYMBOL_GPL(alternatives_patched);

#define MAX_PATCH_LEN (255-1)

static int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);

static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);

#define DPRINTK(fmt, args...)						\
do {									\
	if (debug_alternative)						\
		printk(KERN_DEBUG pr_fmt(fmt) "\n", ##args);		\
} while (0)

#define DUMP_BYTES(buf, len, fmt, args...)				\
do {									\
	if (unlikely(debug_alternative)) {				\
		int j;							\
									\
		if (!(len))						\
			break;						\
									\
		printk(KERN_DEBUG pr_fmt(fmt), ##args);			\
		for (j = 0; j < (len) - 1; j++)				\
			printk(KERN_CONT "%02hhx ", buf[j]);		\
		printk(KERN_CONT "%02hhx\n", buf[j]);			\
	}								\
} while (0)

static const unsigned char x86nops[] =
{
	BYTES_NOP1,
	BYTES_NOP2,
	BYTES_NOP3,
	BYTES_NOP4,
	BYTES_NOP5,
	BYTES_NOP6,
	BYTES_NOP7,
	BYTES_NOP8,
};

const unsigned char * const x86_nops[ASM_NOP_MAX+1] =
{
	NULL,
	x86nops,
	x86nops + 1,
	x86nops + 1 + 2,
	x86nops + 1 + 2 + 3,
	x86nops + 1 + 2 + 3 + 4,
	x86nops + 1 + 2 + 3 + 4 + 5,
	x86nops + 1 + 2 + 3 + 4 + 5 + 6,
	x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};

/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
{
	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, x86_nops[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}

extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
void text_poke_early(void *addr, const void *opcode, size_t len);

/*
 * Are we looking at a near JMP with a 1 or 4-byte displacement?
 */
static inline bool is_jmp(const u8 opcode)
{
	return opcode == 0xeb || opcode == 0xe9;
}

static void __init_or_module
recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insn_buff)
{
	u8 *next_rip, *tgt_rip;
	s32 n_dspl, o_dspl;
	int repl_len;

	if (a->replacementlen != 5)
		return;

	o_dspl = *(s32 *)(insn_buff + 1);

	/* next_rip of the replacement JMP */
	next_rip = repl_insn + a->replacementlen;
	/* target rip of the replacement JMP */
	tgt_rip = next_rip + o_dspl;
	n_dspl = tgt_rip - orig_insn;

	DPRINTK("target RIP: %px, new_displ: 0x%x", tgt_rip, n_dspl);

	if (tgt_rip - orig_insn >= 0) {
		if (n_dspl - 2 <= 127)
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	/* negative offset */
	} else {
		if (((n_dspl - 2) & 0xff) == (n_dspl - 2))
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	}

two_byte_jmp:
	n_dspl -= 2;

	insn_buff[0] = 0xeb;
	insn_buff[1] = (s8)n_dspl;
	add_nops(insn_buff + 2, 3);

	repl_len = 2;
	goto done;

five_byte_jmp:
	n_dspl -= 5;

	insn_buff[0] = 0xe9;
	*(s32 *)&insn_buff[1] = n_dspl;

	repl_len = 5;

done:

	DPRINTK("final displ: 0x%08x, JMP 0x%lx",
		n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
}

/*
 * optimize_nops_range() - Optimize a sequence of single byte NOPs (0x90)
 *
 * @instr: instruction byte stream
 * @instrlen: length of the above
 * @off: offset within @instr where the first NOP has been detected
 *
 * Return: number of NOPs found (and replaced).
 */
static __always_inline int optimize_nops_range(u8 *instr, u8 instrlen, int off)
{
	unsigned long flags;
	int i = off, nnops;

	while (i < instrlen) {
		if (instr[i] != 0x90)
			break;

		i++;
	}

	nnops = i - off;

	if (nnops <= 1)
		return nnops;

	local_irq_save(flags);
	add_nops(instr + off, nnops);
	local_irq_restore(flags);

	DUMP_BYTES(instr, instrlen, "%px: [%d:%d) optimized NOPs: ", instr, off, i);

	return nnops;
}
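/*
 * Worked example (illustrative byte values): given
 *
 *	instr = { 0x89, 0xc8, 0x90, 0x90, 0x90, 0x90 }, off = 2
 *
 * the scan above finds nnops = 4 single-byte NOPs, and add_nops()
 * rewrites bytes [2, 6) with one 4-byte nop (0x0f 0x1f 0x40 0x00 in
 * the BYTES_NOP4 encoding), leaving length and semantics unchanged
 * but making the sequence cheaper to decode.
 */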

/*
 * "noinline" to cause control flow change and thus invalidate I$ and
 * cause refetch after modification.
 */
static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr)
{
	struct insn insn;
	int i = 0;

	/*
	 * Jump over the non-NOP insns and optimize single-byte NOPs into bigger
	 * ones.
	 */
	for (;;) {
		if (insn_decode_kernel(&insn, &instr[i]))
			return;

		/*
		 * See if this and any potentially following NOPs can be
		 * optimized.
		 */
		if (insn.length == 1 && insn.opcode.bytes[0] == 0x90)
			i += optimize_nops_range(instr, a->instrlen, i);
		else
			i += insn.length;

		if (i >= a->instrlen)
			return;
	}
}

/*
 * Replace instructions with better alternatives for this CPU type. This runs
 * before SMP is initialized to avoid SMP problems with self modifying code.
 * This implies that asymmetric systems where APs have less capabilities than
 * the boot processor are not handled. Tough. Make sure you disable such
 * features by hand.
 *
 * Marked "noinline" to cause control flow change and thus insn cache
 * to refetch changed I$ lines.
 */
void __init_or_module noinline apply_alternatives(struct alt_instr *start,
						  struct alt_instr *end)
{
	struct alt_instr *a;
	u8 *instr, *replacement;
	u8 insn_buff[MAX_PATCH_LEN];

	DPRINTK("alt table %px, -> %px", start, end);
	/*
	 * The scan order should be from start to end. A later scanned
	 * alternative code can overwrite previously scanned alternative code.
	 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
	 * patch code.
	 *
	 * So be careful if you want to change the scan order to any other
	 * order.
	 */
	for (a = start; a < end; a++) {
		int insn_buff_sz = 0;
		/* Mask away "NOT" flag bit for feature to test. */
		u16 feature = a->cpuid & ~ALTINSTR_FLAG_INV;

		instr = (u8 *)&a->instr_offset + a->instr_offset;
		replacement = (u8 *)&a->repl_offset + a->repl_offset;
		BUG_ON(a->instrlen > sizeof(insn_buff));
		BUG_ON(feature >= (NCAPINTS + NBUGINTS) * 32);

		/*
		 * Patch if either:
		 * - feature is present
		 * - feature not present but ALTINSTR_FLAG_INV is set to mean,
		 *   patch if feature is *NOT* present.
		 */
		if (!boot_cpu_has(feature) == !(a->cpuid & ALTINSTR_FLAG_INV))
			goto next;

		DPRINTK("feat: %s%d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d)",
			(a->cpuid & ALTINSTR_FLAG_INV) ? "!" : "",
			feature >> 5,
			feature & 0x1f,
			instr, instr, a->instrlen,
			replacement, a->replacementlen);

		DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr);
		DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement);

		memcpy(insn_buff, replacement, a->replacementlen);
		insn_buff_sz = a->replacementlen;

		/*
		 * 0xe8 is a relative CALL; fix the offset.
		 *
		 * Instruction length is checked before the opcode to avoid
		 * accessing uninitialized bytes for zero-length replacements.
		 */
		if (a->replacementlen == 5 && *insn_buff == 0xe8) {
			*(s32 *)(insn_buff + 1) += replacement - instr;
			DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
				*(s32 *)(insn_buff + 1),
				(unsigned long)instr + *(s32 *)(insn_buff + 1) + 5);
		}

		if (a->replacementlen && is_jmp(replacement[0]))
			recompute_jump(a, instr, replacement, insn_buff);

		for (; insn_buff_sz < a->instrlen; insn_buff_sz++)
			insn_buff[insn_buff_sz] = 0x90;

		DUMP_BYTES(insn_buff, insn_buff_sz, "%px: final_insn: ", instr);

		text_poke_early(instr, insn_buff, insn_buff_sz);

next:
		optimize_nops(a, instr);
	}
}
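/*
 * A sketch of how the inverted-feature form is typically used,
 * assuming the ALTERNATIVE_TERNARY() helper from <asm/alternative.h>
 * (which expands to one normal and one ALTINSTR_FLAG_INV entry); the
 * handler names are hypothetical:
 *
 *	asm volatile (ALTERNATIVE_TERNARY("call default_handler",
 *					  X86_FEATURE_XENPV,
 *					  "call xenpv_handler",
 *					  "call native_handler"));
 *
 * Details vary by kernel version; treat this as illustrative only.
 */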

#ifdef CONFIG_SMP
static void alternatives_smp_lock(const s32 *start, const s32 *end,
				  u8 *text, u8 *text_end)
{
	const s32 *poff;

	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn DS segment override prefix into lock prefix */
		if (*ptr == 0x3e)
			text_poke(ptr, ((unsigned char []){0xf0}), 1);
	}
}

static void alternatives_smp_unlock(const s32 *start, const s32 *end,
				    u8 *text, u8 *text_end)
{
	const s32 *poff;

	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn lock prefix into DS segment override prefix */
		if (*ptr == 0xf0)
			text_poke(ptr, ((unsigned char []){0x3E}), 1);
	}
}

struct smp_alt_module {
	/* what is this ??? */
	struct module	*mod;
	char		*name;

	/* ptrs to lock prefixes */
	const s32	*locks;
	const s32	*locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8		*text;
	u8		*text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static bool uniproc_patched = false;	/* protected by text_mutex */

void __init_or_module alternatives_smp_module_add(struct module *mod,
						  char *name,
						  void *locks, void *locks_end,
						  void *text,  void *text_end)
{
	struct smp_alt_module *smp;

	mutex_lock(&text_mutex);
	if (!uniproc_patched)
		goto unlock;

	if (num_possible_cpus() == 1)
		/* Don't bother remembering, we'll never have to undo it. */
		goto smp_unlock;

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (!smp)
		/* we'll run the (safe but slow) SMP code then ... */
		goto unlock;

	smp->mod	= mod;
	smp->name	= name;
	smp->locks	= locks;
	smp->locks_end	= locks_end;
	smp->text	= text;
	smp->text_end	= text_end;
	DPRINTK("locks %p -> %p, text %p -> %p, name %s",
		smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	list_add_tail(&smp->next, &smp_alt_modules);
smp_unlock:
	alternatives_smp_unlock(locks, locks_end, text, text_end);
unlock:
	mutex_unlock(&text_mutex);
}

void __init_or_module alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;

	mutex_lock(&text_mutex);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		kfree(item);
		break;
	}
	mutex_unlock(&text_mutex);
}

void alternatives_enable_smp(void)
{
	struct smp_alt_module *mod;

	/* Why bother if there are no other CPUs? */
	BUG_ON(num_possible_cpus() == 1);

	mutex_lock(&text_mutex);

	if (uniproc_patched) {
		pr_info("switching to SMP code\n");
		BUG_ON(num_online_cpus() != 1);
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
		uniproc_patched = false;
	}
	mutex_unlock(&text_mutex);
}

/*
 * Return 1 if the address range is reserved for SMP-alternatives.
 * Must hold text_mutex.
 */
int alternatives_text_reserved(void *start, void *end)
{
	struct smp_alt_module *mod;
	const s32 *poff;
	u8 *text_start = start;
	u8 *text_end = end;

	lockdep_assert_held(&text_mutex);

	list_for_each_entry(mod, &smp_alt_modules, next) {
		if (mod->text > text_end || mod->text_end < text_start)
			continue;
		for (poff = mod->locks; poff < mod->locks_end; poff++) {
			const u8 *ptr = (const u8 *)poff + *poff;

			if (text_start <= ptr && text_end > ptr)
				return 1;
		}
	}

	return 0;
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PARAVIRT
void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
				     struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;
	char insn_buff[MAX_PATCH_LEN];

	for (p = start; p < end; p++) {
		unsigned int used;

		BUG_ON(p->len > MAX_PATCH_LEN);
		/* prep the buffer with the original instructions */
		memcpy(insn_buff, p->instr, p->len);
		used = paravirt_patch(p->type, insn_buff, (unsigned long)p->instr, p->len);

		BUG_ON(used > p->len);

		/* Pad the rest with nops */
		add_nops(insn_buff + used, p->len - used);
		text_poke_early(p->instr, insn_buff, p->len);
	}
}
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif	/* CONFIG_PARAVIRT */

/*
 * Self-test for the INT3 based CALL emulation code.
 *
 * This exercises int3_emulate_call() to make sure INT3 pt_regs are set up
 * properly and that there is a stack gap between the INT3 frame and the
 * previous context. Without this gap doing a virtual PUSH on the interrupted
 * stack would corrupt the INT3 IRET frame.
 *
 * See entry_{32,64}.S for more details.
 */

/*
 * We define the int3_magic() function in assembly to control the calling
 * convention such that we can 'call' it from assembly.
 */

extern void int3_magic(unsigned int *ptr); /* defined in asm */

asm (
"	.pushsection	.init.text, \"ax\", @progbits\n"
"	.type		int3_magic, @function\n"
"int3_magic:\n"
"	movl	$1, (%" _ASM_ARG1 ")\n"
"	ret\n"
"	.size		int3_magic, .-int3_magic\n"
"	.popsection\n"
);

extern __initdata unsigned long int3_selftest_ip; /* defined in asm below */

static int __init
int3_exception_notify(struct notifier_block *self, unsigned long val, void *data)
{
	struct die_args *args = data;
	struct pt_regs *regs = args->regs;

	if (!regs || user_mode(regs))
		return NOTIFY_DONE;

	if (val != DIE_INT3)
		return NOTIFY_DONE;

	if (regs->ip - INT3_INSN_SIZE != int3_selftest_ip)
		return NOTIFY_DONE;

	int3_emulate_call(regs, (unsigned long)&int3_magic);
	return NOTIFY_STOP;
}

static void __init int3_selftest(void)
{
	static __initdata struct notifier_block int3_exception_nb = {
		.notifier_call	= int3_exception_notify,
		.priority	= INT_MAX-1, /* last */
	};
	unsigned int val = 0;

	BUG_ON(register_die_notifier(&int3_exception_nb));

	/*
	 * Basically: int3_magic(&val); but really complicated :-)
	 *
	 * Stick the address of the INT3 instruction into int3_selftest_ip,
	 * then trigger the INT3, padded with NOPs to match a CALL instruction
	 * length.
	 */
	asm volatile ("1: int3; nop; nop; nop; nop\n\t"
		      ".pushsection .init.data,\"aw\"\n\t"
		      ".align " __ASM_SEL(4, 8) "\n\t"
		      ".type int3_selftest_ip, @object\n\t"
		      ".size int3_selftest_ip, " __ASM_SEL(4, 8) "\n\t"
		      "int3_selftest_ip:\n\t"
		      __ASM_SEL(.long, .quad) " 1b\n\t"
		      ".popsection\n\t"
		      : ASM_CALL_CONSTRAINT
		      : __ASM_SEL_RAW(a, D) (&val)
		      : "memory");

	BUG_ON(val != 1);

	unregister_die_notifier(&int3_exception_nb);
}

void __init alternative_instructions(void)
{
	int3_selftest();

	/*
	 * The patching is not fully atomic, so try to avoid local
	 * interruptions that might execute the to-be-patched code.
	 * Other CPUs are not running.
	 */
	stop_nmi();

	/*
	 * Don't stop machine check exceptions while patching.
	 * MCEs only happen when something got corrupted and in this
	 * case we must do something about the corruption.
	 * Ignoring it is worse than an unlikely patching race.
	 * Also machine checks tend to be broadcast and if one CPU
	 * goes into machine check the others follow quickly, so we don't
	 * expect a machine check to cause undue problems during code
	 * patching.
	 */

	/*
	 * Paravirt patching and alternative patching can be combined to
	 * replace a function call with a short direct code sequence (e.g.
	 * by setting a constant return value instead of doing that in an
	 * external function).
	 * In order to make this work the following sequence is required:
	 * 1. set (artificial) features depending on used paravirt
	 *    functions which can later influence alternative patching
	 * 2. apply paravirt patching (generally replacing an indirect
	 *    function call with a direct one)
	 * 3. apply alternative patching (e.g. replacing a direct function
	 *    call with a custom code sequence)
	 * Doing paravirt patching after alternative patching would clobber
	 * the optimization of the custom code with a function call again.
	 */
	paravirt_set_cap();

	/*
	 * First patch paravirt functions, such that we overwrite the indirect
	 * call with the direct call.
	 */
	apply_paravirt(__parainstructions, __parainstructions_end);

	/*
	 * Then patch alternatives, such that those paravirt calls that are in
	 * alternatives can be overwritten by their immediate fragments.
	 */
	apply_alternatives(__alt_instructions, __alt_instructions_end);

#ifdef CONFIG_SMP
	/* Patch to UP if other cpus not imminent. */
	if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
		uniproc_patched = true;
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
	}

	if (!uniproc_patched || num_possible_cpus() == 1) {
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);
	}
#endif

	restart_nmi();
	alternatives_patched = 1;
}

/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these
 * instructions. And on the local CPU you need to be protected against NMI or
 * MCE handlers seeing an inconsistent instruction while you patch.
 */
void __init_or_module text_poke_early(void *addr, const void *opcode,
				      size_t len)
{
	unsigned long flags;

	if (boot_cpu_has(X86_FEATURE_NX) &&
	    is_module_text_address((unsigned long)addr)) {
		/*
		 * Modules text is marked initially as non-executable, so the
		 * code cannot be running and speculative code-fetches are
		 * prevented. Just change the code.
		 */
		memcpy(addr, opcode, len);
	} else {
		local_irq_save(flags);
		memcpy(addr, opcode, len);
		local_irq_restore(flags);
		sync_core();

		/*
		 * Could also do a CLFLUSH here to speed up CPU recovery; but
		 * that causes hangs on some VIA CPUs.
		 */
	}
}

typedef struct {
	struct mm_struct *mm;
} temp_mm_state_t;

/*
 * Using a temporary mm makes it possible to set temporary mappings that are
 * not accessible by other CPUs. Such mappings are needed to perform sensitive
 * memory writes that override the kernel memory protections (e.g., W^X),
 * without exposing the temporary page-table mappings that are required for
 * these write operations to other CPUs. Using a temporary mm also avoids TLB
 * shootdowns when the mapping is torn down.
 *
 * Context: The temporary mm needs to be used exclusively by a single core. To
 *          harden security, IRQs must be disabled while the temporary mm is
 *          loaded, thereby preventing interrupt handler bugs from overriding
 *          the kernel memory protection.
 */
static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
{
	temp_mm_state_t temp_state;

	lockdep_assert_irqs_disabled();

	/*
	 * Make sure not to be in TLB lazy mode, as otherwise we'll end up
	 * with a stale address space WITHOUT being in lazy mode after
	 * restoring the previous mm.
	 */
	if (this_cpu_read(cpu_tlbstate_shared.is_lazy))
		leave_mm(smp_processor_id());

	temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
	switch_mm_irqs_off(NULL, mm, current);

	/*
	 * If breakpoints are enabled, disable them while the temporary mm is
	 * used. Userspace might set up watchpoints on addresses that are used
	 * in the temporary mm, which would lead to wrong signals being sent or
	 * crashes.
	 *
	 * Note that breakpoints are not disabled selectively, which also causes
	 * kernel breakpoints (e.g., perf's) to be disabled. This might be
	 * undesirable, but still seems reasonable as the code that runs in the
	 * temporary mm should be short.
	 */
	if (hw_breakpoint_active())
		hw_breakpoint_disable();

	return temp_state;
}

static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
{
	lockdep_assert_irqs_disabled();
	switch_mm_irqs_off(NULL, prev_state.mm, current);

	/*
	 * Restore the breakpoints if they were disabled before the temporary mm
	 * was loaded.
	 */
	if (hw_breakpoint_active())
		hw_breakpoint_restore();
}
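/*
 * Usage pattern (a sketch; __text_poke() below is the real caller):
 *
 *	temp_mm_state_t prev;
 *
 *	local_irq_save(flags);			// IRQs stay off throughout
 *	prev = use_temporary_mm(poking_mm);
 *	... write through the temporary mapping ...
 *	unuse_temporary_mm(prev);
 *	local_irq_restore(flags);
 */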

__ro_after_init struct mm_struct *poking_mm;
__ro_after_init unsigned long poking_addr;

static void *__text_poke(void *addr, const void *opcode, size_t len)
{
	bool cross_page_boundary = offset_in_page(addr) + len > PAGE_SIZE;
	struct page *pages[2] = {NULL};
	temp_mm_state_t prev;
	unsigned long flags;
	pte_t pte, *ptep;
	spinlock_t *ptl;
	pgprot_t pgprot;

	/*
	 * While boot memory allocator is running we cannot use struct pages as
	 * they are not yet initialized. There is no way to recover.
	 */
	BUG_ON(!after_bootmem);

	if (!core_kernel_text((unsigned long)addr)) {
		pages[0] = vmalloc_to_page(addr);
		if (cross_page_boundary)
			pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
	} else {
		pages[0] = virt_to_page(addr);
		WARN_ON(!PageReserved(pages[0]));
		if (cross_page_boundary)
			pages[1] = virt_to_page(addr + PAGE_SIZE);
	}
	/*
	 * If something went wrong, crash and burn since recovery paths are not
	 * implemented.
	 */
	BUG_ON(!pages[0] || (cross_page_boundary && !pages[1]));

	/*
	 * Map the page without the global bit, as TLB flushing is done with
	 * flush_tlb_mm_range(), which is intended for non-global PTEs.
	 */
	pgprot = __pgprot(pgprot_val(PAGE_KERNEL) & ~_PAGE_GLOBAL);

	/*
	 * The lock is not really needed, but this allows us to avoid
	 * open-coding.
	 */
	ptep = get_locked_pte(poking_mm, poking_addr, &ptl);

	/*
	 * This must not fail; preallocated in poking_init().
	 */
	VM_BUG_ON(!ptep);

	local_irq_save(flags);

	pte = mk_pte(pages[0], pgprot);
	set_pte_at(poking_mm, poking_addr, ptep, pte);

	if (cross_page_boundary) {
		pte = mk_pte(pages[1], pgprot);
		set_pte_at(poking_mm, poking_addr + PAGE_SIZE, ptep + 1, pte);
	}

	/*
	 * Loading the temporary mm behaves as a compiler barrier, which
	 * guarantees that the PTE will be set at the time memcpy() is done.
	 */
	prev = use_temporary_mm(poking_mm);

	kasan_disable_current();
	memcpy((u8 *)poking_addr + offset_in_page(addr), opcode, len);
	kasan_enable_current();

	/*
	 * Ensure that the PTE is only cleared after the instructions of memcpy
	 * were issued by using a compiler barrier.
	 */
	barrier();

	pte_clear(poking_mm, poking_addr, ptep);
	if (cross_page_boundary)
		pte_clear(poking_mm, poking_addr + PAGE_SIZE, ptep + 1);

	/*
	 * Loading the previous page-table hierarchy requires a serializing
	 * instruction that already allows the core to see the updated version.
	 * Xen-PV is assumed to serialize execution in a similar manner.
	 */
	unuse_temporary_mm(prev);

	/*
	 * Flushing the TLB might involve IPIs, which would require enabled
	 * IRQs, but not if the mm is not used, as is the case at this point.
	 */
	flush_tlb_mm_range(poking_mm, poking_addr, poking_addr +
			   (cross_page_boundary ? 2 : 1) * PAGE_SIZE,
			   PAGE_SHIFT, false);

	/*
	 * If the text does not match what we just wrote then something is
	 * fundamentally screwy; there's nothing we can really do about that.
	 */
	BUG_ON(memcmp(addr, opcode, len));

	local_irq_restore(flags);
	pte_unmap_unlock(ptep, ptl);
	return addr;
}

/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * This means the size must be writable atomically and the address must be
 * aligned in a way that permits an atomic write. It also makes sure the poke
 * fits on a single page.
 *
 * Note that the caller must ensure that if the modified code is part of a
 * module, the module is not removed during poking. This can be achieved by
 * registering a module notifier, and ordering module removal and patching
 * through a mutex.
 */
void *text_poke(void *addr, const void *opcode, size_t len)
{
	lockdep_assert_held(&text_mutex);

	return __text_poke(addr, opcode, len);
}
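
/*
 * A minimal usage sketch (illustrative only; "addr", "new_insn" and "len"
 * are hypothetical):
 *
 *	mutex_lock(&text_mutex);
 *	text_poke(addr, new_insn, len);
 *	text_poke_sync();
 *	mutex_unlock(&text_mutex);
 *
 * The text_poke_sync() afterwards (defined below) makes every CPU re-fetch
 * the modified bytes via a serializing IPI.
 */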

/**
 * text_poke_kgdb - Update instructions on a live kernel by kgdb
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * This means the size must be writable atomically and the address must be
 * aligned in a way that permits an atomic write. It also makes sure the poke
 * fits on a single page.
 *
 * Context: should only be used by kgdb, which ensures no other core is
 * running, despite the fact that it does not hold text_mutex.
 */
void *text_poke_kgdb(void *addr, const void *opcode, size_t len)
{
	return __text_poke(addr, opcode, len);
}

static void do_sync_core(void *info)
{
	sync_core();
}

/*
 * Run a serializing instruction on every CPU (via IPI) and wait for
 * completion, so that all cores re-fetch the freshly patched instruction
 * bytes.
 */
void text_poke_sync(void)
{
	on_each_cpu(do_sync_core, NULL, 1);
}

struct text_poke_loc {
	s32 rel_addr; /* addr := _stext + rel_addr */
	s32 rel32;    /* displacement for the emulated CALL/JMP */
	u8 opcode;    /* first opcode byte of the emulated instruction */
	const u8 text[POKE_MAX_OPCODE_SIZE]; /* new instruction bytes */
	u8 old;       /* original first byte, for perf_event_text_poke() */
};

struct bp_patching_desc {
	struct text_poke_loc *vec;
	int nr_entries;
	atomic_t refs;
};

static struct bp_patching_desc *bp_desc;

static __always_inline
struct bp_patching_desc *try_get_desc(struct bp_patching_desc **descp)
{
	struct bp_patching_desc *desc = __READ_ONCE(*descp); /* rcu_dereference */

	if (!desc || !arch_atomic_inc_not_zero(&desc->refs))
		return NULL;

	return desc;
}

static __always_inline void put_desc(struct bp_patching_desc *desc)
{
	/*
	 * Order this CPU's reads of the descriptor before the reference
	 * drop; pairs with atomic_cond_read_acquire() in text_poke_bp_batch().
	 */
	smp_mb__before_atomic();
	arch_atomic_dec(&desc->refs);
}

static __always_inline void *text_poke_addr(struct text_poke_loc *tp)
{
	return _stext + tp->rel_addr;
}

static __always_inline int patch_cmp(const void *key, const void *elt)
{
	struct text_poke_loc *tp = (struct text_poke_loc *) elt;

	if (key < text_poke_addr(tp))
		return -1;
	if (key > text_poke_addr(tp))
		return 1;
	return 0;
}
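
/*
 * Note: the __inline_bsearch() in poke_int3_handler() below is only correct
 * because the vector handed to text_poke_bp_batch() is sorted by patch
 * address; tp_order_fail()/text_poke_flush() further down enforce this for
 * the batching path by flushing the queue early rather than accepting an
 * out-of-order entry.
 */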

noinstr int poke_int3_handler(struct pt_regs *regs)
{
	struct bp_patching_desc *desc;
	struct text_poke_loc *tp;
	int len, ret = 0;
	void *ip;

	if (user_mode(regs))
		return 0;

	/*
	 * Having observed our INT3 instruction, we now must observe
	 * bp_desc:
	 *
	 *	bp_desc = desc			INT3
	 *	WMB				RMB
	 *	write INT3			if (desc)
	 */
	smp_rmb();

	desc = try_get_desc(&bp_desc);
	if (!desc)
		return 0;

	/*
	 * Discount the INT3. See text_poke_bp_batch().
	 */
	ip = (void *) regs->ip - INT3_INSN_SIZE;

	/*
	 * Skip the binary search if there is a single member in the vector.
	 */
	if (unlikely(desc->nr_entries > 1)) {
		tp = __inline_bsearch(ip, desc->vec, desc->nr_entries,
				      sizeof(struct text_poke_loc),
				      patch_cmp);
		if (!tp)
			goto out_put;
	} else {
		tp = desc->vec;
		if (text_poke_addr(tp) != ip)
			goto out_put;
	}

	len = text_opcode_size(tp->opcode);
	ip += len;

	switch (tp->opcode) {
	case INT3_INSN_OPCODE:
		/*
		 * Someone poked an explicit INT3, they'll want to handle it,
		 * do not consume.
		 */
		goto out_put;

	case RET_INSN_OPCODE:
		int3_emulate_ret(regs);
		break;

	case CALL_INSN_OPCODE:
		int3_emulate_call(regs, (long)ip + tp->rel32);
		break;

	case JMP32_INSN_OPCODE:
	case JMP8_INSN_OPCODE:
		int3_emulate_jmp(regs, (long)ip + tp->rel32);
		break;

	default:
		BUG();
	}

	ret = 1;

out_put:
	put_desc(desc);
	return ret;
}
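
/*
 * Sketch of the emulation above, based on the int3_emulate_*() helpers from
 * <asm/text-patching.h>: by the time the switch runs, "ip" points just past
 * the patched instruction. An emulated JMP simply sets
 * regs->ip = ip + tp->rel32; an emulated CALL additionally pushes "ip" (the
 * return address) onto the stack first; an emulated RET pops the return
 * address into regs->ip.
 */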

#define TP_VEC_MAX (PAGE_SIZE / sizeof(struct text_poke_loc))
static struct text_poke_loc tp_vec[TP_VEC_MAX];
static int tp_vec_nr;

/**
 * text_poke_bp_batch() -- update instructions on live kernel on SMP
 * @tp: vector of instructions to patch
 * @nr_entries: number of entries in the vector
 *
 * Modify multi-byte instructions by using an int3 breakpoint on SMP.
 * We completely avoid stop_machine() here, and achieve the
 * synchronization using the int3 breakpoint instead.
 *
 * The way it is done:
 *	- For each entry in the vector:
 *		- add an int3 trap to the address that will be patched
 *	- sync cores
 *	- For each entry in the vector:
 *		- update all but the first byte of the patched range
 *	- sync cores
 *	- For each entry in the vector:
 *		- replace the first byte (int3) with the first byte of the
 *		  replacement opcode
 *	- sync cores
 */
static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
{
	struct bp_patching_desc desc = {
		.vec = tp,
		.nr_entries = nr_entries,
		.refs = ATOMIC_INIT(1),
	};
	unsigned char int3 = INT3_INSN_OPCODE;
	unsigned int i;
	int do_sync;

	lockdep_assert_held(&text_mutex);

	smp_store_release(&bp_desc, &desc); /* rcu_assign_pointer */

	/*
	 * Corresponding read barrier in int3 notifier for making sure the
	 * nr_entries and handler are correctly ordered wrt. patching.
	 */
	smp_wmb();

	/*
	 * First step: add an int3 trap to the address that will be patched.
	 */
	for (i = 0; i < nr_entries; i++) {
		tp[i].old = *(u8 *)text_poke_addr(&tp[i]);
		text_poke(text_poke_addr(&tp[i]), &int3, INT3_INSN_SIZE);
	}

	text_poke_sync();

	/*
	 * Second step: update all but the first byte of the patched range.
	 */
	for (do_sync = 0, i = 0; i < nr_entries; i++) {
		u8 old[POKE_MAX_OPCODE_SIZE] = { tp[i].old, };
		int len = text_opcode_size(tp[i].opcode);

		if (len - INT3_INSN_SIZE > 0) {
			memcpy(old + INT3_INSN_SIZE,
			       text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
			       len - INT3_INSN_SIZE);
			text_poke(text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
				  (const char *)tp[i].text + INT3_INSN_SIZE,
				  len - INT3_INSN_SIZE);
			do_sync++;
		}

		/*
		 * Emit a perf event to record the text poke, primarily to
		 * support Intel PT decoding, which must walk the executable
		 * code to reconstruct the trace. The flow up to here is:
		 *	- write INT3 byte
		 *	- IPI-SYNC
		 *	- write instruction tail
		 * At this point the actual control flow will be through the
		 * INT3 and handler and not hit the old or new instruction.
		 * Intel PT outputs FUP/TIP packets for the INT3, so the flow
		 * can still be decoded. Subsequently:
		 *	- emit RECORD_TEXT_POKE with the new instruction
		 *	- IPI-SYNC
		 *	- write first byte
		 *	- IPI-SYNC
		 * So before the text poke event timestamp, the decoder will see
		 * either the old instruction flow or FUP/TIP of INT3. After the
		 * text poke event timestamp, the decoder will see either the
		 * new instruction flow or FUP/TIP of INT3. Thus decoders can
		 * use the timestamp as the point at which to modify the
		 * executable code.
		 * The old instruction is recorded so that the event can be
		 * processed forwards or backwards.
		 */
		perf_event_text_poke(text_poke_addr(&tp[i]), old, len,
				     tp[i].text, len);
	}

	if (do_sync) {
		/*
		 * According to Intel, this core syncing is very likely
		 * not necessary and we'd be safe even without it. But
		 * better safe than sorry (plus there's not only Intel).
		 */
		text_poke_sync();
	}

	/*
	 * Third step: replace the first byte (int3) with the first byte of
	 * the replacement opcode.
	 */
	for (do_sync = 0, i = 0; i < nr_entries; i++) {
		if (tp[i].text[0] == INT3_INSN_OPCODE)
			continue;

		text_poke(text_poke_addr(&tp[i]), tp[i].text, INT3_INSN_SIZE);
		do_sync++;
	}

	if (do_sync)
		text_poke_sync();

	/*
	 * Remove the descriptor and wait for any in-flight int3 handlers to
	 * complete, as synchronize_rcu() would, except we have a very
	 * primitive refcount-based completion.
	 */
	WRITE_ONCE(bp_desc, NULL); /* RCU_INIT_POINTER */
	if (!atomic_dec_and_test(&desc.refs))
		atomic_cond_read_acquire(&desc.refs, !VAL);
}
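
/*
 * Lifetime sketch of the on-stack descriptor above (a restatement, not
 * extra behaviour): text_poke_bp_batch() publishes bp_desc with an initial
 * reference of one; each CPU that traps on one of our INT3s takes a
 * reference in try_get_desc() and drops it in put_desc(). After patching,
 * bp_desc is cleared so no new handlers can find it, the initial reference
 * is dropped, and if handlers are still in flight we spin with acquire
 * semantics until the count hits zero, before the on-stack descriptor goes
 * out of scope.
 */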

static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
			       const void *opcode, size_t len, const void *emulate)
{
	struct insn insn;
	int ret;

	memcpy((void *)tp->text, opcode, len);
	if (!emulate)
		emulate = opcode;

	ret = insn_decode_kernel(&insn, emulate);

	BUG_ON(ret < 0);
	BUG_ON(len != insn.length);

	tp->rel_addr = addr - (void *)_stext;
	tp->opcode = insn.opcode.bytes[0];

	switch (tp->opcode) {
	case INT3_INSN_OPCODE:
	case RET_INSN_OPCODE:
		break;

	case CALL_INSN_OPCODE:
	case JMP32_INSN_OPCODE:
	case JMP8_INSN_OPCODE:
		tp->rel32 = insn.immediate.value;
		break;

	default: /* assume NOP */
		switch (len) {
		case 2: /* NOP2 -- emulate as JMP8+0 */
			BUG_ON(memcmp(emulate, x86_nops[len], len));
			tp->opcode = JMP8_INSN_OPCODE;
			tp->rel32 = 0;
			break;

		case 5: /* NOP5 -- emulate as JMP32+0 */
			BUG_ON(memcmp(emulate, x86_nops[len], len));
			tp->opcode = JMP32_INSN_OPCODE;
			tp->rel32 = 0;
			break;

		default: /* unknown instruction */
			BUG();
		}
		break;
	}
}
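
/*
 * Example of the NOP handling above (with a hypothetical "addr"):
 * initializing a 5-byte NOP with no explicit @emulate,
 *
 *	text_poke_loc_init(&tp, addr, x86_nops[5], 5, NULL);
 *
 * decodes the NOP itself and records it as a JMP32 with rel32 == 0, so a
 * CPU that traps on the intermediate INT3 jumps to the next instruction,
 * which is exactly what executing the NOP would have done.
 */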

/*
 * We hard rely on the tp_vec being ordered; ensure this is so by flushing
 * early if needed.
 */
static bool tp_order_fail(void *addr)
{
	struct text_poke_loc *tp;

	if (!tp_vec_nr)
		return false;

	if (!addr) /* force */
		return true;

	tp = &tp_vec[tp_vec_nr - 1];
	if ((unsigned long)text_poke_addr(tp) > (unsigned long)addr)
		return true;

	return false;
}

static void text_poke_flush(void *addr)
{
	if (tp_vec_nr == TP_VEC_MAX || tp_order_fail(addr)) {
		text_poke_bp_batch(tp_vec, tp_vec_nr);
		tp_vec_nr = 0;
	}
}

void text_poke_finish(void)
{
	text_poke_flush(NULL);
}

/*
 * Queue a poke for text_poke_bp_batch(); the pending queue is flushed first
 * if it is full or if @addr would break its address ordering.
 */
void __ref text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate)
{
	struct text_poke_loc *tp;

	if (unlikely(system_state == SYSTEM_BOOTING)) {
		text_poke_early(addr, opcode, len);
		return;
	}

	text_poke_flush(addr);

	tp = &tp_vec[tp_vec_nr++];
	text_poke_loc_init(tp, addr, opcode, len, emulate);
}
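
/*
 * A minimal batching sketch (illustrative only; the addresses are
 * hypothetical and must be queued in ascending order, see tp_order_fail()):
 *
 *	mutex_lock(&text_mutex);
 *	text_poke_queue(addr1, insn1, len1, NULL);
 *	text_poke_queue(addr2, insn2, len2, NULL);
 *	text_poke_finish();
 *	mutex_unlock(&text_mutex);
 *
 * All queued entries are then patched with a single INT3 round trip in
 * text_poke_bp_batch().
 */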

/**
 * text_poke_bp() -- update instructions on live kernel on SMP
 * @addr: address to patch
 * @opcode: opcode of new instruction
 * @len: length to copy
 * @emulate: instruction to be emulated
 *
 * Update a single instruction with the vector on the stack, avoiding
 * dynamically allocated memory. This function should be used when it is
 * not possible to allocate memory.
 */
void __ref text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate)
{
	struct text_poke_loc tp;

	if (unlikely(system_state == SYSTEM_BOOTING)) {
		text_poke_early(addr, opcode, len);
		return;
	}

	text_poke_loc_init(&tp, addr, opcode, len, emulate);
	text_poke_bp_batch(&tp, 1);
}
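
/*
 * A minimal single-patch sketch (illustrative only; "addr" and "jmp_insn"
 * are hypothetical):
 *
 *	mutex_lock(&text_mutex);
 *	text_poke_bp(addr, jmp_insn, JMP32_INSN_SIZE, NULL);
 *	mutex_unlock(&text_mutex);
 *
 * Jump-label patching, for instance, resolves to a call of this shape.
 */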