1// SPDX-License-Identifier: GPL-2.0-only
2#define pr_fmt(fmt) "SMP alternatives: " fmt
3
4#include <linux/module.h>
5#include <linux/sched.h>
6#include <linux/mutex.h>
7#include <linux/list.h>
8#include <linux/stringify.h>
9#include <linux/mm.h>
10#include <linux/vmalloc.h>
11#include <linux/memory.h>
12#include <linux/stop_machine.h>
13#include <linux/slab.h>
14#include <linux/kdebug.h>
15#include <linux/kprobes.h>
16#include <linux/mmu_context.h>
17#include <linux/bsearch.h>
18#include <asm/text-patching.h>
19#include <asm/alternative.h>
20#include <asm/sections.h>
21#include <asm/pgtable.h>
22#include <asm/mce.h>
23#include <asm/nmi.h>
24#include <asm/cacheflush.h>
25#include <asm/tlbflush.h>
26#include <asm/io.h>
27#include <asm/fixmap.h>
28
29int __read_mostly alternatives_patched;
30
31EXPORT_SYMBOL_GPL(alternatives_patched);
32
33#define MAX_PATCH_LEN (255-1)
34
35static int __initdata_or_module debug_alternative;
36
37static int __init debug_alt(char *str)
38{
39 debug_alternative = 1;
40 return 1;
41}
42__setup("debug-alternative", debug_alt);
43
44static int noreplace_smp;
45
46static int __init setup_noreplace_smp(char *str)
47{
48 noreplace_smp = 1;
49 return 1;
50}
51__setup("noreplace-smp", setup_noreplace_smp);
52
53#define DPRINTK(fmt, args...) \
54do { \
55 if (debug_alternative) \
56 printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##args); \
57} while (0)
58
59#define DUMP_BYTES(buf, len, fmt, args...) \
60do { \
61 if (unlikely(debug_alternative)) { \
62 int j; \
63 \
64 if (!(len)) \
65 break; \
66 \
67 printk(KERN_DEBUG fmt, ##args); \
68 for (j = 0; j < (len) - 1; j++) \
69 printk(KERN_CONT "%02hhx ", buf[j]); \
70 printk(KERN_CONT "%02hhx\n", buf[j]); \
71 } \
72} while (0)
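/*
 * Illustrative note (not part of the upstream comments): the two macros above
 * only emit output when the kernel is booted with the "debug-alternative"
 * parameter registered by debug_alt() above, e.g.:
 *
 *	linux ... debug-alternative
 *
 * DPRINTK() then logs one KERN_DEBUG line per event and DUMP_BYTES() hex-dumps
 * the instruction bytes being replaced.
 */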
73
74/*
75 * Each GENERIC_NOPX is X bytes long and is defined as an array of bytes
76 * for that nop. To get from one nop to the next, we add to the array base
77 * an offset equal to the sum of the sizes of all the nops preceding the
78 * one we are after.
79 *
80 * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the
81 * nice symmetry of sizes of the previous nops.
82 */
83#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
84static const unsigned char intelnops[] =
85{
86 GENERIC_NOP1,
87 GENERIC_NOP2,
88 GENERIC_NOP3,
89 GENERIC_NOP4,
90 GENERIC_NOP5,
91 GENERIC_NOP6,
92 GENERIC_NOP7,
93 GENERIC_NOP8,
94 GENERIC_NOP5_ATOMIC
95};
96static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
97{
98 NULL,
99 intelnops,
100 intelnops + 1,
101 intelnops + 1 + 2,
102 intelnops + 1 + 2 + 3,
103 intelnops + 1 + 2 + 3 + 4,
104 intelnops + 1 + 2 + 3 + 4 + 5,
105 intelnops + 1 + 2 + 3 + 4 + 5 + 6,
106 intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
107 intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
108};
109#endif
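/*
 * Worked example of the indexing scheme described above: intelnops[] is just
 * the concatenation of all the nops, so the 3-byte nop starts after the
 * 1-byte and 2-byte ones, i.e. at intelnops + 1 + 2.  As a result,
 * intel_nops[n] (and likewise k8_nops[n], k7_nops[n] and p6_nops[n] below)
 * points at an n-byte nop for 1 <= n <= ASM_NOP_MAX, while index
 * ASM_NOP_MAX + 1 holds the 5-byte atomic nop.
 */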
110
111#ifdef K8_NOP1
112static const unsigned char k8nops[] =
113{
114 K8_NOP1,
115 K8_NOP2,
116 K8_NOP3,
117 K8_NOP4,
118 K8_NOP5,
119 K8_NOP6,
120 K8_NOP7,
121 K8_NOP8,
122 K8_NOP5_ATOMIC
123};
124static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
125{
126 NULL,
127 k8nops,
128 k8nops + 1,
129 k8nops + 1 + 2,
130 k8nops + 1 + 2 + 3,
131 k8nops + 1 + 2 + 3 + 4,
132 k8nops + 1 + 2 + 3 + 4 + 5,
133 k8nops + 1 + 2 + 3 + 4 + 5 + 6,
134 k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
135 k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
136};
137#endif
138
139#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
140static const unsigned char k7nops[] =
141{
142 K7_NOP1,
143 K7_NOP2,
144 K7_NOP3,
145 K7_NOP4,
146 K7_NOP5,
147 K7_NOP6,
148 K7_NOP7,
149 K7_NOP8,
150 K7_NOP5_ATOMIC
151};
152static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
153{
154 NULL,
155 k7nops,
156 k7nops + 1,
157 k7nops + 1 + 2,
158 k7nops + 1 + 2 + 3,
159 k7nops + 1 + 2 + 3 + 4,
160 k7nops + 1 + 2 + 3 + 4 + 5,
161 k7nops + 1 + 2 + 3 + 4 + 5 + 6,
162 k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
163 k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
164};
165#endif
166
167#ifdef P6_NOP1
168static const unsigned char p6nops[] =
169{
170 P6_NOP1,
171 P6_NOP2,
172 P6_NOP3,
173 P6_NOP4,
174 P6_NOP5,
175 P6_NOP6,
176 P6_NOP7,
177 P6_NOP8,
178 P6_NOP5_ATOMIC
179};
180static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
181{
182 NULL,
183 p6nops,
184 p6nops + 1,
185 p6nops + 1 + 2,
186 p6nops + 1 + 2 + 3,
187 p6nops + 1 + 2 + 3 + 4,
188 p6nops + 1 + 2 + 3 + 4 + 5,
189 p6nops + 1 + 2 + 3 + 4 + 5 + 6,
190 p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
191 p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
192};
193#endif
194
195/* Initialize these to a safe default */
196#ifdef CONFIG_X86_64
197const unsigned char * const *ideal_nops = p6_nops;
198#else
199const unsigned char * const *ideal_nops = intel_nops;
200#endif
201
202void __init arch_init_ideal_nops(void)
203{
204 switch (boot_cpu_data.x86_vendor) {
205 case X86_VENDOR_INTEL:
206 /*
207 * Due to a decoder implementation quirk, some
208 * specific Intel CPUs actually perform better with
209 * the "k8_nops" than with the SDM-recommended NOPs.
210 */
211 if (boot_cpu_data.x86 == 6 &&
212 boot_cpu_data.x86_model >= 0x0f &&
213 boot_cpu_data.x86_model != 0x1c &&
214 boot_cpu_data.x86_model != 0x26 &&
215 boot_cpu_data.x86_model != 0x27 &&
216 boot_cpu_data.x86_model < 0x30) {
217 ideal_nops = k8_nops;
218 } else if (boot_cpu_has(X86_FEATURE_NOPL)) {
219 ideal_nops = p6_nops;
220 } else {
221#ifdef CONFIG_X86_64
222 ideal_nops = k8_nops;
223#else
224 ideal_nops = intel_nops;
225#endif
226 }
227 break;
228
229 case X86_VENDOR_HYGON:
230 ideal_nops = p6_nops;
231 return;
232
233 case X86_VENDOR_AMD:
234 if (boot_cpu_data.x86 > 0xf) {
235 ideal_nops = p6_nops;
236 return;
237 }
238
239 /* fall through */
240
241 default:
242#ifdef CONFIG_X86_64
243 ideal_nops = k8_nops;
244#else
245 if (boot_cpu_has(X86_FEATURE_K8))
246 ideal_nops = k8_nops;
247 else if (boot_cpu_has(X86_FEATURE_K7))
248 ideal_nops = k7_nops;
249 else
250 ideal_nops = intel_nops;
251#endif
252 }
253}
254
255/* Use this to add nops to a buffer, then text_poke the whole buffer. */
256static void __init_or_module add_nops(void *insns, unsigned int len)
257{
258 while (len > 0) {
259 unsigned int noplen = len;
260 if (noplen > ASM_NOP_MAX)
261 noplen = ASM_NOP_MAX;
262 memcpy(insns, ideal_nops[noplen], noplen);
263 insns += noplen;
264 len -= noplen;
265 }
266}
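/*
 * Worked example for add_nops(): with ASM_NOP_MAX == 8, padding a 13-byte
 * hole takes two iterations -- one 8-byte nop followed by one 5-byte nop --
 * so the padding consists of a few long nops from ideal_nops[] rather than
 * thirteen single-byte 0x90s.
 */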
267
268extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
269extern s32 __smp_locks[], __smp_locks_end[];
270void text_poke_early(void *addr, const void *opcode, size_t len);
271
272/*
273 * Are we looking at a near JMP with a 1 or 4-byte displacement?
274 */
275static inline bool is_jmp(const u8 opcode)
276{
277 return opcode == 0xeb || opcode == 0xe9;
278}
279
280static void __init_or_module
281recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insn_buff)
282{
283 u8 *next_rip, *tgt_rip;
284 s32 n_dspl, o_dspl;
285 int repl_len;
286
287 if (a->replacementlen != 5)
288 return;
289
290 o_dspl = *(s32 *)(insn_buff + 1);
291
292 /* next_rip of the replacement JMP */
293 next_rip = repl_insn + a->replacementlen;
294 /* target rip of the replacement JMP */
295 tgt_rip = next_rip + o_dspl;
296 n_dspl = tgt_rip - orig_insn;
297
298 DPRINTK("target RIP: %px, new_displ: 0x%x", tgt_rip, n_dspl);
299
300 if (tgt_rip - orig_insn >= 0) {
301 if (n_dspl - 2 <= 127)
302 goto two_byte_jmp;
303 else
304 goto five_byte_jmp;
305 /* negative offset */
306 } else {
307 if (((n_dspl - 2) & 0xff) == (n_dspl - 2))
308 goto two_byte_jmp;
309 else
310 goto five_byte_jmp;
311 }
312
313two_byte_jmp:
314 n_dspl -= 2;
315
316 insn_buff[0] = 0xeb;
317 insn_buff[1] = (s8)n_dspl;
318 add_nops(insn_buff + 2, 3);
319
320 repl_len = 2;
321 goto done;
322
323five_byte_jmp:
324 n_dspl -= 5;
325
326 insn_buff[0] = 0xe9;
327 *(s32 *)&insn_buff[1] = n_dspl;
328
329 repl_len = 5;
330
331done:
332
333 DPRINTK("final displ: 0x%08x, JMP 0x%lx",
334 n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
335}
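/*
 * Worked example for recompute_jump(): a 5-byte replacement of the form
 * "e9 <rel32>" (JMP rel32) whose target lands within s8 range of the original
 * site is shrunk in place to "eb <rel8>" followed by a 3-byte nop, while a
 * distant target keeps the 5-byte form with the displacement re-encoded
 * relative to orig_insn.  Displacements count from the end of the emitted
 * JMP, hence the "n_dspl -= 2" and "n_dspl -= 5" adjustments above.
 */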
336
337/*
338 * "noinline" to cause control flow change and thus invalidate I$ and
339 * cause refetch after modification.
340 */
341static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr)
342{
343 unsigned long flags;
344 int i;
345
346 for (i = 0; i < a->padlen; i++) {
347 if (instr[i] != 0x90)
348 return;
349 }
350
351 local_irq_save(flags);
352 add_nops(instr + (a->instrlen - a->padlen), a->padlen);
353 local_irq_restore(flags);
354
355 DUMP_BYTES(instr, a->instrlen, "%px: [%d:%d) optimized NOPs: ",
356 instr, a->instrlen - a->padlen, a->padlen);
357}
358
359/*
360 * Replace instructions with better alternatives for this CPU type. This runs
361 * before SMP is initialized to avoid SMP problems with self modifying code.
362 * This implies that asymmetric systems where APs have fewer capabilities than
363 * the boot processor are not handled. Tough. Make sure you disable such
364 * features by hand.
365 *
366 * Marked "noinline" to cause control flow change and thus insn cache
367 * to refetch changed I$ lines.
368 */
369void __init_or_module noinline apply_alternatives(struct alt_instr *start,
370 struct alt_instr *end)
371{
372 struct alt_instr *a;
373 u8 *instr, *replacement;
374 u8 insn_buff[MAX_PATCH_LEN];
375
376 DPRINTK("alt table %px, -> %px", start, end);
377 /*
378 * The scan order should be from start to end. A later scanned
379 * alternative code can overwrite previously scanned alternative code.
380 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
381 * patch code.
382 *
383 * So be careful if you want to change the scan order to any other
384 * order.
385 */
386 for (a = start; a < end; a++) {
387 int insn_buff_sz = 0;
388
389 instr = (u8 *)&a->instr_offset + a->instr_offset;
390 replacement = (u8 *)&a->repl_offset + a->repl_offset;
391 BUG_ON(a->instrlen > sizeof(insn_buff));
392 BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
393 if (!boot_cpu_has(a->cpuid)) {
394 if (a->padlen > 1)
395 optimize_nops(a, instr);
396
397 continue;
398 }
399
400 DPRINTK("feat: %d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d), pad: %d",
401 a->cpuid >> 5,
402 a->cpuid & 0x1f,
403 instr, instr, a->instrlen,
404 replacement, a->replacementlen, a->padlen);
405
406 DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr);
407 DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement);
408
409 memcpy(insn_buff, replacement, a->replacementlen);
410 insn_buff_sz = a->replacementlen;
411
412 /*
413		 * 0xe8 is a near relative CALL; fix its offset.
414 *
415 * Instruction length is checked before the opcode to avoid
416 * accessing uninitialized bytes for zero-length replacements.
417 */
418 if (a->replacementlen == 5 && *insn_buff == 0xe8) {
419 *(s32 *)(insn_buff + 1) += replacement - instr;
420 DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
421 *(s32 *)(insn_buff + 1),
422 (unsigned long)instr + *(s32 *)(insn_buff + 1) + 5);
423 }
424
425 if (a->replacementlen && is_jmp(replacement[0]))
426 recompute_jump(a, instr, replacement, insn_buff);
427
428 if (a->instrlen > a->replacementlen) {
429 add_nops(insn_buff + a->replacementlen,
430 a->instrlen - a->replacementlen);
431 insn_buff_sz += a->instrlen - a->replacementlen;
432 }
433 DUMP_BYTES(insn_buff, insn_buff_sz, "%px: final_insn: ", instr);
434
435 text_poke_early(instr, insn_buff, insn_buff_sz);
436 }
437}
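/*
 * Illustrative example (assumed caller, not part of this file): the struct
 * alt_instr entries consumed above are normally generated by the
 * ALTERNATIVE() macro from <asm/alternative.h>.  A minimal sketch along the
 * lines of the x86 memory barrier definitions:
 */
#if 0	/* example only */
static __always_inline void example_mb(void)
{
	/* Patched from the LOCK-ed add to MFENCE when X86_FEATURE_XMM2 is set. */
	asm volatile(ALTERNATIVE("lock; addl $0,-4(%%rsp)", "mfence",
				 X86_FEATURE_XMM2) : : : "memory", "cc");
}
#endif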
438
439#ifdef CONFIG_SMP
440static void alternatives_smp_lock(const s32 *start, const s32 *end,
441 u8 *text, u8 *text_end)
442{
443 const s32 *poff;
444
445 for (poff = start; poff < end; poff++) {
446 u8 *ptr = (u8 *)poff + *poff;
447
448 if (!*poff || ptr < text || ptr >= text_end)
449 continue;
450 /* turn DS segment override prefix into lock prefix */
451 if (*ptr == 0x3e)
452 text_poke(ptr, ((unsigned char []){0xf0}), 1);
453 }
454}
455
456static void alternatives_smp_unlock(const s32 *start, const s32 *end,
457 u8 *text, u8 *text_end)
458{
459 const s32 *poff;
460
461 for (poff = start; poff < end; poff++) {
462 u8 *ptr = (u8 *)poff + *poff;
463
464 if (!*poff || ptr < text || ptr >= text_end)
465 continue;
466 /* turn lock prefix into DS segment override prefix */
467 if (*ptr == 0xf0)
468 text_poke(ptr, ((unsigned char []){0x3E}), 1);
469 }
470}
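/*
 * Worked example for the two helpers above: a LOCK_PREFIX site such as
 *
 *	f0 ff 45 00		lock incl 0x0(%rbp)
 *
 * records the address of its 0xf0 byte in the __smp_locks section.  On a UP
 * system that byte is rewritten to 0x3e, turning the instruction into a
 * harmless DS-segment-override form:
 *
 *	3e ff 45 00		incl %ds:0x0(%rbp)
 *
 * and alternatives_smp_lock() flips it back once a second CPU is brought up.
 */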
471
472struct smp_alt_module {
473 /* what is this ??? */
474 struct module *mod;
475 char *name;
476
477 /* ptrs to lock prefixes */
478 const s32 *locks;
479 const s32 *locks_end;
480
481 /* .text segment, needed to avoid patching init code ;) */
482 u8 *text;
483 u8 *text_end;
484
485 struct list_head next;
486};
487static LIST_HEAD(smp_alt_modules);
488static bool uniproc_patched = false; /* protected by text_mutex */
489
490void __init_or_module alternatives_smp_module_add(struct module *mod,
491 char *name,
492 void *locks, void *locks_end,
493 void *text, void *text_end)
494{
495 struct smp_alt_module *smp;
496
497 mutex_lock(&text_mutex);
498 if (!uniproc_patched)
499 goto unlock;
500
501 if (num_possible_cpus() == 1)
502 /* Don't bother remembering, we'll never have to undo it. */
503 goto smp_unlock;
504
505 smp = kzalloc(sizeof(*smp), GFP_KERNEL);
506 if (NULL == smp)
507 /* we'll run the (safe but slow) SMP code then ... */
508 goto unlock;
509
510 smp->mod = mod;
511 smp->name = name;
512 smp->locks = locks;
513 smp->locks_end = locks_end;
514 smp->text = text;
515 smp->text_end = text_end;
516 DPRINTK("locks %p -> %p, text %p -> %p, name %s\n",
517 smp->locks, smp->locks_end,
518 smp->text, smp->text_end, smp->name);
519
520 list_add_tail(&smp->next, &smp_alt_modules);
521smp_unlock:
522 alternatives_smp_unlock(locks, locks_end, text, text_end);
523unlock:
524 mutex_unlock(&text_mutex);
525}
526
527void __init_or_module alternatives_smp_module_del(struct module *mod)
528{
529 struct smp_alt_module *item;
530
531 mutex_lock(&text_mutex);
532 list_for_each_entry(item, &smp_alt_modules, next) {
533 if (mod != item->mod)
534 continue;
535 list_del(&item->next);
536 kfree(item);
537 break;
538 }
539 mutex_unlock(&text_mutex);
540}
541
542void alternatives_enable_smp(void)
543{
544 struct smp_alt_module *mod;
545
546 /* Why bother if there are no other CPUs? */
547 BUG_ON(num_possible_cpus() == 1);
548
549 mutex_lock(&text_mutex);
550
551 if (uniproc_patched) {
552 pr_info("switching to SMP code\n");
553 BUG_ON(num_online_cpus() != 1);
554 clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
555 clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
556 list_for_each_entry(mod, &smp_alt_modules, next)
557 alternatives_smp_lock(mod->locks, mod->locks_end,
558 mod->text, mod->text_end);
559 uniproc_patched = false;
560 }
561 mutex_unlock(&text_mutex);
562}
563
564/*
565 * Return 1 if the address range is reserved for SMP-alternatives.
566 * Must hold text_mutex.
567 */
568int alternatives_text_reserved(void *start, void *end)
569{
570 struct smp_alt_module *mod;
571 const s32 *poff;
572 u8 *text_start = start;
573 u8 *text_end = end;
574
575 lockdep_assert_held(&text_mutex);
576
577 list_for_each_entry(mod, &smp_alt_modules, next) {
578 if (mod->text > text_end || mod->text_end < text_start)
579 continue;
580 for (poff = mod->locks; poff < mod->locks_end; poff++) {
581 const u8 *ptr = (const u8 *)poff + *poff;
582
583 if (text_start <= ptr && text_end > ptr)
584 return 1;
585 }
586 }
587
588 return 0;
589}
590#endif /* CONFIG_SMP */
591
592#ifdef CONFIG_PARAVIRT
593void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
594 struct paravirt_patch_site *end)
595{
596 struct paravirt_patch_site *p;
597 char insn_buff[MAX_PATCH_LEN];
598
599 for (p = start; p < end; p++) {
600 unsigned int used;
601
602 BUG_ON(p->len > MAX_PATCH_LEN);
603 /* prep the buffer with the original instructions */
604 memcpy(insn_buff, p->instr, p->len);
605 used = pv_ops.init.patch(p->type, insn_buff, (unsigned long)p->instr, p->len);
606
607 BUG_ON(used > p->len);
608
609 /* Pad the rest with nops */
610 add_nops(insn_buff + used, p->len - used);
611 text_poke_early(p->instr, insn_buff, p->len);
612 }
613}
614extern struct paravirt_patch_site __start_parainstructions[],
615 __stop_parainstructions[];
616#endif /* CONFIG_PARAVIRT */
617
618/*
619 * Self-test for the INT3 based CALL emulation code.
620 *
621 * This exercises int3_emulate_call() to make sure INT3 pt_regs are set up
622 * properly and that there is a stack gap between the INT3 frame and the
623 * previous context. Without this gap doing a virtual PUSH on the interrupted
624 * stack would corrupt the INT3 IRET frame.
625 *
626 * See entry_{32,64}.S for more details.
627 */
628
629/*
630 * We define the int3_magic() function in assembly to control the calling
631 * convention such that we can 'call' it from assembly.
632 */
633
634extern void int3_magic(unsigned int *ptr); /* defined in asm */
635
636asm (
637" .pushsection .init.text, \"ax\", @progbits\n"
638" .type int3_magic, @function\n"
639"int3_magic:\n"
640" movl $1, (%" _ASM_ARG1 ")\n"
641" ret\n"
642" .size int3_magic, .-int3_magic\n"
643" .popsection\n"
644);
645
646extern __initdata unsigned long int3_selftest_ip; /* defined in asm below */
647
648static int __init
649int3_exception_notify(struct notifier_block *self, unsigned long val, void *data)
650{
651 struct die_args *args = data;
652 struct pt_regs *regs = args->regs;
653
654 if (!regs || user_mode(regs))
655 return NOTIFY_DONE;
656
657 if (val != DIE_INT3)
658 return NOTIFY_DONE;
659
660 if (regs->ip - INT3_INSN_SIZE != int3_selftest_ip)
661 return NOTIFY_DONE;
662
663 int3_emulate_call(regs, (unsigned long)&int3_magic);
664 return NOTIFY_STOP;
665}
666
667static void __init int3_selftest(void)
668{
669 static __initdata struct notifier_block int3_exception_nb = {
670 .notifier_call = int3_exception_notify,
671 .priority = INT_MAX-1, /* last */
672 };
673 unsigned int val = 0;
674
675 BUG_ON(register_die_notifier(&int3_exception_nb));
676
677 /*
678 * Basically: int3_magic(&val); but really complicated :-)
679 *
680 * Stick the address of the INT3 instruction into int3_selftest_ip,
681 * then trigger the INT3, padded with NOPs to match a CALL instruction
682 * length.
683 */
684 asm volatile ("1: int3; nop; nop; nop; nop\n\t"
685 ".pushsection .init.data,\"aw\"\n\t"
686 ".align " __ASM_SEL(4, 8) "\n\t"
687 ".type int3_selftest_ip, @object\n\t"
688 ".size int3_selftest_ip, " __ASM_SEL(4, 8) "\n\t"
689 "int3_selftest_ip:\n\t"
690 __ASM_SEL(.long, .quad) " 1b\n\t"
691 ".popsection\n\t"
692 : ASM_CALL_CONSTRAINT
693 : __ASM_SEL_RAW(a, D) (&val)
694 : "memory");
695
696 BUG_ON(val != 1);
697
698 unregister_die_notifier(&int3_exception_nb);
699}
700
701void __init alternative_instructions(void)
702{
703 int3_selftest();
704
705 /*
706 * The patching is not fully atomic, so try to avoid local
707	 * interruptions that might execute the code that is about to be patched.
708 * Other CPUs are not running.
709 */
710 stop_nmi();
711
712 /*
713 * Don't stop machine check exceptions while patching.
714 * MCEs only happen when something got corrupted and in this
715 * case we must do something about the corruption.
716 * Ignoring it is worse than an unlikely patching race.
717 * Also machine checks tend to be broadcast and if one CPU
718 * goes into machine check the others follow quickly, so we don't
719	 * expect a machine check to cause undue problems during code
720 * patching.
721 */
722
723 apply_alternatives(__alt_instructions, __alt_instructions_end);
724
725#ifdef CONFIG_SMP
726	/* Patch to UP if no other CPUs are imminent. */
727 if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
728 uniproc_patched = true;
729 alternatives_smp_module_add(NULL, "core kernel",
730 __smp_locks, __smp_locks_end,
731 _text, _etext);
732 }
733
734 if (!uniproc_patched || num_possible_cpus() == 1) {
735 free_init_pages("SMP alternatives",
736 (unsigned long)__smp_locks,
737 (unsigned long)__smp_locks_end);
738 }
739#endif
740
741 apply_paravirt(__parainstructions, __parainstructions_end);
742
743 restart_nmi();
744 alternatives_patched = 1;
745}
746
747/**
748 * text_poke_early - Update instructions on a live kernel at boot time
749 * @addr: address to modify
750 * @opcode: source of the copy
751 * @len: length to copy
752 *
753 * When you use this code to patch more than one byte of an instruction
754 * you need to make sure that other CPUs cannot execute this code in parallel.
755 * Also no thread must be currently preempted in the middle of these
756 * instructions. And on the local CPU you need to be protected against NMI or
757 * MCE handlers seeing an inconsistent instruction while you patch.
758 */
759void __init_or_module text_poke_early(void *addr, const void *opcode,
760 size_t len)
761{
762 unsigned long flags;
763
764 if (boot_cpu_has(X86_FEATURE_NX) &&
765 is_module_text_address((unsigned long)addr)) {
766 /*
767 * Modules text is marked initially as non-executable, so the
768 * code cannot be running and speculative code-fetches are
769 * prevented. Just change the code.
770 */
771 memcpy(addr, opcode, len);
772 } else {
773 local_irq_save(flags);
774 memcpy(addr, opcode, len);
775 local_irq_restore(flags);
776 sync_core();
777
778 /*
779 * Could also do a CLFLUSH here to speed up CPU recovery; but
780 * that causes hangs on some VIA CPUs.
781 */
782 }
783}
784
785__ro_after_init struct mm_struct *poking_mm;
786__ro_after_init unsigned long poking_addr;
787
788static void *__text_poke(void *addr, const void *opcode, size_t len)
789{
790 bool cross_page_boundary = offset_in_page(addr) + len > PAGE_SIZE;
791 struct page *pages[2] = {NULL};
792 temp_mm_state_t prev;
793 unsigned long flags;
794 pte_t pte, *ptep;
795 spinlock_t *ptl;
796 pgprot_t pgprot;
797
798 /*
799	 * While the boot memory allocator is running we cannot use struct pages as
800 * they are not yet initialized. There is no way to recover.
801 */
802 BUG_ON(!after_bootmem);
803
804 if (!core_kernel_text((unsigned long)addr)) {
805 pages[0] = vmalloc_to_page(addr);
806 if (cross_page_boundary)
807 pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
808 } else {
809 pages[0] = virt_to_page(addr);
810 WARN_ON(!PageReserved(pages[0]));
811 if (cross_page_boundary)
812 pages[1] = virt_to_page(addr + PAGE_SIZE);
813 }
814 /*
815 * If something went wrong, crash and burn since recovery paths are not
816 * implemented.
817 */
818 BUG_ON(!pages[0] || (cross_page_boundary && !pages[1]));
819
820 local_irq_save(flags);
821
822 /*
823 * Map the page without the global bit, as TLB flushing is done with
824 * flush_tlb_mm_range(), which is intended for non-global PTEs.
825 */
826 pgprot = __pgprot(pgprot_val(PAGE_KERNEL) & ~_PAGE_GLOBAL);
827
828 /*
829	 * The lock is not really needed, but it allows us to avoid open-coding.
830 */
831 ptep = get_locked_pte(poking_mm, poking_addr, &ptl);
832
833 /*
834 * This must not fail; preallocated in poking_init().
835 */
836 VM_BUG_ON(!ptep);
837
838 pte = mk_pte(pages[0], pgprot);
839 set_pte_at(poking_mm, poking_addr, ptep, pte);
840
841 if (cross_page_boundary) {
842 pte = mk_pte(pages[1], pgprot);
843 set_pte_at(poking_mm, poking_addr + PAGE_SIZE, ptep + 1, pte);
844 }
845
846 /*
847 * Loading the temporary mm behaves as a compiler barrier, which
848 * guarantees that the PTE will be set at the time memcpy() is done.
849 */
850 prev = use_temporary_mm(poking_mm);
851
852 kasan_disable_current();
853 memcpy((u8 *)poking_addr + offset_in_page(addr), opcode, len);
854 kasan_enable_current();
855
856 /*
857 * Ensure that the PTE is only cleared after the instructions of memcpy
858 * were issued by using a compiler barrier.
859 */
860 barrier();
861
862 pte_clear(poking_mm, poking_addr, ptep);
863 if (cross_page_boundary)
864 pte_clear(poking_mm, poking_addr + PAGE_SIZE, ptep + 1);
865
866 /*
867 * Loading the previous page-table hierarchy requires a serializing
868 * instruction that already allows the core to see the updated version.
869 * Xen-PV is assumed to serialize execution in a similar manner.
870 */
871 unuse_temporary_mm(prev);
872
873 /*
874 * Flushing the TLB might involve IPIs, which would require enabled
875	 * IRQs, but not if the mm is not used, as is the case at this point.
876 */
877 flush_tlb_mm_range(poking_mm, poking_addr, poking_addr +
878 (cross_page_boundary ? 2 : 1) * PAGE_SIZE,
879 PAGE_SHIFT, false);
880
881 /*
882 * If the text does not match what we just wrote then something is
883 * fundamentally screwy; there's nothing we can really do about that.
884 */
885 BUG_ON(memcmp(addr, opcode, len));
886
887 pte_unmap_unlock(ptep, ptl);
888 local_irq_restore(flags);
889 return addr;
890}
891
892/**
893 * text_poke - Update instructions on a live kernel
894 * @addr: address to modify
895 * @opcode: source of the copy
896 * @len: length to copy
897 *
898 * Only atomic text poke/set should be allowed when not doing early patching.
899 * It means the size must be writable atomically and the address must be aligned
900 * in a way that permits an atomic write. It also makes sure we fit on a single
901 * page.
902 *
903 * Note that the caller must ensure that if the modified code is part of a
904 * module, the module would not be removed during poking. This can be achieved
905 * by registering a module notifier, and ordering module removal and patching
906 * through a mutex.
907 */
908void *text_poke(void *addr, const void *opcode, size_t len)
909{
910 lockdep_assert_held(&text_mutex);
911
912 return __text_poke(addr, opcode, len);
913}
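/*
 * Illustrative usage sketch (not part of this file): users such as kprobes
 * patch bytes under text_mutex.  The function name below is made up for the
 * example.
 */
#if 0	/* example only */
static void example_poke_int3(void *addr)
{
	static const u8 int3 = 0xcc;		/* INT3 opcode */

	mutex_lock(&text_mutex);
	text_poke(addr, &int3, sizeof(int3));
	mutex_unlock(&text_mutex);
}
#endif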
914
915/**
916 * text_poke_kgdb - Update instructions on a live kernel by kgdb
917 * @addr: address to modify
918 * @opcode: source of the copy
919 * @len: length to copy
920 *
921 * Only atomic text poke/set should be allowed when not doing early patching.
922 * It means the size must be writable atomically and the address must be aligned
923 * in a way that permits an atomic write. It also makes sure we fit on a single
924 * page.
925 *
926 * Context: should only be used by kgdb, which ensures no other core is running,
927 * despite the fact it does not hold the text_mutex.
928 */
929void *text_poke_kgdb(void *addr, const void *opcode, size_t len)
930{
931 return __text_poke(addr, opcode, len);
932}
933
934static void do_sync_core(void *info)
935{
936 sync_core();
937}
938
939static struct bp_patching_desc {
940 struct text_poke_loc *vec;
941 int nr_entries;
942} bp_patching;
943
944static int patch_cmp(const void *key, const void *elt)
945{
946 struct text_poke_loc *tp = (struct text_poke_loc *) elt;
947
948 if (key < tp->addr)
949 return -1;
950 if (key > tp->addr)
951 return 1;
952 return 0;
953}
954NOKPROBE_SYMBOL(patch_cmp);
955
956int poke_int3_handler(struct pt_regs *regs)
957{
958 struct text_poke_loc *tp;
959 unsigned char int3 = 0xcc;
960 void *ip;
961
962 /*
963 * Having observed our INT3 instruction, we now must observe
964 * bp_patching.nr_entries.
965 *
966 * nr_entries != 0 INT3
967 * WMB RMB
968 * write INT3 if (nr_entries)
969 *
970 * Idem for other elements in bp_patching.
971 */
972 smp_rmb();
973
974 if (likely(!bp_patching.nr_entries))
975 return 0;
976
977 if (user_mode(regs))
978 return 0;
979
980 /*
981 * Discount the sizeof(int3). See text_poke_bp_batch().
982 */
983 ip = (void *) regs->ip - sizeof(int3);
984
985 /*
986 * Skip the binary search if there is a single member in the vector.
987 */
988 if (unlikely(bp_patching.nr_entries > 1)) {
989 tp = bsearch(ip, bp_patching.vec, bp_patching.nr_entries,
990 sizeof(struct text_poke_loc),
991 patch_cmp);
992 if (!tp)
993 return 0;
994 } else {
995 tp = bp_patching.vec;
996 if (tp->addr != ip)
997 return 0;
998 }
999
1000 /* set up the specified breakpoint detour */
1001 regs->ip = (unsigned long) tp->detour;
1002
1003 return 1;
1004}
1005NOKPROBE_SYMBOL(poke_int3_handler);
1006
1007/**
1008 * text_poke_bp_batch() -- update instructions on live kernel on SMP
1009 * @tp: vector of instructions to patch
1010 * @nr_entries: number of entries in the vector
1011 *
 1012 * Modify multi-byte instructions by using an int3 breakpoint on SMP.
 1013 * We completely avoid stop_machine() here, and achieve the
 1014 * synchronization using the int3 breakpoint.
1015 *
1016 * The way it is done:
1017 * - For each entry in the vector:
 1018 * - add an int3 trap to the address that will be patched
1019 * - sync cores
1020 * - For each entry in the vector:
1021 * - update all but the first byte of the patched range
1022 * - sync cores
1023 * - For each entry in the vector:
 1024 * - replace the first byte (int3) with the first byte of the
 1025 * replacing opcode
1026 * - sync cores
1027 */
1028void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
1029{
1030 int patched_all_but_first = 0;
1031 unsigned char int3 = 0xcc;
1032 unsigned int i;
1033
1034 lockdep_assert_held(&text_mutex);
1035
1036 bp_patching.vec = tp;
1037 bp_patching.nr_entries = nr_entries;
1038
1039 /*
1040 * Corresponding read barrier in int3 notifier for making sure the
1041 * nr_entries and handler are correctly ordered wrt. patching.
1042 */
1043 smp_wmb();
1044
1045 /*
 1046	 * First step: add an int3 trap to each address that will be patched.
1047 */
1048 for (i = 0; i < nr_entries; i++)
1049 text_poke(tp[i].addr, &int3, sizeof(int3));
1050
1051 on_each_cpu(do_sync_core, NULL, 1);
1052
1053 /*
1054 * Second step: update all but the first byte of the patched range.
1055 */
1056 for (i = 0; i < nr_entries; i++) {
1057 if (tp[i].len - sizeof(int3) > 0) {
1058 text_poke((char *)tp[i].addr + sizeof(int3),
1059 (const char *)tp[i].opcode + sizeof(int3),
1060 tp[i].len - sizeof(int3));
1061 patched_all_but_first++;
1062 }
1063 }
1064
1065 if (patched_all_but_first) {
1066 /*
1067 * According to Intel, this core syncing is very likely
1068 * not necessary and we'd be safe even without it. But
1069 * better safe than sorry (plus there's not only Intel).
1070 */
1071 on_each_cpu(do_sync_core, NULL, 1);
1072 }
1073
1074 /*
 1075	 * Third step: replace the first byte (int3) with the first byte of the
 1076	 * replacing opcode.
1077 */
1078 for (i = 0; i < nr_entries; i++)
1079 text_poke(tp[i].addr, tp[i].opcode, sizeof(int3));
1080
1081 on_each_cpu(do_sync_core, NULL, 1);
1082 /*
1083 * sync_core() implies an smp_mb() and orders this store against
1084 * the writing of the new instruction.
1085 */
1086 bp_patching.vec = NULL;
1087 bp_patching.nr_entries = 0;
1088}
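/*
 * Illustrative batch usage sketch (not part of this file): patching two
 * independent one-byte sites in a single int3 transaction.  All names are
 * made up for the example; note that poke_int3_handler() bsearch()es the
 * vector, so entries should be sorted by address.
 */
#if 0	/* example only */
static void example_poke_two_sites(void *lo_addr, void *lo_detour,
				   void *hi_addr, void *hi_detour)
{
	const u8 nop = 0x90;
	struct text_poke_loc vec[2] = {
		{ .addr = lo_addr, .detour = lo_detour, .len = sizeof(nop) },
		{ .addr = hi_addr, .detour = hi_detour, .len = sizeof(nop) },
	};

	memcpy((void *)vec[0].opcode, &nop, sizeof(nop));
	memcpy((void *)vec[1].opcode, &nop, sizeof(nop));

	mutex_lock(&text_mutex);
	text_poke_bp_batch(vec, ARRAY_SIZE(vec));
	mutex_unlock(&text_mutex);
}
#endif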
1089
1090/**
1091 * text_poke_bp() -- update instructions on live kernel on SMP
1092 * @addr: address to patch
1093 * @opcode: opcode of new instruction
1094 * @len: length to copy
1095 * @handler: address to jump to when the temporary breakpoint is hit
1096 *
 1097 * Update a single instruction with a vector on the stack, avoiding
1098 * dynamically allocated memory. This function should be used when it is
1099 * not possible to allocate memory.
1100 */
1101void text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
1102{
1103 struct text_poke_loc tp = {
1104 .detour = handler,
1105 .addr = addr,
1106 .len = len,
1107 };
1108
1109 if (len > POKE_MAX_OPCODE_SIZE) {
1110 WARN_ONCE(1, "len is larger than %d\n", POKE_MAX_OPCODE_SIZE);
1111 return;
1112 }
1113
1114 memcpy((void *)tp.opcode, opcode, len);
1115
1116 text_poke_bp_batch(&tp, 1);
1117}
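/*
 * Illustrative single-site sketch (not part of this file): enabling a
 * jump-label style site by turning a 5-byte nop into a JMP.  The handler is
 * the jump target itself, so a CPU that hits the transient INT3 simply lands
 * where the final JMP would have taken it.  'site' and 'target' are made-up
 * names.
 */
#if 0	/* example only */
static void example_poke_jmp(void *site, void *target)
{
	u8 jmp[5] = { 0xe9, };			/* JMP rel32 */

	*(s32 *)&jmp[1] = (s32)((long)target - ((long)site + sizeof(jmp)));

	mutex_lock(&text_mutex);
	text_poke_bp(site, jmp, sizeof(jmp), target);
	mutex_unlock(&text_mutex);
}
#endif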
1#include <linux/module.h>
2#include <linux/sched.h>
3#include <linux/mutex.h>
4#include <linux/list.h>
5#include <linux/stringify.h>
6#include <linux/kprobes.h>
7#include <linux/mm.h>
8#include <linux/vmalloc.h>
9#include <linux/memory.h>
10#include <linux/stop_machine.h>
11#include <linux/slab.h>
12#include <asm/alternative.h>
13#include <asm/sections.h>
14#include <asm/pgtable.h>
15#include <asm/mce.h>
16#include <asm/nmi.h>
17#include <asm/cacheflush.h>
18#include <asm/tlbflush.h>
19#include <asm/io.h>
20#include <asm/fixmap.h>
21
22#define MAX_PATCH_LEN (255-1)
23
24#ifdef CONFIG_HOTPLUG_CPU
25static int smp_alt_once;
26
27static int __init bootonly(char *str)
28{
29 smp_alt_once = 1;
30 return 1;
31}
32__setup("smp-alt-boot", bootonly);
33#else
34#define smp_alt_once 1
35#endif
36
37static int __initdata_or_module debug_alternative;
38
39static int __init debug_alt(char *str)
40{
41 debug_alternative = 1;
42 return 1;
43}
44__setup("debug-alternative", debug_alt);
45
46static int noreplace_smp;
47
48static int __init setup_noreplace_smp(char *str)
49{
50 noreplace_smp = 1;
51 return 1;
52}
53__setup("noreplace-smp", setup_noreplace_smp);
54
55#ifdef CONFIG_PARAVIRT
56static int __initdata_or_module noreplace_paravirt = 0;
57
58static int __init setup_noreplace_paravirt(char *str)
59{
60 noreplace_paravirt = 1;
61 return 1;
62}
63__setup("noreplace-paravirt", setup_noreplace_paravirt);
64#endif
65
66#define DPRINTK(fmt, args...) if (debug_alternative) \
67 printk(KERN_DEBUG fmt, args)
68
69/*
70 * Each GENERIC_NOPX is X bytes long and is defined as an array of bytes
71 * for that nop. To get from one nop to the next, we add to the array base
72 * an offset equal to the sum of the sizes of all the nops preceding the
73 * one we are after.
74 *
75 * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the
76 * nice symmetry of sizes of the previous nops.
77 */
78#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
79static const unsigned char intelnops[] =
80{
81 GENERIC_NOP1,
82 GENERIC_NOP2,
83 GENERIC_NOP3,
84 GENERIC_NOP4,
85 GENERIC_NOP5,
86 GENERIC_NOP6,
87 GENERIC_NOP7,
88 GENERIC_NOP8,
89 GENERIC_NOP5_ATOMIC
90};
91static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
92{
93 NULL,
94 intelnops,
95 intelnops + 1,
96 intelnops + 1 + 2,
97 intelnops + 1 + 2 + 3,
98 intelnops + 1 + 2 + 3 + 4,
99 intelnops + 1 + 2 + 3 + 4 + 5,
100 intelnops + 1 + 2 + 3 + 4 + 5 + 6,
101 intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
102 intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
103};
104#endif
105
106#ifdef K8_NOP1
107static const unsigned char k8nops[] =
108{
109 K8_NOP1,
110 K8_NOP2,
111 K8_NOP3,
112 K8_NOP4,
113 K8_NOP5,
114 K8_NOP6,
115 K8_NOP7,
116 K8_NOP8,
117 K8_NOP5_ATOMIC
118};
119static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
120{
121 NULL,
122 k8nops,
123 k8nops + 1,
124 k8nops + 1 + 2,
125 k8nops + 1 + 2 + 3,
126 k8nops + 1 + 2 + 3 + 4,
127 k8nops + 1 + 2 + 3 + 4 + 5,
128 k8nops + 1 + 2 + 3 + 4 + 5 + 6,
129 k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
130 k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
131};
132#endif
133
134#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
135static const unsigned char k7nops[] =
136{
137 K7_NOP1,
138 K7_NOP2,
139 K7_NOP3,
140 K7_NOP4,
141 K7_NOP5,
142 K7_NOP6,
143 K7_NOP7,
144 K7_NOP8,
145 K7_NOP5_ATOMIC
146};
147static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
148{
149 NULL,
150 k7nops,
151 k7nops + 1,
152 k7nops + 1 + 2,
153 k7nops + 1 + 2 + 3,
154 k7nops + 1 + 2 + 3 + 4,
155 k7nops + 1 + 2 + 3 + 4 + 5,
156 k7nops + 1 + 2 + 3 + 4 + 5 + 6,
157 k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
158 k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
159};
160#endif
161
162#ifdef P6_NOP1
163static const unsigned char __initconst_or_module p6nops[] =
164{
165 P6_NOP1,
166 P6_NOP2,
167 P6_NOP3,
168 P6_NOP4,
169 P6_NOP5,
170 P6_NOP6,
171 P6_NOP7,
172 P6_NOP8,
173 P6_NOP5_ATOMIC
174};
175static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
176{
177 NULL,
178 p6nops,
179 p6nops + 1,
180 p6nops + 1 + 2,
181 p6nops + 1 + 2 + 3,
182 p6nops + 1 + 2 + 3 + 4,
183 p6nops + 1 + 2 + 3 + 4 + 5,
184 p6nops + 1 + 2 + 3 + 4 + 5 + 6,
185 p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
186 p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
187};
188#endif
189
190/* Initialize these to a safe default */
191#ifdef CONFIG_X86_64
192const unsigned char * const *ideal_nops = p6_nops;
193#else
194const unsigned char * const *ideal_nops = intel_nops;
195#endif
196
197void __init arch_init_ideal_nops(void)
198{
199 switch (boot_cpu_data.x86_vendor) {
200 case X86_VENDOR_INTEL:
201 /*
202 * Due to a decoder implementation quirk, some
203 * specific Intel CPUs actually perform better with
204 * the "k8_nops" than with the SDM-recommended NOPs.
205 */
206 if (boot_cpu_data.x86 == 6 &&
207 boot_cpu_data.x86_model >= 0x0f &&
208 boot_cpu_data.x86_model != 0x1c &&
209 boot_cpu_data.x86_model != 0x26 &&
210 boot_cpu_data.x86_model != 0x27 &&
211 boot_cpu_data.x86_model < 0x30) {
212 ideal_nops = k8_nops;
213 } else if (boot_cpu_has(X86_FEATURE_NOPL)) {
214 ideal_nops = p6_nops;
215 } else {
216#ifdef CONFIG_X86_64
217 ideal_nops = k8_nops;
218#else
219 ideal_nops = intel_nops;
220#endif
221 }
222		break;
223 default:
224#ifdef CONFIG_X86_64
225 ideal_nops = k8_nops;
226#else
227 if (boot_cpu_has(X86_FEATURE_K8))
228 ideal_nops = k8_nops;
229 else if (boot_cpu_has(X86_FEATURE_K7))
230 ideal_nops = k7_nops;
231 else
232 ideal_nops = intel_nops;
233#endif
234 }
235}
236
237/* Use this to add nops to a buffer, then text_poke the whole buffer. */
238static void __init_or_module add_nops(void *insns, unsigned int len)
239{
240 while (len > 0) {
241 unsigned int noplen = len;
242 if (noplen > ASM_NOP_MAX)
243 noplen = ASM_NOP_MAX;
244 memcpy(insns, ideal_nops[noplen], noplen);
245 insns += noplen;
246 len -= noplen;
247 }
248}
249
250extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
251extern s32 __smp_locks[], __smp_locks_end[];
252void *text_poke_early(void *addr, const void *opcode, size_t len);
253
254/* Replace instructions with better alternatives for this CPU type.
255 This runs before SMP is initialized to avoid SMP problems with
256 self modifying code. This implies that asymmetric systems where
257   APs have fewer capabilities than the boot processor are not handled.
258 Tough. Make sure you disable such features by hand. */
259
260void __init_or_module apply_alternatives(struct alt_instr *start,
261 struct alt_instr *end)
262{
263 struct alt_instr *a;
264 u8 *instr, *replacement;
265 u8 insnbuf[MAX_PATCH_LEN];
266
267 DPRINTK("%s: alt table %p -> %p\n", __func__, start, end);
268 /*
269 * The scan order should be from start to end. A later scanned
270 * alternative code can overwrite a previously scanned alternative code.
271 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
272 * patch code.
273 *
274 * So be careful if you want to change the scan order to any other
275 * order.
276 */
277 for (a = start; a < end; a++) {
278 instr = (u8 *)&a->instr_offset + a->instr_offset;
279 replacement = (u8 *)&a->repl_offset + a->repl_offset;
280 BUG_ON(a->replacementlen > a->instrlen);
281 BUG_ON(a->instrlen > sizeof(insnbuf));
282 BUG_ON(a->cpuid >= NCAPINTS*32);
283 if (!boot_cpu_has(a->cpuid))
284 continue;
285
286 memcpy(insnbuf, replacement, a->replacementlen);
287
288		/* 0xe8 is a near relative CALL; fix its offset. */
289 if (*insnbuf == 0xe8 && a->replacementlen == 5)
290 *(s32 *)(insnbuf + 1) += replacement - instr;
291
292 add_nops(insnbuf + a->replacementlen,
293 a->instrlen - a->replacementlen);
294
295 text_poke_early(instr, insnbuf, a->instrlen);
296 }
297}
298
299#ifdef CONFIG_SMP
300
301static void alternatives_smp_lock(const s32 *start, const s32 *end,
302 u8 *text, u8 *text_end)
303{
304 const s32 *poff;
305
306 mutex_lock(&text_mutex);
307 for (poff = start; poff < end; poff++) {
308 u8 *ptr = (u8 *)poff + *poff;
309
310 if (!*poff || ptr < text || ptr >= text_end)
311 continue;
312 /* turn DS segment override prefix into lock prefix */
313 if (*ptr == 0x3e)
314 text_poke(ptr, ((unsigned char []){0xf0}), 1);
315	}
316 mutex_unlock(&text_mutex);
317}
318
319static void alternatives_smp_unlock(const s32 *start, const s32 *end,
320 u8 *text, u8 *text_end)
321{
322 const s32 *poff;
323
324 if (noreplace_smp)
325 return;
326
327 mutex_lock(&text_mutex);
328 for (poff = start; poff < end; poff++) {
329 u8 *ptr = (u8 *)poff + *poff;
330
331 if (!*poff || ptr < text || ptr >= text_end)
332 continue;
333 /* turn lock prefix into DS segment override prefix */
334 if (*ptr == 0xf0)
335 text_poke(ptr, ((unsigned char []){0x3E}), 1);
336	}
337 mutex_unlock(&text_mutex);
338}
339
340struct smp_alt_module {
341 /* what is this ??? */
342 struct module *mod;
343 char *name;
344
345 /* ptrs to lock prefixes */
346 const s32 *locks;
347 const s32 *locks_end;
348
349 /* .text segment, needed to avoid patching init code ;) */
350 u8 *text;
351 u8 *text_end;
352
353 struct list_head next;
354};
355static LIST_HEAD(smp_alt_modules);
356static DEFINE_MUTEX(smp_alt);
357static int smp_mode = 1; /* protected by smp_alt */
358
359void __init_or_module alternatives_smp_module_add(struct module *mod,
360 char *name,
361 void *locks, void *locks_end,
362 void *text, void *text_end)
363{
364 struct smp_alt_module *smp;
365
366 if (noreplace_smp)
367 return;
368
369 if (smp_alt_once) {
370 if (boot_cpu_has(X86_FEATURE_UP))
371 alternatives_smp_unlock(locks, locks_end,
372 text, text_end);
373 return;
374 }
375
376 smp = kzalloc(sizeof(*smp), GFP_KERNEL);
377 if (NULL == smp)
378 return; /* we'll run the (safe but slow) SMP code then ... */
379
380 smp->mod = mod;
381 smp->name = name;
382 smp->locks = locks;
383 smp->locks_end = locks_end;
384 smp->text = text;
385 smp->text_end = text_end;
386 DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
387 __func__, smp->locks, smp->locks_end,
388 smp->text, smp->text_end, smp->name);
389
390 mutex_lock(&smp_alt);
391 list_add_tail(&smp->next, &smp_alt_modules);
392 if (boot_cpu_has(X86_FEATURE_UP))
393 alternatives_smp_unlock(smp->locks, smp->locks_end,
394 smp->text, smp->text_end);
395 mutex_unlock(&smp_alt);
396}
397
398void __init_or_module alternatives_smp_module_del(struct module *mod)
399{
400 struct smp_alt_module *item;
401
402 if (smp_alt_once || noreplace_smp)
403 return;
404
405 mutex_lock(&smp_alt);
406 list_for_each_entry(item, &smp_alt_modules, next) {
407 if (mod != item->mod)
408 continue;
409 list_del(&item->next);
410 mutex_unlock(&smp_alt);
411 DPRINTK("%s: %s\n", __func__, item->name);
412 kfree(item);
413 return;
414 }
415 mutex_unlock(&smp_alt);
416}
417
418bool skip_smp_alternatives;
419void alternatives_smp_switch(int smp)
420{
421 struct smp_alt_module *mod;
422
423#ifdef CONFIG_LOCKDEP
424 /*
425	 * An older binutils section handling bug prevented
426 * alternatives-replacement from working reliably.
427 *
428 * If this still occurs then you should see a hang
429 * or crash shortly after this line:
430 */
431 printk("lockdep: fixing up alternatives.\n");
432#endif
433
434 if (noreplace_smp || smp_alt_once || skip_smp_alternatives)
435 return;
436 BUG_ON(!smp && (num_online_cpus() > 1));
437
438 mutex_lock(&smp_alt);
439
440 /*
441 * Avoid unnecessary switches because it forces JIT based VMs to
442 * throw away all cached translations, which can be quite costly.
443 */
444 if (smp == smp_mode) {
445 /* nothing */
446 } else if (smp) {
447 printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
448 clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
449 clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
450 list_for_each_entry(mod, &smp_alt_modules, next)
451 alternatives_smp_lock(mod->locks, mod->locks_end,
452 mod->text, mod->text_end);
453 } else {
454 printk(KERN_INFO "SMP alternatives: switching to UP code\n");
455 set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
456 set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
457 list_for_each_entry(mod, &smp_alt_modules, next)
458 alternatives_smp_unlock(mod->locks, mod->locks_end,
459 mod->text, mod->text_end);
460 }
461 smp_mode = smp;
462 mutex_unlock(&smp_alt);
463}
464
465/* Return 1 if the address range is reserved for smp-alternatives */
466int alternatives_text_reserved(void *start, void *end)
467{
468 struct smp_alt_module *mod;
469 const s32 *poff;
470 u8 *text_start = start;
471 u8 *text_end = end;
472
473 list_for_each_entry(mod, &smp_alt_modules, next) {
474 if (mod->text > text_end || mod->text_end < text_start)
475 continue;
476 for (poff = mod->locks; poff < mod->locks_end; poff++) {
477 const u8 *ptr = (const u8 *)poff + *poff;
478
479 if (text_start <= ptr && text_end > ptr)
480 return 1;
481 }
482 }
483
484 return 0;
485}
486#endif
487
488#ifdef CONFIG_PARAVIRT
489void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
490 struct paravirt_patch_site *end)
491{
492 struct paravirt_patch_site *p;
493 char insnbuf[MAX_PATCH_LEN];
494
495 if (noreplace_paravirt)
496 return;
497
498 for (p = start; p < end; p++) {
499 unsigned int used;
500
501 BUG_ON(p->len > MAX_PATCH_LEN);
502 /* prep the buffer with the original instructions */
503 memcpy(insnbuf, p->instr, p->len);
504 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
505 (unsigned long)p->instr, p->len);
506
507 BUG_ON(used > p->len);
508
509 /* Pad the rest with nops */
510 add_nops(insnbuf + used, p->len - used);
511 text_poke_early(p->instr, insnbuf, p->len);
512 }
513}
514extern struct paravirt_patch_site __start_parainstructions[],
515 __stop_parainstructions[];
516#endif /* CONFIG_PARAVIRT */
517
518void __init alternative_instructions(void)
519{
520 /* The patching is not fully atomic, so try to avoid local interruptions
521	   that might execute the code that is about to be patched.
522 Other CPUs are not running. */
523 stop_nmi();
524
525 /*
526 * Don't stop machine check exceptions while patching.
527 * MCEs only happen when something got corrupted and in this
528 * case we must do something about the corruption.
529	 * Ignoring it is worse than an unlikely patching race.
530 * Also machine checks tend to be broadcast and if one CPU
531 * goes into machine check the others follow quickly, so we don't
532	 * expect a machine check to cause undue problems during code
533 * patching.
534 */
535
536 apply_alternatives(__alt_instructions, __alt_instructions_end);
537
538 /* switch to patch-once-at-boottime-only mode and free the
539 * tables in case we know the number of CPUs will never ever
540 * change */
541#ifdef CONFIG_HOTPLUG_CPU
542 if (num_possible_cpus() < 2)
543 smp_alt_once = 1;
544#endif
545
546#ifdef CONFIG_SMP
547 if (smp_alt_once) {
548 if (1 == num_possible_cpus()) {
549 printk(KERN_INFO "SMP alternatives: switching to UP code\n");
550 set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
551 set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
552
553 alternatives_smp_unlock(__smp_locks, __smp_locks_end,
554 _text, _etext);
555 }
556 } else {
557 alternatives_smp_module_add(NULL, "core kernel",
558 __smp_locks, __smp_locks_end,
559 _text, _etext);
560
561 /* Only switch to UP mode if we don't immediately boot others */
562 if (num_present_cpus() == 1 || setup_max_cpus <= 1)
563 alternatives_smp_switch(0);
564 }
565#endif
566 apply_paravirt(__parainstructions, __parainstructions_end);
567
568 if (smp_alt_once)
569 free_init_pages("SMP alternatives",
570 (unsigned long)__smp_locks,
571 (unsigned long)__smp_locks_end);
572
573 restart_nmi();
574}
575
576/**
577 * text_poke_early - Update instructions on a live kernel at boot time
578 * @addr: address to modify
579 * @opcode: source of the copy
580 * @len: length to copy
581 *
582 * When you use this code to patch more than one byte of an instruction
583 * you need to make sure that other CPUs cannot execute this code in parallel.
584 * Also no thread must be currently preempted in the middle of these
585 * instructions. And on the local CPU you need to be protected against NMI or MCE
586 * handlers seeing an inconsistent instruction while you patch.
587 */
588void *__init_or_module text_poke_early(void *addr, const void *opcode,
589 size_t len)
590{
591 unsigned long flags;
592 local_irq_save(flags);
593 memcpy(addr, opcode, len);
594 sync_core();
595 local_irq_restore(flags);
596 /* Could also do a CLFLUSH here to speed up CPU recovery; but
597 that causes hangs on some VIA CPUs. */
598 return addr;
599}
600
601/**
602 * text_poke - Update instructions on a live kernel
603 * @addr: address to modify
604 * @opcode: source of the copy
605 * @len: length to copy
606 *
607 * Only atomic text poke/set should be allowed when not doing early patching.
608 * It means the size must be writable atomically and the address must be aligned
609 * in a way that permits an atomic write. It also makes sure we fit on a single
610 * page.
611 *
612 * Note: Must be called under text_mutex.
613 */
614void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
615{
616 unsigned long flags;
617 char *vaddr;
618 struct page *pages[2];
619 int i;
620
621 if (!core_kernel_text((unsigned long)addr)) {
622 pages[0] = vmalloc_to_page(addr);
623 pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
624 } else {
625 pages[0] = virt_to_page(addr);
626 WARN_ON(!PageReserved(pages[0]));
627 pages[1] = virt_to_page(addr + PAGE_SIZE);
628 }
629 BUG_ON(!pages[0]);
630 local_irq_save(flags);
631 set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
632 if (pages[1])
633 set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
634 vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
635 memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
636 clear_fixmap(FIX_TEXT_POKE0);
637 if (pages[1])
638 clear_fixmap(FIX_TEXT_POKE1);
639 local_flush_tlb();
640 sync_core();
641 /* Could also do a CLFLUSH here to speed up CPU recovery; but
642 that causes hangs on some VIA CPUs. */
643 for (i = 0; i < len; i++)
644 BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
645 local_irq_restore(flags);
646 return addr;
647}
648
649/*
650 * Cross-modifying kernel text with stop_machine().
651 * This code originally comes from the immediate values infrastructure.
652 */
653static atomic_t stop_machine_first;
654static int wrote_text;
655
656struct text_poke_params {
657 struct text_poke_param *params;
658 int nparams;
659};
660
661static int __kprobes stop_machine_text_poke(void *data)
662{
663 struct text_poke_params *tpp = data;
664 struct text_poke_param *p;
665 int i;
666
667 if (atomic_dec_and_test(&stop_machine_first)) {
668 for (i = 0; i < tpp->nparams; i++) {
669 p = &tpp->params[i];
670 text_poke(p->addr, p->opcode, p->len);
671 }
672 smp_wmb(); /* Make sure other cpus see that this has run */
673 wrote_text = 1;
674 } else {
675 while (!wrote_text)
676 cpu_relax();
677 smp_mb(); /* Load wrote_text before following execution */
678 }
679
680 for (i = 0; i < tpp->nparams; i++) {
681 p = &tpp->params[i];
682 flush_icache_range((unsigned long)p->addr,
683 (unsigned long)p->addr + p->len);
684 }
685 /*
686	 * Intel Architecture Software Developer's Manual section 7.1.3 specifies
687 * that a core serializing instruction such as "cpuid" should be
688 * executed on _each_ core before the new instruction is made visible.
689 */
690 sync_core();
691 return 0;
692}
693
694/**
695 * text_poke_smp - Update instructions on a live kernel on SMP
696 * @addr: address to modify
697 * @opcode: source of the copy
698 * @len: length to copy
699 *
700 * Modify a multi-byte instruction by using stop_machine() on SMP. This allows
701 * the user to poke/set multi-byte text on SMP. Only non-NMI/MCE code modification
702 * should be allowed, since stop_machine() does _not_ protect code against
703 * NMI and MCE.
704 *
705 * Note: Must be called under get_online_cpus() and text_mutex.
706 */
707void *__kprobes text_poke_smp(void *addr, const void *opcode, size_t len)
708{
709 struct text_poke_params tpp;
710 struct text_poke_param p;
711
712 p.addr = addr;
713 p.opcode = opcode;
714 p.len = len;
715 tpp.params = &p;
716 tpp.nparams = 1;
717 atomic_set(&stop_machine_first, 1);
718 wrote_text = 0;
719 /* Use __stop_machine() because the caller already got online_cpus. */
720 __stop_machine(stop_machine_text_poke, (void *)&tpp, cpu_online_mask);
721 return addr;
722}
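/*
 * Illustrative usage sketch (not part of this file): replacing a 5-byte
 * instruction while other CPUs may be running, as the kprobes jump
 * optimization does.  Per the comment above, the caller must hold both
 * get_online_cpus() and text_mutex; the function name is made up.
 */
#if 0	/* example only */
static void example_poke_smp(void *addr, const u8 insn[5])
{
	get_online_cpus();
	mutex_lock(&text_mutex);
	text_poke_smp(addr, insn, 5);
	mutex_unlock(&text_mutex);
	put_online_cpus();
}
#endif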
723
724/**
725 * text_poke_smp_batch - Update instructions on a live kernel on SMP
726 * @params: an array of text_poke parameters
727 * @n: the number of elements in params.
728 *
729 * Modify multi-byte instructions by using stop_machine() on SMP. Since
730 * stop_machine() is a heavy task, it is better to aggregate text_poke requests
731 * and do them at once if possible.
732 *
733 * Note: Must be called under get_online_cpus() and text_mutex.
734 */
735void __kprobes text_poke_smp_batch(struct text_poke_param *params, int n)
736{
737 struct text_poke_params tpp = {.params = params, .nparams = n};
738
739 atomic_set(&stop_machine_first, 1);
740 wrote_text = 0;
741 __stop_machine(stop_machine_text_poke, (void *)&tpp, NULL);
742}