#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/kprobes.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <linux/slab.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/fixmap.h>

#define MAX_PATCH_LEN (255-1)

#ifdef CONFIG_HOTPLUG_CPU
static int smp_alt_once;

static int __init bootonly(char *str)
{
	smp_alt_once = 1;
	return 1;
}
__setup("smp-alt-boot", bootonly);
#else
#define smp_alt_once 1
#endif

static int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);

static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);

#ifdef CONFIG_PARAVIRT
static int __initdata_or_module noreplace_paravirt = 0;

static int __init setup_noreplace_paravirt(char *str)
{
	noreplace_paravirt = 1;
	return 1;
}
__setup("noreplace-paravirt", setup_noreplace_paravirt);
#endif

/* Wrapped in do/while so the macro is safe inside unbraced if/else. */
#define DPRINTK(fmt, args...)					\
do {								\
	if (debug_alternative)					\
		printk(KERN_DEBUG fmt, args);			\
} while (0)

/*
 * Each GENERIC_NOPX is of X bytes, and defined as an array of bytes
 * that correspond to that nop. Getting from one nop to the next, we
 * add to the array the offset that is equal to the sum of all sizes of
 * nops preceding the one we are after.
 *
 * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the
 * nice symmetry of sizes of the previous nops.
 */
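/*
 * For example, intel_nops[4] below points at intelnops + 1 + 2 + 3,
 * i.e. just past the 1-, 2- and 3-byte nops, so the four bytes starting
 * there are the GENERIC_NOP4 encoding. Index 0 is NULL since there is
 * no 0-byte nop.
 */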
#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char intelnops[] =
{
	GENERIC_NOP1,
	GENERIC_NOP2,
	GENERIC_NOP3,
	GENERIC_NOP4,
	GENERIC_NOP5,
	GENERIC_NOP6,
	GENERIC_NOP7,
	GENERIC_NOP8,
	GENERIC_NOP5_ATOMIC
};
static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
{
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef K8_NOP1
static const unsigned char k8nops[] =
{
	K8_NOP1,
	K8_NOP2,
	K8_NOP3,
	K8_NOP4,
	K8_NOP5,
	K8_NOP6,
	K8_NOP7,
	K8_NOP8,
	K8_NOP5_ATOMIC
};
static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char k7nops[] =
{
	K7_NOP1,
	K7_NOP2,
	K7_NOP3,
	K7_NOP4,
	K7_NOP5,
	K7_NOP6,
	K7_NOP7,
	K7_NOP8,
	K7_NOP5_ATOMIC
};
static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef P6_NOP1
static const unsigned char p6nops[] =
{
	P6_NOP1,
	P6_NOP2,
	P6_NOP3,
	P6_NOP4,
	P6_NOP5,
	P6_NOP6,
	P6_NOP7,
	P6_NOP8,
	P6_NOP5_ATOMIC
};
static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
{
	NULL,
	p6nops,
	p6nops + 1,
	p6nops + 1 + 2,
	p6nops + 1 + 2 + 3,
	p6nops + 1 + 2 + 3 + 4,
	p6nops + 1 + 2 + 3 + 4 + 5,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

/* Initialize these to a safe default */
#ifdef CONFIG_X86_64
const unsigned char * const *ideal_nops = p6_nops;
#else
const unsigned char * const *ideal_nops = intel_nops;
#endif

void __init arch_init_ideal_nops(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		/*
		 * Due to a decoder implementation quirk, some
		 * specific Intel CPUs actually perform better with
		 * the "k8_nops" than with the SDM-recommended NOPs.
		 */
		if (boot_cpu_data.x86 == 6 &&
		    boot_cpu_data.x86_model >= 0x0f &&
		    boot_cpu_data.x86_model != 0x1c &&
		    boot_cpu_data.x86_model != 0x26 &&
		    boot_cpu_data.x86_model != 0x27 &&
		    boot_cpu_data.x86_model < 0x30) {
			ideal_nops = k8_nops;
		} else if (boot_cpu_has(X86_FEATURE_NOPL)) {
			ideal_nops = p6_nops;
		} else {
#ifdef CONFIG_X86_64
			ideal_nops = k8_nops;
#else
			ideal_nops = intel_nops;
#endif
		}
		break;
	default:
#ifdef CONFIG_X86_64
		ideal_nops = k8_nops;
#else
		if (boot_cpu_has(X86_FEATURE_K8))
			ideal_nops = k8_nops;
		else if (boot_cpu_has(X86_FEATURE_K7))
			ideal_nops = k7_nops;
		else
			ideal_nops = intel_nops;
#endif
	}
}

/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
{
	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, ideal_nops[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}

extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
void *text_poke_early(void *addr, const void *opcode, size_t len);

/* Replace instructions with better alternatives for this CPU type.
   This runs before SMP is initialized to avoid SMP problems with
   self modifying code. This implies that asymmetric systems where
   APs have fewer capabilities than the boot processor are not handled.
   Tough. Make sure you disable such features by hand. */

void __init_or_module apply_alternatives(struct alt_instr *start,
					 struct alt_instr *end)
{
	struct alt_instr *a;
	u8 *instr, *replacement;
	u8 insnbuf[MAX_PATCH_LEN];

	DPRINTK("%s: alt table %p -> %p\n", __func__, start, end);
	/*
	 * The scan order should be from start to end. A later scanned
	 * alternative can overwrite previously scanned alternative code.
	 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
	 * patch code.
	 *
	 * So be careful if you want to change the scan order to any other
	 * order.
	 */
	for (a = start; a < end; a++) {
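		/*
		 * instr_offset and repl_offset are stored relative to
		 * their own location inside struct alt_instr, which is
		 * what keeps the table entries position-independent.
		 */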
		instr = (u8 *)&a->instr_offset + a->instr_offset;
		replacement = (u8 *)&a->repl_offset + a->repl_offset;
		BUG_ON(a->replacementlen > a->instrlen);
		BUG_ON(a->instrlen > sizeof(insnbuf));
		BUG_ON(a->cpuid >= NCAPINTS*32);
		if (!boot_cpu_has(a->cpuid))
			continue;

		memcpy(insnbuf, replacement, a->replacementlen);

		/* 0xe8 is a relative jump; fix the offset.
		   (check the length first so a zero-length replacement
		   cannot read an uninitialized buffer byte) */
		if (a->replacementlen == 5 && *insnbuf == 0xe8)
			*(s32 *)(insnbuf + 1) += replacement - instr;

		add_nops(insnbuf + a->replacementlen,
			 a->instrlen - a->replacementlen);

		text_poke_early(instr, insnbuf, a->instrlen);
	}
}
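
/*
 * A usage sketch: alternatives are emitted with the alternative() macro
 * from <asm/alternative.h>, which records one struct alt_instr entry
 * for apply_alternatives() to process, e.g. the 32-bit memory barrier:
 *
 *	#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", \
 *				 X86_FEATURE_XMM2)
 */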

#ifdef CONFIG_SMP

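/*
 * LOCK_PREFIX records the address of every emitted lock prefix in the
 * .smp_locks section. Because the prefix is a single byte (0xf0), a UP
 * kernel can overwrite it in place with a DS segment override (0x3e),
 * which is architecturally a no-op there, and restore it when a second
 * CPU comes up.
 */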
static void alternatives_smp_lock(const s32 *start, const s32 *end,
				  u8 *text, u8 *text_end)
{
	const s32 *poff;

	mutex_lock(&text_mutex);
	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn DS segment override prefix into lock prefix */
		if (*ptr == 0x3e)
			text_poke(ptr, ((unsigned char []){0xf0}), 1);
	}
	mutex_unlock(&text_mutex);
}

static void alternatives_smp_unlock(const s32 *start, const s32 *end,
				    u8 *text, u8 *text_end)
{
	const s32 *poff;

	if (noreplace_smp)
		return;

	mutex_lock(&text_mutex);
	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn lock prefix into DS segment override prefix */
		if (*ptr == 0xf0)
			text_poke(ptr, ((unsigned char []){0x3E}), 1);
	}
	mutex_unlock(&text_mutex);
}

struct smp_alt_module {
	/* the module that owns these lock prefixes (NULL for core kernel) */
	struct module	*mod;
	char		*name;

	/* ptrs to lock prefixes */
	const s32	*locks;
	const s32	*locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8		*text;
	u8		*text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_MUTEX(smp_alt);
static int smp_mode = 1;	/* protected by smp_alt */

void __init_or_module alternatives_smp_module_add(struct module *mod,
						  char *name,
						  void *locks, void *locks_end,
						  void *text,  void *text_end)
{
	struct smp_alt_module *smp;

	if (noreplace_smp)
		return;

	if (smp_alt_once) {
		if (boot_cpu_has(X86_FEATURE_UP))
			alternatives_smp_unlock(locks, locks_end,
						text, text_end);
		return;
	}

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (!smp)
		return; /* we'll run the (safe but slow) SMP code then ... */

	smp->mod	= mod;
	smp->name	= name;
	smp->locks	= locks;
	smp->locks_end	= locks_end;
	smp->text	= text;
	smp->text_end	= text_end;
	DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
		__func__, smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	mutex_lock(&smp_alt);
	list_add_tail(&smp->next, &smp_alt_modules);
	if (boot_cpu_has(X86_FEATURE_UP))
		alternatives_smp_unlock(smp->locks, smp->locks_end,
					smp->text, smp->text_end);
	mutex_unlock(&smp_alt);
}

void __init_or_module alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;

	if (smp_alt_once || noreplace_smp)
		return;

	mutex_lock(&smp_alt);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		mutex_unlock(&smp_alt);
		DPRINTK("%s: %s\n", __func__, item->name);
		kfree(item);
		return;
	}
	mutex_unlock(&smp_alt);
}

bool skip_smp_alternatives;
void alternatives_smp_switch(int smp)
{
	struct smp_alt_module *mod;

#ifdef CONFIG_LOCKDEP
	/*
	 * Older binutils section handling bug prevented
	 * alternatives-replacement from working reliably.
	 *
	 * If this still occurs then you should see a hang
	 * or crash shortly after this line:
	 */
	printk(KERN_INFO "lockdep: fixing up alternatives.\n");
#endif

	if (noreplace_smp || smp_alt_once || skip_smp_alternatives)
		return;
	BUG_ON(!smp && (num_online_cpus() > 1));

	mutex_lock(&smp_alt);

	/*
	 * Avoid unnecessary switches because it forces JIT based VMs to
	 * throw away all cached translations, which can be quite costly.
	 */
	if (smp == smp_mode) {
		/* nothing */
	} else if (smp) {
		printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
	} else {
		printk(KERN_INFO "SMP alternatives: switching to UP code\n");
		set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_unlock(mod->locks, mod->locks_end,
						mod->text, mod->text_end);
	}
	smp_mode = smp;
	mutex_unlock(&smp_alt);
}

/* Return 1 if the address range is reserved for smp-alternatives */
int alternatives_text_reserved(void *start, void *end)
{
	struct smp_alt_module *mod;
	const s32 *poff;
	u8 *text_start = start;
	u8 *text_end = end;

	list_for_each_entry(mod, &smp_alt_modules, next) {
		if (mod->text > text_end || mod->text_end < text_start)
			continue;
		for (poff = mod->locks; poff < mod->locks_end; poff++) {
			const u8 *ptr = (const u8 *)poff + *poff;

			if (text_start <= ptr && text_end > ptr)
				return 1;
		}
	}

	return 0;
}
#endif

#ifdef CONFIG_PARAVIRT
void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
				     struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;
	char insnbuf[MAX_PATCH_LEN];

	if (noreplace_paravirt)
		return;

	for (p = start; p < end; p++) {
		unsigned int used;

		BUG_ON(p->len > MAX_PATCH_LEN);
		/* prep the buffer with the original instructions */
		memcpy(insnbuf, p->instr, p->len);
		used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
					 (unsigned long)p->instr, p->len);

		BUG_ON(used > p->len);

		/* Pad the rest with nops */
		add_nops(insnbuf + used, p->len - used);
		text_poke_early(p->instr, insnbuf, p->len);
	}
}
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif	/* CONFIG_PARAVIRT */

void __init alternative_instructions(void)
{
	/* The patching is not fully atomic, so try to avoid local
	   interruptions that might execute the code being patched.
	   Other CPUs are not running. */
	stop_nmi();

	/*
	 * Don't stop machine check exceptions while patching.
	 * MCEs only happen when something got corrupted and in this
	 * case we must do something about the corruption.
	 * Ignoring it is worse than an unlikely patching race.
	 * Also machine checks tend to be broadcast and if one CPU
	 * goes into machine check the others follow quickly, so we don't
	 * expect a machine check to cause undue problems during
	 * code patching.
	 */

	apply_alternatives(__alt_instructions, __alt_instructions_end);

	/* switch to patch-once-at-boottime-only mode and free the
	 * tables in case we know the number of CPUs will never ever
	 * change */
#ifdef CONFIG_HOTPLUG_CPU
	if (num_possible_cpus() < 2)
		smp_alt_once = 1;
#endif

#ifdef CONFIG_SMP
	if (smp_alt_once) {
		if (num_possible_cpus() == 1) {
			printk(KERN_INFO "SMP alternatives: switching to UP code\n");
			set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
			set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);

			alternatives_smp_unlock(__smp_locks, __smp_locks_end,
						_text, _etext);
		}
	} else {
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);

		/* Only switch to UP mode if we don't immediately boot others */
		if (num_present_cpus() == 1 || setup_max_cpus <= 1)
			alternatives_smp_switch(0);
	}
#endif
	apply_paravirt(__parainstructions, __parainstructions_end);

	if (smp_alt_once)
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);

	restart_nmi();
}

/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these
 * instructions. And on the local CPU you need to be protected against NMI or
 * MCE handlers seeing an inconsistent instruction while you patch.
 */
void *__init_or_module text_poke_early(void *addr, const void *opcode,
				       size_t len)
{
	unsigned long flags;
	local_irq_save(flags);
	memcpy(addr, opcode, len);
	sync_core();
	local_irq_restore(flags);
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
	return addr;
}

/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be aligned
 * in a way that permits an atomic write. It also makes sure we fit on a single
 * page.
 *
 * Note: Must be called under text_mutex.
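 *
 * Kernel text may be mapped read-only, so the target page(s) are
 * temporarily mapped writable at the FIX_TEXT_POKE* fixmap slots and
 * the bytes are written through that alias.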
 */
void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
{
	unsigned long flags;
	char *vaddr;
	struct page *pages[2];
	int i;

	if (!core_kernel_text((unsigned long)addr)) {
		pages[0] = vmalloc_to_page(addr);
		pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
	} else {
		pages[0] = virt_to_page(addr);
		WARN_ON(!PageReserved(pages[0]));
		pages[1] = virt_to_page(addr + PAGE_SIZE);
	}
	BUG_ON(!pages[0]);
	local_irq_save(flags);
	set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
	if (pages[1])
		set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
	vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
	memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
	clear_fixmap(FIX_TEXT_POKE0);
	if (pages[1])
		clear_fixmap(FIX_TEXT_POKE1);
	local_flush_tlb();
	sync_core();
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
	for (i = 0; i < len; i++)
		BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
	local_irq_restore(flags);
	return addr;
}

/*
 * Cross-modifying kernel text with stop_machine().
 * This code originally comes from immediate value.
 */
static atomic_t stop_machine_first;
static int wrote_text;

struct text_poke_params {
	struct text_poke_param *params;
	int nparams;
};

static int __kprobes stop_machine_text_poke(void *data)
{
	struct text_poke_params *tpp = data;
	struct text_poke_param *p;
	int i;

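	/*
	 * All CPUs arrive here via stop_machine(); the CPU that drops
	 * stop_machine_first from 1 to 0 performs the patching while
	 * the others spin until wrote_text is set.
	 */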
	if (atomic_dec_and_test(&stop_machine_first)) {
		for (i = 0; i < tpp->nparams; i++) {
			p = &tpp->params[i];
			text_poke(p->addr, p->opcode, p->len);
		}
		smp_wmb();	/* Make sure other cpus see that this has run */
		wrote_text = 1;
	} else {
		while (!wrote_text)
			cpu_relax();
		smp_mb();	/* Load wrote_text before following execution */
	}

	for (i = 0; i < tpp->nparams; i++) {
		p = &tpp->params[i];
		flush_icache_range((unsigned long)p->addr,
				   (unsigned long)p->addr + p->len);
	}
	/*
	 * Intel Architecture Software Developer's Manual section 7.1.3
	 * specifies that a core serializing instruction such as "cpuid"
	 * should be executed on _each_ core before the new instruction is
	 * made visible.
	 */
	sync_core();
	return 0;
}

/**
 * text_poke_smp - Update instructions on a live kernel on SMP
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Modify multi-byte instruction by using stop_machine() on SMP. This allows
 * user to poke/set multi-byte text on SMP. Only code that is not executed by
 * NMI and MCE handlers may be modified this way, since stop_machine() does
 * _not_ protect code against NMI and MCE.
 *
 * Note: Must be called under get_online_cpus() and text_mutex.
 */
void *__kprobes text_poke_smp(void *addr, const void *opcode, size_t len)
{
	struct text_poke_params tpp;
	struct text_poke_param p;

	p.addr = addr;
	p.opcode = opcode;
	p.len = len;
	tpp.params = &p;
	tpp.nparams = 1;
	atomic_set(&stop_machine_first, 1);
	wrote_text = 0;
	/* Use __stop_machine() because the caller already got online_cpus. */
	__stop_machine(stop_machine_text_poke, (void *)&tpp, cpu_online_mask);
	return addr;
}
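
/*
 * A minimal usage sketch (illustrative only; "addr" and "dest" are
 * assumed locals, not part of this file): replacing a 5-byte
 * instruction with a near JMP while other CPUs are running:
 *
 *	u8 jmp[5] = { 0xe9, };
 *	*(s32 *)(jmp + 1) = (s32)(dest - (addr + 5));
 *	text_poke_smp(addr, jmp, sizeof(jmp));
 */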

/**
 * text_poke_smp_batch - Update instructions on a live kernel on SMP
 * @params: an array of text_poke parameters
 * @n: the number of elements in params.
 *
 * Modify multi-byte instructions by using stop_machine() on SMP. Since
 * stop_machine() is a heavy operation, it is better to aggregate
 * text_poke requests and do them all in one stop_machine() if possible.
 *
 * Note: Must be called under get_online_cpus() and text_mutex.
 */
void __kprobes text_poke_smp_batch(struct text_poke_param *params, int n)
{
	struct text_poke_params tpp = {.params = params, .nparams = n};

	atomic_set(&stop_machine_first, 1);
	wrote_text = 0;
	__stop_machine(stop_machine_text_poke, (void *)&tpp, cpu_online_mask);
}

/*
 * Below is a later revision of the same file, shown in full: the
 * stop_machine()-based text_poke_smp() machinery above was replaced
 * by int3-breakpoint-based patching (text_poke_bp()).
 */

#define pr_fmt(fmt) "SMP alternatives: " fmt

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <linux/slab.h>
#include <linux/kdebug.h>
#include <asm/text-patching.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/fixmap.h>

int __read_mostly alternatives_patched;
EXPORT_SYMBOL_GPL(alternatives_patched);

#define MAX_PATCH_LEN (255-1)

static int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);

static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);

#define DPRINTK(fmt, args...)						\
do {									\
	if (debug_alternative)						\
		printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##args);	\
} while (0)

#define DUMP_BYTES(buf, len, fmt, args...)				\
do {									\
	if (unlikely(debug_alternative)) {				\
		int j;							\
									\
		if (!(len))						\
			break;						\
									\
		printk(KERN_DEBUG fmt, ##args);				\
		for (j = 0; j < (len) - 1; j++)				\
			printk(KERN_CONT "%02hhx ", buf[j]);		\
		printk(KERN_CONT "%02hhx\n", buf[j]);			\
	}								\
} while (0)

/*
 * Each GENERIC_NOPX is of X bytes, and defined as an array of bytes
 * that correspond to that nop. Getting from one nop to the next, we
 * add to the array the offset that is equal to the sum of all sizes of
 * nops preceding the one we are after.
 *
 * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the
 * nice symmetry of sizes of the previous nops.
 */
#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char intelnops[] =
{
	GENERIC_NOP1,
	GENERIC_NOP2,
	GENERIC_NOP3,
	GENERIC_NOP4,
	GENERIC_NOP5,
	GENERIC_NOP6,
	GENERIC_NOP7,
	GENERIC_NOP8,
	GENERIC_NOP5_ATOMIC
};
static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
{
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef K8_NOP1
static const unsigned char k8nops[] =
{
	K8_NOP1,
	K8_NOP2,
	K8_NOP3,
	K8_NOP4,
	K8_NOP5,
	K8_NOP6,
	K8_NOP7,
	K8_NOP8,
	K8_NOP5_ATOMIC
};
static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char k7nops[] =
{
	K7_NOP1,
	K7_NOP2,
	K7_NOP3,
	K7_NOP4,
	K7_NOP5,
	K7_NOP6,
	K7_NOP7,
	K7_NOP8,
	K7_NOP5_ATOMIC
};
static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef P6_NOP1
static const unsigned char p6nops[] =
{
	P6_NOP1,
	P6_NOP2,
	P6_NOP3,
	P6_NOP4,
	P6_NOP5,
	P6_NOP6,
	P6_NOP7,
	P6_NOP8,
	P6_NOP5_ATOMIC
};
static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
{
	NULL,
	p6nops,
	p6nops + 1,
	p6nops + 1 + 2,
	p6nops + 1 + 2 + 3,
	p6nops + 1 + 2 + 3 + 4,
	p6nops + 1 + 2 + 3 + 4 + 5,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

/* Initialize these to a safe default */
#ifdef CONFIG_X86_64
const unsigned char * const *ideal_nops = p6_nops;
#else
const unsigned char * const *ideal_nops = intel_nops;
#endif

void __init arch_init_ideal_nops(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		/*
		 * Due to a decoder implementation quirk, some
		 * specific Intel CPUs actually perform better with
		 * the "k8_nops" than with the SDM-recommended NOPs.
		 */
		if (boot_cpu_data.x86 == 6 &&
		    boot_cpu_data.x86_model >= 0x0f &&
		    boot_cpu_data.x86_model != 0x1c &&
		    boot_cpu_data.x86_model != 0x26 &&
		    boot_cpu_data.x86_model != 0x27 &&
		    boot_cpu_data.x86_model < 0x30) {
			ideal_nops = k8_nops;
		} else if (boot_cpu_has(X86_FEATURE_NOPL)) {
			ideal_nops = p6_nops;
		} else {
#ifdef CONFIG_X86_64
			ideal_nops = k8_nops;
#else
			ideal_nops = intel_nops;
#endif
		}
		break;

	case X86_VENDOR_AMD:
		if (boot_cpu_data.x86 > 0xf) {
			ideal_nops = p6_nops;
			return;
		}

		/* fall through */

	default:
#ifdef CONFIG_X86_64
		ideal_nops = k8_nops;
#else
		if (boot_cpu_has(X86_FEATURE_K8))
			ideal_nops = k8_nops;
		else if (boot_cpu_has(X86_FEATURE_K7))
			ideal_nops = k7_nops;
		else
			ideal_nops = intel_nops;
#endif
	}
}

/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
{
	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, ideal_nops[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}

extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
void *text_poke_early(void *addr, const void *opcode, size_t len);

/*
 * Are we looking at a near JMP with a 1- or 4-byte displacement?
 */
static inline bool is_jmp(const u8 opcode)
{
	return opcode == 0xeb || opcode == 0xe9;
}

static void __init_or_module
recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf)
{
	u8 *next_rip, *tgt_rip;
	s32 n_dspl, o_dspl;
	int repl_len;

	if (a->replacementlen != 5)
		return;

	o_dspl = *(s32 *)(insnbuf + 1);

	/* next_rip of the replacement JMP */
	next_rip = repl_insn + a->replacementlen;
	/* target rip of the replacement JMP */
	tgt_rip = next_rip + o_dspl;
	n_dspl = tgt_rip - orig_insn;

	DPRINTK("target RIP: %px, new_displ: 0x%x", tgt_rip, n_dspl);

	if (tgt_rip - orig_insn >= 0) {
		if (n_dspl - 2 <= 127)
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	/* negative offset */
	} else {
		if (((n_dspl - 2) & 0xff) == (n_dspl - 2))
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	}

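	/*
	 * The displacement in a JMP is relative to the first byte after
	 * the instruction, so subtract the length of the JMP being
	 * emitted: 2 bytes for JMP rel8, 5 bytes for JMP rel32.
	 */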
two_byte_jmp:
	n_dspl -= 2;

	insnbuf[0] = 0xeb;
	insnbuf[1] = (s8)n_dspl;
	add_nops(insnbuf + 2, 3);

	repl_len = 2;
	goto done;

five_byte_jmp:
	n_dspl -= 5;

	insnbuf[0] = 0xe9;
	*(s32 *)&insnbuf[1] = n_dspl;

	repl_len = 5;

done:

	DPRINTK("final displ: 0x%08x, JMP 0x%lx",
		n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
}

/*
 * "noinline" to cause control flow change and thus invalidate I$ and
 * cause refetch after modification.
 */
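/*
 * When an alternative is not applied, the padding emitted after the
 * original instruction is a run of single-byte 0x90 NOPs;
 * optimize_nops() rewrites that padding in place with fewer, longer
 * ideal NOPs so the CPU has fewer instructions to decode.
 */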
static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr)
{
	unsigned long flags;
	int i;

	for (i = 0; i < a->padlen; i++) {
		if (instr[i] != 0x90)
			return;
	}

	local_irq_save(flags);
	add_nops(instr + (a->instrlen - a->padlen), a->padlen);
	local_irq_restore(flags);

	DUMP_BYTES(instr, a->instrlen, "%px: [%d:%d) optimized NOPs: ",
		   instr, a->instrlen - a->padlen, a->padlen);
}

/*
 * Replace instructions with better alternatives for this CPU type. This runs
 * before SMP is initialized to avoid SMP problems with self modifying code.
 * This implies that asymmetric systems where APs have fewer capabilities than
 * the boot processor are not handled. Tough. Make sure you disable such
 * features by hand.
 *
 * Marked "noinline" to cause control flow change and thus insn cache
 * to refetch changed I$ lines.
 */
void __init_or_module noinline apply_alternatives(struct alt_instr *start,
						  struct alt_instr *end)
{
	struct alt_instr *a;
	u8 *instr, *replacement;
	u8 insnbuf[MAX_PATCH_LEN];

	DPRINTK("alt table %px -> %px", start, end);
	/*
	 * The scan order should be from start to end. A later scanned
	 * alternative code can overwrite previously scanned alternative code.
	 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
	 * patch code.
	 *
	 * So be careful if you want to change the scan order to any other
	 * order.
	 */
	for (a = start; a < end; a++) {
		int insnbuf_sz = 0;

		instr = (u8 *)&a->instr_offset + a->instr_offset;
		replacement = (u8 *)&a->repl_offset + a->repl_offset;
		BUG_ON(a->instrlen > sizeof(insnbuf));
		BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
		if (!boot_cpu_has(a->cpuid)) {
			if (a->padlen > 1)
				optimize_nops(a, instr);

			continue;
		}

		DPRINTK("feat: %d*32+%d, old: (%px len: %d), repl: (%px, len: %d), pad: %d",
			a->cpuid >> 5,
			a->cpuid & 0x1f,
			instr, a->instrlen,
			replacement, a->replacementlen, a->padlen);

		DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr);
		DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement);

		memcpy(insnbuf, replacement, a->replacementlen);
		insnbuf_sz = a->replacementlen;

		/*
		 * 0xe8 is a relative jump; fix the offset.
		 *
		 * Instruction length is checked before the opcode to avoid
		 * accessing uninitialized bytes for zero-length replacements.
		 */
		if (a->replacementlen == 5 && *insnbuf == 0xe8) {
			*(s32 *)(insnbuf + 1) += replacement - instr;
			DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
				*(s32 *)(insnbuf + 1),
				(unsigned long)instr + *(s32 *)(insnbuf + 1) + 5);
		}

		if (a->replacementlen && is_jmp(replacement[0]))
			recompute_jump(a, instr, replacement, insnbuf);

		if (a->instrlen > a->replacementlen) {
			add_nops(insnbuf + a->replacementlen,
				 a->instrlen - a->replacementlen);
			insnbuf_sz += a->instrlen - a->replacementlen;
		}
		DUMP_BYTES(insnbuf, insnbuf_sz, "%px: final_insn: ", instr);

		text_poke_early(instr, insnbuf, insnbuf_sz);
	}
}

#ifdef CONFIG_SMP
static void alternatives_smp_lock(const s32 *start, const s32 *end,
				  u8 *text, u8 *text_end)
{
	const s32 *poff;

	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn DS segment override prefix into lock prefix */
		if (*ptr == 0x3e)
			text_poke(ptr, ((unsigned char []){0xf0}), 1);
	}
}

static void alternatives_smp_unlock(const s32 *start, const s32 *end,
				    u8 *text, u8 *text_end)
{
	const s32 *poff;

	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn lock prefix into DS segment override prefix */
		if (*ptr == 0xf0)
			text_poke(ptr, ((unsigned char []){0x3E}), 1);
	}
}

struct smp_alt_module {
	/* the module that owns these lock prefixes (NULL for core kernel) */
	struct module	*mod;
	char		*name;

	/* ptrs to lock prefixes */
	const s32	*locks;
	const s32	*locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8		*text;
	u8		*text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static bool uniproc_patched = false;	/* protected by text_mutex */

void __init_or_module alternatives_smp_module_add(struct module *mod,
						  char *name,
						  void *locks, void *locks_end,
						  void *text,  void *text_end)
{
	struct smp_alt_module *smp;

	mutex_lock(&text_mutex);
	if (!uniproc_patched)
		goto unlock;

	if (num_possible_cpus() == 1)
		/* Don't bother remembering, we'll never have to undo it. */
		goto smp_unlock;

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (!smp)
		/* we'll run the (safe but slow) SMP code then ... */
		goto unlock;

	smp->mod	= mod;
	smp->name	= name;
	smp->locks	= locks;
	smp->locks_end	= locks_end;
	smp->text	= text;
	smp->text_end	= text_end;
	DPRINTK("locks %p -> %p, text %p -> %p, name %s",
		smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	list_add_tail(&smp->next, &smp_alt_modules);
smp_unlock:
	alternatives_smp_unlock(locks, locks_end, text, text_end);
unlock:
	mutex_unlock(&text_mutex);
}

void __init_or_module alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;

	mutex_lock(&text_mutex);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		kfree(item);
		break;
	}
	mutex_unlock(&text_mutex);
}

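/*
 * Undo the UP optimizations: this runs while only the boot CPU is
 * online (the BUG_ON below enforces that), just before another CPU is
 * brought up, so no other CPU can yet execute the patched code.
 */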
void alternatives_enable_smp(void)
{
	struct smp_alt_module *mod;

	/* Why bother if there are no other CPUs? */
	BUG_ON(num_possible_cpus() == 1);

	mutex_lock(&text_mutex);

	if (uniproc_patched) {
		pr_info("switching to SMP code\n");
		BUG_ON(num_online_cpus() != 1);
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
		uniproc_patched = false;
	}
	mutex_unlock(&text_mutex);
}

/*
 * Return 1 if the address range is reserved for SMP-alternatives.
 * Must hold text_mutex.
 */
int alternatives_text_reserved(void *start, void *end)
{
	struct smp_alt_module *mod;
	const s32 *poff;
	u8 *text_start = start;
	u8 *text_end = end;

	lockdep_assert_held(&text_mutex);

	list_for_each_entry(mod, &smp_alt_modules, next) {
		if (mod->text > text_end || mod->text_end < text_start)
			continue;
		for (poff = mod->locks; poff < mod->locks_end; poff++) {
			const u8 *ptr = (const u8 *)poff + *poff;

			if (text_start <= ptr && text_end > ptr)
				return 1;
		}
	}

	return 0;
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PARAVIRT
void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
				     struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;
	char insnbuf[MAX_PATCH_LEN];

	for (p = start; p < end; p++) {
		unsigned int used;

		BUG_ON(p->len > MAX_PATCH_LEN);
		/* prep the buffer with the original instructions */
		memcpy(insnbuf, p->instr, p->len);
		used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
					 (unsigned long)p->instr, p->len);

		BUG_ON(used > p->len);

		/* Pad the rest with nops */
		add_nops(insnbuf + used, p->len - used);
		text_poke_early(p->instr, insnbuf, p->len);
	}
}
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif	/* CONFIG_PARAVIRT */

void __init alternative_instructions(void)
{
	/* The patching is not fully atomic, so try to avoid local
	   interruptions that might execute the code being patched.
	   Other CPUs are not running. */
	stop_nmi();

	/*
	 * Don't stop machine check exceptions while patching.
	 * MCEs only happen when something got corrupted and in this
	 * case we must do something about the corruption.
	 * Ignoring it is worse than an unlikely patching race.
	 * Also machine checks tend to be broadcast and if one CPU
	 * goes into machine check the others follow quickly, so we don't
	 * expect a machine check to cause undue problems during
	 * code patching.
	 */

	apply_alternatives(__alt_instructions, __alt_instructions_end);

#ifdef CONFIG_SMP
	/* Patch to UP if other cpus not imminent. */
	if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
		uniproc_patched = true;
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
	}

	if (!uniproc_patched || num_possible_cpus() == 1)
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);
#endif

	apply_paravirt(__parainstructions, __parainstructions_end);

	restart_nmi();
	alternatives_patched = 1;
}

/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these
 * instructions. And on the local CPU you need to be protected against NMI or
 * MCE handlers seeing an inconsistent instruction while you patch.
 */
void *__init_or_module text_poke_early(void *addr, const void *opcode,
				       size_t len)
{
	unsigned long flags;
	local_irq_save(flags);
	memcpy(addr, opcode, len);
	local_irq_restore(flags);
	sync_core();
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
	return addr;
}

/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be aligned
 * in a way that permits an atomic write. It also makes sure we fit on a single
 * page.
 *
 * Note: Must be called under text_mutex.
 */
void *text_poke(void *addr, const void *opcode, size_t len)
{
	unsigned long flags;
	char *vaddr;
	struct page *pages[2];
	int i;

	if (!core_kernel_text((unsigned long)addr)) {
		pages[0] = vmalloc_to_page(addr);
		pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
	} else {
		pages[0] = virt_to_page(addr);
		WARN_ON(!PageReserved(pages[0]));
		pages[1] = virt_to_page(addr + PAGE_SIZE);
	}
	BUG_ON(!pages[0]);
	local_irq_save(flags);
	set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
	if (pages[1])
		set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
	vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
	memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
	clear_fixmap(FIX_TEXT_POKE0);
	if (pages[1])
		clear_fixmap(FIX_TEXT_POKE1);
	local_flush_tlb();
	sync_core();
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
	for (i = 0; i < len; i++)
		BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
	local_irq_restore(flags);
	return addr;
}

static void do_sync_core(void *info)
{
	sync_core();
}

static bool bp_patching_in_progress;
static void *bp_int3_handler, *bp_int3_addr;

int poke_int3_handler(struct pt_regs *regs)
{
	/*
	 * Having observed our INT3 instruction, we now must observe
	 * bp_patching_in_progress.
	 *
	 *	in_progress = TRUE		INT3
	 *	WMB				RMB
	 *	write INT3			if (in_progress)
	 *
	 * Idem for bp_int3_handler.
	 */
	smp_rmb();

	if (likely(!bp_patching_in_progress))
		return 0;

	if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
		return 0;

	/* set up the specified breakpoint handler */
	regs->ip = (unsigned long) bp_int3_handler;

	return 1;
}

/**
 * text_poke_bp() -- update instructions on live kernel on SMP
 * @addr:	address to patch
 * @opcode:	opcode of new instruction
 * @len:	length to copy
 * @handler:	address to jump to when the temporary breakpoint is hit
 *
 * Modify multi-byte instruction by using int3 breakpoint on SMP.
 * We completely avoid stop_machine() here, and achieve the
 * synchronization using int3 breakpoint.
 *
 * The way it is done:
 *	- add an int3 trap to the address that will be patched
 *	- sync cores
 *	- update all but the first byte of the patched range
 *	- sync cores
 *	- replace the first byte (int3) by the first byte of
 *	  replacing opcode
 *	- sync cores
 *
 * Note: must be called under text_mutex.
 */
void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
{
	unsigned char int3 = 0xcc;

	bp_int3_handler = handler;
	bp_int3_addr = (u8 *)addr + sizeof(int3);
	bp_patching_in_progress = true;
	/*
	 * Corresponding read barrier in int3 notifier for making sure the
	 * in_progress and handler are correctly ordered wrt. patching.
	 */
	smp_wmb();

	text_poke(addr, &int3, sizeof(int3));

	on_each_cpu(do_sync_core, NULL, 1);

	if (len - sizeof(int3) > 0) {
		/* patch all but the first byte */
		text_poke((char *)addr + sizeof(int3),
			  (const char *) opcode + sizeof(int3),
			  len - sizeof(int3));
		/*
		 * According to Intel, this core syncing is very likely
		 * not necessary and we'd be safe even without it. But
		 * better safe than sorry (plus there's not only Intel).
		 */
		on_each_cpu(do_sync_core, NULL, 1);
	}

	/* patch the first byte */
	text_poke(addr, opcode, sizeof(int3));

	on_each_cpu(do_sync_core, NULL, 1);
	/*
	 * sync_core() implies an smp_mb() and orders this store against
	 * the writing of the new instruction.
	 */
	bp_patching_in_progress = false;

	return addr;
}
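
/*
 * A minimal usage sketch (illustrative only; "addr" and "target" are
 * assumed locals, not part of this file): turning a 5-byte site into a
 * near JMP. A CPU that hits the transient int3 is redirected to
 * "target", which is exactly what the finished JMP would do:
 *
 *	u8 jmp[5] = { 0xe9, };
 *	*(s32 *)(jmp + 1) = (s32)(target - (addr + 5));
 *	text_poke_bp(addr, jmp, sizeof(jmp), target);
 */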