arch/x86/kernel/alternative.c (v4.10.11)
  1#define pr_fmt(fmt) "SMP alternatives: " fmt
  2
  3#include <linux/module.h>
  4#include <linux/sched.h>
  5#include <linux/mutex.h>
  6#include <linux/list.h>
  7#include <linux/stringify.h>
  8#include <linux/mm.h>
  9#include <linux/vmalloc.h>
 10#include <linux/memory.h>
 11#include <linux/stop_machine.h>
 12#include <linux/slab.h>
 13#include <linux/kdebug.h>
 14#include <asm/text-patching.h>
 15#include <asm/alternative.h>
 16#include <asm/sections.h>
 17#include <asm/pgtable.h>
 18#include <asm/mce.h>
 19#include <asm/nmi.h>
 20#include <asm/cacheflush.h>
 21#include <asm/tlbflush.h>
 22#include <asm/io.h>
 23#include <asm/fixmap.h>
 24
 25int __read_mostly alternatives_patched;
 26
 27EXPORT_SYMBOL_GPL(alternatives_patched);
 28
 29#define MAX_PATCH_LEN (255-1)
 30
 31static int __initdata_or_module debug_alternative;
 32
 33static int __init debug_alt(char *str)
 34{
 35	debug_alternative = 1;
 36	return 1;
 37}
 38__setup("debug-alternative", debug_alt);
 39
 40static int noreplace_smp;
 41
 42static int __init setup_noreplace_smp(char *str)
 43{
 44	noreplace_smp = 1;
 45	return 1;
 46}
 47__setup("noreplace-smp", setup_noreplace_smp);
 48
 49#ifdef CONFIG_PARAVIRT
 50static int __initdata_or_module noreplace_paravirt = 0;
 51
 52static int __init setup_noreplace_paravirt(char *str)
 53{
 54	noreplace_paravirt = 1;
 55	return 1;
 56}
 57__setup("noreplace-paravirt", setup_noreplace_paravirt);
 58#endif
 59
 60#define DPRINTK(fmt, args...)						\
 61do {									\
 62	if (debug_alternative)						\
 63		printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##args);	\
 64} while (0)
 65
 66#define DUMP_BYTES(buf, len, fmt, args...)				\
 67do {									\
 68	if (unlikely(debug_alternative)) {				\
 69		int j;							\
 70									\
 71		if (!(len))						\
 72			break;						\
 73									\
 74		printk(KERN_DEBUG fmt, ##args);				\
 75		for (j = 0; j < (len) - 1; j++)				\
 76			printk(KERN_CONT "%02hhx ", buf[j]);		\
 77		printk(KERN_CONT "%02hhx\n", buf[j]);			\
 78	}								\
 79} while (0)
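/*
 * Illustration (hypothetical address, not captured output): booted with
 * "debug-alternative" on the command line, the macro above emits hex
 * dumps shaped like
 *
 *	ffffffff81000000: old_insn: 90 90 90 90 90
 *
 * per the "%p: old_insn: " format strings used by the callers below.
 */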
 80
 81/*
 82 * Each GENERIC_NOPX is of X bytes, and defined as an array of bytes
 83 * that correspond to that nop. Getting from one nop to the next, we
 84 * add to the array the offset that is equal to the sum of all sizes of
 85 * nops preceding the one we are after.
 86 *
 87 * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the
 88 * nice symmetry of sizes of the previous nops.
 89 */
 90#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
 91static const unsigned char intelnops[] =
 92{
 93	GENERIC_NOP1,
 94	GENERIC_NOP2,
 95	GENERIC_NOP3,
 96	GENERIC_NOP4,
 97	GENERIC_NOP5,
 98	GENERIC_NOP6,
 99	GENERIC_NOP7,
100	GENERIC_NOP8,
101	GENERIC_NOP5_ATOMIC
102};
103static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
104{
105	NULL,
106	intelnops,
107	intelnops + 1,
108	intelnops + 1 + 2,
109	intelnops + 1 + 2 + 3,
110	intelnops + 1 + 2 + 3 + 4,
111	intelnops + 1 + 2 + 3 + 4 + 5,
112	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
113	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
114	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
115};
116#endif
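/*
 * A reduced stand-alone sketch of the layout described above (the
 * sketch_* names are illustrative, not kernel code): the flat array
 * packs the 1-byte nop, then the 2-byte nop, and the pointer table
 * indexes it by cumulative offset, so table[n] points at an n-byte nop.
 */
static const unsigned char sketch_nops[] = {
	0x90,		/* 1-byte nop */
	0x66, 0x90,	/* 2-byte nop ("osp nop") */
};
static const unsigned char * const sketch_nop_table[] = {
	NULL,			/* there is no 0-byte nop */
	sketch_nops,		/* sketch_nop_table[1] -> 1-byte nop */
	sketch_nops + 1,	/* sketch_nop_table[2] -> 2-byte nop */
};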
117
118#ifdef K8_NOP1
119static const unsigned char k8nops[] =
120{
121	K8_NOP1,
122	K8_NOP2,
123	K8_NOP3,
124	K8_NOP4,
125	K8_NOP5,
126	K8_NOP6,
127	K8_NOP7,
128	K8_NOP8,
129	K8_NOP5_ATOMIC
130};
131static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
132{
133	NULL,
134	k8nops,
135	k8nops + 1,
136	k8nops + 1 + 2,
137	k8nops + 1 + 2 + 3,
138	k8nops + 1 + 2 + 3 + 4,
139	k8nops + 1 + 2 + 3 + 4 + 5,
140	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
141	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
142	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
143};
144#endif
145
146#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
147static const unsigned char k7nops[] =
148{
149	K7_NOP1,
150	K7_NOP2,
151	K7_NOP3,
152	K7_NOP4,
153	K7_NOP5,
154	K7_NOP6,
155	K7_NOP7,
156	K7_NOP8,
157	K7_NOP5_ATOMIC
158};
159static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
160{
161	NULL,
162	k7nops,
163	k7nops + 1,
164	k7nops + 1 + 2,
165	k7nops + 1 + 2 + 3,
166	k7nops + 1 + 2 + 3 + 4,
167	k7nops + 1 + 2 + 3 + 4 + 5,
168	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
169	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
170	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
171};
172#endif
173
174#ifdef P6_NOP1
175static const unsigned char p6nops[] =
176{
177	P6_NOP1,
178	P6_NOP2,
179	P6_NOP3,
180	P6_NOP4,
181	P6_NOP5,
182	P6_NOP6,
183	P6_NOP7,
184	P6_NOP8,
185	P6_NOP5_ATOMIC
186};
187static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
188{
189	NULL,
190	p6nops,
191	p6nops + 1,
192	p6nops + 1 + 2,
193	p6nops + 1 + 2 + 3,
194	p6nops + 1 + 2 + 3 + 4,
195	p6nops + 1 + 2 + 3 + 4 + 5,
196	p6nops + 1 + 2 + 3 + 4 + 5 + 6,
197	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
198	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
199};
200#endif
201
202/* Initialize these to a safe default */
203#ifdef CONFIG_X86_64
204const unsigned char * const *ideal_nops = p6_nops;
205#else
206const unsigned char * const *ideal_nops = intel_nops;
207#endif
208
209void __init arch_init_ideal_nops(void)
210{
211	switch (boot_cpu_data.x86_vendor) {
212	case X86_VENDOR_INTEL:
213		/*
214		 * Due to a decoder implementation quirk, some
215		 * specific Intel CPUs actually perform better with
216		 * the "k8_nops" than with the SDM-recommended NOPs.
217		 */
218		if (boot_cpu_data.x86 == 6 &&
219		    boot_cpu_data.x86_model >= 0x0f &&
220		    boot_cpu_data.x86_model != 0x1c &&
221		    boot_cpu_data.x86_model != 0x26 &&
222		    boot_cpu_data.x86_model != 0x27 &&
223		    boot_cpu_data.x86_model < 0x30) {
224			ideal_nops = k8_nops;
225		} else if (boot_cpu_has(X86_FEATURE_NOPL)) {
226			   ideal_nops = p6_nops;
227		} else {
228#ifdef CONFIG_X86_64
229			ideal_nops = k8_nops;
230#else
231			ideal_nops = intel_nops;
232#endif
233		}
234		break;
235
236	case X86_VENDOR_AMD:
237		if (boot_cpu_data.x86 > 0xf) {
238			ideal_nops = p6_nops;
239			return;
240		}
241
242		/* fall through */
243
244	default:
245#ifdef CONFIG_X86_64
246		ideal_nops = k8_nops;
247#else
248		if (boot_cpu_has(X86_FEATURE_K8))
249			ideal_nops = k8_nops;
250		else if (boot_cpu_has(X86_FEATURE_K7))
251			ideal_nops = k7_nops;
252		else
253			ideal_nops = intel_nops;
254#endif
255	}
256}
257
258/* Use this to add nops to a buffer, then text_poke the whole buffer. */
259static void __init_or_module add_nops(void *insns, unsigned int len)
260{
261	while (len > 0) {
262		unsigned int noplen = len;
263		if (noplen > ASM_NOP_MAX)
264			noplen = ASM_NOP_MAX;
265		memcpy(insns, ideal_nops[noplen], noplen);
266		insns += noplen;
267		len -= noplen;
268	}
269}
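/*
 * Usage sketch, assuming ASM_NOP_MAX == 8 as on x86: padding a 12-byte
 * hole emits one 8-byte nop followed by one 4-byte nop:
 *
 *	u8 buf[12];
 *	add_nops(buf, sizeof(buf));	- copies ideal_nops[8], then ideal_nops[4]
 */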
270
271extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
272extern s32 __smp_locks[], __smp_locks_end[];
273void *text_poke_early(void *addr, const void *opcode, size_t len);
274
275/*
276 * Are we looking at a near JMP with a 1 or 4-byte displacement?
277 */
278static inline bool is_jmp(const u8 opcode)
279{
280	return opcode == 0xeb || opcode == 0xe9;
281}
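/*
 * For reference, the two encodings matched above (the displacement is
 * relative to the end of the instruction):
 *
 *	eb 10			jmp short +0x10	(2 bytes, s8 displacement)
 *	e9 10 00 00 00		jmp near  +0x10	(5 bytes, s32 displacement)
 */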
282
283static void __init_or_module
284recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf)
285{
286	u8 *next_rip, *tgt_rip;
287	s32 n_dspl, o_dspl;
288	int repl_len;
289
290	if (a->replacementlen != 5)
291		return;
292
293	o_dspl = *(s32 *)(insnbuf + 1);
294
295	/* next_rip of the replacement JMP */
296	next_rip = repl_insn + a->replacementlen;
297	/* target rip of the replacement JMP */
298	tgt_rip  = next_rip + o_dspl;
299	n_dspl = tgt_rip - orig_insn;
300
301	DPRINTK("target RIP: %p, new_displ: 0x%x", tgt_rip, n_dspl);
302
303	if (tgt_rip - orig_insn >= 0) {
304		if (n_dspl - 2 <= 127)
305			goto two_byte_jmp;
306		else
307			goto five_byte_jmp;
308	/* negative offset */
309	} else {
310		if (((n_dspl - 2) & 0xff) == (n_dspl - 2))
311			goto two_byte_jmp;
312		else
313			goto five_byte_jmp;
314	}
315
316two_byte_jmp:
317	n_dspl -= 2;
318
319	insnbuf[0] = 0xeb;
320	insnbuf[1] = (s8)n_dspl;
321	add_nops(insnbuf + 2, 3);
322
323	repl_len = 2;
324	goto done;
325
326five_byte_jmp:
327	n_dspl -= 5;
328
329	insnbuf[0] = 0xe9;
330	*(s32 *)&insnbuf[1] = n_dspl;
331
332	repl_len = 5;
333
334done:
335
336	DPRINTK("final displ: 0x%08x, JMP 0x%lx",
337		n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
338}
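/*
 * Worked example of the arithmetic above, with made-up addresses: the
 * replacement JMP sits at repl_insn == 0x1000 with o_dspl == 0x20, so
 * next_rip == 0x1005 and tgt_rip == 0x1025.  For an original site at
 * orig_insn == 0x1010, n_dspl == 0x15 (21); 21 - 2 == 19 fits in an
 * s8, so the JMP shrinks to the 2-byte form "eb 13" plus three
 * padding nops.
 */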
339
340/*
341 * "noinline" to cause control flow change and thus invalidate I$ and
342 * cause refetch after modification.
343 */
344static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr)
345{
346	unsigned long flags;
347
348	if (instr[0] != 0x90)
349		return;
350
351	local_irq_save(flags);
352	add_nops(instr + (a->instrlen - a->padlen), a->padlen);
353	local_irq_restore(flags);
354
355	DUMP_BYTES(instr, a->instrlen, "%p: [%d:%d) optimized NOPs: ",
356		   instr, a->instrlen - a->padlen, a->padlen);
357}
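/*
 * Example effect, with assumed bytes: a site that is pure padding,
 * instrlen == padlen == 3,
 *
 *	90 90 90		three 1-byte nops
 *
 * is rewritten through add_nops() into the single ideal 3-byte nop,
 * e.g. "0f 1f 00" (nopl) when p6_nops are in use.
 */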
358
359/*
360 * Replace instructions with better alternatives for this CPU type. This runs
361 * before SMP is initialized to avoid SMP problems with self modifying code.
362 * This implies that asymmetric systems where APs have fewer capabilities than
363 * the boot processor are not handled. Tough. Make sure you disable such
364 * features by hand.
365 *
366 * Marked "noinline" to cause control flow change and thus insn cache
367 * to refetch changed I$ lines.
368 */
369void __init_or_module noinline apply_alternatives(struct alt_instr *start,
370						  struct alt_instr *end)
371{
372	struct alt_instr *a;
373	u8 *instr, *replacement;
374	u8 insnbuf[MAX_PATCH_LEN];
375
376	DPRINTK("alt table %p -> %p", start, end);
377	/*
378	 * The scan order should be from start to end. A later scanned
379	 * alternative code can overwrite previously scanned alternative code.
380	 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
381	 * patch code.
382	 *
383	 * So be careful if you want to change the scan order to any other
384	 * order.
385	 */
386	for (a = start; a < end; a++) {
387		int insnbuf_sz = 0;
388
389		instr = (u8 *)&a->instr_offset + a->instr_offset;
390		replacement = (u8 *)&a->repl_offset + a->repl_offset;
391		BUG_ON(a->instrlen > sizeof(insnbuf));
392		BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
393		if (!boot_cpu_has(a->cpuid)) {
394			if (a->padlen > 1)
395				optimize_nops(a, instr);
396
397			continue;
398		}
399
400		DPRINTK("feat: %d*32+%d, old: (%p, len: %d), repl: (%p, len: %d), pad: %d",
401			a->cpuid >> 5,
402			a->cpuid & 0x1f,
403			instr, a->instrlen,
404			replacement, a->replacementlen, a->padlen);
405
406		DUMP_BYTES(instr, a->instrlen, "%p: old_insn: ", instr);
407		DUMP_BYTES(replacement, a->replacementlen, "%p: rpl_insn: ", replacement);
408
409		memcpy(insnbuf, replacement, a->replacementlen);
410		insnbuf_sz = a->replacementlen;
411
412		/* 0xe8 is a relative CALL; fix the offset. */
413		if (*insnbuf == 0xe8 && a->replacementlen == 5) {
414			*(s32 *)(insnbuf + 1) += replacement - instr;
415			DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
416				*(s32 *)(insnbuf + 1),
417				(unsigned long)instr + *(s32 *)(insnbuf + 1) + 5);
418		}
419
420		if (a->replacementlen && is_jmp(replacement[0]))
421			recompute_jump(a, instr, replacement, insnbuf);
422
423		if (a->instrlen > a->replacementlen) {
424			add_nops(insnbuf + a->replacementlen,
425				 a->instrlen - a->replacementlen);
426			insnbuf_sz += a->instrlen - a->replacementlen;
427		}
428		DUMP_BYTES(insnbuf, insnbuf_sz, "%p: final_insn: ", instr);
429
430		text_poke_early(instr, insnbuf, insnbuf_sz);
431	}
432}
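/*
 * The instr_offset/repl_offset arithmetic above works because both
 * fields are self-relative: each stores the distance from its own
 * address to its target, which keeps the table position-independent.
 * Sketch of the record being walked (see asm/alternative.h for the
 * authoritative definition):
 *
 *	struct alt_instr {
 *		s32 instr_offset;	- original insn, relative to this field
 *		s32 repl_offset;	- replacement, relative to this field
 *		u16 cpuid;		- feature bit gating the replacement
 *		u8  instrlen;		- length of the original
 *		u8  replacementlen;	- length of the replacement
 *		u8  padlen;		- build-time nop padding in the original
 *	} __packed;
 */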
433
434#ifdef CONFIG_SMP
435static void alternatives_smp_lock(const s32 *start, const s32 *end,
436				  u8 *text, u8 *text_end)
437{
438	const s32 *poff;
439
440	mutex_lock(&text_mutex);
441	for (poff = start; poff < end; poff++) {
442		u8 *ptr = (u8 *)poff + *poff;
443
444		if (!*poff || ptr < text || ptr >= text_end)
445			continue;
446		/* turn DS segment override prefix into lock prefix */
447		if (*ptr == 0x3e)
448			text_poke(ptr, ((unsigned char []){0xf0}), 1);
449	}
450	mutex_unlock(&text_mutex);
451}
452
453static void alternatives_smp_unlock(const s32 *start, const s32 *end,
454				    u8 *text, u8 *text_end)
455{
456	const s32 *poff;
457
458	mutex_lock(&text_mutex);
459	for (poff = start; poff < end; poff++) {
460		u8 *ptr = (u8 *)poff + *poff;
461
462		if (!*poff || ptr < text || ptr >= text_end)
463			continue;
464		/* turn lock prefix into DS segment override prefix */
465		if (*ptr == 0xf0)
466			text_poke(ptr, ((unsigned char []){0x3E}), 1);
467	}
468	mutex_unlock(&text_mutex);
469}
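/*
 * Concrete effect of the two helpers above on a LOCK_PREFIX site
 * (illustrative bytes; "ff 05 ..." is incl on a memory operand):
 *
 *	3e ff 05 ...		ds incl <mem>	- UP: harmless 0x3e prefix
 *	f0 ff 05 ...		lock incl <mem>	- SMP: 0xf0 lock prefix
 *
 * Only the single prefix byte recorded in the __smp_locks table is
 * ever patched; the rest of the instruction is untouched.
 */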
470
471struct smp_alt_module {
472	/* what is this ??? */
473	struct module	*mod;
474	char		*name;
475
476	/* ptrs to lock prefixes */
477	const s32	*locks;
478	const s32	*locks_end;
479
480	/* .text segment, needed to avoid patching init code ;) */
481	u8		*text;
482	u8		*text_end;
483
484	struct list_head next;
485};
486static LIST_HEAD(smp_alt_modules);
487static DEFINE_MUTEX(smp_alt);
488static bool uniproc_patched = false;	/* protected by smp_alt */
489
490void __init_or_module alternatives_smp_module_add(struct module *mod,
491						  char *name,
492						  void *locks, void *locks_end,
493						  void *text,  void *text_end)
494{
495	struct smp_alt_module *smp;
496
497	mutex_lock(&smp_alt);
498	if (!uniproc_patched)
499		goto unlock;
500
501	if (num_possible_cpus() == 1)
502		/* Don't bother remembering, we'll never have to undo it. */
503		goto smp_unlock;
504
505	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
506	if (NULL == smp)
507		/* we'll run the (safe but slow) SMP code then ... */
508		goto unlock;
509
510	smp->mod	= mod;
511	smp->name	= name;
512	smp->locks	= locks;
513	smp->locks_end	= locks_end;
514	smp->text	= text;
515	smp->text_end	= text_end;
516	DPRINTK("locks %p -> %p, text %p -> %p, name %s\n",
517		smp->locks, smp->locks_end,
518		smp->text, smp->text_end, smp->name);
519
520	list_add_tail(&smp->next, &smp_alt_modules);
521smp_unlock:
522	alternatives_smp_unlock(locks, locks_end, text, text_end);
523unlock:
524	mutex_unlock(&smp_alt);
525}
526
527void __init_or_module alternatives_smp_module_del(struct module *mod)
528{
529	struct smp_alt_module *item;
530
531	mutex_lock(&smp_alt);
532	list_for_each_entry(item, &smp_alt_modules, next) {
533		if (mod != item->mod)
534			continue;
535		list_del(&item->next);
536		kfree(item);
537		break;
538	}
539	mutex_unlock(&smp_alt);
540}
541
542void alternatives_enable_smp(void)
543{
544	struct smp_alt_module *mod;
545
546	/* Why bother if there are no other CPUs? */
547	BUG_ON(num_possible_cpus() == 1);
548
549	mutex_lock(&smp_alt);
550
551	if (uniproc_patched) {
552		pr_info("switching to SMP code\n");
553		BUG_ON(num_online_cpus() != 1);
554		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
555		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
556		list_for_each_entry(mod, &smp_alt_modules, next)
557			alternatives_smp_lock(mod->locks, mod->locks_end,
558					      mod->text, mod->text_end);
559		uniproc_patched = false;
560	}
561	mutex_unlock(&smp_alt);
562}
563
564/* Return 1 if the address range is reserved for smp-alternatives */
565int alternatives_text_reserved(void *start, void *end)
566{
567	struct smp_alt_module *mod;
568	const s32 *poff;
569	u8 *text_start = start;
570	u8 *text_end = end;
571
572	list_for_each_entry(mod, &smp_alt_modules, next) {
573		if (mod->text > text_end || mod->text_end < text_start)
574			continue;
575		for (poff = mod->locks; poff < mod->locks_end; poff++) {
576			const u8 *ptr = (const u8 *)poff + *poff;
577
578			if (text_start <= ptr && text_end > ptr)
579				return 1;
580		}
581	}
582
583	return 0;
584}
585#endif /* CONFIG_SMP */
586
587#ifdef CONFIG_PARAVIRT
588void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
589				     struct paravirt_patch_site *end)
590{
591	struct paravirt_patch_site *p;
592	char insnbuf[MAX_PATCH_LEN];
593
594	if (noreplace_paravirt)
595		return;
596
597	for (p = start; p < end; p++) {
598		unsigned int used;
599
600		BUG_ON(p->len > MAX_PATCH_LEN);
601		/* prep the buffer with the original instructions */
602		memcpy(insnbuf, p->instr, p->len);
603		used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
604					 (unsigned long)p->instr, p->len);
605
606		BUG_ON(used > p->len);
607
608		/* Pad the rest with nops */
609		add_nops(insnbuf + used, p->len - used);
610		text_poke_early(p->instr, insnbuf, p->len);
611	}
612}
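/*
 * The pv_init_ops.patch callback (native_patch() on bare metal) either
 * inlines the native sequence for an op, e.g. "cli" for irq_disable or
 * "sti" for irq_enable, or emits a direct CALL to the pv_ops function,
 * and returns the number of bytes it produced; the loop above then
 * nop-pads the remainder of the patch site.
 */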
613extern struct paravirt_patch_site __start_parainstructions[],
614	__stop_parainstructions[];
615#endif	/* CONFIG_PARAVIRT */
616
617void __init alternative_instructions(void)
618{
619	/* The patching is not fully atomic, so try to avoid local interruptions
620	   that might execute the code about to be patched.
621	   Other CPUs are not running. */
622	stop_nmi();
623
624	/*
625	 * Don't stop machine check exceptions while patching.
626	 * MCEs only happen when something got corrupted and in this
627	 * case we must do something about the corruption.
628	 * Ignoring it is worse than an unlikely patching race.
629	 * Also machine checks tend to be broadcast and if one CPU
630	 * goes into machine check the others follow quickly, so we don't
631	 * expect a machine check to cause undue problems during to code
632	 * expect a machine check to cause undue problems during code
633	 */
634
635	apply_alternatives(__alt_instructions, __alt_instructions_end);
636
637#ifdef CONFIG_SMP
638	/* Patch to UP if other cpus not imminent. */
639	if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
640		uniproc_patched = true;
641		alternatives_smp_module_add(NULL, "core kernel",
642					    __smp_locks, __smp_locks_end,
643					    _text, _etext);
644	}
645
646	if (!uniproc_patched || num_possible_cpus() == 1)
647		free_init_pages("SMP alternatives",
648				(unsigned long)__smp_locks,
649				(unsigned long)__smp_locks_end);
650#endif
651
652	apply_paravirt(__parainstructions, __parainstructions_end);
653
654	restart_nmi();
655	alternatives_patched = 1;
656}
657
658/**
659 * text_poke_early - Update instructions on a live kernel at boot time
660 * @addr: address to modify
661 * @opcode: source of the copy
662 * @len: length to copy
663 *
664 * When you use this code to patch more than one byte of an instruction
665 * you need to make sure that other CPUs cannot execute this code in parallel.
666 * Also no thread must be currently preempted in the middle of these
667 * instructions. And on the local CPU you need to be protected against NMI or MCE
668 * handlers seeing an inconsistent instruction while you patch.
669 */
670void *__init_or_module text_poke_early(void *addr, const void *opcode,
671					      size_t len)
672{
673	unsigned long flags;
674	local_irq_save(flags);
675	memcpy(addr, opcode, len);
676	local_irq_restore(flags);
677	/* Could also do a CLFLUSH here to speed up CPU recovery; but
678	   that causes hangs on some VIA CPUs. */
679	return addr;
680}
681
682/**
683 * text_poke - Update instructions on a live kernel
684 * @addr: address to modify
685 * @opcode: source of the copy
686 * @len: length to copy
687 *
688 * Only atomic text poke/set should be allowed when not doing early patching.
689 * It means the size must be writable atomically and the address must be aligned
690 * in a way that permits an atomic write. It also makes sure we fit on a single
691 * page.
692 *
693 * Note: Must be called under text_mutex.
694 */
695void *text_poke(void *addr, const void *opcode, size_t len)
696{
697	unsigned long flags;
698	char *vaddr;
699	struct page *pages[2];
700	int i;
701
702	if (!core_kernel_text((unsigned long)addr)) {
703		pages[0] = vmalloc_to_page(addr);
704		pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
705	} else {
706		pages[0] = virt_to_page(addr);
707		WARN_ON(!PageReserved(pages[0]));
708		pages[1] = virt_to_page(addr + PAGE_SIZE);
709	}
710	BUG_ON(!pages[0]);
711	local_irq_save(flags);
712	set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
713	if (pages[1])
714		set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
715	vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
716	memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
717	clear_fixmap(FIX_TEXT_POKE0);
718	if (pages[1])
719		clear_fixmap(FIX_TEXT_POKE1);
720	local_flush_tlb();
721	sync_core();
722	/* Could also do a CLFLUSH here to speed up CPU recovery; but
723	   that causes hangs on some VIA CPUs. */
724	for (i = 0; i < len; i++)
725		BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
726	local_irq_restore(flags);
727	return addr;
728}
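/*
 * Note on the two fixmap slots used above: a poke of up to
 * MAX_PATCH_LEN bytes may straddle a page boundary, and FIX_TEXT_POKE0
 * and FIX_TEXT_POKE1 map the two backing pages at adjacent virtual
 * addresses so that the memcpy() can run across the page edge.
 */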
729
730static void do_sync_core(void *info)
731{
732	sync_core();
733}
734
735static bool bp_patching_in_progress;
736static void *bp_int3_handler, *bp_int3_addr;
737
738int poke_int3_handler(struct pt_regs *regs)
739{
740	/* Read bp_patching_in_progress; pairs with the smp_wmb() in text_poke_bp() */
741	smp_rmb();
742
743	if (likely(!bp_patching_in_progress))
744		return 0;
745
746	if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
747		return 0;
748
749	/* set up the specified breakpoint handler */
750	regs->ip = (unsigned long) bp_int3_handler;
751
752	return 1;
753
754}
755
756/**
757 * text_poke_bp() -- update instructions on live kernel on SMP
758 * @addr:	address to patch
759 * @opcode:	opcode of new instruction
760 * @len:	length to copy
761 * @handler:	address to jump to when the temporary breakpoint is hit
762 *
763 * Modify a multi-byte instruction by using an int3 breakpoint on SMP.
764 * We completely avoid stop_machine() here, and achieve the
765 * synchronization using int3 breakpoint.
766 *
767 * The way it is done:
768 *	- add an int3 trap to the address that will be patched
769 *	- sync cores
770 *	- update all but the first byte of the patched range
771 *	- sync cores
772 *	- replace the first byte (int3) with the first byte of
773 *	  the replacement opcode
774 *	- sync cores
775 *
776 * Note: must be called under text_mutex.
777 */
778void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
779{
780	unsigned char int3 = 0xcc;
781
782	bp_int3_handler = handler;
783	bp_int3_addr = (u8 *)addr + sizeof(int3);
784	bp_patching_in_progress = true;
785	/*
786	 * Corresponding read barrier in int3 notifier for
787	 * making sure the in_progress flag is correctly ordered wrt.
788	 * patching
789	 */
790	smp_wmb();
791
792	text_poke(addr, &int3, sizeof(int3));
793
794	on_each_cpu(do_sync_core, NULL, 1);
795
796	if (len - sizeof(int3) > 0) {
797		/* patch all but the first byte */
798		text_poke((char *)addr + sizeof(int3),
799			  (const char *) opcode + sizeof(int3),
800			  len - sizeof(int3));
801		/*
802		 * According to Intel, this core syncing is very likely
803		 * not necessary and we'd be safe even without it. But
804		 * better safe than sorry (plus there's not only Intel).
805		 */
806		on_each_cpu(do_sync_core, NULL, 1);
807	}
808
809	/* patch the first byte */
810	text_poke(addr, opcode, sizeof(int3));
811
812	on_each_cpu(do_sync_core, NULL, 1);
813
814	bp_patching_in_progress = false;
815	smp_wmb();
816
817	return addr;
818}
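/*
 * Caller sketch (site, target and resume_handler are hypothetical), in
 * the style of the jump-label code, turning a 5-byte site into a
 * direct call:
 *
 *	u8 insn[5] = { 0xe8, 0, 0, 0, 0 };	- CALL rel32
 *	*(s32 *)&insn[1] = target - (site + 5);
 *	text_poke_bp(site, insn, 5, resume_handler);
 *
 * Any CPU that hits "site" while the int3 is in place traps into
 * poke_int3_handler() and is diverted to resume_handler.
 */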
819
arch/x86/kernel/alternative.c (v4.17)
  1#define pr_fmt(fmt) "SMP alternatives: " fmt
  2
  3#include <linux/module.h>
  4#include <linux/sched.h>
  5#include <linux/mutex.h>
  6#include <linux/list.h>
  7#include <linux/stringify.h>
  8#include <linux/mm.h>
  9#include <linux/vmalloc.h>
 10#include <linux/memory.h>
 11#include <linux/stop_machine.h>
 12#include <linux/slab.h>
 13#include <linux/kdebug.h>
 14#include <asm/text-patching.h>
 15#include <asm/alternative.h>
 16#include <asm/sections.h>
 17#include <asm/pgtable.h>
 18#include <asm/mce.h>
 19#include <asm/nmi.h>
 20#include <asm/cacheflush.h>
 21#include <asm/tlbflush.h>
 22#include <asm/io.h>
 23#include <asm/fixmap.h>
 24
 25int __read_mostly alternatives_patched;
 26
 27EXPORT_SYMBOL_GPL(alternatives_patched);
 28
 29#define MAX_PATCH_LEN (255-1)
 30
 31static int __initdata_or_module debug_alternative;
 32
 33static int __init debug_alt(char *str)
 34{
 35	debug_alternative = 1;
 36	return 1;
 37}
 38__setup("debug-alternative", debug_alt);
 39
 40static int noreplace_smp;
 41
 42static int __init setup_noreplace_smp(char *str)
 43{
 44	noreplace_smp = 1;
 45	return 1;
 46}
 47__setup("noreplace-smp", setup_noreplace_smp);
 48
 49#define DPRINTK(fmt, args...)						\
 50do {									\
 51	if (debug_alternative)						\
 52		printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##args);	\
 53} while (0)
 54
 55#define DUMP_BYTES(buf, len, fmt, args...)				\
 56do {									\
 57	if (unlikely(debug_alternative)) {				\
 58		int j;							\
 59									\
 60		if (!(len))						\
 61			break;						\
 62									\
 63		printk(KERN_DEBUG fmt, ##args);				\
 64		for (j = 0; j < (len) - 1; j++)				\
 65			printk(KERN_CONT "%02hhx ", buf[j]);		\
 66		printk(KERN_CONT "%02hhx\n", buf[j]);			\
 67	}								\
 68} while (0)
 69
 70/*
 71 * Each GENERIC_NOPX is of X bytes, and defined as an array of bytes
 72 * that correspond to that nop. Getting from one nop to the next, we
 73 * add to the array the offset that is equal to the sum of all sizes of
 74 * nops preceding the one we are after.
 75 *
 76 * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the
 77 * nice symmetry of sizes of the previous nops.
 78 */
 79#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
 80static const unsigned char intelnops[] =
 81{
 82	GENERIC_NOP1,
 83	GENERIC_NOP2,
 84	GENERIC_NOP3,
 85	GENERIC_NOP4,
 86	GENERIC_NOP5,
 87	GENERIC_NOP6,
 88	GENERIC_NOP7,
 89	GENERIC_NOP8,
 90	GENERIC_NOP5_ATOMIC
 91};
 92static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
 93{
 94	NULL,
 95	intelnops,
 96	intelnops + 1,
 97	intelnops + 1 + 2,
 98	intelnops + 1 + 2 + 3,
 99	intelnops + 1 + 2 + 3 + 4,
100	intelnops + 1 + 2 + 3 + 4 + 5,
101	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
102	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
103	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
104};
105#endif
106
107#ifdef K8_NOP1
108static const unsigned char k8nops[] =
109{
110	K8_NOP1,
111	K8_NOP2,
112	K8_NOP3,
113	K8_NOP4,
114	K8_NOP5,
115	K8_NOP6,
116	K8_NOP7,
117	K8_NOP8,
118	K8_NOP5_ATOMIC
119};
120static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
121{
122	NULL,
123	k8nops,
124	k8nops + 1,
125	k8nops + 1 + 2,
126	k8nops + 1 + 2 + 3,
127	k8nops + 1 + 2 + 3 + 4,
128	k8nops + 1 + 2 + 3 + 4 + 5,
129	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
130	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
131	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
132};
133#endif
134
135#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
136static const unsigned char k7nops[] =
137{
138	K7_NOP1,
139	K7_NOP2,
140	K7_NOP3,
141	K7_NOP4,
142	K7_NOP5,
143	K7_NOP6,
144	K7_NOP7,
145	K7_NOP8,
146	K7_NOP5_ATOMIC
147};
148static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
149{
150	NULL,
151	k7nops,
152	k7nops + 1,
153	k7nops + 1 + 2,
154	k7nops + 1 + 2 + 3,
155	k7nops + 1 + 2 + 3 + 4,
156	k7nops + 1 + 2 + 3 + 4 + 5,
157	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
158	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
159	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
160};
161#endif
162
163#ifdef P6_NOP1
164static const unsigned char p6nops[] =
165{
166	P6_NOP1,
167	P6_NOP2,
168	P6_NOP3,
169	P6_NOP4,
170	P6_NOP5,
171	P6_NOP6,
172	P6_NOP7,
173	P6_NOP8,
174	P6_NOP5_ATOMIC
175};
176static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
177{
178	NULL,
179	p6nops,
180	p6nops + 1,
181	p6nops + 1 + 2,
182	p6nops + 1 + 2 + 3,
183	p6nops + 1 + 2 + 3 + 4,
184	p6nops + 1 + 2 + 3 + 4 + 5,
185	p6nops + 1 + 2 + 3 + 4 + 5 + 6,
186	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
187	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
188};
189#endif
190
191/* Initialize these to a safe default */
192#ifdef CONFIG_X86_64
193const unsigned char * const *ideal_nops = p6_nops;
194#else
195const unsigned char * const *ideal_nops = intel_nops;
196#endif
197
198void __init arch_init_ideal_nops(void)
199{
200	switch (boot_cpu_data.x86_vendor) {
201	case X86_VENDOR_INTEL:
202		/*
203		 * Due to a decoder implementation quirk, some
204		 * specific Intel CPUs actually perform better with
205		 * the "k8_nops" than with the SDM-recommended NOPs.
206		 */
207		if (boot_cpu_data.x86 == 6 &&
208		    boot_cpu_data.x86_model >= 0x0f &&
209		    boot_cpu_data.x86_model != 0x1c &&
210		    boot_cpu_data.x86_model != 0x26 &&
211		    boot_cpu_data.x86_model != 0x27 &&
212		    boot_cpu_data.x86_model < 0x30) {
213			ideal_nops = k8_nops;
214		} else if (boot_cpu_has(X86_FEATURE_NOPL)) {
215			   ideal_nops = p6_nops;
216		} else {
217#ifdef CONFIG_X86_64
218			ideal_nops = k8_nops;
219#else
220			ideal_nops = intel_nops;
221#endif
222		}
223		break;
224
225	case X86_VENDOR_AMD:
226		if (boot_cpu_data.x86 > 0xf) {
227			ideal_nops = p6_nops;
228			return;
229		}
230
231		/* fall through */
232
233	default:
234#ifdef CONFIG_X86_64
235		ideal_nops = k8_nops;
236#else
237		if (boot_cpu_has(X86_FEATURE_K8))
238			ideal_nops = k8_nops;
239		else if (boot_cpu_has(X86_FEATURE_K7))
240			ideal_nops = k7_nops;
241		else
242			ideal_nops = intel_nops;
243#endif
244	}
245}
246
247/* Use this to add nops to a buffer, then text_poke the whole buffer. */
248static void __init_or_module add_nops(void *insns, unsigned int len)
249{
250	while (len > 0) {
251		unsigned int noplen = len;
252		if (noplen > ASM_NOP_MAX)
253			noplen = ASM_NOP_MAX;
254		memcpy(insns, ideal_nops[noplen], noplen);
255		insns += noplen;
256		len -= noplen;
257	}
258}
259
260extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
261extern s32 __smp_locks[], __smp_locks_end[];
262void *text_poke_early(void *addr, const void *opcode, size_t len);
263
264/*
265 * Are we looking at a near JMP with a 1 or 4-byte displacement?
266 */
267static inline bool is_jmp(const u8 opcode)
268{
269	return opcode == 0xeb || opcode == 0xe9;
270}
271
272static void __init_or_module
273recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf)
274{
275	u8 *next_rip, *tgt_rip;
276	s32 n_dspl, o_dspl;
277	int repl_len;
278
279	if (a->replacementlen != 5)
280		return;
281
282	o_dspl = *(s32 *)(insnbuf + 1);
283
284	/* next_rip of the replacement JMP */
285	next_rip = repl_insn + a->replacementlen;
286	/* target rip of the replacement JMP */
287	tgt_rip  = next_rip + o_dspl;
288	n_dspl = tgt_rip - orig_insn;
289
290	DPRINTK("target RIP: %px, new_displ: 0x%x", tgt_rip, n_dspl);
291
292	if (tgt_rip - orig_insn >= 0) {
293		if (n_dspl - 2 <= 127)
294			goto two_byte_jmp;
295		else
296			goto five_byte_jmp;
297	/* negative offset */
298	} else {
299		if (((n_dspl - 2) & 0xff) == (n_dspl - 2))
300			goto two_byte_jmp;
301		else
302			goto five_byte_jmp;
303	}
304
305two_byte_jmp:
306	n_dspl -= 2;
307
308	insnbuf[0] = 0xeb;
309	insnbuf[1] = (s8)n_dspl;
310	add_nops(insnbuf + 2, 3);
311
312	repl_len = 2;
313	goto done;
314
315five_byte_jmp:
316	n_dspl -= 5;
317
318	insnbuf[0] = 0xe9;
319	*(s32 *)&insnbuf[1] = n_dspl;
320
321	repl_len = 5;
322
323done:
324
325	DPRINTK("final displ: 0x%08x, JMP 0x%lx",
326		n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
327}
328
329/*
330 * "noinline" to cause control flow change and thus invalidate I$ and
331 * cause refetch after modification.
332 */
333static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr)
334{
335	unsigned long flags;
336	int i;
337
338	for (i = 0; i < a->padlen; i++) {
339		if (instr[i] != 0x90)
340			return;
341	}
342
343	local_irq_save(flags);
344	add_nops(instr + (a->instrlen - a->padlen), a->padlen);
345	local_irq_restore(flags);
346
347	DUMP_BYTES(instr, a->instrlen, "%px: [%d:%d) optimized NOPs: ",
348		   instr, a->instrlen - a->padlen, a->padlen);
349}
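/*
 * Note the stricter entry check in this version: every one of the
 * first padlen bytes must be 0x90 before the rewrite, where v4.10.11
 * tested only instr[0].  With padlen == 2, a site beginning
 *
 *	90 eb 05 ...		nop; jmp +5
 *
 * passed the old single-byte test but is skipped here.
 */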
350
351/*
352 * Replace instructions with better alternatives for this CPU type. This runs
353 * before SMP is initialized to avoid SMP problems with self modifying code.
354 * This implies that asymmetric systems where APs have fewer capabilities than
355 * the boot processor are not handled. Tough. Make sure you disable such
356 * features by hand.
357 *
358 * Marked "noinline" to cause control flow change and thus insn cache
359 * to refetch changed I$ lines.
360 */
361void __init_or_module noinline apply_alternatives(struct alt_instr *start,
362						  struct alt_instr *end)
363{
364	struct alt_instr *a;
365	u8 *instr, *replacement;
366	u8 insnbuf[MAX_PATCH_LEN];
367
368	DPRINTK("alt table %px, -> %px", start, end);
369	/*
370	 * The scan order should be from start to end. A later scanned
371	 * alternative code can overwrite previously scanned alternative code.
372	 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
373	 * patch code.
374	 *
375	 * So be careful if you want to change the scan order to any other
376	 * order.
377	 */
378	for (a = start; a < end; a++) {
379		int insnbuf_sz = 0;
380
381		instr = (u8 *)&a->instr_offset + a->instr_offset;
382		replacement = (u8 *)&a->repl_offset + a->repl_offset;
383		BUG_ON(a->instrlen > sizeof(insnbuf));
384		BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
385		if (!boot_cpu_has(a->cpuid)) {
386			if (a->padlen > 1)
387				optimize_nops(a, instr);
388
389			continue;
390		}
391
392		DPRINTK("feat: %d*32+%d, old: (%px len: %d), repl: (%px, len: %d), pad: %d",
393			a->cpuid >> 5,
394			a->cpuid & 0x1f,
395			instr, a->instrlen,
396			replacement, a->replacementlen, a->padlen);
397
398		DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr);
399		DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement);
400
401		memcpy(insnbuf, replacement, a->replacementlen);
402		insnbuf_sz = a->replacementlen;
403
404		/*
405		 * 0xe8 is a relative CALL; fix the offset.
406		 *
407		 * Instruction length is checked before the opcode to avoid
408		 * accessing uninitialized bytes for zero-length replacements.
409		 */
410		if (a->replacementlen == 5 && *insnbuf == 0xe8) {
411			*(s32 *)(insnbuf + 1) += replacement - instr;
412			DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
413				*(s32 *)(insnbuf + 1),
414				(unsigned long)instr + *(s32 *)(insnbuf + 1) + 5);
415		}
416
417		if (a->replacementlen && is_jmp(replacement[0]))
418			recompute_jump(a, instr, replacement, insnbuf);
419
420		if (a->instrlen > a->replacementlen) {
421			add_nops(insnbuf + a->replacementlen,
422				 a->instrlen - a->replacementlen);
423			insnbuf_sz += a->instrlen - a->replacementlen;
424		}
425		DUMP_BYTES(insnbuf, insnbuf_sz, "%px: final_insn: ", instr);
426
427		text_poke_early(instr, insnbuf, insnbuf_sz);
428	}
429}
430
431#ifdef CONFIG_SMP
432static void alternatives_smp_lock(const s32 *start, const s32 *end,
433				  u8 *text, u8 *text_end)
434{
435	const s32 *poff;
436
437	for (poff = start; poff < end; poff++) {
438		u8 *ptr = (u8 *)poff + *poff;
439
440		if (!*poff || ptr < text || ptr >= text_end)
441			continue;
442		/* turn DS segment override prefix into lock prefix */
443		if (*ptr == 0x3e)
444			text_poke(ptr, ((unsigned char []){0xf0}), 1);
445	}
446}
447
448static void alternatives_smp_unlock(const s32 *start, const s32 *end,
449				    u8 *text, u8 *text_end)
450{
451	const s32 *poff;
452
453	for (poff = start; poff < end; poff++) {
454		u8 *ptr = (u8 *)poff + *poff;
455
456		if (!*poff || ptr < text || ptr >= text_end)
457			continue;
458		/* turn lock prefix into DS segment override prefix */
459		if (*ptr == 0xf0)
460			text_poke(ptr, ((unsigned char []){0x3E}), 1);
461	}
462}
463
464struct smp_alt_module {
465	/* what is this ??? */
466	struct module	*mod;
467	char		*name;
468
469	/* ptrs to lock prefixes */
470	const s32	*locks;
471	const s32	*locks_end;
472
473	/* .text segment, needed to avoid patching init code ;) */
474	u8		*text;
475	u8		*text_end;
476
477	struct list_head next;
478};
479static LIST_HEAD(smp_alt_modules);
480static bool uniproc_patched = false;	/* protected by text_mutex */
481
482void __init_or_module alternatives_smp_module_add(struct module *mod,
483						  char *name,
484						  void *locks, void *locks_end,
485						  void *text,  void *text_end)
486{
487	struct smp_alt_module *smp;
488
489	mutex_lock(&text_mutex);
490	if (!uniproc_patched)
491		goto unlock;
492
493	if (num_possible_cpus() == 1)
494		/* Don't bother remembering, we'll never have to undo it. */
495		goto smp_unlock;
496
497	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
498	if (NULL == smp)
499		/* we'll run the (safe but slow) SMP code then ... */
500		goto unlock;
501
502	smp->mod	= mod;
503	smp->name	= name;
504	smp->locks	= locks;
505	smp->locks_end	= locks_end;
506	smp->text	= text;
507	smp->text_end	= text_end;
508	DPRINTK("locks %p -> %p, text %p -> %p, name %s\n",
509		smp->locks, smp->locks_end,
510		smp->text, smp->text_end, smp->name);
511
512	list_add_tail(&smp->next, &smp_alt_modules);
513smp_unlock:
514	alternatives_smp_unlock(locks, locks_end, text, text_end);
515unlock:
516	mutex_unlock(&text_mutex);
517}
518
519void __init_or_module alternatives_smp_module_del(struct module *mod)
520{
521	struct smp_alt_module *item;
522
523	mutex_lock(&text_mutex);
524	list_for_each_entry(item, &smp_alt_modules, next) {
525		if (mod != item->mod)
526			continue;
527		list_del(&item->next);
528		kfree(item);
529		break;
530	}
531	mutex_unlock(&text_mutex);
532}
533
534void alternatives_enable_smp(void)
535{
536	struct smp_alt_module *mod;
537
538	/* Why bother if there are no other CPUs? */
539	BUG_ON(num_possible_cpus() == 1);
540
541	mutex_lock(&text_mutex);
542
543	if (uniproc_patched) {
544		pr_info("switching to SMP code\n");
545		BUG_ON(num_online_cpus() != 1);
546		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
547		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
548		list_for_each_entry(mod, &smp_alt_modules, next)
549			alternatives_smp_lock(mod->locks, mod->locks_end,
550					      mod->text, mod->text_end);
551		uniproc_patched = false;
552	}
553	mutex_unlock(&text_mutex);
554}
555
556/*
557 * Return 1 if the address range is reserved for SMP-alternatives.
558 * Must hold text_mutex.
559 */
560int alternatives_text_reserved(void *start, void *end)
561{
562	struct smp_alt_module *mod;
563	const s32 *poff;
564	u8 *text_start = start;
565	u8 *text_end = end;
566
567	lockdep_assert_held(&text_mutex);
568
569	list_for_each_entry(mod, &smp_alt_modules, next) {
570		if (mod->text > text_end || mod->text_end < text_start)
571			continue;
572		for (poff = mod->locks; poff < mod->locks_end; poff++) {
573			const u8 *ptr = (const u8 *)poff + *poff;
574
575			if (text_start <= ptr && text_end > ptr)
576				return 1;
577		}
578	}
579
580	return 0;
581}
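/*
 * Caller sketch, modeled on the x86 kprobes code: refuse to plant a
 * probe on a byte that SMP-alternatives may later rewrite:
 *
 *	if (alternatives_text_reserved(p->addr, p->addr))
 *		return -EINVAL;
 */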
582#endif /* CONFIG_SMP */
583
584#ifdef CONFIG_PARAVIRT
585void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
586				     struct paravirt_patch_site *end)
587{
588	struct paravirt_patch_site *p;
589	char insnbuf[MAX_PATCH_LEN];
590
591	for (p = start; p < end; p++) {
592		unsigned int used;
593
594		BUG_ON(p->len > MAX_PATCH_LEN);
595		/* prep the buffer with the original instructions */
596		memcpy(insnbuf, p->instr, p->len);
597		used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
598					 (unsigned long)p->instr, p->len);
599
600		BUG_ON(used > p->len);
601
602		/* Pad the rest with nops */
603		add_nops(insnbuf + used, p->len - used);
604		text_poke_early(p->instr, insnbuf, p->len);
605	}
606}
607extern struct paravirt_patch_site __start_parainstructions[],
608	__stop_parainstructions[];
609#endif	/* CONFIG_PARAVIRT */
610
611void __init alternative_instructions(void)
612{
613	/* The patching is not fully atomic, so try to avoid local interruptions
614	   that might execute the code about to be patched.
615	   Other CPUs are not running. */
616	stop_nmi();
617
618	/*
619	 * Don't stop machine check exceptions while patching.
620	 * MCEs only happen when something got corrupted and in this
621	 * case we must do something about the corruption.
622	 * Ignoring it is worse than an unlikely patching race.
623	 * Also machine checks tend to be broadcast and if one CPU
624	 * goes into machine check the others follow quickly, so we don't
625	 * expect a machine check to cause undue problems during code
626	 * patching.
627	 */
628
629	apply_alternatives(__alt_instructions, __alt_instructions_end);
630
631#ifdef CONFIG_SMP
632	/* Patch to UP if other cpus not imminent. */
633	if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
634		uniproc_patched = true;
635		alternatives_smp_module_add(NULL, "core kernel",
636					    __smp_locks, __smp_locks_end,
637					    _text, _etext);
638	}
639
640	if (!uniproc_patched || num_possible_cpus() == 1)
641		free_init_pages("SMP alternatives",
642				(unsigned long)__smp_locks,
643				(unsigned long)__smp_locks_end);
644#endif
645
646	apply_paravirt(__parainstructions, __parainstructions_end);
647
648	restart_nmi();
649	alternatives_patched = 1;
650}
651
652/**
653 * text_poke_early - Update instructions on a live kernel at boot time
654 * @addr: address to modify
655 * @opcode: source of the copy
656 * @len: length to copy
657 *
658 * When you use this code to patch more than one byte of an instruction
659 * you need to make sure that other CPUs cannot execute this code in parallel.
660 * Also no thread must be currently preempted in the middle of these
661 * instructions. And on the local CPU you need to be protected against NMI or MCE
662 * handlers seeing an inconsistent instruction while you patch.
663 */
664void *__init_or_module text_poke_early(void *addr, const void *opcode,
665					      size_t len)
666{
667	unsigned long flags;
668	local_irq_save(flags);
669	memcpy(addr, opcode, len);
670	local_irq_restore(flags);
671	/* Could also do a CLFLUSH here to speed up CPU recovery; but
672	   that causes hangs on some VIA CPUs. */
673	return addr;
674}
675
676/**
677 * text_poke - Update instructions on a live kernel
678 * @addr: address to modify
679 * @opcode: source of the copy
680 * @len: length to copy
681 *
682 * Only atomic text poke/set should be allowed when not doing early patching.
683 * It means the size must be writable atomically and the address must be aligned
684 * in a way that permits an atomic write. It also makes sure we fit on a single
685 * page.
686 *
687 * Note: Must be called under text_mutex.
688 */
689void *text_poke(void *addr, const void *opcode, size_t len)
690{
691	unsigned long flags;
692	char *vaddr;
693	struct page *pages[2];
694	int i;
695
696	if (!core_kernel_text((unsigned long)addr)) {
697		pages[0] = vmalloc_to_page(addr);
698		pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
699	} else {
700		pages[0] = virt_to_page(addr);
701		WARN_ON(!PageReserved(pages[0]));
702		pages[1] = virt_to_page(addr + PAGE_SIZE);
703	}
704	BUG_ON(!pages[0]);
705	local_irq_save(flags);
706	set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
707	if (pages[1])
708		set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
709	vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
710	memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
711	clear_fixmap(FIX_TEXT_POKE0);
712	if (pages[1])
713		clear_fixmap(FIX_TEXT_POKE1);
714	local_flush_tlb();
715	sync_core();
716	/* Could also do a CLFLUSH here to speed up CPU recovery; but
717	   that causes hangs on some VIA CPUs. */
718	for (i = 0; i < len; i++)
719		BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
720	local_irq_restore(flags);
721	return addr;
722}
723
724static void do_sync_core(void *info)
725{
726	sync_core();
727}
728
729static bool bp_patching_in_progress;
730static void *bp_int3_handler, *bp_int3_addr;
731
732int poke_int3_handler(struct pt_regs *regs)
733{
734	/*
735	 * Having observed our INT3 instruction, we now must observe
736	 * bp_patching_in_progress.
737	 *
738	 * 	in_progress = TRUE		INT3
739	 * 	WMB				RMB
740	 * 	write INT3			if (in_progress)
741	 *
742	 * Idem for bp_int3_handler.
743	 */
744	smp_rmb();
745
746	if (likely(!bp_patching_in_progress))
747		return 0;
748
749	if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
750		return 0;
751
752	/* set up the specified breakpoint handler */
753	regs->ip = (unsigned long) bp_int3_handler;
754
755	return 1;
756
757}
758
759/**
760 * text_poke_bp() -- update instructions on live kernel on SMP
761 * @addr:	address to patch
762 * @opcode:	opcode of new instruction
763 * @len:	length to copy
764 * @handler:	address to jump to when the temporary breakpoint is hit
765 *
766 * Modify a multi-byte instruction by using an int3 breakpoint on SMP.
767 * We completely avoid stop_machine() here, and achieve the
768 * synchronization using int3 breakpoint.
769 *
770 * The way it is done:
771 *	- add an int3 trap to the address that will be patched
772 *	- sync cores
773 *	- update all but the first byte of the patched range
774 *	- sync cores
775 *	- replace the first byte (int3) with the first byte of
776 *	  the replacement opcode
777 *	- sync cores
778 *
779 * Note: must be called under text_mutex.
780 */
781void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
782{
783	unsigned char int3 = 0xcc;
784
785	bp_int3_handler = handler;
786	bp_int3_addr = (u8 *)addr + sizeof(int3);
787	bp_patching_in_progress = true;
788	/*
789	 * Corresponding read barrier in int3 notifier for making sure the
790	 * in_progress and handler are correctly ordered wrt. patching.
791	 */
792	smp_wmb();
793
794	text_poke(addr, &int3, sizeof(int3));
795
796	on_each_cpu(do_sync_core, NULL, 1);
797
798	if (len - sizeof(int3) > 0) {
799		/* patch all but the first byte */
800		text_poke((char *)addr + sizeof(int3),
801			  (const char *) opcode + sizeof(int3),
802			  len - sizeof(int3));
803		/*
804		 * According to Intel, this core syncing is very likely
805		 * not necessary and we'd be safe even without it. But
806		 * better safe than sorry (plus there's not only Intel).
807		 */
808		on_each_cpu(do_sync_core, NULL, 1);
809	}
810
811	/* patch the first byte */
812	text_poke(addr, opcode, sizeof(int3));
813
814	on_each_cpu(do_sync_core, NULL, 1);
815	/*
816	 * sync_core() implies an smp_mb() and orders this store against
817	 * the writing of the new instruction.
818	 */
819	bp_patching_in_progress = false;
820
821	return addr;
822}
823