v5.4: arch/x86/kernel/alternatives.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2#define pr_fmt(fmt) "SMP alternatives: " fmt
   3
   4#include <linux/module.h>
   5#include <linux/sched.h>
   6#include <linux/mutex.h>
   7#include <linux/list.h>
   8#include <linux/stringify.h>
   9#include <linux/mm.h>
  10#include <linux/vmalloc.h>
  11#include <linux/memory.h>
  12#include <linux/stop_machine.h>
  13#include <linux/slab.h>
  14#include <linux/kdebug.h>
  15#include <linux/kprobes.h>
  16#include <linux/mmu_context.h>
  17#include <linux/bsearch.h>
  18#include <asm/text-patching.h>
  19#include <asm/alternative.h>
  20#include <asm/sections.h>
  21#include <asm/pgtable.h>
  22#include <asm/mce.h>
  23#include <asm/nmi.h>
  24#include <asm/cacheflush.h>
  25#include <asm/tlbflush.h>
  26#include <asm/io.h>
  27#include <asm/fixmap.h>
  28
  29int __read_mostly alternatives_patched;
  30
  31EXPORT_SYMBOL_GPL(alternatives_patched);
  32
  33#define MAX_PATCH_LEN (255-1)
  34
  35static int __initdata_or_module debug_alternative;
  36
  37static int __init debug_alt(char *str)
  38{
  39	debug_alternative = 1;
  40	return 1;
  41}
  42__setup("debug-alternative", debug_alt);
  43
  44static int noreplace_smp;
  45
  46static int __init setup_noreplace_smp(char *str)
  47{
  48	noreplace_smp = 1;
  49	return 1;
  50}
  51__setup("noreplace-smp", setup_noreplace_smp);
  52
  53#define DPRINTK(fmt, args...)						\
  54do {									\
  55	if (debug_alternative)						\
  56		printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##args);	\
  57} while (0)
  58
  59#define DUMP_BYTES(buf, len, fmt, args...)				\
  60do {									\
  61	if (unlikely(debug_alternative)) {				\
  62		int j;							\
  63									\
  64		if (!(len))						\
  65			break;						\
  66									\
  67		printk(KERN_DEBUG fmt, ##args);				\
  68		for (j = 0; j < (len) - 1; j++)				\
  69			printk(KERN_CONT "%02hhx ", buf[j]);		\
  70		printk(KERN_CONT "%02hhx\n", buf[j]);			\
  71	}								\
  72} while (0)
  73
  74/*
  75 * Each GENERIC_NOPX is of X bytes, and defined as an array of bytes
  76 * that correspond to that nop. Getting from one nop to the next, we
  77 * add to the array the offset that is equal to the sum of all sizes of
  78 * nops preceding the one we are after.
  79 *
  80 * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the
  81 * nice symmetry of sizes of the previous nops.
  82 */
  83#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
  84static const unsigned char intelnops[] =
  85{
  86	GENERIC_NOP1,
  87	GENERIC_NOP2,
  88	GENERIC_NOP3,
  89	GENERIC_NOP4,
  90	GENERIC_NOP5,
  91	GENERIC_NOP6,
  92	GENERIC_NOP7,
  93	GENERIC_NOP8,
  94	GENERIC_NOP5_ATOMIC
  95};
  96static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
  97{
  98	NULL,
  99	intelnops,
 100	intelnops + 1,
 101	intelnops + 1 + 2,
 102	intelnops + 1 + 2 + 3,
 103	intelnops + 1 + 2 + 3 + 4,
 104	intelnops + 1 + 2 + 3 + 4 + 5,
 105	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
 106	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
 107	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
 108};
 109#endif
 110
 111#ifdef K8_NOP1
 112static const unsigned char k8nops[] =
 113{
 114	K8_NOP1,
 115	K8_NOP2,
 116	K8_NOP3,
 117	K8_NOP4,
 118	K8_NOP5,
 119	K8_NOP6,
 120	K8_NOP7,
 121	K8_NOP8,
 122	K8_NOP5_ATOMIC
 123};
 124static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
 125{
 126	NULL,
 127	k8nops,
 128	k8nops + 1,
 129	k8nops + 1 + 2,
 130	k8nops + 1 + 2 + 3,
 131	k8nops + 1 + 2 + 3 + 4,
 132	k8nops + 1 + 2 + 3 + 4 + 5,
 133	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
 134	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
 135	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
 136};
 137#endif
 138
 139#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
 140static const unsigned char k7nops[] =
 141{
 142	K7_NOP1,
 143	K7_NOP2,
 144	K7_NOP3,
 145	K7_NOP4,
 146	K7_NOP5,
 147	K7_NOP6,
 148	K7_NOP7,
 149	K7_NOP8,
 150	K7_NOP5_ATOMIC
 151};
 152static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
 153{
 154	NULL,
 155	k7nops,
 156	k7nops + 1,
 157	k7nops + 1 + 2,
 158	k7nops + 1 + 2 + 3,
 159	k7nops + 1 + 2 + 3 + 4,
 160	k7nops + 1 + 2 + 3 + 4 + 5,
 161	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
 162	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
 163	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
 164};
 165#endif
 166
 167#ifdef P6_NOP1
 168static const unsigned char p6nops[] =
 169{
 170	P6_NOP1,
 171	P6_NOP2,
 172	P6_NOP3,
 173	P6_NOP4,
 174	P6_NOP5,
 175	P6_NOP6,
 176	P6_NOP7,
 177	P6_NOP8,
 178	P6_NOP5_ATOMIC
 179};
 180static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
 181{
 182	NULL,
 183	p6nops,
 184	p6nops + 1,
 185	p6nops + 1 + 2,
 186	p6nops + 1 + 2 + 3,
 187	p6nops + 1 + 2 + 3 + 4,
 188	p6nops + 1 + 2 + 3 + 4 + 5,
 189	p6nops + 1 + 2 + 3 + 4 + 5 + 6,
 190	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
 191	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
 192};
 193#endif
 194
 195/* Initialize these to a safe default */
 196#ifdef CONFIG_X86_64
 197const unsigned char * const *ideal_nops = p6_nops;
 198#else
 199const unsigned char * const *ideal_nops = intel_nops;
 200#endif
 201
 202void __init arch_init_ideal_nops(void)
 203{
 204	switch (boot_cpu_data.x86_vendor) {
 205	case X86_VENDOR_INTEL:
 206		/*
 207		 * Due to a decoder implementation quirk, some
 208		 * specific Intel CPUs actually perform better with
 209		 * the "k8_nops" than with the SDM-recommended NOPs.
 210		 */
 211		if (boot_cpu_data.x86 == 6 &&
 212		    boot_cpu_data.x86_model >= 0x0f &&
 213		    boot_cpu_data.x86_model != 0x1c &&
 214		    boot_cpu_data.x86_model != 0x26 &&
 215		    boot_cpu_data.x86_model != 0x27 &&
 216		    boot_cpu_data.x86_model < 0x30) {
 217			ideal_nops = k8_nops;
 218		} else if (boot_cpu_has(X86_FEATURE_NOPL)) {
 219			   ideal_nops = p6_nops;
 220		} else {
 221#ifdef CONFIG_X86_64
 222			ideal_nops = k8_nops;
 223#else
 224			ideal_nops = intel_nops;
 225#endif
 226		}
 227		break;
 228
 229	case X86_VENDOR_HYGON:
 230		ideal_nops = p6_nops;
 231		return;
 232
 233	case X86_VENDOR_AMD:
 234		if (boot_cpu_data.x86 > 0xf) {
 235			ideal_nops = p6_nops;
 236			return;
 237		}
 238
 239		/* fall through */
 240
 241	default:
 242#ifdef CONFIG_X86_64
 243		ideal_nops = k8_nops;
 244#else
 245		if (boot_cpu_has(X86_FEATURE_K8))
 246			ideal_nops = k8_nops;
 247		else if (boot_cpu_has(X86_FEATURE_K7))
 248			ideal_nops = k7_nops;
 249		else
 250			ideal_nops = intel_nops;
 251#endif
 252	}
 253}
 254
 255/* Use this to add nops to a buffer, then text_poke the whole buffer. */
 256static void __init_or_module add_nops(void *insns, unsigned int len)
 257{
 258	while (len > 0) {
 259		unsigned int noplen = len;
 260		if (noplen > ASM_NOP_MAX)
 261			noplen = ASM_NOP_MAX;
 262		memcpy(insns, ideal_nops[noplen], noplen);
 263		insns += noplen;
 264		len -= noplen;
 265	}
 266}
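/*
 * Illustrative sketch, not part of the upstream file: add_nops() splits a
 * padding hole into chunks of at most ASM_NOP_MAX bytes, each filled with
 * the ideal NOP encoding of that length for the running CPU.  For an
 * 11-byte hole it emits ideal_nops[8] followed by ideal_nops[3].
 */
#if 0	/* example only; "site" is a hypothetical patch target */
	u8 pad[11];

	add_nops(pad, sizeof(pad));			/* one 8-byte + one 3-byte NOP */
	text_poke_early(site, pad, sizeof(pad));
#endif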
 267
 268extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
 269extern s32 __smp_locks[], __smp_locks_end[];
 270void text_poke_early(void *addr, const void *opcode, size_t len);
 271
 272/*
 273 * Are we looking at a near JMP with a 1 or 4-byte displacement.
 274 */
 275static inline bool is_jmp(const u8 opcode)
 276{
 277	return opcode == 0xeb || opcode == 0xe9;
 278}
 279
 280static void __init_or_module
 281recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insn_buff)
 282{
 283	u8 *next_rip, *tgt_rip;
 284	s32 n_dspl, o_dspl;
 285	int repl_len;
 286
 287	if (a->replacementlen != 5)
 288		return;
 289
 290	o_dspl = *(s32 *)(insn_buff + 1);
 291
 292	/* next_rip of the replacement JMP */
 293	next_rip = repl_insn + a->replacementlen;
 294	/* target rip of the replacement JMP */
 295	tgt_rip  = next_rip + o_dspl;
 296	n_dspl = tgt_rip - orig_insn;
 297
 298	DPRINTK("target RIP: %px, new_displ: 0x%x", tgt_rip, n_dspl);
 299
 300	if (tgt_rip - orig_insn >= 0) {
 301		if (n_dspl - 2 <= 127)
 302			goto two_byte_jmp;
 303		else
 304			goto five_byte_jmp;
 305	/* negative offset */
 306	} else {
 307		if (((n_dspl - 2) & 0xff) == (n_dspl - 2))
 308			goto two_byte_jmp;
 309		else
 310			goto five_byte_jmp;
 311	}
 312
 313two_byte_jmp:
 314	n_dspl -= 2;
 315
 316	insn_buff[0] = 0xeb;
 317	insn_buff[1] = (s8)n_dspl;
 318	add_nops(insn_buff + 2, 3);
 319
 320	repl_len = 2;
 321	goto done;
 322
 323five_byte_jmp:
 324	n_dspl -= 5;
 325
 326	insn_buff[0] = 0xe9;
 327	*(s32 *)&insn_buff[1] = n_dspl;
 328
 329	repl_len = 5;
 330
 331done:
 332
 333	DPRINTK("final displ: 0x%08x, JMP 0x%lx",
 334		n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
 335}
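/*
 * Worked example (illustrative, not part of the upstream file): if the
 * replacement is a 5-byte JMP whose target lies 20 bytes past the original
 * site (tgt_rip - orig_insn == 20), then n_dspl - 2 == 18 fits in a signed
 * byte, so the buffer is rewritten to the 2-byte form "eb 12" (JMP +18)
 * and the remaining 3 bytes are filled with a NOP by add_nops().
 */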
 336
 337/*
 338 * "noinline" to cause control flow change and thus invalidate I$ and
 339 * cause refetch after modification.
 340 */
 341static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr)
 342{
 343	unsigned long flags;
 344	int i;
 345
 346	for (i = 0; i < a->padlen; i++) {
 347		if (instr[i] != 0x90)
 348			return;
 349	}
 350
 351	local_irq_save(flags);
 352	add_nops(instr + (a->instrlen - a->padlen), a->padlen);
 353	local_irq_restore(flags);
 354
 355	DUMP_BYTES(instr, a->instrlen, "%px: [%d:%d) optimized NOPs: ",
 356		   instr, a->instrlen - a->padlen, a->padlen);
 357}
 358
 359/*
 360 * Replace instructions with better alternatives for this CPU type. This runs
 361 * before SMP is initialized to avoid SMP problems with self modifying code.
  362 * This implies that asymmetric systems where APs have fewer capabilities than
 363 * the boot processor are not handled. Tough. Make sure you disable such
 364 * features by hand.
 365 *
 366 * Marked "noinline" to cause control flow change and thus insn cache
 367 * to refetch changed I$ lines.
 368 */
 369void __init_or_module noinline apply_alternatives(struct alt_instr *start,
 370						  struct alt_instr *end)
 371{
 372	struct alt_instr *a;
 373	u8 *instr, *replacement;
 374	u8 insn_buff[MAX_PATCH_LEN];
 375
 376	DPRINTK("alt table %px, -> %px", start, end);
 377	/*
 378	 * The scan order should be from start to end. A later scanned
 379	 * alternative code can overwrite previously scanned alternative code.
 380	 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
 381	 * patch code.
 382	 *
 383	 * So be careful if you want to change the scan order to any other
 384	 * order.
 385	 */
 386	for (a = start; a < end; a++) {
 387		int insn_buff_sz = 0;
 388
 389		instr = (u8 *)&a->instr_offset + a->instr_offset;
 390		replacement = (u8 *)&a->repl_offset + a->repl_offset;
 391		BUG_ON(a->instrlen > sizeof(insn_buff));
 392		BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
 393		if (!boot_cpu_has(a->cpuid)) {
 394			if (a->padlen > 1)
 395				optimize_nops(a, instr);
 396
 397			continue;
 398		}
 399
 400		DPRINTK("feat: %d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d), pad: %d",
 401			a->cpuid >> 5,
 402			a->cpuid & 0x1f,
 403			instr, instr, a->instrlen,
 404			replacement, a->replacementlen, a->padlen);
 405
 406		DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr);
 407		DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement);
 408
 409		memcpy(insn_buff, replacement, a->replacementlen);
 410		insn_buff_sz = a->replacementlen;
 411
 412		/*
  413		 * 0xe8 is a relative CALL; fix the offset.
 414		 *
 415		 * Instruction length is checked before the opcode to avoid
 416		 * accessing uninitialized bytes for zero-length replacements.
 417		 */
 418		if (a->replacementlen == 5 && *insn_buff == 0xe8) {
 419			*(s32 *)(insn_buff + 1) += replacement - instr;
 420			DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
 421				*(s32 *)(insn_buff + 1),
 422				(unsigned long)instr + *(s32 *)(insn_buff + 1) + 5);
 423		}
 424
 425		if (a->replacementlen && is_jmp(replacement[0]))
 426			recompute_jump(a, instr, replacement, insn_buff);
 427
 428		if (a->instrlen > a->replacementlen) {
 429			add_nops(insn_buff + a->replacementlen,
 430				 a->instrlen - a->replacementlen);
 431			insn_buff_sz += a->instrlen - a->replacementlen;
 432		}
 433		DUMP_BYTES(insn_buff, insn_buff_sz, "%px: final_insn: ", instr);
 434
 435		text_poke_early(instr, insn_buff, insn_buff_sz);
 436	}
 437}
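/*
 * Illustrative note, not part of the upstream file: each struct alt_instr
 * entry consumed above is emitted by an ALTERNATIVE()/alternative() site
 * (see asm/alternative.h).  instr_offset and repl_offset are stored as
 * 32-bit self-relative offsets, which is why the original and replacement
 * addresses are recovered as "(u8 *)&a->instr_offset + a->instr_offset".
 * cpuid holds the X86_FEATURE_* bit that gates the patch, and padlen is
 * the number of NOP padding bytes appended after the original instruction.
 */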
 438
 439#ifdef CONFIG_SMP
 440static void alternatives_smp_lock(const s32 *start, const s32 *end,
 441				  u8 *text, u8 *text_end)
 442{
 443	const s32 *poff;
 444
 445	for (poff = start; poff < end; poff++) {
 446		u8 *ptr = (u8 *)poff + *poff;
 447
 448		if (!*poff || ptr < text || ptr >= text_end)
 449			continue;
 450		/* turn DS segment override prefix into lock prefix */
 451		if (*ptr == 0x3e)
 452			text_poke(ptr, ((unsigned char []){0xf0}), 1);
 453	}
 454}
 455
 456static void alternatives_smp_unlock(const s32 *start, const s32 *end,
 457				    u8 *text, u8 *text_end)
 458{
 459	const s32 *poff;
 460
 461	for (poff = start; poff < end; poff++) {
 462		u8 *ptr = (u8 *)poff + *poff;
 463
 464		if (!*poff || ptr < text || ptr >= text_end)
 465			continue;
 466		/* turn lock prefix into DS segment override prefix */
 467		if (*ptr == 0xf0)
 468			text_poke(ptr, ((unsigned char []){0x3E}), 1);
 469	}
 470}
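/*
 * Illustrative example, not part of the upstream file: __smp_locks holds
 * self-relative pointers to the prefix byte of every LOCK_PREFIX-annotated
 * instruction.  On a uniprocessor kernel, "lock; addl $1,(%rax)"
 * (f0 83 00 01) is rewritten above to "ds; addl $1,(%rax)" (3e 83 00 01);
 * the DS override is a harmless no-op here, so the LOCK prefix cost is
 * avoided.  alternatives_smp_lock() restores the f0 byte when a second CPU
 * is brought up.
 */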
 471
 472struct smp_alt_module {
 473	/* what is this ??? */
 474	struct module	*mod;
 475	char		*name;
 476
 477	/* ptrs to lock prefixes */
 478	const s32	*locks;
 479	const s32	*locks_end;
 480
 481	/* .text segment, needed to avoid patching init code ;) */
 482	u8		*text;
 483	u8		*text_end;
 484
 485	struct list_head next;
 486};
 487static LIST_HEAD(smp_alt_modules);
 488static bool uniproc_patched = false;	/* protected by text_mutex */
 489
 490void __init_or_module alternatives_smp_module_add(struct module *mod,
 491						  char *name,
 492						  void *locks, void *locks_end,
 493						  void *text,  void *text_end)
 494{
 495	struct smp_alt_module *smp;
 496
 497	mutex_lock(&text_mutex);
 498	if (!uniproc_patched)
 499		goto unlock;
 500
 501	if (num_possible_cpus() == 1)
 502		/* Don't bother remembering, we'll never have to undo it. */
 503		goto smp_unlock;
 504
 505	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
 506	if (NULL == smp)
 507		/* we'll run the (safe but slow) SMP code then ... */
 508		goto unlock;
 509
 510	smp->mod	= mod;
 511	smp->name	= name;
 512	smp->locks	= locks;
 513	smp->locks_end	= locks_end;
 514	smp->text	= text;
 515	smp->text_end	= text_end;
  516	DPRINTK("locks %p -> %p, text %p -> %p, name %s",
 517		smp->locks, smp->locks_end,
 518		smp->text, smp->text_end, smp->name);
 519
 520	list_add_tail(&smp->next, &smp_alt_modules);
 521smp_unlock:
 522	alternatives_smp_unlock(locks, locks_end, text, text_end);
 523unlock:
 524	mutex_unlock(&text_mutex);
 525}
 526
 527void __init_or_module alternatives_smp_module_del(struct module *mod)
 528{
 529	struct smp_alt_module *item;
 530
 531	mutex_lock(&text_mutex);
 532	list_for_each_entry(item, &smp_alt_modules, next) {
 533		if (mod != item->mod)
 534			continue;
 535		list_del(&item->next);
 536		kfree(item);
 537		break;
 538	}
 539	mutex_unlock(&text_mutex);
 540}
 541
 542void alternatives_enable_smp(void)
 543{
 544	struct smp_alt_module *mod;
 545
 546	/* Why bother if there are no other CPUs? */
 547	BUG_ON(num_possible_cpus() == 1);
 548
 549	mutex_lock(&text_mutex);
 550
 551	if (uniproc_patched) {
 552		pr_info("switching to SMP code\n");
 553		BUG_ON(num_online_cpus() != 1);
 554		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
 555		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
 556		list_for_each_entry(mod, &smp_alt_modules, next)
 557			alternatives_smp_lock(mod->locks, mod->locks_end,
 558					      mod->text, mod->text_end);
 559		uniproc_patched = false;
 560	}
 561	mutex_unlock(&text_mutex);
 562}
 563
 564/*
 565 * Return 1 if the address range is reserved for SMP-alternatives.
 566 * Must hold text_mutex.
 567 */
 568int alternatives_text_reserved(void *start, void *end)
 569{
 570	struct smp_alt_module *mod;
 571	const s32 *poff;
 572	u8 *text_start = start;
 573	u8 *text_end = end;
 574
 575	lockdep_assert_held(&text_mutex);
 576
 577	list_for_each_entry(mod, &smp_alt_modules, next) {
 578		if (mod->text > text_end || mod->text_end < text_start)
 579			continue;
 580		for (poff = mod->locks; poff < mod->locks_end; poff++) {
 581			const u8 *ptr = (const u8 *)poff + *poff;
 582
 583			if (text_start <= ptr && text_end > ptr)
 584				return 1;
 585		}
 586	}
 587
 588	return 0;
 589}
 590#endif /* CONFIG_SMP */
 591
 592#ifdef CONFIG_PARAVIRT
 593void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
 594				     struct paravirt_patch_site *end)
 595{
 596	struct paravirt_patch_site *p;
 597	char insn_buff[MAX_PATCH_LEN];
 598
 599	for (p = start; p < end; p++) {
 600		unsigned int used;
 601
 602		BUG_ON(p->len > MAX_PATCH_LEN);
 603		/* prep the buffer with the original instructions */
 604		memcpy(insn_buff, p->instr, p->len);
 605		used = pv_ops.init.patch(p->type, insn_buff, (unsigned long)p->instr, p->len);
 606
 607		BUG_ON(used > p->len);
 608
 609		/* Pad the rest with nops */
 610		add_nops(insn_buff + used, p->len - used);
 611		text_poke_early(p->instr, insn_buff, p->len);
 612	}
 613}
 614extern struct paravirt_patch_site __start_parainstructions[],
 615	__stop_parainstructions[];
 616#endif	/* CONFIG_PARAVIRT */
 617
 618/*
 619 * Self-test for the INT3 based CALL emulation code.
 620 *
 621 * This exercises int3_emulate_call() to make sure INT3 pt_regs are set up
 622 * properly and that there is a stack gap between the INT3 frame and the
 623 * previous context. Without this gap doing a virtual PUSH on the interrupted
 624 * stack would corrupt the INT3 IRET frame.
 625 *
 626 * See entry_{32,64}.S for more details.
 627 */
 628
 629/*
 630 * We define the int3_magic() function in assembly to control the calling
 631 * convention such that we can 'call' it from assembly.
 632 */
 633
 634extern void int3_magic(unsigned int *ptr); /* defined in asm */
 635
 636asm (
 637"	.pushsection	.init.text, \"ax\", @progbits\n"
 638"	.type		int3_magic, @function\n"
 639"int3_magic:\n"
 640"	movl	$1, (%" _ASM_ARG1 ")\n"
 641"	ret\n"
 642"	.size		int3_magic, .-int3_magic\n"
 643"	.popsection\n"
 644);
 645
 646extern __initdata unsigned long int3_selftest_ip; /* defined in asm below */
 647
 648static int __init
 649int3_exception_notify(struct notifier_block *self, unsigned long val, void *data)
 650{
 651	struct die_args *args = data;
 652	struct pt_regs *regs = args->regs;
 653
 654	if (!regs || user_mode(regs))
 655		return NOTIFY_DONE;
 656
 657	if (val != DIE_INT3)
 658		return NOTIFY_DONE;
 659
 660	if (regs->ip - INT3_INSN_SIZE != int3_selftest_ip)
 661		return NOTIFY_DONE;
 662
 663	int3_emulate_call(regs, (unsigned long)&int3_magic);
 664	return NOTIFY_STOP;
 665}
 666
 667static void __init int3_selftest(void)
 668{
 669	static __initdata struct notifier_block int3_exception_nb = {
 670		.notifier_call	= int3_exception_notify,
 671		.priority	= INT_MAX-1, /* last */
 672	};
 673	unsigned int val = 0;
 674
 675	BUG_ON(register_die_notifier(&int3_exception_nb));
 676
 677	/*
 678	 * Basically: int3_magic(&val); but really complicated :-)
 679	 *
 680	 * Stick the address of the INT3 instruction into int3_selftest_ip,
 681	 * then trigger the INT3, padded with NOPs to match a CALL instruction
 682	 * length.
 683	 */
 684	asm volatile ("1: int3; nop; nop; nop; nop\n\t"
 685		      ".pushsection .init.data,\"aw\"\n\t"
 686		      ".align " __ASM_SEL(4, 8) "\n\t"
 687		      ".type int3_selftest_ip, @object\n\t"
 688		      ".size int3_selftest_ip, " __ASM_SEL(4, 8) "\n\t"
 689		      "int3_selftest_ip:\n\t"
 690		      __ASM_SEL(.long, .quad) " 1b\n\t"
 691		      ".popsection\n\t"
 692		      : ASM_CALL_CONSTRAINT
 693		      : __ASM_SEL_RAW(a, D) (&val)
 694		      : "memory");
 695
 696	BUG_ON(val != 1);
 697
 698	unregister_die_notifier(&int3_exception_nb);
 699}
 700
 701void __init alternative_instructions(void)
 702{
 703	int3_selftest();
 704
 705	/*
  706	 * The patching is not fully atomic, so try to avoid local
  707	 * interrupts that might execute the to-be-patched code.
 708	 * Other CPUs are not running.
 709	 */
 710	stop_nmi();
 711
 712	/*
 713	 * Don't stop machine check exceptions while patching.
 714	 * MCEs only happen when something got corrupted and in this
 715	 * case we must do something about the corruption.
 716	 * Ignoring it is worse than an unlikely patching race.
 717	 * Also machine checks tend to be broadcast and if one CPU
 718	 * goes into machine check the others follow quickly, so we don't
  719	 * expect a machine check to cause undue problems during code
 720	 * patching.
 721	 */
 722
 723	apply_alternatives(__alt_instructions, __alt_instructions_end);
 724
 725#ifdef CONFIG_SMP
 726	/* Patch to UP if other cpus not imminent. */
 727	if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
 728		uniproc_patched = true;
 729		alternatives_smp_module_add(NULL, "core kernel",
 730					    __smp_locks, __smp_locks_end,
 731					    _text, _etext);
 732	}
 733
 734	if (!uniproc_patched || num_possible_cpus() == 1) {
 735		free_init_pages("SMP alternatives",
 736				(unsigned long)__smp_locks,
 737				(unsigned long)__smp_locks_end);
 738	}
 739#endif
 740
 741	apply_paravirt(__parainstructions, __parainstructions_end);
 742
 743	restart_nmi();
 744	alternatives_patched = 1;
 745}
 746
 747/**
 748 * text_poke_early - Update instructions on a live kernel at boot time
 749 * @addr: address to modify
 750 * @opcode: source of the copy
 751 * @len: length to copy
 752 *
 753 * When you use this code to patch more than one byte of an instruction
 754 * you need to make sure that other CPUs cannot execute this code in parallel.
 755 * Also no thread must be currently preempted in the middle of these
 756 * instructions. And on the local CPU you need to be protected against NMI or
 757 * MCE handlers seeing an inconsistent instruction while you patch.
 758 */
 759void __init_or_module text_poke_early(void *addr, const void *opcode,
 760				      size_t len)
 761{
 762	unsigned long flags;
 763
 764	if (boot_cpu_has(X86_FEATURE_NX) &&
 765	    is_module_text_address((unsigned long)addr)) {
 766		/*
  767		 * Module text is initially marked as non-executable, so the
 768		 * code cannot be running and speculative code-fetches are
 769		 * prevented. Just change the code.
 770		 */
 771		memcpy(addr, opcode, len);
 772	} else {
 773		local_irq_save(flags);
 774		memcpy(addr, opcode, len);
 775		local_irq_restore(flags);
 776		sync_core();
 777
 778		/*
 779		 * Could also do a CLFLUSH here to speed up CPU recovery; but
 780		 * that causes hangs on some VIA CPUs.
 781		 */
 782	}
 783}
 784
 785__ro_after_init struct mm_struct *poking_mm;
 786__ro_after_init unsigned long poking_addr;
 787
 788static void *__text_poke(void *addr, const void *opcode, size_t len)
 789{
 790	bool cross_page_boundary = offset_in_page(addr) + len > PAGE_SIZE;
 791	struct page *pages[2] = {NULL};
 792	temp_mm_state_t prev;
 793	unsigned long flags;
 794	pte_t pte, *ptep;
 795	spinlock_t *ptl;
 796	pgprot_t pgprot;
 797
 798	/*
 799	 * While boot memory allocator is running we cannot use struct pages as
 800	 * they are not yet initialized. There is no way to recover.
 801	 */
 802	BUG_ON(!after_bootmem);
 803
 804	if (!core_kernel_text((unsigned long)addr)) {
 805		pages[0] = vmalloc_to_page(addr);
 806		if (cross_page_boundary)
 807			pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
 808	} else {
 809		pages[0] = virt_to_page(addr);
 810		WARN_ON(!PageReserved(pages[0]));
 811		if (cross_page_boundary)
 812			pages[1] = virt_to_page(addr + PAGE_SIZE);
 813	}
 814	/*
 815	 * If something went wrong, crash and burn since recovery paths are not
 816	 * implemented.
 817	 */
 818	BUG_ON(!pages[0] || (cross_page_boundary && !pages[1]));
 819
 820	local_irq_save(flags);
 821
 822	/*
 823	 * Map the page without the global bit, as TLB flushing is done with
 824	 * flush_tlb_mm_range(), which is intended for non-global PTEs.
 825	 */
 826	pgprot = __pgprot(pgprot_val(PAGE_KERNEL) & ~_PAGE_GLOBAL);
 827
 828	/*
  829	 * The lock is not really needed, but this allows us to avoid open-coding.
 830	 */
 831	ptep = get_locked_pte(poking_mm, poking_addr, &ptl);
 832
 833	/*
 834	 * This must not fail; preallocated in poking_init().
 835	 */
 836	VM_BUG_ON(!ptep);
 837
 838	pte = mk_pte(pages[0], pgprot);
 839	set_pte_at(poking_mm, poking_addr, ptep, pte);
 840
 841	if (cross_page_boundary) {
 842		pte = mk_pte(pages[1], pgprot);
 843		set_pte_at(poking_mm, poking_addr + PAGE_SIZE, ptep + 1, pte);
 844	}
 845
 846	/*
 847	 * Loading the temporary mm behaves as a compiler barrier, which
 848	 * guarantees that the PTE will be set at the time memcpy() is done.
 849	 */
 850	prev = use_temporary_mm(poking_mm);
 851
 852	kasan_disable_current();
 853	memcpy((u8 *)poking_addr + offset_in_page(addr), opcode, len);
 854	kasan_enable_current();
 855
 856	/*
 857	 * Ensure that the PTE is only cleared after the instructions of memcpy
 858	 * were issued by using a compiler barrier.
 859	 */
 860	barrier();
 861
 862	pte_clear(poking_mm, poking_addr, ptep);
 863	if (cross_page_boundary)
 864		pte_clear(poking_mm, poking_addr + PAGE_SIZE, ptep + 1);
 865
 866	/*
 867	 * Loading the previous page-table hierarchy requires a serializing
 868	 * instruction that already allows the core to see the updated version.
 869	 * Xen-PV is assumed to serialize execution in a similar manner.
 870	 */
 871	unuse_temporary_mm(prev);
 872
 873	/*
  874	 * Flushing the TLB might involve IPIs, which would require IRQs to be
  875	 * enabled, but not when the mm is unused, as it is at this point.
 876	 */
 877	flush_tlb_mm_range(poking_mm, poking_addr, poking_addr +
 878			   (cross_page_boundary ? 2 : 1) * PAGE_SIZE,
 879			   PAGE_SHIFT, false);
 880
 881	/*
 882	 * If the text does not match what we just wrote then something is
 883	 * fundamentally screwy; there's nothing we can really do about that.
 884	 */
 885	BUG_ON(memcmp(addr, opcode, len));
 886
 887	pte_unmap_unlock(ptep, ptl);
 888	local_irq_restore(flags);
 889	return addr;
 890}
 891
 892/**
 893 * text_poke - Update instructions on a live kernel
 894 * @addr: address to modify
 895 * @opcode: source of the copy
 896 * @len: length to copy
 897 *
 898 * Only atomic text poke/set should be allowed when not doing early patching.
 899 * It means the size must be writable atomically and the address must be aligned
 900 * in a way that permits an atomic write. It also makes sure we fit on a single
 901 * page.
 902 *
 903 * Note that the caller must ensure that if the modified code is part of a
 904 * module, the module would not be removed during poking. This can be achieved
 905 * by registering a module notifier, and ordering module removal and patching
  906 * through a mutex.
 907 */
 908void *text_poke(void *addr, const void *opcode, size_t len)
 909{
 910	lockdep_assert_held(&text_mutex);
 911
 912	return __text_poke(addr, opcode, len);
 913}
 914
 915/**
 916 * text_poke_kgdb - Update instructions on a live kernel by kgdb
 917 * @addr: address to modify
 918 * @opcode: source of the copy
 919 * @len: length to copy
 920 *
 921 * Only atomic text poke/set should be allowed when not doing early patching.
 922 * It means the size must be writable atomically and the address must be aligned
 923 * in a way that permits an atomic write. It also makes sure we fit on a single
 924 * page.
 925 *
 926 * Context: should only be used by kgdb, which ensures no other core is running,
 927 *	    despite the fact it does not hold the text_mutex.
 928 */
 929void *text_poke_kgdb(void *addr, const void *opcode, size_t len)
 930{
 931	return __text_poke(addr, opcode, len);
 932}
 933
 934static void do_sync_core(void *info)
 935{
 936	sync_core();
 937}
 938
 939static struct bp_patching_desc {
 940	struct text_poke_loc *vec;
 941	int nr_entries;
 942} bp_patching;
 943
 944static int patch_cmp(const void *key, const void *elt)
 945{
 946	struct text_poke_loc *tp = (struct text_poke_loc *) elt;
 947
 948	if (key < tp->addr)
 949		return -1;
 950	if (key > tp->addr)
 951		return 1;
 952	return 0;
 953}
 954NOKPROBE_SYMBOL(patch_cmp);
 955
 956int poke_int3_handler(struct pt_regs *regs)
 957{
 958	struct text_poke_loc *tp;
 959	unsigned char int3 = 0xcc;
 960	void *ip;
 961
 962	/*
 963	 * Having observed our INT3 instruction, we now must observe
 964	 * bp_patching.nr_entries.
 965	 *
 966	 * 	nr_entries != 0			INT3
 967	 * 	WMB				RMB
 968	 * 	write INT3			if (nr_entries)
 969	 *
 970	 * Idem for other elements in bp_patching.
 971	 */
 972	smp_rmb();
 973
 974	if (likely(!bp_patching.nr_entries))
 975		return 0;
 976
 977	if (user_mode(regs))
 978		return 0;
 979
 980	/*
 981	 * Discount the sizeof(int3). See text_poke_bp_batch().
 982	 */
 983	ip = (void *) regs->ip - sizeof(int3);
 984
 985	/*
 986	 * Skip the binary search if there is a single member in the vector.
 987	 */
 988	if (unlikely(bp_patching.nr_entries > 1)) {
 989		tp = bsearch(ip, bp_patching.vec, bp_patching.nr_entries,
 990			     sizeof(struct text_poke_loc),
 991			     patch_cmp);
 992		if (!tp)
 993			return 0;
 994	} else {
 995		tp = bp_patching.vec;
 996		if (tp->addr != ip)
 997			return 0;
 998	}
 999
1000	/* set up the specified breakpoint detour */
1001	regs->ip = (unsigned long) tp->detour;
1002
1003	return 1;
1004}
1005NOKPROBE_SYMBOL(poke_int3_handler);
1006
1007/**
1008 * text_poke_bp_batch() -- update instructions on live kernel on SMP
1009 * @tp:			vector of instructions to patch
1010 * @nr_entries:		number of entries in the vector
1011 *
 1012 * Modify multi-byte instructions by using an int3 breakpoint on SMP.
1013 * We completely avoid stop_machine() here, and achieve the
1014 * synchronization using int3 breakpoint.
1015 *
1016 * The way it is done:
1017 * 	- For each entry in the vector:
 1018 *		- add an int3 trap to the address that will be patched
1019 *	- sync cores
1020 *	- For each entry in the vector:
1021 *		- update all but the first byte of the patched range
1022 *	- sync cores
1023 *	- For each entry in the vector:
1024 *		- replace the first byte (int3) by the first byte of
1025 *		  replacing opcode
1026 *	- sync cores
1027 */
1028void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
1029{
1030	int patched_all_but_first = 0;
1031	unsigned char int3 = 0xcc;
1032	unsigned int i;
1033
1034	lockdep_assert_held(&text_mutex);
1035
1036	bp_patching.vec = tp;
1037	bp_patching.nr_entries = nr_entries;
1038
1039	/*
1040	 * Corresponding read barrier in int3 notifier for making sure the
1041	 * nr_entries and handler are correctly ordered wrt. patching.
1042	 */
1043	smp_wmb();
1044
1045	/*
 1046	 * First step: add an int3 trap to the address that will be patched.
1047	 */
1048	for (i = 0; i < nr_entries; i++)
1049		text_poke(tp[i].addr, &int3, sizeof(int3));
1050
1051	on_each_cpu(do_sync_core, NULL, 1);
1052
1053	/*
1054	 * Second step: update all but the first byte of the patched range.
1055	 */
1056	for (i = 0; i < nr_entries; i++) {
1057		if (tp[i].len - sizeof(int3) > 0) {
1058			text_poke((char *)tp[i].addr + sizeof(int3),
1059				  (const char *)tp[i].opcode + sizeof(int3),
1060				  tp[i].len - sizeof(int3));
1061			patched_all_but_first++;
1062		}
1063	}
1064
1065	if (patched_all_but_first) {
1066		/*
1067		 * According to Intel, this core syncing is very likely
1068		 * not necessary and we'd be safe even without it. But
1069		 * better safe than sorry (plus there's not only Intel).
1070		 */
1071		on_each_cpu(do_sync_core, NULL, 1);
1072	}
1073
1074	/*
1075	 * Third step: replace the first byte (int3) by the first byte of
1076	 * replacing opcode.
1077	 */
1078	for (i = 0; i < nr_entries; i++)
1079		text_poke(tp[i].addr, tp[i].opcode, sizeof(int3));
1080
1081	on_each_cpu(do_sync_core, NULL, 1);
1082	/*
1083	 * sync_core() implies an smp_mb() and orders this store against
1084	 * the writing of the new instruction.
1085	 */
1086	bp_patching.vec = NULL;
1087	bp_patching.nr_entries = 0;
1088}
1089
1090/**
1091 * text_poke_bp() -- update instructions on live kernel on SMP
1092 * @addr:	address to patch
1093 * @opcode:	opcode of new instruction
1094 * @len:	length to copy
1095 * @handler:	address to jump to when the temporary breakpoint is hit
1096 *
 1097 * Update a single instruction using an on-stack vector, avoiding
1098 * dynamically allocated memory. This function should be used when it is
1099 * not possible to allocate memory.
1100 */
1101void text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
1102{
1103	struct text_poke_loc tp = {
1104		.detour = handler,
1105		.addr = addr,
1106		.len = len,
1107	};
1108
1109	if (len > POKE_MAX_OPCODE_SIZE) {
1110		WARN_ONCE(1, "len is larger than %d\n", POKE_MAX_OPCODE_SIZE);
1111		return;
1112	}
1113
1114	memcpy((void *)tp.opcode, opcode, len);
1115
1116	text_poke_bp_batch(&tp, 1);
1117}
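/*
 * Illustrative usage sketch, not part of the upstream file: a caller such
 * as the jump-label code swaps a 5-byte NOP for a 5-byte JMP on a live
 * kernel roughly like this ("site" and "jmp_insn" are hypothetical names;
 * real callers build the opcode from their own metadata):
 */
#if 0	/* example only */
	mutex_lock(&text_mutex);
	/*
	 * A CPU that hits the transient INT3 is redirected by
	 * poke_int3_handler() to the handler address, here the first
	 * instruction after the patched site.
	 */
	text_poke_bp(site, jmp_insn, 5, site + 5);
	mutex_unlock(&text_mutex);
#endif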
v5.9: arch/x86/kernel/alternatives.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2#define pr_fmt(fmt) "SMP alternatives: " fmt
   3
   4#include <linux/module.h>
   5#include <linux/sched.h>
   6#include <linux/perf_event.h>
   7#include <linux/mutex.h>
   8#include <linux/list.h>
   9#include <linux/stringify.h>
  10#include <linux/highmem.h>
  11#include <linux/mm.h>
  12#include <linux/vmalloc.h>
  13#include <linux/memory.h>
  14#include <linux/stop_machine.h>
  15#include <linux/slab.h>
  16#include <linux/kdebug.h>
  17#include <linux/kprobes.h>
  18#include <linux/mmu_context.h>
  19#include <linux/bsearch.h>
  20#include <linux/sync_core.h>
  21#include <asm/text-patching.h>
  22#include <asm/alternative.h>
  23#include <asm/sections.h>
  24#include <asm/mce.h>
  25#include <asm/nmi.h>
  26#include <asm/cacheflush.h>
  27#include <asm/tlbflush.h>
  28#include <asm/insn.h>
  29#include <asm/io.h>
  30#include <asm/fixmap.h>
  31
  32int __read_mostly alternatives_patched;
  33
  34EXPORT_SYMBOL_GPL(alternatives_patched);
  35
  36#define MAX_PATCH_LEN (255-1)
  37
  38static int __initdata_or_module debug_alternative;
  39
  40static int __init debug_alt(char *str)
  41{
  42	debug_alternative = 1;
  43	return 1;
  44}
  45__setup("debug-alternative", debug_alt);
  46
  47static int noreplace_smp;
  48
  49static int __init setup_noreplace_smp(char *str)
  50{
  51	noreplace_smp = 1;
  52	return 1;
  53}
  54__setup("noreplace-smp", setup_noreplace_smp);
  55
  56#define DPRINTK(fmt, args...)						\
  57do {									\
  58	if (debug_alternative)						\
  59		printk(KERN_DEBUG pr_fmt(fmt) "\n", ##args);		\
  60} while (0)
  61
  62#define DUMP_BYTES(buf, len, fmt, args...)				\
  63do {									\
  64	if (unlikely(debug_alternative)) {				\
  65		int j;							\
  66									\
  67		if (!(len))						\
  68			break;						\
  69									\
  70		printk(KERN_DEBUG pr_fmt(fmt), ##args);			\
  71		for (j = 0; j < (len) - 1; j++)				\
  72			printk(KERN_CONT "%02hhx ", buf[j]);		\
  73		printk(KERN_CONT "%02hhx\n", buf[j]);			\
  74	}								\
  75} while (0)
  76
  77/*
  78 * Each GENERIC_NOPX is of X bytes, and defined as an array of bytes
  79 * that correspond to that nop. Getting from one nop to the next, we
  80 * add to the array the offset that is equal to the sum of all sizes of
  81 * nops preceding the one we are after.
  82 *
  83 * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the
  84 * nice symmetry of sizes of the previous nops.
  85 */
  86#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
  87static const unsigned char intelnops[] =
  88{
  89	GENERIC_NOP1,
  90	GENERIC_NOP2,
  91	GENERIC_NOP3,
  92	GENERIC_NOP4,
  93	GENERIC_NOP5,
  94	GENERIC_NOP6,
  95	GENERIC_NOP7,
  96	GENERIC_NOP8,
  97	GENERIC_NOP5_ATOMIC
  98};
  99static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
 100{
 101	NULL,
 102	intelnops,
 103	intelnops + 1,
 104	intelnops + 1 + 2,
 105	intelnops + 1 + 2 + 3,
 106	intelnops + 1 + 2 + 3 + 4,
 107	intelnops + 1 + 2 + 3 + 4 + 5,
 108	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
 109	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
 110	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
 111};
 112#endif
 113
 114#ifdef K8_NOP1
 115static const unsigned char k8nops[] =
 116{
 117	K8_NOP1,
 118	K8_NOP2,
 119	K8_NOP3,
 120	K8_NOP4,
 121	K8_NOP5,
 122	K8_NOP6,
 123	K8_NOP7,
 124	K8_NOP8,
 125	K8_NOP5_ATOMIC
 126};
 127static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
 128{
 129	NULL,
 130	k8nops,
 131	k8nops + 1,
 132	k8nops + 1 + 2,
 133	k8nops + 1 + 2 + 3,
 134	k8nops + 1 + 2 + 3 + 4,
 135	k8nops + 1 + 2 + 3 + 4 + 5,
 136	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
 137	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
 138	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
 139};
 140#endif
 141
 142#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
 143static const unsigned char k7nops[] =
 144{
 145	K7_NOP1,
 146	K7_NOP2,
 147	K7_NOP3,
 148	K7_NOP4,
 149	K7_NOP5,
 150	K7_NOP6,
 151	K7_NOP7,
 152	K7_NOP8,
 153	K7_NOP5_ATOMIC
 154};
 155static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
 156{
 157	NULL,
 158	k7nops,
 159	k7nops + 1,
 160	k7nops + 1 + 2,
 161	k7nops + 1 + 2 + 3,
 162	k7nops + 1 + 2 + 3 + 4,
 163	k7nops + 1 + 2 + 3 + 4 + 5,
 164	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
 165	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
 166	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
 167};
 168#endif
 169
 170#ifdef P6_NOP1
 171static const unsigned char p6nops[] =
 172{
 173	P6_NOP1,
 174	P6_NOP2,
 175	P6_NOP3,
 176	P6_NOP4,
 177	P6_NOP5,
 178	P6_NOP6,
 179	P6_NOP7,
 180	P6_NOP8,
 181	P6_NOP5_ATOMIC
 182};
 183static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
 184{
 185	NULL,
 186	p6nops,
 187	p6nops + 1,
 188	p6nops + 1 + 2,
 189	p6nops + 1 + 2 + 3,
 190	p6nops + 1 + 2 + 3 + 4,
 191	p6nops + 1 + 2 + 3 + 4 + 5,
 192	p6nops + 1 + 2 + 3 + 4 + 5 + 6,
 193	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
 194	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
 195};
 196#endif
 197
 198/* Initialize these to a safe default */
 199#ifdef CONFIG_X86_64
 200const unsigned char * const *ideal_nops = p6_nops;
 201#else
 202const unsigned char * const *ideal_nops = intel_nops;
 203#endif
 204
 205void __init arch_init_ideal_nops(void)
 206{
 207	switch (boot_cpu_data.x86_vendor) {
 208	case X86_VENDOR_INTEL:
 209		/*
 210		 * Due to a decoder implementation quirk, some
 211		 * specific Intel CPUs actually perform better with
 212		 * the "k8_nops" than with the SDM-recommended NOPs.
 213		 */
 214		if (boot_cpu_data.x86 == 6 &&
 215		    boot_cpu_data.x86_model >= 0x0f &&
 216		    boot_cpu_data.x86_model != 0x1c &&
 217		    boot_cpu_data.x86_model != 0x26 &&
 218		    boot_cpu_data.x86_model != 0x27 &&
 219		    boot_cpu_data.x86_model < 0x30) {
 220			ideal_nops = k8_nops;
 221		} else if (boot_cpu_has(X86_FEATURE_NOPL)) {
 222			   ideal_nops = p6_nops;
 223		} else {
 224#ifdef CONFIG_X86_64
 225			ideal_nops = k8_nops;
 226#else
 227			ideal_nops = intel_nops;
 228#endif
 229		}
 230		break;
 231
 232	case X86_VENDOR_HYGON:
 233		ideal_nops = p6_nops;
 234		return;
 235
 236	case X86_VENDOR_AMD:
 237		if (boot_cpu_data.x86 > 0xf) {
 238			ideal_nops = p6_nops;
 239			return;
 240		}
 241
 242		fallthrough;
 243
 244	default:
 245#ifdef CONFIG_X86_64
 246		ideal_nops = k8_nops;
 247#else
 248		if (boot_cpu_has(X86_FEATURE_K8))
 249			ideal_nops = k8_nops;
 250		else if (boot_cpu_has(X86_FEATURE_K7))
 251			ideal_nops = k7_nops;
 252		else
 253			ideal_nops = intel_nops;
 254#endif
 255	}
 256}
 257
 258/* Use this to add nops to a buffer, then text_poke the whole buffer. */
 259static void __init_or_module add_nops(void *insns, unsigned int len)
 260{
 261	while (len > 0) {
 262		unsigned int noplen = len;
 263		if (noplen > ASM_NOP_MAX)
 264			noplen = ASM_NOP_MAX;
 265		memcpy(insns, ideal_nops[noplen], noplen);
 266		insns += noplen;
 267		len -= noplen;
 268	}
 269}
 270
 271extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
 272extern s32 __smp_locks[], __smp_locks_end[];
 273void text_poke_early(void *addr, const void *opcode, size_t len);
 274
 275/*
 276 * Are we looking at a near JMP with a 1 or 4-byte displacement.
 277 */
 278static inline bool is_jmp(const u8 opcode)
 279{
 280	return opcode == 0xeb || opcode == 0xe9;
 281}
 282
 283static void __init_or_module
 284recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insn_buff)
 285{
 286	u8 *next_rip, *tgt_rip;
 287	s32 n_dspl, o_dspl;
 288	int repl_len;
 289
 290	if (a->replacementlen != 5)
 291		return;
 292
 293	o_dspl = *(s32 *)(insn_buff + 1);
 294
 295	/* next_rip of the replacement JMP */
 296	next_rip = repl_insn + a->replacementlen;
 297	/* target rip of the replacement JMP */
 298	tgt_rip  = next_rip + o_dspl;
 299	n_dspl = tgt_rip - orig_insn;
 300
 301	DPRINTK("target RIP: %px, new_displ: 0x%x", tgt_rip, n_dspl);
 302
 303	if (tgt_rip - orig_insn >= 0) {
 304		if (n_dspl - 2 <= 127)
 305			goto two_byte_jmp;
 306		else
 307			goto five_byte_jmp;
 308	/* negative offset */
 309	} else {
 310		if (((n_dspl - 2) & 0xff) == (n_dspl - 2))
 311			goto two_byte_jmp;
 312		else
 313			goto five_byte_jmp;
 314	}
 315
 316two_byte_jmp:
 317	n_dspl -= 2;
 318
 319	insn_buff[0] = 0xeb;
 320	insn_buff[1] = (s8)n_dspl;
 321	add_nops(insn_buff + 2, 3);
 322
 323	repl_len = 2;
 324	goto done;
 325
 326five_byte_jmp:
 327	n_dspl -= 5;
 328
 329	insn_buff[0] = 0xe9;
 330	*(s32 *)&insn_buff[1] = n_dspl;
 331
 332	repl_len = 5;
 333
 334done:
 335
 336	DPRINTK("final displ: 0x%08x, JMP 0x%lx",
 337		n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
 338}
 339
 340/*
 341 * "noinline" to cause control flow change and thus invalidate I$ and
 342 * cause refetch after modification.
 343 */
 344static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr)
 345{
 346	unsigned long flags;
 347	int i;
 348
 349	for (i = 0; i < a->padlen; i++) {
 350		if (instr[i] != 0x90)
 351			return;
 352	}
 353
 354	local_irq_save(flags);
 355	add_nops(instr + (a->instrlen - a->padlen), a->padlen);
 356	local_irq_restore(flags);
 357
 358	DUMP_BYTES(instr, a->instrlen, "%px: [%d:%d) optimized NOPs: ",
 359		   instr, a->instrlen - a->padlen, a->padlen);
 360}
 361
 362/*
 363 * Replace instructions with better alternatives for this CPU type. This runs
 364 * before SMP is initialized to avoid SMP problems with self modifying code.
  365 * This implies that asymmetric systems where APs have fewer capabilities than
 366 * the boot processor are not handled. Tough. Make sure you disable such
 367 * features by hand.
 368 *
 369 * Marked "noinline" to cause control flow change and thus insn cache
 370 * to refetch changed I$ lines.
 371 */
 372void __init_or_module noinline apply_alternatives(struct alt_instr *start,
 373						  struct alt_instr *end)
 374{
 375	struct alt_instr *a;
 376	u8 *instr, *replacement;
 377	u8 insn_buff[MAX_PATCH_LEN];
 378
 379	DPRINTK("alt table %px, -> %px", start, end);
 380	/*
 381	 * The scan order should be from start to end. A later scanned
 382	 * alternative code can overwrite previously scanned alternative code.
 383	 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
 384	 * patch code.
 385	 *
 386	 * So be careful if you want to change the scan order to any other
 387	 * order.
 388	 */
 389	for (a = start; a < end; a++) {
 390		int insn_buff_sz = 0;
 391
 392		instr = (u8 *)&a->instr_offset + a->instr_offset;
 393		replacement = (u8 *)&a->repl_offset + a->repl_offset;
 394		BUG_ON(a->instrlen > sizeof(insn_buff));
 395		BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
 396		if (!boot_cpu_has(a->cpuid)) {
 397			if (a->padlen > 1)
 398				optimize_nops(a, instr);
 399
 400			continue;
 401		}
 402
 403		DPRINTK("feat: %d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d), pad: %d",
 404			a->cpuid >> 5,
 405			a->cpuid & 0x1f,
 406			instr, instr, a->instrlen,
 407			replacement, a->replacementlen, a->padlen);
 408
 409		DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr);
 410		DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement);
 411
 412		memcpy(insn_buff, replacement, a->replacementlen);
 413		insn_buff_sz = a->replacementlen;
 414
 415		/*
  416		 * 0xe8 is a relative CALL; fix the offset.
 417		 *
 418		 * Instruction length is checked before the opcode to avoid
 419		 * accessing uninitialized bytes for zero-length replacements.
 420		 */
 421		if (a->replacementlen == 5 && *insn_buff == 0xe8) {
 422			*(s32 *)(insn_buff + 1) += replacement - instr;
 423			DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
 424				*(s32 *)(insn_buff + 1),
 425				(unsigned long)instr + *(s32 *)(insn_buff + 1) + 5);
 426		}
 427
 428		if (a->replacementlen && is_jmp(replacement[0]))
 429			recompute_jump(a, instr, replacement, insn_buff);
 430
 431		if (a->instrlen > a->replacementlen) {
 432			add_nops(insn_buff + a->replacementlen,
 433				 a->instrlen - a->replacementlen);
 434			insn_buff_sz += a->instrlen - a->replacementlen;
 435		}
 436		DUMP_BYTES(insn_buff, insn_buff_sz, "%px: final_insn: ", instr);
 437
 438		text_poke_early(instr, insn_buff, insn_buff_sz);
 439	}
 440}
 441
 442#ifdef CONFIG_SMP
 443static void alternatives_smp_lock(const s32 *start, const s32 *end,
 444				  u8 *text, u8 *text_end)
 445{
 446	const s32 *poff;
 447
 448	for (poff = start; poff < end; poff++) {
 449		u8 *ptr = (u8 *)poff + *poff;
 450
 451		if (!*poff || ptr < text || ptr >= text_end)
 452			continue;
 453		/* turn DS segment override prefix into lock prefix */
 454		if (*ptr == 0x3e)
 455			text_poke(ptr, ((unsigned char []){0xf0}), 1);
 456	}
 457}
 458
 459static void alternatives_smp_unlock(const s32 *start, const s32 *end,
 460				    u8 *text, u8 *text_end)
 461{
 462	const s32 *poff;
 463
 464	for (poff = start; poff < end; poff++) {
 465		u8 *ptr = (u8 *)poff + *poff;
 466
 467		if (!*poff || ptr < text || ptr >= text_end)
 468			continue;
 469		/* turn lock prefix into DS segment override prefix */
 470		if (*ptr == 0xf0)
 471			text_poke(ptr, ((unsigned char []){0x3E}), 1);
 472	}
 473}
 474
 475struct smp_alt_module {
 476	/* what is this ??? */
 477	struct module	*mod;
 478	char		*name;
 479
 480	/* ptrs to lock prefixes */
 481	const s32	*locks;
 482	const s32	*locks_end;
 483
 484	/* .text segment, needed to avoid patching init code ;) */
 485	u8		*text;
 486	u8		*text_end;
 487
 488	struct list_head next;
 489};
 490static LIST_HEAD(smp_alt_modules);
 491static bool uniproc_patched = false;	/* protected by text_mutex */
 492
 493void __init_or_module alternatives_smp_module_add(struct module *mod,
 494						  char *name,
 495						  void *locks, void *locks_end,
 496						  void *text,  void *text_end)
 497{
 498	struct smp_alt_module *smp;
 499
 500	mutex_lock(&text_mutex);
 501	if (!uniproc_patched)
 502		goto unlock;
 503
 504	if (num_possible_cpus() == 1)
 505		/* Don't bother remembering, we'll never have to undo it. */
 506		goto smp_unlock;
 507
 508	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
 509	if (NULL == smp)
 510		/* we'll run the (safe but slow) SMP code then ... */
 511		goto unlock;
 512
 513	smp->mod	= mod;
 514	smp->name	= name;
 515	smp->locks	= locks;
 516	smp->locks_end	= locks_end;
 517	smp->text	= text;
 518	smp->text_end	= text_end;
  519	DPRINTK("locks %p -> %p, text %p -> %p, name %s",
 520		smp->locks, smp->locks_end,
 521		smp->text, smp->text_end, smp->name);
 522
 523	list_add_tail(&smp->next, &smp_alt_modules);
 524smp_unlock:
 525	alternatives_smp_unlock(locks, locks_end, text, text_end);
 526unlock:
 527	mutex_unlock(&text_mutex);
 528}
 529
 530void __init_or_module alternatives_smp_module_del(struct module *mod)
 531{
 532	struct smp_alt_module *item;
 533
 534	mutex_lock(&text_mutex);
 535	list_for_each_entry(item, &smp_alt_modules, next) {
 536		if (mod != item->mod)
 537			continue;
 538		list_del(&item->next);
 539		kfree(item);
 540		break;
 541	}
 542	mutex_unlock(&text_mutex);
 543}
 544
 545void alternatives_enable_smp(void)
 546{
 547	struct smp_alt_module *mod;
 548
 549	/* Why bother if there are no other CPUs? */
 550	BUG_ON(num_possible_cpus() == 1);
 551
 552	mutex_lock(&text_mutex);
 553
 554	if (uniproc_patched) {
 555		pr_info("switching to SMP code\n");
 556		BUG_ON(num_online_cpus() != 1);
 557		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
 558		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
 559		list_for_each_entry(mod, &smp_alt_modules, next)
 560			alternatives_smp_lock(mod->locks, mod->locks_end,
 561					      mod->text, mod->text_end);
 562		uniproc_patched = false;
 563	}
 564	mutex_unlock(&text_mutex);
 565}
 566
 567/*
 568 * Return 1 if the address range is reserved for SMP-alternatives.
 569 * Must hold text_mutex.
 570 */
 571int alternatives_text_reserved(void *start, void *end)
 572{
 573	struct smp_alt_module *mod;
 574	const s32 *poff;
 575	u8 *text_start = start;
 576	u8 *text_end = end;
 577
 578	lockdep_assert_held(&text_mutex);
 579
 580	list_for_each_entry(mod, &smp_alt_modules, next) {
 581		if (mod->text > text_end || mod->text_end < text_start)
 582			continue;
 583		for (poff = mod->locks; poff < mod->locks_end; poff++) {
 584			const u8 *ptr = (const u8 *)poff + *poff;
 585
 586			if (text_start <= ptr && text_end > ptr)
 587				return 1;
 588		}
 589	}
 590
 591	return 0;
 592}
 593#endif /* CONFIG_SMP */
 594
 595#ifdef CONFIG_PARAVIRT
 596void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
 597				     struct paravirt_patch_site *end)
 598{
 599	struct paravirt_patch_site *p;
 600	char insn_buff[MAX_PATCH_LEN];
 601
 602	for (p = start; p < end; p++) {
 603		unsigned int used;
 604
 605		BUG_ON(p->len > MAX_PATCH_LEN);
 606		/* prep the buffer with the original instructions */
 607		memcpy(insn_buff, p->instr, p->len);
 608		used = pv_ops.init.patch(p->type, insn_buff, (unsigned long)p->instr, p->len);
 609
 610		BUG_ON(used > p->len);
 611
 612		/* Pad the rest with nops */
 613		add_nops(insn_buff + used, p->len - used);
 614		text_poke_early(p->instr, insn_buff, p->len);
 615	}
 616}
 617extern struct paravirt_patch_site __start_parainstructions[],
 618	__stop_parainstructions[];
 619#endif	/* CONFIG_PARAVIRT */
 620
 621/*
 622 * Self-test for the INT3 based CALL emulation code.
 623 *
 624 * This exercises int3_emulate_call() to make sure INT3 pt_regs are set up
 625 * properly and that there is a stack gap between the INT3 frame and the
 626 * previous context. Without this gap doing a virtual PUSH on the interrupted
 627 * stack would corrupt the INT3 IRET frame.
 628 *
 629 * See entry_{32,64}.S for more details.
 630 */
 631
 632/*
 633 * We define the int3_magic() function in assembly to control the calling
 634 * convention such that we can 'call' it from assembly.
 635 */
 636
 637extern void int3_magic(unsigned int *ptr); /* defined in asm */
 638
 639asm (
 640"	.pushsection	.init.text, \"ax\", @progbits\n"
 641"	.type		int3_magic, @function\n"
 642"int3_magic:\n"
 643"	movl	$1, (%" _ASM_ARG1 ")\n"
 644"	ret\n"
 645"	.size		int3_magic, .-int3_magic\n"
 646"	.popsection\n"
 647);
 648
 649extern __initdata unsigned long int3_selftest_ip; /* defined in asm below */
 650
 651static int __init
 652int3_exception_notify(struct notifier_block *self, unsigned long val, void *data)
 653{
 654	struct die_args *args = data;
 655	struct pt_regs *regs = args->regs;
 656
 657	if (!regs || user_mode(regs))
 658		return NOTIFY_DONE;
 659
 660	if (val != DIE_INT3)
 661		return NOTIFY_DONE;
 662
 663	if (regs->ip - INT3_INSN_SIZE != int3_selftest_ip)
 664		return NOTIFY_DONE;
 665
 666	int3_emulate_call(regs, (unsigned long)&int3_magic);
 667	return NOTIFY_STOP;
 668}
 669
 670static void __init int3_selftest(void)
 671{
 672	static __initdata struct notifier_block int3_exception_nb = {
 673		.notifier_call	= int3_exception_notify,
 674		.priority	= INT_MAX-1, /* last */
 675	};
 676	unsigned int val = 0;
 677
 678	BUG_ON(register_die_notifier(&int3_exception_nb));
 679
 680	/*
 681	 * Basically: int3_magic(&val); but really complicated :-)
 682	 *
 683	 * Stick the address of the INT3 instruction into int3_selftest_ip,
 684	 * then trigger the INT3, padded with NOPs to match a CALL instruction
 685	 * length.
 686	 */
 687	asm volatile ("1: int3; nop; nop; nop; nop\n\t"
 688		      ".pushsection .init.data,\"aw\"\n\t"
 689		      ".align " __ASM_SEL(4, 8) "\n\t"
 690		      ".type int3_selftest_ip, @object\n\t"
 691		      ".size int3_selftest_ip, " __ASM_SEL(4, 8) "\n\t"
 692		      "int3_selftest_ip:\n\t"
 693		      __ASM_SEL(.long, .quad) " 1b\n\t"
 694		      ".popsection\n\t"
 695		      : ASM_CALL_CONSTRAINT
 696		      : __ASM_SEL_RAW(a, D) (&val)
 697		      : "memory");
 698
 699	BUG_ON(val != 1);
 700
 701	unregister_die_notifier(&int3_exception_nb);
 702}
 703
 704void __init alternative_instructions(void)
 705{
 706	int3_selftest();
 707
 708	/*
  709	 * The patching is not fully atomic, so try to avoid local
  710	 * interrupts that might execute the to-be-patched code.
 711	 * Other CPUs are not running.
 712	 */
 713	stop_nmi();
 714
 715	/*
 716	 * Don't stop machine check exceptions while patching.
 717	 * MCEs only happen when something got corrupted and in this
 718	 * case we must do something about the corruption.
 719	 * Ignoring it is worse than an unlikely patching race.
 720	 * Also machine checks tend to be broadcast and if one CPU
 721	 * goes into machine check the others follow quickly, so we don't
  722	 * expect a machine check to cause undue problems during code
 723	 * patching.
 724	 */
 725
 726	apply_alternatives(__alt_instructions, __alt_instructions_end);
 727
 728#ifdef CONFIG_SMP
 729	/* Patch to UP if other cpus not imminent. */
 730	if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
 731		uniproc_patched = true;
 732		alternatives_smp_module_add(NULL, "core kernel",
 733					    __smp_locks, __smp_locks_end,
 734					    _text, _etext);
 735	}
 736
 737	if (!uniproc_patched || num_possible_cpus() == 1) {
 738		free_init_pages("SMP alternatives",
 739				(unsigned long)__smp_locks,
 740				(unsigned long)__smp_locks_end);
 741	}
 742#endif
 743
 744	apply_paravirt(__parainstructions, __parainstructions_end);
 745
 746	restart_nmi();
 747	alternatives_patched = 1;
 748}
 749
 750/**
 751 * text_poke_early - Update instructions on a live kernel at boot time
 752 * @addr: address to modify
 753 * @opcode: source of the copy
 754 * @len: length to copy
 755 *
 756 * When you use this code to patch more than one byte of an instruction
 757 * you need to make sure that other CPUs cannot execute this code in parallel.
 758 * Also no thread must be currently preempted in the middle of these
 759 * instructions. And on the local CPU you need to be protected against NMI or
 760 * MCE handlers seeing an inconsistent instruction while you patch.
 761 */
 762void __init_or_module text_poke_early(void *addr, const void *opcode,
 763				      size_t len)
 764{
 765	unsigned long flags;
 766
 767	if (boot_cpu_has(X86_FEATURE_NX) &&
 768	    is_module_text_address((unsigned long)addr)) {
 769		/*
  770		 * Module text is initially marked as non-executable, so the
 771		 * code cannot be running and speculative code-fetches are
 772		 * prevented. Just change the code.
 773		 */
 774		memcpy(addr, opcode, len);
 775	} else {
 776		local_irq_save(flags);
 777		memcpy(addr, opcode, len);
 778		local_irq_restore(flags);
 779		sync_core();
 780
 781		/*
 782		 * Could also do a CLFLUSH here to speed up CPU recovery; but
 783		 * that causes hangs on some VIA CPUs.
 784		 */
 785	}
 786}
 787
 788typedef struct {
 789	struct mm_struct *mm;
 790} temp_mm_state_t;
 791
 792/*
  793 * Using a temporary mm allows us to set temporary mappings that are not accessible
 794 * by other CPUs. Such mappings are needed to perform sensitive memory writes
 795 * that override the kernel memory protections (e.g., W^X), without exposing the
 796 * temporary page-table mappings that are required for these write operations to
  797 * other CPUs. Using a temporary mm also allows us to avoid TLB shootdowns when the
 798 * mapping is torn down.
 799 *
 800 * Context: The temporary mm needs to be used exclusively by a single core. To
  801 *          harden security, IRQs must be disabled while the temporary mm is
 802 *          loaded, thereby preventing interrupt handler bugs from overriding
 803 *          the kernel memory protection.
 804 */
 805static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
 806{
 807	temp_mm_state_t temp_state;
 808
 809	lockdep_assert_irqs_disabled();
 810	temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
 811	switch_mm_irqs_off(NULL, mm, current);
 812
 813	/*
 814	 * If breakpoints are enabled, disable them while the temporary mm is
 815	 * used. Userspace might set up watchpoints on addresses that are used
 816	 * in the temporary mm, which would lead to wrong signals being sent or
 817	 * crashes.
 818	 *
 819	 * Note that breakpoints are not disabled selectively, which also causes
 820	 * kernel breakpoints (e.g., perf's) to be disabled. This might be
 821	 * undesirable, but still seems reasonable as the code that runs in the
 822	 * temporary mm should be short.
 823	 */
 824	if (hw_breakpoint_active())
 825		hw_breakpoint_disable();
 826
 827	return temp_state;
 828}
 829
 830static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
 831{
 832	lockdep_assert_irqs_disabled();
 833	switch_mm_irqs_off(NULL, prev_state.mm, current);
 834
 835	/*
 836	 * Restore the breakpoints if they were disabled before the temporary mm
 837	 * was loaded.
 838	 */
 839	if (hw_breakpoint_active())
 840		hw_breakpoint_restore();
 841}
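
/*
 * Illustrative sketch (not part of the original file): the intended pairing
 * of use_temporary_mm() and unuse_temporary_mm(). IRQs stay disabled for the
 * whole window and the previous mm is restored on the same CPU, mirroring
 * what __text_poke() below does with poking_mm. example_mm is a stand-in for
 * a preallocated mm.
 */
#if 0
static void example_temp_mm_window(struct mm_struct *example_mm)
{
	temp_mm_state_t prev;
	unsigned long flags;

	local_irq_save(flags);
	prev = use_temporary_mm(example_mm);

	/* ... access memory through example_mm's private mappings ... */

	unuse_temporary_mm(prev);
	local_irq_restore(flags);
}
#endif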
 842
 843__ro_after_init struct mm_struct *poking_mm;
 844__ro_after_init unsigned long poking_addr;
 845
 846static void *__text_poke(void *addr, const void *opcode, size_t len)
 847{
 848	bool cross_page_boundary = offset_in_page(addr) + len > PAGE_SIZE;
 849	struct page *pages[2] = {NULL};
 850	temp_mm_state_t prev;
 851	unsigned long flags;
 852	pte_t pte, *ptep;
 853	spinlock_t *ptl;
 854	pgprot_t pgprot;
 855
 856	/*
 857	 * While the boot memory allocator is running we cannot use struct pages as
 858	 * they are not yet initialized. There is no way to recover.
 859	 */
 860	BUG_ON(!after_bootmem);
 861
 862	if (!core_kernel_text((unsigned long)addr)) {
 863		pages[0] = vmalloc_to_page(addr);
 864		if (cross_page_boundary)
 865			pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
 866	} else {
 867		pages[0] = virt_to_page(addr);
 868		WARN_ON(!PageReserved(pages[0]));
 869		if (cross_page_boundary)
 870			pages[1] = virt_to_page(addr + PAGE_SIZE);
 871	}
 872	/*
 873	 * If something went wrong, crash and burn since recovery paths are not
 874	 * implemented.
 875	 */
 876	BUG_ON(!pages[0] || (cross_page_boundary && !pages[1]));
 877
 877
 878	/*
 879	 * Map the page without the global bit, as TLB flushing is done with
 880	 * flush_tlb_mm_range(), which is intended for non-global PTEs.
 881	 */
 882	pgprot = __pgprot(pgprot_val(PAGE_KERNEL) & ~_PAGE_GLOBAL);
 883
 884	/*
 885	 * The lock is not really needed, but this allows us to avoid open-coding.
 886	 */
 887	ptep = get_locked_pte(poking_mm, poking_addr, &ptl);
 888
 889	/*
 890	 * This must not fail; preallocated in poking_init().
 891	 */
 892	VM_BUG_ON(!ptep);
 893
 894	local_irq_save(flags);
 895
 896	pte = mk_pte(pages[0], pgprot);
 897	set_pte_at(poking_mm, poking_addr, ptep, pte);
 898
 899	if (cross_page_boundary) {
 900		pte = mk_pte(pages[1], pgprot);
 901		set_pte_at(poking_mm, poking_addr + PAGE_SIZE, ptep + 1, pte);
 902	}
 903
 904	/*
 905	 * Loading the temporary mm behaves as a compiler barrier, which
 906	 * guarantees that the PTE will be set at the time memcpy() is done.
 907	 */
 908	prev = use_temporary_mm(poking_mm);
 909
 910	kasan_disable_current();
 911	memcpy((u8 *)poking_addr + offset_in_page(addr), opcode, len);
 912	kasan_enable_current();
 913
 914	/*
 915	 * Ensure that the PTE is only cleared after the instructions of memcpy
 916	 * were issued by using a compiler barrier.
 917	 */
 918	barrier();
 919
 920	pte_clear(poking_mm, poking_addr, ptep);
 921	if (cross_page_boundary)
 922		pte_clear(poking_mm, poking_addr + PAGE_SIZE, ptep + 1);
 923
 924	/*
 925	 * Loading the previous page-table hierarchy requires a serializing
 926	 * instruction that already allows the core to see the updated version.
 927	 * Xen-PV is assumed to serialize execution in a similar manner.
 928	 */
 929	unuse_temporary_mm(prev);
 930
 931	/*
 932	 * Flushing the TLB might involve IPIs, which would require enabled
 933	 * IRQs, but not if the mm is not in use, as is the case at this point.
 934	 */
 935	flush_tlb_mm_range(poking_mm, poking_addr, poking_addr +
 936			   (cross_page_boundary ? 2 : 1) * PAGE_SIZE,
 937			   PAGE_SHIFT, false);
 938
 939	/*
 940	 * If the text does not match what we just wrote then something is
 941	 * fundamentally screwy; there's nothing we can really do about that.
 942	 */
 943	BUG_ON(memcmp(addr, opcode, len));
 944
 945	local_irq_restore(flags);
 946	pte_unmap_unlock(ptep, ptl);
 947	return addr;
 948}
 949
 950/**
 951 * text_poke - Update instructions on a live kernel
 952 * @addr: address to modify
 953 * @opcode: source of the copy
 954 * @len: length to copy
 955 *
 956 * Only atomic text poke/set should be allowed when not doing early patching.
 957 * It means the size must be writable atomically and the address must be aligned
 958 * in a way that permits an atomic write. It also makes sure we fit on a single
 959 * page.
 960 *
 961 * Note that the caller must ensure that if the modified code is part of a
 962 * module, the module would not be removed during poking. This can be achieved
 963 * by registering a module notifier, and ordering module removal and patching
 964 * through a mutex.
 965 */
 966void *text_poke(void *addr, const void *opcode, size_t len)
 967{
 968	lockdep_assert_held(&text_mutex);
 969
 970	return __text_poke(addr, opcode, len);
 971}
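
/*
 * Illustrative sketch (not part of the original file): a caller updating a
 * single byte with text_poke() while holding text_mutex, as the lockdep
 * assertion above requires. A one-byte write is atomic, which is what the
 * comment above demands outside of early patching. example_site and
 * example_byte are assumptions made up for this example.
 */
#if 0
static void example_text_poke_byte(void *example_site, u8 example_byte)
{
	mutex_lock(&text_mutex);
	text_poke(example_site, &example_byte, 1);
	mutex_unlock(&text_mutex);
}
#endif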
 972
 973/**
 974 * text_poke_kgdb - Update instructions on a live kernel by kgdb
 975 * @addr: address to modify
 976 * @opcode: source of the copy
 977 * @len: length to copy
 978 *
 979 * Only atomic text poke/set should be allowed when not doing early patching.
 980 * It means the size must be writable atomically and the address must be aligned
 981 * in a way that permits an atomic write. It also makes sure we fit on a single
 982 * page.
 983 *
 984 * Context: should only be used by kgdb, which ensures no other core is running,
 985 *	    despite the fact it does not hold the text_mutex.
 986 */
 987void *text_poke_kgdb(void *addr, const void *opcode, size_t len)
 988{
 989	return __text_poke(addr, opcode, len);
 990}
 991
 992static void do_sync_core(void *info)
 993{
 994	sync_core();
 995}
 996
 997void text_poke_sync(void)
 998{
 999	on_each_cpu(do_sync_core, NULL, 1);
1000}
1001
1002struct text_poke_loc {
1003	s32 rel_addr; /* addr := _stext + rel_addr */
1004	s32 rel32;
1005	u8 opcode;
1006	const u8 text[POKE_MAX_OPCODE_SIZE];
1007	u8 old;
1008};
1009
1010struct bp_patching_desc {
1011	struct text_poke_loc *vec;
1012	int nr_entries;
1013	atomic_t refs;
1014};
1015
1016static struct bp_patching_desc *bp_desc;
1017
1018static __always_inline
1019struct bp_patching_desc *try_get_desc(struct bp_patching_desc **descp)
1020{
1021	struct bp_patching_desc *desc = __READ_ONCE(*descp); /* rcu_dereference */
1022
1023	if (!desc || !arch_atomic_inc_not_zero(&desc->refs))
1024		return NULL;
1025
1026	return desc;
1027}
1028
1029static __always_inline void put_desc(struct bp_patching_desc *desc)
1030{
1031	smp_mb__before_atomic();
1032	arch_atomic_dec(&desc->refs);
1033}
1034
1035static __always_inline void *text_poke_addr(struct text_poke_loc *tp)
1036{
1037	return _stext + tp->rel_addr;
1038}
1039
1040static __always_inline int patch_cmp(const void *key, const void *elt)
1041{
1042	struct text_poke_loc *tp = (struct text_poke_loc *) elt;
1043
1044	if (key < text_poke_addr(tp))
1045		return -1;
1046	if (key > text_poke_addr(tp))
1047		return 1;
1048	return 0;
1049}
1050
1051noinstr int poke_int3_handler(struct pt_regs *regs)
1052{
1053	struct bp_patching_desc *desc;
1054	struct text_poke_loc *tp;
1055	int len, ret = 0;
1056	void *ip;
1057
1058	if (user_mode(regs))
1059		return 0;
1060
1061	/*
1062	 * Having observed our INT3 instruction, we now must observe
1063	 * bp_desc:
1064	 *
1065	 *	bp_desc = desc			INT3
1066	 *	WMB				RMB
1067	 *	write INT3			if (desc)
1068	 */
1069	smp_rmb();
1070
1071	desc = try_get_desc(&bp_desc);
1072	if (!desc)
1073		return 0;
1074
1075	/*
1076	 * Discount the INT3. See text_poke_bp_batch().
1077	 */
1078	ip = (void *) regs->ip - INT3_INSN_SIZE;
1079
1080	/*
1081	 * Skip the binary search if there is a single member in the vector.
1082	 */
1083	if (unlikely(desc->nr_entries > 1)) {
1084		tp = __inline_bsearch(ip, desc->vec, desc->nr_entries,
1085				      sizeof(struct text_poke_loc),
1086				      patch_cmp);
1087		if (!tp)
1088			goto out_put;
1089	} else {
1090		tp = desc->vec;
1091		if (text_poke_addr(tp) != ip)
1092			goto out_put;
1093	}
1094
1095	len = text_opcode_size(tp->opcode);
1096	ip += len;
1097
1098	switch (tp->opcode) {
1099	case INT3_INSN_OPCODE:
1100		/*
1101		 * Someone poked an explicit INT3, they'll want to handle it,
1102		 * do not consume.
1103		 */
1104		goto out_put;
1105
1106	case CALL_INSN_OPCODE:
1107		int3_emulate_call(regs, (long)ip + tp->rel32);
1108		break;
1109
1110	case JMP32_INSN_OPCODE:
1111	case JMP8_INSN_OPCODE:
1112		int3_emulate_jmp(regs, (long)ip + tp->rel32);
1113		break;
1114
1115	default:
1116		BUG();
1117	}
1118
1119	ret = 1;
1120
1121out_put:
1122	put_desc(desc);
1123	return ret;
1124}
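
/*
 * Illustrative sketch (not part of the original file): the #BP trap path is
 * expected to consult poke_int3_handler() before any other int3 handling.
 * example_do_int3 is a made-up name; the real dispatch lives in the arch
 * trap code.
 */
#if 0
static void example_do_int3(struct pt_regs *regs)
{
	/*
	 * A non-zero return means the trap hit a text_poke_bp() site and
	 * regs have been updated to emulate the replacement instruction.
	 */
	if (poke_int3_handler(regs))
		return;

	/* ... fall through to regular breakpoint/kprobe handling ... */
}
#endif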
1125
1126#define TP_VEC_MAX (PAGE_SIZE / sizeof(struct text_poke_loc))
1127static struct text_poke_loc tp_vec[TP_VEC_MAX];
1128static int tp_vec_nr;
1129
1130/**
1131 * text_poke_bp_batch() -- update instructions on live kernel on SMP
1132 * @tp:			vector of instructions to patch
1133 * @nr_entries:		number of entries in the vector
1134 *
1135 * Modify multi-byte instructions by using an int3 breakpoint on SMP.
1136 * We completely avoid stop_machine() here, and achieve the
1137 * synchronization using an int3 breakpoint.
1138 *
1139 * The way it is done:
1140 *	- For each entry in the vector:
1141 *		- add an int3 trap to the address that will be patched
1142 *	- sync cores
1143 *	- For each entry in the vector:
1144 *		- update all but the first byte of the patched range
1145 *	- sync cores
1146 *	- For each entry in the vector:
1147 *		- replace the first byte (int3) with the first byte of
1148 *		  the replacement opcode
1149 *	- sync cores
1150 */
1151static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
1152{
1153	struct bp_patching_desc desc = {
1154		.vec = tp,
1155		.nr_entries = nr_entries,
1156		.refs = ATOMIC_INIT(1),
1157	};
1158	unsigned char int3 = INT3_INSN_OPCODE;
1159	unsigned int i;
1160	int do_sync;
1161
1162	lockdep_assert_held(&text_mutex);
1163
1164	smp_store_release(&bp_desc, &desc); /* rcu_assign_pointer */
1165
1166	/*
1167	 * Corresponding read barrier in int3 notifier for making sure the
1168	 * nr_entries and handler are correctly ordered wrt. patching.
1169	 */
1170	smp_wmb();
1171
1172	/*
1173	 * First step: add an int3 trap to the address that will be patched.
1174	 */
1175	for (i = 0; i < nr_entries; i++) {
1176		tp[i].old = *(u8 *)text_poke_addr(&tp[i]);
1177		text_poke(text_poke_addr(&tp[i]), &int3, INT3_INSN_SIZE);
1178	}
1179
1180	text_poke_sync();
1181
1182	/*
1183	 * Second step: update all but the first byte of the patched range.
1184	 */
1185	for (do_sync = 0, i = 0; i < nr_entries; i++) {
1186		u8 old[POKE_MAX_OPCODE_SIZE] = { tp[i].old, };
1187		int len = text_opcode_size(tp[i].opcode);
1188
1189		if (len - INT3_INSN_SIZE > 0) {
1190			memcpy(old + INT3_INSN_SIZE,
1191			       text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
1192			       len - INT3_INSN_SIZE);
1193			text_poke(text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
1194				  (const char *)tp[i].text + INT3_INSN_SIZE,
1195				  len - INT3_INSN_SIZE);
1196			do_sync++;
1197		}
1198
1199		/*
1200		 * Emit a perf event to record the text poke, primarily to
1201		 * support Intel PT decoding which must walk the executable code
1202		 * to reconstruct the trace. The flow up to here is:
1203		 *   - write INT3 byte
1204		 *   - IPI-SYNC
1205		 *   - write instruction tail
1206		 * At this point the actual control flow will be through the
1207		 * INT3 and handler and not hit the old or new instruction.
1208		 * Intel PT outputs FUP/TIP packets for the INT3, so the flow
1209		 * can still be decoded. Subsequently:
1210		 *   - emit RECORD_TEXT_POKE with the new instruction
1211		 *   - IPI-SYNC
1212		 *   - write first byte
1213		 *   - IPI-SYNC
1214		 * So before the text poke event timestamp, the decoder will see
1215		 * either the old instruction flow or FUP/TIP of INT3. After the
1216		 * text poke event timestamp, the decoder will see either the
1217		 * new instruction flow or FUP/TIP of INT3. Thus decoders can
1218		 * use the timestamp as the point at which to modify the
1219		 * executable code.
1220		 * The old instruction is recorded so that the event can be
1221		 * processed forwards or backwards.
1222		 */
1223		perf_event_text_poke(text_poke_addr(&tp[i]), old, len,
1224				     tp[i].text, len);
1225	}
1226
1227	if (do_sync) {
1228		/*
1229		 * According to Intel, this core syncing is very likely
1230		 * not necessary and we'd be safe even without it. But
1231		 * better safe than sorry (plus there's not only Intel).
1232		 */
1233		text_poke_sync();
1234	}
1235
1236	/*
1237	 * Third step: replace the first byte (int3) with the first byte of
1238	 * the replacement opcode.
1239	 */
1240	for (do_sync = 0, i = 0; i < nr_entries; i++) {
1241		if (tp[i].text[0] == INT3_INSN_OPCODE)
1242			continue;
1243
1244		text_poke(text_poke_addr(&tp[i]), tp[i].text, INT3_INSN_SIZE);
1245		do_sync++;
1246	}
1247
1248	if (do_sync)
1249		text_poke_sync();
1250
1251	/*
1252	 * Remove the descriptor and wait for readers, much like synchronize_rcu(),
1253	 * except we have a very primitive refcount-based completion.
1254	 */
1255	WRITE_ONCE(bp_desc, NULL); /* RCU_INIT_POINTER */
1256	if (!atomic_dec_and_test(&desc.refs))
1257		atomic_cond_read_acquire(&desc.refs, !VAL);
1258}
1259
1260static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
1261			       const void *opcode, size_t len, const void *emulate)
1262{
1263	struct insn insn;
1264
1265	memcpy((void *)tp->text, opcode, len);
1266	if (!emulate)
1267		emulate = opcode;
1268
1269	kernel_insn_init(&insn, emulate, MAX_INSN_SIZE);
1270	insn_get_length(&insn);
1271
1272	BUG_ON(!insn_complete(&insn));
1273	BUG_ON(len != insn.length);
1274
1275	tp->rel_addr = addr - (void *)_stext;
1276	tp->opcode = insn.opcode.bytes[0];
1277
1278	switch (tp->opcode) {
1279	case INT3_INSN_OPCODE:
1280		break;
1281
1282	case CALL_INSN_OPCODE:
1283	case JMP32_INSN_OPCODE:
1284	case JMP8_INSN_OPCODE:
1285		tp->rel32 = insn.immediate.value;
1286		break;
1287
1288	default: /* assume NOP */
1289		switch (len) {
1290		case 2: /* NOP2 -- emulate as JMP8+0 */
1291			BUG_ON(memcmp(emulate, ideal_nops[len], len));
1292			tp->opcode = JMP8_INSN_OPCODE;
1293			tp->rel32 = 0;
1294			break;
1295
1296		case 5: /* NOP5 -- emulate as JMP32+0 */
1297			BUG_ON(memcmp(emulate, ideal_nops[NOP_ATOMIC5], len));
1298			tp->opcode = JMP32_INSN_OPCODE;
1299			tp->rel32 = 0;
1300			break;
1301
1302		default: /* unknown instruction */
1303			BUG();
1304		}
1305		break;
1306	}
1307}
1308
1309/*
1310 * We hard rely on the tp_vec being ordered; ensure this is so by flushing
1311 * early if needed.
1312 */
1313static bool tp_order_fail(void *addr)
1314{
1315	struct text_poke_loc *tp;
1316
1317	if (!tp_vec_nr)
1318		return false;
1319
1320	if (!addr) /* force */
1321		return true;
1322
1323	tp = &tp_vec[tp_vec_nr - 1];
1324	if ((unsigned long)text_poke_addr(tp) > (unsigned long)addr)
1325		return true;
1326
1327	return false;
1328}
1329
1330static void text_poke_flush(void *addr)
1331{
1332	if (tp_vec_nr == TP_VEC_MAX || tp_order_fail(addr)) {
1333		text_poke_bp_batch(tp_vec, tp_vec_nr);
1334		tp_vec_nr = 0;
1335	}
1336}
1337
1338void text_poke_finish(void)
1339{
1340	text_poke_flush(NULL);
1341}
1342
1343void __ref text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate)
1344{
1345	struct text_poke_loc *tp;
1346
1347	if (unlikely(system_state == SYSTEM_BOOTING)) {
1348		text_poke_early(addr, opcode, len);
1349		return;
1350	}
1351
1352	text_poke_flush(addr);
1353
1354	tp = &tp_vec[tp_vec_nr++];
1355	text_poke_loc_init(tp, addr, opcode, len, emulate);
1356}
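
/*
 * Illustrative sketch (not part of the original file): batching two 5-byte
 * NOP sites through text_poke_queue() and flushing them with
 * text_poke_finish(), all under text_mutex. Sites queued in ascending
 * address order batch together; an out-of-order address simply forces an
 * early flush (see tp_order_fail()). example_site_a/b are assumptions made
 * up for this example.
 */
#if 0
static void example_queue_nops(void *example_site_a, void *example_site_b)
{
	mutex_lock(&text_mutex);
	text_poke_queue(example_site_a, ideal_nops[NOP_ATOMIC5], 5, NULL);
	text_poke_queue(example_site_b, ideal_nops[NOP_ATOMIC5], 5, NULL);
	text_poke_finish();
	mutex_unlock(&text_mutex);
}
#endif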
1357
1358/**
1359 * text_poke_bp() -- update instructions on live kernel on SMP
1360 * @addr:	address to patch
1361 * @opcode:	opcode of new instruction
1362 * @len:	length to copy
1363 * @emulate:	instruction to be emulated (NULL means emulate @opcode itself)
1364 *
1365 * Update a single instruction with the vector on the stack, avoiding
1366 * dynamically allocated memory. This function should be used when it is
1367 * not possible to allocate memory.
1368 */
1369void __ref text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate)
1370{
1371	struct text_poke_loc tp;
1372
1373	if (unlikely(system_state == SYSTEM_BOOTING)) {
1374		text_poke_early(addr, opcode, len);
1375		return;
1376	}
1377
1378	text_poke_loc_init(&tp, addr, opcode, len, emulate);
1379	text_poke_bp_batch(&tp, 1);
1380}
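
/*
 * Illustrative sketch (not part of the original file): rewriting a 5-byte
 * site into a direct JMP32 with text_poke_bp(), holding text_mutex as
 * text_poke_bp_batch() asserts. The displacement is computed relative to
 * the end of the new instruction. example_site and example_target are
 * assumptions made up for this example.
 */
#if 0
static void example_install_jmp(void *example_site, void *example_target)
{
	u8 insn[5] = { JMP32_INSN_OPCODE, };
	s32 disp = (s32)((long)example_target -
			 ((long)example_site + sizeof(insn)));

	memcpy(insn + 1, &disp, sizeof(disp));

	mutex_lock(&text_mutex);
	text_poke_bp(example_site, insn, sizeof(insn), NULL);
	mutex_unlock(&text_mutex);
}
#endif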