   1// SPDX-License-Identifier: GPL-2.0-only
   2#define pr_fmt(fmt) "SMP alternatives: " fmt
   3
   4#include <linux/module.h>
   5#include <linux/sched.h>
   6#include <linux/perf_event.h>
   7#include <linux/mutex.h>
   8#include <linux/list.h>
   9#include <linux/stringify.h>
  10#include <linux/highmem.h>
  11#include <linux/mm.h>
  12#include <linux/vmalloc.h>
  13#include <linux/memory.h>
  14#include <linux/stop_machine.h>
  15#include <linux/slab.h>
  16#include <linux/kdebug.h>
  17#include <linux/kprobes.h>
  18#include <linux/mmu_context.h>
  19#include <linux/bsearch.h>
  20#include <linux/sync_core.h>
  21#include <asm/text-patching.h>
  22#include <asm/alternative.h>
  23#include <asm/sections.h>
  24#include <asm/mce.h>
  25#include <asm/nmi.h>
  26#include <asm/cacheflush.h>
  27#include <asm/tlbflush.h>
  28#include <asm/insn.h>
  29#include <asm/io.h>
  30#include <asm/fixmap.h>
  31#include <asm/paravirt.h>
  32#include <asm/asm-prototypes.h>
  33#include <asm/cfi.h>
  34
  35int __read_mostly alternatives_patched;
  36
  37EXPORT_SYMBOL_GPL(alternatives_patched);
  38
  39#define MAX_PATCH_LEN (255-1)
  40
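/*
 * Bitmask values for the "debug-alternative=" boot parameter parsed by
 * debug_alt() below; DPRINTK()/DUMP_BYTES() only emit output for the
 * selected patching classes.
 */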
  41#define DA_ALL		(~0)
  42#define DA_ALT		0x01
  43#define DA_RET		0x02
  44#define DA_RETPOLINE	0x04
  45#define DA_ENDBR	0x08
  46#define DA_SMP		0x10
  47
  48static unsigned int __initdata_or_module debug_alternative;
  49
  50static int __init debug_alt(char *str)
  51{
  52	if (str && *str == '=')
  53		str++;
  54
  55	if (!str || kstrtouint(str, 0, &debug_alternative))
  56		debug_alternative = DA_ALL;
  57
  58	return 1;
  59}
  60__setup("debug-alternative", debug_alt);
  61
  62static int noreplace_smp;
  63
  64static int __init setup_noreplace_smp(char *str)
  65{
  66	noreplace_smp = 1;
  67	return 1;
  68}
  69__setup("noreplace-smp", setup_noreplace_smp);
  70
  71#define DPRINTK(type, fmt, args...)					\
  72do {									\
  73	if (debug_alternative & DA_##type)				\
  74		printk(KERN_DEBUG pr_fmt(fmt) "\n", ##args);		\
  75} while (0)
  76
  77#define DUMP_BYTES(type, buf, len, fmt, args...)			\
  78do {									\
  79	if (unlikely(debug_alternative & DA_##type)) {			\
  80		int j;							\
  81									\
  82		if (!(len))						\
  83			break;						\
  84									\
  85		printk(KERN_DEBUG pr_fmt(fmt), ##args);			\
  86		for (j = 0; j < (len) - 1; j++)				\
  87			printk(KERN_CONT "%02hhx ", buf[j]);		\
  88		printk(KERN_CONT "%02hhx\n", buf[j]);			\
  89	}								\
  90} while (0)
  91
  92static const unsigned char x86nops[] =
  93{
  94	BYTES_NOP1,
  95	BYTES_NOP2,
  96	BYTES_NOP3,
  97	BYTES_NOP4,
  98	BYTES_NOP5,
  99	BYTES_NOP6,
 100	BYTES_NOP7,
 101	BYTES_NOP8,
 102#ifdef CONFIG_64BIT
 103	BYTES_NOP9,
 104	BYTES_NOP10,
 105	BYTES_NOP11,
 106#endif
 107};
 108
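/*
 * x86_nops[i] points at an i-byte NOP within the packed x86nops[] table
 * above; index 0 is unused.
 */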
 109const unsigned char * const x86_nops[ASM_NOP_MAX+1] =
 110{
 111	NULL,
 112	x86nops,
 113	x86nops + 1,
 114	x86nops + 1 + 2,
 115	x86nops + 1 + 2 + 3,
 116	x86nops + 1 + 2 + 3 + 4,
 117	x86nops + 1 + 2 + 3 + 4 + 5,
 118	x86nops + 1 + 2 + 3 + 4 + 5 + 6,
 119	x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
 120#ifdef CONFIG_64BIT
 121	x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
 122	x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9,
 123	x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10,
 124#endif
 125};
 126
 127/*
 128 * Fill the buffer with a single effective instruction of size @len.
 129 *
 130 * In order not to issue an ORC stack depth tracking CFI entry (Call Frame Info)
  131 * for every single-byte NOP, try to generate the largest available NOP of
  132 * size <= ASM_NOP_MAX such that only a single CFI entry is generated (vs one for
  133 * each single-byte NOP). If @len to fill out is > ASM_NOP_MAX, pad with INT3 and
 134 * *jump* over instead of executing long and daft NOPs.
 135 */
 136static void __init_or_module add_nop(u8 *instr, unsigned int len)
 137{
 138	u8 *target = instr + len;
 139
 140	if (!len)
 141		return;
 142
 143	if (len <= ASM_NOP_MAX) {
 144		memcpy(instr, x86_nops[len], len);
 145		return;
 146	}
 147
 148	if (len < 128) {
 149		__text_gen_insn(instr, JMP8_INSN_OPCODE, instr, target, JMP8_INSN_SIZE);
 150		instr += JMP8_INSN_SIZE;
 151	} else {
 152		__text_gen_insn(instr, JMP32_INSN_OPCODE, instr, target, JMP32_INSN_SIZE);
 153		instr += JMP32_INSN_SIZE;
 154	}
 155
 156	for (;instr < target; instr++)
 157		*instr = INT3_INSN_OPCODE;
 158}
 159
 160extern s32 __retpoline_sites[], __retpoline_sites_end[];
 161extern s32 __return_sites[], __return_sites_end[];
 162extern s32 __cfi_sites[], __cfi_sites_end[];
 163extern s32 __ibt_endbr_seal[], __ibt_endbr_seal_end[];
 164extern s32 __smp_locks[], __smp_locks_end[];
 165void text_poke_early(void *addr, const void *opcode, size_t len);
 166
 167/*
 168 * Matches NOP and NOPL, not any of the other possible NOPs.
 169 */
 170static bool insn_is_nop(struct insn *insn)
 171{
 172	/* Anything NOP, but no REP NOP */
 173	if (insn->opcode.bytes[0] == 0x90 &&
 174	    (!insn->prefixes.nbytes || insn->prefixes.bytes[0] != 0xF3))
 175		return true;
 176
 177	/* NOPL */
 178	if (insn->opcode.bytes[0] == 0x0F && insn->opcode.bytes[1] == 0x1F)
 179		return true;
 180
 181	/* TODO: more nops */
 182
 183	return false;
 184}
 185
 186/*
 187 * Find the offset of the first non-NOP instruction starting at @offset
 188 * but no further than @len.
 189 */
 190static int skip_nops(u8 *instr, int offset, int len)
 191{
 192	struct insn insn;
 193
 194	for (; offset < len; offset += insn.length) {
 195		if (insn_decode_kernel(&insn, &instr[offset]))
 196			break;
 197
 198		if (!insn_is_nop(&insn))
 199			break;
 200	}
 201
 202	return offset;
 203}
 204
 205/*
 206 * Optimize a sequence of NOPs, possibly preceded by an unconditional jump
 207 * to the end of the NOP sequence into a single NOP.
 208 */
 209static bool __init_or_module
 210__optimize_nops(u8 *instr, size_t len, struct insn *insn, int *next, int *prev, int *target)
 211{
 212	int i = *next - insn->length;
 213
 214	switch (insn->opcode.bytes[0]) {
 215	case JMP8_INSN_OPCODE:
 216	case JMP32_INSN_OPCODE:
 217		*prev = i;
 218		*target = *next + insn->immediate.value;
 219		return false;
 220	}
 221
 222	if (insn_is_nop(insn)) {
 223		int nop = i;
 224
 225		*next = skip_nops(instr, *next, len);
 226		if (*target && *next == *target)
 227			nop = *prev;
 228
 229		add_nop(instr + nop, *next - nop);
 230		DUMP_BYTES(ALT, instr, len, "%px: [%d:%d) optimized NOPs: ", instr, nop, *next);
 231		return true;
 232	}
 233
 234	*target = 0;
 235	return false;
 236}
 237
 238/*
 239 * "noinline" to cause control flow change and thus invalidate I$ and
 240 * cause refetch after modification.
 241 */
 242static void __init_or_module noinline optimize_nops(u8 *instr, size_t len)
 243{
 244	int prev, target = 0;
 245
 246	for (int next, i = 0; i < len; i = next) {
 247		struct insn insn;
 248
 249		if (insn_decode_kernel(&insn, &instr[i]))
 250			return;
 251
 252		next = i + insn.length;
 253
 254		__optimize_nops(instr, len, &insn, &next, &prev, &target);
 255	}
 256}
 257
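/*
 * Same as optimize_nops(), but with IRQs disabled and a sync_core()
 * afterwards, for rewriting instructions that are already live.
 */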
 258static void __init_or_module noinline optimize_nops_inplace(u8 *instr, size_t len)
 259{
 260	unsigned long flags;
 261
 262	local_irq_save(flags);
 263	optimize_nops(instr, len);
 264	sync_core();
 265	local_irq_restore(flags);
 266}
 267
 268/*
 269 * In this context, "source" is where the instructions are placed in the
 270 * section .altinstr_replacement, for example during kernel build by the
 271 * toolchain.
 272 * "Destination" is where the instructions are being patched in by this
 273 * machinery.
 274 *
 275 * The source offset is:
 276 *
 277 *   src_imm = target - src_next_ip                  (1)
 278 *
 279 * and the target offset is:
 280 *
 281 *   dst_imm = target - dst_next_ip                  (2)
 282 *
 283 * so rework (1) as an expression for target like:
 284 *
 285 *   target = src_imm + src_next_ip                  (1a)
 286 *
 287 * and substitute in (2) to get:
 288 *
 289 *   dst_imm = (src_imm + src_next_ip) - dst_next_ip (3)
 290 *
 291 * Now, since the instruction stream is 'identical' at src and dst (it
 292 * is being copied after all) it can be stated that:
 293 *
 294 *   src_next_ip = src + ip_offset
 295 *   dst_next_ip = dst + ip_offset                   (4)
 296 *
 297 * Substitute (4) in (3) and observe ip_offset being cancelled out to
 298 * obtain:
 299 *
 300 *   dst_imm = src_imm + (src + ip_offset) - (dst + ip_offset)
 301 *           = src_imm + src - dst + ip_offset - ip_offset
 302 *           = src_imm + src - dst                   (5)
 303 *
 304 * IOW, only the relative displacement of the code block matters.
 305 */
 306
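/*
 * Add the displacement @d_ to the @n_-bit signed immediate at @p_, per (5)
 * above, and BUG if the result no longer sign-extends from @n_ bits, i.e. no
 * longer fits in the original immediate.
 */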
 307#define apply_reloc_n(n_, p_, d_)				\
 308	do {							\
 309		s32 v = *(s##n_ *)(p_);				\
 310		v += (d_);					\
 311		BUG_ON((v >> 31) != (v >> (n_-1)));		\
 312		*(s##n_ *)(p_) = (s##n_)v;			\
 313	} while (0)
 314
 315
 316static __always_inline
 317void apply_reloc(int n, void *ptr, uintptr_t diff)
 318{
 319	switch (n) {
 320	case 1: apply_reloc_n(8, ptr, diff); break;
 321	case 2: apply_reloc_n(16, ptr, diff); break;
 322	case 4: apply_reloc_n(32, ptr, diff); break;
 323	default: BUG();
 324	}
 325}
 326
 327static __always_inline
 328bool need_reloc(unsigned long offset, u8 *src, size_t src_len)
 329{
 330	u8 *target = src + offset;
 331	/*
 332	 * If the target is inside the patched block, it's relative to the
 333	 * block itself and does not need relocation.
 334	 */
 335	return (target < src || target > src + src_len);
 336}
 337
 338static void __init_or_module noinline
 339apply_relocation(u8 *buf, size_t len, u8 *dest, u8 *src, size_t src_len)
 340{
 341	int prev, target = 0;
 342
 343	for (int next, i = 0; i < len; i = next) {
 344		struct insn insn;
 345
 346		if (WARN_ON_ONCE(insn_decode_kernel(&insn, &buf[i])))
 347			return;
 348
 349		next = i + insn.length;
 350
 351		if (__optimize_nops(buf, len, &insn, &next, &prev, &target))
 352			continue;
 353
 354		switch (insn.opcode.bytes[0]) {
 355		case 0x0f:
 356			if (insn.opcode.bytes[1] < 0x80 ||
 357			    insn.opcode.bytes[1] > 0x8f)
 358				break;
 359
 360			fallthrough;	/* Jcc.d32 */
 361		case 0x70 ... 0x7f:	/* Jcc.d8 */
 362		case JMP8_INSN_OPCODE:
 363		case JMP32_INSN_OPCODE:
 364		case CALL_INSN_OPCODE:
 365			if (need_reloc(next + insn.immediate.value, src, src_len)) {
 366				apply_reloc(insn.immediate.nbytes,
 367					    buf + i + insn_offset_immediate(&insn),
 368					    src - dest);
 369			}
 370
 371			/*
 372			 * Where possible, convert JMP.d32 into JMP.d8.
 373			 */
 374			if (insn.opcode.bytes[0] == JMP32_INSN_OPCODE) {
 375				s32 imm = insn.immediate.value;
 376				imm += src - dest;
 377				imm += JMP32_INSN_SIZE - JMP8_INSN_SIZE;
 378				if ((imm >> 31) == (imm >> 7)) {
 379					buf[i+0] = JMP8_INSN_OPCODE;
 380					buf[i+1] = (s8)imm;
 381
 382					memset(&buf[i+2], INT3_INSN_OPCODE, insn.length - 2);
 383				}
 384			}
 385			break;
 386		}
 387
 388		if (insn_rip_relative(&insn)) {
 389			if (need_reloc(next + insn.displacement.value, src, src_len)) {
 390				apply_reloc(insn.displacement.nbytes,
 391					    buf + i + insn_offset_displacement(&insn),
 392					    src - dest);
 393			}
 394		}
 395	}
 396}
 397
 398/* Low-level backend functions usable from alternative code replacements. */
 399DEFINE_ASM_FUNC(nop_func, "", .entry.text);
 400EXPORT_SYMBOL_GPL(nop_func);
 401
 402noinstr void BUG_func(void)
 403{
 404	BUG();
 405}
 406EXPORT_SYMBOL(BUG_func);
 407
 408#define CALL_RIP_REL_OPCODE	0xff
 409#define CALL_RIP_REL_MODRM	0x15
 410
 411/*
 412 * Rewrite the "call BUG_func" replacement to point to the target of the
 413 * indirect pv_ops call "call *disp(%ip)".
 414 */
 415static int alt_replace_call(u8 *instr, u8 *insn_buff, struct alt_instr *a)
 416{
 417	void *target, *bug = &BUG_func;
 418	s32 disp;
 419
 420	if (a->replacementlen != 5 || insn_buff[0] != CALL_INSN_OPCODE) {
 421		pr_err("ALT_FLAG_DIRECT_CALL set for a non-call replacement instruction\n");
 422		BUG();
 423	}
 424
 425	if (a->instrlen != 6 ||
 426	    instr[0] != CALL_RIP_REL_OPCODE ||
 427	    instr[1] != CALL_RIP_REL_MODRM) {
 428		pr_err("ALT_FLAG_DIRECT_CALL set for unrecognized indirect call\n");
 429		BUG();
 430	}
 431
 432	/* Skip CALL_RIP_REL_OPCODE and CALL_RIP_REL_MODRM */
 433	disp = *(s32 *)(instr + 2);
 434#ifdef CONFIG_X86_64
 435	/* ff 15 00 00 00 00   call   *0x0(%rip) */
 436	/* target address is stored at "next instruction + disp". */
 437	target = *(void **)(instr + a->instrlen + disp);
 438#else
 439	/* ff 15 00 00 00 00   call   *0x0 */
 440	/* target address is stored at disp. */
 441	target = *(void **)disp;
 442#endif
 443	if (!target)
 444		target = bug;
 445
 446	/* (BUG_func - .) + (target - BUG_func) := target - . */
 447	*(s32 *)(insn_buff + 1) += target - bug;
 448
 449	if (target == &nop_func)
 450		return 0;
 451
 452	return 5;
 453}
 454
 455/*
 456 * Replace instructions with better alternatives for this CPU type. This runs
 457 * before SMP is initialized to avoid SMP problems with self modifying code.
  458 * This implies that asymmetric systems where APs have fewer capabilities than
 459 * the boot processor are not handled. Tough. Make sure you disable such
 460 * features by hand.
 461 *
 462 * Marked "noinline" to cause control flow change and thus insn cache
 463 * to refetch changed I$ lines.
 464 */
 465void __init_or_module noinline apply_alternatives(struct alt_instr *start,
 466						  struct alt_instr *end)
 467{
 468	struct alt_instr *a;
 469	u8 *instr, *replacement;
 470	u8 insn_buff[MAX_PATCH_LEN];
 471
 472	DPRINTK(ALT, "alt table %px, -> %px", start, end);
 473
 474	/*
 475	 * In the case CONFIG_X86_5LEVEL=y, KASAN_SHADOW_START is defined using
 476	 * cpu_feature_enabled(X86_FEATURE_LA57) and is therefore patched here.
 477	 * During the process, KASAN becomes confused seeing partial LA57
 478	 * conversion and triggers a false-positive out-of-bound report.
 479	 *
 480	 * Disable KASAN until the patching is complete.
 481	 */
 482	kasan_disable_current();
 483
 484	/*
 485	 * The scan order should be from start to end. A later scanned
 486	 * alternative code can overwrite previously scanned alternative code.
 487	 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
 488	 * patch code.
 489	 *
 490	 * So be careful if you want to change the scan order to any other
 491	 * order.
 492	 */
 493	for (a = start; a < end; a++) {
 494		int insn_buff_sz = 0;
 495
 496		instr = (u8 *)&a->instr_offset + a->instr_offset;
 497		replacement = (u8 *)&a->repl_offset + a->repl_offset;
 498		BUG_ON(a->instrlen > sizeof(insn_buff));
 499		BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
 500
 501		/*
 502		 * Patch if either:
 503		 * - feature is present
 504		 * - feature not present but ALT_FLAG_NOT is set to mean,
 505		 *   patch if feature is *NOT* present.
 506		 */
 507		if (!boot_cpu_has(a->cpuid) == !(a->flags & ALT_FLAG_NOT)) {
 508			optimize_nops_inplace(instr, a->instrlen);
 509			continue;
 510		}
 511
 512		DPRINTK(ALT, "feat: %d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d) flags: 0x%x",
 513			a->cpuid >> 5,
 514			a->cpuid & 0x1f,
 515			instr, instr, a->instrlen,
 516			replacement, a->replacementlen, a->flags);
 517
 518		memcpy(insn_buff, replacement, a->replacementlen);
 519		insn_buff_sz = a->replacementlen;
 520
 521		if (a->flags & ALT_FLAG_DIRECT_CALL) {
 522			insn_buff_sz = alt_replace_call(instr, insn_buff, a);
 523			if (insn_buff_sz < 0)
 524				continue;
 525		}
 526
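		/* Pad the replacement out to the original length with one-byte NOPs (0x90). */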
 527		for (; insn_buff_sz < a->instrlen; insn_buff_sz++)
 528			insn_buff[insn_buff_sz] = 0x90;
 529
 530		apply_relocation(insn_buff, a->instrlen, instr, replacement, a->replacementlen);
 531
 532		DUMP_BYTES(ALT, instr, a->instrlen, "%px:   old_insn: ", instr);
 533		DUMP_BYTES(ALT, replacement, a->replacementlen, "%px:   rpl_insn: ", replacement);
 534		DUMP_BYTES(ALT, insn_buff, insn_buff_sz, "%px: final_insn: ", instr);
 535
 536		text_poke_early(instr, insn_buff, insn_buff_sz);
 537	}
 538
 539	kasan_enable_current();
 540}
 541
 542static inline bool is_jcc32(struct insn *insn)
 543{
 544	/* Jcc.d32 second opcode byte is in the range: 0x80-0x8f */
 545	return insn->opcode.bytes[0] == 0x0f && (insn->opcode.bytes[1] & 0xf0) == 0x80;
 546}
 547
 548#if defined(CONFIG_RETPOLINE) && defined(CONFIG_OBJTOOL)
 549
 550/*
 551 * CALL/JMP *%\reg
 552 */
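/*
 * Emit the 2-3 byte encoding: an optional REX.B prefix for r8-r15, the 0xff
 * opcode, and a ModRM byte with mod=3 (register-direct), the /2 (CALL) or
 * /4 (JMP) extension in the reg field, and the target register in r/m.
 */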
 553static int emit_indirect(int op, int reg, u8 *bytes)
 554{
 555	int i = 0;
 556	u8 modrm;
 557
 558	switch (op) {
 559	case CALL_INSN_OPCODE:
 560		modrm = 0x10; /* Reg = 2; CALL r/m */
 561		break;
 562
 563	case JMP32_INSN_OPCODE:
 564		modrm = 0x20; /* Reg = 4; JMP r/m */
 565		break;
 566
 567	default:
 568		WARN_ON_ONCE(1);
 569		return -1;
 570	}
 571
 572	if (reg >= 8) {
 573		bytes[i++] = 0x41; /* REX.B prefix */
 574		reg -= 8;
 575	}
 576
 577	modrm |= 0xc0; /* Mod = 3 */
 578	modrm += reg;
 579
 580	bytes[i++] = 0xff; /* opcode */
 581	bytes[i++] = modrm;
 582
 583	return i;
 584}
 585
 586static int emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8 *bytes)
 587{
 588	u8 op = insn->opcode.bytes[0];
 589	int i = 0;
 590
 591	/*
 592	 * Clang does 'weird' Jcc __x86_indirect_thunk_r11 conditional
 593	 * tail-calls. Deal with them.
 594	 */
 595	if (is_jcc32(insn)) {
 596		bytes[i++] = op;
 597		op = insn->opcode.bytes[1];
 598		goto clang_jcc;
 599	}
 600
 601	if (insn->length == 6)
 602		bytes[i++] = 0x2e; /* CS-prefix */
 603
 604	switch (op) {
 605	case CALL_INSN_OPCODE:
 606		__text_gen_insn(bytes+i, op, addr+i,
 607				__x86_indirect_call_thunk_array[reg],
 608				CALL_INSN_SIZE);
 609		i += CALL_INSN_SIZE;
 610		break;
 611
 612	case JMP32_INSN_OPCODE:
 613clang_jcc:
 614		__text_gen_insn(bytes+i, op, addr+i,
 615				__x86_indirect_jump_thunk_array[reg],
 616				JMP32_INSN_SIZE);
 617		i += JMP32_INSN_SIZE;
 618		break;
 619
 620	default:
 621		WARN(1, "%pS %px %*ph\n", addr, addr, 6, addr);
 622		return -1;
 623	}
 624
 625	WARN_ON_ONCE(i != insn->length);
 626
 627	return i;
 628}
 629
 630/*
 631 * Rewrite the compiler generated retpoline thunk calls.
 632 *
 633 * For spectre_v2=off (!X86_FEATURE_RETPOLINE), rewrite them into immediate
 634 * indirect instructions, avoiding the extra indirection.
 635 *
 636 * For example, convert:
 637 *
 638 *   CALL __x86_indirect_thunk_\reg
 639 *
 640 * into:
 641 *
 642 *   CALL *%\reg
 643 *
 644 * It also tries to inline spectre_v2=retpoline,lfence when size permits.
 645 */
 646static int patch_retpoline(void *addr, struct insn *insn, u8 *bytes)
 647{
 648	retpoline_thunk_t *target;
 649	int reg, ret, i = 0;
 650	u8 op, cc;
 651
 652	target = addr + insn->length + insn->immediate.value;
 653	reg = target - __x86_indirect_thunk_array;
 654
 655	if (WARN_ON_ONCE(reg & ~0xf))
 656		return -1;
 657
 658	/* If anyone ever does: CALL/JMP *%rsp, we're in deep trouble. */
 659	BUG_ON(reg == 4);
 660
 661	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE) &&
 662	    !cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
 663		if (cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
 664			return emit_call_track_retpoline(addr, insn, reg, bytes);
 665
 666		return -1;
 667	}
 668
 669	op = insn->opcode.bytes[0];
 670
 671	/*
 672	 * Convert:
 673	 *
 674	 *   Jcc.d32 __x86_indirect_thunk_\reg
 675	 *
 676	 * into:
 677	 *
 678	 *   Jncc.d8 1f
 679	 *   [ LFENCE ]
 680	 *   JMP *%\reg
 681	 *   [ NOP ]
 682	 * 1:
 683	 */
 684	if (is_jcc32(insn)) {
 685		cc = insn->opcode.bytes[1] & 0xf;
 686		cc ^= 1; /* invert condition */
 687
 688		bytes[i++] = 0x70 + cc;        /* Jcc.d8 */
 689		bytes[i++] = insn->length - 2; /* sizeof(Jcc.d8) == 2 */
 690
 691		/* Continue as if: JMP.d32 __x86_indirect_thunk_\reg */
 692		op = JMP32_INSN_OPCODE;
 693	}
 694
 695	/*
 696	 * For RETPOLINE_LFENCE: prepend the indirect CALL/JMP with an LFENCE.
 697	 */
 698	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
 699		bytes[i++] = 0x0f;
 700		bytes[i++] = 0xae;
 701		bytes[i++] = 0xe8; /* LFENCE */
 702	}
 703
 704	ret = emit_indirect(op, reg, bytes + i);
 705	if (ret < 0)
 706		return ret;
 707	i += ret;
 708
 709	/*
 710	 * The compiler is supposed to EMIT an INT3 after every unconditional
 711	 * JMP instruction due to AMD BTC. However, if the compiler is too old
 712	 * or SLS isn't enabled, we still need an INT3 after indirect JMPs
 713	 * even on Intel.
 714	 */
 715	if (op == JMP32_INSN_OPCODE && i < insn->length)
 716		bytes[i++] = INT3_INSN_OPCODE;
 717
 718	for (; i < insn->length;)
 719		bytes[i++] = BYTES_NOP1;
 720
 721	return i;
 722}
 723
 724/*
 725 * Generated by 'objtool --retpoline'.
 726 */
 727void __init_or_module noinline apply_retpolines(s32 *start, s32 *end)
 728{
 729	s32 *s;
 730
 731	for (s = start; s < end; s++) {
 732		void *addr = (void *)s + *s;
 733		struct insn insn;
 734		int len, ret;
 735		u8 bytes[16];
 736		u8 op1, op2;
 737
 738		ret = insn_decode_kernel(&insn, addr);
 739		if (WARN_ON_ONCE(ret < 0))
 740			continue;
 741
 742		op1 = insn.opcode.bytes[0];
 743		op2 = insn.opcode.bytes[1];
 744
 745		switch (op1) {
 746		case CALL_INSN_OPCODE:
 747		case JMP32_INSN_OPCODE:
 748			break;
 749
 750		case 0x0f: /* escape */
 751			if (op2 >= 0x80 && op2 <= 0x8f)
 752				break;
 753			fallthrough;
 754		default:
 755			WARN_ON_ONCE(1);
 756			continue;
 757		}
 758
 759		DPRINTK(RETPOLINE, "retpoline at: %pS (%px) len: %d to: %pS",
 760			addr, addr, insn.length,
 761			addr + insn.length + insn.immediate.value);
 762
 763		len = patch_retpoline(addr, &insn, bytes);
 764		if (len == insn.length) {
 765			optimize_nops(bytes, len);
 766			DUMP_BYTES(RETPOLINE, ((u8*)addr),  len, "%px: orig: ", addr);
 767			DUMP_BYTES(RETPOLINE, ((u8*)bytes), len, "%px: repl: ", addr);
 768			text_poke_early(addr, bytes, len);
 769		}
 770	}
 771}
 772
 773#ifdef CONFIG_RETHUNK
 774
 775/*
 776 * Rewrite the compiler generated return thunk tail-calls.
 777 *
 778 * For example, convert:
 779 *
 780 *   JMP __x86_return_thunk
 781 *
 782 * into:
 783 *
 784 *   RET
 785 */
 786static int patch_return(void *addr, struct insn *insn, u8 *bytes)
 787{
 788	int i = 0;
 789
 790	/* Patch the custom return thunks... */
 791	if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
 792		i = JMP32_INSN_SIZE;
 793		__text_gen_insn(bytes, JMP32_INSN_OPCODE, addr, x86_return_thunk, i);
 794	} else {
 795		/* ... or patch them out if not needed. */
 796		bytes[i++] = RET_INSN_OPCODE;
 797	}
 798
 799	for (; i < insn->length;)
 800		bytes[i++] = INT3_INSN_OPCODE;
 801	return i;
 802}
 803
 804void __init_or_module noinline apply_returns(s32 *start, s32 *end)
 805{
 806	s32 *s;
 807
 808	if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
 809		static_call_force_reinit();
 810
 811	for (s = start; s < end; s++) {
 812		void *dest = NULL, *addr = (void *)s + *s;
 813		struct insn insn;
 814		int len, ret;
 815		u8 bytes[16];
 816		u8 op;
 817
 818		ret = insn_decode_kernel(&insn, addr);
 819		if (WARN_ON_ONCE(ret < 0))
 820			continue;
 821
 822		op = insn.opcode.bytes[0];
 823		if (op == JMP32_INSN_OPCODE)
 824			dest = addr + insn.length + insn.immediate.value;
 825
 826		if (__static_call_fixup(addr, op, dest) ||
 827		    WARN_ONCE(dest != &__x86_return_thunk,
 828			      "missing return thunk: %pS-%pS: %*ph",
 829			      addr, dest, 5, addr))
 830			continue;
 831
 832		DPRINTK(RET, "return thunk at: %pS (%px) len: %d to: %pS",
 833			addr, addr, insn.length,
 834			addr + insn.length + insn.immediate.value);
 835
 836		len = patch_return(addr, &insn, bytes);
 837		if (len == insn.length) {
 838			DUMP_BYTES(RET, ((u8*)addr),  len, "%px: orig: ", addr);
 839			DUMP_BYTES(RET, ((u8*)bytes), len, "%px: repl: ", addr);
 840			text_poke_early(addr, bytes, len);
 841		}
 842	}
 843}
 844#else
 845void __init_or_module noinline apply_returns(s32 *start, s32 *end) { }
 846#endif /* CONFIG_RETHUNK */
 847
 848#else /* !CONFIG_RETPOLINE || !CONFIG_OBJTOOL */
 849
 850void __init_or_module noinline apply_retpolines(s32 *start, s32 *end) { }
 851void __init_or_module noinline apply_returns(s32 *start, s32 *end) { }
 852
 853#endif /* CONFIG_RETPOLINE && CONFIG_OBJTOOL */
 854
 855#ifdef CONFIG_X86_KERNEL_IBT
 856
 857static void poison_cfi(void *addr);
 858
 859static void __init_or_module poison_endbr(void *addr, bool warn)
 860{
 861	u32 endbr, poison = gen_endbr_poison();
 862
 863	if (WARN_ON_ONCE(get_kernel_nofault(endbr, addr)))
 864		return;
 865
 866	if (!is_endbr(endbr)) {
 867		WARN_ON_ONCE(warn);
 868		return;
 869	}
 870
 871	DPRINTK(ENDBR, "ENDBR at: %pS (%px)", addr, addr);
 872
 873	/*
 874	 * When we have IBT, the lack of ENDBR will trigger #CP
 875	 */
 876	DUMP_BYTES(ENDBR, ((u8*)addr), 4, "%px: orig: ", addr);
 877	DUMP_BYTES(ENDBR, ((u8*)&poison), 4, "%px: repl: ", addr);
 878	text_poke_early(addr, &poison, 4);
 879}
 880
 881/*
 882 * Generated by: objtool --ibt
 883 *
 884 * Seal the functions for indirect calls by clobbering the ENDBR instructions
 885 * and the kCFI hash value.
 886 */
 887void __init_or_module noinline apply_seal_endbr(s32 *start, s32 *end)
 888{
 889	s32 *s;
 890
 891	for (s = start; s < end; s++) {
 892		void *addr = (void *)s + *s;
 893
 894		poison_endbr(addr, true);
 895		if (IS_ENABLED(CONFIG_FINEIBT))
 896			poison_cfi(addr - 16);
 897	}
 898}
 899
 900#else
 901
 902void __init_or_module apply_seal_endbr(s32 *start, s32 *end) { }
 903
 904#endif /* CONFIG_X86_KERNEL_IBT */
 905
 906#ifdef CONFIG_FINEIBT
 907#define __CFI_DEFAULT	CFI_DEFAULT
 908#elif defined(CONFIG_CFI_CLANG)
 909#define __CFI_DEFAULT	CFI_KCFI
 910#else
 911#define __CFI_DEFAULT	CFI_OFF
 912#endif
 913
 914enum cfi_mode cfi_mode __ro_after_init = __CFI_DEFAULT;
 915
 916#ifdef CONFIG_CFI_CLANG
 917struct bpf_insn;
 918
 919/* Must match bpf_func_t / DEFINE_BPF_PROG_RUN() */
 920extern unsigned int __bpf_prog_runX(const void *ctx,
 921				    const struct bpf_insn *insn);
 922
 923/*
 924 * Force a reference to the external symbol so the compiler generates
 925 * __kcfi_typid.
 926 */
 927__ADDRESSABLE(__bpf_prog_runX);
 928
 929/* u32 __ro_after_init cfi_bpf_hash = __kcfi_typeid___bpf_prog_runX; */
 930asm (
 931"	.pushsection	.data..ro_after_init,\"aw\",@progbits	\n"
 932"	.type	cfi_bpf_hash,@object				\n"
 933"	.globl	cfi_bpf_hash					\n"
 934"	.p2align	2, 0x0					\n"
 935"cfi_bpf_hash:							\n"
 936"	.long	__kcfi_typeid___bpf_prog_runX			\n"
 937"	.size	cfi_bpf_hash, 4					\n"
 938"	.popsection						\n"
 939);
 940
 941/* Must match bpf_callback_t */
 942extern u64 __bpf_callback_fn(u64, u64, u64, u64, u64);
 943
 944__ADDRESSABLE(__bpf_callback_fn);
 945
 946/* u32 __ro_after_init cfi_bpf_subprog_hash = __kcfi_typeid___bpf_callback_fn; */
 947asm (
 948"	.pushsection	.data..ro_after_init,\"aw\",@progbits	\n"
 949"	.type	cfi_bpf_subprog_hash,@object			\n"
 950"	.globl	cfi_bpf_subprog_hash				\n"
 951"	.p2align	2, 0x0					\n"
 952"cfi_bpf_subprog_hash:						\n"
 953"	.long	__kcfi_typeid___bpf_callback_fn			\n"
 954"	.size	cfi_bpf_subprog_hash, 4				\n"
 955"	.popsection						\n"
 956);
 957
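/*
 * Return the expected hash for @func: the 32-bit immediate lives at offset 7
 * of a FineIBT preamble (after ENDBR64 and the SUBL opcode bytes) and at
 * offset 1 of a kCFI preamble (after the MOVL opcode byte).
 */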
 958u32 cfi_get_func_hash(void *func)
 959{
 960	u32 hash;
 961
 962	func -= cfi_get_offset();
 963	switch (cfi_mode) {
 964	case CFI_FINEIBT:
 965		func += 7;
 966		break;
 967	case CFI_KCFI:
 968		func += 1;
 969		break;
 970	default:
 971		return 0;
 972	}
 973
 974	if (get_kernel_nofault(hash, func))
 975		return 0;
 976
 977	return hash;
 978}
 979#endif
 980
 981#ifdef CONFIG_FINEIBT
 982
 983static bool cfi_rand __ro_after_init = true;
 984static u32  cfi_seed __ro_after_init;
 985
 986/*
 987 * Re-hash the CFI hash with a boot-time seed while making sure the result is
 988 * not a valid ENDBR instruction.
 989 */
 990static u32 cfi_rehash(u32 hash)
 991{
 992	hash ^= cfi_seed;
 993	while (unlikely(is_endbr(hash) || is_endbr(-hash))) {
 994		bool lsb = hash & 1;
 995		hash >>= 1;
 996		if (lsb)
 997			hash ^= 0x80200003;
 998	}
 999	return hash;
1000}
1001
1002static __init int cfi_parse_cmdline(char *str)
1003{
1004	if (!str)
1005		return -EINVAL;
1006
1007	while (str) {
1008		char *next = strchr(str, ',');
1009		if (next) {
1010			*next = 0;
1011			next++;
1012		}
1013
1014		if (!strcmp(str, "auto")) {
1015			cfi_mode = CFI_DEFAULT;
1016		} else if (!strcmp(str, "off")) {
1017			cfi_mode = CFI_OFF;
1018			cfi_rand = false;
1019		} else if (!strcmp(str, "kcfi")) {
1020			cfi_mode = CFI_KCFI;
1021		} else if (!strcmp(str, "fineibt")) {
1022			cfi_mode = CFI_FINEIBT;
1023		} else if (!strcmp(str, "norand")) {
1024			cfi_rand = false;
1025		} else {
1026			pr_err("Ignoring unknown cfi option (%s).", str);
1027		}
1028
1029		str = next;
1030	}
1031
1032	return 0;
1033}
1034early_param("cfi", cfi_parse_cmdline);
1035
1036/*
1037 * kCFI						FineIBT
1038 *
1039 * __cfi_\func:					__cfi_\func:
1040 *	movl   $0x12345678,%eax		// 5	     endbr64			// 4
1041 *	nop					     subl   $0x12345678,%r10d   // 7
1042 *	nop					     jz     1f			// 2
1043 *	nop					     ud2			// 2
1044 *	nop					1:   nop			// 1
1045 *	nop
1046 *	nop
1047 *	nop
1048 *	nop
1049 *	nop
1050 *	nop
1051 *	nop
1052 *
1053 *
1054 * caller:					caller:
1055 *	movl	$(-0x12345678),%r10d	 // 6	     movl   $0x12345678,%r10d	// 6
1056 *	addl	$-15(%r11),%r10d	 // 4	     sub    $16,%r11		// 4
1057 *	je	1f			 // 2	     nop4			// 4
1058 *	ud2				 // 2
1059 * 1:	call	__x86_indirect_thunk_r11 // 5	     call   *%r11; nop2;	// 5
1060 *
1061 */
1062
1063asm(	".pushsection .rodata			\n"
1064	"fineibt_preamble_start:		\n"
1065	"	endbr64				\n"
1066	"	subl	$0x12345678, %r10d	\n"
1067	"	je	fineibt_preamble_end	\n"
1068	"	ud2				\n"
1069	"	nop				\n"
1070	"fineibt_preamble_end:			\n"
1071	".popsection\n"
1072);
1073
1074extern u8 fineibt_preamble_start[];
1075extern u8 fineibt_preamble_end[];
1076
1077#define fineibt_preamble_size (fineibt_preamble_end - fineibt_preamble_start)
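/* Offset of the 32-bit hash immediate within the preamble above. */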
1078#define fineibt_preamble_hash 7
1079
1080asm(	".pushsection .rodata			\n"
1081	"fineibt_caller_start:			\n"
1082	"	movl	$0x12345678, %r10d	\n"
1083	"	sub	$16, %r11		\n"
1084	ASM_NOP4
1085	"fineibt_caller_end:			\n"
1086	".popsection				\n"
1087);
1088
1089extern u8 fineibt_caller_start[];
1090extern u8 fineibt_caller_end[];
1091
1092#define fineibt_caller_size (fineibt_caller_end - fineibt_caller_start)
1093#define fineibt_caller_hash 2
1094
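/*
 * Displacement of the JMP.d8 that cfi_disable_callers() patches in: it skips
 * the remainder of the fineibt_caller sequence after the 2-byte JMP itself.
 */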
1095#define fineibt_caller_jmp (fineibt_caller_size - 2)
1096
1097static u32 decode_preamble_hash(void *addr)
1098{
1099	u8 *p = addr;
1100
1101	/* b8 78 56 34 12          mov    $0x12345678,%eax */
1102	if (p[0] == 0xb8)
1103		return *(u32 *)(addr + 1);
1104
1105	return 0; /* invalid hash value */
1106}
1107
1108static u32 decode_caller_hash(void *addr)
1109{
1110	u8 *p = addr;
1111
1112	/* 41 ba 78 56 34 12       mov    $0x12345678,%r10d */
1113	if (p[0] == 0x41 && p[1] == 0xba)
1114		return -*(u32 *)(addr + 2);
1115
 1116	/* eb 0c 78 56 34 12	   jmp.d8  +12 */
1117	if (p[0] == JMP8_INSN_OPCODE && p[1] == fineibt_caller_jmp)
1118		return -*(u32 *)(addr + 2);
1119
1120	return 0; /* invalid hash value */
1121}
1122
1123/* .retpoline_sites */
1124static int cfi_disable_callers(s32 *start, s32 *end)
1125{
1126	/*
 1127	 * Disable kCFI by patching in a JMP.d8; this leaves the hash immediate
 1128	 * intact for later use. Also see decode_caller_hash() and
1129	 * cfi_rewrite_callers().
1130	 */
1131	const u8 jmp[] = { JMP8_INSN_OPCODE, fineibt_caller_jmp };
1132	s32 *s;
1133
1134	for (s = start; s < end; s++) {
1135		void *addr = (void *)s + *s;
1136		u32 hash;
1137
1138		addr -= fineibt_caller_size;
1139		hash = decode_caller_hash(addr);
1140		if (!hash) /* nocfi callers */
1141			continue;
1142
1143		text_poke_early(addr, jmp, 2);
1144	}
1145
1146	return 0;
1147}
1148
1149static int cfi_enable_callers(s32 *start, s32 *end)
1150{
1151	/*
1152	 * Re-enable kCFI, undo what cfi_disable_callers() did.
1153	 */
1154	const u8 mov[] = { 0x41, 0xba };
1155	s32 *s;
1156
1157	for (s = start; s < end; s++) {
1158		void *addr = (void *)s + *s;
1159		u32 hash;
1160
1161		addr -= fineibt_caller_size;
1162		hash = decode_caller_hash(addr);
1163		if (!hash) /* nocfi callers */
1164			continue;
1165
1166		text_poke_early(addr, mov, 2);
1167	}
1168
1169	return 0;
1170}
1171
1172/* .cfi_sites */
1173static int cfi_rand_preamble(s32 *start, s32 *end)
1174{
1175	s32 *s;
1176
1177	for (s = start; s < end; s++) {
1178		void *addr = (void *)s + *s;
1179		u32 hash;
1180
1181		hash = decode_preamble_hash(addr);
1182		if (WARN(!hash, "no CFI hash found at: %pS %px %*ph\n",
1183			 addr, addr, 5, addr))
1184			return -EINVAL;
1185
1186		hash = cfi_rehash(hash);
1187		text_poke_early(addr + 1, &hash, 4);
1188	}
1189
1190	return 0;
1191}
1192
1193static int cfi_rewrite_preamble(s32 *start, s32 *end)
1194{
1195	s32 *s;
1196
1197	for (s = start; s < end; s++) {
1198		void *addr = (void *)s + *s;
1199		u32 hash;
1200
1201		hash = decode_preamble_hash(addr);
1202		if (WARN(!hash, "no CFI hash found at: %pS %px %*ph\n",
1203			 addr, addr, 5, addr))
1204			return -EINVAL;
1205
1206		text_poke_early(addr, fineibt_preamble_start, fineibt_preamble_size);
1207		WARN_ON(*(u32 *)(addr + fineibt_preamble_hash) != 0x12345678);
1208		text_poke_early(addr + fineibt_preamble_hash, &hash, 4);
1209	}
1210
1211	return 0;
1212}
1213
1214static void cfi_rewrite_endbr(s32 *start, s32 *end)
1215{
1216	s32 *s;
1217
1218	for (s = start; s < end; s++) {
1219		void *addr = (void *)s + *s;
1220
1221		poison_endbr(addr+16, false);
1222	}
1223}
1224
1225/* .retpoline_sites */
1226static int cfi_rand_callers(s32 *start, s32 *end)
1227{
1228	s32 *s;
1229
1230	for (s = start; s < end; s++) {
1231		void *addr = (void *)s + *s;
1232		u32 hash;
1233
1234		addr -= fineibt_caller_size;
1235		hash = decode_caller_hash(addr);
1236		if (hash) {
1237			hash = -cfi_rehash(hash);
1238			text_poke_early(addr + 2, &hash, 4);
1239		}
1240	}
1241
1242	return 0;
1243}
1244
1245static int cfi_rewrite_callers(s32 *start, s32 *end)
1246{
1247	s32 *s;
1248
1249	for (s = start; s < end; s++) {
1250		void *addr = (void *)s + *s;
1251		u32 hash;
1252
1253		addr -= fineibt_caller_size;
1254		hash = decode_caller_hash(addr);
1255		if (hash) {
1256			text_poke_early(addr, fineibt_caller_start, fineibt_caller_size);
1257			WARN_ON(*(u32 *)(addr + fineibt_caller_hash) != 0x12345678);
1258			text_poke_early(addr + fineibt_caller_hash, &hash, 4);
1259		}
1260		/* rely on apply_retpolines() */
1261	}
1262
1263	return 0;
1264}
1265
1266static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
1267			    s32 *start_cfi, s32 *end_cfi, bool builtin)
1268{
1269	int ret;
1270
1271	if (WARN_ONCE(fineibt_preamble_size != 16,
1272		      "FineIBT preamble wrong size: %ld", fineibt_preamble_size))
1273		return;
1274
1275	if (cfi_mode == CFI_DEFAULT) {
1276		cfi_mode = CFI_KCFI;
1277		if (HAS_KERNEL_IBT && cpu_feature_enabled(X86_FEATURE_IBT))
1278			cfi_mode = CFI_FINEIBT;
1279	}
1280
1281	/*
1282	 * Rewrite the callers to not use the __cfi_ stubs, such that we might
1283	 * rewrite them. This disables all CFI. If this succeeds but any of the
1284	 * later stages fails, we're without CFI.
1285	 */
1286	ret = cfi_disable_callers(start_retpoline, end_retpoline);
1287	if (ret)
1288		goto err;
1289
1290	if (cfi_rand) {
1291		if (builtin) {
1292			cfi_seed = get_random_u32();
1293			cfi_bpf_hash = cfi_rehash(cfi_bpf_hash);
1294			cfi_bpf_subprog_hash = cfi_rehash(cfi_bpf_subprog_hash);
1295		}
1296
1297		ret = cfi_rand_preamble(start_cfi, end_cfi);
1298		if (ret)
1299			goto err;
1300
1301		ret = cfi_rand_callers(start_retpoline, end_retpoline);
1302		if (ret)
1303			goto err;
1304	}
1305
1306	switch (cfi_mode) {
1307	case CFI_OFF:
1308		if (builtin)
1309			pr_info("Disabling CFI\n");
1310		return;
1311
1312	case CFI_KCFI:
1313		ret = cfi_enable_callers(start_retpoline, end_retpoline);
1314		if (ret)
1315			goto err;
1316
1317		if (builtin)
1318			pr_info("Using kCFI\n");
1319		return;
1320
1321	case CFI_FINEIBT:
1322		/* place the FineIBT preamble at func()-16 */
1323		ret = cfi_rewrite_preamble(start_cfi, end_cfi);
1324		if (ret)
1325			goto err;
1326
1327		/* rewrite the callers to target func()-16 */
1328		ret = cfi_rewrite_callers(start_retpoline, end_retpoline);
1329		if (ret)
1330			goto err;
1331
1332		/* now that nobody targets func()+0, remove ENDBR there */
1333		cfi_rewrite_endbr(start_cfi, end_cfi);
1334
1335		if (builtin)
1336			pr_info("Using FineIBT CFI\n");
1337		return;
1338
1339	default:
1340		break;
1341	}
1342
1343err:
1344	pr_err("Something went horribly wrong trying to rewrite the CFI implementation.\n");
1345}
1346
1347static inline void poison_hash(void *addr)
1348{
1349	*(u32 *)addr = 0;
1350}
1351
1352static void poison_cfi(void *addr)
1353{
1354	switch (cfi_mode) {
1355	case CFI_FINEIBT:
1356		/*
1357		 * __cfi_\func:
1358		 *	osp nopl (%rax)
1359		 *	subl	$0, %r10d
1360		 *	jz	1f
1361		 *	ud2
1362		 * 1:	nop
1363		 */
1364		poison_endbr(addr, false);
1365		poison_hash(addr + fineibt_preamble_hash);
1366		break;
1367
1368	case CFI_KCFI:
1369		/*
1370		 * __cfi_\func:
1371		 *	movl	$0, %eax
1372		 *	.skip	11, 0x90
1373		 */
1374		poison_hash(addr + 1);
1375		break;
1376
1377	default:
1378		break;
1379	}
1380}
1381
1382#else
1383
1384static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
1385			    s32 *start_cfi, s32 *end_cfi, bool builtin)
1386{
1387}
1388
1389#ifdef CONFIG_X86_KERNEL_IBT
1390static void poison_cfi(void *addr) { }
1391#endif
1392
1393#endif
1394
1395void apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
1396		   s32 *start_cfi, s32 *end_cfi)
1397{
1398	return __apply_fineibt(start_retpoline, end_retpoline,
1399			       start_cfi, end_cfi,
1400			       /* .builtin = */ false);
1401}
1402
1403#ifdef CONFIG_SMP
1404static void alternatives_smp_lock(const s32 *start, const s32 *end,
1405				  u8 *text, u8 *text_end)
1406{
1407	const s32 *poff;
1408
1409	for (poff = start; poff < end; poff++) {
1410		u8 *ptr = (u8 *)poff + *poff;
1411
1412		if (!*poff || ptr < text || ptr >= text_end)
1413			continue;
1414		/* turn DS segment override prefix into lock prefix */
1415		if (*ptr == 0x3e)
1416			text_poke(ptr, ((unsigned char []){0xf0}), 1);
1417	}
1418}
1419
1420static void alternatives_smp_unlock(const s32 *start, const s32 *end,
1421				    u8 *text, u8 *text_end)
1422{
1423	const s32 *poff;
1424
1425	for (poff = start; poff < end; poff++) {
1426		u8 *ptr = (u8 *)poff + *poff;
1427
1428		if (!*poff || ptr < text || ptr >= text_end)
1429			continue;
1430		/* turn lock prefix into DS segment override prefix */
1431		if (*ptr == 0xf0)
1432			text_poke(ptr, ((unsigned char []){0x3E}), 1);
1433	}
1434}
1435
1436struct smp_alt_module {
1437	/* what is this ??? */
1438	struct module	*mod;
1439	char		*name;
1440
1441	/* ptrs to lock prefixes */
1442	const s32	*locks;
1443	const s32	*locks_end;
1444
1445	/* .text segment, needed to avoid patching init code ;) */
1446	u8		*text;
1447	u8		*text_end;
1448
1449	struct list_head next;
1450};
1451static LIST_HEAD(smp_alt_modules);
1452static bool uniproc_patched = false;	/* protected by text_mutex */
1453
1454void __init_or_module alternatives_smp_module_add(struct module *mod,
1455						  char *name,
1456						  void *locks, void *locks_end,
1457						  void *text,  void *text_end)
1458{
1459	struct smp_alt_module *smp;
1460
1461	mutex_lock(&text_mutex);
1462	if (!uniproc_patched)
1463		goto unlock;
1464
1465	if (num_possible_cpus() == 1)
1466		/* Don't bother remembering, we'll never have to undo it. */
1467		goto smp_unlock;
1468
1469	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
1470	if (NULL == smp)
1471		/* we'll run the (safe but slow) SMP code then ... */
1472		goto unlock;
1473
1474	smp->mod	= mod;
1475	smp->name	= name;
1476	smp->locks	= locks;
1477	smp->locks_end	= locks_end;
1478	smp->text	= text;
1479	smp->text_end	= text_end;
 1480	DPRINTK(SMP, "locks %p -> %p, text %p -> %p, name %s",
1481		smp->locks, smp->locks_end,
1482		smp->text, smp->text_end, smp->name);
1483
1484	list_add_tail(&smp->next, &smp_alt_modules);
1485smp_unlock:
1486	alternatives_smp_unlock(locks, locks_end, text, text_end);
1487unlock:
1488	mutex_unlock(&text_mutex);
1489}
1490
1491void __init_or_module alternatives_smp_module_del(struct module *mod)
1492{
1493	struct smp_alt_module *item;
1494
1495	mutex_lock(&text_mutex);
1496	list_for_each_entry(item, &smp_alt_modules, next) {
1497		if (mod != item->mod)
1498			continue;
1499		list_del(&item->next);
1500		kfree(item);
1501		break;
1502	}
1503	mutex_unlock(&text_mutex);
1504}
1505
1506void alternatives_enable_smp(void)
1507{
1508	struct smp_alt_module *mod;
1509
1510	/* Why bother if there are no other CPUs? */
1511	BUG_ON(num_possible_cpus() == 1);
1512
1513	mutex_lock(&text_mutex);
1514
1515	if (uniproc_patched) {
1516		pr_info("switching to SMP code\n");
1517		BUG_ON(num_online_cpus() != 1);
1518		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
1519		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
1520		list_for_each_entry(mod, &smp_alt_modules, next)
1521			alternatives_smp_lock(mod->locks, mod->locks_end,
1522					      mod->text, mod->text_end);
1523		uniproc_patched = false;
1524	}
1525	mutex_unlock(&text_mutex);
1526}
1527
1528/*
1529 * Return 1 if the address range is reserved for SMP-alternatives.
1530 * Must hold text_mutex.
1531 */
1532int alternatives_text_reserved(void *start, void *end)
1533{
1534	struct smp_alt_module *mod;
1535	const s32 *poff;
1536	u8 *text_start = start;
1537	u8 *text_end = end;
1538
1539	lockdep_assert_held(&text_mutex);
1540
1541	list_for_each_entry(mod, &smp_alt_modules, next) {
1542		if (mod->text > text_end || mod->text_end < text_start)
1543			continue;
1544		for (poff = mod->locks; poff < mod->locks_end; poff++) {
1545			const u8 *ptr = (const u8 *)poff + *poff;
1546
1547			if (text_start <= ptr && text_end > ptr)
1548				return 1;
1549		}
1550	}
1551
1552	return 0;
1553}
1554#endif /* CONFIG_SMP */
1555
1556/*
1557 * Self-test for the INT3 based CALL emulation code.
1558 *
1559 * This exercises int3_emulate_call() to make sure INT3 pt_regs are set up
1560 * properly and that there is a stack gap between the INT3 frame and the
1561 * previous context. Without this gap doing a virtual PUSH on the interrupted
1562 * stack would corrupt the INT3 IRET frame.
1563 *
1564 * See entry_{32,64}.S for more details.
1565 */
1566
1567/*
1568 * We define the int3_magic() function in assembly to control the calling
1569 * convention such that we can 'call' it from assembly.
1570 */
1571
1572extern void int3_magic(unsigned int *ptr); /* defined in asm */
1573
1574asm (
1575"	.pushsection	.init.text, \"ax\", @progbits\n"
1576"	.type		int3_magic, @function\n"
1577"int3_magic:\n"
1578	ANNOTATE_NOENDBR
1579"	movl	$1, (%" _ASM_ARG1 ")\n"
1580	ASM_RET
1581"	.size		int3_magic, .-int3_magic\n"
1582"	.popsection\n"
1583);
1584
1585extern void int3_selftest_ip(void); /* defined in asm below */
1586
1587static int __init
1588int3_exception_notify(struct notifier_block *self, unsigned long val, void *data)
1589{
1590	unsigned long selftest = (unsigned long)&int3_selftest_ip;
1591	struct die_args *args = data;
1592	struct pt_regs *regs = args->regs;
1593
1594	OPTIMIZER_HIDE_VAR(selftest);
1595
1596	if (!regs || user_mode(regs))
1597		return NOTIFY_DONE;
1598
1599	if (val != DIE_INT3)
1600		return NOTIFY_DONE;
1601
1602	if (regs->ip - INT3_INSN_SIZE != selftest)
1603		return NOTIFY_DONE;
1604
1605	int3_emulate_call(regs, (unsigned long)&int3_magic);
1606	return NOTIFY_STOP;
1607}
1608
1609/* Must be noinline to ensure uniqueness of int3_selftest_ip. */
1610static noinline void __init int3_selftest(void)
1611{
1612	static __initdata struct notifier_block int3_exception_nb = {
1613		.notifier_call	= int3_exception_notify,
1614		.priority	= INT_MAX-1, /* last */
1615	};
1616	unsigned int val = 0;
1617
1618	BUG_ON(register_die_notifier(&int3_exception_nb));
1619
1620	/*
1621	 * Basically: int3_magic(&val); but really complicated :-)
1622	 *
1623	 * INT3 padded with NOP to CALL_INSN_SIZE. The int3_exception_nb
1624	 * notifier above will emulate CALL for us.
1625	 */
1626	asm volatile ("int3_selftest_ip:\n\t"
1627		      ANNOTATE_NOENDBR
1628		      "    int3; nop; nop; nop; nop\n\t"
1629		      : ASM_CALL_CONSTRAINT
1630		      : __ASM_SEL_RAW(a, D) (&val)
1631		      : "memory");
1632
1633	BUG_ON(val != 1);
1634
1635	unregister_die_notifier(&int3_exception_nb);
1636}
1637
1638static __initdata int __alt_reloc_selftest_addr;
1639
1640extern void __init __alt_reloc_selftest(void *arg);
1641__visible noinline void __init __alt_reloc_selftest(void *arg)
1642{
1643	WARN_ON(arg != &__alt_reloc_selftest_addr);
1644}
1645
1646static noinline void __init alt_reloc_selftest(void)
1647{
1648	/*
1649	 * Tests apply_relocation().
1650	 *
1651	 * This has a relative immediate (CALL) in a place other than the first
1652	 * instruction and additionally on x86_64 we get a RIP-relative LEA:
1653	 *
1654	 *   lea    0x0(%rip),%rdi  # 5d0: R_X86_64_PC32    .init.data+0x5566c
1655	 *   call   +0              # 5d5: R_X86_64_PLT32   __alt_reloc_selftest-0x4
1656	 *
1657	 * Getting this wrong will either crash and burn or tickle the WARN
1658	 * above.
1659	 */
1660	asm_inline volatile (
1661		ALTERNATIVE("", "lea %[mem], %%" _ASM_ARG1 "; call __alt_reloc_selftest;", X86_FEATURE_ALWAYS)
1662		: /* output */
1663		: [mem] "m" (__alt_reloc_selftest_addr)
1664		: _ASM_ARG1
1665	);
1666}
1667
1668void __init alternative_instructions(void)
1669{
1670	int3_selftest();
1671
1672	/*
1673	 * The patching is not fully atomic, so try to avoid local
1674	 * interruptions that might execute the to be patched code.
1675	 * Other CPUs are not running.
1676	 */
1677	stop_nmi();
1678
1679	/*
1680	 * Don't stop machine check exceptions while patching.
1681	 * MCEs only happen when something got corrupted and in this
1682	 * case we must do something about the corruption.
1683	 * Ignoring it is worse than an unlikely patching race.
1684	 * Also machine checks tend to be broadcast and if one CPU
1685	 * goes into machine check the others follow quickly, so we don't
 1686	 * expect a machine check to cause undue problems during code
1687	 * patching.
1688	 */
1689
1690	/*
1691	 * Make sure to set (artificial) features depending on used paravirt
1692	 * functions which can later influence alternative patching.
1693	 */
1694	paravirt_set_cap();
1695
1696	__apply_fineibt(__retpoline_sites, __retpoline_sites_end,
1697			__cfi_sites, __cfi_sites_end, true);
1698
1699	/*
1700	 * Rewrite the retpolines, must be done before alternatives since
1701	 * those can rewrite the retpoline thunks.
1702	 */
1703	apply_retpolines(__retpoline_sites, __retpoline_sites_end);
1704	apply_returns(__return_sites, __return_sites_end);
1705
1706	apply_alternatives(__alt_instructions, __alt_instructions_end);
1707
1708	/*
1709	 * Now all calls are established. Apply the call thunks if
1710	 * required.
1711	 */
1712	callthunks_patch_builtin_calls();
1713
1714	/*
1715	 * Seal all functions that do not have their address taken.
1716	 */
1717	apply_seal_endbr(__ibt_endbr_seal, __ibt_endbr_seal_end);
1718
1719#ifdef CONFIG_SMP
1720	/* Patch to UP if other cpus not imminent. */
1721	if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
1722		uniproc_patched = true;
1723		alternatives_smp_module_add(NULL, "core kernel",
1724					    __smp_locks, __smp_locks_end,
1725					    _text, _etext);
1726	}
1727
1728	if (!uniproc_patched || num_possible_cpus() == 1) {
1729		free_init_pages("SMP alternatives",
1730				(unsigned long)__smp_locks,
1731				(unsigned long)__smp_locks_end);
1732	}
1733#endif
1734
1735	restart_nmi();
1736	alternatives_patched = 1;
1737
1738	alt_reloc_selftest();
1739}
1740
1741/**
1742 * text_poke_early - Update instructions on a live kernel at boot time
1743 * @addr: address to modify
1744 * @opcode: source of the copy
1745 * @len: length to copy
1746 *
1747 * When you use this code to patch more than one byte of an instruction
1748 * you need to make sure that other CPUs cannot execute this code in parallel.
 1749 * Also, no thread may be currently preempted in the middle of these
1750 * instructions. And on the local CPU you need to be protected against NMI or
1751 * MCE handlers seeing an inconsistent instruction while you patch.
1752 */
1753void __init_or_module text_poke_early(void *addr, const void *opcode,
1754				      size_t len)
1755{
1756	unsigned long flags;
1757
1758	if (boot_cpu_has(X86_FEATURE_NX) &&
1759	    is_module_text_address((unsigned long)addr)) {
1760		/*
 1761		 * Module text is initially marked as non-executable, so the
1762		 * code cannot be running and speculative code-fetches are
1763		 * prevented. Just change the code.
1764		 */
1765		memcpy(addr, opcode, len);
1766	} else {
1767		local_irq_save(flags);
1768		memcpy(addr, opcode, len);
1769		sync_core();
1770		local_irq_restore(flags);
1771
1772		/*
1773		 * Could also do a CLFLUSH here to speed up CPU recovery; but
1774		 * that causes hangs on some VIA CPUs.
1775		 */
1776	}
1777}
1778
1779typedef struct {
1780	struct mm_struct *mm;
1781} temp_mm_state_t;
1782
1783/*
 1784 * Using a temporary mm allows setting temporary mappings that are not accessible
1785 * by other CPUs. Such mappings are needed to perform sensitive memory writes
1786 * that override the kernel memory protections (e.g., W^X), without exposing the
1787 * temporary page-table mappings that are required for these write operations to
 1788 * other CPUs. Using a temporary mm also avoids TLB shootdowns when the
1789 * mapping is torn down.
1790 *
1791 * Context: The temporary mm needs to be used exclusively by a single core. To
 1792 *          harden security, IRQs must be disabled while the temporary mm is
1793 *          loaded, thereby preventing interrupt handler bugs from overriding
1794 *          the kernel memory protection.
1795 */
1796static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
1797{
1798	temp_mm_state_t temp_state;
1799
1800	lockdep_assert_irqs_disabled();
1801
1802	/*
1803	 * Make sure not to be in TLB lazy mode, as otherwise we'll end up
1804	 * with a stale address space WITHOUT being in lazy mode after
1805	 * restoring the previous mm.
1806	 */
1807	if (this_cpu_read(cpu_tlbstate_shared.is_lazy))
1808		leave_mm(smp_processor_id());
1809
1810	temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
1811	switch_mm_irqs_off(NULL, mm, current);
1812
1813	/*
1814	 * If breakpoints are enabled, disable them while the temporary mm is
1815	 * used. Userspace might set up watchpoints on addresses that are used
1816	 * in the temporary mm, which would lead to wrong signals being sent or
1817	 * crashes.
1818	 *
1819	 * Note that breakpoints are not disabled selectively, which also causes
1820	 * kernel breakpoints (e.g., perf's) to be disabled. This might be
1821	 * undesirable, but still seems reasonable as the code that runs in the
1822	 * temporary mm should be short.
1823	 */
1824	if (hw_breakpoint_active())
1825		hw_breakpoint_disable();
1826
1827	return temp_state;
1828}
1829
1830static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
1831{
1832	lockdep_assert_irqs_disabled();
1833	switch_mm_irqs_off(NULL, prev_state.mm, current);
1834
1835	/*
1836	 * Restore the breakpoints if they were disabled before the temporary mm
1837	 * was loaded.
1838	 */
1839	if (hw_breakpoint_active())
1840		hw_breakpoint_restore();
1841}
1842
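/*
 * The temporary mm and the virtual address within it that __text_poke() maps
 * its scratch pages at; both are preallocated via poking_init().
 */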
1843__ro_after_init struct mm_struct *poking_mm;
1844__ro_after_init unsigned long poking_addr;
1845
1846static void text_poke_memcpy(void *dst, const void *src, size_t len)
1847{
1848	memcpy(dst, src, len);
1849}
1850
1851static void text_poke_memset(void *dst, const void *src, size_t len)
1852{
1853	int c = *(const int *)src;
1854
1855	memset(dst, c, len);
1856}
1857
1858typedef void text_poke_f(void *dst, const void *src, size_t len);
1859
1860static void *__text_poke(text_poke_f func, void *addr, const void *src, size_t len)
1861{
1862	bool cross_page_boundary = offset_in_page(addr) + len > PAGE_SIZE;
1863	struct page *pages[2] = {NULL};
1864	temp_mm_state_t prev;
1865	unsigned long flags;
1866	pte_t pte, *ptep;
1867	spinlock_t *ptl;
1868	pgprot_t pgprot;
1869
1870	/*
 1871	 * While the boot memory allocator is running we cannot use struct pages as
1872	 * they are not yet initialized. There is no way to recover.
1873	 */
1874	BUG_ON(!after_bootmem);
1875
1876	if (!core_kernel_text((unsigned long)addr)) {
1877		pages[0] = vmalloc_to_page(addr);
1878		if (cross_page_boundary)
1879			pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
1880	} else {
1881		pages[0] = virt_to_page(addr);
1882		WARN_ON(!PageReserved(pages[0]));
1883		if (cross_page_boundary)
1884			pages[1] = virt_to_page(addr + PAGE_SIZE);
1885	}
1886	/*
1887	 * If something went wrong, crash and burn since recovery paths are not
1888	 * implemented.
1889	 */
1890	BUG_ON(!pages[0] || (cross_page_boundary && !pages[1]));
1891
1892	/*
1893	 * Map the page without the global bit, as TLB flushing is done with
1894	 * flush_tlb_mm_range(), which is intended for non-global PTEs.
1895	 */
1896	pgprot = __pgprot(pgprot_val(PAGE_KERNEL) & ~_PAGE_GLOBAL);
1897
1898	/*
 1899	 * The lock is not really needed, but this allows us to avoid open-coding.
1900	 */
1901	ptep = get_locked_pte(poking_mm, poking_addr, &ptl);
1902
1903	/*
1904	 * This must not fail; preallocated in poking_init().
1905	 */
1906	VM_BUG_ON(!ptep);
1907
1908	local_irq_save(flags);
1909
1910	pte = mk_pte(pages[0], pgprot);
1911	set_pte_at(poking_mm, poking_addr, ptep, pte);
1912
1913	if (cross_page_boundary) {
1914		pte = mk_pte(pages[1], pgprot);
1915		set_pte_at(poking_mm, poking_addr + PAGE_SIZE, ptep + 1, pte);
1916	}
1917
1918	/*
1919	 * Loading the temporary mm behaves as a compiler barrier, which
1920	 * guarantees that the PTE will be set at the time memcpy() is done.
1921	 */
1922	prev = use_temporary_mm(poking_mm);
1923
1924	kasan_disable_current();
1925	func((u8 *)poking_addr + offset_in_page(addr), src, len);
1926	kasan_enable_current();
1927
1928	/*
1929	 * Ensure that the PTE is only cleared after the instructions of memcpy
1930	 * were issued by using a compiler barrier.
1931	 */
1932	barrier();
1933
1934	pte_clear(poking_mm, poking_addr, ptep);
1935	if (cross_page_boundary)
1936		pte_clear(poking_mm, poking_addr + PAGE_SIZE, ptep + 1);
1937
1938	/*
1939	 * Loading the previous page-table hierarchy requires a serializing
1940	 * instruction that already allows the core to see the updated version.
1941	 * Xen-PV is assumed to serialize execution in a similar manner.
1942	 */
1943	unuse_temporary_mm(prev);
1944
1945	/*
1946	 * Flushing the TLB might involve IPIs, which would require enabled
 1947	 * IRQs, but not if the mm is not in use, as is the case at this point.
1948	 */
1949	flush_tlb_mm_range(poking_mm, poking_addr, poking_addr +
1950			   (cross_page_boundary ? 2 : 1) * PAGE_SIZE,
1951			   PAGE_SHIFT, false);
1952
1953	if (func == text_poke_memcpy) {
1954		/*
1955		 * If the text does not match what we just wrote then something is
1956		 * fundamentally screwy; there's nothing we can really do about that.
1957		 */
1958		BUG_ON(memcmp(addr, src, len));
1959	}
1960
1961	local_irq_restore(flags);
1962	pte_unmap_unlock(ptep, ptl);
1963	return addr;
1964}
1965
1966/**
1967 * text_poke - Update instructions on a live kernel
1968 * @addr: address to modify
1969 * @opcode: source of the copy
1970 * @len: length to copy
1971 *
1972 * Only atomic text poke/set should be allowed when not doing early patching.
1973 * It means the size must be writable atomically and the address must be aligned
1974 * in a way that permits an atomic write. It also makes sure we fit on a single
1975 * page.
1976 *
1977 * Note that the caller must ensure that if the modified code is part of a
1978 * module, the module would not be removed during poking. This can be achieved
1979 * by registering a module notifier, and ordering module removal and patching
1980 * through a mutex.
1981 */
1982void *text_poke(void *addr, const void *opcode, size_t len)
1983{
1984	lockdep_assert_held(&text_mutex);
1985
1986	return __text_poke(text_poke_memcpy, addr, opcode, len);
1987}
1988
1989/**
1990 * text_poke_kgdb - Update instructions on a live kernel by kgdb
1991 * @addr: address to modify
1992 * @opcode: source of the copy
1993 * @len: length to copy
1994 *
1995 * Only atomic text poke/set should be allowed when not doing early patching.
1996 * It means the size must be writable atomically and the address must be aligned
1997 * in a way that permits an atomic write. It also makes sure we fit on a single
1998 * page.
1999 *
2000 * Context: should only be used by kgdb, which ensures no other core is running,
2001 *	    despite the fact it does not hold the text_mutex.
2002 */
2003void *text_poke_kgdb(void *addr, const void *opcode, size_t len)
2004{
2005	return __text_poke(text_poke_memcpy, addr, opcode, len);
2006}
2007
2008void *text_poke_copy_locked(void *addr, const void *opcode, size_t len,
2009			    bool core_ok)
2010{
2011	unsigned long start = (unsigned long)addr;
2012	size_t patched = 0;
2013
2014	if (WARN_ON_ONCE(!core_ok && core_kernel_text(start)))
2015		return NULL;
2016
2017	while (patched < len) {
2018		unsigned long ptr = start + patched;
2019		size_t s;
2020
2021		s = min_t(size_t, PAGE_SIZE * 2 - offset_in_page(ptr), len - patched);
2022
2023		__text_poke(text_poke_memcpy, (void *)ptr, opcode + patched, s);
2024		patched += s;
2025	}
2026	return addr;
2027}
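/*
 * Chunking sketch (illustrative): __text_poke() maps at most two pages, so
 * each loop iteration above writes at most up to the end of the page after
 * the one containing @ptr.  E.g. with 4K pages, an @addr at page offset
 * 0xf00 and len = 12288 is written in two calls:
 *
 *	1st: 2*4096 - 0xf00 = 4352 bytes	(ends on a page boundary)
 *	2nd: min(2*4096, 12288 - 4352) = 7936 bytes
 */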
2028
2029/**
2030 * text_poke_copy - Copy instructions into (an unused part of) RX memory
2031 * @addr: address to modify
2032 * @opcode: source of the copy
2033 * @len: length to copy, could be more than 2x PAGE_SIZE
2034 *
2035 * Not safe against concurrent execution; useful for JITs to dump
2036 * new code blocks into unused regions of RX memory. Can be used in
2037 * conjunction with synchronize_rcu_tasks() to wait for existing
2038 * execution to quiesce after having made sure no existing function
2039 * pointers are live.
2040 */
2041void *text_poke_copy(void *addr, const void *opcode, size_t len)
2042{
2043	mutex_lock(&text_mutex);
2044	addr = text_poke_copy_locked(addr, opcode, len, false);
2045	mutex_unlock(&text_mutex);
2046	return addr;
2047}
2048
2049/**
2050 * text_poke_set - memset into (an unused part of) RX memory
2051 * @addr: address to modify
2052 * @c: the byte to fill the area with
2053 * @len: length to copy, could be more than 2x PAGE_SIZE
2054 *
2055 * This is useful to overwrite unused regions of RX memory with illegal
2056 * instructions.
2057 */
2058void *text_poke_set(void *addr, int c, size_t len)
2059{
2060	unsigned long start = (unsigned long)addr;
2061	size_t patched = 0;
2062
2063	if (WARN_ON_ONCE(core_kernel_text(start)))
2064		return NULL;
2065
2066	mutex_lock(&text_mutex);
2067	while (patched < len) {
2068		unsigned long ptr = start + patched;
2069		size_t s;
2070
2071		s = min_t(size_t, PAGE_SIZE * 2 - offset_in_page(ptr), len - patched);
2072
2073		__text_poke(text_poke_memset, (void *)ptr, (void *)&c, s);
2074		patched += s;
2075	}
2076	mutex_unlock(&text_mutex);
2077	return addr;
2078}
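/*
 * Usage sketch (hypothetical): a JIT tearing down a region can stomp it with
 * INT3 (0xcc) so that any stale reference traps instead of executing garbage:
 *
 *	text_poke_set(image, 0xcc, size);
 *
 * where "image" and "size" are placeholders for the region being retired.
 */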
2079
2080static void do_sync_core(void *info)
2081{
2082	sync_core();
2083}
2084
2085void text_poke_sync(void)
2086{
2087	on_each_cpu(do_sync_core, NULL, 1);
2088}
2089
2090/*
2091 * NOTE: crazy scheme to allow patching Jcc.d32 without increasing the size of
2092 * this thing. When len == 6 everything is prefixed with 0x0f and we map the
2093 * opcode to Jcc.d8, using len to distinguish.
2094 */
2095struct text_poke_loc {
2096	/* addr := _stext + rel_addr */
2097	s32 rel_addr;
2098	s32 disp;
2099	u8 len;
2100	u8 opcode;
2101	const u8 text[POKE_MAX_OPCODE_SIZE];
2102	/* see text_poke_bp_batch() */
2103	u8 old;
2104};
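/*
 * Encoding sketch (illustrative): for a 6-byte "jz" with a 32-bit displacement
 * (0f 84 <imm32>), text_poke_loc_init() below stores len = 6, copies only the
 * five bytes after the 0x0f prefix into ->text, and maps ->opcode to the
 * Jcc.d8 form (0x84 - 0x10 = 0x74), so poke_int3_handler() can emulate the
 * branch with int3_emulate_jcc() while the site is being rewritten.
 */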
2105
2106struct bp_patching_desc {
2107	struct text_poke_loc *vec;
2108	int nr_entries;
2109	atomic_t refs;
2110};
2111
2112static struct bp_patching_desc bp_desc;
2113
2114static __always_inline
2115struct bp_patching_desc *try_get_desc(void)
2116{
2117	struct bp_patching_desc *desc = &bp_desc;
2118
2119	if (!raw_atomic_inc_not_zero(&desc->refs))
2120		return NULL;
2121
2122	return desc;
2123}
2124
2125static __always_inline void put_desc(void)
2126{
2127	struct bp_patching_desc *desc = &bp_desc;
2128
2129	smp_mb__before_atomic();
2130	raw_atomic_dec(&desc->refs);
2131}
2132
2133static __always_inline void *text_poke_addr(struct text_poke_loc *tp)
2134{
2135	return _stext + tp->rel_addr;
2136}
2137
2138static __always_inline int patch_cmp(const void *key, const void *elt)
2139{
2140	struct text_poke_loc *tp = (struct text_poke_loc *) elt;
2141
2142	if (key < text_poke_addr(tp))
2143		return -1;
2144	if (key > text_poke_addr(tp))
2145		return 1;
2146	return 0;
2147}
2148
2149noinstr int poke_int3_handler(struct pt_regs *regs)
2150{
2151	struct bp_patching_desc *desc;
2152	struct text_poke_loc *tp;
2153	int ret = 0;
2154	void *ip;
2155
2156	if (user_mode(regs))
2157		return 0;
2158
2159	/*
2160	 * Having observed our INT3 instruction, we now must observe
2161	 * bp_desc with non-zero refcount:
2162	 *
2163	 *	bp_desc.refs = 1		INT3
2164	 *	WMB				RMB
2165	 *	write INT3			if (bp_desc.refs != 0)
2166	 */
2167	smp_rmb();
2168
2169	desc = try_get_desc();
2170	if (!desc)
2171		return 0;
2172
2173	/*
2174	 * Discount the INT3. See text_poke_bp_batch().
2175	 */
2176	ip = (void *) regs->ip - INT3_INSN_SIZE;
2177
2178	/*
2179	 * Skip the binary search if there is a single member in the vector.
2180	 */
2181	if (unlikely(desc->nr_entries > 1)) {
2182		tp = __inline_bsearch(ip, desc->vec, desc->nr_entries,
2183				      sizeof(struct text_poke_loc),
2184				      patch_cmp);
2185		if (!tp)
2186			goto out_put;
2187	} else {
2188		tp = desc->vec;
2189		if (text_poke_addr(tp) != ip)
2190			goto out_put;
2191	}
2192
2193	ip += tp->len;
2194
2195	switch (tp->opcode) {
2196	case INT3_INSN_OPCODE:
2197		/*
2198		 * Someone poked an explicit INT3, they'll want to handle it,
2199		 * do not consume.
2200		 */
2201		goto out_put;
2202
2203	case RET_INSN_OPCODE:
2204		int3_emulate_ret(regs);
2205		break;
2206
2207	case CALL_INSN_OPCODE:
2208		int3_emulate_call(regs, (long)ip + tp->disp);
2209		break;
2210
2211	case JMP32_INSN_OPCODE:
2212	case JMP8_INSN_OPCODE:
2213		int3_emulate_jmp(regs, (long)ip + tp->disp);
2214		break;
2215
2216	case 0x70 ... 0x7f: /* Jcc */
2217		int3_emulate_jcc(regs, tp->opcode & 0xf, (long)ip, tp->disp);
2218		break;
2219
2220	default:
2221		BUG();
2222	}
2223
2224	ret = 1;
2225
2226out_put:
2227	put_desc();
2228	return ret;
2229}
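/*
 * Emulation sketch (illustrative): while a 5-byte CALL at address A (with
 * displacement D) is being patched, a CPU hitting the temporary INT3 traps
 * with regs->ip == A + 1.  The handler computes ip = A, then ip += tp->len,
 * so int3_emulate_call() pushes A + 5 as the return address and sets
 * regs->ip to (A + 5) + D -- exactly what the finished CALL would have done.
 */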
2230
2231#define TP_VEC_MAX (PAGE_SIZE / sizeof(struct text_poke_loc))
2232static struct text_poke_loc tp_vec[TP_VEC_MAX];
2233static int tp_vec_nr;
2234
2235/**
2236 * text_poke_bp_batch() -- update instructions on live kernel on SMP
2237 * @tp:			vector of instructions to patch
2238 * @nr_entries:		number of entries in the vector
2239 *
2240 * Modify multi-byte instructions by using an int3 breakpoint on SMP.
2241 * We completely avoid stop_machine() here, and achieve the
2242 * synchronization using the int3 breakpoint.
2243 *
2244 * The way it is done:
2245 *	- For each entry in the vector:
2246 *		- add an int3 trap to the address that will be patched
2247 *	- sync cores
2248 *	- For each entry in the vector:
2249 *		- update all but the first byte of the patched range
2250 *	- sync cores
2251 *	- For each entry in the vector:
2252 *		- replace the first byte (int3) with the first byte of the
2253 *		  replacement opcode
2254 *	- sync cores
2255 */
2256static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
2257{
2258	unsigned char int3 = INT3_INSN_OPCODE;
2259	unsigned int i;
2260	int do_sync;
2261
2262	lockdep_assert_held(&text_mutex);
2263
2264	bp_desc.vec = tp;
2265	bp_desc.nr_entries = nr_entries;
2266
2267	/*
2268	 * Corresponds to the implicit memory barrier in try_get_desc() to
2269	 * ensure reading a non-zero refcount provides up to date bp_desc data.
2270	 */
2271	atomic_set_release(&bp_desc.refs, 1);
2272
2273	/*
2274	 * Function tracing can enable thousands of places that need to be
2275	 * updated. This can take quite some time, and with full kernel debugging
2276	 * enabled, this could cause the softlockup watchdog to trigger.
2277	 * This function gets called every 256 entries added to be patched.
2278	 * Call cond_resched() here to make sure that other tasks can get scheduled
2279	 * while processing all the functions being patched.
2280	 */
2281	cond_resched();
2282
2283	/*
2284	 * Corresponding read barrier in int3 notifier for making sure the
2285	 * nr_entries and handler are correctly ordered wrt. patching.
2286	 */
2287	smp_wmb();
2288
2289	/*
2290	 * First step: add an int3 trap to the address that will be patched.
2291	 */
2292	for (i = 0; i < nr_entries; i++) {
2293		tp[i].old = *(u8 *)text_poke_addr(&tp[i]);
2294		text_poke(text_poke_addr(&tp[i]), &int3, INT3_INSN_SIZE);
2295	}
2296
2297	text_poke_sync();
2298
2299	/*
2300	 * Second step: update all but the first byte of the patched range.
2301	 */
2302	for (do_sync = 0, i = 0; i < nr_entries; i++) {
2303		u8 old[POKE_MAX_OPCODE_SIZE+1] = { tp[i].old, };
2304		u8 _new[POKE_MAX_OPCODE_SIZE+1];
2305		const u8 *new = tp[i].text;
2306		int len = tp[i].len;
2307
2308		if (len - INT3_INSN_SIZE > 0) {
2309			memcpy(old + INT3_INSN_SIZE,
2310			       text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
2311			       len - INT3_INSN_SIZE);
2312
2313			if (len == 6) {
2314				_new[0] = 0x0f;
2315				memcpy(_new + 1, new, 5);
2316				new = _new;
2317			}
2318
2319			text_poke(text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
2320				  new + INT3_INSN_SIZE,
2321				  len - INT3_INSN_SIZE);
2322
2323			do_sync++;
2324		}
2325
2326		/*
2327		 * Emit a perf event to record the text poke, primarily to
2328		 * support Intel PT decoding which must walk the executable code
2329		 * to reconstruct the trace. The flow up to here is:
2330		 *   - write INT3 byte
2331		 *   - IPI-SYNC
2332		 *   - write instruction tail
2333		 * At this point the actual control flow will be through the
2334		 * INT3 and handler and not hit the old or new instruction.
2335		 * Intel PT outputs FUP/TIP packets for the INT3, so the flow
2336		 * can still be decoded. Subsequently:
2337		 *   - emit RECORD_TEXT_POKE with the new instruction
2338		 *   - IPI-SYNC
2339		 *   - write first byte
2340		 *   - IPI-SYNC
2341		 * So before the text poke event timestamp, the decoder will see
2342		 * either the old instruction flow or FUP/TIP of INT3. After the
2343		 * text poke event timestamp, the decoder will see either the
2344		 * new instruction flow or FUP/TIP of INT3. Thus decoders can
2345		 * use the timestamp as the point at which to modify the
2346		 * executable code.
2347		 * The old instruction is recorded so that the event can be
2348		 * processed forwards or backwards.
2349		 */
2350		perf_event_text_poke(text_poke_addr(&tp[i]), old, len, new, len);
2351	}
2352
2353	if (do_sync) {
2354		/*
2355		 * According to Intel, this core syncing is very likely
2356		 * not necessary and we'd be safe even without it. But
2357		 * better safe than sorry (plus there's not only Intel).
2358		 */
2359		text_poke_sync();
2360	}
2361
2362	/*
2363	 * Third step: replace the first byte (int3) with the first byte of the
2364	 * replacement opcode.
2365	 */
2366	for (do_sync = 0, i = 0; i < nr_entries; i++) {
2367		u8 byte = tp[i].text[0];
2368
2369		if (tp[i].len == 6)
2370			byte = 0x0f;
2371
2372		if (byte == INT3_INSN_OPCODE)
2373			continue;
2374
2375		text_poke(text_poke_addr(&tp[i]), &byte, INT3_INSN_SIZE);
2376		do_sync++;
2377	}
2378
2379	if (do_sync)
2380		text_poke_sync();
2381
2382	/*
2383	 * Remove and wait for refs to be zero.
2384	 */
2385	if (!atomic_dec_and_test(&bp_desc.refs))
2386		atomic_cond_read_acquire(&bp_desc.refs, !VAL);
2387}
2388
2389static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
2390			       const void *opcode, size_t len, const void *emulate)
2391{
2392	struct insn insn;
2393	int ret, i = 0;
2394
2395	if (len == 6)
2396		i = 1;
2397	memcpy((void *)tp->text, opcode+i, len-i);
2398	if (!emulate)
2399		emulate = opcode;
2400
2401	ret = insn_decode_kernel(&insn, emulate);
2402	BUG_ON(ret < 0);
2403
2404	tp->rel_addr = addr - (void *)_stext;
2405	tp->len = len;
2406	tp->opcode = insn.opcode.bytes[0];
2407
2408	if (is_jcc32(&insn)) {
2409		/*
2410		 * Map Jcc.d32 onto Jcc.d8 and use len to distinguish.
2411		 */
2412		tp->opcode = insn.opcode.bytes[1] - 0x10;
2413	}
2414
2415	switch (tp->opcode) {
2416	case RET_INSN_OPCODE:
2417	case JMP32_INSN_OPCODE:
2418	case JMP8_INSN_OPCODE:
2419		/*
2420		 * Control flow instructions without implied execution of the
2421		 * next instruction can be padded with INT3.
2422		 */
2423		for (i = insn.length; i < len; i++)
2424			BUG_ON(tp->text[i] != INT3_INSN_OPCODE);
2425		break;
2426
2427	default:
2428		BUG_ON(len != insn.length);
2429	}
2430
2431	switch (tp->opcode) {
2432	case INT3_INSN_OPCODE:
2433	case RET_INSN_OPCODE:
2434		break;
2435
2436	case CALL_INSN_OPCODE:
2437	case JMP32_INSN_OPCODE:
2438	case JMP8_INSN_OPCODE:
2439	case 0x70 ... 0x7f: /* Jcc */
2440		tp->disp = insn.immediate.value;
2441		break;
2442
2443	default: /* assume NOP */
2444		switch (len) {
2445		case 2: /* NOP2 -- emulate as JMP8+0 */
2446			BUG_ON(memcmp(emulate, x86_nops[len], len));
2447			tp->opcode = JMP8_INSN_OPCODE;
2448			tp->disp = 0;
2449			break;
2450
2451		case 5: /* NOP5 -- emulate as JMP32+0 */
2452			BUG_ON(memcmp(emulate, x86_nops[len], len));
2453			tp->opcode = JMP32_INSN_OPCODE;
2454			tp->disp = 0;
2455			break;
2456
2457		default: /* unknown instruction */
2458			BUG();
2459		}
2460		break;
2461	}
2462}
2463
2464/*
2465 * We rely hard on tp_vec being ordered; ensure this is so by flushing
2466 * early if needed.
2467 */
2468static bool tp_order_fail(void *addr)
2469{
2470	struct text_poke_loc *tp;
2471
2472	if (!tp_vec_nr)
2473		return false;
2474
2475	if (!addr) /* force */
2476		return true;
2477
2478	tp = &tp_vec[tp_vec_nr - 1];
2479	if ((unsigned long)text_poke_addr(tp) > (unsigned long)addr)
2480		return true;
2481
2482	return false;
2483}
2484
2485static void text_poke_flush(void *addr)
2486{
2487	if (tp_vec_nr == TP_VEC_MAX || tp_order_fail(addr)) {
2488		text_poke_bp_batch(tp_vec, tp_vec_nr);
2489		tp_vec_nr = 0;
2490	}
2491}
2492
2493void text_poke_finish(void)
2494{
2495	text_poke_flush(NULL);
2496}
2497
2498void __ref text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate)
2499{
2500	struct text_poke_loc *tp;
2501
2502	text_poke_flush(addr);
2503
2504	tp = &tp_vec[tp_vec_nr++];
2505	text_poke_loc_init(tp, addr, opcode, len, emulate);
2506}
2507
2508/**
2509 * text_poke_bp() -- update instructions on live kernel on SMP
2510 * @addr:	address to patch
2511 * @opcode:	opcode of new instruction
2512 * @len:	length to copy
2513 * @emulate:	instruction to be emulated
2514 *
2515 * Update a single instruction with the vector in the stack, avoiding
2516 * dynamically allocated memory. This function should be used when it is
2517 * not possible to allocate memory.
2518 */
2519void __ref text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate)
2520{
2521	struct text_poke_loc tp;
2522
2523	text_poke_loc_init(&tp, addr, opcode, len, emulate);
2524	text_poke_bp_batch(&tp, 1);
2525}
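/*
 * Usage sketch (hypothetical, names made up): turn a 5-byte NOP at @ip into a
 * direct CALL to @func while other CPUs may be executing it.  text_poke_bp(),
 * __text_gen_insn() and text_mutex are real; the wrapper is for illustration.
 */
static __maybe_unused void example_patch_nop5_to_call(void *ip, void *func)
{
	u8 insn[CALL_INSN_SIZE];

	__text_gen_insn(insn, CALL_INSN_OPCODE, ip, func, CALL_INSN_SIZE);

	mutex_lock(&text_mutex);
	text_poke_bp(ip, insn, CALL_INSN_SIZE, NULL);
	mutex_unlock(&text_mutex);
}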
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2#define pr_fmt(fmt) "SMP alternatives: " fmt
   3
   4#include <linux/module.h>
   5#include <linux/sched.h>
   6#include <linux/perf_event.h>
   7#include <linux/mutex.h>
   8#include <linux/list.h>
   9#include <linux/stringify.h>
  10#include <linux/highmem.h>
  11#include <linux/mm.h>
  12#include <linux/vmalloc.h>
  13#include <linux/memory.h>
  14#include <linux/stop_machine.h>
  15#include <linux/slab.h>
  16#include <linux/kdebug.h>
  17#include <linux/kprobes.h>
  18#include <linux/mmu_context.h>
  19#include <linux/bsearch.h>
  20#include <linux/sync_core.h>
  21#include <asm/text-patching.h>
  22#include <asm/alternative.h>
  23#include <asm/sections.h>
  24#include <asm/mce.h>
  25#include <asm/nmi.h>
  26#include <asm/cacheflush.h>
  27#include <asm/tlbflush.h>
  28#include <asm/insn.h>
  29#include <asm/io.h>
  30#include <asm/fixmap.h>
  31#include <asm/paravirt.h>
  32#include <asm/asm-prototypes.h>
  33#include <asm/cfi.h>
  34
  35int __read_mostly alternatives_patched;
  36
  37EXPORT_SYMBOL_GPL(alternatives_patched);
  38
  39#define MAX_PATCH_LEN (255-1)
  40
  41#define DA_ALL		(~0)
  42#define DA_ALT		0x01
  43#define DA_RET		0x02
  44#define DA_RETPOLINE	0x04
  45#define DA_ENDBR	0x08
  46#define DA_SMP		0x10
  47
  48static unsigned int debug_alternative;
  49
  50static int __init debug_alt(char *str)
  51{
  52	if (str && *str == '=')
  53		str++;
  54
  55	if (!str || kstrtouint(str, 0, &debug_alternative))
  56		debug_alternative = DA_ALL;
  57
  58	return 1;
  59}
  60__setup("debug-alternative", debug_alt);
  61
  62static int noreplace_smp;
  63
  64static int __init setup_noreplace_smp(char *str)
  65{
  66	noreplace_smp = 1;
  67	return 1;
  68}
  69__setup("noreplace-smp", setup_noreplace_smp);
  70
  71#define DPRINTK(type, fmt, args...)					\
  72do {									\
  73	if (debug_alternative & DA_##type)				\
  74		printk(KERN_DEBUG pr_fmt(fmt) "\n", ##args);		\
  75} while (0)
  76
  77#define DUMP_BYTES(type, buf, len, fmt, args...)			\
  78do {									\
  79	if (unlikely(debug_alternative & DA_##type)) {			\
  80		int j;							\
  81									\
  82		if (!(len))						\
  83			break;						\
  84									\
  85		printk(KERN_DEBUG pr_fmt(fmt), ##args);			\
  86		for (j = 0; j < (len) - 1; j++)				\
  87			printk(KERN_CONT "%02hhx ", buf[j]);		\
  88		printk(KERN_CONT "%02hhx\n", buf[j]);			\
  89	}								\
  90} while (0)
  91
  92static const unsigned char x86nops[] =
  93{
  94	BYTES_NOP1,
  95	BYTES_NOP2,
  96	BYTES_NOP3,
  97	BYTES_NOP4,
  98	BYTES_NOP5,
  99	BYTES_NOP6,
 100	BYTES_NOP7,
 101	BYTES_NOP8,
 102#ifdef CONFIG_64BIT
 103	BYTES_NOP9,
 104	BYTES_NOP10,
 105	BYTES_NOP11,
 106#endif
 107};
 108
 109const unsigned char * const x86_nops[ASM_NOP_MAX+1] =
 110{
 111	NULL,
 112	x86nops,
 113	x86nops + 1,
 114	x86nops + 1 + 2,
 115	x86nops + 1 + 2 + 3,
 116	x86nops + 1 + 2 + 3 + 4,
 117	x86nops + 1 + 2 + 3 + 4 + 5,
 118	x86nops + 1 + 2 + 3 + 4 + 5 + 6,
 119	x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
 120#ifdef CONFIG_64BIT
 121	x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
 122	x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9,
 123	x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10,
 124#endif
 125};
 126
 127/*
 128 * Fill the buffer with a single effective instruction of size @len.
 129 *
 130 * In order not to issue an ORC stack depth tracking CFI entry (Call Frame Info)
 131 * for every single-byte NOP, try to generate the maximally available NOP of
 132 * size <= ASM_NOP_MAX such that only a single CFI entry is generated (vs. one
 133 * for each single-byte NOP). If the @len to fill out is > ASM_NOP_MAX, pad with
 134 * INT3 and *jump* over the padding instead of executing long and daft NOPs.
 135 */
 136static void add_nop(u8 *instr, unsigned int len)
 137{
 138	u8 *target = instr + len;
 139
 140	if (!len)
 141		return;
 142
 143	if (len <= ASM_NOP_MAX) {
 144		memcpy(instr, x86_nops[len], len);
 145		return;
 146	}
 147
 148	if (len < 128) {
 149		__text_gen_insn(instr, JMP8_INSN_OPCODE, instr, target, JMP8_INSN_SIZE);
 150		instr += JMP8_INSN_SIZE;
 151	} else {
 152		__text_gen_insn(instr, JMP32_INSN_OPCODE, instr, target, JMP32_INSN_SIZE);
 153		instr += JMP32_INSN_SIZE;
 154	}
 155
 156	for (;instr < target; instr++)
 157		*instr = INT3_INSN_OPCODE;
 158}
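/*
 * Sketch (illustrative): on 64-bit, add_nop(instr, 40) cannot use a single
 * NOP (ASM_NOP_MAX == 11), so it emits a 2-byte "jmp .+38" (eb 26) followed
 * by 38 INT3 bytes; the hole is skipped in one instruction rather than
 * executed as a chain of long NOPs.
 */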
 159
 160extern s32 __retpoline_sites[], __retpoline_sites_end[];
 161extern s32 __return_sites[], __return_sites_end[];
 162extern s32 __cfi_sites[], __cfi_sites_end[];
 163extern s32 __ibt_endbr_seal[], __ibt_endbr_seal_end[];
 164extern s32 __smp_locks[], __smp_locks_end[];
 165void text_poke_early(void *addr, const void *opcode, size_t len);
 166
 167/*
 168 * Matches NOP and NOPL, not any of the other possible NOPs.
 169 */
 170static bool insn_is_nop(struct insn *insn)
 171{
 172	/* Anything NOP, but no REP NOP */
 173	if (insn->opcode.bytes[0] == 0x90 &&
 174	    (!insn->prefixes.nbytes || insn->prefixes.bytes[0] != 0xF3))
 175		return true;
 176
 177	/* NOPL */
 178	if (insn->opcode.bytes[0] == 0x0F && insn->opcode.bytes[1] == 0x1F)
 179		return true;
 180
 181	/* TODO: more nops */
 182
 183	return false;
 184}
 185
 186/*
 187 * Find the offset of the first non-NOP instruction starting at @offset
 188 * but no further than @len.
 189 */
 190static int skip_nops(u8 *instr, int offset, int len)
 191{
 192	struct insn insn;
 193
 194	for (; offset < len; offset += insn.length) {
 195		if (insn_decode_kernel(&insn, &instr[offset]))
 196			break;
 197
 198		if (!insn_is_nop(&insn))
 199			break;
 200	}
 201
 202	return offset;
 203}
 204
 205/*
 206 * Optimize a sequence of NOPs, possibly preceded by an unconditional jump
 207 * to the end of the NOP sequence, into a single NOP.
 208 */
 209static bool
 210__optimize_nops(u8 *instr, size_t len, struct insn *insn, int *next, int *prev, int *target)
 211{
 212	int i = *next - insn->length;
 213
 214	switch (insn->opcode.bytes[0]) {
 215	case JMP8_INSN_OPCODE:
 216	case JMP32_INSN_OPCODE:
 217		*prev = i;
 218		*target = *next + insn->immediate.value;
 219		return false;
 220	}
 221
 222	if (insn_is_nop(insn)) {
 223		int nop = i;
 224
 225		*next = skip_nops(instr, *next, len);
 226		if (*target && *next == *target)
 227			nop = *prev;
 228
 229		add_nop(instr + nop, *next - nop);
 230		DUMP_BYTES(ALT, instr, len, "%px: [%d:%d) optimized NOPs: ", instr, nop, *next);
 231		return true;
 232	}
 233
 234	*target = 0;
 235	return false;
 236}
 237
 238/*
 239 * "noinline" to cause a control flow change and thus invalidate the I$ and
 240 * cause a refetch after modification.
 241 */
 242static void __init_or_module noinline optimize_nops(u8 *instr, size_t len)
 243{
 244	int prev, target = 0;
 245
 246	for (int next, i = 0; i < len; i = next) {
 247		struct insn insn;
 248
 249		if (insn_decode_kernel(&insn, &instr[i]))
 250			return;
 251
 252		next = i + insn.length;
 253
 254		__optimize_nops(instr, len, &insn, &next, &prev, &target);
 255	}
 256}
 257
 258static void __init_or_module noinline optimize_nops_inplace(u8 *instr, size_t len)
 259{
 260	unsigned long flags;
 261
 262	local_irq_save(flags);
 263	optimize_nops(instr, len);
 264	sync_core();
 265	local_irq_restore(flags);
 266}
 267
 268/*
 269 * In this context, "source" is where the instructions are placed by the
 270 * toolchain at kernel build time, i.e. in the section
 271 * .altinstr_replacement.
 272 * "Destination" is where the instructions are being patched in by this
 273 * machinery.
 274 *
 275 * The source offset is:
 276 *
 277 *   src_imm = target - src_next_ip                  (1)
 278 *
 279 * and the target offset is:
 280 *
 281 *   dst_imm = target - dst_next_ip                  (2)
 282 *
 283 * so rework (1) as an expression for target like:
 284 *
 285 *   target = src_imm + src_next_ip                  (1a)
 286 *
 287 * and substitute in (2) to get:
 288 *
 289 *   dst_imm = (src_imm + src_next_ip) - dst_next_ip (3)
 290 *
 291 * Now, since the instruction stream is 'identical' at src and dst (it
 292 * is being copied after all) it can be stated that:
 293 *
 294 *   src_next_ip = src + ip_offset
 295 *   dst_next_ip = dst + ip_offset                   (4)
 296 *
 297 * Substitute (4) in (3) and observe ip_offset being cancelled out to
 298 * obtain:
 299 *
 300 *   dst_imm = src_imm + (src + ip_offset) - (dst + ip_offset)
 301 *           = src_imm + src - dst + ip_offset - ip_offset
 302 *           = src_imm + src - dst                   (5)
 303 *
 304 * IOW, only the relative displacement of the code block matters.
 305 */
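/*
 * Worked example (illustrative): a CALL at src = 0x1000 targeting 0x2000 has
 * src_imm = 0x2000 - (0x1000 + 5) = 0xffb.  Copied to dst = 0x3000, (5) gives
 * dst_imm = 0xffb + 0x1000 - 0x3000 = -0x1005 = 0x2000 - (0x3000 + 5),
 * so the relocated CALL still lands on 0x2000.
 */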
 306
 307#define apply_reloc_n(n_, p_, d_)				\
 308	do {							\
 309		s32 v = *(s##n_ *)(p_);				\
 310		v += (d_);					\
 311		BUG_ON((v >> 31) != (v >> (n_-1)));		\
 312		*(s##n_ *)(p_) = (s##n_)v;			\
 313	} while (0)
 314
 315
 316static __always_inline
 317void apply_reloc(int n, void *ptr, uintptr_t diff)
 318{
 319	switch (n) {
 320	case 1: apply_reloc_n(8, ptr, diff); break;
 321	case 2: apply_reloc_n(16, ptr, diff); break;
 322	case 4: apply_reloc_n(32, ptr, diff); break;
 323	default: BUG();
 324	}
 325}
 326
 327static __always_inline
 328bool need_reloc(unsigned long offset, u8 *src, size_t src_len)
 329{
 330	u8 *target = src + offset;
 331	/*
 332	 * If the target is inside the patched block, it's relative to the
 333	 * block itself and does not need relocation.
 334	 */
 335	return (target < src || target > src + src_len);
 336}
 337
 338void apply_relocation(u8 *buf, size_t len, u8 *dest, u8 *src, size_t src_len)
 339{
 340	int prev, target = 0;
 341
 342	for (int next, i = 0; i < len; i = next) {
 343		struct insn insn;
 344
 345		if (WARN_ON_ONCE(insn_decode_kernel(&insn, &buf[i])))
 346			return;
 347
 348		next = i + insn.length;
 349
 350		if (__optimize_nops(buf, len, &insn, &next, &prev, &target))
 351			continue;
 352
 353		switch (insn.opcode.bytes[0]) {
 354		case 0x0f:
 355			if (insn.opcode.bytes[1] < 0x80 ||
 356			    insn.opcode.bytes[1] > 0x8f)
 357				break;
 358
 359			fallthrough;	/* Jcc.d32 */
 360		case 0x70 ... 0x7f:	/* Jcc.d8 */
 361		case JMP8_INSN_OPCODE:
 362		case JMP32_INSN_OPCODE:
 363		case CALL_INSN_OPCODE:
 364			if (need_reloc(next + insn.immediate.value, src, src_len)) {
 365				apply_reloc(insn.immediate.nbytes,
 366					    buf + i + insn_offset_immediate(&insn),
 367					    src - dest);
 368			}
 369
 370			/*
 371			 * Where possible, convert JMP.d32 into JMP.d8.
 372			 */
 373			if (insn.opcode.bytes[0] == JMP32_INSN_OPCODE) {
 374				s32 imm = insn.immediate.value;
 375				imm += src - dest;
 376				imm += JMP32_INSN_SIZE - JMP8_INSN_SIZE;
 377				if ((imm >> 31) == (imm >> 7)) {
 378					buf[i+0] = JMP8_INSN_OPCODE;
 379					buf[i+1] = (s8)imm;
 380
 381					memset(&buf[i+2], INT3_INSN_OPCODE, insn.length - 2);
 382				}
 383			}
 384			break;
 385		}
 386
 387		if (insn_rip_relative(&insn)) {
 388			if (need_reloc(next + insn.displacement.value, src, src_len)) {
 389				apply_reloc(insn.displacement.nbytes,
 390					    buf + i + insn_offset_displacement(&insn),
 391					    src - dest);
 392			}
 393		}
 394	}
 395}
 396
 397/* Low-level backend functions usable from alternative code replacements. */
 398DEFINE_ASM_FUNC(nop_func, "", .entry.text);
 399EXPORT_SYMBOL_GPL(nop_func);
 400
 401noinstr void BUG_func(void)
 402{
 403	BUG();
 404}
 405EXPORT_SYMBOL(BUG_func);
 406
 407#define CALL_RIP_REL_OPCODE	0xff
 408#define CALL_RIP_REL_MODRM	0x15
 409
 410/*
 411 * Rewrite the "call BUG_func" replacement to point to the target of the
 412 * indirect pv_ops call "call *disp(%ip)".
 413 */
 414static int alt_replace_call(u8 *instr, u8 *insn_buff, struct alt_instr *a)
 415{
 416	void *target, *bug = &BUG_func;
 417	s32 disp;
 418
 419	if (a->replacementlen != 5 || insn_buff[0] != CALL_INSN_OPCODE) {
 420		pr_err("ALT_FLAG_DIRECT_CALL set for a non-call replacement instruction\n");
 421		BUG();
 422	}
 423
 424	if (a->instrlen != 6 ||
 425	    instr[0] != CALL_RIP_REL_OPCODE ||
 426	    instr[1] != CALL_RIP_REL_MODRM) {
 427		pr_err("ALT_FLAG_DIRECT_CALL set for unrecognized indirect call\n");
 428		BUG();
 429	}
 430
 431	/* Skip CALL_RIP_REL_OPCODE and CALL_RIP_REL_MODRM */
 432	disp = *(s32 *)(instr + 2);
 433#ifdef CONFIG_X86_64
 434	/* ff 15 00 00 00 00   call   *0x0(%rip) */
 435	/* target address is stored at "next instruction + disp". */
 436	target = *(void **)(instr + a->instrlen + disp);
 437#else
 438	/* ff 15 00 00 00 00   call   *0x0 */
 439	/* target address is stored at disp. */
 440	target = *(void **)disp;
 441#endif
 442	if (!target)
 443		target = bug;
 444
 445	/* (BUG_func - .) + (target - BUG_func) := target - . */
 446	*(s32 *)(insn_buff + 1) += target - bug;
 447
 448	if (target == &nop_func)
 449		return 0;
 450
 451	return 5;
 452}
 453
 454/*
 455 * Replace instructions with better alternatives for this CPU type. This runs
 456 * before SMP is initialized to avoid SMP problems with self-modifying code.
 457 * This implies that asymmetric systems where APs have fewer capabilities than
 458 * the boot processor are not handled. Tough. Make sure you disable such
 459 * features by hand.
 460 *
 461 * Marked "noinline" to cause a control flow change and thus make the insn
 462 * cache refetch the changed I$ lines.
 463 */
 464void __init_or_module noinline apply_alternatives(struct alt_instr *start,
 465						  struct alt_instr *end)
 466{
 467	struct alt_instr *a;
 468	u8 *instr, *replacement;
 469	u8 insn_buff[MAX_PATCH_LEN];
 470
 471	DPRINTK(ALT, "alt table %px, -> %px", start, end);
 472
 473	/*
 474	 * In the case CONFIG_X86_5LEVEL=y, KASAN_SHADOW_START is defined using
 475	 * cpu_feature_enabled(X86_FEATURE_LA57) and is therefore patched here.
 476	 * During the process, KASAN becomes confused seeing partial LA57
 477	 * conversion and triggers a false-positive out-of-bound report.
 478	 *
 479	 * Disable KASAN until the patching is complete.
 480	 */
 481	kasan_disable_current();
 482
 483	/*
 484 * The scan order should be from start to end. A later scanned
 485 * alternative can overwrite a previously scanned one.
 486 * Some kernel functions (e.g. memcpy, memset, etc) rely on this order to
 487 * patch code.
 488 *
 489 * So be careful if you want to change the scan order to any other
 490 * order.
 491	 */
 492	for (a = start; a < end; a++) {
 493		int insn_buff_sz = 0;
 494
 495		instr = (u8 *)&a->instr_offset + a->instr_offset;
 496		replacement = (u8 *)&a->repl_offset + a->repl_offset;
 497		BUG_ON(a->instrlen > sizeof(insn_buff));
 498		BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
 499
 500		/*
 501		 * Patch if either:
 502		 * - feature is present
 503		 * - feature not present but ALT_FLAG_NOT is set to mean,
 504		 *   patch if feature is *NOT* present.
 505		 */
 506		if (!boot_cpu_has(a->cpuid) == !(a->flags & ALT_FLAG_NOT)) {
 507			optimize_nops_inplace(instr, a->instrlen);
 508			continue;
 509		}
 510
 511		DPRINTK(ALT, "feat: %d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d) flags: 0x%x",
 512			a->cpuid >> 5,
 513			a->cpuid & 0x1f,
 514			instr, instr, a->instrlen,
 515			replacement, a->replacementlen, a->flags);
 516
 517		memcpy(insn_buff, replacement, a->replacementlen);
 518		insn_buff_sz = a->replacementlen;
 519
 520		if (a->flags & ALT_FLAG_DIRECT_CALL) {
 521			insn_buff_sz = alt_replace_call(instr, insn_buff, a);
 522			if (insn_buff_sz < 0)
 523				continue;
 524		}
 525
 526		for (; insn_buff_sz < a->instrlen; insn_buff_sz++)
 527			insn_buff[insn_buff_sz] = 0x90;
 528
 529		apply_relocation(insn_buff, a->instrlen, instr, replacement, a->replacementlen);
 530
 531		DUMP_BYTES(ALT, instr, a->instrlen, "%px:   old_insn: ", instr);
 532		DUMP_BYTES(ALT, replacement, a->replacementlen, "%px:   rpl_insn: ", replacement);
 533		DUMP_BYTES(ALT, insn_buff, insn_buff_sz, "%px: final_insn: ", instr);
 534
 535		text_poke_early(instr, insn_buff, insn_buff_sz);
 536	}
 537
 538	kasan_enable_current();
 539}
 540
 541static inline bool is_jcc32(struct insn *insn)
 542{
 543	/* Jcc.d32 second opcode byte is in the range: 0x80-0x8f */
 544	return insn->opcode.bytes[0] == 0x0f && (insn->opcode.bytes[1] & 0xf0) == 0x80;
 545}
 546
 547#if defined(CONFIG_MITIGATION_RETPOLINE) && defined(CONFIG_OBJTOOL)
 548
 549/*
 550 * CALL/JMP *%\reg
 551 */
 552static int emit_indirect(int op, int reg, u8 *bytes)
 553{
 554	int i = 0;
 555	u8 modrm;
 556
 557	switch (op) {
 558	case CALL_INSN_OPCODE:
 559		modrm = 0x10; /* Reg = 2; CALL r/m */
 560		break;
 561
 562	case JMP32_INSN_OPCODE:
 563		modrm = 0x20; /* Reg = 4; JMP r/m */
 564		break;
 565
 566	default:
 567		WARN_ON_ONCE(1);
 568		return -1;
 569	}
 570
 571	if (reg >= 8) {
 572		bytes[i++] = 0x41; /* REX.B prefix */
 573		reg -= 8;
 574	}
 575
 576	modrm |= 0xc0; /* Mod = 3 */
 577	modrm += reg;
 578
 579	bytes[i++] = 0xff; /* opcode */
 580	bytes[i++] = modrm;
 581
 582	return i;
 583}
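/*
 * Example (illustrative): emit_indirect(CALL_INSN_OPCODE, 11, bytes) emits
 * "41 ff d3", i.e. REX.B + FF /2 with Mod = 3, r/m = 3 -- CALL *%r11.
 */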
 584
 585static int emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8 *bytes)
 586{
 587	u8 op = insn->opcode.bytes[0];
 588	int i = 0;
 589
 590	/*
 591	 * Clang does 'weird' Jcc __x86_indirect_thunk_r11 conditional
 592	 * tail-calls. Deal with them.
 593	 */
 594	if (is_jcc32(insn)) {
 595		bytes[i++] = op;
 596		op = insn->opcode.bytes[1];
 597		goto clang_jcc;
 598	}
 599
 600	if (insn->length == 6)
 601		bytes[i++] = 0x2e; /* CS-prefix */
 602
 603	switch (op) {
 604	case CALL_INSN_OPCODE:
 605		__text_gen_insn(bytes+i, op, addr+i,
 606				__x86_indirect_call_thunk_array[reg],
 607				CALL_INSN_SIZE);
 608		i += CALL_INSN_SIZE;
 609		break;
 610
 611	case JMP32_INSN_OPCODE:
 612clang_jcc:
 613		__text_gen_insn(bytes+i, op, addr+i,
 614				__x86_indirect_jump_thunk_array[reg],
 615				JMP32_INSN_SIZE);
 616		i += JMP32_INSN_SIZE;
 617		break;
 618
 619	default:
 620		WARN(1, "%pS %px %*ph\n", addr, addr, 6, addr);
 621		return -1;
 622	}
 623
 624	WARN_ON_ONCE(i != insn->length);
 625
 626	return i;
 627}
 628
 629/*
 630 * Rewrite the compiler generated retpoline thunk calls.
 631 *
 632 * For spectre_v2=off (!X86_FEATURE_RETPOLINE), rewrite them into immediate
 633 * indirect instructions, avoiding the extra indirection.
 634 *
 635 * For example, convert:
 636 *
 637 *   CALL __x86_indirect_thunk_\reg
 638 *
 639 * into:
 640 *
 641 *   CALL *%\reg
 642 *
 643 * It also tries to inline spectre_v2=retpoline,lfence when size permits.
 644 */
 645static int patch_retpoline(void *addr, struct insn *insn, u8 *bytes)
 646{
 647	retpoline_thunk_t *target;
 648	int reg, ret, i = 0;
 649	u8 op, cc;
 650
 651	target = addr + insn->length + insn->immediate.value;
 652	reg = target - __x86_indirect_thunk_array;
 653
 654	if (WARN_ON_ONCE(reg & ~0xf))
 655		return -1;
 656
 657	/* If anyone ever does: CALL/JMP *%rsp, we're in deep trouble. */
 658	BUG_ON(reg == 4);
 659
 660	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE) &&
 661	    !cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
 662		if (cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
 663			return emit_call_track_retpoline(addr, insn, reg, bytes);
 664
 665		return -1;
 666	}
 667
 668	op = insn->opcode.bytes[0];
 669
 670	/*
 671	 * Convert:
 672	 *
 673	 *   Jcc.d32 __x86_indirect_thunk_\reg
 674	 *
 675	 * into:
 676	 *
 677	 *   Jncc.d8 1f
 678	 *   [ LFENCE ]
 679	 *   JMP *%\reg
 680	 *   [ NOP ]
 681	 * 1:
 682	 */
 683	if (is_jcc32(insn)) {
 684		cc = insn->opcode.bytes[1] & 0xf;
 685		cc ^= 1; /* invert condition */
 686
 687		bytes[i++] = 0x70 + cc;        /* Jcc.d8 */
 688		bytes[i++] = insn->length - 2; /* sizeof(Jcc.d8) == 2 */
 689
 690		/* Continue as if: JMP.d32 __x86_indirect_thunk_\reg */
 691		op = JMP32_INSN_OPCODE;
 692	}
 693
 694	/*
 695	 * For RETPOLINE_LFENCE: prepend the indirect CALL/JMP with an LFENCE.
 696	 */
 697	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
 698		bytes[i++] = 0x0f;
 699		bytes[i++] = 0xae;
 700		bytes[i++] = 0xe8; /* LFENCE */
 701	}
 702
 703	ret = emit_indirect(op, reg, bytes + i);
 704	if (ret < 0)
 705		return ret;
 706	i += ret;
 707
 708	/*
 709	 * The compiler is supposed to EMIT an INT3 after every unconditional
 710	 * JMP instruction due to AMD BTC. However, if the compiler is too old
 711	 * or MITIGATION_SLS isn't enabled, we still need an INT3 after
 712	 * indirect JMPs even on Intel.
 713	 */
 714	if (op == JMP32_INSN_OPCODE && i < insn->length)
 715		bytes[i++] = INT3_INSN_OPCODE;
 716
 717	for (; i < insn->length;)
 718		bytes[i++] = BYTES_NOP1;
 719
 720	return i;
 721}
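/*
 * Example (illustrative): with retpolines disabled, a compiler generated
 * 5-byte "call __x86_indirect_thunk_r11" (e8 <rel32>) is rewritten in place
 * to "call *%r11" (41 ff d3) padded with NOPs to the original length, so the
 * extra jump through the thunk disappears.
 */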
 722
 723/*
 724 * Generated by 'objtool --retpoline'.
 725 */
 726void __init_or_module noinline apply_retpolines(s32 *start, s32 *end)
 727{
 728	s32 *s;
 729
 730	for (s = start; s < end; s++) {
 731		void *addr = (void *)s + *s;
 732		struct insn insn;
 733		int len, ret;
 734		u8 bytes[16];
 735		u8 op1, op2;
 736
 737		ret = insn_decode_kernel(&insn, addr);
 738		if (WARN_ON_ONCE(ret < 0))
 739			continue;
 740
 741		op1 = insn.opcode.bytes[0];
 742		op2 = insn.opcode.bytes[1];
 743
 744		switch (op1) {
 745		case CALL_INSN_OPCODE:
 746		case JMP32_INSN_OPCODE:
 747			break;
 748
 749		case 0x0f: /* escape */
 750			if (op2 >= 0x80 && op2 <= 0x8f)
 751				break;
 752			fallthrough;
 753		default:
 754			WARN_ON_ONCE(1);
 755			continue;
 756		}
 757
 758		DPRINTK(RETPOLINE, "retpoline at: %pS (%px) len: %d to: %pS",
 759			addr, addr, insn.length,
 760			addr + insn.length + insn.immediate.value);
 761
 762		len = patch_retpoline(addr, &insn, bytes);
 763		if (len == insn.length) {
 764			optimize_nops(bytes, len);
 765			DUMP_BYTES(RETPOLINE, ((u8*)addr),  len, "%px: orig: ", addr);
 766			DUMP_BYTES(RETPOLINE, ((u8*)bytes), len, "%px: repl: ", addr);
 767			text_poke_early(addr, bytes, len);
 768		}
 769	}
 770}
 771
 772#ifdef CONFIG_MITIGATION_RETHUNK
 773
 774/*
 775 * Rewrite the compiler generated return thunk tail-calls.
 776 *
 777 * For example, convert:
 778 *
 779 *   JMP __x86_return_thunk
 780 *
 781 * into:
 782 *
 783 *   RET
 784 */
 785static int patch_return(void *addr, struct insn *insn, u8 *bytes)
 786{
 787	int i = 0;
 788
 789	/* Patch the custom return thunks... */
 790	if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
 791		i = JMP32_INSN_SIZE;
 792		__text_gen_insn(bytes, JMP32_INSN_OPCODE, addr, x86_return_thunk, i);
 793	} else {
 794		/* ... or patch them out if not needed. */
 795		bytes[i++] = RET_INSN_OPCODE;
 796	}
 797
 798	for (; i < insn->length;)
 799		bytes[i++] = INT3_INSN_OPCODE;
 800	return i;
 801}
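/*
 * Example (illustrative): a 5-byte "jmp __x86_return_thunk" (e9 <rel32>)
 * either stays a direct JMP32, now aimed at the selected x86_return_thunk,
 * or -- when X86_FEATURE_RETHUNK is off -- becomes "c3 cc cc cc cc", i.e. a
 * bare RET followed by INT3 padding.
 */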
 802
 803void __init_or_module noinline apply_returns(s32 *start, s32 *end)
 804{
 805	s32 *s;
 806
 807	if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
 808		static_call_force_reinit();
 809
 810	for (s = start; s < end; s++) {
 811		void *dest = NULL, *addr = (void *)s + *s;
 812		struct insn insn;
 813		int len, ret;
 814		u8 bytes[16];
 815		u8 op;
 816
 817		ret = insn_decode_kernel(&insn, addr);
 818		if (WARN_ON_ONCE(ret < 0))
 819			continue;
 820
 821		op = insn.opcode.bytes[0];
 822		if (op == JMP32_INSN_OPCODE)
 823			dest = addr + insn.length + insn.immediate.value;
 824
 825		if (__static_call_fixup(addr, op, dest) ||
 826		    WARN_ONCE(dest != &__x86_return_thunk,
 827			      "missing return thunk: %pS-%pS: %*ph",
 828			      addr, dest, 5, addr))
 829			continue;
 830
 831		DPRINTK(RET, "return thunk at: %pS (%px) len: %d to: %pS",
 832			addr, addr, insn.length,
 833			addr + insn.length + insn.immediate.value);
 834
 835		len = patch_return(addr, &insn, bytes);
 836		if (len == insn.length) {
 837			DUMP_BYTES(RET, ((u8*)addr),  len, "%px: orig: ", addr);
 838			DUMP_BYTES(RET, ((u8*)bytes), len, "%px: repl: ", addr);
 839			text_poke_early(addr, bytes, len);
 840		}
 841	}
 842}
 843#else
 844void __init_or_module noinline apply_returns(s32 *start, s32 *end) { }
 845#endif /* CONFIG_MITIGATION_RETHUNK */
 846
 847#else /* !CONFIG_MITIGATION_RETPOLINE || !CONFIG_OBJTOOL */
 848
 849void __init_or_module noinline apply_retpolines(s32 *start, s32 *end) { }
 850void __init_or_module noinline apply_returns(s32 *start, s32 *end) { }
 851
 852#endif /* CONFIG_MITIGATION_RETPOLINE && CONFIG_OBJTOOL */
 853
 854#ifdef CONFIG_X86_KERNEL_IBT
 855
 856static void poison_cfi(void *addr);
 857
 858static void __init_or_module poison_endbr(void *addr, bool warn)
 859{
 860	u32 endbr, poison = gen_endbr_poison();
 861
 862	if (WARN_ON_ONCE(get_kernel_nofault(endbr, addr)))
 863		return;
 864
 865	if (!is_endbr(endbr)) {
 866		WARN_ON_ONCE(warn);
 867		return;
 868	}
 869
 870	DPRINTK(ENDBR, "ENDBR at: %pS (%px)", addr, addr);
 871
 872	/*
 873	 * When we have IBT, the lack of ENDBR will trigger #CP
 874	 */
 875	DUMP_BYTES(ENDBR, ((u8*)addr), 4, "%px: orig: ", addr);
 876	DUMP_BYTES(ENDBR, ((u8*)&poison), 4, "%px: repl: ", addr);
 877	text_poke_early(addr, &poison, 4);
 878}
 879
 880/*
 881 * Generated by: objtool --ibt
 882 *
 883 * Seal the functions for indirect calls by clobbering the ENDBR instructions
 884 * and the kCFI hash value.
 885 */
 886void __init_or_module noinline apply_seal_endbr(s32 *start, s32 *end)
 887{
 888	s32 *s;
 889
 890	for (s = start; s < end; s++) {
 891		void *addr = (void *)s + *s;
 892
 893		poison_endbr(addr, true);
 894		if (IS_ENABLED(CONFIG_FINEIBT))
 895			poison_cfi(addr - 16);
 896	}
 897}
 898
 899#else
 900
 901void __init_or_module apply_seal_endbr(s32 *start, s32 *end) { }
 902
 903#endif /* CONFIG_X86_KERNEL_IBT */
 904
 905#ifdef CONFIG_FINEIBT
 906#define __CFI_DEFAULT	CFI_DEFAULT
 907#elif defined(CONFIG_CFI_CLANG)
 908#define __CFI_DEFAULT	CFI_KCFI
 909#else
 910#define __CFI_DEFAULT	CFI_OFF
 911#endif
 912
 913enum cfi_mode cfi_mode __ro_after_init = __CFI_DEFAULT;
 914
 915#ifdef CONFIG_CFI_CLANG
 916struct bpf_insn;
 917
 918/* Must match bpf_func_t / DEFINE_BPF_PROG_RUN() */
 919extern unsigned int __bpf_prog_runX(const void *ctx,
 920				    const struct bpf_insn *insn);
 921
 922/*
 923 * Force a reference to the external symbol so the compiler generates
 924 * __kcfi_typeid.
 925 */
 926__ADDRESSABLE(__bpf_prog_runX);
 927
 928/* u32 __ro_after_init cfi_bpf_hash = __kcfi_typeid___bpf_prog_runX; */
 929asm (
 930"	.pushsection	.data..ro_after_init,\"aw\",@progbits	\n"
 931"	.type	cfi_bpf_hash,@object				\n"
 932"	.globl	cfi_bpf_hash					\n"
 933"	.p2align	2, 0x0					\n"
 934"cfi_bpf_hash:							\n"
 935"	.long	__kcfi_typeid___bpf_prog_runX			\n"
 936"	.size	cfi_bpf_hash, 4					\n"
 937"	.popsection						\n"
 938);
 939
 940/* Must match bpf_callback_t */
 941extern u64 __bpf_callback_fn(u64, u64, u64, u64, u64);
 942
 943__ADDRESSABLE(__bpf_callback_fn);
 944
 945/* u32 __ro_after_init cfi_bpf_subprog_hash = __kcfi_typeid___bpf_callback_fn; */
 946asm (
 947"	.pushsection	.data..ro_after_init,\"aw\",@progbits	\n"
 948"	.type	cfi_bpf_subprog_hash,@object			\n"
 949"	.globl	cfi_bpf_subprog_hash				\n"
 950"	.p2align	2, 0x0					\n"
 951"cfi_bpf_subprog_hash:						\n"
 952"	.long	__kcfi_typeid___bpf_callback_fn			\n"
 953"	.size	cfi_bpf_subprog_hash, 4				\n"
 954"	.popsection						\n"
 955);
 956
 957u32 cfi_get_func_hash(void *func)
 958{
 959	u32 hash;
 960
 961	func -= cfi_get_offset();
 962	switch (cfi_mode) {
 963	case CFI_FINEIBT:
 964		func += 7;
 965		break;
 966	case CFI_KCFI:
 967		func += 1;
 968		break;
 969	default:
 970		return 0;
 971	}
 972
 973	if (get_kernel_nofault(hash, func))
 974		return 0;
 975
 976	return hash;
 977}
 978#endif
 979
 980#ifdef CONFIG_FINEIBT
 981
 982static bool cfi_rand __ro_after_init = true;
 983static u32  cfi_seed __ro_after_init;
 984
 985/*
 986 * Re-hash the CFI hash with a boot-time seed while making sure the result is
 987 * not a valid ENDBR instruction.
 988 */
 989static u32 cfi_rehash(u32 hash)
 990{
 991	hash ^= cfi_seed;
 992	while (unlikely(is_endbr(hash) || is_endbr(-hash))) {
 993		bool lsb = hash & 1;
 994		hash >>= 1;
 995		if (lsb)
 996			hash ^= 0x80200003;
 997	}
 998	return hash;
 999}
1000
1001static __init int cfi_parse_cmdline(char *str)
1002{
1003	if (!str)
1004		return -EINVAL;
1005
1006	while (str) {
1007		char *next = strchr(str, ',');
1008		if (next) {
1009			*next = 0;
1010			next++;
1011		}
1012
1013		if (!strcmp(str, "auto")) {
1014			cfi_mode = CFI_DEFAULT;
1015		} else if (!strcmp(str, "off")) {
1016			cfi_mode = CFI_OFF;
1017			cfi_rand = false;
1018		} else if (!strcmp(str, "kcfi")) {
1019			cfi_mode = CFI_KCFI;
1020		} else if (!strcmp(str, "fineibt")) {
1021			cfi_mode = CFI_FINEIBT;
1022		} else if (!strcmp(str, "norand")) {
1023			cfi_rand = false;
1024		} else {
1025			pr_err("Ignoring unknown cfi option (%s).", str);
1026		}
1027
1028		str = next;
1029	}
1030
1031	return 0;
1032}
1033early_param("cfi", cfi_parse_cmdline);
1034
1035/*
1036 * kCFI						FineIBT
1037 *
1038 * __cfi_\func:					__cfi_\func:
1039 *	movl   $0x12345678,%eax		// 5	     endbr64			// 4
1040 *	nop					     subl   $0x12345678,%r10d   // 7
1041 *	nop					     jz     1f			// 2
1042 *	nop					     ud2			// 2
1043 *	nop					1:   nop			// 1
1044 *	nop
1045 *	nop
1046 *	nop
1047 *	nop
1048 *	nop
1049 *	nop
1050 *	nop
1051 *
1052 *
1053 * caller:					caller:
1054 *	movl	$(-0x12345678),%r10d	 // 6	     movl   $0x12345678,%r10d	// 6
1055 *	addl	$-15(%r11),%r10d	 // 4	     sub    $16,%r11		// 4
1056 *	je	1f			 // 2	     nop4			// 4
1057 *	ud2				 // 2
1058 * 1:	call	__x86_indirect_thunk_r11 // 5	     call   *%r11; nop2;	// 5
1059 *
1060 */
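/*
 * Check sketch (illustrative): under kCFI the caller loads the negated type
 * hash into %r10d and adds the hash stored at func-15 (the movl immediate in
 * __cfi_\func); the sum is zero exactly when caller and callee agree, so the
 * "je 1f" skips the ud2.  Under FineIBT the same test moves into the callee:
 * the caller passes the hash in %r10d, calls func-16, and the preamble's
 * "subl $hash, %r10d" yields zero on a match.
 */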
1061
1062asm(	".pushsection .rodata			\n"
1063	"fineibt_preamble_start:		\n"
1064	"	endbr64				\n"
1065	"	subl	$0x12345678, %r10d	\n"
1066	"	je	fineibt_preamble_end	\n"
1067	"	ud2				\n"
1068	"	nop				\n"
1069	"fineibt_preamble_end:			\n"
1070	".popsection\n"
1071);
1072
1073extern u8 fineibt_preamble_start[];
1074extern u8 fineibt_preamble_end[];
1075
1076#define fineibt_preamble_size (fineibt_preamble_end - fineibt_preamble_start)
1077#define fineibt_preamble_hash 7
1078
1079asm(	".pushsection .rodata			\n"
1080	"fineibt_caller_start:			\n"
1081	"	movl	$0x12345678, %r10d	\n"
1082	"	sub	$16, %r11		\n"
1083	ASM_NOP4
1084	"fineibt_caller_end:			\n"
1085	".popsection				\n"
1086);
1087
1088extern u8 fineibt_caller_start[];
1089extern u8 fineibt_caller_end[];
1090
1091#define fineibt_caller_size (fineibt_caller_end - fineibt_caller_start)
1092#define fineibt_caller_hash 2
1093
1094#define fineibt_caller_jmp (fineibt_caller_size - 2)
1095
1096static u32 decode_preamble_hash(void *addr)
1097{
1098	u8 *p = addr;
1099
1100	/* b8 78 56 34 12          mov    $0x12345678,%eax */
1101	if (p[0] == 0xb8)
1102		return *(u32 *)(addr + 1);
1103
1104	return 0; /* invalid hash value */
1105}
1106
1107static u32 decode_caller_hash(void *addr)
1108{
1109	u8 *p = addr;
1110
1111	/* 41 ba 78 56 34 12       mov    $0x12345678,%r10d */
1112	if (p[0] == 0x41 && p[1] == 0xba)
1113		return -*(u32 *)(addr + 2);
1114
1115	/* eb 0c 78 56 34 12	   jmp.d8  +12 */
1116	if (p[0] == JMP8_INSN_OPCODE && p[1] == fineibt_caller_jmp)
1117		return -*(u32 *)(addr + 2);
1118
1119	return 0; /* invalid hash value */
1120}
1121
1122/* .retpoline_sites */
1123static int cfi_disable_callers(s32 *start, s32 *end)
1124{
1125	/*
1126	 * Disable kCFI by patching in a JMP.d8; this leaves the hash immediate
1127	 * intact for later usage. Also see decode_caller_hash() and
1128	 * cfi_rewrite_callers().
1129	 */
1130	const u8 jmp[] = { JMP8_INSN_OPCODE, fineibt_caller_jmp };
1131	s32 *s;
1132
1133	for (s = start; s < end; s++) {
1134		void *addr = (void *)s + *s;
1135		u32 hash;
1136
1137		addr -= fineibt_caller_size;
1138		hash = decode_caller_hash(addr);
1139		if (!hash) /* nocfi callers */
1140			continue;
1141
1142		text_poke_early(addr, jmp, 2);
1143	}
1144
1145	return 0;
1146}
1147
1148static int cfi_enable_callers(s32 *start, s32 *end)
1149{
1150	/*
1151	 * Re-enable kCFI, undo what cfi_disable_callers() did.
1152	 */
1153	const u8 mov[] = { 0x41, 0xba };
1154	s32 *s;
1155
1156	for (s = start; s < end; s++) {
1157		void *addr = (void *)s + *s;
1158		u32 hash;
1159
1160		addr -= fineibt_caller_size;
1161		hash = decode_caller_hash(addr);
1162		if (!hash) /* nocfi callers */
1163			continue;
1164
1165		text_poke_early(addr, mov, 2);
1166	}
1167
1168	return 0;
1169}
1170
1171/* .cfi_sites */
1172static int cfi_rand_preamble(s32 *start, s32 *end)
1173{
1174	s32 *s;
1175
1176	for (s = start; s < end; s++) {
1177		void *addr = (void *)s + *s;
1178		u32 hash;
1179
1180		hash = decode_preamble_hash(addr);
1181		if (WARN(!hash, "no CFI hash found at: %pS %px %*ph\n",
1182			 addr, addr, 5, addr))
1183			return -EINVAL;
1184
1185		hash = cfi_rehash(hash);
1186		text_poke_early(addr + 1, &hash, 4);
1187	}
1188
1189	return 0;
1190}
1191
1192static int cfi_rewrite_preamble(s32 *start, s32 *end)
1193{
1194	s32 *s;
1195
1196	for (s = start; s < end; s++) {
1197		void *addr = (void *)s + *s;
1198		u32 hash;
1199
1200		hash = decode_preamble_hash(addr);
1201		if (WARN(!hash, "no CFI hash found at: %pS %px %*ph\n",
1202			 addr, addr, 5, addr))
1203			return -EINVAL;
1204
1205		text_poke_early(addr, fineibt_preamble_start, fineibt_preamble_size);
1206		WARN_ON(*(u32 *)(addr + fineibt_preamble_hash) != 0x12345678);
1207		text_poke_early(addr + fineibt_preamble_hash, &hash, 4);
1208	}
1209
1210	return 0;
1211}
1212
1213static void cfi_rewrite_endbr(s32 *start, s32 *end)
1214{
1215	s32 *s;
1216
1217	for (s = start; s < end; s++) {
1218		void *addr = (void *)s + *s;
1219
1220		poison_endbr(addr+16, false);
1221	}
1222}
1223
1224/* .retpoline_sites */
1225static int cfi_rand_callers(s32 *start, s32 *end)
1226{
1227	s32 *s;
1228
1229	for (s = start; s < end; s++) {
1230		void *addr = (void *)s + *s;
1231		u32 hash;
1232
1233		addr -= fineibt_caller_size;
1234		hash = decode_caller_hash(addr);
1235		if (hash) {
1236			hash = -cfi_rehash(hash);
1237			text_poke_early(addr + 2, &hash, 4);
1238		}
1239	}
1240
1241	return 0;
1242}
1243
1244static int cfi_rewrite_callers(s32 *start, s32 *end)
1245{
1246	s32 *s;
1247
1248	for (s = start; s < end; s++) {
1249		void *addr = (void *)s + *s;
1250		u32 hash;
1251
1252		addr -= fineibt_caller_size;
1253		hash = decode_caller_hash(addr);
1254		if (hash) {
1255			text_poke_early(addr, fineibt_caller_start, fineibt_caller_size);
1256			WARN_ON(*(u32 *)(addr + fineibt_caller_hash) != 0x12345678);
1257			text_poke_early(addr + fineibt_caller_hash, &hash, 4);
1258		}
1259		/* rely on apply_retpolines() */
1260	}
1261
1262	return 0;
1263}
1264
1265static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
1266			    s32 *start_cfi, s32 *end_cfi, bool builtin)
1267{
1268	int ret;
1269
1270	if (WARN_ONCE(fineibt_preamble_size != 16,
1271		      "FineIBT preamble wrong size: %ld", fineibt_preamble_size))
1272		return;
1273
1274	if (cfi_mode == CFI_DEFAULT) {
1275		cfi_mode = CFI_KCFI;
1276		if (HAS_KERNEL_IBT && cpu_feature_enabled(X86_FEATURE_IBT))
1277			cfi_mode = CFI_FINEIBT;
1278	}
1279
1280	/*
1281	 * Rewrite the callers to not use the __cfi_ stubs, such that we might
1282	 * rewrite the stubs themselves. This disables all CFI. If this succeeds but
1283	 * any of the later stages fails, we're left without CFI.
1284	 */
1285	ret = cfi_disable_callers(start_retpoline, end_retpoline);
1286	if (ret)
1287		goto err;
1288
1289	if (cfi_rand) {
1290		if (builtin) {
1291			cfi_seed = get_random_u32();
1292			cfi_bpf_hash = cfi_rehash(cfi_bpf_hash);
1293			cfi_bpf_subprog_hash = cfi_rehash(cfi_bpf_subprog_hash);
1294		}
1295
1296		ret = cfi_rand_preamble(start_cfi, end_cfi);
1297		if (ret)
1298			goto err;
1299
1300		ret = cfi_rand_callers(start_retpoline, end_retpoline);
1301		if (ret)
1302			goto err;
1303	}
1304
1305	switch (cfi_mode) {
1306	case CFI_OFF:
1307		if (builtin)
1308			pr_info("Disabling CFI\n");
1309		return;
1310
1311	case CFI_KCFI:
1312		ret = cfi_enable_callers(start_retpoline, end_retpoline);
1313		if (ret)
1314			goto err;
1315
1316		if (builtin)
1317			pr_info("Using kCFI\n");
1318		return;
1319
1320	case CFI_FINEIBT:
1321		/* place the FineIBT preamble at func()-16 */
1322		ret = cfi_rewrite_preamble(start_cfi, end_cfi);
1323		if (ret)
1324			goto err;
1325
1326		/* rewrite the callers to target func()-16 */
1327		ret = cfi_rewrite_callers(start_retpoline, end_retpoline);
1328		if (ret)
1329			goto err;
1330
1331		/* now that nobody targets func()+0, remove ENDBR there */
1332		cfi_rewrite_endbr(start_cfi, end_cfi);
1333
1334		if (builtin)
1335			pr_info("Using FineIBT CFI\n");
1336		return;
1337
1338	default:
1339		break;
1340	}
1341
1342err:
1343	pr_err("Something went horribly wrong trying to rewrite the CFI implementation.\n");
1344}
1345
1346static inline void poison_hash(void *addr)
1347{
1348	*(u32 *)addr = 0;
1349}
1350
1351static void poison_cfi(void *addr)
1352{
1353	switch (cfi_mode) {
1354	case CFI_FINEIBT:
1355		/*
1356		 * __cfi_\func:
1357		 *	osp nopl (%rax)
1358		 *	subl	$0, %r10d
1359		 *	jz	1f
1360		 *	ud2
1361		 * 1:	nop
1362		 */
1363		poison_endbr(addr, false);
1364		poison_hash(addr + fineibt_preamble_hash);
1365		break;
1366
1367	case CFI_KCFI:
1368		/*
1369		 * __cfi_\func:
1370		 *	movl	$0, %eax
1371		 *	.skip	11, 0x90
1372		 */
1373		poison_hash(addr + 1);
1374		break;
1375
1376	default:
1377		break;
1378	}
1379}
1380
1381#else
1382
1383static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
1384			    s32 *start_cfi, s32 *end_cfi, bool builtin)
1385{
1386}
1387
1388#ifdef CONFIG_X86_KERNEL_IBT
1389static void poison_cfi(void *addr) { }
1390#endif
1391
1392#endif
1393
1394void apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
1395		   s32 *start_cfi, s32 *end_cfi)
1396{
1397	return __apply_fineibt(start_retpoline, end_retpoline,
1398			       start_cfi, end_cfi,
1399			       /* .builtin = */ false);
1400}
1401
1402#ifdef CONFIG_SMP
1403static void alternatives_smp_lock(const s32 *start, const s32 *end,
1404				  u8 *text, u8 *text_end)
1405{
1406	const s32 *poff;
1407
1408	for (poff = start; poff < end; poff++) {
1409		u8 *ptr = (u8 *)poff + *poff;
1410
1411		if (!*poff || ptr < text || ptr >= text_end)
1412			continue;
1413		/* turn DS segment override prefix into lock prefix */
1414		if (*ptr == 0x3e)
1415			text_poke(ptr, ((unsigned char []){0xf0}), 1);
1416	}
1417}
1418
1419static void alternatives_smp_unlock(const s32 *start, const s32 *end,
1420				    u8 *text, u8 *text_end)
1421{
1422	const s32 *poff;
1423
1424	for (poff = start; poff < end; poff++) {
1425		u8 *ptr = (u8 *)poff + *poff;
1426
1427		if (!*poff || ptr < text || ptr >= text_end)
1428			continue;
1429		/* turn lock prefix into DS segment override prefix */
1430		if (*ptr == 0xf0)
1431			text_poke(ptr, ((unsigned char []){0x3E}), 1);
1432	}
1433}
1434
1435struct smp_alt_module {
1436	/* the module owning these lock prefixes; NULL for the core kernel */
1437	struct module	*mod;
1438	char		*name;
1439
1440	/* ptrs to lock prefixes */
1441	const s32	*locks;
1442	const s32	*locks_end;
1443
1444	/* .text segment, needed to avoid patching init code ;) */
1445	u8		*text;
1446	u8		*text_end;
1447
1448	struct list_head next;
1449};
1450static LIST_HEAD(smp_alt_modules);
1451static bool uniproc_patched = false;	/* protected by text_mutex */
1452
1453void __init_or_module alternatives_smp_module_add(struct module *mod,
1454						  char *name,
1455						  void *locks, void *locks_end,
1456						  void *text,  void *text_end)
1457{
1458	struct smp_alt_module *smp;
1459
1460	mutex_lock(&text_mutex);
1461	if (!uniproc_patched)
1462		goto unlock;
1463
1464	if (num_possible_cpus() == 1)
1465		/* Don't bother remembering, we'll never have to undo it. */
1466		goto smp_unlock;
1467
1468	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
1469	if (NULL == smp)
1470		/* we'll run the (safe but slow) SMP code then ... */
1471		goto unlock;
1472
1473	smp->mod	= mod;
1474	smp->name	= name;
1475	smp->locks	= locks;
1476	smp->locks_end	= locks_end;
1477	smp->text	= text;
1478	smp->text_end	= text_end;
1479	DPRINTK(SMP, "locks %p -> %p, text %p -> %p, name %s\n",
1480		smp->locks, smp->locks_end,
1481		smp->text, smp->text_end, smp->name);
1482
1483	list_add_tail(&smp->next, &smp_alt_modules);
1484smp_unlock:
1485	alternatives_smp_unlock(locks, locks_end, text, text_end);
1486unlock:
1487	mutex_unlock(&text_mutex);
1488}
1489
1490void __init_or_module alternatives_smp_module_del(struct module *mod)
1491{
1492	struct smp_alt_module *item;
1493
1494	mutex_lock(&text_mutex);
1495	list_for_each_entry(item, &smp_alt_modules, next) {
1496		if (mod != item->mod)
1497			continue;
1498		list_del(&item->next);
1499		kfree(item);
1500		break;
1501	}
1502	mutex_unlock(&text_mutex);
1503}
1504
1505void alternatives_enable_smp(void)
1506{
1507	struct smp_alt_module *mod;
1508
1509	/* Why bother if there are no other CPUs? */
1510	BUG_ON(num_possible_cpus() == 1);
1511
1512	mutex_lock(&text_mutex);
1513
1514	if (uniproc_patched) {
1515		pr_info("switching to SMP code\n");
1516		BUG_ON(num_online_cpus() != 1);
1517		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
1518		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
1519		list_for_each_entry(mod, &smp_alt_modules, next)
1520			alternatives_smp_lock(mod->locks, mod->locks_end,
1521					      mod->text, mod->text_end);
1522		uniproc_patched = false;
1523	}
1524	mutex_unlock(&text_mutex);
1525}
1526
1527/*
1528 * Return 1 if the address range is reserved for SMP-alternatives.
1529 * Must hold text_mutex.
1530 */
1531int alternatives_text_reserved(void *start, void *end)
1532{
1533	struct smp_alt_module *mod;
1534	const s32 *poff;
1535	u8 *text_start = start;
1536	u8 *text_end = end;
1537
1538	lockdep_assert_held(&text_mutex);
1539
1540	list_for_each_entry(mod, &smp_alt_modules, next) {
1541		if (mod->text > text_end || mod->text_end < text_start)
1542			continue;
1543		for (poff = mod->locks; poff < mod->locks_end; poff++) {
1544			const u8 *ptr = (const u8 *)poff + *poff;
1545
1546			if (text_start <= ptr && text_end > ptr)
1547				return 1;
1548		}
1549	}
1550
1551	return 0;
1552}
1553#endif /* CONFIG_SMP */
1554
1555/*
1556 * Self-test for the INT3 based CALL emulation code.
1557 *
1558 * This exercises int3_emulate_call() to make sure INT3 pt_regs are set up
1559 * properly and that there is a stack gap between the INT3 frame and the
1560 * previous context. Without this gap doing a virtual PUSH on the interrupted
1561 * stack would corrupt the INT3 IRET frame.
1562 *
1563 * See entry_{32,64}.S for more details.
1564 */
1565
1566/*
1567 * We define the int3_magic() function in assembly to control the calling
1568 * convention such that we can 'call' it from assembly.
1569 */
1570
1571extern void int3_magic(unsigned int *ptr); /* defined in asm */
1572
1573asm (
1574"	.pushsection	.init.text, \"ax\", @progbits\n"
1575"	.type		int3_magic, @function\n"
1576"int3_magic:\n"
1577	ANNOTATE_NOENDBR
1578"	movl	$1, (%" _ASM_ARG1 ")\n"
1579	ASM_RET
1580"	.size		int3_magic, .-int3_magic\n"
1581"	.popsection\n"
1582);
1583
1584extern void int3_selftest_ip(void); /* defined in asm below */
1585
1586static int __init
1587int3_exception_notify(struct notifier_block *self, unsigned long val, void *data)
1588{
1589	unsigned long selftest = (unsigned long)&int3_selftest_ip;
1590	struct die_args *args = data;
1591	struct pt_regs *regs = args->regs;
1592
1593	OPTIMIZER_HIDE_VAR(selftest);
1594
1595	if (!regs || user_mode(regs))
1596		return NOTIFY_DONE;
1597
1598	if (val != DIE_INT3)
1599		return NOTIFY_DONE;
1600
1601	if (regs->ip - INT3_INSN_SIZE != selftest)
1602		return NOTIFY_DONE;
1603
1604	int3_emulate_call(regs, (unsigned long)&int3_magic);
1605	return NOTIFY_STOP;
1606}
1607
1608/* Must be noinline to ensure uniqueness of int3_selftest_ip. */
1609static noinline void __init int3_selftest(void)
1610{
1611	static __initdata struct notifier_block int3_exception_nb = {
1612		.notifier_call	= int3_exception_notify,
1613		.priority	= INT_MAX-1, /* last */
1614	};
1615	unsigned int val = 0;
1616
1617	BUG_ON(register_die_notifier(&int3_exception_nb));
1618
1619	/*
1620	 * Basically: int3_magic(&val); but really complicated :-)
1621	 *
1622	 * INT3 padded with NOP to CALL_INSN_SIZE. The int3_exception_nb
1623	 * notifier above will emulate CALL for us.
1624	 */
1625	asm volatile ("int3_selftest_ip:\n\t"
1626		      ANNOTATE_NOENDBR
1627		      "    int3; nop; nop; nop; nop\n\t"
1628		      : ASM_CALL_CONSTRAINT
1629		      : __ASM_SEL_RAW(a, D) (&val)
1630		      : "memory");
1631
1632	BUG_ON(val != 1);
1633
1634	unregister_die_notifier(&int3_exception_nb);
1635}
1636
1637static __initdata int __alt_reloc_selftest_addr;
1638
1639extern void __init __alt_reloc_selftest(void *arg);
1640__visible noinline void __init __alt_reloc_selftest(void *arg)
1641{
1642	WARN_ON(arg != &__alt_reloc_selftest_addr);
1643}
1644
1645static noinline void __init alt_reloc_selftest(void)
1646{
1647	/*
1648	 * Tests apply_relocation().
1649	 *
1650	 * This has a relative immediate (CALL) in a place other than the first
1651	 * instruction and additionally on x86_64 we get a RIP-relative LEA:
1652	 *
1653	 *   lea    0x0(%rip),%rdi  # 5d0: R_X86_64_PC32    .init.data+0x5566c
1654	 *   call   +0              # 5d5: R_X86_64_PLT32   __alt_reloc_selftest-0x4
1655	 *
1656	 * Getting this wrong will either crash and burn or tickle the WARN
1657	 * above.
1658	 */
1659	asm_inline volatile (
1660		ALTERNATIVE("", "lea %[mem], %%" _ASM_ARG1 "; call __alt_reloc_selftest;", X86_FEATURE_ALWAYS)
1661		: /* output */
1662		: [mem] "m" (__alt_reloc_selftest_addr)
1663		: _ASM_ARG1
1664	);
1665}
1666
1667void __init alternative_instructions(void)
1668{
1669	int3_selftest();
1670
1671	/*
1672	 * The patching is not fully atomic, so try to avoid local
1673	 * interrupts that might execute the code being patched.
1674	 * Other CPUs are not running.
1675	 */
1676	stop_nmi();
1677
1678	/*
1679	 * Don't stop machine check exceptions while patching.
1680	 * MCEs only happen when something got corrupted and in this
1681	 * case we must do something about the corruption.
1682	 * Ignoring it is worse than an unlikely patching race.
1683	 * Also machine checks tend to be broadcast and if one CPU
1684	 * goes into machine check the others follow quickly, so we don't
1685	 * expect a machine check to cause undue problems during code
1686	 * patching.
1687	 */
1688
1689	/*
1690	 * Make sure to set (artificial) features depending on used paravirt
1691	 * functions which can later influence alternative patching.
1692	 */
1693	paravirt_set_cap();
1694
1695	__apply_fineibt(__retpoline_sites, __retpoline_sites_end,
1696			__cfi_sites, __cfi_sites_end, true);
1697
1698	/*
1699	 * Rewrite the retpolines, must be done before alternatives since
1700	 * those can rewrite the retpoline thunks.
1701	 */
1702	apply_retpolines(__retpoline_sites, __retpoline_sites_end);
1703	apply_returns(__return_sites, __return_sites_end);
1704
1705	apply_alternatives(__alt_instructions, __alt_instructions_end);
1706
1707	/*
1708	 * Now all calls are established. Apply the call thunks if
1709	 * required.
1710	 */
1711	callthunks_patch_builtin_calls();
1712
1713	/*
1714	 * Seal all functions that do not have their address taken.
1715	 */
1716	apply_seal_endbr(__ibt_endbr_seal, __ibt_endbr_seal_end);
1717
1718#ifdef CONFIG_SMP
1719	/* Patch to UP if other CPUs are not imminent. */
1720	if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
1721		uniproc_patched = true;
1722		alternatives_smp_module_add(NULL, "core kernel",
1723					    __smp_locks, __smp_locks_end,
1724					    _text, _etext);
1725	}
1726
1727	if (!uniproc_patched || num_possible_cpus() == 1) {
1728		free_init_pages("SMP alternatives",
1729				(unsigned long)__smp_locks,
1730				(unsigned long)__smp_locks_end);
1731	}
1732#endif
1733
1734	restart_nmi();
1735	alternatives_patched = 1;
1736
1737	alt_reloc_selftest();
1738}
1739
1740/**
1741 * text_poke_early - Update instructions on a live kernel at boot time
1742 * @addr: address to modify
1743 * @opcode: source of the copy
1744 * @len: length to copy
1745 *
1746 * When you use this code to patch more than one byte of an instruction
1747 * you need to make sure that other CPUs cannot execute this code in parallel.
1748 * Also no thread must be currently preempted in the middle of these
1749 * instructions. And on the local CPU you need to be protected against NMI or
1750 * MCE handlers seeing an inconsistent instruction while you patch.
1751 */
1752void __init_or_module text_poke_early(void *addr, const void *opcode,
1753				      size_t len)
1754{
1755	unsigned long flags;
1756
1757	if (boot_cpu_has(X86_FEATURE_NX) &&
1758	    is_module_text_address((unsigned long)addr)) {
1759		/*
1760		 * Module text is initially marked as non-executable, so the
1761		 * code cannot be running and speculative code-fetches are
1762		 * prevented. Just change the code.
1763		 */
1764		memcpy(addr, opcode, len);
1765	} else {
1766		local_irq_save(flags);
1767		memcpy(addr, opcode, len);
1768		sync_core();
1769		local_irq_restore(flags);
1770
1771		/*
1772		 * Could also do a CLFLUSH here to speed up CPU recovery; but
1773		 * that causes hangs on some VIA CPUs.
1774		 */
1775	}
1776}
1777
1778typedef struct {
1779	struct mm_struct *mm;
1780} temp_mm_state_t;
1781
1782/*
1783 * Using a temporary mm allows setting temporary mappings that are not accessible
1784 * by other CPUs. Such mappings are needed to perform sensitive memory writes
1785 * that override the kernel memory protections (e.g., W^X), without exposing the
1786 * temporary page-table mappings that are required for these write operations to
1787 * other CPUs. Using a temporary mm also avoids TLB shootdowns when the
1788 * mapping is torn down.
1789 *
1790 * Context: The temporary mm needs to be used exclusively by a single core. To
1791 *          harden security, IRQs must be disabled while the temporary mm is
1792 *          loaded, thereby preventing interrupt handler bugs from overriding
1793 *          the kernel memory protection.
1794 */
1795static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
1796{
1797	temp_mm_state_t temp_state;
1798
1799	lockdep_assert_irqs_disabled();
1800
1801	/*
1802	 * Make sure not to be in TLB lazy mode, as otherwise we'll end up
1803	 * with a stale address space WITHOUT being in lazy mode after
1804	 * restoring the previous mm.
1805	 */
1806	if (this_cpu_read(cpu_tlbstate_shared.is_lazy))
1807		leave_mm();
1808
1809	temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
1810	switch_mm_irqs_off(NULL, mm, current);
1811
1812	/*
1813	 * If breakpoints are enabled, disable them while the temporary mm is
1814	 * used. Userspace might set up watchpoints on addresses that are used
1815	 * in the temporary mm, which would lead to wrong signals being sent or
1816	 * crashes.
1817	 *
1818	 * Note that breakpoints are not disabled selectively, which also causes
1819	 * kernel breakpoints (e.g., perf's) to be disabled. This might be
1820	 * undesirable, but still seems reasonable as the code that runs in the
1821	 * temporary mm should be short.
1822	 */
1823	if (hw_breakpoint_active())
1824		hw_breakpoint_disable();
1825
1826	return temp_state;
1827}
1828
1829static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
1830{
1831	lockdep_assert_irqs_disabled();
1832	switch_mm_irqs_off(NULL, prev_state.mm, current);
1833
1834	/*
1835	 * Restore the breakpoints if they were disabled before the temporary mm
1836	 * was loaded.
1837	 */
1838	if (hw_breakpoint_active())
1839		hw_breakpoint_restore();
1840}
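
    /*
     * Typical usage (see __text_poke() below): with IRQs disabled, map the
     * target page(s) at poking_addr in poking_mm, switch to the temporary mm
     * with use_temporary_mm(), perform the write through the temporary
     * mapping, then switch back with unuse_temporary_mm() and flush the TLB.
     */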
1841
1842__ro_after_init struct mm_struct *poking_mm;
1843__ro_after_init unsigned long poking_addr;
1844
1845static void text_poke_memcpy(void *dst, const void *src, size_t len)
1846{
1847	memcpy(dst, src, len);
1848}
1849
1850static void text_poke_memset(void *dst, const void *src, size_t len)
1851{
1852	int c = *(const int *)src;
1853
1854	memset(dst, c, len);
1855}
1856
1857typedef void text_poke_f(void *dst, const void *src, size_t len);
1858
1859static void *__text_poke(text_poke_f func, void *addr, const void *src, size_t len)
1860{
1861	bool cross_page_boundary = offset_in_page(addr) + len > PAGE_SIZE;
1862	struct page *pages[2] = {NULL};
1863	temp_mm_state_t prev;
1864	unsigned long flags;
1865	pte_t pte, *ptep;
1866	spinlock_t *ptl;
1867	pgprot_t pgprot;
1868
1869	/*
1870	 * While the boot memory allocator is running, we cannot use struct pages as
1871	 * they are not yet initialized. There is no way to recover.
1872	 */
1873	BUG_ON(!after_bootmem);
1874
1875	if (!core_kernel_text((unsigned long)addr)) {
1876		pages[0] = vmalloc_to_page(addr);
1877		if (cross_page_boundary)
1878			pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
1879	} else {
1880		pages[0] = virt_to_page(addr);
1881		WARN_ON(!PageReserved(pages[0]));
1882		if (cross_page_boundary)
1883			pages[1] = virt_to_page(addr + PAGE_SIZE);
1884	}
1885	/*
1886	 * If something went wrong, crash and burn since recovery paths are not
1887	 * implemented.
1888	 */
1889	BUG_ON(!pages[0] || (cross_page_boundary && !pages[1]));
1890
1891	/*
1892	 * Map the page without the global bit, as TLB flushing is done with
1893	 * flush_tlb_mm_range(), which is intended for non-global PTEs.
1894	 */
1895	pgprot = __pgprot(pgprot_val(PAGE_KERNEL) & ~_PAGE_GLOBAL);
1896
1897	/*
1898	 * The lock is not really needed, but this allows us to avoid open-coding.
1899	 */
1900	ptep = get_locked_pte(poking_mm, poking_addr, &ptl);
1901
1902	/*
1903	 * This must not fail; preallocated in poking_init().
1904	 */
1905	VM_BUG_ON(!ptep);
1906
1907	local_irq_save(flags);
1908
1909	pte = mk_pte(pages[0], pgprot);
1910	set_pte_at(poking_mm, poking_addr, ptep, pte);
1911
1912	if (cross_page_boundary) {
1913		pte = mk_pte(pages[1], pgprot);
1914		set_pte_at(poking_mm, poking_addr + PAGE_SIZE, ptep + 1, pte);
1915	}
1916
1917	/*
1918	 * Loading the temporary mm behaves as a compiler barrier, which
1919	 * guarantees that the PTE will be set by the time the write is done.
1920	 */
1921	prev = use_temporary_mm(poking_mm);
1922
1923	kasan_disable_current();
1924	func((u8 *)poking_addr + offset_in_page(addr), src, len);
1925	kasan_enable_current();
1926
1927	/*
1928	 * Ensure that the PTE is only cleared after the write instructions were
1929	 * issued, by using a compiler barrier.
1930	 */
1931	barrier();
1932
1933	pte_clear(poking_mm, poking_addr, ptep);
1934	if (cross_page_boundary)
1935		pte_clear(poking_mm, poking_addr + PAGE_SIZE, ptep + 1);
1936
1937	/*
1938	 * Loading the previous page-table hierarchy requires a serializing
1939	 * instruction that already allows the core to see the updated version.
1940	 * Xen-PV is assumed to serialize execution in a similar manner.
1941	 */
1942	unuse_temporary_mm(prev);
1943
1944	/*
1945	 * Flushing the TLB might involve IPIs, which would require enabled
1946	 * IRQs, but not in this case, as the mm is no longer in use at this point.
1947	 */
1948	flush_tlb_mm_range(poking_mm, poking_addr, poking_addr +
1949			   (cross_page_boundary ? 2 : 1) * PAGE_SIZE,
1950			   PAGE_SHIFT, false);
1951
1952	if (func == text_poke_memcpy) {
1953		/*
1954		 * If the text does not match what we just wrote then something is
1955		 * fundamentally screwy; there's nothing we can really do about that.
1956		 */
1957		BUG_ON(memcmp(addr, src, len));
1958	}
1959
1960	local_irq_restore(flags);
1961	pte_unmap_unlock(ptep, ptl);
1962	return addr;
1963}
1964
1965/**
1966 * text_poke - Update instructions on a live kernel
1967 * @addr: address to modify
1968 * @opcode: source of the copy
1969 * @len: length to copy
1970 *
1971 * Only atomic text poke/set should be allowed when not doing early patching.
1972 * It means the size must be writable atomically and the address must be aligned
1973 * in a way that permits an atomic write. It also makes sure we fit on a single
1974 * page.
1975 *
1976 * Note that the caller must ensure that if the modified code is part of a
1977 * module, the module would not be removed during poking. This can be achieved
1978 * by registering a module notifier, and ordering module removal and patching
1979 * through a mutex.
1980 */
1981void *text_poke(void *addr, const void *opcode, size_t len)
1982{
1983	lockdep_assert_held(&text_mutex);
1984
1985	return __text_poke(text_poke_memcpy, addr, opcode, len);
1986}
1987
1988/**
1989 * text_poke_kgdb - Update instructions on a live kernel by kgdb
1990 * @addr: address to modify
1991 * @opcode: source of the copy
1992 * @len: length to copy
1993 *
1994 * Only atomic text poke/set should be allowed when not doing early patching.
1995 * It means the size must be writable atomically and the address must be aligned
1996 * in a way that permits an atomic write. It also makes sure we fit on a single
1997 * page.
1998 *
1999 * Context: should only be used by kgdb, which ensures no other core is running,
2000 *	    despite the fact that it does not hold text_mutex.
2001 */
2002void *text_poke_kgdb(void *addr, const void *opcode, size_t len)
2003{
2004	return __text_poke(text_poke_memcpy, addr, opcode, len);
2005}
2006
2007void *text_poke_copy_locked(void *addr, const void *opcode, size_t len,
2008			    bool core_ok)
2009{
2010	unsigned long start = (unsigned long)addr;
2011	size_t patched = 0;
2012
2013	if (WARN_ON_ONCE(!core_ok && core_kernel_text(start)))
2014		return NULL;
2015
2016	while (patched < len) {
2017		unsigned long ptr = start + patched;
2018		size_t s;
2019
2020		s = min_t(size_t, PAGE_SIZE * 2 - offset_in_page(ptr), len - patched);
2021
2022		__text_poke(text_poke_memcpy, (void *)ptr, opcode + patched, s);
2023		patched += s;
2024	}
2025	return addr;
2026}
2027
2028/**
2029 * text_poke_copy - Copy instructions into (an unused part of) RX memory
2030 * @addr: address to modify
2031 * @opcode: source of the copy
2032 * @len: length to copy, could be more than 2x PAGE_SIZE
2033 *
2034 * Not safe against concurrent execution; useful for JITs to dump
2035 * new code blocks into unused regions of RX memory. Can be used in
2036 * conjunction with synchronize_rcu_tasks() to wait for existing
2037 * execution to quiesce after having made sure no existing function
2038 * pointers are live.
2039 */
2040void *text_poke_copy(void *addr, const void *opcode, size_t len)
2041{
2042	mutex_lock(&text_mutex);
2043	addr = text_poke_copy_locked(addr, opcode, len, false);
2044	mutex_unlock(&text_mutex);
2045	return addr;
2046}
2047
2048/**
2049 * text_poke_set - memset into (an unused part of) RX memory
2050 * @addr: address to modify
2051 * @c: the byte to fill the area with
2052 * @len: length to copy, could be more than 2x PAGE_SIZE
2053 *
2054 * This is useful to overwrite unused regions of RX memory with illegal
2055 * instructions.
2056 */
2057void *text_poke_set(void *addr, int c, size_t len)
2058{
2059	unsigned long start = (unsigned long)addr;
2060	size_t patched = 0;
2061
2062	if (WARN_ON_ONCE(core_kernel_text(start)))
2063		return NULL;
2064
2065	mutex_lock(&text_mutex);
2066	while (patched < len) {
2067		unsigned long ptr = start + patched;
2068		size_t s;
2069
2070		s = min_t(size_t, PAGE_SIZE * 2 - offset_in_page(ptr), len - patched);
2071
2072		__text_poke(text_poke_memset, (void *)ptr, (void *)&c, s);
2073		patched += s;
2074	}
2075	mutex_unlock(&text_mutex);
2076	return addr;
2077}
2078
2079static void do_sync_core(void *info)
2080{
2081	sync_core();
2082}
2083
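    /*
     * Force a serializing instruction (sync_core() in an IPI) on every online
     * CPU so that subsequent instruction fetches observe the patched bytes.
     */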
2084void text_poke_sync(void)
2085{
2086	on_each_cpu(do_sync_core, NULL, 1);
2087}
2088
2089/*
2090 * NOTE: crazy scheme to allow patching Jcc.d32 without increasing the size of
2091 * this thing. When len == 6 everything is prefixed with 0x0f and we map the
2092 * opcode to the matching Jcc.d8, using len to distinguish the two forms.
2093 */
2094struct text_poke_loc {
2095	/* addr := _stext + rel_addr */
2096	s32 rel_addr;
2097	s32 disp;
2098	u8 len;
2099	u8 opcode;
2100	const u8 text[POKE_MAX_OPCODE_SIZE];
2101	/* see text_poke_bp_batch() */
2102	u8 old;
2103};
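
    /*
     * Example of the scheme above: a Jcc.d32 such as "jnz" (0f 85 <rel32>,
     * 6 bytes) is stored in @text with the leading 0x0f stripped, @opcode set
     * to the short form 0x75 (jnz rel8) and @len == 6; text_poke_bp_batch()
     * re-emits the 0x0f prefix when writing the bytes back.
     */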
2104
2105struct bp_patching_desc {
2106	struct text_poke_loc *vec;
2107	int nr_entries;
2108	atomic_t refs;
2109};
2110
2111static struct bp_patching_desc bp_desc;
2112
2113static __always_inline
2114struct bp_patching_desc *try_get_desc(void)
2115{
2116	struct bp_patching_desc *desc = &bp_desc;
2117
2118	if (!raw_atomic_inc_not_zero(&desc->refs))
2119		return NULL;
2120
2121	return desc;
2122}
2123
2124static __always_inline void put_desc(void)
2125{
2126	struct bp_patching_desc *desc = &bp_desc;
2127
2128	smp_mb__before_atomic();
2129	raw_atomic_dec(&desc->refs);
2130}
2131
2132static __always_inline void *text_poke_addr(struct text_poke_loc *tp)
2133{
2134	return _stext + tp->rel_addr;
2135}
2136
2137static __always_inline int patch_cmp(const void *key, const void *elt)
2138{
2139	struct text_poke_loc *tp = (struct text_poke_loc *) elt;
2140
2141	if (key < text_poke_addr(tp))
2142		return -1;
2143	if (key > text_poke_addr(tp))
2144		return 1;
2145	return 0;
2146}
2147
2148noinstr int poke_int3_handler(struct pt_regs *regs)
2149{
2150	struct bp_patching_desc *desc;
2151	struct text_poke_loc *tp;
2152	int ret = 0;
2153	void *ip;
2154
2155	if (user_mode(regs))
2156		return 0;
2157
2158	/*
2159	 * Having observed our INT3 instruction, we now must observe
2160	 * bp_desc with non-zero refcount:
2161	 *
2162	 *	bp_desc.refs = 1		INT3
2163	 *	WMB				RMB
2164	 *	write INT3			if (bp_desc.refs != 0)
2165	 */
2166	smp_rmb();
2167
2168	desc = try_get_desc();
2169	if (!desc)
2170		return 0;
2171
2172	/*
2173	 * Discount the INT3. See text_poke_bp_batch().
2174	 */
2175	ip = (void *) regs->ip - INT3_INSN_SIZE;
2176
2177	/*
2178	 * Skip the binary search if there is a single member in the vector.
2179	 */
2180	if (unlikely(desc->nr_entries > 1)) {
2181		tp = __inline_bsearch(ip, desc->vec, desc->nr_entries,
2182				      sizeof(struct text_poke_loc),
2183				      patch_cmp);
2184		if (!tp)
2185			goto out_put;
2186	} else {
2187		tp = desc->vec;
2188		if (text_poke_addr(tp) != ip)
2189			goto out_put;
2190	}
2191
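    	/*
    	 * Displacements are relative to the end of the patched instruction,
    	 * so advance ip past it before applying tp->disp.
    	 */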
2192	ip += tp->len;
2193
2194	switch (tp->opcode) {
2195	case INT3_INSN_OPCODE:
2196		/*
2197		 * Someone poked an explicit INT3, they'll want to handle it,
2198		 * do not consume.
2199		 */
2200		goto out_put;
2201
2202	case RET_INSN_OPCODE:
2203		int3_emulate_ret(regs);
2204		break;
2205
2206	case CALL_INSN_OPCODE:
2207		int3_emulate_call(regs, (long)ip + tp->disp);
2208		break;
2209
2210	case JMP32_INSN_OPCODE:
2211	case JMP8_INSN_OPCODE:
2212		int3_emulate_jmp(regs, (long)ip + tp->disp);
2213		break;
2214
2215	case 0x70 ... 0x7f: /* Jcc */
2216		int3_emulate_jcc(regs, tp->opcode & 0xf, (long)ip, tp->disp);
2217		break;
2218
2219	default:
2220		BUG();
2221	}
2222
2223	ret = 1;
2224
2225out_put:
2226	put_desc();
2227	return ret;
2228}
2229
2230#define TP_VEC_MAX (PAGE_SIZE / sizeof(struct text_poke_loc))
2231static struct text_poke_loc tp_vec[TP_VEC_MAX];
2232static int tp_vec_nr;
2233
2234/**
2235 * text_poke_bp_batch() -- update instructions on a live kernel on SMP
2236 * @tp:			vector of instructions to patch
2237 * @nr_entries:		number of entries in the vector
2238 *
2239 * Modify multi-byte instructions by using INT3 breakpoints on SMP.
2240 * We completely avoid stop_machine() here, and achieve the
2241 * synchronization using INT3 breakpoints.
2242 *
2243 * The way it is done:
2244 *	- For each entry in the vector:
2245 *		- add an INT3 trap to the address that will be patched
2246 *	- sync cores
2247 *	- For each entry in the vector:
2248 *		- update all but the first byte of the patched range
2249 *	- sync cores
2250 *	- For each entry in the vector:
2251 *		- replace the first byte (INT3) with the first byte of
2252 *		  the replacing opcode
2253 *	- sync cores
2254 */
2255static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
2256{
2257	unsigned char int3 = INT3_INSN_OPCODE;
2258	unsigned int i;
2259	int do_sync;
2260
2261	lockdep_assert_held(&text_mutex);
2262
2263	bp_desc.vec = tp;
2264	bp_desc.nr_entries = nr_entries;
2265
2266	/*
2267	 * Corresponds to the implicit memory barrier in try_get_desc() to
2268	 * ensure reading a non-zero refcount provides up to date bp_desc data.
2269	 */
2270	atomic_set_release(&bp_desc.refs, 1);
2271
2272	/*
2273	 * Function tracing can enable thousands of places that need to be
2274	 * updated. This can take quite some time, and with full kernel debugging
2275	 * enabled, this could cause the softlockup watchdog to trigger.
2276	 * This function is called after every 256 entries have been queued for
2277	 * patching. Call cond_resched() here to make sure that other tasks can
2278	 * get scheduled while processing all the functions being patched.
2279	 */
2280	cond_resched();
2281
2282	/*
2283	 * Corresponding read barrier in int3 notifier for making sure the
2284	 * nr_entries and handler are correctly ordered wrt. patching.
2285	 */
2286	smp_wmb();
2287
2288	/*
2289	 * First step: add an INT3 trap to the address that will be patched.
2290	 */
2291	for (i = 0; i < nr_entries; i++) {
2292		tp[i].old = *(u8 *)text_poke_addr(&tp[i]);
2293		text_poke(text_poke_addr(&tp[i]), &int3, INT3_INSN_SIZE);
2294	}
2295
2296	text_poke_sync();
2297
2298	/*
2299	 * Second step: update all but the first byte of the patched range.
2300	 */
2301	for (do_sync = 0, i = 0; i < nr_entries; i++) {
2302		u8 old[POKE_MAX_OPCODE_SIZE+1] = { tp[i].old, };
2303		u8 _new[POKE_MAX_OPCODE_SIZE+1];
2304		const u8 *new = tp[i].text;
2305		int len = tp[i].len;
2306
2307		if (len - INT3_INSN_SIZE > 0) {
2308			memcpy(old + INT3_INSN_SIZE,
2309			       text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
2310			       len - INT3_INSN_SIZE);
2311
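    			/*
    			 * A 6-byte Jcc.d32 was stored without its 0x0f prefix
    			 * (see text_poke_loc_init()); rebuild the full
    			 * instruction before writing the tail bytes.
    			 */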
2312			if (len == 6) {
2313				_new[0] = 0x0f;
2314				memcpy(_new + 1, new, 5);
2315				new = _new;
2316			}
2317
2318			text_poke(text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
2319				  new + INT3_INSN_SIZE,
2320				  len - INT3_INSN_SIZE);
2321
2322			do_sync++;
2323		}
2324
2325		/*
2326		 * Emit a perf event to record the text poke, primarily to
2327		 * support Intel PT decoding which must walk the executable code
2328		 * to reconstruct the trace. The flow up to here is:
2329		 *   - write INT3 byte
2330		 *   - IPI-SYNC
2331		 *   - write instruction tail
2332		 * At this point the actual control flow will be through the
2333		 * INT3 and handler and not hit the old or new instruction.
2334		 * Intel PT outputs FUP/TIP packets for the INT3, so the flow
2335		 * can still be decoded. Subsequently:
2336		 *   - emit RECORD_TEXT_POKE with the new instruction
2337		 *   - IPI-SYNC
2338		 *   - write first byte
2339		 *   - IPI-SYNC
2340		 * So before the text poke event timestamp, the decoder will see
2341		 * either the old instruction flow or FUP/TIP of INT3. After the
2342		 * text poke event timestamp, the decoder will see either the
2343		 * new instruction flow or FUP/TIP of INT3. Thus decoders can
2344		 * use the timestamp as the point at which to modify the
2345		 * executable code.
2346		 * The old instruction is recorded so that the event can be
2347		 * processed forwards or backwards.
2348		 */
2349		perf_event_text_poke(text_poke_addr(&tp[i]), old, len, new, len);
2350	}
2351
2352	if (do_sync) {
2353		/*
2354		 * According to Intel, this core syncing is very likely
2355		 * not necessary and we'd be safe even without it. But
2356		 * better safe than sorry (plus there's not only Intel).
2357		 */
2358		text_poke_sync();
2359	}
2360
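    	/*
    	 * Writing the first byte last is what makes this safe: until that
    	 * write, a CPU racing into the patched range hits the INT3 and is
    	 * redirected by poke_int3_handler(), while the remaining bytes are
    	 * already the new ones.
    	 */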
2361	/*
2362	 * Third step: replace the first byte (INT3) with the first byte of the
2363	 * replacing opcode.
2364	 */
2365	for (do_sync = 0, i = 0; i < nr_entries; i++) {
2366		u8 byte = tp[i].text[0];
2367
2368		if (tp[i].len == 6)
2369			byte = 0x0f;
2370
2371		if (byte == INT3_INSN_OPCODE)
2372			continue;
2373
2374		text_poke(text_poke_addr(&tp[i]), &byte, INT3_INSN_SIZE);
2375		do_sync++;
2376	}
2377
2378	if (do_sync)
2379		text_poke_sync();
2380
2381	/*
2382	 * Remove and wait for refs to be zero.
2383	 */
2384	if (!atomic_dec_and_test(&bp_desc.refs))
2385		atomic_cond_read_acquire(&bp_desc.refs, !VAL);
2386}
2387
2388static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
2389			       const void *opcode, size_t len, const void *emulate)
2390{
2391	struct insn insn;
2392	int ret, i = 0;
2393
2394	if (len == 6)
2395		i = 1;
2396	memcpy((void *)tp->text, opcode+i, len-i);
2397	if (!emulate)
2398		emulate = opcode;
2399
2400	ret = insn_decode_kernel(&insn, emulate);
2401	BUG_ON(ret < 0);
2402
2403	tp->rel_addr = addr - (void *)_stext;
2404	tp->len = len;
2405	tp->opcode = insn.opcode.bytes[0];
2406
2407	if (is_jcc32(&insn)) {
2408		/*
2409		 * Map Jcc.d32 onto Jcc.d8 and use len to distinguish.
2410		 */
2411		tp->opcode = insn.opcode.bytes[1] - 0x10;
2412	}
2413
2414	switch (tp->opcode) {
2415	case RET_INSN_OPCODE:
2416	case JMP32_INSN_OPCODE:
2417	case JMP8_INSN_OPCODE:
2418		/*
2419		 * Control flow instructions without implied execution of the
2420		 * next instruction can be padded with INT3.
2421		 */
2422		for (i = insn.length; i < len; i++)
2423			BUG_ON(tp->text[i] != INT3_INSN_OPCODE);
2424		break;
2425
2426	default:
2427		BUG_ON(len != insn.length);
2428	}
2429
2430	switch (tp->opcode) {
2431	case INT3_INSN_OPCODE:
2432	case RET_INSN_OPCODE:
2433		break;
2434
2435	case CALL_INSN_OPCODE:
2436	case JMP32_INSN_OPCODE:
2437	case JMP8_INSN_OPCODE:
2438	case 0x70 ... 0x7f: /* Jcc */
2439		tp->disp = insn.immediate.value;
2440		break;
2441
2442	default: /* assume NOP */
2443		switch (len) {
2444		case 2: /* NOP2 -- emulate as JMP8+0 */
2445			BUG_ON(memcmp(emulate, x86_nops[len], len));
2446			tp->opcode = JMP8_INSN_OPCODE;
2447			tp->disp = 0;
2448			break;
2449
2450		case 5: /* NOP5 -- emulate as JMP32+0 */
2451			BUG_ON(memcmp(emulate, x86_nops[len], len));
2452			tp->opcode = JMP32_INSN_OPCODE;
2453			tp->disp = 0;
2454			break;
2455
2456		default: /* unknown instruction */
2457			BUG();
2458		}
2459		break;
2460	}
2461}
2462
2463/*
2464 * We rely hard on tp_vec being ordered; ensure this is so by flushing
2465 * early if needed.
2466 */
2467static bool tp_order_fail(void *addr)
2468{
2469	struct text_poke_loc *tp;
2470
2471	if (!tp_vec_nr)
2472		return false;
2473
2474	if (!addr) /* force */
2475		return true;
2476
2477	tp = &tp_vec[tp_vec_nr - 1];
2478	if ((unsigned long)text_poke_addr(tp) > (unsigned long)addr)
2479		return true;
2480
2481	return false;
2482}
2483
2484static void text_poke_flush(void *addr)
2485{
2486	if (tp_vec_nr == TP_VEC_MAX || tp_order_fail(addr)) {
2487		text_poke_bp_batch(tp_vec, tp_vec_nr);
2488		tp_vec_nr = 0;
2489	}
2490}
2491
2492void text_poke_finish(void)
2493{
2494	text_poke_flush(NULL);
2495}
2496
2497void __ref text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate)
2498{
2499	struct text_poke_loc *tp;
2500
2501	text_poke_flush(addr);
2502
2503	tp = &tp_vec[tp_vec_nr++];
2504	text_poke_loc_init(tp, addr, opcode, len, emulate);
2505}
2506
2507/**
2508 * text_poke_bp() -- update instructions on a live kernel on SMP
2509 * @addr:	address to patch
2510 * @opcode:	opcode of new instruction
2511 * @len:	length to copy
2512 * @emulate:	instruction to be emulated
2513 *
2514 * Update a single instruction with a vector on the stack, avoiding
2515 * dynamically allocated memory. This function should be used when it is
2516 * not possible to allocate memory.
2517 */
2518void __ref text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate)
2519{
2520	struct text_poke_loc tp;
2521
2522	text_poke_loc_init(&tp, addr, opcode, len, emulate);
2523	text_poke_bp_batch(&tp, 1);
2524}
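
    /*
     * Illustrative sketch only, not a real call site: rewriting a 5-byte NOP
     * into a near JMP while other CPUs may be executing it could look like
     * the following, where addr is the patch site and dest the jump target
     * (both assumed for illustration):
     *
     *	u8 insn[5] = { JMP32_INSN_OPCODE, };
     *	s32 disp = (long)dest - (long)addr - JMP32_INSN_SIZE;
     *
     *	memcpy(insn + 1, &disp, sizeof(disp));
     *	text_poke_bp(addr, insn, JMP32_INSN_SIZE, NULL);
     */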