/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_TEXT_PATCHING_H
#define _ASM_X86_TEXT_PATCHING_H

#include <linux/types.h>
#include <linux/stddef.h>
#include <asm/ptrace.h>

struct paravirt_patch_site;
#ifdef CONFIG_PARAVIRT
void apply_paravirt(struct paravirt_patch_site *start,
		    struct paravirt_patch_site *end);
#else
static inline void apply_paravirt(struct paravirt_patch_site *start,
				  struct paravirt_patch_site *end)
{}
#define __parainstructions	NULL
#define __parainstructions_end	NULL
#endif

/*
 * Currently, the max observed size in the kernel code is
 * JUMP_LABEL_NOP_SIZE/RELATIVEJUMP_SIZE, which are both 5.
 * Raise it if needed.
 */
#define POKE_MAX_OPCODE_SIZE	5

extern void text_poke_early(void *addr, const void *opcode, size_t len);

/*
 * Clear and restore the kernel write-protection flag on the local CPU.
 * Allows the kernel to edit read-only pages.
 * Side-effect: any interrupt handler running between save and restore will have
 * the ability to write to read-only pages.
 *
 * Warning:
 * Code patching in the UP case is safe if NMIs and MCE handlers are stopped and
 * no thread can be preempted in the instructions being modified (no iret to an
 * invalid instruction possible) or if the instructions are changed from a
 * consistent state to another consistent state atomically.
 * On the local CPU you need to be protected against NMI or MCE handlers seeing
 * an inconsistent instruction while you patch.
 */
extern void *text_poke(void *addr, const void *opcode, size_t len);
extern void text_poke_sync(void);
extern void *text_poke_kgdb(void *addr, const void *opcode, size_t len);
extern void *text_poke_copy(void *addr, const void *opcode, size_t len);
extern void *text_poke_copy_locked(void *addr, const void *opcode, size_t len, bool core_ok);
extern void *text_poke_set(void *addr, int c, size_t len);
extern int poke_int3_handler(struct pt_regs *regs);
extern void text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate);

extern void text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate);
extern void text_poke_finish(void);

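/*
 * Illustrative sketch, not part of this header: given the declarations
 * above, a caller rewriting several call sites would typically batch the
 * pokes ('site1'/'new_insn1' etc. are hypothetical):
 *
 *	text_poke_queue(site1, new_insn1, len1, NULL);
 *	text_poke_queue(site2, new_insn2, len2, NULL);
 *	text_poke_finish();
 *
 * A NULL @emulate asks the INT3 machinery to emulate the new instruction
 * bytes themselves while the temporary breakpoint is in place.
 */
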
#define INT3_INSN_SIZE		1
#define INT3_INSN_OPCODE	0xCC

#define RET_INSN_SIZE		1
#define RET_INSN_OPCODE		0xC3

#define CALL_INSN_SIZE		5
#define CALL_INSN_OPCODE	0xE8

#define JMP32_INSN_SIZE		5
#define JMP32_INSN_OPCODE	0xE9

#define JMP8_INSN_SIZE		2
#define JMP8_INSN_OPCODE	0xEB

#define DISP32_SIZE		4

static __always_inline int text_opcode_size(u8 opcode)
{
	int size = 0;

#define __CASE(insn)	\
	case insn##_INSN_OPCODE: size = insn##_INSN_SIZE; break

	switch (opcode) {
	__CASE(INT3);
	__CASE(RET);
	__CASE(CALL);
	__CASE(JMP32);
	__CASE(JMP8);
	}

#undef __CASE

	return size;
}

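/*
 * For example, following directly from the #defines above:
 * text_opcode_size(CALL_INSN_OPCODE) and text_opcode_size(JMP32_INSN_OPCODE)
 * both return 5, text_opcode_size(INT3_INSN_OPCODE) returns 1, and any
 * opcode outside the table yields 0.
 */
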
union text_poke_insn {
	u8 text[POKE_MAX_OPCODE_SIZE];
	struct {
		u8 opcode;
		s32 disp;
	} __attribute__((packed));
};

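/*
 * Worked example of the overlay above: a CALL to a target 0x100 bytes
 * past the call site encodes as E8 FB 00 00 00 -- text[0] is the opcode
 * and disp holds the little-endian rel32 0x100 - CALL_INSN_SIZE = 0xfb.
 */
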
static __always_inline
void __text_gen_insn(void *buf, u8 opcode, const void *addr, const void *dest, int size)
{
	union text_poke_insn *insn = buf;

	BUG_ON(size < text_opcode_size(opcode));

	/*
	 * Hide the addresses to avoid the compiler folding in constants when
	 * referencing code; these can mess up annotations like
	 * ANNOTATE_NOENDBR.
	 */
	OPTIMIZER_HIDE_VAR(insn);
	OPTIMIZER_HIDE_VAR(addr);
	OPTIMIZER_HIDE_VAR(dest);

	insn->opcode = opcode;

	if (size > 1) {
		insn->disp = (long)dest - (long)(addr + size);
		if (size == 2) {
			/*
			 * Ensure that for JMP8 the displacement
			 * actually fits the signed byte.
			 */
			BUG_ON((insn->disp >> 31) != (insn->disp >> 7));
		}
	}
}

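/*
 * Worked example of the displacement math above: a JMP32 generated at
 * addr 0x1000 targeting 0x2000 gets disp = 0x2000 - (0x1000 + 5) = 0xffb;
 * the rel32 is always relative to the end of the new instruction.
 */
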
static __always_inline
void *text_gen_insn(u8 opcode, const void *addr, const void *dest)
{
	static union text_poke_insn insn; /* per instance */
	__text_gen_insn(&insn, opcode, addr, dest, text_opcode_size(opcode));
	return &insn.text;
}

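/*
 * Illustrative sketch, 'site' and 'new_target' being hypothetical: the
 * helpers above combine with text_poke_bp() to redirect a patchable
 * 5-byte call:
 *
 *	text_poke_bp(site, text_gen_insn(CALL_INSN_OPCODE, site, new_target),
 *		     CALL_INSN_SIZE, NULL);
 *
 * Because text_gen_insn() is __always_inline, each call site gets its own
 * static buffer ("per instance"), but the returned bytes are overwritten
 * by the next call from the same site, so consume them immediately.
 */
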
extern int after_bootmem;
extern __ro_after_init struct mm_struct *poking_mm;
extern __ro_after_init unsigned long poking_addr;

#ifndef CONFIG_UML_X86
static __always_inline
void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
{
	regs->ip = ip;
}

static __always_inline
void int3_emulate_push(struct pt_regs *regs, unsigned long val)
{
	/*
	 * The int3 handler in entry_64.S adds a gap between the
	 * stack where the breakpoint happened and the saving of
	 * pt_regs. We can extend the original stack because of
	 * this gap. See the idtentry macro's create_gap option.
	 *
	 * Similarly entry_32.S will have a gap on the stack for (any) hardware
	 * exception and pt_regs; see FIXUP_FRAME.
	 */
	regs->sp -= sizeof(unsigned long);
	*(unsigned long *)regs->sp = val;
}

static __always_inline
unsigned long int3_emulate_pop(struct pt_regs *regs)
{
	unsigned long val = *(unsigned long *)regs->sp;
	regs->sp += sizeof(unsigned long);
	return val;
}

static __always_inline
void int3_emulate_call(struct pt_regs *regs, unsigned long func)
{
	int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
	int3_emulate_jmp(regs, func);
}

static __always_inline
void int3_emulate_ret(struct pt_regs *regs)
{
	unsigned long ip = int3_emulate_pop(regs);
	int3_emulate_jmp(regs, ip);
}
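
/*
 * A sketch of how the INT3 handler puts these together (not a verbatim
 * copy of poke_int3_handler()): when the trap hits a site being patched
 * into a CALL, regs->ip points just past the INT3, so
 *
 *	int3_emulate_call(regs, (unsigned long)target);
 *
 * pushes addr + CALL_INSN_SIZE as the return address and resumes at
 * target, as if the completed CALL instruction had executed.
 */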
#endif /* !CONFIG_UML_X86 */

#endif /* _ASM_X86_TEXT_PATCHING_H */