arch/x86/kernel/jump_label.c (v5.9)
// SPDX-License-Identifier: GPL-2.0
/*
 * jump label x86 support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 *
 */
#include <linux/jump_label.h>
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/cpu.h>
#include <asm/kprobes.h>
#include <asm/alternative.h>
#include <asm/text-patching.h>

static void bug_at(const void *ip, int line)
{
	/*
	 * The location is not an op that we were expecting.
	 * Something went wrong. Crash the box, as something could be
	 * corrupting the kernel.
	 */
	pr_crit("jump_label: Fatal kernel bug, unexpected op at %pS [%p] (%5ph) %d\n", ip, ip, ip, line);
	BUG();
}

static const void *
__jump_label_set_jump_code(struct jump_entry *entry, enum jump_label_type type, int init)
{
	const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
	const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5];
	const void *expect, *code;
	const void *addr, *dest;
	int line;

	addr = (void *)jump_entry_code(entry);
	dest = (void *)jump_entry_target(entry);

	code = text_gen_insn(JMP32_INSN_OPCODE, addr, dest);

	if (init) {
		expect = default_nop; line = __LINE__;
	} else if (type == JUMP_LABEL_JMP) {
		expect = ideal_nop; line = __LINE__;
	} else {
		expect = code; line = __LINE__;
	}

	if (memcmp(addr, expect, JUMP_LABEL_NOP_SIZE))
		bug_at(addr, line);

	if (type == JUMP_LABEL_NOP)
		code = ideal_nop;

	return code;
}

static inline void __jump_label_transform(struct jump_entry *entry,
					  enum jump_label_type type,
					  int init)
{
	const void *opcode = __jump_label_set_jump_code(entry, type, init);

	/*
	 * As long as only a single processor is running and the code is still
	 * not marked as RO, text_poke_early() can be used; Checking that
	 * system_state is SYSTEM_BOOTING guarantees it. It will be set to
	 * SYSTEM_SCHEDULING before other cores are awaken and before the
	 * code is write-protected.
	 *
	 * At the time the change is being done, just ignore whether we
	 * are doing nop -> jump or jump -> nop transition, and assume
	 * always nop being the 'currently valid' instruction
	 */
	if (init || system_state == SYSTEM_BOOTING) {
		text_poke_early((void *)jump_entry_code(entry), opcode,
				JUMP_LABEL_NOP_SIZE);
		return;
	}

	text_poke_bp((void *)jump_entry_code(entry), opcode, JUMP_LABEL_NOP_SIZE, NULL);
}

static void __ref jump_label_transform(struct jump_entry *entry,
				       enum jump_label_type type,
				       int init)
{
	mutex_lock(&text_mutex);
	__jump_label_transform(entry, type, init);
	mutex_unlock(&text_mutex);
}

void arch_jump_label_transform(struct jump_entry *entry,
			       enum jump_label_type type)
{
	jump_label_transform(entry, type, 0);
}

bool arch_jump_label_transform_queue(struct jump_entry *entry,
				     enum jump_label_type type)
{
	const void *opcode;

	if (system_state == SYSTEM_BOOTING) {
		/*
		 * Fallback to the non-batching mode.
		 */
		arch_jump_label_transform(entry, type);
		return true;
	}

	mutex_lock(&text_mutex);
	opcode = __jump_label_set_jump_code(entry, type, 0);
	text_poke_queue((void *)jump_entry_code(entry),
			opcode, JUMP_LABEL_NOP_SIZE, NULL);
	mutex_unlock(&text_mutex);
	return true;
}

void arch_jump_label_transform_apply(void)
{
	mutex_lock(&text_mutex);
	text_poke_finish();
	mutex_unlock(&text_mutex);
}

static enum {
	JL_STATE_START,
	JL_STATE_NO_UPDATE,
	JL_STATE_UPDATE,
} jlstate __initdata_or_module = JL_STATE_START;

__init_or_module void arch_jump_label_transform_static(struct jump_entry *entry,
				      enum jump_label_type type)
{
	/*
	 * This function is called at boot up and when modules are
	 * first loaded. Check if the default nop, the one that is
	 * inserted at compile time, is the ideal nop. If it is, then
	 * we do not need to update the nop, and we can leave it as is.
	 * If it is not, then we need to update the nop to the ideal nop.
	 */
	if (jlstate == JL_STATE_START) {
		const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
		const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5];

		if (memcmp(ideal_nop, default_nop, 5) != 0)
			jlstate = JL_STATE_UPDATE;
		else
			jlstate = JL_STATE_NO_UPDATE;
	}
	if (jlstate == JL_STATE_UPDATE)
		jump_label_transform(entry, type, 1);
}
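
For context, a minimal sketch of a static-key consumer whose branch sites the arch hooks above patch between NOP and JMP. The key and helper names (my_feature_key, my_slow_extra_work, my_feature_set) are hypothetical and not taken from this file; the static-key API itself comes from <linux/jump_label.h>.

#include <linux/jump_label.h>
#include <linux/types.h>

/* Hypothetical key and helpers, for illustration only. */
static DEFINE_STATIC_KEY_FALSE(my_feature_key);

static void my_slow_extra_work(void)
{
}

void my_fast_path(void)
{
	/* Emitted as a NOP by default; patched to a JMP once the key is enabled. */
	if (static_branch_unlikely(&my_feature_key))
		my_slow_extra_work();
}

void my_feature_set(bool on)
{
	/* Each toggle reaches arch_jump_label_transform() via the generic jump_label core. */
	if (on)
		static_branch_enable(&my_feature_key);
	else
		static_branch_disable(&my_feature_key);
}

In the v5.9 code above every such site is a 5-byte ideal NOP or a 5-byte JMP32; the v6.13.7 version below also handles 2-byte (JMP8) sites, with the per-entry size determined by arch_jump_entry_size().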
arch/x86/kernel/jump_label.c (v6.13.7)
// SPDX-License-Identifier: GPL-2.0
/*
 * jump label x86 support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 *
 */
#include <linux/jump_label.h>
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/cpu.h>
#include <asm/kprobes.h>
#include <asm/alternative.h>
#include <asm/text-patching.h>
#include <asm/insn.h>

int arch_jump_entry_size(struct jump_entry *entry)
{
	struct insn insn = {};

	insn_decode_kernel(&insn, (void *)jump_entry_code(entry));
	BUG_ON(insn.length != 2 && insn.length != 5);

	return insn.length;
}

struct jump_label_patch {
	const void *code;
	int size;
};

static struct jump_label_patch
__jump_label_patch(struct jump_entry *entry, enum jump_label_type type)
{
	const void *expect, *code, *nop;
	const void *addr, *dest;
	int size;

	addr = (void *)jump_entry_code(entry);
	dest = (void *)jump_entry_target(entry);

	size = arch_jump_entry_size(entry);
	switch (size) {
	case JMP8_INSN_SIZE:
		code = text_gen_insn(JMP8_INSN_OPCODE, addr, dest);
		nop = x86_nops[size];
		break;

	case JMP32_INSN_SIZE:
		code = text_gen_insn(JMP32_INSN_OPCODE, addr, dest);
		nop = x86_nops[size];
		break;

	default: BUG();
	}

	if (type == JUMP_LABEL_JMP)
		expect = nop;
	else
		expect = code;

	if (memcmp(addr, expect, size)) {
		/*
		 * The location is not an op that we were expecting.
		 * Something went wrong. Crash the box, as something could be
		 * corrupting the kernel.
		 */
		pr_crit("jump_label: Fatal kernel bug, unexpected op at %pS [%p] (%5ph != %5ph)) size:%d type:%d\n",
				addr, addr, addr, expect, size, type);
		BUG();
	}

	if (type == JUMP_LABEL_NOP)
		code = nop;

	return (struct jump_label_patch){.code = code, .size = size};
}

static __always_inline void
__jump_label_transform(struct jump_entry *entry,
		       enum jump_label_type type,
		       int init)
{
	const struct jump_label_patch jlp = __jump_label_patch(entry, type);

	/*
	 * As long as only a single processor is running and the code is still
	 * not marked as RO, text_poke_early() can be used; Checking that
	 * system_state is SYSTEM_BOOTING guarantees it. It will be set to
	 * SYSTEM_SCHEDULING before other cores are awaken and before the
	 * code is write-protected.
	 *
	 * At the time the change is being done, just ignore whether we
	 * are doing nop -> jump or jump -> nop transition, and assume
	 * always nop being the 'currently valid' instruction
	 */
	if (init || system_state == SYSTEM_BOOTING) {
		text_poke_early((void *)jump_entry_code(entry), jlp.code, jlp.size);
		return;
	}

	text_poke_bp((void *)jump_entry_code(entry), jlp.code, jlp.size, NULL);
}

static void __ref jump_label_transform(struct jump_entry *entry,
				       enum jump_label_type type,
				       int init)
{
	mutex_lock(&text_mutex);
	__jump_label_transform(entry, type, init);
	mutex_unlock(&text_mutex);
}

void arch_jump_label_transform(struct jump_entry *entry,
			       enum jump_label_type type)
{
	jump_label_transform(entry, type, 0);
}

bool arch_jump_label_transform_queue(struct jump_entry *entry,
				     enum jump_label_type type)
{
	struct jump_label_patch jlp;

	if (system_state == SYSTEM_BOOTING) {
		/*
		 * Fallback to the non-batching mode.
		 */
		arch_jump_label_transform(entry, type);
		return true;
	}

	mutex_lock(&text_mutex);
	jlp = __jump_label_patch(entry, type);
	text_poke_queue((void *)jump_entry_code(entry), jlp.code, jlp.size, NULL);
	mutex_unlock(&text_mutex);
	return true;
}

void arch_jump_label_transform_apply(void)
{
	mutex_lock(&text_mutex);
	text_poke_finish();
	mutex_unlock(&text_mutex);
}
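
For context, a rough sketch of how the queue/apply pair above is driven. This is an assumed, simplified rendering of the generic jump_label core's batching loop, not code from this file, and update_jump_entries and stop are hypothetical names. Entries are queued in a batch and flushed with a single apply; if an architecture reports its queue as full, the pending batch is applied and the entry is queued again.

/* Hypothetical driver loop, for illustration only. */
static void update_jump_entries(struct jump_entry *entry,
				struct jump_entry *stop,
				enum jump_label_type type)
{
	for (; entry < stop; entry++) {
		if (!arch_jump_label_transform_queue(entry, type)) {
			/* Queue reported full: flush what is pending, then requeue. */
			arch_jump_label_transform_apply();
			arch_jump_label_transform_queue(entry, type);
		}
	}

	/* A single apply patches every queued site via text_poke_finish(). */
	arch_jump_label_transform_apply();
}

Note that the x86 arch_jump_label_transform_queue() shown above always returns true, so the requeue path is never taken with this implementation.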