v3.5.6
 
/*
 * jump label x86 support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 *
 */
#include <linux/jump_label.h>
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/cpu.h>
#include <asm/kprobes.h>
#include <asm/alternative.h>

#ifdef HAVE_JUMP_LABEL

union jump_code_union {
	char code[JUMP_LABEL_NOP_SIZE];
	struct {
		char jump;	/* 0xe9: near jmp with rel32 displacement */
		int offset;	/* displacement, relative to the next instruction */
	} __attribute__((packed));
};

static void __jump_label_transform(struct jump_entry *entry,
				   enum jump_label_type type,
				   void *(*poker)(void *, const void *, size_t))
{
	union jump_code_union code;

	if (type == JUMP_LABEL_ENABLE) {
		/* Build a 5-byte jmp to the target of this jump entry. */
		code.jump = 0xe9;
		code.offset = entry->target -
				(entry->code + JUMP_LABEL_NOP_SIZE);
	} else
		memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);

	(*poker)((void *)entry->code, &code, JUMP_LABEL_NOP_SIZE);
}

void arch_jump_label_transform(struct jump_entry *entry,
			       enum jump_label_type type)
{
	get_online_cpus();
	mutex_lock(&text_mutex);
	__jump_label_transform(entry, type, text_poke_smp);
	mutex_unlock(&text_mutex);
	put_online_cpus();
}

__init_or_module void arch_jump_label_transform_static(struct jump_entry *entry,
				      enum jump_label_type type)
{
	__jump_label_transform(entry, type, text_poke_early);
}

#endif
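
For context, here is a minimal sketch (not part of the file above) of how a static key user of this era would end up calling arch_jump_label_transform(). It assumes the 3.5-era struct static_key API; the module, key, and function names are made up for illustration.

/*
 * Illustrative only: a hypothetical module using the 3.5-era static key API.
 * Enabling the key is what ultimately invokes arch_jump_label_transform().
 */
#include <linux/module.h>
#include <linux/jump_label.h>

static struct static_key demo_key = STATIC_KEY_INIT_FALSE;

static void hot_path(void)
{
	/*
	 * Emitted as a 5-byte NOP while the key is false; patched into a
	 * jmp to the unlikely branch when the key is enabled.
	 */
	if (static_key_false(&demo_key))
		pr_info("slow path taken\n");
}

static int __init demo_init(void)
{
	hot_path();			/* branch not taken */
	static_key_slow_inc(&demo_key);	/* patches the NOP into a jmp */
	hot_path();			/* now reaches pr_info() */
	return 0;
}

static void __exit demo_exit(void)
{
	static_key_slow_dec(&demo_key);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");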
v6.8
// SPDX-License-Identifier: GPL-2.0
/*
 * jump label x86 support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 *
 */
#include <linux/jump_label.h>
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/cpu.h>
#include <asm/kprobes.h>
#include <asm/alternative.h>
#include <asm/text-patching.h>
#include <asm/insn.h>

int arch_jump_entry_size(struct jump_entry *entry)
{
	struct insn insn = {};

	insn_decode_kernel(&insn, (void *)jump_entry_code(entry));
	BUG_ON(insn.length != 2 && insn.length != 5);

	return insn.length;
}

struct jump_label_patch {
	const void *code;
	int size;
};

static struct jump_label_patch
__jump_label_patch(struct jump_entry *entry, enum jump_label_type type)
{
	const void *expect, *code, *nop;
	const void *addr, *dest;
	int size;

	addr = (void *)jump_entry_code(entry);
	dest = (void *)jump_entry_target(entry);

	size = arch_jump_entry_size(entry);
	switch (size) {
	case JMP8_INSN_SIZE:
		code = text_gen_insn(JMP8_INSN_OPCODE, addr, dest);
		nop = x86_nops[size];
		break;

	case JMP32_INSN_SIZE:
		code = text_gen_insn(JMP32_INSN_OPCODE, addr, dest);
		nop = x86_nops[size];
		break;

	default: BUG();
	}

	if (type == JUMP_LABEL_JMP)
		expect = nop;
	else
		expect = code;

	if (memcmp(addr, expect, size)) {
		/*
		 * The location is not an op that we were expecting.
		 * Something went wrong. Crash the box, as something could be
		 * corrupting the kernel.
		 */
		pr_crit("jump_label: Fatal kernel bug, unexpected op at %pS [%p] (%5ph != %5ph)) size:%d type:%d\n",
				addr, addr, addr, expect, size, type);
		BUG();
	}

	if (type == JUMP_LABEL_NOP)
		code = nop;

	return (struct jump_label_patch){.code = code, .size = size};
}

static __always_inline void
__jump_label_transform(struct jump_entry *entry,
		       enum jump_label_type type,
		       int init)
{
	const struct jump_label_patch jlp = __jump_label_patch(entry, type);

	/*
	 * As long as only a single processor is running and the code is still
	 * not marked as RO, text_poke_early() can be used; checking that
	 * system_state is SYSTEM_BOOTING guarantees it. It will be set to
	 * SYSTEM_SCHEDULING before other cores are woken up and before the
	 * code is write-protected.
	 *
	 * At the time the change is being done, just ignore whether we
	 * are doing nop -> jump or jump -> nop transition, and assume
	 * always nop being the 'currently valid' instruction.
	 */
	if (init || system_state == SYSTEM_BOOTING) {
		text_poke_early((void *)jump_entry_code(entry), jlp.code, jlp.size);
		return;
	}

	text_poke_bp((void *)jump_entry_code(entry), jlp.code, jlp.size, NULL);
}

static void __ref jump_label_transform(struct jump_entry *entry,
				       enum jump_label_type type,
				       int init)
{
	mutex_lock(&text_mutex);
	__jump_label_transform(entry, type, init);
	mutex_unlock(&text_mutex);
}

void arch_jump_label_transform(struct jump_entry *entry,
			       enum jump_label_type type)
{
	jump_label_transform(entry, type, 0);
}

bool arch_jump_label_transform_queue(struct jump_entry *entry,
				     enum jump_label_type type)
{
	struct jump_label_patch jlp;

	if (system_state == SYSTEM_BOOTING) {
		/*
		 * Fallback to the non-batching mode.
		 */
		arch_jump_label_transform(entry, type);
		return true;
	}

	mutex_lock(&text_mutex);
	jlp = __jump_label_patch(entry, type);
	text_poke_queue((void *)jump_entry_code(entry), jlp.code, jlp.size, NULL);
	mutex_unlock(&text_mutex);
	return true;
}

void arch_jump_label_transform_apply(void)
{
	mutex_lock(&text_mutex);
	text_poke_finish();
	mutex_unlock(&text_mutex);
}
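
For context, a similar minimal sketch (not part of the file above) against the modern static_branch API; the module, key, and function names are made up for illustration. Toggling the key makes the jump_label core patch each call site, which on x86 goes through the queue()/apply() batching implemented above.

/*
 * Illustrative only: a hypothetical module using the static_branch API.
 */
#include <linux/module.h>
#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(demo_key);

static void hot_path(void)
{
	/* A NOP while the key is disabled; becomes a jmp once enabled. */
	if (static_branch_unlikely(&demo_key))
		pr_info("feature path taken\n");
}

static int __init demo_init(void)
{
	hot_path();			/* branch not taken */
	static_branch_enable(&demo_key);	/* live-patches the site */
	hot_path();			/* now reaches pr_info() */
	return 0;
}

static void __exit demo_exit(void)
{
	static_branch_disable(&demo_key);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");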