// SPDX-License-Identifier: GPL-2.0
/*
 * jump label x86 support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 *
 */
#include <linux/jump_label.h>
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/cpu.h>
#include <asm/kprobes.h>
#include <asm/alternative.h>
#include <asm/text-patching.h>

union jump_code_union {
	char code[JUMP_LABEL_NOP_SIZE];
	struct {
		char jump;
		int offset;
	} __attribute__((packed));
};
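
/*
 * Illustrative note: the union overlays a 5-byte buffer with the fields
 * of a near jump, so writing jump = 0xe9 plus a 32-bit displacement
 * yields the instruction bytes directly. For example, a displacement of
 * 0x1b encodes as:
 *
 *	e9 1b 00 00 00		jmp	. + 0x20
 *
 * JUMP_LABEL_NOP_SIZE is 5 on x86, matching this encoding.
 */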

static void bug_at(unsigned char *ip, int line)
{
	/*
	 * The location is not an op that we were expecting.
	 * Something went wrong. Crash the box, as something could be
	 * corrupting the kernel.
	 */
	pr_crit("jump_label: Fatal kernel bug, unexpected op at %pS [%p] (%5ph) %d\n", ip, ip, ip, line);
	BUG();
}

static void __jump_label_set_jump_code(struct jump_entry *entry,
				       enum jump_label_type type,
				       union jump_code_union *code,
				       int init)
{
	const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
	const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5];
	const void *expect;
	int line;

	code->jump = 0xe9;
	code->offset = jump_entry_target(entry) -
		       (jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE);

	if (init) {
		expect = default_nop; line = __LINE__;
	} else if (type == JUMP_LABEL_JMP) {
		expect = ideal_nop; line = __LINE__;
	} else {
		expect = code->code; line = __LINE__;
	}

	if (memcmp((void *)jump_entry_code(entry), expect, JUMP_LABEL_NOP_SIZE))
		bug_at((void *)jump_entry_code(entry), line);

	if (type == JUMP_LABEL_NOP)
		memcpy(code, ideal_nop, JUMP_LABEL_NOP_SIZE);
}
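
/*
 * A worked example of the displacement math above (illustrative
 * addresses): if the jump site lives at 0xffffffff81000000 and the
 * target label at 0xffffffff81000020, then offset = 0x20 -
 * JUMP_LABEL_NOP_SIZE = 0x1b, because a relative JMP is encoded against
 * the address of the *next* instruction.
 */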

static void __ref __jump_label_transform(struct jump_entry *entry,
					 enum jump_label_type type,
					 int init)
{
	union jump_code_union code;

	__jump_label_set_jump_code(entry, type, &code, init);

	/*
	 * As long as only a single processor is running and the code is
	 * still not marked read-only, text_poke_early() can be used;
	 * checking that system_state is SYSTEM_BOOTING guarantees it. It
	 * will be set to SYSTEM_SCHEDULING before other CPUs are woken up
	 * and before the code is write-protected.
	 *
	 * At the time the change is being done, just ignore whether we
	 * are doing a nop -> jump or a jump -> nop transition, and assume
	 * the nop is always the 'currently valid' instruction.
	 */
	if (init || system_state == SYSTEM_BOOTING) {
		text_poke_early((void *)jump_entry_code(entry), &code,
				JUMP_LABEL_NOP_SIZE);
		return;
	}

	text_poke_bp((void *)jump_entry_code(entry), &code, JUMP_LABEL_NOP_SIZE,
		     (void *)jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE);
}
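
/*
 * Roughly how text_poke_bp() makes the live patch safe (a sketch of the
 * mechanism; see the text-patching code for the authoritative sequence):
 * it first writes an INT3 over the first byte and syncs all CPUs, then
 * writes the remaining four bytes, syncs again, and finally replaces the
 * INT3 with the new first byte. Any CPU that hits the transient INT3 is
 * redirected by the handler, here to the address right past the patched
 * site, which is correct under the "treat it as a nop" assumption above.
 */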

void arch_jump_label_transform(struct jump_entry *entry,
			       enum jump_label_type type)
{
	mutex_lock(&text_mutex);
	__jump_label_transform(entry, type, 0);
	mutex_unlock(&text_mutex);
}

#define TP_VEC_MAX (PAGE_SIZE / sizeof(struct text_poke_loc))
static struct text_poke_loc tp_vec[TP_VEC_MAX];
static int tp_vec_nr;
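
/*
 * Sizing note (illustrative arithmetic; the exact figure depends on the
 * layout of struct text_poke_loc): with 4 KiB pages and, say, a 32-byte
 * text_poke_loc, TP_VEC_MAX would come out to 128 queued patches per
 * batch.
 */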

bool arch_jump_label_transform_queue(struct jump_entry *entry,
				     enum jump_label_type type)
{
	struct text_poke_loc *tp;
	void *entry_code;

	if (system_state == SYSTEM_BOOTING) {
		/*
		 * Fall back to the non-batching mode.
		 */
		arch_jump_label_transform(entry, type);
		return true;
	}

	/*
	 * No more space in the vector; tell the upper layer to apply
	 * the queue before continuing.
	 */
	if (tp_vec_nr == TP_VEC_MAX)
		return false;

	tp = &tp_vec[tp_vec_nr];

	entry_code = (void *)jump_entry_code(entry);

	/*
	 * The INT3 handler will do a bsearch in the queue, so we need
	 * entries to be sorted. We can survive an unsorted list by
	 * rejecting the entry, which forces the generic jump_label code
	 * to apply the queue first. Warn once to draw attention to the
	 * unsorted case, which had better not happen: in the worst case
	 * we simply perform the same way as we would without batching,
	 * with some extra overhead.
	 */
	if (tp_vec_nr > 0) {
		int prev = tp_vec_nr - 1;
		struct text_poke_loc *prev_tp = &tp_vec[prev];

		if (WARN_ON_ONCE(prev_tp->addr > entry_code))
			return false;
	}

	__jump_label_set_jump_code(entry, type,
				   (union jump_code_union *) &tp->opcode, 0);

	tp->addr = entry_code;
	tp->detour = entry_code + JUMP_LABEL_NOP_SIZE;
	tp->len = JUMP_LABEL_NOP_SIZE;

	tp_vec_nr++;

	return true;
}

void arch_jump_label_transform_apply(void)
{
	if (!tp_vec_nr)
		return;

	mutex_lock(&text_mutex);
	text_poke_bp_batch(tp_vec, tp_vec_nr);
	mutex_unlock(&text_mutex);

	tp_vec_nr = 0;
}
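
/*
 * A sketch of how the generic jump_label core is expected to drive the
 * batching API above (illustrative pseudo-code, not the exact caller):
 *
 *	for_each_entry(entry, key) {
 *		if (!arch_jump_label_transform_queue(entry, type)) {
 *			// queue full: flush, then retry this entry
 *			arch_jump_label_transform_apply();
 *			arch_jump_label_transform_queue(entry, type);
 *		}
 *	}
 *	arch_jump_label_transform_apply();
 */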

static enum {
	JL_STATE_START,
	JL_STATE_NO_UPDATE,
	JL_STATE_UPDATE,
} jlstate __initdata_or_module = JL_STATE_START;

__init_or_module void arch_jump_label_transform_static(struct jump_entry *entry,
						       enum jump_label_type type)
{
	/*
	 * This function is called at boot up and when modules are
	 * first loaded. Check if the default nop, the one that is
	 * inserted at compile time, is the ideal nop. If it is, then
	 * we do not need to update the nop, and we can leave it as is.
	 * If it is not, then we need to update the nop to the ideal nop.
	 */
	if (jlstate == JL_STATE_START) {
		const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
		const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5];

		if (memcmp(ideal_nop, default_nop, 5) != 0)
			jlstate = JL_STATE_UPDATE;
		else
			jlstate = JL_STATE_NO_UPDATE;
	}
	if (jlstate == JL_STATE_UPDATE)
		__jump_label_transform(entry, type, 1);
}
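
/*
 * For reference (values for a typical configuration; the exact bytes
 * depend on CPU and kernel config): the compile-time default nop may be
 * GENERIC_NOP5_ATOMIC, while ideal_nops[NOP_ATOMIC5] on modern Intel
 * parts is the P6 nopl, 0f 1f 44 00 00. When the two differ, every jump
 * label site gets rewritten once at boot.
 */

/*
 * A later revision of the same file follows. It replaces the fixed
 * 5-byte assumption with instruction decoding, so jump label sites may
 * be either 2-byte (JMP8) or 5-byte (JMP32) instructions.
 */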
// SPDX-License-Identifier: GPL-2.0
/*
 * jump label x86 support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 *
 */
#include <linux/jump_label.h>
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/cpu.h>
#include <asm/kprobes.h>
#include <asm/alternative.h>
#include <asm/text-patching.h>
#include <asm/insn.h>

int arch_jump_entry_size(struct jump_entry *entry)
{
	struct insn insn = {};

	insn_decode_kernel(&insn, (void *)jump_entry_code(entry));
	BUG_ON(insn.length != 2 && insn.length != 5);

	return insn.length;
}
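
/*
 * The two lengths correspond to the two jump encodings the compiler may
 * emit for a jump label site (illustrative displacement bytes):
 *
 *	eb 1b			jmp short . + 0x1d	(JMP8, 2 bytes)
 *	e9 1b 00 00 00		jmp . + 0x20		(JMP32, 5 bytes)
 *
 * The decoder tells us which one was placed at this site.
 */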

struct jump_label_patch {
	const void *code;
	int size;
};

static struct jump_label_patch
__jump_label_patch(struct jump_entry *entry, enum jump_label_type type)
{
	const void *expect, *code, *nop;
	const void *addr, *dest;
	int size;

	addr = (void *)jump_entry_code(entry);
	dest = (void *)jump_entry_target(entry);

	size = arch_jump_entry_size(entry);
	switch (size) {
	case JMP8_INSN_SIZE:
		code = text_gen_insn(JMP8_INSN_OPCODE, addr, dest);
		nop = x86_nops[size];
		break;

	case JMP32_INSN_SIZE:
		code = text_gen_insn(JMP32_INSN_OPCODE, addr, dest);
		nop = x86_nops[size];
		break;

	default: BUG();
	}

	if (type == JUMP_LABEL_JMP)
		expect = nop;
	else
		expect = code;

	if (memcmp(addr, expect, size)) {
		/*
		 * The location is not an op that we were expecting.
		 * Something went wrong. Crash the box, as something could be
		 * corrupting the kernel.
		 */
		pr_crit("jump_label: Fatal kernel bug, unexpected op at %pS [%p] (%5ph != %5ph) size:%d type:%d\n",
			addr, addr, addr, expect, size, type);
		BUG();
	}

	if (type == JUMP_LABEL_NOP)
		code = nop;

	return (struct jump_label_patch){ .code = code, .size = size };
}
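
/*
 * In other words: when enabling a key (JUMP_LABEL_JMP) the site must
 * currently hold the right-sized NOP and the generated JMP is returned;
 * when disabling, the site must hold exactly the JMP we would have
 * generated, and the NOP is returned instead.
 */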

static __always_inline void
__jump_label_transform(struct jump_entry *entry,
		       enum jump_label_type type,
		       int init)
{
	const struct jump_label_patch jlp = __jump_label_patch(entry, type);

	/*
	 * As long as only a single processor is running and the code is
	 * still not marked read-only, text_poke_early() can be used;
	 * checking that system_state is SYSTEM_BOOTING guarantees it. It
	 * will be set to SYSTEM_SCHEDULING before other CPUs are woken up
	 * and before the code is write-protected.
	 *
	 * At the time the change is being done, just ignore whether we
	 * are doing a nop -> jump or a jump -> nop transition, and assume
	 * the nop is always the 'currently valid' instruction.
	 */
	if (init || system_state == SYSTEM_BOOTING) {
		text_poke_early((void *)jump_entry_code(entry), jlp.code, jlp.size);
		return;
	}

	text_poke_bp((void *)jump_entry_code(entry), jlp.code, jlp.size, NULL);
}
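
/*
 * Note on the NULL last argument: in this revision of the text-patching
 * API, passing NULL for 'emulate' asks text_poke_bp() to emulate the
 * very instruction being written while the transient INT3 is in place,
 * so no separate continuation address is needed (compare the older
 * version above, which passed the address past the patched site).
 */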

static void __ref jump_label_transform(struct jump_entry *entry,
				       enum jump_label_type type,
				       int init)
{
	mutex_lock(&text_mutex);
	__jump_label_transform(entry, type, init);
	mutex_unlock(&text_mutex);
}

void arch_jump_label_transform(struct jump_entry *entry,
			       enum jump_label_type type)
{
	jump_label_transform(entry, type, 0);
}

bool arch_jump_label_transform_queue(struct jump_entry *entry,
				     enum jump_label_type type)
{
	struct jump_label_patch jlp;

	if (system_state == SYSTEM_BOOTING) {
		/*
		 * Fall back to the non-batching mode.
		 */
		arch_jump_label_transform(entry, type);
		return true;
	}

	mutex_lock(&text_mutex);
	jlp = __jump_label_patch(entry, type);
	text_poke_queue((void *)jump_entry_code(entry), jlp.code, jlp.size, NULL);
	mutex_unlock(&text_mutex);
	return true;
}

void arch_jump_label_transform_apply(void)
{
	mutex_lock(&text_mutex);
	text_poke_finish();
	mutex_unlock(&text_mutex);
}
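
/*
 * For context, a sketch of the consumer-side API whose flips end up in
 * the arch hooks above (illustrative driver-style usage; the key and
 * function names are made up):
 *
 *	#include <linux/jump_label.h>
 *
 *	static DEFINE_STATIC_KEY_FALSE(my_feature_key);
 *
 *	void my_hot_path(void)
 *	{
 *		if (static_branch_unlikely(&my_feature_key))
 *			do_feature();	// a patched-in jump when enabled
 *	}
 *
 *	void my_feature_enable(void)
 *	{
 *		static_branch_enable(&my_feature_key); // triggers the
 *						       // transforms above
 *	}
 */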