1// SPDX-License-Identifier: GPL-2.0
2/*
3 * jump label x86 support
4 *
5 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
6 *
7 */
8#include <linux/jump_label.h>
9#include <linux/memory.h>
10#include <linux/uaccess.h>
11#include <linux/module.h>
12#include <linux/list.h>
13#include <linux/jhash.h>
14#include <linux/cpu.h>
15#include <asm/kprobes.h>
16#include <asm/alternative.h>
17#include <asm/text-patching.h>
18#include <asm/insn.h>
19
20int arch_jump_entry_size(struct jump_entry *entry)
21{
22 struct insn insn = {};
23
24 insn_decode_kernel(&insn, (void *)jump_entry_code(entry));
25 BUG_ON(insn.length != 2 && insn.length != 5);
26
27 return insn.length;
28}
29
/*
 * A generated instruction (JMP or NOP) plus its length: exactly what to
 * write at a jump entry's patch site.
 */
struct jump_label_patch {
	const void *code;	/* bytes to install at the site */
	int size;		/* JMP8_INSN_SIZE (2) or JMP32_INSN_SIZE (5) */
};
34
35static struct jump_label_patch
36__jump_label_patch(struct jump_entry *entry, enum jump_label_type type)
37{
38 const void *expect, *code, *nop;
39 const void *addr, *dest;
40 int size;
41
42 addr = (void *)jump_entry_code(entry);
43 dest = (void *)jump_entry_target(entry);
44
45 size = arch_jump_entry_size(entry);
46 switch (size) {
47 case JMP8_INSN_SIZE:
48 code = text_gen_insn(JMP8_INSN_OPCODE, addr, dest);
49 nop = x86_nops[size];
50 break;
51
52 case JMP32_INSN_SIZE:
53 code = text_gen_insn(JMP32_INSN_OPCODE, addr, dest);
54 nop = x86_nops[size];
55 break;
56
57 default: BUG();
58 }
59
60 if (type == JUMP_LABEL_JMP)
61 expect = nop;
62 else
63 expect = code;
64
65 if (memcmp(addr, expect, size)) {
66 /*
67 * The location is not an op that we were expecting.
68 * Something went wrong. Crash the box, as something could be
69 * corrupting the kernel.
70 */
71 pr_crit("jump_label: Fatal kernel bug, unexpected op at %pS [%p] (%5ph != %5ph)) size:%d type:%d\n",
72 addr, addr, addr, expect, size, type);
73 BUG();
74 }
75
76 if (type == JUMP_LABEL_NOP)
77 code = nop;
78
79 return (struct jump_label_patch){.code = code, .size = size};
80}
81
82static __always_inline void
83__jump_label_transform(struct jump_entry *entry,
84 enum jump_label_type type,
85 int init)
86{
87 const struct jump_label_patch jlp = __jump_label_patch(entry, type);
88
89 /*
90 * As long as only a single processor is running and the code is still
91 * not marked as RO, text_poke_early() can be used; Checking that
92 * system_state is SYSTEM_BOOTING guarantees it. It will be set to
93 * SYSTEM_SCHEDULING before other cores are awaken and before the
94 * code is write-protected.
95 *
96 * At the time the change is being done, just ignore whether we
97 * are doing nop -> jump or jump -> nop transition, and assume
98 * always nop being the 'currently valid' instruction
99 */
100 if (init || system_state == SYSTEM_BOOTING) {
101 text_poke_early((void *)jump_entry_code(entry), jlp.code, jlp.size);
102 return;
103 }
104
105 text_poke_bp((void *)jump_entry_code(entry), jlp.code, jlp.size, NULL);
106}
107
/*
 * Patch one jump entry with text_mutex held, serializing against all
 * other kernel text modifications.
 */
static void __ref jump_label_transform(struct jump_entry *entry,
				       enum jump_label_type type,
				       int init)
{
	mutex_lock(&text_mutex);
	__jump_label_transform(entry, type, init);
	mutex_unlock(&text_mutex);
}
116
/*
 * Generic jump_label entry point: transform a single entry at runtime
 * (init == 0, so the live-patching path is taken once the system is up).
 */
void arch_jump_label_transform(struct jump_entry *entry,
			       enum jump_label_type type)
{
	jump_label_transform(entry, type, 0);
}
122
123bool arch_jump_label_transform_queue(struct jump_entry *entry,
124 enum jump_label_type type)
125{
126 struct jump_label_patch jlp;
127
128 if (system_state == SYSTEM_BOOTING) {
129 /*
130 * Fallback to the non-batching mode.
131 */
132 arch_jump_label_transform(entry, type);
133 return true;
134 }
135
136 mutex_lock(&text_mutex);
137 jlp = __jump_label_patch(entry, type);
138 text_poke_queue((void *)jump_entry_code(entry), jlp.code, jlp.size, NULL);
139 mutex_unlock(&text_mutex);
140 return true;
141}
142
/*
 * Flush all transforms queued by arch_jump_label_transform_queue() in
 * one batch, under text_mutex.
 */
void arch_jump_label_transform_apply(void)
{
	mutex_lock(&text_mutex);
	text_poke_finish();
	mutex_unlock(&text_mutex);
}
/*
 * jump label x86 support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 *
 * NOTE(review): everything from here down appears to be an OLDER
 * revision of this same file appended after the current one (duplicate
 * includes and duplicate symbol definitions such as
 * arch_jump_label_transform). Confirm and drop one copy.
 */
7#include <linux/jump_label.h>
8#include <linux/memory.h>
9#include <linux/uaccess.h>
10#include <linux/module.h>
11#include <linux/list.h>
12#include <linux/jhash.h>
13#include <linux/cpu.h>
14#include <asm/kprobes.h>
15#include <asm/alternative.h>
16
17#ifdef HAVE_JUMP_LABEL
18
/*
 * Two views of the 5-byte patch site: raw NOP bytes, or a near JMP laid
 * out as opcode + rel32 displacement with no padding between them.
 */
union jump_code_union {
	char code[JUMP_LABEL_NOP_SIZE];
	struct {
		char jump;	/* JMP rel32 opcode (0xe9, set by callers) */
		int offset;	/* displacement relative to end of the insn */
	} __attribute__((packed));
};
26
/*
 * Report an unexpected instruction at @ip — dumping its first five
 * bytes and the caller's source line — then crash via BUG(), since
 * mismatched text may mean the kernel is being corrupted.
 */
static void bug_at(unsigned char *ip, int line)
{
	/*
	 * The location is not an op that we were expecting.
	 * Something went wrong. Crash the box, as something could be
	 * corrupting the kernel.
	 */
	pr_warning("Unexpected op at %pS [%p] (%02x %02x %02x %02x %02x) %s:%d\n",
		   ip, ip, ip[0], ip[1], ip[2], ip[3], ip[4], __FILE__, line);
	BUG();
}
38
/*
 * Patch a single jump entry to a JMP or a NOP, after verifying that the
 * bytes currently on-site match what this transition expects.
 *
 * @poker: raw text-write routine (e.g. text_poke_early) to use when
 *         direct writing is safe; NULL selects the text_poke_bp()
 *         breakpoint-based fallback for live patching.
 * @init:  non-zero on the first transform of this entry, when the site
 *         still holds the compile-time default nop rather than the
 *         runtime ideal nop.
 */
static void __jump_label_transform(struct jump_entry *entry,
				   enum jump_label_type type,
				   void *(*poker)(void *, const void *, size_t),
				   int init)
{
	union jump_code_union code;
	const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
	const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5];

	if (type == JUMP_LABEL_JMP) {
		if (init) {
			/*
			 * Jump label is enabled for the first time.
			 * So we expect a default_nop...
			 */
			if (unlikely(memcmp((void *)entry->code, default_nop, 5)
				!= 0))
				bug_at((void *)entry->code, __LINE__);
		} else {
			/*
			 * ...otherwise expect an ideal_nop. Otherwise
			 * something went horribly wrong.
			 */
			if (unlikely(memcmp((void *)entry->code, ideal_nop, 5)
				!= 0))
				bug_at((void *)entry->code, __LINE__);
		}

		/* Build the 5-byte relative JMP to the entry's target. */
		code.jump = 0xe9;
		code.offset = entry->target -
				(entry->code + JUMP_LABEL_NOP_SIZE);
	} else {
		/*
		 * We are disabling this jump label. If it is not what
		 * we think it is, then something must have gone wrong.
		 * If this is the first initialization call, then we
		 * are converting the default nop to the ideal nop.
		 */
		if (init) {
			if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0))
				bug_at((void *)entry->code, __LINE__);
		} else {
			/* Expect the JMP we would have installed earlier. */
			code.jump = 0xe9;
			code.offset = entry->target -
				(entry->code + JUMP_LABEL_NOP_SIZE);
			if (unlikely(memcmp((void *)entry->code, &code, 5) != 0))
				bug_at((void *)entry->code, __LINE__);
		}
		/* Either way, the site ends up holding the ideal nop. */
		memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
	}

	/*
	 * Make text_poke_bp() a default fallback poker.
	 *
	 * At the time the change is being done, just ignore whether we
	 * are doing nop -> jump or jump -> nop transition, and assume
	 * always nop being the 'currently valid' instruction
	 *
	 */
	if (poker)
		(*poker)((void *)entry->code, &code, JUMP_LABEL_NOP_SIZE);
	else
		text_poke_bp((void *)entry->code, &code, JUMP_LABEL_NOP_SIZE,
			     (void *)entry->code + JUMP_LABEL_NOP_SIZE);
}
104
/*
 * Runtime entry point: patch one entry with CPU hotplug blocked and
 * text_mutex held.
 *
 * NOTE(review): this redefines arch_jump_label_transform() from the
 * first half of this file — it looks like an older revision of the file
 * was appended; confirm and remove one of the two definitions.
 */
void arch_jump_label_transform(struct jump_entry *entry,
			       enum jump_label_type type)
{
	get_online_cpus();
	mutex_lock(&text_mutex);
	__jump_label_transform(entry, type, NULL, 0);
	mutex_unlock(&text_mutex);
	put_online_cpus();
}
114
/*
 * Records, once per boot, whether the compile-time default nop differs
 * from the runtime ideal nop and therefore needs rewriting at each site.
 */
static enum {
	JL_STATE_START,		/* not determined yet */
	JL_STATE_NO_UPDATE,	/* default nop == ideal nop; nothing to do */
	JL_STATE_UPDATE,	/* sites must be rewritten to the ideal nop */
} jlstate __initdata_or_module = JL_STATE_START;
120
/*
 * Early (boot / module-load) transform: writes text directly via
 * text_poke_early() with init == 1, and only when the default and ideal
 * nops actually differ.
 */
__init_or_module void arch_jump_label_transform_static(struct jump_entry *entry,
						       enum jump_label_type type)
{
	/*
	 * This function is called at boot up and when modules are
	 * first loaded. Check if the default nop, the one that is
	 * inserted at compile time, is the ideal nop. If it is, then
	 * we do not need to update the nop, and we can leave it as is.
	 * If it is not, then we need to update the nop to the ideal nop.
	 */
	if (jlstate == JL_STATE_START) {
		const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
		const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5];

		if (memcmp(ideal_nop, default_nop, 5) != 0)
			jlstate = JL_STATE_UPDATE;
		else
			jlstate = JL_STATE_NO_UPDATE;
	}
	if (jlstate == JL_STATE_UPDATE)
		__jump_label_transform(entry, type, text_poke_early, 1);
}
143
144#endif