1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
4 */
5
6/*
7 * This file reads all the special sections which have alternate instructions
8 * which can be patched in or redirected to at runtime.
9 */
10
11#include <stdlib.h>
12#include <string.h>
13
14#include <arch/special.h>
15#include <objtool/builtin.h>
16#include <objtool/special.h>
17#include <objtool/warn.h>
18#include <objtool/endianness.h>
19
/*
 * Describes the binary layout of one kind of special-section entry.
 * All offset fields are byte offsets from the start of an entry; the
 * actual values come from arch/special.h.
 */
struct special_entry {
	const char *sec;		/* ELF section name to look for */
	bool group, jump_or_nop;
	unsigned char size, orig, new;	/* entry size; orig/new reloc offsets */
	unsigned char orig_len, new_len; /* group only */
	unsigned char feature; /* ALTERNATIVE macro CPU feature */
	unsigned char key; /* jump_label key */
};
28
/*
 * Table of all special sections objtool knows how to parse, with their
 * per-entry field offsets from arch/special.h.  Terminated by an empty
 * entry.
 */
struct special_entry entries[] = {
	{
		/* ALTERNATIVE() patch sites */
		.sec = ".altinstructions",
		.group = true,
		.size = ALT_ENTRY_SIZE,
		.orig = ALT_ORIG_OFFSET,
		.orig_len = ALT_ORIG_LEN_OFFSET,
		.new = ALT_NEW_OFFSET,
		.new_len = ALT_NEW_LEN_OFFSET,
		.feature = ALT_FEATURE_OFFSET,
	},
	{
		/* jump label (static branch) sites */
		.sec = "__jump_table",
		.jump_or_nop = true,
		.size = JUMP_ENTRY_SIZE,
		.orig = JUMP_ORIG_OFFSET,
		.new = JUMP_NEW_OFFSET,
		.key = JUMP_KEY_OFFSET,
	},
	{
		/* exception fixup table */
		.sec = "__ex_table",
		.size = EX_ENTRY_SIZE,
		.orig = EX_ORIG_OFFSET,
		.new = EX_NEW_OFFSET,
	},
	{},
};
56
/*
 * Arch-specific hook to adjust 'alt' based on the CPU feature number of an
 * .altinstructions entry.  This weak default does nothing; an architecture
 * may override it (e.g. to skip one side of an alternative).
 */
void __weak arch_handle_alternative(unsigned short feature, struct special_alt *alt)
{
}
60
61static void reloc_to_sec_off(struct reloc *reloc, struct section **sec,
62 unsigned long *off)
63{
64 *sec = reloc->sym->sec;
65 *off = reloc->sym->offset + reloc->addend;
66}
67
/*
 * Parse the idx'th entry of special section 'sec' into 'alt'.
 *
 * Returns 0 on success, 1 if the entry should be silently skipped
 * (retpoline replacement), or -1 on error.
 */
static int get_alt_entry(struct elf *elf, struct special_entry *entry,
			 struct section *sec, int idx,
			 struct special_alt *alt)
{
	struct reloc *orig_reloc, *new_reloc;
	unsigned long offset;

	/* byte offset of this entry within the section data */
	offset = idx * entry->size;

	alt->group = entry->group;
	alt->jump_or_nop = entry->jump_or_nop;

	if (alt->group) {
		/* orig/new instruction lengths are stored inline in the entry */
		alt->orig_len = *(unsigned char *)(sec->data->d_buf + offset +
						   entry->orig_len);
		alt->new_len = *(unsigned char *)(sec->data->d_buf + offset +
						  entry->new_len);
	}

	if (entry->feature) {
		unsigned short feature;

		/* stored in target endianness; convert for the host */
		feature = bswap_if_needed(*(unsigned short *)(sec->data->d_buf +
							      offset +
							      entry->feature));
		arch_handle_alternative(feature, alt);
	}

	orig_reloc = find_reloc_by_dest(elf, sec, offset + entry->orig);
	if (!orig_reloc) {
		WARN_FUNC("can't find orig reloc", sec, offset + entry->orig);
		return -1;
	}

	reloc_to_sec_off(orig_reloc, &alt->orig_sec, &alt->orig_off);

	/* a zero-length group replacement has no 'new' reloc to resolve */
	if (!entry->group || alt->new_len) {
		new_reloc = find_reloc_by_dest(elf, sec, offset + entry->new);
		if (!new_reloc) {
			WARN_FUNC("can't find new reloc",
				  sec, offset + entry->new);
			return -1;
		}

		/*
		 * Skip retpoline .altinstr_replacement... we already rewrite the
		 * instructions for retpolines anyway, see arch_is_retpoline()
		 * usage in add_{call,jump}_destinations().
		 */
		if (arch_is_retpoline(new_reloc->sym))
			return 1;

		reloc_to_sec_off(new_reloc, &alt->new_sec, &alt->new_off);

		/* _ASM_EXTABLE_EX hack */
		if (alt->new_off >= 0x7ffffff0)
			alt->new_off -= 0x7ffffff0;
	}

	if (entry->key) {
		struct reloc *key_reloc;

		/* record the jump_label key addend for later matching */
		key_reloc = find_reloc_by_dest(elf, sec, offset + entry->key);
		if (!key_reloc) {
			WARN_FUNC("can't find key reloc",
				  sec, offset + entry->key);
			return -1;
		}
		alt->key_addend = key_reloc->addend;
	}

	return 0;
}
141
142/*
143 * Read all the special sections and create a list of special_alt structs which
144 * describe all the alternate instructions which can be patched in or
145 * redirected to at runtime.
146 */
147int special_get_alts(struct elf *elf, struct list_head *alts)
148{
149 struct special_entry *entry;
150 struct section *sec;
151 unsigned int nr_entries;
152 struct special_alt *alt;
153 int idx, ret;
154
155 INIT_LIST_HEAD(alts);
156
157 for (entry = entries; entry->sec; entry++) {
158 sec = find_section_by_name(elf, entry->sec);
159 if (!sec)
160 continue;
161
162 if (sec->len % entry->size != 0) {
163 WARN("%s size not a multiple of %d",
164 sec->name, entry->size);
165 return -1;
166 }
167
168 nr_entries = sec->len / entry->size;
169
170 for (idx = 0; idx < nr_entries; idx++) {
171 alt = malloc(sizeof(*alt));
172 if (!alt) {
173 WARN("malloc failed");
174 return -1;
175 }
176 memset(alt, 0, sizeof(*alt));
177
178 ret = get_alt_entry(elf, entry, sec, idx, alt);
179 if (ret > 0)
180 continue;
181 if (ret < 0)
182 return ret;
183
184 list_add_tail(&alt->list, alts);
185 }
186 }
187
188 return 0;
189}
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
4 */
5
6/*
7 * This file reads all the special sections which have alternate instructions
8 * which can be patched in or redirected to at runtime.
9 */
10
11#include <stdlib.h>
12#include <string.h>
13
14#include "builtin.h"
15#include "special.h"
16#include "warn.h"
17
/* __ex_table entry layout: 32-bit insn and fixup reloc offsets */
#define EX_ENTRY_SIZE 12
#define EX_ORIG_OFFSET 0
#define EX_NEW_OFFSET 4

/* __jump_table (jump label) entry layout */
#define JUMP_ENTRY_SIZE 16
#define JUMP_ORIG_OFFSET 0
#define JUMP_NEW_OFFSET 4

/* .altinstructions (ALTERNATIVE macro) entry layout */
#define ALT_ENTRY_SIZE 13
#define ALT_ORIG_OFFSET 0
#define ALT_NEW_OFFSET 4
#define ALT_FEATURE_OFFSET 8
#define ALT_ORIG_LEN_OFFSET 10
#define ALT_NEW_LEN_OFFSET 11

/* x86 CPU feature numbers (word*32 + bit) special-cased below */
#define X86_FEATURE_POPCNT (4*32+23)
#define X86_FEATURE_SMAP (9*32+20)
35
/*
 * Describes the binary layout of one kind of special-section entry.
 * All offset fields are byte offsets from the start of an entry.
 */
struct special_entry {
	const char *sec;		/* ELF section name to look for */
	bool group, jump_or_nop;
	unsigned char size, orig, new;	/* entry size; orig/new rela offsets */
	unsigned char orig_len, new_len; /* group only */
	unsigned char feature; /* ALTERNATIVE macro CPU feature */
};
43
/*
 * Table of all special sections objtool knows how to parse.
 * Terminated by an empty entry.
 */
struct special_entry entries[] = {
	{
		/* ALTERNATIVE() patch sites */
		.sec = ".altinstructions",
		.group = true,
		.size = ALT_ENTRY_SIZE,
		.orig = ALT_ORIG_OFFSET,
		.orig_len = ALT_ORIG_LEN_OFFSET,
		.new = ALT_NEW_OFFSET,
		.new_len = ALT_NEW_LEN_OFFSET,
		.feature = ALT_FEATURE_OFFSET,
	},
	{
		/* jump label (static branch) sites */
		.sec = "__jump_table",
		.jump_or_nop = true,
		.size = JUMP_ENTRY_SIZE,
		.orig = JUMP_ORIG_OFFSET,
		.new = JUMP_NEW_OFFSET,
	},
	{
		/* exception fixup table */
		.sec = "__ex_table",
		.size = EX_ENTRY_SIZE,
		.orig = EX_ORIG_OFFSET,
		.new = EX_NEW_OFFSET,
	},
	{},
};
70
/*
 * Parse the idx'th entry of special section 'sec' into 'alt'.
 *
 * Returns 0 on success, -1 on error.
 */
static int get_alt_entry(struct elf *elf, struct special_entry *entry,
			 struct section *sec, int idx,
			 struct special_alt *alt)
{
	struct rela *orig_rela, *new_rela;
	unsigned long offset;

	/* byte offset of this entry within the section data */
	offset = idx * entry->size;

	alt->group = entry->group;
	alt->jump_or_nop = entry->jump_or_nop;

	if (alt->group) {
		/* orig/new instruction lengths are stored inline in the entry */
		alt->orig_len = *(unsigned char *)(sec->data->d_buf + offset +
						   entry->orig_len);
		alt->new_len = *(unsigned char *)(sec->data->d_buf + offset +
						  entry->new_len);
	}

	if (entry->feature) {
		unsigned short feature;

		/* NOTE(review): read assumes host-endian entry data — no byte swap */
		feature = *(unsigned short *)(sec->data->d_buf + offset +
					      entry->feature);

		/*
		 * It has been requested that we don't validate the !POPCNT
		 * feature path which is a "very very small percentage of
		 * machines".
		 */
		if (feature == X86_FEATURE_POPCNT)
			alt->skip_orig = true;

		/*
		 * If UACCESS validation is enabled; force that alternative;
		 * otherwise force it the other way.
		 *
		 * What we want to avoid is having both the original and the
		 * alternative code flow at the same time, in that case we can
		 * find paths that see the STAC but take the NOP instead of
		 * CLAC and the other way around.
		 */
		if (feature == X86_FEATURE_SMAP) {
			if (uaccess)
				alt->skip_orig = true;
			else
				alt->skip_alt = true;
		}
	}

	orig_rela = find_rela_by_dest(sec, offset + entry->orig);
	if (!orig_rela) {
		WARN_FUNC("can't find orig rela", sec, offset + entry->orig);
		return -1;
	}
	/* only section-relative relas are supported here */
	if (orig_rela->sym->type != STT_SECTION) {
		WARN_FUNC("don't know how to handle non-section rela symbol %s",
			   sec, offset + entry->orig, orig_rela->sym->name);
		return -1;
	}

	alt->orig_sec = orig_rela->sym->sec;
	alt->orig_off = orig_rela->addend;

	/* a zero-length group replacement has no 'new' rela to resolve */
	if (!entry->group || alt->new_len) {
		new_rela = find_rela_by_dest(sec, offset + entry->new);
		if (!new_rela) {
			WARN_FUNC("can't find new rela",
				  sec, offset + entry->new);
			return -1;
		}

		alt->new_sec = new_rela->sym->sec;
		alt->new_off = (unsigned int)new_rela->addend;

		/* _ASM_EXTABLE_EX hack */
		if (alt->new_off >= 0x7ffffff0)
			alt->new_off -= 0x7ffffff0;
	}

	return 0;
}
153
154/*
155 * Read all the special sections and create a list of special_alt structs which
156 * describe all the alternate instructions which can be patched in or
157 * redirected to at runtime.
158 */
159int special_get_alts(struct elf *elf, struct list_head *alts)
160{
161 struct special_entry *entry;
162 struct section *sec;
163 unsigned int nr_entries;
164 struct special_alt *alt;
165 int idx, ret;
166
167 INIT_LIST_HEAD(alts);
168
169 for (entry = entries; entry->sec; entry++) {
170 sec = find_section_by_name(elf, entry->sec);
171 if (!sec)
172 continue;
173
174 if (sec->len % entry->size != 0) {
175 WARN("%s size not a multiple of %d",
176 sec->name, entry->size);
177 return -1;
178 }
179
180 nr_entries = sec->len / entry->size;
181
182 for (idx = 0; idx < nr_entries; idx++) {
183 alt = malloc(sizeof(*alt));
184 if (!alt) {
185 WARN("malloc failed");
186 return -1;
187 }
188 memset(alt, 0, sizeof(*alt));
189
190 ret = get_alt_entry(elf, entry, sec, idx, alt);
191 if (ret)
192 return ret;
193
194 list_add_tail(&alt->list, alts);
195 }
196 }
197
198 return 0;
199}