/*
 * Copyright (C) 2014-2016 Linaro Ltd. <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sort.h>

struct plt_entry {
        /*
         * A program that conforms to the AArch64 Procedure Call Standard
         * (AAPCS64) must assume that a veneer that alters IP0 (x16) and/or
         * IP1 (x17) may be inserted at any branch instruction that is
         * exposed to a relocation that supports long branches. Since that
         * is exactly what we are dealing with here, we are free to use x16
         * as a scratch register in the PLT veneers.
         */
        __le32  mov0;   /* movn x16, #0x....                */
        __le32  mov1;   /* movk x16, #0x...., lsl #16       */
        __le32  mov2;   /* movk x16, #0x...., lsl #32       */
        __le32  br;     /* br   x16                         */
};
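
/*
 * Loading a full 64-bit target with just MOVN/MOVK/MOVK relies on the target
 * being a kernel virtual address, i.e. on bits [63:48] being all ones: MOVN
 * writes the inverted 16-bit immediate and sets every other bit of x16 to 1,
 * so the top 16 bits come out as 0xffff for free, and the two MOVKs only have
 * to patch in bits [47:16].
 */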

u64 module_emit_plt_entry(struct module *mod, const Elf64_Rela *rela,
                          Elf64_Sym *sym)
{
        struct plt_entry *plt = (struct plt_entry *)mod->arch.plt->sh_addr;
        int i = mod->arch.plt_num_entries;
        u64 val = sym->st_value + rela->r_addend;

        /*
         * We only emit PLT entries against undefined (SHN_UNDEF) symbols,
         * which are listed in the ELF symtab section, but without a type
         * or a size.
         * So, similar to how the module loader uses the Elf64_Sym::st_value
         * field to store the resolved addresses of undefined symbols, let's
         * borrow the Elf64_Sym::st_size field (whose value is never used by
         * the module loader, even for symbols that are defined) to record
         * the address of a symbol's associated PLT entry as we emit it for a
         * zero addend relocation (which is the only kind we have to deal with
         * in practice). This allows us to find duplicates without having to
         * go through the table every time.
         */
        if (rela->r_addend == 0 && sym->st_size != 0) {
                BUG_ON(sym->st_size < (u64)plt || sym->st_size >= (u64)&plt[i]);
                return sym->st_size;
        }

        mod->arch.plt_num_entries++;
        BUG_ON(mod->arch.plt_num_entries > mod->arch.plt_max_entries);

        /*
         * MOVK/MOVN/MOVZ opcode:
         * +--------+------------+--------+-----------+-------------+---------+
         * | sf[31] | opc[30:29] | 100101 | hw[22:21] | imm16[20:5] | Rd[4:0] |
         * +--------+------------+--------+-----------+-------------+---------+
         *
         * Rd  := 0x10 (x16)
         * hw  := 0b00 (no shift), 0b01 (lsl #16), 0b10 (lsl #32)
         * opc := 0b11 (MOVK), 0b00 (MOVN), 0b10 (MOVZ)
         * sf  := 1 (64-bit variant)
         */
        plt[i] = (struct plt_entry){
                cpu_to_le32(0x92800010 | (((~val      ) & 0xffff)) << 5),
                cpu_to_le32(0xf2a00010 | ((( val >> 16) & 0xffff)) << 5),
                cpu_to_le32(0xf2c00010 | ((( val >> 32) & 0xffff)) << 5),
                cpu_to_le32(0xd61f0200)
        };
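
        /*
         * Worked example with a made-up target address val == 0xffff000008123456:
         *
         *      movn    x16, #0xcba9            // x16 = 0xffffffffffff3456
         *      movk    x16, #0x0812, lsl #16   // x16 = 0xffffffff08123456
         *      movk    x16, #0x0000, lsl #32   // x16 = 0xffff000008123456
         *      br      x16
         *
         * where 0xcba9 == ~val & 0xffff, and bits [63:48] stay 0xffff throughout.
         */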

        if (rela->r_addend == 0)
                sym->st_size = (u64)&plt[i];

        return (u64)&plt[i];
}

#define cmp_3way(a, b) ((a) < (b) ? -1 : (a) > (b))

static int cmp_rela(const void *a, const void *b)
{
        const Elf64_Rela *x = a, *y = b;
        int i;

        /* sort by type, symbol index and addend */
        i = cmp_3way(ELF64_R_TYPE(x->r_info), ELF64_R_TYPE(y->r_info));
        if (i == 0)
                i = cmp_3way(ELF64_R_SYM(x->r_info), ELF64_R_SYM(y->r_info));
        if (i == 0)
                i = cmp_3way(x->r_addend, y->r_addend);
        return i;
}

static bool duplicate_rel(const Elf64_Rela *rela, int num)
{
        /*
         * Entries are sorted by type, symbol index and addend. That means
         * that, if a duplicate entry exists, it must be in the preceding
         * slot.
         */
        return num > 0 && cmp_rela(rela + num, rela + num - 1) == 0;
}

static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num)
{
        unsigned int ret = 0;
        Elf64_Sym *s;
        int i;

        for (i = 0; i < num; i++) {
                switch (ELF64_R_TYPE(rela[i].r_info)) {
                case R_AARCH64_JUMP26:
                case R_AARCH64_CALL26:
                        /*
                         * We only have to consider branch targets that resolve
                         * to undefined symbols. This is not simply a heuristic,
                         * it is a fundamental limitation, since the PLT itself
                         * is part of the module, and needs to be within 128 MB
                         * of the branch as well, so modules can never grow
                         * beyond that limit.
                         */
                        s = syms + ELF64_R_SYM(rela[i].r_info);
                        if (s->st_shndx != SHN_UNDEF)
                                break;

                        /*
                         * Jump relocations with non-zero addends against
                         * undefined symbols are supported by the ELF spec, but
                         * do not occur in practice (e.g., 'jump n bytes past
                         * the entry point of undefined function symbol f').
                         * So we need to support them, but there is no need to
                         * take them into consideration when trying to optimize
                         * this code. So let's only check for duplicates when
                         * the addend is zero: this allows us to record the PLT
                         * entry address in the symbol table itself, rather than
                         * having to search the list for duplicates each time we
                         * emit one.
                         */
                        if (rela[i].r_addend != 0 || !duplicate_rel(rela, i))
                                ret++;
                        break;
                }
        }
        return ret;
}

int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
                              char *secstrings, struct module *mod)
{
        unsigned long plt_max_entries = 0;
        Elf64_Sym *syms = NULL;
        int i;

        /*
         * Find the empty .plt section so we can expand it to store the PLT
         * entries. Record the symtab address as well.
         */
        for (i = 0; i < ehdr->e_shnum; i++) {
                if (strcmp(".plt", secstrings + sechdrs[i].sh_name) == 0)
                        mod->arch.plt = sechdrs + i;
                else if (sechdrs[i].sh_type == SHT_SYMTAB)
                        syms = (Elf64_Sym *)sechdrs[i].sh_addr;
        }

        if (!mod->arch.plt) {
                pr_err("%s: module PLT section missing\n", mod->name);
                return -ENOEXEC;
        }
        if (!syms) {
                pr_err("%s: module symtab section missing\n", mod->name);
                return -ENOEXEC;
        }

        for (i = 0; i < ehdr->e_shnum; i++) {
                Elf64_Rela *rels = (void *)ehdr + sechdrs[i].sh_offset;
                int numrels = sechdrs[i].sh_size / sizeof(Elf64_Rela);
                Elf64_Shdr *dstsec = sechdrs + sechdrs[i].sh_info;

                if (sechdrs[i].sh_type != SHT_RELA)
                        continue;

                /* ignore relocations that operate on non-exec sections */
                if (!(dstsec->sh_flags & SHF_EXECINSTR))
                        continue;

                /* sort by type, symbol index and addend */
                sort(rels, numrels, sizeof(Elf64_Rela), cmp_rela, NULL);

                plt_max_entries += count_plts(syms, rels, numrels);
        }

        mod->arch.plt->sh_type = SHT_NOBITS;
        mod->arch.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
        mod->arch.plt->sh_addralign = L1_CACHE_BYTES;
        mod->arch.plt->sh_size = plt_max_entries * sizeof(struct plt_entry);
        mod->arch.plt_num_entries = 0;
        mod->arch.plt_max_entries = plt_max_entries;
        return 0;
}

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014-2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
 */

#include <linux/elf.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/sort.h>

static struct plt_entry __get_adrp_add_pair(u64 dst, u64 pc,
                                            enum aarch64_insn_register reg)
{
        u32 adrp, add;

        adrp = aarch64_insn_gen_adr(pc, dst, reg, AARCH64_INSN_ADR_TYPE_ADRP);
        add = aarch64_insn_gen_add_sub_imm(reg, reg, dst % SZ_4K,
                                           AARCH64_INSN_VARIANT_64BIT,
                                           AARCH64_INSN_ADSB_ADD);

        return (struct plt_entry){ cpu_to_le32(adrp), cpu_to_le32(add) };
}

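/*
 * A PLT entry produced by get_plt_entry() is, in effect:
 *
 *      adrp    x16, dst                // page address of the target
 *      add     x16, x16, dst % 4K      // low 12 bits of the target
 *      br      x16
 */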
struct plt_entry get_plt_entry(u64 dst, void *pc)
{
        struct plt_entry plt;
        static u32 br;

        if (!br)
                br = aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_16,
                                                 AARCH64_INSN_BRANCH_NOLINK);

        plt = __get_adrp_add_pair(dst, (u64)pc, AARCH64_INSN_REG_16);
        plt.br = cpu_to_le32(br);

        return plt;
}

static bool plt_entries_equal(const struct plt_entry *a,
                              const struct plt_entry *b)
{
        u64 p, q;

        /*
         * Check whether both entries refer to the same target:
         * do the cheapest checks first.
         * If the 'add' or 'br' opcodes are different, then the target
         * cannot be the same.
         */
        if (a->add != b->add || a->br != b->br)
                return false;

        p = ALIGN_DOWN((u64)a, SZ_4K);
        q = ALIGN_DOWN((u64)b, SZ_4K);

        /*
         * If the 'adrp' opcodes are the same then we just need to check
         * that they refer to the same 4k region.
         */
        if (a->adrp == b->adrp && p == q)
                return true;

        return (p + aarch64_insn_adrp_get_offset(le32_to_cpu(a->adrp))) ==
               (q + aarch64_insn_adrp_get_offset(le32_to_cpu(b->adrp)));
}

u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs,
                          void *loc, const Elf64_Rela *rela,
                          Elf64_Sym *sym)
{
        struct mod_plt_sec *pltsec = !within_module_init((unsigned long)loc, mod) ?
                                     &mod->arch.core : &mod->arch.init;
        struct plt_entry *plt = (struct plt_entry *)sechdrs[pltsec->plt_shndx].sh_addr;
        int i = pltsec->plt_num_entries;
        int j = i - 1;
        u64 val = sym->st_value + rela->r_addend;

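        /*
         * If the next free slot would put the ADRP at offset 0xff8 or 0xffc
         * within a 4 KB page (erratum 843419), skip it; count_plts() reserved
         * slack for such skipped slots.
         */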
        if (is_forbidden_offset_for_adrp(&plt[i].adrp))
                i++;

        plt[i] = get_plt_entry(val, &plt[i]);

        /*
         * Check if the entry we just created is a duplicate. Given that the
         * relocations are sorted, this will be the last entry we allocated
         * (if one exists).
         */
        if (j >= 0 && plt_entries_equal(plt + i, plt + j))
                return (u64)&plt[j];

        pltsec->plt_num_entries += i - j;
        if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries))
                return 0;

        return (u64)&plt[i];
}

#ifdef CONFIG_ARM64_ERRATUM_843419
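/*
 * Erratum 843419 veneer: when an ADRP at a vulnerable page offset cannot
 * simply be converted into an ADR, the relocation code in module.c rewrites
 * it into a branch to a veneer built here. The veneer redoes the page-address
 * computation at a safe location and branches back, roughly:
 *
 *      veneer: adrp    <rd>, dst
 *              add     <rd>, <rd>, dst % 4K
 *              b       <original ADRP site + 4>
 */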
u64 module_emit_veneer_for_adrp(struct module *mod, Elf64_Shdr *sechdrs,
                                void *loc, u64 val)
{
        struct mod_plt_sec *pltsec = !within_module_init((unsigned long)loc, mod) ?
                                     &mod->arch.core : &mod->arch.init;
        struct plt_entry *plt = (struct plt_entry *)sechdrs[pltsec->plt_shndx].sh_addr;
        int i = pltsec->plt_num_entries++;
        u32 br;
        int rd;

        if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries))
                return 0;

        if (is_forbidden_offset_for_adrp(&plt[i].adrp))
                i = pltsec->plt_num_entries++;

        /* get the destination register of the ADRP instruction */
        rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD,
                                          le32_to_cpup((__le32 *)loc));

        br = aarch64_insn_gen_branch_imm((u64)&plt[i].br, (u64)loc + 4,
                                         AARCH64_INSN_BRANCH_NOLINK);

        plt[i] = __get_adrp_add_pair(val, (u64)&plt[i], rd);
        plt[i].br = cpu_to_le32(br);

        return (u64)&plt[i];
}
#endif

#define cmp_3way(a, b) ((a) < (b) ? -1 : (a) > (b))

static int cmp_rela(const void *a, const void *b)
{
        const Elf64_Rela *x = a, *y = b;
        int i;

        /* sort by type, symbol index and addend */
        i = cmp_3way(ELF64_R_TYPE(x->r_info), ELF64_R_TYPE(y->r_info));
        if (i == 0)
                i = cmp_3way(ELF64_R_SYM(x->r_info), ELF64_R_SYM(y->r_info));
        if (i == 0)
                i = cmp_3way(x->r_addend, y->r_addend);
        return i;
}

static bool duplicate_rel(const Elf64_Rela *rela, int num)
{
        /*
         * Entries are sorted by type, symbol index and addend. That means
         * that, if a duplicate entry exists, it must be in the preceding
         * slot.
         */
        return num > 0 && cmp_rela(rela + num, rela + num - 1) == 0;
}

static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num,
                               Elf64_Word dstidx, Elf_Shdr *dstsec)
{
        unsigned int ret = 0;
        Elf64_Sym *s;
        int i;

        for (i = 0; i < num; i++) {
                u64 min_align;

                switch (ELF64_R_TYPE(rela[i].r_info)) {
                case R_AARCH64_JUMP26:
                case R_AARCH64_CALL26:
                        /*
                         * We only have to consider branch targets that resolve
                         * to symbols that are defined in a different section.
                         * This is not simply a heuristic, it is a fundamental
                         * limitation, since there is no guaranteed way to emit
                         * PLT entries sufficiently close to the branch if the
                         * section size exceeds the range of a branch
                         * instruction. So ignore relocations against defined
                         * symbols if they live in the same section as the
                         * relocation target.
                         */
                        s = syms + ELF64_R_SYM(rela[i].r_info);
                        if (s->st_shndx == dstidx)
                                break;

                        /*
                         * Jump relocations with non-zero addends against
                         * undefined symbols are supported by the ELF spec, but
                         * do not occur in practice (e.g., 'jump n bytes past
                         * the entry point of undefined function symbol f').
                         * So we need to support them, but there is no need to
                         * take them into consideration when trying to optimize
                         * this code. So let's only check for duplicates when
                         * the addend is zero: since the relocations are sorted,
                         * a duplicate is emitted right after its twin, so
                         * module_emit_plt_entry() only has to compare the new
                         * entry against the most recently emitted one rather
                         * than search the whole table.
                         */
                        if (rela[i].r_addend != 0 || !duplicate_rel(rela, i))
                                ret++;
                        break;
                case R_AARCH64_ADR_PREL_PG_HI21_NC:
                case R_AARCH64_ADR_PREL_PG_HI21:
                        if (!cpus_have_final_cap(ARM64_WORKAROUND_843419))
                                break;

                        /*
                         * Determine the minimal safe alignment for this ADRP
                         * instruction: the section alignment at which it is
                         * guaranteed not to appear at a vulnerable offset.
                         *
                         * This comes down to finding the least significant zero
                         * bit in bits [11:3] of the section offset, and
                         * increasing the section's alignment so that the
                         * resulting address of this instruction is guaranteed
                         * to equal the offset in that particular bit (as well
                         * as all less significant bits). This ensures that the
                         * address modulo 4 KB != 0xff8 or 0xffc (which would
                         * have all ones in bits [11:3]).
                         */
                        min_align = 2ULL << ffz(rela[i].r_offset | 0x7);
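
                        /*
                         * For example, r_offset == 0xff8 gives ffz(0xfff) == 12
                         * and min_align == 8 KB, so a veneer slot is reserved
                         * below; r_offset == 0x7fc gives ffz(0x7ff) == 11 and
                         * min_align == 4 KB, so aligning the section to 4 KB
                         * pins the instruction at offset 0x7fc within its page,
                         * which is not a vulnerable offset.
                         */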

                        /*
                         * Allocate veneer space for each ADRP that may appear
                         * at a vulnerable offset nonetheless. At relocation
                         * time, some of these will remain unused since some
                         * ADRP instructions can be patched to ADR instructions
                         * instead.
                         */
                        if (min_align > SZ_4K)
                                ret++;
                        else
                                dstsec->sh_addralign = max(dstsec->sh_addralign,
                                                           min_align);
                        break;
                }
        }

        if (cpus_have_final_cap(ARM64_WORKAROUND_843419)) {
                /*
                 * Add some slack so we can skip PLT slots that may trigger
                 * the erratum due to the placement of the ADRP instruction.
                 */
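                /*
                 * One struct plt_entry is three instructions (12 bytes), so at
                 * most one slot per 4 KB page can have its adrp field at a
                 * forbidden offset; one spare slot per SZ_4K / sizeof(struct
                 * plt_entry) slots therefore covers the skips.
                 */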
                ret += DIV_ROUND_UP(ret, (SZ_4K / sizeof(struct plt_entry)));
        }

        return ret;
}

static bool branch_rela_needs_plt(Elf64_Sym *syms, Elf64_Rela *rela,
                                  Elf64_Word dstidx)
{
        Elf64_Sym *s = syms + ELF64_R_SYM(rela->r_info);

        if (s->st_shndx == dstidx)
                return false;

        return ELF64_R_TYPE(rela->r_info) == R_AARCH64_JUMP26 ||
               ELF64_R_TYPE(rela->r_info) == R_AARCH64_CALL26;
}

/* Group branch PLT relas at the front end of the array. */
static int partition_branch_plt_relas(Elf64_Sym *syms, Elf64_Rela *rela,
                                      int numrels, Elf64_Word dstidx)
{
        int i = 0, j = numrels - 1;

        while (i < j) {
                if (branch_rela_needs_plt(syms, &rela[i], dstidx))
                        i++;
                else if (branch_rela_needs_plt(syms, &rela[j], dstidx))
                        swap(rela[i], rela[j]);
                else
                        j--;
        }

        return i;
}

int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
                              char *secstrings, struct module *mod)
{
        unsigned long core_plts = 0;
        unsigned long init_plts = 0;
        Elf64_Sym *syms = NULL;
        Elf_Shdr *pltsec, *tramp = NULL;
        int i;

        /*
         * Find the empty .plt sections so we can expand them to store the
         * PLT entries. Record the symtab address as well. (These empty
         * sections are provided by the module linker script; see the sketch
         * at the end of this file.)
         */
        for (i = 0; i < ehdr->e_shnum; i++) {
                if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt"))
                        mod->arch.core.plt_shndx = i;
                else if (!strcmp(secstrings + sechdrs[i].sh_name, ".init.plt"))
                        mod->arch.init.plt_shndx = i;
                else if (!strcmp(secstrings + sechdrs[i].sh_name,
                                 ".text.ftrace_trampoline"))
                        tramp = sechdrs + i;
                else if (sechdrs[i].sh_type == SHT_SYMTAB)
                        syms = (Elf64_Sym *)sechdrs[i].sh_addr;
        }

        if (!mod->arch.core.plt_shndx || !mod->arch.init.plt_shndx) {
                pr_err("%s: module PLT section(s) missing\n", mod->name);
                return -ENOEXEC;
        }
        if (!syms) {
                pr_err("%s: module symtab section missing\n", mod->name);
                return -ENOEXEC;
        }

        for (i = 0; i < ehdr->e_shnum; i++) {
                Elf64_Rela *rels = (void *)ehdr + sechdrs[i].sh_offset;
                int nents, numrels = sechdrs[i].sh_size / sizeof(Elf64_Rela);
                Elf64_Shdr *dstsec = sechdrs + sechdrs[i].sh_info;

                if (sechdrs[i].sh_type != SHT_RELA)
                        continue;

                /* ignore relocations that operate on non-exec sections */
                if (!(dstsec->sh_flags & SHF_EXECINSTR))
                        continue;

                /*
                 * sort branch relocations requiring a PLT by type, symbol index
                 * and addend
                 */
                nents = partition_branch_plt_relas(syms, rels, numrels,
                                                   sechdrs[i].sh_info);
                if (nents)
                        sort(rels, nents, sizeof(Elf64_Rela), cmp_rela, NULL);

                if (!module_init_layout_section(secstrings + dstsec->sh_name))
                        core_plts += count_plts(syms, rels, numrels,
                                                sechdrs[i].sh_info, dstsec);
                else
                        init_plts += count_plts(syms, rels, numrels,
                                                sechdrs[i].sh_info, dstsec);
        }

        pltsec = sechdrs + mod->arch.core.plt_shndx;
        pltsec->sh_type = SHT_NOBITS;
        pltsec->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
        pltsec->sh_addralign = L1_CACHE_BYTES;
        pltsec->sh_size = (core_plts + 1) * sizeof(struct plt_entry);
        mod->arch.core.plt_num_entries = 0;
        mod->arch.core.plt_max_entries = core_plts;

        pltsec = sechdrs + mod->arch.init.plt_shndx;
        pltsec->sh_type = SHT_NOBITS;
        pltsec->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
        pltsec->sh_addralign = L1_CACHE_BYTES;
        pltsec->sh_size = (init_plts + 1) * sizeof(struct plt_entry);
        mod->arch.init.plt_num_entries = 0;
        mod->arch.init.plt_max_entries = init_plts;

        if (tramp) {
                tramp->sh_type = SHT_NOBITS;
                tramp->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
                tramp->sh_addralign = __alignof__(struct plt_entry);
                tramp->sh_size = NR_FTRACE_PLTS * sizeof(struct plt_entry);
        }

        return 0;
}
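
/*
 * The empty .plt, .init.plt and .text.ftrace_trampoline input sections that
 * module_frob_arch_sections() expands above are provided by the arm64 module
 * linker script. A sketch of the relevant part (not the verbatim upstream
 * file):
 *
 *      SECTIONS {
 *              .plt 0 : { BYTE(0) }
 *              .init.plt 0 : { BYTE(0) }
 *              .text.ftrace_trampoline 0 : { BYTE(0) }
 *      }
 */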