// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014-2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
 */
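
/*
 * PLT (procedure linkage table) generation for loadable modules: direct
 * B/BL instructions have a range of +/- 128 MB, so when a module ends up
 * too far from a branch target (e.g., under KASLR), the branch is routed
 * through a PLT entry in the module's .plt or .init.plt section instead.
 */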

#include <linux/elf.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sort.h>

static struct plt_entry __get_adrp_add_pair(u64 dst, u64 pc,
					    enum aarch64_insn_register reg)
{
	u32 adrp, add;

	adrp = aarch64_insn_gen_adr(pc, dst, reg, AARCH64_INSN_ADR_TYPE_ADRP);
	add = aarch64_insn_gen_add_sub_imm(reg, reg, dst % SZ_4K,
					   AARCH64_INSN_VARIANT_64BIT,
					   AARCH64_INSN_ADSB_ADD);

	return (struct plt_entry){ cpu_to_le32(adrp), cpu_to_le32(add) };
}

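/*
 * A full PLT entry is the three-instruction sequence
 *
 *	adrp	x16, <dst page>
 *	add	x16, x16, #(<dst> % 4K)
 *	br	x16
 *
 * ADRP has a range of +/- 4 GB, so the entry can reach targets far beyond
 * the +/- 128 MB limit of the direct branch it stands in for. x16 (IP0) is
 * used because the AAPCS64 reserves it as an intra-procedure-call scratch
 * register that veneers may clobber.
 */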
struct plt_entry get_plt_entry(u64 dst, void *pc)
{
	struct plt_entry plt;
	static u32 br;

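	/* the 'br x16' opcode is the same for every entry: generate it once */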
	if (!br)
		br = aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_16,
						 AARCH64_INSN_BRANCH_NOLINK);

	plt = __get_adrp_add_pair(dst, (u64)pc, AARCH64_INSN_REG_16);
	plt.br = cpu_to_le32(br);

	return plt;
}

static bool plt_entries_equal(const struct plt_entry *a,
			      const struct plt_entry *b)
{
	u64 p, q;

	/*
	 * Check whether both entries refer to the same target:
	 * do the cheapest checks first.
	 * If the 'add' or 'br' opcodes are different, then the target
	 * cannot be the same.
	 */
	if (a->add != b->add || a->br != b->br)
		return false;

	p = ALIGN_DOWN((u64)a, SZ_4K);
	q = ALIGN_DOWN((u64)b, SZ_4K);

	/*
	 * If the 'adrp' opcodes are the same then we just need to check
	 * that they refer to the same 4k region.
	 */
	if (a->adrp == b->adrp && p == q)
		return true;

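	/*
	 * Otherwise, compare the 4 KB pages the two ADRP instructions
	 * actually resolve to: entries at different addresses may encode
	 * different immediates and still target the same page.
	 */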
	return (p + aarch64_insn_adrp_get_offset(le32_to_cpu(a->adrp))) ==
	       (q + aarch64_insn_adrp_get_offset(le32_to_cpu(b->adrp)));
}

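/* does 'loc' point into the module's init region? */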
static bool in_init(const struct module *mod, void *loc)
{
	return (u64)loc - (u64)mod->init_layout.base < mod->init_layout.size;
}

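/*
 * Allocate (or reuse) a PLT entry for the branch target 'sym' + addend of
 * the relocation at 'loc', and return the entry's address, or 0 if the
 * PLT section unexpectedly runs out of slots.
 */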
u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs,
			  void *loc, const Elf64_Rela *rela,
			  Elf64_Sym *sym)
{
	struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
							  &mod->arch.init;
	struct plt_entry *plt = (struct plt_entry *)sechdrs[pltsec->plt_shndx].sh_addr;
	int i = pltsec->plt_num_entries;
	int j = i - 1;
	u64 val = sym->st_value + rela->r_addend;

	if (is_forbidden_offset_for_adrp(&plt[i].adrp))
		i++;

	plt[i] = get_plt_entry(val, &plt[i]);

	/*
	 * Check if the entry we just created is a duplicate. Given that the
	 * relocations are sorted, this will be the last entry we allocated,
	 * if one exists.
	 */
	if (j >= 0 && plt_entries_equal(plt + i, plt + j))
		return (u64)&plt[j];

	pltsec->plt_num_entries += i - j;
	if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries))
		return 0;

	return (u64)&plt[i];
}

#ifdef CONFIG_ARM64_ERRATUM_843419
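/*
 * Emit a veneer for an ADRP instruction that would otherwise land at an
 * offset vulnerable to Cortex-A53 erratum 843419: the ADRP/ADD pair is
 * moved into a PLT slot (using the original destination register) and is
 * followed by a branch back to the instruction after the original ADRP.
 */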
u64 module_emit_veneer_for_adrp(struct module *mod, Elf64_Shdr *sechdrs,
				void *loc, u64 val)
{
	struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
							  &mod->arch.init;
	struct plt_entry *plt = (struct plt_entry *)sechdrs[pltsec->plt_shndx].sh_addr;
	int i = pltsec->plt_num_entries++;
	u32 br;
	int rd;

	if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries))
		return 0;

	if (is_forbidden_offset_for_adrp(&plt[i].adrp))
		i = pltsec->plt_num_entries++;

	/* get the destination register of the ADRP instruction */
	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD,
					  le32_to_cpup((__le32 *)loc));

	br = aarch64_insn_gen_branch_imm((u64)&plt[i].br, (u64)loc + 4,
					 AARCH64_INSN_BRANCH_NOLINK);

	plt[i] = __get_adrp_add_pair(val, (u64)&plt[i], rd);
	plt[i].br = cpu_to_le32(br);

	return (u64)&plt[i];
}
#endif

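/* three-way compare: -1, 0 or 1 */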
#define cmp_3way(a, b)	((a) < (b) ? -1 : (a) > (b))

static int cmp_rela(const void *a, const void *b)
{
	const Elf64_Rela *x = a, *y = b;
	int i;

	/* sort by type, symbol index and addend */
	i = cmp_3way(ELF64_R_TYPE(x->r_info), ELF64_R_TYPE(y->r_info));
	if (i == 0)
		i = cmp_3way(ELF64_R_SYM(x->r_info), ELF64_R_SYM(y->r_info));
	if (i == 0)
		i = cmp_3way(x->r_addend, y->r_addend);
	return i;
}

static bool duplicate_rel(const Elf64_Rela *rela, int num)
{
	/*
	 * Entries are sorted by type, symbol index and addend. That means
	 * that, if a duplicate entry exists, it must be in the preceding
	 * slot.
	 */
	return num > 0 && cmp_rela(rela + num, rela + num - 1) == 0;
}

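/*
 * Return an upper bound on the number of PLT entries the relocations in a
 * RELA section may require, raising the destination section's alignment
 * where that makes an erratum 843419 veneer unnecessary.
 */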
static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num,
			       Elf64_Word dstidx, Elf_Shdr *dstsec)
{
	unsigned int ret = 0;
	Elf64_Sym *s;
	int i;

	for (i = 0; i < num; i++) {
		u64 min_align;

		switch (ELF64_R_TYPE(rela[i].r_info)) {
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
				break;

			/*
			 * We only have to consider branch targets that resolve
			 * to symbols that are defined in a different section.
			 * This is not simply a heuristic, it is a fundamental
			 * limitation, since there is no guaranteed way to emit
			 * PLT entries sufficiently close to the branch if the
			 * section size exceeds the range of a branch
			 * instruction. So ignore relocations against defined
			 * symbols if they live in the same section as the
			 * relocation target.
			 */
			s = syms + ELF64_R_SYM(rela[i].r_info);
			if (s->st_shndx == dstidx)
				break;

			/*
			 * Jump relocations with non-zero addends against
			 * undefined symbols are supported by the ELF spec, but
			 * do not occur in practice (e.g., 'jump n bytes past
			 * the entry point of undefined function symbol f').
			 * So we need to support them, but there is no need to
			 * take them into consideration when trying to optimize
			 * this code. So let's only check for duplicates when
			 * the addend is zero: this allows us to record the PLT
			 * entry address in the symbol table itself, rather than
			 * having to search the list for duplicates each time we
			 * emit one.
			 */
			if (rela[i].r_addend != 0 || !duplicate_rel(rela, i))
				ret++;
			break;
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
		case R_AARCH64_ADR_PREL_PG_HI21:
			if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) ||
			    !cpus_have_const_cap(ARM64_WORKAROUND_843419))
				break;

			/*
			 * Determine the minimal safe alignment for this ADRP
			 * instruction: the section alignment at which it is
			 * guaranteed not to appear at a vulnerable offset.
			 *
			 * This comes down to finding the least significant zero
			 * bit in bits [11:3] of the section offset, and
			 * increasing the section's alignment so that the
			 * resulting address of this instruction is guaranteed
			 * to equal the offset in that particular bit (as well
			 * as all less significant bits). This ensures that the
			 * address modulo 4 KB != 0xff8 and != 0xffc (the two
			 * instruction offsets with all ones in bits [11:3]).
			 */
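			/*
			 * Worked example: r_offset == 0x37c gives
			 * 0x37c | 0x7 == 0x37f, whose lowest clear bit is
			 * bit 7, so min_align == 2 << 7 == 0x100. With the
			 * section aligned to 256 bytes, bit 7 of the
			 * instruction's address is always zero, so the
			 * address modulo 4 KB can never be 0xff8 or 0xffc.
			 */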
			min_align = 2ULL << ffz(rela[i].r_offset | 0x7);

			/*
			 * Allocate veneer space for each ADRP that may appear
			 * at a vulnerable offset nonetheless. At relocation
			 * time, some of these will remain unused since some
			 * ADRP instructions can be patched to ADR instructions
			 * instead.
			 */
			if (min_align > SZ_4K)
				ret++;
			else
				dstsec->sh_addralign = max(dstsec->sh_addralign,
							   min_align);
			break;
		}
	}

	if (IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) &&
	    cpus_have_const_cap(ARM64_WORKAROUND_843419))
		/*
		 * Add some slack so we can skip PLT slots that may trigger
		 * the erratum due to the placement of the ADRP instruction.
		 */
		ret += DIV_ROUND_UP(ret, (SZ_4K / sizeof(struct plt_entry)));

	return ret;
}

static bool branch_rela_needs_plt(Elf64_Sym *syms, Elf64_Rela *rela,
				  Elf64_Word dstidx)
{
	Elf64_Sym *s = syms + ELF64_R_SYM(rela->r_info);

	if (s->st_shndx == dstidx)
		return false;

	return ELF64_R_TYPE(rela->r_info) == R_AARCH64_JUMP26 ||
	       ELF64_R_TYPE(rela->r_info) == R_AARCH64_CALL26;
}

/* Group branch PLT relas at the front end of the array. */
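/*
 * Classic two-pointer partition: entries [0, i) are known to need a PLT,
 * and the scan ends when the two indices meet, so the return value is the
 * number of relas the caller has to sort.
 */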
static int partition_branch_plt_relas(Elf64_Sym *syms, Elf64_Rela *rela,
				      int numrels, Elf64_Word dstidx)
{
	int i = 0, j = numrels - 1;

	if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
		return 0;

	while (i < j) {
		if (branch_rela_needs_plt(syms, &rela[i], dstidx))
			i++;
		else if (branch_rela_needs_plt(syms, &rela[j], dstidx))
			swap(rela[i], rela[j]);
		else
			j--;
	}

	return i;
}

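/*
 * Called by the core module loader before relocation: count the PLT
 * entries each executable RELA section may need, and size the .plt,
 * .init.plt and ftrace trampoline sections accordingly.
 */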
int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
			      char *secstrings, struct module *mod)
{
	unsigned long core_plts = 0;
	unsigned long init_plts = 0;
	Elf64_Sym *syms = NULL;
	Elf_Shdr *pltsec, *tramp = NULL;
	int i;

	/*
	 * Find the empty .plt section so we can expand it to store the PLT
	 * entries. Record the symtab address as well.
	 */
	for (i = 0; i < ehdr->e_shnum; i++) {
		if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt"))
			mod->arch.core.plt_shndx = i;
		else if (!strcmp(secstrings + sechdrs[i].sh_name, ".init.plt"))
			mod->arch.init.plt_shndx = i;
		else if (!strcmp(secstrings + sechdrs[i].sh_name,
				 ".text.ftrace_trampoline"))
			tramp = sechdrs + i;
		else if (sechdrs[i].sh_type == SHT_SYMTAB)
			syms = (Elf64_Sym *)sechdrs[i].sh_addr;
	}

	if (!mod->arch.core.plt_shndx || !mod->arch.init.plt_shndx) {
		pr_err("%s: module PLT section(s) missing\n", mod->name);
		return -ENOEXEC;
	}
	if (!syms) {
		pr_err("%s: module symtab section missing\n", mod->name);
		return -ENOEXEC;
	}

	for (i = 0; i < ehdr->e_shnum; i++) {
		Elf64_Rela *rels = (void *)ehdr + sechdrs[i].sh_offset;
		int nents, numrels = sechdrs[i].sh_size / sizeof(Elf64_Rela);
		Elf64_Shdr *dstsec = sechdrs + sechdrs[i].sh_info;

		if (sechdrs[i].sh_type != SHT_RELA)
			continue;

		/* ignore relocations that operate on non-exec sections */
		if (!(dstsec->sh_flags & SHF_EXECINSTR))
			continue;

		/*
		 * sort branch relocations requiring a PLT by type, symbol index
		 * and addend
		 */
		nents = partition_branch_plt_relas(syms, rels, numrels,
						   sechdrs[i].sh_info);
		if (nents)
			sort(rels, nents, sizeof(Elf64_Rela), cmp_rela, NULL);

		if (!str_has_prefix(secstrings + dstsec->sh_name, ".init"))
			core_plts += count_plts(syms, rels, numrels,
						sechdrs[i].sh_info, dstsec);
		else
			init_plts += count_plts(syms, rels, numrels,
						sechdrs[i].sh_info, dstsec);
	}

	pltsec = sechdrs + mod->arch.core.plt_shndx;
	pltsec->sh_type = SHT_NOBITS;
	pltsec->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	pltsec->sh_addralign = L1_CACHE_BYTES;
	pltsec->sh_size = (core_plts + 1) * sizeof(struct plt_entry);
	mod->arch.core.plt_num_entries = 0;
	mod->arch.core.plt_max_entries = core_plts;

	pltsec = sechdrs + mod->arch.init.plt_shndx;
	pltsec->sh_type = SHT_NOBITS;
	pltsec->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	pltsec->sh_addralign = L1_CACHE_BYTES;
	pltsec->sh_size = (init_plts + 1) * sizeof(struct plt_entry);
	mod->arch.init.plt_num_entries = 0;
	mod->arch.init.plt_max_entries = init_plts;

	if (tramp) {
		tramp->sh_type = SHT_NOBITS;
		tramp->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
		tramp->sh_addralign = __alignof__(struct plt_entry);
		tramp->sh_size = NR_FTRACE_PLTS * sizeof(struct plt_entry);
	}

	return 0;
}