v6.13.7
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014-2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
 */

#include <linux/elf.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sort.h>
#include <linux/moduleloader.h>

#include <asm/cache.h>
#include <asm/opcodes.h>

#ifdef CONFIG_THUMB2_KERNEL
#define PLT_ENT_LDR		__opcode_to_mem_thumb32(0xf8dff000 | \
							(PLT_ENT_STRIDE - 4))
#else
#define PLT_ENT_LDR		__opcode_to_mem_arm(0xe59ff000 | \
						    (PLT_ENT_STRIDE - 8))
#endif
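
/*
 * A PLT slot pairs one opcode word in plt->ldr[] with one literal word in
 * plt->lit[], PLT_ENT_STRIDE bytes further on. Roughly, the opcode above is
 * "ldr pc, [pc, #imm]" (0xe59ff000 in ARM, 0xf8dff000 as a Thumb2 ldr.w);
 * the immediate is PLT_ENT_STRIDE minus the PC read-ahead bias (8 for ARM,
 * 4 for Thumb2), so executing ldr[i] loads lit[i] straight into the pc.
 */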

static const u32 fixed_plts[] = {
#ifdef CONFIG_DYNAMIC_FTRACE
	FTRACE_ADDR,
	MCOUNT_ADDR,
#endif
};
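
/*
 * With CONFIG_DYNAMIC_FTRACE, call sites are patched at runtime to branch
 * to FTRACE_ADDR/MCOUNT_ADDR, which may be out of branch range for a module
 * loaded far from the core kernel. Reserving fixed PLT slots for those two
 * addresses (see prealloc_fixed() below) should guarantee that a reachable
 * veneer exists even if no relocation referred to them at load time.
 */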

static void prealloc_fixed(struct mod_plt_sec *pltsec, struct plt_entries *plt)
{
	int i;

	if (!ARRAY_SIZE(fixed_plts) || pltsec->plt_count)
		return;
	pltsec->plt_count = ARRAY_SIZE(fixed_plts);

	for (i = 0; i < ARRAY_SIZE(plt->ldr); ++i)
		plt->ldr[i] = PLT_ENT_LDR;

	BUILD_BUG_ON(sizeof(fixed_plts) > sizeof(plt->lit));
	memcpy(plt->lit, fixed_plts, sizeof(fixed_plts));
}

u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
{
	struct mod_plt_sec *pltsec = !within_module_init(loc, mod) ?
						&mod->arch.core : &mod->arch.init;
	struct plt_entries *plt;
	int idx;

	/* cache the address, ELF header is available only during module load */
	if (!pltsec->plt_ent)
		pltsec->plt_ent = (struct plt_entries *)pltsec->plt->sh_addr;
	plt = pltsec->plt_ent;

	prealloc_fixed(pltsec, plt);

	for (idx = 0; idx < ARRAY_SIZE(fixed_plts); ++idx)
		if (plt->lit[idx] == val)
			return (u32)&plt->ldr[idx];

	idx = 0;
	/*
	 * Look for an existing entry pointing to 'val'. Given that the
	 * relocations are sorted, this will be the last entry we allocated
	 * (if one exists).
	 */
	if (pltsec->plt_count > 0) {
		plt += (pltsec->plt_count - 1) / PLT_ENT_COUNT;
		idx = (pltsec->plt_count - 1) % PLT_ENT_COUNT;

		if (plt->lit[idx] == val)
			return (u32)&plt->ldr[idx];

		idx = (idx + 1) % PLT_ENT_COUNT;
		if (!idx)
			plt++;
	}

	pltsec->plt_count++;
	BUG_ON(pltsec->plt_count * PLT_ENT_SIZE > pltsec->plt->sh_size);

	if (!idx)
		/* Populate a new set of entries */
		*plt = (struct plt_entries){
			{ [0 ... PLT_ENT_COUNT - 1] = PLT_ENT_LDR, },
			{ val, }
		};
	else
		plt->lit[idx] = val;

	return (u32)&plt->ldr[idx];
}
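
/*
 * Sketch of the resulting veneer group (ARM case), assuming the first slot
 * of a plt_entries block has just been populated:
 *
 *	plt->ldr[0]:	ldr pc, [pc, #(PLT_ENT_STRIDE - 8)]	@ loads lit[0]
 *	plt->ldr[1..]:	same opcode, not yet handed out
 *	plt->lit[0]:	.word val				@ real target
 *
 * The relocation code can then point an out-of-range B/BL at the returned
 * &plt->ldr[idx] instead of the final destination.
 */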

#define cmp_3way(a,b)	((a) < (b) ? -1 : (a) > (b))

static int cmp_rel(const void *a, const void *b)
{
	const Elf32_Rel *x = a, *y = b;
	int i;

	/* sort by type and symbol index */
	i = cmp_3way(ELF32_R_TYPE(x->r_info), ELF32_R_TYPE(y->r_info));
	if (i == 0)
		i = cmp_3way(ELF32_R_SYM(x->r_info), ELF32_R_SYM(y->r_info));
	return i;
}

static bool is_zero_addend_relocation(Elf32_Addr base, const Elf32_Rel *rel)
{
	u32 *tval = (u32 *)(base + rel->r_offset);

	/*
	 * Do a bitwise compare on the raw addend rather than fully decoding
	 * the offset and doing an arithmetic comparison.
	 * Note that a zero-addend jump/call relocation is encoded taking the
	 * PC bias into account, i.e., -8 for ARM and -4 for Thumb2.
	 */
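	/*
	 * For instance, an ARM B/BL with a zero addend stores an offset of
	 * -2 words (-8 bytes) in its signed 24-bit field, i.e. 0xfffffe, and
	 * a zero-addend Thumb2 BL/B.W encodes a -4 byte offset; those are the
	 * bit patterns the masks below match.
	 */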
	switch (ELF32_R_TYPE(rel->r_info)) {
		u16 upper, lower;

	case R_ARM_THM_CALL:
	case R_ARM_THM_JUMP24:
		upper = __mem_to_opcode_thumb16(((u16 *)tval)[0]);
		lower = __mem_to_opcode_thumb16(((u16 *)tval)[1]);

		return (upper & 0x7ff) == 0x7ff && (lower & 0x2fff) == 0x2ffe;

	case R_ARM_CALL:
	case R_ARM_PC24:
	case R_ARM_JUMP24:
		return (__mem_to_opcode_arm(*tval) & 0xffffff) == 0xfffffe;
	}
	BUG();
}

static bool duplicate_rel(Elf32_Addr base, const Elf32_Rel *rel, int num)
{
	const Elf32_Rel *prev;

	/*
	 * Entries are sorted by type and symbol index. That means that,
	 * if a duplicate entry exists, it must be in the preceding
	 * slot.
	 */
	if (!num)
		return false;

	prev = rel + num - 1;
	return cmp_rel(rel + num, prev) == 0 &&
	       is_zero_addend_relocation(base, prev);
}

/* Count how many PLT entries we may need */
static unsigned int count_plts(const Elf32_Sym *syms, Elf32_Addr base,
			       const Elf32_Rel *rel, int num, Elf32_Word dstidx)
{
	unsigned int ret = 0;
	const Elf32_Sym *s;
	int i;

	for (i = 0; i < num; i++) {
		switch (ELF32_R_TYPE(rel[i].r_info)) {
		case R_ARM_CALL:
		case R_ARM_PC24:
		case R_ARM_JUMP24:
		case R_ARM_THM_CALL:
		case R_ARM_THM_JUMP24:
			/*
			 * We only have to consider branch targets that resolve
			 * to symbols that are defined in a different section.
			 * This is not simply a heuristic, it is a fundamental
			 * limitation, since there is no guaranteed way to emit
			 * PLT entries sufficiently close to the branch if the
			 * section size exceeds the range of a branch
			 * instruction. So ignore relocations against defined
			 * symbols if they live in the same section as the
			 * relocation target.
			 */
			s = syms + ELF32_R_SYM(rel[i].r_info);
			if (s->st_shndx == dstidx)
				break;

			/*
			 * Jump relocations with non-zero addends against
			 * undefined symbols are supported by the ELF spec, but
			 * do not occur in practice (e.g., 'jump n bytes past
			 * the entry point of undefined function symbol f').
			 * So we need to support them, but there is no need to
			 * take them into consideration when trying to optimize
			 * this code. So let's only check for duplicates when
			 * the addend is zero. (Note that calls into the core
			 * module via init PLT entries could involve section
			 * relative symbol references with non-zero addends, for
			 * which we may end up emitting duplicates, but the init
			 * PLT is released along with the rest of the .init
			 * region as soon as module loading completes.)
			 */
			if (!is_zero_addend_relocation(base, rel + i) ||
			    !duplicate_rel(base, rel, i))
				ret++;
		}
	}
	return ret;
}
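
/*
 * Note that this is a worst-case estimate: get_module_plt() allocates slots
 * lazily and reuses the most recently allocated entry where it can, so the
 * sections sized from these counts may end up only partially used.
 */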

int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
			      char *secstrings, struct module *mod)
{
	unsigned long core_plts = ARRAY_SIZE(fixed_plts);
	unsigned long init_plts = ARRAY_SIZE(fixed_plts);
	Elf32_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;
	Elf32_Sym *syms = NULL;

	/*
	 * To store the PLTs, we size the .plt section for core module code
	 * and the .init.plt section for initialization code.
	 */
	for (s = sechdrs; s < sechdrs_end; ++s) {
		if (strcmp(".plt", secstrings + s->sh_name) == 0)
			mod->arch.core.plt = s;
		else if (strcmp(".init.plt", secstrings + s->sh_name) == 0)
			mod->arch.init.plt = s;
		else if (s->sh_type == SHT_SYMTAB)
			syms = (Elf32_Sym *)s->sh_addr;
	}

	if (!mod->arch.core.plt || !mod->arch.init.plt) {
		pr_err("%s: module PLT section(s) missing\n", mod->name);
		return -ENOEXEC;
	}
	if (!syms) {
		pr_err("%s: module symtab section missing\n", mod->name);
		return -ENOEXEC;
	}

	for (s = sechdrs + 1; s < sechdrs_end; ++s) {
		Elf32_Rel *rels = (void *)ehdr + s->sh_offset;
		int numrels = s->sh_size / sizeof(Elf32_Rel);
		Elf32_Shdr *dstsec = sechdrs + s->sh_info;

		if (s->sh_type != SHT_REL)
			continue;

		/* ignore relocations that operate on non-exec sections */
		if (!(dstsec->sh_flags & SHF_EXECINSTR))
			continue;

		/* sort by type and symbol index */
		sort(rels, numrels, sizeof(Elf32_Rel), cmp_rel, NULL);

		if (!module_init_layout_section(secstrings + dstsec->sh_name))
			core_plts += count_plts(syms, dstsec->sh_addr, rels,
						numrels, s->sh_info);
		else
			init_plts += count_plts(syms, dstsec->sh_addr, rels,
						numrels, s->sh_info);
	}

	mod->arch.core.plt->sh_type = SHT_NOBITS;
	mod->arch.core.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	mod->arch.core.plt->sh_addralign = L1_CACHE_BYTES;
	mod->arch.core.plt->sh_size = round_up(core_plts * PLT_ENT_SIZE,
					       sizeof(struct plt_entries));
	mod->arch.core.plt_count = 0;
	mod->arch.core.plt_ent = NULL;

	mod->arch.init.plt->sh_type = SHT_NOBITS;
	mod->arch.init.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	mod->arch.init.plt->sh_addralign = L1_CACHE_BYTES;
	mod->arch.init.plt->sh_size = round_up(init_plts * PLT_ENT_SIZE,
					       sizeof(struct plt_entries));
	mod->arch.init.plt_count = 0;
	mod->arch.init.plt_ent = NULL;

	pr_debug("%s: plt=%x, init.plt=%x\n", __func__,
		 mod->arch.core.plt->sh_size, mod->arch.init.plt->sh_size);
	return 0;
}
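
/*
 * The .plt and .init.plt sections themselves are presumably supplied as
 * empty placeholder sections by the arch module linker script; this hook
 * only fixes up their size, flags and alignment before the module core
 * lays out the final image.
 */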
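
/*
 * Presumably used by callers (e.g. the ftrace code) that need to know
 * whether 'loc' falls inside one of a module's PLT veneer regions. The
 * preempt_disable() window satisfies __module_text_address(), which must
 * be called with preemption disabled.
 */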
bool in_module_plt(unsigned long loc)
{
	struct module *mod;
	bool ret;

	preempt_disable();
	mod = __module_text_address(loc);
	ret = mod && (loc - (u32)mod->arch.core.plt_ent < mod->arch.core.plt_count * PLT_ENT_SIZE ||
		      loc - (u32)mod->arch.init.plt_ent < mod->arch.init.plt_count * PLT_ENT_SIZE);
	preempt_enable();

	return ret;
}
v4.10.11
 
/*
 * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sort.h>

#include <asm/cache.h>
#include <asm/opcodes.h>

#define PLT_ENT_STRIDE		L1_CACHE_BYTES
#define PLT_ENT_COUNT		(PLT_ENT_STRIDE / sizeof(u32))
#define PLT_ENT_SIZE		(sizeof(struct plt_entries) / PLT_ENT_COUNT)

#ifdef CONFIG_THUMB2_KERNEL
#define PLT_ENT_LDR		__opcode_to_mem_thumb32(0xf8dff000 | \
							(PLT_ENT_STRIDE - 4))
#else
#define PLT_ENT_LDR		__opcode_to_mem_arm(0xe59ff000 | \
						    (PLT_ENT_STRIDE - 8))
#endif

struct plt_entries {
	u32	ldr[PLT_ENT_COUNT];
	u32	lit[PLT_ENT_COUNT];
};
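
/*
 * A plt_entries block is two cache lines: PLT_ENT_COUNT ldr opcodes followed
 * by PLT_ENT_COUNT literal words, so ldr[i] and lit[i] sit exactly
 * PLT_ENT_STRIDE bytes apart and PLT_ENT_SIZE works out to 8 bytes (one
 * opcode plus one literal) per logical PLT entry.
 */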

u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
{
	struct plt_entries *plt = (struct plt_entries *)mod->arch.plt->sh_addr;
	int idx = 0;

	/*
	 * Look for an existing entry pointing to 'val'. Given that the
	 * relocations are sorted, this will be the last entry we allocated
	 * (if one exists).
	 */
	if (mod->arch.plt_count > 0) {
		plt += (mod->arch.plt_count - 1) / PLT_ENT_COUNT;
		idx = (mod->arch.plt_count - 1) % PLT_ENT_COUNT;

		if (plt->lit[idx] == val)
			return (u32)&plt->ldr[idx];

		idx = (idx + 1) % PLT_ENT_COUNT;
		if (!idx)
			plt++;
	}

	mod->arch.plt_count++;
	BUG_ON(mod->arch.plt_count * PLT_ENT_SIZE > mod->arch.plt->sh_size);

	if (!idx)
		/* Populate a new set of entries */
		*plt = (struct plt_entries){
			{ [0 ... PLT_ENT_COUNT - 1] = PLT_ENT_LDR, },
			{ val, }
		};
	else
		plt->lit[idx] = val;

	return (u32)&plt->ldr[idx];
}

#define cmp_3way(a,b)	((a) < (b) ? -1 : (a) > (b))

static int cmp_rel(const void *a, const void *b)
{
	const Elf32_Rel *x = a, *y = b;
	int i;

	/* sort by type and symbol index */
	i = cmp_3way(ELF32_R_TYPE(x->r_info), ELF32_R_TYPE(y->r_info));
	if (i == 0)
		i = cmp_3way(ELF32_R_SYM(x->r_info), ELF32_R_SYM(y->r_info));
	return i;
}

static bool is_zero_addend_relocation(Elf32_Addr base, const Elf32_Rel *rel)
{
	u32 *tval = (u32 *)(base + rel->r_offset);

	/*
	 * Do a bitwise compare on the raw addend rather than fully decoding
	 * the offset and doing an arithmetic comparison.
	 * Note that a zero-addend jump/call relocation is encoded taking the
	 * PC bias into account, i.e., -8 for ARM and -4 for Thumb2.
	 */
	switch (ELF32_R_TYPE(rel->r_info)) {
		u16 upper, lower;

	case R_ARM_THM_CALL:
	case R_ARM_THM_JUMP24:
		upper = __mem_to_opcode_thumb16(((u16 *)tval)[0]);
		lower = __mem_to_opcode_thumb16(((u16 *)tval)[1]);

		return (upper & 0x7ff) == 0x7ff && (lower & 0x2fff) == 0x2ffe;

	case R_ARM_CALL:
	case R_ARM_PC24:
	case R_ARM_JUMP24:
		return (__mem_to_opcode_arm(*tval) & 0xffffff) == 0xfffffe;
	}
	BUG();
}

static bool duplicate_rel(Elf32_Addr base, const Elf32_Rel *rel, int num)
{
	const Elf32_Rel *prev;

	/*
	 * Entries are sorted by type and symbol index. That means that,
	 * if a duplicate entry exists, it must be in the preceding
	 * slot.
	 */
	if (!num)
		return false;

	prev = rel + num - 1;
	return cmp_rel(rel + num, prev) == 0 &&
	       is_zero_addend_relocation(base, prev);
}

/* Count how many PLT entries we may need */
static unsigned int count_plts(const Elf32_Sym *syms, Elf32_Addr base,
			       const Elf32_Rel *rel, int num)
{
	unsigned int ret = 0;
	const Elf32_Sym *s;
	int i;

	for (i = 0; i < num; i++) {
		switch (ELF32_R_TYPE(rel[i].r_info)) {
		case R_ARM_CALL:
		case R_ARM_PC24:
		case R_ARM_JUMP24:
		case R_ARM_THM_CALL:
		case R_ARM_THM_JUMP24:
			/*
			 * We only have to consider branch targets that resolve
			 * to undefined symbols. This is not simply a heuristic,
			 * it is a fundamental limitation, since the PLT itself
			 * is part of the module, and needs to be within range
			 * as well, so modules can never grow beyond that limit.
			 */
			s = syms + ELF32_R_SYM(rel[i].r_info);
			if (s->st_shndx != SHN_UNDEF)
				break;

			/*
			 * Jump relocations with non-zero addends against
			 * undefined symbols are supported by the ELF spec, but
			 * do not occur in practice (e.g., 'jump n bytes past
			 * the entry point of undefined function symbol f').
			 * So we need to support them, but there is no need to
			 * take them into consideration when trying to optimize
			 * this code. So let's only check for duplicates when
			 * the addend is zero.
			 */
			if (!is_zero_addend_relocation(base, rel + i) ||
			    !duplicate_rel(base, rel, i))
				ret++;
		}
	}
	return ret;
}

int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
			      char *secstrings, struct module *mod)
{
	unsigned long plts = 0;
	Elf32_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;
	Elf32_Sym *syms = NULL;

	/*
	 * To store the PLTs, we size the .plt section that was reserved for
	 * them in the module's ELF image.
	 */
	for (s = sechdrs; s < sechdrs_end; ++s) {
		if (strcmp(".plt", secstrings + s->sh_name) == 0)
			mod->arch.plt = s;
		else if (s->sh_type == SHT_SYMTAB)
			syms = (Elf32_Sym *)s->sh_addr;
	}

	if (!mod->arch.plt) {
		pr_err("%s: module PLT section missing\n", mod->name);
		return -ENOEXEC;
	}
	if (!syms) {
		pr_err("%s: module symtab section missing\n", mod->name);
		return -ENOEXEC;
	}

	for (s = sechdrs + 1; s < sechdrs_end; ++s) {
		Elf32_Rel *rels = (void *)ehdr + s->sh_offset;
		int numrels = s->sh_size / sizeof(Elf32_Rel);
		Elf32_Shdr *dstsec = sechdrs + s->sh_info;

		if (s->sh_type != SHT_REL)
			continue;

		/* ignore relocations that operate on non-exec sections */
		if (!(dstsec->sh_flags & SHF_EXECINSTR))
			continue;

		/* sort by type and symbol index */
		sort(rels, numrels, sizeof(Elf32_Rel), cmp_rel, NULL);

		plts += count_plts(syms, dstsec->sh_addr, rels, numrels);
	}

	mod->arch.plt->sh_type = SHT_NOBITS;
	mod->arch.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	mod->arch.plt->sh_addralign = L1_CACHE_BYTES;
	mod->arch.plt->sh_size = round_up(plts * PLT_ENT_SIZE,
					  sizeof(struct plt_entries));
	mod->arch.plt_count = 0;

	pr_debug("%s: plt=%x\n", __func__, mod->arch.plt->sh_size);
	return 0;
}