v5.9
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014-2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
 */

#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sort.h>
#include <linux/moduleloader.h>

#include <asm/cache.h>
#include <asm/opcodes.h>

#define PLT_ENT_STRIDE		L1_CACHE_BYTES
#define PLT_ENT_COUNT		(PLT_ENT_STRIDE / sizeof(u32))
#define PLT_ENT_SIZE		(sizeof(struct plt_entries) / PLT_ENT_COUNT)

#ifdef CONFIG_THUMB2_KERNEL
#define PLT_ENT_LDR		__opcode_to_mem_thumb32(0xf8dff000 | \
							(PLT_ENT_STRIDE - 4))
#else
#define PLT_ENT_LDR		__opcode_to_mem_arm(0xe59ff000 | \
						    (PLT_ENT_STRIDE - 8))
#endif

struct plt_entries {
	u32	ldr[PLT_ENT_COUNT];
	u32	lit[PLT_ENT_COUNT];
};

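/*
 * Layout note, derived from the definitions above: each struct plt_entries
 * packs PLT_ENT_COUNT veneers into two cache lines. ldr[i] is a
 * position-independent "ldr pc, [pc, #imm]" whose immediate is chosen so
 * that, once the PC bias is applied (+8 on ARM, +4 on Thumb2), it loads
 * from lit[i], exactly PLT_ENT_STRIDE bytes further on. Worked example
 * for 64-byte cache lines:
 *
 *	PLT_ENT_STRIDE = 64
 *	PLT_ENT_COUNT  = 64 / 4   = 16 veneers per group
 *	PLT_ENT_SIZE   = 128 / 16 = 8 bytes per veneer (one ldr + one lit)
 */
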
static bool in_init(const struct module *mod, unsigned long loc)
{
	return loc - (u32)mod->init_layout.base < mod->init_layout.size;
}

u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
{
	struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
							  &mod->arch.init;

	struct plt_entries *plt = (struct plt_entries *)pltsec->plt->sh_addr;
	int idx = 0;

	/*
	 * Look for an existing entry pointing to 'val'. Given that the
	 * relocations are sorted, this will be the last entry we allocated
	 * (if one exists).
	 */
	if (pltsec->plt_count > 0) {
		plt += (pltsec->plt_count - 1) / PLT_ENT_COUNT;
		idx = (pltsec->plt_count - 1) % PLT_ENT_COUNT;

		if (plt->lit[idx] == val)
			return (u32)&plt->ldr[idx];

		idx = (idx + 1) % PLT_ENT_COUNT;
		if (!idx)
			plt++;
	}

	pltsec->plt_count++;
	BUG_ON(pltsec->plt_count * PLT_ENT_SIZE > pltsec->plt->sh_size);

	if (!idx)
		/* Populate a new group of entries */
		*plt = (struct plt_entries){
			{ [0 ... PLT_ENT_COUNT - 1] = PLT_ENT_LDR, },
			{ val, }
		};
	else
		plt->lit[idx] = val;

	return (u32)&plt->ldr[idx];
}

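/*
 * Illustrative sketch (not part of this file): how a branch fixup might
 * fall back to get_module_plt() when its target is out of direct reach.
 * Loosely modelled on the R_ARM_CALL/R_ARM_JUMP24 handling in
 * arch/arm/kernel/module.c; the helper name and the omitted addend
 * decoding are simplifications, so treat this as a sketch rather than
 * the authoritative caller.
 */
static void __maybe_unused fixup_branch_sketch(struct module *mod, u32 *loc,
					       Elf32_Addr sym_val)
{
	s32 offset = sym_val - (u32)loc - 8;	/* ARM B/BL PC bias is 8 */

	/* direct B/BL reach is a signed 26-bit byte offset (+/- 32 MiB) */
	if (offset <= (s32)0xfe000000 || offset >= (s32)0x02000000) {
		/* out of range: branch to a PLT veneer that jumps to sym_val */
		u32 plt = get_module_plt(mod, (u32)loc, sym_val);

		offset = plt - (u32)loc - 8;
	}

	/* re-encode the offset into the imm24 field of the B/BL opcode */
	*loc = __opcode_to_mem_arm((__mem_to_opcode_arm(*loc) & 0xff000000) |
				   ((offset >> 2) & 0x00ffffff));
}
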
#define cmp_3way(a, b)	((a) < (b) ? -1 : (a) > (b))

static int cmp_rel(const void *a, const void *b)
{
	const Elf32_Rel *x = a, *y = b;
	int i;

	/* sort by type and symbol index */
	i = cmp_3way(ELF32_R_TYPE(x->r_info), ELF32_R_TYPE(y->r_info));
	if (i == 0)
		i = cmp_3way(ELF32_R_SYM(x->r_info), ELF32_R_SYM(y->r_info));
	return i;
}

static bool is_zero_addend_relocation(Elf32_Addr base, const Elf32_Rel *rel)
{
	u32 *tval = (u32 *)(base + rel->r_offset);

	/*
	 * Do a bitwise compare on the raw addend rather than fully decoding
	 * the offset and doing an arithmetic comparison.
	 * Note that a zero-addend jump/call relocation is encoded taking the
	 * PC bias into account, i.e., -8 for ARM and -4 for Thumb2.
	 */
	switch (ELF32_R_TYPE(rel->r_info)) {
		u16 upper, lower;

	case R_ARM_THM_CALL:
	case R_ARM_THM_JUMP24:
		upper = __mem_to_opcode_thumb16(((u16 *)tval)[0]);
		lower = __mem_to_opcode_thumb16(((u16 *)tval)[1]);

		return (upper & 0x7ff) == 0x7ff && (lower & 0x2fff) == 0x2ffe;

	case R_ARM_CALL:
	case R_ARM_PC24:
	case R_ARM_JUMP24:
		return (__mem_to_opcode_arm(*tval) & 0xffffff) == 0xfffffe;
	}
	BUG();
}

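/*
 * Worked example for the ARM case above: a zero-addend B/BL encodes the
 * implicit addend -8 (the PC bias) in its imm24 field, pre-shifted right
 * by two:
 *
 *	(-8 >> 2) & 0x00ffffff == -2 & 0x00ffffff == 0x00fffffe
 *
 * which is the constant compared against. The Thumb2 masks test the
 * corresponding bits of an encoded -4 spread across the two halfwords.
 */
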
static bool duplicate_rel(Elf32_Addr base, const Elf32_Rel *rel, int num)
{
	const Elf32_Rel *prev;

	/*
	 * Entries are sorted by type and symbol index. That means that,
	 * if a duplicate entry exists, it must be in the preceding
	 * slot.
	 */
	if (!num)
		return false;

	prev = rel + num - 1;
	return cmp_rel(rel + num, prev) == 0 &&
	       is_zero_addend_relocation(base, prev);
}

/* Count how many PLT entries we may need */
static unsigned int count_plts(const Elf32_Sym *syms, Elf32_Addr base,
			       const Elf32_Rel *rel, int num, Elf32_Word dstidx)
{
	unsigned int ret = 0;
	const Elf32_Sym *s;
	int i;

	for (i = 0; i < num; i++) {
		switch (ELF32_R_TYPE(rel[i].r_info)) {
		case R_ARM_CALL:
		case R_ARM_PC24:
		case R_ARM_JUMP24:
		case R_ARM_THM_CALL:
		case R_ARM_THM_JUMP24:
			/*
			 * We only have to consider branch targets that resolve
			 * to symbols that are defined in a different section.
			 * This is not simply a heuristic, it is a fundamental
			 * limitation, since there is no guaranteed way to emit
			 * PLT entries sufficiently close to the branch if the
			 * section size exceeds the range of a branch
			 * instruction. So ignore relocations against defined
			 * symbols if they live in the same section as the
			 * relocation target.
			 */
			s = syms + ELF32_R_SYM(rel[i].r_info);
			if (s->st_shndx == dstidx)
				break;

			/*
			 * Jump relocations with non-zero addends against
			 * undefined symbols are supported by the ELF spec, but
			 * do not occur in practice (e.g., 'jump n bytes past
			 * the entry point of undefined function symbol f').
			 * So we need to support them, but there is no need to
			 * take them into consideration when trying to optimize
			 * this code. So let's only check for duplicates when
			 * the addend is zero. (Note that calls into the core
			 * module via init PLT entries could involve section
			 * relative symbol references with non-zero addends, for
			 * which we may end up emitting duplicates, but the init
			 * PLT is released along with the rest of the .init
			 * region as soon as module loading completes.)
			 */
			if (!is_zero_addend_relocation(base, rel + i) ||
			    !duplicate_rel(base, rel, i))
				ret++;
		}
	}
	return ret;
}

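/*
 * Note that count_plts() is a worst-case upper bound: relocations with
 * non-zero addends are never deduplicated here (see the comment above),
 * while get_module_plt() may still merge some of them at load time. The
 * PLT sections sized from these counts below may therefore end up partly
 * unused; the BUG_ON() in get_module_plt() only fires if the bound is
 * actually exceeded.
 */
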
int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
			      char *secstrings, struct module *mod)
{
	unsigned long core_plts = 0;
	unsigned long init_plts = 0;
	Elf32_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;
	Elf32_Sym *syms = NULL;

	/*
	 * To store the PLTs, we expand the .text section for core module code
	 * and for initialization code.
	 */
	for (s = sechdrs; s < sechdrs_end; ++s) {
		if (strcmp(".plt", secstrings + s->sh_name) == 0)
			mod->arch.core.plt = s;
		else if (strcmp(".init.plt", secstrings + s->sh_name) == 0)
			mod->arch.init.plt = s;
		else if (s->sh_type == SHT_SYMTAB)
			syms = (Elf32_Sym *)s->sh_addr;
	}

	if (!mod->arch.core.plt || !mod->arch.init.plt) {
		pr_err("%s: module PLT section(s) missing\n", mod->name);
		return -ENOEXEC;
	}
	if (!syms) {
		pr_err("%s: module symtab section missing\n", mod->name);
		return -ENOEXEC;
	}

	for (s = sechdrs + 1; s < sechdrs_end; ++s) {
		Elf32_Rel *rels = (void *)ehdr + s->sh_offset;
		int numrels = s->sh_size / sizeof(Elf32_Rel);
		Elf32_Shdr *dstsec = sechdrs + s->sh_info;

		if (s->sh_type != SHT_REL)
			continue;

		/* ignore relocations that operate on non-exec sections */
		if (!(dstsec->sh_flags & SHF_EXECINSTR))
			continue;

		/* sort by type and symbol index */
		sort(rels, numrels, sizeof(Elf32_Rel), cmp_rel, NULL);

		if (strncmp(secstrings + dstsec->sh_name, ".init", 5) != 0)
			core_plts += count_plts(syms, dstsec->sh_addr, rels,
						numrels, s->sh_info);
		else
			init_plts += count_plts(syms, dstsec->sh_addr, rels,
						numrels, s->sh_info);
	}

	mod->arch.core.plt->sh_type = SHT_NOBITS;
	mod->arch.core.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	mod->arch.core.plt->sh_addralign = L1_CACHE_BYTES;
	mod->arch.core.plt->sh_size = round_up(core_plts * PLT_ENT_SIZE,
					       sizeof(struct plt_entries));
	mod->arch.core.plt_count = 0;

	mod->arch.init.plt->sh_type = SHT_NOBITS;
	mod->arch.init.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	mod->arch.init.plt->sh_addralign = L1_CACHE_BYTES;
	mod->arch.init.plt->sh_size = round_up(init_plts * PLT_ENT_SIZE,
					       sizeof(struct plt_entries));
	mod->arch.init.plt_count = 0;

	pr_debug("%s: plt=%x, init.plt=%x\n", __func__,
		 mod->arch.core.plt->sh_size, mod->arch.init.plt->sh_size);
	return 0;
}