/*
 * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/module.h>

#include <asm/cache.h>
#include <asm/opcodes.h>

#define PLT_ENT_STRIDE		L1_CACHE_BYTES
#define PLT_ENT_COUNT		(PLT_ENT_STRIDE / sizeof(u32))
#define PLT_ENT_SIZE		(sizeof(struct plt_entries) / PLT_ENT_COUNT)

#ifdef CONFIG_THUMB2_KERNEL
#define PLT_ENT_LDR		__opcode_to_mem_thumb32(0xf8dff000 | \
						(PLT_ENT_STRIDE - 4))
#else
#define PLT_ENT_LDR		__opcode_to_mem_arm(0xe59ff000 | \
						(PLT_ENT_STRIDE - 8))
#endif
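/*
 * PLT_ENT_LDR encodes 'ldr pc, [pc, #(PLT_ENT_STRIDE - 8)]' on ARM, or the
 * equivalent 'ldr.w pc, [pc, #(PLT_ENT_STRIDE - 4)]' on Thumb2. The PC reads
 * as the instruction address plus 8 (ARM) or plus 4 (Thumb2), so either form
 * loads the branch target from the word located exactly PLT_ENT_STRIDE bytes
 * past the instruction, i.e., the matching slot in the 'lit' array of the
 * struct plt_entries defined below.
 */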

struct plt_entries {
	u32	ldr[PLT_ENT_COUNT];
	u32	lit[PLT_ENT_COUNT];
};
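/*
 * A struct plt_entries thus covers two cache lines: a line's worth of ldr
 * opcodes followed by a line's worth of literal slots, with ldr[i] always
 * loading lit[i]. Entries are handed out one (ldr, lit) pair at a time.
 */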

static bool in_init(const struct module *mod, u32 addr)
{
	return addr - (u32)mod->init_layout.base < mod->init_layout.size;
}

u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
{
	struct plt_entries *plt, *plt_end;
	int c, *count;

	if (in_init(mod, loc)) {
		plt = (void *)mod->arch.init_plt->sh_addr;
		plt_end = (void *)plt + mod->arch.init_plt->sh_size;
		count = &mod->arch.init_plt_count;
	} else {
		plt = (void *)mod->arch.core_plt->sh_addr;
		plt_end = (void *)plt + mod->arch.core_plt->sh_size;
		count = &mod->arch.core_plt_count;
	}

	/* Look for an existing entry pointing to 'val' */
	for (c = *count; plt < plt_end; c -= PLT_ENT_COUNT, plt++) {
		int i;

		if (!c) {
			/* Populate a new set of entries */
			*plt = (struct plt_entries){
				{ [0 ... PLT_ENT_COUNT - 1] = PLT_ENT_LDR, },
				{ val, }
			};
			++*count;
			return (u32)plt->ldr;
		}
		for (i = 0; i < PLT_ENT_COUNT; i++) {
			if (!plt->lit[i]) {
				plt->lit[i] = val;
				++*count;
			}
			if (plt->lit[i] == val)
				return (u32)&plt->ldr[i];
		}
	}
	BUG();
}
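/*
 * For reference, a sketch (not part of this file) of how the relocation code
 * in arch/arm/kernel/module.c can divert an out-of-range branch through a
 * PLT entry. 'target' here stands for the fully resolved branch destination
 * (symbol value plus addend); the bounds are the +/-32 MiB reach of ARM B/BL,
 * and the -8 re-applies the ARM PC bias:
 *
 *	if (offset <= (s32)0xfe000000 || offset >= (s32)0x02000000)
 *		offset = get_module_plt(module, loc, target) - loc - 8;
 */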

static int duplicate_rel(Elf32_Addr base, const Elf32_Rel *rel, int num,
			 u32 mask)
{
	u32 *loc1, *loc2;
	int i;

	for (i = 0; i < num; i++) {
		if (rel[i].r_info != rel[num].r_info)
			continue;

		/*
		 * Identical relocation types against identical symbols can
		 * still result in different PLT entries if the addend stored
		 * in the place (the instruction being relocated) differs. So
		 * resolve the target of the relocation to compare the values.
		 */
		loc1 = (u32 *)(base + rel[i].r_offset);
		loc2 = (u32 *)(base + rel[num].r_offset);
		if (((*loc1 ^ *loc2) & mask) == 0)
			return 1;
	}
	return 0;
}
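/*
 * The masks passed in by count_plts() below select the implicit addend bits
 * of the branch encodings: 0x00ffffff covers the signed 24-bit immediate of
 * ARM B/BL, and 0x07ff2fff covers the S/imm10 and J1/J2/imm11 fields of the
 * Thumb2 BL/B.W halfword pair.
 */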

/* Count how many PLT entries we may need */
static unsigned int count_plts(Elf32_Addr base, const Elf32_Rel *rel, int num)
{
	unsigned int ret = 0;
	int i;

	/*
	 * Sure, this is O(n^2), but it's usually short, and not
	 * time critical.
	 */
	for (i = 0; i < num; i++)
		switch (ELF32_R_TYPE(rel[i].r_info)) {
		case R_ARM_CALL:
		case R_ARM_PC24:
		case R_ARM_JUMP24:
			if (!duplicate_rel(base, rel, i,
					   __opcode_to_mem_arm(0x00ffffff)))
				ret++;
			break;
#ifdef CONFIG_THUMB2_KERNEL
		case R_ARM_THM_CALL:
		case R_ARM_THM_JUMP24:
			if (!duplicate_rel(base, rel, i,
					   __opcode_to_mem_thumb32(0x07ff2fff)))
				ret++;
			break;
#endif
		}
	return ret;
}

int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
			      char *secstrings, struct module *mod)
{
	unsigned long core_plts = 0, init_plts = 0;
	Elf32_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;

	/*
	 * The PLTs are stored in the dedicated .core.plt section for core
	 * module code and the .init.plt section for initialization code.
	 */
	for (s = sechdrs; s < sechdrs_end; ++s)
		if (strcmp(".core.plt", secstrings + s->sh_name) == 0)
			mod->arch.core_plt = s;
		else if (strcmp(".init.plt", secstrings + s->sh_name) == 0)
			mod->arch.init_plt = s;

	if (!mod->arch.core_plt || !mod->arch.init_plt) {
		pr_err("%s: sections missing\n", mod->name);
		return -ENOEXEC;
	}

	for (s = sechdrs + 1; s < sechdrs_end; ++s) {
		const Elf32_Rel *rels = (void *)ehdr + s->sh_offset;
		int numrels = s->sh_size / sizeof(Elf32_Rel);
		Elf32_Shdr *dstsec = sechdrs + s->sh_info;

		if (s->sh_type != SHT_REL)
			continue;

		if (strstr(secstrings + s->sh_name, ".init"))
			init_plts += count_plts(dstsec->sh_addr, rels, numrels);
		else
			core_plts += count_plts(dstsec->sh_addr, rels, numrels);
	}

	mod->arch.core_plt->sh_type = SHT_NOBITS;
	mod->arch.core_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	mod->arch.core_plt->sh_addralign = L1_CACHE_BYTES;
	mod->arch.core_plt->sh_size = round_up(core_plts * PLT_ENT_SIZE,
					       sizeof(struct plt_entries));
	mod->arch.core_plt_count = 0;

	mod->arch.init_plt->sh_type = SHT_NOBITS;
	mod->arch.init_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	mod->arch.init_plt->sh_addralign = L1_CACHE_BYTES;
	mod->arch.init_plt->sh_size = round_up(init_plts * PLT_ENT_SIZE,
					       sizeof(struct plt_entries));
	mod->arch.init_plt_count = 0;

	pr_debug("%s: core.plt=%x, init.plt=%x\n", __func__,
		 mod->arch.core_plt->sh_size, mod->arch.init_plt->sh_size);
	return 0;
}
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014-2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
 */

#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sort.h>
#include <linux/moduleloader.h>

#include <asm/cache.h>
#include <asm/opcodes.h>

#define PLT_ENT_STRIDE		L1_CACHE_BYTES
#define PLT_ENT_COUNT		(PLT_ENT_STRIDE / sizeof(u32))
#define PLT_ENT_SIZE		(sizeof(struct plt_entries) / PLT_ENT_COUNT)

#ifdef CONFIG_THUMB2_KERNEL
#define PLT_ENT_LDR		__opcode_to_mem_thumb32(0xf8dff000 | \
						(PLT_ENT_STRIDE - 4))
#else
#define PLT_ENT_LDR		__opcode_to_mem_arm(0xe59ff000 | \
						(PLT_ENT_STRIDE - 8))
#endif

struct plt_entries {
	u32	ldr[PLT_ENT_COUNT];
	u32	lit[PLT_ENT_COUNT];
};
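/*
 * A sketch of the per-module bookkeeping assumed by the code below (cf.
 * struct mod_plt_sec and struct mod_arch_specific in
 * arch/arm/include/asm/module.h when CONFIG_ARM_MODULE_PLTS is enabled):
 *
 *	struct mod_plt_sec {
 *		struct elf32_shdr	*plt;
 *		int			plt_count;
 *	};
 *
 *	struct mod_arch_specific {
 *		struct mod_plt_sec	core;
 *		struct mod_plt_sec	init;
 *	};
 */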

static bool in_init(const struct module *mod, unsigned long loc)
{
	return loc - (u32)mod->init_layout.base < mod->init_layout.size;
}

u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
{
	struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
							  &mod->arch.init;

	struct plt_entries *plt = (struct plt_entries *)pltsec->plt->sh_addr;
	int idx = 0;

	/*
	 * Look for an existing entry pointing to 'val'. Given that the
	 * relocations are sorted, this will be the last entry we allocated
	 * (if one exists).
	 */
	if (pltsec->plt_count > 0) {
		plt += (pltsec->plt_count - 1) / PLT_ENT_COUNT;
		idx = (pltsec->plt_count - 1) % PLT_ENT_COUNT;

		if (plt->lit[idx] == val)
			return (u32)&plt->ldr[idx];

		idx = (idx + 1) % PLT_ENT_COUNT;
		if (!idx)
			plt++;
	}
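	/*
	 * Example, assuming 64-byte cache lines (PLT_ENT_COUNT == 16): with
	 * plt_count == 17, the entry checked above is ldr[0]/lit[0] of the
	 * second group; on a miss, idx advances to 1 and lit[1] of that same
	 * group is allocated below.
	 */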

	pltsec->plt_count++;
	BUG_ON(pltsec->plt_count * PLT_ENT_SIZE > pltsec->plt->sh_size);

	if (!idx)
		/* Populate a new set of entries */
		*plt = (struct plt_entries){
			{ [0 ... PLT_ENT_COUNT - 1] = PLT_ENT_LDR, },
			{ val, }
		};
	else
		plt->lit[idx] = val;

	return (u32)&plt->ldr[idx];
}

/* three-way compare: evaluates to -1, 0, or 1, like memcmp() */
#define cmp_3way(a,b)	((a) < (b) ? -1 : (a) > (b))

static int cmp_rel(const void *a, const void *b)
{
	const Elf32_Rel *x = a, *y = b;
	int i;

	/* sort by type and symbol index */
	i = cmp_3way(ELF32_R_TYPE(x->r_info), ELF32_R_TYPE(y->r_info));
	if (i == 0)
		i = cmp_3way(ELF32_R_SYM(x->r_info), ELF32_R_SYM(y->r_info));
	return i;
}

static bool is_zero_addend_relocation(Elf32_Addr base, const Elf32_Rel *rel)
{
	u32 *tval = (u32 *)(base + rel->r_offset);

	/*
	 * Do a bitwise compare on the raw addend rather than fully decoding
	 * the offset and doing an arithmetic comparison.
	 * Note that a zero-addend jump/call relocation is encoded taking the
	 * PC bias into account, i.e., -8 for ARM and -4 for Thumb2.
	 */
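	/*
	 * Worked example for the ARM case below: the imm24 field of B/BL is
	 * sign-extended and shifted left by 2, so a raw value of 0xfffffe
	 * decodes to -2 << 2 == -8, cancelling the PC bias exactly.
	 */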
	switch (ELF32_R_TYPE(rel->r_info)) {
	u16 upper, lower;

	case R_ARM_THM_CALL:
	case R_ARM_THM_JUMP24:
		upper = __mem_to_opcode_thumb16(((u16 *)tval)[0]);
		lower = __mem_to_opcode_thumb16(((u16 *)tval)[1]);

		return (upper & 0x7ff) == 0x7ff && (lower & 0x2fff) == 0x2ffe;

	case R_ARM_CALL:
	case R_ARM_PC24:
	case R_ARM_JUMP24:
		return (__mem_to_opcode_arm(*tval) & 0xffffff) == 0xfffffe;
	}
	BUG();
}

static bool duplicate_rel(Elf32_Addr base, const Elf32_Rel *rel, int num)
{
	const Elf32_Rel *prev;

	/*
	 * Entries are sorted by type and symbol index. That means that,
	 * if a duplicate entry exists, it must be in the preceding
	 * slot.
	 */
	if (!num)
		return false;

	prev = rel + num - 1;
	return cmp_rel(rel + num, prev) == 0 &&
	       is_zero_addend_relocation(base, prev);
}

/* Count how many PLT entries we may need */
static unsigned int count_plts(const Elf32_Sym *syms, Elf32_Addr base,
			       const Elf32_Rel *rel, int num, Elf32_Word dstidx)
{
	unsigned int ret = 0;
	const Elf32_Sym *s;
	int i;

	for (i = 0; i < num; i++) {
		switch (ELF32_R_TYPE(rel[i].r_info)) {
		case R_ARM_CALL:
		case R_ARM_PC24:
		case R_ARM_JUMP24:
		case R_ARM_THM_CALL:
		case R_ARM_THM_JUMP24:
			/*
			 * We only have to consider branch targets that resolve
			 * to symbols that are defined in a different section.
			 * This is not simply a heuristic, it is a fundamental
			 * limitation, since there is no guaranteed way to emit
			 * PLT entries sufficiently close to the branch if the
			 * section size exceeds the range of a branch
			 * instruction. So ignore relocations against defined
			 * symbols if they live in the same section as the
			 * relocation target.
			 */
			s = syms + ELF32_R_SYM(rel[i].r_info);
			if (s->st_shndx == dstidx)
				break;

			/*
			 * Jump relocations with non-zero addends against
			 * undefined symbols are supported by the ELF spec, but
			 * do not occur in practice (e.g., 'jump n bytes past
			 * the entry point of undefined function symbol f').
			 * So we need to support them, but there is no need to
			 * take them into consideration when trying to optimize
			 * this code. So let's only check for duplicates when
			 * the addend is zero. (Note that calls into the core
			 * module via init PLT entries could involve section
			 * relative symbol references with non-zero addends, for
			 * which we may end up emitting duplicates, but the init
			 * PLT is released along with the rest of the .init
			 * region as soon as module loading completes.)
			 */
			if (!is_zero_addend_relocation(base, rel + i) ||
			    !duplicate_rel(base, rel, i))
				ret++;
		}
	}
	return ret;
}
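/*
 * Example: a branch from the module's .text to a kernel symbol (SHN_UNDEF)
 * or to one of the module's own .init.text functions reserves a PLT slot
 * above, while an intra-section branch never does, since its st_shndx
 * equals dstidx.
 */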

int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
			      char *secstrings, struct module *mod)
{
	unsigned long core_plts = 0;
	unsigned long init_plts = 0;
	Elf32_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;
	Elf32_Sym *syms = NULL;

	/*
	 * The PLTs are stored in the dedicated .plt section for core module
	 * code and the .init.plt section for initialization code.
	 */
	for (s = sechdrs; s < sechdrs_end; ++s) {
		if (strcmp(".plt", secstrings + s->sh_name) == 0)
			mod->arch.core.plt = s;
		else if (strcmp(".init.plt", secstrings + s->sh_name) == 0)
			mod->arch.init.plt = s;
		else if (s->sh_type == SHT_SYMTAB)
			syms = (Elf32_Sym *)s->sh_addr;
	}

	if (!mod->arch.core.plt || !mod->arch.init.plt) {
		pr_err("%s: module PLT section(s) missing\n", mod->name);
		return -ENOEXEC;
	}
	if (!syms) {
		pr_err("%s: module symtab section missing\n", mod->name);
		return -ENOEXEC;
	}

	for (s = sechdrs + 1; s < sechdrs_end; ++s) {
		Elf32_Rel *rels = (void *)ehdr + s->sh_offset;
		int numrels = s->sh_size / sizeof(Elf32_Rel);
		Elf32_Shdr *dstsec = sechdrs + s->sh_info;

		if (s->sh_type != SHT_REL)
			continue;

		/* ignore relocations that operate on non-exec sections */
		if (!(dstsec->sh_flags & SHF_EXECINSTR))
			continue;

		/* sort by type and symbol index */
		sort(rels, numrels, sizeof(Elf32_Rel), cmp_rel, NULL);

		if (strncmp(secstrings + dstsec->sh_name, ".init", 5) != 0)
			core_plts += count_plts(syms, dstsec->sh_addr, rels,
						numrels, s->sh_info);
		else
			init_plts += count_plts(syms, dstsec->sh_addr, rels,
						numrels, s->sh_info);
	}

	mod->arch.core.plt->sh_type = SHT_NOBITS;
	mod->arch.core.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	mod->arch.core.plt->sh_addralign = L1_CACHE_BYTES;
	mod->arch.core.plt->sh_size = round_up(core_plts * PLT_ENT_SIZE,
					       sizeof(struct plt_entries));
	mod->arch.core.plt_count = 0;

	mod->arch.init.plt->sh_type = SHT_NOBITS;
	mod->arch.init.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	mod->arch.init.plt->sh_addralign = L1_CACHE_BYTES;
	mod->arch.init.plt->sh_size = round_up(init_plts * PLT_ENT_SIZE,
					       sizeof(struct plt_entries));
	mod->arch.init.plt_count = 0;

	pr_debug("%s: plt=%x, init.plt=%x\n", __func__,
		 mod->arch.core.plt->sh_size, mod->arch.init.plt->sh_size);
	return 0;
}
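/*
 * For completeness: the .plt and .init.plt sections are expected to be
 * declared by the module linker script so that they are present in every
 * module image; a minimal fragment (cf. arch/arm/kernel/module.lds in
 * mainline) looks like:
 *
 *	SECTIONS {
 *		.plt : { BYTE(0) }
 *		.init.plt : { BYTE(0) }
 *	}
 *
 * The single placeholder byte keeps the sections from being discarded
 * before module_frob_arch_sections() can resize them.
 */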