/*
 * AArch64 loadable module support.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/sections.h>
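
/*
 * Module text and data are allocated from a dedicated MODULES_VSIZE window
 * starting at module_alloc_base (randomized when KASLR is in use) so that
 * relative branches into the core kernel normally stay in range; only when
 * that window is exhausted and module PLTs are available do we fall back to
 * the whole vmalloc area below.
 */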
void *module_alloc(unsigned long size)
{
	void *p;

	p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
				module_alloc_base + MODULES_VSIZE,
				GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
				NUMA_NO_NODE, __builtin_return_address(0));

	if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
	    !IS_ENABLED(CONFIG_KASAN))
		/*
		 * KASAN can only deal with module allocations being served
		 * from the reserved module region, since the remainder of
		 * the vmalloc region is already backed by zero shadow pages,
		 * and punching holes into it is non-trivial. Since the module
		 * region is not randomized when KASAN is enabled, it is even
		 * less likely that the module region gets exhausted, so we
		 * can simply omit this fallback in that case.
		 */
		p = __vmalloc_node_range(size, MODULE_ALIGN, VMALLOC_START,
				VMALLOC_END, GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
				NUMA_NO_NODE, __builtin_return_address(0));

	if (p && (kasan_module_alloc(p, size) < 0)) {
		vfree(p);
		return NULL;
	}

	return p;
}

enum aarch64_reloc_op {
	RELOC_OP_NONE,
	RELOC_OP_ABS,
	RELOC_OP_PREL,
	RELOC_OP_PAGE,
};

static u64 do_reloc(enum aarch64_reloc_op reloc_op, void *place, u64 val)
{
	switch (reloc_op) {
	case RELOC_OP_ABS:
		return val;
	case RELOC_OP_PREL:
		return val - (u64)place;
	case RELOC_OP_PAGE:
		return (val & ~0xfff) - ((u64)place & ~0xfff);
	case RELOC_OP_NONE:
		return 0;
	}

	pr_err("do_reloc: unknown relocation operation %d\n", reloc_op);
	return 0;
}
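
/*
 * Worked example (illustrative addresses): with place = 0xffff000000b00010
 * and val = 0xffff000000c82034, RELOC_OP_PREL yields the byte offset
 * 0x182024, while RELOC_OP_PAGE first masks both addresses down to their
 * 4 KiB pages and yields 0x182000, which is what an ADRP/ADD pair expects.
 */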

static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
{
	s64 sval = do_reloc(op, place, val);

	switch (len) {
	case 16:
		*(s16 *)place = sval;
		if (sval < S16_MIN || sval > U16_MAX)
			return -ERANGE;
		break;
	case 32:
		*(s32 *)place = sval;
		if (sval < S32_MIN || sval > U32_MAX)
			return -ERANGE;
		break;
	case 64:
		*(s64 *)place = sval;
		break;
	default:
		pr_err("Invalid length (%d) for data relocation\n", len);
		return 0;
	}
	return 0;
}
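
/*
 * Note that the 16- and 32-bit checks above accept anything in
 * S16_MIN..U16_MAX (resp. S32_MIN..U32_MAX): an ABS16/ABS32 field may
 * legitimately hold either a signed or an unsigned quantity, so both
 * interpretations are tolerated before returning -ERANGE.
 */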

enum aarch64_insn_movw_imm_type {
	AARCH64_INSN_IMM_MOVNZ,
	AARCH64_INSN_IMM_MOVKZ,
};
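
/*
 * AARCH64_INSN_IMM_MOVKZ leaves the opcode alone and only patches the
 * 16-bit immediate field (MOVK/MOVZ style relocations), whereas
 * AARCH64_INSN_IMM_MOVNZ may rewrite the instruction between MOVZ and
 * MOVN below, depending on the sign of the relocated value.
 */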

static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
			   int lsb, enum aarch64_insn_movw_imm_type imm_type)
{
	u64 imm;
	s64 sval;
	u32 insn = le32_to_cpu(*(u32 *)place);

	sval = do_reloc(op, place, val);
	imm = sval >> lsb;

	if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
		/*
		 * For signed MOVW relocations, we have to manipulate the
		 * instruction encoding depending on whether or not the
		 * immediate is less than zero.
		 */
		insn &= ~(3 << 29);
		if (sval >= 0) {
			/* >=0: Set the instruction to MOVZ (opcode 10b). */
			insn |= 2 << 29;
		} else {
			/*
			 * <0: Set the instruction to MOVN (opcode 00b).
			 *     Since we've masked the opcode already, we
			 *     don't need to do anything other than
			 *     inverting the new immediate field.
			 */
			imm = ~imm;
		}
	}

	/* Update the instruction with the new encoding. */
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
	*(u32 *)place = cpu_to_le32(insn);

	if (imm > U16_MAX)
		return -ERANGE;

	return 0;
}
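
/*
 * Example (hypothetical value): relocating R_AARCH64_MOVW_SABS_G0 with
 * val = -2 gives sval = -2 and imm = 0xfffffffffffffffe; the negative
 * path rewrites the instruction to MOVN and inverts the immediate to 1,
 * which encodes -2 and passes the final imm > U16_MAX check.
 */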

static int reloc_insn_imm(enum aarch64_reloc_op op, void *place, u64 val,
			  int lsb, int len, enum aarch64_insn_imm_type imm_type)
{
	u64 imm, imm_mask;
	s64 sval;
	u32 insn = le32_to_cpu(*(u32 *)place);

	/* Calculate the relocation value. */
	sval = do_reloc(op, place, val);
	sval >>= lsb;

	/* Extract the value bits and shift them to bit 0. */
	imm_mask = (BIT(lsb + len) - 1) >> lsb;
	imm = sval & imm_mask;

	/* Update the instruction's immediate field. */
	insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
	*(u32 *)place = cpu_to_le32(insn);

	/*
	 * Extract the upper value bits (including the sign bit) and
	 * shift them to bit 0.
	 */
	sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);

	/*
	 * Overflow has occurred if the upper bits are not all equal to
	 * the sign bit of the value.
	 */
	if ((u64)(sval + 1) >= 2)
		return -ERANGE;

	return 0;
}
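
/*
 * Example (hypothetical offset): for R_AARCH64_CONDBR19 (lsb = 2, len = 19)
 * a branch offset of -0x1000 gives sval = -0x400; the low 19 bits are
 * patched into the instruction, and the bits from position 18 upwards,
 * arithmetically shifted down, collapse to -1, so (u64)(sval + 1) is 0 and
 * the relocation is accepted. An offset outside the +/-1 MiB range of the
 * 19-bit immediate would leave other values in the upper bits and trip the
 * -ERANGE check.
 */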

int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	int ovf;
	bool overflow_check;
	Elf64_Sym *sym;
	void *loc;
	u64 val;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* loc corresponds to P in the AArch64 ELF document. */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* sym is the ELF symbol we're referring to. */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		/* val corresponds to (S + A) in the AArch64 ELF document. */
		val = sym->st_value + rel[i].r_addend;

		/* Check for overflow by default. */
		overflow_check = true;

		/* Perform the static relocation. */
		switch (ELF64_R_TYPE(rel[i].r_info)) {
		/* Null relocations. */
		case R_ARM_NONE:
		case R_AARCH64_NONE:
			ovf = 0;
			break;

		/* Data relocations. */
		case R_AARCH64_ABS64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
			break;
		case R_AARCH64_ABS32:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
			break;
		case R_AARCH64_ABS16:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
			break;
		case R_AARCH64_PREL64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
			break;
		case R_AARCH64_PREL32:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
			break;
		case R_AARCH64_PREL16:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
			break;

		/* MOVW instruction relocations. */
		case R_AARCH64_MOVW_UABS_G0_NC:
			overflow_check = false;
		case R_AARCH64_MOVW_UABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G1_NC:
			overflow_check = false;
		case R_AARCH64_MOVW_UABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G2_NC:
			overflow_check = false;
		case R_AARCH64_MOVW_UABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_SABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G0_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G0:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G1_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G1:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G2_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G2:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
					      AARCH64_INSN_IMM_MOVNZ);
			break;

		/* Immediate instruction relocations. */
		case R_AARCH64_LD_PREL_LO19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_ADR_PREL_LO21:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
					     AARCH64_INSN_IMM_ADR);
			break;
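		/*
		 * With the Cortex-A53 erratum 843419 workaround enabled,
		 * modules are built so as not to rely on ADRP-based page
		 * relocations, so the two ADR_PREL_PG_HI21 cases below are
		 * compiled out and such relocations are rejected as
		 * unsupported instead.
		 */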
#ifndef CONFIG_ARM64_ERRATUM_843419
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
			overflow_check = false;
		case R_AARCH64_ADR_PREL_PG_HI21:
			ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21,
					     AARCH64_INSN_IMM_ADR);
			break;
#endif
		case R_AARCH64_ADD_ABS_LO12_NC:
		case R_AARCH64_LDST8_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST16_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST32_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST64_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST128_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_TSTBR14:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
					     AARCH64_INSN_IMM_14);
			break;
		case R_AARCH64_CONDBR19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
					     AARCH64_INSN_IMM_26);

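			/*
			 * If the direct branch is out of range and module
			 * PLTs are configured, emit (or reuse) a PLT entry
			 * inside the module and point the branch at that
			 * veneer instead.
			 */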
			if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
			    ovf == -ERANGE) {
				val = module_emit_plt_entry(me, &rel[i], sym);
				ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
						     26, AARCH64_INSN_IMM_26);
			}
			break;

		default:
			pr_err("module %s: unsupported RELA relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}

		if (overflow_check && ovf == -ERANGE)
			goto overflow;

	}

	return 0;

overflow:
	pr_err("module %s: overflow in relocation type %d val %Lx\n",
	       me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
	return -ENOEXEC;
}
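
/*
 * Post-process a freshly loaded module: apply any CPU alternatives recorded
 * in its .altinstructions section so that errata workarounds and
 * feature-dependent code paths are patched, as is done for vmlinux at boot.
 */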
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s, *se;
	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
		if (strcmp(".altinstructions", secstrs + s->sh_name) == 0) {
			apply_alternatives((void *)s->sh_addr, s->sh_size);
			return 0;
		}
	}

	return 0;
}