v5.4
// SPDX-License-Identifier: GPL-2.0-or-later
/*  Kernel module help for x86.
    Copyright (C) 2001 Rusty Russell.

*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/jump_label.h>
#include <linux/random.h>

#include <asm/text-patching.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/unwind.h>

#if 0
#define DEBUGP(fmt, ...)				\
	printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#else
#define DEBUGP(fmt, ...)				\
do {							\
	if (0)						\
		printk(KERN_DEBUG fmt, ##__VA_ARGS__);	\
} while (0)
#endif

#ifdef CONFIG_RANDOMIZE_BASE
static unsigned long module_load_offset;

/* Mutex protects the module_load_offset. */
static DEFINE_MUTEX(module_kaslr_mutex);

static unsigned long int get_module_load_offset(void)
{
	if (kaslr_enabled()) {
		mutex_lock(&module_kaslr_mutex);
		/*
		 * Calculate the module_load_offset the first time this
		 * code is called. Once calculated it stays the same until
		 * reboot.
		 */
		if (module_load_offset == 0)
			module_load_offset =
				(get_random_int() % 1024 + 1) * PAGE_SIZE;
		mutex_unlock(&module_kaslr_mutex);
	}
	return module_load_offset;
}
#else
static unsigned long int get_module_load_offset(void)
{
	return 0;
}
#endif

void *module_alloc(unsigned long size)
{
	void *p;

	if (PAGE_ALIGN(size) > MODULES_LEN)
		return NULL;

	p = __vmalloc_node_range(size, MODULE_ALIGN,
				    MODULES_VADDR + get_module_load_offset(),
				    MODULES_END, GFP_KERNEL,
				    PAGE_KERNEL, 0, NUMA_NO_NODE,
				    __builtin_return_address(0));
	if (p && (kasan_module_alloc(p, size) < 0)) {
		vfree(p);
		return NULL;
	}

	return p;
}

#ifdef CONFIG_X86_32
int apply_relocate(Elf32_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	unsigned int i;
	Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
	Elf32_Sym *sym;
	uint32_t *location;

	DEBUGP("Applying relocate section %u to %u\n",
	       relsec, sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;
		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved.  */
		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
			+ ELF32_R_SYM(rel[i].r_info);

		switch (ELF32_R_TYPE(rel[i].r_info)) {
		case R_386_32:
			/* We add the value into the location given */
			*location += sym->st_value;
			break;
		case R_386_PC32:
			/* Add the value, subtract its position */
			*location += sym->st_value - (uint32_t)location;
			break;
		default:
			pr_err("%s: Unknown relocation: %u\n",
			       me->name, ELF32_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;
}
#else /*X86_64*/
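/*
 * Each 64-bit case below first checks that the bytes at the target
 * location are still zero; a nonzero value there suggests the
 * relocation was already applied once (or the image is corrupt), so it
 * is reported and rejected rather than applied twice.
 */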
int apply_relocate_add(Elf64_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	unsigned int i;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
	Elf64_Sym *sym;
	void *loc;
	u64 val;

	DEBUGP("Applying relocate section %u to %u\n",
	       relsec, sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved.  */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n",
		       (int)ELF64_R_TYPE(rel[i].r_info),
		       sym->st_value, rel[i].r_addend, (u64)loc);

		val = sym->st_value + rel[i].r_addend;

		switch (ELF64_R_TYPE(rel[i].r_info)) {
		case R_X86_64_NONE:
			break;
		case R_X86_64_64:
			if (*(u64 *)loc != 0)
				goto invalid_relocation;
			*(u64 *)loc = val;
			break;
		case R_X86_64_32:
			if (*(u32 *)loc != 0)
				goto invalid_relocation;
			*(u32 *)loc = val;
			if (val != *(u32 *)loc)
				goto overflow;
			break;
		case R_X86_64_32S:
			if (*(s32 *)loc != 0)
				goto invalid_relocation;
			*(s32 *)loc = val;
			if ((s64)val != *(s32 *)loc)
				goto overflow;
			break;
		case R_X86_64_PC32:
		case R_X86_64_PLT32:
			if (*(u32 *)loc != 0)
				goto invalid_relocation;
			val -= (u64)loc;
			*(u32 *)loc = val;
#if 0
			if ((s64)val != *(s32 *)loc)
				goto overflow;
#endif
			break;
		case R_X86_64_PC64:
			if (*(u64 *)loc != 0)
				goto invalid_relocation;
			val -= (u64)loc;
			*(u64 *)loc = val;
			break;
		default:
			pr_err("%s: Unknown rela relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;

invalid_relocation:
	pr_err("x86/modules: Skipping invalid relocation target, existing value is nonzero for type %d, loc %p, val %Lx\n",
	       (int)ELF64_R_TYPE(rel[i].r_info), loc, val);
	return -ENOEXEC;

overflow:
	pr_err("overflow in relocation type %d val %Lx\n",
	       (int)ELF64_R_TYPE(rel[i].r_info), val);
	pr_err("`%s' likely not compiled with -mcmodel=kernel\n",
	       me->name);
	return -ENOEXEC;
}
#endif

int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL,
		*para = NULL, *orc = NULL, *orc_ip = NULL;
	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
		if (!strcmp(".text", secstrings + s->sh_name))
			text = s;
		if (!strcmp(".altinstructions", secstrings + s->sh_name))
			alt = s;
		if (!strcmp(".smp_locks", secstrings + s->sh_name))
			locks = s;
		if (!strcmp(".parainstructions", secstrings + s->sh_name))
			para = s;
		if (!strcmp(".orc_unwind", secstrings + s->sh_name))
			orc = s;
		if (!strcmp(".orc_unwind_ip", secstrings + s->sh_name))
			orc_ip = s;
	}

	if (alt) {
		/* patch .altinstructions */
		void *aseg = (void *)alt->sh_addr;
		apply_alternatives(aseg, aseg + alt->sh_size);
	}
	if (locks && text) {
		void *lseg = (void *)locks->sh_addr;
		void *tseg = (void *)text->sh_addr;
		alternatives_smp_module_add(me, me->name,
					    lseg, lseg + locks->sh_size,
					    tseg, tseg + text->sh_size);
	}

	if (para) {
		void *pseg = (void *)para->sh_addr;
		apply_paravirt(pseg, pseg + para->sh_size);
	}

	/* make jump label nops */
	jump_label_apply_nops(me);

	if (orc && orc_ip)
		unwind_module_init(me, (void *)orc_ip->sh_addr, orc_ip->sh_size,
				   (void *)orc->sh_addr, orc->sh_size);

	return 0;
}

void module_arch_cleanup(struct module *mod)
{
	alternatives_smp_module_del(mod);
}
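
The relocation arithmetic in the v5.4 apply_relocate_add() above reduces to two patterns: the absolute types (R_X86_64_64, R_X86_64_32, R_X86_64_32S) store S + A, the symbol value plus the addend, while the PC-relative types (R_X86_64_PC32, R_X86_64_PC64) store S + A - P, additionally subtracting the address of the location being patched. The standalone sketch below replays both forms against a dummy buffer; the symbol value, addend, and offsets are made-up illustrative numbers, not values taken from the kernel.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char image[32] = { 0 };	/* stand-in for module text */
	uint64_t sym = 0xffffffffa0001000;	/* hypothetical st_value (S) */
	int64_t addend = -4;			/* hypothetical r_addend (A) */

	/* Absolute (R_X86_64_64 style): write S + A at the location. */
	uint64_t abs_val = sym + addend;
	memcpy(&image[0], &abs_val, 8);

	/*
	 * PC-relative (R_X86_64_PC32 style): also subtract the location's
	 * own address (P), yielding a 32-bit displacement.
	 */
	uintptr_t loc = (uintptr_t)&image[16];
	uint32_t rel_val = (uint32_t)(sym + addend - loc);
	memcpy(&image[16], &rel_val, 4);

	printf("absolute %#llx, pc-relative %#x\n",
	       (unsigned long long)abs_val, rel_val);
	return 0;
}
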
v6.2
// SPDX-License-Identifier: GPL-2.0-or-later
/*  Kernel module help for x86.
    Copyright (C) 2001 Rusty Russell.

*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/jump_label.h>
#include <linux/random.h>
#include <linux/memory.h>

#include <asm/text-patching.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/unwind.h>

#if 0
#define DEBUGP(fmt, ...)				\
	printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#else
#define DEBUGP(fmt, ...)				\
do {							\
	if (0)						\
		printk(KERN_DEBUG fmt, ##__VA_ARGS__);	\
} while (0)
#endif

#ifdef CONFIG_RANDOMIZE_BASE
static unsigned long module_load_offset;

/* Mutex protects the module_load_offset. */
static DEFINE_MUTEX(module_kaslr_mutex);

static unsigned long int get_module_load_offset(void)
{
	if (kaslr_enabled()) {
		mutex_lock(&module_kaslr_mutex);
		/*
		 * Calculate the module_load_offset the first time this
		 * code is called. Once calculated it stays the same until
		 * reboot.
		 */
		if (module_load_offset == 0)
			module_load_offset =
				get_random_u32_inclusive(1, 1024) * PAGE_SIZE;
		mutex_unlock(&module_kaslr_mutex);
	}
	return module_load_offset;
}
#else
static unsigned long int get_module_load_offset(void)
{
	return 0;
}
#endif

void *module_alloc(unsigned long size)
{
	gfp_t gfp_mask = GFP_KERNEL;
	void *p;

	if (PAGE_ALIGN(size) > MODULES_LEN)
		return NULL;

	p = __vmalloc_node_range(size, MODULE_ALIGN,
				 MODULES_VADDR + get_module_load_offset(),
				 MODULES_END, gfp_mask, PAGE_KERNEL,
				 VM_FLUSH_RESET_PERMS | VM_DEFER_KMEMLEAK,
				 NUMA_NO_NODE, __builtin_return_address(0));

	if (p && (kasan_alloc_module_shadow(p, size, gfp_mask) < 0)) {
		vfree(p);
		return NULL;
	}

	return p;
}

#ifdef CONFIG_X86_32
int apply_relocate(Elf32_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	unsigned int i;
	Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
	Elf32_Sym *sym;
	uint32_t *location;

	DEBUGP("Applying relocate section %u to %u\n",
	       relsec, sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;
		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved.  */
		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
			+ ELF32_R_SYM(rel[i].r_info);

		switch (ELF32_R_TYPE(rel[i].r_info)) {
		case R_386_32:
			/* We add the value into the location given */
			*location += sym->st_value;
			break;
		case R_386_PC32:
		case R_386_PLT32:
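			/*
			 * PLT32 can be handled exactly like PC32 here:
			 * kernel modules are statically linked, so no
			 * PLT is ever generated and the branch resolves
			 * directly to the symbol itself.
			 */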
			/* Add the value, subtract its position */
			*location += sym->st_value - (uint32_t)location;
			break;
		default:
			pr_err("%s: Unknown relocation: %u\n",
			       me->name, ELF32_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;
}
#else /*X86_64*/
static int __apply_relocate_add(Elf64_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me,
		   void *(*write)(void *dest, const void *src, size_t len))
{
	unsigned int i;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
	Elf64_Sym *sym;
	void *loc;
	u64 val;

	DEBUGP("Applying relocate section %u to %u\n",
	       relsec, sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved.  */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n",
		       (int)ELF64_R_TYPE(rel[i].r_info),
		       sym->st_value, rel[i].r_addend, (u64)loc);

		val = sym->st_value + rel[i].r_addend;

		switch (ELF64_R_TYPE(rel[i].r_info)) {
		case R_X86_64_NONE:
			break;
		case R_X86_64_64:
			if (*(u64 *)loc != 0)
				goto invalid_relocation;
			write(loc, &val, 8);
			break;
		case R_X86_64_32:
			if (*(u32 *)loc != 0)
				goto invalid_relocation;
			write(loc, &val, 4);
			if (val != *(u32 *)loc)
				goto overflow;
			break;
		case R_X86_64_32S:
			if (*(s32 *)loc != 0)
				goto invalid_relocation;
			write(loc, &val, 4);
			if ((s64)val != *(s32 *)loc)
				goto overflow;
			break;
		case R_X86_64_PC32:
		case R_X86_64_PLT32:
			if (*(u32 *)loc != 0)
				goto invalid_relocation;
			val -= (u64)loc;
			write(loc, &val, 4);
#if 0
			if ((s64)val != *(s32 *)loc)
				goto overflow;
#endif
			break;
		case R_X86_64_PC64:
			if (*(u64 *)loc != 0)
				goto invalid_relocation;
			val -= (u64)loc;
			write(loc, &val, 8);
			break;
		default:
			pr_err("%s: Unknown rela relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;

invalid_relocation:
	pr_err("x86/modules: Skipping invalid relocation target, existing value is nonzero for type %d, loc %p, val %Lx\n",
	       (int)ELF64_R_TYPE(rel[i].r_info), loc, val);
	return -ENOEXEC;

overflow:
	pr_err("overflow in relocation type %d val %Lx\n",
	       (int)ELF64_R_TYPE(rel[i].r_info), val);
	pr_err("`%s' likely not compiled with -mcmodel=kernel\n",
	       me->name);
	return -ENOEXEC;
}

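/*
 * Relocations are written with plain memcpy() while the module is
 * still MODULE_STATE_UNFORMED, i.e. before its text has gone live.
 * Late relocations (e.g. livepatch applying relocations to an already
 * loaded module) must instead go through text_poke() under text_mutex,
 * followed by text_poke_sync() so every CPU sees the new text.
 */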
int apply_relocate_add(Elf64_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	int ret;
	bool early = me->state == MODULE_STATE_UNFORMED;
	void *(*write)(void *, const void *, size_t) = memcpy;

	if (!early) {
		write = text_poke;
		mutex_lock(&text_mutex);
	}

	ret = __apply_relocate_add(sechdrs, strtab, symindex, relsec, me,
				   write);

	if (!early) {
		text_poke_sync();
		mutex_unlock(&text_mutex);
	}

	return ret;
}

#endif

int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s, *alt = NULL, *locks = NULL,
		*para = NULL, *orc = NULL, *orc_ip = NULL,
		*retpolines = NULL, *returns = NULL, *ibt_endbr = NULL,
		*calls = NULL, *cfi = NULL;
	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
		if (!strcmp(".altinstructions", secstrings + s->sh_name))
			alt = s;
		if (!strcmp(".smp_locks", secstrings + s->sh_name))
			locks = s;
		if (!strcmp(".parainstructions", secstrings + s->sh_name))
			para = s;
		if (!strcmp(".orc_unwind", secstrings + s->sh_name))
			orc = s;
		if (!strcmp(".orc_unwind_ip", secstrings + s->sh_name))
			orc_ip = s;
		if (!strcmp(".retpoline_sites", secstrings + s->sh_name))
			retpolines = s;
		if (!strcmp(".return_sites", secstrings + s->sh_name))
			returns = s;
		if (!strcmp(".call_sites", secstrings + s->sh_name))
			calls = s;
		if (!strcmp(".cfi_sites", secstrings + s->sh_name))
			cfi = s;
		if (!strcmp(".ibt_endbr_seal", secstrings + s->sh_name))
			ibt_endbr = s;
	}

	/*
	 * See alternative_instructions() for the ordering rules between the
	 * various patching types.
	 */
	if (para) {
		void *pseg = (void *)para->sh_addr;
		apply_paravirt(pseg, pseg + para->sh_size);
	}
	if (retpolines || cfi) {
		void *rseg = NULL, *cseg = NULL;
		unsigned int rsize = 0, csize = 0;

		if (retpolines) {
			rseg = (void *)retpolines->sh_addr;
			rsize = retpolines->sh_size;
		}

		if (cfi) {
			cseg = (void *)cfi->sh_addr;
			csize = cfi->sh_size;
		}

		apply_fineibt(rseg, rseg + rsize, cseg, cseg + csize);
	}
	if (retpolines) {
		void *rseg = (void *)retpolines->sh_addr;
		apply_retpolines(rseg, rseg + retpolines->sh_size);
	}
	if (returns) {
		void *rseg = (void *)returns->sh_addr;
		apply_returns(rseg, rseg + returns->sh_size);
	}
	if (alt) {
		/* patch .altinstructions */
		void *aseg = (void *)alt->sh_addr;
		apply_alternatives(aseg, aseg + alt->sh_size);
	}
	if (calls || para) {
		struct callthunk_sites cs = {};

		if (calls) {
			cs.call_start = (void *)calls->sh_addr;
			cs.call_end = (void *)calls->sh_addr + calls->sh_size;
		}

		if (para) {
			cs.pv_start = (void *)para->sh_addr;
			cs.pv_end = (void *)para->sh_addr + para->sh_size;
		}

		callthunks_patch_module_calls(&cs, me);
	}
	if (ibt_endbr) {
		void *iseg = (void *)ibt_endbr->sh_addr;
		apply_ibt_endbr(iseg, iseg + ibt_endbr->sh_size);
	}
	if (locks) {
		void *lseg = (void *)locks->sh_addr;
		void *text = me->core_layout.base;
		void *text_end = text + me->core_layout.text_size;
		alternatives_smp_module_add(me, me->name,
					    lseg, lseg + locks->sh_size,
					    text, text_end);
	}

	if (orc && orc_ip)
		unwind_module_init(me, (void *)orc_ip->sh_addr, orc_ip->sh_size,
				   (void *)orc->sh_addr, orc->sh_size);

	return 0;
}

void module_arch_cleanup(struct module *mod)
{
	alternatives_smp_module_del(mod);
}
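
The v6.2 version factors the relocation walk out into __apply_relocate_add(), parameterized by a write callback, so one loop serves both the early memcpy() path and the late text_poke() path. The sketch below shows that callback shape in isolation; patch_u32() and logging_write() are hypothetical names used for illustration, not kernel API.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

typedef void *(*writer_fn)(void *dest, const void *src, size_t len);

/* The patching logic is written once against the callback... */
static void patch_u32(void *loc, unsigned int val, writer_fn write)
{
	write(loc, &val, sizeof(val));
}

/* ...and each caller supplies the writer its context requires. */
static void *logging_write(void *dest, const void *src, size_t len)
{
	printf("patching %zu bytes at %p\n", len, dest);
	return memcpy(dest, src, len);
}

int main(void)
{
	unsigned int word = 0;

	patch_u32(&word, 0xdeadbeef, memcpy);		/* "early" path */
	patch_u32(&word, 0x90909090, logging_write);	/* instrumented path */
	return 0;
}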