v6.2 (arch/powerpc/kernel/module_32.c)
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*  Kernel module help for PPC.
  3    Copyright (C) 2001 Rusty Russell.
  4
  5*/
  6
  7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  8
  9#include <linux/module.h>
 10#include <linux/moduleloader.h>
 11#include <linux/elf.h>
 12#include <linux/vmalloc.h>
 13#include <linux/fs.h>
 14#include <linux/string.h>
 15#include <linux/kernel.h>
 16#include <linux/ftrace.h>
 17#include <linux/cache.h>
 18#include <linux/bug.h>
 19#include <linux/sort.h>
 20#include <asm/setup.h>
 21#include <asm/code-patching.h>
 22
 23/* Count how many different relocations (different symbol, different
 24   addend) */
 25static unsigned int count_relocs(const Elf32_Rela *rela, unsigned int num)
 26{
 27	unsigned int i, r_info, r_addend, _count_relocs;
 28
 29	_count_relocs = 0;
 30	r_info = 0;
 31	r_addend = 0;
 32	for (i = 0; i < num; i++)
 33		/* Only count 24-bit relocs, others don't need stubs */
 34		if (ELF32_R_TYPE(rela[i].r_info) == R_PPC_REL24 &&
 35		    (r_info != ELF32_R_SYM(rela[i].r_info) ||
 36		     r_addend != rela[i].r_addend)) {
 37			_count_relocs++;
 38			r_info = ELF32_R_SYM(rela[i].r_info);
 39			r_addend = rela[i].r_addend;
 40		}
 41
 42#ifdef CONFIG_DYNAMIC_FTRACE
 43	_count_relocs++;	/* add one for ftrace_caller */
 44#endif
 45	return _count_relocs;
 46}
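/* Editor's note: count_relocs() only increments when an R_PPC_REL24
 * entry's (symbol, addend) pair differs from the entry immediately
 * before it, so the result is exact once the table has been sorted by
 * relacmp() below (get_plt_size() sorts before counting).  For
 * illustration (hypothetical values): sorted keys (sym 5, addend 0),
 * (5, 0), (7, 8) yield two distinct targets, plus one reserved slot
 * for ftrace_caller when CONFIG_DYNAMIC_FTRACE is set. */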
 47
 48static int relacmp(const void *_x, const void *_y)
 49{
 50	const Elf32_Rela *x, *y;
 51
 52	y = (Elf32_Rela *)_x;
 53	x = (Elf32_Rela *)_y;
 54
 55	/* Compare the entire r_info (as opposed to ELF32_R_SYM(r_info) only) to
 56	 * make the comparison cheaper/faster. It won't affect the sorting or
 57	 * the counting algorithms' performance
 58	 */
 59	if (x->r_info < y->r_info)
 60		return -1;
 61	else if (x->r_info > y->r_info)
 62		return 1;
 63	else if (x->r_addend < y->r_addend)
 64		return -1;
 65	else if (x->r_addend > y->r_addend)
 66		return 1;
 67	else
 68		return 0;
 69}
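/* Editor's note: the comparator assigns y = _x and x = _y, so sort()
 * effectively orders the table by descending r_info/r_addend.  The
 * direction is irrelevant here: count_relocs() only needs entries with
 * equal keys to end up adjacent. */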
 70
 71/* Get the potential trampoline (PLT) size required for the init or
 72   non-init sections */
 73static unsigned long get_plt_size(const Elf32_Ehdr *hdr,
 74				  const Elf32_Shdr *sechdrs,
 75				  const char *secstrings,
 76				  int is_init)
 77{
 78	unsigned long ret = 0;
 79	unsigned i;
 80
 81	/* Everything marked ALLOC (this includes the exported
 82           symbols) */
 83	for (i = 1; i < hdr->e_shnum; i++) {
 84		/* If it's called *.init*, and we're not init, we're
 85                   not interested */
 86		if ((strstr(secstrings + sechdrs[i].sh_name, ".init") != NULL)
 87		    != is_init)
 88			continue;
 89
 90		/* We don't want to look at debug sections. */
 91		if (strstr(secstrings + sechdrs[i].sh_name, ".debug"))
 92			continue;
 93
 94		if (sechdrs[i].sh_type == SHT_RELA) {
 95			pr_debug("Found relocations in section %u\n", i);
 96			pr_debug("Ptr: %p.  Number: %u\n",
 97			       (void *)hdr + sechdrs[i].sh_offset,
 98			       sechdrs[i].sh_size / sizeof(Elf32_Rela));
 99
100			/* Sort the relocation information based on a symbol and
101			 * addend key. This is a stable O(n*log n) complexity
102			 * algorithm but it will reduce the complexity of
103			 * count_relocs() to linear complexity O(n)
104			 */
105			sort((void *)hdr + sechdrs[i].sh_offset,
106			     sechdrs[i].sh_size / sizeof(Elf32_Rela),
107			     sizeof(Elf32_Rela), relacmp, NULL);
108
109			ret += count_relocs((void *)hdr
110					     + sechdrs[i].sh_offset,
111					     sechdrs[i].sh_size
112					     / sizeof(Elf32_Rela))
113				* sizeof(struct ppc_plt_entry);
114		}
115	}
116
117	return ret;
118}
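/* Editor's note: each distinct far-call target costs one
 * struct ppc_plt_entry, i.e. four u32 jump slots (16 bytes).  As a
 * hypothetical example, three distinct R_PPC_REL24 targets plus the
 * ftrace_caller slot reserve 4 * 16 = 64 bytes of PLT space. */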
119
120int module_frob_arch_sections(Elf32_Ehdr *hdr,
121			      Elf32_Shdr *sechdrs,
122			      char *secstrings,
123			      struct module *me)
124{
125	unsigned int i;
126
127	/* Find .plt and .init.plt sections */
128	for (i = 0; i < hdr->e_shnum; i++) {
129		if (strcmp(secstrings + sechdrs[i].sh_name, ".init.plt") == 0)
130			me->arch.init_plt_section = i;
131		else if (strcmp(secstrings + sechdrs[i].sh_name, ".plt") == 0)
132			me->arch.core_plt_section = i;
133	}
134	if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
135		pr_err("Module doesn't contain .plt or .init.plt sections.\n");
136		return -ENOEXEC;
137	}
138
139	/* Override their sizes */
140	sechdrs[me->arch.core_plt_section].sh_size
141		= get_plt_size(hdr, sechdrs, secstrings, 0);
142	sechdrs[me->arch.init_plt_section].sh_size
143		= get_plt_size(hdr, sechdrs, secstrings, 1);
144	return 0;
145}
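/* Editor's note: overriding sh_size here is what actually reserves the
 * trampoline space; the module loader later lays out core and init
 * memory from these (now enlarged) section sizes.  Core and init PLTs
 * are sized separately so that init trampolines can presumably be
 * discarded along with the module's init text. */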
146
147static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val)
148{
149	if (entry->jump[0] != PPC_RAW_LIS(_R12, PPC_HA(val)))
150		return 0;
151	if (entry->jump[1] != PPC_RAW_ADDI(_R12, _R12, PPC_LO(val)))
152		return 0;
153	return 1;
154}
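/* Editor's note: a slot is considered a match when its first two
 * instructions already load r12 with the @ha/@l halves of 'val'; the
 * mtctr/bctr tail is identical for every slot, so it is not checked.
 * See the standalone sketch after this listing for how the @ha/@l
 * split reconstructs the full 32-bit address. */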
155
156/* Set up a trampoline in the PLT to bounce us to the distant function */
157static uint32_t do_plt_call(void *location,
158			    Elf32_Addr val,
159			    const Elf32_Shdr *sechdrs,
160			    struct module *mod)
161{
162	struct ppc_plt_entry *entry;
163
164	pr_debug("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
165	/* Init, or core PLT? */
166	if (location >= mod->core_layout.base
167	    && location < mod->core_layout.base + mod->core_layout.size)
168		entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
169	else
170		entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
171
172	/* Find this entry, or if that fails, the next avail. entry */
173	while (entry->jump[0]) {
174		if (entry_matches(entry, val)) return (uint32_t)entry;
175		entry++;
176	}
177
178	if (patch_instruction(&entry->jump[0], ppc_inst(PPC_RAW_LIS(_R12, PPC_HA(val)))))
179		return 0;
180	if (patch_instruction(&entry->jump[1], ppc_inst(PPC_RAW_ADDI(_R12, _R12, PPC_LO(val)))))
181		return 0;
182	if (patch_instruction(&entry->jump[2], ppc_inst(PPC_RAW_MTCTR(_R12))))
183		return 0;
184	if (patch_instruction(&entry->jump[3], ppc_inst(PPC_RAW_BCTR())))
185		return 0;
186
187	pr_debug("Initialized plt for 0x%x at %p\n", val, entry);
188	return (uint32_t)entry;
189}
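/* Editor's note: the four patch_instruction() calls above emit the
 * classic long-call trampoline (spelled out in the v5.4 listing below):
 *	lis	r12, sym@ha
 *	addi	r12, r12, sym@l
 *	mtctr	r12
 *	bctr
 * The scan stops at the first slot whose jump[0] is still zero (the
 * first free entry), and any patching failure is reported as 0 so the
 * caller can fail the relocation. */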
190
191static int patch_location_16(uint32_t *loc, u16 value)
192{
193	loc = PTR_ALIGN_DOWN(loc, sizeof(u32));
194	return patch_instruction(loc, ppc_inst((*loc & 0xffff0000) | value));
195}
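/* Editor's note: R_PPC_ADDR16_* relocations point at the 16-bit
 * immediate field, which on big-endian ppc32 is the low halfword of the
 * 32-bit instruction.  Rounding the address down to the containing word
 * and rewriting only its low 16 bits keeps the opcode and register
 * fields intact, and going through patch_instruction() presumably lets
 * this work even when module text is mapped read-only. */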
196
197int apply_relocate_add(Elf32_Shdr *sechdrs,
198		       const char *strtab,
199		       unsigned int symindex,
200		       unsigned int relsec,
201		       struct module *module)
202{
203	unsigned int i;
204	Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr;
205	Elf32_Sym *sym;
206	uint32_t *location;
207	uint32_t value;
208
209	pr_debug("Applying ADD relocate section %u to %u\n", relsec,
210	       sechdrs[relsec].sh_info);
211	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
212		/* This is where to make the change */
213		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
214			+ rela[i].r_offset;
215		/* This is the symbol it is referring to.  Note that all
216		   undefined symbols have been resolved.  */
217		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
218			+ ELF32_R_SYM(rela[i].r_info);
219		/* `Everything is relative'. */
220		value = sym->st_value + rela[i].r_addend;
221
222		switch (ELF32_R_TYPE(rela[i].r_info)) {
223		case R_PPC_ADDR32:
224			/* Simply set it */
225			*(uint32_t *)location = value;
226			break;
227
228		case R_PPC_ADDR16_LO:
229			/* Low half of the symbol */
230			if (patch_location_16(location, PPC_LO(value)))
231				return -EFAULT;
232			break;
233
234		case R_PPC_ADDR16_HI:
235			/* Higher half of the symbol */
236			if (patch_location_16(location, PPC_HI(value)))
237				return -EFAULT;
238			break;
239
240		case R_PPC_ADDR16_HA:
241			if (patch_location_16(location, PPC_HA(value)))
242				return -EFAULT;
243			break;
244
245		case R_PPC_REL24:
246			if ((int)(value - (uint32_t)location) < -0x02000000
247			    || (int)(value - (uint32_t)location) >= 0x02000000) {
248				value = do_plt_call(location, value,
249						    sechdrs, module);
250				if (!value)
251					return -EFAULT;
252			}
253
254			/* Only replace bits 2 through 26 */
255			pr_debug("REL24 value = %08X. location = %08X\n",
256			       value, (uint32_t)location);
257			pr_debug("Location before: %08X.\n",
258			       *(uint32_t *)location);
259			value = (*(uint32_t *)location & ~PPC_LI_MASK) |
260				PPC_LI(value - (uint32_t)location);
261
262			if (patch_instruction(location, ppc_inst(value)))
263				return -EFAULT;
264
265			pr_debug("Location after: %08X.\n",
266			       *(uint32_t *)location);
267			pr_debug("ie. jump to %08X+%08X = %08X\n",
268				 *(uint32_t *)PPC_LI((uint32_t)location), (uint32_t)location,
269				 (*(uint32_t *)PPC_LI((uint32_t)location)) + (uint32_t)location);
270			break;
271
272		case R_PPC_REL32:
273			/* 32-bit relative jump. */
274			*(uint32_t *)location = value - (uint32_t)location;
275			break;
276
277		default:
278			pr_err("%s: unknown ADD relocation: %u\n",
279			       module->name,
280			       ELF32_R_TYPE(rela[i].r_info));
281			return -ENOEXEC;
282		}
283	}
284
285	return 0;
286}
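/* Editor's note, with a hypothetical worked example of the @ha
 * adjustment used above: for value 0x12348001, PPC_HI is 0x1234,
 * PPC_LO is 0x8001 and PPC_HA is 0x1235, because the instruction
 * consuming the low half (e.g. addi) sign-extends it to -0x7fff and
 * 0x12350000 - 0x7fff == 0x12348001.  R_PPC_REL24 branches carry a
 * 24-bit word displacement (a signed 26-bit byte offset), reaching
 * +/- 32 MB, hence the 0x02000000 range check before falling back to
 * a PLT trampoline. */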
287
288#ifdef CONFIG_DYNAMIC_FTRACE
289notrace int module_trampoline_target(struct module *mod, unsigned long addr,
290				     unsigned long *target)
291{
292	ppc_inst_t jmp[4];
293
294	/* Find where the trampoline jumps to */
295	if (copy_inst_from_kernel_nofault(jmp, (void *)addr))
296		return -EFAULT;
297	if (__copy_inst_from_kernel_nofault(jmp + 1, (void *)addr + 4))
298		return -EFAULT;
299	if (__copy_inst_from_kernel_nofault(jmp + 2, (void *)addr + 8))
300		return -EFAULT;
301	if (__copy_inst_from_kernel_nofault(jmp + 3, (void *)addr + 12))
302		return -EFAULT;
303
304	/* verify that this is what we expect it to be */
305	if ((ppc_inst_val(jmp[0]) & 0xffff0000) != PPC_RAW_LIS(_R12, 0))
306		return -EINVAL;
307	if ((ppc_inst_val(jmp[1]) & 0xffff0000) != PPC_RAW_ADDI(_R12, _R12, 0))
308		return -EINVAL;
309	if (ppc_inst_val(jmp[2]) != PPC_RAW_MTCTR(_R12))
310		return -EINVAL;
311	if (ppc_inst_val(jmp[3]) != PPC_RAW_BCTR())
312		return -EINVAL;
313
314	addr = (ppc_inst_val(jmp[1]) & 0xffff) | ((ppc_inst_val(jmp[0]) & 0xffff) << 16);
315	if (addr & 0x8000)
316		addr -= 0x10000;
317
318	*target = addr;
319
320	return 0;
321}
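/* Editor's note: the trampoline target is rebuilt from the lis/addi
 * immediates; because addi sign-extends its operand, a low half with
 * bit 15 set means the lis half was pre-incremented by one, so 0x10000
 * is subtracted again.  Hypothetical example: for target 0xdeadbeef the
 * trampoline holds lis r12,0xdeae / addi r12,r12,0xbeef, giving
 * 0xdeaebeef - 0x10000 == 0xdeadbeef. */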
322
323int module_finalize_ftrace(struct module *module, const Elf_Shdr *sechdrs)
324{
325	module->arch.tramp = do_plt_call(module->core_layout.base,
326					 (unsigned long)ftrace_caller,
327					 sechdrs, module);
328	if (!module->arch.tramp)
329		return -ENOENT;
330
331#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
332	module->arch.tramp_regs = do_plt_call(module->core_layout.base,
333					      (unsigned long)ftrace_regs_caller,
334					      sechdrs, module);
335	if (!module->arch.tramp_regs)
336		return -ENOENT;
337#endif
338
339	return 0;
340}
341#endif
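
Editor's sketch: the @ha/@l address split used by entry_matches(), do_plt_call() and module_trampoline_target() above can be checked with a small host-side program. It is not part of the kernel file; DEMO_HA/DEMO_LO are local stand-ins assumed to mirror the kernel's PPC_HA()/PPC_LO() macros.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel's PPC_LO()/PPC_HA() (assumed definitions). */
#define DEMO_LO(v)	((uint16_t)(v))
#define DEMO_HA(v)	((uint16_t)(((v) + 0x8000u) >> 16))

int main(void)
{
	uint32_t val = 0xdeadbeef;

	/* lis r12,HA(val); addi r12,r12,LO(val) rebuilds val because addi
	 * sign-extends its immediate: 0xdeae0000 + (int16_t)0xbeef. */
	uint32_t rebuilt = ((uint32_t)DEMO_HA(val) << 16) + (uint32_t)(int16_t)DEMO_LO(val);

	printf("val=%08x ha=%04x lo=%04x rebuilt=%08x\n",
	       (unsigned int)val, DEMO_HA(val), DEMO_LO(val), (unsigned int)rebuilt);
	return 0;
}
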
v5.4 (arch/powerpc/kernel/module_32.c)
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*  Kernel module help for PPC.
  3    Copyright (C) 2001 Rusty Russell.
  4
  5*/
  6
  7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  8
  9#include <linux/module.h>
 10#include <linux/moduleloader.h>
 11#include <linux/elf.h>
 12#include <linux/vmalloc.h>
 13#include <linux/fs.h>
 14#include <linux/string.h>
 15#include <linux/kernel.h>
 16#include <linux/ftrace.h>
 17#include <linux/cache.h>
 18#include <linux/bug.h>
 19#include <linux/sort.h>
 20#include <asm/setup.h>
 21
 22/* Count how many different relocations (different symbol, different
 23   addend) */
 24static unsigned int count_relocs(const Elf32_Rela *rela, unsigned int num)
 25{
 26	unsigned int i, r_info, r_addend, _count_relocs;
 27
 28	_count_relocs = 0;
 29	r_info = 0;
 30	r_addend = 0;
 31	for (i = 0; i < num; i++)
 32		/* Only count 24-bit relocs, others don't need stubs */
 33		if (ELF32_R_TYPE(rela[i].r_info) == R_PPC_REL24 &&
 34		    (r_info != ELF32_R_SYM(rela[i].r_info) ||
 35		     r_addend != rela[i].r_addend)) {
 36			_count_relocs++;
 37			r_info = ELF32_R_SYM(rela[i].r_info);
 38			r_addend = rela[i].r_addend;
 39		}
 40
 41#ifdef CONFIG_DYNAMIC_FTRACE
 42	_count_relocs++;	/* add one for ftrace_caller */
 43#endif
 44	return _count_relocs;
 45}
 46
 47static int relacmp(const void *_x, const void *_y)
 48{
 49	const Elf32_Rela *x, *y;
 50
 51	y = (Elf32_Rela *)_x;
 52	x = (Elf32_Rela *)_y;
 53
 54	/* Compare the entire r_info (as opposed to ELF32_R_SYM(r_info) only) to
 55	 * make the comparison cheaper/faster. It won't affect the sorting or
 56	 * the counting algorithms' performance
 57	 */
 58	if (x->r_info < y->r_info)
 59		return -1;
 60	else if (x->r_info > y->r_info)
 61		return 1;
 62	else if (x->r_addend < y->r_addend)
 63		return -1;
 64	else if (x->r_addend > y->r_addend)
 65		return 1;
 66	else
 67		return 0;
 68}
 69
 70static void relaswap(void *_x, void *_y, int size)
 71{
 72	uint32_t *x, *y, tmp;
 73	int i;
 74
 75	y = (uint32_t *)_x;
 76	x = (uint32_t *)_y;
 77
 78	for (i = 0; i < sizeof(Elf32_Rela) / sizeof(uint32_t); i++) {
 79		tmp = x[i];
 80		x[i] = y[i];
 81		y[i] = tmp;
 82	}
 83}
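/* Editor's note: relaswap() exchanges two Elf32_Rela records word by
 * word for sort().  The v6.2 listing above passes NULL as the swap
 * callback instead and relies on sort()'s built-in generic swap. */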
 84
 85/* Get the potential trampoline (PLT) size required for the init or
 86   non-init sections */
 87static unsigned long get_plt_size(const Elf32_Ehdr *hdr,
 88				  const Elf32_Shdr *sechdrs,
 89				  const char *secstrings,
 90				  int is_init)
 91{
 92	unsigned long ret = 0;
 93	unsigned i;
 94
 95	/* Everything marked ALLOC (this includes the exported
 96           symbols) */
 97	for (i = 1; i < hdr->e_shnum; i++) {
 98		/* If it's called *.init*, and we're not init, we're
 99                   not interested */
100		if ((strstr(secstrings + sechdrs[i].sh_name, ".init") != NULL)
101		    != is_init)
102			continue;
103
104		/* We don't want to look at debug sections. */
105		if (strstr(secstrings + sechdrs[i].sh_name, ".debug"))
106			continue;
107
108		if (sechdrs[i].sh_type == SHT_RELA) {
109			pr_debug("Found relocations in section %u\n", i);
110			pr_debug("Ptr: %p.  Number: %u\n",
111			       (void *)hdr + sechdrs[i].sh_offset,
112			       sechdrs[i].sh_size / sizeof(Elf32_Rela));
113
114			/* Sort the relocation information based on a symbol and
115			 * addend key. This is a stable O(n*log n) complexity
116	 * algorithm but it will reduce the complexity of
117			 * count_relocs() to linear complexity O(n)
118			 */
119			sort((void *)hdr + sechdrs[i].sh_offset,
120			     sechdrs[i].sh_size / sizeof(Elf32_Rela),
121			     sizeof(Elf32_Rela), relacmp, relaswap);
122
123			ret += count_relocs((void *)hdr
124					     + sechdrs[i].sh_offset,
125					     sechdrs[i].sh_size
126					     / sizeof(Elf32_Rela))
127				* sizeof(struct ppc_plt_entry);
128		}
129	}
130
131	return ret;
132}
133
134int module_frob_arch_sections(Elf32_Ehdr *hdr,
135			      Elf32_Shdr *sechdrs,
136			      char *secstrings,
137			      struct module *me)
138{
139	unsigned int i;
140
141	/* Find .plt and .init.plt sections */
142	for (i = 0; i < hdr->e_shnum; i++) {
143		if (strcmp(secstrings + sechdrs[i].sh_name, ".init.plt") == 0)
144			me->arch.init_plt_section = i;
145		else if (strcmp(secstrings + sechdrs[i].sh_name, ".plt") == 0)
146			me->arch.core_plt_section = i;
147	}
148	if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
149		pr_err("Module doesn't contain .plt or .init.plt sections.\n");
150		return -ENOEXEC;
151	}
152
153	/* Override their sizes */
154	sechdrs[me->arch.core_plt_section].sh_size
155		= get_plt_size(hdr, sechdrs, secstrings, 0);
156	sechdrs[me->arch.init_plt_section].sh_size
157		= get_plt_size(hdr, sechdrs, secstrings, 1);
158	return 0;
159}
160
161static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val)
162{
163	if (entry->jump[0] != (PPC_INST_ADDIS | __PPC_RT(R12) | PPC_HA(val)))
164		return 0;
165	if (entry->jump[1] != (PPC_INST_ADDI | __PPC_RT(R12) | __PPC_RA(R12) |
166			       PPC_LO(val)))
167		return 0;
168	return 1;
169}
170
171/* Set up a trampoline in the PLT to bounce us to the distant function */
172static uint32_t do_plt_call(void *location,
173			    Elf32_Addr val,
174			    const Elf32_Shdr *sechdrs,
175			    struct module *mod)
176{
177	struct ppc_plt_entry *entry;
178
179	pr_debug("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
180	/* Init, or core PLT? */
181	if (location >= mod->core_layout.base
182	    && location < mod->core_layout.base + mod->core_layout.size)
183		entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
184	else
185		entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
186
187	/* Find this entry, or if that fails, the next avail. entry */
188	while (entry->jump[0]) {
189		if (entry_matches(entry, val)) return (uint32_t)entry;
190		entry++;
191	}
192
193	/*
194	 * lis r12, sym@ha
195	 * addi r12, r12, sym@l
196	 * mtctr r12
197	 * bctr
198	 */
199	entry->jump[0] = PPC_INST_ADDIS | __PPC_RT(R12) | PPC_HA(val);
200	entry->jump[1] = PPC_INST_ADDI | __PPC_RT(R12) | __PPC_RA(R12) | PPC_LO(val);
201	entry->jump[2] = PPC_INST_MTCTR | __PPC_RS(R12);
202	entry->jump[3] = PPC_INST_BCTR;
203
204	pr_debug("Initialized plt for 0x%x at %p\n", val, entry);
205	return (uint32_t)entry;
206}
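/* Editor's note: unlike the v6.2 version above, which routes every
 * trampoline word through patch_instruction() and checks for failure,
 * this older version writes entry->jump[] directly and so assumes the
 * PLT section is plainly writable. */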
207
208int apply_relocate_add(Elf32_Shdr *sechdrs,
209		       const char *strtab,
210		       unsigned int symindex,
211		       unsigned int relsec,
212		       struct module *module)
213{
214	unsigned int i;
215	Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr;
216	Elf32_Sym *sym;
217	uint32_t *location;
218	uint32_t value;
219
220	pr_debug("Applying ADD relocate section %u to %u\n", relsec,
221	       sechdrs[relsec].sh_info);
222	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
223		/* This is where to make the change */
224		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
225			+ rela[i].r_offset;
226		/* This is the symbol it is referring to.  Note that all
227		   undefined symbols have been resolved.  */
228		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
229			+ ELF32_R_SYM(rela[i].r_info);
230		/* `Everything is relative'. */
231		value = sym->st_value + rela[i].r_addend;
232
233		switch (ELF32_R_TYPE(rela[i].r_info)) {
234		case R_PPC_ADDR32:
235			/* Simply set it */
236			*(uint32_t *)location = value;
237			break;
238
239		case R_PPC_ADDR16_LO:
240			/* Low half of the symbol */
241			*(uint16_t *)location = value;
242			break;
243
244		case R_PPC_ADDR16_HI:
245			/* Higher half of the symbol */
246			*(uint16_t *)location = (value >> 16);
247			break;
248
249		case R_PPC_ADDR16_HA:
250			/* Sign-adjusted lower 16 bits: PPC ELF ABI says:
251			   (((x >> 16) + ((x & 0x8000) ? 1 : 0))) & 0xFFFF.
252			   This is the same, only sane.
253			 */
254			*(uint16_t *)location = (value + 0x8000) >> 16;
255			break;
256
257		case R_PPC_REL24:
258			if ((int)(value - (uint32_t)location) < -0x02000000
259			    || (int)(value - (uint32_t)location) >= 0x02000000)
260				value = do_plt_call(location, value,
261						    sechdrs, module);
262
263			/* Only replace bits 2 through 26 */
264			pr_debug("REL24 value = %08X. location = %08X\n",
265			       value, (uint32_t)location);
266			pr_debug("Location before: %08X.\n",
267			       *(uint32_t *)location);
268			*(uint32_t *)location
269				= (*(uint32_t *)location & ~0x03fffffc)
270				| ((value - (uint32_t)location)
271				   & 0x03fffffc);
272			pr_debug("Location after: %08X.\n",
273			       *(uint32_t *)location);
274			pr_debug("ie. jump to %08X+%08X = %08X\n",
275			       *(uint32_t *)location & 0x03fffffc,
276			       (uint32_t)location,
277			       (*(uint32_t *)location & 0x03fffffc)
278			       + (uint32_t)location);
279			break;
280
281		case R_PPC_REL32:
282			/* 32-bit relative jump. */
283			*(uint32_t *)location = value - (uint32_t)location;
284			break;
285
286		default:
287			pr_err("%s: unknown ADD relocation: %u\n",
288			       module->name,
289			       ELF32_R_TYPE(rela[i].r_info));
290			return -ENOEXEC;
291		}
292	}
293
294	return 0;
295}
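/* Editor's note: the R_PPC_ADDR16_* cases here store the halfword
 * directly through a uint16_t pointer, where the v6.2 listing goes
 * through patch_location_16().  Likewise this version does not check
 * do_plt_call() for failure in the R_PPC_REL24 case, while v6.2
 * returns -EFAULT when no trampoline could be set up. */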
296
297#ifdef CONFIG_DYNAMIC_FTRACE
298int module_finalize_ftrace(struct module *module, const Elf_Shdr *sechdrs)
299{
300	module->arch.tramp = do_plt_call(module->core_layout.base,
301					 (unsigned long)ftrace_caller,
302					 sechdrs, module);
303	if (!module->arch.tramp)
304		return -ENOENT;
305
306	return 0;
307}
308#endif
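
Editor's sketch: a small host-side illustration of the R_PPC_REL24 handling shared by both versions, i.e. the +/- 32 MB range test and the patch of the 24-bit displacement field. It is not part of the kernel source; DEMO_LI_MASK is a local stand-in assumed to equal the 0x03fffffc mask (PPC_LI_MASK in the v6.2 listing).

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the 0x03fffffc branch displacement mask used above. */
#define DEMO_LI_MASK	0x03fffffcu

/* R_PPC_REL24 can only reach +/- 32 MB from the branch instruction. */
static int rel24_in_range(uint32_t value, uint32_t location)
{
	int32_t delta = (int32_t)(value - location);

	return delta >= -0x02000000 && delta < 0x02000000;
}

int main(void)
{
	uint32_t location    = 0xb0000000u;
	uint32_t near_target = 0xb1000000u;	/* +16 MB: direct branch is fine */
	uint32_t far_target  = 0xc0000000u;	/* +256 MB: needs a PLT trampoline */
	uint32_t insn        = 0x48000001u;	/* "bl" with a zero displacement */

	printf("near in range: %d, far in range: %d\n",
	       rel24_in_range(near_target, location),
	       rel24_in_range(far_target, location));

	/* Patch only the displacement field, keeping the opcode and AA/LK bits. */
	insn = (insn & ~DEMO_LI_MASK) | ((near_target - location) & DEMO_LI_MASK);
	printf("patched insn: %08x\n", (unsigned int)insn);
	return 0;
}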