Linux v6.13.7: arch/x86/kernel/callthunks.c
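Call depth tracking for the Skylake-era RSB underflow mitigation: every eligible call target gets an accounting thunk placed in the compiler-generated padding in front of it, and call sites are patched to call the thunk instead of the function directly.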
// SPDX-License-Identifier: GPL-2.0-only

#define pr_fmt(fmt) "callthunks: " fmt

#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/memory.h>
#include <linux/moduleloader.h>
#include <linux/static_call.h>

#include <asm/alternative.h>
#include <asm/asm-offsets.h>
#include <asm/cpu.h>
#include <asm/ftrace.h>
#include <asm/insn.h>
#include <asm/kexec.h>
#include <asm/nospec-branch.h>
#include <asm/paravirt.h>
#include <asm/sections.h>
#include <asm/switch_to.h>
#include <asm/sync_core.h>
#include <asm/text-patching.h>
#include <asm/xen/hypercall.h>

static int __initdata_or_module debug_callthunks;

#define MAX_PATCH_LEN (255-1)

#define prdbg(fmt, args...)					\
do {								\
	if (debug_callthunks)					\
		printk(KERN_DEBUG pr_fmt(fmt), ##args);		\
} while(0)

static int __init debug_thunks(char *str)
{
	debug_callthunks = 1;
	return 1;
}
__setup("debug-callthunks", debug_thunks);

#ifdef CONFIG_CALL_THUNKS_DEBUG
DEFINE_PER_CPU(u64, __x86_call_count);
DEFINE_PER_CPU(u64, __x86_ret_count);
DEFINE_PER_CPU(u64, __x86_stuffs_count);
DEFINE_PER_CPU(u64, __x86_ctxsw_count);
EXPORT_PER_CPU_SYMBOL_GPL(__x86_ctxsw_count);
EXPORT_PER_CPU_SYMBOL_GPL(__x86_call_count);
#endif

extern s32 __call_sites[], __call_sites_end[];

struct core_text {
	unsigned long	base;
	unsigned long	end;
	const char	*name;
};

static bool thunks_initialized __ro_after_init;

static const struct core_text builtin_coretext = {
	.base = (unsigned long)_text,
	.end  = (unsigned long)_etext,
	.name = "builtin",
};

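/*
 * Template for the call depth accounting sequence (INCREMENT_CALL_DEPTH).
 * It is kept in .rodata and copied into the padding area in front of each
 * eligible call target, so that patched call sites run the accounting code
 * first and then fall through into the function proper.
 */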
asm (
	".pushsection .rodata				\n"
	".global skl_call_thunk_template		\n"
	"skl_call_thunk_template:			\n"
		__stringify(INCREMENT_CALL_DEPTH)"	\n"
	".global skl_call_thunk_tail			\n"
	"skl_call_thunk_tail:				\n"
	".popsection					\n"
);

extern u8 skl_call_thunk_template[];
extern u8 skl_call_thunk_tail[];

#define SKL_TMPL_SIZE \
	((unsigned int)(skl_call_thunk_tail - skl_call_thunk_template))

extern void error_entry(void);
extern void xen_error_entry(void);
extern void paranoid_entry(void);

static inline bool within_coretext(const struct core_text *ct, void *addr)
{
	unsigned long p = (unsigned long)addr;

	return ct->base <= p && p < ct->end;
}

static inline bool within_module_coretext(void *addr)
{
	bool ret = false;

#ifdef CONFIG_MODULES
	struct module *mod;

	preempt_disable();
	mod = __module_address((unsigned long)addr);
	if (mod && within_module_core((unsigned long)addr, mod))
		ret = true;
	preempt_enable();
#endif
	return ret;
}

static bool is_coretext(const struct core_text *ct, void *addr)
{
	if (ct && within_coretext(ct, addr))
		return true;
	if (within_coretext(&builtin_coretext, addr))
		return true;
	return within_module_coretext(addr);
}

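/*
 * Call targets which must not be redirected through an accounting thunk:
 * low level entry/exception code, code which does its own accounting or
 * RSB fill, and text which is executed outside the normal kernel mapping
 * (kexec control code).
 */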
static bool skip_addr(void *dest)
{
	if (dest == error_entry)
		return true;
	if (dest == paranoid_entry)
		return true;
	if (dest == xen_error_entry)
		return true;
	/* Does FILL_RSB... */
	if (dest == __switch_to_asm)
		return true;
	/* Accounts directly */
	if (dest == ret_from_fork)
		return true;
#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_AMD_MEM_ENCRYPT)
	if (dest == soft_restart_cpu)
		return true;
#endif
#ifdef CONFIG_FUNCTION_TRACER
	if (dest == __fentry__)
		return true;
#endif
#ifdef CONFIG_KEXEC_CORE
	if (dest >= (void *)relocate_kernel &&
	    dest < (void*)relocate_kernel + KEXEC_CONTROL_CODE_MAX_SIZE)
		return true;
#endif
	return false;
}

static __init_or_module void *call_get_dest(void *addr)
{
	struct insn insn;
	void *dest;
	int ret;

	ret = insn_decode_kernel(&insn, addr);
	if (ret)
		return ERR_PTR(ret);

	/* Patched out call? */
	if (insn.opcode.bytes[0] != CALL_INSN_OPCODE)
		return NULL;

	dest = addr + insn.length + insn.immediate.value;
	if (skip_addr(dest))
		return NULL;
	return dest;
}

static const u8 nops[] = {
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
};

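/*
 * Place the accounting template into the padding area right in front of
 * @dest. The padding is NOP filled at build time; anything else there
 * (other than an already patched template) means the area is unusable
 * and patching is refused.
 */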
static void *patch_dest(void *dest, bool direct)
{
	unsigned int tsize = SKL_TMPL_SIZE;
	u8 insn_buff[MAX_PATCH_LEN];
	u8 *pad = dest - tsize;

	memcpy(insn_buff, skl_call_thunk_template, tsize);
	apply_relocation(insn_buff, pad, tsize, skl_call_thunk_template, tsize);

	/* Already patched? */
	if (!bcmp(pad, insn_buff, tsize))
		return pad;

	/* Ensure there are nops */
	if (bcmp(pad, nops, tsize)) {
		pr_warn_once("Invalid padding area for %pS\n", dest);
		return NULL;
	}

	if (direct)
		memcpy(pad, insn_buff, tsize);
	else
		text_poke_copy_locked(pad, insn_buff, tsize, true);
	return pad;
}

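/*
 * Patch a single call site: decode the CALL instruction, resolve its
 * destination, install the thunk in the destination's padding area and
 * retarget the call to the thunk.
 */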
static __init_or_module void patch_call(void *addr, const struct core_text *ct)
{
	void *pad, *dest;
	u8 bytes[8];

	if (!within_coretext(ct, addr))
		return;

	dest = call_get_dest(addr);
	if (!dest || WARN_ON_ONCE(IS_ERR(dest)))
		return;

	if (!is_coretext(ct, dest))
		return;

	pad = patch_dest(dest, within_coretext(ct, dest));
	if (!pad)
		return;

	prdbg("Patch call at: %pS %px to %pS %px -> %px \n", addr, addr,
		dest, dest, pad);
	__text_gen_insn(bytes, CALL_INSN_OPCODE, addr, pad, CALL_INSN_SIZE);
	text_poke_early(addr, bytes, CALL_INSN_SIZE);
}

static __init_or_module void
patch_call_sites(s32 *start, s32 *end, const struct core_text *ct)
{
	s32 *s;

	for (s = start; s < end; s++)
		patch_call((void *)s + *s, ct);
}

static __init_or_module void
patch_alt_call_sites(struct alt_instr *start, struct alt_instr *end,
		     const struct core_text *ct)
{
	struct alt_instr *a;

	for (a = start; a < end; a++)
		patch_call((void *)&a->instr_offset + a->instr_offset, ct);
}

static __init_or_module void
callthunks_setup(struct callthunk_sites *cs, const struct core_text *ct)
{
	prdbg("Patching call sites %s\n", ct->name);
	patch_call_sites(cs->call_start, cs->call_end, ct);
	patch_alt_call_sites(cs->alt_start, cs->alt_end, ct);
	prdbg("Patching call sites done%s\n", ct->name);
}

void __init callthunks_patch_builtin_calls(void)
{
	struct callthunk_sites cs = {
		.call_start	= __call_sites,
		.call_end	= __call_sites_end,
		.alt_start	= __alt_instructions,
		.alt_end	= __alt_instructions_end
	};

	if (!cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
		return;

	pr_info("Setting up call depth tracking\n");
	mutex_lock(&text_mutex);
	callthunks_setup(&cs, &builtin_coretext);
	thunks_initialized = true;
	mutex_unlock(&text_mutex);
}

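/*
 * Used when call destinations are established at runtime (e.g. static
 * call updates): return the accounting thunk in front of @dest, creating
 * it if necessary, so the call can be redirected through it.
 */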
void *callthunks_translate_call_dest(void *dest)
{
	void *target;

	lockdep_assert_held(&text_mutex);

	if (!thunks_initialized || skip_addr(dest))
		return dest;

	if (!is_coretext(NULL, dest))
		return dest;

	target = patch_dest(dest, false);
	return target ? : dest;
}

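/*
 * JITed code has no compiler generated padding to patch. The BPF JIT
 * therefore emits the accounting template inline, unless the call target
 * already carries a thunk in its padding (detected by is_callthunk()).
 */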
#ifdef CONFIG_BPF_JIT
static bool is_callthunk(void *addr)
{
	unsigned int tmpl_size = SKL_TMPL_SIZE;
	u8 insn_buff[MAX_PATCH_LEN];
	unsigned long dest;
	u8 *pad;

	dest = roundup((unsigned long)addr, CONFIG_FUNCTION_ALIGNMENT);
	if (!thunks_initialized || skip_addr((void *)dest))
		return false;

	pad = (void *)(dest - tmpl_size);

	memcpy(insn_buff, skl_call_thunk_template, tmpl_size);
	apply_relocation(insn_buff, pad, tmpl_size, skl_call_thunk_template, tmpl_size);

	return !bcmp(pad, insn_buff, tmpl_size);
}

int x86_call_depth_emit_accounting(u8 **pprog, void *func, void *ip)
{
	unsigned int tmpl_size = SKL_TMPL_SIZE;
	u8 insn_buff[MAX_PATCH_LEN];

	if (!thunks_initialized)
		return 0;

	/* Is function call target a thunk? */
	if (func && is_callthunk(func))
		return 0;

	memcpy(insn_buff, skl_call_thunk_template, tmpl_size);
	apply_relocation(insn_buff, ip, tmpl_size, skl_call_thunk_template, tmpl_size);

	memcpy(*pprog, insn_buff, tmpl_size);
	*pprog += tmpl_size;
	return tmpl_size;
}
#endif

#ifdef CONFIG_MODULES
void noinline callthunks_patch_module_calls(struct callthunk_sites *cs,
					    struct module *mod)
{
	struct core_text ct = {
		.base = (unsigned long)mod->mem[MOD_TEXT].base,
		.end  = (unsigned long)mod->mem[MOD_TEXT].base + mod->mem[MOD_TEXT].size,
		.name = mod->name,
	};

	if (!thunks_initialized)
		return;

	mutex_lock(&text_mutex);
	callthunks_setup(cs, &ct);
	mutex_unlock(&text_mutex);
}
#endif /* CONFIG_MODULES */

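/*
 * Optional debugfs interface: one file per possible CPU under
 * /sys/kernel/debug/callthunks/ exposing the per-CPU call, return,
 * stuff and context switch counters.
 */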
#if defined(CONFIG_CALL_THUNKS_DEBUG) && defined(CONFIG_DEBUG_FS)
static int callthunks_debug_show(struct seq_file *m, void *p)
{
	unsigned long cpu = (unsigned long)m->private;

	seq_printf(m, "C: %16llu R: %16llu S: %16llu X: %16llu\n,",
		   per_cpu(__x86_call_count, cpu),
		   per_cpu(__x86_ret_count, cpu),
		   per_cpu(__x86_stuffs_count, cpu),
		   per_cpu(__x86_ctxsw_count, cpu));
	return 0;
}

static int callthunks_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, callthunks_debug_show, inode->i_private);
}

static const struct file_operations dfs_ops = {
	.open		= callthunks_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init callthunks_debugfs_init(void)
{
	struct dentry *dir;
	unsigned long cpu;

	dir = debugfs_create_dir("callthunks", NULL);
	for_each_possible_cpu(cpu) {
		void *arg = (void *)cpu;
		char name[10];

		sprintf(name, "cpu%lu", cpu);
		debugfs_create_file(name, 0644, dir, arg, &dfs_ops);
	}
	return 0;
}
__initcall(callthunks_debugfs_init);
#endif