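The two listings below are successive snapshots of what appears to be arch/x86/kernel/callthunks.c, the x86 call depth tracking implementation. It copies a small accounting thunk (skl_call_thunk_template) into the NOP padding placed in front of each function and retargets direct calls to land on that thunk, so call depth can be tracked for the RSB-stuffing mitigation gated by X86_FEATURE_CALL_DEPTH. The first snapshot is from v6.2, the second from v6.8.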
v6.2
// SPDX-License-Identifier: GPL-2.0-only

#define pr_fmt(fmt) "callthunks: " fmt

#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/memory.h>
#include <linux/moduleloader.h>
#include <linux/static_call.h>

#include <asm/alternative.h>
#include <asm/asm-offsets.h>
#include <asm/cpu.h>
#include <asm/ftrace.h>
#include <asm/insn.h>
#include <asm/kexec.h>
#include <asm/nospec-branch.h>
#include <asm/paravirt.h>
#include <asm/sections.h>
#include <asm/switch_to.h>
#include <asm/sync_core.h>
#include <asm/text-patching.h>
#include <asm/xen/hypercall.h>

static int __initdata_or_module debug_callthunks;

#define prdbg(fmt, args...)					\
do {								\
	if (debug_callthunks)					\
		printk(KERN_DEBUG pr_fmt(fmt), ##args);		\
} while(0)

static int __init debug_thunks(char *str)
{
	debug_callthunks = 1;
	return 1;
}
__setup("debug-callthunks", debug_thunks);

#ifdef CONFIG_CALL_THUNKS_DEBUG
DEFINE_PER_CPU(u64, __x86_call_count);
DEFINE_PER_CPU(u64, __x86_ret_count);
DEFINE_PER_CPU(u64, __x86_stuffs_count);
DEFINE_PER_CPU(u64, __x86_ctxsw_count);
EXPORT_SYMBOL_GPL(__x86_ctxsw_count);
EXPORT_SYMBOL_GPL(__x86_call_count);
#endif

extern s32 __call_sites[], __call_sites_end[];

struct thunk_desc {
	void		*template;
	unsigned int	template_size;
};

struct core_text {
	unsigned long	base;
	unsigned long	end;
	const char	*name;
};

static bool thunks_initialized __ro_after_init;

static const struct core_text builtin_coretext = {
	.base = (unsigned long)_text,
	.end  = (unsigned long)_etext,
	.name = "builtin",
};

asm (
	".pushsection .rodata				\n"
	".global skl_call_thunk_template		\n"
	"skl_call_thunk_template:			\n"
		__stringify(INCREMENT_CALL_DEPTH)"	\n"
	".global skl_call_thunk_tail			\n"
	"skl_call_thunk_tail:				\n"
	".popsection					\n"
);

extern u8 skl_call_thunk_template[];
extern u8 skl_call_thunk_tail[];

#define SKL_TMPL_SIZE \
	((unsigned int)(skl_call_thunk_tail - skl_call_thunk_template))

extern void error_entry(void);
extern void xen_error_entry(void);
extern void paranoid_entry(void);

static inline bool within_coretext(const struct core_text *ct, void *addr)
{
	unsigned long p = (unsigned long)addr;

	return ct->base <= p && p < ct->end;
}

static inline bool within_module_coretext(void *addr)
{
	bool ret = false;

#ifdef CONFIG_MODULES
	struct module *mod;

	preempt_disable();
	mod = __module_address((unsigned long)addr);
	if (mod && within_module_core((unsigned long)addr, mod))
		ret = true;
	preempt_enable();
#endif
	return ret;
}

static bool is_coretext(const struct core_text *ct, void *addr)
{
	if (ct && within_coretext(ct, addr))
		return true;
	if (within_coretext(&builtin_coretext, addr))
		return true;
	return within_module_coretext(addr);
}

static bool skip_addr(void *dest)
{
	if (dest == error_entry)
		return true;
	if (dest == paranoid_entry)
		return true;
	if (dest == xen_error_entry)
		return true;
	/* Does FILL_RSB... */
	if (dest == __switch_to_asm)
		return true;
	/* Accounts directly */
	if (dest == ret_from_fork)
		return true;
#ifdef CONFIG_HOTPLUG_CPU
	if (dest == start_cpu0)
		return true;
#endif
#ifdef CONFIG_FUNCTION_TRACER
	if (dest == __fentry__)
		return true;
#endif
#ifdef CONFIG_KEXEC_CORE
	if (dest >= (void *)relocate_kernel &&
	    dest < (void*)relocate_kernel + KEXEC_CONTROL_CODE_MAX_SIZE)
		return true;
#endif
#ifdef CONFIG_XEN
	if (dest >= (void *)hypercall_page &&
	    dest < (void*)hypercall_page + PAGE_SIZE)
		return true;
#endif
	return false;
}

static __init_or_module void *call_get_dest(void *addr)
{
	struct insn insn;
	void *dest;
	int ret;

	ret = insn_decode_kernel(&insn, addr);
	if (ret)
		return ERR_PTR(ret);

	/* Patched out call? */
	if (insn.opcode.bytes[0] != CALL_INSN_OPCODE)
		return NULL;

	dest = addr + insn.length + insn.immediate.value;
	if (skip_addr(dest))
		return NULL;
	return dest;
}

static const u8 nops[] = {
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
};

static void *patch_dest(void *dest, bool direct)
{
	unsigned int tsize = SKL_TMPL_SIZE;
	u8 *pad = dest - tsize;

	/* Already patched? */
	if (!bcmp(pad, skl_call_thunk_template, tsize))
		return pad;

	/* Ensure there are nops */
	if (bcmp(pad, nops, tsize)) {
		pr_warn_once("Invalid padding area for %pS\n", dest);
		return NULL;
	}

	if (direct)
		memcpy(pad, skl_call_thunk_template, tsize);
	else
		text_poke_copy_locked(pad, skl_call_thunk_template, tsize, true);
	return pad;
}

static __init_or_module void patch_call(void *addr, const struct core_text *ct)
{
	void *pad, *dest;
	u8 bytes[8];

	if (!within_coretext(ct, addr))
		return;

	dest = call_get_dest(addr);
	if (!dest || WARN_ON_ONCE(IS_ERR(dest)))
		return;

	if (!is_coretext(ct, dest))
		return;

	pad = patch_dest(dest, within_coretext(ct, dest));
	if (!pad)
		return;

	prdbg("Patch call at: %pS %px to %pS %px -> %px\n", addr, addr,
		dest, dest, pad);
	__text_gen_insn(bytes, CALL_INSN_OPCODE, addr, pad, CALL_INSN_SIZE);
	text_poke_early(addr, bytes, CALL_INSN_SIZE);
}

static __init_or_module void
patch_call_sites(s32 *start, s32 *end, const struct core_text *ct)
{
	s32 *s;

	for (s = start; s < end; s++)
		patch_call((void *)s + *s, ct);
}

static __init_or_module void
patch_paravirt_call_sites(struct paravirt_patch_site *start,
			  struct paravirt_patch_site *end,
			  const struct core_text *ct)
{
	struct paravirt_patch_site *p;

	for (p = start; p < end; p++)
		patch_call(p->instr, ct);
}

static __init_or_module void
callthunks_setup(struct callthunk_sites *cs, const struct core_text *ct)
{
	prdbg("Patching call sites %s\n", ct->name);
	patch_call_sites(cs->call_start, cs->call_end, ct);
	patch_paravirt_call_sites(cs->pv_start, cs->pv_end, ct);
	prdbg("Patching call sites done %s\n", ct->name);
}

void __init callthunks_patch_builtin_calls(void)
{
	struct callthunk_sites cs = {
		.call_start	= __call_sites,
		.call_end	= __call_sites_end,
		.pv_start	= __parainstructions,
		.pv_end		= __parainstructions_end
	};

	if (!cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
		return;

	pr_info("Setting up call depth tracking\n");
	mutex_lock(&text_mutex);
	callthunks_setup(&cs, &builtin_coretext);
	static_call_force_reinit();
	thunks_initialized = true;
	mutex_unlock(&text_mutex);
}

void *callthunks_translate_call_dest(void *dest)
{
	void *target;

	lockdep_assert_held(&text_mutex);

	if (!thunks_initialized || skip_addr(dest))
		return dest;

	if (!is_coretext(NULL, dest))
		return dest;

	target = patch_dest(dest, false);
	return target ? : dest;
}

bool is_callthunk(void *addr)
{
	unsigned int tmpl_size = SKL_TMPL_SIZE;
	void *tmpl = skl_call_thunk_template;
	unsigned long dest;

	dest = roundup((unsigned long)addr, CONFIG_FUNCTION_ALIGNMENT);
	if (!thunks_initialized || skip_addr((void *)dest))
		return false;

	return !bcmp((void *)(dest - tmpl_size), tmpl, tmpl_size);
}

#ifdef CONFIG_BPF_JIT
int x86_call_depth_emit_accounting(u8 **pprog, void *func)
{
	unsigned int tmpl_size = SKL_TMPL_SIZE;
	void *tmpl = skl_call_thunk_template;

	if (!thunks_initialized)
		return 0;

	/* Is function call target a thunk? */
	if (func && is_callthunk(func))
		return 0;

	memcpy(*pprog, tmpl, tmpl_size);
	*pprog += tmpl_size;
	return tmpl_size;
}
#endif

#ifdef CONFIG_MODULES
void noinline callthunks_patch_module_calls(struct callthunk_sites *cs,
					    struct module *mod)
{
	struct core_text ct = {
		.base = (unsigned long)mod->core_layout.base,
		.end  = (unsigned long)mod->core_layout.base + mod->core_layout.size,
		.name = mod->name,
	};

	if (!thunks_initialized)
		return;

	mutex_lock(&text_mutex);
	callthunks_setup(cs, &ct);
	mutex_unlock(&text_mutex);
}
#endif /* CONFIG_MODULES */

#if defined(CONFIG_CALL_THUNKS_DEBUG) && defined(CONFIG_DEBUG_FS)
static int callthunks_debug_show(struct seq_file *m, void *p)
{
	unsigned long cpu = (unsigned long)m->private;

	seq_printf(m, "C: %16llu R: %16llu S: %16llu X: %16llu\n",
		   per_cpu(__x86_call_count, cpu),
		   per_cpu(__x86_ret_count, cpu),
		   per_cpu(__x86_stuffs_count, cpu),
		   per_cpu(__x86_ctxsw_count, cpu));
	return 0;
}

static int callthunks_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, callthunks_debug_show, inode->i_private);
}

static const struct file_operations dfs_ops = {
	.open		= callthunks_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init callthunks_debugfs_init(void)
{
	struct dentry *dir;
	unsigned long cpu;

	dir = debugfs_create_dir("callthunks", NULL);
	for_each_possible_cpu(cpu) {
		void *arg = (void *)cpu;
		char name[10];

		sprintf(name, "cpu%lu", cpu);
		debugfs_create_file(name, 0644, dir, arg, &dfs_ops);
	}
	return 0;
}
__initcall(callthunks_debugfs_init);
#endif
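The v6.8 version of the same file follows. Relative to v6.2 it drops the unused struct thunk_desc, patches the calls recorded in the alternatives table (patch_alt_call_sites() over __alt_instructions) instead of the paravirt patch sites, replaces the start_cpu0 special case in skip_addr() with soft_restart_cpu under CONFIG_HOTPLUG_CPU && CONFIG_AMD_MEM_ENCRYPT, no longer calls static_call_force_reinit() during setup, makes is_callthunk() static and moves it under CONFIG_BPF_JIT, and derives module text bounds from mod->mem[MOD_TEXT] rather than mod->core_layout.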
v6.8
// SPDX-License-Identifier: GPL-2.0-only

#define pr_fmt(fmt) "callthunks: " fmt

#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/memory.h>
#include <linux/moduleloader.h>
#include <linux/static_call.h>

#include <asm/alternative.h>
#include <asm/asm-offsets.h>
#include <asm/cpu.h>
#include <asm/ftrace.h>
#include <asm/insn.h>
#include <asm/kexec.h>
#include <asm/nospec-branch.h>
#include <asm/paravirt.h>
#include <asm/sections.h>
#include <asm/switch_to.h>
#include <asm/sync_core.h>
#include <asm/text-patching.h>
#include <asm/xen/hypercall.h>

static int __initdata_or_module debug_callthunks;

#define prdbg(fmt, args...)					\
do {								\
	if (debug_callthunks)					\
		printk(KERN_DEBUG pr_fmt(fmt), ##args);		\
} while(0)

static int __init debug_thunks(char *str)
{
	debug_callthunks = 1;
	return 1;
}
__setup("debug-callthunks", debug_thunks);

#ifdef CONFIG_CALL_THUNKS_DEBUG
DEFINE_PER_CPU(u64, __x86_call_count);
DEFINE_PER_CPU(u64, __x86_ret_count);
DEFINE_PER_CPU(u64, __x86_stuffs_count);
DEFINE_PER_CPU(u64, __x86_ctxsw_count);
EXPORT_SYMBOL_GPL(__x86_ctxsw_count);
EXPORT_SYMBOL_GPL(__x86_call_count);
#endif

extern s32 __call_sites[], __call_sites_end[];

struct core_text {
	unsigned long	base;
	unsigned long	end;
	const char	*name;
};

static bool thunks_initialized __ro_after_init;

static const struct core_text builtin_coretext = {
	.base = (unsigned long)_text,
	.end  = (unsigned long)_etext,
	.name = "builtin",
};

asm (
	".pushsection .rodata				\n"
	".global skl_call_thunk_template		\n"
	"skl_call_thunk_template:			\n"
		__stringify(INCREMENT_CALL_DEPTH)"	\n"
	".global skl_call_thunk_tail			\n"
	"skl_call_thunk_tail:				\n"
	".popsection					\n"
);

extern u8 skl_call_thunk_template[];
extern u8 skl_call_thunk_tail[];

#define SKL_TMPL_SIZE \
	((unsigned int)(skl_call_thunk_tail - skl_call_thunk_template))

extern void error_entry(void);
extern void xen_error_entry(void);
extern void paranoid_entry(void);

static inline bool within_coretext(const struct core_text *ct, void *addr)
{
	unsigned long p = (unsigned long)addr;

	return ct->base <= p && p < ct->end;
}

static inline bool within_module_coretext(void *addr)
{
	bool ret = false;

#ifdef CONFIG_MODULES
	struct module *mod;

	preempt_disable();
	mod = __module_address((unsigned long)addr);
	if (mod && within_module_core((unsigned long)addr, mod))
		ret = true;
	preempt_enable();
#endif
	return ret;
}

static bool is_coretext(const struct core_text *ct, void *addr)
{
	if (ct && within_coretext(ct, addr))
		return true;
	if (within_coretext(&builtin_coretext, addr))
		return true;
	return within_module_coretext(addr);
}

static bool skip_addr(void *dest)
{
	if (dest == error_entry)
		return true;
	if (dest == paranoid_entry)
		return true;
	if (dest == xen_error_entry)
		return true;
	/* Does FILL_RSB... */
	if (dest == __switch_to_asm)
		return true;
	/* Accounts directly */
	if (dest == ret_from_fork)
		return true;
#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_AMD_MEM_ENCRYPT)
	if (dest == soft_restart_cpu)
		return true;
#endif
#ifdef CONFIG_FUNCTION_TRACER
	if (dest == __fentry__)
		return true;
#endif
#ifdef CONFIG_KEXEC_CORE
	if (dest >= (void *)relocate_kernel &&
	    dest < (void*)relocate_kernel + KEXEC_CONTROL_CODE_MAX_SIZE)
		return true;
#endif
#ifdef CONFIG_XEN
	if (dest >= (void *)hypercall_page &&
	    dest < (void*)hypercall_page + PAGE_SIZE)
		return true;
#endif
	return false;
}

static __init_or_module void *call_get_dest(void *addr)
{
	struct insn insn;
	void *dest;
	int ret;

	ret = insn_decode_kernel(&insn, addr);
	if (ret)
		return ERR_PTR(ret);

	/* Patched out call? */
	if (insn.opcode.bytes[0] != CALL_INSN_OPCODE)
		return NULL;

	dest = addr + insn.length + insn.immediate.value;
	if (skip_addr(dest))
		return NULL;
	return dest;
}

static const u8 nops[] = {
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
};

static void *patch_dest(void *dest, bool direct)
{
	unsigned int tsize = SKL_TMPL_SIZE;
	u8 *pad = dest - tsize;

	/* Already patched? */
	if (!bcmp(pad, skl_call_thunk_template, tsize))
		return pad;

	/* Ensure there are nops */
	if (bcmp(pad, nops, tsize)) {
		pr_warn_once("Invalid padding area for %pS\n", dest);
		return NULL;
	}

	if (direct)
		memcpy(pad, skl_call_thunk_template, tsize);
	else
		text_poke_copy_locked(pad, skl_call_thunk_template, tsize, true);
	return pad;
}

static __init_or_module void patch_call(void *addr, const struct core_text *ct)
{
	void *pad, *dest;
	u8 bytes[8];

	if (!within_coretext(ct, addr))
		return;

	dest = call_get_dest(addr);
	if (!dest || WARN_ON_ONCE(IS_ERR(dest)))
		return;

	if (!is_coretext(ct, dest))
		return;

	pad = patch_dest(dest, within_coretext(ct, dest));
	if (!pad)
		return;

	prdbg("Patch call at: %pS %px to %pS %px -> %px\n", addr, addr,
		dest, dest, pad);
	__text_gen_insn(bytes, CALL_INSN_OPCODE, addr, pad, CALL_INSN_SIZE);
	text_poke_early(addr, bytes, CALL_INSN_SIZE);
}

static __init_or_module void
patch_call_sites(s32 *start, s32 *end, const struct core_text *ct)
{
	s32 *s;

	for (s = start; s < end; s++)
		patch_call((void *)s + *s, ct);
}

static __init_or_module void
patch_alt_call_sites(struct alt_instr *start, struct alt_instr *end,
		     const struct core_text *ct)
{
	struct alt_instr *a;

	for (a = start; a < end; a++)
		patch_call((void *)&a->instr_offset + a->instr_offset, ct);
}

static __init_or_module void
callthunks_setup(struct callthunk_sites *cs, const struct core_text *ct)
{
	prdbg("Patching call sites %s\n", ct->name);
	patch_call_sites(cs->call_start, cs->call_end, ct);
	patch_alt_call_sites(cs->alt_start, cs->alt_end, ct);
	prdbg("Patching call sites done %s\n", ct->name);
}

void __init callthunks_patch_builtin_calls(void)
{
	struct callthunk_sites cs = {
		.call_start	= __call_sites,
		.call_end	= __call_sites_end,
		.alt_start	= __alt_instructions,
		.alt_end	= __alt_instructions_end
	};

	if (!cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
		return;

	pr_info("Setting up call depth tracking\n");
	mutex_lock(&text_mutex);
	callthunks_setup(&cs, &builtin_coretext);
	thunks_initialized = true;
	mutex_unlock(&text_mutex);
}

void *callthunks_translate_call_dest(void *dest)
{
	void *target;

	lockdep_assert_held(&text_mutex);

	if (!thunks_initialized || skip_addr(dest))
		return dest;

	if (!is_coretext(NULL, dest))
		return dest;

	target = patch_dest(dest, false);
	return target ? : dest;
}

#ifdef CONFIG_BPF_JIT
static bool is_callthunk(void *addr)
{
	unsigned int tmpl_size = SKL_TMPL_SIZE;
	void *tmpl = skl_call_thunk_template;
	unsigned long dest;

	dest = roundup((unsigned long)addr, CONFIG_FUNCTION_ALIGNMENT);
	if (!thunks_initialized || skip_addr((void *)dest))
		return false;

	return !bcmp((void *)(dest - tmpl_size), tmpl, tmpl_size);
}

int x86_call_depth_emit_accounting(u8 **pprog, void *func)
{
	unsigned int tmpl_size = SKL_TMPL_SIZE;
	void *tmpl = skl_call_thunk_template;

	if (!thunks_initialized)
		return 0;

	/* Is function call target a thunk? */
	if (func && is_callthunk(func))
		return 0;

	memcpy(*pprog, tmpl, tmpl_size);
	*pprog += tmpl_size;
	return tmpl_size;
}
#endif

#ifdef CONFIG_MODULES
void noinline callthunks_patch_module_calls(struct callthunk_sites *cs,
					    struct module *mod)
{
	struct core_text ct = {
		.base = (unsigned long)mod->mem[MOD_TEXT].base,
		.end  = (unsigned long)mod->mem[MOD_TEXT].base + mod->mem[MOD_TEXT].size,
		.name = mod->name,
	};

	if (!thunks_initialized)
		return;

	mutex_lock(&text_mutex);
	callthunks_setup(cs, &ct);
	mutex_unlock(&text_mutex);
}
#endif /* CONFIG_MODULES */

#if defined(CONFIG_CALL_THUNKS_DEBUG) && defined(CONFIG_DEBUG_FS)
static int callthunks_debug_show(struct seq_file *m, void *p)
{
	unsigned long cpu = (unsigned long)m->private;

	seq_printf(m, "C: %16llu R: %16llu S: %16llu X: %16llu\n",
		   per_cpu(__x86_call_count, cpu),
		   per_cpu(__x86_ret_count, cpu),
		   per_cpu(__x86_stuffs_count, cpu),
		   per_cpu(__x86_ctxsw_count, cpu));
	return 0;
}

static int callthunks_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, callthunks_debug_show, inode->i_private);
}

static const struct file_operations dfs_ops = {
	.open		= callthunks_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init callthunks_debugfs_init(void)
{
	struct dentry *dir;
	unsigned long cpu;

	dir = debugfs_create_dir("callthunks", NULL);
	for_each_possible_cpu(cpu) {
		void *arg = (void *)cpu;
		char name[10];

		sprintf(name, "cpu%lu", cpu);
		debugfs_create_file(name, 0644, dir, arg, &dfs_ops);
	}
	return 0;
}
__initcall(callthunks_debugfs_init);
#endif
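For reference, here is a minimal userspace sketch for reading the per-CPU counters exposed by callthunks_debugfs_init() above. It assumes a kernel built with CONFIG_CALL_THUNKS_DEBUG and CONFIG_DEBUG_FS, and debugfs mounted at the conventional /sys/kernel/debug; the path and output format are taken directly from the listing ("callthunks/cpu%lu" and the "C: ... R: ... S: ... X: ..." line):

/* Sketch: dump the call depth tracking counters for CPU 0.
 * Assumes debugfs is mounted at /sys/kernel/debug and the kernel
 * was built with CONFIG_CALL_THUNKS_DEBUG=y and CONFIG_DEBUG_FS=y.
 */
#include <stdio.h>

int main(void)
{
	char buf[128];
	FILE *f = fopen("/sys/kernel/debug/callthunks/cpu0", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		fputs(buf, stdout);	/* C: calls, R: rets, S: stuffs, X: ctxsw */
	fclose(f);
	return 0;
}

Reading the file typically requires root, since debugfs is not world-readable by default.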