arch/x86/kernel/callthunks.c (v6.13.7)
// SPDX-License-Identifier: GPL-2.0-only

#define pr_fmt(fmt) "callthunks: " fmt

#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/memory.h>
#include <linux/moduleloader.h>
#include <linux/static_call.h>

#include <asm/alternative.h>
#include <asm/asm-offsets.h>
#include <asm/cpu.h>
#include <asm/ftrace.h>
#include <asm/insn.h>
#include <asm/kexec.h>
#include <asm/nospec-branch.h>
#include <asm/paravirt.h>
#include <asm/sections.h>
#include <asm/switch_to.h>
#include <asm/sync_core.h>
#include <asm/text-patching.h>
#include <asm/xen/hypercall.h>

static int __initdata_or_module debug_callthunks;
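/*
 * Scratch buffer size for assembling a relocated copy of the thunk
 * template before it is compared against or written into a padding area.
 */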
#define MAX_PATCH_LEN (255-1)

#define prdbg(fmt, args...)					\
do {								\
	if (debug_callthunks)					\
		printk(KERN_DEBUG pr_fmt(fmt), ##args);		\
} while(0)

static int __init debug_thunks(char *str)
{
	debug_callthunks = 1;
	return 1;
}
__setup("debug-callthunks", debug_thunks);

#ifdef CONFIG_CALL_THUNKS_DEBUG
DEFINE_PER_CPU(u64, __x86_call_count);
DEFINE_PER_CPU(u64, __x86_ret_count);
DEFINE_PER_CPU(u64, __x86_stuffs_count);
DEFINE_PER_CPU(u64, __x86_ctxsw_count);
EXPORT_PER_CPU_SYMBOL_GPL(__x86_ctxsw_count);
EXPORT_PER_CPU_SYMBOL_GPL(__x86_call_count);
#endif

extern s32 __call_sites[], __call_sites_end[];

struct core_text {
	unsigned long	base;
	unsigned long	end;
	const char	*name;
};

static bool thunks_initialized __ro_after_init;

static const struct core_text builtin_coretext = {
	.base = (unsigned long)_text,
	.end  = (unsigned long)_etext,
	.name = "builtin",
};
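/*
 * The thunk template: the INCREMENT_CALL_DEPTH sequence, which updates
 * the per-CPU call depth counter. The bytes between
 * skl_call_thunk_template and skl_call_thunk_tail are copied in front
 * of each call target so that every call gets accounted.
 */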
asm (
	".pushsection .rodata				\n"
	".global skl_call_thunk_template		\n"
	"skl_call_thunk_template:			\n"
		__stringify(INCREMENT_CALL_DEPTH)"	\n"
	".global skl_call_thunk_tail			\n"
	"skl_call_thunk_tail:				\n"
	".popsection					\n"
);

extern u8 skl_call_thunk_template[];
extern u8 skl_call_thunk_tail[];

#define SKL_TMPL_SIZE \
	((unsigned int)(skl_call_thunk_tail - skl_call_thunk_template))

extern void error_entry(void);
extern void xen_error_entry(void);
extern void paranoid_entry(void);

static inline bool within_coretext(const struct core_text *ct, void *addr)
{
	unsigned long p = (unsigned long)addr;

	return ct->base <= p && p < ct->end;
}

static inline bool within_module_coretext(void *addr)
{
	bool ret = false;

#ifdef CONFIG_MODULES
	struct module *mod;

	preempt_disable();
	mod = __module_address((unsigned long)addr);
	if (mod && within_module_core((unsigned long)addr, mod))
		ret = true;
	preempt_enable();
#endif
	return ret;
}

static bool is_coretext(const struct core_text *ct, void *addr)
{
	if (ct && within_coretext(ct, addr))
		return true;
	if (within_coretext(&builtin_coretext, addr))
		return true;
	return within_module_coretext(addr);
}

static bool skip_addr(void *dest)
{
	if (dest == error_entry)
		return true;
	if (dest == paranoid_entry)
		return true;
	if (dest == xen_error_entry)
		return true;
	/* Does FILL_RSB... */
	if (dest == __switch_to_asm)
		return true;
	/* Accounts directly */
	if (dest == ret_from_fork)
		return true;
#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_AMD_MEM_ENCRYPT)
	if (dest == soft_restart_cpu)
		return true;
#endif
#ifdef CONFIG_FUNCTION_TRACER
	if (dest == __fentry__)
		return true;
#endif
#ifdef CONFIG_KEXEC_CORE
	if (dest >= (void *)relocate_kernel &&
	    dest < (void *)relocate_kernel + KEXEC_CONTROL_CODE_MAX_SIZE)
		return true;
#endif
	return false;
}

static __init_or_module void *call_get_dest(void *addr)
{
	struct insn insn;
	void *dest;
	int ret;

	ret = insn_decode_kernel(&insn, addr);
	if (ret)
		return ERR_PTR(ret);

	/* Patched out call? */
	if (insn.opcode.bytes[0] != CALL_INSN_OPCODE)
		return NULL;

	dest = addr + insn.length + insn.immediate.value;
	if (skip_addr(dest))
		return NULL;
	return dest;
}

static const u8 nops[] = {
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
};
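/*
 * Install the accounting thunk into the padding area which the
 * compiler emits in front of @dest. If @direct is false the target
 * text is already live and read-only, so the copy has to go through
 * the text poke machinery.
 */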
static void *patch_dest(void *dest, bool direct)
{
	unsigned int tsize = SKL_TMPL_SIZE;
	u8 insn_buff[MAX_PATCH_LEN];
	u8 *pad = dest - tsize;

	memcpy(insn_buff, skl_call_thunk_template, tsize);
	apply_relocation(insn_buff, pad, tsize, skl_call_thunk_template, tsize);

	/* Already patched? */
	if (!bcmp(pad, insn_buff, tsize))
		return pad;

	/* Ensure there are nops */
	if (bcmp(pad, nops, tsize)) {
		pr_warn_once("Invalid padding area for %pS\n", dest);
		return NULL;
	}

	if (direct)
		memcpy(pad, insn_buff, tsize);
	else
		text_poke_copy_locked(pad, insn_buff, tsize, true);
	return pad;
}

static __init_or_module void patch_call(void *addr, const struct core_text *ct)
{
	void *pad, *dest;
	u8 bytes[8];

	if (!within_coretext(ct, addr))
		return;

	dest = call_get_dest(addr);
	if (!dest || WARN_ON_ONCE(IS_ERR(dest)))
		return;

	if (!is_coretext(ct, dest))
		return;

	pad = patch_dest(dest, within_coretext(ct, dest));
	if (!pad)
		return;

	prdbg("Patch call at: %pS %px to %pS %px -> %px \n", addr, addr,
		dest, dest, pad);
	__text_gen_insn(bytes, CALL_INSN_OPCODE, addr, pad, CALL_INSN_SIZE);
	text_poke_early(addr, bytes, CALL_INSN_SIZE);
}
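/*
 * The .call_sites section stores each call site as a self-relative
 * s32 offset, hence the address of the call instruction is
 * (void *)s + *s.
 */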
static __init_or_module void
patch_call_sites(s32 *start, s32 *end, const struct core_text *ct)
{
	s32 *s;

	for (s = start; s < end; s++)
		patch_call((void *)s + *s, ct);
}

static __init_or_module void
patch_alt_call_sites(struct alt_instr *start, struct alt_instr *end,
		     const struct core_text *ct)
{
	struct alt_instr *a;

	for (a = start; a < end; a++)
		patch_call((void *)&a->instr_offset + a->instr_offset, ct);
}

static __init_or_module void
callthunks_setup(struct callthunk_sites *cs, const struct core_text *ct)
{
	prdbg("Patching call sites %s\n", ct->name);
	patch_call_sites(cs->call_start, cs->call_end, ct);
	patch_alt_call_sites(cs->alt_start, cs->alt_end, ct);
	prdbg("Patching call sites done %s\n", ct->name);
}

void __init callthunks_patch_builtin_calls(void)
{
	struct callthunk_sites cs = {
		.call_start	= __call_sites,
		.call_end	= __call_sites_end,
		.alt_start	= __alt_instructions,
		.alt_end	= __alt_instructions_end
	};

	if (!cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
		return;

	pr_info("Setting up call depth tracking\n");
	mutex_lock(&text_mutex);
	callthunks_setup(&cs, &builtin_coretext);
	thunks_initialized = true;
	mutex_unlock(&text_mutex);
}
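/*
 * Redirect a call destination to its accounting thunk, installing the
 * thunk in the padding area first if necessary. Callers such as the
 * static call machinery fall back to the original destination when no
 * thunk can be placed.
 */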
void *callthunks_translate_call_dest(void *dest)
{
	void *target;

	lockdep_assert_held(&text_mutex);

	if (!thunks_initialized || skip_addr(dest))
		return dest;

	if (!is_coretext(NULL, dest))
		return dest;

	target = patch_dest(dest, false);
	return target ? : dest;
}

#ifdef CONFIG_BPF_JIT
static bool is_callthunk(void *addr)
{
	unsigned int tmpl_size = SKL_TMPL_SIZE;
	u8 insn_buff[MAX_PATCH_LEN];
	unsigned long dest;
	u8 *pad;

	dest = roundup((unsigned long)addr, CONFIG_FUNCTION_ALIGNMENT);
	if (!thunks_initialized || skip_addr((void *)dest))
		return false;

	pad = (void *)(dest - tmpl_size);

	memcpy(insn_buff, skl_call_thunk_template, tmpl_size);
	apply_relocation(insn_buff, pad, tmpl_size, skl_call_thunk_template, tmpl_size);

	return !bcmp(pad, insn_buff, tmpl_size);
}
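/*
 * The BPF JIT has no compiler-generated padding to patch, so it emits
 * a relocated copy of the accounting template straight into the JIT
 * image at @ip, unless the call target already is a thunk.
 */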
int x86_call_depth_emit_accounting(u8 **pprog, void *func, void *ip)
{
	unsigned int tmpl_size = SKL_TMPL_SIZE;
	u8 insn_buff[MAX_PATCH_LEN];

	if (!thunks_initialized)
		return 0;

	/* Is function call target a thunk? */
	if (func && is_callthunk(func))
		return 0;

	memcpy(insn_buff, skl_call_thunk_template, tmpl_size);
	apply_relocation(insn_buff, ip, tmpl_size, skl_call_thunk_template, tmpl_size);

	memcpy(*pprog, insn_buff, tmpl_size);
	*pprog += tmpl_size;
	return tmpl_size;
}
#endif

#ifdef CONFIG_MODULES
void noinline callthunks_patch_module_calls(struct callthunk_sites *cs,
					    struct module *mod)
{
	struct core_text ct = {
		.base = (unsigned long)mod->mem[MOD_TEXT].base,
		.end  = (unsigned long)mod->mem[MOD_TEXT].base + mod->mem[MOD_TEXT].size,
		.name = mod->name,
	};

	if (!thunks_initialized)
		return;

	mutex_lock(&text_mutex);
	callthunks_setup(cs, &ct);
	mutex_unlock(&text_mutex);
}
#endif /* CONFIG_MODULES */

#if defined(CONFIG_CALL_THUNKS_DEBUG) && defined(CONFIG_DEBUG_FS)
static int callthunks_debug_show(struct seq_file *m, void *p)
{
	unsigned long cpu = (unsigned long)m->private;
	seq_printf(m, "C: %16llu R: %16llu S: %16llu X: %16llu\n",
		   per_cpu(__x86_call_count, cpu),
		   per_cpu(__x86_ret_count, cpu),
		   per_cpu(__x86_stuffs_count, cpu),
		   per_cpu(__x86_ctxsw_count, cpu));
	return 0;
}

static int callthunks_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, callthunks_debug_show, inode->i_private);
}

static const struct file_operations dfs_ops = {
	.open		= callthunks_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init callthunks_debugfs_init(void)
{
	struct dentry *dir;
	unsigned long cpu;

	dir = debugfs_create_dir("callthunks", NULL);
	for_each_possible_cpu(cpu) {
		void *arg = (void *)cpu;
		char name[10];

		sprintf(name, "cpu%lu", cpu);
		debugfs_create_file(name, 0644, dir, arg, &dfs_ops);
	}
	return 0;
}
__initcall(callthunks_debugfs_init);
#endif
arch/x86/kernel/callthunks.c (v6.2)
// SPDX-License-Identifier: GPL-2.0-only

#define pr_fmt(fmt) "callthunks: " fmt

#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/memory.h>
#include <linux/moduleloader.h>
#include <linux/static_call.h>

#include <asm/alternative.h>
#include <asm/asm-offsets.h>
#include <asm/cpu.h>
#include <asm/ftrace.h>
#include <asm/insn.h>
#include <asm/kexec.h>
#include <asm/nospec-branch.h>
#include <asm/paravirt.h>
#include <asm/sections.h>
#include <asm/switch_to.h>
#include <asm/sync_core.h>
#include <asm/text-patching.h>
#include <asm/xen/hypercall.h>

static int __initdata_or_module debug_callthunks;

#define prdbg(fmt, args...)					\
do {								\
	if (debug_callthunks)					\
		printk(KERN_DEBUG pr_fmt(fmt), ##args);		\
} while(0)

static int __init debug_thunks(char *str)
{
	debug_callthunks = 1;
	return 1;
}
__setup("debug-callthunks", debug_thunks);

#ifdef CONFIG_CALL_THUNKS_DEBUG
DEFINE_PER_CPU(u64, __x86_call_count);
DEFINE_PER_CPU(u64, __x86_ret_count);
DEFINE_PER_CPU(u64, __x86_stuffs_count);
DEFINE_PER_CPU(u64, __x86_ctxsw_count);
EXPORT_SYMBOL_GPL(__x86_ctxsw_count);
EXPORT_SYMBOL_GPL(__x86_call_count);
#endif

extern s32 __call_sites[], __call_sites_end[];

struct thunk_desc {
	void		*template;
	unsigned int	template_size;
};

struct core_text {
	unsigned long	base;
	unsigned long	end;
	const char	*name;
};

static bool thunks_initialized __ro_after_init;

static const struct core_text builtin_coretext = {
	.base = (unsigned long)_text,
	.end  = (unsigned long)_etext,
	.name = "builtin",
};

asm (
	".pushsection .rodata				\n"
	".global skl_call_thunk_template		\n"
	"skl_call_thunk_template:			\n"
		__stringify(INCREMENT_CALL_DEPTH)"	\n"
	".global skl_call_thunk_tail			\n"
	"skl_call_thunk_tail:				\n"
	".popsection					\n"
);

extern u8 skl_call_thunk_template[];
extern u8 skl_call_thunk_tail[];

#define SKL_TMPL_SIZE \
	((unsigned int)(skl_call_thunk_tail - skl_call_thunk_template))

extern void error_entry(void);
extern void xen_error_entry(void);
extern void paranoid_entry(void);

static inline bool within_coretext(const struct core_text *ct, void *addr)
{
	unsigned long p = (unsigned long)addr;

	return ct->base <= p && p < ct->end;
}

static inline bool within_module_coretext(void *addr)
{
	bool ret = false;

#ifdef CONFIG_MODULES
	struct module *mod;

	preempt_disable();
	mod = __module_address((unsigned long)addr);
	if (mod && within_module_core((unsigned long)addr, mod))
		ret = true;
	preempt_enable();
#endif
	return ret;
}

static bool is_coretext(const struct core_text *ct, void *addr)
{
	if (ct && within_coretext(ct, addr))
		return true;
	if (within_coretext(&builtin_coretext, addr))
		return true;
	return within_module_coretext(addr);
}

static bool skip_addr(void *dest)
{
	if (dest == error_entry)
		return true;
	if (dest == paranoid_entry)
		return true;
	if (dest == xen_error_entry)
		return true;
	/* Does FILL_RSB... */
	if (dest == __switch_to_asm)
		return true;
	/* Accounts directly */
	if (dest == ret_from_fork)
		return true;
#ifdef CONFIG_HOTPLUG_CPU
	if (dest == start_cpu0)
		return true;
#endif
#ifdef CONFIG_FUNCTION_TRACER
	if (dest == __fentry__)
		return true;
#endif
#ifdef CONFIG_KEXEC_CORE
	if (dest >= (void *)relocate_kernel &&
	    dest < (void *)relocate_kernel + KEXEC_CONTROL_CODE_MAX_SIZE)
		return true;
#endif
#ifdef CONFIG_XEN
	if (dest >= (void *)hypercall_page &&
	    dest < (void *)hypercall_page + PAGE_SIZE)
		return true;
#endif
	return false;
}

static __init_or_module void *call_get_dest(void *addr)
{
	struct insn insn;
	void *dest;
	int ret;

	ret = insn_decode_kernel(&insn, addr);
	if (ret)
		return ERR_PTR(ret);

	/* Patched out call? */
	if (insn.opcode.bytes[0] != CALL_INSN_OPCODE)
		return NULL;

	dest = addr + insn.length + insn.immediate.value;
	if (skip_addr(dest))
		return NULL;
	return dest;
}

static const u8 nops[] = {
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
};

static void *patch_dest(void *dest, bool direct)
{
	unsigned int tsize = SKL_TMPL_SIZE;
	u8 *pad = dest - tsize;

	/* Already patched? */
	if (!bcmp(pad, skl_call_thunk_template, tsize))
		return pad;

	/* Ensure there are nops */
	if (bcmp(pad, nops, tsize)) {
		pr_warn_once("Invalid padding area for %pS\n", dest);
		return NULL;
	}

	if (direct)
		memcpy(pad, skl_call_thunk_template, tsize);
	else
		text_poke_copy_locked(pad, skl_call_thunk_template, tsize, true);
	return pad;
}

static __init_or_module void patch_call(void *addr, const struct core_text *ct)
{
	void *pad, *dest;
	u8 bytes[8];

	if (!within_coretext(ct, addr))
		return;

	dest = call_get_dest(addr);
	if (!dest || WARN_ON_ONCE(IS_ERR(dest)))
		return;

	if (!is_coretext(ct, dest))
		return;

	pad = patch_dest(dest, within_coretext(ct, dest));
	if (!pad)
		return;

	prdbg("Patch call at: %pS %px to %pS %px -> %px \n", addr, addr,
		dest, dest, pad);
	__text_gen_insn(bytes, CALL_INSN_OPCODE, addr, pad, CALL_INSN_SIZE);
	text_poke_early(addr, bytes, CALL_INSN_SIZE);
}

static __init_or_module void
patch_call_sites(s32 *start, s32 *end, const struct core_text *ct)
{
	s32 *s;

	for (s = start; s < end; s++)
		patch_call((void *)s + *s, ct);
}
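/*
 * In v6.2 paravirt patch sites are still recorded separately; the
 * v6.13.7 version above patches the equivalent alternatives entries
 * (struct alt_instr) instead.
 */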
static __init_or_module void
patch_paravirt_call_sites(struct paravirt_patch_site *start,
			  struct paravirt_patch_site *end,
			  const struct core_text *ct)
{
	struct paravirt_patch_site *p;

	for (p = start; p < end; p++)
		patch_call(p->instr, ct);
}

static __init_or_module void
callthunks_setup(struct callthunk_sites *cs, const struct core_text *ct)
{
	prdbg("Patching call sites %s\n", ct->name);
	patch_call_sites(cs->call_start, cs->call_end, ct);
	patch_paravirt_call_sites(cs->pv_start, cs->pv_end, ct);
	prdbg("Patching call sites done %s\n", ct->name);
}

void __init callthunks_patch_builtin_calls(void)
{
	struct callthunk_sites cs = {
		.call_start	= __call_sites,
		.call_end	= __call_sites_end,
		.pv_start	= __parainstructions,
		.pv_end		= __parainstructions_end
	};

	if (!cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
		return;

	pr_info("Setting up call depth tracking\n");
	mutex_lock(&text_mutex);
	callthunks_setup(&cs, &builtin_coretext);
	static_call_force_reinit();
	thunks_initialized = true;
	mutex_unlock(&text_mutex);
}

void *callthunks_translate_call_dest(void *dest)
{
	void *target;

	lockdep_assert_held(&text_mutex);

	if (!thunks_initialized || skip_addr(dest))
		return dest;

	if (!is_coretext(NULL, dest))
		return dest;

	target = patch_dest(dest, false);
	return target ? : dest;
}
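/*
 * Check whether the padding in front of the function which @addr
 * resolves to (rounded up to the function alignment) already contains
 * the thunk template, i.e. whether the target is callthunk patched.
 */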
bool is_callthunk(void *addr)
{
	unsigned int tmpl_size = SKL_TMPL_SIZE;
	void *tmpl = skl_call_thunk_template;
	unsigned long dest;

	dest = roundup((unsigned long)addr, CONFIG_FUNCTION_ALIGNMENT);
	if (!thunks_initialized || skip_addr((void *)dest))
		return false;

	return !bcmp((void *)(dest - tmpl_size), tmpl, tmpl_size);
}

#ifdef CONFIG_BPF_JIT
int x86_call_depth_emit_accounting(u8 **pprog, void *func)
{
	unsigned int tmpl_size = SKL_TMPL_SIZE;
	void *tmpl = skl_call_thunk_template;

	if (!thunks_initialized)
		return 0;

	/* Is function call target a thunk? */
	if (func && is_callthunk(func))
		return 0;

	memcpy(*pprog, tmpl, tmpl_size);
	*pprog += tmpl_size;
	return tmpl_size;
}
#endif

#ifdef CONFIG_MODULES
void noinline callthunks_patch_module_calls(struct callthunk_sites *cs,
					    struct module *mod)
{
	struct core_text ct = {
		.base = (unsigned long)mod->core_layout.base,
		.end  = (unsigned long)mod->core_layout.base + mod->core_layout.size,
		.name = mod->name,
	};

	if (!thunks_initialized)
		return;

	mutex_lock(&text_mutex);
	callthunks_setup(cs, &ct);
	mutex_unlock(&text_mutex);
}
#endif /* CONFIG_MODULES */

#if defined(CONFIG_CALL_THUNKS_DEBUG) && defined(CONFIG_DEBUG_FS)
static int callthunks_debug_show(struct seq_file *m, void *p)
{
	unsigned long cpu = (unsigned long)m->private;
	seq_printf(m, "C: %16llu R: %16llu S: %16llu X: %16llu\n",
		   per_cpu(__x86_call_count, cpu),
		   per_cpu(__x86_ret_count, cpu),
		   per_cpu(__x86_stuffs_count, cpu),
		   per_cpu(__x86_ctxsw_count, cpu));
	return 0;
}

static int callthunks_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, callthunks_debug_show, inode->i_private);
}

static const struct file_operations dfs_ops = {
	.open		= callthunks_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init callthunks_debugfs_init(void)
{
	struct dentry *dir;
	unsigned long cpu;

	dir = debugfs_create_dir("callthunks", NULL);
	for_each_possible_cpu(cpu) {
		void *arg = (void *)cpu;
		char name[10];

		sprintf(name, "cpu%lu", cpu);
		debugfs_create_file(name, 0644, dir, arg, &dfs_ops);
	}
	return 0;
}
__initcall(callthunks_debugfs_init);
#endif