// SPDX-License-Identifier: GPL-2.0-only

#define pr_fmt(fmt) "callthunks: " fmt

#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/memory.h>
#include <linux/moduleloader.h>
#include <linux/static_call.h>

#include <asm/alternative.h>
#include <asm/asm-offsets.h>
#include <asm/cpu.h>
#include <asm/ftrace.h>
#include <asm/insn.h>
#include <asm/kexec.h>
#include <asm/nospec-branch.h>
#include <asm/paravirt.h>
#include <asm/sections.h>
#include <asm/switch_to.h>
#include <asm/sync_core.h>
#include <asm/text-patching.h>
#include <asm/xen/hypercall.h>

static int __initdata_or_module debug_callthunks;

#define MAX_PATCH_LEN (255-1)

#define prdbg(fmt, args...)					\
do {								\
	if (debug_callthunks)					\
		printk(KERN_DEBUG pr_fmt(fmt), ##args);		\
} while(0)

static int __init debug_thunks(char *str)
{
	debug_callthunks = 1;
	return 1;
}
__setup("debug-callthunks", debug_thunks);

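/*
 * Per-CPU debug counters for calls, returns, RSB stuffing events and
 * context switches. Exposed via the debugfs interface at the bottom of
 * this file.
 */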
#ifdef CONFIG_CALL_THUNKS_DEBUG
DEFINE_PER_CPU(u64, __x86_call_count);
DEFINE_PER_CPU(u64, __x86_ret_count);
DEFINE_PER_CPU(u64, __x86_stuffs_count);
DEFINE_PER_CPU(u64, __x86_ctxsw_count);
EXPORT_PER_CPU_SYMBOL_GPL(__x86_ctxsw_count);
EXPORT_PER_CPU_SYMBOL_GPL(__x86_call_count);
#endif

extern s32 __call_sites[], __call_sites_end[];

struct core_text {
	unsigned long	base;
	unsigned long	end;
	const char	*name;
};

static bool thunks_initialized __ro_after_init;

static const struct core_text builtin_coretext = {
	.base = (unsigned long)_text,
	.end  = (unsigned long)_etext,
	.name = "builtin",
};

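/*
 * Template for the per-function accounting thunk. skl_call_thunk_tail
 * marks its end so that SKL_TMPL_SIZE can be computed.
 */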
asm (
	".pushsection .rodata			\n"
	".global skl_call_thunk_template	\n"
	"skl_call_thunk_template:		\n"
		__stringify(INCREMENT_CALL_DEPTH)"	\n"
	".global skl_call_thunk_tail		\n"
	"skl_call_thunk_tail:			\n"
	".popsection				\n"
);

extern u8 skl_call_thunk_template[];
extern u8 skl_call_thunk_tail[];

#define SKL_TMPL_SIZE \
	((unsigned int)(skl_call_thunk_tail - skl_call_thunk_template))

extern void error_entry(void);
extern void xen_error_entry(void);
extern void paranoid_entry(void);

static inline bool within_coretext(const struct core_text *ct, void *addr)
{
	unsigned long p = (unsigned long)addr;

	return ct->base <= p && p < ct->end;
}

static inline bool within_module_coretext(void *addr)
{
	bool ret = false;

#ifdef CONFIG_MODULES
	struct module *mod;

	preempt_disable();
	mod = __module_address((unsigned long)addr);
	if (mod && within_module_core((unsigned long)addr, mod))
		ret = true;
	preempt_enable();
#endif
	return ret;
}

static bool is_coretext(const struct core_text *ct, void *addr)
{
	if (ct && within_coretext(ct, addr))
		return true;
	if (within_coretext(&builtin_coretext, addr))
		return true;
	return within_module_coretext(addr);
}

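/*
 * Call destinations which are exempt from call depth accounting and must
 * not be redirected through an accounting thunk.
 */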
static bool skip_addr(void *dest)
{
	if (dest == error_entry)
		return true;
	if (dest == paranoid_entry)
		return true;
	if (dest == xen_error_entry)
		return true;
	/* Does FILL_RSB... */
	if (dest == __switch_to_asm)
		return true;
	/* Accounts directly */
	if (dest == ret_from_fork)
		return true;
#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_AMD_MEM_ENCRYPT)
	if (dest == soft_restart_cpu)
		return true;
#endif
#ifdef CONFIG_FUNCTION_TRACER
	if (dest == __fentry__)
		return true;
#endif
#ifdef CONFIG_KEXEC_CORE
	if (dest >= (void *)relocate_kernel &&
	    dest < (void *)relocate_kernel + KEXEC_CONTROL_CODE_MAX_SIZE)
		return true;
#endif
#ifdef CONFIG_XEN
	if (dest >= (void *)hypercall_page &&
	    dest < (void *)hypercall_page + PAGE_SIZE)
		return true;
#endif
	return false;
}

static __init_or_module void *call_get_dest(void *addr)
{
	struct insn insn;
	void *dest;
	int ret;

	ret = insn_decode_kernel(&insn, addr);
	if (ret)
		return ERR_PTR(ret);

	/* Patched out call? */
	if (insn.opcode.bytes[0] != CALL_INSN_OPCODE)
		return NULL;

	dest = addr + insn.length + insn.immediate.value;
	if (skip_addr(dest))
		return NULL;
	return dest;
}

static const u8 nops[] = {
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
};

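/*
 * Place the accounting thunk in the padding area in front of @dest. The
 * template is relocated for the target position, compared against the
 * existing padding to detect an already patched site, and then written
 * either directly or via text_poke_copy_locked(), depending on @direct.
 * Returns the thunk address, or NULL when the padding does not consist
 * of NOPs.
 */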
static void *patch_dest(void *dest, bool direct)
{
	unsigned int tsize = SKL_TMPL_SIZE;
	u8 insn_buff[MAX_PATCH_LEN];
	u8 *pad = dest - tsize;

	memcpy(insn_buff, skl_call_thunk_template, tsize);
	apply_relocation(insn_buff, tsize, pad,
			 skl_call_thunk_template, tsize);

	/* Already patched? */
	if (!bcmp(pad, insn_buff, tsize))
		return pad;

	/* Ensure there are nops */
	if (bcmp(pad, nops, tsize)) {
		pr_warn_once("Invalid padding area for %pS\n", dest);
		return NULL;
	}

	if (direct)
		memcpy(pad, insn_buff, tsize);
	else
		text_poke_copy_locked(pad, insn_buff, tsize, true);
	return pad;
}

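/*
 * Rewrite the CALL instruction at @addr so that it targets the accounting
 * thunk placed in front of its original destination.
 */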
static __init_or_module void patch_call(void *addr, const struct core_text *ct)
{
	void *pad, *dest;
	u8 bytes[8];

	if (!within_coretext(ct, addr))
		return;

	dest = call_get_dest(addr);
	if (!dest || WARN_ON_ONCE(IS_ERR(dest)))
		return;

	if (!is_coretext(ct, dest))
		return;

	pad = patch_dest(dest, within_coretext(ct, dest));
	if (!pad)
		return;

	prdbg("Patch call at: %pS %px to %pS %px -> %px \n", addr, addr,
		dest, dest, pad);
	__text_gen_insn(bytes, CALL_INSN_OPCODE, addr, pad, CALL_INSN_SIZE);
	text_poke_early(addr, bytes, CALL_INSN_SIZE);
}

static __init_or_module void
patch_call_sites(s32 *start, s32 *end, const struct core_text *ct)
{
	s32 *s;

	for (s = start; s < end; s++)
		patch_call((void *)s + *s, ct);
}

static __init_or_module void
patch_alt_call_sites(struct alt_instr *start, struct alt_instr *end,
		     const struct core_text *ct)
{
	struct alt_instr *a;

	for (a = start; a < end; a++)
		patch_call((void *)&a->instr_offset + a->instr_offset, ct);
}

static __init_or_module void
callthunks_setup(struct callthunk_sites *cs, const struct core_text *ct)
{
	prdbg("Patching call sites %s\n", ct->name);
	patch_call_sites(cs->call_start, cs->call_end, ct);
	patch_alt_call_sites(cs->alt_start, cs->alt_end, ct);
	prdbg("Patching call sites done%s\n", ct->name);
}

void __init callthunks_patch_builtin_calls(void)
{
	struct callthunk_sites cs = {
		.call_start	= __call_sites,
		.call_end	= __call_sites_end,
		.alt_start	= __alt_instructions,
		.alt_end	= __alt_instructions_end
	};

	if (!cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
		return;

	pr_info("Setting up call depth tracking\n");
	mutex_lock(&text_mutex);
	callthunks_setup(&cs, &builtin_coretext);
	thunks_initialized = true;
	mutex_unlock(&text_mutex);
}

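/*
 * Translate a call destination to the address of its accounting thunk.
 * Falls back to the original destination when no thunk can be placed.
 */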
void *callthunks_translate_call_dest(void *dest)
{
	void *target;

	lockdep_assert_held(&text_mutex);

	if (!thunks_initialized || skip_addr(dest))
		return dest;

	if (!is_coretext(NULL, dest))
		return dest;

	target = patch_dest(dest, false);
	return target ? : dest;
}

#ifdef CONFIG_BPF_JIT
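/*
 * Check whether the padding in front of @addr (rounded up to the function
 * alignment) already contains the accounting thunk template.
 */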
static bool is_callthunk(void *addr)
{
	unsigned int tmpl_size = SKL_TMPL_SIZE;
	u8 insn_buff[MAX_PATCH_LEN];
	unsigned long dest;
	u8 *pad;

	dest = roundup((unsigned long)addr, CONFIG_FUNCTION_ALIGNMENT);
	if (!thunks_initialized || skip_addr((void *)dest))
		return false;

	pad = (void *)(dest - tmpl_size);

	memcpy(insn_buff, skl_call_thunk_template, tmpl_size);
	apply_relocation(insn_buff, tmpl_size, pad,
			 skl_call_thunk_template, tmpl_size);

	return !bcmp(pad, insn_buff, tmpl_size);
}

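/*
 * Emit the accounting thunk template into BPF JIT generated code at @ip,
 * unless the call target @func already is a thunk. Returns the number of
 * bytes emitted.
 */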
int x86_call_depth_emit_accounting(u8 **pprog, void *func, void *ip)
{
	unsigned int tmpl_size = SKL_TMPL_SIZE;
	u8 insn_buff[MAX_PATCH_LEN];

	if (!thunks_initialized)
		return 0;

	/* Is function call target a thunk? */
	if (func && is_callthunk(func))
		return 0;

	memcpy(insn_buff, skl_call_thunk_template, tmpl_size);
	apply_relocation(insn_buff, tmpl_size, ip,
			 skl_call_thunk_template, tmpl_size);

	memcpy(*pprog, insn_buff, tmpl_size);
	*pprog += tmpl_size;
	return tmpl_size;
}
#endif

#ifdef CONFIG_MODULES
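/*
 * Patch the call sites of a module, using the module's core text as the
 * coverage area.
 */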
void noinline callthunks_patch_module_calls(struct callthunk_sites *cs,
					    struct module *mod)
{
	struct core_text ct = {
		.base = (unsigned long)mod->mem[MOD_TEXT].base,
		.end  = (unsigned long)mod->mem[MOD_TEXT].base + mod->mem[MOD_TEXT].size,
		.name = mod->name,
	};

	if (!thunks_initialized)
		return;

	mutex_lock(&text_mutex);
	callthunks_setup(cs, &ct);
	mutex_unlock(&text_mutex);
}
#endif /* CONFIG_MODULES */

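/*
 * debugfs interface: one file per possible CPU exposing the per-CPU
 * thunk counters.
 */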
#if defined(CONFIG_CALL_THUNKS_DEBUG) && defined(CONFIG_DEBUG_FS)
static int callthunks_debug_show(struct seq_file *m, void *p)
{
	unsigned long cpu = (unsigned long)m->private;

	seq_printf(m, "C: %16llu R: %16llu S: %16llu X: %16llu\n",
		   per_cpu(__x86_call_count, cpu),
		   per_cpu(__x86_ret_count, cpu),
		   per_cpu(__x86_stuffs_count, cpu),
		   per_cpu(__x86_ctxsw_count, cpu));
	return 0;
}

static int callthunks_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, callthunks_debug_show, inode->i_private);
}

static const struct file_operations dfs_ops = {
	.open		= callthunks_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init callthunks_debugfs_init(void)
{
	struct dentry *dir;
	unsigned long cpu;

	dir = debugfs_create_dir("callthunks", NULL);
	for_each_possible_cpu(cpu) {
		void *arg = (void *)cpu;
		char name[10];

		sprintf(name, "cpu%lu", cpu);
		debugfs_create_file(name, 0644, dir, arg, &dfs_ops);
	}
	return 0;
}
__initcall(callthunks_debugfs_init);
#endif