// SPDX-License-Identifier: GPL-2.0-only

#define pr_fmt(fmt) "callthunks: " fmt

#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/memory.h>
#include <linux/moduleloader.h>
#include <linux/static_call.h>

#include <asm/alternative.h>
#include <asm/asm-offsets.h>
#include <asm/cpu.h>
#include <asm/ftrace.h>
#include <asm/insn.h>
#include <asm/kexec.h>
#include <asm/nospec-branch.h>
#include <asm/paravirt.h>
#include <asm/sections.h>
#include <asm/switch_to.h>
#include <asm/sync_core.h>
#include <asm/text-patching.h>
#include <asm/xen/hypercall.h>

static int __initdata_or_module debug_callthunks;

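/*
 * Upper bound for the on-stack template staging buffers used below;
 * presumably mirrors the alternatives code's single-replacement limit,
 * where the length fields are u8.
 */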
#define MAX_PATCH_LEN (255-1)

#define prdbg(fmt, args...)					\
do {								\
	if (debug_callthunks)					\
		printk(KERN_DEBUG pr_fmt(fmt), ##args);		\
} while (0)

static int __init debug_thunks(char *str)
{
	debug_callthunks = 1;
	return 1;
}
__setup("debug-callthunks", debug_thunks);
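
/*
 * Optional per-CPU counters for calls, returns, RSB stuffs and context
 * switches, incremented from the accounting thunks when
 * CONFIG_CALL_THUNKS_DEBUG is enabled and dumped via debugfs below.
 */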
41
42#ifdef CONFIG_CALL_THUNKS_DEBUG
43DEFINE_PER_CPU(u64, __x86_call_count);
44DEFINE_PER_CPU(u64, __x86_ret_count);
45DEFINE_PER_CPU(u64, __x86_stuffs_count);
46DEFINE_PER_CPU(u64, __x86_ctxsw_count);
47EXPORT_PER_CPU_SYMBOL_GPL(__x86_ctxsw_count);
48EXPORT_PER_CPU_SYMBOL_GPL(__x86_call_count);
49#endif
50
extern s32 __call_sites[], __call_sites_end[];

struct core_text {
	unsigned long	base;
	unsigned long	end;
	const char	*name;
};

static bool thunks_initialized __ro_after_init;

static const struct core_text builtin_coretext = {
	.base = (unsigned long)_text,
	.end  = (unsigned long)_etext,
	.name = "builtin",
};

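/*
 * The accounting sequence is assembled once into .rodata, bracketed by
 * two global labels, so that C code can compute its size (SKL_TMPL_SIZE)
 * and copy it into the padding area in front of each call destination.
 */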
asm (
	".pushsection .rodata			\n"
	".global skl_call_thunk_template	\n"
	"skl_call_thunk_template:		\n"
		__stringify(INCREMENT_CALL_DEPTH)"	\n"
	".global skl_call_thunk_tail		\n"
	"skl_call_thunk_tail:			\n"
	".popsection				\n"
);

extern u8 skl_call_thunk_template[];
extern u8 skl_call_thunk_tail[];

#define SKL_TMPL_SIZE \
	((unsigned int)(skl_call_thunk_tail - skl_call_thunk_template))

extern void error_entry(void);
extern void xen_error_entry(void);
extern void paranoid_entry(void);

static inline bool within_coretext(const struct core_text *ct, void *addr)
{
	unsigned long p = (unsigned long)addr;

	return ct->base <= p && p < ct->end;
}

static inline bool within_module_coretext(void *addr)
{
	bool ret = false;

#ifdef CONFIG_MODULES
	struct module *mod;

	preempt_disable();
	mod = __module_address((unsigned long)addr);
	if (mod && within_module_core((unsigned long)addr, mod))
		ret = true;
	preempt_enable();
#endif
	return ret;
}

static bool is_coretext(const struct core_text *ct, void *addr)
{
	if (ct && within_coretext(ct, addr))
		return true;
	if (within_coretext(&builtin_coretext, addr))
		return true;
	return within_module_coretext(addr);
}

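/*
 * Targets which must not get a preceding accounting thunk: entry points
 * which do their own RSB/depth handling, or text which must stay
 * unpatched (e.g. the kexec control code).
 */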
static bool skip_addr(void *dest)
{
	if (dest == error_entry)
		return true;
	if (dest == paranoid_entry)
		return true;
	if (dest == xen_error_entry)
		return true;
	/* Does FILL_RSB... */
	if (dest == __switch_to_asm)
		return true;
	/* Accounts directly */
	if (dest == ret_from_fork)
		return true;
#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_AMD_MEM_ENCRYPT)
	if (dest == soft_restart_cpu)
		return true;
#endif
#ifdef CONFIG_FUNCTION_TRACER
	if (dest == __fentry__)
		return true;
#endif
#ifdef CONFIG_KEXEC_CORE
	if (dest >= (void *)relocate_kernel &&
	    dest < (void *)relocate_kernel + KEXEC_CONTROL_CODE_MAX_SIZE)
		return true;
#endif
	return false;
}

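/*
 * Decode the instruction at @addr. Returns the call destination, NULL if
 * the site is not (or no longer) a direct call or targets a skipped
 * symbol, and an ERR_PTR() when decoding fails.
 */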
static __init_or_module void *call_get_dest(void *addr)
{
	struct insn insn;
	void *dest;
	int ret;

	ret = insn_decode_kernel(&insn, addr);
	if (ret)
		return ERR_PTR(ret);

	/* Patched out call? */
	if (insn.opcode.bytes[0] != CALL_INSN_OPCODE)
		return NULL;

	dest = addr + insn.length + insn.immediate.value;
	if (skip_addr(dest))
		return NULL;
	return dest;
}

static const u8 nops[] = {
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
};

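/*
 * Copy the relocated template into the padding space immediately
 * preceding @dest. @direct selects a plain memcpy() for text which is
 * not yet live versus text_poke_copy_locked() for live text.
 */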
static void *patch_dest(void *dest, bool direct)
{
	unsigned int tsize = SKL_TMPL_SIZE;
	u8 insn_buff[MAX_PATCH_LEN];
	u8 *pad = dest - tsize;

	memcpy(insn_buff, skl_call_thunk_template, tsize);
	apply_relocation(insn_buff, pad, tsize, skl_call_thunk_template, tsize);

	/* Already patched? */
	if (!bcmp(pad, insn_buff, tsize))
		return pad;

	/* Ensure there are nops */
	if (bcmp(pad, nops, tsize)) {
		pr_warn_once("Invalid padding area for %pS\n", dest);
		return NULL;
	}

	if (direct)
		memcpy(pad, insn_buff, tsize);
	else
		text_poke_copy_locked(pad, insn_buff, tsize, true);
	return pad;
}

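/*
 * Patch one call site: decode the destination, make sure the destination
 * got its accounting thunk, then rewrite the call to target the thunk in
 * the padding area instead.
 */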
static __init_or_module void patch_call(void *addr, const struct core_text *ct)
{
	void *pad, *dest;
	u8 bytes[8];

	if (!within_coretext(ct, addr))
		return;

	dest = call_get_dest(addr);
	if (!dest || WARN_ON_ONCE(IS_ERR(dest)))
		return;

	if (!is_coretext(ct, dest))
		return;

	pad = patch_dest(dest, within_coretext(ct, dest));
	if (!pad)
		return;

	prdbg("Patch call at: %pS %px to %pS %px -> %px\n", addr, addr,
		dest, dest, pad);
	__text_gen_insn(bytes, CALL_INSN_OPCODE, addr, pad, CALL_INSN_SIZE);
	text_poke_early(addr, bytes, CALL_INSN_SIZE);
}

static __init_or_module void
patch_call_sites(s32 *start, s32 *end, const struct core_text *ct)
{
	s32 *s;

	for (s = start; s < end; s++)
		patch_call((void *)s + *s, ct);
}

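/*
 * Alternative entries store the site as an offset relative to their
 * instr_offset member itself, hence the pointer arithmetic below.
 */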
static __init_or_module void
patch_alt_call_sites(struct alt_instr *start, struct alt_instr *end,
		     const struct core_text *ct)
{
	struct alt_instr *a;

	for (a = start; a < end; a++)
		patch_call((void *)&a->instr_offset + a->instr_offset, ct);
}

static __init_or_module void
callthunks_setup(struct callthunk_sites *cs, const struct core_text *ct)
{
	prdbg("Patching call sites %s\n", ct->name);
	patch_call_sites(cs->call_start, cs->call_end, ct);
	patch_alt_call_sites(cs->alt_start, cs->alt_end, ct);
	prdbg("Patching call sites done %s\n", ct->name);
}

void __init callthunks_patch_builtin_calls(void)
{
	struct callthunk_sites cs = {
		.call_start	= __call_sites,
		.call_end	= __call_sites_end,
		.alt_start	= __alt_instructions,
		.alt_end	= __alt_instructions_end
	};

	if (!cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
		return;

	pr_info("Setting up call depth tracking\n");
	mutex_lock(&text_mutex);
	callthunks_setup(&cs, &builtin_coretext);
	thunks_initialized = true;
	mutex_unlock(&text_mutex);
}

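/*
 * Redirect a call destination to its accounting thunk, if one exists.
 * Invoked with text_mutex held by other text patching code, e.g. the
 * static call machinery.
 */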
void *callthunks_translate_call_dest(void *dest)
{
	void *target;

	lockdep_assert_held(&text_mutex);

	if (!thunks_initialized || skip_addr(dest))
		return dest;

	if (!is_coretext(NULL, dest))
		return dest;

	target = patch_dest(dest, false);
	return target ? : dest;
}

#ifdef CONFIG_BPF_JIT
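/*
 * Check whether @addr, rounded up to the function alignment, is already
 * preceded by the relocated accounting template, i.e. already has a
 * call depth tracking thunk.
 */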
static bool is_callthunk(void *addr)
{
	unsigned int tmpl_size = SKL_TMPL_SIZE;
	u8 insn_buff[MAX_PATCH_LEN];
	unsigned long dest;
	u8 *pad;

	dest = roundup((unsigned long)addr, CONFIG_FUNCTION_ALIGNMENT);
	if (!thunks_initialized || skip_addr((void *)dest))
		return false;

	pad = (void *)(dest - tmpl_size);

	memcpy(insn_buff, skl_call_thunk_template, tmpl_size);
	apply_relocation(insn_buff, pad, tmpl_size, skl_call_thunk_template, tmpl_size);

	return !bcmp(pad, insn_buff, tmpl_size);
}

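/*
 * Emit the accounting template, relocated against @ip, into the BPF JIT
 * output buffer, unless the call target already is a thunk. Returns the
 * number of bytes emitted so the JIT can account for them.
 */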
int x86_call_depth_emit_accounting(u8 **pprog, void *func, void *ip)
{
	unsigned int tmpl_size = SKL_TMPL_SIZE;
	u8 insn_buff[MAX_PATCH_LEN];

	if (!thunks_initialized)
		return 0;

	/* Is function call target a thunk? */
	if (func && is_callthunk(func))
		return 0;

	memcpy(insn_buff, skl_call_thunk_template, tmpl_size);
	apply_relocation(insn_buff, ip, tmpl_size, skl_call_thunk_template, tmpl_size);

	memcpy(*pprog, insn_buff, tmpl_size);
	*pprog += tmpl_size;
	return tmpl_size;
}
#endif

#ifdef CONFIG_MODULES
void noinline callthunks_patch_module_calls(struct callthunk_sites *cs,
					    struct module *mod)
{
	struct core_text ct = {
		.base = (unsigned long)mod->mem[MOD_TEXT].base,
		.end  = (unsigned long)mod->mem[MOD_TEXT].base + mod->mem[MOD_TEXT].size,
		.name = mod->name,
	};

	if (!thunks_initialized)
		return;

	mutex_lock(&text_mutex);
	callthunks_setup(cs, &ct);
	mutex_unlock(&text_mutex);
}
#endif /* CONFIG_MODULES */

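/*
 * Expose the per-CPU counters via debugfs: one file per possible CPU
 * under the "callthunks" directory.
 */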
#if defined(CONFIG_CALL_THUNKS_DEBUG) && defined(CONFIG_DEBUG_FS)
static int callthunks_debug_show(struct seq_file *m, void *p)
{
	unsigned long cpu = (unsigned long)m->private;

	seq_printf(m, "C: %16llu R: %16llu S: %16llu X: %16llu\n",
		   per_cpu(__x86_call_count, cpu),
		   per_cpu(__x86_ret_count, cpu),
		   per_cpu(__x86_stuffs_count, cpu),
		   per_cpu(__x86_ctxsw_count, cpu));
	return 0;
}

static int callthunks_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, callthunks_debug_show, inode->i_private);
}

static const struct file_operations dfs_ops = {
	.open		= callthunks_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init callthunks_debugfs_init(void)
{
	struct dentry *dir;
	unsigned long cpu;

	dir = debugfs_create_dir("callthunks", NULL);
	for_each_possible_cpu(cpu) {
		void *arg = (void *)cpu;
		char name[10];

		sprintf(name, "cpu%lu", cpu);
		debugfs_create_file(name, 0644, dir, arg, &dfs_ops);
	}
	return 0;
}
__initcall(callthunks_debugfs_init);
#endif