// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/stacktrace.c
 *
 * Stack trace management functions
 *
 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#include <linux/sched/task_stack.h>
#include <linux/sched/debug.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/stacktrace.h>
#include <linux/interrupt.h>

/**
 * stack_trace_print - Print the entries in the stack trace
 * @entries: Pointer to storage array
 * @nr_entries: Number of entries in the storage array
 * @spaces: Number of leading spaces to print
 */
void stack_trace_print(const unsigned long *entries, unsigned int nr_entries,
                       int spaces)
{
        unsigned int i;

        if (WARN_ON(!entries))
                return;

        for (i = 0; i < nr_entries; i++)
                printk("%*c%pS\n", 1 + spaces, ' ', (void *)entries[i]);
}
EXPORT_SYMBOL_GPL(stack_trace_print);

/**
 * stack_trace_snprint - Print the entries in the stack trace into a buffer
 * @buf: Pointer to the print buffer
 * @size: Size of the print buffer
 * @entries: Pointer to storage array
 * @nr_entries: Number of entries in the storage array
 * @spaces: Number of leading spaces to print
 *
 * Return: Number of bytes printed.
 */
int stack_trace_snprint(char *buf, size_t size, const unsigned long *entries,
                        unsigned int nr_entries, int spaces)
{
        unsigned int generated, i, total = 0;

        if (WARN_ON(!entries))
                return 0;

        for (i = 0; i < nr_entries && size; i++) {
                generated = snprintf(buf, size, "%*c%pS\n", 1 + spaces, ' ',
                                     (void *)entries[i]);

                total += generated;
                if (generated >= size) {
                        buf += size;
                        size = 0;
                } else {
                        buf += generated;
                        size -= generated;
                }
        }

        return total;
}
EXPORT_SYMBOL_GPL(stack_trace_snprint);
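
/*
 * Example (illustrative sketch, not part of the original file): a caller
 * that already holds a saved trace (entries/nr_entries from a prior
 * stack_trace_save()) can render it into a buffer, e.g. for a debugfs
 * show method; the buffer size here is an arbitrary choice:
 *
 *      char buf[512];
 *      int len;
 *
 *      len = stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 4);
 *      pr_info("%s", buf);
 *
 * Like snprintf(), the returned count may exceed the buffer size when the
 * output was truncated.
 */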

#ifdef CONFIG_ARCH_STACKWALK

struct stacktrace_cookie {
        unsigned long *store;
        unsigned int size;
        unsigned int skip;
        unsigned int len;
};

static bool stack_trace_consume_entry(void *cookie, unsigned long addr)
{
        struct stacktrace_cookie *c = cookie;

        if (c->len >= c->size)
                return false;

        if (c->skip > 0) {
                c->skip--;
                return true;
        }
        c->store[c->len++] = addr;
        return c->len < c->size;
}

static bool stack_trace_consume_entry_nosched(void *cookie, unsigned long addr)
{
        if (in_sched_functions(addr))
                return true;
        return stack_trace_consume_entry(cookie, addr);
}

/**
 * stack_trace_save - Save a stack trace into a storage array
 * @store: Pointer to storage array
 * @size: Size of the storage array
 * @skipnr: Number of entries to skip at the start of the stack trace
 *
 * Return: Number of trace entries stored.
 */
unsigned int stack_trace_save(unsigned long *store, unsigned int size,
                              unsigned int skipnr)
{
        stack_trace_consume_fn consume_entry = stack_trace_consume_entry;
        struct stacktrace_cookie c = {
                .store = store,
                .size = size,
                .skip = skipnr + 1,
        };

        arch_stack_walk(consume_entry, &c, current, NULL);
        return c.len;
}
EXPORT_SYMBOL_GPL(stack_trace_save);
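
/*
 * Example (illustrative sketch, not part of the original file): the common
 * pattern is to save the current task's trace into a fixed-size on-stack
 * array and hand it to stack_trace_print(); the array size is an arbitrary
 * choice here:
 *
 *      unsigned long entries[16];
 *      unsigned int nr_entries;
 *
 *      nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 *      stack_trace_print(entries, nr_entries, 0);
 */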

/**
 * stack_trace_save_tsk - Save a task stack trace into a storage array
 * @tsk: The task to examine
 * @store: Pointer to storage array
 * @size: Size of the storage array
 * @skipnr: Number of entries to skip at the start of the stack trace
 *
 * Return: Number of trace entries stored.
 */
unsigned int stack_trace_save_tsk(struct task_struct *tsk, unsigned long *store,
                                  unsigned int size, unsigned int skipnr)
{
        stack_trace_consume_fn consume_entry = stack_trace_consume_entry_nosched;
        struct stacktrace_cookie c = {
                .store = store,
                .size = size,
                /* skip this function if they are tracing us */
                .skip = skipnr + (current == tsk),
        };

        if (!try_get_task_stack(tsk))
                return 0;

        arch_stack_walk(consume_entry, &c, tsk, NULL);
        put_task_stack(tsk);
        return c.len;
}
EXPORT_SYMBOL_GPL(stack_trace_save_tsk);
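
/*
 * Example (illustrative sketch, not part of the original file): saving
 * another task's trace, e.g. for a procfs-style stack dumper. Scheduler
 * internals are filtered out by the _nosched consumer above; the caller
 * is assumed to already hold a reference on the task:
 *
 *      unsigned long entries[32];
 *      unsigned int nr_entries;
 *
 *      nr_entries = stack_trace_save_tsk(task, entries, ARRAY_SIZE(entries), 0);
 *      stack_trace_print(entries, nr_entries, 0);
 */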

/**
 * stack_trace_save_regs - Save a stack trace based on pt_regs into a storage array
 * @regs: Pointer to pt_regs to examine
 * @store: Pointer to storage array
 * @size: Size of the storage array
 * @skipnr: Number of entries to skip at the start of the stack trace
 *
 * Return: Number of trace entries stored.
 */
unsigned int stack_trace_save_regs(struct pt_regs *regs, unsigned long *store,
                                   unsigned int size, unsigned int skipnr)
{
        stack_trace_consume_fn consume_entry = stack_trace_consume_entry;
        struct stacktrace_cookie c = {
                .store = store,
                .size = size,
                .skip = skipnr,
        };

        arch_stack_walk(consume_entry, &c, current, regs);
        return c.len;
}
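
/*
 * Example (illustrative sketch, not part of the original file): the
 * regs-based variant is useful where a struct pt_regs snapshot is already
 * available, e.g. in an exception or interrupt handler:
 *
 *      unsigned long entries[16];
 *      unsigned int nr_entries;
 *
 *      nr_entries = stack_trace_save_regs(regs, entries, ARRAY_SIZE(entries), 0);
 *      stack_trace_print(entries, nr_entries, 0);
 */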

#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
/**
 * stack_trace_save_tsk_reliable - Save task stack with verification
 * @tsk: Pointer to the task to examine
 * @store: Pointer to storage array
 * @size: Size of the storage array
 *
 * Return: An error if it detects any unreliable features of the
 * stack. Otherwise it guarantees that the stack trace is
 * reliable and returns the number of entries stored.
 *
 * If the task is not 'current', the caller *must* ensure the task is inactive.
 */
int stack_trace_save_tsk_reliable(struct task_struct *tsk, unsigned long *store,
                                  unsigned int size)
{
        stack_trace_consume_fn consume_entry = stack_trace_consume_entry;
        struct stacktrace_cookie c = {
                .store = store,
                .size = size,
        };
        int ret;

        /*
         * If the task doesn't have a stack (e.g., a zombie), the stack is
         * "reliably" empty.
         */
        if (!try_get_task_stack(tsk))
                return 0;

        ret = arch_stack_walk_reliable(consume_entry, &c, tsk);
        put_task_stack(tsk);
        return ret ? ret : c.len;
}
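
/*
 * Example (illustrative sketch, not part of the original file): a
 * livepatch-style consistency check would treat any negative return as
 * "could not reliably unwind" and retry later instead of consuming a
 * possibly incomplete trace:
 *
 *      ret = stack_trace_save_tsk_reliable(task, entries, ARRAY_SIZE(entries));
 *      if (ret < 0)
 *              return ret;
 *      nr_entries = ret;
 */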
#endif

#ifdef CONFIG_USER_STACKTRACE_SUPPORT
/**
 * stack_trace_save_user - Save a user space stack trace into a storage array
 * @store: Pointer to storage array
 * @size: Size of the storage array
 *
 * Return: Number of trace entries stored.
 */
unsigned int stack_trace_save_user(unsigned long *store, unsigned int size)
{
        stack_trace_consume_fn consume_entry = stack_trace_consume_entry;
        struct stacktrace_cookie c = {
                .store = store,
                .size = size,
        };

        /* Trace user stack if not a kernel thread */
        if (current->flags & PF_KTHREAD)
                return 0;

        arch_stack_walk_user(consume_entry, &c, task_pt_regs(current));

        return c.len;
}
#endif

#else /* CONFIG_ARCH_STACKWALK */

/*
 * Architectures that do not implement save_stack_trace_*()
 * get these weak aliases and once-per-bootup warnings
 * (whenever this facility is utilized - for example by procfs):
 */
__weak void
save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
        WARN_ONCE(1, KERN_INFO "save_stack_trace_tsk() not implemented yet.\n");
}

__weak void
save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
        WARN_ONCE(1, KERN_INFO "save_stack_trace_regs() not implemented yet.\n");
}
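
/*
 * Illustrative sketch (an assumption about the arch side, not code from
 * this file): an architecture implementing the legacy interface walks its
 * frames and fills struct stack_trace directly, honouring ->skip and
 * ->max_entries, roughly along these lines; for_each_frame() stands in
 * for the arch-specific unwinder loop and is hypothetical:
 *
 *      void save_stack_trace(struct stack_trace *trace)
 *      {
 *              unsigned long addr;
 *
 *              for_each_frame(addr) {
 *                      if (trace->skip > 0) {
 *                              trace->skip--;
 *                              continue;
 *                      }
 *                      if (trace->nr_entries >= trace->max_entries)
 *                              break;
 *                      trace->entries[trace->nr_entries++] = addr;
 *              }
 *      }
 */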

/**
 * stack_trace_save - Save a stack trace into a storage array
 * @store: Pointer to storage array
 * @size: Size of the storage array
 * @skipnr: Number of entries to skip at the start of the stack trace
 *
 * Return: Number of trace entries stored
 */
unsigned int stack_trace_save(unsigned long *store, unsigned int size,
                              unsigned int skipnr)
{
        struct stack_trace trace = {
                .entries = store,
                .max_entries = size,
                .skip = skipnr + 1,
        };

        save_stack_trace(&trace);
        return trace.nr_entries;
}
EXPORT_SYMBOL_GPL(stack_trace_save);

/**
 * stack_trace_save_tsk - Save a task stack trace into a storage array
 * @task: The task to examine
 * @store: Pointer to storage array
 * @size: Size of the storage array
 * @skipnr: Number of entries to skip at the start of the stack trace
 *
 * Return: Number of trace entries stored
 */
unsigned int stack_trace_save_tsk(struct task_struct *task,
                                  unsigned long *store, unsigned int size,
                                  unsigned int skipnr)
{
        struct stack_trace trace = {
                .entries = store,
                .max_entries = size,
                /* skip this function if they are tracing us */
                .skip = skipnr + (current == task),
        };

        save_stack_trace_tsk(task, &trace);
        return trace.nr_entries;
}
EXPORT_SYMBOL_GPL(stack_trace_save_tsk);

/**
 * stack_trace_save_regs - Save a stack trace based on pt_regs into a storage array
 * @regs: Pointer to pt_regs to examine
 * @store: Pointer to storage array
 * @size: Size of the storage array
 * @skipnr: Number of entries to skip at the start of the stack trace
 *
 * Return: Number of trace entries stored
 */
unsigned int stack_trace_save_regs(struct pt_regs *regs, unsigned long *store,
                                   unsigned int size, unsigned int skipnr)
{
        struct stack_trace trace = {
                .entries = store,
                .max_entries = size,
                .skip = skipnr,
        };

        save_stack_trace_regs(regs, &trace);
        return trace.nr_entries;
}

#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
/**
 * stack_trace_save_tsk_reliable - Save task stack with verification
 * @tsk: Pointer to the task to examine
 * @store: Pointer to storage array
 * @size: Size of the storage array
 *
 * Return: An error if it detects any unreliable features of the
 * stack. Otherwise it guarantees that the stack trace is
 * reliable and returns the number of entries stored.
 *
 * If the task is not 'current', the caller *must* ensure the task is inactive.
 */
int stack_trace_save_tsk_reliable(struct task_struct *tsk, unsigned long *store,
                                  unsigned int size)
{
        struct stack_trace trace = {
                .entries = store,
                .max_entries = size,
        };
        int ret = save_stack_trace_tsk_reliable(tsk, &trace);

        return ret ? ret : trace.nr_entries;
}
#endif

#ifdef CONFIG_USER_STACKTRACE_SUPPORT
/**
 * stack_trace_save_user - Save a user space stack trace into a storage array
 * @store: Pointer to storage array
 * @size: Size of the storage array
 *
 * Return: Number of trace entries stored
 */
unsigned int stack_trace_save_user(unsigned long *store, unsigned int size)
{
        struct stack_trace trace = {
                .entries = store,
                .max_entries = size,
        };

        save_stack_trace_user(&trace);
        return trace.nr_entries;
}
#endif /* CONFIG_USER_STACKTRACE_SUPPORT */

#endif /* !CONFIG_ARCH_STACKWALK */

static inline bool in_irqentry_text(unsigned long ptr)
{
        return (ptr >= (unsigned long)&__irqentry_text_start &&
                ptr < (unsigned long)&__irqentry_text_end) ||
                (ptr >= (unsigned long)&__softirqentry_text_start &&
                 ptr < (unsigned long)&__softirqentry_text_end);
}

/**
 * filter_irq_stacks - Find first IRQ stack entry in trace
 * @entries: Pointer to stack trace array
 * @nr_entries: Number of entries in the storage array
 *
 * Return: Number of trace entries until IRQ stack starts.
 */
unsigned int filter_irq_stacks(unsigned long *entries, unsigned int nr_entries)
{
        unsigned int i;

        for (i = 0; i < nr_entries; i++) {
                if (in_irqentry_text(entries[i])) {
                        /* Include the irqentry function into the stack. */
                        return i + 1;
                }
        }
        return nr_entries;
}
EXPORT_SYMBOL_GPL(filter_irq_stacks);
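
/*
 * Example (illustrative sketch, not part of the original file): users such
 * as the stack depot typically truncate a freshly saved trace at the IRQ
 * entry point so that the unrelated interrupted context does not end up in
 * the stored trace:
 *
 *      nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 *      nr_entries = filter_irq_stacks(entries, nr_entries);
 */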