// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack tracing support
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>

#include <asm/irq.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>

/*
 * AArch64 PCS assigns the frame pointer to x29.
 *
 * A simple function prologue looks like this:
 *	sub	sp, sp, #0x10
 *	stp	x29, x30, [sp]
 *	mov	x29, sp
 *
 * A simple function epilogue looks like this:
 *	mov	sp, x29
 *	ldp	x29, x30, [sp]
 *	add	sp, sp, #0x10
 */
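
/*
 * The "stp x29, x30, [sp]" above therefore leaves a two-word frame record at
 * the address x29 points at. A sketch of its layout (this struct is
 * illustrative only and is not defined in this file):
 *
 *	struct frame_record {
 *		u64 fp;		// caller's x29, i.e. the previous record
 *		u64 lr;		// saved x30, the return address
 *	};
 *
 * unwind_frame() below follows this chain, reading the two words at fp and
 * fp + 8 accordingly.
 */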

/*
 * Unwind from one frame record (A) to the next frame record (B).
 *
 * We terminate early if the location of B indicates a malformed chain of frame
 * records (e.g. a cycle), determined based on the location and fp value of A
 * and the location (but not the fp value) of B.
 */
int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
{
	unsigned long fp = frame->fp;
	struct stack_info info;

	if (fp & 0xf)
		return -EINVAL;

	if (!tsk)
		tsk = current;

	if (!on_accessible_stack(tsk, fp, &info))
		return -EINVAL;

	if (test_bit(info.type, frame->stacks_done))
		return -EINVAL;

	/*
	 * As stacks grow downward, any valid record on the same stack must be
	 * at a strictly higher address than the prior record.
	 *
	 * Stacks can nest in several valid orders, e.g.
	 *
	 * TASK -> IRQ -> OVERFLOW -> SDEI_NORMAL
	 * TASK -> SDEI_NORMAL -> SDEI_CRITICAL -> OVERFLOW
	 *
	 * ... but the nesting itself is strict. Once we transition from one
	 * stack to another, it's never valid to unwind back to that first
	 * stack.
	 */
	if (info.type == frame->prev_type) {
		if (fp <= frame->prev_fp)
			return -EINVAL;
	} else {
		set_bit(frame->prev_type, frame->stacks_done);
	}

	/*
	 * Record this frame record's values and location. The prev_fp and
	 * prev_type are only meaningful to the next unwind_frame() invocation.
	 */
	frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
	frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
	frame->prev_fp = fp;
	frame->prev_type = info.type;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (tsk->ret_stack &&
		(frame->pc == (unsigned long)return_to_handler)) {
		struct ftrace_ret_stack *ret_stack;
		/*
		 * This is a case where the function graph tracer has
		 * modified a return address (LR) in a stack frame
		 * to hook a function return.
		 * So replace it with the original value.
		 */
		ret_stack = ftrace_graph_get_ret_stack(tsk, frame->graph++);
		if (WARN_ON_ONCE(!ret_stack))
			return -EINVAL;
		frame->pc = ret_stack->ret;
	}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	/*
	 * Frames created upon entry from EL0 have NULL FP and PC values, so
	 * don't bother reporting these. Frames created by __noreturn functions
	 * might have a valid FP even if PC is bogus, so only terminate where
	 * both are NULL.
	 */
	if (!frame->fp && !frame->pc)
		return -EINVAL;

	return 0;
}
NOKPROBE_SYMBOL(unwind_frame);

void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
			     int (*fn)(struct stackframe *, void *), void *data)
{
	while (1) {
		int ret;

		if (fn(frame, data))
			break;
		ret = unwind_frame(tsk, frame);
		if (ret < 0)
			break;
	}
}
NOKPROBE_SYMBOL(walk_stackframe);
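
/*
 * Example usage (a sketch, not part of this file): printing the current
 * task's return addresses with the API above. print_entry() is a
 * hypothetical callback; returning non-zero from it stops the walk.
 *
 *	static int print_entry(struct stackframe *frame, void *data)
 *	{
 *		pr_info(" %pS\n", (void *)frame->pc);
 *		return 0;
 *	}
 *
 *	static void dump_current(void)
 *	{
 *		struct stackframe frame;
 *
 *		start_backtrace(&frame,
 *				(unsigned long)__builtin_frame_address(0),
 *				(unsigned long)dump_current);
 *		walk_stackframe(current, &frame, print_entry, NULL);
 *	}
 */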

#ifdef CONFIG_STACKTRACE
struct stack_trace_data {
	struct stack_trace *trace;
	unsigned int no_sched_functions;
	unsigned int skip;
};

static int save_trace(struct stackframe *frame, void *d)
{
	struct stack_trace_data *data = d;
	struct stack_trace *trace = data->trace;
	unsigned long addr = frame->pc;

	if (data->no_sched_functions && in_sched_functions(addr))
		return 0;
	if (data->skip) {
		data->skip--;
		return 0;
	}

	trace->entries[trace->nr_entries++] = addr;

	return trace->nr_entries >= trace->max_entries;
}

void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	struct stack_trace_data data;
	struct stackframe frame;

	data.trace = trace;
	data.skip = trace->skip;
	data.no_sched_functions = 0;

	start_backtrace(&frame, regs->regs[29], regs->pc);
	walk_stackframe(current, &frame, save_trace, &data);
}
EXPORT_SYMBOL_GPL(save_stack_trace_regs);

static noinline void __save_stack_trace(struct task_struct *tsk,
	struct stack_trace *trace, unsigned int nosched)
{
	struct stack_trace_data data;
	struct stackframe frame;

	if (!try_get_task_stack(tsk))
		return;

	data.trace = trace;
	data.skip = trace->skip;
	data.no_sched_functions = nosched;

	if (tsk != current) {
		start_backtrace(&frame, thread_saved_fp(tsk),
				thread_saved_pc(tsk));
	} else {
		/* We don't want this function nor the caller */
		data.skip += 2;
		start_backtrace(&frame,
				(unsigned long)__builtin_frame_address(0),
				(unsigned long)__save_stack_trace);
	}

	walk_stackframe(tsk, &frame, save_trace, &data);

	put_task_stack(tsk);
}

void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	__save_stack_trace(tsk, trace, 1);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);

void save_stack_trace(struct stack_trace *trace)
{
	__save_stack_trace(current, trace, 0);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
#endif
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack tracing support
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/kernel.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/filter.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>

#include <asm/efi.h>
#include <asm/irq.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>

enum kunwind_source {
	KUNWIND_SOURCE_UNKNOWN,
	KUNWIND_SOURCE_FRAME,
	KUNWIND_SOURCE_CALLER,
	KUNWIND_SOURCE_TASK,
	KUNWIND_SOURCE_REGS_PC,
};

union unwind_flags {
	unsigned long	all;
	struct {
		unsigned long	fgraph : 1,
				kretprobe : 1;
	};
};
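
/*
 * Writing flags.all = 0 clears both bit-fields in a single store;
 * kunwind_next() does this before each step so that the flags only describe
 * how the most recently reported PC was recovered.
 */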

/*
 * Kernel unwind state
 *
 * @common:      Common unwind state.
 * @task:        The task being unwound.
 * @graph_idx:   Used by ftrace_graph_ret_addr() for optimized stack unwinding.
 * @kr_cur:      When KRETPROBES is selected, holds the kretprobe instance
 *               associated with the most recently encountered replacement lr
 *               value.
 * @source:      The source of the current fp/pc pair (see enum
 *               kunwind_source).
 * @flags:       How the most recently reported PC was recovered.
 * @regs:        The most recently encountered pt_regs, or NULL where none has
 *               been encountered.
 */
struct kunwind_state {
	struct unwind_state common;
	struct task_struct *task;
	int graph_idx;
#ifdef CONFIG_KRETPROBES
	struct llist_node *kr_cur;
#endif
	enum kunwind_source source;
	union unwind_flags flags;
	struct pt_regs *regs;
};

static __always_inline void
kunwind_init(struct kunwind_state *state,
	     struct task_struct *task)
{
	unwind_init_common(&state->common);
	state->task = task;
	state->source = KUNWIND_SOURCE_UNKNOWN;
	state->flags.all = 0;
	state->regs = NULL;
}

/*
 * Start an unwind from a pt_regs.
 *
 * The unwind will begin at the PC within the regs.
 *
 * The regs must be on a stack currently owned by the calling task.
 */
static __always_inline void
kunwind_init_from_regs(struct kunwind_state *state,
		       struct pt_regs *regs)
{
	kunwind_init(state, current);

	state->regs = regs;
	state->common.fp = regs->regs[29];
	state->common.pc = regs->pc;
	state->source = KUNWIND_SOURCE_REGS_PC;
}

/*
 * Start an unwind from a caller.
 *
 * The unwind will begin at the caller of whichever function this is inlined
 * into.
 *
 * The function which invokes this must be noinline.
 */
static __always_inline void
kunwind_init_from_caller(struct kunwind_state *state)
{
	kunwind_init(state, current);

	state->common.fp = (unsigned long)__builtin_frame_address(1);
	state->common.pc = (unsigned long)__builtin_return_address(0);
	state->source = KUNWIND_SOURCE_CALLER;
}
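
/*
 * For example (a sketch, not part of this file), a noinline wrapper makes the
 * wrapper itself the skipped frame, so the unwind starts at whoever called it.
 * report_caller() is a hypothetical helper:
 *
 *	static noinline void report_caller(void)
 *	{
 *		struct kunwind_state state;
 *
 *		kunwind_init_from_caller(&state);
 *		pr_info("called from %pS\n", (void *)state.common.pc);
 *	}
 */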

/*
 * Start an unwind from a blocked task.
 *
 * The unwind will begin at the blocked task's saved PC (i.e. the caller of
 * cpu_switch_to()).
 *
 * The caller should ensure the task is blocked in cpu_switch_to() for the
 * duration of the unwind, or the unwind will be bogus. It is never valid to
 * call this for the current task.
 */
static __always_inline void
kunwind_init_from_task(struct kunwind_state *state,
		       struct task_struct *task)
{
	kunwind_init(state, task);

	state->common.fp = thread_saved_fp(task);
	state->common.pc = thread_saved_pc(task);
	state->source = KUNWIND_SOURCE_TASK;
}

static __always_inline int
kunwind_recover_return_address(struct kunwind_state *state)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (state->task->ret_stack &&
	    (state->common.pc == (unsigned long)return_to_handler)) {
		unsigned long orig_pc;
		orig_pc = ftrace_graph_ret_addr(state->task, &state->graph_idx,
						state->common.pc,
						(void *)state->common.fp);
		if (state->common.pc == orig_pc) {
			WARN_ON_ONCE(state->task == current);
			return -EINVAL;
		}
		state->common.pc = orig_pc;
		state->flags.fgraph = 1;
	}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KRETPROBES
	if (is_kretprobe_trampoline(state->common.pc)) {
		unsigned long orig_pc;
		orig_pc = kretprobe_find_ret_addr(state->task,
						  (void *)state->common.fp,
						  &state->kr_cur);
		state->common.pc = orig_pc;
		state->flags.kretprobe = 1;
	}
#endif /* CONFIG_KRETPROBES */

	return 0;
}

static __always_inline
int kunwind_next_regs_pc(struct kunwind_state *state)
{
	struct stack_info *info;
	unsigned long fp = state->common.fp;
	struct pt_regs *regs;

	regs = container_of((u64 *)fp, struct pt_regs, stackframe.record.fp);

	info = unwind_find_stack(&state->common, (unsigned long)regs, sizeof(*regs));
	if (!info)
		return -EINVAL;

	unwind_consume_stack(&state->common, info, (unsigned long)regs,
			     sizeof(*regs));

	state->regs = regs;
	state->common.pc = regs->pc;
	state->common.fp = regs->regs[29];
	state->source = KUNWIND_SOURCE_REGS_PC;
	return 0;
}

static __always_inline int
kunwind_next_frame_record_meta(struct kunwind_state *state)
{
	struct task_struct *tsk = state->task;
	unsigned long fp = state->common.fp;
	struct frame_record_meta *meta;
	struct stack_info *info;

	info = unwind_find_stack(&state->common, fp, sizeof(*meta));
	if (!info)
		return -EINVAL;

	meta = (struct frame_record_meta *)fp;
	switch (READ_ONCE(meta->type)) {
	case FRAME_META_TYPE_FINAL:
		if (meta == &task_pt_regs(tsk)->stackframe)
			return -ENOENT;
		WARN_ON_ONCE(tsk == current);
		return -EINVAL;
	case FRAME_META_TYPE_PT_REGS:
		return kunwind_next_regs_pc(state);
	default:
		WARN_ON_ONCE(tsk == current);
		return -EINVAL;
	}
}

static __always_inline int
kunwind_next_frame_record(struct kunwind_state *state)
{
	unsigned long fp = state->common.fp;
	struct frame_record *record;
	struct stack_info *info;
	unsigned long new_fp, new_pc;

	if (fp & 0x7)
		return -EINVAL;

	info = unwind_find_stack(&state->common, fp, sizeof(*record));
	if (!info)
		return -EINVAL;

	record = (struct frame_record *)fp;
	new_fp = READ_ONCE(record->fp);
	new_pc = READ_ONCE(record->lr);

	if (!new_fp && !new_pc)
		return kunwind_next_frame_record_meta(state);

	unwind_consume_stack(&state->common, info, fp, sizeof(*record));

	state->common.fp = new_fp;
	state->common.pc = new_pc;
	state->source = KUNWIND_SOURCE_FRAME;

	return 0;
}
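
/*
 * A record of {fp = 0, lr = 0} is never a real frame record; the entry code
 * instead places a metadata record there (a frame record followed by a type
 * word). FRAME_META_TYPE_FINAL marks the record embedded in the task's
 * pt_regs at its initial entry, where the unwind legitimately ends;
 * FRAME_META_TYPE_PT_REGS marks an in-kernel exception boundary, where
 * kunwind_next_regs_pc() continues the unwind from the interrupted context's
 * pt_regs.
 */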

/*
 * Unwind from one frame record (A) to the next frame record (B).
 *
 * We terminate early if the location of B indicates a malformed chain of frame
 * records (e.g. a cycle), determined based on the location and fp value of A
 * and the location (but not the fp value) of B.
 */
static __always_inline int
kunwind_next(struct kunwind_state *state)
{
	int err;

	state->flags.all = 0;

	switch (state->source) {
	case KUNWIND_SOURCE_FRAME:
	case KUNWIND_SOURCE_CALLER:
	case KUNWIND_SOURCE_TASK:
	case KUNWIND_SOURCE_REGS_PC:
		err = kunwind_next_frame_record(state);
		break;
	default:
		err = -EINVAL;
	}

	if (err)
		return err;

	state->common.pc = ptrauth_strip_kernel_insn_pac(state->common.pc);

	return kunwind_recover_return_address(state);
}

typedef bool (*kunwind_consume_fn)(const struct kunwind_state *state, void *cookie);

static __always_inline void
do_kunwind(struct kunwind_state *state, kunwind_consume_fn consume_state,
	   void *cookie)
{
	if (kunwind_recover_return_address(state))
		return;

	while (1) {
		int ret;

		if (!consume_state(state, cookie))
			break;
		ret = kunwind_next(state);
		if (ret < 0)
			break;
	}
}

/*
 * Per-cpu stacks are only accessible when unwinding the current task in a
 * non-preemptible context.
 */
#define STACKINFO_CPU(name)					\
	({							\
		((task == current) && !preemptible())		\
			? stackinfo_get_##name()		\
			: stackinfo_get_unknown();		\
	})
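
/*
 * For example, STACKINFO_CPU(irq) evaluates to stackinfo_get_irq() when
 * unwinding the current task with preemption disabled, and to
 * stackinfo_get_unknown() otherwise, so the IRQ stack is simply invisible to
 * the unwinder in contexts where it cannot be read safely.
 */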

/*
 * SDEI stacks are only accessible when unwinding the current task in an NMI
 * context.
 */
#define STACKINFO_SDEI(name)					\
	({							\
		((task == current) && in_nmi())			\
			? stackinfo_get_sdei_##name()		\
			: stackinfo_get_unknown();		\
	})

#define STACKINFO_EFI						\
	({							\
		((task == current) && current_in_efi())		\
			? stackinfo_get_efi()			\
			: stackinfo_get_unknown();		\
	})

static __always_inline void
kunwind_stack_walk(kunwind_consume_fn consume_state,
		   void *cookie, struct task_struct *task,
		   struct pt_regs *regs)
{
	struct stack_info stacks[] = {
		stackinfo_get_task(task),
		STACKINFO_CPU(irq),
#if defined(CONFIG_VMAP_STACK)
		STACKINFO_CPU(overflow),
#endif
#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_ARM_SDE_INTERFACE)
		STACKINFO_SDEI(normal),
		STACKINFO_SDEI(critical),
#endif
#ifdef CONFIG_EFI
		STACKINFO_EFI,
#endif
	};
	struct kunwind_state state = {
		.common = {
			.stacks = stacks,
			.nr_stacks = ARRAY_SIZE(stacks),
		},
	};

	if (regs) {
		if (task != current)
			return;
		kunwind_init_from_regs(&state, regs);
	} else if (task == current) {
		kunwind_init_from_caller(&state);
	} else {
		kunwind_init_from_task(&state, task);
	}

	do_kunwind(&state, consume_state, cookie);
}

struct kunwind_consume_entry_data {
	stack_trace_consume_fn consume_entry;
	void *cookie;
};

static __always_inline bool
arch_kunwind_consume_entry(const struct kunwind_state *state, void *cookie)
{
	struct kunwind_consume_entry_data *data = cookie;
	return data->consume_entry(data->cookie, state->common.pc);
}

noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
				      void *cookie, struct task_struct *task,
				      struct pt_regs *regs)
{
	struct kunwind_consume_entry_data data = {
		.consume_entry = consume_entry,
		.cookie = cookie,
	};

	kunwind_stack_walk(arch_kunwind_consume_entry, &data, task, regs);
}
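
/*
 * Example usage (a sketch, not part of this file): printing the current
 * task's backtrace through the generic interface above. print_entry() is a
 * hypothetical stack_trace_consume_fn; returning false stops the walk.
 *
 *	static bool print_entry(void *cookie, unsigned long pc)
 *	{
 *		pr_info(" %pS\n", (void *)pc);
 *		return true;
 *	}
 *
 *	arch_stack_walk(print_entry, NULL, current, NULL);
 */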

struct bpf_unwind_consume_entry_data {
	bool (*consume_entry)(void *cookie, u64 ip, u64 sp, u64 fp);
	void *cookie;
};

static bool
arch_bpf_unwind_consume_entry(const struct kunwind_state *state, void *cookie)
{
	struct bpf_unwind_consume_entry_data *data = cookie;

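	/*
	 * This unwinder tracks only fp and pc, so there is no meaningful sp
	 * value to report; hand 0 to the consumer in its place.
	 */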
	return data->consume_entry(data->cookie, state->common.pc, 0,
				   state->common.fp);
}

noinline noinstr void arch_bpf_stack_walk(bool (*consume_entry)(void *cookie, u64 ip, u64 sp,
								u64 fp), void *cookie)
{
	struct bpf_unwind_consume_entry_data data = {
		.consume_entry = consume_entry,
		.cookie = cookie,
	};

	kunwind_stack_walk(arch_bpf_unwind_consume_entry, &data, current, NULL);
}

static const char *state_source_string(const struct kunwind_state *state)
{
	switch (state->source) {
	case KUNWIND_SOURCE_FRAME:	return NULL;
	case KUNWIND_SOURCE_CALLER:	return "C";
	case KUNWIND_SOURCE_TASK:	return "T";
	case KUNWIND_SOURCE_REGS_PC:	return "P";
	default:			return "U";
	}
}

static bool dump_backtrace_entry(const struct kunwind_state *state, void *arg)
{
	const char *source = state_source_string(state);
	union unwind_flags flags = state->flags;
	bool has_info = source || flags.all;
	char *loglvl = arg;

	printk("%s %pSb%s%s%s%s%s\n", loglvl,
	       (void *)state->common.pc,
	       has_info ? " (" : "",
	       source ? source : "",
	       flags.fgraph ? "F" : "",
	       flags.kretprobe ? "K" : "",
	       has_info ? ")" : "");

	return true;
}

void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
		    const char *loglvl)
{
	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

	if (regs && user_mode(regs))
		return;

	if (!tsk)
		tsk = current;

	if (!try_get_task_stack(tsk))
		return;

	printk("%sCall trace:\n", loglvl);
	kunwind_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs);

	put_task_stack(tsk);
}

void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
	dump_backtrace(NULL, tsk, loglvl);
	barrier();
}

/*
 * The frame record layout for a userspace stack frame in AArch64 mode.
 */
struct frame_tail {
	struct frame_tail __user *fp;
	unsigned long lr;
} __attribute__((packed));
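
/*
 * This mirrors the in-kernel frame record: a userspace binary built with
 * frame pointers chains {fp, lr} pairs the same way. The walk below differs
 * only in that every dereference must go through access_ok() and
 * __copy_from_user_inatomic().
 */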

/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
unwind_user_frame(struct frame_tail __user *tail, void *cookie,
		  stack_trace_consume_fn consume_entry)
{
	struct frame_tail buftail;
	unsigned long err;
	unsigned long lr;

	/* Check that the whole struct frame_tail is accessible */
	if (!access_ok(tail, sizeof(buftail)))
		return NULL;

	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

	lr = ptrauth_strip_user_insn_pac(buftail.lr);

	if (!consume_entry(cookie, lr))
		return NULL;

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail >= buftail.fp)
		return NULL;

	return buftail.fp;
}

#ifdef CONFIG_COMPAT
/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct compat_frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct compat_frame_tail {
	compat_uptr_t	fp; /* a (struct compat_frame_tail *) in compat mode */
	u32		sp;
	u32		lr;
} __attribute__((packed));

static struct compat_frame_tail __user *
unwind_compat_user_frame(struct compat_frame_tail __user *tail, void *cookie,
			 stack_trace_consume_fn consume_entry)
{
	struct compat_frame_tail buftail;
	unsigned long err;

	/* Check that the whole struct compat_frame_tail is accessible */
	if (!access_ok(tail, sizeof(buftail)))
		return NULL;

	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

	if (!consume_entry(cookie, buftail.lr))
		return NULL;

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail + 1 >= (struct compat_frame_tail __user *)
			compat_ptr(buftail.fp))
		return NULL;

	return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
}
#endif /* CONFIG_COMPAT */

void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
			  const struct pt_regs *regs)
{
	if (!consume_entry(cookie, regs->pc))
		return;

	if (!compat_user_mode(regs)) {
		/* AARCH64 mode */
		struct frame_tail __user *tail;

		tail = (struct frame_tail __user *)regs->regs[29];
		while (tail && !((unsigned long)tail & 0x7))
			tail = unwind_user_frame(tail, cookie, consume_entry);
	} else {
#ifdef CONFIG_COMPAT
		/* AARCH32 compat mode */
		struct compat_frame_tail __user *tail;

		tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
		while (tail && !((unsigned long)tail & 0x3))
			tail = unwind_compat_user_frame(tail, cookie, consume_entry);
#endif
	}
}