// SPDX-License-Identifier: GPL-2.0-only
#include <linux/objtool.h>
#include <linux/module.h>
#include <linux/sort.h>
#include <asm/ptrace.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>
#include <asm/orc_types.h>
#include <asm/orc_lookup.h>

#define orc_warn(fmt, ...) \
	printk_deferred_once(KERN_WARNING "WARNING: " fmt, ##__VA_ARGS__)

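/*
 * Warnings are only useful (and safe) when unwinding the current task: an
 * unwind of another running task is inherently racy, so its failures are
 * expected and silently tolerated.
 */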
#define orc_warn_current(args...)				\
({								\
	if (state->task == current && !state->error)		\
		orc_warn(args);					\
})

extern int __start_orc_unwind_ip[];
extern int __stop_orc_unwind_ip[];
extern struct orc_entry __start_orc_unwind[];
extern struct orc_entry __stop_orc_unwind[];

static bool orc_init __ro_after_init;
static unsigned int lookup_num_blocks __ro_after_init;

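/*
 * Each .orc_unwind_ip slot is a signed 32-bit offset relative to the slot's
 * own address; orc_ip() recovers the absolute text address.  For example, a
 * slot at 0xffffffff81f00000 holding -0x100000 describes ip
 * 0xffffffff81e00000.  The relative encoding halves the table size on
 * x86-64 and stays valid under KASLR without any relocation.
 */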
static inline unsigned long orc_ip(const int *ip)
{
	return (unsigned long)ip + *ip;
}

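/*
 * The two tables are parallel: .orc_unwind[i] is the ORC state for the text
 * range starting at orc_ip(&ip_table[i]) and ending at the next entry's
 * address, so finding the rightmost entry whose start address is <= ip
 * yields the state covering ip.
 */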
static struct orc_entry *__orc_find(int *ip_table, struct orc_entry *u_table,
				    unsigned int num_entries, unsigned long ip)
{
	int *first = ip_table;
	int *last = ip_table + num_entries - 1;
	int *mid = first, *found = first;

	if (!num_entries)
		return NULL;

	/*
	 * Do a binary range search to find the rightmost duplicate of a given
	 * starting address. Some entries are section terminators which are
	 * "weak" entries for ensuring there are no gaps. They should be
	 * ignored when they conflict with a real entry.
	 */
	while (first <= last) {
		mid = first + ((last - first) / 2);

		if (orc_ip(mid) <= ip) {
			found = mid;
			first = mid + 1;
		} else
			last = mid - 1;
	}

	return u_table + (found - ip_table);
}

#ifdef CONFIG_MODULES
static struct orc_entry *orc_module_find(unsigned long ip)
{
	struct module *mod;

	mod = __module_address(ip);
	if (!mod || !mod->arch.orc_unwind || !mod->arch.orc_unwind_ip)
		return NULL;
	return __orc_find(mod->arch.orc_unwind_ip, mod->arch.orc_unwind,
			  mod->arch.num_orcs, ip);
}
#else
static struct orc_entry *orc_module_find(unsigned long ip)
{
	return NULL;
}
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
static struct orc_entry *orc_find(unsigned long ip);

/*
 * Ftrace dynamic trampolines do not have orc entries of their own.
 * But they are copies of the ftrace entries that are static and
 * defined in ftrace_*.S, which do have orc entries.
 *
 * If the unwinder comes across an ftrace trampoline, then find the
 * ftrace function that was used to create it, and use that ftrace
 * function's orc entry, as the placement of the return code in
 * the stack will be identical.
 */
static struct orc_entry *orc_ftrace_find(unsigned long ip)
{
	struct ftrace_ops *ops;
	unsigned long caller;

	ops = ftrace_ops_trampoline(ip);
	if (!ops)
		return NULL;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
		caller = (unsigned long)ftrace_regs_call;
	else
		caller = (unsigned long)ftrace_call;

	/* Prevent unlikely recursion */
	if (ip == caller)
		return NULL;

	return orc_find(caller);
}
#else
static struct orc_entry *orc_ftrace_find(unsigned long ip)
{
	return NULL;
}
#endif

/*
 * If we crash with IP==0, the last successfully executed instruction
 * was probably an indirect function call with a NULL function pointer,
 * and we don't have unwind information for NULL.
 * This hardcoded ORC entry for IP==0 allows us to unwind from a NULL function
 * pointer into its parent and then continue normally from there.
 */
static struct orc_entry null_orc_entry = {
	.sp_offset = sizeof(long),
	.sp_reg = ORC_REG_SP,
	.bp_reg = ORC_REG_UNDEFINED,
	.type = UNWIND_HINT_TYPE_CALL
};

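/*
 * A standard frame pointer prologue ("push %rbp; mov %rsp, %rbp") leaves the
 * previous SP at BP+16 (past the saved BP and the return address) and the
 * saved BP 16 bytes below that previous SP, which is exactly what the
 * sp/bp offsets below encode.
 */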
/* Fake frame pointer entry -- used as a fallback for generated code */
static struct orc_entry orc_fp_entry = {
	.type = UNWIND_HINT_TYPE_CALL,
	.sp_reg = ORC_REG_BP,
	.sp_offset = 16,
	.bp_reg = ORC_REG_PREV_SP,
	.bp_offset = -16,
	.end = 0,
};

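/*
 * orc_lookup[] divides the kernel text into LOOKUP_BLOCK_SIZE blocks and
 * records, for each block, the index of the first ORC entry that can cover
 * it.  orc_find() then only has to binary-search the small slice of entries
 * between orc_lookup[idx] and orc_lookup[idx + 1] instead of the whole table.
 */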
static struct orc_entry *orc_find(unsigned long ip)
{
	static struct orc_entry *orc;

	if (ip == 0)
		return &null_orc_entry;

	/* For non-init vmlinux addresses, use the fast lookup table: */
	if (ip >= LOOKUP_START_IP && ip < LOOKUP_STOP_IP) {
		unsigned int idx, start, stop;

		idx = (ip - LOOKUP_START_IP) / LOOKUP_BLOCK_SIZE;

		if (unlikely(idx >= lookup_num_blocks-1)) {
			orc_warn("bad lookup idx: idx=%u num=%u ip=%pB\n",
				 idx, lookup_num_blocks, (void *)ip);
			return NULL;
		}

		start = orc_lookup[idx];
		stop = orc_lookup[idx + 1] + 1;

		if (unlikely((__start_orc_unwind + start >= __stop_orc_unwind) ||
			     (__start_orc_unwind + stop > __stop_orc_unwind))) {
			orc_warn("bad lookup value: idx=%u num=%u start=%u stop=%u ip=%pB\n",
				 idx, lookup_num_blocks, start, stop, (void *)ip);
			return NULL;
		}

		return __orc_find(__start_orc_unwind_ip + start,
				  __start_orc_unwind + start, stop - start, ip);
	}

	/* vmlinux .init slow lookup: */
	if (init_kernel_text(ip))
		return __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
				  __stop_orc_unwind_ip - __start_orc_unwind_ip, ip);

	/* Module lookup: */
	orc = orc_module_find(ip);
	if (orc)
		return orc;

	return orc_ftrace_find(ip);
}

#ifdef CONFIG_MODULES

static DEFINE_MUTEX(sort_mutex);
static int *cur_orc_ip_table = __start_orc_unwind_ip;
static struct orc_entry *cur_orc_table = __start_orc_unwind;

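/*
 * Because each .orc_unwind_ip slot stores an address relative to the slot
 * itself, swapping two slots must also rebias the values by the distance
 * between them ('delta'), and the matching .orc_unwind entries have to be
 * swapped in lockstep to keep the two tables parallel.
 */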
static void orc_sort_swap(void *_a, void *_b, int size)
{
	struct orc_entry *orc_a, *orc_b;
	struct orc_entry orc_tmp;
	int *a = _a, *b = _b, tmp;
	int delta = _b - _a;

	/* Swap the .orc_unwind_ip entries: */
	tmp = *a;
	*a = *b + delta;
	*b = tmp - delta;

	/* Swap the corresponding .orc_unwind entries: */
	orc_a = cur_orc_table + (a - cur_orc_ip_table);
	orc_b = cur_orc_table + (b - cur_orc_ip_table);
	orc_tmp = *orc_a;
	*orc_a = *orc_b;
	*orc_b = orc_tmp;
}

static int orc_sort_cmp(const void *_a, const void *_b)
{
	struct orc_entry *orc_a;
	const int *a = _a, *b = _b;
	unsigned long a_val = orc_ip(a);
	unsigned long b_val = orc_ip(b);

	if (a_val > b_val)
		return 1;
	if (a_val < b_val)
		return -1;

	/*
	 * The "weak" section terminator entries need to always be on the left
	 * to ensure the lookup code skips them in favor of real entries.
	 * These terminator entries exist to handle any gaps created by
	 * whitelisted .o files which didn't get objtool generation.
	 */
	orc_a = cur_orc_table + (a - cur_orc_ip_table);
	return orc_a->sp_reg == ORC_REG_UNDEFINED && !orc_a->end ? -1 : 1;
}

void unwind_module_init(struct module *mod, void *_orc_ip, size_t orc_ip_size,
			void *_orc, size_t orc_size)
{
	int *orc_ip = _orc_ip;
	struct orc_entry *orc = _orc;
	unsigned int num_entries = orc_ip_size / sizeof(int);

	WARN_ON_ONCE(orc_ip_size % sizeof(int) != 0 ||
		     orc_size % sizeof(*orc) != 0 ||
		     num_entries != orc_size / sizeof(*orc));

	/*
	 * The 'cur_orc_*' globals allow the orc_sort_swap() callback to
	 * associate an .orc_unwind_ip table entry with its corresponding
	 * .orc_unwind entry so they can both be swapped.
	 */
	mutex_lock(&sort_mutex);
	cur_orc_ip_table = orc_ip;
	cur_orc_table = orc;
	sort(orc_ip, num_entries, sizeof(int), orc_sort_cmp, orc_sort_swap);
	mutex_unlock(&sort_mutex);

	mod->arch.orc_unwind_ip = orc_ip;
	mod->arch.orc_unwind = orc;
	mod->arch.num_orcs = num_entries;
}
#endif

void __init unwind_init(void)
{
	size_t orc_ip_size = (void *)__stop_orc_unwind_ip - (void *)__start_orc_unwind_ip;
	size_t orc_size = (void *)__stop_orc_unwind - (void *)__start_orc_unwind;
	size_t num_entries = orc_ip_size / sizeof(int);
	struct orc_entry *orc;
	int i;

	if (!num_entries || orc_ip_size % sizeof(int) != 0 ||
	    orc_size % sizeof(struct orc_entry) != 0 ||
	    num_entries != orc_size / sizeof(struct orc_entry)) {
		orc_warn("Bad or missing .orc_unwind table. Disabling unwinder.\n");
		return;
	}

	/*
	 * Note, the orc_unwind and orc_unwind_ip tables were already
	 * sorted at build time via the 'sorttable' tool.
	 * It's ready for binary search straight away, no need to sort it.
	 */

	/* Initialize the fast lookup table: */
	lookup_num_blocks = orc_lookup_end - orc_lookup;
	for (i = 0; i < lookup_num_blocks-1; i++) {
		orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
				 num_entries,
				 LOOKUP_START_IP + (LOOKUP_BLOCK_SIZE * i));
		if (!orc) {
			orc_warn("Corrupt .orc_unwind table. Disabling unwinder.\n");
			return;
		}

		orc_lookup[i] = orc - __start_orc_unwind;
	}

	/* Initialize the ending block: */
	orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind, num_entries,
			 LOOKUP_STOP_IP);
	if (!orc) {
		orc_warn("Corrupt .orc_unwind table. Disabling unwinder.\n");
		return;
	}
	orc_lookup[lookup_num_blocks-1] = orc - __start_orc_unwind;

	orc_init = true;
}

unsigned long unwind_get_return_address(struct unwind_state *state)
{
	if (unwind_done(state))
		return 0;

	return __kernel_text_address(state->ip) ? state->ip : 0;
}
EXPORT_SYMBOL_GPL(unwind_get_return_address);

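/*
 * For a regs frame the return address lives in pt_regs; for an ordinary call
 * frame it is the word immediately below the previous frame's SP, which is
 * where the CALL instruction pushed it.
 */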
unsigned long *unwind_get_return_address_ptr(struct unwind_state *state)
{
	if (unwind_done(state))
		return NULL;

	if (state->regs)
		return &state->regs->ip;

	if (state->sp)
		return (unsigned long *)state->sp - 1;

	return NULL;
}

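/*
 * An access is OK if it stays on the stack the unwinder is currently
 * walking, or if it lands on another valid kernel stack (the unwind may
 * legitimately cross from an IRQ or exception stack to the task stack).
 * get_stack_info() updates stack_mask so each stack type is visited at
 * most once, which prevents cycles between stacks.
 */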
static bool stack_access_ok(struct unwind_state *state, unsigned long _addr,
			    size_t len)
{
	struct stack_info *info = &state->stack_info;
	void *addr = (void *)_addr;

	if (!on_stack(info, addr, len) &&
	    (get_stack_info(addr, state->task, info, &state->stack_mask)))
		return false;

	return true;
}

static bool deref_stack_reg(struct unwind_state *state, unsigned long addr,
			    unsigned long *val)
{
	if (!stack_access_ok(state, addr, sizeof(long)))
		return false;

	*val = READ_ONCE_NOCHECK(*(unsigned long *)addr);
	return true;
}

static bool deref_stack_regs(struct unwind_state *state, unsigned long addr,
			     unsigned long *ip, unsigned long *sp)
{
	struct pt_regs *regs = (struct pt_regs *)addr;

	/* x86-32 support will be more complicated due to the &regs->sp hack */
	BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_32));

	if (!stack_access_ok(state, addr, sizeof(struct pt_regs)))
		return false;

	*ip = READ_ONCE_NOCHECK(regs->ip);
	*sp = READ_ONCE_NOCHECK(regs->sp);
	return true;
}

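/*
 * An iret frame holds only the tail of pt_regs (ip, cs, flags, sp, ss), so
 * back the pointer up by IRET_FRAME_OFFSET to make those fields line up
 * with their usual pt_regs offsets before reading ip and sp.
 */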
static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr,
				  unsigned long *ip, unsigned long *sp)
{
	struct pt_regs *regs = (void *)addr - IRET_FRAME_OFFSET;

	if (!stack_access_ok(state, addr, IRET_FRAME_SIZE))
		return false;

	*ip = READ_ONCE_NOCHECK(regs->ip);
	*sp = READ_ONCE_NOCHECK(regs->sp);
	return true;
}

/*
 * If state->regs is non-NULL, and points to a full pt_regs, just get the reg
 * value from state->regs.
 *
 * Otherwise, if state->regs just points to IRET regs, and the previous frame
 * had full regs, it's safe to get the value from the previous regs. This can
 * happen when early/late IRQ entry code gets interrupted by an NMI.
 */
static bool get_reg(struct unwind_state *state, unsigned int reg_off,
		    unsigned long *val)
{
	unsigned int reg = reg_off/8;

	if (!state->regs)
		return false;

	if (state->full_regs) {
		*val = READ_ONCE_NOCHECK(((unsigned long *)state->regs)[reg]);
		return true;
	}

	if (state->prev_regs) {
		*val = READ_ONCE_NOCHECK(((unsigned long *)state->prev_regs)[reg]);
		return true;
	}

	return false;
}

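/*
 * Advance to the caller's frame in three steps: look up the ORC entry for
 * the current ip, compute the previous frame's SP from (sp_reg, sp_offset),
 * then recover IP/SP/regs according to the entry type and finally BP from
 * (bp_reg, bp_offset).  Typical caller pattern (roughly what
 * arch_stack_walk() does):
 *
 *	for (unwind_start(&state, task, regs, NULL); !unwind_done(&state);
 *	     unwind_next_frame(&state)) {
 *		addr = unwind_get_return_address(&state);
 *		...
 *	}
 */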
bool unwind_next_frame(struct unwind_state *state)
{
	unsigned long ip_p, sp, tmp, orig_ip = state->ip, prev_sp = state->sp;
	enum stack_type prev_type = state->stack_info.type;
	struct orc_entry *orc;
	bool indirect = false;

	if (unwind_done(state))
		return false;

	/* Don't let modules unload while we're reading their ORC data. */
	preempt_disable();

	/* End-of-stack check for user tasks: */
	if (state->regs && user_mode(state->regs))
		goto the_end;

	/*
	 * Find the orc_entry associated with the text address.
	 *
	 * For a call frame (as opposed to a signal frame), state->ip points to
	 * the instruction after the call. That instruction's stack layout
	 * could be different from the call instruction's layout, for example
	 * if the call was to a noreturn function. So get the ORC data for the
	 * call instruction itself.
	 */
	orc = orc_find(state->signal ? state->ip : state->ip - 1);
	if (!orc) {
		/*
		 * As a fallback, try to assume this code uses a frame pointer.
		 * This is useful for generated code, like BPF, which ORC
		 * doesn't know about. This is just a guess, so the rest of
		 * the unwind is no longer considered reliable.
		 */
		orc = &orc_fp_entry;
		state->error = true;
	}

	/* End-of-stack check for kernel threads: */
	if (orc->sp_reg == ORC_REG_UNDEFINED) {
		if (!orc->end)
			goto err;

		goto the_end;
	}

	/* Find the previous frame's stack: */
	switch (orc->sp_reg) {
	case ORC_REG_SP:
		sp = state->sp + orc->sp_offset;
		break;

	case ORC_REG_BP:
		sp = state->bp + orc->sp_offset;
		break;

	case ORC_REG_SP_INDIRECT:
		sp = state->sp;
		indirect = true;
		break;

	case ORC_REG_BP_INDIRECT:
		sp = state->bp + orc->sp_offset;
		indirect = true;
		break;

	case ORC_REG_R10:
		if (!get_reg(state, offsetof(struct pt_regs, r10), &sp)) {
			orc_warn_current("missing R10 value at %pB\n",
					 (void *)state->ip);
			goto err;
		}
		break;

	case ORC_REG_R13:
		if (!get_reg(state, offsetof(struct pt_regs, r13), &sp)) {
			orc_warn_current("missing R13 value at %pB\n",
					 (void *)state->ip);
			goto err;
		}
		break;

	case ORC_REG_DI:
		if (!get_reg(state, offsetof(struct pt_regs, di), &sp)) {
			orc_warn_current("missing RDI value at %pB\n",
					 (void *)state->ip);
			goto err;
		}
		break;

	case ORC_REG_DX:
		if (!get_reg(state, offsetof(struct pt_regs, dx), &sp)) {
			orc_warn_current("missing RDX value at %pB\n",
					 (void *)state->ip);
			goto err;
		}
		break;

	default:
		orc_warn("unknown SP base reg %d at %pB\n",
			 orc->sp_reg, (void *)state->ip);
		goto err;
	}

	if (indirect) {
		if (!deref_stack_reg(state, sp, &sp))
			goto err;

		if (orc->sp_reg == ORC_REG_SP_INDIRECT)
			sp += orc->sp_offset;
	}

	/* Find IP, SP and possibly regs: */
	switch (orc->type) {
	case UNWIND_HINT_TYPE_CALL:
		ip_p = sp - sizeof(long);

		if (!deref_stack_reg(state, ip_p, &state->ip))
			goto err;

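		/*
		 * If the function graph tracer replaced the real return
		 * address with return_to_handler, ftrace_graph_ret_addr()
		 * gives the original address back.
		 */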
		state->ip = ftrace_graph_ret_addr(state->task, &state->graph_idx,
						  state->ip, (void *)ip_p);

		state->sp = sp;
		state->regs = NULL;
		state->prev_regs = NULL;
		state->signal = false;
		break;

	case UNWIND_HINT_TYPE_REGS:
		if (!deref_stack_regs(state, sp, &state->ip, &state->sp)) {
			orc_warn_current("can't access registers at %pB\n",
					 (void *)orig_ip);
			goto err;
		}

		state->regs = (struct pt_regs *)sp;
		state->prev_regs = NULL;
		state->full_regs = true;
		state->signal = true;
		break;

	case UNWIND_HINT_TYPE_REGS_PARTIAL:
		if (!deref_stack_iret_regs(state, sp, &state->ip, &state->sp)) {
			orc_warn_current("can't access iret registers at %pB\n",
					 (void *)orig_ip);
			goto err;
		}

		if (state->full_regs)
			state->prev_regs = state->regs;
		state->regs = (void *)sp - IRET_FRAME_OFFSET;
		state->full_regs = false;
		state->signal = true;
		break;

	default:
		orc_warn("unknown .orc_unwind entry type %d at %pB\n",
			 orc->type, (void *)orig_ip);
		goto err;
	}

	/* Find BP: */
	switch (orc->bp_reg) {
	case ORC_REG_UNDEFINED:
		if (get_reg(state, offsetof(struct pt_regs, bp), &tmp))
			state->bp = tmp;
		break;

	case ORC_REG_PREV_SP:
		if (!deref_stack_reg(state, sp + orc->bp_offset, &state->bp))
			goto err;
		break;

	case ORC_REG_BP:
		if (!deref_stack_reg(state, state->bp + orc->bp_offset, &state->bp))
			goto err;
		break;

	default:
		orc_warn("unknown BP base reg %d at %pB\n",
			 orc->bp_reg, (void *)orig_ip);
		goto err;
	}

	/* Prevent a recursive loop due to bad ORC data: */
	if (state->stack_info.type == prev_type &&
	    on_stack(&state->stack_info, (void *)state->sp, sizeof(long)) &&
	    state->sp <= prev_sp) {
		orc_warn_current("stack going in the wrong direction? at %pB\n",
				 (void *)orig_ip);
		goto err;
	}

	preempt_enable();
	return true;

err:
	state->error = true;

the_end:
	preempt_enable();
	state->stack_info.type = STACK_TYPE_UNKNOWN;
	return false;
}
EXPORT_SYMBOL_GPL(unwind_next_frame);

void __unwind_start(struct unwind_state *state, struct task_struct *task,
		    struct pt_regs *regs, unsigned long *first_frame)
{
	memset(state, 0, sizeof(*state));
	state->task = task;

	if (!orc_init)
		goto err;

	/*
	 * Refuse to unwind the stack of a task while it's executing on another
	 * CPU. This check is racy, but that's ok: the unwinder has other
	 * checks to prevent it from going off the rails.
	 */
	if (task_on_another_cpu(task))
		goto err;

	if (regs) {
		if (user_mode(regs))
			goto the_end;

		state->ip = regs->ip;
		state->sp = regs->sp;
		state->bp = regs->bp;
		state->regs = regs;
		state->full_regs = true;
		state->signal = true;

	} else if (task == current) {
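		/*
		 * Seed the unwind from right here: "lea (%rip)" reads the
		 * current instruction pointer, and rsp/rbp are copied out
		 * directly.
		 */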
		asm volatile("lea (%%rip), %0\n\t"
			     "mov %%rsp, %1\n\t"
			     "mov %%rbp, %2\n\t"
			     : "=r" (state->ip), "=r" (state->sp),
			       "=r" (state->bp));

	} else {
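		/*
		 * For an inactive task, thread.sp points at the
		 * inactive_task_frame saved at the last context switch;
		 * start just past it, with BP and the return address read
		 * from the frame itself.
		 */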
		struct inactive_task_frame *frame = (void *)task->thread.sp;

		state->sp = task->thread.sp + sizeof(*frame);
		state->bp = READ_ONCE_NOCHECK(frame->bp);
		state->ip = READ_ONCE_NOCHECK(frame->ret_addr);
		state->signal = (void *)state->ip == ret_from_fork;
	}

	if (get_stack_info((unsigned long *)state->sp, state->task,
			   &state->stack_info, &state->stack_mask)) {
		/*
		 * We weren't on a valid stack. It's possible that
		 * we overflowed a valid stack into a guard page.
		 * See if the next page up is valid so that we can
		 * generate some kind of backtrace if this happens.
		 */
		void *next_page = (void *)PAGE_ALIGN((unsigned long)state->sp);
		state->error = true;
		if (get_stack_info(next_page, state->task, &state->stack_info,
				   &state->stack_mask))
			return;
	}

	/*
	 * The caller can provide the address of the first frame directly
	 * (first_frame) or indirectly (regs->sp) to indicate which stack frame
	 * to start unwinding at. Skip ahead until we reach it.
	 */

	/* When starting from regs, skip the regs frame: */
	if (regs) {
		unwind_next_frame(state);
		return;
	}

	/* Otherwise, skip ahead to the user-specified starting frame: */
	while (!unwind_done(state) &&
	       (!on_stack(&state->stack_info, first_frame, sizeof(long)) ||
		state->sp < (unsigned long)first_frame))
		unwind_next_frame(state);

	return;

err:
	state->error = true;
the_end:
	state->stack_info.type = STACK_TYPE_UNKNOWN;
}
EXPORT_SYMBOL_GPL(__unwind_start);