/* (stray "Loading..." page-scrape artifact removed) */
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Kernel unwinding support
4 *
5 * (c) 2002-2004 Randolph Chung <tausq@debian.org>
6 *
7 * Derived partially from the IA64 implementation. The PA-RISC
8 * Runtime Architecture Document is also a useful reference to
9 * understand what is happening here
10 */
11
12#include <linux/kernel.h>
13#include <linux/init.h>
14#include <linux/sched.h>
15#include <linux/slab.h>
16#include <linux/sort.h>
17#include <linux/sched/task_stack.h>
18
19#include <linux/uaccess.h>
20#include <asm/assembly.h>
21#include <asm/asm-offsets.h>
22#include <asm/ptrace.h>
23
24#include <asm/unwind.h>
25#include <asm/switch_to.h>
26#include <asm/sections.h>
27#include <asm/ftrace.h>
28
29/* #define DEBUG 1 */
30#ifdef DEBUG
31#define dbg(x...) pr_debug(x)
32#else
33#define dbg(x...) do { } while (0)
34#endif
35
36#define KERNEL_START (KERNEL_BINARY_TEXT_START)
37
38extern struct unwind_table_entry __start___unwind[];
39extern struct unwind_table_entry __stop___unwind[];
40
41static DEFINE_SPINLOCK(unwind_lock);
42/*
43 * the kernel unwind block is not dynamically allocated so that
44 * we can call unwind_init as early in the bootup process as
45 * possible (before the slab allocator is initialized)
46 */
47static struct unwind_table kernel_unwind_table __ro_after_init;
48static LIST_HEAD(unwind_tables);
49
50static inline const struct unwind_table_entry *
51find_unwind_entry_in_table(const struct unwind_table *table, unsigned long addr)
52{
53 const struct unwind_table_entry *e = NULL;
54 unsigned long lo, hi, mid;
55
56 lo = 0;
57 hi = table->length - 1;
58
59 while (lo <= hi) {
60 mid = (hi - lo) / 2 + lo;
61 e = &table->table[mid];
62 if (addr < e->region_start)
63 hi = mid - 1;
64 else if (addr > e->region_end)
65 lo = mid + 1;
66 else
67 return e;
68 }
69
70 return NULL;
71}
72
/*
 * Look up the unwind entry covering @addr.  The statically built
 * kernel table is probed first (lock-free: it is __ro_after_init);
 * otherwise the list of module tables is walked under unwind_lock,
 * which also protects the move-to-front reordering below.
 */
static const struct unwind_table_entry *
find_unwind_entry(unsigned long addr)
{
        struct unwind_table *table;
        const struct unwind_table_entry *e = NULL;

        if (addr >= kernel_unwind_table.start &&
            addr <= kernel_unwind_table.end)
                e = find_unwind_entry_in_table(&kernel_unwind_table, addr);
        else {
                unsigned long flags;

                spin_lock_irqsave(&unwind_lock, flags);
                list_for_each_entry(table, &unwind_tables, list) {
                        if (addr >= table->start &&
                            addr <= table->end)
                                e = find_unwind_entry_in_table(table, addr);
                        if (e) {
                                /* Move-to-front to exploit common traces */
                                list_move(&table->list, &unwind_tables);
                                break;
                        }
                }
                spin_unlock_irqrestore(&unwind_lock, flags);
        }

        return e;
}
101
/*
 * Fill in @table from the raw entry array [table_start, table_end).
 * Entry addresses in the array are stored relative to @base_addr and
 * are rebased to absolute addresses here, so this must run exactly
 * once per table.  Entries are expected to already be sorted;
 * out-of-order neighbours are only warned about, not repaired.
 *
 * NOTE(review): assumes at least one entry — an empty array would make
 * `end` point before table_start and read garbage; confirm callers
 * never pass an empty range.
 */
static void
unwind_table_init(struct unwind_table *table, const char *name,
                  unsigned long base_addr, unsigned long gp,
                  void *table_start, void *table_end)
{
        struct unwind_table_entry *start = table_start;
        struct unwind_table_entry *end =
                (struct unwind_table_entry *)table_end - 1;

        table->name = name;
        table->base_addr = base_addr;
        table->gp = gp;
        table->start = base_addr + start->region_start;
        table->end = base_addr + end->region_end;
        table->table = (struct unwind_table_entry *)table_start;
        table->length = end - start + 1;
        INIT_LIST_HEAD(&table->list);

        for (; start <= end; start++) {
                if (start < end &&
                    start->region_end > (start+1)->region_start) {
                        pr_warn("Out of order unwind entry! %px and %px\n",
                                start, start+1);
                }

                /* Rebase the per-entry region to an absolute address. */
                start->region_start += base_addr;
                start->region_end += base_addr;
        }
}
131
132static int cmp_unwind_table_entry(const void *a, const void *b)
133{
134 return ((const struct unwind_table_entry *)a)->region_start
135 - ((const struct unwind_table_entry *)b)->region_start;
136}
137
138static void
139unwind_table_sort(struct unwind_table_entry *start,
140 struct unwind_table_entry *finish)
141{
142 sort(start, finish - start, sizeof(struct unwind_table_entry),
143 cmp_unwind_table_entry, NULL);
144}
145
/*
 * Register the unwind data of a newly loaded module.  The raw entry
 * array at [start, end) is sorted in place, wrapped in a dynamically
 * allocated struct unwind_table, rebased, and appended to the global
 * table list.
 *
 * Returns the new table, or NULL on allocation failure (the entry
 * array has then been sorted but not rebased).
 */
struct unwind_table *
unwind_table_add(const char *name, unsigned long base_addr,
                 unsigned long gp,
                 void *start, void *end)
{
        struct unwind_table *table;
        unsigned long flags;
        struct unwind_table_entry *s = (struct unwind_table_entry *)start;
        struct unwind_table_entry *e = (struct unwind_table_entry *)end;

        /* Sort first: unwind_table_init() rebases entries and warns
           about out-of-order neighbours, so order must be final by then. */
        unwind_table_sort(s, e);

        table = kmalloc(sizeof(struct unwind_table), GFP_USER);
        if (table == NULL)
                return NULL;
        unwind_table_init(table, name, base_addr, gp, start, end);
        spin_lock_irqsave(&unwind_lock, flags);
        list_add_tail(&table->list, &unwind_tables);
        spin_unlock_irqrestore(&unwind_lock, flags);

        return table;
}
168
169void unwind_table_remove(struct unwind_table *table)
170{
171 unsigned long flags;
172
173 spin_lock_irqsave(&unwind_lock, flags);
174 list_del(&table->list);
175 spin_unlock_irqrestore(&unwind_lock, flags);
176
177 kfree(table);
178}
179
180/* Called from setup_arch to import the kernel unwind info */
/* Called from setup_arch to import the kernel unwind info */
int __init unwind_init(void)
{
        long start __maybe_unused, stop __maybe_unused;
        /* r27 is the PA-RISC global data pointer (gp/dp); read the live
           register instead of computing the value. */
        register unsigned long gp __asm__ ("r27");

        start = (long)&__start___unwind[0];
        stop = (long)&__stop___unwind[0];

        dbg("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n",
            start, stop,
            (stop - start) / sizeof(struct unwind_table_entry));

        /* The kernel's own table is never run through unwind_table_sort()
           here — presumably the section is emitted already sorted; verify
           against the linker script if entries ever appear out of order. */
        unwind_table_init(&kernel_unwind_table, "kernel", KERNEL_START,
                          gp,
                          &__start___unwind[0], &__stop___unwind[0]);
#if 0
        {
                int i;
                for (i = 0; i < 10; i++)
                {
                        printk("region 0x%x-0x%x\n",
                                __start___unwind[i].region_start,
                                __start___unwind[i].region_end);
                }
        }
#endif
        return 0;
}
209
210static bool pc_is_kernel_fn(unsigned long pc, void *fn)
211{
212 return (unsigned long)dereference_kernel_function_descriptor(fn) == pc;
213}
214
/*
 * Handle PCs that need non-standard unwinding: the interruption
 * handler, the syscall/kernel-thread exit paths, the context switch,
 * and (with CONFIG_IRQSTACKS) the stack-switch trampoline.
 *
 * Returns 1 if prev_sp/prev_ip were filled in here, 0 to fall back to
 * generic prologue-based unwinding.
 */
static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int frame_size)
{
        /*
         * We have to use void * instead of a function pointer, because
         * function pointers aren't a pointer to the function on 64-bit.
         * Make them const so the compiler knows they live in .text
         * Note: We could use dereference_kernel_function_descriptor()
         * instead but we want to keep it simple here.
         */
        extern void * const ret_from_kernel_thread;
        extern void * const syscall_exit;
        extern void * const intr_return;
        extern void * const _switch_to_ret;
#ifdef CONFIG_IRQSTACKS
        extern void * const _call_on_stack;
#endif /* CONFIG_IRQSTACKS */

        if (pc_is_kernel_fn(pc, handle_interruption)) {
                /* The interrupted context was saved as a pt_regs just below
                   the handler's frame; resume unwinding from that state. */
                struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN);
                dbg("Unwinding through handle_interruption()\n");
                info->prev_sp = regs->gr[30];
                info->prev_ip = regs->iaoq[0];
                return 1;
        }

        if (pc == (unsigned long)&ret_from_kernel_thread ||
            pc == (unsigned long)&syscall_exit) {
                /* Bottom of a kernel stack: terminate the walk. */
                info->prev_sp = info->prev_ip = 0;
                return 1;
        }

        if (pc == (unsigned long)&intr_return) {
                struct pt_regs *regs;

                dbg("Found intr_return()\n");
                regs = (struct pt_regs *)(info->sp - PT_SZ_ALGN);
                info->prev_sp = regs->gr[30];
                info->prev_ip = regs->iaoq[0];
                info->rp = regs->gr[2];
                return 1;
        }

        if (pc_is_kernel_fn(pc, _switch_to) ||
            pc == (unsigned long)&_switch_to_ret) {
                /* _switch_to uses a fixed callee-save frame; the return
                   pointer sits at the usual RP slot below the caller's sp. */
                info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
                info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
                return 1;
        }

#ifdef CONFIG_IRQSTACKS
        if (pc == (unsigned long)&_call_on_stack) {
                /* Hop from the IRQ stack back to the interrupted stack:
                   the trampoline stashed the previous sp and rp in its
                   own frame. */
                info->prev_sp = *(unsigned long *)(info->sp - FRAME_SIZE - REG_SZ);
                info->prev_ip = *(unsigned long *)(info->sp - FRAME_SIZE - RP_OFFSET);
                return 1;
        }
#endif
        return 0;
}
273
/*
 * Derive prev_sp/prev_ip for the frame described by @info.  When the
 * PC has unwind info, the function prologue is scanned instruction by
 * instruction to recover the frame size and the saved return pointer;
 * without unwind info the stack is probed heuristically for a
 * plausible kernel return address.
 */
static void unwind_frame_regs(struct unwind_frame_info *info)
{
        const struct unwind_table_entry *e;
        unsigned long npc;
        unsigned int insn;
        long frame_size = 0;
        int looking_for_rp, rpoffset = 0;

        e = find_unwind_entry(info->ip);
        if (e == NULL) {
                unsigned long sp;

                dbg("Cannot find unwind entry for %pS; forced unwinding\n",
                        (void *) info->ip);

                /* Since we are doing the unwinding blind, we don't know if
                   we are adjusting the stack correctly or extracting the rp
                   correctly. The rp is checked to see if it belongs to the
                   kernel text section, if not we assume we don't have a
                   correct stack frame and we continue to unwind the stack.
                   This is not quite correct, and will fail for loadable
                   modules. */
                sp = info->sp & ~63;    /* frames are 64-byte aligned */
                do {
                        unsigned long tmp;

                        info->prev_sp = sp - 64;
                        info->prev_ip = 0;

                        /* Check if stack is inside kernel stack area */
                        if ((info->prev_sp - (unsigned long) task_stack_page(info->t))
                                        >= THREAD_SIZE) {
                                info->prev_sp = 0;
                                break;
                        }

                        /* Fault-safe read: the probed slot may be unmapped. */
                        if (copy_from_kernel_nofault(&tmp,
                            (void *)info->prev_sp - RP_OFFSET, sizeof(tmp)))
                                break;
                        info->prev_ip = tmp;
                        sp = info->prev_sp;
                } while (!kernel_text_address(info->prev_ip));

                info->rp = 0;

                dbg("analyzing func @ %lx with no unwind info, setting "
                    "prev_sp=%lx prev_ip=%lx\n", info->ip,
                    info->prev_sp, info->prev_ip);
        } else {
                dbg("e->start = 0x%x, e->end = 0x%x, Save_SP = %d, "
                    "Save_RP = %d, Millicode = %d size = %u\n",
                    e->region_start, e->region_end, e->Save_SP, e->Save_RP,
                    e->Millicode, e->Total_frame_size);

                looking_for_rp = e->Save_RP;

                /* Scan the prologue until both the advertised frame size is
                   accounted for and (if expected) the rp store is found, or
                   we reach the faulting/current instruction. */
                for (npc = e->region_start;
                     (frame_size < (e->Total_frame_size << 3) ||
                      looking_for_rp) &&
                     npc < info->ip;
                     npc += 4) {

                        insn = *(unsigned int *)npc;

                        /* Masks include the displacement sign bit, so only
                           positive stack growth is matched here. */
                        if ((insn & 0xffffc001) == 0x37de0000 ||
                            (insn & 0xffe00001) == 0x6fc00000) {
                                /* ldo X(sp), sp, or stwm X,D(sp) */
                                frame_size += (insn & 0x3fff) >> 1;
                                dbg("analyzing func @ %lx, insn=%08x @ "
                                    "%lx, frame_size = %ld\n", info->ip,
                                    insn, npc, frame_size);
                        } else if ((insn & 0xffe00009) == 0x73c00008) {
                                /* std,ma X,D(sp) */
                                frame_size += ((insn >> 4) & 0x3ff) << 3;
                                dbg("analyzing func @ %lx, insn=%08x @ "
                                    "%lx, frame_size = %ld\n", info->ip,
                                    insn, npc, frame_size);
                        } else if (insn == 0x6bc23fd9) {
                                /* stw rp,-20(sp) */
                                rpoffset = 20;
                                looking_for_rp = 0;
                                dbg("analyzing func @ %lx, insn=stw rp,"
                                    "-20(sp) @ %lx\n", info->ip, npc);
                        } else if (insn == 0x0fc212c1) {
                                /* std rp,-16(sr0,sp) */
                                rpoffset = 16;
                                looking_for_rp = 0;
                                dbg("analyzing func @ %lx, insn=std rp,"
                                    "-16(sp) @ %lx\n", info->ip, npc);
                        }
                }

                /* Clamp to the advertised size in case the scan over-counted. */
                if (frame_size > e->Total_frame_size << 3)
                        frame_size = e->Total_frame_size << 3;

                if (!unwind_special(info, e->region_start, frame_size)) {
                        info->prev_sp = info->sp - frame_size;
                        if (e->Millicode)
                                info->rp = info->r31;   /* millicode keeps rp in r31 */
                        else if (rpoffset)
                                info->rp = *(unsigned long *)(info->prev_sp - rpoffset);
                        info->prev_ip = info->rp;
                        info->rp = 0;
                }

                dbg("analyzing func @ %lx, setting prev_sp=%lx "
                    "prev_ip=%lx npc=%lx\n", info->ip, info->prev_sp,
                    info->prev_ip, npc);
        }
}
384
385void unwind_frame_init(struct unwind_frame_info *info, struct task_struct *t,
386 struct pt_regs *regs)
387{
388 memset(info, 0, sizeof(struct unwind_frame_info));
389 info->t = t;
390 info->sp = regs->gr[30];
391 info->ip = regs->iaoq[0];
392 info->rp = regs->gr[2];
393 info->r31 = regs->gr[31];
394
395 dbg("(%d) Start unwind from sp=%08lx ip=%08lx\n",
396 t ? (int)t->pid : -1, info->sp, info->ip);
397}
398
/*
 * Initialize unwind state for a task that is not currently running,
 * using its saved kernel stack pointer and PC via a scratch pt_regs
 * copy.
 *
 * NOTE(review): if the GFP_ATOMIC allocation fails, *info is left
 * untouched (possibly uninitialized in the caller) — confirm all
 * callers tolerate that.
 */
void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct task_struct *t)
{
        struct pt_regs *r = &t->thread.regs;
        struct pt_regs *r2;

        r2 = kmalloc(sizeof(struct pt_regs), GFP_ATOMIC);
        if (!r2)
                return;
        *r2 = *r;
        r2->gr[30] = r->ksp;    /* unwind from the saved kernel SP... */
        r2->iaoq[0] = r->kpc;   /* ...and the saved kernel PC */
        unwind_frame_init(info, t, r2);
        kfree(r2);
}
413
/* Read the caller's current stack pointer (%r30). */
#define get_parisc_stackpointer() ({ \
        unsigned long sp; \
        __asm__("copy %%r30, %0" : "=r"(sp)); \
        (sp); \
})
419
420void unwind_frame_init_task(struct unwind_frame_info *info,
421 struct task_struct *task, struct pt_regs *regs)
422{
423 task = task ? task : current;
424
425 if (task == current) {
426 struct pt_regs r;
427
428 if (!regs) {
429 memset(&r, 0, sizeof(r));
430 r.iaoq[0] = _THIS_IP_;
431 r.gr[2] = _RET_IP_;
432 r.gr[30] = get_parisc_stackpointer();
433 regs = &r;
434 }
435 unwind_frame_init(info, task, regs);
436 } else {
437 unwind_frame_init_from_blocked_task(info, task);
438 }
439}
440
441int unwind_once(struct unwind_frame_info *next_frame)
442{
443 unwind_frame_regs(next_frame);
444
445 if (next_frame->prev_sp == 0 ||
446 next_frame->prev_ip == 0)
447 return -1;
448
449 next_frame->sp = next_frame->prev_sp;
450 next_frame->ip = next_frame->prev_ip;
451 next_frame->prev_sp = 0;
452 next_frame->prev_ip = 0;
453
454 dbg("(%d) Continue unwind to sp=%08lx ip=%08lx\n",
455 next_frame->t ? (int)next_frame->t->pid : -1,
456 next_frame->sp, next_frame->ip);
457
458 return 0;
459}
460
461int unwind_to_user(struct unwind_frame_info *info)
462{
463 int ret;
464
465 do {
466 ret = unwind_once(info);
467 } while (!ret && !(info->ip & 3));
468
469 return ret;
470}
471
/*
 * Return the kernel text address @level frames above our caller, or 0
 * when the walk fails, hits a zero IP, or leaves kernel text.
 */
unsigned long return_address(unsigned int level)
{
        struct unwind_frame_info info;

        /* initialize unwind info */
        unwind_frame_init_task(&info, current, NULL);

        /* unwind stack */
        /* +2 skips the frames belonging to return_address() itself and
           the snapshot synthesized inside unwind_frame_init_task(), so
           level 0 refers to our caller. */
        level += 2;
        do {
                if (unwind_once(&info) < 0 || info.ip == 0)
                        return 0;
                if (!kernel_text_address(info.ip))
                        return 0;
        } while (info.ip && level--);

        return info.ip;
}
1/*
2 * Kernel unwinding support
3 *
4 * (c) 2002-2004 Randolph Chung <tausq@debian.org>
5 *
6 * Derived partially from the IA64 implementation. The PA-RISC
7 * Runtime Architecture Document is also a useful reference to
8 * understand what is happening here
9 */
10
11#include <linux/kernel.h>
12#include <linux/init.h>
13#include <linux/sched.h>
14#include <linux/slab.h>
15#include <linux/kallsyms.h>
16#include <linux/sort.h>
17
18#include <asm/uaccess.h>
19#include <asm/assembly.h>
20#include <asm/asm-offsets.h>
21#include <asm/ptrace.h>
22
23#include <asm/unwind.h>
24
25/* #define DEBUG 1 */
26#ifdef DEBUG
27#define dbg(x...) printk(x)
28#else
29#define dbg(x...)
30#endif
31
32#define KERNEL_START (KERNEL_BINARY_TEXT_START)
33
34extern struct unwind_table_entry __start___unwind[];
35extern struct unwind_table_entry __stop___unwind[];
36
37static spinlock_t unwind_lock;
38/*
39 * the kernel unwind block is not dynamically allocated so that
40 * we can call unwind_init as early in the bootup process as
41 * possible (before the slab allocator is initialized)
42 */
43static struct unwind_table kernel_unwind_table __read_mostly;
44static LIST_HEAD(unwind_tables);
45
/*
 * Binary-search a single (sorted) unwind table for the entry whose
 * [region_start, region_end] range contains @addr; NULL when no
 * region covers the address.
 */
static inline const struct unwind_table_entry *
find_unwind_entry_in_table(const struct unwind_table *table, unsigned long addr)
{
        const struct unwind_table_entry *e = NULL;
        unsigned long lo, hi, mid;

        lo = 0;
        hi = table->length - 1;

        while (lo <= hi) {
                mid = (hi - lo) / 2 + lo;       /* overflow-safe midpoint */
                e = &table->table[mid];
                if (addr < e->region_start)
                        hi = mid - 1;
                else if (addr > e->region_end)
                        lo = mid + 1;
                else
                        return e;
        }

        return NULL;
}
68
69static const struct unwind_table_entry *
70find_unwind_entry(unsigned long addr)
71{
72 struct unwind_table *table;
73 const struct unwind_table_entry *e = NULL;
74
75 if (addr >= kernel_unwind_table.start &&
76 addr <= kernel_unwind_table.end)
77 e = find_unwind_entry_in_table(&kernel_unwind_table, addr);
78 else
79 list_for_each_entry(table, &unwind_tables, list) {
80 if (addr >= table->start &&
81 addr <= table->end)
82 e = find_unwind_entry_in_table(table, addr);
83 if (e) {
84 /* Move-to-front to exploit common traces */
85 list_move(&table->list, &unwind_tables);
86 break;
87 }
88 }
89
90 return e;
91}
92
/*
 * Fill in @table from the raw entry array [table_start, table_end).
 * Entry addresses are stored relative to @base_addr and are rebased to
 * absolute addresses here, so this must run exactly once per table.
 * Entries are expected to already be sorted; out-of-order neighbours
 * are only warned about, not repaired.
 */
static void
unwind_table_init(struct unwind_table *table, const char *name,
                  unsigned long base_addr, unsigned long gp,
                  void *table_start, void *table_end)
{
        struct unwind_table_entry *start = table_start;
        struct unwind_table_entry *end =
                (struct unwind_table_entry *)table_end - 1;

        table->name = name;
        table->base_addr = base_addr;
        table->gp = gp;
        table->start = base_addr + start->region_start;
        table->end = base_addr + end->region_end;
        table->table = (struct unwind_table_entry *)table_start;
        table->length = end - start + 1;
        INIT_LIST_HEAD(&table->list);

        for (; start <= end; start++) {
                if (start < end &&
                    start->region_end > (start+1)->region_start) {
                        printk("WARNING: Out of order unwind entry! %p and %p\n", start, start+1);
                }

                /* Rebase the per-entry region to an absolute address. */
                start->region_start += base_addr;
                start->region_end += base_addr;
        }
}
121
/* sort() comparator: order unwind entries by region_start.
   NOTE(review): the unsigned subtraction truncated to int can wrap for
   widely separated regions and report the wrong sign — consider
   explicit <,> comparisons instead. */
static int cmp_unwind_table_entry(const void *a, const void *b)
{
        return ((const struct unwind_table_entry *)a)->region_start
            - ((const struct unwind_table_entry *)b)->region_start;
}
127
128static void
129unwind_table_sort(struct unwind_table_entry *start,
130 struct unwind_table_entry *finish)
131{
132 sort(start, finish - start, sizeof(struct unwind_table_entry),
133 cmp_unwind_table_entry, NULL);
134}
135
/*
 * Register the unwind data of a newly loaded module: sort the raw
 * entry array in place, wrap it in an allocated struct unwind_table,
 * rebase it, and append it to the global list under unwind_lock.
 *
 * Returns the new table, or NULL on allocation failure.
 */
struct unwind_table *
unwind_table_add(const char *name, unsigned long base_addr,
                 unsigned long gp,
                 void *start, void *end)
{
        struct unwind_table *table;
        unsigned long flags;
        struct unwind_table_entry *s = (struct unwind_table_entry *)start;
        struct unwind_table_entry *e = (struct unwind_table_entry *)end;

        /* Sort first: unwind_table_init() rebases the entries, so the
           order must be final by then. */
        unwind_table_sort(s, e);

        table = kmalloc(sizeof(struct unwind_table), GFP_USER);
        if (table == NULL)
                return NULL;
        unwind_table_init(table, name, base_addr, gp, start, end);
        spin_lock_irqsave(&unwind_lock, flags);
        list_add_tail(&table->list, &unwind_tables);
        spin_unlock_irqrestore(&unwind_lock, flags);

        return table;
}
158
159void unwind_table_remove(struct unwind_table *table)
160{
161 unsigned long flags;
162
163 spin_lock_irqsave(&unwind_lock, flags);
164 list_del(&table->list);
165 spin_unlock_irqrestore(&unwind_lock, flags);
166
167 kfree(table);
168}
169
170/* Called from setup_arch to import the kernel unwind info */
/* Called from setup_arch to import the kernel unwind info */
int unwind_init(void)
{
        long start, stop;
        /* r27 is the PA-RISC global data pointer (gp/dp); read the live
           register rather than computing the value. */
        register unsigned long gp __asm__ ("r27");

        start = (long)&__start___unwind[0];
        stop = (long)&__stop___unwind[0];

        /* unwind_lock is first usable from here on. */
        spin_lock_init(&unwind_lock);

        printk("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n",
            start, stop,
            (stop - start) / sizeof(struct unwind_table_entry));

        /* The kernel's own table is not sorted here — presumably the
           section is emitted already sorted; verify if entries ever
           appear out of order. */
        unwind_table_init(&kernel_unwind_table, "kernel", KERNEL_START,
                          gp,
                          &__start___unwind[0], &__stop___unwind[0]);
#if 0
        {
                int i;
                for (i = 0; i < 10; i++)
                {
                        printk("region 0x%x-0x%x\n",
                                __start___unwind[i].region_start,
                                __start___unwind[i].region_end);
                }
        }
#endif
        return 0;
}
201
#ifdef CONFIG_64BIT
/* On 64-bit, a function pointer points at a function descriptor; the
   code address is the third word of the descriptor. */
#define get_func_addr(fptr) fptr[2]
#else
#define get_func_addr(fptr) fptr[0]
#endif

/*
 * Special-case unwinding through handle_interruption(): the
 * interrupted context was saved as a pt_regs just below the handler's
 * frame.  Returns 1 if prev_sp/prev_ip were filled in here, 0 to fall
 * back to generic unwinding.
 */
static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int frame_size)
{
        extern void handle_interruption(int, struct pt_regs *);
        static unsigned long *hi = (unsigned long *)&handle_interruption;

        if (pc == get_func_addr(hi)) {
                struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN);
                dbg("Unwinding through handle_interruption()\n");
                info->prev_sp = regs->gr[30];
                info->prev_ip = regs->iaoq[0];

                return 1;
        }

        return 0;
}
224
/*
 * Derive prev_sp/prev_ip for the frame described by @info.  When the
 * PC has unwind info, the function prologue is scanned instruction by
 * instruction to recover the frame size and the saved return pointer;
 * without unwind info, well-known assembly entry points are matched by
 * symbol name and the stack is otherwise probed heuristically.
 */
static void unwind_frame_regs(struct unwind_frame_info *info)
{
        const struct unwind_table_entry *e;
        unsigned long npc;
        unsigned int insn;
        long frame_size = 0;
        int looking_for_rp, rpoffset = 0;

        e = find_unwind_entry(info->ip);
        if (e == NULL) {
                unsigned long sp;
                extern char _stext[], _etext[];

                dbg("Cannot find unwind entry for 0x%lx; forced unwinding\n", info->ip);

#ifdef CONFIG_KALLSYMS
                /* Handle some frequent special cases.... */
                {
                        char symname[KSYM_NAME_LEN];
                        char *modname;

                        kallsyms_lookup(info->ip, NULL, NULL, &modname,
                                symname);

                        dbg("info->ip = 0x%lx, name = %s\n", info->ip, symname);

                        if (strcmp(symname, "_switch_to_ret") == 0) {
                                /* Context switch: fixed callee-save frame,
                                   rp at the standard slot below prev_sp. */
                                info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
                                info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
                                dbg("_switch_to_ret @ %lx - setting "
                                    "prev_sp=%lx prev_ip=%lx\n",
                                    info->ip, info->prev_sp,
                                    info->prev_ip);
                                return;
                        } else if (strcmp(symname, "ret_from_kernel_thread") == 0 ||
                                   strcmp(symname, "syscall_exit") == 0) {
                                /* Bottom of a kernel stack: stop the walk. */
                                info->prev_ip = info->prev_sp = 0;
                                return;
                        }
                }
#endif

                /* Since we are doing the unwinding blind, we don't know if
                   we are adjusting the stack correctly or extracting the rp
                   correctly. The rp is checked to see if it belongs to the
                   kernel text section, if not we assume we don't have a
                   correct stack frame and we continue to unwind the stack.
                   This is not quite correct, and will fail for loadable
                   modules. */
                sp = info->sp & ~63;    /* frames are 64-byte aligned */
                do {
                        unsigned long tmp;

                        info->prev_sp = sp - 64;
                        info->prev_ip = 0;
                        /* NOTE(review): get_user() on a kernel stack address —
                           presumably relies on kernel-segment access here;
                           newer code uses a fault-safe kernel read instead. */
                        if (get_user(tmp, (unsigned long *)(info->prev_sp - RP_OFFSET)))
                                break;
                        info->prev_ip = tmp;
                        sp = info->prev_sp;
                } while (info->prev_ip < (unsigned long)_stext ||
                         info->prev_ip > (unsigned long)_etext);

                info->rp = 0;

                dbg("analyzing func @ %lx with no unwind info, setting "
                    "prev_sp=%lx prev_ip=%lx\n", info->ip,
                    info->prev_sp, info->prev_ip);
        } else {
                dbg("e->start = 0x%x, e->end = 0x%x, Save_SP = %d, "
                    "Save_RP = %d, Millicode = %d size = %u\n",
                    e->region_start, e->region_end, e->Save_SP, e->Save_RP,
                    e->Millicode, e->Total_frame_size);

                looking_for_rp = e->Save_RP;

                /* Scan the prologue until the advertised frame size is
                   accounted for and (if expected) the rp store is found,
                   or we reach the current instruction. */
                for (npc = e->region_start;
                     (frame_size < (e->Total_frame_size << 3) ||
                      looking_for_rp) &&
                     npc < info->ip;
                     npc += 4) {

                        insn = *(unsigned int *)npc;

                        if ((insn & 0xffffc000) == 0x37de0000 ||
                            (insn & 0xffe00000) == 0x6fc00000) {
                                /* ldo X(sp), sp, or stwm X,D(sp) */
                                /* Sign-extend the 14-bit displacement.
                                   NOTE(review): `-1 << 13` left-shifts a
                                   negative value (formally UB in C) — works
                                   on this target but worth confirming. */
                                frame_size += (insn & 0x1 ? -1 << 13 : 0) |
                                        ((insn & 0x3fff) >> 1);
                                dbg("analyzing func @ %lx, insn=%08x @ "
                                    "%lx, frame_size = %ld\n", info->ip,
                                    insn, npc, frame_size);
                        } else if ((insn & 0xffe00008) == 0x73c00008) {
                                /* std,ma X,D(sp) */
                                frame_size += (insn & 0x1 ? -1 << 13 : 0) |
                                        (((insn >> 4) & 0x3ff) << 3);
                                dbg("analyzing func @ %lx, insn=%08x @ "
                                    "%lx, frame_size = %ld\n", info->ip,
                                    insn, npc, frame_size);
                        } else if (insn == 0x6bc23fd9) {
                                /* stw rp,-20(sp) */
                                rpoffset = 20;
                                looking_for_rp = 0;
                                dbg("analyzing func @ %lx, insn=stw rp,"
                                    "-20(sp) @ %lx\n", info->ip, npc);
                        } else if (insn == 0x0fc212c1) {
                                /* std rp,-16(sr0,sp) */
                                rpoffset = 16;
                                looking_for_rp = 0;
                                dbg("analyzing func @ %lx, insn=std rp,"
                                    "-16(sp) @ %lx\n", info->ip, npc);
                        }
                }

                if (!unwind_special(info, e->region_start, frame_size)) {
                        info->prev_sp = info->sp - frame_size;
                        if (e->Millicode)
                                info->rp = info->r31;   /* millicode keeps rp in r31 */
                        else if (rpoffset)
                                info->rp = *(unsigned long *)(info->prev_sp - rpoffset);
                        info->prev_ip = info->rp;
                        info->rp = 0;
                }

                dbg("analyzing func @ %lx, setting prev_sp=%lx "
                    "prev_ip=%lx npc=%lx\n", info->ip, info->prev_sp,
                    info->prev_ip, npc);
        }
}
353
354void unwind_frame_init(struct unwind_frame_info *info, struct task_struct *t,
355 struct pt_regs *regs)
356{
357 memset(info, 0, sizeof(struct unwind_frame_info));
358 info->t = t;
359 info->sp = regs->gr[30];
360 info->ip = regs->iaoq[0];
361 info->rp = regs->gr[2];
362 info->r31 = regs->gr[31];
363
364 dbg("(%d) Start unwind from sp=%08lx ip=%08lx\n",
365 t ? (int)t->pid : -1, info->sp, info->ip);
366}
367
/*
 * Initialize unwind state for a task that is not currently running,
 * using its saved kernel stack pointer and PC via a scratch pt_regs
 * copy.
 *
 * NOTE(review): if the GFP_ATOMIC allocation fails, *info is left
 * untouched (possibly uninitialized in the caller) — confirm all
 * callers tolerate that.
 */
void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct task_struct *t)
{
        struct pt_regs *r = &t->thread.regs;
        struct pt_regs *r2;

        r2 = kmalloc(sizeof(struct pt_regs), GFP_ATOMIC);
        if (!r2)
                return;
        *r2 = *r;
        r2->gr[30] = r->ksp;    /* unwind from the saved kernel SP... */
        r2->iaoq[0] = r->kpc;   /* ...and the saved kernel PC */
        unwind_frame_init(info, t, r2);
        kfree(r2);
}
382
/* Convenience wrapper: start an unwind of the current task from @regs. */
void unwind_frame_init_running(struct unwind_frame_info *info, struct pt_regs *regs)
{
        unwind_frame_init(info, current, regs);
}
387
388int unwind_once(struct unwind_frame_info *next_frame)
389{
390 unwind_frame_regs(next_frame);
391
392 if (next_frame->prev_sp == 0 ||
393 next_frame->prev_ip == 0)
394 return -1;
395
396 next_frame->sp = next_frame->prev_sp;
397 next_frame->ip = next_frame->prev_ip;
398 next_frame->prev_sp = 0;
399 next_frame->prev_ip = 0;
400
401 dbg("(%d) Continue unwind to sp=%08lx ip=%08lx\n",
402 next_frame->t ? (int)next_frame->t->pid : -1,
403 next_frame->sp, next_frame->ip);
404
405 return 0;
406}
407
408int unwind_to_user(struct unwind_frame_info *info)
409{
410 int ret;
411
412 do {
413 ret = unwind_once(info);
414 } while (!ret && !(info->ip & 3));
415
416 return ret;
417}
418
/*
 * Return the kernel text address @level frames above our caller, or 0
 * when the walk fails, hits a zero IP, or leaves kernel text.
 */
unsigned long return_address(unsigned int level)
{
        struct unwind_frame_info info;
        struct pt_regs r;
        unsigned long sp;

        /* initialize unwind info */
        /* Snapshot this call site: current PC, our caller, and %r30. */
        asm volatile ("copy %%r30, %0" : "=r"(sp));
        memset(&r, 0, sizeof(struct pt_regs));
        r.iaoq[0] = (unsigned long) current_text_addr();
        r.gr[2] = (unsigned long) __builtin_return_address(0);
        r.gr[30] = sp;
        unwind_frame_init(&info, current, &r);

        /* unwind stack */
        /* +1 skips return_address()'s own frame so level 0 is our caller. */
        ++level;
        do {
                if (unwind_once(&info) < 0 || info.ip == 0)
                        return 0;
                if (!__kernel_text_address(info.ip)) {
                        return 0;
                }
        } while (info.ip && level--);

        return info.ip;
}