1/*
2 * Kernel unwinding support
3 *
4 * (c) 2002-2004 Randolph Chung <tausq@debian.org>
5 *
6 * Derived partially from the IA64 implementation. The PA-RISC
7 * Runtime Architecture Document is also a useful reference to
8 * understand what is happening here
9 */
10
11#include <linux/kernel.h>
12#include <linux/init.h>
13#include <linux/sched.h>
14#include <linux/slab.h>
15#include <linux/kallsyms.h>
16#include <linux/sort.h>
17
18#include <asm/uaccess.h>
19#include <asm/assembly.h>
20#include <asm/asm-offsets.h>
21#include <asm/ptrace.h>
22
23#include <asm/unwind.h>
24
25/* #define DEBUG 1 */
26#ifdef DEBUG
27#define dbg(x...) printk(x)
28#else
29#define dbg(x...)
30#endif
31
32#define KERNEL_START (KERNEL_BINARY_TEXT_START)
33
34extern struct unwind_table_entry __start___unwind[];
35extern struct unwind_table_entry __stop___unwind[];
36
/* Protects the list of module unwind tables (unwind_tables). */
static DEFINE_SPINLOCK(unwind_lock);
38/*
39 * the kernel unwind block is not dynamically allocated so that
40 * we can call unwind_init as early in the bootup process as
41 * possible (before the slab allocator is initialized)
42 */
43static struct unwind_table kernel_unwind_table __read_mostly;
44static LIST_HEAD(unwind_tables);
45
46static inline const struct unwind_table_entry *
47find_unwind_entry_in_table(const struct unwind_table *table, unsigned long addr)
48{
49 const struct unwind_table_entry *e = NULL;
50 unsigned long lo, hi, mid;
51
52 lo = 0;
53 hi = table->length - 1;
54
55 while (lo <= hi) {
56 mid = (hi - lo) / 2 + lo;
57 e = &table->table[mid];
58 if (addr < e->region_start)
59 hi = mid - 1;
60 else if (addr > e->region_end)
61 lo = mid + 1;
62 else
63 return e;
64 }
65
66 return NULL;
67}
68
69static const struct unwind_table_entry *
70find_unwind_entry(unsigned long addr)
71{
72 struct unwind_table *table;
73 const struct unwind_table_entry *e = NULL;
74
75 if (addr >= kernel_unwind_table.start &&
76 addr <= kernel_unwind_table.end)
77 e = find_unwind_entry_in_table(&kernel_unwind_table, addr);
78 else
79 list_for_each_entry(table, &unwind_tables, list) {
80 if (addr >= table->start &&
81 addr <= table->end)
82 e = find_unwind_entry_in_table(table, addr);
83 if (e) {
84 /* Move-to-front to exploit common traces */
85 list_move(&table->list, &unwind_tables);
86 break;
87 }
88 }
89
90 return e;
91}
92
93static void
94unwind_table_init(struct unwind_table *table, const char *name,
95 unsigned long base_addr, unsigned long gp,
96 void *table_start, void *table_end)
97{
98 struct unwind_table_entry *start = table_start;
99 struct unwind_table_entry *end =
100 (struct unwind_table_entry *)table_end - 1;
101
102 table->name = name;
103 table->base_addr = base_addr;
104 table->gp = gp;
105 table->start = base_addr + start->region_start;
106 table->end = base_addr + end->region_end;
107 table->table = (struct unwind_table_entry *)table_start;
108 table->length = end - start + 1;
109 INIT_LIST_HEAD(&table->list);
110
111 for (; start <= end; start++) {
112 if (start < end &&
113 start->region_end > (start+1)->region_start) {
114 printk("WARNING: Out of order unwind entry! %p and %p\n", start, start+1);
115 }
116
117 start->region_start += base_addr;
118 start->region_end += base_addr;
119 }
120}
121
122static int cmp_unwind_table_entry(const void *a, const void *b)
123{
124 return ((const struct unwind_table_entry *)a)->region_start
125 - ((const struct unwind_table_entry *)b)->region_start;
126}
127
128static void
129unwind_table_sort(struct unwind_table_entry *start,
130 struct unwind_table_entry *finish)
131{
132 sort(start, finish - start, sizeof(struct unwind_table_entry),
133 cmp_unwind_table_entry, NULL);
134}
135
136struct unwind_table *
137unwind_table_add(const char *name, unsigned long base_addr,
138 unsigned long gp,
139 void *start, void *end)
140{
141 struct unwind_table *table;
142 unsigned long flags;
143 struct unwind_table_entry *s = (struct unwind_table_entry *)start;
144 struct unwind_table_entry *e = (struct unwind_table_entry *)end;
145
146 unwind_table_sort(s, e);
147
148 table = kmalloc(sizeof(struct unwind_table), GFP_USER);
149 if (table == NULL)
150 return NULL;
151 unwind_table_init(table, name, base_addr, gp, start, end);
152 spin_lock_irqsave(&unwind_lock, flags);
153 list_add_tail(&table->list, &unwind_tables);
154 spin_unlock_irqrestore(&unwind_lock, flags);
155
156 return table;
157}
158
159void unwind_table_remove(struct unwind_table *table)
160{
161 unsigned long flags;
162
163 spin_lock_irqsave(&unwind_lock, flags);
164 list_del(&table->list);
165 spin_unlock_irqrestore(&unwind_lock, flags);
166
167 kfree(table);
168}
169
170/* Called from setup_arch to import the kernel unwind info */
171int __init unwind_init(void)
172{
173 long start, stop;
174 register unsigned long gp __asm__ ("r27");
175
176 start = (long)&__start___unwind[0];
177 stop = (long)&__stop___unwind[0];
178
179 spin_lock_init(&unwind_lock);
180
181 printk("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n",
182 start, stop,
183 (stop - start) / sizeof(struct unwind_table_entry));
184
185 unwind_table_init(&kernel_unwind_table, "kernel", KERNEL_START,
186 gp,
187 &__start___unwind[0], &__stop___unwind[0]);
188#if 0
189 {
190 int i;
191 for (i = 0; i < 10; i++)
192 {
193 printk("region 0x%x-0x%x\n",
194 __start___unwind[i].region_start,
195 __start___unwind[i].region_end);
196 }
197 }
198#endif
199 return 0;
200}
201
202#ifdef CONFIG_64BIT
203#define get_func_addr(fptr) fptr[2]
204#else
205#define get_func_addr(fptr) fptr[0]
206#endif
207
208static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int frame_size)
209{
210 extern void handle_interruption(int, struct pt_regs *);
211 static unsigned long *hi = (unsigned long *)&handle_interruption;
212
213 if (pc == get_func_addr(hi)) {
214 struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN);
215 dbg("Unwinding through handle_interruption()\n");
216 info->prev_sp = regs->gr[30];
217 info->prev_ip = regs->iaoq[0];
218
219 return 1;
220 }
221
222 return 0;
223}
224
225static void unwind_frame_regs(struct unwind_frame_info *info)
226{
227 const struct unwind_table_entry *e;
228 unsigned long npc;
229 unsigned int insn;
230 long frame_size = 0;
231 int looking_for_rp, rpoffset = 0;
232
233 e = find_unwind_entry(info->ip);
234 if (e == NULL) {
235 unsigned long sp;
236
237 dbg("Cannot find unwind entry for 0x%lx; forced unwinding\n", info->ip);
238
239#ifdef CONFIG_KALLSYMS
240 /* Handle some frequent special cases.... */
241 {
242 char symname[KSYM_NAME_LEN];
243 char *modname;
244
245 kallsyms_lookup(info->ip, NULL, NULL, &modname,
246 symname);
247
248 dbg("info->ip = 0x%lx, name = %s\n", info->ip, symname);
249
250 if (strcmp(symname, "_switch_to_ret") == 0) {
251 info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
252 info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
253 dbg("_switch_to_ret @ %lx - setting "
254 "prev_sp=%lx prev_ip=%lx\n",
255 info->ip, info->prev_sp,
256 info->prev_ip);
257 return;
258 } else if (strcmp(symname, "ret_from_kernel_thread") == 0 ||
259 strcmp(symname, "syscall_exit") == 0) {
260 info->prev_ip = info->prev_sp = 0;
261 return;
262 }
263 }
264#endif
265
266 /* Since we are doing the unwinding blind, we don't know if
267 we are adjusting the stack correctly or extracting the rp
268 correctly. The rp is checked to see if it belongs to the
269 kernel text section, if not we assume we don't have a
270 correct stack frame and we continue to unwind the stack.
271 This is not quite correct, and will fail for loadable
272 modules. */
273 sp = info->sp & ~63;
274 do {
275 unsigned long tmp;
276
277 info->prev_sp = sp - 64;
278 info->prev_ip = 0;
279 if (get_user(tmp, (unsigned long *)(info->prev_sp - RP_OFFSET)))
280 break;
281 info->prev_ip = tmp;
282 sp = info->prev_sp;
283 } while (!kernel_text_address(info->prev_ip));
284
285 info->rp = 0;
286
287 dbg("analyzing func @ %lx with no unwind info, setting "
288 "prev_sp=%lx prev_ip=%lx\n", info->ip,
289 info->prev_sp, info->prev_ip);
290 } else {
291 dbg("e->start = 0x%x, e->end = 0x%x, Save_SP = %d, "
292 "Save_RP = %d, Millicode = %d size = %u\n",
293 e->region_start, e->region_end, e->Save_SP, e->Save_RP,
294 e->Millicode, e->Total_frame_size);
295
296 looking_for_rp = e->Save_RP;
297
298 for (npc = e->region_start;
299 (frame_size < (e->Total_frame_size << 3) ||
300 looking_for_rp) &&
301 npc < info->ip;
302 npc += 4) {
303
304 insn = *(unsigned int *)npc;
305
306 if ((insn & 0xffffc000) == 0x37de0000 ||
307 (insn & 0xffe00000) == 0x6fc00000) {
308 /* ldo X(sp), sp, or stwm X,D(sp) */
309 frame_size += (insn & 0x1 ? -1 << 13 : 0) |
310 ((insn & 0x3fff) >> 1);
311 dbg("analyzing func @ %lx, insn=%08x @ "
312 "%lx, frame_size = %ld\n", info->ip,
313 insn, npc, frame_size);
314 } else if ((insn & 0xffe00008) == 0x73c00008) {
315 /* std,ma X,D(sp) */
316 frame_size += (insn & 0x1 ? -1 << 13 : 0) |
317 (((insn >> 4) & 0x3ff) << 3);
318 dbg("analyzing func @ %lx, insn=%08x @ "
319 "%lx, frame_size = %ld\n", info->ip,
320 insn, npc, frame_size);
321 } else if (insn == 0x6bc23fd9) {
322 /* stw rp,-20(sp) */
323 rpoffset = 20;
324 looking_for_rp = 0;
325 dbg("analyzing func @ %lx, insn=stw rp,"
326 "-20(sp) @ %lx\n", info->ip, npc);
327 } else if (insn == 0x0fc212c1) {
328 /* std rp,-16(sr0,sp) */
329 rpoffset = 16;
330 looking_for_rp = 0;
331 dbg("analyzing func @ %lx, insn=std rp,"
332 "-16(sp) @ %lx\n", info->ip, npc);
333 }
334 }
335
336 if (!unwind_special(info, e->region_start, frame_size)) {
337 info->prev_sp = info->sp - frame_size;
338 if (e->Millicode)
339 info->rp = info->r31;
340 else if (rpoffset)
341 info->rp = *(unsigned long *)(info->prev_sp - rpoffset);
342 info->prev_ip = info->rp;
343 info->rp = 0;
344 }
345
346 dbg("analyzing func @ %lx, setting prev_sp=%lx "
347 "prev_ip=%lx npc=%lx\n", info->ip, info->prev_sp,
348 info->prev_ip, npc);
349 }
350}
351
352void unwind_frame_init(struct unwind_frame_info *info, struct task_struct *t,
353 struct pt_regs *regs)
354{
355 memset(info, 0, sizeof(struct unwind_frame_info));
356 info->t = t;
357 info->sp = regs->gr[30];
358 info->ip = regs->iaoq[0];
359 info->rp = regs->gr[2];
360 info->r31 = regs->gr[31];
361
362 dbg("(%d) Start unwind from sp=%08lx ip=%08lx\n",
363 t ? (int)t->pid : -1, info->sp, info->ip);
364}
365
366void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct task_struct *t)
367{
368 struct pt_regs *r = &t->thread.regs;
369 struct pt_regs *r2;
370
371 r2 = kmalloc(sizeof(struct pt_regs), GFP_ATOMIC);
372 if (!r2)
373 return;
374 *r2 = *r;
375 r2->gr[30] = r->ksp;
376 r2->iaoq[0] = r->kpc;
377 unwind_frame_init(info, t, r2);
378 kfree(r2);
379}
380
381void unwind_frame_init_running(struct unwind_frame_info *info, struct pt_regs *regs)
382{
383 unwind_frame_init(info, current, regs);
384}
385
386int unwind_once(struct unwind_frame_info *next_frame)
387{
388 unwind_frame_regs(next_frame);
389
390 if (next_frame->prev_sp == 0 ||
391 next_frame->prev_ip == 0)
392 return -1;
393
394 next_frame->sp = next_frame->prev_sp;
395 next_frame->ip = next_frame->prev_ip;
396 next_frame->prev_sp = 0;
397 next_frame->prev_ip = 0;
398
399 dbg("(%d) Continue unwind to sp=%08lx ip=%08lx\n",
400 next_frame->t ? (int)next_frame->t->pid : -1,
401 next_frame->sp, next_frame->ip);
402
403 return 0;
404}
405
406int unwind_to_user(struct unwind_frame_info *info)
407{
408 int ret;
409
410 do {
411 ret = unwind_once(info);
412 } while (!ret && !(info->ip & 3));
413
414 return ret;
415}
416
417unsigned long return_address(unsigned int level)
418{
419 struct unwind_frame_info info;
420 struct pt_regs r;
421 unsigned long sp;
422
423 /* initialize unwind info */
424 asm volatile ("copy %%r30, %0" : "=r"(sp));
425 memset(&r, 0, sizeof(struct pt_regs));
426 r.iaoq[0] = (unsigned long) current_text_addr();
427 r.gr[2] = (unsigned long) __builtin_return_address(0);
428 r.gr[30] = sp;
429 unwind_frame_init(&info, current, &r);
430
431 /* unwind stack */
432 ++level;
433 do {
434 if (unwind_once(&info) < 0 || info.ip == 0)
435 return 0;
436 if (!kernel_text_address(info.ip))
437 return 0;
438 } while (info.ip && level--);
439
440 return info.ip;
441}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Kernel unwinding support
4 *
5 * (c) 2002-2004 Randolph Chung <tausq@debian.org>
6 *
7 * Derived partially from the IA64 implementation. The PA-RISC
8 * Runtime Architecture Document is also a useful reference to
9 * understand what is happening here
10 */
11
12#include <linux/kernel.h>
13#include <linux/init.h>
14#include <linux/sched.h>
15#include <linux/slab.h>
16#include <linux/kallsyms.h>
17#include <linux/sort.h>
18
19#include <linux/uaccess.h>
20#include <asm/assembly.h>
21#include <asm/asm-offsets.h>
22#include <asm/ptrace.h>
23
24#include <asm/unwind.h>
25
26/* #define DEBUG 1 */
27#ifdef DEBUG
28#define dbg(x...) printk(x)
29#else
30#define dbg(x...)
31#endif
32
33#define KERNEL_START (KERNEL_BINARY_TEXT_START)
34
35extern struct unwind_table_entry __start___unwind[];
36extern struct unwind_table_entry __stop___unwind[];
37
38static DEFINE_SPINLOCK(unwind_lock);
39/*
40 * the kernel unwind block is not dynamically allocated so that
41 * we can call unwind_init as early in the bootup process as
42 * possible (before the slab allocator is initialized)
43 */
44static struct unwind_table kernel_unwind_table __read_mostly;
45static LIST_HEAD(unwind_tables);
46
47static inline const struct unwind_table_entry *
48find_unwind_entry_in_table(const struct unwind_table *table, unsigned long addr)
49{
50 const struct unwind_table_entry *e = NULL;
51 unsigned long lo, hi, mid;
52
53 lo = 0;
54 hi = table->length - 1;
55
56 while (lo <= hi) {
57 mid = (hi - lo) / 2 + lo;
58 e = &table->table[mid];
59 if (addr < e->region_start)
60 hi = mid - 1;
61 else if (addr > e->region_end)
62 lo = mid + 1;
63 else
64 return e;
65 }
66
67 return NULL;
68}
69
70static const struct unwind_table_entry *
71find_unwind_entry(unsigned long addr)
72{
73 struct unwind_table *table;
74 const struct unwind_table_entry *e = NULL;
75
76 if (addr >= kernel_unwind_table.start &&
77 addr <= kernel_unwind_table.end)
78 e = find_unwind_entry_in_table(&kernel_unwind_table, addr);
79 else {
80 unsigned long flags;
81
82 spin_lock_irqsave(&unwind_lock, flags);
83 list_for_each_entry(table, &unwind_tables, list) {
84 if (addr >= table->start &&
85 addr <= table->end)
86 e = find_unwind_entry_in_table(table, addr);
87 if (e) {
88 /* Move-to-front to exploit common traces */
89 list_move(&table->list, &unwind_tables);
90 break;
91 }
92 }
93 spin_unlock_irqrestore(&unwind_lock, flags);
94 }
95
96 return e;
97}
98
99static void
100unwind_table_init(struct unwind_table *table, const char *name,
101 unsigned long base_addr, unsigned long gp,
102 void *table_start, void *table_end)
103{
104 struct unwind_table_entry *start = table_start;
105 struct unwind_table_entry *end =
106 (struct unwind_table_entry *)table_end - 1;
107
108 table->name = name;
109 table->base_addr = base_addr;
110 table->gp = gp;
111 table->start = base_addr + start->region_start;
112 table->end = base_addr + end->region_end;
113 table->table = (struct unwind_table_entry *)table_start;
114 table->length = end - start + 1;
115 INIT_LIST_HEAD(&table->list);
116
117 for (; start <= end; start++) {
118 if (start < end &&
119 start->region_end > (start+1)->region_start) {
120 printk("WARNING: Out of order unwind entry! %p and %p\n", start, start+1);
121 }
122
123 start->region_start += base_addr;
124 start->region_end += base_addr;
125 }
126}
127
128static int cmp_unwind_table_entry(const void *a, const void *b)
129{
130 return ((const struct unwind_table_entry *)a)->region_start
131 - ((const struct unwind_table_entry *)b)->region_start;
132}
133
134static void
135unwind_table_sort(struct unwind_table_entry *start,
136 struct unwind_table_entry *finish)
137{
138 sort(start, finish - start, sizeof(struct unwind_table_entry),
139 cmp_unwind_table_entry, NULL);
140}
141
142struct unwind_table *
143unwind_table_add(const char *name, unsigned long base_addr,
144 unsigned long gp,
145 void *start, void *end)
146{
147 struct unwind_table *table;
148 unsigned long flags;
149 struct unwind_table_entry *s = (struct unwind_table_entry *)start;
150 struct unwind_table_entry *e = (struct unwind_table_entry *)end;
151
152 unwind_table_sort(s, e);
153
154 table = kmalloc(sizeof(struct unwind_table), GFP_USER);
155 if (table == NULL)
156 return NULL;
157 unwind_table_init(table, name, base_addr, gp, start, end);
158 spin_lock_irqsave(&unwind_lock, flags);
159 list_add_tail(&table->list, &unwind_tables);
160 spin_unlock_irqrestore(&unwind_lock, flags);
161
162 return table;
163}
164
165void unwind_table_remove(struct unwind_table *table)
166{
167 unsigned long flags;
168
169 spin_lock_irqsave(&unwind_lock, flags);
170 list_del(&table->list);
171 spin_unlock_irqrestore(&unwind_lock, flags);
172
173 kfree(table);
174}
175
176/* Called from setup_arch to import the kernel unwind info */
177int __init unwind_init(void)
178{
179 long start, stop;
180 register unsigned long gp __asm__ ("r27");
181
182 start = (long)&__start___unwind[0];
183 stop = (long)&__stop___unwind[0];
184
185 printk("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n",
186 start, stop,
187 (stop - start) / sizeof(struct unwind_table_entry));
188
189 unwind_table_init(&kernel_unwind_table, "kernel", KERNEL_START,
190 gp,
191 &__start___unwind[0], &__stop___unwind[0]);
192#if 0
193 {
194 int i;
195 for (i = 0; i < 10; i++)
196 {
197 printk("region 0x%x-0x%x\n",
198 __start___unwind[i].region_start,
199 __start___unwind[i].region_end);
200 }
201 }
202#endif
203 return 0;
204}
205
206#ifdef CONFIG_64BIT
207#define get_func_addr(fptr) fptr[2]
208#else
209#define get_func_addr(fptr) fptr[0]
210#endif
211
212static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int frame_size)
213{
214 extern void handle_interruption(int, struct pt_regs *);
215 static unsigned long *hi = (unsigned long *)&handle_interruption;
216
217 if (pc == get_func_addr(hi)) {
218 struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN);
219 dbg("Unwinding through handle_interruption()\n");
220 info->prev_sp = regs->gr[30];
221 info->prev_ip = regs->iaoq[0];
222
223 return 1;
224 }
225
226 return 0;
227}
228
229static void unwind_frame_regs(struct unwind_frame_info *info)
230{
231 const struct unwind_table_entry *e;
232 unsigned long npc;
233 unsigned int insn;
234 long frame_size = 0;
235 int looking_for_rp, rpoffset = 0;
236
237 e = find_unwind_entry(info->ip);
238 if (e == NULL) {
239 unsigned long sp;
240
241 dbg("Cannot find unwind entry for 0x%lx; forced unwinding\n", info->ip);
242
243#ifdef CONFIG_KALLSYMS
244 /* Handle some frequent special cases.... */
245 {
246 char symname[KSYM_NAME_LEN];
247 char *modname;
248
249 kallsyms_lookup(info->ip, NULL, NULL, &modname,
250 symname);
251
252 dbg("info->ip = 0x%lx, name = %s\n", info->ip, symname);
253
254 if (strcmp(symname, "_switch_to_ret") == 0) {
255 info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
256 info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
257 dbg("_switch_to_ret @ %lx - setting "
258 "prev_sp=%lx prev_ip=%lx\n",
259 info->ip, info->prev_sp,
260 info->prev_ip);
261 return;
262 } else if (strcmp(symname, "ret_from_kernel_thread") == 0 ||
263 strcmp(symname, "syscall_exit") == 0) {
264 info->prev_ip = info->prev_sp = 0;
265 return;
266 }
267 }
268#endif
269
270 /* Since we are doing the unwinding blind, we don't know if
271 we are adjusting the stack correctly or extracting the rp
272 correctly. The rp is checked to see if it belongs to the
273 kernel text section, if not we assume we don't have a
274 correct stack frame and we continue to unwind the stack.
275 This is not quite correct, and will fail for loadable
276 modules. */
277 sp = info->sp & ~63;
278 do {
279 unsigned long tmp;
280
281 info->prev_sp = sp - 64;
282 info->prev_ip = 0;
283
284 /* The stack is at the end inside the thread_union
285 * struct. If we reach data, we have reached the
286 * beginning of the stack and should stop unwinding. */
287 if (info->prev_sp >= (unsigned long) task_thread_info(info->t) &&
288 info->prev_sp < ((unsigned long) task_thread_info(info->t)
289 + THREAD_SZ_ALGN)) {
290 info->prev_sp = 0;
291 break;
292 }
293
294 if (get_user(tmp, (unsigned long *)(info->prev_sp - RP_OFFSET)))
295 break;
296 info->prev_ip = tmp;
297 sp = info->prev_sp;
298 } while (!kernel_text_address(info->prev_ip));
299
300 info->rp = 0;
301
302 dbg("analyzing func @ %lx with no unwind info, setting "
303 "prev_sp=%lx prev_ip=%lx\n", info->ip,
304 info->prev_sp, info->prev_ip);
305 } else {
306 dbg("e->start = 0x%x, e->end = 0x%x, Save_SP = %d, "
307 "Save_RP = %d, Millicode = %d size = %u\n",
308 e->region_start, e->region_end, e->Save_SP, e->Save_RP,
309 e->Millicode, e->Total_frame_size);
310
311 looking_for_rp = e->Save_RP;
312
313 for (npc = e->region_start;
314 (frame_size < (e->Total_frame_size << 3) ||
315 looking_for_rp) &&
316 npc < info->ip;
317 npc += 4) {
318
319 insn = *(unsigned int *)npc;
320
321 if ((insn & 0xffffc001) == 0x37de0000 ||
322 (insn & 0xffe00001) == 0x6fc00000) {
323 /* ldo X(sp), sp, or stwm X,D(sp) */
324 frame_size += (insn & 0x3fff) >> 1;
325 dbg("analyzing func @ %lx, insn=%08x @ "
326 "%lx, frame_size = %ld\n", info->ip,
327 insn, npc, frame_size);
328 } else if ((insn & 0xffe00009) == 0x73c00008) {
329 /* std,ma X,D(sp) */
330 frame_size += ((insn >> 4) & 0x3ff) << 3;
331 dbg("analyzing func @ %lx, insn=%08x @ "
332 "%lx, frame_size = %ld\n", info->ip,
333 insn, npc, frame_size);
334 } else if (insn == 0x6bc23fd9) {
335 /* stw rp,-20(sp) */
336 rpoffset = 20;
337 looking_for_rp = 0;
338 dbg("analyzing func @ %lx, insn=stw rp,"
339 "-20(sp) @ %lx\n", info->ip, npc);
340 } else if (insn == 0x0fc212c1) {
341 /* std rp,-16(sr0,sp) */
342 rpoffset = 16;
343 looking_for_rp = 0;
344 dbg("analyzing func @ %lx, insn=std rp,"
345 "-16(sp) @ %lx\n", info->ip, npc);
346 }
347 }
348
349 if (frame_size > e->Total_frame_size << 3)
350 frame_size = e->Total_frame_size << 3;
351
352 if (!unwind_special(info, e->region_start, frame_size)) {
353 info->prev_sp = info->sp - frame_size;
354 if (e->Millicode)
355 info->rp = info->r31;
356 else if (rpoffset)
357 info->rp = *(unsigned long *)(info->prev_sp - rpoffset);
358 info->prev_ip = info->rp;
359 info->rp = 0;
360 }
361
362 dbg("analyzing func @ %lx, setting prev_sp=%lx "
363 "prev_ip=%lx npc=%lx\n", info->ip, info->prev_sp,
364 info->prev_ip, npc);
365 }
366}
367
368void unwind_frame_init(struct unwind_frame_info *info, struct task_struct *t,
369 struct pt_regs *regs)
370{
371 memset(info, 0, sizeof(struct unwind_frame_info));
372 info->t = t;
373 info->sp = regs->gr[30];
374 info->ip = regs->iaoq[0];
375 info->rp = regs->gr[2];
376 info->r31 = regs->gr[31];
377
378 dbg("(%d) Start unwind from sp=%08lx ip=%08lx\n",
379 t ? (int)t->pid : -1, info->sp, info->ip);
380}
381
382void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct task_struct *t)
383{
384 struct pt_regs *r = &t->thread.regs;
385 struct pt_regs *r2;
386
387 r2 = kmalloc(sizeof(struct pt_regs), GFP_ATOMIC);
388 if (!r2)
389 return;
390 *r2 = *r;
391 r2->gr[30] = r->ksp;
392 r2->iaoq[0] = r->kpc;
393 unwind_frame_init(info, t, r2);
394 kfree(r2);
395}
396
397void unwind_frame_init_running(struct unwind_frame_info *info, struct pt_regs *regs)
398{
399 unwind_frame_init(info, current, regs);
400}
401
402int unwind_once(struct unwind_frame_info *next_frame)
403{
404 unwind_frame_regs(next_frame);
405
406 if (next_frame->prev_sp == 0 ||
407 next_frame->prev_ip == 0)
408 return -1;
409
410 next_frame->sp = next_frame->prev_sp;
411 next_frame->ip = next_frame->prev_ip;
412 next_frame->prev_sp = 0;
413 next_frame->prev_ip = 0;
414
415 dbg("(%d) Continue unwind to sp=%08lx ip=%08lx\n",
416 next_frame->t ? (int)next_frame->t->pid : -1,
417 next_frame->sp, next_frame->ip);
418
419 return 0;
420}
421
422int unwind_to_user(struct unwind_frame_info *info)
423{
424 int ret;
425
426 do {
427 ret = unwind_once(info);
428 } while (!ret && !(info->ip & 3));
429
430 return ret;
431}
432
433unsigned long return_address(unsigned int level)
434{
435 struct unwind_frame_info info;
436 struct pt_regs r;
437 unsigned long sp;
438
439 /* initialize unwind info */
440 asm volatile ("copy %%r30, %0" : "=r"(sp));
441 memset(&r, 0, sizeof(struct pt_regs));
442 r.iaoq[0] = (unsigned long) current_text_addr();
443 r.gr[2] = (unsigned long) __builtin_return_address(0);
444 r.gr[30] = sp;
445 unwind_frame_init(&info, current, &r);
446
447 /* unwind stack */
448 ++level;
449 do {
450 if (unwind_once(&info) < 0 || info.ip == 0)
451 return 0;
452 if (!kernel_text_address(info.ip))
453 return 0;
454 } while (info.ip && level--);
455
456 return info.ip;
457}