1/*
2 * Copyright 2010 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 *
14 * A code-rewriter that enables instruction single-stepping.
15 * Derived from iLib's single-stepping code.
16 */
17
18#ifndef __tilegx__ /* Hardware support for single step unavailable. */
19
20/* These functions are only used on the TILE platform */
21#include <linux/slab.h>
22#include <linux/thread_info.h>
23#include <linux/uaccess.h>
24#include <linux/mman.h>
25#include <linux/types.h>
26#include <linux/err.h>
27#include <asm/cacheflush.h>
28#include <asm/unaligned.h>
29#include <arch/abi.h>
30#include <arch/opcode.h>
31
32#define signExtend17(val) sign_extend((val), 17)
33#define TILE_X1_MASK (0xffffffffULL << 31)
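/*
 * Note: the X1 helpers below clear this mask and then OR in a freshly
 * encoded instruction, so it is assumed to cover the X1 issue slot of
 * the 64-bit bundle while leaving the X0 slot untouched.
 */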
34
35int unaligned_printk;
36
37static int __init setup_unaligned_printk(char *str)
38{
39 long val;
40 if (strict_strtol(str, 0, &val) != 0)
41 return 0;
42 unaligned_printk = val;
43 pr_info("Printk for each unaligned data access is %s\n",
44 unaligned_printk ? "enabled" : "disabled");
45 return 1;
46}
47__setup("unaligned_printk=", setup_unaligned_printk);
48
49unsigned int unaligned_fixup_count;
50
51enum mem_op {
52 MEMOP_NONE,
53 MEMOP_LOAD,
54 MEMOP_STORE,
55 MEMOP_LOAD_POSTINCR,
56 MEMOP_STORE_POSTINCR
57};
58
59static inline tile_bundle_bits set_BrOff_X1(tile_bundle_bits n, s32 offset)
60{
61 tile_bundle_bits result;
62
63 /* mask out the old offset */
64 tile_bundle_bits mask = create_BrOff_X1(-1);
65 result = n & (~mask);
66
67 /* or in the new offset */
68 result |= create_BrOff_X1(offset);
69
70 return result;
71}
72
73static inline tile_bundle_bits move_X1(tile_bundle_bits n, int dest, int src)
74{
75 tile_bundle_bits result;
76 tile_bundle_bits op;
77
78 result = n & (~TILE_X1_MASK);
79
80 op = create_Opcode_X1(SPECIAL_0_OPCODE_X1) |
81 create_RRROpcodeExtension_X1(OR_SPECIAL_0_OPCODE_X1) |
82 create_Dest_X1(dest) |
83 create_SrcB_X1(TREG_ZERO) |
84 create_SrcA_X1(src) ;
85
86 result |= op;
87 return result;
88}
89
90static inline tile_bundle_bits nop_X1(tile_bundle_bits n)
91{
92 return move_X1(n, TREG_ZERO, TREG_ZERO);
93}
94
95static inline tile_bundle_bits addi_X1(
96 tile_bundle_bits n, int dest, int src, int imm)
97{
98 n &= ~TILE_X1_MASK;
99
100 n |= (create_SrcA_X1(src) |
101 create_Dest_X1(dest) |
102 create_Imm8_X1(imm) |
103 create_S_X1(0) |
104 create_Opcode_X1(IMM_0_OPCODE_X1) |
105 create_ImmOpcodeExtension_X1(ADDI_IMM_0_OPCODE_X1));
106
107 return n;
108}
109
110static tile_bundle_bits rewrite_load_store_unaligned(
111 struct single_step_state *state,
112 tile_bundle_bits bundle,
113 struct pt_regs *regs,
114 enum mem_op mem_op,
115 int size, int sign_ext)
116{
117 unsigned char __user *addr;
118 int val_reg, addr_reg, err, val;
119
120 /* Get address and value registers */
121 if (bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK) {
122 addr_reg = get_SrcA_Y2(bundle);
123 val_reg = get_SrcBDest_Y2(bundle);
124 } else if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
125 addr_reg = get_SrcA_X1(bundle);
126 val_reg = get_Dest_X1(bundle);
127 } else {
128 addr_reg = get_SrcA_X1(bundle);
129 val_reg = get_SrcB_X1(bundle);
130 }
131
132 /*
133 * If registers are not GPRs, don't try to handle it.
134 *
135 * FIXME: we could handle non-GPR loads by getting the real value
136 * from memory, writing it to the single step buffer, using a
137 * temp_reg to hold a pointer to that memory, then executing that
138 * instruction and resetting temp_reg. For non-GPR stores, it's a
139 * little trickier; we could use the single step buffer for that
140 * too, but we'd have to add some more state bits so that we could
141 * call back in here to copy that value to the real target. For
142 * now, we just handle the simple case.
143 */
144 if ((val_reg >= PTREGS_NR_GPRS &&
145 (val_reg != TREG_ZERO ||
146 mem_op == MEMOP_LOAD ||
147 mem_op == MEMOP_LOAD_POSTINCR)) ||
148 addr_reg >= PTREGS_NR_GPRS)
149 return bundle;
150
151 /* If it's aligned, don't handle it specially */
152 addr = (void __user *)regs->regs[addr_reg];
153 if (((unsigned long)addr % size) == 0)
154 return bundle;
155
156 /*
157 * Return SIGBUS with the unaligned address, if requested.
158 * Note that we return SIGBUS even for completely invalid addresses
159 * as long as they are in fact unaligned; this matches what the
160 * tilepro hardware would be doing, if it could provide us with the
161 * actual bad address in an SPR, which it doesn't.
162 */
163 if (unaligned_fixup == 0) {
164 siginfo_t info = {
165 .si_signo = SIGBUS,
166 .si_code = BUS_ADRALN,
167 .si_addr = addr
168 };
169 trace_unhandled_signal("unaligned trap", regs,
170 (unsigned long)addr, SIGBUS);
171 force_sig_info(info.si_signo, &info, current);
172 return (tile_bundle_bits) 0;
173 }
174
175 /* Handle unaligned load/store */
176 if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
177 unsigned short val_16;
178 switch (size) {
179 case 2:
180 err = copy_from_user(&val_16, addr, sizeof(val_16));
181 val = sign_ext ? ((short)val_16) : val_16;
182 break;
183 case 4:
184 err = copy_from_user(&val, addr, sizeof(val));
185 break;
186 default:
187 BUG();
188 }
189 if (err == 0) {
190 state->update_reg = val_reg;
191 state->update_value = val;
192 state->update = 1;
193 }
194 } else {
195 unsigned short val_16;
196 val = (val_reg == TREG_ZERO) ? 0 : regs->regs[val_reg];
197 switch (size) {
198 case 2:
199 val_16 = val;
200 err = copy_to_user(addr, &val_16, sizeof(val_16));
201 break;
202 case 4:
203 err = copy_to_user(addr, &val, sizeof(val));
204 break;
205 default:
206 BUG();
207 }
208 }
209
210 if (err) {
211 siginfo_t info = {
212 .si_signo = SIGSEGV,
213 .si_code = SEGV_MAPERR,
214 .si_addr = addr
215 };
216 trace_unhandled_signal("segfault", regs,
217 (unsigned long)addr, SIGSEGV);
218 force_sig_info(info.si_signo, &info, current);
219 return (tile_bundle_bits) 0;
220 }
221
222 if (unaligned_printk || unaligned_fixup_count == 0) {
223 pr_info("Process %d/%s: PC %#lx: Fixup of"
224 " unaligned %s at %#lx.\n",
225 current->pid, current->comm, regs->pc,
226 (mem_op == MEMOP_LOAD ||
227 mem_op == MEMOP_LOAD_POSTINCR) ?
228 "load" : "store",
229 (unsigned long)addr);
230 if (!unaligned_printk) {
231#define P pr_info
232P("\n");
233P("Unaligned fixups in the kernel will slow your application considerably.\n");
234P("To find them, write a \"1\" to /proc/sys/tile/unaligned_fixup/printk,\n");
235P("which requests the kernel show all unaligned fixups, or write a \"0\"\n");
236P("to /proc/sys/tile/unaligned_fixup/enabled, in which case each unaligned\n");
237P("access will become a SIGBUS you can debug. No further warnings will be\n");
238P("shown so as to avoid additional slowdown, but you can track the number\n");
239P("of fixups performed via /proc/sys/tile/unaligned_fixup/count.\n");
240P("Use the tile-addr2line command (see \"info addr2line\") to decode PCs.\n");
241P("\n");
242#undef P
243 }
244 }
245 ++unaligned_fixup_count;
246
247 if (bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK) {
248 /* Convert the Y2 instruction to a prefetch. */
249 bundle &= ~(create_SrcBDest_Y2(-1) |
250 create_Opcode_Y2(-1));
251 bundle |= (create_SrcBDest_Y2(TREG_ZERO) |
252 create_Opcode_Y2(LW_OPCODE_Y2));
253 /* Replace the load postincr with an addi */
254 } else if (mem_op == MEMOP_LOAD_POSTINCR) {
255 bundle = addi_X1(bundle, addr_reg, addr_reg,
256 get_Imm8_X1(bundle));
257 /* Replace the store postincr with an addi */
258 } else if (mem_op == MEMOP_STORE_POSTINCR) {
259 bundle = addi_X1(bundle, addr_reg, addr_reg,
260 get_Dest_Imm8_X1(bundle));
261 } else {
262 /* Convert the X1 instruction to a nop. */
263 bundle &= ~(create_Opcode_X1(-1) |
264 create_UnShOpcodeExtension_X1(-1) |
265 create_UnOpcodeExtension_X1(-1));
266 bundle |= (create_Opcode_X1(SHUN_0_OPCODE_X1) |
267 create_UnShOpcodeExtension_X1(
268 UN_0_SHUN_0_OPCODE_X1) |
269 create_UnOpcodeExtension_X1(
270 NOP_UN_0_SHUN_0_OPCODE_X1));
271 }
272
273 return bundle;
274}
275
276/*
277 * Called after execve() has started the new image. This allows us
278 * to reset the info state. Note that the mmap'ed memory, if there
279 * was any, has already been unmapped by the exec.
280 */
281void single_step_execve(void)
282{
283 struct thread_info *ti = current_thread_info();
284 kfree(ti->step_state);
285 ti->step_state = NULL;
286}
287
288/**
289 * single_step_once() - entry point when single stepping has been triggered.
290 * @regs: The machine register state
291 *
292 * When we arrive at this routine via a trampoline, the single step
293 * engine copies the executing bundle to the single step buffer.
294 * If the instruction is a conditional branch, then the target is
295 * reset to one past the next instruction. If the instruction
296 * sets the lr, then that is noted. If the instruction is a jump
297 * or call, then the new target pc is preserved and the current
298 * bundle instruction set to null.
299 *
300 * The necessary post-single-step rewriting information is stored in
301 * single_step_state. We use data segment values because the
302 * stack will be rewound when we run the rewritten single-stepped
303 * instruction.
304 */
305void single_step_once(struct pt_regs *regs)
306{
307 extern tile_bundle_bits __single_step_ill_insn;
308 extern tile_bundle_bits __single_step_j_insn;
309 extern tile_bundle_bits __single_step_addli_insn;
310 extern tile_bundle_bits __single_step_auli_insn;
311 struct thread_info *info = (void *)current_thread_info();
312 struct single_step_state *state = info->step_state;
313 int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
314 tile_bundle_bits __user *buffer, *pc;
315 tile_bundle_bits bundle;
316 int temp_reg;
317 int target_reg = TREG_LR;
318 int err;
319 enum mem_op mem_op = MEMOP_NONE;
320 int size = 0, sign_ext = 0; /* happy compiler */
321
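/*
 * The asm below is never executed from here; it only emits template
 * bundles (ill, addli, auli, and a "j ." placeholder) into .rodata.
 * The code below copies and patches those templates when it fills
 * the per-thread single-step buffer.
 */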
322 asm(
323" .pushsection .rodata.single_step\n"
324" .align 8\n"
325" .globl __single_step_ill_insn\n"
326"__single_step_ill_insn:\n"
327" ill\n"
328" .globl __single_step_addli_insn\n"
329"__single_step_addli_insn:\n"
330" { nop; addli r0, zero, 0 }\n"
331" .globl __single_step_auli_insn\n"
332"__single_step_auli_insn:\n"
333" { nop; auli r0, r0, 0 }\n"
334" .globl __single_step_j_insn\n"
335"__single_step_j_insn:\n"
336" j .\n"
337" .popsection\n"
338 );
339
340 /*
341 * Enable interrupts here to allow touching userspace and the like.
342 * The callers expect this: do_trap() already has interrupts
343 * enabled, and do_work_pending() handles functions that enable
344 * interrupts internally.
345 */
346 local_irq_enable();
347
348 if (state == NULL) {
349 /* allocate the per-thread single-step state structure */
350 state = kmalloc(sizeof(struct single_step_state), GFP_KERNEL);
351 if (state == NULL) {
352 pr_err("Out of kernel memory trying to single-step\n");
353 return;
354 }
355
356 /* allocate a cache line of writable, executable memory */
357 buffer = (void __user *) vm_mmap(NULL, 0, 64,
358 PROT_EXEC | PROT_READ | PROT_WRITE,
359 MAP_PRIVATE | MAP_ANONYMOUS,
360 0);
361
362 if (IS_ERR((void __force *)buffer)) {
363 kfree(state);
364 pr_err("Out of kernel pages trying to single-step\n");
365 return;
366 }
367
368 state->buffer = buffer;
369 state->is_enabled = 0;
370
371 info->step_state = state;
372
373 /* Validate our stored instruction patterns */
374 BUG_ON(get_Opcode_X1(__single_step_addli_insn) !=
375 ADDLI_OPCODE_X1);
376 BUG_ON(get_Opcode_X1(__single_step_auli_insn) !=
377 AULI_OPCODE_X1);
378 BUG_ON(get_SrcA_X1(__single_step_addli_insn) != TREG_ZERO);
379 BUG_ON(get_Dest_X1(__single_step_addli_insn) != 0);
380 BUG_ON(get_JOffLong_X1(__single_step_j_insn) != 0);
381 }
382
383 /*
384 * If we are returning from a syscall, we still haven't hit the
385 * "ill" for the swint1 instruction. So back the PC up to be
386 * pointing at the swint1, but we'll actually return directly
387 * back to the "ill" so we come back in via SIGILL as if we
388 * had "executed" the swint1 without ever being in kernel space.
389 */
390 if (regs->faultnum == INT_SWINT_1)
391 regs->pc -= 8;
392
393 pc = (tile_bundle_bits __user *)(regs->pc);
394 if (get_user(bundle, pc) != 0) {
395 pr_err("Couldn't read instruction at %p trying to step\n", pc);
396 return;
397 }
398
399 /* We'll follow the instruction with 2 ill op bundles */
400 state->orig_pc = (unsigned long)pc;
401 state->next_pc = (unsigned long)(pc + 1);
402 state->branch_next_pc = 0;
403 state->update = 0;
404
405 if (!(bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK)) {
406 /* two wide, check for control flow */
407 int opcode = get_Opcode_X1(bundle);
408
409 switch (opcode) {
410 /* branches */
411 case BRANCH_OPCODE_X1:
412 {
413 s32 offset = signExtend17(get_BrOff_X1(bundle));
414
415 /*
416 * For branches, we use a rewriting trick to let the
417 * hardware evaluate whether the branch is taken or
418 * untaken. We record the target offset and then
419 * rewrite the branch instruction to target 1 insn
420 * ahead if the branch is taken. We then follow the
421 * rewritten branch with two bundles, each containing
422 * an "ill" instruction. The supervisor examines the
423 * pc after the single step code is executed, and if
424 * the pc is the first ill instruction, then the
425 * branch (if any) was not taken. If the pc is the
426 * second ill instruction, then the branch was
427 * taken. The new pc is computed for these cases, and
428 * inserted into the registers for the thread. If
429 * the pc is the start of the single step code, then
430 * an exception or interrupt was taken before the
431 * code started processing, and the same "original"
432 * pc is restored. This change, different from the
433 * original implementation, has the advantage of
434 * executing a single user instruction.
435 */
436 state->branch_next_pc = (unsigned long)(pc + offset);
437
438 /* rewrite branch offset to go forward one bundle */
439 bundle = set_BrOff_X1(bundle, 2);
440 }
441 break;
442
443 /* jumps */
444 case JALB_OPCODE_X1:
445 case JALF_OPCODE_X1:
446 state->update = 1;
447 state->next_pc =
448 (unsigned long) (pc + get_JOffLong_X1(bundle));
449 break;
450
451 case JB_OPCODE_X1:
452 case JF_OPCODE_X1:
453 state->next_pc =
454 (unsigned long) (pc + get_JOffLong_X1(bundle));
455 bundle = nop_X1(bundle);
456 break;
457
458 case SPECIAL_0_OPCODE_X1:
459 switch (get_RRROpcodeExtension_X1(bundle)) {
460 /* jump-register */
461 case JALRP_SPECIAL_0_OPCODE_X1:
462 case JALR_SPECIAL_0_OPCODE_X1:
463 state->update = 1;
464 state->next_pc =
465 regs->regs[get_SrcA_X1(bundle)];
466 break;
467
468 case JRP_SPECIAL_0_OPCODE_X1:
469 case JR_SPECIAL_0_OPCODE_X1:
470 state->next_pc =
471 regs->regs[get_SrcA_X1(bundle)];
472 bundle = nop_X1(bundle);
473 break;
474
475 case LNK_SPECIAL_0_OPCODE_X1:
476 state->update = 1;
477 target_reg = get_Dest_X1(bundle);
478 break;
479
480 /* stores */
481 case SH_SPECIAL_0_OPCODE_X1:
482 mem_op = MEMOP_STORE;
483 size = 2;
484 break;
485
486 case SW_SPECIAL_0_OPCODE_X1:
487 mem_op = MEMOP_STORE;
488 size = 4;
489 break;
490 }
491 break;
492
493 /* loads and iret */
494 case SHUN_0_OPCODE_X1:
495 if (get_UnShOpcodeExtension_X1(bundle) ==
496 UN_0_SHUN_0_OPCODE_X1) {
497 switch (get_UnOpcodeExtension_X1(bundle)) {
498 case LH_UN_0_SHUN_0_OPCODE_X1:
499 mem_op = MEMOP_LOAD;
500 size = 2;
501 sign_ext = 1;
502 break;
503
504 case LH_U_UN_0_SHUN_0_OPCODE_X1:
505 mem_op = MEMOP_LOAD;
506 size = 2;
507 sign_ext = 0;
508 break;
509
510 case LW_UN_0_SHUN_0_OPCODE_X1:
511 mem_op = MEMOP_LOAD;
512 size = 4;
513 break;
514
515 case IRET_UN_0_SHUN_0_OPCODE_X1:
516 {
517 unsigned long ex0_0 = __insn_mfspr(
518 SPR_EX_CONTEXT_0_0);
519 unsigned long ex0_1 = __insn_mfspr(
520 SPR_EX_CONTEXT_0_1);
521 /*
522 * Special-case it if we're iret'ing
523 * to PL0 again. Otherwise just let
524 * it run and it will generate SIGILL.
525 */
526 if (EX1_PL(ex0_1) == USER_PL) {
527 state->next_pc = ex0_0;
528 regs->ex1 = ex0_1;
529 bundle = nop_X1(bundle);
530 }
531 }
532 }
533 }
534 break;
535
536#if CHIP_HAS_WH64()
537 /* postincrement operations */
538 case IMM_0_OPCODE_X1:
539 switch (get_ImmOpcodeExtension_X1(bundle)) {
540 case LWADD_IMM_0_OPCODE_X1:
541 mem_op = MEMOP_LOAD_POSTINCR;
542 size = 4;
543 break;
544
545 case LHADD_IMM_0_OPCODE_X1:
546 mem_op = MEMOP_LOAD_POSTINCR;
547 size = 2;
548 sign_ext = 1;
549 break;
550
551 case LHADD_U_IMM_0_OPCODE_X1:
552 mem_op = MEMOP_LOAD_POSTINCR;
553 size = 2;
554 sign_ext = 0;
555 break;
556
557 case SWADD_IMM_0_OPCODE_X1:
558 mem_op = MEMOP_STORE_POSTINCR;
559 size = 4;
560 break;
561
562 case SHADD_IMM_0_OPCODE_X1:
563 mem_op = MEMOP_STORE_POSTINCR;
564 size = 2;
565 break;
566
567 default:
568 break;
569 }
570 break;
571#endif /* CHIP_HAS_WH64() */
572 }
573
574 if (state->update) {
575 /*
576 * Get an available register. We start with a
577 * bitmask with 1's for available registers.
578 * We truncate to the low 32 registers since
579 * we are guaranteed to have set bits in the
580 * low 32 bits, then use ctz to pick the first.
581 */
582 u32 mask = (u32) ~((1ULL << get_Dest_X0(bundle)) |
583 (1ULL << get_SrcA_X0(bundle)) |
584 (1ULL << get_SrcB_X0(bundle)) |
585 (1ULL << target_reg));
586 temp_reg = __builtin_ctz(mask);
587 state->update_reg = temp_reg;
588 state->update_value = regs->regs[temp_reg];
589 regs->regs[temp_reg] = (unsigned long) (pc+1);
590 regs->flags |= PT_FLAGS_RESTORE_REGS;
591 bundle = move_X1(bundle, target_reg, temp_reg);
592 }
593 } else {
594 int opcode = get_Opcode_Y2(bundle);
595
596 switch (opcode) {
597 /* loads */
598 case LH_OPCODE_Y2:
599 mem_op = MEMOP_LOAD;
600 size = 2;
601 sign_ext = 1;
602 break;
603
604 case LH_U_OPCODE_Y2:
605 mem_op = MEMOP_LOAD;
606 size = 2;
607 sign_ext = 0;
608 break;
609
610 case LW_OPCODE_Y2:
611 mem_op = MEMOP_LOAD;
612 size = 4;
613 break;
614
615 /* stores */
616 case SH_OPCODE_Y2:
617 mem_op = MEMOP_STORE;
618 size = 2;
619 break;
620
621 case SW_OPCODE_Y2:
622 mem_op = MEMOP_STORE;
623 size = 4;
624 break;
625 }
626 }
627
628 /*
629 * Check if we need to rewrite an unaligned load/store.
630 * Returning zero is a special value meaning we generated a signal.
631 */
632 if (mem_op != MEMOP_NONE && unaligned_fixup >= 0) {
633 bundle = rewrite_load_store_unaligned(state, bundle, regs,
634 mem_op, size, sign_ext);
635 if (bundle == 0)
636 return;
637 }
638
639 /* write the bundle to our execution area */
640 buffer = state->buffer;
641 err = __put_user(bundle, buffer++);
642
643 /*
644 * If we're really single-stepping, we take an INT_ILL after.
645 * If we're just handling an unaligned access, we can just
646 * jump directly back to where we were in user code.
647 */
648 if (is_single_step) {
649 err |= __put_user(__single_step_ill_insn, buffer++);
650 err |= __put_user(__single_step_ill_insn, buffer++);
651 } else {
652 long delta;
653
654 if (state->update) {
655 /* We have some state to update; do it inline */
656 int ha16;
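/*
 * Rebuild the 32-bit update_value with an addli/auli pair: addli
 * sign-extends its 16-bit immediate, so the auli immediate is the
 * "high adjusted" upper half (value + 0x8000 before shifting) to
 * compensate for that sign extension.
 */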
657 bundle = __single_step_addli_insn;
658 bundle |= create_Dest_X1(state->update_reg);
659 bundle |= create_Imm16_X1(state->update_value);
660 err |= __put_user(bundle, buffer++);
661 bundle = __single_step_auli_insn;
662 bundle |= create_Dest_X1(state->update_reg);
663 bundle |= create_SrcA_X1(state->update_reg);
664 ha16 = (state->update_value + 0x8000) >> 16;
665 bundle |= create_Imm16_X1(ha16);
666 err |= __put_user(bundle, buffer++);
667 state->update = 0;
668 }
669
670 /* End with a jump back to the next instruction */
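/*
 * The jump offset is encoded in bundles; "buffer" currently points at
 * the slot where this j bundle itself will be written, so delta is
 * measured from the jump's own address back to the bundle following
 * the original pc.
 */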
671 delta = ((regs->pc + TILE_BUNDLE_SIZE_IN_BYTES) -
672 (unsigned long)buffer) >>
673 TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES;
674 bundle = __single_step_j_insn;
675 bundle |= create_JOffLong_X1(delta);
676 err |= __put_user(bundle, buffer++);
677 }
678
679 if (err) {
680 pr_err("Fault when writing to single-step buffer\n");
681 return;
682 }
683
684 /*
685 * Flush the buffer.
686 * We do a local flush only, since this is a thread-specific buffer.
687 */
688 __flush_icache_range((unsigned long)state->buffer,
689 (unsigned long)buffer);
690
691 /* Indicate enabled */
692 state->is_enabled = is_single_step;
693 regs->pc = (unsigned long)state->buffer;
694
695 /* Fault immediately if we are coming back from a syscall. */
696 if (regs->faultnum == INT_SWINT_1)
697 regs->pc += 8;
698}
699
700#else
701#include <linux/smp.h>
702#include <linux/ptrace.h>
703#include <arch/spr_def.h>
704
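/*
 * PC that was most recently armed for single-step on this cpu;
 * gx_singlestep_handle() compares it with the trap pc to decide
 * whether the stepped instruction actually completed.
 */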
705static DEFINE_PER_CPU(unsigned long, ss_saved_pc);
706
707
708/*
709 * Called directly on the occasion of an interrupt.
710 *
711 * If the process doesn't have single step set, then we use this as an
712 * opportunity to turn single step off.
713 *
714 * It has been mentioned that we could conditionally turn off single stepping
715 * on each entry into the kernel and rely on single_step_once to turn it
716 * on for the processes that matter (as we already do), but this
717 * implementation is somewhat more efficient in that we muck with registers
718 * once on a bum interrupt rather than on every entry into the kernel.
719 *
720 * If SINGLE_STEP_CONTROL_K has CANCELED set, then an interrupt occurred,
721 * so we have to run through this process again before we can say that an
722 * instruction has executed.
723 *
724 * swint will set CANCELED, but it's a legitimate instruction. Fortunately
725 * it changes the PC. If it hasn't changed, then we know that the interrupt
726 * wasn't generated by swint and we'll need to run this process again before
727 * we can say an instruction has executed.
728 *
729 * If either CANCELED == 0 or the PC's changed, we send out SIGTRAPs and get
730 * on with our lives.
731 */
732
733void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
734{
735 unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
736 struct thread_info *info = (void *)current_thread_info();
737 int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
738 unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);
739
740 if (is_single_step == 0) {
741 __insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 0);
742
743 } else if ((*ss_pc != regs->pc) ||
744 (!(control & SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK))) {
745
746 ptrace_notify(SIGTRAP);
747 control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
748 control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
749 __insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
750 }
751}
752
753
754/*
755 * Called from need_singlestep. Set up the control registers and the enable
756 * register, then return.
757 */
758
759void single_step_once(struct pt_regs *regs)
760{
761 unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
762 unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);
763
764 *ss_pc = regs->pc;
765 control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
766 control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
767 __insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
768 __insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 1 << USER_PL);
769}
770
771void single_step_execve(void)
772{
773 /* Nothing */
774}
775
776#endif /* !__tilegx__ */
1/*
2 * Copyright 2010 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 *
14 * A code-rewriter that enables instruction single-stepping.
15 */
16
17#include <linux/smp.h>
18#include <linux/ptrace.h>
19#include <linux/slab.h>
20#include <linux/thread_info.h>
21#include <linux/uaccess.h>
22#include <linux/mman.h>
23#include <linux/types.h>
24#include <linux/err.h>
25#include <linux/prctl.h>
26#include <asm/cacheflush.h>
27#include <asm/traps.h>
28#include <asm/uaccess.h>
29#include <asm/unaligned.h>
30#include <arch/abi.h>
31#include <arch/spr_def.h>
32#include <arch/opcode.h>
33
34
35#ifndef __tilegx__ /* Hardware support for single step unavailable. */
36
37#define signExtend17(val) sign_extend((val), 17)
38#define TILE_X1_MASK (0xffffffffULL << 31)
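/*
 * Note: the X1 helpers below clear this mask and then OR in a freshly
 * encoded instruction, so it is assumed to cover the X1 issue slot of
 * the 64-bit bundle while leaving the X0 slot untouched.
 */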
39
40enum mem_op {
41 MEMOP_NONE,
42 MEMOP_LOAD,
43 MEMOP_STORE,
44 MEMOP_LOAD_POSTINCR,
45 MEMOP_STORE_POSTINCR
46};
47
48static inline tilepro_bundle_bits set_BrOff_X1(tilepro_bundle_bits n,
49 s32 offset)
50{
51 tilepro_bundle_bits result;
52
53 /* mask out the old offset */
54 tilepro_bundle_bits mask = create_BrOff_X1(-1);
55 result = n & (~mask);
56
57 /* or in the new offset */
58 result |= create_BrOff_X1(offset);
59
60 return result;
61}
62
63static inline tilepro_bundle_bits move_X1(tilepro_bundle_bits n, int dest,
64 int src)
65{
66 tilepro_bundle_bits result;
67 tilepro_bundle_bits op;
68
69 result = n & (~TILE_X1_MASK);
70
71 op = create_Opcode_X1(SPECIAL_0_OPCODE_X1) |
72 create_RRROpcodeExtension_X1(OR_SPECIAL_0_OPCODE_X1) |
73 create_Dest_X1(dest) |
74 create_SrcB_X1(TREG_ZERO) |
75 create_SrcA_X1(src) ;
76
77 result |= op;
78 return result;
79}
80
81static inline tilepro_bundle_bits nop_X1(tilepro_bundle_bits n)
82{
83 return move_X1(n, TREG_ZERO, TREG_ZERO);
84}
85
86static inline tilepro_bundle_bits addi_X1(
87 tilepro_bundle_bits n, int dest, int src, int imm)
88{
89 n &= ~TILE_X1_MASK;
90
91 n |= (create_SrcA_X1(src) |
92 create_Dest_X1(dest) |
93 create_Imm8_X1(imm) |
94 create_S_X1(0) |
95 create_Opcode_X1(IMM_0_OPCODE_X1) |
96 create_ImmOpcodeExtension_X1(ADDI_IMM_0_OPCODE_X1));
97
98 return n;
99}
100
101static tilepro_bundle_bits rewrite_load_store_unaligned(
102 struct single_step_state *state,
103 tilepro_bundle_bits bundle,
104 struct pt_regs *regs,
105 enum mem_op mem_op,
106 int size, int sign_ext)
107{
108 unsigned char __user *addr;
109 int val_reg, addr_reg, err, val;
110 int align_ctl;
111
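/*
 * Start from the global unaligned_fixup policy, then let a per-task
 * prctl(PR_SET_UNALIGN) setting override it: PR_UNALIGN_NOPRINT
 * selects fixup, PR_UNALIGN_SIGBUS selects a signal instead.
 */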
112 align_ctl = unaligned_fixup;
113 switch (task_thread_info(current)->align_ctl) {
114 case PR_UNALIGN_NOPRINT:
115 align_ctl = 1;
116 break;
117 case PR_UNALIGN_SIGBUS:
118 align_ctl = 0;
119 break;
120 }
121
122 /* Get address and value registers */
123 if (bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK) {
124 addr_reg = get_SrcA_Y2(bundle);
125 val_reg = get_SrcBDest_Y2(bundle);
126 } else if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
127 addr_reg = get_SrcA_X1(bundle);
128 val_reg = get_Dest_X1(bundle);
129 } else {
130 addr_reg = get_SrcA_X1(bundle);
131 val_reg = get_SrcB_X1(bundle);
132 }
133
134 /*
135 * If registers are not GPRs, don't try to handle it.
136 *
137 * FIXME: we could handle non-GPR loads by getting the real value
138 * from memory, writing it to the single step buffer, using a
139 * temp_reg to hold a pointer to that memory, then executing that
140 * instruction and resetting temp_reg. For non-GPR stores, it's a
141 * little trickier; we could use the single step buffer for that
142 * too, but we'd have to add some more state bits so that we could
143 * call back in here to copy that value to the real target. For
144 * now, we just handle the simple case.
145 */
146 if ((val_reg >= PTREGS_NR_GPRS &&
147 (val_reg != TREG_ZERO ||
148 mem_op == MEMOP_LOAD ||
149 mem_op == MEMOP_LOAD_POSTINCR)) ||
150 addr_reg >= PTREGS_NR_GPRS)
151 return bundle;
152
153 /* If it's aligned, don't handle it specially */
154 addr = (void __user *)regs->regs[addr_reg];
155 if (((unsigned long)addr % size) == 0)
156 return bundle;
157
158 /*
159 * Return SIGBUS with the unaligned address, if requested.
160 * Note that we return SIGBUS even for completely invalid addresses
161 * as long as they are in fact unaligned; this matches what the
162 * tilepro hardware would be doing, if it could provide us with the
163 * actual bad address in an SPR, which it doesn't.
164 */
165 if (align_ctl == 0) {
166 siginfo_t info = {
167 .si_signo = SIGBUS,
168 .si_code = BUS_ADRALN,
169 .si_addr = addr
170 };
171 trace_unhandled_signal("unaligned trap", regs,
172 (unsigned long)addr, SIGBUS);
173 force_sig_info(info.si_signo, &info, current);
174 return (tilepro_bundle_bits) 0;
175 }
176
177 /* Handle unaligned load/store */
178 if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
179 unsigned short val_16;
180 switch (size) {
181 case 2:
182 err = copy_from_user(&val_16, addr, sizeof(val_16));
183 val = sign_ext ? ((short)val_16) : val_16;
184 break;
185 case 4:
186 err = copy_from_user(&val, addr, sizeof(val));
187 break;
188 default:
189 BUG();
190 }
191 if (err == 0) {
192 state->update_reg = val_reg;
193 state->update_value = val;
194 state->update = 1;
195 }
196 } else {
197 unsigned short val_16;
198 val = (val_reg == TREG_ZERO) ? 0 : regs->regs[val_reg];
199 switch (size) {
200 case 2:
201 val_16 = val;
202 err = copy_to_user(addr, &val_16, sizeof(val_16));
203 break;
204 case 4:
205 err = copy_to_user(addr, &val, sizeof(val));
206 break;
207 default:
208 BUG();
209 }
210 }
211
212 if (err) {
213 siginfo_t info = {
214 .si_signo = SIGBUS,
215 .si_code = BUS_ADRALN,
216 .si_addr = addr
217 };
218 trace_unhandled_signal("bad address for unaligned fixup", regs,
219 (unsigned long)addr, SIGBUS);
220 force_sig_info(info.si_signo, &info, current);
221 return (tilepro_bundle_bits) 0;
222 }
223
224 if (unaligned_printk || unaligned_fixup_count == 0) {
225 pr_info("Process %d/%s: PC %#lx: Fixup of unaligned %s at %#lx\n",
226 current->pid, current->comm, regs->pc,
227 mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR ?
228 "load" : "store",
229 (unsigned long)addr);
230 if (!unaligned_printk) {
231#define P pr_info
232P("\n");
233P("Unaligned fixups in the kernel will slow your application considerably.\n");
234P("To find them, write a \"1\" to /proc/sys/tile/unaligned_fixup/printk,\n");
235P("which requests the kernel show all unaligned fixups, or write a \"0\"\n");
236P("to /proc/sys/tile/unaligned_fixup/enabled, in which case each unaligned\n");
237P("access will become a SIGBUS you can debug. No further warnings will be\n");
238P("shown so as to avoid additional slowdown, but you can track the number\n");
239P("of fixups performed via /proc/sys/tile/unaligned_fixup/count.\n");
240P("Use the tile-addr2line command (see \"info addr2line\") to decode PCs.\n");
241P("\n");
242#undef P
243 }
244 }
245 ++unaligned_fixup_count;
246
247 if (bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK) {
248 /* Convert the Y2 instruction to a prefetch. */
249 bundle &= ~(create_SrcBDest_Y2(-1) |
250 create_Opcode_Y2(-1));
251 bundle |= (create_SrcBDest_Y2(TREG_ZERO) |
252 create_Opcode_Y2(LW_OPCODE_Y2));
253 /* Replace the load postincr with an addi */
254 } else if (mem_op == MEMOP_LOAD_POSTINCR) {
255 bundle = addi_X1(bundle, addr_reg, addr_reg,
256 get_Imm8_X1(bundle));
257 /* Replace the store postincr with an addi */
258 } else if (mem_op == MEMOP_STORE_POSTINCR) {
259 bundle = addi_X1(bundle, addr_reg, addr_reg,
260 get_Dest_Imm8_X1(bundle));
261 } else {
262 /* Convert the X1 instruction to a nop. */
263 bundle &= ~(create_Opcode_X1(-1) |
264 create_UnShOpcodeExtension_X1(-1) |
265 create_UnOpcodeExtension_X1(-1));
266 bundle |= (create_Opcode_X1(SHUN_0_OPCODE_X1) |
267 create_UnShOpcodeExtension_X1(
268 UN_0_SHUN_0_OPCODE_X1) |
269 create_UnOpcodeExtension_X1(
270 NOP_UN_0_SHUN_0_OPCODE_X1));
271 }
272
273 return bundle;
274}
275
276/*
277 * Called after execve() has started the new image. This allows us
278 * to reset the info state. Note that the mmap'ed memory, if there
279 * was any, has already been unmapped by the exec.
280 */
281void single_step_execve(void)
282{
283 struct thread_info *ti = current_thread_info();
284 kfree(ti->step_state);
285 ti->step_state = NULL;
286}
287
288/*
289 * single_step_once() - entry point when single stepping has been triggered.
290 * @regs: The machine register state
291 *
292 * When we arrive at this routine via a trampoline, the single step
293 * engine copies the executing bundle to the single step buffer.
294 * If the instruction is a conditional branch, then the target is
295 * reset to one past the next instruction. If the instruction
296 * sets the lr, then that is noted. If the instruction is a jump
297 * or call, then the new target pc is preserved and the current
298 * bundle instruction set to null.
299 *
300 * The necessary post-single-step rewriting information is stored in
301 * single_step_state. We use data segment values because the
302 * stack will be rewound when we run the rewritten single-stepped
303 * instruction.
304 */
305void single_step_once(struct pt_regs *regs)
306{
307 extern tilepro_bundle_bits __single_step_ill_insn;
308 extern tilepro_bundle_bits __single_step_j_insn;
309 extern tilepro_bundle_bits __single_step_addli_insn;
310 extern tilepro_bundle_bits __single_step_auli_insn;
311 struct thread_info *info = (void *)current_thread_info();
312 struct single_step_state *state = info->step_state;
313 int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
314 tilepro_bundle_bits __user *buffer, *pc;
315 tilepro_bundle_bits bundle;
316 int temp_reg;
317 int target_reg = TREG_LR;
318 int err;
319 enum mem_op mem_op = MEMOP_NONE;
320 int size = 0, sign_ext = 0; /* happy compiler */
321 int align_ctl;
322
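/*
 * Start from the global unaligned_fixup policy, then let a per-task
 * prctl(PR_SET_UNALIGN) setting override it: PR_UNALIGN_NOPRINT
 * selects fixup, PR_UNALIGN_SIGBUS selects a signal instead.
 */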
323 align_ctl = unaligned_fixup;
324 switch (task_thread_info(current)->align_ctl) {
325 case PR_UNALIGN_NOPRINT:
326 align_ctl = 1;
327 break;
328 case PR_UNALIGN_SIGBUS:
329 align_ctl = 0;
330 break;
331 }
332
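/*
 * The asm below is never executed from here; it only emits template
 * bundles (ill, addli, auli, and a "j ." placeholder) into .rodata.
 * The code below copies and patches those templates when it fills
 * the per-thread single-step buffer.
 */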
333 asm(
334" .pushsection .rodata.single_step\n"
335" .align 8\n"
336" .globl __single_step_ill_insn\n"
337"__single_step_ill_insn:\n"
338" ill\n"
339" .globl __single_step_addli_insn\n"
340"__single_step_addli_insn:\n"
341" { nop; addli r0, zero, 0 }\n"
342" .globl __single_step_auli_insn\n"
343"__single_step_auli_insn:\n"
344" { nop; auli r0, r0, 0 }\n"
345" .globl __single_step_j_insn\n"
346"__single_step_j_insn:\n"
347" j .\n"
348" .popsection\n"
349 );
350
351 /*
352 * Enable interrupts here to allow touching userspace and the like.
353 * The callers expect this: do_trap() already has interrupts
354 * enabled, and do_work_pending() handles functions that enable
355 * interrupts internally.
356 */
357 local_irq_enable();
358
359 if (state == NULL) {
360 /* allocate the per-thread single-step state structure */
361 state = kmalloc(sizeof(struct single_step_state), GFP_KERNEL);
362 if (state == NULL) {
363 pr_err("Out of kernel memory trying to single-step\n");
364 return;
365 }
366
367 /* allocate a cache line of writable, executable memory */
368 buffer = (void __user *) vm_mmap(NULL, 0, 64,
369 PROT_EXEC | PROT_READ | PROT_WRITE,
370 MAP_PRIVATE | MAP_ANONYMOUS,
371 0);
372
373 if (IS_ERR((void __force *)buffer)) {
374 kfree(state);
375 pr_err("Out of kernel pages trying to single-step\n");
376 return;
377 }
378
379 state->buffer = buffer;
380 state->is_enabled = 0;
381
382 info->step_state = state;
383
384 /* Validate our stored instruction patterns */
385 BUG_ON(get_Opcode_X1(__single_step_addli_insn) !=
386 ADDLI_OPCODE_X1);
387 BUG_ON(get_Opcode_X1(__single_step_auli_insn) !=
388 AULI_OPCODE_X1);
389 BUG_ON(get_SrcA_X1(__single_step_addli_insn) != TREG_ZERO);
390 BUG_ON(get_Dest_X1(__single_step_addli_insn) != 0);
391 BUG_ON(get_JOffLong_X1(__single_step_j_insn) != 0);
392 }
393
394 /*
395 * If we are returning from a syscall, we still haven't hit the
396 * "ill" for the swint1 instruction. So back the PC up to be
397 * pointing at the swint1, but we'll actually return directly
398 * back to the "ill" so we come back in via SIGILL as if we
399 * had "executed" the swint1 without ever being in kernel space.
400 */
401 if (regs->faultnum == INT_SWINT_1)
402 regs->pc -= 8;
403
404 pc = (tilepro_bundle_bits __user *)(regs->pc);
405 if (get_user(bundle, pc) != 0) {
406 pr_err("Couldn't read instruction at %p trying to step\n", pc);
407 return;
408 }
409
410 /* We'll follow the instruction with 2 ill op bundles */
411 state->orig_pc = (unsigned long)pc;
412 state->next_pc = (unsigned long)(pc + 1);
413 state->branch_next_pc = 0;
414 state->update = 0;
415
416 if (!(bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK)) {
417 /* two wide, check for control flow */
418 int opcode = get_Opcode_X1(bundle);
419
420 switch (opcode) {
421 /* branches */
422 case BRANCH_OPCODE_X1:
423 {
424 s32 offset = signExtend17(get_BrOff_X1(bundle));
425
426 /*
427 * For branches, we use a rewriting trick to let the
428 * hardware evaluate whether the branch is taken or
429 * untaken. We record the target offset and then
430 * rewrite the branch instruction to target 1 insn
431 * ahead if the branch is taken. We then follow the
432 * rewritten branch with two bundles, each containing
433 * an "ill" instruction. The supervisor examines the
434 * pc after the single step code is executed, and if
435 * the pc is the first ill instruction, then the
436 * branch (if any) was not taken. If the pc is the
437 * second ill instruction, then the branch was
438 * taken. The new pc is computed for these cases, and
439 * inserted into the registers for the thread. If
440 * the pc is the start of the single step code, then
441 * an exception or interrupt was taken before the
442 * code started processing, and the same "original"
443 * pc is restored. This change, different from the
444 * original implementation, has the advantage of
445 * executing a single user instruction.
446 */
447 state->branch_next_pc = (unsigned long)(pc + offset);
448
449 /* rewrite branch offset to go forward one bundle */
450 bundle = set_BrOff_X1(bundle, 2);
451 }
452 break;
453
454 /* jumps */
455 case JALB_OPCODE_X1:
456 case JALF_OPCODE_X1:
457 state->update = 1;
458 state->next_pc =
459 (unsigned long) (pc + get_JOffLong_X1(bundle));
460 break;
461
462 case JB_OPCODE_X1:
463 case JF_OPCODE_X1:
464 state->next_pc =
465 (unsigned long) (pc + get_JOffLong_X1(bundle));
466 bundle = nop_X1(bundle);
467 break;
468
469 case SPECIAL_0_OPCODE_X1:
470 switch (get_RRROpcodeExtension_X1(bundle)) {
471 /* jump-register */
472 case JALRP_SPECIAL_0_OPCODE_X1:
473 case JALR_SPECIAL_0_OPCODE_X1:
474 state->update = 1;
475 state->next_pc =
476 regs->regs[get_SrcA_X1(bundle)];
477 break;
478
479 case JRP_SPECIAL_0_OPCODE_X1:
480 case JR_SPECIAL_0_OPCODE_X1:
481 state->next_pc =
482 regs->regs[get_SrcA_X1(bundle)];
483 bundle = nop_X1(bundle);
484 break;
485
486 case LNK_SPECIAL_0_OPCODE_X1:
487 state->update = 1;
488 target_reg = get_Dest_X1(bundle);
489 break;
490
491 /* stores */
492 case SH_SPECIAL_0_OPCODE_X1:
493 mem_op = MEMOP_STORE;
494 size = 2;
495 break;
496
497 case SW_SPECIAL_0_OPCODE_X1:
498 mem_op = MEMOP_STORE;
499 size = 4;
500 break;
501 }
502 break;
503
504 /* loads and iret */
505 case SHUN_0_OPCODE_X1:
506 if (get_UnShOpcodeExtension_X1(bundle) ==
507 UN_0_SHUN_0_OPCODE_X1) {
508 switch (get_UnOpcodeExtension_X1(bundle)) {
509 case LH_UN_0_SHUN_0_OPCODE_X1:
510 mem_op = MEMOP_LOAD;
511 size = 2;
512 sign_ext = 1;
513 break;
514
515 case LH_U_UN_0_SHUN_0_OPCODE_X1:
516 mem_op = MEMOP_LOAD;
517 size = 2;
518 sign_ext = 0;
519 break;
520
521 case LW_UN_0_SHUN_0_OPCODE_X1:
522 mem_op = MEMOP_LOAD;
523 size = 4;
524 break;
525
526 case IRET_UN_0_SHUN_0_OPCODE_X1:
527 {
528 unsigned long ex0_0 = __insn_mfspr(
529 SPR_EX_CONTEXT_0_0);
530 unsigned long ex0_1 = __insn_mfspr(
531 SPR_EX_CONTEXT_0_1);
532 /*
533 * Special-case it if we're iret'ing
534 * to PL0 again. Otherwise just let
535 * it run and it will generate SIGILL.
536 */
537 if (EX1_PL(ex0_1) == USER_PL) {
538 state->next_pc = ex0_0;
539 regs->ex1 = ex0_1;
540 bundle = nop_X1(bundle);
541 }
542 }
543 }
544 }
545 break;
546
547 /* postincrement operations */
548 case IMM_0_OPCODE_X1:
549 switch (get_ImmOpcodeExtension_X1(bundle)) {
550 case LWADD_IMM_0_OPCODE_X1:
551 mem_op = MEMOP_LOAD_POSTINCR;
552 size = 4;
553 break;
554
555 case LHADD_IMM_0_OPCODE_X1:
556 mem_op = MEMOP_LOAD_POSTINCR;
557 size = 2;
558 sign_ext = 1;
559 break;
560
561 case LHADD_U_IMM_0_OPCODE_X1:
562 mem_op = MEMOP_LOAD_POSTINCR;
563 size = 2;
564 sign_ext = 0;
565 break;
566
567 case SWADD_IMM_0_OPCODE_X1:
568 mem_op = MEMOP_STORE_POSTINCR;
569 size = 4;
570 break;
571
572 case SHADD_IMM_0_OPCODE_X1:
573 mem_op = MEMOP_STORE_POSTINCR;
574 size = 2;
575 break;
576
577 default:
578 break;
579 }
580 break;
581 }
582
583 if (state->update) {
584 /*
585 * Get an available register. We start with a
586 * bitmask with 1's for available registers.
587 * We truncate to the low 32 registers since
588 * we are guaranteed to have set bits in the
589 * low 32 bits, then use ctz to pick the first.
590 */
591 u32 mask = (u32) ~((1ULL << get_Dest_X0(bundle)) |
592 (1ULL << get_SrcA_X0(bundle)) |
593 (1ULL << get_SrcB_X0(bundle)) |
594 (1ULL << target_reg));
595 temp_reg = __builtin_ctz(mask);
596 state->update_reg = temp_reg;
597 state->update_value = regs->regs[temp_reg];
598 regs->regs[temp_reg] = (unsigned long) (pc+1);
599 regs->flags |= PT_FLAGS_RESTORE_REGS;
600 bundle = move_X1(bundle, target_reg, temp_reg);
601 }
602 } else {
603 int opcode = get_Opcode_Y2(bundle);
604
605 switch (opcode) {
606 /* loads */
607 case LH_OPCODE_Y2:
608 mem_op = MEMOP_LOAD;
609 size = 2;
610 sign_ext = 1;
611 break;
612
613 case LH_U_OPCODE_Y2:
614 mem_op = MEMOP_LOAD;
615 size = 2;
616 sign_ext = 0;
617 break;
618
619 case LW_OPCODE_Y2:
620 mem_op = MEMOP_LOAD;
621 size = 4;
622 break;
623
624 /* stores */
625 case SH_OPCODE_Y2:
626 mem_op = MEMOP_STORE;
627 size = 2;
628 break;
629
630 case SW_OPCODE_Y2:
631 mem_op = MEMOP_STORE;
632 size = 4;
633 break;
634 }
635 }
636
637 /*
638 * Check if we need to rewrite an unaligned load/store.
639 * Returning zero is a special value meaning we generated a signal.
640 */
641 if (mem_op != MEMOP_NONE && align_ctl >= 0) {
642 bundle = rewrite_load_store_unaligned(state, bundle, regs,
643 mem_op, size, sign_ext);
644 if (bundle == 0)
645 return;
646 }
647
648 /* write the bundle to our execution area */
649 buffer = state->buffer;
650 err = __put_user(bundle, buffer++);
651
652 /*
653 * If we're really single-stepping, we take an INT_ILL after.
654 * If we're just handling an unaligned access, we can just
655 * jump directly back to where we were in user code.
656 */
657 if (is_single_step) {
658 err |= __put_user(__single_step_ill_insn, buffer++);
659 err |= __put_user(__single_step_ill_insn, buffer++);
660 } else {
661 long delta;
662
663 if (state->update) {
664 /* We have some state to update; do it inline */
665 int ha16;
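/*
 * Rebuild the 32-bit update_value with an addli/auli pair: addli
 * sign-extends its 16-bit immediate, so the auli immediate is the
 * "high adjusted" upper half (value + 0x8000 before shifting) to
 * compensate for that sign extension.
 */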
666 bundle = __single_step_addli_insn;
667 bundle |= create_Dest_X1(state->update_reg);
668 bundle |= create_Imm16_X1(state->update_value);
669 err |= __put_user(bundle, buffer++);
670 bundle = __single_step_auli_insn;
671 bundle |= create_Dest_X1(state->update_reg);
672 bundle |= create_SrcA_X1(state->update_reg);
673 ha16 = (state->update_value + 0x8000) >> 16;
674 bundle |= create_Imm16_X1(ha16);
675 err |= __put_user(bundle, buffer++);
676 state->update = 0;
677 }
678
679 /* End with a jump back to the next instruction */
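/*
 * The jump offset is encoded in bundles; "buffer" currently points at
 * the slot where this j bundle itself will be written, so delta is
 * measured from the jump's own address back to the bundle following
 * the original pc.
 */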
680 delta = ((regs->pc + TILEPRO_BUNDLE_SIZE_IN_BYTES) -
681 (unsigned long)buffer) >>
682 TILEPRO_LOG2_BUNDLE_ALIGNMENT_IN_BYTES;
683 bundle = __single_step_j_insn;
684 bundle |= create_JOffLong_X1(delta);
685 err |= __put_user(bundle, buffer++);
686 }
687
688 if (err) {
689 pr_err("Fault when writing to single-step buffer\n");
690 return;
691 }
692
693 /*
694 * Flush the buffer.
695 * We do a local flush only, since this is a thread-specific buffer.
696 */
697 __flush_icache_range((unsigned long)state->buffer,
698 (unsigned long)buffer);
699
700 /* Indicate enabled */
701 state->is_enabled = is_single_step;
702 regs->pc = (unsigned long)state->buffer;
703
704 /* Fault immediately if we are coming back from a syscall. */
705 if (regs->faultnum == INT_SWINT_1)
706 regs->pc += 8;
707}
708
709#else
710
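/*
 * PC that was most recently armed for single-step on this cpu;
 * gx_singlestep_handle() compares it with the trap pc to decide
 * whether the stepped instruction actually completed.
 */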
711static DEFINE_PER_CPU(unsigned long, ss_saved_pc);
712
713
714/*
715 * Called directly on the occasion of an interrupt.
716 *
717 * If the process doesn't have single step set, then we use this as an
718 * opportunity to turn single step off.
719 *
720 * It has been mentioned that we could conditionally turn off single stepping
721 * on each entry into the kernel and rely on single_step_once to turn it
722 * on for the processes that matter (as we already do), but this
723 * implementation is somewhat more efficient in that we muck with registers
724 * once on a bum interrupt rather than on every entry into the kernel.
725 *
726 * If SINGLE_STEP_CONTROL_K has CANCELED set, then an interrupt occurred,
727 * so we have to run through this process again before we can say that an
728 * instruction has executed.
729 *
730 * swint will set CANCELED, but it's a legitimate instruction. Fortunately
731 * it changes the PC. If it hasn't changed, then we know that the interrupt
732 * wasn't generated by swint and we'll need to run this process again before
733 * we can say an instruction has executed.
734 *
735 * If either CANCELED == 0 or the PC's changed, we send out SIGTRAPs and get
736 * on with our lives.
737 */
738
739void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
740{
741 unsigned long *ss_pc = this_cpu_ptr(&ss_saved_pc);
742 struct thread_info *info = (void *)current_thread_info();
743 int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
744 unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);
745
746 if (is_single_step == 0) {
747 __insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 0);
748
749 } else if ((*ss_pc != regs->pc) ||
750 (!(control & SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK))) {
751
752 control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
753 control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
754 __insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
755 send_sigtrap(current, regs);
756 }
757}
758
759
760/*
761 * Called from need_singlestep. Set up the control registers and the enable
762 * register, then return.
763 */
764
765void single_step_once(struct pt_regs *regs)
766{
767 unsigned long *ss_pc = this_cpu_ptr(&ss_saved_pc);
768 unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);
769
770 *ss_pc = regs->pc;
771 control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
772 control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
773 __insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
774 __insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 1 << USER_PL);
775}
776
777void single_step_execve(void)
778{
779 /* Nothing */
780}
781
782#endif /* !__tilegx__ */