/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * A code-rewriter that enables instruction single-stepping.
 */

#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/uaccess.h>
#include <linux/mman.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/prctl.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <arch/abi.h>
#include <arch/spr_def.h>
#include <arch/opcode.h>


#ifndef __tilegx__   /* Hardware support for single step unavailable. */

#define signExtend17(val) sign_extend((val), 17)
#define TILE_X1_MASK (0xffffffffULL << 31)
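/*
 * Added note: judging by its use in move_X1() and addi_X1() below, this
 * mask selects the bits of the X1 instruction slot within a bundle, so
 * clearing it erases the old X1 instruction before a new one is OR'ed in.
 */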

enum mem_op {
	MEMOP_NONE,
	MEMOP_LOAD,
	MEMOP_STORE,
	MEMOP_LOAD_POSTINCR,
	MEMOP_STORE_POSTINCR
};
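/*
 * Added note: the *_POSTINCR variants correspond to the lwadd/lhadd/
 * swadd/shadd forms decoded below, i.e. loads/stores that also bump the
 * address register by an immediate.
 */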

static inline tilepro_bundle_bits set_BrOff_X1(tilepro_bundle_bits n,
	s32 offset)
{
	tilepro_bundle_bits result;

	/* mask out the old offset */
	tilepro_bundle_bits mask = create_BrOff_X1(-1);
	result = n & (~mask);

	/* or in the new offset */
	result |= create_BrOff_X1(offset);

	return result;
}

static inline tilepro_bundle_bits move_X1(tilepro_bundle_bits n, int dest,
	int src)
{
	tilepro_bundle_bits result;
	tilepro_bundle_bits op;

	result = n & (~TILE_X1_MASK);

	op = create_Opcode_X1(SPECIAL_0_OPCODE_X1) |
		create_RRROpcodeExtension_X1(OR_SPECIAL_0_OPCODE_X1) |
		create_Dest_X1(dest) |
		create_SrcB_X1(TREG_ZERO) |
		create_SrcA_X1(src);

	result |= op;
	return result;
}

static inline tilepro_bundle_bits nop_X1(tilepro_bundle_bits n)
{
	return move_X1(n, TREG_ZERO, TREG_ZERO);
}

static inline tilepro_bundle_bits addi_X1(
	tilepro_bundle_bits n, int dest, int src, int imm)
{
	n &= ~TILE_X1_MASK;

	n |= (create_SrcA_X1(src) |
	      create_Dest_X1(dest) |
	      create_Imm8_X1(imm) |
	      create_S_X1(0) |
	      create_Opcode_X1(IMM_0_OPCODE_X1) |
	      create_ImmOpcodeExtension_X1(ADDI_IMM_0_OPCODE_X1));

	return n;
}

static tilepro_bundle_bits rewrite_load_store_unaligned(
	struct single_step_state *state,
	tilepro_bundle_bits bundle,
	struct pt_regs *regs,
	enum mem_op mem_op,
	int size, int sign_ext)
{
	unsigned char __user *addr;
	int val_reg, addr_reg, err, val;
	int align_ctl;

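	/*
	 * Added note: the global unaligned_fixup sysctl supplies the
	 * default policy; a per-task setting made via prctl(PR_SET_UNALIGN),
	 * if any, overrides it here.
	 */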
	align_ctl = unaligned_fixup;
	switch (task_thread_info(current)->align_ctl) {
	case PR_UNALIGN_NOPRINT:
		align_ctl = 1;
		break;
	case PR_UNALIGN_SIGBUS:
		align_ctl = 0;
		break;
	}

	/* Get address and value registers */
	if (bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK) {
		addr_reg = get_SrcA_Y2(bundle);
		val_reg = get_SrcBDest_Y2(bundle);
	} else if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
		addr_reg = get_SrcA_X1(bundle);
		val_reg  = get_Dest_X1(bundle);
	} else {
		addr_reg = get_SrcA_X1(bundle);
		val_reg  = get_SrcB_X1(bundle);
	}

	/*
	 * If registers are not GPRs, don't try to handle it.
	 *
	 * FIXME: we could handle non-GPR loads by getting the real value
	 * from memory, writing it to the single step buffer, using a
	 * temp_reg to hold a pointer to that memory, then executing that
	 * instruction and resetting temp_reg.  For non-GPR stores, it's a
	 * little trickier; we could use the single step buffer for that
	 * too, but we'd have to add some more state bits so that we could
	 * call back in here to copy that value to the real target.  For
	 * now, we just handle the simple case.
	 */
	if ((val_reg >= PTREGS_NR_GPRS &&
	     (val_reg != TREG_ZERO ||
	      mem_op == MEMOP_LOAD ||
	      mem_op == MEMOP_LOAD_POSTINCR)) ||
	    addr_reg >= PTREGS_NR_GPRS)
		return bundle;

	/* If it's aligned, don't handle it specially */
	addr = (void __user *)regs->regs[addr_reg];
	if (((unsigned long)addr % size) == 0)
		return bundle;

	/*
	 * Return SIGBUS with the unaligned address, if requested.
	 * Note that we return SIGBUS even for completely invalid addresses
	 * as long as they are in fact unaligned; this matches what the
	 * tilepro hardware would be doing, if it could provide us with the
	 * actual bad address in an SPR, which it doesn't.
	 */
	if (align_ctl == 0) {
		siginfo_t info = {
			.si_signo = SIGBUS,
			.si_code = BUS_ADRALN,
			.si_addr = addr
		};
		trace_unhandled_signal("unaligned trap", regs,
				       (unsigned long)addr, SIGBUS);
		force_sig_info(info.si_signo, &info, current);
		return (tilepro_bundle_bits) 0;
	}

	/* Handle unaligned load/store */
	if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
		unsigned short val_16;
		switch (size) {
		case 2:
			err = copy_from_user(&val_16, addr, sizeof(val_16));
			val = sign_ext ? ((short)val_16) : val_16;
			break;
		case 4:
			err = copy_from_user(&val, addr, sizeof(val));
			break;
		default:
			BUG();
		}
		if (err == 0) {
			state->update_reg = val_reg;
			state->update_value = val;
			state->update = 1;
		}
	} else {
		unsigned short val_16;
		val = (val_reg == TREG_ZERO) ? 0 : regs->regs[val_reg];
		switch (size) {
		case 2:
			val_16 = val;
			err = copy_to_user(addr, &val_16, sizeof(val_16));
			break;
		case 4:
			err = copy_to_user(addr, &val, sizeof(val));
			break;
		default:
			BUG();
		}
	}

	if (err) {
		siginfo_t info = {
			.si_signo = SIGBUS,
			.si_code = BUS_ADRALN,
			.si_addr = addr
		};
		trace_unhandled_signal("bad address for unaligned fixup", regs,
				       (unsigned long)addr, SIGBUS);
		force_sig_info(info.si_signo, &info, current);
		return (tilepro_bundle_bits) 0;
	}

	if (unaligned_printk || unaligned_fixup_count == 0) {
		pr_info("Process %d/%s: PC %#lx: Fixup of unaligned %s at %#lx\n",
			current->pid, current->comm, regs->pc,
			mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR ?
			"load" : "store",
			(unsigned long)addr);
		if (!unaligned_printk) {
#define P pr_info
P("\n");
P("Unaligned fixups in the kernel will slow your application considerably.\n");
P("To find them, write a \"1\" to /proc/sys/tile/unaligned_fixup/printk,\n");
P("which requests the kernel show all unaligned fixups, or write a \"0\"\n");
P("to /proc/sys/tile/unaligned_fixup/enabled, in which case each unaligned\n");
P("access will become a SIGBUS you can debug. No further warnings will be\n");
P("shown so as to avoid additional slowdown, but you can track the number\n");
P("of fixups performed via /proc/sys/tile/unaligned_fixup/count.\n");
P("Use the tile-addr2line command (see \"info addr2line\") to decode PCs.\n");
P("\n");
#undef P
		}
	}
	++unaligned_fixup_count;

	if (bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK) {
		/* Convert the Y2 instruction to a prefetch. */
		bundle &= ~(create_SrcBDest_Y2(-1) |
			    create_Opcode_Y2(-1));
		bundle |= (create_SrcBDest_Y2(TREG_ZERO) |
			   create_Opcode_Y2(LW_OPCODE_Y2));
	/* Replace the load postincr with an addi */
	} else if (mem_op == MEMOP_LOAD_POSTINCR) {
		bundle = addi_X1(bundle, addr_reg, addr_reg,
				 get_Imm8_X1(bundle));
	/* Replace the store postincr with an addi */
	} else if (mem_op == MEMOP_STORE_POSTINCR) {
		bundle = addi_X1(bundle, addr_reg, addr_reg,
				 get_Dest_Imm8_X1(bundle));
	} else {
		/* Convert the X1 instruction to a nop. */
		bundle &= ~(create_Opcode_X1(-1) |
			    create_UnShOpcodeExtension_X1(-1) |
			    create_UnOpcodeExtension_X1(-1));
		bundle |= (create_Opcode_X1(SHUN_0_OPCODE_X1) |
			   create_UnShOpcodeExtension_X1(
				   UN_0_SHUN_0_OPCODE_X1) |
			   create_UnOpcodeExtension_X1(
				   NOP_UN_0_SHUN_0_OPCODE_X1));
	}

	return bundle;
}

/*
 * Called after execve() has started the new image.  This allows us
 * to reset the info state.  Note that the mmap'ed memory, if there
 * was any, has already been unmapped by the exec.
 */
void single_step_execve(void)
{
	struct thread_info *ti = current_thread_info();
	kfree(ti->step_state);
	ti->step_state = NULL;
}

/*
 * single_step_once() - entry point when single stepping has been triggered.
 * @regs: The machine register state
 *
 *  When we arrive at this routine via a trampoline, the single step
 *  engine copies the executing bundle to the single step buffer.
 *  If the instruction is a conditional branch, then the target is
 *  reset to one past the next instruction. If the instruction
 *  sets the lr, then that is noted. If the instruction is a jump
 *  or call, then the new target pc is preserved and the current
 *  bundle instruction set to null.
 *
 *  The necessary post-single-step rewriting information is stored in
 *  single_step_state.  We use data segment values because the
 *  stack will be rewound when we run the rewritten single-stepped
 *  instruction.
 */
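/*
 * Illustrative sketch (added; not part of the original source): for a
 * conditional branch, the buffer written below ends up as
 *
 *   buffer[0]  rewritten branch whose taken target is buffer[2]
 *   buffer[1]  ill   <- PC stops here: branch was not taken
 *   buffer[2]  ill   <- PC stops here: branch was taken
 *
 * so the trap handler can recover the branch outcome from the PC alone.
 */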
void single_step_once(struct pt_regs *regs)
{
	extern tilepro_bundle_bits __single_step_ill_insn;
	extern tilepro_bundle_bits __single_step_j_insn;
	extern tilepro_bundle_bits __single_step_addli_insn;
	extern tilepro_bundle_bits __single_step_auli_insn;
	struct thread_info *info = (void *)current_thread_info();
	struct single_step_state *state = info->step_state;
	int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
	tilepro_bundle_bits __user *buffer, *pc;
	tilepro_bundle_bits bundle;
	int temp_reg;
	int target_reg = TREG_LR;
	int err;
	enum mem_op mem_op = MEMOP_NONE;
	int size = 0, sign_ext = 0;  /* happy compiler */
	int align_ctl;

	align_ctl = unaligned_fixup;
	switch (task_thread_info(current)->align_ctl) {
	case PR_UNALIGN_NOPRINT:
		align_ctl = 1;
		break;
	case PR_UNALIGN_SIGBUS:
		align_ctl = 0;
		break;
	}

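	/*
	 * Added note: the assembly below is never executed from here; it
	 * only emits template bundles (ill, addli, auli, j) into .rodata
	 * whose encodings are patched and copied into the per-thread
	 * single-step buffer further down.
	 */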
	asm(
"    .pushsection .rodata.single_step\n"
"    .align 8\n"
"    .globl    __single_step_ill_insn\n"
"__single_step_ill_insn:\n"
"    ill\n"
"    .globl    __single_step_addli_insn\n"
"__single_step_addli_insn:\n"
"    { nop; addli r0, zero, 0 }\n"
"    .globl    __single_step_auli_insn\n"
"__single_step_auli_insn:\n"
"    { nop; auli r0, r0, 0 }\n"
"    .globl    __single_step_j_insn\n"
"__single_step_j_insn:\n"
"    j .\n"
"    .popsection\n"
	);

	/*
	 * Enable interrupts here to allow touching userspace and the like.
	 * The callers expect this: do_trap() already has interrupts
	 * enabled, and do_work_pending() handles functions that enable
	 * interrupts internally.
	 */
	local_irq_enable();

	if (state == NULL) {
		/* allocate the per-thread single-step state */
		state = kmalloc(sizeof(struct single_step_state), GFP_KERNEL);
		if (state == NULL) {
			pr_err("Out of kernel memory trying to single-step\n");
			return;
		}

		/* allocate a cache line of writable, executable memory */
		buffer = (void __user *) vm_mmap(NULL, 0, 64,
					  PROT_EXEC | PROT_READ | PROT_WRITE,
					  MAP_PRIVATE | MAP_ANONYMOUS,
					  0);

		if (IS_ERR((void __force *)buffer)) {
			kfree(state);
			pr_err("Out of kernel pages trying to single-step\n");
			return;
		}

		state->buffer = buffer;
		state->is_enabled = 0;

		info->step_state = state;

		/* Validate our stored instruction patterns */
		BUG_ON(get_Opcode_X1(__single_step_addli_insn) !=
		       ADDLI_OPCODE_X1);
		BUG_ON(get_Opcode_X1(__single_step_auli_insn) !=
		       AULI_OPCODE_X1);
		BUG_ON(get_SrcA_X1(__single_step_addli_insn) != TREG_ZERO);
		BUG_ON(get_Dest_X1(__single_step_addli_insn) != 0);
		BUG_ON(get_JOffLong_X1(__single_step_j_insn) != 0);
	}

	/*
	 * If we are returning from a syscall, we still haven't hit the
	 * "ill" for the swint1 instruction.  So back the PC up to be
	 * pointing at the swint1, but we'll actually return directly
	 * back to the "ill" so we come back in via SIGILL as if we
	 * had "executed" the swint1 without ever being in kernel space.
	 */
	if (regs->faultnum == INT_SWINT_1)
		regs->pc -= 8;

	pc = (tilepro_bundle_bits __user *)(regs->pc);
	if (get_user(bundle, pc) != 0) {
		pr_err("Couldn't read instruction at %p trying to step\n", pc);
		return;
	}

	/* We'll follow the instruction with 2 ill op bundles */
	state->orig_pc = (unsigned long)pc;
	state->next_pc = (unsigned long)(pc + 1);
	state->branch_next_pc = 0;
	state->update = 0;

	if (!(bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK)) {
		/* two wide, check for control flow */
		int opcode = get_Opcode_X1(bundle);

		switch (opcode) {
		/* branches */
		case BRANCH_OPCODE_X1:
		{
			s32 offset = signExtend17(get_BrOff_X1(bundle));

			/*
			 * For branches, we use a rewriting trick to let the
			 * hardware evaluate whether the branch is taken or
			 * untaken.  We record the target offset and then
			 * rewrite the branch instruction to target 1 insn
			 * ahead if the branch is taken.  We then follow the
			 * rewritten branch with two bundles, each containing
			 * an "ill" instruction. The supervisor examines the
			 * pc after the single step code is executed, and if
			 * the pc is the first ill instruction, then the
			 * branch (if any) was not taken.  If the pc is the
			 * second ill instruction, then the branch was
			 * taken. The new pc is computed for these cases, and
			 * inserted into the registers for the thread.  If
			 * the pc is the start of the single step code, then
			 * an exception or interrupt was taken before the
			 * code started processing, and the same "original"
			 * pc is restored.  This change, different from the
			 * original implementation, has the advantage of
			 * executing a single user instruction.
			 */
			state->branch_next_pc = (unsigned long)(pc + offset);

			/* rewrite branch offset to go forward one bundle */
			bundle = set_BrOff_X1(bundle, 2);
		}
		break;

		/* jumps */
		case JALB_OPCODE_X1:
		case JALF_OPCODE_X1:
			state->update = 1;
			state->next_pc =
				(unsigned long) (pc + get_JOffLong_X1(bundle));
			break;

		case JB_OPCODE_X1:
		case JF_OPCODE_X1:
			state->next_pc =
				(unsigned long) (pc + get_JOffLong_X1(bundle));
			bundle = nop_X1(bundle);
			break;

		case SPECIAL_0_OPCODE_X1:
			switch (get_RRROpcodeExtension_X1(bundle)) {
			/* jump-register */
			case JALRP_SPECIAL_0_OPCODE_X1:
			case JALR_SPECIAL_0_OPCODE_X1:
				state->update = 1;
				state->next_pc =
					regs->regs[get_SrcA_X1(bundle)];
				break;

			case JRP_SPECIAL_0_OPCODE_X1:
			case JR_SPECIAL_0_OPCODE_X1:
				state->next_pc =
					regs->regs[get_SrcA_X1(bundle)];
				bundle = nop_X1(bundle);
				break;

			case LNK_SPECIAL_0_OPCODE_X1:
				state->update = 1;
				target_reg = get_Dest_X1(bundle);
				break;

			/* stores */
			case SH_SPECIAL_0_OPCODE_X1:
				mem_op = MEMOP_STORE;
				size = 2;
				break;

			case SW_SPECIAL_0_OPCODE_X1:
				mem_op = MEMOP_STORE;
				size = 4;
				break;
			}
			break;

		/* loads and iret */
		case SHUN_0_OPCODE_X1:
			if (get_UnShOpcodeExtension_X1(bundle) ==
			    UN_0_SHUN_0_OPCODE_X1) {
				switch (get_UnOpcodeExtension_X1(bundle)) {
				case LH_UN_0_SHUN_0_OPCODE_X1:
					mem_op = MEMOP_LOAD;
					size = 2;
					sign_ext = 1;
					break;

				case LH_U_UN_0_SHUN_0_OPCODE_X1:
					mem_op = MEMOP_LOAD;
					size = 2;
					sign_ext = 0;
					break;

				case LW_UN_0_SHUN_0_OPCODE_X1:
					mem_op = MEMOP_LOAD;
					size = 4;
					break;

				case IRET_UN_0_SHUN_0_OPCODE_X1:
				{
					unsigned long ex0_0 = __insn_mfspr(
						SPR_EX_CONTEXT_0_0);
					unsigned long ex0_1 = __insn_mfspr(
						SPR_EX_CONTEXT_0_1);
					/*
					 * Special-case it if we're iret'ing
					 * to PL0 again.  Otherwise just let
					 * it run and it will generate SIGILL.
					 */
					if (EX1_PL(ex0_1) == USER_PL) {
						state->next_pc = ex0_0;
						regs->ex1 = ex0_1;
						bundle = nop_X1(bundle);
					}
				}
				}
			}
			break;

		/* postincrement operations */
		case IMM_0_OPCODE_X1:
			switch (get_ImmOpcodeExtension_X1(bundle)) {
			case LWADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_LOAD_POSTINCR;
				size = 4;
				break;

			case LHADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_LOAD_POSTINCR;
				size = 2;
				sign_ext = 1;
				break;

			case LHADD_U_IMM_0_OPCODE_X1:
				mem_op = MEMOP_LOAD_POSTINCR;
				size = 2;
				sign_ext = 0;
				break;

			case SWADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_STORE_POSTINCR;
				size = 4;
				break;

			case SHADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_STORE_POSTINCR;
				size = 2;
				break;

			default:
				break;
			}
			break;
		}

		if (state->update) {
			/*
			 * Get an available register.  We start with a
			 * bitmask with 1's for available registers.
			 * We truncate to the low 32 registers since
			 * we are guaranteed to have set bits in the
			 * low 32 bits, then use ctz to pick the first.
			 */
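			/*
			 * Worked example (added, illustrative): if the X0
			 * fields name r3 and r5, and target_reg is TREG_LR
			 * (outside the low 32 registers, so the u32 cast
			 * drops its bit), then mask has bits 3 and 5 clear;
			 * the lowest set bit is bit 0, so r0 becomes the
			 * scratch register.
			 */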
			u32 mask = (u32) ~((1ULL << get_Dest_X0(bundle)) |
					   (1ULL << get_SrcA_X0(bundle)) |
					   (1ULL << get_SrcB_X0(bundle)) |
					   (1ULL << target_reg));
			temp_reg = __builtin_ctz(mask);
			state->update_reg = temp_reg;
			state->update_value = regs->regs[temp_reg];
			regs->regs[temp_reg] = (unsigned long) (pc+1);
			regs->flags |= PT_FLAGS_RESTORE_REGS;
			bundle = move_X1(bundle, target_reg, temp_reg);
		}
	} else {
		int opcode = get_Opcode_Y2(bundle);

		switch (opcode) {
		/* loads */
		case LH_OPCODE_Y2:
			mem_op = MEMOP_LOAD;
			size = 2;
			sign_ext = 1;
			break;

		case LH_U_OPCODE_Y2:
			mem_op = MEMOP_LOAD;
			size = 2;
			sign_ext = 0;
			break;

		case LW_OPCODE_Y2:
			mem_op = MEMOP_LOAD;
			size = 4;
			break;

		/* stores */
		case SH_OPCODE_Y2:
			mem_op = MEMOP_STORE;
			size = 2;
			break;

		case SW_OPCODE_Y2:
			mem_op = MEMOP_STORE;
			size = 4;
			break;
		}
	}

	/*
	 * Check if we need to rewrite an unaligned load/store.
	 * Returning zero is a special value meaning we generated a signal.
	 */
	if (mem_op != MEMOP_NONE && align_ctl >= 0) {
		bundle = rewrite_load_store_unaligned(state, bundle, regs,
						      mem_op, size, sign_ext);
		if (bundle == 0)
			return;
	}

	/* write the bundle to our execution area */
	buffer = state->buffer;
	err = __put_user(bundle, buffer++);

	/*
	 * If we're really single-stepping, we take an INT_ILL after.
	 * If we're just handling an unaligned access, we can just
	 * jump directly back to where we were in user code.
	 */
	if (is_single_step) {
		err |= __put_user(__single_step_ill_insn, buffer++);
		err |= __put_user(__single_step_ill_insn, buffer++);
	} else {
		long delta;

		if (state->update) {
			/* We have some state to update; do it inline */
			int ha16;
			bundle = __single_step_addli_insn;
			bundle |= create_Dest_X1(state->update_reg);
			bundle |= create_Imm16_X1(state->update_value);
			err |= __put_user(bundle, buffer++);
			bundle = __single_step_auli_insn;
			bundle |= create_Dest_X1(state->update_reg);
			bundle |= create_SrcA_X1(state->update_reg);
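			/*
			 * Worked example (added): addli sign-extends its
			 * 16-bit immediate, so for update_value 0x12348000
			 * the addli leaves 0xffff8000; ha16 becomes
			 * (0x12348000 + 0x8000) >> 16 = 0x1235, and the
			 * auli then adds 0x12350000, restoring 0x12348000.
			 */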
			ha16 = (state->update_value + 0x8000) >> 16;
			bundle |= create_Imm16_X1(ha16);
			err |= __put_user(bundle, buffer++);
			state->update = 0;
		}

		/* End with a jump back to the next instruction */
		delta = ((regs->pc + TILEPRO_BUNDLE_SIZE_IN_BYTES) -
			(unsigned long)buffer) >>
			TILEPRO_LOG2_BUNDLE_ALIGNMENT_IN_BYTES;
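		/*
		 * Added note: the jump offset just computed is counted in
		 * bundles from the jump's own address, hence the shift by
		 * the log2 bundle size; "buffer" still points at the slot
		 * the jump is written into below.
		 */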
		bundle = __single_step_j_insn;
		bundle |= create_JOffLong_X1(delta);
		err |= __put_user(bundle, buffer++);
	}

	if (err) {
		pr_err("Fault when writing to single-step buffer\n");
		return;
	}

	/*
	 * Flush the buffer.
	 * We do a local flush only, since this is a thread-specific buffer.
	 */
	__flush_icache_range((unsigned long)state->buffer,
			     (unsigned long)buffer);

	/* Indicate enabled */
	state->is_enabled = is_single_step;
	regs->pc = (unsigned long)state->buffer;

	/* Fault immediately if we are coming back from a syscall. */
	if (regs->faultnum == INT_SWINT_1)
		regs->pc += 8;
}

#else

static DEFINE_PER_CPU(unsigned long, ss_saved_pc);


/*
 * Called directly on the occasion of an interrupt.
 *
 * If the process doesn't have single step set, then we use this as an
 * opportunity to turn single step off.
 *
 * It has been mentioned that we could conditionally turn off single stepping
 * on each entry into the kernel and rely on single_step_once to turn it
 * on for the processes that matter (as we already do), but this
 * implementation is somewhat more efficient in that we muck with registers
 * once on a bum interrupt rather than on every entry into the kernel.
 *
 * If SINGLE_STEP_CONTROL_K has CANCELED set, then an interrupt occurred,
 * so we have to run through this process again before we can say that an
 * instruction has executed.
 *
 * swint will set CANCELED, but it's a legitimate instruction.  Fortunately
 * it changes the PC.  If it hasn't changed, then we know that the interrupt
 * wasn't generated by swint and we'll need to run this process again before
 * we can say an instruction has executed.
 *
 * If either CANCELED == 0 or the PC's changed, we send out SIGTRAPs and get
 * on with our lives.
 */
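/*
 * Decision summary (added, illustrative):
 *   TIF_SINGLESTEP clear          -> disable the single-step SPR
 *   PC moved or CANCELED clear    -> an instruction ran: set CANCELED and
 *                                    INHIBIT, then send SIGTRAP
 *   PC unchanged and CANCELED set -> spurious interrupt: run again silently
 */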

void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
{
	unsigned long *ss_pc = this_cpu_ptr(&ss_saved_pc);
	struct thread_info *info = (void *)current_thread_info();
	int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
	unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);

	if (is_single_step == 0) {
		__insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 0);

	} else if ((*ss_pc != regs->pc) ||
		   (!(control & SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK))) {

		control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
		control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
		__insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
		send_sigtrap(current, regs);
	}
}

/*
 * Called from need_singlestep.  Set up the control registers and the enable
 * register, then return.
 */

void single_step_once(struct pt_regs *regs)
{
	unsigned long *ss_pc = this_cpu_ptr(&ss_saved_pc);
	unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);

	*ss_pc = regs->pc;
	control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
	control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
	__insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
	__insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 1 << USER_PL);
}

void single_step_execve(void)
{
	/* Nothing */
}

#endif /* !__tilegx__ */