v3.5.6
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/sched.h>
#include <linux/preempt.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/kprobes.h>
#include <linux/elfcore.h>
#include <linux/tick.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/hardirq.h>
#include <linux/syscalls.h>
#include <linux/kernel.h>
#include <linux/tracehook.h>
#include <linux/signal.h>
#include <asm/stack.h>
#include <asm/switch_to.h>
#include <asm/homecache.h>
#include <asm/syscalls.h>
#include <asm/traps.h>
#include <asm/setup.h>
#ifdef CONFIG_HARDWALL
#include <asm/hardwall.h>
#endif
#include <arch/chip.h>
#include <arch/abi.h>
#include <arch/sim_def.h>


/*
 * Use the (x86) "idle=poll" option to prefer low latency when leaving the
 * idle loop over low power while in the idle loop, e.g. if we have
 * one thread per core and we want to get threads out of futex waits fast.
 */
static int no_idle_nap;
static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		pr_info("using polling idle threads.\n");
		no_idle_nap = 1;
	} else if (!strcmp(str, "halt"))
		no_idle_nap = 0;
	else
		return -1;

	return 0;
}
early_param("idle", idle_setup);
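
/*
 * Example: booting with "idle=poll" on the kernel command line selects
 * the busy-wait polling loop in cpu_idle() below instead of napping.
 */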

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	int cpu = smp_processor_id();

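	/*
	 * TS_POLLING tells the scheduler that this cpu polls
	 * need_resched() itself, so no reschedule IPI is needed
	 * to wake it.
	 */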
	current_thread_info()->status |= TS_POLLING;

	if (no_idle_nap) {
		while (1) {
			while (!need_resched())
				cpu_relax();
			schedule();
		}
	}

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_idle_enter();
		rcu_idle_enter();
		while (!need_resched()) {
			if (cpu_is_offline(cpu))
				BUG();  /* no HOTPLUG_CPU */

			local_irq_disable();
			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
			current_thread_info()->status &= ~TS_POLLING;
			/*
			 * TS_POLLING-cleared state must be visible before we
			 * test NEED_RESCHED:
			 */
			smp_mb();

			if (!need_resched())
				_cpu_idle();
			else
				local_irq_enable();
			current_thread_info()->status |= TS_POLLING;
		}
		rcu_idle_exit();
		tick_nohz_idle_exit();
		schedule_preempt_disabled();
	}
}

/*
 * Release a thread_info structure
 */
void arch_release_thread_info(struct thread_info *info)
{
	struct single_step_state *step_state = info->step_state;

#ifdef CONFIG_HARDWALL
	/*
	 * We free a thread_info from the context of the task that has
	 * been scheduled next, so the original task is already dead.
	 * Calling deactivate here just frees up the data structures.
	 * If the task we're freeing held the last reference to a
	 * hardwall fd, it would have been released prior to this point
	 * anyway via exit_files(), and the hardwall_task.info pointers
	 * would be NULL by now.
	 */
	hardwall_deactivate_all(info->task);
#endif

	if (step_state) {

		/*
		 * FIXME: we don't munmap step_state->buffer
		 * because the mm_struct for this process (info->task->mm)
		 * has already been zeroed in exit_mm().  Keeping a
		 * reference to it here seems like a bad move, so this
		 * means we can't munmap() the buffer, and therefore if we
		 * ptrace multiple threads in a process, we will slowly
		 * leak user memory.  (Note that as soon as the last
		 * thread in a process dies, we will reclaim all user
		 * memory including single-step buffers in the usual way.)
		 * We should either assign a kernel VA to this buffer
		 * somehow, or we should associate the buffer(s) with the
		 * mm itself so we can clean them up that way.
		 */
		kfree(step_state);
	}
}

static void save_arch_state(struct thread_struct *t);

int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long stack_size,
		struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs;
	unsigned long ksp;

	/*
	 * When creating a new kernel thread we pass sp as zero.
	 * Assign it to a reasonable value now that we have the stack.
	 */
	if (sp == 0 && regs->ex1 == PL_ICS_EX1(KERNEL_PL, 0))
		sp = KSTK_TOP(p);

	/*
	 * Do not clone step state from the parent; each thread
	 * must make its own lazily.
	 */
	task_thread_info(p)->step_state = NULL;

	/*
	 * Start new thread in ret_from_fork so it schedules properly
	 * and then return from interrupt like the parent.
	 */
	p->thread.pc = (unsigned long) ret_from_fork;

	/* Save user stack top pointer so we can ID the stack vm area later. */
	p->thread.usp0 = sp;

	/* Record the pid of the process that created this one. */
	p->thread.creator_pid = current->pid;

	/*
	 * Copy the registers onto the kernel stack so the
	 * return-from-interrupt code will reload it into registers.
	 */
	childregs = task_pt_regs(p);
	*childregs = *regs;
	childregs->regs[0] = 0;         /* return value is zero */
	childregs->sp = sp;  /* override with new user stack pointer */

	/*
	 * If CLONE_SETTLS is set, set "tp" in the new task to "r4",
	 * which is passed in as arg #5 to sys_clone().
	 */
	if (clone_flags & CLONE_SETTLS)
		childregs->tp = regs->regs[4];

	/*
	 * Copy the callee-saved registers from the passed pt_regs struct
	 * into the context-switch callee-saved registers area.
	 * This way when we start the interrupt-return sequence, the
	 * callee-save registers will be correctly in registers, which
	 * is how we assume the compiler leaves them as we start doing
	 * the normal return-from-interrupt path after calling C code.
	 * Zero out the C ABI save area to mark the top of the stack.
	 */
	ksp = (unsigned long) childregs;
	ksp -= C_ABI_SAVE_AREA_SIZE;   /* interrupt-entry save area */
	((long *)ksp)[0] = ((long *)ksp)[1] = 0;
	ksp -= CALLEE_SAVED_REGS_COUNT * sizeof(unsigned long);
	memcpy((void *)ksp, &regs->regs[CALLEE_SAVED_FIRST_REG],
	       CALLEE_SAVED_REGS_COUNT * sizeof(unsigned long));
	ksp -= C_ABI_SAVE_AREA_SIZE;   /* __switch_to() save area */
	((long *)ksp)[0] = ((long *)ksp)[1] = 0;
	p->thread.ksp = ksp;
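
	/*
	 * Resulting child kernel stack, top down:
	 *   task_pt_regs(p)                    <- childregs (pt_regs copy)
	 *   two zeroed C ABI save-area words   <- interrupt-entry frame
	 *   callee-saved registers             <- copied from "regs"
	 *   two zeroed C ABI save-area words   <- p->thread.ksp
	 */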

#if CHIP_HAS_TILE_DMA()
	/*
	 * No DMA in the new thread.  We model this on the fact that
	 * fork() clears the pending signals, alarms, and aio for the child.
	 */
	memset(&p->thread.tile_dma_state, 0, sizeof(struct tile_dma_state));
	memset(&p->thread.dma_async_tlb, 0, sizeof(struct async_tlb));
#endif

#if CHIP_HAS_SN_PROC()
	/* Likewise, the new thread is not running static processor code. */
	p->thread.sn_proc_running = 0;
	memset(&p->thread.sn_async_tlb, 0, sizeof(struct async_tlb));
#endif

#if CHIP_HAS_PROC_STATUS_SPR()
	/* New thread has its miscellaneous processor state bits clear. */
	p->thread.proc_status = 0;
#endif

#ifdef CONFIG_HARDWALL
	/* New thread does not own any networks. */
	memset(&p->thread.hardwall[0], 0,
	       sizeof(struct hardwall_task) * HARDWALL_TYPES);
#endif


	/*
	 * Start the new thread with the current architecture state
	 * (user interrupt masks, etc.).
	 */
	save_arch_state(&p->thread);

	return 0;
}

/*
 * Return "current" if it looks plausible, or else a pointer to a dummy.
 * This can be helpful if we are just trying to emit a clean panic.
 */
struct task_struct *validate_current(void)
{
	static struct task_struct corrupt = { .comm = "<corrupt>" };
	struct task_struct *tsk = current;
	if (unlikely((unsigned long)tsk < PAGE_OFFSET ||
		     (high_memory && (void *)tsk > high_memory) ||
		     ((unsigned long)tsk & (__alignof__(*tsk) - 1)) != 0)) {
		pr_err("Corrupt 'current' %p (sp %#lx)\n", tsk, stack_pointer);
		tsk = &corrupt;
	}
	return tsk;
}

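/*
 * The two SPR_SIM_CONTROL writes below report the creator (parent) pid
 * and the new task's pid to the Tilera simulator at fork time; the
 * operation codes come from <arch/sim_def.h>.
 */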
/* Take and return the pointer to the previous task, for schedule_tail(). */
struct task_struct *sim_notify_fork(struct task_struct *prev)
{
	struct task_struct *tsk = current;
	__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_FORK_PARENT |
		     (tsk->thread.creator_pid << _SIM_CONTROL_OPERATOR_BITS));
	__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_FORK |
		     (tsk->pid << _SIM_CONTROL_OPERATOR_BITS));
	return prev;
}

int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
	struct pt_regs *ptregs = task_pt_regs(tsk);
	elf_core_copy_regs(regs, ptregs);
	return 1;
}

#if CHIP_HAS_TILE_DMA()

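/*
 * Note: each MPL_*_SET_<n> write below moves the "minimum protection
 * level" of the corresponding DMA resource to privilege level <n>;
 * which levels are used depends on whether the kernel itself runs at
 * PL 1 or PL 2 (CONFIG_KERNEL_PL).
 */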
/* Allow user processes to access the DMA SPRs */
void grant_dma_mpls(void)
{
#if CONFIG_KERNEL_PL == 2
	__insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
	__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
#else
	__insn_mtspr(SPR_MPL_DMA_CPL_SET_0, 1);
	__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_0, 1);
#endif
}

/* Forbid user processes from accessing the DMA SPRs */
void restrict_dma_mpls(void)
{
#if CONFIG_KERNEL_PL == 2
	__insn_mtspr(SPR_MPL_DMA_CPL_SET_2, 1);
	__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_2, 1);
#else
	__insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
	__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
#endif
}

/* Pause the DMA engine, then save off its state registers. */
static void save_tile_dma_state(struct tile_dma_state *dma)
{
	unsigned long state = __insn_mfspr(SPR_DMA_USER_STATUS);
	unsigned long post_suspend_state;

	/* If we're running, suspend the engine. */
	if ((state & DMA_STATUS_MASK) == SPR_DMA_STATUS__RUNNING_MASK)
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__SUSPEND_MASK);

	/*
	 * Wait for the engine to idle, then save regs.  Note that we
	 * want to record the "running" bit from before suspension,
	 * and the "done" bit from after, so that we can properly
	 * distinguish a case where the user suspended the engine from
	 * the case where the kernel suspended as part of the context
	 * swap.
	 */
	do {
		post_suspend_state = __insn_mfspr(SPR_DMA_USER_STATUS);
	} while (post_suspend_state & SPR_DMA_STATUS__BUSY_MASK);

	dma->src = __insn_mfspr(SPR_DMA_SRC_ADDR);
	dma->src_chunk = __insn_mfspr(SPR_DMA_SRC_CHUNK_ADDR);
	dma->dest = __insn_mfspr(SPR_DMA_DST_ADDR);
	dma->dest_chunk = __insn_mfspr(SPR_DMA_DST_CHUNK_ADDR);
	dma->strides = __insn_mfspr(SPR_DMA_STRIDE);
	dma->chunk_size = __insn_mfspr(SPR_DMA_CHUNK_SIZE);
	dma->byte = __insn_mfspr(SPR_DMA_BYTE);
	dma->status = (state & SPR_DMA_STATUS__RUNNING_MASK) |
		(post_suspend_state & SPR_DMA_STATUS__DONE_MASK);
}

/* Restart a DMA that was running before we were context-switched out. */
static void restore_tile_dma_state(struct thread_struct *t)
{
	const struct tile_dma_state *dma = &t->tile_dma_state;

	/*
	 * The only way to restore the done bit is to run a zero
	 * length transaction.
	 */
	if ((dma->status & SPR_DMA_STATUS__DONE_MASK) &&
	    !(__insn_mfspr(SPR_DMA_USER_STATUS) & SPR_DMA_STATUS__DONE_MASK)) {
		__insn_mtspr(SPR_DMA_BYTE, 0);
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
		while (__insn_mfspr(SPR_DMA_USER_STATUS) &
		       SPR_DMA_STATUS__BUSY_MASK)
			;
	}

	__insn_mtspr(SPR_DMA_SRC_ADDR, dma->src);
	__insn_mtspr(SPR_DMA_SRC_CHUNK_ADDR, dma->src_chunk);
	__insn_mtspr(SPR_DMA_DST_ADDR, dma->dest);
	__insn_mtspr(SPR_DMA_DST_CHUNK_ADDR, dma->dest_chunk);
	__insn_mtspr(SPR_DMA_STRIDE, dma->strides);
	__insn_mtspr(SPR_DMA_CHUNK_SIZE, dma->chunk_size);
	__insn_mtspr(SPR_DMA_BYTE, dma->byte);

	/*
	 * Restart the engine if we were running and not done.
	 * Clear a pending async DMA fault that we were waiting on return
	 * to user space to execute, since we expect the DMA engine
	 * to regenerate those faults for us now.  Note that we don't
	 * try to clear the TIF_ASYNC_TLB flag, since it's relatively
	 * harmless if set, and it covers both DMA and the SN processor.
	 */
	if ((dma->status & DMA_STATUS_MASK) == SPR_DMA_STATUS__RUNNING_MASK) {
		t->dma_async_tlb.fault_num = 0;
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
	}
}

#endif

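/*
 * Snapshot the per-task SPR state (interrupt masks, exception context,
 * system-save registers, etc.) so restore_arch_state() can put it back
 * when the task is next switched in.
 */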
static void save_arch_state(struct thread_struct *t)
{
#if CHIP_HAS_SPLIT_INTR_MASK()
	t->interrupt_mask = __insn_mfspr(SPR_INTERRUPT_MASK_0_0) |
		((u64)__insn_mfspr(SPR_INTERRUPT_MASK_0_1) << 32);
#else
	t->interrupt_mask = __insn_mfspr(SPR_INTERRUPT_MASK_0);
#endif
	t->ex_context[0] = __insn_mfspr(SPR_EX_CONTEXT_0_0);
	t->ex_context[1] = __insn_mfspr(SPR_EX_CONTEXT_0_1);
	t->system_save[0] = __insn_mfspr(SPR_SYSTEM_SAVE_0_0);
	t->system_save[1] = __insn_mfspr(SPR_SYSTEM_SAVE_0_1);
	t->system_save[2] = __insn_mfspr(SPR_SYSTEM_SAVE_0_2);
	t->system_save[3] = __insn_mfspr(SPR_SYSTEM_SAVE_0_3);
	t->intctrl_0 = __insn_mfspr(SPR_INTCTRL_0_STATUS);
#if CHIP_HAS_PROC_STATUS_SPR()
	t->proc_status = __insn_mfspr(SPR_PROC_STATUS);
#endif
#if !CHIP_HAS_FIXED_INTVEC_BASE()
	t->interrupt_vector_base = __insn_mfspr(SPR_INTERRUPT_VECTOR_BASE_0);
#endif
#if CHIP_HAS_TILE_RTF_HWM()
	t->tile_rtf_hwm = __insn_mfspr(SPR_TILE_RTF_HWM);
#endif
#if CHIP_HAS_DSTREAM_PF()
	t->dstream_pf = __insn_mfspr(SPR_DSTREAM_PF);
#endif
}

static void restore_arch_state(const struct thread_struct *t)
{
#if CHIP_HAS_SPLIT_INTR_MASK()
	__insn_mtspr(SPR_INTERRUPT_MASK_0_0, (u32) t->interrupt_mask);
	__insn_mtspr(SPR_INTERRUPT_MASK_0_1, t->interrupt_mask >> 32);
#else
	__insn_mtspr(SPR_INTERRUPT_MASK_0, t->interrupt_mask);
#endif
	__insn_mtspr(SPR_EX_CONTEXT_0_0, t->ex_context[0]);
	__insn_mtspr(SPR_EX_CONTEXT_0_1, t->ex_context[1]);
	__insn_mtspr(SPR_SYSTEM_SAVE_0_0, t->system_save[0]);
	__insn_mtspr(SPR_SYSTEM_SAVE_0_1, t->system_save[1]);
	__insn_mtspr(SPR_SYSTEM_SAVE_0_2, t->system_save[2]);
	__insn_mtspr(SPR_SYSTEM_SAVE_0_3, t->system_save[3]);
	__insn_mtspr(SPR_INTCTRL_0_STATUS, t->intctrl_0);
#if CHIP_HAS_PROC_STATUS_SPR()
	__insn_mtspr(SPR_PROC_STATUS, t->proc_status);
#endif
#if !CHIP_HAS_FIXED_INTVEC_BASE()
	__insn_mtspr(SPR_INTERRUPT_VECTOR_BASE_0, t->interrupt_vector_base);
#endif
#if CHIP_HAS_TILE_RTF_HWM()
	__insn_mtspr(SPR_TILE_RTF_HWM, t->tile_rtf_hwm);
#endif
#if CHIP_HAS_DSTREAM_PF()
	__insn_mtspr(SPR_DSTREAM_PF, t->dstream_pf);
#endif
}


void _prepare_arch_switch(struct task_struct *next)
{
#if CHIP_HAS_SN_PROC()
	int snctl;
#endif
#if CHIP_HAS_TILE_DMA()
	struct tile_dma_state *dma = &current->thread.tile_dma_state;
	if (dma->enabled)
		save_tile_dma_state(dma);
#endif
#if CHIP_HAS_SN_PROC()
	/*
	 * Suspend the static network processor if it was running.
	 * We do not suspend the fabric itself, just like we don't
	 * try to suspend the UDN.
	 */
	snctl = __insn_mfspr(SPR_SNCTL);
	current->thread.sn_proc_running =
		(snctl & SPR_SNCTL__FRZPROC_MASK) == 0;
	if (current->thread.sn_proc_running)
		__insn_mtspr(SPR_SNCTL, snctl | SPR_SNCTL__FRZPROC_MASK);
#endif
}


struct task_struct *__sched _switch_to(struct task_struct *prev,
				       struct task_struct *next)
{
	/* DMA state is already saved; save off other arch state. */
	save_arch_state(&prev->thread);

#if CHIP_HAS_TILE_DMA()
	/*
	 * Restore DMA in new task if desired.
	 * Note that it is only safe to restart here since interrupts
	 * are disabled, so we can't take any DMATLB miss or access
	 * interrupts before we have finished switching stacks.
	 */
	if (next->thread.tile_dma_state.enabled) {
		restore_tile_dma_state(&next->thread);
		grant_dma_mpls();
	} else {
		restrict_dma_mpls();
	}
#endif

	/* Restore other arch state. */
	restore_arch_state(&next->thread);

#if CHIP_HAS_SN_PROC()
	/*
	 * Restart static network processor in the new process
	 * if it was running before.
	 */
	if (next->thread.sn_proc_running) {
		int snctl = __insn_mfspr(SPR_SNCTL);
		__insn_mtspr(SPR_SNCTL, snctl & ~SPR_SNCTL__FRZPROC_MASK);
	}
#endif

#ifdef CONFIG_HARDWALL
	/* Enable or disable access to the network registers appropriately. */
	hardwall_switch_tasks(prev, next);
#endif

	/*
	 * Switch kernel SP, PC, and callee-saved registers.
	 * In the context of the new task, return the old task pointer
	 * (i.e. the task that actually called __switch_to).
	 * Pass the value to use for SYSTEM_SAVE_K_0 when we reset our sp.
	 */
	return __switch_to(prev, next, next_current_ksp0(next));
}

/*
 * This routine is called on return from interrupt if any of the
 * TIF_WORK_MASK flags are set in thread_info->flags.  It is
 * entered with interrupts disabled so we don't miss an event
 * that modified the thread_info flags.  If any flag is set, we
 * handle it and return, and the calling assembly code will
 * re-disable interrupts, reload the thread flags, and call back
 * if more flags need to be handled.
 *
 * We return whether we need to check the thread_info flags again
 * or not.  Note that we don't clear TIF_SINGLESTEP here, so it's
 * important that it be tested last, and then claim that we don't
 * need to recheck the flags.
 */
int do_work_pending(struct pt_regs *regs, u32 thread_info_flags)
{
	/* If we enter in kernel mode, do nothing and exit the caller loop. */
	if (!user_mode(regs))
		return 0;

	if (thread_info_flags & _TIF_NEED_RESCHED) {
		schedule();
		return 1;
	}
#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
	if (thread_info_flags & _TIF_ASYNC_TLB) {
		do_async_page_fault(regs);
		return 1;
	}
#endif
	if (thread_info_flags & _TIF_SIGPENDING) {
		do_signal(regs);
		return 1;
	}
	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
		return 1;
	}
	if (thread_info_flags & _TIF_SINGLESTEP) {
		single_step_once(regs);
		return 0;
	}
	panic("work_pending: bad flags %#x\n", thread_info_flags);
}

/* Note there is an implicit fifth argument if (clone_flags & CLONE_SETTLS). */
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
		void __user *, parent_tidptr, void __user *, child_tidptr,
		struct pt_regs *, regs)
{
	if (!newsp)
		newsp = regs->sp;
	return do_fork(clone_flags, newsp, regs, 0,
		       parent_tidptr, child_tidptr);
}

/*
 * sys_execve() executes a new program.
 */
SYSCALL_DEFINE4(execve, const char __user *, path,
		const char __user *const __user *, argv,
		const char __user *const __user *, envp,
		struct pt_regs *, regs)
{
	long error;
	char *filename;

	filename = getname(path);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename, argv, envp, regs);
	putname(filename);
	if (error == 0)
		single_step_execve();
out:
	return error;
}

#ifdef CONFIG_COMPAT
long compat_sys_execve(const char __user *path,
		       compat_uptr_t __user *argv,
		       compat_uptr_t __user *envp,
		       struct pt_regs *regs)
{
	long error;
	char *filename;

	filename = getname(path);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = compat_do_execve(filename, argv, envp, regs);
	putname(filename);
	if (error == 0)
		single_step_execve();
out:
	return error;
}
#endif

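/*
 * get_wchan() reports the pc at which a sleeping task is blocked,
 * skipping frames inside the scheduler itself; this is the "wchan"
 * value that ps(1) shows.
 */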
unsigned long get_wchan(struct task_struct *p)
{
	struct KBacktraceIterator kbt;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	for (KBacktraceIterator_init(&kbt, p, NULL);
	     !KBacktraceIterator_end(&kbt);
	     KBacktraceIterator_next(&kbt)) {
		if (!in_sched_functions(kbt.it.pc))
			return kbt.it.pc;
	}

	return 0;
}

/*
 * We pass in lr as zero (cleared in kernel_thread) and the caller
 * part of the backtrace ABI on the stack also zeroed (in copy_thread)
 * so that backtraces will stop with this function.
 * Note that we don't use r0, since copy_thread() clears it.
 */
static void start_kernel_thread(int dummy, int (*fn)(int), int arg)
{
	do_exit(fn(arg));
}

/*
 * Create a kernel thread
 */
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));
	regs.ex1 = PL_ICS_EX1(KERNEL_PL, 0);  /* run at kernel PL, no ICS */
	regs.pc = (long) start_kernel_thread;
	regs.flags = PT_FLAGS_CALLER_SAVES;   /* need to restore r1 and r2 */
	regs.regs[1] = (long) fn;             /* function pointer */
	regs.regs[2] = (long) arg;            /* parameter register */

	/* Ok, create the new process.. */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs,
		       0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
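
/*
 * Example: kernel_thread(fn, arg, CLONE_FS | CLONE_FILES) starts a
 * kernel-mode task running fn(arg); start_kernel_thread() above passes
 * fn's return value to do_exit() when it returns.
 */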

/* Flush thread state. */
void flush_thread(void)
{
	/* Nothing */
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	/* Nothing */
}

void show_regs(struct pt_regs *regs)
{
	struct task_struct *tsk = validate_current();
	int i;

	pr_err("\n");
	pr_err(" Pid: %d, comm: %20s, CPU: %d\n",
	       tsk->pid, tsk->comm, smp_processor_id());
#ifdef __tilegx__
	for (i = 0; i < 51; i += 3)
		pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
		       i, regs->regs[i], i+1, regs->regs[i+1],
		       i+2, regs->regs[i+2]);
	pr_err(" r51: "REGFMT" r52: "REGFMT" tp : "REGFMT"\n",
	       regs->regs[51], regs->regs[52], regs->tp);
	pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr);
#else
	for (i = 0; i < 52; i += 4)
		pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT
		       " r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
		       i, regs->regs[i], i+1, regs->regs[i+1],
		       i+2, regs->regs[i+2], i+3, regs->regs[i+3]);
	pr_err(" r52: "REGFMT" tp : "REGFMT" sp : "REGFMT" lr : "REGFMT"\n",
	       regs->regs[52], regs->tp, regs->sp, regs->lr);
#endif
	pr_err(" pc : "REGFMT" ex1: %ld     faultnum: %ld\n",
	       regs->pc, regs->ex1, regs->faultnum);

	dump_stack_regs(regs);
}
v3.15
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/sched.h>
#include <linux/preempt.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/kprobes.h>
#include <linux/elfcore.h>
#include <linux/tick.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/hardirq.h>
#include <linux/syscalls.h>
#include <linux/kernel.h>
#include <linux/tracehook.h>
#include <linux/signal.h>
#include <asm/stack.h>
#include <asm/switch_to.h>
#include <asm/homecache.h>
#include <asm/syscalls.h>
#include <asm/traps.h>
#include <asm/setup.h>
#include <asm/uaccess.h>
#ifdef CONFIG_HARDWALL
#include <asm/hardwall.h>
#endif
#include <arch/chip.h>
#include <arch/abi.h>
#include <arch/sim_def.h>

/*
 * Use the (x86) "idle=poll" option to prefer low latency when leaving the
 * idle loop over low power while in the idle loop, e.g. if we have
 * one thread per core and we want to get threads out of futex waits fast.
 */
static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		pr_info("using polling idle threads.\n");
		cpu_idle_poll_ctrl(true);
		return 0;
	} else if (!strcmp(str, "halt")) {
		return 0;
	}
	return -1;
}
early_param("idle", idle_setup);

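/*
 * Called from the generic idle loop; we timestamp the idle entry and
 * nap the core in _cpu_idle() until an interrupt arrives.
 */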
void arch_cpu_idle(void)
{
	__get_cpu_var(irq_stat).idle_timestamp = jiffies;
	_cpu_idle();
}

/*
 * Release a thread_info structure
 */
void arch_release_thread_info(struct thread_info *info)
{
	struct single_step_state *step_state = info->step_state;

	if (step_state) {

		/*
		 * FIXME: we don't munmap step_state->buffer
		 * because the mm_struct for this process (info->task->mm)
		 * has already been zeroed in exit_mm().  Keeping a
		 * reference to it here seems like a bad move, so this
		 * means we can't munmap() the buffer, and therefore if we
		 * ptrace multiple threads in a process, we will slowly
		 * leak user memory.  (Note that as soon as the last
		 * thread in a process dies, we will reclaim all user
		 * memory including single-step buffers in the usual way.)
		 * We should either assign a kernel VA to this buffer
		 * somehow, or we should associate the buffer(s) with the
		 * mm itself so we can clean them up that way.
		 */
		kfree(step_state);
	}
}

static void save_arch_state(struct thread_struct *t);

int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long arg, struct task_struct *p)
{
	struct pt_regs *childregs = task_pt_regs(p);
	unsigned long ksp;
	unsigned long *callee_regs;

	/*
	 * Set up the stack and stack pointer appropriately for the
	 * new child to find itself woken up in __switch_to().
	 * The callee-saved registers must be on the stack to be read;
	 * the new task will then jump to assembly support to handle
	 * calling schedule_tail(), etc., and (for userspace tasks)
	 * returning to the context set up in the pt_regs.
	 */
	ksp = (unsigned long) childregs;
	ksp -= C_ABI_SAVE_AREA_SIZE;   /* interrupt-entry save area */
	((long *)ksp)[0] = ((long *)ksp)[1] = 0;
	ksp -= CALLEE_SAVED_REGS_COUNT * sizeof(unsigned long);
	callee_regs = (unsigned long *)ksp;
	ksp -= C_ABI_SAVE_AREA_SIZE;   /* __switch_to() save area */
	((long *)ksp)[0] = ((long *)ksp)[1] = 0;
	p->thread.ksp = ksp;

	/* Record the pid of the task that created this one. */
	p->thread.creator_pid = current->pid;

	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		memset(childregs, 0, sizeof(struct pt_regs));
		memset(&callee_regs[2], 0,
		       (CALLEE_SAVED_REGS_COUNT - 2) * sizeof(unsigned long));
		callee_regs[0] = sp;   /* r30 = function */
		callee_regs[1] = arg;  /* r31 = arg */
		childregs->ex1 = PL_ICS_EX1(KERNEL_PL, 0);
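		/*
		 * __switch_to() will resume at ret_from_kernel_thread,
		 * which retrieves the function and its argument from the
		 * r30/r31 slots saved just above.
		 */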
		p->thread.pc = (unsigned long) ret_from_kernel_thread;
		return 0;
	}

	/*
	 * Start new thread in ret_from_fork so it schedules properly
	 * and then return from interrupt like the parent.
	 */
	p->thread.pc = (unsigned long) ret_from_fork;

	/*
	 * Do not clone step state from the parent; each thread
	 * must make its own lazily.
	 */
	task_thread_info(p)->step_state = NULL;

#ifdef __tilegx__
	/*
	 * Do not clone unalign jit fixup from the parent; each thread
	 * must allocate its own on demand.
	 */
	task_thread_info(p)->unalign_jit_base = NULL;
#endif

	/*
	 * Copy the registers onto the kernel stack so the
	 * return-from-interrupt code will reload it into registers.
	 */
	*childregs = *current_pt_regs();
	childregs->regs[0] = 0;         /* return value is zero */
	if (sp)
		childregs->sp = sp;  /* override with new user stack pointer */
	memcpy(callee_regs, &childregs->regs[CALLEE_SAVED_FIRST_REG],
	       CALLEE_SAVED_REGS_COUNT * sizeof(unsigned long));

	/* Save user stack top pointer so we can ID the stack vm area later. */
	p->thread.usp0 = childregs->sp;

	/*
	 * If CLONE_SETTLS is set, set "tp" in the new task to "r4",
	 * which is passed in as arg #5 to sys_clone().
	 */
	if (clone_flags & CLONE_SETTLS)
		childregs->tp = childregs->regs[4];

#if CHIP_HAS_TILE_DMA()
	/*
	 * No DMA in the new thread.  We model this on the fact that
	 * fork() clears the pending signals, alarms, and aio for the child.
	 */
	memset(&p->thread.tile_dma_state, 0, sizeof(struct tile_dma_state));
	memset(&p->thread.dma_async_tlb, 0, sizeof(struct async_tlb));
#endif

	/* New thread has its miscellaneous processor state bits clear. */
	p->thread.proc_status = 0;

#ifdef CONFIG_HARDWALL
	/* New thread does not own any networks. */
	memset(&p->thread.hardwall[0], 0,
	       sizeof(struct hardwall_task) * HARDWALL_TYPES);
#endif


	/*
	 * Start the new thread with the current architecture state
	 * (user interrupt masks, etc.).
	 */
	save_arch_state(&p->thread);

	return 0;
}

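/*
 * These two hooks back the PR_SET_UNALIGN / PR_GET_UNALIGN prctl()
 * operations, which control how unaligned userspace accesses are
 * handled.
 */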
int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
	task_thread_info(tsk)->align_ctl = val;
	return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
	return put_user(task_thread_info(tsk)->align_ctl,
			(unsigned int __user *)adr);
}

static struct task_struct corrupt_current = { .comm = "<corrupt>" };

/*
 * Return "current" if it looks plausible, or else a pointer to a dummy.
 * This can be helpful if we are just trying to emit a clean panic.
 */
struct task_struct *validate_current(void)
{
	struct task_struct *tsk = current;
	if (unlikely((unsigned long)tsk < PAGE_OFFSET ||
		     (high_memory && (void *)tsk > high_memory) ||
		     ((unsigned long)tsk & (__alignof__(*tsk) - 1)) != 0)) {
		pr_err("Corrupt 'current' %p (sp %#lx)\n", tsk, stack_pointer);
		tsk = &corrupt_current;
	}
	return tsk;
}

/* Take and return the pointer to the previous task, for schedule_tail(). */
struct task_struct *sim_notify_fork(struct task_struct *prev)
{
	struct task_struct *tsk = current;
	__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_FORK_PARENT |
		     (tsk->thread.creator_pid << _SIM_CONTROL_OPERATOR_BITS));
	__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_FORK |
		     (tsk->pid << _SIM_CONTROL_OPERATOR_BITS));
	return prev;
}

int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
	struct pt_regs *ptregs = task_pt_regs(tsk);
	elf_core_copy_regs(regs, ptregs);
	return 1;
}

#if CHIP_HAS_TILE_DMA()

/* Allow user processes to access the DMA SPRs */
void grant_dma_mpls(void)
{
#if CONFIG_KERNEL_PL == 2
	__insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
	__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
#else
	__insn_mtspr(SPR_MPL_DMA_CPL_SET_0, 1);
	__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_0, 1);
#endif
}

/* Forbid user processes from accessing the DMA SPRs */
void restrict_dma_mpls(void)
{
#if CONFIG_KERNEL_PL == 2
	__insn_mtspr(SPR_MPL_DMA_CPL_SET_2, 1);
	__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_2, 1);
#else
	__insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
	__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
#endif
}

/* Pause the DMA engine, then save off its state registers. */
static void save_tile_dma_state(struct tile_dma_state *dma)
{
	unsigned long state = __insn_mfspr(SPR_DMA_USER_STATUS);
	unsigned long post_suspend_state;

	/* If we're running, suspend the engine. */
	if ((state & DMA_STATUS_MASK) == SPR_DMA_STATUS__RUNNING_MASK)
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__SUSPEND_MASK);

	/*
	 * Wait for the engine to idle, then save regs.  Note that we
	 * want to record the "running" bit from before suspension,
	 * and the "done" bit from after, so that we can properly
	 * distinguish a case where the user suspended the engine from
	 * the case where the kernel suspended as part of the context
	 * swap.
	 */
	do {
		post_suspend_state = __insn_mfspr(SPR_DMA_USER_STATUS);
	} while (post_suspend_state & SPR_DMA_STATUS__BUSY_MASK);

	dma->src = __insn_mfspr(SPR_DMA_SRC_ADDR);
	dma->src_chunk = __insn_mfspr(SPR_DMA_SRC_CHUNK_ADDR);
	dma->dest = __insn_mfspr(SPR_DMA_DST_ADDR);
	dma->dest_chunk = __insn_mfspr(SPR_DMA_DST_CHUNK_ADDR);
	dma->strides = __insn_mfspr(SPR_DMA_STRIDE);
	dma->chunk_size = __insn_mfspr(SPR_DMA_CHUNK_SIZE);
	dma->byte = __insn_mfspr(SPR_DMA_BYTE);
	dma->status = (state & SPR_DMA_STATUS__RUNNING_MASK) |
		(post_suspend_state & SPR_DMA_STATUS__DONE_MASK);
}

/* Restart a DMA that was running before we were context-switched out. */
static void restore_tile_dma_state(struct thread_struct *t)
{
	const struct tile_dma_state *dma = &t->tile_dma_state;

	/*
	 * The only way to restore the done bit is to run a zero
	 * length transaction.
	 */
	if ((dma->status & SPR_DMA_STATUS__DONE_MASK) &&
	    !(__insn_mfspr(SPR_DMA_USER_STATUS) & SPR_DMA_STATUS__DONE_MASK)) {
		__insn_mtspr(SPR_DMA_BYTE, 0);
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
		while (__insn_mfspr(SPR_DMA_USER_STATUS) &
		       SPR_DMA_STATUS__BUSY_MASK)
			;
	}

	__insn_mtspr(SPR_DMA_SRC_ADDR, dma->src);
	__insn_mtspr(SPR_DMA_SRC_CHUNK_ADDR, dma->src_chunk);
	__insn_mtspr(SPR_DMA_DST_ADDR, dma->dest);
	__insn_mtspr(SPR_DMA_DST_CHUNK_ADDR, dma->dest_chunk);
	__insn_mtspr(SPR_DMA_STRIDE, dma->strides);
	__insn_mtspr(SPR_DMA_CHUNK_SIZE, dma->chunk_size);
	__insn_mtspr(SPR_DMA_BYTE, dma->byte);

	/*
	 * Restart the engine if we were running and not done.
	 * Clear a pending async DMA fault that we were waiting on return
	 * to user space to execute, since we expect the DMA engine
	 * to regenerate those faults for us now.  Note that we don't
	 * try to clear the TIF_ASYNC_TLB flag, since it's relatively
	 * harmless if set, and it covers both DMA and the SN processor.
	 */
	if ((dma->status & DMA_STATUS_MASK) == SPR_DMA_STATUS__RUNNING_MASK) {
		t->dma_async_tlb.fault_num = 0;
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
	}
}

#endif

static void save_arch_state(struct thread_struct *t)
{
#if CHIP_HAS_SPLIT_INTR_MASK()
	t->interrupt_mask = __insn_mfspr(SPR_INTERRUPT_MASK_0_0) |
		((u64)__insn_mfspr(SPR_INTERRUPT_MASK_0_1) << 32);
#else
	t->interrupt_mask = __insn_mfspr(SPR_INTERRUPT_MASK_0);
#endif
	t->ex_context[0] = __insn_mfspr(SPR_EX_CONTEXT_0_0);
	t->ex_context[1] = __insn_mfspr(SPR_EX_CONTEXT_0_1);
	t->system_save[0] = __insn_mfspr(SPR_SYSTEM_SAVE_0_0);
	t->system_save[1] = __insn_mfspr(SPR_SYSTEM_SAVE_0_1);
	t->system_save[2] = __insn_mfspr(SPR_SYSTEM_SAVE_0_2);
	t->system_save[3] = __insn_mfspr(SPR_SYSTEM_SAVE_0_3);
	t->intctrl_0 = __insn_mfspr(SPR_INTCTRL_0_STATUS);
	t->proc_status = __insn_mfspr(SPR_PROC_STATUS);
#if !CHIP_HAS_FIXED_INTVEC_BASE()
	t->interrupt_vector_base = __insn_mfspr(SPR_INTERRUPT_VECTOR_BASE_0);
#endif
	t->tile_rtf_hwm = __insn_mfspr(SPR_TILE_RTF_HWM);
#if CHIP_HAS_DSTREAM_PF()
	t->dstream_pf = __insn_mfspr(SPR_DSTREAM_PF);
#endif
}

static void restore_arch_state(const struct thread_struct *t)
{
#if CHIP_HAS_SPLIT_INTR_MASK()
	__insn_mtspr(SPR_INTERRUPT_MASK_0_0, (u32) t->interrupt_mask);
	__insn_mtspr(SPR_INTERRUPT_MASK_0_1, t->interrupt_mask >> 32);
#else
	__insn_mtspr(SPR_INTERRUPT_MASK_0, t->interrupt_mask);
#endif
	__insn_mtspr(SPR_EX_CONTEXT_0_0, t->ex_context[0]);
	__insn_mtspr(SPR_EX_CONTEXT_0_1, t->ex_context[1]);
	__insn_mtspr(SPR_SYSTEM_SAVE_0_0, t->system_save[0]);
	__insn_mtspr(SPR_SYSTEM_SAVE_0_1, t->system_save[1]);
	__insn_mtspr(SPR_SYSTEM_SAVE_0_2, t->system_save[2]);
	__insn_mtspr(SPR_SYSTEM_SAVE_0_3, t->system_save[3]);
	__insn_mtspr(SPR_INTCTRL_0_STATUS, t->intctrl_0);
	__insn_mtspr(SPR_PROC_STATUS, t->proc_status);
#if !CHIP_HAS_FIXED_INTVEC_BASE()
	__insn_mtspr(SPR_INTERRUPT_VECTOR_BASE_0, t->interrupt_vector_base);
#endif
	__insn_mtspr(SPR_TILE_RTF_HWM, t->tile_rtf_hwm);
#if CHIP_HAS_DSTREAM_PF()
	__insn_mtspr(SPR_DSTREAM_PF, t->dstream_pf);
#endif
}


void _prepare_arch_switch(struct task_struct *next)
{
#if CHIP_HAS_TILE_DMA()
	struct tile_dma_state *dma = &current->thread.tile_dma_state;
	if (dma->enabled)
		save_tile_dma_state(dma);
#endif
}


struct task_struct *__sched _switch_to(struct task_struct *prev,
				       struct task_struct *next)
{
	/* DMA state is already saved; save off other arch state. */
	save_arch_state(&prev->thread);

#if CHIP_HAS_TILE_DMA()
	/*
	 * Restore DMA in new task if desired.
	 * Note that it is only safe to restart here since interrupts
	 * are disabled, so we can't take any DMATLB miss or access
	 * interrupts before we have finished switching stacks.
	 */
	if (next->thread.tile_dma_state.enabled) {
		restore_tile_dma_state(&next->thread);
		grant_dma_mpls();
	} else {
		restrict_dma_mpls();
	}
#endif

	/* Restore other arch state. */
	restore_arch_state(&next->thread);

#ifdef CONFIG_HARDWALL
	/* Enable or disable access to the network registers appropriately. */
	hardwall_switch_tasks(prev, next);
#endif

	/*
	 * Switch kernel SP, PC, and callee-saved registers.
	 * In the context of the new task, return the old task pointer
	 * (i.e. the task that actually called __switch_to).
	 * Pass the value to use for SYSTEM_SAVE_K_0 when we reset our sp.
	 */
	return __switch_to(prev, next, next_current_ksp0(next));
}

/*
 * This routine is called on return from interrupt if any of the
 * TIF_WORK_MASK flags are set in thread_info->flags.  It is
 * entered with interrupts disabled so we don't miss an event
 * that modified the thread_info flags.  If any flag is set, we
 * handle it and return, and the calling assembly code will
 * re-disable interrupts, reload the thread flags, and call back
 * if more flags need to be handled.
 *
 * We return whether we need to check the thread_info flags again
 * or not.  Note that we don't clear TIF_SINGLESTEP here, so it's
 * important that it be tested last, and then claim that we don't
 * need to recheck the flags.
 */
int do_work_pending(struct pt_regs *regs, u32 thread_info_flags)
{
	/* If we enter in kernel mode, do nothing and exit the caller loop. */
	if (!user_mode(regs))
		return 0;

	/* Enable interrupts; they are disabled again on return to caller. */
	local_irq_enable();

	if (thread_info_flags & _TIF_NEED_RESCHED) {
		schedule();
		return 1;
	}
#if CHIP_HAS_TILE_DMA()
	if (thread_info_flags & _TIF_ASYNC_TLB) {
		do_async_page_fault(regs);
		return 1;
	}
#endif
	if (thread_info_flags & _TIF_SIGPENDING) {
		do_signal(regs);
		return 1;
	}
	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
		return 1;
	}
	if (thread_info_flags & _TIF_SINGLESTEP) {
		single_step_once(regs);
		return 0;
	}
	panic("work_pending: bad flags %#x\n", thread_info_flags);
}
unsigned long get_wchan(struct task_struct *p)
{
	struct KBacktraceIterator kbt;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	for (KBacktraceIterator_init(&kbt, p, NULL);
	     !KBacktraceIterator_end(&kbt);
	     KBacktraceIterator_next(&kbt)) {
		if (!in_sched_functions(kbt.it.pc))
			return kbt.it.pc;
	}

	return 0;
}

/* Flush thread state. */
void flush_thread(void)
{
	/* Nothing */
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
#ifdef CONFIG_HARDWALL
	/*
	 * Remove the task from the list of tasks that are associated
	 * with any live hardwalls.  (If the task that is exiting held
	 * the last reference to a hardwall fd, it would already have
	 * been released and deactivated at this point.)
	 */
	hardwall_deactivate_all(current);
#endif
}

void show_regs(struct pt_regs *regs)
{
	struct task_struct *tsk = validate_current();
	int i;

	pr_err("\n");
	if (tsk != &corrupt_current)
		show_regs_print_info(KERN_ERR);
#ifdef __tilegx__
	for (i = 0; i < 17; i++)
		pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
		       i, regs->regs[i], i+18, regs->regs[i+18],
		       i+36, regs->regs[i+36]);
	pr_err(" r17: "REGFMT" r35: "REGFMT" tp : "REGFMT"\n",
	       regs->regs[17], regs->regs[35], regs->tp);
	pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr);
#else
	for (i = 0; i < 13; i++)
		pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT
		       " r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
		       i, regs->regs[i], i+14, regs->regs[i+14],
		       i+27, regs->regs[i+27], i+40, regs->regs[i+40]);
	pr_err(" r13: "REGFMT" tp : "REGFMT" sp : "REGFMT" lr : "REGFMT"\n",
	       regs->regs[13], regs->tp, regs->sp, regs->lr);
#endif
	pr_err(" pc : "REGFMT" ex1: %ld     faultnum: %ld\n",
	       regs->pc, regs->ex1, regs->faultnum);

	dump_stack_regs(regs);
}