/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/sched.h>
#include <linux/preempt.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/kprobes.h>
#include <linux/elfcore.h>
#include <linux/tick.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/hardirq.h>
#include <linux/syscalls.h>
#include <linux/kernel.h>
#include <linux/tracehook.h>
#include <linux/signal.h>
#include <asm/system.h>
#include <asm/stack.h>
#include <asm/homecache.h>
#include <asm/syscalls.h>
#include <asm/traps.h>
#ifdef CONFIG_HARDWALL
#include <asm/hardwall.h>
#endif
#include <arch/chip.h>
#include <arch/abi.h>

/*
 * Use the (x86) "idle=poll" option to prefer low latency when leaving the
 * idle loop over low power while in the idle loop, e.g. if we have
 * one thread per core and we want to get threads out of futex waits fast.
 */
static int no_idle_nap;
static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		pr_info("using polling idle threads.\n");
		no_idle_nap = 1;
	} else if (!strcmp(str, "halt"))
		no_idle_nap = 0;
	else
		return -1;

	return 0;
}
early_param("idle", idle_setup);
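
/*
 * Example (boot command line): booting with "idle=poll" makes idle cpus
 * spin in cpu_relax() below for minimum wakeup latency; "idle=halt"
 * keeps the default low-power nap.  Any other "idle=" value is rejected
 * by idle_setup() above.
 */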

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	int cpu = smp_processor_id();

	current_thread_info()->status |= TS_POLLING;

	if (no_idle_nap) {
		while (1) {
			while (!need_resched())
				cpu_relax();
			schedule();
		}
	}

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick(1);
		while (!need_resched()) {
			if (cpu_is_offline(cpu))
				BUG();  /* no HOTPLUG_CPU */

			local_irq_disable();
			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
			current_thread_info()->status &= ~TS_POLLING;
			/*
			 * TS_POLLING-cleared state must be visible before we
			 * test NEED_RESCHED:
			 */
			smp_mb();

			if (!need_resched())
				_cpu_idle();
			else
				local_irq_enable();
			current_thread_info()->status |= TS_POLLING;
		}
		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
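
/*
 * Why the smp_mb() in cpu_idle() matters: the wakeup side in this
 * kernel generation (resched_task() via tsk_is_polling()) sets
 * NEED_RESCHED and then skips the reschedule IPI if it sees TS_POLLING
 * set.  Without a barrier between clearing TS_POLLING and re-testing
 * need_resched(), this interleaving could nap with work pending:
 *
 *	idle cpu			waking cpu
 *	--------			----------
 *	test need_resched() -> clear
 *					set NEED_RESCHED
 *					TS_POLLING still set -> no IPI
 *	clear TS_POLLING
 *	_cpu_idle()			(sleeps despite pending work)
 */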

struct thread_info *alloc_thread_info_node(struct task_struct *task, int node)
{
	struct page *page;
	gfp_t flags = GFP_KERNEL;

#ifdef CONFIG_DEBUG_STACK_USAGE
	flags |= __GFP_ZERO;
#endif

	page = alloc_pages_node(node, flags, THREAD_SIZE_ORDER);
	if (!page)
		return NULL;

	return (struct thread_info *)page_address(page);
}

/*
 * Free a thread_info node, and all of its derivative
 * data structures.
 */
void free_thread_info(struct thread_info *info)
{
	struct single_step_state *step_state = info->step_state;

#ifdef CONFIG_HARDWALL
	/*
	 * We free a thread_info from the context of the task that has
	 * been scheduled next, so the original task is already dead.
	 * Calling deactivate here just frees up the data structures.
	 * If the task we're freeing held the last reference to a
	 * hardwall fd, it would have been released prior to this point
	 * anyway via exit_files(), and "hardwall" would be NULL by now.
	 */
	if (info->task->thread.hardwall)
		hardwall_deactivate(info->task);
#endif

	if (step_state) {
		/*
		 * FIXME: we don't munmap step_state->buffer
		 * because the mm_struct for this process (info->task->mm)
		 * has already been zeroed in exit_mm().  Keeping a
		 * reference to it here seems like a bad move, so this
		 * means we can't munmap() the buffer, and therefore if we
		 * ptrace multiple threads in a process, we will slowly
		 * leak user memory.  (Note that as soon as the last
		 * thread in a process dies, we will reclaim all user
		 * memory including single-step buffers in the usual way.)
		 * We should either assign a kernel VA to this buffer
		 * somehow, or we should associate the buffer(s) with the
		 * mm itself so we can clean them up that way.
		 */
		kfree(step_state);
	}

	free_pages((unsigned long)info, THREAD_SIZE_ORDER);
}

static void save_arch_state(struct thread_struct *t);

int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long stack_size,
		struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs;
	unsigned long ksp;

	/*
	 * When creating a new kernel thread we pass sp as zero.
	 * Assign it to a reasonable value now that we have the stack.
	 */
	if (sp == 0 && regs->ex1 == PL_ICS_EX1(KERNEL_PL, 0))
		sp = KSTK_TOP(p);

	/*
	 * Do not clone step state from the parent; each thread
	 * must make its own lazily.
	 */
	task_thread_info(p)->step_state = NULL;

	/*
	 * Start new thread in ret_from_fork so it schedules properly
	 * and then return from interrupt like the parent.
	 */
	p->thread.pc = (unsigned long) ret_from_fork;

	/* Save user stack top pointer so we can ID the stack vm area later. */
	p->thread.usp0 = sp;

	/* Record the pid of the process that created this one. */
	p->thread.creator_pid = current->pid;

	/*
	 * Copy the registers onto the kernel stack so the
	 * return-from-interrupt code will reload it into registers.
	 */
	childregs = task_pt_regs(p);
	*childregs = *regs;
	childregs->regs[0] = 0;         /* return value is zero */
	childregs->sp = sp;  /* override with new user stack pointer */

	/*
	 * If CLONE_SETTLS is set, set "tp" in the new task to "r4",
	 * which is passed in as arg #5 to sys_clone().
	 */
	if (clone_flags & CLONE_SETTLS)
		childregs->tp = regs->regs[4];

	/*
	 * Copy the callee-saved registers from the passed pt_regs struct
	 * into the context-switch callee-saved registers area.
	 * This way when we start the interrupt-return sequence, the
	 * callee-save registers will be correctly in registers, which
	 * is how we assume the compiler leaves them as we start doing
	 * the normal return-from-interrupt path after calling C code.
	 * Zero out the C ABI save area to mark the top of the stack.
	 */
	ksp = (unsigned long) childregs;
	ksp -= C_ABI_SAVE_AREA_SIZE;   /* interrupt-entry save area */
	((long *)ksp)[0] = ((long *)ksp)[1] = 0;
	ksp -= CALLEE_SAVED_REGS_COUNT * sizeof(unsigned long);
	memcpy((void *)ksp, &regs->regs[CALLEE_SAVED_FIRST_REG],
	       CALLEE_SAVED_REGS_COUNT * sizeof(unsigned long));
	ksp -= C_ABI_SAVE_AREA_SIZE;   /* __switch_to() save area */
	((long *)ksp)[0] = ((long *)ksp)[1] = 0;
	p->thread.ksp = ksp;

#if CHIP_HAS_TILE_DMA()
	/*
	 * No DMA in the new thread.  We model this on the fact that
	 * fork() clears the pending signals, alarms, and aio for the child.
	 */
	memset(&p->thread.tile_dma_state, 0, sizeof(struct tile_dma_state));
	memset(&p->thread.dma_async_tlb, 0, sizeof(struct async_tlb));
#endif

#if CHIP_HAS_SN_PROC()
	/* Likewise, the new thread is not running static processor code. */
	p->thread.sn_proc_running = 0;
	memset(&p->thread.sn_async_tlb, 0, sizeof(struct async_tlb));
#endif

#if CHIP_HAS_PROC_STATUS_SPR()
	/* New thread has its miscellaneous processor state bits clear. */
	p->thread.proc_status = 0;
#endif

#ifdef CONFIG_HARDWALL
	/* New thread does not own any networks. */
	p->thread.hardwall = NULL;
#endif

	/*
	 * Start the new thread with the current architecture state
	 * (user interrupt masks, etc.).
	 */
	save_arch_state(&p->thread);

	return 0;
}
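
/*
 * The child's kernel stack as built above, from task_pt_regs(p)
 * downward:
 *
 *	[ struct pt_regs (childregs)    ]  <- top of kernel stack
 *	[ C ABI save area, zeroed       ]  <- interrupt-entry save area
 *	[ callee-saved regs from *regs  ]  <- reloaded by __switch_to()
 *	[ C ABI save area, zeroed       ]  <- p->thread.ksp
 *
 * The zeroed save areas are what make backtraces terminate cleanly at
 * the top of the new stack.
 */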

/*
 * Return "current" if it looks plausible, or else a pointer to a dummy.
 * This can be helpful if we are just trying to emit a clean panic.
 */
struct task_struct *validate_current(void)
{
	static struct task_struct corrupt = { .comm = "<corrupt>" };
	struct task_struct *tsk = current;
	if (unlikely((unsigned long)tsk < PAGE_OFFSET ||
		     (void *)tsk > high_memory ||
		     ((unsigned long)tsk & (__alignof__(*tsk) - 1)) != 0)) {
		pr_err("Corrupt 'current' %p (sp %#lx)\n", tsk, stack_pointer);
		tsk = &corrupt;
	}
	return tsk;
}

/* Take and return the pointer to the previous task, for schedule_tail(). */
struct task_struct *sim_notify_fork(struct task_struct *prev)
{
	struct task_struct *tsk = current;
	__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_FORK_PARENT |
		     (tsk->thread.creator_pid << _SIM_CONTROL_OPERATOR_BITS));
	__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_FORK |
		     (tsk->pid << _SIM_CONTROL_OPERATOR_BITS));
	return prev;
}
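
/*
 * Writes to SPR_SIM_CONTROL are directives to the Tilera simulator; the
 * two above report the parent/child relationship of a fork so the
 * simulator can track processes.  On real hardware, with no simulator
 * listening, they have no visible effect.
 */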

int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
	struct pt_regs *ptregs = task_pt_regs(tsk);
	elf_core_copy_regs(regs, ptregs);
	return 1;
}

#if CHIP_HAS_TILE_DMA()

/* Allow user processes to access the DMA SPRs */
void grant_dma_mpls(void)
{
#if CONFIG_KERNEL_PL == 2
	__insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
	__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
#else
	__insn_mtspr(SPR_MPL_DMA_CPL_SET_0, 1);
	__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_0, 1);
#endif
}

/* Forbid user processes from accessing the DMA SPRs */
void restrict_dma_mpls(void)
{
#if CONFIG_KERNEL_PL == 2
	__insn_mtspr(SPR_MPL_DMA_CPL_SET_2, 1);
	__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_2, 1);
#else
	__insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
	__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
#endif
}
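
/*
 * The MPL ("minimum protection level") SPRs decide which protection
 * level owns a given resource: writing 1 to SPR_MPL_xxx_SET_<n> assigns
 * it to level <n>.  grant_dma_mpls() moves the DMA SPRs to a low enough
 * level that user code can program the engine directly, while
 * restrict_dma_mpls() reassigns them to the kernel's level so any user
 * access traps into the kernel instead.
 */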

/* Pause the DMA engine, then save off its state registers. */
static void save_tile_dma_state(struct tile_dma_state *dma)
{
	unsigned long state = __insn_mfspr(SPR_DMA_USER_STATUS);
	unsigned long post_suspend_state;

	/* If we're running, suspend the engine. */
	if ((state & DMA_STATUS_MASK) == SPR_DMA_STATUS__RUNNING_MASK)
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__SUSPEND_MASK);

	/*
	 * Wait for the engine to idle, then save regs.  Note that we
	 * want to record the "running" bit from before suspension,
	 * and the "done" bit from after, so that we can properly
	 * distinguish a case where the user suspended the engine from
	 * the case where the kernel suspended as part of the context
	 * swap.
	 */
	do {
		post_suspend_state = __insn_mfspr(SPR_DMA_USER_STATUS);
	} while (post_suspend_state & SPR_DMA_STATUS__BUSY_MASK);

	dma->src = __insn_mfspr(SPR_DMA_SRC_ADDR);
	dma->src_chunk = __insn_mfspr(SPR_DMA_SRC_CHUNK_ADDR);
	dma->dest = __insn_mfspr(SPR_DMA_DST_ADDR);
	dma->dest_chunk = __insn_mfspr(SPR_DMA_DST_CHUNK_ADDR);
	dma->strides = __insn_mfspr(SPR_DMA_STRIDE);
	dma->chunk_size = __insn_mfspr(SPR_DMA_CHUNK_SIZE);
	dma->byte = __insn_mfspr(SPR_DMA_BYTE);
	dma->status = (state & SPR_DMA_STATUS__RUNNING_MASK) |
		(post_suspend_state & SPR_DMA_STATUS__DONE_MASK);
}

/* Restart a DMA that was running before we were context-switched out. */
static void restore_tile_dma_state(struct thread_struct *t)
{
	const struct tile_dma_state *dma = &t->tile_dma_state;

	/*
	 * The only way to restore the done bit is to run a zero
	 * length transaction.
	 */
	if ((dma->status & SPR_DMA_STATUS__DONE_MASK) &&
	    !(__insn_mfspr(SPR_DMA_USER_STATUS) & SPR_DMA_STATUS__DONE_MASK)) {
		__insn_mtspr(SPR_DMA_BYTE, 0);
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
		while (__insn_mfspr(SPR_DMA_USER_STATUS) &
		       SPR_DMA_STATUS__BUSY_MASK)
			;
	}

	__insn_mtspr(SPR_DMA_SRC_ADDR, dma->src);
	__insn_mtspr(SPR_DMA_SRC_CHUNK_ADDR, dma->src_chunk);
	__insn_mtspr(SPR_DMA_DST_ADDR, dma->dest);
	__insn_mtspr(SPR_DMA_DST_CHUNK_ADDR, dma->dest_chunk);
	__insn_mtspr(SPR_DMA_STRIDE, dma->strides);
	__insn_mtspr(SPR_DMA_CHUNK_SIZE, dma->chunk_size);
	__insn_mtspr(SPR_DMA_BYTE, dma->byte);

	/*
	 * Restart the engine if we were running and not done.
	 * Clear a pending async DMA fault that we were waiting on return
	 * to user space to execute, since we expect the DMA engine
	 * to regenerate those faults for us now.  Note that we don't
	 * try to clear the TIF_ASYNC_TLB flag, since it's relatively
	 * harmless if set, and it covers both DMA and the SN processor.
	 */
	if ((dma->status & DMA_STATUS_MASK) == SPR_DMA_STATUS__RUNNING_MASK) {
		t->dma_async_tlb.fault_num = 0;
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
	}
}
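
/*
 * Net effect of the save/restore pair: the user-visible DMA status
 * survives a context switch exactly.  RUNNING is sampled before the
 * kernel's suspend, so an engine the user had already suspended is not
 * spuriously restarted; DONE is sampled after, and replayed on restore
 * via the zero-length transaction above.
 */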

#endif

static void save_arch_state(struct thread_struct *t)
{
#if CHIP_HAS_SPLIT_INTR_MASK()
	t->interrupt_mask = __insn_mfspr(SPR_INTERRUPT_MASK_0_0) |
		((u64)__insn_mfspr(SPR_INTERRUPT_MASK_0_1) << 32);
#else
	t->interrupt_mask = __insn_mfspr(SPR_INTERRUPT_MASK_0);
#endif
	t->ex_context[0] = __insn_mfspr(SPR_EX_CONTEXT_0_0);
	t->ex_context[1] = __insn_mfspr(SPR_EX_CONTEXT_0_1);
	t->system_save[0] = __insn_mfspr(SPR_SYSTEM_SAVE_0_0);
	t->system_save[1] = __insn_mfspr(SPR_SYSTEM_SAVE_0_1);
	t->system_save[2] = __insn_mfspr(SPR_SYSTEM_SAVE_0_2);
	t->system_save[3] = __insn_mfspr(SPR_SYSTEM_SAVE_0_3);
	t->intctrl_0 = __insn_mfspr(SPR_INTCTRL_0_STATUS);
#if CHIP_HAS_PROC_STATUS_SPR()
	t->proc_status = __insn_mfspr(SPR_PROC_STATUS);
#endif
#if !CHIP_HAS_FIXED_INTVEC_BASE()
	t->interrupt_vector_base = __insn_mfspr(SPR_INTERRUPT_VECTOR_BASE_0);
#endif
#if CHIP_HAS_TILE_RTF_HWM()
	t->tile_rtf_hwm = __insn_mfspr(SPR_TILE_RTF_HWM);
#endif
#if CHIP_HAS_DSTREAM_PF()
	t->dstream_pf = __insn_mfspr(SPR_DSTREAM_PF);
#endif
}

static void restore_arch_state(const struct thread_struct *t)
{
#if CHIP_HAS_SPLIT_INTR_MASK()
	__insn_mtspr(SPR_INTERRUPT_MASK_0_0, (u32) t->interrupt_mask);
	__insn_mtspr(SPR_INTERRUPT_MASK_0_1, t->interrupt_mask >> 32);
#else
	__insn_mtspr(SPR_INTERRUPT_MASK_0, t->interrupt_mask);
#endif
	__insn_mtspr(SPR_EX_CONTEXT_0_0, t->ex_context[0]);
	__insn_mtspr(SPR_EX_CONTEXT_0_1, t->ex_context[1]);
	__insn_mtspr(SPR_SYSTEM_SAVE_0_0, t->system_save[0]);
	__insn_mtspr(SPR_SYSTEM_SAVE_0_1, t->system_save[1]);
	__insn_mtspr(SPR_SYSTEM_SAVE_0_2, t->system_save[2]);
	__insn_mtspr(SPR_SYSTEM_SAVE_0_3, t->system_save[3]);
	__insn_mtspr(SPR_INTCTRL_0_STATUS, t->intctrl_0);
#if CHIP_HAS_PROC_STATUS_SPR()
	__insn_mtspr(SPR_PROC_STATUS, t->proc_status);
#endif
#if !CHIP_HAS_FIXED_INTVEC_BASE()
	__insn_mtspr(SPR_INTERRUPT_VECTOR_BASE_0, t->interrupt_vector_base);
#endif
#if CHIP_HAS_TILE_RTF_HWM()
	__insn_mtspr(SPR_TILE_RTF_HWM, t->tile_rtf_hwm);
#endif
#if CHIP_HAS_DSTREAM_PF()
	__insn_mtspr(SPR_DSTREAM_PF, t->dstream_pf);
#endif
}
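
/*
 * save_arch_state() and restore_arch_state() are strict mirrors: every
 * SPR read on the save side has a matching write on the restore side.
 * That pairing is what turns these per-cpu SPRs into per-task state
 * across a context switch.
 */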

void _prepare_arch_switch(struct task_struct *next)
{
#if CHIP_HAS_SN_PROC()
	int snctl;
#endif
#if CHIP_HAS_TILE_DMA()
	struct tile_dma_state *dma = &current->thread.tile_dma_state;
	if (dma->enabled)
		save_tile_dma_state(dma);
#endif
#if CHIP_HAS_SN_PROC()
	/*
	 * Suspend the static network processor if it was running.
	 * We do not suspend the fabric itself, just like we don't
	 * try to suspend the UDN.
	 */
	snctl = __insn_mfspr(SPR_SNCTL);
	current->thread.sn_proc_running =
		(snctl & SPR_SNCTL__FRZPROC_MASK) == 0;
	if (current->thread.sn_proc_running)
		__insn_mtspr(SPR_SNCTL, snctl | SPR_SNCTL__FRZPROC_MASK);
#endif
}

struct task_struct *__sched _switch_to(struct task_struct *prev,
				       struct task_struct *next)
{
	/* DMA state is already saved; save off other arch state. */
	save_arch_state(&prev->thread);

#if CHIP_HAS_TILE_DMA()
	/*
	 * Restore DMA in new task if desired.
	 * Note that it is only safe to restart here since interrupts
	 * are disabled, so we can't take any DMATLB miss or access
	 * interrupts before we have finished switching stacks.
	 */
	if (next->thread.tile_dma_state.enabled) {
		restore_tile_dma_state(&next->thread);
		grant_dma_mpls();
	} else {
		restrict_dma_mpls();
	}
#endif

	/* Restore other arch state. */
	restore_arch_state(&next->thread);

#if CHIP_HAS_SN_PROC()
	/*
	 * Restart static network processor in the new process
	 * if it was running before.
	 */
	if (next->thread.sn_proc_running) {
		int snctl = __insn_mfspr(SPR_SNCTL);
		__insn_mtspr(SPR_SNCTL, snctl & ~SPR_SNCTL__FRZPROC_MASK);
	}
#endif

#ifdef CONFIG_HARDWALL
	/* Enable or disable access to the network registers appropriately. */
	if (prev->thread.hardwall != NULL) {
		if (next->thread.hardwall == NULL)
			restrict_network_mpls();
	} else if (next->thread.hardwall != NULL) {
		grant_network_mpls();
	}
#endif

	/*
	 * Switch kernel SP, PC, and callee-saved registers.
	 * In the context of the new task, return the old task pointer
	 * (i.e. the task that actually called __switch_to).
	 * Pass the value to use for SYSTEM_SAVE_K_0 when we reset our sp.
	 */
	return __switch_to(prev, next, next_current_ksp0(next));
}
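
/*
 * Putting the pieces together, a context switch runs in this order:
 * _prepare_arch_switch() pauses the outgoing task's DMA engine and
 * static network processor while its stack is still live; _switch_to()
 * snapshots the remaining SPR state, installs the incoming task's DMA
 * and SPR state with interrupts disabled, and adjusts the MPLs; and the
 * assembly __switch_to() finally swaps the kernel stack pointer and
 * callee-saved registers.
 */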

/*
 * This routine is called on return from interrupt if any of the
 * TIF_WORK_MASK flags are set in thread_info->flags.  It is
 * entered with interrupts disabled so we don't miss an event
 * that modified the thread_info flags.  If any flag is set, we
 * handle it and return, and the calling assembly code will
 * re-disable interrupts, reload the thread flags, and call back
 * if more flags need to be handled.
 *
 * We return whether we need to check the thread_info flags again
 * or not.  Note that we don't clear TIF_SINGLESTEP here, so it's
 * important that it be tested last, and then claim that we don't
 * need to recheck the flags.
 */
int do_work_pending(struct pt_regs *regs, u32 thread_info_flags)
{
	if (thread_info_flags & _TIF_NEED_RESCHED) {
		schedule();
		return 1;
	}
#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
	if (thread_info_flags & _TIF_ASYNC_TLB) {
		do_async_page_fault(regs);
		return 1;
	}
#endif
	if (thread_info_flags & _TIF_SIGPENDING) {
		do_signal(regs);
		return 1;
	}
	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
		if (current->replacement_session_keyring)
			key_replace_session_keyring();
		return 1;
	}
	if (thread_info_flags & _TIF_SINGLESTEP) {
		if ((regs->ex1 & SPR_EX_CONTEXT_1_1__PL_MASK) == 0)
			single_step_once(regs);
		return 0;
	}
	panic("work_pending: bad flags %#x\n", thread_info_flags);
}
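
/*
 * A sketch of the caller's side (the real loop lives in the
 * interrupt-return assembly; this is illustrative C only):
 *
 *	do {
 *		local_irq_disable();
 *		flags = current_thread_info()->flags;  (work bits only)
 *	} while (flags && do_work_pending(regs, flags));
 *
 * Each handled flag returns 1 so the loop re-checks with fresh flags;
 * only the _TIF_SINGLESTEP case returns 0, which is why it must be
 * tested last.
 */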

/* Note there is an implicit fifth argument if (clone_flags & CLONE_SETTLS). */
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
		void __user *, parent_tidptr, void __user *, child_tidptr,
		struct pt_regs *, regs)
{
	if (!newsp)
		newsp = regs->sp;
	return do_fork(clone_flags, newsp, regs, 0,
		       parent_tidptr, child_tidptr);
}
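
/*
 * A zero newsp is the fork()-style case: the child keeps the parent's
 * stack pointer and (without CLONE_VM) gets its own copy-on-write pages
 * behind it.  Thread libraries pass an explicit newsp pointing at the
 * freshly allocated thread stack.
 */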

/*
 * sys_execve() executes a new program.
 */
SYSCALL_DEFINE4(execve, const char __user *, path,
		const char __user *const __user *, argv,
		const char __user *const __user *, envp,
		struct pt_regs *, regs)
{
	long error;
	char *filename;

	filename = getname(path);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename, argv, envp, regs);
	putname(filename);
	if (error == 0)
		single_step_execve();
out:
	return error;
}

#ifdef CONFIG_COMPAT
long compat_sys_execve(const char __user *path,
		       compat_uptr_t __user *argv,
		       compat_uptr_t __user *envp,
		       struct pt_regs *regs)
{
	long error;
	char *filename;

	filename = getname(path);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = compat_do_execve(filename, argv, envp, regs);
	putname(filename);
	if (error == 0)
		single_step_execve();
out:
	return error;
}
#endif

unsigned long get_wchan(struct task_struct *p)
{
	struct KBacktraceIterator kbt;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	for (KBacktraceIterator_init(&kbt, p, NULL);
	     !KBacktraceIterator_end(&kbt);
	     KBacktraceIterator_next(&kbt)) {
		if (!in_sched_functions(kbt.it.pc))
			return kbt.it.pc;
	}

	return 0;
}
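
/*
 * The value found above is what /proc/<pid>/wchan reports: the first pc
 * in a sleeping task's backtrace that lies outside the scheduler
 * itself, i.e. the place the task is actually blocked.
 */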

/*
 * We pass in lr as zero (cleared in kernel_thread) and the caller
 * part of the backtrace ABI on the stack also zeroed (in copy_thread)
 * so that backtraces will stop with this function.
 * Note that we don't use r0, since copy_thread() clears it.
 */
static void start_kernel_thread(int dummy, int (*fn)(int), int arg)
{
	do_exit(fn(arg));
}

/*
 * Create a kernel thread
 */
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));
	regs.ex1 = PL_ICS_EX1(KERNEL_PL, 0);  /* run at kernel PL, no ICS */
	regs.pc = (long) start_kernel_thread;
	regs.flags = PT_FLAGS_CALLER_SAVES;   /* need to restore r1 and r2 */
	regs.regs[1] = (long) fn;             /* function pointer */
	regs.regs[2] = (long) arg;            /* parameter register */

	/* Ok, create the new process.. */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs,
		       0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
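
/*
 * Typical use, with a hypothetical worker function:
 *
 *	static int my_worker(void *arg)
 *	{
 *		...
 *		return 0;
 *	}
 *
 *	pid = kernel_thread(my_worker, NULL, 0);
 *
 * The new thread shares the kernel address space (CLONE_VM), cannot be
 * ptraced (CLONE_UNTRACED), and its return value becomes the exit code
 * via do_exit() in start_kernel_thread().
 */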

/* Flush thread state. */
void flush_thread(void)
{
	/* Nothing */
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	/* Nothing */
}

void show_regs(struct pt_regs *regs)
{
	struct task_struct *tsk = validate_current();
	int i;

	pr_err("\n");
	pr_err(" Pid: %d, comm: %20s, CPU: %d\n",
	       tsk->pid, tsk->comm, smp_processor_id());
#ifdef __tilegx__
	for (i = 0; i < 51; i += 3)
		pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
		       i, regs->regs[i], i+1, regs->regs[i+1],
		       i+2, regs->regs[i+2]);
	pr_err(" r51: "REGFMT" r52: "REGFMT" tp : "REGFMT"\n",
	       regs->regs[51], regs->regs[52], regs->tp);
	pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr);
#else
	for (i = 0; i < 52; i += 4)
		pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT
		       " r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
		       i, regs->regs[i], i+1, regs->regs[i+1],
		       i+2, regs->regs[i+2], i+3, regs->regs[i+3]);
	pr_err(" r52: "REGFMT" tp : "REGFMT" sp : "REGFMT" lr : "REGFMT"\n",
	       regs->regs[52], regs->tp, regs->sp, regs->lr);
#endif
	pr_err(" pc : "REGFMT" ex1: %ld     faultnum: %ld\n",
	       regs->pc, regs->ex1, regs->faultnum);

	dump_stack_regs(regs);
}