/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#ifndef _ASM_TILE_SYSTEM_H
#define _ASM_TILE_SYSTEM_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/irqflags.h>

/* NOTE: we can't include <linux/ptrace.h> due to #include dependencies. */
#include <asm/ptrace.h>

#include <arch/chip.h>
#include <arch/sim_def.h>
#include <arch/spr_def.h>

/*
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 */

#define read_barrier_depends()	do { } while (0)

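/*
 * Example (illustrative only; the names below are hypothetical): a
 * consumer that follows a pointer published by another CPU.  Because the
 * second load depends on the value returned by the first,
 * read_barrier_depends() is sufficient here; a full rmb() would also
 * work but is heavier on some CPUs.
 */
struct example_msg { int val; };
extern struct example_msg *example_published;

static inline int example_consume(void)
{
	struct example_msg *q = example_published;	/* read the pointer */
	read_barrier_depends();		/* order the dependent read below */
	return q->val;			/* read through the pointer */
}
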
#define __sync()	__insn_mf()

#if CHIP_HAS_SPLIT_CYCLE()
#define get_cycles_low() __insn_mfspr(SPR_CYCLE_LOW)
#else
#define get_cycles_low() __insn_mfspr(SPR_CYCLE)   /* just get all 64 bits */
#endif

#if !CHIP_HAS_MF_WAITS_FOR_VICTIMS()
#include <hv/syscall_public.h>
/*
 * Issue an uncacheable load to each memory controller, then
 * wait until those loads have completed.
 */
static inline void __mb_incoherent(void)
{
	long clobber_r10;
	asm volatile("swint2"
		     : "=R10" (clobber_r10)
		     : "R10" (HV_SYS_fence_incoherent)
		     : "r0", "r1", "r2", "r3", "r4",
		       "r5", "r6", "r7", "r8", "r9",
		       "r11", "r12", "r13", "r14",
		       "r15", "r16", "r17", "r18", "r19",
		       "r20", "r21", "r22", "r23", "r24",
		       "r25", "r26", "r27", "r28", "r29");
}
#endif

/* Fence to guarantee visibility of stores to incoherent memory. */
static inline void
mb_incoherent(void)
{
	__insn_mf();

#if !CHIP_HAS_MF_WAITS_FOR_VICTIMS()
	{
#if CHIP_HAS_TILE_WRITE_PENDING()
		/*
		 * Spin briefly waiting for pending stores to drain on
		 * their own before falling back to the (slower)
		 * hypervisor fence below.
		 */
		const unsigned long WRITE_TIMEOUT_CYCLES = 400;
		unsigned long start = get_cycles_low();
		do {
			if (__insn_mfspr(SPR_TILE_WRITE_PENDING) == 0)
				return;
		} while ((get_cycles_low() - start) < WRITE_TIMEOUT_CYCLES);
#endif /* CHIP_HAS_TILE_WRITE_PENDING() */
		/* Stores are still pending; fence them via the hypervisor. */
		(void) __mb_incoherent();
	}
#endif /* CHIP_HAS_MF_WAITS_FOR_VICTIMS() */
}

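/*
 * Illustrative sketch (hypothetical names): fence CPU stores to an
 * incoherent, DMA-visible buffer before telling the engine to start.
 * example_dma_kick() is not a real API; it stands in for whatever write
 * actually starts the transfer.
 */
extern char example_dma_buf[];
void example_dma_kick(void);

static inline void example_start_dma(char c)
{
	example_dma_buf[0] = c;	/* fill the buffer */
	mb_incoherent();	/* make the stores visible off-tile */
	example_dma_kick();	/* only then start the transfer */
}
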
#define fast_wmb()	__sync()
#define fast_rmb()	__sync()
#define fast_mb()	__sync()
#define fast_iob()	mb_incoherent()

#define wmb()		fast_wmb()
#define rmb()		fast_rmb()
#define mb()		fast_mb()
#define iob()		fast_iob()

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#endif

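/*
 * Illustrative sketch of the smp_wmb()/smp_rmb() pairing (hypothetical
 * names): the producer orders the data store before the flag store, and
 * the consumer orders the flag load before the data load.  On !SMP
 * builds these reduce to compiler barriers, which is all that is needed.
 */
extern int example_payload;
extern int example_ready;

static inline void example_produce(int v)
{
	example_payload = v;	/* write the data first */
	smp_wmb();		/* make it visible before the flag */
	example_ready = 1;
}

static inline int example_try_consume(int *out)
{
	if (!example_ready)
		return 0;
	smp_rmb();		/* order the flag read before the data read */
	*out = example_payload;
	return 1;
}
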
#define set_mb(var, value) \
	do { var = value; mb(); } while (0)

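/*
 * Illustrative use of set_mb() (hypothetical names): store a flag and
 * follow it with a full barrier so the later read of example_other
 * cannot be reordered before the store.
 */
extern int example_flag, example_other;

static inline int example_store_then_check(void)
{
	set_mb(example_flag, 1);	/* store, then full mb() */
	return example_other;		/* read ordered after the store */
}
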
/*
 * Pause the DMA engine and static network before task switching.
 */
#define prepare_arch_switch(next) _prepare_arch_switch(next)
void _prepare_arch_switch(struct task_struct *next);


/*
 * switch_to(n) should switch tasks to task nr n, first
 * checking that n isn't the current task, in which case it does nothing.
 * The number of callee-saved registers saved on the kernel stack
 * is defined here for use in copy_thread() and must agree with __switch_to().
 */
#endif /* !__ASSEMBLY__ */
#define CALLEE_SAVED_FIRST_REG 30
#define CALLEE_SAVED_REGS_COUNT 24   /* r30 to r52, plus an empty to align */
#ifndef __ASSEMBLY__
struct task_struct;
#define switch_to(prev, next, last) ((last) = _switch_to((prev), (next)))
extern struct task_struct *_switch_to(struct task_struct *prev,
				      struct task_struct *next);

/* Helper function for _switch_to(). */
extern struct task_struct *__switch_to(struct task_struct *prev,
				       struct task_struct *next,
				       unsigned long new_system_save_k_0);

/* Address that switched-away-from tasks are at. */
extern unsigned long get_switch_to_pc(void);

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible:
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

#define arch_align_stack(x) (x)

/*
 * Is the kernel doing fixups of unaligned accesses?  If <0, no kernel
 * intervention occurs and SIGBUS is delivered with no data address
 * info.  If 0, the kernel single-steps the instruction to discover
 * the data address to provide with the SIGBUS.  If 1, the kernel does
 * a fixup.
 */
extern int unaligned_fixup;

/* Is the kernel printing on each unaligned fixup? */
extern int unaligned_printk;

/* Number of unaligned fixups performed */
extern unsigned int unaligned_fixup_count;

/* Init-time routine to do tile-specific per-cpu setup. */
void setup_cpu(int boot);

/* User-level DMA management functions */
void grant_dma_mpls(void);
void restrict_dma_mpls(void);

#ifdef CONFIG_HARDWALL
/* User-level network management functions */
void reset_network_state(void);
void grant_network_mpls(void);
void restrict_network_mpls(void);
int hardwall_deactivate(struct task_struct *task);

/* Hook hardwall code into changes in affinity. */
#define arch_set_cpus_allowed(p, new_mask) do { \
	if (p->thread.hardwall && !cpumask_equal(&p->cpus_allowed, new_mask)) \
		hardwall_deactivate(p); \
} while (0)
#endif

/*
 * Kernel threads can check to see if they need to migrate their
 * stack whenever they return from a context switch; for user
 * threads, we defer until they are returning to user-space.
 */
#define finish_arch_switch(prev) do {                                     \
	if (unlikely((prev)->state == TASK_DEAD))                         \
		__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_EXIT |       \
			((prev)->pid << _SIM_CONTROL_OPERATOR_BITS));     \
	__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_SWITCH |             \
		(current->pid << _SIM_CONTROL_OPERATOR_BITS));            \
	if (current->mm == NULL && !kstack_hash &&                        \
	    current_thread_info()->homecache_cpu != smp_processor_id())   \
		homecache_migrate_kthread();                              \
} while (0)

/* Support function for forking a new task. */
void ret_from_fork(void);

/* Called from ret_from_fork() when a new process starts up. */
struct task_struct *sim_notify_fork(struct task_struct *prev);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_SYSTEM_H */