Linux Audio

Check our new training course

Loading...
Note: File does not exist in v3.1.
  1/*
  2 * Copyright 2010 Tilera Corporation. All Rights Reserved.
  3 *
  4 *   This program is free software; you can redistribute it and/or
  5 *   modify it under the terms of the GNU General Public License
  6 *   as published by the Free Software Foundation, version 2.
  7 *
  8 *   This program is distributed in the hope that it will be useful, but
  9 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 10 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 11 *   NON INFRINGEMENT.  See the GNU General Public License for
 12 *   more details.
 13 */
 14
 15#ifndef _ASM_TILE_BARRIER_H
 16#define _ASM_TILE_BARRIER_H
 17
 18#ifndef __ASSEMBLY__
 19
 20#include <linux/types.h>
 21#include <arch/chip.h>
 22#include <arch/spr_def.h>
 23#include <asm/timex.h>
 24
 25/*
  26 * read_barrier_depends - Flush all pending reads that subsequent reads
 27 * depend on.
 28 *
 29 * No data-dependent reads from memory-like regions are ever reordered
 30 * over this barrier.  All reads preceding this primitive are guaranteed
 31 * to access memory (but not necessarily other CPUs' caches) before any
  32 * reads following this primitive that depend on the data returned by
 33 * any of the preceding reads.  This primitive is much lighter weight than
 34 * rmb() on most CPUs, and is never heavier weight than is
 35 * rmb().
 36 *
 37 * These ordering constraints are respected by both the local CPU
 38 * and the compiler.
 39 *
 40 * Ordering is not guaranteed by anything other than these primitives,
 41 * not even by data dependencies.  See the documentation for
 42 * memory_barrier() for examples and URLs to more information.
 43 *
 44 * For example, the following code would force ordering (the initial
 45 * value of "a" is zero, "b" is one, and "p" is "&a"):
 46 *
 47 * <programlisting>
 48 *	CPU 0				CPU 1
 49 *
 50 *	b = 2;
 51 *	memory_barrier();
 52 *	p = &b;				q = p;
 53 *					read_barrier_depends();
 54 *					d = *q;
 55 * </programlisting>
 56 *
 57 * because the read of "*q" depends on the read of "p" and these
 58 * two reads are separated by a read_barrier_depends().  However,
 59 * the following code, with the same initial values for "a" and "b":
 60 *
 61 * <programlisting>
 62 *	CPU 0				CPU 1
 63 *
 64 *	a = 2;
 65 *	memory_barrier();
 66 *	b = 3;				y = b;
 67 *					read_barrier_depends();
 68 *					x = a;
 69 * </programlisting>
 70 *
 71 * does not enforce ordering, since there is no data dependency between
 72 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 73 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 74 * in cases like this where there are no data dependencies.
 75 */
/*
 * Expands to a no-op: per the block comment above, data-dependent reads
 * need no hardware barrier here, so this only marks intent in the source.
 */
#define read_barrier_depends()	do { } while (0)

/* Full hardware memory fence: the "mf" instruction. */
#define __sync()	__insn_mf()
 79
#if !CHIP_HAS_MF_WAITS_FOR_VICTIMS()
#include <hv/syscall_public.h>
/*
 * Issue an uncacheable load to each memory controller, then
 * wait until those loads have completed.
 *
 * Implemented as a trap into the hypervisor ("swint2") with the
 * syscall number HV_SYS_fence_incoherent passed in r10; r10 is also
 * written back by the call (captured in clobber_r10 and discarded).
 * NOTE(review): the clobber list names r0-r29 except r10 — presumably
 * the full caller-save set of the hypervisor call ABI; confirm against
 * the Tile hypervisor syscall convention before changing it.
 */
static inline void __mb_incoherent(void)
{
	long clobber_r10;	/* receives the (ignored) return value in r10 */
	asm volatile("swint2"
		     : "=R10" (clobber_r10)
		     : "R10" (HV_SYS_fence_incoherent)
		     : "r0", "r1", "r2", "r3", "r4",
		       "r5", "r6", "r7", "r8", "r9",
		       "r11", "r12", "r13", "r14",
		       "r15", "r16", "r17", "r18", "r19",
		       "r20", "r21", "r22", "r23", "r24",
		       "r25", "r26", "r27", "r28", "r29");
}
#endif
100
/*
 * Fence to guarantee visibility of stores to incoherent memory.
 *
 * Always starts with an "mf" instruction.  On chips where "mf" does not
 * itself wait for victim write-backs, it additionally either (a) polls
 * the TILE_WRITE_PENDING SPR until no writes are outstanding, bounded
 * by a cycle budget, or (b) falls back to the heavyweight hypervisor
 * fence (__mb_incoherent) if the poll times out or the SPR is absent.
 */
static inline void
mb_incoherent(void)
{
	__insn_mf();

#if !CHIP_HAS_MF_WAITS_FOR_VICTIMS()
	{
#if CHIP_HAS_TILE_WRITE_PENDING()
		/*
		 * Cheap path: spin until the pending-write SPR drains.
		 * NOTE(review): 400 cycles is presumably sized to cover
		 * typical write-buffer drain latency — confirm before
		 * tuning.  Unsigned subtraction below is wraparound-safe.
		 */
		const unsigned long WRITE_TIMEOUT_CYCLES = 400;
		unsigned long start = get_cycles_low();
		do {
			if (__insn_mfspr(SPR_TILE_WRITE_PENDING) == 0)
				return;
		} while ((get_cycles_low() - start) < WRITE_TIMEOUT_CYCLES);
#endif /* CHIP_HAS_TILE_WRITE_PENDING() */
		/* Slow path: hypervisor fence to every memory controller. */
		(void) __mb_incoherent();
	}
#endif /* CHIP_HAS_MF_WAITS_FOR_VICTIMS() */
}
121
/*
 * Primitive barrier flavors.  Write, read, and full barriers all map
 * onto the single hardware "mf" fence; iob() (I/O barrier) must also
 * guarantee visibility to incoherent memory, so it uses mb_incoherent().
 */
#define fast_wmb()	__sync()
#define fast_rmb()	__sync()
#define fast_mb()	__sync()
#define fast_iob()	mb_incoherent()

#define wmb()		fast_wmb()
#define rmb()		fast_rmb()
#define mb()		fast_mb()
#define iob()		fast_iob()
131
/*
 * SMP barriers: real hardware fences when other CPUs can observe the
 * ordering; mere compiler barriers (or nothing, for the dependency
 * variant) on uniprocessor builds.
 */
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#endif

/* Assign, then issue a full memory barrier. */
#define set_mb(var, value) \
	do { var = value; mb(); } while (0)
146
147#endif /* !__ASSEMBLY__ */
148#endif /* _ASM_TILE_BARRIER_H */