v3.1 (arch/mips/include/asm/barrier.h)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2006 by Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

/*
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 */

#define read_barrier_depends()		do { } while(0)
#define smp_read_barrier_depends()	do { } while(0)

#ifdef CONFIG_CPU_HAS_SYNC
#define __sync()				\
	__asm__ __volatile__(			\
		".set	push\n\t"		\
		".set	noreorder\n\t"		\
		".set	mips2\n\t"		\
		"sync\n\t"			\
		".set	pop"			\
		: /* no output */		\
		: /* no input */		\
		: "memory")
#else
#define __sync()	do { } while(0)
#endif

#define __fast_iob()				\
	__asm__ __volatile__(			\
		".set	push\n\t"		\
		".set	noreorder\n\t"		\
		"lw	$0,%0\n\t"		\
		"nop\n\t"			\
		".set	pop"			\
		: /* no output */		\
		: "m" (*(int *)CKSEG1)		\
		: "memory")
#ifdef CONFIG_CPU_CAVIUM_OCTEON
# define OCTEON_SYNCW_STR	".set push\n.set arch=octeon\nsyncw\nsyncw\n.set pop\n"
# define __syncw()	__asm__ __volatile__(OCTEON_SYNCW_STR : : : "memory")

# define fast_wmb()	__syncw()
# define fast_rmb()	barrier()
# define fast_mb()	__sync()
# define fast_iob()	do { } while (0)
#else /* ! CONFIG_CPU_CAVIUM_OCTEON */
# define fast_wmb()	__sync()
# define fast_rmb()	__sync()
# define fast_mb()	__sync()
# ifdef CONFIG_SGI_IP28
#  define fast_iob()				\
	__asm__ __volatile__(			\
		".set	push\n\t"		\
		".set	noreorder\n\t"		\
		"lw	$0,%0\n\t"		\
		"sync\n\t"			\
		"lw	$0,%0\n\t"		\
		".set	pop"			\
		: /* no output */		\
		: "m" (*(int *)CKSEG1ADDR(0x1fa00004)) \
		: "memory")
# else
#  define fast_iob()				\
	do {					\
		__sync();			\
		__fast_iob();			\
	} while (0)
# endif
#endif /* CONFIG_CPU_CAVIUM_OCTEON */

#ifdef CONFIG_CPU_HAS_WB

#include <asm/wbflush.h>

#define wmb()		fast_wmb()
#define rmb()		fast_rmb()
#define mb()		wbflush()
#define iob()		wbflush()

#else /* !CONFIG_CPU_HAS_WB */

#define wmb()		fast_wmb()
#define rmb()		fast_rmb()
#define mb()		fast_mb()
#define iob()		fast_iob()

#endif /* !CONFIG_CPU_HAS_WB */

#if defined(CONFIG_WEAK_ORDERING) && defined(CONFIG_SMP)
# ifdef CONFIG_CPU_CAVIUM_OCTEON
#  define smp_mb()	__sync()
#  define smp_rmb()	barrier()
#  define smp_wmb()	__syncw()
# else
#  define smp_mb()	__asm__ __volatile__("sync" : : :"memory")
#  define smp_rmb()	__asm__ __volatile__("sync" : : :"memory")
#  define smp_wmb()	__asm__ __volatile__("sync" : : :"memory")
# endif
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#endif

#if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
#define __WEAK_LLSC_MB		"	sync	\n"
#else
#define __WEAK_LLSC_MB		"		\n"
#endif

#define set_mb(var, value) \
	do { var = value; smp_mb(); } while (0)

#define smp_llsc_mb()	__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")

#ifdef CONFIG_CPU_CAVIUM_OCTEON
#define smp_mb__before_llsc() smp_wmb()
/* Cause previous writes to become visible on all CPUs as soon as possible */
#define nudge_writes() __asm__ __volatile__(".set push\n\t"		\
					    ".set arch=octeon\n\t"	\
					    "syncw\n\t"			\
					    ".set pop" : : : "memory")
#else
#define smp_mb__before_llsc() smp_llsc_mb()
#define nudge_writes() mb()
#endif

#endif /* __ASM_BARRIER_H */
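
The pointer-publication pattern described in the header comment above has a direct userspace counterpart in C11 atomics, where memory_order_consume standardizes the dependent-read guarantee that read_barrier_depends() provides in the kernel. The following is a minimal sketch under that mapping, not code from the kernel tree; the writer/reader thread functions and the printf are purely illustrative. Initial values match the comment: a = 0, b = 1, p = &a.

/*
 * Illustrative userspace analogue (build with: cc -std=c11 -pthread).
 * The release store stands in for the writer-side barrier and the
 * consume load stands in for read_barrier_depends() on the reader side.
 */
#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

static int a = 0, b = 1;
static _Atomic(int *) p = &a;

static void *writer(void *arg)
{
	(void)arg;
	b = 2;	/* payload write, ordered before the publication below */
	atomic_store_explicit(&p, &b, memory_order_release);
	return NULL;
}

static void *reader(void *arg)
{
	(void)arg;
	/* dependency-ordered load: the role read_barrier_depends() plays */
	int *q = atomic_load_explicit(&p, memory_order_consume);
	int d = *q;	/* 0 (old pointer) or 2 (published pointer); never a
			 * stale 1 read through the freshly published &b */
	printf("d = %d\n", d);
	return NULL;
}

int main(void)
{
	pthread_t w, r;
	pthread_create(&w, NULL, writer, NULL);
	pthread_create(&r, NULL, reader, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}

On every mainstream architecture except Alpha, both the consume load and read_barrier_depends() cost nothing beyond a plain load, which is why the kernel macro above expands to an empty statement on MIPS.
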
v4.6 (arch/mips/include/asm/barrier.h)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2006 by Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

#include <asm/addrspace.h>

#ifdef CONFIG_CPU_HAS_SYNC
#define __sync()				\
	__asm__ __volatile__(			\
		".set	push\n\t"		\
		".set	noreorder\n\t"		\
		".set	mips2\n\t"		\
		"sync\n\t"			\
		".set	pop"			\
		: /* no output */		\
		: /* no input */		\
		: "memory")
#else
#define __sync()	do { } while(0)
#endif

#define __fast_iob()				\
	__asm__ __volatile__(			\
		".set	push\n\t"		\
		".set	noreorder\n\t"		\
		"lw	$0,%0\n\t"		\
		"nop\n\t"			\
		".set	pop"			\
		: /* no output */		\
		: "m" (*(int *)CKSEG1)		\
		: "memory")
#ifdef CONFIG_CPU_CAVIUM_OCTEON
# define OCTEON_SYNCW_STR	".set push\n.set arch=octeon\nsyncw\nsyncw\n.set pop\n"
# define __syncw()	__asm__ __volatile__(OCTEON_SYNCW_STR : : : "memory")

# define fast_wmb()	__syncw()
# define fast_rmb()	barrier()
# define fast_mb()	__sync()
# define fast_iob()	do { } while (0)
#else /* ! CONFIG_CPU_CAVIUM_OCTEON */
# define fast_wmb()	__sync()
# define fast_rmb()	__sync()
# define fast_mb()	__sync()
# ifdef CONFIG_SGI_IP28
#  define fast_iob()				\
	__asm__ __volatile__(			\
		".set	push\n\t"		\
		".set	noreorder\n\t"		\
		"lw	$0,%0\n\t"		\
		"sync\n\t"			\
		"lw	$0,%0\n\t"		\
		".set	pop"			\
		: /* no output */		\
		: "m" (*(int *)CKSEG1ADDR(0x1fa00004)) \
		: "memory")
# else
#  define fast_iob()				\
	do {					\
		__sync();			\
		__fast_iob();			\
	} while (0)
# endif
#endif /* CONFIG_CPU_CAVIUM_OCTEON */

#ifdef CONFIG_CPU_HAS_WB

#include <asm/wbflush.h>

#define mb()		wbflush()
#define iob()		wbflush()

#else /* !CONFIG_CPU_HAS_WB */

#define mb()		fast_mb()
#define iob()		fast_iob()

#endif /* !CONFIG_CPU_HAS_WB */

#define wmb()		fast_wmb()
#define rmb()		fast_rmb()

#if defined(CONFIG_WEAK_ORDERING)
# ifdef CONFIG_CPU_CAVIUM_OCTEON
#  define __smp_mb()	__sync()
#  define __smp_rmb()	barrier()
#  define __smp_wmb()	__syncw()
# else
#  define __smp_mb()	__asm__ __volatile__("sync" : : :"memory")
#  define __smp_rmb()	__asm__ __volatile__("sync" : : :"memory")
#  define __smp_wmb()	__asm__ __volatile__("sync" : : :"memory")
# endif
#else
#define __smp_mb()	barrier()
#define __smp_rmb()	barrier()
#define __smp_wmb()	barrier()
#endif

#if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
#define __WEAK_LLSC_MB		"	sync	\n"
#else
#define __WEAK_LLSC_MB		"		\n"
#endif

#define smp_llsc_mb()	__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")

#ifdef CONFIG_CPU_CAVIUM_OCTEON
#define smp_mb__before_llsc() smp_wmb()
#define __smp_mb__before_llsc() __smp_wmb()
/* Cause previous writes to become visible on all CPUs as soon as possible */
#define nudge_writes() __asm__ __volatile__(".set push\n\t"		\
					    ".set arch=octeon\n\t"	\
					    "syncw\n\t"			\
					    ".set pop" : : : "memory")
#else
#define smp_mb__before_llsc() smp_llsc_mb()
#define __smp_mb__before_llsc() smp_llsc_mb()
#define nudge_writes() mb()
#endif

#define __smp_mb__before_atomic()	__smp_mb__before_llsc()
#define __smp_mb__after_atomic()	smp_llsc_mb()

#include <asm-generic/barrier.h>

#endif /* __ASM_BARRIER_H */
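
Compared with v3.1, this version hands the generic plumbing back to common code: read_barrier_depends(), set_mb() and the hand-rolled smp_*() family are gone, the architecture defines only the double-underscore __smp_*() primitives plus the LL/SC hooks, and the final #include <asm-generic/barrier.h> fills in the rest. A simplified sketch of the fallback scheme that the generic header applies (paraphrased, not verbatim kernel source):

/*
 * Simplified sketch of the asm-generic/barrier.h fallback logic that
 * the include at the end of this header relies on: on CONFIG_SMP the
 * kernel-wide smp_*() barriers map to the arch's __smp_*() primitives
 * defined above; on UP builds they collapse to a plain compiler
 * barrier, since no other CPU can observe the reordering.
 */
#ifdef CONFIG_SMP

#ifndef smp_mb
#define smp_mb()	__smp_mb()	/* MIPS: "sync" (Octeon: __sync()) */
#endif

#ifndef smp_rmb
#define smp_rmb()	__smp_rmb()
#endif

#ifndef smp_wmb
#define smp_wmb()	__smp_wmb()
#endif

#else	/* !CONFIG_SMP */

#ifndef smp_mb
#define smp_mb()	barrier()
#endif

#ifndef smp_rmb
#define smp_rmb()	barrier()
#endif

#ifndef smp_wmb
#define smp_wmb()	barrier()
#endif

#endif	/* CONFIG_SMP */

This is also why the CONFIG_SMP test has disappeared from the CONFIG_WEAK_ORDERING conditional above: the UP collapse now happens in the generic layer rather than in each architecture's header.
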