Linux Audio

Check our new training course

Loading...
v6.9.4 — powerpc <asm/synch.h> (barrier/synchronization primitives) as of Linux v6.9.4:
 1/* SPDX-License-Identifier: GPL-2.0 */
 2#ifndef _ASM_POWERPC_SYNCH_H 
 3#define _ASM_POWERPC_SYNCH_H 
 4#ifdef __KERNEL__
 5
 6#include <asm/cputable.h>
 7#include <asm/feature-fixups.h>
 8#include <asm/ppc-opcode.h>
 
 
 
 9
10#ifndef __ASSEMBLY__
11extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
12extern void do_lwsync_fixups(unsigned long value, void *fixup_start,
13			     void *fixup_end);
 
14
/*
 * eieio(): PowerPC "Enforce In-order Execution of I/O" barrier.
 * Book E parts do not implement the eieio mnemonic, so the build-time
 * CONFIG_BOOKE switch selects the equivalent mbar instruction instead.
 * The "memory" clobber additionally makes this a compiler barrier.
 */
15	static inline void eieio(void)
16	{
17	if (IS_ENABLED(CONFIG_BOOKE))
18	__asm__ __volatile__ ("mbar" : : : "memory");
19	else
20	__asm__ __volatile__ ("eieio" : : : "memory");
21	}
22
/*
 * isync(): execute the PowerPC isync (instruction synchronize)
 * instruction; the "memory" clobber also acts as a compiler barrier.
 */
23	static inline void isync(void)
24	{
25	__asm__ __volatile__ ("isync" : : : "memory");
26	}
27
/*
 * ppc_after_tlbiel_barrier(): barrier sequence required after tlbiel.
 * Issues ptesync unconditionally, then a cp_abort that is patched in
 * at boot (via ASM_FTR_IFSET) only on CPUs with the CPU_FTR_ARCH_31
 * feature — the comment below explains why the copy buffer must be
 * invalidated.
 */
28	static inline void ppc_after_tlbiel_barrier(void)
29	{
30	asm volatile("ptesync": : :"memory");
31	/*
32	 * POWER9, POWER10 need a cp_abort after tlbiel to ensure the copy is
33	 * invalidated correctly. If this is not done, the paste can take data
34	 * from the physical address that was translated at copy time.
35	 *
36	 * POWER9 in practice does not need this, because address spaces with
37	 * accelerators mapped will use tlbie (which does invalidate the copy)
38	 * to invalidate translations. It's not possible to limit POWER10 this
39	 * way due to local copy-paste.
40	 */
41	asm volatile(ASM_FTR_IFSET(PPC_CP_ABORT, "", %0) : : "i" (CPU_FTR_ARCH_31) : "memory");
42	}
43#endif /* __ASSEMBLY__ */
44
/*
 * LWSYNC (assembler macro): lightweight sync.
 *  - 64-bit: the real lwsync instruction.
 *  - PPC_E500: emitted as sync but recorded in the __lwsync_fixup
 *    section (START_LWSYNC_SECTION/MAKE_LWSYNC_SECTION_ENTRY) so
 *    do_lwsync_fixups() can patch it at boot.
 *  - otherwise: plain (full) sync.
 */
45#if defined(__powerpc64__)
46#    define LWSYNC	lwsync
47#elif defined(CONFIG_PPC_E500)
48#    define LWSYNC					\
49	START_LWSYNC_SECTION(96);			\
50	sync;						\
51	MAKE_LWSYNC_SECTION_ENTRY(96, __lwsync_fixup);
52#else
53#    define LWSYNC	sync
54#endif
55
/*
 * Barrier fragments spliced into inline-asm atomics/locking sequences
 * (stringified with stringify_in_c).  On SMP:
 *  - acquire: isync, registered in __lwsync_fixup so it can be patched;
 *  - release: LWSYNC (see the LWSYNC macro above);
 *  - atomic entry/exit: full sync in this (v6.9.4) version.
 * On !SMP every barrier expands to nothing.
 */
56#ifdef CONFIG_SMP
57#define __PPC_ACQUIRE_BARRIER				\
58	START_LWSYNC_SECTION(97);			\
59	isync;						\
60	MAKE_LWSYNC_SECTION_ENTRY(97, __lwsync_fixup);
61#define PPC_ACQUIRE_BARRIER	 "\n" stringify_in_c(__PPC_ACQUIRE_BARRIER)
62#define PPC_RELEASE_BARRIER	 stringify_in_c(LWSYNC) "\n"
63#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(sync) "\n"
64#define PPC_ATOMIC_EXIT_BARRIER	 "\n" stringify_in_c(sync) "\n"
65#else
66#define PPC_ACQUIRE_BARRIER
67#define PPC_RELEASE_BARRIER
68#define PPC_ATOMIC_ENTRY_BARRIER
69#define PPC_ATOMIC_EXIT_BARRIER
70#endif
71
72#endif /* __KERNEL__ */
73#endif	/* _ASM_POWERPC_SYNCH_H */
v3.5.6 — the same header as it appeared in Linux v3.5.6, for comparison (note: no BOOKE mbar branch, CONFIG_E500 spelling, and an LWSYNC atomic-entry barrier):
 
 1#ifndef _ASM_POWERPC_SYNCH_H 
 2#define _ASM_POWERPC_SYNCH_H 
 3#ifdef __KERNEL__
 4
 5#include <linux/stringify.h>
 6#include <asm/feature-fixups.h>
 7
 8#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC)
 9#define __SUBARCH_HAS_LWSYNC
10#endif
11
12#ifndef __ASSEMBLY__
13extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
14extern void do_lwsync_fixups(unsigned long value, void *fixup_start,
15			     void *fixup_end);
16extern void do_final_fixups(void);
17
/*
 * eieio(): PowerPC "Enforce In-order Execution of I/O" barrier.
 * This older (v3.5.6) version issues eieio unconditionally — the
 * CONFIG_BOOKE/mbar selection was added in later kernels.  The
 * "memory" clobber also makes it a compiler barrier.
 */
18	static inline void eieio(void)
19	{
20	__asm__ __volatile__ ("eieio" : : : "memory");



21	}
22
/*
 * isync(): execute the PowerPC isync (instruction synchronize)
 * instruction; the "memory" clobber also acts as a compiler barrier.
 */
23	static inline void isync(void)
24	{
25	__asm__ __volatile__ ("isync" : : : "memory");
26	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27#endif /* __ASSEMBLY__ */
28
/*
 * LWSYNC (assembler macro), v3.5.6 variant: lwsync on 64-bit; on
 * CONFIG_E500 (the pre-rename spelling of the e500 option) a sync
 * recorded in __lwsync_fixup for boot-time patching by
 * do_lwsync_fixups(); plain sync otherwise.
 */
29#if defined(__powerpc64__)
30#    define LWSYNC	lwsync
31#elif defined(CONFIG_E500)
32#    define LWSYNC					\
33	START_LWSYNC_SECTION(96);			\
34	sync;						\
35	MAKE_LWSYNC_SECTION_ENTRY(96, __lwsync_fixup);
36#else
37#    define LWSYNC	sync
38#endif
39
/*
 * Barrier fragments for inline-asm atomics/locking, v3.5.6 variant.
 * On SMP: acquire is a patchable isync sequence, release is LWSYNC,
 * and — unlike the later v6.9.4 copy, which uses a full sync — the
 * atomic ENTRY barrier here is LWSYNC; only the exit barrier is sync.
 * On !SMP all four expand to nothing.
 */
40#ifdef CONFIG_SMP
41#define __PPC_ACQUIRE_BARRIER				\
42	START_LWSYNC_SECTION(97);			\
43	isync;						\
44	MAKE_LWSYNC_SECTION_ENTRY(97, __lwsync_fixup);
45#define PPC_ACQUIRE_BARRIER	 "\n" stringify_in_c(__PPC_ACQUIRE_BARRIER)
46#define PPC_RELEASE_BARRIER	 stringify_in_c(LWSYNC) "\n"
47#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(LWSYNC) "\n"
48#define PPC_ATOMIC_EXIT_BARRIER	 "\n" stringify_in_c(sync) "\n"
49#else
50#define PPC_ACQUIRE_BARRIER
51#define PPC_RELEASE_BARRIER
52#define PPC_ATOMIC_ENTRY_BARRIER
53#define PPC_ATOMIC_EXIT_BARRIER
54#endif
55
56#endif /* __KERNEL__ */
57#endif	/* _ASM_POWERPC_SYNCH_H */