Linux Audio

Check our new training course

Loading...
v3.15 — the ARM cp15.h header as shipped in Linux v3.15 (a later revision of the same header follows below)
 
  1#ifndef __ASM_ARM_CP15_H
  2#define __ASM_ARM_CP15_H
  3
  4#include <asm/barrier.h>
  5
  6/*
  7 * CR1 bits (CP#15 CR1)
  8 */
  9#define CR_M	(1 << 0)	/* MMU enable				*/
 10#define CR_A	(1 << 1)	/* Alignment abort enable		*/
 11#define CR_C	(1 << 2)	/* Dcache enable			*/
 12#define CR_W	(1 << 3)	/* Write buffer enable			*/
 13#define CR_P	(1 << 4)	/* 32-bit exception handler		*/
 14#define CR_D	(1 << 5)	/* 32-bit data address range		*/
 15#define CR_L	(1 << 6)	/* Implementation defined		*/
 16#define CR_B	(1 << 7)	/* Big endian				*/
 17#define CR_S	(1 << 8)	/* System MMU protection		*/
 18#define CR_R	(1 << 9)	/* ROM MMU protection			*/
 19#define CR_F	(1 << 10)	/* Implementation defined		*/
 20#define CR_Z	(1 << 11)	/* Implementation defined		*/
 21#define CR_I	(1 << 12)	/* Icache enable			*/
 22#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000	*/
 23#define CR_RR	(1 << 14)	/* Round Robin cache replacement	*/
 24#define CR_L4	(1 << 15)	/* LDR pc can set T bit			*/
 25#define CR_DT	(1 << 16)
 26#ifdef CONFIG_MMU
 27#define CR_HA	(1 << 17)	/* Hardware management of Access Flag   */
 28#else
 29#define CR_BR	(1 << 17)	/* MPU Background region enable (PMSA)  */
 30#endif
 31#define CR_IT	(1 << 18)
 32#define CR_ST	(1 << 19)
 33#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode)	*/
 34#define CR_U	(1 << 22)	/* Unaligned access operation		*/
 35#define CR_XP	(1 << 23)	/* Extended page tables			*/
 36#define CR_VE	(1 << 24)	/* Vectored interrupts			*/
 37#define CR_EE	(1 << 25)	/* Exception (Big) Endian		*/
 38#define CR_TRE	(1 << 28)	/* TEX remap enable			*/
 39#define CR_AFE	(1 << 29)	/* Access flag enable			*/
 40#define CR_TE	(1 << 30)	/* Thumb exception enable		*/
 41
 42#ifndef __ASSEMBLY__
 43
 44#if __LINUX_ARM_ARCH__ >= 4
 45#define vectors_high()	(cr_alignment & CR_V)
 46#else
 47#define vectors_high()	(0)
 48#endif
 49
 50#ifdef CONFIG_CPU_CP15
 51
 52extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 53extern unsigned long cr_alignment;	/* defined in entry-armv.S */
 54
 55static inline unsigned int get_cr(void)
 56{
 57	unsigned int val;
 58	asm("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val) : : "cc");
 59	return val;
 60}
 61
 62static inline void set_cr(unsigned int val)
 63{
 64	asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR"
 65	  : : "r" (val) : "cc");
 66	isb();
 67}
 68
 69static inline unsigned int get_auxcr(void)
 70{
 71	unsigned int val;
 72	asm("mrc p15, 0, %0, c1, c0, 1	@ get AUXCR" : "=r" (val));
 73	return val;
 74}
 75
 76static inline void set_auxcr(unsigned int val)
 77{
 78	asm volatile("mcr p15, 0, %0, c1, c0, 1	@ set AUXCR"
 79	  : : "r" (val));
 80	isb();
 81}
 82
 83#ifndef CONFIG_SMP
 84extern void adjust_cr(unsigned long mask, unsigned long set);
 85#endif
 86
/*
 * Two-bit per-coprocessor access field values for the register
 * read/written by get_copro_access()/set_copro_access() below
 * (presumably the CPACR; n is the coprocessor number).
 */
  87#define CPACC_FULL(n)		(3 << (n * 2))
  88#define CPACC_SVC(n)		(1 << (n * 2))
  89#define CPACC_DISABLE(n)	(0 << (n * 2))
 90
 91static inline unsigned int get_copro_access(void)
 92{
 93	unsigned int val;
 94	asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access"
 95	  : "=r" (val) : : "cc");
 96	return val;
 97}
 98
 99static inline void set_copro_access(unsigned int val)
100{
101	asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access"
102	  : : "r" (val) : "cc");
103	isb();
104}
105
106#else /* ifdef CONFIG_CPU_CP15 */
107
108/*
109 * cr_alignment and cr_no_alignment are tightly coupled to cp15 (at least in the
110 * minds of the developers). Yielding 0 for machines without a cp15 (and making
111 * it read-only) is fine for most cases and saves quite some #ifdeffery.
112 */
113#define cr_no_alignment	UL(0)
114#define cr_alignment	UL(0)
 
 
 
 
 
115
116#endif /* ifdef CONFIG_CPU_CP15 / else */
117
118#endif /* ifndef __ASSEMBLY__ */
119
120#endif
v4.17 — the same ARM cp15.h header as revised for Linux v4.17
  1/* SPDX-License-Identifier: GPL-2.0 */
  2#ifndef __ASM_ARM_CP15_H
  3#define __ASM_ARM_CP15_H
  4
  5#include <asm/barrier.h>
  6
  7/*
  8 * CR1 bits (CP#15 CR1)
  9 */
 10#define CR_M	(1 << 0)	/* MMU enable				*/
 11#define CR_A	(1 << 1)	/* Alignment abort enable		*/
 12#define CR_C	(1 << 2)	/* Dcache enable			*/
 13#define CR_W	(1 << 3)	/* Write buffer enable			*/
 14#define CR_P	(1 << 4)	/* 32-bit exception handler		*/
 15#define CR_D	(1 << 5)	/* 32-bit data address range		*/
 16#define CR_L	(1 << 6)	/* Implementation defined		*/
 17#define CR_B	(1 << 7)	/* Big endian				*/
 18#define CR_S	(1 << 8)	/* System MMU protection		*/
 19#define CR_R	(1 << 9)	/* ROM MMU protection			*/
 20#define CR_F	(1 << 10)	/* Implementation defined		*/
 21#define CR_Z	(1 << 11)	/* Implementation defined		*/
 22#define CR_I	(1 << 12)	/* Icache enable			*/
 23#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000	*/
 24#define CR_RR	(1 << 14)	/* Round Robin cache replacement	*/
 25#define CR_L4	(1 << 15)	/* LDR pc can set T bit			*/
 26#define CR_DT	(1 << 16)
 27#ifdef CONFIG_MMU
 28#define CR_HA	(1 << 17)	/* Hardware management of Access Flag   */
 29#else
 30#define CR_BR	(1 << 17)	/* MPU Background region enable (PMSA)  */
 31#endif
 32#define CR_IT	(1 << 18)
 33#define CR_ST	(1 << 19)
 34#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode)	*/
 35#define CR_U	(1 << 22)	/* Unaligned access operation		*/
 36#define CR_XP	(1 << 23)	/* Extended page tables			*/
 37#define CR_VE	(1 << 24)	/* Vectored interrupts			*/
 38#define CR_EE	(1 << 25)	/* Exception (Big) Endian		*/
 39#define CR_TRE	(1 << 28)	/* TEX remap enable			*/
 40#define CR_AFE	(1 << 29)	/* Access flag enable			*/
 41#define CR_TE	(1 << 30)	/* Thumb exception enable		*/
 42
 43#ifndef __ASSEMBLY__
 44
 45#if __LINUX_ARM_ARCH__ >= 4
 46#define vectors_high()	(get_cr() & CR_V)
 47#else
 48#define vectors_high()	(0)
 49#endif
 50
 51#ifdef CONFIG_CPU_CP15
 52
/*
 * Expand to the (read-insn, write-insn, operand-string, C type) tuple
 * consumed by read_sysreg()/write_sysreg() below: MRC/MCR for 32-bit
 * CP15 accesses, MRRC/MCRR for 64-bit ones.
 */
  53#define __ACCESS_CP15(CRn, Op1, CRm, Op2)	\
  54	"mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32
  55#define __ACCESS_CP15_64(Op1, CRm)		\
  56	"mrrc", "mcrr", __stringify(p15, Op1, %Q0, %R0, CRm), u64
  57
/*
 * read_sysreg(__ACCESS_CP15(...)) emits only the read mnemonic (r) and
 * yields a value of the tuple's C type (t); write_sysreg(v, ...) emits
 * only the write mnemonic (w), casting v to that type first.  The
 * unused mnemonic parameter keeps a single tuple shape for both
 * directions.
 */
  58#define __read_sysreg(r, w, c, t) ({				\
  59	t __val;						\
  60	asm volatile(r " " c : "=r" (__val));			\
  61	__val;							\
  62})
  63#define read_sysreg(...)		__read_sysreg(__VA_ARGS__)
  64
  65#define __write_sysreg(v, r, w, c, t)	asm volatile(w " " c : : "r" ((t)(v)))
  66#define write_sysreg(v, ...)		__write_sysreg(v, __VA_ARGS__)
 67
 68extern unsigned long cr_alignment;	/* defined in entry-armv.S */
 69
 70static inline unsigned long get_cr(void)
 71{
 72	unsigned long val;
 73	asm("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val) : : "cc");
 74	return val;
 75}
 76
 77static inline void set_cr(unsigned long val)
 78{
 79	asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR"
 80	  : : "r" (val) : "cc");
 81	isb();
 82}
 83
 84static inline unsigned int get_auxcr(void)
 85{
 86	unsigned int val;
 87	asm("mrc p15, 0, %0, c1, c0, 1	@ get AUXCR" : "=r" (val));
 88	return val;
 89}
 90
 91static inline void set_auxcr(unsigned int val)
 92{
 93	asm volatile("mcr p15, 0, %0, c1, c0, 1	@ set AUXCR"
 94	  : : "r" (val));
 95	isb();
 96}
 97
 
 
 
 
/*
 * Two-bit per-coprocessor access field values for the register
 * read/written by get_copro_access()/set_copro_access() below
 * (presumably the CPACR; n is the coprocessor number).
 */
  98#define CPACC_FULL(n)		(3 << (n * 2))
  99#define CPACC_SVC(n)		(1 << (n * 2))
 100#define CPACC_DISABLE(n)	(0 << (n * 2))
101
102static inline unsigned int get_copro_access(void)
103{
104	unsigned int val;
105	asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access"
106	  : "=r" (val) : : "cc");
107	return val;
108}
109
110static inline void set_copro_access(unsigned int val)
111{
112	asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access"
113	  : : "r" (val) : "cc");
114	isb();
115}
116
117#else /* ifdef CONFIG_CPU_CP15 */
118
119/*
120 * cr_alignment is tightly coupled to cp15 (at least in the minds of the
121 * developers). Yielding 0 for machines without a cp15 (and making it
122 * read-only) is fine for most cases and saves quite some #ifdeffery.
123 */
 
124#define cr_alignment	UL(0)
125
/* Without a CP15 there is no control register; report it as all zeroes. */
static inline unsigned long get_cr(void)
{
	return 0;
}
130
131#endif /* ifdef CONFIG_CPU_CP15 / else */
132
133#endif /* ifndef __ASSEMBLY__ */
134
135#endif