v4.10.11

/*
 * arch/arm/include/asm/arch_gicv3.h
 *
 * Copyright (C) 2015 ARM Ltd.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_ARCH_GICV3_H
#define __ASM_ARCH_GICV3_H

#ifndef __ASSEMBLY__

#include <linux/io.h>
#include <asm/barrier.h>
#include <asm/cacheflush.h>
#include <asm/cp15.h>

#define ICC_EOIR1			__ACCESS_CP15(c12, 0, c12, 1)
#define ICC_DIR				__ACCESS_CP15(c12, 0, c11, 1)
#define ICC_IAR1			__ACCESS_CP15(c12, 0, c12, 0)
#define ICC_SGI1R			__ACCESS_CP15_64(0, c12)
#define ICC_PMR				__ACCESS_CP15(c4, 0, c6, 0)
#define ICC_CTLR			__ACCESS_CP15(c12, 0, c12, 4)
#define ICC_SRE				__ACCESS_CP15(c12, 0, c12, 5)
#define ICC_IGRPEN1			__ACCESS_CP15(c12, 0, c12, 7)
#define ICC_BPR1			__ACCESS_CP15(c12, 0, c12, 3)

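/*
 * Illustrative note (not from the original file), assuming the
 * __ACCESS_CP15(CRn, Op1, CRm, Op2) argument order from asm/cp15.h: each
 * define above names the AArch32 system-register encoding of a GICv3 CPU
 * interface register, so read_sysreg(ICC_IAR1) is expected to assemble to
 * roughly:
 *
 *	mrc	p15, 0, <Rt>, c12, c12, 0	@ ICC_IAR1
 */
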
#define ICC_HSRE			__ACCESS_CP15(c12, 4, c9, 5)

#define ICH_VSEIR			__ACCESS_CP15(c12, 4, c9, 4)
#define ICH_HCR				__ACCESS_CP15(c12, 4, c11, 0)
#define ICH_VTR				__ACCESS_CP15(c12, 4, c11, 1)
#define ICH_MISR			__ACCESS_CP15(c12, 4, c11, 2)
#define ICH_EISR			__ACCESS_CP15(c12, 4, c11, 3)
#define ICH_ELSR			__ACCESS_CP15(c12, 4, c11, 5)
#define ICH_VMCR			__ACCESS_CP15(c12, 4, c11, 7)

#define __LR0(x)			__ACCESS_CP15(c12, 4, c12, x)
#define __LR8(x)			__ACCESS_CP15(c12, 4, c13, x)

#define ICH_LR0				__LR0(0)
#define ICH_LR1				__LR0(1)
#define ICH_LR2				__LR0(2)
#define ICH_LR3				__LR0(3)
#define ICH_LR4				__LR0(4)
#define ICH_LR5				__LR0(5)
#define ICH_LR6				__LR0(6)
#define ICH_LR7				__LR0(7)
#define ICH_LR8				__LR8(0)
#define ICH_LR9				__LR8(1)
#define ICH_LR10			__LR8(2)
#define ICH_LR11			__LR8(3)
#define ICH_LR12			__LR8(4)
#define ICH_LR13			__LR8(5)
#define ICH_LR14			__LR8(6)
#define ICH_LR15			__LR8(7)

/* LR top half */
#define __LRC0(x)			__ACCESS_CP15(c12, 4, c14, x)
#define __LRC8(x)			__ACCESS_CP15(c12, 4, c15, x)

#define ICH_LRC0			__LRC0(0)
#define ICH_LRC1			__LRC0(1)
#define ICH_LRC2			__LRC0(2)
#define ICH_LRC3			__LRC0(3)
#define ICH_LRC4			__LRC0(4)
#define ICH_LRC5			__LRC0(5)
#define ICH_LRC6			__LRC0(6)
#define ICH_LRC7			__LRC0(7)
#define ICH_LRC8			__LRC8(0)
#define ICH_LRC9			__LRC8(1)
#define ICH_LRC10			__LRC8(2)
#define ICH_LRC11			__LRC8(3)
#define ICH_LRC12			__LRC8(4)
#define ICH_LRC13			__LRC8(5)
#define ICH_LRC14			__LRC8(6)
#define ICH_LRC15			__LRC8(7)

#define __AP0Rx(x)			__ACCESS_CP15(c12, 4, c8, x)
#define ICH_AP0R0			__AP0Rx(0)
#define ICH_AP0R1			__AP0Rx(1)
#define ICH_AP0R2			__AP0Rx(2)
#define ICH_AP0R3			__AP0Rx(3)

#define __AP1Rx(x)			__ACCESS_CP15(c12, 4, c9, x)
#define ICH_AP1R0			__AP1Rx(0)
#define ICH_AP1R1			__AP1Rx(1)
#define ICH_AP1R2			__AP1Rx(2)
#define ICH_AP1R3			__AP1Rx(3)

/* A32-to-A64 mappings used by VGIC save/restore */

#define CPUIF_MAP(a32, a64)			\
static inline void write_ ## a64(u32 val)	\
{						\
	write_sysreg(val, a32);			\
}						\
static inline u32 read_ ## a64(void)		\
{						\
	return read_sysreg(a32);		\
}						\

#define CPUIF_MAP_LO_HI(a32lo, a32hi, a64)	\
static inline void write_ ## a64(u64 val)	\
{						\
	write_sysreg(lower_32_bits(val), a32lo);\
	write_sysreg(upper_32_bits(val), a32hi);\
}						\
static inline u64 read_ ## a64(void)		\
{						\
	u64 val = read_sysreg(a32lo);		\
						\
	val |= (u64)read_sysreg(a32hi) << 32;	\
						\
	return val;				\
}

CPUIF_MAP(ICH_HCR, ICH_HCR_EL2)
CPUIF_MAP(ICH_VTR, ICH_VTR_EL2)
CPUIF_MAP(ICH_MISR, ICH_MISR_EL2)
CPUIF_MAP(ICH_EISR, ICH_EISR_EL2)
CPUIF_MAP(ICH_ELSR, ICH_ELSR_EL2)
CPUIF_MAP(ICH_VMCR, ICH_VMCR_EL2)
CPUIF_MAP(ICH_AP0R3, ICH_AP0R3_EL2)
CPUIF_MAP(ICH_AP0R2, ICH_AP0R2_EL2)
CPUIF_MAP(ICH_AP0R1, ICH_AP0R1_EL2)
CPUIF_MAP(ICH_AP0R0, ICH_AP0R0_EL2)
CPUIF_MAP(ICH_AP1R3, ICH_AP1R3_EL2)
CPUIF_MAP(ICH_AP1R2, ICH_AP1R2_EL2)
CPUIF_MAP(ICH_AP1R1, ICH_AP1R1_EL2)
CPUIF_MAP(ICH_AP1R0, ICH_AP1R0_EL2)
CPUIF_MAP(ICC_HSRE, ICC_SRE_EL2)
CPUIF_MAP(ICC_SRE, ICC_SRE_EL1)

CPUIF_MAP_LO_HI(ICH_LR15, ICH_LRC15, ICH_LR15_EL2)
CPUIF_MAP_LO_HI(ICH_LR14, ICH_LRC14, ICH_LR14_EL2)
CPUIF_MAP_LO_HI(ICH_LR13, ICH_LRC13, ICH_LR13_EL2)
CPUIF_MAP_LO_HI(ICH_LR12, ICH_LRC12, ICH_LR12_EL2)
CPUIF_MAP_LO_HI(ICH_LR11, ICH_LRC11, ICH_LR11_EL2)
CPUIF_MAP_LO_HI(ICH_LR10, ICH_LRC10, ICH_LR10_EL2)
CPUIF_MAP_LO_HI(ICH_LR9, ICH_LRC9, ICH_LR9_EL2)
CPUIF_MAP_LO_HI(ICH_LR8, ICH_LRC8, ICH_LR8_EL2)
CPUIF_MAP_LO_HI(ICH_LR7, ICH_LRC7, ICH_LR7_EL2)
CPUIF_MAP_LO_HI(ICH_LR6, ICH_LRC6, ICH_LR6_EL2)
CPUIF_MAP_LO_HI(ICH_LR5, ICH_LRC5, ICH_LR5_EL2)
CPUIF_MAP_LO_HI(ICH_LR4, ICH_LRC4, ICH_LR4_EL2)
CPUIF_MAP_LO_HI(ICH_LR3, ICH_LRC3, ICH_LR3_EL2)
CPUIF_MAP_LO_HI(ICH_LR2, ICH_LRC2, ICH_LR2_EL2)
CPUIF_MAP_LO_HI(ICH_LR1, ICH_LRC1, ICH_LR1_EL2)
CPUIF_MAP_LO_HI(ICH_LR0, ICH_LRC0, ICH_LR0_EL2)

#define read_gicreg(r)                 read_##r()
#define write_gicreg(v, r)             write_##r(v)

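/*
 * Illustrative sketch (not from the original file): an instance such as
 * CPUIF_MAP_LO_HI(ICH_LR0, ICH_LRC0, ICH_LR0_EL2) above expands to
 * accessors equivalent to:
 *
 *	static inline void write_ICH_LR0_EL2(u64 val)
 *	{
 *		write_sysreg(lower_32_bits(val), ICH_LR0);
 *		write_sysreg(upper_32_bits(val), ICH_LRC0);
 *	}
 *
 *	static inline u64 read_ICH_LR0_EL2(void)
 *	{
 *		return read_sysreg(ICH_LR0) |
 *		       ((u64)read_sysreg(ICH_LRC0) << 32);
 *	}
 *
 * which lets the shared VGIC save/restore code refer to AArch64 register
 * names, e.g. u64 lr = read_gicreg(ICH_LR0_EL2);
 */
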
/* Low-level accessors */

static inline void gic_write_eoir(u32 irq)
{
	write_sysreg(irq, ICC_EOIR1);
	isb();
}

static inline void gic_write_dir(u32 val)
{
	write_sysreg(val, ICC_DIR);
	isb();
}

static inline u32 gic_read_iar(void)
{
	u32 irqstat = read_sysreg(ICC_IAR1);

	dsb(sy);

	return irqstat;
}

static inline void gic_write_pmr(u32 val)
{
	write_sysreg(val, ICC_PMR);
}

static inline void gic_write_ctlr(u32 val)
{
	write_sysreg(val, ICC_CTLR);
	isb();
}

static inline void gic_write_grpen1(u32 val)
{
	write_sysreg(val, ICC_IGRPEN1);
	isb();
}

static inline void gic_write_sgi1r(u64 val)
{
	write_sysreg(val, ICC_SGI1R);
}

static inline u32 gic_read_sre(void)
{
	return read_sysreg(ICC_SRE);
}

static inline void gic_write_sre(u32 val)
{
	write_sysreg(val, ICC_SRE);
	isb();
}

static inline void gic_write_bpr1(u32 val)
{
	write_sysreg(val, ICC_BPR1);
}

/*
 * Even in 32bit systems that use LPAE, there is no guarantee that the I/O
 * interface provides true 64bit atomic accesses, so using strd/ldrd doesn't
 * make much sense.
 * Moreover, 64bit I/O emulation is extremely difficult to implement on
 * AArch32, since the syndrome register doesn't provide any information for
 * them.
 * Consequently, the following IO helpers use 32bit accesses.
 */
static inline void __gic_writeq_nonatomic(u64 val, volatile void __iomem *addr)
{
	writel_relaxed((u32)val, addr);
	writel_relaxed((u32)(val >> 32), addr + 4);
}

static inline u64 __gic_readq_nonatomic(const volatile void __iomem *addr)
{
	u64 val;

	val = readl_relaxed(addr);
	val |= (u64)readl_relaxed(addr + 4) << 32;
	return val;
}

#define gic_flush_dcache_to_poc(a,l)    __cpuc_flush_dcache_area((a), (l))

/*
 * GICD_IROUTERn contains the affinity value associated with each interrupt.
 * The upper word (aff3) will always be 0, so there is no need for a lock.
 */
#define gic_write_irouter(v, c)		__gic_writeq_nonatomic(v, c)

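/*
 * Illustrative usage sketch (assumed caller, not part of this file):
 * routing SPI 42 to the CPU with affinity Aff2.Aff1.Aff0 = 0.0.1. Aff3
 * (bits [39:32]) is always 0 on 32-bit ARM, so only the low word carries
 * data and the non-atomic 64-bit write is safe. dist_base and GICD_IROUTER
 * are assumed from the GICv3 driver headers, not defined here.
 *
 *	u64 route = 1;		(Aff0 = 1, all other fields 0)
 *	gic_write_irouter(route, dist_base + GICD_IROUTER + 42 * 8);
 */
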
/*
 * GICR_TYPER is an ID register and doesn't need atomicity.
 */
#define gic_read_typer(c)		__gic_readq_nonatomic(c)

/*
 * GITS_BASER - hi and lo bits may be accessed independently.
 */
#define gits_read_baser(c)		__gic_readq_nonatomic(c)
#define gits_write_baser(v, c)		__gic_writeq_nonatomic(v, c)

/*
 * GICR_PENDBASER and GICR_PROPBASER are changed with LPIs disabled, so they
 * won't be in use during any updates and can be changed non-atomically.
 */
#define gicr_read_propbaser(c)		__gic_readq_nonatomic(c)
#define gicr_write_propbaser(v, c)	__gic_writeq_nonatomic(v, c)
#define gicr_read_pendbaser(c)		__gic_readq_nonatomic(c)
#define gicr_write_pendbaser(v, c)	__gic_writeq_nonatomic(v, c)

/*
 * GITS_TYPER is an ID register and doesn't need atomicity.
 */
#define gits_read_typer(c)		__gic_readq_nonatomic(c)

/*
 * GITS_CBASER - hi and lo bits may be accessed independently.
 */
#define gits_read_cbaser(c)		__gic_readq_nonatomic(c)
#define gits_write_cbaser(v, c)		__gic_writeq_nonatomic(v, c)

/*
 * GITS_CWRITER - hi and lo bits may be accessed independently.
 */
#define gits_write_cwriter(v, c)	__gic_writeq_nonatomic(v, c)

#endif /* !__ASSEMBLY__ */
#endif /* !__ASM_ARCH_GICV3_H */
v5.4
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm/include/asm/arch_gicv3.h
 *
 * Copyright (C) 2015 ARM Ltd.
 */
#ifndef __ASM_ARCH_GICV3_H
#define __ASM_ARCH_GICV3_H

#ifndef __ASSEMBLY__

#include <linux/io.h>
#include <asm/barrier.h>
#include <asm/cacheflush.h>
#include <asm/cp15.h>

#define ICC_EOIR1			__ACCESS_CP15(c12, 0, c12, 1)
#define ICC_DIR				__ACCESS_CP15(c12, 0, c11, 1)
#define ICC_IAR1			__ACCESS_CP15(c12, 0, c12, 0)
#define ICC_SGI1R			__ACCESS_CP15_64(0, c12)
#define ICC_PMR				__ACCESS_CP15(c4, 0, c6, 0)
#define ICC_CTLR			__ACCESS_CP15(c12, 0, c12, 4)
#define ICC_SRE				__ACCESS_CP15(c12, 0, c12, 5)
#define ICC_IGRPEN1			__ACCESS_CP15(c12, 0, c12, 7)
#define ICC_BPR1			__ACCESS_CP15(c12, 0, c12, 3)
#define ICC_RPR				__ACCESS_CP15(c12, 0, c11, 3)

#define __ICC_AP0Rx(x)			__ACCESS_CP15(c12, 0, c8, 4 | x)
#define ICC_AP0R0			__ICC_AP0Rx(0)
#define ICC_AP0R1			__ICC_AP0Rx(1)
#define ICC_AP0R2			__ICC_AP0Rx(2)
#define ICC_AP0R3			__ICC_AP0Rx(3)

#define __ICC_AP1Rx(x)			__ACCESS_CP15(c12, 0, c9, x)
#define ICC_AP1R0			__ICC_AP1Rx(0)
#define ICC_AP1R1			__ICC_AP1Rx(1)
#define ICC_AP1R2			__ICC_AP1Rx(2)
#define ICC_AP1R3			__ICC_AP1Rx(3)

#define ICC_HSRE			__ACCESS_CP15(c12, 4, c9, 5)

#define ICH_VSEIR			__ACCESS_CP15(c12, 4, c9, 4)
#define ICH_HCR				__ACCESS_CP15(c12, 4, c11, 0)
#define ICH_VTR				__ACCESS_CP15(c12, 4, c11, 1)
#define ICH_MISR			__ACCESS_CP15(c12, 4, c11, 2)
#define ICH_EISR			__ACCESS_CP15(c12, 4, c11, 3)
#define ICH_ELRSR			__ACCESS_CP15(c12, 4, c11, 5)
#define ICH_VMCR			__ACCESS_CP15(c12, 4, c11, 7)

#define __LR0(x)			__ACCESS_CP15(c12, 4, c12, x)
#define __LR8(x)			__ACCESS_CP15(c12, 4, c13, x)

#define ICH_LR0				__LR0(0)
#define ICH_LR1				__LR0(1)
#define ICH_LR2				__LR0(2)
#define ICH_LR3				__LR0(3)
#define ICH_LR4				__LR0(4)
#define ICH_LR5				__LR0(5)
#define ICH_LR6				__LR0(6)
#define ICH_LR7				__LR0(7)
#define ICH_LR8				__LR8(0)
#define ICH_LR9				__LR8(1)
#define ICH_LR10			__LR8(2)
#define ICH_LR11			__LR8(3)
#define ICH_LR12			__LR8(4)
#define ICH_LR13			__LR8(5)
#define ICH_LR14			__LR8(6)
#define ICH_LR15			__LR8(7)

/* LR top half */
#define __LRC0(x)			__ACCESS_CP15(c12, 4, c14, x)
#define __LRC8(x)			__ACCESS_CP15(c12, 4, c15, x)

#define ICH_LRC0			__LRC0(0)
#define ICH_LRC1			__LRC0(1)
#define ICH_LRC2			__LRC0(2)
#define ICH_LRC3			__LRC0(3)
#define ICH_LRC4			__LRC0(4)
#define ICH_LRC5			__LRC0(5)
#define ICH_LRC6			__LRC0(6)
#define ICH_LRC7			__LRC0(7)
#define ICH_LRC8			__LRC8(0)
#define ICH_LRC9			__LRC8(1)
#define ICH_LRC10			__LRC8(2)
#define ICH_LRC11			__LRC8(3)
#define ICH_LRC12			__LRC8(4)
#define ICH_LRC13			__LRC8(5)
#define ICH_LRC14			__LRC8(6)
#define ICH_LRC15			__LRC8(7)

#define __ICH_AP0Rx(x)			__ACCESS_CP15(c12, 4, c8, x)
#define ICH_AP0R0			__ICH_AP0Rx(0)
#define ICH_AP0R1			__ICH_AP0Rx(1)
#define ICH_AP0R2			__ICH_AP0Rx(2)
#define ICH_AP0R3			__ICH_AP0Rx(3)

#define __ICH_AP1Rx(x)			__ACCESS_CP15(c12, 4, c9, x)
#define ICH_AP1R0			__ICH_AP1Rx(0)
#define ICH_AP1R1			__ICH_AP1Rx(1)
#define ICH_AP1R2			__ICH_AP1Rx(2)
#define ICH_AP1R3			__ICH_AP1Rx(3)

/* A32-to-A64 mappings used by VGIC save/restore */

#define CPUIF_MAP(a32, a64)			\
static inline void write_ ## a64(u32 val)	\
{						\
	write_sysreg(val, a32);			\
}						\
static inline u32 read_ ## a64(void)		\
{						\
	return read_sysreg(a32);		\
}						\

#define CPUIF_MAP_LO_HI(a32lo, a32hi, a64)	\
static inline void write_ ## a64(u64 val)	\
{						\
	write_sysreg(lower_32_bits(val), a32lo);\
	write_sysreg(upper_32_bits(val), a32hi);\
}						\
static inline u64 read_ ## a64(void)		\
{						\
	u64 val = read_sysreg(a32lo);		\
						\
	val |= (u64)read_sysreg(a32hi) << 32;	\
						\
	return val;				\
}

CPUIF_MAP(ICC_PMR, ICC_PMR_EL1)
CPUIF_MAP(ICC_AP0R0, ICC_AP0R0_EL1)
CPUIF_MAP(ICC_AP0R1, ICC_AP0R1_EL1)
CPUIF_MAP(ICC_AP0R2, ICC_AP0R2_EL1)
CPUIF_MAP(ICC_AP0R3, ICC_AP0R3_EL1)
CPUIF_MAP(ICC_AP1R0, ICC_AP1R0_EL1)
CPUIF_MAP(ICC_AP1R1, ICC_AP1R1_EL1)
CPUIF_MAP(ICC_AP1R2, ICC_AP1R2_EL1)
CPUIF_MAP(ICC_AP1R3, ICC_AP1R3_EL1)

CPUIF_MAP(ICH_HCR, ICH_HCR_EL2)
CPUIF_MAP(ICH_VTR, ICH_VTR_EL2)
CPUIF_MAP(ICH_MISR, ICH_MISR_EL2)
CPUIF_MAP(ICH_EISR, ICH_EISR_EL2)
CPUIF_MAP(ICH_ELRSR, ICH_ELRSR_EL2)
CPUIF_MAP(ICH_VMCR, ICH_VMCR_EL2)
CPUIF_MAP(ICH_AP0R3, ICH_AP0R3_EL2)
CPUIF_MAP(ICH_AP0R2, ICH_AP0R2_EL2)
CPUIF_MAP(ICH_AP0R1, ICH_AP0R1_EL2)
CPUIF_MAP(ICH_AP0R0, ICH_AP0R0_EL2)
CPUIF_MAP(ICH_AP1R3, ICH_AP1R3_EL2)
CPUIF_MAP(ICH_AP1R2, ICH_AP1R2_EL2)
CPUIF_MAP(ICH_AP1R1, ICH_AP1R1_EL2)
CPUIF_MAP(ICH_AP1R0, ICH_AP1R0_EL2)
CPUIF_MAP(ICC_HSRE, ICC_SRE_EL2)
CPUIF_MAP(ICC_SRE, ICC_SRE_EL1)

CPUIF_MAP_LO_HI(ICH_LR15, ICH_LRC15, ICH_LR15_EL2)
CPUIF_MAP_LO_HI(ICH_LR14, ICH_LRC14, ICH_LR14_EL2)
CPUIF_MAP_LO_HI(ICH_LR13, ICH_LRC13, ICH_LR13_EL2)
CPUIF_MAP_LO_HI(ICH_LR12, ICH_LRC12, ICH_LR12_EL2)
CPUIF_MAP_LO_HI(ICH_LR11, ICH_LRC11, ICH_LR11_EL2)
CPUIF_MAP_LO_HI(ICH_LR10, ICH_LRC10, ICH_LR10_EL2)
CPUIF_MAP_LO_HI(ICH_LR9, ICH_LRC9, ICH_LR9_EL2)
CPUIF_MAP_LO_HI(ICH_LR8, ICH_LRC8, ICH_LR8_EL2)
CPUIF_MAP_LO_HI(ICH_LR7, ICH_LRC7, ICH_LR7_EL2)
CPUIF_MAP_LO_HI(ICH_LR6, ICH_LRC6, ICH_LR6_EL2)
CPUIF_MAP_LO_HI(ICH_LR5, ICH_LRC5, ICH_LR5_EL2)
CPUIF_MAP_LO_HI(ICH_LR4, ICH_LRC4, ICH_LR4_EL2)
CPUIF_MAP_LO_HI(ICH_LR3, ICH_LRC3, ICH_LR3_EL2)
CPUIF_MAP_LO_HI(ICH_LR2, ICH_LRC2, ICH_LR2_EL2)
CPUIF_MAP_LO_HI(ICH_LR1, ICH_LRC1, ICH_LR1_EL2)
CPUIF_MAP_LO_HI(ICH_LR0, ICH_LRC0, ICH_LR0_EL2)

#define read_gicreg(r)                 read_##r()
#define write_gicreg(v, r)             write_##r(v)

/* Low-level accessors */

static inline void gic_write_eoir(u32 irq)
{
	write_sysreg(irq, ICC_EOIR1);
	isb();
}

static inline void gic_write_dir(u32 val)
{
	write_sysreg(val, ICC_DIR);
	isb();
}

static inline u32 gic_read_iar(void)
{
	u32 irqstat = read_sysreg(ICC_IAR1);

	dsb(sy);

	return irqstat;
}

static inline void gic_write_ctlr(u32 val)
{
	write_sysreg(val, ICC_CTLR);
	isb();
}

static inline u32 gic_read_ctlr(void)
{
	return read_sysreg(ICC_CTLR);
}

static inline void gic_write_grpen1(u32 val)
{
	write_sysreg(val, ICC_IGRPEN1);
	isb();
}

static inline void gic_write_sgi1r(u64 val)
{
	write_sysreg(val, ICC_SGI1R);
}

static inline u32 gic_read_sre(void)
{
	return read_sysreg(ICC_SRE);
}

static inline void gic_write_sre(u32 val)
{
	write_sysreg(val, ICC_SRE);
	isb();
}

static inline void gic_write_bpr1(u32 val)
{
	write_sysreg(val, ICC_BPR1);
}

static inline u32 gic_read_pmr(void)
{
	return read_sysreg(ICC_PMR);
}

static inline void gic_write_pmr(u32 val)
{
	write_sysreg(val, ICC_PMR);
}

static inline u32 gic_read_rpr(void)
{
	return read_sysreg(ICC_RPR);
}

/*
 * Even in 32bit systems that use LPAE, there is no guarantee that the I/O
 * interface provides true 64bit atomic accesses, so using strd/ldrd doesn't
 * make much sense.
 * Moreover, 64bit I/O emulation is extremely difficult to implement on
 * AArch32, since the syndrome register doesn't provide any information for
 * them.
 * Consequently, the following IO helpers use 32bit accesses.
 */
static inline void __gic_writeq_nonatomic(u64 val, volatile void __iomem *addr)
{
	writel_relaxed((u32)val, addr);
	writel_relaxed((u32)(val >> 32), addr + 4);
}

static inline u64 __gic_readq_nonatomic(const volatile void __iomem *addr)
{
	u64 val;

	val = readl_relaxed(addr);
	val |= (u64)readl_relaxed(addr + 4) << 32;
	return val;
}

#define gic_flush_dcache_to_poc(a,l)    __cpuc_flush_dcache_area((a), (l))

/*
 * GICD_IROUTERn contains the affinity value associated with each interrupt.
 * The upper word (aff3) will always be 0, so there is no need for a lock.
 */
#define gic_write_irouter(v, c)		__gic_writeq_nonatomic(v, c)

/*
 * GICR_TYPER is an ID register and doesn't need atomicity.
 */
#define gic_read_typer(c)		__gic_readq_nonatomic(c)

/*
 * GITS_BASER - hi and lo bits may be accessed independently.
 */
#define gits_read_baser(c)		__gic_readq_nonatomic(c)
#define gits_write_baser(v, c)		__gic_writeq_nonatomic(v, c)

/*
 * GICR_PENDBASER and GICR_PROPBASER are changed with LPIs disabled, so they
 * won't be in use during any updates and can be changed non-atomically.
 */
#define gicr_read_propbaser(c)		__gic_readq_nonatomic(c)
#define gicr_write_propbaser(v, c)	__gic_writeq_nonatomic(v, c)
#define gicr_read_pendbaser(c)		__gic_readq_nonatomic(c)
#define gicr_write_pendbaser(v, c)	__gic_writeq_nonatomic(v, c)

/*
 * GICR_xLPIR - only the lower bits are significant
 */
#define gic_read_lpir(c)		readl_relaxed(c)
#define gic_write_lpir(v, c)		writel_relaxed(lower_32_bits(v), c)

/*
 * GITS_TYPER is an ID register and doesn't need atomicity.
 */
#define gits_read_typer(c)		__gic_readq_nonatomic(c)

/*
 * GITS_CBASER - hi and lo bits may be accessed independently.
 */
#define gits_read_cbaser(c)		__gic_readq_nonatomic(c)
#define gits_write_cbaser(v, c)		__gic_writeq_nonatomic(v, c)

/*
 * GITS_CWRITER - hi and lo bits may be accessed independently.
 */
#define gits_write_cwriter(v, c)	__gic_writeq_nonatomic(v, c)

/*
 * GITS_VPROPBASER - hi and lo bits may be accessed independently.
 */
#define gits_write_vpropbaser(v, c)	__gic_writeq_nonatomic(v, c)

/*
 * GITS_VPENDBASER - the Valid bit must be cleared before changing
 * anything else.
 */
static inline void gits_write_vpendbaser(u64 val, void __iomem *addr)
{
	u32 tmp;

	tmp = readl_relaxed(addr + 4);
	if (tmp & (GICR_VPENDBASER_Valid >> 32)) {
		tmp &= ~(GICR_VPENDBASER_Valid >> 32);
		writel_relaxed(tmp, addr + 4);
	}

	/*
	 * Use the fact that __gic_writeq_nonatomic writes the second
	 * half of the 64bit quantity after the first.
	 */
	__gic_writeq_nonatomic(val, addr);
}

#define gits_read_vpendbaser(c)		__gic_readq_nonatomic(c)

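/*
 * Illustrative usage sketch (assumed caller, not part of this file):
 * making a vPE resident by pointing GICR_VPENDBASER at its pending table
 * and setting the Valid bit. The helper above clears Valid first, so no
 * other field ever changes while Valid is set. vpt_pa, vlpi_base and
 * GICR_VPENDBASER are assumed from the GICv3 ITS driver, not defined here.
 *
 *	u64 val = vpt_pa | GICR_VPENDBASER_Valid;
 *	gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
 */
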
static inline bool gic_prio_masking_enabled(void)
{
	return false;
}

static inline void gic_pmr_mask_irqs(void)
{
	/* Should not get called. */
	WARN_ON_ONCE(true);
}

static inline void gic_arch_enable_irqs(void)
{
	/* Should not get called. */
	WARN_ON_ONCE(true);
}

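/*
 * Illustrative sketch (assumed caller, not part of this file): common
 * GICv3 driver code guards its priority-masking (pseudo-NMI) paths with
 * gic_prio_masking_enabled(), which always returns false here, so the
 * WARN_ON_ONCE() stubs above are never reached on 32-bit ARM:
 *
 *	if (gic_prio_masking_enabled())
 *		gic_pmr_mask_irqs();
 */
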
#endif /* !__ASSEMBLY__ */
#endif /* !__ASM_ARCH_GICV3_H */