v6.8
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#ifndef _ASM_ARC_IO_H
#define _ASM_ARC_IO_H

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <asm/unaligned.h>

#ifdef CONFIG_ISA_ARCV2
#include <asm/barrier.h>
#define __iormb()		rmb()
#define __iowmb()		wmb()
#else
#define __iormb()		do { } while (0)
#define __iowmb()		do { } while (0)
#endif

extern void __iomem *ioremap(phys_addr_t paddr, unsigned long size);
#define ioremap ioremap
#define ioremap_prot ioremap_prot
#define iounmap iounmap
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
	return (void __iomem *)port;
}

static inline void ioport_unmap(void __iomem *addr)
{
}

/*
 * io{read,write}{16,32}be() macros
 */
#define ioread16be(p)		({ u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
#define ioread32be(p)		({ u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })

#define iowrite16be(v,p)	({ __iowmb(); __raw_writew((__force u16)cpu_to_be16(v), p); })
#define iowrite32be(v,p)	({ __iowmb(); __raw_writel((__force u32)cpu_to_be32(v), p); })
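
/*
 * Driver-side usage sketch (not part of this header): accessing a
 * hypothetical fixed big-endian device register block. ioread32be()
 * byte-swaps on little-endian ARC and issues the read barrier after the
 * load; iowrite16be() issues the write barrier before the store. The
 * "regs" pointer and the offsets are illustrative only.
 */
static inline u32 example_be_status(void __iomem *regs)
{
	return ioread32be(regs + 0x10);		/* swapped load + __iormb() */
}

static inline void example_be_ctrl(void __iomem *regs, u16 val)
{
	iowrite16be(val, regs + 0x14);		/* __iowmb() + swapped store */
}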

/* Change struct page to physical address */
#define page_to_phys(page)		(page_to_pfn(page) << PAGE_SHIFT)

#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
	u8 b;

	__asm__ __volatile__(
	"	ldb%U1 %0, %1	\n"
	: "=r" (b)
	: "m" (*(volatile u8 __force *)addr)
	: "memory");

	return b;
}

#define __raw_readw __raw_readw
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
	u16 s;

	__asm__ __volatile__(
	"	ldw%U1 %0, %1	\n"
	: "=r" (s)
	: "m" (*(volatile u16 __force *)addr)
	: "memory");

	return s;
}

#define __raw_readl __raw_readl
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
	u32 w;

	__asm__ __volatile__(
	"	ld%U1 %0, %1	\n"
	: "=r" (w)
	: "m" (*(volatile u32 __force *)addr)
	: "memory");

	return w;
}

/*
 * {read,write}s{b,w,l}() repeatedly access the same IO address in
 * native endianness in 8-, 16-, 32-bit chunks {into,from} memory,
 * @count times
 */
#define __raw_readsx(t,f) \
static inline void __raw_reads##f(const volatile void __iomem *addr,	\
				  void *ptr, unsigned int count)	\
{									\
	bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0;	\
	u##t *buf = ptr;						\
									\
	if (!count)							\
		return;							\
									\
	/* Some ARC CPUs don't support unaligned accesses */		\
	if (is_aligned) {						\
		do {							\
			u##t x = __raw_read##f(addr);			\
			*buf++ = x;					\
		} while (--count);					\
	} else {							\
		do {							\
			u##t x = __raw_read##f(addr);			\
			put_unaligned(x, buf++);			\
		} while (--count);					\
	}								\
}

#define __raw_readsb __raw_readsb
__raw_readsx(8, b)
#define __raw_readsw __raw_readsw
__raw_readsx(16, w)
#define __raw_readsl __raw_readsl
__raw_readsx(32, l)
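
/*
 * Driver-side usage sketch (not part of this header): draining a
 * hypothetical RX FIFO with the generated string reader. The same MMIO
 * address is read "len" times; the destination buffer may be unaligned,
 * in which case the put_unaligned() path above is taken. Names are
 * illustrative only.
 */
static inline void example_drain_fifo(const void __iomem *fifo, void *dst,
				      unsigned int len)
{
	__raw_readsl(fifo, dst, len);	/* raw variant: caller adds barriers */
}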

#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
{
	__asm__ __volatile__(
	"	stb%U1 %0, %1	\n"
	:
	: "r" (b), "m" (*(volatile u8 __force *)addr)
	: "memory");
}

#define __raw_writew __raw_writew
static inline void __raw_writew(u16 s, volatile void __iomem *addr)
{
	__asm__ __volatile__(
	"	stw%U1 %0, %1	\n"
	:
	: "r" (s), "m" (*(volatile u16 __force *)addr)
	: "memory");
}

#define __raw_writel __raw_writel
static inline void __raw_writel(u32 w, volatile void __iomem *addr)
{
	__asm__ __volatile__(
	"	st%U1 %0, %1	\n"
	:
	: "r" (w), "m" (*(volatile u32 __force *)addr)
	: "memory");
}

#define __raw_writesx(t,f)						\
static inline void __raw_writes##f(volatile void __iomem *addr,	\
				   const void *ptr, unsigned int count)	\
{									\
	bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0;	\
	const u##t *buf = ptr;						\
									\
	if (!count)							\
		return;							\
									\
	/* Some ARC CPUs don't support unaligned accesses */		\
	if (is_aligned) {						\
		do {							\
			__raw_write##f(*buf++, addr);			\
		} while (--count);					\
	} else {							\
		do {							\
			__raw_write##f(get_unaligned(buf++), addr);	\
		} while (--count);					\
	}								\
}

#define __raw_writesb __raw_writesb
__raw_writesx(8, b)
#define __raw_writesw __raw_writesw
__raw_writesx(16, w)
#define __raw_writesl __raw_writesl
__raw_writesx(32, l)
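
/*
 * Driver-side usage sketch (not part of this header): feeding a
 * hypothetical TX FIFO from a possibly unaligned buffer. The generated
 * __raw_writesw() picks the get_unaligned() path automatically when
 * "src" is not 16-bit aligned. Names are illustrative only.
 */
static inline void example_fill_fifo(void __iomem *fifo, const void *src,
				     unsigned int words)
{
	__raw_writesw(fifo, src, words);	/* raw variant: caller adds barriers */
}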

/*
 * MMIO can also get buffered/optimized in the micro-architecture, so
 * barriers are needed. Based on the ARM model for the typical use case:
 *
 *	<ST [DMA buffer]>
 *	<writel MMIO "go" reg>
 *  or:
 *	<readl MMIO "status" reg>
 *	<LD [DMA buffer]>
 *
 * http://lkml.kernel.org/r/20150622133656.GG1583@arm.com
 */
#define readb(c)		({ u8  __v = readb_relaxed(c); __iormb(); __v; })
#define readw(c)		({ u16 __v = readw_relaxed(c); __iormb(); __v; })
#define readl(c)		({ u32 __v = readl_relaxed(c); __iormb(); __v; })
#define readsb(p,d,l)		({ __raw_readsb(p,d,l); __iormb(); })
#define readsw(p,d,l)		({ __raw_readsw(p,d,l); __iormb(); })
#define readsl(p,d,l)		({ __raw_readsl(p,d,l); __iormb(); })

#define writeb(v,c)		({ __iowmb(); writeb_relaxed(v,c); })
#define writew(v,c)		({ __iowmb(); writew_relaxed(v,c); })
#define writel(v,c)		({ __iowmb(); writel_relaxed(v,c); })
#define writesb(p,d,l)		({ __iowmb(); __raw_writesb(p,d,l); })
#define writesw(p,d,l)		({ __iowmb(); __raw_writesw(p,d,l); })
#define writesl(p,d,l)		({ __iowmb(); __raw_writesl(p,d,l); })
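
/*
 * Driver-side sketch of the ordering pattern described above (not part
 * of this header): publish a descriptor in a DMA buffer, then ring a
 * hypothetical "go" doorbell. Since writel() performs __iowmb() before
 * the store, the descriptor update is ordered before the doorbell.
 * All names and offsets are illustrative only.
 */
static inline void example_kick_dma(void __iomem *regs, u32 *desc, u32 busaddr)
{
	desc[0] = busaddr;		/* <ST [DMA buffer]> */
	writel(1, regs + 0x0);		/* <writel MMIO "go" reg> */
}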

/*
 * Relaxed API for drivers which can handle barrier ordering themselves
 *
 * These are also defined to perform little-endian accesses: to provide
 * the typical fixed-endian device register semantics, the byte order is
 * swapped on big-endian builds.
 *
 * http://lkml.kernel.org/r/201603100845.30602.arnd@arndb.de
 */
#define readb_relaxed(c)	__raw_readb(c)
#define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16) \
					__raw_readw(c)); __r; })
#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \
					__raw_readl(c)); __r; })

#define writeb_relaxed(v,c)	__raw_writeb(v,c)
#define writew_relaxed(v,c)	__raw_writew((__force u16) cpu_to_le16(v),c)
#define writel_relaxed(v,c)	__raw_writel((__force u32) cpu_to_le32(v),c)
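
/*
 * Driver-side usage sketch (not part of this header): batching register
 * writes with the relaxed accessors and paying for a single barrier via
 * the final non-relaxed writel(), whose __iowmb() orders everything
 * before it. Offsets and values are illustrative only.
 */
static inline void example_program_block(void __iomem *regs)
{
	writel_relaxed(0x1, regs + 0x00);	/* no barrier per access */
	writel_relaxed(0x2, regs + 0x04);
	writel(0x1, regs + 0x08);		/* __iowmb() then final store */
}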

#include <asm-generic/io.h>

#endif /* _ASM_ARC_IO_H */