v4.6 (arch/arm/include/asm/io.h)
/*
 *  arch/arm/include/asm/io.h
 *
 *  Copyright (C) 1996-2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Modifications:
 *  16-Sep-1996	RMK	Inlined the inx/outx functions & optimised for both
 *			constant addresses and variable addresses.
 *  04-Dec-1997	RMK	Moved a lot of this stuff to the new architecture
 *			specific IO header files.
 *  27-Mar-1999	PJB	Second parameter of memcpy_toio is const..
 *  04-Apr-1999	PJB	Added check_signature.
 *  12-Dec-1999	RMK	More cleanups
 *  18-Jun-2000 RMK	Removed virt_to_* and friends definitions
 *  05-Oct-2004 BJD     Moved memory string functions to use void __iomem
 */
#ifndef __ASM_ARM_IO_H
#define __ASM_ARM_IO_H

#ifdef __KERNEL__

#include <linux/string.h>
#include <linux/types.h>
#include <linux/blk_types.h>
#include <asm/byteorder.h>
#include <asm/memory.h>
#include <asm-generic/pci_iomap.h>
#include <xen/xen.h>

/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 */
#define isa_virt_to_bus virt_to_phys
#define isa_page_to_bus page_to_phys
#define isa_bus_to_virt phys_to_virt

/*
 * Atomic MMIO-wide IO modify
 */
extern void atomic_io_modify(void __iomem *reg, u32 mask, u32 set);
extern void atomic_io_modify_relaxed(void __iomem *reg, u32 mask, u32 set);
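
As a hedged illustration of these helpers, a driver-local sketch (EX_CTRL_ENABLE and regs are invented names, not part of this header) that updates bits under a mask in one atomic step:

#include <linux/io.h>

#define EX_CTRL_ENABLE	0x1	/* hypothetical control-register bit */

static void ex_ctrl_enable(void __iomem *regs)
{
	/* bits selected by the mask are replaced with the given set bits */
	atomic_io_modify(regs, EX_CTRL_ENABLE, EX_CTRL_ENABLE);
}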

/*
 * Generic IO read/write.  These perform native-endian accesses.  Note
 * that some architectures will want to re-define __raw_{read,write}w.
 */
void __raw_writesb(volatile void __iomem *addr, const void *data, int bytelen);
void __raw_writesw(volatile void __iomem *addr, const void *data, int wordlen);
void __raw_writesl(volatile void __iomem *addr, const void *data, int longlen);

void __raw_readsb(const volatile void __iomem *addr, void *data, int bytelen);
void __raw_readsw(const volatile void __iomem *addr, void *data, int wordlen);
void __raw_readsl(const volatile void __iomem *addr, void *data, int longlen);
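
These string variants hammer a single register address rather than walking through memory, which suits FIFO-style hardware; a minimal sketch under that assumption (ex_drain_fifo and fifo are illustrative names):

static void ex_drain_fifo(void __iomem *fifo, u16 *buf, int words)
{
	/* "words" consecutive native-endian halfword loads from one address */
	__raw_readsw(fifo, buf, words);
}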

#if __LINUX_ARM_ARCH__ < 6
/*
 * Half-word accesses are problematic with RiscPC due to limitations of
 * the bus. Rather than special-case the machine, just let the compiler
 * generate the access for CPUs prior to ARMv6.
 */
#define __raw_readw(a)         (__chk_io_ptr(a), *(volatile unsigned short __force *)(a))
#define __raw_writew(v,a)      ((void)(__chk_io_ptr(a), *(volatile unsigned short __force *)(a) = (v)))
#else
/*
 * When running under a hypervisor, we want to avoid I/O accesses with
 * writeback addressing modes as these incur a significant performance
 * overhead (the address generation must be emulated in software).
 */
#define __raw_writew __raw_writew
static inline void __raw_writew(u16 val, volatile void __iomem *addr)
{
	asm volatile("strh %1, %0"
		     : : "Q" (*(volatile u16 __force *)addr), "r" (val));
}

#define __raw_readw __raw_readw
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
	u16 val;
	asm volatile("ldrh %0, %1"
		     : "=r" (val)
		     : "Q" (*(volatile u16 __force *)addr));
	return val;
}
#endif

#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
{
	asm volatile("strb %1, %0"
		     : : "Qo" (*(volatile u8 __force *)addr), "r" (val));
}

#define __raw_writel __raw_writel
static inline void __raw_writel(u32 val, volatile void __iomem *addr)
{
	asm volatile("str %1, %0"
		     : : "Qo" (*(volatile u32 __force *)addr), "r" (val));
}

#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
	u8 val;
	asm volatile("ldrb %0, %1"
		     : "=r" (val)
		     : "Qo" (*(volatile u8 __force *)addr));
	return val;
}

#define __raw_readl __raw_readl
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
	u32 val;
	asm volatile("ldr %0, %1"
		     : "=r" (val)
		     : "Qo" (*(volatile u32 __force *)addr));
	return val;
}
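
To make the contrast with the ordered accessors concrete, a short sketch (the 0x40 doorbell offset is made up): __raw_writel() emits the bare str with no barrier and no byte-swap, whereas writel() converts to little-endian and issues __iowmb() before the store:

static void ex_ring_doorbell(void __iomem *regs)
{
	__raw_writel(1, regs + 0x40);	/* native-endian store, no ordering guarantee */
	writel(1, regs + 0x40);		/* LE store, ordered after earlier writes */
}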

/*
 * Architecture ioremap implementation.
 */
#define MT_DEVICE		0
#define MT_DEVICE_NONSHARED	1
#define MT_DEVICE_CACHED	2
#define MT_DEVICE_WC		3
/*
 * types 4 onwards can be found in asm/mach/map.h and are undefined
 * for ioremap
 */

/*
 * __arm_ioremap takes CPU physical address.
 * __arm_ioremap_pfn takes a Page Frame Number and an offset into that page
 * The _caller variety takes a __builtin_return_address(0) value for
 * /proc/vmalloc to use - and should only be used in non-inline functions.
 */
extern void __iomem *__arm_ioremap_caller(phys_addr_t, size_t, unsigned int,
	void *);
extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
extern void __iomem *__arm_ioremap_exec(phys_addr_t, size_t, bool cached);
extern void __iounmap(volatile void __iomem *addr);

extern void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
	unsigned int, void *);
extern void (*arch_iounmap)(volatile void __iomem *);

/*
 * Bad read/write accesses...
 */
extern void __readwrite_bug(const char *fn);

/*
 * A typesafe __io() helper
 */
static inline void __iomem *__typesafe_io(unsigned long addr)
{
	return (void __iomem *)addr;
}

#define IOMEM(x)	((void __force __iomem *)(x))

/* IO barriers */
#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#include <asm/barrier.h>
#define __iormb()		rmb()
#define __iowmb()		wmb()
#else
#define __iormb()		do { } while (0)
#define __iowmb()		do { } while (0)
#endif

/* PCI fixed i/o mapping */
#define PCI_IO_VIRT_BASE	0xfee00000
#define PCI_IOBASE		((void __iomem *)PCI_IO_VIRT_BASE)

#if defined(CONFIG_PCI)
void pci_ioremap_set_mem_type(int mem_type);
#else
static inline void pci_ioremap_set_mem_type(int mem_type) {}
#endif

extern int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr);

/*
 * Now, pick up the machine-defined IO definitions
 */
#ifdef CONFIG_NEED_MACH_IO_H
#include <mach/io.h>
#elif defined(CONFIG_PCI)
#define IO_SPACE_LIMIT	((resource_size_t)0xfffff)
#define __io(a)		__typesafe_io(PCI_IO_VIRT_BASE + ((a) & IO_SPACE_LIMIT))
#else
#define __io(a)		__typesafe_io((a) & IO_SPACE_LIMIT)
#endif

/*
 * This is the limit of PC card/PCI/ISA IO space, which is by default
 * 64K if we have PC card, PCI or ISA support.  Otherwise, default to
 * zero to prevent ISA/PCI drivers claiming IO space (and potentially
 * oopsing.)
 *
 * Only set this larger if you really need inb() et al. to operate over
 * a larger address space.  Note that SOC_COMMON ioremaps each socket's
 * IO space area, and so inb() et al. must be defined to operate as per
 * readb() et al. on such platforms.
 */
#ifndef IO_SPACE_LIMIT
#if defined(CONFIG_PCMCIA_SOC_COMMON) || defined(CONFIG_PCMCIA_SOC_COMMON_MODULE)
#define IO_SPACE_LIMIT ((resource_size_t)0xffffffff)
#elif defined(CONFIG_PCI) || defined(CONFIG_ISA) || defined(CONFIG_PCCARD)
#define IO_SPACE_LIMIT ((resource_size_t)0xffff)
#else
#define IO_SPACE_LIMIT ((resource_size_t)0)
#endif
#endif

/*
 *  IO port access primitives
 *  -------------------------
 *
 * The ARM doesn't have special IO access instructions; all IO is memory
 * mapped.  Note that these are defined to perform little endian accesses
 * only.  Their primary purpose is to access PCI and ISA peripherals.
 *
 * Note that for a big endian machine, this implies that the following
 * big endian mode connectivity is in place, as described by numerous
 * ARM documents:
 *
 *    PCI:  D0-D7   D8-D15 D16-D23 D24-D31
 *    ARM: D24-D31 D16-D23  D8-D15  D0-D7
 *
 * The machine specific io.h include defines __io to translate an "IO"
 * address to a memory address.
 *
 * Note that we prevent GCC re-ordering or caching values in expressions
 * by introducing sequence points into the in*() definitions.  Note that
 * __raw_* do not guarantee this behaviour.
 *
 * The {in,out}[bwl] macros are for emulating x86-style PCI/ISA IO space.
 */
#ifdef __io
#define outb(v,p)	({ __iowmb(); __raw_writeb(v,__io(p)); })
#define outw(v,p)	({ __iowmb(); __raw_writew((__force __u16) \
					cpu_to_le16(v),__io(p)); })
#define outl(v,p)	({ __iowmb(); __raw_writel((__force __u32) \
					cpu_to_le32(v),__io(p)); })

#define inb(p)	({ __u8 __v = __raw_readb(__io(p)); __iormb(); __v; })
#define inw(p)	({ __u16 __v = le16_to_cpu((__force __le16) \
			__raw_readw(__io(p))); __iormb(); __v; })
#define inl(p)	({ __u32 __v = le32_to_cpu((__force __le32) \
			__raw_readl(__io(p))); __iormb(); __v; })

#define outsb(p,d,l)		__raw_writesb(__io(p),d,l)
#define outsw(p,d,l)		__raw_writesw(__io(p),d,l)
#define outsl(p,d,l)		__raw_writesl(__io(p),d,l)

#define insb(p,d,l)		__raw_readsb(__io(p),d,l)
#define insw(p,d,l)		__raw_readsw(__io(p),d,l)
#define insl(p,d,l)		__raw_readsl(__io(p),d,l)
#endif
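
A hedged sketch of these port accessors in action, polling a hypothetical 16550-style UART behind legacy port 0x3f8 (all constants below are illustrative, not from this header):

#define EX_UART_BASE		0x3f8
#define EX_UART_THR		0x0	/* transmit holding register */
#define EX_UART_LSR		0x5	/* line status register */
#define EX_UART_LSR_THRE	0x20	/* transmitter ready for a byte */

static void ex_uart_putc(char c)
{
	/* inb()/outb() expand to barriered byte accesses on the __io() translation */
	while (!(inb(EX_UART_BASE + EX_UART_LSR) & EX_UART_LSR_THRE))
		cpu_relax();
	outb(c, EX_UART_BASE + EX_UART_THR);
}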

/*
 * String version of IO memory access ops:
 */
extern void _memcpy_fromio(void *, const volatile void __iomem *, size_t);
extern void _memcpy_toio(volatile void __iomem *, const void *, size_t);
extern void _memset_io(volatile void __iomem *, int, size_t);

#define mmiowb()

/*
 *  Memory access primitives
 *  ------------------------
 *
 * These perform PCI memory accesses via an ioremap region.  They don't
 * take an address as such, but a cookie.
 *
 * Again, these are defined to perform little endian accesses.  See the
 * IO port primitives for more information.
 */
#ifndef readl
#define readb_relaxed(c) ({ u8  __r = __raw_readb(c); __r; })
#define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16) \
					__raw_readw(c)); __r; })
#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \
					__raw_readl(c)); __r; })

#define writeb_relaxed(v,c)	__raw_writeb(v,c)
#define writew_relaxed(v,c)	__raw_writew((__force u16) cpu_to_le16(v),c)
#define writel_relaxed(v,c)	__raw_writel((__force u32) cpu_to_le32(v),c)

#define readb(c)		({ u8  __v = readb_relaxed(c); __iormb(); __v; })
#define readw(c)		({ u16 __v = readw_relaxed(c); __iormb(); __v; })
#define readl(c)		({ u32 __v = readl_relaxed(c); __iormb(); __v; })

#define writeb(v,c)		({ __iowmb(); writeb_relaxed(v,c); })
#define writew(v,c)		({ __iowmb(); writew_relaxed(v,c); })
#define writel(v,c)		({ __iowmb(); writel_relaxed(v,c); })

#define readsb(p,d,l)		__raw_readsb(p,d,l)
#define readsw(p,d,l)		__raw_readsw(p,d,l)
#define readsl(p,d,l)		__raw_readsl(p,d,l)

#define writesb(p,d,l)		__raw_writesb(p,d,l)
#define writesw(p,d,l)		__raw_writesw(p,d,l)
#define writesl(p,d,l)		__raw_writesl(p,d,l)
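
One pattern the _relaxed variants exist for, sketched under assumptions (EX_STATUS_READY and the register layout are invented): spin on the barrier-free read, then take one ordered read before consuming DMA'd data:

#define EX_STATUS_READY	0x1	/* hypothetical status bit */

static u32 ex_wait_ready(void __iomem *status)
{
	while (!(readl_relaxed(status) & EX_STATUS_READY))
		cpu_relax();
	/* readl() adds __iormb(), ordering this read before later memory reads */
	return readl(status);
}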

#ifndef __ARMBE__
static inline void memset_io(volatile void __iomem *dst, unsigned c,
	size_t count)
{
	extern void mmioset(void *, unsigned int, size_t);
	mmioset((void __force *)dst, c, count);
}
#define memset_io(dst,c,count) memset_io(dst,c,count)

static inline void memcpy_fromio(void *to, const volatile void __iomem *from,
	size_t count)
{
	extern void mmiocpy(void *, const void *, size_t);
	mmiocpy(to, (const void __force *)from, count);
}
#define memcpy_fromio(to,from,count) memcpy_fromio(to,from,count)

static inline void memcpy_toio(volatile void __iomem *to, const void *from,
	size_t count)
{
	extern void mmiocpy(void *, const void *, size_t);
	mmiocpy((void __force *)to, from, count);
}
#define memcpy_toio(to,from,count) memcpy_toio(to,from,count)

#else
#define memset_io(c,v,l)	_memset_io(c,(v),(l))
#define memcpy_fromio(a,c,l)	_memcpy_fromio((a),c,(l))
#define memcpy_toio(c,a,l)	_memcpy_toio(c,(a),(l))
#endif

#endif	/* readl */
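
A brief hedged example of the string MMIO ops: snapshotting a device mailbox into ordinary memory (the 64-byte size and names are placeholders). Plain memcpy() must not be used on __iomem pointers:

static void ex_read_mailbox(void *dst, const volatile void __iomem *mbox)
{
	memcpy_fromio(dst, mbox, 64);	/* bulk copy out of an ioremap'd region */
}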

/*
 * ioremap() and friends.
 *
 * ioremap() takes a resource address, and size.  Due to the ARM memory
 * types, it is important to use the correct ioremap() function as each
 * mapping has specific properties.
 *
 * Function		Memory type	Cacheability	Cache hint
 * ioremap()		Device		n/a		n/a
 * ioremap_nocache()	Device		n/a		n/a
 * ioremap_cache()	Normal		Writeback	Read allocate
 * ioremap_wc()		Normal		Non-cacheable	n/a
 * ioremap_wt()		Normal		Non-cacheable	n/a
 *
 * All device mappings have the following properties:
 * - no access speculation
 * - no repetition (eg, on return from an exception)
 * - number, order and size of accesses are maintained
 * - unaligned accesses are "unpredictable"
 * - writes may be delayed before they hit the endpoint device
 *
 * ioremap_nocache() is the same as ioremap() as there are too many device
 * drivers using this for device registers, and documentation which tells
 * people to use it for such for this to be any different.  This is not a
 * safe fallback for memory-like mappings, or memory regions where the
 * compiler may generate unaligned accesses - eg, via inlining its own
 * memcpy.
 *
 * All normal memory mappings have the following properties:
 * - reads can be repeated with no side effects
 * - repeated reads return the last value written
 * - reads can fetch additional locations without side effects
 * - writes can be repeated (in certain cases) with no side effects
 * - writes can be merged before accessing the target
 * - unaligned accesses can be supported
 * - ordering is not guaranteed without explicit dependencies or barrier
 *   instructions
 * - writes may be delayed before they hit the endpoint memory
 *
 * The cache hint is only a performance hint: CPUs may alias these hints.
 * Eg, a CPU not implementing read allocate but implementing write allocate
 * will provide a write allocate mapping instead.
 */
void __iomem *ioremap(resource_size_t res_cookie, size_t size);
#define ioremap ioremap
#define ioremap_nocache ioremap

void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size);
#define ioremap_cache ioremap_cache

void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size);
#define ioremap_wc ioremap_wc
#define ioremap_wt ioremap_wc

void iounmap(volatile void __iomem *iomem_cookie);
#define iounmap iounmap
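
The usual map/access/unmap lifecycle, as a minimal sketch with made-up physical base and size constants:

#define EX_REG_BASE	0x40000000	/* hypothetical device physical address */
#define EX_REG_SIZE	0x1000

static int ex_probe_one(void)
{
	void __iomem *base = ioremap(EX_REG_BASE, EX_REG_SIZE);

	if (!base)
		return -ENOMEM;
	writel(1, base);	/* Device-type mapping: non-speculative, ordered */
	iounmap(base);
	return 0;
}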

/*
 * io{read,write}{16,32}be() macros
 */
#define ioread16be(p)		({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
#define ioread32be(p)		({ __u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })

#define iowrite16be(v,p)	({ __iowmb(); __raw_writew((__force __u16)cpu_to_be16(v), p); })
#define iowrite32be(v,p)	({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); })
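
Hedged illustration: some IP blocks keep big-endian registers even on a little-endian SoC, which is what these accessors are for (reading a version register at offset 0 is invented for the example):

static u32 ex_read_be_version(void __iomem *regs)
{
	/* byte-swapped on a little-endian kernel, a plain load on big-endian */
	return ioread32be(regs);
}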

#ifndef ioport_map
#define ioport_map ioport_map
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
#endif
#ifndef ioport_unmap
#define ioport_unmap ioport_unmap
extern void ioport_unmap(void __iomem *addr);
#endif

struct pci_dev;

#define pci_iounmap pci_iounmap
extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

#include <asm-generic/io.h>

/*
 * can the hardware map this into one segment or not, given no other
 * constraints.
 */
#define BIOVEC_MERGEABLE(vec1, vec2)	\
	((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))

struct bio_vec;
extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
				      const struct bio_vec *vec2);
#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)				\
	(__BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&				\
	 (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2)))

#ifdef CONFIG_MMU
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
extern int devmem_is_allowed(unsigned long pfn);
#endif

/*
 * Register ISA memory and port locations for glibc iopl/inb/outb
 * emulation.
 */
extern void register_isa_ports(unsigned int mmio, unsigned int io,
			       unsigned int io_shift);

#endif	/* __KERNEL__ */
#endif	/* __ASM_ARM_IO_H */
v4.17 (arch/arm/include/asm/io.h)
/*
 *  arch/arm/include/asm/io.h
 *
 *  Copyright (C) 1996-2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Modifications:
 *  16-Sep-1996	RMK	Inlined the inx/outx functions & optimised for both
 *			constant addresses and variable addresses.
 *  04-Dec-1997	RMK	Moved a lot of this stuff to the new architecture
 *			specific IO header files.
 *  27-Mar-1999	PJB	Second parameter of memcpy_toio is const..
 *  04-Apr-1999	PJB	Added check_signature.
 *  12-Dec-1999	RMK	More cleanups
 *  18-Jun-2000 RMK	Removed virt_to_* and friends definitions
 *  05-Oct-2004 BJD     Moved memory string functions to use void __iomem
 */
#ifndef __ASM_ARM_IO_H
#define __ASM_ARM_IO_H

#ifdef __KERNEL__

#include <linux/string.h>
#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/memory.h>
#include <asm-generic/pci_iomap.h>
#include <xen/xen.h>

/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 */
#define isa_virt_to_bus virt_to_phys
#define isa_page_to_bus page_to_phys
#define isa_bus_to_virt phys_to_virt

/*
 * Atomic MMIO-wide IO modify
 */
extern void atomic_io_modify(void __iomem *reg, u32 mask, u32 set);
extern void atomic_io_modify_relaxed(void __iomem *reg, u32 mask, u32 set);

/*
 * Generic IO read/write.  These perform native-endian accesses.  Note
 * that some architectures will want to re-define __raw_{read,write}w.
 */
void __raw_writesb(volatile void __iomem *addr, const void *data, int bytelen);
void __raw_writesw(volatile void __iomem *addr, const void *data, int wordlen);
void __raw_writesl(volatile void __iomem *addr, const void *data, int longlen);

void __raw_readsb(const volatile void __iomem *addr, void *data, int bytelen);
void __raw_readsw(const volatile void __iomem *addr, void *data, int wordlen);
void __raw_readsl(const volatile void __iomem *addr, void *data, int longlen);

#if __LINUX_ARM_ARCH__ < 6
/*
 * Half-word accesses are problematic with RiscPC due to limitations of
 * the bus. Rather than special-case the machine, just let the compiler
 * generate the access for CPUs prior to ARMv6.
 */
#define __raw_readw(a)         (__chk_io_ptr(a), *(volatile unsigned short __force *)(a))
#define __raw_writew(v,a)      ((void)(__chk_io_ptr(a), *(volatile unsigned short __force *)(a) = (v)))
#else
/*
 * When running under a hypervisor, we want to avoid I/O accesses with
 * writeback addressing modes as these incur a significant performance
 * overhead (the address generation must be emulated in software).
 */
#define __raw_writew __raw_writew
static inline void __raw_writew(u16 val, volatile void __iomem *addr)
{
	asm volatile("strh %1, %0"
		     : : "Q" (*(volatile u16 __force *)addr), "r" (val));
}

#define __raw_readw __raw_readw
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
	u16 val;
	asm volatile("ldrh %0, %1"
		     : "=r" (val)
		     : "Q" (*(volatile u16 __force *)addr));
	return val;
}
#endif

#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
{
	asm volatile("strb %1, %0"
		     : : "Qo" (*(volatile u8 __force *)addr), "r" (val));
}

#define __raw_writel __raw_writel
static inline void __raw_writel(u32 val, volatile void __iomem *addr)
{
	asm volatile("str %1, %0"
		     : : "Qo" (*(volatile u32 __force *)addr), "r" (val));
}

#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
	u8 val;
	asm volatile("ldrb %0, %1"
		     : "=r" (val)
		     : "Qo" (*(volatile u8 __force *)addr));
	return val;
}

#define __raw_readl __raw_readl
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
	u32 val;
	asm volatile("ldr %0, %1"
		     : "=r" (val)
		     : "Qo" (*(volatile u32 __force *)addr));
	return val;
}

/*
 * Architecture ioremap implementation.
 */
#define MT_DEVICE		0
#define MT_DEVICE_NONSHARED	1
#define MT_DEVICE_CACHED	2
#define MT_DEVICE_WC		3
/*
 * types 4 onwards can be found in asm/mach/map.h and are undefined
 * for ioremap
 */

/*
 * __arm_ioremap takes CPU physical address.
 * __arm_ioremap_pfn takes a Page Frame Number and an offset into that page
 * The _caller variety takes a __builtin_return_address(0) value for
 * /proc/vmalloc to use - and should only be used in non-inline functions.
 */
extern void __iomem *__arm_ioremap_caller(phys_addr_t, size_t, unsigned int,
	void *);
extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
extern void __iomem *__arm_ioremap_exec(phys_addr_t, size_t, bool cached);
extern void __iounmap(volatile void __iomem *addr);

extern void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
	unsigned int, void *);
extern void (*arch_iounmap)(volatile void __iomem *);

/*
 * Bad read/write accesses...
 */
extern void __readwrite_bug(const char *fn);

/*
 * A typesafe __io() helper
 */
static inline void __iomem *__typesafe_io(unsigned long addr)
{
	return (void __iomem *)addr;
}

#define IOMEM(x)	((void __force __iomem *)(x))

/* IO barriers */
#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#include <asm/barrier.h>
#define __iormb()		rmb()
#define __iowmb()		wmb()
#else
#define __iormb()		do { } while (0)
#define __iowmb()		do { } while (0)
#endif

/* PCI fixed i/o mapping */
#define PCI_IO_VIRT_BASE	0xfee00000
#define PCI_IOBASE		((void __iomem *)PCI_IO_VIRT_BASE)

#if defined(CONFIG_PCI)
void pci_ioremap_set_mem_type(int mem_type);
#else
static inline void pci_ioremap_set_mem_type(int mem_type) {}
#endif

extern int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr);

/*
 * PCI configuration space mapping function.
 *
 * The PCI specification does not allow configuration write
 * transactions to be posted. Add an arch specific
 * pci_remap_cfgspace() definition that is implemented
 * through strongly ordered memory mappings.
 */
#define pci_remap_cfgspace pci_remap_cfgspace
void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size);
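
For instance, a host-bridge driver might map its config window through this helper so that config writes are not posted; a sketch assuming a driver-local struct resource (res is a placeholder):

static void __iomem *ex_map_cfg(struct resource *res)
{
	/* strongly-ordered mapping, per the PCI posting rule above */
	return pci_remap_cfgspace(res->start, resource_size(res));
}
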
/*
 * Now, pick up the machine-defined IO definitions
 */
#ifdef CONFIG_NEED_MACH_IO_H
#include <mach/io.h>
#elif defined(CONFIG_PCI)
#define IO_SPACE_LIMIT	((resource_size_t)0xfffff)
#define __io(a)		__typesafe_io(PCI_IO_VIRT_BASE + ((a) & IO_SPACE_LIMIT))
#else
#define __io(a)		__typesafe_io((a) & IO_SPACE_LIMIT)
#endif

/*
 * This is the limit of PC card/PCI/ISA IO space, which is by default
 * 64K if we have PC card, PCI or ISA support.  Otherwise, default to
 * zero to prevent ISA/PCI drivers claiming IO space (and potentially
 * oopsing.)
 *
 * Only set this larger if you really need inb() et al. to operate over
 * a larger address space.  Note that SOC_COMMON ioremaps each socket's
 * IO space area, and so inb() et al. must be defined to operate as per
 * readb() et al. on such platforms.
 */
#ifndef IO_SPACE_LIMIT
#if defined(CONFIG_PCMCIA_SOC_COMMON) || defined(CONFIG_PCMCIA_SOC_COMMON_MODULE)
#define IO_SPACE_LIMIT ((resource_size_t)0xffffffff)
#elif defined(CONFIG_PCI) || defined(CONFIG_ISA) || defined(CONFIG_PCCARD)
#define IO_SPACE_LIMIT ((resource_size_t)0xffff)
#else
#define IO_SPACE_LIMIT ((resource_size_t)0)
#endif
#endif

/*
 *  IO port access primitives
 *  -------------------------
 *
 * The ARM doesn't have special IO access instructions; all IO is memory
 * mapped.  Note that these are defined to perform little endian accesses
 * only.  Their primary purpose is to access PCI and ISA peripherals.
 *
 * Note that for a big endian machine, this implies that the following
 * big endian mode connectivity is in place, as described by numerous
 * ARM documents:
 *
 *    PCI:  D0-D7   D8-D15 D16-D23 D24-D31
 *    ARM: D24-D31 D16-D23  D8-D15  D0-D7
 *
 * The machine specific io.h include defines __io to translate an "IO"
 * address to a memory address.
 *
 * Note that we prevent GCC re-ordering or caching values in expressions
 * by introducing sequence points into the in*() definitions.  Note that
 * __raw_* do not guarantee this behaviour.
 *
 * The {in,out}[bwl] macros are for emulating x86-style PCI/ISA IO space.
 */
#ifdef __io
#define outb(v,p)	({ __iowmb(); __raw_writeb(v,__io(p)); })
#define outw(v,p)	({ __iowmb(); __raw_writew((__force __u16) \
					cpu_to_le16(v),__io(p)); })
#define outl(v,p)	({ __iowmb(); __raw_writel((__force __u32) \
					cpu_to_le32(v),__io(p)); })

#define inb(p)	({ __u8 __v = __raw_readb(__io(p)); __iormb(); __v; })
#define inw(p)	({ __u16 __v = le16_to_cpu((__force __le16) \
			__raw_readw(__io(p))); __iormb(); __v; })
#define inl(p)	({ __u32 __v = le32_to_cpu((__force __le32) \
			__raw_readl(__io(p))); __iormb(); __v; })

#define outsb(p,d,l)		__raw_writesb(__io(p),d,l)
#define outsw(p,d,l)		__raw_writesw(__io(p),d,l)
#define outsl(p,d,l)		__raw_writesl(__io(p),d,l)

#define insb(p,d,l)		__raw_readsb(__io(p),d,l)
#define insw(p,d,l)		__raw_readsw(__io(p),d,l)
#define insl(p,d,l)		__raw_readsl(__io(p),d,l)
#endif

/*
 * String version of IO memory access ops:
 */
extern void _memcpy_fromio(void *, const volatile void __iomem *, size_t);
extern void _memcpy_toio(volatile void __iomem *, const void *, size_t);
extern void _memset_io(volatile void __iomem *, int, size_t);

#define mmiowb()

/*
 *  Memory access primitives
 *  ------------------------
 *
 * These perform PCI memory accesses via an ioremap region.  They don't
 * take an address as such, but a cookie.
 *
 * Again, these are defined to perform little endian accesses.  See the
 * IO port primitives for more information.
 */
#ifndef readl
#define readb_relaxed(c) ({ u8  __r = __raw_readb(c); __r; })
#define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16) \
					__raw_readw(c)); __r; })
#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \
					__raw_readl(c)); __r; })

#define writeb_relaxed(v,c)	__raw_writeb(v,c)
#define writew_relaxed(v,c)	__raw_writew((__force u16) cpu_to_le16(v),c)
#define writel_relaxed(v,c)	__raw_writel((__force u32) cpu_to_le32(v),c)

#define readb(c)		({ u8  __v = readb_relaxed(c); __iormb(); __v; })
#define readw(c)		({ u16 __v = readw_relaxed(c); __iormb(); __v; })
#define readl(c)		({ u32 __v = readl_relaxed(c); __iormb(); __v; })

#define writeb(v,c)		({ __iowmb(); writeb_relaxed(v,c); })
#define writew(v,c)		({ __iowmb(); writew_relaxed(v,c); })
#define writel(v,c)		({ __iowmb(); writel_relaxed(v,c); })

#define readsb(p,d,l)		__raw_readsb(p,d,l)
#define readsw(p,d,l)		__raw_readsw(p,d,l)
#define readsl(p,d,l)		__raw_readsl(p,d,l)

#define writesb(p,d,l)		__raw_writesb(p,d,l)
#define writesw(p,d,l)		__raw_writesw(p,d,l)
#define writesl(p,d,l)		__raw_writesl(p,d,l)

#ifndef __ARMBE__
static inline void memset_io(volatile void __iomem *dst, unsigned c,
	size_t count)
{
	extern void mmioset(void *, unsigned int, size_t);
	mmioset((void __force *)dst, c, count);
}
#define memset_io(dst,c,count) memset_io(dst,c,count)

static inline void memcpy_fromio(void *to, const volatile void __iomem *from,
	size_t count)
{
	extern void mmiocpy(void *, const void *, size_t);
	mmiocpy(to, (const void __force *)from, count);
}
#define memcpy_fromio(to,from,count) memcpy_fromio(to,from,count)

static inline void memcpy_toio(volatile void __iomem *to, const void *from,
	size_t count)
{
	extern void mmiocpy(void *, const void *, size_t);
	mmiocpy((void __force *)to, from, count);
}
#define memcpy_toio(to,from,count) memcpy_toio(to,from,count)

#else
#define memset_io(c,v,l)	_memset_io(c,(v),(l))
#define memcpy_fromio(a,c,l)	_memcpy_fromio((a),c,(l))
#define memcpy_toio(c,a,l)	_memcpy_toio(c,(a),(l))
#endif

#endif	/* readl */

/*
 * ioremap() and friends.
 *
 * ioremap() takes a resource address, and size.  Due to the ARM memory
 * types, it is important to use the correct ioremap() function as each
 * mapping has specific properties.
 *
 * Function		Memory type	Cacheability	Cache hint
 * ioremap()		Device		n/a		n/a
 * ioremap_nocache()	Device		n/a		n/a
 * ioremap_cache()	Normal		Writeback	Read allocate
 * ioremap_wc()		Normal		Non-cacheable	n/a
 * ioremap_wt()		Normal		Non-cacheable	n/a
 *
 * All device mappings have the following properties:
 * - no access speculation
 * - no repetition (eg, on return from an exception)
 * - number, order and size of accesses are maintained
 * - unaligned accesses are "unpredictable"
 * - writes may be delayed before they hit the endpoint device
 *
 * ioremap_nocache() is the same as ioremap() as there are too many device
 * drivers using this for device registers, and documentation which tells
 * people to use it for such for this to be any different.  This is not a
 * safe fallback for memory-like mappings, or memory regions where the
 * compiler may generate unaligned accesses - eg, via inlining its own
 * memcpy.
 *
 * All normal memory mappings have the following properties:
 * - reads can be repeated with no side effects
 * - repeated reads return the last value written
 * - reads can fetch additional locations without side effects
 * - writes can be repeated (in certain cases) with no side effects
 * - writes can be merged before accessing the target
 * - unaligned accesses can be supported
 * - ordering is not guaranteed without explicit dependencies or barrier
 *   instructions
 * - writes may be delayed before they hit the endpoint memory
 *
 * The cache hint is only a performance hint: CPUs may alias these hints.
 * Eg, a CPU not implementing read allocate but implementing write allocate
 * will provide a write allocate mapping instead.
 */
void __iomem *ioremap(resource_size_t res_cookie, size_t size);
#define ioremap ioremap
#define ioremap_nocache ioremap

/*
 * Do not use ioremap_cache for mapping memory. Use memremap instead.
 */
void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size);
#define ioremap_cache ioremap_cache
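
Following that advice, a hedged sketch of mapping a reserved RAM carveout write-back with memremap() rather than ioremap_cache() (base and size are placeholders):

static void *ex_map_carveout(phys_addr_t base, size_t size)
{
	/* write-back mapping; returns a normal pointer, not an __iomem cookie */
	return memremap(base, size, MEMREMAP_WB);
}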

/*
 * Do not use ioremap_cached in new code. Provided for the benefit of
 * the pxa2xx-flash MTD driver only.
 */
void __iomem *ioremap_cached(resource_size_t res_cookie, size_t size);

void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size);
#define ioremap_wc ioremap_wc
#define ioremap_wt ioremap_wc

void iounmap(volatile void __iomem *iomem_cookie);
#define iounmap iounmap

void *arch_memremap_wb(phys_addr_t phys_addr, size_t size);
#define arch_memremap_wb arch_memremap_wb

/*
 * io{read,write}{16,32}be() macros
 */
#define ioread16be(p)		({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
#define ioread32be(p)		({ __u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })

#define iowrite16be(v,p)	({ __iowmb(); __raw_writew((__force __u16)cpu_to_be16(v), p); })
#define iowrite32be(v,p)	({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); })

#ifndef ioport_map
#define ioport_map ioport_map
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
#endif
#ifndef ioport_unmap
#define ioport_unmap ioport_unmap
extern void ioport_unmap(void __iomem *addr);
#endif

struct pci_dev;

#define pci_iounmap pci_iounmap
extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

#include <asm-generic/io.h>

/*
 * can the hardware map this into one segment or not, given no other
 * constraints.
 */
#define BIOVEC_MERGEABLE(vec1, vec2)	\
	((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))

struct bio_vec;
extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
				      const struct bio_vec *vec2);
#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)				\
	(__BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&				\
	 (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2)))

#ifdef CONFIG_MMU
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
extern int devmem_is_allowed(unsigned long pfn);
#endif

/*
 * Register ISA memory and port locations for glibc iopl/inb/outb
 * emulation.
 */
extern void register_isa_ports(unsigned int mmio, unsigned int io,
			       unsigned int io_shift);

#endif	/* __KERNEL__ */
#endif	/* __ASM_ARM_IO_H */