v3.5.6
 
#ifndef __ASM_SH_IO_H
#define __ASM_SH_IO_H

/*
 * Convention:
 *    read{b,w,l,q}/write{b,w,l,q} are for PCI,
 *    while in{b,w,l}/out{b,w,l} are for ISA
 *
 * In addition we have 'pausing' versions: in{b,w,l}_p/out{b,w,l}_p
 * and 'string' versions: ins{b,w,l}/outs{b,w,l}
 *
 * While read{b,w,l,q} and write{b,w,l,q} contain memory barriers
 * automatically, there are also __raw versions, which do not.
 */
#include <linux/errno.h>
#include <asm/cache.h>
#include <asm/addrspace.h>
#include <asm/machvec.h>
#include <asm/pgtable.h>
#include <asm-generic/iomap.h>

#ifdef __KERNEL__
#define __IO_PREFIX     generic
#include <asm/io_generic.h>
#include <asm/io_trapped.h>
#include <mach/mangle-port.h>

#define __raw_writeb(v,a)	(__chk_io_ptr(a), *(volatile u8  __force *)(a) = (v))
#define __raw_writew(v,a)	(__chk_io_ptr(a), *(volatile u16 __force *)(a) = (v))
#define __raw_writel(v,a)	(__chk_io_ptr(a), *(volatile u32 __force *)(a) = (v))
#define __raw_writeq(v,a)	(__chk_io_ptr(a), *(volatile u64 __force *)(a) = (v))

#define __raw_readb(a)		(__chk_io_ptr(a), *(volatile u8  __force *)(a))
#define __raw_readw(a)		(__chk_io_ptr(a), *(volatile u16 __force *)(a))
#define __raw_readl(a)		(__chk_io_ptr(a), *(volatile u32 __force *)(a))
#define __raw_readq(a)		(__chk_io_ptr(a), *(volatile u64 __force *)(a))

#define readb_relaxed(c)	({ u8  __v = ioswabb(__raw_readb(c)); __v; })
#define readw_relaxed(c)	({ u16 __v = ioswabw(__raw_readw(c)); __v; })
#define readl_relaxed(c)	({ u32 __v = ioswabl(__raw_readl(c)); __v; })
#define readq_relaxed(c)	({ u64 __v = ioswabq(__raw_readq(c)); __v; })

#define writeb_relaxed(v,c)	((void)__raw_writeb((__force  u8)ioswabb(v),c))
#define writew_relaxed(v,c)	((void)__raw_writew((__force u16)ioswabw(v),c))
#define writel_relaxed(v,c)	((void)__raw_writel((__force u32)ioswabl(v),c))
#define writeq_relaxed(v,c)	((void)__raw_writeq((__force u64)ioswabq(v),c))

#define readb(a)		({ u8  r_ = readb_relaxed(a); rmb(); r_; })
#define readw(a)		({ u16 r_ = readw_relaxed(a); rmb(); r_; })
#define readl(a)		({ u32 r_ = readl_relaxed(a); rmb(); r_; })
#define readq(a)		({ u64 r_ = readq_relaxed(a); rmb(); r_; })

#define writeb(v,a)		({ wmb(); writeb_relaxed((v),(a)); })
#define writew(v,a)		({ wmb(); writew_relaxed((v),(a)); })
#define writel(v,a)		({ wmb(); writel_relaxed((v),(a)); })
#define writeq(v,a)		({ wmb(); writeq_relaxed((v),(a)); })
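
/*
 * Illustrative sketch (not part of the original header): readl()/writel()
 * add the rmb()/wmb() barriers and the ioswab*() port mangling from
 * <mach/mangle-port.h>; the *_relaxed() forms keep the mangling but drop
 * the barriers, and the __raw_*() forms drop both. The device layout
 * below is hypothetical.
 */
static inline u32 example_kick_and_check(void __iomem *regs)
{
	writel(1, regs + 0x04);		/* wmb() is issued before the store */
	return readl(regs + 0x00);	/* rmb() is issued after the load */
}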

#define readsb(p,d,l)		__raw_readsb(p,d,l)
#define readsw(p,d,l)		__raw_readsw(p,d,l)
#define readsl(p,d,l)		__raw_readsl(p,d,l)

#define writesb(p,d,l)		__raw_writesb(p,d,l)
#define writesw(p,d,l)		__raw_writesw(p,d,l)
#define writesl(p,d,l)		__raw_writesl(p,d,l)
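
/*
 * Illustrative sketch (not part of the original header): the string forms
 * move 'count' items between a buffer and one fixed device register, e.g.
 * draining an RX FIFO. The 0x10 offset and the length are hypothetical.
 */
static inline void example_drain_rx_fifo(void __iomem *regs, u16 *buf)
{
	readsw(regs + 0x10, buf, 256);	/* 256 halfword loads from one register */
}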

#define __BUILD_UNCACHED_IO(bwlq, type)					\
static inline type read##bwlq##_uncached(unsigned long addr)		\
{									\
	type ret;							\
	jump_to_uncached();						\
	ret = __raw_read##bwlq(addr);					\
	back_to_cached();						\
	return ret;							\
}									\
									\
static inline void write##bwlq##_uncached(type v, unsigned long addr)	\
{									\
	jump_to_uncached();						\
	__raw_write##bwlq(v, addr);					\
	back_to_cached();						\
}

__BUILD_UNCACHED_IO(b, u8)
__BUILD_UNCACHED_IO(w, u16)
__BUILD_UNCACHED_IO(l, u32)
__BUILD_UNCACHED_IO(q, u64)
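
/*
 * Illustrative sketch (not part of the original header): the instantiations
 * above generate read{b,w,l,q}_uncached()/write{b,w,l,q}_uncached(), which
 * bracket the raw access with jump_to_uncached()/back_to_cached() so it is
 * performed while executing from the uncached alias of the kernel, as cache
 * control code requires. The register address below is hypothetical.
 */
static inline void example_uncached_poke(void)
{
	writel_uncached(0x1, 0xff000010);	/* hypothetical P4 control register */
}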

#define __BUILD_MEMORY_STRING(pfx, bwlq, type)				\
									\
static inline void							\
pfx##writes##bwlq(volatile void __iomem *mem, const void *addr,		\
		  unsigned int count)					\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__raw_write##bwlq(*__addr, mem);			\
		__addr++;						\
	}								\
}									\
									\
static inline void pfx##reads##bwlq(volatile void __iomem *mem,		\
				    void *addr, unsigned int count)	\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __raw_read##bwlq(mem);			\
		__addr++;						\
	}								\
}

__BUILD_MEMORY_STRING(__raw_, b, u8)
__BUILD_MEMORY_STRING(__raw_, w, u16)

#ifdef CONFIG_SUPERH32
void __raw_writesl(void __iomem *addr, const void *data, int longlen);
void __raw_readsl(const void __iomem *addr, void *data, int longlen);
#else
__BUILD_MEMORY_STRING(__raw_, l, u32)
#endif

__BUILD_MEMORY_STRING(__raw_, q, u64)
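
/*
 * Illustrative sketch (not part of the original header): the builders above
 * expand to __raw_reads{b,w,q}()/__raw_writes{b,w,q}(); on SUPERH32 the
 * 32-bit pair is provided out of line instead (see the prototypes above).
 * The offset and count here are hypothetical.
 */
static inline void example_send_block(void __iomem *regs, const u16 *data)
{
	__raw_writesw(regs + 0x20, data, 64);	/* 64 raw halfword stores */
}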

#ifdef CONFIG_HAS_IOPORT

/*
 * Slow down I/O port space accesses for antique hardware.
 */
#undef CONF_SLOWDOWN_IO

/*
 * On SuperH I/O ports are memory mapped, so we access them using normal
 * load/store instructions. sh_io_port_base is the virtual address to
 * which all ports are being mapped.
 */
extern const unsigned long sh_io_port_base;

static inline void __set_io_port_base(unsigned long pbase)
{
	*(unsigned long *)&sh_io_port_base = pbase;
	barrier();
}
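
/*
 * Note (an assumption, mirroring the equivalent MIPS trick): sh_io_port_base
 * is declared const so the compiler may treat the port base as a constant,
 * and __set_io_port_base() casts the const away for the one early write that
 * initializes it, with barrier() keeping the store ordered. A machine vector
 * would call it once at setup, e.g. with a hypothetical base:
 *
 *	__set_io_port_base(0xa0000000);
 */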

#ifdef CONFIG_GENERIC_IOMAP
#define __ioport_map ioport_map
#else
extern void __iomem *__ioport_map(unsigned long addr, unsigned int size);
#endif

#ifdef CONF_SLOWDOWN_IO
#define SLOW_DOWN_IO __raw_readw(sh_io_port_base)
#else
#define SLOW_DOWN_IO
#endif

#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, p, slow)			\
									\
static inline void pfx##out##bwlq##p(type val, unsigned long port)	\
{									\
	volatile type *__addr;						\
									\
	__addr = __ioport_map(port, sizeof(type));			\
	*__addr = val;							\
	slow;								\
}									\
									\
static inline type pfx##in##bwlq##p(unsigned long port)			\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = __ioport_map(port, sizeof(type));			\
	__val = *__addr;						\
	slow;								\
									\
	return __val;							\
}

#define __BUILD_IOPORT_PFX(bus, bwlq, type)				\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, ,)			\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, _p, SLOW_DOWN_IO)

#define BUILDIO_IOPORT(bwlq, type)					\
	__BUILD_IOPORT_PFX(, bwlq, type)

BUILDIO_IOPORT(b, u8)
BUILDIO_IOPORT(w, u16)
BUILDIO_IOPORT(l, u32)
BUILDIO_IOPORT(q, u64)
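
/*
 * Illustrative sketch (not part of the original header): the instantiations
 * above generate in{b,w,l,q}()/out{b,w,l,q}() plus the pausing _p variants;
 * with CONF_SLOWDOWN_IO left undefined the _p forms behave identically.
 * The port number is hypothetical.
 */
static inline u8 example_port_io(void)
{
	outb(0x0a, 0x3f8);	/* plain store through __ioport_map() */
	return inb_p(0x3f8);	/* pausing variant; SLOW_DOWN_IO is empty here */
}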

#define __BUILD_IOPORT_STRING(bwlq, type)				\
									\
static inline void outs##bwlq(unsigned long port, const void *addr,	\
			      unsigned int count)			\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		out##bwlq(*__addr, port);				\
		__addr++;						\
	}								\
}									\
									\
static inline void ins##bwlq(unsigned long port, void *addr,		\
			     unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = in##bwlq(port);				\
		__addr++;						\
	}								\
}

__BUILD_IOPORT_STRING(b, u8)
__BUILD_IOPORT_STRING(w, u16)
__BUILD_IOPORT_STRING(l, u32)
__BUILD_IOPORT_STRING(q, u64)
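
/*
 * Illustrative sketch (not part of the original header): the port string
 * operations repeatedly access a single port, e.g. reading a 512-byte block
 * as 256 halfwords from a hypothetical IDE-style data port.
 */
static inline void example_read_sector(u16 *buf)
{
	insw(0x1f0, buf, 256);	/* 256 reads of the same data port */
}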

#else /* !CONFIG_HAS_IOPORT */

#include <asm/io_noioport.h>

#endif


#define IO_SPACE_LIMIT 0xffffffff

/* synco on SH-4A, otherwise a nop */
#define mmiowb()		wmb()

/* We really want to try and get these to memcpy etc */
void memcpy_fromio(void *, const volatile void __iomem *, unsigned long);
void memcpy_toio(volatile void __iomem *, const void *, unsigned long);
void memset_io(volatile void __iomem *, int, unsigned long);

/* Quad-word real-mode I/O, don't ask... */
unsigned long long peek_real_address_q(unsigned long long addr);
unsigned long long poke_real_address_q(unsigned long long addr,
				       unsigned long long val);

#if !defined(CONFIG_MMU)
#define virt_to_phys(address)	((unsigned long)(address))
#define phys_to_virt(address)	((void *)(address))
#else
#define virt_to_phys(address)	(__pa(address))
#define phys_to_virt(address)	(__va(address))
#endif

/*
 * On 32-bit SH, we traditionally have the whole physical address space
 * mapped at all times (as MIPS does), so "ioremap()" and "iounmap()" do
 * not need to do anything but place the address in the proper segment.
 * This is true for P1 and P2 addresses, as well as some P3 ones.
 * However, most of the P3 addresses and newer cores using extended
 * addressing need to map through page tables, so the ioremap()
 * implementation becomes a bit more complicated.
 *
 * See arch/sh/mm/ioremap.c for additional notes on this.
 *
 * We cheat a bit and always return uncacheable areas until we've fixed
 * the drivers to handle caching properly.
 *
 * On the SH-5 the concept of segmentation in the 1:1 PXSEG sense simply
 * doesn't exist, so everything must go through page tables.
 */
#ifdef CONFIG_MMU
void __iomem *__ioremap_caller(phys_addr_t offset, unsigned long size,
			       pgprot_t prot, void *caller);
void __iounmap(void __iomem *addr);

static inline void __iomem *
__ioremap(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
	return __ioremap_caller(offset, size, prot, __builtin_return_address(0));
}

static inline void __iomem *
__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
#ifdef CONFIG_29BIT
	phys_addr_t last_addr = offset + size - 1;

	/*
	 * For P1 and P2 space this is trivial, as everything is already
	 * mapped. Uncached access for P1 addresses is done through P2.
	 * In the P3 case or for addresses outside of the 29-bit space,
	 * mapping must be done by the PMB or by using page tables.
	 */
	if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
		u64 flags = pgprot_val(prot);

		/*
		 * Anything using the legacy PTEA space attributes needs
		 * to be kicked down to page table mappings.
		 */
		if (unlikely(flags & _PAGE_PCC_MASK))
			return NULL;
		if (unlikely(flags & _PAGE_CACHABLE))
			return (void __iomem *)P1SEGADDR(offset);

		return (void __iomem *)P2SEGADDR(offset);
	}

	/* P4 addresses above the store queues are always mapped. */
	if (unlikely(offset >= P3_ADDR_MAX))
		return (void __iomem *)P4SEGADDR(offset);
#endif

	return NULL;
}
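
/*
 * Illustrative sketch (not part of the original header): under CONFIG_29BIT,
 * remapping a device at physical 0x1f000000 involves no page table at all.
 * An uncached request returns the P2 alias (0xbf000000) and a cached one the
 * P1 alias (0x9f000000), exactly as computed by the segment arithmetic above.
 */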

static inline void __iomem *
__ioremap_mode(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
	void __iomem *ret;

	ret = __ioremap_trapped(offset, size);
	if (ret)
		return ret;

	ret = __ioremap_29bit(offset, size, prot);
	if (ret)
		return ret;

	return __ioremap(offset, size, prot);
}
#else
#define __ioremap(offset, size, prot)		((void __iomem *)(offset))
#define __ioremap_mode(offset, size, prot)	((void __iomem *)(offset))
#define __iounmap(addr)				do { } while (0)
#endif /* CONFIG_MMU */

static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
{
	return __ioremap_mode(offset, size, PAGE_KERNEL_NOCACHE);
}

static inline void __iomem *
ioremap_cache(phys_addr_t offset, unsigned long size)
{
	return __ioremap_mode(offset, size, PAGE_KERNEL);
}

#ifdef CONFIG_HAVE_IOREMAP_PROT
static inline void __iomem *
ioremap_prot(phys_addr_t offset, unsigned long size, unsigned long flags)
{
	return __ioremap_mode(offset, size, __pgprot(flags));
}
#endif

#ifdef CONFIG_IOREMAP_FIXED
extern void __iomem *ioremap_fixed(phys_addr_t, unsigned long, pgprot_t);
extern int iounmap_fixed(void __iomem *);
extern void ioremap_fixed_init(void);
#else
static inline void __iomem *
ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot)
{
	BUG();
	return NULL;
}

static inline void ioremap_fixed_init(void) { }
static inline int iounmap_fixed(void __iomem *addr) { return -EINVAL; }
#endif

#define ioremap_nocache	ioremap
#define iounmap		__iounmap
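
/*
 * Illustrative sketch (not part of the original header): typical driver use
 * of the interfaces above. The physical address, size and register offset
 * are hypothetical.
 */
static inline int example_probe(void)
{
	void __iomem *regs = ioremap(0x1f000000, 0x100);	/* always uncached */

	if (!regs)
		return -ENOMEM;
	writel(0x1, regs + 0x40);	/* device enable, preceded by wmb() */
	iounmap(regs);
	return 0;
}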

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
int valid_phys_addr_range(unsigned long addr, size_t size);
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);

#endif /* __KERNEL__ */

#endif /* __ASM_SH_IO_H */
v5.4
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_IO_H
#define __ASM_SH_IO_H

/*
 * Convention:
 *    read{b,w,l,q}/write{b,w,l,q} are for PCI,
 *    while in{b,w,l}/out{b,w,l} are for ISA
 *
 * In addition we have 'pausing' versions: in{b,w,l}_p/out{b,w,l}_p
 * and 'string' versions: ins{b,w,l}/outs{b,w,l}
 *
 * While read{b,w,l,q} and write{b,w,l,q} contain memory barriers
 * automatically, there are also __raw versions, which do not.
 */
#include <linux/errno.h>
#include <asm/cache.h>
#include <asm/addrspace.h>
#include <asm/machvec.h>
#include <asm/pgtable.h>
#include <asm-generic/iomap.h>

#ifdef __KERNEL__
#define __IO_PREFIX     generic
#include <asm/io_generic.h>
#include <asm/io_trapped.h>
#include <asm-generic/pci_iomap.h>
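/* Pulls in the generic pci_iomap() declarations, so PCI drivers can map
 * BARs for use with the ioread*()/iowrite*() helpers from
 * <asm-generic/iomap.h> above. */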
#include <mach/mangle-port.h>

#define __raw_writeb(v,a)	(__chk_io_ptr(a), *(volatile u8  __force *)(a) = (v))
#define __raw_writew(v,a)	(__chk_io_ptr(a), *(volatile u16 __force *)(a) = (v))
#define __raw_writel(v,a)	(__chk_io_ptr(a), *(volatile u32 __force *)(a) = (v))
#define __raw_writeq(v,a)	(__chk_io_ptr(a), *(volatile u64 __force *)(a) = (v))

#define __raw_readb(a)		(__chk_io_ptr(a), *(volatile u8  __force *)(a))
#define __raw_readw(a)		(__chk_io_ptr(a), *(volatile u16 __force *)(a))
#define __raw_readl(a)		(__chk_io_ptr(a), *(volatile u32 __force *)(a))
#define __raw_readq(a)		(__chk_io_ptr(a), *(volatile u64 __force *)(a))

#define readb_relaxed(c)	({ u8  __v = ioswabb(__raw_readb(c)); __v; })
#define readw_relaxed(c)	({ u16 __v = ioswabw(__raw_readw(c)); __v; })
#define readl_relaxed(c)	({ u32 __v = ioswabl(__raw_readl(c)); __v; })
#define readq_relaxed(c)	({ u64 __v = ioswabq(__raw_readq(c)); __v; })

#define writeb_relaxed(v,c)	((void)__raw_writeb((__force  u8)ioswabb(v),c))
#define writew_relaxed(v,c)	((void)__raw_writew((__force u16)ioswabw(v),c))
#define writel_relaxed(v,c)	((void)__raw_writel((__force u32)ioswabl(v),c))
#define writeq_relaxed(v,c)	((void)__raw_writeq((__force u64)ioswabq(v),c))

#define readb(a)		({ u8  r_ = readb_relaxed(a); rmb(); r_; })
#define readw(a)		({ u16 r_ = readw_relaxed(a); rmb(); r_; })
#define readl(a)		({ u32 r_ = readl_relaxed(a); rmb(); r_; })
#define readq(a)		({ u64 r_ = readq_relaxed(a); rmb(); r_; })

#define writeb(v,a)		({ wmb(); writeb_relaxed((v),(a)); })
#define writew(v,a)		({ wmb(); writew_relaxed((v),(a)); })
#define writel(v,a)		({ wmb(); writel_relaxed((v),(a)); })
#define writeq(v,a)		({ wmb(); writeq_relaxed((v),(a)); })

#define readsb(p,d,l)		__raw_readsb(p,d,l)
#define readsw(p,d,l)		__raw_readsw(p,d,l)
#define readsl(p,d,l)		__raw_readsl(p,d,l)

#define writesb(p,d,l)		__raw_writesb(p,d,l)
#define writesw(p,d,l)		__raw_writesw(p,d,l)
#define writesl(p,d,l)		__raw_writesl(p,d,l)

#define __BUILD_UNCACHED_IO(bwlq, type)					\
static inline type read##bwlq##_uncached(unsigned long addr)		\
{									\
	type ret;							\
	jump_to_uncached();						\
	ret = __raw_read##bwlq(addr);					\
	back_to_cached();						\
	return ret;							\
}									\
									\
static inline void write##bwlq##_uncached(type v, unsigned long addr)	\
{									\
	jump_to_uncached();						\
	__raw_write##bwlq(v, addr);					\
	back_to_cached();						\
}

__BUILD_UNCACHED_IO(b, u8)
__BUILD_UNCACHED_IO(w, u16)
__BUILD_UNCACHED_IO(l, u32)
__BUILD_UNCACHED_IO(q, u64)

#define __BUILD_MEMORY_STRING(pfx, bwlq, type)				\
									\
static inline void							\
pfx##writes##bwlq(volatile void __iomem *mem, const void *addr,		\
		  unsigned int count)					\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__raw_write##bwlq(*__addr, mem);			\
		__addr++;						\
	}								\
}									\
									\
static inline void pfx##reads##bwlq(volatile void __iomem *mem,		\
				    void *addr, unsigned int count)	\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __raw_read##bwlq(mem);			\
		__addr++;						\
	}								\
}

__BUILD_MEMORY_STRING(__raw_, b, u8)
__BUILD_MEMORY_STRING(__raw_, w, u16)

#ifdef CONFIG_SUPERH32
void __raw_writesl(void __iomem *addr, const void *data, int longlen);
void __raw_readsl(const void __iomem *addr, void *data, int longlen);
#else
__BUILD_MEMORY_STRING(__raw_, l, u32)
#endif

__BUILD_MEMORY_STRING(__raw_, q, u64)

#ifdef CONFIG_HAS_IOPORT_MAP

/*
 * Slow down I/O port space accesses for antique hardware.
 */
#undef CONF_SLOWDOWN_IO

/*
 * On SuperH I/O ports are memory mapped, so we access them using normal
 * load/store instructions. sh_io_port_base is the virtual address to
 * which all ports are being mapped.
 */
extern unsigned long sh_io_port_base;

static inline void __set_io_port_base(unsigned long pbase)
{
	*(unsigned long *)&sh_io_port_base = pbase;
	barrier();
}

#ifdef CONFIG_GENERIC_IOMAP
#define __ioport_map ioport_map
#else
extern void __iomem *__ioport_map(unsigned long addr, unsigned int size);
#endif

#ifdef CONF_SLOWDOWN_IO
#define SLOW_DOWN_IO __raw_readw(sh_io_port_base)
#else
#define SLOW_DOWN_IO
#endif

#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, p, slow)			\
									\
static inline void pfx##out##bwlq##p(type val, unsigned long port)	\
{									\
	volatile type *__addr;						\
									\
	__addr = __ioport_map(port, sizeof(type));			\
	*__addr = val;							\
	slow;								\
}									\
									\
static inline type pfx##in##bwlq##p(unsigned long port)			\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = __ioport_map(port, sizeof(type));			\
	__val = *__addr;						\
	slow;								\
									\
	return __val;							\
}

#define __BUILD_IOPORT_PFX(bus, bwlq, type)				\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, ,)			\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, _p, SLOW_DOWN_IO)

#define BUILDIO_IOPORT(bwlq, type)					\
	__BUILD_IOPORT_PFX(, bwlq, type)

BUILDIO_IOPORT(b, u8)
BUILDIO_IOPORT(w, u16)
BUILDIO_IOPORT(l, u32)
BUILDIO_IOPORT(q, u64)

#define __BUILD_IOPORT_STRING(bwlq, type)				\
									\
static inline void outs##bwlq(unsigned long port, const void *addr,	\
			      unsigned int count)			\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		out##bwlq(*__addr, port);				\
		__addr++;						\
	}								\
}									\
									\
static inline void ins##bwlq(unsigned long port, void *addr,		\
			     unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = in##bwlq(port);				\
		__addr++;						\
	}								\
}

__BUILD_IOPORT_STRING(b, u8)
__BUILD_IOPORT_STRING(w, u16)
__BUILD_IOPORT_STRING(l, u32)
__BUILD_IOPORT_STRING(q, u64)

#else /* !CONFIG_HAS_IOPORT_MAP */

#include <asm/io_noioport.h>

#endif


#define IO_SPACE_LIMIT 0xffffffff

/* We really want to try and get these to memcpy etc */
void memcpy_fromio(void *, const volatile void __iomem *, unsigned long);
void memcpy_toio(volatile void __iomem *, const void *, unsigned long);
void memset_io(volatile void __iomem *, int, unsigned long);

/* Quad-word real-mode I/O, don't ask... */
unsigned long long peek_real_address_q(unsigned long long addr);
unsigned long long poke_real_address_q(unsigned long long addr,
				       unsigned long long val);

#if !defined(CONFIG_MMU)
#define virt_to_phys(address)	((unsigned long)(address))
#define phys_to_virt(address)	((void *)(address))
#else
#define virt_to_phys(address)	(__pa(address))
#define phys_to_virt(address)	(__va(address))
#endif

/*
 * On 32-bit SH, we traditionally have the whole physical address space
 * mapped at all times (as MIPS does), so "ioremap()" and "iounmap()" do
 * not need to do anything but place the address in the proper segment.
 * This is true for P1 and P2 addresses, as well as some P3 ones.
 * However, most of the P3 addresses and newer cores using extended
 * addressing need to map through page tables, so the ioremap()
 * implementation becomes a bit more complicated.
 *
 * See arch/sh/mm/ioremap.c for additional notes on this.
 *
 * We cheat a bit and always return uncacheable areas until we've fixed
 * the drivers to handle caching properly.
 *
 * On the SH-5 the concept of segmentation in the 1:1 PXSEG sense simply
 * doesn't exist, so everything must go through page tables.
 */
#ifdef CONFIG_MMU
void __iomem *__ioremap_caller(phys_addr_t offset, unsigned long size,
			       pgprot_t prot, void *caller);
void __iounmap(void __iomem *addr);

static inline void __iomem *
__ioremap(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
	return __ioremap_caller(offset, size, prot, __builtin_return_address(0));
}

static inline void __iomem *
__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
#ifdef CONFIG_29BIT
	phys_addr_t last_addr = offset + size - 1;

	/*
	 * For P1 and P2 space this is trivial, as everything is already
	 * mapped. Uncached access for P1 addresses is done through P2.
	 * In the P3 case or for addresses outside of the 29-bit space,
	 * mapping must be done by the PMB or by using page tables.
	 */
	if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
		u64 flags = pgprot_val(prot);

		/*
		 * Anything using the legacy PTEA space attributes needs
		 * to be kicked down to page table mappings.
		 */
		if (unlikely(flags & _PAGE_PCC_MASK))
			return NULL;
		if (unlikely(flags & _PAGE_CACHABLE))
			return (void __iomem *)P1SEGADDR(offset);

		return (void __iomem *)P2SEGADDR(offset);
	}

	/* P4 addresses above the store queues are always mapped. */
	if (unlikely(offset >= P3_ADDR_MAX))
		return (void __iomem *)P4SEGADDR(offset);
#endif

	return NULL;
}

static inline void __iomem *
__ioremap_mode(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
	void __iomem *ret;

	ret = __ioremap_trapped(offset, size);
	if (ret)
		return ret;

	ret = __ioremap_29bit(offset, size, prot);
	if (ret)
		return ret;

	return __ioremap(offset, size, prot);
}
#else
#define __ioremap(offset, size, prot)		((void __iomem *)(offset))
#define __ioremap_mode(offset, size, prot)	((void __iomem *)(offset))
#define __iounmap(addr)				do { } while (0)
#endif /* CONFIG_MMU */

static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
{
	return __ioremap_mode(offset, size, PAGE_KERNEL_NOCACHE);
}

static inline void __iomem *
ioremap_cache(phys_addr_t offset, unsigned long size)
{
	return __ioremap_mode(offset, size, PAGE_KERNEL);
}
#define ioremap_cache ioremap_cache
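
/*
 * The self-referencing define above is the usual kernel idiom for signalling
 * to generic headers (their "#ifndef ioremap_cache" fallbacks) that this
 * architecture supplies its own ioremap_cache().
 */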

#ifdef CONFIG_HAVE_IOREMAP_PROT
static inline void __iomem *
ioremap_prot(phys_addr_t offset, unsigned long size, unsigned long flags)
{
	return __ioremap_mode(offset, size, __pgprot(flags));
}
#endif

#ifdef CONFIG_IOREMAP_FIXED
extern void __iomem *ioremap_fixed(phys_addr_t, unsigned long, pgprot_t);
extern int iounmap_fixed(void __iomem *);
extern void ioremap_fixed_init(void);
#else
static inline void __iomem *
ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot)
{
	BUG();
	return NULL;
}

static inline void ioremap_fixed_init(void) { }
static inline int iounmap_fixed(void __iomem *addr) { return -EINVAL; }
#endif

#define ioremap_nocache	ioremap
#define ioremap_uc	ioremap

static inline void iounmap(void __iomem *addr)
{
	__iounmap(addr);
}

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
int valid_phys_addr_range(phys_addr_t addr, size_t size);
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);

#endif /* __KERNEL__ */

#endif /* __ASM_SH_IO_H */