arch/alpha/include/asm/io.h
v3.1
 
  1#ifndef __ALPHA_IO_H
  2#define __ALPHA_IO_H
  3
  4#ifdef __KERNEL__
  5
  6#include <linux/kernel.h>
  7#include <linux/mm.h>
  8#include <asm/compiler.h>
  9#include <asm/system.h>
 10#include <asm/pgtable.h>
 11#include <asm/machvec.h>
 12#include <asm/hwrpb.h>
 13
 14/* The generic header contains only prototypes.  Including it ensures that
 15   the implementation we have here matches that interface.  */
 16#include <asm-generic/iomap.h>
 17
 18/* We don't use IO slowdowns on the Alpha, but.. */
 19#define __SLOW_DOWN_IO	do { } while (0)
 20#define SLOW_DOWN_IO	do { } while (0)
 21
 22/*
 23 * Virtual -> physical identity mapping starts at this offset
 24 */
 25#ifdef USE_48_BIT_KSEG
 26#define IDENT_ADDR     0xffff800000000000UL
 27#else
 28#define IDENT_ADDR     0xfffffc0000000000UL
 29#endif
 30
 31/*
 32 * We try to avoid hae updates (thus the cache), but when we
 33 * do need to update the hae, we need to do it atomically, so
 34 * that any interrupts wouldn't get confused with the hae
 35 * register not being up-to-date with respect to the hardware
 36 * value.
 37 */
 38extern inline void __set_hae(unsigned long new_hae)
 39{
 40	unsigned long flags = swpipl(IPL_MAX);
 41
 42	barrier();
 43
 44	alpha_mv.hae_cache = new_hae;
 45	*alpha_mv.hae_register = new_hae;
 46	mb();
 47	/* Re-read to make sure it was written.  */
 48	new_hae = *alpha_mv.hae_register;
 49
 50	setipl(flags);
 51	barrier();
 52}
 53
 54extern inline void set_hae(unsigned long new_hae)
 55{
 56	if (new_hae != alpha_mv.hae_cache)
 57		__set_hae(new_hae);
 58}
 59
 60/*
 61 * Change virtual addresses to physical addresses and vv.
 62 */
 63#ifdef USE_48_BIT_KSEG
 64static inline unsigned long virt_to_phys(void *address)
 65{
 66	return (unsigned long)address - IDENT_ADDR;
 67}
 68
 69static inline void * phys_to_virt(unsigned long address)
 70{
 71	return (void *) (address + IDENT_ADDR);
 72}
 73#else
 74static inline unsigned long virt_to_phys(void *address)
 75{
 76        unsigned long phys = (unsigned long)address;
 77
 78	/* Sign-extend from bit 41.  */
 79	phys <<= (64 - 41);
 80	phys = (long)phys >> (64 - 41);
 81
 82	/* Crop to the physical address width of the processor.  */
 83        phys &= (1ul << hwrpb->pa_bits) - 1;
 84
 85        return phys;
 86}
 87
 88static inline void * phys_to_virt(unsigned long address)
 89{
 90        return (void *)(IDENT_ADDR + (address & ((1ul << 41) - 1)));
 91}
 92#endif
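/*
 * Illustrative round trip (sketch; "buf" stands for any identity-mapped
 * kernel pointer, it is not a name from this header):
 *
 *	unsigned long pa = virt_to_phys(buf);
 *	void *virt = phys_to_virt(pa);
 *
 * For KSEG (identity-mapped) addresses, virt == buf again: both
 * directions just add or strip IDENT_ADDR, with the sign-extension and
 * pa_bits masking above handling the non-48-bit-KSEG layout.
 */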
 93
 94#define page_to_phys(page)	page_to_pa(page)
 95
 96static inline dma_addr_t __deprecated isa_page_to_bus(struct page *page)
 97{
 98	return page_to_phys(page);
 99}
100
101/* Maximum PIO space address supported?  */
102#define IO_SPACE_LIMIT 0xffff
103
104/*
105 * Change addresses as seen by the kernel (virtual) to addresses as
106 * seen by a device (bus), and vice versa.
107 *
108 * Note that this only works for a limited range of kernel addresses,
109 * and very well may not span all memory.  Consider this interface 
110 * deprecated in favour of the DMA-mapping API.
111 */
112extern unsigned long __direct_map_base;
113extern unsigned long __direct_map_size;
114
115static inline unsigned long __deprecated virt_to_bus(void *address)
116{
117	unsigned long phys = virt_to_phys(address);
118	unsigned long bus = phys + __direct_map_base;
119	return phys <= __direct_map_size ? bus : 0;
120}
121#define isa_virt_to_bus virt_to_bus
122
123static inline void * __deprecated bus_to_virt(unsigned long address)
124{
125	void *virt;
126
127	/* This check is a sanity check but also ensures that bus address 0
128	   maps to virtual address 0 which is useful to detect null pointers
129	   (the NCR driver is much simpler if NULL pointers are preserved).  */
130	address -= __direct_map_base;
131	virt = phys_to_virt(address);
132	return (long)address <= 0 ? NULL : virt;
133}
134#define isa_bus_to_virt bus_to_virt
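/*
 * Illustrative replacement (sketch; "dev", "buf" and "size" are the
 * caller's own device and buffer, not names from this header): new code
 * should use the DMA-mapping API rather than virt_to_bus():
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, size, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... program "handle" into the device, then ...
 *	dma_unmap_single(dev, handle, size, DMA_TO_DEVICE);
 */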
135
136/*
137 * There are different chipsets to interface the Alpha CPUs to the world.
138 */
139
140#define IO_CONCAT(a,b)	_IO_CONCAT(a,b)
141#define _IO_CONCAT(a,b)	a ## _ ## b
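/*
 * Example expansion (for illustration): with __IO_PREFIX defined as
 * "generic", as in the CONFIG_ALPHA_GENERIC case below,
 * IO_CONCAT(__IO_PREFIX,ioread8) first expands its arguments and then
 * pastes them, yielding generic_ioread8.  The extra _IO_CONCAT level is
 * what forces __IO_PREFIX to be expanded before the ## paste happens.
 */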
142
143#ifdef CONFIG_ALPHA_GENERIC
144
145/* In a generic kernel, we always go through the machine vector.  */
146
147#define REMAP1(TYPE, NAME, QUAL)					\
148static inline TYPE generic_##NAME(QUAL void __iomem *addr)		\
149{									\
150	return alpha_mv.mv_##NAME(addr);				\
151}
152
153#define REMAP2(TYPE, NAME, QUAL)					\
154static inline void generic_##NAME(TYPE b, QUAL void __iomem *addr)	\
155{									\
156	alpha_mv.mv_##NAME(b, addr);					\
157}
158
159REMAP1(unsigned int, ioread8, /**/)
160REMAP1(unsigned int, ioread16, /**/)
161REMAP1(unsigned int, ioread32, /**/)
162REMAP1(u8, readb, const volatile)
163REMAP1(u16, readw, const volatile)
164REMAP1(u32, readl, const volatile)
165REMAP1(u64, readq, const volatile)
166
167REMAP2(u8, iowrite8, /**/)
168REMAP2(u16, iowrite16, /**/)
169REMAP2(u32, iowrite32, /**/)
170REMAP2(u8, writeb, volatile)
171REMAP2(u16, writew, volatile)
172REMAP2(u32, writel, volatile)
173REMAP2(u64, writeq, volatile)
174
175#undef REMAP1
176#undef REMAP2
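/*
 * For reference, the REMAP1 invocation for readb above expands to
 * (spelled out by hand, for illustration):
 *
 *	static inline u8 generic_readb(const volatile void __iomem *addr)
 *	{
 *		return alpha_mv.mv_readb(addr);
 *	}
 *
 * so every generic_* accessor is a thin trampoline through the machine
 * vector, and REMAP2 does the same for the write side.
 */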
177
178extern inline void __iomem *generic_ioportmap(unsigned long a)
179{
180	return alpha_mv.mv_ioportmap(a);
181}
182
183static inline void __iomem *generic_ioremap(unsigned long a, unsigned long s)
184{
185	return alpha_mv.mv_ioremap(a, s);
186}
187
188static inline void generic_iounmap(volatile void __iomem *a)
189{
190	return alpha_mv.mv_iounmap(a);
191}
192
193static inline int generic_is_ioaddr(unsigned long a)
194{
195	return alpha_mv.mv_is_ioaddr(a);
196}
197
198static inline int generic_is_mmio(const volatile void __iomem *a)
199{
200	return alpha_mv.mv_is_mmio(a);
201}
202
203#define __IO_PREFIX		generic
204#define generic_trivial_rw_bw	0
205#define generic_trivial_rw_lq	0
206#define generic_trivial_io_bw	0
207#define generic_trivial_io_lq	0
208#define generic_trivial_iounmap	0
209
210#else
211
212#if defined(CONFIG_ALPHA_APECS)
213# include <asm/core_apecs.h>
214#elif defined(CONFIG_ALPHA_CIA)
215# include <asm/core_cia.h>
216#elif defined(CONFIG_ALPHA_IRONGATE)
217# include <asm/core_irongate.h>
218#elif defined(CONFIG_ALPHA_JENSEN)
219# include <asm/jensen.h>
220#elif defined(CONFIG_ALPHA_LCA)
221# include <asm/core_lca.h>
222#elif defined(CONFIG_ALPHA_MARVEL)
223# include <asm/core_marvel.h>
224#elif defined(CONFIG_ALPHA_MCPCIA)
225# include <asm/core_mcpcia.h>
226#elif defined(CONFIG_ALPHA_POLARIS)
227# include <asm/core_polaris.h>
228#elif defined(CONFIG_ALPHA_T2)
229# include <asm/core_t2.h>
230#elif defined(CONFIG_ALPHA_TSUNAMI)
231# include <asm/core_tsunami.h>
232#elif defined(CONFIG_ALPHA_TITAN)
233# include <asm/core_titan.h>
234#elif defined(CONFIG_ALPHA_WILDFIRE)
235# include <asm/core_wildfire.h>
236#else
237#error "What system is this?"
238#endif
239
240#endif /* GENERIC */
241
242/*
243 * We always have external versions of these routines.
244 */
245extern u8		inb(unsigned long port);
246extern u16		inw(unsigned long port);
247extern u32		inl(unsigned long port);
248extern void		outb(u8 b, unsigned long port);
249extern void		outw(u16 b, unsigned long port);
250extern void		outl(u32 b, unsigned long port);
251
252extern u8		readb(const volatile void __iomem *addr);
253extern u16		readw(const volatile void __iomem *addr);
254extern u32		readl(const volatile void __iomem *addr);
255extern u64		readq(const volatile void __iomem *addr);
256extern void		writeb(u8 b, volatile void __iomem *addr);
257extern void		writew(u16 b, volatile void __iomem *addr);
258extern void		writel(u32 b, volatile void __iomem *addr);
259extern void		writeq(u64 b, volatile void __iomem *addr);
260
261extern u8		__raw_readb(const volatile void __iomem *addr);
262extern u16		__raw_readw(const volatile void __iomem *addr);
263extern u32		__raw_readl(const volatile void __iomem *addr);
264extern u64		__raw_readq(const volatile void __iomem *addr);
265extern void		__raw_writeb(u8 b, volatile void __iomem *addr);
266extern void		__raw_writew(u16 b, volatile void __iomem *addr);
267extern void		__raw_writel(u32 b, volatile void __iomem *addr);
268extern void		__raw_writeq(u64 b, volatile void __iomem *addr);
269
270/*
271 * Mapping from port numbers to __iomem space is pretty easy.
272 */
273
274/* These two have to be extern inline because of the extern prototype from
275   <asm-generic/iomap.h>.  It is not legal to mix "extern" and "static" for
276   the same declaration.  */
277extern inline void __iomem *ioport_map(unsigned long port, unsigned int size)
278{
279	return IO_CONCAT(__IO_PREFIX,ioportmap) (port);
280}
281
282extern inline void ioport_unmap(void __iomem *addr)
283{
284}
285
286static inline void __iomem *ioremap(unsigned long port, unsigned long size)
287{
288	return IO_CONCAT(__IO_PREFIX,ioremap) (port, size);
289}
290
291static inline void __iomem *__ioremap(unsigned long port, unsigned long size,
292				      unsigned long flags)
293{
294	return ioremap(port, size);
295}
296
297static inline void __iomem * ioremap_nocache(unsigned long offset,
298					     unsigned long size)
299{
300	return ioremap(offset, size);
301} 
302
303static inline void iounmap(volatile void __iomem *addr)
304{
305	IO_CONCAT(__IO_PREFIX,iounmap)(addr);
306}
307
308static inline int __is_ioaddr(unsigned long addr)
309{
310	return IO_CONCAT(__IO_PREFIX,is_ioaddr)(addr);
311}
312#define __is_ioaddr(a)		__is_ioaddr((unsigned long)(a))
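/*
 * Note (illustrative): the macro wrapper above lets callers pass any
 * pointer type; it casts the argument to unsigned long and, because a
 * function-like macro does not expand recursively, the call still
 * reaches the inline __is_ioaddr() defined just before it.
 */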
313
314static inline int __is_mmio(const volatile void __iomem *addr)
315{
316	return IO_CONCAT(__IO_PREFIX,is_mmio)(addr);
317}
318
319
320/*
321 * If the actual I/O bits are sufficiently trivial, then expand inline.
322 */
323
324#if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
325extern inline unsigned int ioread8(void __iomem *addr)
326{
327	unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
328	mb();
329	return ret;
330}
331
332extern inline unsigned int ioread16(void __iomem *addr)
333{
334	unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
335	mb();
336	return ret;
337}
338
339extern inline void iowrite8(u8 b, void __iomem *addr)
340{
341	IO_CONCAT(__IO_PREFIX,iowrite8)(b, addr);
342	mb();
343}
344
345extern inline void iowrite16(u16 b, void __iomem *addr)
346{
347	IO_CONCAT(__IO_PREFIX,iowrite16)(b, addr);
348	mb();
349}
350
351extern inline u8 inb(unsigned long port)
352{
353	return ioread8(ioport_map(port, 1));
354}
355
356extern inline u16 inw(unsigned long port)
357{
358	return ioread16(ioport_map(port, 2));
359}
360
361extern inline void outb(u8 b, unsigned long port)
362{
363	iowrite8(b, ioport_map(port, 1));
364}
365
366extern inline void outw(u16 b, unsigned long port)
367{
368	iowrite16(b, ioport_map(port, 2));
369}
370#endif
371
372#if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
373extern inline unsigned int ioread32(void __iomem *addr)
374{
375	unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
376	mb();
377	return ret;
378}
379
380extern inline void iowrite32(u32 b, void __iomem *addr)
381{
382	IO_CONCAT(__IO_PREFIX,iowrite32)(b, addr);
383	mb();
384}
385
386extern inline u32 inl(unsigned long port)
387{
388	return ioread32(ioport_map(port, 4));
389}
390
391extern inline void outl(u32 b, unsigned long port)
392{
393	iowrite32(b, ioport_map(port, 4));
394}
395#endif
396
397#if IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 1
398extern inline u8 __raw_readb(const volatile void __iomem *addr)
399{
400	return IO_CONCAT(__IO_PREFIX,readb)(addr);
401}
402
403extern inline u16 __raw_readw(const volatile void __iomem *addr)
404{
405	return IO_CONCAT(__IO_PREFIX,readw)(addr);
406}
407
408extern inline void __raw_writeb(u8 b, volatile void __iomem *addr)
409{
410	IO_CONCAT(__IO_PREFIX,writeb)(b, addr);
411}
412
413extern inline void __raw_writew(u16 b, volatile void __iomem *addr)
414{
415	IO_CONCAT(__IO_PREFIX,writew)(b, addr);
416}
417
418extern inline u8 readb(const volatile void __iomem *addr)
419{
420	u8 ret = __raw_readb(addr);
421	mb();
422	return ret;
423}
424
425extern inline u16 readw(const volatile void __iomem *addr)
426{
427	u16 ret = __raw_readw(addr);
428	mb();
429	return ret;
430}
431
432extern inline void writeb(u8 b, volatile void __iomem *addr)
433{
434	__raw_writeb(b, addr);
435	mb();
436}
437
438extern inline void writew(u16 b, volatile void __iomem *addr)
439{
440	__raw_writew(b, addr);
441	mb();
442}
443#endif
444
445#if IO_CONCAT(__IO_PREFIX,trivial_rw_lq) == 1
446extern inline u32 __raw_readl(const volatile void __iomem *addr)
447{
448	return IO_CONCAT(__IO_PREFIX,readl)(addr);
449}
450
451extern inline u64 __raw_readq(const volatile void __iomem *addr)
452{
453	return IO_CONCAT(__IO_PREFIX,readq)(addr);
454}
455
456extern inline void __raw_writel(u32 b, volatile void __iomem *addr)
457{
458	IO_CONCAT(__IO_PREFIX,writel)(b, addr);
459}
460
461extern inline void __raw_writeq(u64 b, volatile void __iomem *addr)
462{
463	IO_CONCAT(__IO_PREFIX,writeq)(b, addr);
464}
465
466extern inline u32 readl(const volatile void __iomem *addr)
467{
468	u32 ret = __raw_readl(addr);
469	mb();
470	return ret;
471}
472
473extern inline u64 readq(const volatile void __iomem *addr)
474{
475	u64 ret = __raw_readq(addr);
476	mb();
477	return ret;
478}
479
480extern inline void writel(u32 b, volatile void __iomem *addr)
481{
482	__raw_writel(b, addr);
483	mb();
484}
485
486extern inline void writeq(u64 b, volatile void __iomem *addr)
487{
488	__raw_writeq(b, addr);
489	mb();
490}
491#endif
492
493#define inb_p		inb
494#define inw_p		inw
495#define inl_p		inl
496#define outb_p		outb
497#define outw_p		outw
498#define outl_p		outl
499#define readb_relaxed(addr) __raw_readb(addr)
500#define readw_relaxed(addr) __raw_readw(addr)
501#define readl_relaxed(addr) __raw_readl(addr)
502#define readq_relaxed(addr) __raw_readq(addr)
503
504#define mmiowb()
505
506/*
507 * String version of IO memory access ops:
508 */
509extern void memcpy_fromio(void *, const volatile void __iomem *, long);
510extern void memcpy_toio(volatile void __iomem *, const void *, long);
511extern void _memset_c_io(volatile void __iomem *, unsigned long, long);
512
513static inline void memset_io(volatile void __iomem *addr, u8 c, long len)
514{
515	_memset_c_io(addr, 0x0101010101010101UL * c, len);
516}
517
518#define __HAVE_ARCH_MEMSETW_IO
519static inline void memsetw_io(volatile void __iomem *addr, u16 c, long len)
520{
521	_memset_c_io(addr, 0x0001000100010001UL * c, len);
522}
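/*
 * Illustrative use (sketch; "host_buf", "regs", "DATA_OFF" and "len" are
 * a driver's own names, not part of this header):
 *
 *	memcpy_fromio(host_buf, regs + DATA_OFF, len);
 *	memset_io(regs + DATA_OFF, 0, len);
 *
 * i.e. bulk MMIO transfers go through these helpers instead of calling
 * memcpy() or memset() on __iomem pointers.
 */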
523
524/*
525 * String versions of in/out ops:
526 */
527extern void insb (unsigned long port, void *dst, unsigned long count);
528extern void insw (unsigned long port, void *dst, unsigned long count);
529extern void insl (unsigned long port, void *dst, unsigned long count);
530extern void outsb (unsigned long port, const void *src, unsigned long count);
531extern void outsw (unsigned long port, const void *src, unsigned long count);
532extern void outsl (unsigned long port, const void *src, unsigned long count);
533
534/*
535 * The Alpha Jensen hardware for some rather strange reason puts
536 * the RTC clock at 0x170 instead of 0x70. Probably due to some
537 * misguided idea about using 0x70 for NMI stuff.
538 *
539 * These defines will override the defaults when doing RTC queries
540 */
541
542#ifdef CONFIG_ALPHA_GENERIC
543# define RTC_PORT(x)	((x) + alpha_mv.rtc_port)
544#else
545# ifdef CONFIG_ALPHA_JENSEN
546#  define RTC_PORT(x)	(0x170+(x))
547# else
548#  define RTC_PORT(x)	(0x70 + (x))
549# endif
550#endif
551#define RTC_ALWAYS_BCD	0
552
553/*
554 * Some mucking forons use if[n]def writeq to check if platform has it.
555 * It's a bloody bad idea and we probably want ARCH_HAS_WRITEQ for them
556 * to play with; for now just use cpp anti-recursion logics and make sure
557 * that damn thing is defined and expands to itself.
558 */
559
560#define writeq writeq
561#define readq readq
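/*
 * Example (illustrative): with the self-defines above, driver code like
 *
 *	#ifdef writeq
 *		writeq(val, addr);
 *	#endif
 *
 * sees writeq as defined, and the macro expands to itself, so the call
 * still resolves to the writeq() declared earlier in this file.
 */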
562
563/*
564 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
565 * access
566 */
567#define xlate_dev_mem_ptr(p)	__va(p)
568
569/*
570 * Convert a virtual cached pointer to an uncached pointer
571 */
572#define xlate_dev_kmem_ptr(p)	p
573
574#endif /* __KERNEL__ */
575
576#endif /* __ALPHA_IO_H */
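
A minimal driver-side sketch of how this interface is typically exercised; the EXAMPLE_* constants are placeholders invented for illustration, not values from the header:

	#include <linux/errno.h>
	#include <linux/types.h>
	#include <linux/io.h>		/* pulls in asm/io.h on Alpha */

	#define EXAMPLE_BAR_PHYS	0xfc000000UL	/* placeholder bus address */
	#define EXAMPLE_BAR_SIZE	0x1000UL
	#define EXAMPLE_STATUS		0x04		/* placeholder register offset */

	static int example_probe(void)
	{
		void __iomem *regs;
		u32 status;

		regs = ioremap(EXAMPLE_BAR_PHYS, EXAMPLE_BAR_SIZE);
		if (!regs)
			return -ENOMEM;

		/* readl()/writel() are the ordered accessors defined above
		   (raw MMIO access plus mb()). */
		status = readl(regs + EXAMPLE_STATUS);
		writel(status | 1, regs + EXAMPLE_STATUS);

		iounmap(regs);
		return 0;
	}
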
v5.4
  1/* SPDX-License-Identifier: GPL-2.0 */
  2#ifndef __ALPHA_IO_H
  3#define __ALPHA_IO_H
  4
  5#ifdef __KERNEL__
  6
  7#include <linux/kernel.h>
  8#include <linux/mm.h>
  9#include <asm/compiler.h>
 10#include <asm/pgtable.h>
 11#include <asm/machvec.h>
 12#include <asm/hwrpb.h>
 13
 14/* The generic header contains only prototypes.  Including it ensures that
 15   the implementation we have here matches that interface.  */
 16#include <asm-generic/iomap.h>
 17
 18/* We don't use IO slowdowns on the Alpha, but.. */
 19#define __SLOW_DOWN_IO	do { } while (0)
 20#define SLOW_DOWN_IO	do { } while (0)
 21
 22/*
 23 * Virtual -> physical identity mapping starts at this offset
 24 */
 25#ifdef USE_48_BIT_KSEG
 26#define IDENT_ADDR     0xffff800000000000UL
 27#else
 28#define IDENT_ADDR     0xfffffc0000000000UL
 29#endif
 30
 31/*
 32 * We try to avoid hae updates (thus the cache), but when we
 33 * do need to update the hae, we need to do it atomically, so
 34 * that any interrupts wouldn't get confused with the hae
 35 * register not being up-to-date with respect to the hardware
 36 * value.
 37 */
 38extern inline void __set_hae(unsigned long new_hae)
 39{
 40	unsigned long flags = swpipl(IPL_MAX);
 41
 42	barrier();
 43
 44	alpha_mv.hae_cache = new_hae;
 45	*alpha_mv.hae_register = new_hae;
 46	mb();
 47	/* Re-read to make sure it was written.  */
 48	new_hae = *alpha_mv.hae_register;
 49
 50	setipl(flags);
 51	barrier();
 52}
 53
 54extern inline void set_hae(unsigned long new_hae)
 55{
 56	if (new_hae != alpha_mv.hae_cache)
 57		__set_hae(new_hae);
 58}
 59
 60/*
 61 * Change virtual addresses to physical addresses and vv.
 62 */
 63#ifdef USE_48_BIT_KSEG
 64static inline unsigned long virt_to_phys(void *address)
 65{
 66	return (unsigned long)address - IDENT_ADDR;
 67}
 68
 69static inline void * phys_to_virt(unsigned long address)
 70{
 71	return (void *) (address + IDENT_ADDR);
 72}
 73#else
 74static inline unsigned long virt_to_phys(void *address)
 75{
 76        unsigned long phys = (unsigned long)address;
 77
 78	/* Sign-extend from bit 41.  */
 79	phys <<= (64 - 41);
 80	phys = (long)phys >> (64 - 41);
 81
 82	/* Crop to the physical address width of the processor.  */
 83        phys &= (1ul << hwrpb->pa_bits) - 1;
 84
 85        return phys;
 86}
 87
 88static inline void * phys_to_virt(unsigned long address)
 89{
 90        return (void *)(IDENT_ADDR + (address & ((1ul << 41) - 1)));
 91}
 92#endif
 93
 94#define page_to_phys(page)	page_to_pa(page)
 95
 96/* Maximum PIO space address supported?  */
 97#define IO_SPACE_LIMIT 0xffff
 98
 99/*
100 * Change addresses as seen by the kernel (virtual) to addresses as
101 * seen by a device (bus), and vice versa.
102 *
103 * Note that this only works for a limited range of kernel addresses,
104 * and very well may not span all memory.  Consider this interface 
105 * deprecated in favour of the DMA-mapping API.
106 */
107extern unsigned long __direct_map_base;
108extern unsigned long __direct_map_size;
109
110static inline unsigned long __deprecated virt_to_bus(void *address)
111{
112	unsigned long phys = virt_to_phys(address);
113	unsigned long bus = phys + __direct_map_base;
114	return phys <= __direct_map_size ? bus : 0;
115}
116#define isa_virt_to_bus virt_to_bus
117
118static inline void * __deprecated bus_to_virt(unsigned long address)
119{
120	void *virt;
121
122	/* This check is a sanity check but also ensures that bus address 0
123	   maps to virtual address 0 which is useful to detect null pointers
124	   (the NCR driver is much simpler if NULL pointers are preserved).  */
125	address -= __direct_map_base;
126	virt = phys_to_virt(address);
127	return (long)address <= 0 ? NULL : virt;
128}
129#define isa_bus_to_virt bus_to_virt
130
131/*
132 * There are different chipsets to interface the Alpha CPUs to the world.
133 */
134
135#define IO_CONCAT(a,b)	_IO_CONCAT(a,b)
136#define _IO_CONCAT(a,b)	a ## _ ## b
137
138#ifdef CONFIG_ALPHA_GENERIC
139
140/* In a generic kernel, we always go through the machine vector.  */
141
142#define REMAP1(TYPE, NAME, QUAL)					\
143static inline TYPE generic_##NAME(QUAL void __iomem *addr)		\
144{									\
145	return alpha_mv.mv_##NAME(addr);				\
146}
147
148#define REMAP2(TYPE, NAME, QUAL)					\
149static inline void generic_##NAME(TYPE b, QUAL void __iomem *addr)	\
150{									\
151	alpha_mv.mv_##NAME(b, addr);					\
152}
153
154REMAP1(unsigned int, ioread8, /**/)
155REMAP1(unsigned int, ioread16, /**/)
156REMAP1(unsigned int, ioread32, /**/)
157REMAP1(u8, readb, const volatile)
158REMAP1(u16, readw, const volatile)
159REMAP1(u32, readl, const volatile)
160REMAP1(u64, readq, const volatile)
161
162REMAP2(u8, iowrite8, /**/)
163REMAP2(u16, iowrite16, /**/)
164REMAP2(u32, iowrite32, /**/)
165REMAP2(u8, writeb, volatile)
166REMAP2(u16, writew, volatile)
167REMAP2(u32, writel, volatile)
168REMAP2(u64, writeq, volatile)
169
170#undef REMAP1
171#undef REMAP2
172
173extern inline void __iomem *generic_ioportmap(unsigned long a)
174{
175	return alpha_mv.mv_ioportmap(a);
176}
177
178static inline void __iomem *generic_ioremap(unsigned long a, unsigned long s)
179{
180	return alpha_mv.mv_ioremap(a, s);
181}
182
183static inline void generic_iounmap(volatile void __iomem *a)
184{
185	return alpha_mv.mv_iounmap(a);
186}
187
188static inline int generic_is_ioaddr(unsigned long a)
189{
190	return alpha_mv.mv_is_ioaddr(a);
191}
192
193static inline int generic_is_mmio(const volatile void __iomem *a)
194{
195	return alpha_mv.mv_is_mmio(a);
196}
197
198#define __IO_PREFIX		generic
199#define generic_trivial_rw_bw	0
200#define generic_trivial_rw_lq	0
201#define generic_trivial_io_bw	0
202#define generic_trivial_io_lq	0
203#define generic_trivial_iounmap	0
204
205#else
206
207#if defined(CONFIG_ALPHA_APECS)
208# include <asm/core_apecs.h>
209#elif defined(CONFIG_ALPHA_CIA)
210# include <asm/core_cia.h>
211#elif defined(CONFIG_ALPHA_IRONGATE)
212# include <asm/core_irongate.h>
213#elif defined(CONFIG_ALPHA_JENSEN)
214# include <asm/jensen.h>
215#elif defined(CONFIG_ALPHA_LCA)
216# include <asm/core_lca.h>
217#elif defined(CONFIG_ALPHA_MARVEL)
218# include <asm/core_marvel.h>
219#elif defined(CONFIG_ALPHA_MCPCIA)
220# include <asm/core_mcpcia.h>
221#elif defined(CONFIG_ALPHA_POLARIS)
222# include <asm/core_polaris.h>
223#elif defined(CONFIG_ALPHA_T2)
224# include <asm/core_t2.h>
225#elif defined(CONFIG_ALPHA_TSUNAMI)
226# include <asm/core_tsunami.h>
227#elif defined(CONFIG_ALPHA_TITAN)
228# include <asm/core_titan.h>
229#elif defined(CONFIG_ALPHA_WILDFIRE)
230# include <asm/core_wildfire.h>
231#else
232#error "What system is this?"
233#endif
234
235#endif /* GENERIC */
236
237/*
238 * We always have external versions of these routines.
239 */
240extern u8		inb(unsigned long port);
241extern u16		inw(unsigned long port);
242extern u32		inl(unsigned long port);
243extern void		outb(u8 b, unsigned long port);
244extern void		outw(u16 b, unsigned long port);
245extern void		outl(u32 b, unsigned long port);
246
247extern u8		readb(const volatile void __iomem *addr);
248extern u16		readw(const volatile void __iomem *addr);
249extern u32		readl(const volatile void __iomem *addr);
250extern u64		readq(const volatile void __iomem *addr);
251extern void		writeb(u8 b, volatile void __iomem *addr);
252extern void		writew(u16 b, volatile void __iomem *addr);
253extern void		writel(u32 b, volatile void __iomem *addr);
254extern void		writeq(u64 b, volatile void __iomem *addr);
255
256extern u8		__raw_readb(const volatile void __iomem *addr);
257extern u16		__raw_readw(const volatile void __iomem *addr);
258extern u32		__raw_readl(const volatile void __iomem *addr);
259extern u64		__raw_readq(const volatile void __iomem *addr);
260extern void		__raw_writeb(u8 b, volatile void __iomem *addr);
261extern void		__raw_writew(u16 b, volatile void __iomem *addr);
262extern void		__raw_writel(u32 b, volatile void __iomem *addr);
263extern void		__raw_writeq(u64 b, volatile void __iomem *addr);
264
265/*
266 * Mapping from port numbers to __iomem space is pretty easy.
267 */
268
269/* These two have to be extern inline because of the extern prototype from
270   <asm-generic/iomap.h>.  It is not legal to mix "extern" and "static" for
271   the same declaration.  */
272extern inline void __iomem *ioport_map(unsigned long port, unsigned int size)
273{
274	return IO_CONCAT(__IO_PREFIX,ioportmap) (port);
275}
276
277extern inline void ioport_unmap(void __iomem *addr)
278{
279}
280
281static inline void __iomem *ioremap(unsigned long port, unsigned long size)
282{
283	return IO_CONCAT(__IO_PREFIX,ioremap) (port, size);
284}
285
286static inline void __iomem *__ioremap(unsigned long port, unsigned long size,
287				      unsigned long flags)
288{
289	return ioremap(port, size);
290}
291
292static inline void __iomem * ioremap_nocache(unsigned long offset,
293					     unsigned long size)
294{
295	return ioremap(offset, size);
296}
297
298#define ioremap_wc ioremap_nocache
299#define ioremap_uc ioremap_nocache
300
301static inline void iounmap(volatile void __iomem *addr)
302{
303	IO_CONCAT(__IO_PREFIX,iounmap)(addr);
304}
305
306static inline int __is_ioaddr(unsigned long addr)
307{
308	return IO_CONCAT(__IO_PREFIX,is_ioaddr)(addr);
309}
310#define __is_ioaddr(a)		__is_ioaddr((unsigned long)(a))
311
312static inline int __is_mmio(const volatile void __iomem *addr)
313{
314	return IO_CONCAT(__IO_PREFIX,is_mmio)(addr);
315}
316
317
318/*
319 * If the actual I/O bits are sufficiently trivial, then expand inline.
320 */
321
322#if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
323extern inline unsigned int ioread8(void __iomem *addr)
324{
325	unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
326	mb();
327	return ret;
328}
329
330extern inline unsigned int ioread16(void __iomem *addr)
331{
332	unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
333	mb();
334	return ret;
335}
336
337extern inline void iowrite8(u8 b, void __iomem *addr)
338{
339	mb();
340	IO_CONCAT(__IO_PREFIX, iowrite8)(b, addr);
341}
342
343extern inline void iowrite16(u16 b, void __iomem *addr)
344{
345	mb();
346	IO_CONCAT(__IO_PREFIX, iowrite16)(b, addr);
347}
348
349extern inline u8 inb(unsigned long port)
350{
351	return ioread8(ioport_map(port, 1));
352}
353
354extern inline u16 inw(unsigned long port)
355{
356	return ioread16(ioport_map(port, 2));
357}
358
359extern inline void outb(u8 b, unsigned long port)
360{
361	iowrite8(b, ioport_map(port, 1));
362}
363
364extern inline void outw(u16 b, unsigned long port)
365{
366	iowrite16(b, ioport_map(port, 2));
367}
368#endif
369
370#if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
371extern inline unsigned int ioread32(void __iomem *addr)
372{
373	unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
374	mb();
375	return ret;
376}
377
378extern inline void iowrite32(u32 b, void __iomem *addr)
379{
380	mb();
381	IO_CONCAT(__IO_PREFIX, iowrite32)(b, addr);
382}
383
384extern inline u32 inl(unsigned long port)
385{
386	return ioread32(ioport_map(port, 4));
387}
388
389extern inline void outl(u32 b, unsigned long port)
390{
391	iowrite32(b, ioport_map(port, 4));
392}
393#endif
394
395#if IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 1
396extern inline u8 __raw_readb(const volatile void __iomem *addr)
397{
398	return IO_CONCAT(__IO_PREFIX,readb)(addr);
399}
400
401extern inline u16 __raw_readw(const volatile void __iomem *addr)
402{
403	return IO_CONCAT(__IO_PREFIX,readw)(addr);
404}
405
406extern inline void __raw_writeb(u8 b, volatile void __iomem *addr)
407{
408	IO_CONCAT(__IO_PREFIX,writeb)(b, addr);
409}
410
411extern inline void __raw_writew(u16 b, volatile void __iomem *addr)
412{
413	IO_CONCAT(__IO_PREFIX,writew)(b, addr);
414}
415
416extern inline u8 readb(const volatile void __iomem *addr)
417{
418	u8 ret = __raw_readb(addr);
419	mb();
420	return ret;
421}
422
423extern inline u16 readw(const volatile void __iomem *addr)
424{
425	u16 ret = __raw_readw(addr);
426	mb();
427	return ret;
428}
429
430extern inline void writeb(u8 b, volatile void __iomem *addr)
431{
432	mb();
433	__raw_writeb(b, addr);
434}
435
436extern inline void writew(u16 b, volatile void __iomem *addr)
437{
438	mb();
439	__raw_writew(b, addr);
440}
441#endif
442
443#if IO_CONCAT(__IO_PREFIX,trivial_rw_lq) == 1
444extern inline u32 __raw_readl(const volatile void __iomem *addr)
445{
446	return IO_CONCAT(__IO_PREFIX,readl)(addr);
447}
448
449extern inline u64 __raw_readq(const volatile void __iomem *addr)
450{
451	return IO_CONCAT(__IO_PREFIX,readq)(addr);
452}
453
454extern inline void __raw_writel(u32 b, volatile void __iomem *addr)
455{
456	IO_CONCAT(__IO_PREFIX,writel)(b, addr);
457}
458
459extern inline void __raw_writeq(u64 b, volatile void __iomem *addr)
460{
461	IO_CONCAT(__IO_PREFIX,writeq)(b, addr);
462}
463
464extern inline u32 readl(const volatile void __iomem *addr)
465{
466	u32 ret = __raw_readl(addr);
467	mb();
468	return ret;
469}
470
471extern inline u64 readq(const volatile void __iomem *addr)
472{
473	u64 ret = __raw_readq(addr);
474	mb();
475	return ret;
476}
477
478extern inline void writel(u32 b, volatile void __iomem *addr)
479{
480	mb();
481	__raw_writel(b, addr);
482}
483
484extern inline void writeq(u64 b, volatile void __iomem *addr)
485{
486	mb();
487	__raw_writeq(b, addr);
488}
489#endif
490
491#define ioread16be(p) be16_to_cpu(ioread16(p))
492#define ioread32be(p) be32_to_cpu(ioread32(p))
493#define iowrite16be(v,p) iowrite16(cpu_to_be16(v), (p))
494#define iowrite32be(v,p) iowrite32(cpu_to_be32(v), (p))
495
496#define inb_p		inb
497#define inw_p		inw
498#define inl_p		inl
499#define outb_p		outb
500#define outw_p		outw
501#define outl_p		outl
502#define readb_relaxed(addr)	__raw_readb(addr)
503#define readw_relaxed(addr)	__raw_readw(addr)
504#define readl_relaxed(addr)	__raw_readl(addr)
505#define readq_relaxed(addr)	__raw_readq(addr)
506#define writeb_relaxed(b, addr)	__raw_writeb(b, addr)
507#define writew_relaxed(b, addr)	__raw_writew(b, addr)
508#define writel_relaxed(b, addr)	__raw_writel(b, addr)
509#define writeq_relaxed(b, addr)	__raw_writeq(b, addr)
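/*
 * Illustrative contrast: readl(addr) above is __raw_readl(addr) followed
 * by mb(), while readl_relaxed(addr) is the raw access alone.  The
 * _relaxed forms are therefore the cheaper choice when an access does
 * not need to be ordered against normal memory, e.g. when repeatedly
 * polling a status register.
 */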
510
511/*
512 * String version of IO memory access ops:
513 */
514extern void memcpy_fromio(void *, const volatile void __iomem *, long);
515extern void memcpy_toio(volatile void __iomem *, const void *, long);
516extern void _memset_c_io(volatile void __iomem *, unsigned long, long);
517
518static inline void memset_io(volatile void __iomem *addr, u8 c, long len)
519{
520	_memset_c_io(addr, 0x0101010101010101UL * c, len);
521}
522
523#define __HAVE_ARCH_MEMSETW_IO
524static inline void memsetw_io(volatile void __iomem *addr, u16 c, long len)
525{
526	_memset_c_io(addr, 0x0001000100010001UL * c, len);
527}
528
529/*
530 * String versions of in/out ops:
531 */
532extern void insb (unsigned long port, void *dst, unsigned long count);
533extern void insw (unsigned long port, void *dst, unsigned long count);
534extern void insl (unsigned long port, void *dst, unsigned long count);
535extern void outsb (unsigned long port, const void *src, unsigned long count);
536extern void outsw (unsigned long port, const void *src, unsigned long count);
537extern void outsl (unsigned long port, const void *src, unsigned long count);
538
539/*
540 * The Alpha Jensen hardware for some rather strange reason puts
541 * the RTC clock at 0x170 instead of 0x70. Probably due to some
542 * misguided idea about using 0x70 for NMI stuff.
543 *
544 * These defines will override the defaults when doing RTC queries
545 */
546
547#ifdef CONFIG_ALPHA_GENERIC
548# define RTC_PORT(x)	((x) + alpha_mv.rtc_port)
549#else
550# ifdef CONFIG_ALPHA_JENSEN
551#  define RTC_PORT(x)	(0x170+(x))
552# else
553#  define RTC_PORT(x)	(0x70 + (x))
554# endif
555#endif
556#define RTC_ALWAYS_BCD	0
557
558/*
559 * Some mucking forons use if[n]def writeq to check if platform has it.
560 * It's a bloody bad idea and we probably want ARCH_HAS_WRITEQ for them
561 * to play with; for now just use cpp anti-recursion logics and make sure
562 * that damn thing is defined and expands to itself.
563 */
564
565#define writeq writeq
566#define readq readq
567
568/*
569 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
570 * access
571 */
572#define xlate_dev_mem_ptr(p)	__va(p)
573
574/*
575 * Convert a virtual cached pointer to an uncached pointer
576 */
577#define xlate_dev_kmem_ptr(p)	p
578
579#endif /* __KERNEL__ */
580
581#endif /* __ALPHA_IO_H */
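
A short usage sketch for the relaxed accessors present in this version; the function, the EXAMPLE_BUSY bit and the timeout are placeholders for illustration, not names from the header:

	#include <linux/delay.h>
	#include <linux/errno.h>
	#include <linux/io.h>

	#define EXAMPLE_BUSY	0x1	/* placeholder status bit */

	static int example_wait_idle(void __iomem *status_reg)
	{
		int timeout = 1000;

		/* readl_relaxed(): raw MMIO load, no mb(); fine for a
		   pure status poll. */
		while (readl_relaxed(status_reg) & EXAMPLE_BUSY) {
			if (--timeout == 0)
				return -ETIMEDOUT;
			udelay(10);
		}

		/* Ordered read (includes mb()) before the caller goes on
		   to touch memory the device may have written. */
		return (readl(status_reg) & EXAMPLE_BUSY) ? -EBUSY : 0;
	}
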