v3.1
 
  1#ifndef __ALPHA_IO_H
  2#define __ALPHA_IO_H
  3
  4#ifdef __KERNEL__
  5
  6#include <linux/kernel.h>
  7#include <linux/mm.h>
  8#include <asm/compiler.h>
  9#include <asm/system.h>
 10#include <asm/pgtable.h>
 11#include <asm/machvec.h>
 12#include <asm/hwrpb.h>
 13
 14/* The generic header contains only prototypes.  Including it ensures that
 15   the implementation we have here matches that interface.  */
 16#include <asm-generic/iomap.h>
 17
 18/* We don't use IO slowdowns on the Alpha, but.. */
 19#define __SLOW_DOWN_IO	do { } while (0)
 20#define SLOW_DOWN_IO	do { } while (0)
 21
 22/*
 23 * Virtual -> physical identity mapping starts at this offset
 24 */
 25#ifdef USE_48_BIT_KSEG
 26#define IDENT_ADDR     0xffff800000000000UL
 27#else
 28#define IDENT_ADDR     0xfffffc0000000000UL
 29#endif
 30
 31/*
 32 * We try to avoid hae updates (thus the cache), but when we
 33 * do need to update the hae, we need to do it atomically, so
 34 * that any interrupts wouldn't get confused with the hae
 35 * register not being up-to-date with respect to the hardware
 36 * value.
 37 */
 38extern inline void __set_hae(unsigned long new_hae)
 39{
 40	unsigned long flags = swpipl(IPL_MAX);
 41
 42	barrier();
 43
 44	alpha_mv.hae_cache = new_hae;
 45	*alpha_mv.hae_register = new_hae;
 46	mb();
 47	/* Re-read to make sure it was written.  */
 48	new_hae = *alpha_mv.hae_register;
 49
 50	setipl(flags);
 51	barrier();
 52}
 53
 54extern inline void set_hae(unsigned long new_hae)
 55{
 56	if (new_hae != alpha_mv.hae_cache)
 57		__set_hae(new_hae);
 58}
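
/*
 * Illustrative sketch (hae_mask and sparse_read are hypothetical): a
 * chipset's sparse-space access helper would typically select the HAE
 * window before touching the bus:
 *
 *	set_hae(addr & hae_mask);
 *	ret = sparse_read(addr);
 *
 * set_hae() is cheap when the cached value already matches, which is
 * why callers use it rather than __set_hae() directly.
 */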
 59
 60/*
 61 * Change virtual addresses to physical addresses and vv.
 62 */
 63#ifdef USE_48_BIT_KSEG
 64static inline unsigned long virt_to_phys(void *address)
 65{
 66	return (unsigned long)address - IDENT_ADDR;
 67}
 68
 69static inline void * phys_to_virt(unsigned long address)
 70{
 71	return (void *) (address + IDENT_ADDR);
 72}
 73#else
 74static inline unsigned long virt_to_phys(void *address)
 75{
 76        unsigned long phys = (unsigned long)address;
 77
 78	/* Sign-extend from bit 41.  */
 79	phys <<= (64 - 41);
 80	phys = (long)phys >> (64 - 41);
 81
 82	/* Crop to the physical address width of the processor.  */
 83        phys &= (1ul << hwrpb->pa_bits) - 1;
 84
 85        return phys;
 86}
 87
 88static inline void * phys_to_virt(unsigned long address)
 89{
 90        return (void *)(IDENT_ADDR + (address & ((1ul << 41) - 1)));
 91}
 92#endif
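
/*
 * For a buffer inside the kernel's KSEG identity mapping the two
 * helpers are inverses of each other; a minimal, illustrative round
 * trip:
 *
 *	void *buf = kmalloc(64, GFP_KERNEL);
 *	unsigned long pa = virt_to_phys(buf);
 *	BUG_ON(phys_to_virt(pa) != buf);
 *
 * This does not hold for vmalloc or user addresses, which are not
 * identity mapped.
 */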
 93
 94#define page_to_phys(page)	page_to_pa(page)
 95
 96static inline dma_addr_t __deprecated isa_page_to_bus(struct page *page)
 97{
 98	return page_to_phys(page);
 99}
100
101/* Maximum PIO space address supported?  */
102#define IO_SPACE_LIMIT 0xffff
103
104/*
105 * Change addresses as seen by the kernel (virtual) to addresses as
106 * seen by a device (bus), and vice versa.
107 *
108 * Note that this only works for a limited range of kernel addresses,
109 * and very well may not span all memory.  Consider this interface 
110 * deprecated in favour of the DMA-mapping API.
111 */
112extern unsigned long __direct_map_base;
113extern unsigned long __direct_map_size;
114
115static inline unsigned long __deprecated virt_to_bus(void *address)
116{
117	unsigned long phys = virt_to_phys(address);
118	unsigned long bus = phys + __direct_map_base;
119	return phys <= __direct_map_size ? bus : 0;
120}
121#define isa_virt_to_bus virt_to_bus
122
123static inline void * __deprecated bus_to_virt(unsigned long address)
124{
125	void *virt;
126
127	/* This check is a sanity check but also ensures that bus address 0
128	   maps to virtual address 0 which is useful to detect null pointers
129	   (the NCR driver is much simpler if NULL pointers are preserved).  */
130	address -= __direct_map_base;
131	virt = phys_to_virt(address);
132	return (long)address <= 0 ? NULL : virt;
133}
134#define isa_bus_to_virt bus_to_virt
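
/*
 * Illustrative only (the base value is hypothetical): with
 * __direct_map_base == 0x40000000, a buffer at physical 0x2000 is seen
 * by a bus master at 0x40002000, and bus_to_virt() undoes that offset.
 * New code should use the DMA-mapping API (dma_map_single() and
 * friends) rather than these helpers.
 */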
135
136/*
137 * There are different chipsets to interface the Alpha CPUs to the world.
138 */
139
140#define IO_CONCAT(a,b)	_IO_CONCAT(a,b)
141#define _IO_CONCAT(a,b)	a ## _ ## b
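
/*
 * The two-level paste lets __IO_PREFIX expand first; for example, with
 * __IO_PREFIX defined to "generic" below,
 *
 *	IO_CONCAT(__IO_PREFIX,ioread8)(addr)
 *
 * becomes generic_ioread8(addr).
 */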
142
143#ifdef CONFIG_ALPHA_GENERIC
144
145/* In a generic kernel, we always go through the machine vector.  */
146
147#define REMAP1(TYPE, NAME, QUAL)					\
148static inline TYPE generic_##NAME(QUAL void __iomem *addr)		\
149{									\
150	return alpha_mv.mv_##NAME(addr);				\
151}
152
153#define REMAP2(TYPE, NAME, QUAL)					\
154static inline void generic_##NAME(TYPE b, QUAL void __iomem *addr)	\
155{									\
156	alpha_mv.mv_##NAME(b, addr);					\
157}
158
159REMAP1(unsigned int, ioread8, /**/)
160REMAP1(unsigned int, ioread16, /**/)
161REMAP1(unsigned int, ioread32, /**/)
162REMAP1(u8, readb, const volatile)
163REMAP1(u16, readw, const volatile)
164REMAP1(u32, readl, const volatile)
165REMAP1(u64, readq, const volatile)
166
167REMAP2(u8, iowrite8, /**/)
168REMAP2(u16, iowrite16, /**/)
169REMAP2(u32, iowrite32, /**/)
170REMAP2(u8, writeb, volatile)
171REMAP2(u16, writew, volatile)
172REMAP2(u32, writel, volatile)
173REMAP2(u64, writeq, volatile)
174
175#undef REMAP1
176#undef REMAP2
177
178extern inline void __iomem *generic_ioportmap(unsigned long a)
179{
180	return alpha_mv.mv_ioportmap(a);
181}
182
183static inline void __iomem *generic_ioremap(unsigned long a, unsigned long s)
184{
185	return alpha_mv.mv_ioremap(a, s);
186}
187
188static inline void generic_iounmap(volatile void __iomem *a)
189{
190	return alpha_mv.mv_iounmap(a);
191}
192
193static inline int generic_is_ioaddr(unsigned long a)
194{
195	return alpha_mv.mv_is_ioaddr(a);
196}
197
198static inline int generic_is_mmio(const volatile void __iomem *a)
199{
200	return alpha_mv.mv_is_mmio(a);
201}
202
203#define __IO_PREFIX		generic
204#define generic_trivial_rw_bw	0
205#define generic_trivial_rw_lq	0
206#define generic_trivial_io_bw	0
207#define generic_trivial_io_lq	0
208#define generic_trivial_iounmap	0
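
/*
 * Non-generic kernels get these flags from the chipset header instead;
 * a hypothetical "foo" chipset whose accessors are simple enough to
 * expand inline would provide something like:
 *
 *	#define __IO_PREFIX		foo
 *	#define foo_trivial_rw_bw	1
 *	#define foo_trivial_rw_lq	1
 *	#define foo_trivial_io_bw	1
 *	#define foo_trivial_io_lq	1
 *	#define foo_trivial_iounmap	1
 */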
209
210#else
211
212#if defined(CONFIG_ALPHA_APECS)
213# include <asm/core_apecs.h>
214#elif defined(CONFIG_ALPHA_CIA)
215# include <asm/core_cia.h>
216#elif defined(CONFIG_ALPHA_IRONGATE)
217# include <asm/core_irongate.h>
218#elif defined(CONFIG_ALPHA_JENSEN)
219# include <asm/jensen.h>
220#elif defined(CONFIG_ALPHA_LCA)
221# include <asm/core_lca.h>
222#elif defined(CONFIG_ALPHA_MARVEL)
223# include <asm/core_marvel.h>
224#elif defined(CONFIG_ALPHA_MCPCIA)
225# include <asm/core_mcpcia.h>
226#elif defined(CONFIG_ALPHA_POLARIS)
227# include <asm/core_polaris.h>
228#elif defined(CONFIG_ALPHA_T2)
229# include <asm/core_t2.h>
230#elif defined(CONFIG_ALPHA_TSUNAMI)
231# include <asm/core_tsunami.h>
232#elif defined(CONFIG_ALPHA_TITAN)
233# include <asm/core_titan.h>
234#elif defined(CONFIG_ALPHA_WILDFIRE)
235# include <asm/core_wildfire.h>
236#else
237#error "What system is this?"
238#endif
239
240#endif /* GENERIC */
241
242/*
243 * We always have external versions of these routines.
244 */
245extern u8		inb(unsigned long port);
246extern u16		inw(unsigned long port);
247extern u32		inl(unsigned long port);
248extern void		outb(u8 b, unsigned long port);
249extern void		outw(u16 b, unsigned long port);
250extern void		outl(u32 b, unsigned long port);
251
252extern u8		readb(const volatile void __iomem *addr);
253extern u16		readw(const volatile void __iomem *addr);
254extern u32		readl(const volatile void __iomem *addr);
255extern u64		readq(const volatile void __iomem *addr);
256extern void		writeb(u8 b, volatile void __iomem *addr);
257extern void		writew(u16 b, volatile void __iomem *addr);
258extern void		writel(u32 b, volatile void __iomem *addr);
259extern void		writeq(u64 b, volatile void __iomem *addr);
260
261extern u8		__raw_readb(const volatile void __iomem *addr);
262extern u16		__raw_readw(const volatile void __iomem *addr);
263extern u32		__raw_readl(const volatile void __iomem *addr);
264extern u64		__raw_readq(const volatile void __iomem *addr);
265extern void		__raw_writeb(u8 b, volatile void __iomem *addr);
266extern void		__raw_writew(u16 b, volatile void __iomem *addr);
267extern void		__raw_writel(u32 b, volatile void __iomem *addr);
268extern void		__raw_writeq(u64 b, volatile void __iomem *addr);
269
270/*
271 * Mapping from port numbers to __iomem space is pretty easy.
272 */
273
274/* These two have to be extern inline because of the extern prototype from
275   <asm-generic/iomap.h>.  It is not legal to mix "extern" and "static" for
276   the same declaration.  */
277extern inline void __iomem *ioport_map(unsigned long port, unsigned int size)
278{
279	return IO_CONCAT(__IO_PREFIX,ioportmap) (port);
280}
281
282extern inline void ioport_unmap(void __iomem *addr)
283{
284}
285
286static inline void __iomem *ioremap(unsigned long port, unsigned long size)
287{
288	return IO_CONCAT(__IO_PREFIX,ioremap) (port, size);
289}
290
291static inline void __iomem *__ioremap(unsigned long port, unsigned long size,
292				      unsigned long flags)
293{
294	return ioremap(port, size);
295}
296
297static inline void __iomem * ioremap_nocache(unsigned long offset,
298					     unsigned long size)
299{
300	return ioremap(offset, size);
301} 
302
303static inline void iounmap(volatile void __iomem *addr)
304{
305	IO_CONCAT(__IO_PREFIX,iounmap)(addr);
306}
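
/*
 * Typical driver usage of these helpers (BASE, CTRL and STATUS are
 * purely illustrative):
 *
 *	void __iomem *regs = ioremap(BASE, 0x1000);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + CTRL);
 *	u32 status = readl(regs + STATUS);
 *	iounmap(regs);
 */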
307
308static inline int __is_ioaddr(unsigned long addr)
309{
310	return IO_CONCAT(__IO_PREFIX,is_ioaddr)(addr);
311}
312#define __is_ioaddr(a)		__is_ioaddr((unsigned long)(a))
313
314static inline int __is_mmio(const volatile void __iomem *addr)
315{
316	return IO_CONCAT(__IO_PREFIX,is_mmio)(addr);
317}
318
319
320/*
321 * If the actual I/O bits are sufficiently trivial, then expand inline.
322 */
323
324#if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
325extern inline unsigned int ioread8(void __iomem *addr)
326{
327	unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
328	mb();
329	return ret;
330}
331
332extern inline unsigned int ioread16(void __iomem *addr)
333{
334	unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
335	mb();
336	return ret;
337}
338
339extern inline void iowrite8(u8 b, void __iomem *addr)
340{
341	IO_CONCAT(__IO_PREFIX,iowrite8)(b, addr);
342	mb();
343}
344
345extern inline void iowrite16(u16 b, void __iomem *addr)
346{
347	IO_CONCAT(__IO_PREFIX,iowrite16)(b, addr);
348	mb();
349}
350
351extern inline u8 inb(unsigned long port)
352{
353	return ioread8(ioport_map(port, 1));
354}
355
356extern inline u16 inw(unsigned long port)
357{
358	return ioread16(ioport_map(port, 2));
359}
360
361extern inline void outb(u8 b, unsigned long port)
362{
363	iowrite8(b, ioport_map(port, 1));
364}
365
366extern inline void outw(u16 b, unsigned long port)
367{
368	iowrite16(b, ioport_map(port, 2));
369}
370#endif
371
372#if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
373extern inline unsigned int ioread32(void __iomem *addr)
374{
375	unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
376	mb();
377	return ret;
378}
379
380extern inline void iowrite32(u32 b, void __iomem *addr)
381{
382	IO_CONCAT(__IO_PREFIX,iowrite32)(b, addr);
383	mb();
384}
385
386extern inline u32 inl(unsigned long port)
387{
388	return ioread32(ioport_map(port, 4));
389}
390
391extern inline void outl(u32 b, unsigned long port)
392{
393	iowrite32(b, ioport_map(port, 4));
394}
395#endif
396
397#if IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 1
398extern inline u8 __raw_readb(const volatile void __iomem *addr)
399{
400	return IO_CONCAT(__IO_PREFIX,readb)(addr);
401}
402
403extern inline u16 __raw_readw(const volatile void __iomem *addr)
404{
405	return IO_CONCAT(__IO_PREFIX,readw)(addr);
406}
407
408extern inline void __raw_writeb(u8 b, volatile void __iomem *addr)
409{
410	IO_CONCAT(__IO_PREFIX,writeb)(b, addr);
411}
412
413extern inline void __raw_writew(u16 b, volatile void __iomem *addr)
414{
415	IO_CONCAT(__IO_PREFIX,writew)(b, addr);
416}
417
418extern inline u8 readb(const volatile void __iomem *addr)
419{
420	u8 ret = __raw_readb(addr);
421	mb();
422	return ret;
423}
424
425extern inline u16 readw(const volatile void __iomem *addr)
426{
427	u16 ret = __raw_readw(addr);
428	mb();
429	return ret;
430}
431
432extern inline void writeb(u8 b, volatile void __iomem *addr)
433{
434	__raw_writeb(b, addr);
435	mb();
436}
437
438extern inline void writew(u16 b, volatile void __iomem *addr)
439{
440	__raw_writew(b, addr);
441	mb();
442}
443#endif
444
445#if IO_CONCAT(__IO_PREFIX,trivial_rw_lq) == 1
446extern inline u32 __raw_readl(const volatile void __iomem *addr)
447{
448	return IO_CONCAT(__IO_PREFIX,readl)(addr);
449}
450
451extern inline u64 __raw_readq(const volatile void __iomem *addr)
452{
453	return IO_CONCAT(__IO_PREFIX,readq)(addr);
454}
455
456extern inline void __raw_writel(u32 b, volatile void __iomem *addr)
457{
458	IO_CONCAT(__IO_PREFIX,writel)(b, addr);
459}
460
461extern inline void __raw_writeq(u64 b, volatile void __iomem *addr)
462{
463	IO_CONCAT(__IO_PREFIX,writeq)(b, addr);
464}
465
466extern inline u32 readl(const volatile void __iomem *addr)
467{
468	u32 ret = __raw_readl(addr);
469	mb();
470	return ret;
471}
472
473extern inline u64 readq(const volatile void __iomem *addr)
474{
475	u64 ret = __raw_readq(addr);
476	mb();
477	return ret;
478}
479
480extern inline void writel(u32 b, volatile void __iomem *addr)
481{
482	__raw_writel(b, addr);
483	mb();
484}
485
486extern inline void writeq(u64 b, volatile void __iomem *addr)
487{
488	__raw_writeq(b, addr);
489	mb();
490}
491#endif
492
493#define inb_p		inb
494#define inw_p		inw
495#define inl_p		inl
496#define outb_p		outb
497#define outw_p		outw
498#define outl_p		outl
499#define readb_relaxed(addr) __raw_readb(addr)
500#define readw_relaxed(addr) __raw_readw(addr)
501#define readl_relaxed(addr) __raw_readl(addr)
502#define readq_relaxed(addr) __raw_readq(addr)
503
504#define mmiowb()
505
506/*
507 * String version of IO memory access ops:
508 */
509extern void memcpy_fromio(void *, const volatile void __iomem *, long);
510extern void memcpy_toio(volatile void __iomem *, const void *, long);
511extern void _memset_c_io(volatile void __iomem *, unsigned long, long);
512
513static inline void memset_io(volatile void __iomem *addr, u8 c, long len)
514{
515	_memset_c_io(addr, 0x0101010101010101UL * c, len);
516}
517
518#define __HAVE_ARCH_MEMSETW_IO
519static inline void memsetw_io(volatile void __iomem *addr, u16 c, long len)
520{
521	_memset_c_io(addr, 0x0001000100010001UL * c, len);
522}
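
/*
 * Illustrative use (fb, shadow, off and len are hypothetical): clearing
 * and then partially filling a region that was mapped with ioremap():
 *
 *	memset_io(fb, 0, len);
 *	memcpy_toio(fb + off, shadow, len - off);
 */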
523
524/*
525 * String versions of in/out ops:
526 */
527extern void insb (unsigned long port, void *dst, unsigned long count);
528extern void insw (unsigned long port, void *dst, unsigned long count);
529extern void insl (unsigned long port, void *dst, unsigned long count);
530extern void outsb (unsigned long port, const void *src, unsigned long count);
531extern void outsw (unsigned long port, const void *src, unsigned long count);
532extern void outsl (unsigned long port, const void *src, unsigned long count);
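
/*
 * Example (hypothetical legacy driver): pulling a 512-byte sector from
 * an IDE-style data port; the count is in units of the access size, so
 * 256 16-bit words here:
 *
 *	insw(io_base + DATA_REG, buf, 256);
 */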
533
534/*
535 * The Alpha Jensen hardware for some rather strange reason puts
536 * the RTC clock at 0x170 instead of 0x70. Probably due to some
537 * misguided idea about using 0x70 for NMI stuff.
538 *
539 * These defines will override the defaults when doing RTC queries
540 */
541
542#ifdef CONFIG_ALPHA_GENERIC
543# define RTC_PORT(x)	((x) + alpha_mv.rtc_port)
544#else
545# ifdef CONFIG_ALPHA_JENSEN
546#  define RTC_PORT(x)	(0x170+(x))
547# else
548#  define RTC_PORT(x)	(0x70 + (x))
549# endif
550#endif
551#define RTC_ALWAYS_BCD	0
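
/*
 * Illustrative CMOS access through these defines (RTC_SECONDS comes
 * from <linux/mc146818rtc.h>): write the register index to the address
 * port, then read the data port.
 *
 *	outb(RTC_SECONDS, RTC_PORT(0));
 *	sec = inb(RTC_PORT(1));
 */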
552
553/*
554 * Some mucking forons use if[n]def writeq to check if platform has it.
555 * It's a bloody bad idea and we probably want ARCH_HAS_WRITEQ for them
556 * to play with; for now just use cpp anti-recursion logics and make sure
557 * that damn thing is defined and expands to itself.
558 */
559
560#define writeq writeq
561#define readq readq
562
563/*
564 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
565 * access
566 */
567#define xlate_dev_mem_ptr(p)	__va(p)
568
569/*
570 * Convert a virtual cached pointer to an uncached pointer
571 */
572#define xlate_dev_kmem_ptr(p)	p
573
574#endif /* __KERNEL__ */
575
576#endif /* __ALPHA_IO_H */
v6.13.7
  1/* SPDX-License-Identifier: GPL-2.0 */
  2#ifndef __ALPHA_IO_H
  3#define __ALPHA_IO_H
  4
  5#ifdef __KERNEL__
  6
  7#include <linux/kernel.h>
  8#include <linux/mm.h>
  9#include <asm/compiler.h>
 10#include <asm/machvec.h>
 11#include <asm/hwrpb.h>
 12
 13/* The generic header contains only prototypes.  Including it ensures that
 14   the implementation we have here matches that interface.  */
 15#include <asm-generic/iomap.h>
 16
 17/*
 18 * Virtual -> physical identity mapping starts at this offset
 19 */
 20#ifdef USE_48_BIT_KSEG
 21#define IDENT_ADDR     0xffff800000000000UL
 22#else
 23#define IDENT_ADDR     0xfffffc0000000000UL
 24#endif
 25
 26/*
 27 * We try to avoid hae updates (thus the cache), but when we
 28 * do need to update the hae, we need to do it atomically, so
 29 * that any interrupts wouldn't get confused with the hae
 30 * register not being up-to-date with respect to the hardware
 31 * value.
 32 */
 33extern inline void __set_hae(unsigned long new_hae)
 34{
 35	unsigned long flags = swpipl(IPL_MAX);
 36
 37	barrier();
 38
 39	alpha_mv.hae_cache = new_hae;
 40	*alpha_mv.hae_register = new_hae;
 41	mb();
 42	/* Re-read to make sure it was written.  */
 43	new_hae = *alpha_mv.hae_register;
 44
 45	setipl(flags);
 46	barrier();
 47}
 48
 49extern inline void set_hae(unsigned long new_hae)
 50{
 51	if (new_hae != alpha_mv.hae_cache)
 52		__set_hae(new_hae);
 53}
 54
 55/*
 56 * Change virtual addresses to physical addresses and vv.
 57 */
 58#ifdef USE_48_BIT_KSEG
 59static inline unsigned long virt_to_phys(volatile void *address)
 60{
 61	return (unsigned long)address - IDENT_ADDR;
 62}
 63
 64static inline void * phys_to_virt(unsigned long address)
 65{
 66	return (void *) (address + IDENT_ADDR);
 67}
 68#else
 69static inline unsigned long virt_to_phys(volatile void *address)
 70{
 71        unsigned long phys = (unsigned long)address;
 72
 73	/* Sign-extend from bit 41.  */
 74	phys <<= (64 - 41);
 75	phys = (long)phys >> (64 - 41);
 76
 77	/* Crop to the physical address width of the processor.  */
 78        phys &= (1ul << hwrpb->pa_bits) - 1;
 79
 80        return phys;
 81}
 82
 83static inline void * phys_to_virt(unsigned long address)
 84{
 85        return (void *)(IDENT_ADDR + (address & ((1ul << 41) - 1)));
 86}
 87#endif
 88
 89#define virt_to_phys		virt_to_phys
 90#define phys_to_virt		phys_to_virt
 91
 92/* Maximum PIO space address supported?  */
 93#define IO_SPACE_LIMIT 0xffff
 94
 95/*
 96 * Change addresses as seen by the kernel (virtual) to addresses as
 97 * seen by a device (bus), and vice versa.
 98 *
 99 * Note that this only works for a limited range of kernel addresses,
100 * and very well may not span all memory.  Consider this interface 
101 * deprecated in favour of the DMA-mapping API.
102 */
103extern unsigned long __direct_map_base;
104extern unsigned long __direct_map_size;
105
106static inline unsigned long __deprecated isa_virt_to_bus(volatile void *address)
107{
108	unsigned long phys = virt_to_phys(address);
109	unsigned long bus = phys + __direct_map_base;
110	return phys <= __direct_map_size ? bus : 0;
111}
112#define isa_virt_to_bus isa_virt_to_bus
113
114static inline void * __deprecated isa_bus_to_virt(unsigned long address)
115{
116	void *virt;
117
118	/* This check is a sanity check but also ensures that bus address 0
119	   maps to virtual address 0 which is useful to detect null pointers
120	   (the NCR driver is much simpler if NULL pointers are preserved).  */
121	address -= __direct_map_base;
122	virt = phys_to_virt(address);
123	return (long)address <= 0 ? NULL : virt;
124}
125#define isa_bus_to_virt isa_bus_to_virt
126
127/*
128 * There are different chipsets to interface the Alpha CPUs to the world.
129 */
130
131#define IO_CONCAT(a,b)	_IO_CONCAT(a,b)
132#define _IO_CONCAT(a,b)	a ## _ ## b
133
134#ifdef CONFIG_ALPHA_GENERIC
135
136/* In a generic kernel, we always go through the machine vector.  */
137
138#define REMAP1(TYPE, NAME, QUAL)					\
139static inline TYPE generic_##NAME(QUAL void __iomem *addr)		\
140{									\
141	return alpha_mv.mv_##NAME(addr);				\
142}
143
144#define REMAP2(TYPE, NAME, QUAL)					\
145static inline void generic_##NAME(TYPE b, QUAL void __iomem *addr)	\
146{									\
147	alpha_mv.mv_##NAME(b, addr);					\
148}
149
150REMAP1(unsigned int, ioread8, const)
151REMAP1(unsigned int, ioread16, const)
152REMAP1(unsigned int, ioread32, const)
153REMAP1(u64, ioread64, const)
154REMAP1(u8, readb, const volatile)
155REMAP1(u16, readw, const volatile)
156REMAP1(u32, readl, const volatile)
157REMAP1(u64, readq, const volatile)
158
159REMAP2(u8, iowrite8, /**/)
160REMAP2(u16, iowrite16, /**/)
161REMAP2(u32, iowrite32, /**/)
162REMAP2(u64, iowrite64, /**/)
163REMAP2(u8, writeb, volatile)
164REMAP2(u16, writew, volatile)
165REMAP2(u32, writel, volatile)
166REMAP2(u64, writeq, volatile)
167
168#undef REMAP1
169#undef REMAP2
170
171extern inline void __iomem *generic_ioportmap(unsigned long a)
172{
173	return alpha_mv.mv_ioportmap(a);
174}
175
176static inline void __iomem *generic_ioremap(unsigned long a, unsigned long s)
177{
178	return alpha_mv.mv_ioremap(a, s);
179}
180
181static inline void generic_iounmap(volatile void __iomem *a)
182{
183	return alpha_mv.mv_iounmap(a);
184}
185
186static inline int generic_is_ioaddr(unsigned long a)
187{
188	return alpha_mv.mv_is_ioaddr(a);
189}
190
191static inline int generic_is_mmio(const volatile void __iomem *a)
192{
193	return alpha_mv.mv_is_mmio(a);
194}
195
196#define __IO_PREFIX		generic
197#define generic_trivial_rw_bw	0
198#define generic_trivial_rw_lq	0
199#define generic_trivial_io_bw	0
200#define generic_trivial_io_lq	0
201#define generic_trivial_iounmap	0
202
203#else
204
205#if defined(CONFIG_ALPHA_CIA)
206# include <asm/core_cia.h>
207#elif defined(CONFIG_ALPHA_IRONGATE)
208# include <asm/core_irongate.h>
209#elif defined(CONFIG_ALPHA_MARVEL)
210# include <asm/core_marvel.h>
211#elif defined(CONFIG_ALPHA_MCPCIA)
212# include <asm/core_mcpcia.h>
213#elif defined(CONFIG_ALPHA_POLARIS)
214# include <asm/core_polaris.h>
215#elif defined(CONFIG_ALPHA_T2)
216# include <asm/core_t2.h>
217#elif defined(CONFIG_ALPHA_TSUNAMI)
218# include <asm/core_tsunami.h>
219#elif defined(CONFIG_ALPHA_TITAN)
220# include <asm/core_titan.h>
221#elif defined(CONFIG_ALPHA_WILDFIRE)
222# include <asm/core_wildfire.h>
223#else
224#error "What system is this?"
225#endif
226
227#endif /* GENERIC */
228
229/*
230 * We always have external versions of these routines.
231 */
232extern u8		inb(unsigned long port);
233extern u16		inw(unsigned long port);
234extern u32		inl(unsigned long port);
235extern void		outb(u8 b, unsigned long port);
236extern void		outw(u16 b, unsigned long port);
237extern void		outl(u32 b, unsigned long port);
238#define inb inb
239#define inw inw
240#define inl inl
241#define outb outb
242#define outw outw
243#define outl outl
244
245extern u8		readb(const volatile void __iomem *addr);
246extern u16		readw(const volatile void __iomem *addr);
247extern u32		readl(const volatile void __iomem *addr);
248extern u64		readq(const volatile void __iomem *addr);
249extern void		writeb(u8 b, volatile void __iomem *addr);
250extern void		writew(u16 b, volatile void __iomem *addr);
251extern void		writel(u32 b, volatile void __iomem *addr);
252extern void		writeq(u64 b, volatile void __iomem *addr);
253#define readb readb
254#define readw readw
255#define readl readl
256#define readq readq
257#define writeb writeb
258#define writew writew
259#define writel writel
260#define writeq writeq
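
/*
 * Defining each name to itself is how <asm-generic/io.h> is told that
 * the architecture supplies its own version; that header only emits a
 * generic fallback when the name is not already a macro, roughly:
 *
 *	#ifndef readl
 *	#define readl readl
 *	static inline u32 readl(const volatile void __iomem *addr) { ... }
 *	#endif
 */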
261
262extern u8		__raw_readb(const volatile void __iomem *addr);
263extern u16		__raw_readw(const volatile void __iomem *addr);
264extern u32		__raw_readl(const volatile void __iomem *addr);
265extern u64		__raw_readq(const volatile void __iomem *addr);
266extern void		__raw_writeb(u8 b, volatile void __iomem *addr);
267extern void		__raw_writew(u16 b, volatile void __iomem *addr);
268extern void		__raw_writel(u32 b, volatile void __iomem *addr);
269extern void		__raw_writeq(u64 b, volatile void __iomem *addr);
270#define __raw_readb __raw_readb
271#define __raw_readw __raw_readw
272#define __raw_readl __raw_readl
273#define __raw_readq __raw_readq
274#define __raw_writeb __raw_writeb
275#define __raw_writew __raw_writew
276#define __raw_writel __raw_writel
277#define __raw_writeq __raw_writeq
278
279/*
280 * Mapping from port numbers to __iomem space is pretty easy.
281 */
282
283/* These two have to be extern inline because of the extern prototype from
284   <asm-generic/iomap.h>.  It is not legal to mix "extern" and "static" for
285   the same declaration.  */
286extern inline void __iomem *ioport_map(unsigned long port, unsigned int size)
287{
288	return IO_CONCAT(__IO_PREFIX,ioportmap) (port);
289}
290
291extern inline void ioport_unmap(void __iomem *addr)
292{
293}
294
295#define ioport_map ioport_map
296#define ioport_unmap ioport_unmap
297
298static inline void __iomem *ioremap(unsigned long port, unsigned long size)
299{
300	return IO_CONCAT(__IO_PREFIX,ioremap) (port, size);
301}
302
303#define ioremap_wc ioremap
304
305static inline void iounmap(volatile void __iomem *addr)
306{
307	IO_CONCAT(__IO_PREFIX,iounmap)(addr);
308}
309
310static inline int __is_ioaddr(unsigned long addr)
311{
312	return IO_CONCAT(__IO_PREFIX,is_ioaddr)(addr);
313}
314#define __is_ioaddr(a)		__is_ioaddr((unsigned long)(a))
315
316static inline int __is_mmio(const volatile void __iomem *addr)
317{
318	return IO_CONCAT(__IO_PREFIX,is_mmio)(addr);
319}
320
321
322/*
323 * If the actual I/O bits are sufficiently trivial, then expand inline.
324 */
325
326#if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
327extern inline unsigned int ioread8(const void __iomem *addr)
328{
329	unsigned int ret;
330	mb();
331	ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
332	mb();
333	return ret;
334}
335
336extern inline unsigned int ioread16(const void __iomem *addr)
337{
338	unsigned int ret;
339	mb();
340	ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
341	mb();
342	return ret;
343}
344
345extern inline void iowrite8(u8 b, void __iomem *addr)
346{
347	mb();
348	IO_CONCAT(__IO_PREFIX, iowrite8)(b, addr);
349}
350
351extern inline void iowrite16(u16 b, void __iomem *addr)
352{
353	mb();
354	IO_CONCAT(__IO_PREFIX, iowrite16)(b, addr);
355}
356
357extern inline u8 inb(unsigned long port)
358{
359	return ioread8(ioport_map(port, 1));
360}
361
362extern inline u16 inw(unsigned long port)
363{
364	return ioread16(ioport_map(port, 2));
365}
366
367extern inline void outb(u8 b, unsigned long port)
368{
369	iowrite8(b, ioport_map(port, 1));
370}
371
372extern inline void outw(u16 b, unsigned long port)
373{
374	iowrite16(b, ioport_map(port, 2));
375}
376#endif
377
378#define ioread8 ioread8
379#define ioread16 ioread16
380#define iowrite8 iowrite8
381#define iowrite16 iowrite16
382
383#if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
384extern inline unsigned int ioread32(const void __iomem *addr)
385{
386	unsigned int ret;
387	mb();
388	ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
389	mb();
390	return ret;
391}
392
393extern inline u64 ioread64(const void __iomem *addr)
394{
395	u64 ret;
396	mb();
397	ret = IO_CONCAT(__IO_PREFIX,ioread64)(addr);
398	mb();
399	return ret;
400}
401
402extern inline void iowrite32(u32 b, void __iomem *addr)
403{
404	mb();
405	IO_CONCAT(__IO_PREFIX, iowrite32)(b, addr);
406}
407
408extern inline void iowrite64(u64 b, void __iomem *addr)
409{
410	mb();
411	IO_CONCAT(__IO_PREFIX, iowrite64)(b, addr);
412}
413
414extern inline u32 inl(unsigned long port)
415{
416	return ioread32(ioport_map(port, 4));
417}
418
419extern inline void outl(u32 b, unsigned long port)
420{
421	iowrite32(b, ioport_map(port, 4));
422}
423#endif
424
425#define ioread32 ioread32
426#define ioread64 ioread64
427#define iowrite32 iowrite32
428#define iowrite64 iowrite64
429
430#if IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 1
431extern inline u8 __raw_readb(const volatile void __iomem *addr)
432{
433	return IO_CONCAT(__IO_PREFIX,readb)(addr);
434}
435
436extern inline u16 __raw_readw(const volatile void __iomem *addr)
437{
438	return IO_CONCAT(__IO_PREFIX,readw)(addr);
439}
440
441extern inline void __raw_writeb(u8 b, volatile void __iomem *addr)
442{
443	IO_CONCAT(__IO_PREFIX,writeb)(b, addr);
444}
445
446extern inline void __raw_writew(u16 b, volatile void __iomem *addr)
447{
448	IO_CONCAT(__IO_PREFIX,writew)(b, addr);
449}
450
451extern inline u8 readb(const volatile void __iomem *addr)
452{
453	u8 ret;
454	mb();
455	ret = __raw_readb(addr);
456	mb();
457	return ret;
458}
459
460extern inline u16 readw(const volatile void __iomem *addr)
461{
462	u16 ret;
463	mb();
464	ret = __raw_readw(addr);
465	mb();
466	return ret;
467}
468
469extern inline void writeb(u8 b, volatile void __iomem *addr)
470{
471	mb();
472	__raw_writeb(b, addr);
473}
474
475extern inline void writew(u16 b, volatile void __iomem *addr)
476{
477	mb();
478	__raw_writew(b, addr);
479}
480#endif
481
482#if IO_CONCAT(__IO_PREFIX,trivial_rw_lq) == 1
483extern inline u32 __raw_readl(const volatile void __iomem *addr)
484{
485	return IO_CONCAT(__IO_PREFIX,readl)(addr);
486}
487
488extern inline u64 __raw_readq(const volatile void __iomem *addr)
489{
490	return IO_CONCAT(__IO_PREFIX,readq)(addr);
491}
492
493extern inline void __raw_writel(u32 b, volatile void __iomem *addr)
494{
495	IO_CONCAT(__IO_PREFIX,writel)(b, addr);
496}
497
498extern inline void __raw_writeq(u64 b, volatile void __iomem *addr)
499{
500	IO_CONCAT(__IO_PREFIX,writeq)(b, addr);
501}
502
503extern inline u32 readl(const volatile void __iomem *addr)
504{
505	u32 ret;
506	mb();
507	ret = __raw_readl(addr);
508	mb();
509	return ret;
510}
511
512extern inline u64 readq(const volatile void __iomem *addr)
513{
514	u64 ret;
515	mb();
516	ret = __raw_readq(addr);
517	mb();
518	return ret;
519}
520
521extern inline void writel(u32 b, volatile void __iomem *addr)
522{
523	mb();
524	__raw_writel(b, addr);
525}
526
527extern inline void writeq(u64 b, volatile void __iomem *addr)
528{
529	mb();
530	__raw_writeq(b, addr);
531}
532#endif
533
534#define ioread16be(p) swab16(ioread16(p))
535#define ioread32be(p) swab32(ioread32(p))
536#define ioread64be(p) swab64(ioread64(p))
537#define iowrite16be(v,p) iowrite16(swab16(v), (p))
538#define iowrite32be(v,p) iowrite32(swab32(v), (p))
539#define iowrite64be(v,p) iowrite64(swab64(v), (p))
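
/*
 * For a device with big-endian registers (DESC_LEN is illustrative),
 * these wrappers byte-swap around the little-endian accessors above:
 *
 *	iowrite32be(len, regs + DESC_LEN);
 *	val = ioread32be(regs + DESC_LEN);
 */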
540
541#define inb_p		inb
542#define inw_p		inw
543#define inl_p		inl
544#define outb_p		outb
545#define outw_p		outw
546#define outl_p		outl
547
548extern u8 readb_relaxed(const volatile void __iomem *addr);
549extern u16 readw_relaxed(const volatile void __iomem *addr);
550extern u32 readl_relaxed(const volatile void __iomem *addr);
551extern u64 readq_relaxed(const volatile void __iomem *addr);
552#define readb_relaxed readb_relaxed
553#define readw_relaxed readw_relaxed
554#define readl_relaxed readl_relaxed
555#define readq_relaxed readq_relaxed
556
557#if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
558extern inline u8 readb_relaxed(const volatile void __iomem *addr)
559{
560	mb();
561	return __raw_readb(addr);
562}
563
564extern inline u16 readw_relaxed(const volatile void __iomem *addr)
565{
566	mb();
567	return __raw_readw(addr);
568}
569#endif
570
571#if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
572extern inline u32 readl_relaxed(const volatile void __iomem *addr)
573{
574	mb();
575	return __raw_readl(addr);
576}
577
578extern inline u64 readq_relaxed(const volatile void __iomem *addr)
579{
580	mb();
581	return __raw_readq(addr);
582}
583#endif
584
585#define writeb_relaxed	writeb
586#define writew_relaxed	writew
587#define writel_relaxed	writel
588#define writeq_relaxed	writeq
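
/*
 * The relaxed reads above keep the leading mb() but drop the trailing
 * one, so a (hypothetical) polling loop can avoid a barrier per
 * iteration and order against later accesses explicitly (STATUS and
 * DONE are illustrative):
 *
 *	while (!(readl_relaxed(regs + STATUS) & DONE))
 *		cpu_relax();
 *	rmb();
 */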
589
590/*
591 * String version of IO memory access ops:
592 */
593extern void memcpy_fromio(void *, const volatile void __iomem *, long);
594extern void memcpy_toio(volatile void __iomem *, const void *, long);
595extern void _memset_c_io(volatile void __iomem *, unsigned long, long);
596
597static inline void memset_io(volatile void __iomem *addr, u8 c, long len)
598{
599	_memset_c_io(addr, 0x0101010101010101UL * c, len);
600}
601
602#define __HAVE_ARCH_MEMSETW_IO
603static inline void memsetw_io(volatile void __iomem *addr, u16 c, long len)
604{
605	_memset_c_io(addr, 0x0001000100010001UL * c, len);
606}
607
608#define memset_io memset_io
609#define memcpy_fromio memcpy_fromio
610#define memcpy_toio memcpy_toio
611
612/*
613 * String versions of in/out ops:
614 */
615extern void insb (unsigned long port, void *dst, unsigned long count);
616extern void insw (unsigned long port, void *dst, unsigned long count);
617extern void insl (unsigned long port, void *dst, unsigned long count);
618extern void outsb (unsigned long port, const void *src, unsigned long count);
619extern void outsw (unsigned long port, const void *src, unsigned long count);
620extern void outsl (unsigned long port, const void *src, unsigned long count);
621
622#define insb insb
623#define insw insw
624#define insl insl
625#define outsb outsb
626#define outsw outsw
627#define outsl outsl
628
629#define RTC_PORT(x)	(0x70 + (x))
630#define RTC_ALWAYS_BCD	0
631
632/*
633 * These get provided from <asm-generic/iomap.h> since alpha does not
634 * select GENERIC_IOMAP.
635 */
636#define ioread64 ioread64
637#define iowrite64 iowrite64
638#define ioread8_rep ioread8_rep
639#define ioread16_rep ioread16_rep
640#define ioread32_rep ioread32_rep
641#define iowrite8_rep iowrite8_rep
642#define iowrite16_rep iowrite16_rep
643#define iowrite32_rep iowrite32_rep
644#define pci_iounmap pci_iounmap
645
646#include <asm-generic/io.h>
647
648#endif /* __KERNEL__ */
649
650#endif /* __ALPHA_IO_H */