#ifndef __ALPHA_IO_H
#define __ALPHA_IO_H

#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/compiler.h>
#include <asm/pgtable.h>
#include <asm/machvec.h>
#include <asm/hwrpb.h>

/* The generic header contains only prototypes.  Including it ensures that
   the implementation we have here matches that interface.  */
#include <asm-generic/iomap.h>

/* We don't use IO slowdowns on the Alpha, but.. */
#define __SLOW_DOWN_IO do { } while (0)
#define SLOW_DOWN_IO do { } while (0)

/*
 * Virtual -> physical identity mapping starts at this offset
 */
#ifdef USE_48_BIT_KSEG
#define IDENT_ADDR 0xffff800000000000UL
#else
#define IDENT_ADDR 0xfffffc0000000000UL
#endif

/*
 * We try to avoid hae updates (thus the cache), but when we
 * do need to update the hae, we need to do it atomically, so
 * that any interrupts wouldn't get confused with the hae
 * register not being up-to-date with respect to the hardware
 * value.
 */
extern inline void __set_hae(unsigned long new_hae)
{
        unsigned long flags = swpipl(IPL_MAX);

        barrier();

        alpha_mv.hae_cache = new_hae;
        *alpha_mv.hae_register = new_hae;
        mb();
        /* Re-read to make sure it was written.  */
        new_hae = *alpha_mv.hae_register;

        setipl(flags);
        barrier();
}

extern inline void set_hae(unsigned long new_hae)
{
        if (new_hae != alpha_mv.hae_cache)
                __set_hae(new_hae);
}
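/* Callers normally use set_hae() above; it consults the cached value and
   only takes the interrupt-blocking slow path in __set_hae() when the HAE
   register actually has to change.  */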

/*
 * Change virtual addresses to physical addresses and vice versa.
 */
#ifdef USE_48_BIT_KSEG
static inline unsigned long virt_to_phys(void *address)
{
        return (unsigned long)address - IDENT_ADDR;
}

static inline void * phys_to_virt(unsigned long address)
{
        return (void *) (address + IDENT_ADDR);
}
#else
static inline unsigned long virt_to_phys(void *address)
{
        unsigned long phys = (unsigned long)address;

        /* Sign-extend from bit 41.  */
        phys <<= (64 - 41);
        phys = (long)phys >> (64 - 41);

        /* Crop to the physical address width of the processor.  */
        phys &= (1ul << hwrpb->pa_bits) - 1;

        return phys;
}

static inline void * phys_to_virt(unsigned long address)
{
        return (void *)(IDENT_ADDR + (address & ((1ul << 41) - 1)));
}
#endif
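/* Worked example: with IDENT_ADDR == 0xfffffc0000000000UL, the KSEG
   pointer 0xfffffc0000001000UL translates to physical address 0x1000
   (the low bits survive the sign-extension and the pa_bits mask), and
   phys_to_virt(0x1000) yields the same kernel pointer back.  */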

#define page_to_phys(page) page_to_pa(page)

static inline dma_addr_t __deprecated isa_page_to_bus(struct page *page)
{
        return page_to_phys(page);
}

/* Maximum PIO space address supported?  */
#define IO_SPACE_LIMIT 0xffff

/*
 * Change addresses as seen by the kernel (virtual) to addresses as
 * seen by a device (bus), and vice versa.
 *
 * Note that this only works for a limited range of kernel addresses,
 * and very well may not span all memory.  Consider this interface
 * deprecated in favour of the DMA-mapping API.
 */
extern unsigned long __direct_map_base;
extern unsigned long __direct_map_size;

static inline unsigned long __deprecated virt_to_bus(void *address)
{
        unsigned long phys = virt_to_phys(address);
        unsigned long bus = phys + __direct_map_base;
        return phys <= __direct_map_size ? bus : 0;
}
#define isa_virt_to_bus virt_to_bus

static inline void * __deprecated bus_to_virt(unsigned long address)
{
        void *virt;

        /* This check is a sanity check but also ensures that bus address 0
           maps to virtual address 0 which is useful to detect null pointers
           (the NCR driver is much simpler if NULL pointers are preserved).  */
        address -= __direct_map_base;
        virt = phys_to_virt(address);
        return (long)address <= 0 ? NULL : virt;
}
#define isa_bus_to_virt bus_to_virt
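/* New code should not rely on this identity-mapped window.  A driver would
   instead get a bus address from the DMA-mapping API, along the lines of
   (illustrative only; "dev", "buf" and "size" are assumed to exist):
   dma_addr_t handle = dma_map_single(dev, buf, size, DMA_TO_DEVICE);  */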

/*
 * There are different chipsets to interface the Alpha CPUs to the world.
 */

#define IO_CONCAT(a,b) _IO_CONCAT(a,b)
#define _IO_CONCAT(a,b) a ## _ ## b
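/* The two-level paste lets __IO_PREFIX expand before ## is applied, so
   IO_CONCAT(__IO_PREFIX,readl) becomes generic_readl in a generic kernel,
   or whatever chipset-specific prefix is selected below.  */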

#ifdef CONFIG_ALPHA_GENERIC

/* In a generic kernel, we always go through the machine vector.  */

#define REMAP1(TYPE, NAME, QUAL) \
static inline TYPE generic_##NAME(QUAL void __iomem *addr) \
{ \
        return alpha_mv.mv_##NAME(addr); \
}

#define REMAP2(TYPE, NAME, QUAL) \
static inline void generic_##NAME(TYPE b, QUAL void __iomem *addr) \
{ \
        alpha_mv.mv_##NAME(b, addr); \
}

REMAP1(unsigned int, ioread8, /**/)
REMAP1(unsigned int, ioread16, /**/)
REMAP1(unsigned int, ioread32, /**/)
REMAP1(u8, readb, const volatile)
REMAP1(u16, readw, const volatile)
REMAP1(u32, readl, const volatile)
REMAP1(u64, readq, const volatile)

REMAP2(u8, iowrite8, /**/)
REMAP2(u16, iowrite16, /**/)
REMAP2(u32, iowrite32, /**/)
REMAP2(u8, writeb, volatile)
REMAP2(u16, writew, volatile)
REMAP2(u32, writel, volatile)
REMAP2(u64, writeq, volatile)

#undef REMAP1
#undef REMAP2

extern inline void __iomem *generic_ioportmap(unsigned long a)
{
        return alpha_mv.mv_ioportmap(a);
}

static inline void __iomem *generic_ioremap(unsigned long a, unsigned long s)
{
        return alpha_mv.mv_ioremap(a, s);
}

static inline void generic_iounmap(volatile void __iomem *a)
{
        return alpha_mv.mv_iounmap(a);
}

static inline int generic_is_ioaddr(unsigned long a)
{
        return alpha_mv.mv_is_ioaddr(a);
}

static inline int generic_is_mmio(const volatile void __iomem *a)
{
        return alpha_mv.mv_is_mmio(a);
}

#define __IO_PREFIX generic
#define generic_trivial_rw_bw 0
#define generic_trivial_rw_lq 0
#define generic_trivial_io_bw 0
#define generic_trivial_io_lq 0
#define generic_trivial_iounmap 0
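/* With every "trivial" flag set to 0, none of the inline fast paths later
   in this file are compiled for a generic kernel; every access goes
   through the machine-vector indirection above.  */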

#else

#if defined(CONFIG_ALPHA_APECS)
# include <asm/core_apecs.h>
#elif defined(CONFIG_ALPHA_CIA)
# include <asm/core_cia.h>
#elif defined(CONFIG_ALPHA_IRONGATE)
# include <asm/core_irongate.h>
#elif defined(CONFIG_ALPHA_JENSEN)
# include <asm/jensen.h>
#elif defined(CONFIG_ALPHA_LCA)
# include <asm/core_lca.h>
#elif defined(CONFIG_ALPHA_MARVEL)
# include <asm/core_marvel.h>
#elif defined(CONFIG_ALPHA_MCPCIA)
# include <asm/core_mcpcia.h>
#elif defined(CONFIG_ALPHA_POLARIS)
# include <asm/core_polaris.h>
#elif defined(CONFIG_ALPHA_T2)
# include <asm/core_t2.h>
#elif defined(CONFIG_ALPHA_TSUNAMI)
# include <asm/core_tsunami.h>
#elif defined(CONFIG_ALPHA_TITAN)
# include <asm/core_titan.h>
#elif defined(CONFIG_ALPHA_WILDFIRE)
# include <asm/core_wildfire.h>
#else
#error "What system is this?"
#endif

#endif /* GENERIC */

/*
 * We always have external versions of these routines.
 */
extern u8 inb(unsigned long port);
extern u16 inw(unsigned long port);
extern u32 inl(unsigned long port);
extern void outb(u8 b, unsigned long port);
extern void outw(u16 b, unsigned long port);
extern void outl(u32 b, unsigned long port);

extern u8 readb(const volatile void __iomem *addr);
extern u16 readw(const volatile void __iomem *addr);
extern u32 readl(const volatile void __iomem *addr);
extern u64 readq(const volatile void __iomem *addr);
extern void writeb(u8 b, volatile void __iomem *addr);
extern void writew(u16 b, volatile void __iomem *addr);
extern void writel(u32 b, volatile void __iomem *addr);
extern void writeq(u64 b, volatile void __iomem *addr);

extern u8 __raw_readb(const volatile void __iomem *addr);
extern u16 __raw_readw(const volatile void __iomem *addr);
extern u32 __raw_readl(const volatile void __iomem *addr);
extern u64 __raw_readq(const volatile void __iomem *addr);
extern void __raw_writeb(u8 b, volatile void __iomem *addr);
extern void __raw_writew(u16 b, volatile void __iomem *addr);
extern void __raw_writel(u32 b, volatile void __iomem *addr);
extern void __raw_writeq(u64 b, volatile void __iomem *addr);

/*
 * Mapping from port numbers to __iomem space is pretty easy.
 */

/* These two have to be extern inline because of the extern prototype from
   <asm-generic/iomap.h>.  It is not legal to mix "extern" and "static" for
   the same declaration.  */
extern inline void __iomem *ioport_map(unsigned long port, unsigned int size)
{
        return IO_CONCAT(__IO_PREFIX,ioportmap) (port);
}

extern inline void ioport_unmap(void __iomem *addr)
{
}

static inline void __iomem *ioremap(unsigned long port, unsigned long size)
{
        return IO_CONCAT(__IO_PREFIX,ioremap) (port, size);
}

static inline void __iomem *__ioremap(unsigned long port, unsigned long size,
                                       unsigned long flags)
{
        return ioremap(port, size);
}

static inline void __iomem * ioremap_nocache(unsigned long offset,
                                             unsigned long size)
{
        return ioremap(offset, size);
}

static inline void iounmap(volatile void __iomem *addr)
{
        IO_CONCAT(__IO_PREFIX,iounmap)(addr);
}

static inline int __is_ioaddr(unsigned long addr)
{
        return IO_CONCAT(__IO_PREFIX,is_ioaddr)(addr);
}
#define __is_ioaddr(a) __is_ioaddr((unsigned long)(a))

static inline int __is_mmio(const volatile void __iomem *addr)
{
        return IO_CONCAT(__IO_PREFIX,is_mmio)(addr);
}


/*
 * If the actual I/O bits are sufficiently trivial, then expand inline.
 */

#if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
extern inline unsigned int ioread8(void __iomem *addr)
{
        unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
        mb();
        return ret;
}

extern inline unsigned int ioread16(void __iomem *addr)
{
        unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
        mb();
        return ret;
}

extern inline void iowrite8(u8 b, void __iomem *addr)
{
        IO_CONCAT(__IO_PREFIX,iowrite8)(b, addr);
        mb();
}

extern inline void iowrite16(u16 b, void __iomem *addr)
{
        IO_CONCAT(__IO_PREFIX,iowrite16)(b, addr);
        mb();
}

extern inline u8 inb(unsigned long port)
{
        return ioread8(ioport_map(port, 1));
}

extern inline u16 inw(unsigned long port)
{
        return ioread16(ioport_map(port, 2));
}

extern inline void outb(u8 b, unsigned long port)
{
        iowrite8(b, ioport_map(port, 1));
}

extern inline void outw(u16 b, unsigned long port)
{
        iowrite16(b, ioport_map(port, 2));
}
#endif

#if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
extern inline unsigned int ioread32(void __iomem *addr)
{
        unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
        mb();
        return ret;
}

extern inline void iowrite32(u32 b, void __iomem *addr)
{
        IO_CONCAT(__IO_PREFIX,iowrite32)(b, addr);
        mb();
}

extern inline u32 inl(unsigned long port)
{
        return ioread32(ioport_map(port, 4));
}

extern inline void outl(u32 b, unsigned long port)
{
        iowrite32(b, ioport_map(port, 4));
}
#endif

#if IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 1
extern inline u8 __raw_readb(const volatile void __iomem *addr)
{
        return IO_CONCAT(__IO_PREFIX,readb)(addr);
}

extern inline u16 __raw_readw(const volatile void __iomem *addr)
{
        return IO_CONCAT(__IO_PREFIX,readw)(addr);
}

extern inline void __raw_writeb(u8 b, volatile void __iomem *addr)
{
        IO_CONCAT(__IO_PREFIX,writeb)(b, addr);
}

extern inline void __raw_writew(u16 b, volatile void __iomem *addr)
{
        IO_CONCAT(__IO_PREFIX,writew)(b, addr);
}

extern inline u8 readb(const volatile void __iomem *addr)
{
        u8 ret = __raw_readb(addr);
        mb();
        return ret;
}

extern inline u16 readw(const volatile void __iomem *addr)
{
        u16 ret = __raw_readw(addr);
        mb();
        return ret;
}

extern inline void writeb(u8 b, volatile void __iomem *addr)
{
        __raw_writeb(b, addr);
        mb();
}

extern inline void writew(u16 b, volatile void __iomem *addr)
{
        __raw_writew(b, addr);
        mb();
}
#endif

#if IO_CONCAT(__IO_PREFIX,trivial_rw_lq) == 1
extern inline u32 __raw_readl(const volatile void __iomem *addr)
{
        return IO_CONCAT(__IO_PREFIX,readl)(addr);
}

extern inline u64 __raw_readq(const volatile void __iomem *addr)
{
        return IO_CONCAT(__IO_PREFIX,readq)(addr);
}

extern inline void __raw_writel(u32 b, volatile void __iomem *addr)
{
        IO_CONCAT(__IO_PREFIX,writel)(b, addr);
}

extern inline void __raw_writeq(u64 b, volatile void __iomem *addr)
{
        IO_CONCAT(__IO_PREFIX,writeq)(b, addr);
}

extern inline u32 readl(const volatile void __iomem *addr)
{
        u32 ret = __raw_readl(addr);
        mb();
        return ret;
}

extern inline u64 readq(const volatile void __iomem *addr)
{
        u64 ret = __raw_readq(addr);
        mb();
        return ret;
}

extern inline void writel(u32 b, volatile void __iomem *addr)
{
        __raw_writel(b, addr);
        mb();
}

extern inline void writeq(u64 b, volatile void __iomem *addr)
{
        __raw_writeq(b, addr);
        mb();
}
#endif

#define ioread16be(p) be16_to_cpu(ioread16(p))
#define ioread32be(p) be32_to_cpu(ioread32(p))
#define iowrite16be(v,p) iowrite16(cpu_to_be16(v), (p))
#define iowrite32be(v,p) iowrite32(cpu_to_be32(v), (p))
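/* Big-endian MMIO helpers: the little-endian accessors above with an
   explicit byte swap, for devices whose registers are big-endian.  */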

#define inb_p inb
#define inw_p inw
#define inl_p inl
#define outb_p outb
#define outw_p outw
#define outl_p outl
#define readb_relaxed(addr) __raw_readb(addr)
#define readw_relaxed(addr) __raw_readw(addr)
#define readl_relaxed(addr) __raw_readl(addr)
#define readq_relaxed(addr) __raw_readq(addr)

#define mmiowb()
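/* The _p ("pause") port ops need no extra delay here, the _relaxed reads
   map to the __raw_ accessors and therefore skip the trailing mb() that
   the ordered readb()/readw()/readl()/readq() above issue, and mmiowb()
   is a no-op on this architecture.  */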

/*
 * String versions of I/O memory access ops:
 */
extern void memcpy_fromio(void *, const volatile void __iomem *, long);
extern void memcpy_toio(volatile void __iomem *, const void *, long);
extern void _memset_c_io(volatile void __iomem *, unsigned long, long);

static inline void memset_io(volatile void __iomem *addr, u8 c, long len)
{
        _memset_c_io(addr, 0x0101010101010101UL * c, len);
}

#define __HAVE_ARCH_MEMSETW_IO
static inline void memsetw_io(volatile void __iomem *addr, u16 c, long len)
{
        _memset_c_io(addr, 0x0001000100010001UL * c, len);
}
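/* The multiplications above replicate the fill value across the 64-bit
   quadword that _memset_c_io() stores: memset_io(addr, 0xab, len) passes
   0xabababababababab, and memsetw_io(addr, 0x1234, len) passes
   0x1234123412341234.  */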

/*
 * String versions of in/out ops:
 */
extern void insb (unsigned long port, void *dst, unsigned long count);
extern void insw (unsigned long port, void *dst, unsigned long count);
extern void insl (unsigned long port, void *dst, unsigned long count);
extern void outsb (unsigned long port, const void *src, unsigned long count);
extern void outsw (unsigned long port, const void *src, unsigned long count);
extern void outsl (unsigned long port, const void *src, unsigned long count);
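/* Each of these transfers "count" items of the indicated width (byte,
   word, longword) between a single port and the buffer at dst/src.  */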

/*
 * The Alpha Jensen hardware for some rather strange reason puts
 * the RTC clock at 0x170 instead of 0x70.  Probably due to some
 * misguided idea about using 0x70 for NMI stuff.
 *
 * These defines will override the defaults when doing RTC queries.
 */

#ifdef CONFIG_ALPHA_GENERIC
# define RTC_PORT(x) ((x) + alpha_mv.rtc_port)
#else
# ifdef CONFIG_ALPHA_JENSEN
#  define RTC_PORT(x) (0x170 + (x))
# else
#  define RTC_PORT(x) (0x70 + (x))
# endif
#endif
#define RTC_ALWAYS_BCD 0
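/* In other words, RTC_PORT(0) is 0x70 on most machines, 0x170 on Jensen,
   and on a generic kernel whatever base the machine vector reports at
   run time.  */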

/*
 * Some mucking forons use if[n]def writeq to check if the platform has it.
 * It's a bloody bad idea and we probably want ARCH_HAS_WRITEQ for them
 * to play with; for now just use cpp anti-recursion logic and make sure
 * that damn thing is defined and expands to itself.
 */

#define writeq writeq
#define readq readq

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access.
 */
#define xlate_dev_mem_ptr(p) __va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer.
 */
#define xlate_dev_kmem_ptr(p) p

#endif /* __KERNEL__ */

#endif /* __ALPHA_IO_H */