/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ALPHA_IO_H
#define __ALPHA_IO_H

#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/compiler.h>
#include <asm/machvec.h>
#include <asm/hwrpb.h>

/* The generic header contains only prototypes. Including it ensures that
   the implementation we have here matches that interface. */
#include <asm-generic/iomap.h>

/* We don't use IO slowdowns on the Alpha, but.. */
#define __SLOW_DOWN_IO do { } while (0)
#define SLOW_DOWN_IO do { } while (0)

/*
 * Virtual -> physical identity mapping starts at this offset
 */
#ifdef USE_48_BIT_KSEG
#define IDENT_ADDR 0xffff800000000000UL
#else
#define IDENT_ADDR 0xfffffc0000000000UL
#endif

/*
 * We try to avoid hae updates (thus the cache), but when we
 * do need to update the hae, we need to do it atomically, so
 * that any interrupts wouldn't get confused with the hae
 * register not being up-to-date with respect to the hardware
 * value.
 */
extern inline void __set_hae(unsigned long new_hae)
{
	unsigned long flags = swpipl(IPL_MAX);

	barrier();

	alpha_mv.hae_cache = new_hae;
	*alpha_mv.hae_register = new_hae;
	mb();
	/* Re-read to make sure it was written. */
	new_hae = *alpha_mv.hae_register;

	setipl(flags);
	barrier();
}

extern inline void set_hae(unsigned long new_hae)
{
	if (new_hae != alpha_mv.hae_cache)
		__set_hae(new_hae);
}

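/*
 * Usage sketch (illustration only, not part of this interface): chipset
 * code that must reach sparse space beyond what the current HAE window
 * covers typically folds the high address bits into the HAE first, along
 * the lines of:
 *
 *	unsigned long msb = addr & 0xf8000000;	/- bits supplied by the HAE -/
 *	set_hae(msb);				/- cheap if hae_cache matches -/
 *	... access the window using the remaining low address bits ...
 *
 * set_hae() is the cheap entry point because of the hae_cache test above;
 * __set_hae() does the real update at IPL_MAX so an interrupt can never
 * observe a cache/register mismatch.
 */
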
/*
 * Change virtual addresses to physical addresses and vice versa.
 */
#ifdef USE_48_BIT_KSEG
static inline unsigned long virt_to_phys(volatile void *address)
{
	return (unsigned long)address - IDENT_ADDR;
}

static inline void * phys_to_virt(unsigned long address)
{
	return (void *) (address + IDENT_ADDR);
}
#else
static inline unsigned long virt_to_phys(volatile void *address)
{
	unsigned long phys = (unsigned long)address;

	/* Sign-extend from bit 41. */
	phys <<= (64 - 41);
	phys = (long)phys >> (64 - 41);

	/* Crop to the physical address width of the processor. */
	phys &= (1ul << hwrpb->pa_bits) - 1;

	return phys;
}

static inline void * phys_to_virt(unsigned long address)
{
	return (void *)(IDENT_ADDR + (address & ((1ul << 41) - 1)));
}
#endif

#define virt_to_phys virt_to_phys
#define phys_to_virt phys_to_virt
#define page_to_phys(page) page_to_pa(page)

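/*
 * Worked example (illustration only): with the default KSEG base,
 * IDENT_ADDR is 0xfffffc0000000000, so for a direct-mapped pointer such
 * as (void *)0xfffffc0000001000, virt_to_phys() yields 0x1000 (the low
 * bits survive the shift pair and the pa_bits mask), and
 * phys_to_virt(0x1000) maps it straight back to 0xfffffc0000001000.
 */
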
/* Maximum PIO space address supported? */
#define IO_SPACE_LIMIT 0xffff

/*
 * Change addresses as seen by the kernel (virtual) to addresses as
 * seen by a device (bus), and vice versa.
 *
 * Note that this only works for a limited range of kernel addresses,
 * and very well may not span all memory. Consider this interface
 * deprecated in favour of the DMA-mapping API.
 */
extern unsigned long __direct_map_base;
extern unsigned long __direct_map_size;

static inline unsigned long __deprecated isa_virt_to_bus(volatile void *address)
{
	unsigned long phys = virt_to_phys(address);
	unsigned long bus = phys + __direct_map_base;
	return phys <= __direct_map_size ? bus : 0;
}
#define isa_virt_to_bus isa_virt_to_bus

static inline void * __deprecated isa_bus_to_virt(unsigned long address)
{
	void *virt;

	/* This check is a sanity check but also ensures that bus address 0
	   maps to virtual address 0 which is useful to detect null pointers
	   (the NCR driver is much simpler if NULL pointers are preserved). */
	address -= __direct_map_base;
	virt = phys_to_virt(address);
	return (long)address <= 0 ? NULL : virt;
}
#define isa_bus_to_virt isa_bus_to_virt

/*
 * There are different chipsets to interface the Alpha CPUs to the world.
 */

#define IO_CONCAT(a,b) _IO_CONCAT(a,b)
#define _IO_CONCAT(a,b) a ## _ ## b
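
/*
 * IO_CONCAT() is plain token pasting with one level of argument expansion,
 * so with __IO_PREFIX defined to "generic" below, IO_CONCAT(__IO_PREFIX,readl)
 * expands to generic_readl; a platform header that sets __IO_PREFIX to cia
 * would get cia_readl, and so on.
 */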

#ifdef CONFIG_ALPHA_GENERIC

/* In a generic kernel, we always go through the machine vector. */

#define REMAP1(TYPE, NAME, QUAL) \
static inline TYPE generic_##NAME(QUAL void __iomem *addr) \
{ \
	return alpha_mv.mv_##NAME(addr); \
}

#define REMAP2(TYPE, NAME, QUAL) \
static inline void generic_##NAME(TYPE b, QUAL void __iomem *addr) \
{ \
	alpha_mv.mv_##NAME(b, addr); \
}

REMAP1(unsigned int, ioread8, const)
REMAP1(unsigned int, ioread16, const)
REMAP1(unsigned int, ioread32, const)
REMAP1(u64, ioread64, const)
REMAP1(u8, readb, const volatile)
REMAP1(u16, readw, const volatile)
REMAP1(u32, readl, const volatile)
REMAP1(u64, readq, const volatile)

REMAP2(u8, iowrite8, /**/)
REMAP2(u16, iowrite16, /**/)
REMAP2(u32, iowrite32, /**/)
REMAP2(u64, iowrite64, /**/)
REMAP2(u8, writeb, volatile)
REMAP2(u16, writew, volatile)
REMAP2(u32, writel, volatile)
REMAP2(u64, writeq, volatile)

#undef REMAP1
#undef REMAP2
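
/*
 * For reference, REMAP1(u32, readl, const volatile) above expands to:
 *
 *	static inline u32 generic_readl(const volatile void __iomem *addr)
 *	{
 *		return alpha_mv.mv_readl(addr);
 *	}
 *
 * i.e. every accessor in the generic kernel is one indirect call through
 * the machine vector.
 */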

extern inline void __iomem *generic_ioportmap(unsigned long a)
{
	return alpha_mv.mv_ioportmap(a);
}

static inline void __iomem *generic_ioremap(unsigned long a, unsigned long s)
{
	return alpha_mv.mv_ioremap(a, s);
}

static inline void generic_iounmap(volatile void __iomem *a)
{
	return alpha_mv.mv_iounmap(a);
}

static inline int generic_is_ioaddr(unsigned long a)
{
	return alpha_mv.mv_is_ioaddr(a);
}

static inline int generic_is_mmio(const volatile void __iomem *a)
{
	return alpha_mv.mv_is_mmio(a);
}

#define __IO_PREFIX generic
#define generic_trivial_rw_bw 0
#define generic_trivial_rw_lq 0
#define generic_trivial_io_bw 0
#define generic_trivial_io_lq 0
#define generic_trivial_iounmap 0
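
/*
 * A non-generic build gets the equivalent of the above from the chipset
 * header included below. Sketch of what such a header provides (names and
 * values illustrative, not copied from any particular core_*.h):
 *
 *	#define __IO_PREFIX		cia
 *	#define cia_trivial_rw_bw	1
 *	#define cia_trivial_rw_lq	1
 *	#define cia_trivial_io_bw	1
 *	#define cia_trivial_io_lq	1
 *	#define cia_trivial_iounmap	1
 *
 * Setting a trivial_* flag to 1 lets the corresponding accessors further
 * down be expanded inline instead of going through the out-of-line
 * versions declared below.
 */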

#else

#if defined(CONFIG_ALPHA_APECS)
# include <asm/core_apecs.h>
#elif defined(CONFIG_ALPHA_CIA)
# include <asm/core_cia.h>
#elif defined(CONFIG_ALPHA_IRONGATE)
# include <asm/core_irongate.h>
#elif defined(CONFIG_ALPHA_JENSEN)
# include <asm/jensen.h>
#elif defined(CONFIG_ALPHA_LCA)
# include <asm/core_lca.h>
#elif defined(CONFIG_ALPHA_MARVEL)
# include <asm/core_marvel.h>
#elif defined(CONFIG_ALPHA_MCPCIA)
# include <asm/core_mcpcia.h>
#elif defined(CONFIG_ALPHA_POLARIS)
# include <asm/core_polaris.h>
#elif defined(CONFIG_ALPHA_T2)
# include <asm/core_t2.h>
#elif defined(CONFIG_ALPHA_TSUNAMI)
# include <asm/core_tsunami.h>
#elif defined(CONFIG_ALPHA_TITAN)
# include <asm/core_titan.h>
#elif defined(CONFIG_ALPHA_WILDFIRE)
# include <asm/core_wildfire.h>
#else
#error "What system is this?"
#endif

#endif /* GENERIC */

/*
 * We always have external versions of these routines.
 */
extern u8 inb(unsigned long port);
extern u16 inw(unsigned long port);
extern u32 inl(unsigned long port);
extern void outb(u8 b, unsigned long port);
extern void outw(u16 b, unsigned long port);
extern void outl(u32 b, unsigned long port);
#define inb inb
#define inw inw
#define inl inl
#define outb outb
#define outw outw
#define outl outl

extern u8 readb(const volatile void __iomem *addr);
extern u16 readw(const volatile void __iomem *addr);
extern u32 readl(const volatile void __iomem *addr);
extern u64 readq(const volatile void __iomem *addr);
extern void writeb(u8 b, volatile void __iomem *addr);
extern void writew(u16 b, volatile void __iomem *addr);
extern void writel(u32 b, volatile void __iomem *addr);
extern void writeq(u64 b, volatile void __iomem *addr);
#define readb readb
#define readw readw
#define readl readl
#define readq readq
#define writeb writeb
#define writew writew
#define writel writel
#define writeq writeq

extern u8 __raw_readb(const volatile void __iomem *addr);
extern u16 __raw_readw(const volatile void __iomem *addr);
extern u32 __raw_readl(const volatile void __iomem *addr);
extern u64 __raw_readq(const volatile void __iomem *addr);
extern void __raw_writeb(u8 b, volatile void __iomem *addr);
extern void __raw_writew(u16 b, volatile void __iomem *addr);
extern void __raw_writel(u32 b, volatile void __iomem *addr);
extern void __raw_writeq(u64 b, volatile void __iomem *addr);
#define __raw_readb __raw_readb
#define __raw_readw __raw_readw
#define __raw_readl __raw_readl
#define __raw_readq __raw_readq
#define __raw_writeb __raw_writeb
#define __raw_writew __raw_writew
#define __raw_writel __raw_writel
#define __raw_writeq __raw_writeq

/*
 * Mapping from port numbers to __iomem space is pretty easy.
 */

/* These two have to be extern inline because of the extern prototype from
   <asm-generic/iomap.h>. It is not legal to mix "extern" and "static" for
   the same declaration. */
extern inline void __iomem *ioport_map(unsigned long port, unsigned int size)
{
	return IO_CONCAT(__IO_PREFIX,ioportmap) (port);
}

extern inline void ioport_unmap(void __iomem *addr)
{
}

#define ioport_map ioport_map
#define ioport_unmap ioport_unmap

static inline void __iomem *ioremap(unsigned long port, unsigned long size)
{
	return IO_CONCAT(__IO_PREFIX,ioremap) (port, size);
}

#define ioremap_wc ioremap
#define ioremap_uc ioremap

static inline void iounmap(volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,iounmap)(addr);
}

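/*
 * Typical driver-side usage of the mapping interface (sketch only;
 * BAR_PHYS, REG_CTRL and REG_STAT are made-up names, not part of this
 * header):
 *
 *	void __iomem *regs = ioremap(BAR_PHYS, 0x1000);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + REG_CTRL);
 *	status = readl(regs + REG_STAT);
 *	iounmap(regs);
 */
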
static inline int __is_ioaddr(unsigned long addr)
{
	return IO_CONCAT(__IO_PREFIX,is_ioaddr)(addr);
}
#define __is_ioaddr(a) __is_ioaddr((unsigned long)(a))

static inline int __is_mmio(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,is_mmio)(addr);
}


/*
 * If the actual I/O bits are sufficiently trivial, then expand inline.
 */

#if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
extern inline unsigned int ioread8(const void __iomem *addr)
{
	unsigned int ret;
	mb();
	ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
	mb();
	return ret;
}

extern inline unsigned int ioread16(const void __iomem *addr)
{
	unsigned int ret;
	mb();
	ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
	mb();
	return ret;
}

extern inline void iowrite8(u8 b, void __iomem *addr)
{
	mb();
	IO_CONCAT(__IO_PREFIX, iowrite8)(b, addr);
}

extern inline void iowrite16(u16 b, void __iomem *addr)
{
	mb();
	IO_CONCAT(__IO_PREFIX, iowrite16)(b, addr);
}

extern inline u8 inb(unsigned long port)
{
	return ioread8(ioport_map(port, 1));
}

extern inline u16 inw(unsigned long port)
{
	return ioread16(ioport_map(port, 2));
}

extern inline void outb(u8 b, unsigned long port)
{
	iowrite8(b, ioport_map(port, 1));
}

extern inline void outw(u16 b, unsigned long port)
{
	iowrite16(b, ioport_map(port, 2));
}
#endif

#define ioread8 ioread8
#define ioread16 ioread16
#define iowrite8 iowrite8
#define iowrite16 iowrite16

#if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
extern inline unsigned int ioread32(const void __iomem *addr)
{
	unsigned int ret;
	mb();
	ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
	mb();
	return ret;
}

extern inline u64 ioread64(const void __iomem *addr)
{
	u64 ret;
	mb();
	ret = IO_CONCAT(__IO_PREFIX,ioread64)(addr);
	mb();
	return ret;
}

extern inline void iowrite32(u32 b, void __iomem *addr)
{
	mb();
	IO_CONCAT(__IO_PREFIX, iowrite32)(b, addr);
}

extern inline void iowrite64(u64 b, void __iomem *addr)
{
	mb();
	IO_CONCAT(__IO_PREFIX, iowrite64)(b, addr);
}

extern inline u32 inl(unsigned long port)
{
	return ioread32(ioport_map(port, 4));
}

extern inline void outl(u32 b, unsigned long port)
{
	iowrite32(b, ioport_map(port, 4));
}
#endif

#define ioread32 ioread32
#define ioread64 ioread64
#define iowrite32 iowrite32
#define iowrite64 iowrite64

#if IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 1
extern inline u8 __raw_readb(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readb)(addr);
}

extern inline u16 __raw_readw(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readw)(addr);
}

extern inline void __raw_writeb(u8 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writeb)(b, addr);
}

extern inline void __raw_writew(u16 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writew)(b, addr);
}

extern inline u8 readb(const volatile void __iomem *addr)
{
	u8 ret;
	mb();
	ret = __raw_readb(addr);
	mb();
	return ret;
}

extern inline u16 readw(const volatile void __iomem *addr)
{
	u16 ret;
	mb();
	ret = __raw_readw(addr);
	mb();
	return ret;
}

extern inline void writeb(u8 b, volatile void __iomem *addr)
{
	mb();
	__raw_writeb(b, addr);
}

extern inline void writew(u16 b, volatile void __iomem *addr)
{
	mb();
	__raw_writew(b, addr);
}
#endif

#if IO_CONCAT(__IO_PREFIX,trivial_rw_lq) == 1
extern inline u32 __raw_readl(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readl)(addr);
}

extern inline u64 __raw_readq(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readq)(addr);
}

extern inline void __raw_writel(u32 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writel)(b, addr);
}

extern inline void __raw_writeq(u64 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writeq)(b, addr);
}

extern inline u32 readl(const volatile void __iomem *addr)
{
	u32 ret;
	mb();
	ret = __raw_readl(addr);
	mb();
	return ret;
}

extern inline u64 readq(const volatile void __iomem *addr)
{
	u64 ret;
	mb();
	ret = __raw_readq(addr);
	mb();
	return ret;
}

extern inline void writel(u32 b, volatile void __iomem *addr)
{
	mb();
	__raw_writel(b, addr);
}

extern inline void writeq(u64 b, volatile void __iomem *addr)
{
	mb();
	__raw_writeq(b, addr);
}
#endif

#define ioread16be(p) swab16(ioread16(p))
#define ioread32be(p) swab32(ioread32(p))
#define iowrite16be(v,p) iowrite16(swab16(v), (p))
#define iowrite32be(v,p) iowrite32(swab32(v), (p))

#define inb_p inb
#define inw_p inw
#define inl_p inl
#define outb_p outb
#define outw_p outw
#define outl_p outl

extern u8 readb_relaxed(const volatile void __iomem *addr);
extern u16 readw_relaxed(const volatile void __iomem *addr);
extern u32 readl_relaxed(const volatile void __iomem *addr);
extern u64 readq_relaxed(const volatile void __iomem *addr);
#define readb_relaxed readb_relaxed
#define readw_relaxed readw_relaxed
#define readl_relaxed readl_relaxed
#define readq_relaxed readq_relaxed

#if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
extern inline u8 readb_relaxed(const volatile void __iomem *addr)
{
	mb();
	return __raw_readb(addr);
}

extern inline u16 readw_relaxed(const volatile void __iomem *addr)
{
	mb();
	return __raw_readw(addr);
}
#endif

#if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
extern inline u32 readl_relaxed(const volatile void __iomem *addr)
{
	mb();
	return __raw_readl(addr);
}

extern inline u64 readq_relaxed(const volatile void __iomem *addr)
{
	mb();
	return __raw_readq(addr);
}
#endif

#define writeb_relaxed writeb
#define writew_relaxed writew
#define writel_relaxed writel
#define writeq_relaxed writeq

/*
 * String version of IO memory access ops:
 */
extern void memcpy_fromio(void *, const volatile void __iomem *, long);
extern void memcpy_toio(volatile void __iomem *, const void *, long);
extern void _memset_c_io(volatile void __iomem *, unsigned long, long);

static inline void memset_io(volatile void __iomem *addr, u8 c, long len)
{
	_memset_c_io(addr, 0x0101010101010101UL * c, len);
}

#define __HAVE_ARCH_MEMSETW_IO
static inline void memsetw_io(volatile void __iomem *addr, u16 c, long len)
{
	_memset_c_io(addr, 0x0001000100010001UL * c, len);
}

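/*
 * The multiplications above simply replicate the fill value across all
 * eight bytes of the quadword that _memset_c_io() stores: for example,
 * c == 0xAB gives 0xABABABABABABABAB, and a 16-bit c == 0x1234 gives
 * 0x1234123412341234.
 */
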
#define memset_io memset_io
#define memcpy_fromio memcpy_fromio
#define memcpy_toio memcpy_toio

/*
 * String versions of in/out ops:
 */
extern void insb (unsigned long port, void *dst, unsigned long count);
extern void insw (unsigned long port, void *dst, unsigned long count);
extern void insl (unsigned long port, void *dst, unsigned long count);
extern void outsb (unsigned long port, const void *src, unsigned long count);
extern void outsw (unsigned long port, const void *src, unsigned long count);
extern void outsl (unsigned long port, const void *src, unsigned long count);

#define insb insb
#define insw insw
#define insl insl
#define outsb outsb
#define outsw outsw
#define outsl outsl

/*
 * The Alpha Jensen hardware for some rather strange reason puts
 * the RTC clock at 0x170 instead of 0x70. Probably due to some
 * misguided idea about using 0x70 for NMI stuff.
 *
 * These defines will override the defaults when doing RTC queries.
 */

#ifdef CONFIG_ALPHA_GENERIC
# define RTC_PORT(x) ((x) + alpha_mv.rtc_port)
#else
# ifdef CONFIG_ALPHA_JENSEN
#  define RTC_PORT(x) (0x170+(x))
# else
#  define RTC_PORT(x) (0x70 + (x))
# endif
#endif
#define RTC_ALWAYS_BCD 0
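
/*
 * Example of how the offsets are consumed by the CMOS/RTC access code
 * (sketch only; "reg" is whichever CMOS register is being read):
 *
 *	outb(reg, RTC_PORT(0));		register-select (index) port
 *	val = inb(RTC_PORT(1));		data port
 *
 * On Jensen this therefore hits 0x170/0x171 instead of the PC-style
 * 0x70/0x71.
 */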

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p) __va(p)

/*
 * These get provided from <asm-generic/iomap.h> since alpha does not
 * select GENERIC_IOMAP.
 */
#define ioread64 ioread64
#define iowrite64 iowrite64
#define ioread64be ioread64be
#define iowrite64be iowrite64be
#define ioread8_rep ioread8_rep
#define ioread16_rep ioread16_rep
#define ioread32_rep ioread32_rep
#define iowrite8_rep iowrite8_rep
#define iowrite16_rep iowrite16_rep
#define iowrite32_rep iowrite32_rep
#define pci_iounmap pci_iounmap

#include <asm-generic/io.h>

#endif /* __KERNEL__ */

#endif /* __ALPHA_IO_H */