Loading...
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1994, 1995 Waldorf GmbH
7 * Copyright (C) 1994 - 2000, 06 Ralf Baechle
8 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
9 * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
10 * Author: Maciej W. Rozycki <macro@mips.com>
11 */
12#ifndef _ASM_IO_H
13#define _ASM_IO_H
14
15#include <linux/compiler.h>
16#include <linux/kernel.h>
17#include <linux/types.h>
18#include <linux/irqflags.h>
19
20#include <asm/addrspace.h>
21#include <asm/bug.h>
22#include <asm/byteorder.h>
23#include <asm/cpu.h>
24#include <asm/cpu-features.h>
25#include <asm-generic/iomap.h>
26#include <asm/page.h>
27#include <asm/pgtable-bits.h>
28#include <asm/processor.h>
29#include <asm/string.h>
30
31#include <ioremap.h>
32#include <mangle-port.h>
33
34/*
35 * Slowdown I/O port space accesses for antique hardware.
36 */
37#undef CONF_SLOWDOWN_IO
38
39/*
40 * Raw operations are never swapped in software. OTOH values that raw
41 * operations are working on may or may not have been swapped by the bus
42 * hardware. An example use would be for flash memory that's used for
43 * execute in place.
44 */
45# define __raw_ioswabb(a, x) (x)
46# define __raw_ioswabw(a, x) (x)
47# define __raw_ioswabl(a, x) (x)
48# define __raw_ioswabq(a, x) (x)
49# define ____raw_ioswabq(a, x) (x)
50
51/* ioswab[bwlq], __mem_ioswab[bwlq] are defined in mangle-port.h */
52
53#define IO_SPACE_LIMIT 0xffff
54
55/*
56 * On MIPS I/O ports are memory mapped, so we access them using normal
57 * load/store instructions. mips_io_port_base is the virtual address to
58 * which all ports are being mapped. For sake of efficiency some code
59 * assumes that this is an address that can be loaded with a single lui
60 * instruction, so the lower 16 bits must be zero. Should be true on
61 * on any sane architecture; generic code does not use this assumption.
62 */
63extern const unsigned long mips_io_port_base;
64
65/*
66 * Gcc will generate code to load the value of mips_io_port_base after each
67 * function call which may be fairly wasteful in some cases. So we don't
68 * play quite by the book. We tell gcc mips_io_port_base is a long variable
69 * which solves the code generation issue. Now we need to violate the
70 * aliasing rules a little to make initialization possible and finally we
71 * will need the barrier() to fight side effects of the aliasing chat.
72 * This trickery will eventually collapse under gcc's optimizer. Oh well.
73 */
74static inline void set_io_port_base(unsigned long base)
75{
76 * (unsigned long *) &mips_io_port_base = base;
77 barrier();
78}
79
80/*
81 * Thanks to James van Artsdalen for a better timing-fix than
82 * the two short jumps: using outb's to a nonexistent port seems
83 * to guarantee better timings even on fast machines.
84 *
85 * On the other hand, I'd like to be sure of a non-existent port:
86 * I feel a bit unsafe about using 0x80 (should be safe, though)
87 *
88 * Linus
89 *
90 */
91
92#define __SLOW_DOWN_IO \
93 __asm__ __volatile__( \
94 "sb\t$0,0x80(%0)" \
95 : : "r" (mips_io_port_base));
96
97#ifdef CONF_SLOWDOWN_IO
98#ifdef REALLY_SLOW_IO
99#define SLOW_DOWN_IO { __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; }
100#else
101#define SLOW_DOWN_IO __SLOW_DOWN_IO
102#endif
103#else
104#define SLOW_DOWN_IO
105#endif
106
107/*
108 * virt_to_phys - map virtual addresses to physical
109 * @address: address to remap
110 *
111 * The returned physical address is the physical (CPU) mapping for
112 * the memory address given. It is only valid to use this function on
113 * addresses directly mapped or allocated via kmalloc.
114 *
115 * This function does not give bus mappings for DMA transfers. In
116 * almost all conceivable cases a device driver should not be using
117 * this function
118 */
119static inline unsigned long virt_to_phys(volatile const void *address)
120{
121 return __pa(address);
122}
123
124/*
125 * phys_to_virt - map physical address to virtual
126 * @address: address to remap
127 *
128 * The returned virtual address is a current CPU mapping for
129 * the memory address given. It is only valid to use this function on
130 * addresses that have a kernel mapping
131 *
132 * This function does not handle bus mappings for DMA transfers. In
133 * almost all conceivable cases a device driver should not be using
134 * this function
135 */
136static inline void * phys_to_virt(unsigned long address)
137{
138 return (void *)(address + PAGE_OFFSET - PHYS_OFFSET);
139}
140
141/*
142 * ISA I/O bus memory addresses are 1:1 with the physical address.
143 */
144static inline unsigned long isa_virt_to_bus(volatile void * address)
145{
146 return (unsigned long)address - PAGE_OFFSET;
147}
148
149static inline void * isa_bus_to_virt(unsigned long address)
150{
151 return (void *)(address + PAGE_OFFSET);
152}
153
154#define isa_page_to_bus page_to_phys
155
156/*
157 * However PCI ones are not necessarily 1:1 and therefore these interfaces
158 * are forbidden in portable PCI drivers.
159 *
160 * Allow them for x86 for legacy drivers, though.
161 */
162#define virt_to_bus virt_to_phys
163#define bus_to_virt phys_to_virt
164
165/*
166 * Change "struct page" to physical address.
167 */
168#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
169
170extern void __iomem * __ioremap(phys_addr_t offset, phys_addr_t size, unsigned long flags);
171extern void __iounmap(const volatile void __iomem *addr);
172
173#ifndef CONFIG_PCI
174struct pci_dev;
175static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {}
176#endif
177
178static inline void __iomem * __ioremap_mode(phys_addr_t offset, unsigned long size,
179 unsigned long flags)
180{
181 void __iomem *addr = plat_ioremap(offset, size, flags);
182
183 if (addr)
184 return addr;
185
186#define __IS_LOW512(addr) (!((phys_addr_t)(addr) & (phys_addr_t) ~0x1fffffffULL))
187
188 if (cpu_has_64bit_addresses) {
189 u64 base = UNCAC_BASE;
190
191 /*
192 * R10000 supports a 2 bit uncached attribute therefore
193 * UNCAC_BASE may not equal IO_BASE.
194 */
195 if (flags == _CACHE_UNCACHED)
196 base = (u64) IO_BASE;
197 return (void __iomem *) (unsigned long) (base + offset);
198 } else if (__builtin_constant_p(offset) &&
199 __builtin_constant_p(size) && __builtin_constant_p(flags)) {
200 phys_addr_t phys_addr, last_addr;
201
202 phys_addr = fixup_bigphys_addr(offset, size);
203
204 /* Don't allow wraparound or zero size. */
205 last_addr = phys_addr + size - 1;
206 if (!size || last_addr < phys_addr)
207 return NULL;
208
209 /*
210 * Map uncached objects in the low 512MB of address
211 * space using KSEG1.
212 */
213 if (__IS_LOW512(phys_addr) && __IS_LOW512(last_addr) &&
214 flags == _CACHE_UNCACHED)
215 return (void __iomem *)
216 (unsigned long)CKSEG1ADDR(phys_addr);
217 }
218
219 return __ioremap(offset, size, flags);
220
221#undef __IS_LOW512
222}
223
224/*
225 * ioremap - map bus memory into CPU space
226 * @offset: bus address of the memory
227 * @size: size of the resource to map
228 *
229 * ioremap performs a platform specific sequence of operations to
230 * make bus memory CPU accessible via the readb/readw/readl/writeb/
231 * writew/writel functions and the other mmio helpers. The returned
232 * address is not guaranteed to be usable directly as a virtual
233 * address.
234 */
235#define ioremap(offset, size) \
236 __ioremap_mode((offset), (size), _CACHE_UNCACHED)
237
238/*
239 * ioremap_nocache - map bus memory into CPU space
240 * @offset: bus address of the memory
241 * @size: size of the resource to map
242 *
243 * ioremap_nocache performs a platform specific sequence of operations to
244 * make bus memory CPU accessible via the readb/readw/readl/writeb/
245 * writew/writel functions and the other mmio helpers. The returned
246 * address is not guaranteed to be usable directly as a virtual
247 * address.
248 *
249 * This version of ioremap ensures that the memory is marked uncachable
250 * on the CPU as well as honouring existing caching rules from things like
251 * the PCI bus. Note that there are other caches and buffers on many
252 * busses. In particular driver authors should read up on PCI writes
253 *
254 * It's useful if some control registers are in such an area and
255 * write combining or read caching is not desirable:
256 */
257#define ioremap_nocache(offset, size) \
258 __ioremap_mode((offset), (size), _CACHE_UNCACHED)
259#define ioremap_uc ioremap_nocache
260
261/*
262 * ioremap_cachable - map bus memory into CPU space
263 * @offset: bus address of the memory
264 * @size: size of the resource to map
265 *
266 * ioremap_nocache performs a platform specific sequence of operations to
267 * make bus memory CPU accessible via the readb/readw/readl/writeb/
268 * writew/writel functions and the other mmio helpers. The returned
269 * address is not guaranteed to be usable directly as a virtual
270 * address.
271 *
272 * This version of ioremap ensures that the memory is marked cachable by
273 * the CPU. Also enables full write-combining. Useful for some
274 * memory-like regions on I/O busses.
275 */
276#define ioremap_cachable(offset, size) \
277 __ioremap_mode((offset), (size), _page_cachable_default)
278#define ioremap_cache ioremap_cachable
279
280/*
281 * These two are MIPS specific ioremap variant. ioremap_cacheable_cow
282 * requests a cachable mapping, ioremap_uncached_accelerated requests a
283 * mapping using the uncached accelerated mode which isn't supported on
284 * all processors.
285 */
286#define ioremap_cacheable_cow(offset, size) \
287 __ioremap_mode((offset), (size), _CACHE_CACHABLE_COW)
288#define ioremap_uncached_accelerated(offset, size) \
289 __ioremap_mode((offset), (size), _CACHE_UNCACHED_ACCELERATED)
290
291static inline void iounmap(const volatile void __iomem *addr)
292{
293 if (plat_iounmap(addr))
294 return;
295
296#define __IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)
297
298 if (cpu_has_64bit_addresses ||
299 (__builtin_constant_p(addr) && __IS_KSEG1(addr)))
300 return;
301
302 __iounmap(addr);
303
304#undef __IS_KSEG1
305}
306
307#ifdef CONFIG_CPU_CAVIUM_OCTEON
308#define war_octeon_io_reorder_wmb() wmb()
309#else
310#define war_octeon_io_reorder_wmb() do { } while (0)
311#endif
312
313#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq) \
314 \
315static inline void pfx##write##bwlq(type val, \
316 volatile void __iomem *mem) \
317{ \
318 volatile type *__mem; \
319 type __val; \
320 \
321 war_octeon_io_reorder_wmb(); \
322 \
323 __mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \
324 \
325 __val = pfx##ioswab##bwlq(__mem, val); \
326 \
327 if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
328 *__mem = __val; \
329 else if (cpu_has_64bits) { \
330 unsigned long __flags; \
331 type __tmp; \
332 \
333 if (irq) \
334 local_irq_save(__flags); \
335 __asm__ __volatile__( \
336 ".set arch=r4000" "\t\t# __writeq""\n\t" \
337 "dsll32 %L0, %L0, 0" "\n\t" \
338 "dsrl32 %L0, %L0, 0" "\n\t" \
339 "dsll32 %M0, %M0, 0" "\n\t" \
340 "or %L0, %L0, %M0" "\n\t" \
341 "sd %L0, %2" "\n\t" \
342 ".set mips0" "\n" \
343 : "=r" (__tmp) \
344 : "0" (__val), "m" (*__mem)); \
345 if (irq) \
346 local_irq_restore(__flags); \
347 } else \
348 BUG(); \
349} \
350 \
351static inline type pfx##read##bwlq(const volatile void __iomem *mem) \
352{ \
353 volatile type *__mem; \
354 type __val; \
355 \
356 __mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \
357 \
358 if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
359 __val = *__mem; \
360 else if (cpu_has_64bits) { \
361 unsigned long __flags; \
362 \
363 if (irq) \
364 local_irq_save(__flags); \
365 __asm__ __volatile__( \
366 ".set arch=r4000" "\t\t# __readq" "\n\t" \
367 "ld %L0, %1" "\n\t" \
368 "dsra32 %M0, %L0, 0" "\n\t" \
369 "sll %L0, %L0, 0" "\n\t" \
370 ".set mips0" "\n" \
371 : "=r" (__val) \
372 : "m" (*__mem)); \
373 if (irq) \
374 local_irq_restore(__flags); \
375 } else { \
376 __val = 0; \
377 BUG(); \
378 } \
379 \
380 return pfx##ioswab##bwlq(__mem, __val); \
381}
382
383#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, p, slow) \
384 \
385static inline void pfx##out##bwlq##p(type val, unsigned long port) \
386{ \
387 volatile type *__addr; \
388 type __val; \
389 \
390 war_octeon_io_reorder_wmb(); \
391 \
392 __addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
393 \
394 __val = pfx##ioswab##bwlq(__addr, val); \
395 \
396 /* Really, we want this to be atomic */ \
397 BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \
398 \
399 *__addr = __val; \
400 slow; \
401} \
402 \
403static inline type pfx##in##bwlq##p(unsigned long port) \
404{ \
405 volatile type *__addr; \
406 type __val; \
407 \
408 __addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
409 \
410 BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \
411 \
412 __val = *__addr; \
413 slow; \
414 \
415 return pfx##ioswab##bwlq(__addr, __val); \
416}
417
418#define __BUILD_MEMORY_PFX(bus, bwlq, type) \
419 \
420__BUILD_MEMORY_SINGLE(bus, bwlq, type, 1)
421
422#define BUILDIO_MEM(bwlq, type) \
423 \
424__BUILD_MEMORY_PFX(__raw_, bwlq, type) \
425__BUILD_MEMORY_PFX(, bwlq, type) \
426__BUILD_MEMORY_PFX(__mem_, bwlq, type) \
427
428BUILDIO_MEM(b, u8)
429BUILDIO_MEM(w, u16)
430BUILDIO_MEM(l, u32)
431BUILDIO_MEM(q, u64)
432
433#define __BUILD_IOPORT_PFX(bus, bwlq, type) \
434 __BUILD_IOPORT_SINGLE(bus, bwlq, type, ,) \
435 __BUILD_IOPORT_SINGLE(bus, bwlq, type, _p, SLOW_DOWN_IO)
436
437#define BUILDIO_IOPORT(bwlq, type) \
438 __BUILD_IOPORT_PFX(, bwlq, type) \
439 __BUILD_IOPORT_PFX(__mem_, bwlq, type)
440
441BUILDIO_IOPORT(b, u8)
442BUILDIO_IOPORT(w, u16)
443BUILDIO_IOPORT(l, u32)
444#ifdef CONFIG_64BIT
445BUILDIO_IOPORT(q, u64)
446#endif
447
448#define __BUILDIO(bwlq, type) \
449 \
450__BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 0)
451
452__BUILDIO(q, u64)
453
454#define readb_relaxed readb
455#define readw_relaxed readw
456#define readl_relaxed readl
457#define readq_relaxed readq
458
459#define writeb_relaxed writeb
460#define writew_relaxed writew
461#define writel_relaxed writel
462#define writeq_relaxed writeq
463
464#define readb_be(addr) \
465 __raw_readb((__force unsigned *)(addr))
466#define readw_be(addr) \
467 be16_to_cpu(__raw_readw((__force unsigned *)(addr)))
468#define readl_be(addr) \
469 be32_to_cpu(__raw_readl((__force unsigned *)(addr)))
470#define readq_be(addr) \
471 be64_to_cpu(__raw_readq((__force unsigned *)(addr)))
472
473#define writeb_be(val, addr) \
474 __raw_writeb((val), (__force unsigned *)(addr))
475#define writew_be(val, addr) \
476 __raw_writew(cpu_to_be16((val)), (__force unsigned *)(addr))
477#define writel_be(val, addr) \
478 __raw_writel(cpu_to_be32((val)), (__force unsigned *)(addr))
479#define writeq_be(val, addr) \
480 __raw_writeq(cpu_to_be64((val)), (__force unsigned *)(addr))
481
482/*
483 * Some code tests for these symbols
484 */
485#define readq readq
486#define writeq writeq
487
488#define __BUILD_MEMORY_STRING(bwlq, type) \
489 \
490static inline void writes##bwlq(volatile void __iomem *mem, \
491 const void *addr, unsigned int count) \
492{ \
493 const volatile type *__addr = addr; \
494 \
495 while (count--) { \
496 __mem_write##bwlq(*__addr, mem); \
497 __addr++; \
498 } \
499} \
500 \
501static inline void reads##bwlq(volatile void __iomem *mem, void *addr, \
502 unsigned int count) \
503{ \
504 volatile type *__addr = addr; \
505 \
506 while (count--) { \
507 *__addr = __mem_read##bwlq(mem); \
508 __addr++; \
509 } \
510}
511
512#define __BUILD_IOPORT_STRING(bwlq, type) \
513 \
514static inline void outs##bwlq(unsigned long port, const void *addr, \
515 unsigned int count) \
516{ \
517 const volatile type *__addr = addr; \
518 \
519 while (count--) { \
520 __mem_out##bwlq(*__addr, port); \
521 __addr++; \
522 } \
523} \
524 \
525static inline void ins##bwlq(unsigned long port, void *addr, \
526 unsigned int count) \
527{ \
528 volatile type *__addr = addr; \
529 \
530 while (count--) { \
531 *__addr = __mem_in##bwlq(port); \
532 __addr++; \
533 } \
534}
535
536#define BUILDSTRING(bwlq, type) \
537 \
538__BUILD_MEMORY_STRING(bwlq, type) \
539__BUILD_IOPORT_STRING(bwlq, type)
540
541BUILDSTRING(b, u8)
542BUILDSTRING(w, u16)
543BUILDSTRING(l, u32)
544#ifdef CONFIG_64BIT
545BUILDSTRING(q, u64)
546#endif
547
548
549#ifdef CONFIG_CPU_CAVIUM_OCTEON
550#define mmiowb() wmb()
551#else
552/* Depends on MIPS II instruction set */
553#define mmiowb() asm volatile ("sync" ::: "memory")
554#endif
555
556static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
557{
558 memset((void __force *) addr, val, count);
559}
560static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
561{
562 memcpy(dst, (void __force *) src, count);
563}
564static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
565{
566 memcpy((void __force *) dst, src, count);
567}
568
569/*
570 * The caches on some architectures aren't dma-coherent and have need to
571 * handle this in software. There are three types of operations that
572 * can be applied to dma buffers.
573 *
574 * - dma_cache_wback_inv(start, size) makes caches and coherent by
575 * writing the content of the caches back to memory, if necessary.
576 * The function also invalidates the affected part of the caches as
577 * necessary before DMA transfers from outside to memory.
578 * - dma_cache_wback(start, size) makes caches and coherent by
579 * writing the content of the caches back to memory, if necessary.
580 * The function also invalidates the affected part of the caches as
581 * necessary before DMA transfers from outside to memory.
582 * - dma_cache_inv(start, size) invalidates the affected parts of the
583 * caches. Dirty lines of the caches may be written back or simply
584 * be discarded. This operation is necessary before dma operations
585 * to the memory.
586 *
587 * This API used to be exported; it now is for arch code internal use only.
588 */
589#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
590
591extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
592extern void (*_dma_cache_wback)(unsigned long start, unsigned long size);
593extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);
594
595#define dma_cache_wback_inv(start, size) _dma_cache_wback_inv(start, size)
596#define dma_cache_wback(start, size) _dma_cache_wback(start, size)
597#define dma_cache_inv(start, size) _dma_cache_inv(start, size)
598
599#else /* Sane hardware */
600
601#define dma_cache_wback_inv(start,size) \
602 do { (void) (start); (void) (size); } while (0)
603#define dma_cache_wback(start,size) \
604 do { (void) (start); (void) (size); } while (0)
605#define dma_cache_inv(start,size) \
606 do { (void) (start); (void) (size); } while (0)
607
608#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */
609
610/*
611 * Read a 32-bit register that requires a 64-bit read cycle on the bus.
612 * Avoid interrupt mucking, just adjust the address for 4-byte access.
613 * Assume the addresses are 8-byte aligned.
614 */
615#ifdef __MIPSEB__
616#define __CSR_32_ADJUST 4
617#else
618#define __CSR_32_ADJUST 0
619#endif
620
621#define csr_out32(v, a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST) = (v))
622#define csr_in32(a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST))
623
624/*
625 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
626 * access
627 */
628#define xlate_dev_mem_ptr(p) __va(p)
629
630/*
631 * Convert a virtual cached pointer to an uncached pointer
632 */
633#define xlate_dev_kmem_ptr(p) p
634
635#endif /* _ASM_IO_H */
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1994, 1995 Waldorf GmbH
7 * Copyright (C) 1994 - 2000, 06 Ralf Baechle
8 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
9 * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
10 * Author: Maciej W. Rozycki <macro@mips.com>
11 */
12#ifndef _ASM_IO_H
13#define _ASM_IO_H
14
15#define ARCH_HAS_IOREMAP_WC
16
17#include <linux/compiler.h>
18#include <linux/kernel.h>
19#include <linux/types.h>
20#include <linux/irqflags.h>
21
22#include <asm/addrspace.h>
23#include <asm/barrier.h>
24#include <asm/bug.h>
25#include <asm/byteorder.h>
26#include <asm/cpu.h>
27#include <asm/cpu-features.h>
28#include <asm-generic/iomap.h>
29#include <asm/page.h>
30#include <asm/pgtable-bits.h>
31#include <asm/processor.h>
32#include <asm/string.h>
33#include <mangle-port.h>
34
35/*
36 * Raw operations are never swapped in software. OTOH values that raw
37 * operations are working on may or may not have been swapped by the bus
38 * hardware. An example use would be for flash memory that's used for
39 * execute in place.
40 */
41# define __raw_ioswabb(a, x) (x)
42# define __raw_ioswabw(a, x) (x)
43# define __raw_ioswabl(a, x) (x)
44# define __raw_ioswabq(a, x) (x)
45# define ____raw_ioswabq(a, x) (x)
46
47# define __relaxed_ioswabb ioswabb
48# define __relaxed_ioswabw ioswabw
49# define __relaxed_ioswabl ioswabl
50# define __relaxed_ioswabq ioswabq
51
52/* ioswab[bwlq], __mem_ioswab[bwlq] are defined in mangle-port.h */
53
54/*
55 * On MIPS I/O ports are memory mapped, so we access them using normal
56 * load/store instructions. mips_io_port_base is the virtual address to
57 * which all ports are being mapped. For sake of efficiency some code
58 * assumes that this is an address that can be loaded with a single lui
59 * instruction, so the lower 16 bits must be zero. Should be true on
60 * any sane architecture; generic code does not use this assumption.
61 */
62extern unsigned long mips_io_port_base;
63
64static inline void set_io_port_base(unsigned long base)
65{
66 mips_io_port_base = base;
67}
68
69/*
70 * Provide the necessary definitions for generic iomap. We make use of
71 * mips_io_port_base for iomap(), but we don't reserve any low addresses for
72 * use with I/O ports.
73 */
74
75#define HAVE_ARCH_PIO_SIZE
76#define PIO_OFFSET mips_io_port_base
77#define PIO_MASK IO_SPACE_LIMIT
78#define PIO_RESERVED 0x0UL
79
80/*
81 * Enforce in-order execution of data I/O. In the MIPS architecture
82 * these are equivalent to corresponding platform-specific memory
83 * barriers defined in <asm/barrier.h>. API pinched from PowerPC,
84 * with sync additionally defined.
85 */
86#define iobarrier_rw() mb()
87#define iobarrier_r() rmb()
88#define iobarrier_w() wmb()
89#define iobarrier_sync() iob()
90
91/*
92 * virt_to_phys - map virtual addresses to physical
93 * @address: address to remap
94 *
95 * The returned physical address is the physical (CPU) mapping for
96 * the memory address given. It is only valid to use this function on
97 * addresses directly mapped or allocated via kmalloc.
98 *
99 * This function does not give bus mappings for DMA transfers. In
100 * almost all conceivable cases a device driver should not be using
101 * this function
102 */
103static inline unsigned long __virt_to_phys_nodebug(volatile const void *address)
104{
105 return __pa(address);
106}
107
108#ifdef CONFIG_DEBUG_VIRTUAL
109extern phys_addr_t __virt_to_phys(volatile const void *x);
110#else
111#define __virt_to_phys(x) __virt_to_phys_nodebug(x)
112#endif
113
114#define virt_to_phys virt_to_phys
115static inline phys_addr_t virt_to_phys(const volatile void *x)
116{
117 return __virt_to_phys(x);
118}
119
120/*
121 * phys_to_virt - map physical address to virtual
122 * @address: address to remap
123 *
124 * The returned virtual address is a current CPU mapping for
125 * the memory address given. It is only valid to use this function on
126 * addresses that have a kernel mapping
127 *
128 * This function does not handle bus mappings for DMA transfers. In
129 * almost all conceivable cases a device driver should not be using
130 * this function
131 */
132static inline void * phys_to_virt(unsigned long address)
133{
134 return __va(address);
135}
136
137/*
138 * ISA I/O bus memory addresses are 1:1 with the physical address.
139 */
140static inline unsigned long isa_virt_to_bus(volatile void *address)
141{
142 return virt_to_phys(address);
143}
144
145static inline void *isa_bus_to_virt(unsigned long address)
146{
147 return phys_to_virt(address);
148}
149
150/*
151 * Change "struct page" to physical address.
152 */
153#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
154
155void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
156 unsigned long prot_val);
157void iounmap(const volatile void __iomem *addr);
158
159/*
160 * ioremap - map bus memory into CPU space
161 * @offset: bus address of the memory
162 * @size: size of the resource to map
163 *
164 * ioremap performs a platform specific sequence of operations to
165 * make bus memory CPU accessible via the readb/readw/readl/writeb/
166 * writew/writel functions and the other mmio helpers. The returned
167 * address is not guaranteed to be usable directly as a virtual
168 * address.
169 */
170#define ioremap(offset, size) \
171 ioremap_prot((offset), (size), _CACHE_UNCACHED)
172#define ioremap_uc ioremap
173
174/*
175 * ioremap_cache - map bus memory into CPU space
176 * @offset: bus address of the memory
177 * @size: size of the resource to map
178 *
179 * ioremap_cache performs a platform specific sequence of operations to
180 * make bus memory CPU accessible via the readb/readw/readl/writeb/
181 * writew/writel functions and the other mmio helpers. The returned
182 * address is not guaranteed to be usable directly as a virtual
183 * address.
184 *
185 * This version of ioremap ensures that the memory is marked cachable by
186 * the CPU. Also enables full write-combining. Useful for some
187 * memory-like regions on I/O busses.
188 */
189#define ioremap_cache(offset, size) \
190 ioremap_prot((offset), (size), _page_cachable_default)
191
192/*
193 * ioremap_wc - map bus memory into CPU space
194 * @offset: bus address of the memory
195 * @size: size of the resource to map
196 *
197 * ioremap_wc performs a platform specific sequence of operations to
198 * make bus memory CPU accessible via the readb/readw/readl/writeb/
199 * writew/writel functions and the other mmio helpers. The returned
200 * address is not guaranteed to be usable directly as a virtual
201 * address.
202 *
203 * This version of ioremap ensures that the memory is marked uncachable
204 * but accelerated by means of write-combining feature. It is specifically
205 * useful for PCIe prefetchable windows, which may vastly improve a
206 * communications performance. If it was determined on boot stage, what
207 * CPU CCA doesn't support UCA, the method shall fall-back to the
208 * _CACHE_UNCACHED option (see cpu_probe() method).
209 */
210#define ioremap_wc(offset, size) \
211 ioremap_prot((offset), (size), boot_cpu_data.writecombine)
212
213#if defined(CONFIG_CPU_CAVIUM_OCTEON) || defined(CONFIG_CPU_LOONGSON64)
214#define war_io_reorder_wmb() wmb()
215#else
216#define war_io_reorder_wmb() barrier()
217#endif
218
219#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, barrier, relax, irq) \
220 \
221static inline void pfx##write##bwlq(type val, \
222 volatile void __iomem *mem) \
223{ \
224 volatile type *__mem; \
225 type __val; \
226 \
227 if (barrier) \
228 iobarrier_rw(); \
229 else \
230 war_io_reorder_wmb(); \
231 \
232 __mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \
233 \
234 __val = pfx##ioswab##bwlq(__mem, val); \
235 \
236 if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
237 *__mem = __val; \
238 else if (cpu_has_64bits) { \
239 unsigned long __flags; \
240 type __tmp; \
241 \
242 if (irq) \
243 local_irq_save(__flags); \
244 __asm__ __volatile__( \
245 ".set push" "\t\t# __writeq""\n\t" \
246 ".set arch=r4000" "\n\t" \
247 "dsll32 %L0, %L0, 0" "\n\t" \
248 "dsrl32 %L0, %L0, 0" "\n\t" \
249 "dsll32 %M0, %M0, 0" "\n\t" \
250 "or %L0, %L0, %M0" "\n\t" \
251 "sd %L0, %2" "\n\t" \
252 ".set pop" "\n" \
253 : "=r" (__tmp) \
254 : "0" (__val), "m" (*__mem)); \
255 if (irq) \
256 local_irq_restore(__flags); \
257 } else \
258 BUG(); \
259} \
260 \
261static inline type pfx##read##bwlq(const volatile void __iomem *mem) \
262{ \
263 volatile type *__mem; \
264 type __val; \
265 \
266 __mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \
267 \
268 if (barrier) \
269 iobarrier_rw(); \
270 \
271 if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
272 __val = *__mem; \
273 else if (cpu_has_64bits) { \
274 unsigned long __flags; \
275 \
276 if (irq) \
277 local_irq_save(__flags); \
278 __asm__ __volatile__( \
279 ".set push" "\t\t# __readq" "\n\t" \
280 ".set arch=r4000" "\n\t" \
281 "ld %L0, %1" "\n\t" \
282 "dsra32 %M0, %L0, 0" "\n\t" \
283 "sll %L0, %L0, 0" "\n\t" \
284 ".set pop" "\n" \
285 : "=r" (__val) \
286 : "m" (*__mem)); \
287 if (irq) \
288 local_irq_restore(__flags); \
289 } else { \
290 __val = 0; \
291 BUG(); \
292 } \
293 \
294 /* prevent prefetching of coherent DMA data prematurely */ \
295 if (!relax) \
296 rmb(); \
297 return pfx##ioswab##bwlq(__mem, __val); \
298}
299
300#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, barrier, relax, p) \
301 \
302static inline void pfx##out##bwlq##p(type val, unsigned long port) \
303{ \
304 volatile type *__addr; \
305 type __val; \
306 \
307 if (barrier) \
308 iobarrier_rw(); \
309 else \
310 war_io_reorder_wmb(); \
311 \
312 __addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
313 \
314 __val = pfx##ioswab##bwlq(__addr, val); \
315 \
316 /* Really, we want this to be atomic */ \
317 BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \
318 \
319 *__addr = __val; \
320} \
321 \
322static inline type pfx##in##bwlq##p(unsigned long port) \
323{ \
324 volatile type *__addr; \
325 type __val; \
326 \
327 __addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
328 \
329 BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \
330 \
331 if (barrier) \
332 iobarrier_rw(); \
333 \
334 __val = *__addr; \
335 \
336 /* prevent prefetching of coherent DMA data prematurely */ \
337 if (!relax) \
338 rmb(); \
339 return pfx##ioswab##bwlq(__addr, __val); \
340}
341
342#define __BUILD_MEMORY_PFX(bus, bwlq, type, relax) \
343 \
344__BUILD_MEMORY_SINGLE(bus, bwlq, type, 1, relax, 1)
345
346#define BUILDIO_MEM(bwlq, type) \
347 \
348__BUILD_MEMORY_PFX(__raw_, bwlq, type, 0) \
349__BUILD_MEMORY_PFX(__relaxed_, bwlq, type, 1) \
350__BUILD_MEMORY_PFX(__mem_, bwlq, type, 0) \
351__BUILD_MEMORY_PFX(, bwlq, type, 0)
352
353BUILDIO_MEM(b, u8)
354BUILDIO_MEM(w, u16)
355BUILDIO_MEM(l, u32)
356#ifdef CONFIG_64BIT
357BUILDIO_MEM(q, u64)
358#else
359__BUILD_MEMORY_PFX(__raw_, q, u64, 0)
360__BUILD_MEMORY_PFX(__mem_, q, u64, 0)
361#endif
362
363#define __BUILD_IOPORT_PFX(bus, bwlq, type) \
364 __BUILD_IOPORT_SINGLE(bus, bwlq, type, 1, 0,) \
365 __BUILD_IOPORT_SINGLE(bus, bwlq, type, 1, 0, _p)
366
367#define BUILDIO_IOPORT(bwlq, type) \
368 __BUILD_IOPORT_PFX(, bwlq, type) \
369 __BUILD_IOPORT_PFX(__mem_, bwlq, type)
370
371BUILDIO_IOPORT(b, u8)
372BUILDIO_IOPORT(w, u16)
373BUILDIO_IOPORT(l, u32)
374#ifdef CONFIG_64BIT
375BUILDIO_IOPORT(q, u64)
376#endif
377
378#define __BUILDIO(bwlq, type) \
379 \
380__BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 1, 0, 0)
381
382__BUILDIO(q, u64)
383
384#define readb_relaxed __relaxed_readb
385#define readw_relaxed __relaxed_readw
386#define readl_relaxed __relaxed_readl
387#ifdef CONFIG_64BIT
388#define readq_relaxed __relaxed_readq
389#endif
390
391#define writeb_relaxed __relaxed_writeb
392#define writew_relaxed __relaxed_writew
393#define writel_relaxed __relaxed_writel
394#ifdef CONFIG_64BIT
395#define writeq_relaxed __relaxed_writeq
396#endif
397
398#define readb_be(addr) \
399 __raw_readb((__force unsigned *)(addr))
400#define readw_be(addr) \
401 be16_to_cpu(__raw_readw((__force unsigned *)(addr)))
402#define readl_be(addr) \
403 be32_to_cpu(__raw_readl((__force unsigned *)(addr)))
404#define readq_be(addr) \
405 be64_to_cpu(__raw_readq((__force unsigned *)(addr)))
406
407#define writeb_be(val, addr) \
408 __raw_writeb((val), (__force unsigned *)(addr))
409#define writew_be(val, addr) \
410 __raw_writew(cpu_to_be16((val)), (__force unsigned *)(addr))
411#define writel_be(val, addr) \
412 __raw_writel(cpu_to_be32((val)), (__force unsigned *)(addr))
413#define writeq_be(val, addr) \
414 __raw_writeq(cpu_to_be64((val)), (__force unsigned *)(addr))
415
416/*
417 * Some code tests for these symbols
418 */
419#ifdef CONFIG_64BIT
420#define readq readq
421#define writeq writeq
422#endif
423
424#define __BUILD_MEMORY_STRING(bwlq, type) \
425 \
426static inline void writes##bwlq(volatile void __iomem *mem, \
427 const void *addr, unsigned int count) \
428{ \
429 const volatile type *__addr = addr; \
430 \
431 while (count--) { \
432 __mem_write##bwlq(*__addr, mem); \
433 __addr++; \
434 } \
435} \
436 \
437static inline void reads##bwlq(volatile void __iomem *mem, void *addr, \
438 unsigned int count) \
439{ \
440 volatile type *__addr = addr; \
441 \
442 while (count--) { \
443 *__addr = __mem_read##bwlq(mem); \
444 __addr++; \
445 } \
446}
447
448#define __BUILD_IOPORT_STRING(bwlq, type) \
449 \
450static inline void outs##bwlq(unsigned long port, const void *addr, \
451 unsigned int count) \
452{ \
453 const volatile type *__addr = addr; \
454 \
455 while (count--) { \
456 __mem_out##bwlq(*__addr, port); \
457 __addr++; \
458 } \
459} \
460 \
461static inline void ins##bwlq(unsigned long port, void *addr, \
462 unsigned int count) \
463{ \
464 volatile type *__addr = addr; \
465 \
466 while (count--) { \
467 *__addr = __mem_in##bwlq(port); \
468 __addr++; \
469 } \
470}
471
472#define BUILDSTRING(bwlq, type) \
473 \
474__BUILD_MEMORY_STRING(bwlq, type) \
475__BUILD_IOPORT_STRING(bwlq, type)
476
477BUILDSTRING(b, u8)
478BUILDSTRING(w, u16)
479BUILDSTRING(l, u32)
480#ifdef CONFIG_64BIT
481BUILDSTRING(q, u64)
482#endif
483
484static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
485{
486 memset((void __force *) addr, val, count);
487}
488static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
489{
490 memcpy(dst, (void __force *) src, count);
491}
492static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
493{
494 memcpy((void __force *) dst, src, count);
495}
496
497/*
498 * The caches on some architectures aren't dma-coherent and have need to
499 * handle this in software. There are three types of operations that
500 * can be applied to dma buffers.
501 *
502 * - dma_cache_wback_inv(start, size) makes caches and coherent by
503 * writing the content of the caches back to memory, if necessary.
504 * The function also invalidates the affected part of the caches as
505 * necessary before DMA transfers from outside to memory.
506 * - dma_cache_wback(start, size) makes caches and coherent by
507 * writing the content of the caches back to memory, if necessary.
508 * The function also invalidates the affected part of the caches as
509 * necessary before DMA transfers from outside to memory.
510 * - dma_cache_inv(start, size) invalidates the affected parts of the
511 * caches. Dirty lines of the caches may be written back or simply
512 * be discarded. This operation is necessary before dma operations
513 * to the memory.
514 *
515 * This API used to be exported; it now is for arch code internal use only.
516 */
517#ifdef CONFIG_DMA_NONCOHERENT
518
519extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
520extern void (*_dma_cache_wback)(unsigned long start, unsigned long size);
521extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);
522
523#define dma_cache_wback_inv(start, size) _dma_cache_wback_inv(start, size)
524#define dma_cache_wback(start, size) _dma_cache_wback(start, size)
525#define dma_cache_inv(start, size) _dma_cache_inv(start, size)
526
527#else /* Sane hardware */
528
529#define dma_cache_wback_inv(start,size) \
530 do { (void) (start); (void) (size); } while (0)
531#define dma_cache_wback(start,size) \
532 do { (void) (start); (void) (size); } while (0)
533#define dma_cache_inv(start,size) \
534 do { (void) (start); (void) (size); } while (0)
535
536#endif /* CONFIG_DMA_NONCOHERENT */
537
538/*
539 * Read a 32-bit register that requires a 64-bit read cycle on the bus.
540 * Avoid interrupt mucking, just adjust the address for 4-byte access.
541 * Assume the addresses are 8-byte aligned.
542 */
543#ifdef __MIPSEB__
544#define __CSR_32_ADJUST 4
545#else
546#define __CSR_32_ADJUST 0
547#endif
548
549#define csr_out32(v, a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST) = (v))
550#define csr_in32(a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST))
551
552/*
553 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
554 * access
555 */
556#define xlate_dev_mem_ptr(p) __va(p)
557
558void __ioread64_copy(void *to, const void __iomem *from, size_t count);
559
560#endif /* _ASM_IO_H */