/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 1995 Waldorf GmbH
 * Copyright (C) 1994 - 2000, 06 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004, 2005 MIPS Technologies, Inc.  All rights reserved.
 *	Author: Maciej W. Rozycki <macro@mips.com>
 */
#ifndef _ASM_IO_H
#define _ASM_IO_H

#define ARCH_HAS_IOREMAP_WC

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/irqflags.h>

#include <asm/addrspace.h>
#include <asm/barrier.h>
#include <asm/bug.h>
#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm-generic/iomap.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>
#include <asm/processor.h>
#include <asm/string.h>
#include <mangle-port.h>

/*
 * Raw operations are never swapped in software.  OTOH values that raw
 * operations are working on may or may not have been swapped by the bus
 * hardware.  An example use would be for flash memory that's used for
 * execute in place.
 */
# define __raw_ioswabb(a, x)	(x)
# define __raw_ioswabw(a, x)	(x)
# define __raw_ioswabl(a, x)	(x)
# define __raw_ioswabq(a, x)	(x)
# define ____raw_ioswabq(a, x)	(x)

# define __relaxed_ioswabb	ioswabb
# define __relaxed_ioswabw	ioswabw
# define __relaxed_ioswabl	ioswabl
# define __relaxed_ioswabq	ioswabq

/* ioswab[bwlq], __mem_ioswab[bwlq] are defined in mangle-port.h */

/*
 * On MIPS I/O ports are memory mapped, so we access them using normal
 * load/store instructions.  mips_io_port_base is the virtual address to
 * which all ports are being mapped.  For the sake of efficiency some code
 * assumes that this is an address that can be loaded with a single lui
 * instruction, so the lower 16 bits must be zero.  This should be true on
 * any sane architecture; generic code does not rely on this assumption.
 */
extern unsigned long mips_io_port_base;

static inline void set_io_port_base(unsigned long base)
{
	mips_io_port_base = base;
}
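
/*
 * Illustrative only (the physical base below is invented): platform
 * setup code points the port window at an uncached mapping early in
 * boot, e.g.
 *
 *	set_io_port_base(CKSEG1ADDR(0x18000000));
 *
 * after which the inb()/outb() family defined below turns port numbers
 * into plain loads/stores within that window.
 */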

/*
 * Provide the necessary definitions for generic iomap.  We make use of
 * mips_io_port_base for iomap(), but we don't reserve any low addresses
 * for use with I/O ports.
 */

#define HAVE_ARCH_PIO_SIZE
#define PIO_OFFSET	mips_io_port_base
#define PIO_MASK	IO_SPACE_LIMIT
#define PIO_RESERVED	0x0UL

/*
 * Enforce in-order execution of data I/O.  In the MIPS architecture
 * these are equivalent to corresponding platform-specific memory
 * barriers defined in <asm/barrier.h>.  API pinched from PowerPC,
 * with sync additionally defined.
 */
#define iobarrier_rw() mb()
#define iobarrier_r() rmb()
#define iobarrier_w() wmb()
#define iobarrier_sync() iob()

/*
 * virt_to_phys	- map virtual addresses to physical
 * @address: address to remap
 *
 * The returned physical address is the physical (CPU) mapping for
 * the memory address given.  It is only valid to use this function on
 * addresses directly mapped or allocated via kmalloc.
 *
 * This function does not give bus mappings for DMA transfers.  In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline unsigned long __virt_to_phys_nodebug(volatile const void *address)
{
	return __pa(address);
}

#ifdef CONFIG_DEBUG_VIRTUAL
extern phys_addr_t __virt_to_phys(volatile const void *x);
#else
#define __virt_to_phys(x)	__virt_to_phys_nodebug(x)
#endif

#define virt_to_phys virt_to_phys
static inline phys_addr_t virt_to_phys(const volatile void *x)
{
	return __virt_to_phys(x);
}

/*
 * phys_to_virt	- map physical address to virtual
 * @address: address to remap
 *
 * The returned virtual address is a current CPU mapping for
 * the memory address given.  It is only valid to use this function on
 * addresses that have a kernel mapping.
 *
 * This function does not handle bus mappings for DMA transfers.  In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline void *phys_to_virt(unsigned long address)
{
	return __va(address);
}
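
/*
 * Illustrative sketch only (buffer and length are made up): for a
 * kmalloc()ed buffer these two calls are inverses of each other.
 *
 *	void *buf = kmalloc(4096, GFP_KERNEL);
 *	phys_addr_t pa = virt_to_phys(buf);
 *
 *	WARN_ON(phys_to_virt(pa) != buf);
 *
 * Drivers should still use the DMA API rather than handing pa to
 * hardware directly.
 */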

/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 */
static inline unsigned long isa_virt_to_bus(volatile void *address)
{
	return virt_to_phys(address);
}

static inline void *isa_bus_to_virt(unsigned long address)
{
	return phys_to_virt(address);
}

/*
 * Change "struct page" to physical address.
 */
#define page_to_phys(page)	((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)

void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
		unsigned long prot_val);
void iounmap(const volatile void __iomem *addr);

/*
 * ioremap     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 */
#define ioremap(offset, size)						\
	ioremap_prot((offset), (size), _CACHE_UNCACHED)
#define ioremap_uc		ioremap
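
/*
 * Hypothetical usage sketch (base address, size and register offsets
 * are invented; error handling trimmed):
 *
 *	void __iomem *regs = ioremap(res_start, 0x1000);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + 0x10);		// imaginary enable register
 *	status = readl(regs + 0x14);	// imaginary status register
 *	iounmap(regs);
 */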

/*
 * ioremap_cache -	map bus memory into CPU space
 * @offset:	    bus address of the memory
 * @size:	    size of the resource to map
 *
 * ioremap_cache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked cacheable by
 * the CPU.  Also enables full write-combining.  Useful for some
 * memory-like regions on I/O busses.
 */
#define ioremap_cache(offset, size)					\
	ioremap_prot((offset), (size), _page_cachable_default)

/*
 * ioremap_wc     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_wc performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * but accelerated by means of the write-combining feature.  It is
 * specifically useful for PCIe prefetchable windows, where it may vastly
 * improve communication performance.  If it is determined at boot that
 * the CPU CCA doesn't support UCA, this falls back to _CACHE_UNCACHED
 * (see cpu_probe()).
 */
#define ioremap_wc(offset, size)					\
	ioremap_prot((offset), (size), boot_cpu_data.writecombine)
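
/*
 * Sketch (assumed names, for illustration only): a framebuffer in a
 * prefetchable PCI BAR is a typical write-combining candidate.
 *
 *	fb = ioremap_wc(pci_resource_start(pdev, 0),
 *			pci_resource_len(pdev, 0));
 *
 * Streams of writel()/memcpy_toio() to fb may then be merged into
 * larger bus transactions by the CPU.
 */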

#if defined(CONFIG_CPU_CAVIUM_OCTEON) || defined(CONFIG_CPU_LOONGSON64)
#define war_io_reorder_wmb()		wmb()
#else
#define war_io_reorder_wmb()		barrier()
#endif

#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, barrier, relax, irq)	\
									\
static inline void pfx##write##bwlq(type val,				\
				    volatile void __iomem *mem)		\
{									\
	volatile type *__mem;						\
	type __val;							\
									\
	if (barrier)							\
		iobarrier_rw();						\
	else								\
		war_io_reorder_wmb();					\
									\
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));	\
									\
	__val = pfx##ioswab##bwlq(__mem, val);				\
									\
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		*__mem = __val;						\
	else if (cpu_has_64bits) {					\
		unsigned long __flags;					\
		type __tmp;						\
									\
		if (irq)						\
			local_irq_save(__flags);			\
		__asm__ __volatile__(					\
			".set	push"		"\t\t# __writeq""\n\t"	\
			".set	arch=r4000"	"\n\t"			\
			"dsll32 %L0, %L0, 0"	"\n\t"			\
			"dsrl32 %L0, %L0, 0"	"\n\t"			\
			"dsll32 %M0, %M0, 0"	"\n\t"			\
			"or	%L0, %L0, %M0"	"\n\t"			\
			"sd	%L0, %2"	"\n\t"			\
			".set	pop"		"\n"			\
			: "=r" (__tmp)					\
			: "0" (__val), "m" (*__mem));			\
		if (irq)						\
			local_irq_restore(__flags);			\
	} else								\
		BUG();							\
}									\
									\
static inline type pfx##read##bwlq(const volatile void __iomem *mem)	\
{									\
	volatile type *__mem;						\
	type __val;							\
									\
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));	\
									\
	if (barrier)							\
		iobarrier_rw();						\
									\
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		__val = *__mem;						\
	else if (cpu_has_64bits) {					\
		unsigned long __flags;					\
									\
		if (irq)						\
			local_irq_save(__flags);			\
		__asm__ __volatile__(					\
			".set	push"		"\t\t# __readq" "\n\t"	\
			".set	arch=r4000"	"\n\t"			\
			"ld	%L0, %1"	"\n\t"			\
			"dsra32 %M0, %L0, 0"	"\n\t"			\
			"sll	%L0, %L0, 0"	"\n\t"			\
			".set	pop"		"\n"			\
			: "=r" (__val)					\
			: "m" (*__mem));				\
		if (irq)						\
			local_irq_restore(__flags);			\
	} else {							\
		__val = 0;						\
		BUG();							\
	}								\
									\
	/* prevent prefetching of coherent DMA data prematurely */	\
	if (!relax)							\
		rmb();							\
	return pfx##ioswab##bwlq(__mem, __val);				\
}

#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, barrier, relax, p)	\
									\
static inline void pfx##out##bwlq##p(type val, unsigned long port)	\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	if (barrier)							\
		iobarrier_rw();						\
	else								\
		war_io_reorder_wmb();					\
									\
	__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
									\
	__val = pfx##ioswab##bwlq(__addr, val);				\
									\
	/* Really, we want this to be atomic */				\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	*__addr = __val;						\
}									\
									\
static inline type pfx##in##bwlq##p(unsigned long port)			\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
									\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	if (barrier)							\
		iobarrier_rw();						\
									\
	__val = *__addr;						\
									\
	/* prevent prefetching of coherent DMA data prematurely */	\
	if (!relax)							\
		rmb();							\
	return pfx##ioswab##bwlq(__addr, __val);			\
}

#define __BUILD_MEMORY_PFX(bus, bwlq, type, relax)			\
									\
__BUILD_MEMORY_SINGLE(bus, bwlq, type, 1, relax, 1)

#define BUILDIO_MEM(bwlq, type)						\
									\
__BUILD_MEMORY_PFX(__raw_, bwlq, type, 0)				\
__BUILD_MEMORY_PFX(__relaxed_, bwlq, type, 1)				\
__BUILD_MEMORY_PFX(__mem_, bwlq, type, 0)				\
__BUILD_MEMORY_PFX(, bwlq, type, 0)

BUILDIO_MEM(b, u8)
BUILDIO_MEM(w, u16)
BUILDIO_MEM(l, u32)
#ifdef CONFIG_64BIT
BUILDIO_MEM(q, u64)
#else
__BUILD_MEMORY_PFX(__raw_, q, u64, 0)
__BUILD_MEMORY_PFX(__mem_, q, u64, 0)
#endif
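
/*
 * For orientation, not extra API: BUILDIO_MEM(l, u32) above expands to
 * (roughly, eliding the 64-bit path)
 *
 *	static inline void writel(u32 val, volatile void __iomem *mem)
 *	{
 *		volatile u32 *__mem;
 *		iobarrier_rw();
 *		__mem = (void *)__swizzle_addr_l((unsigned long)(mem));
 *		*__mem = ioswabl(__mem, val);
 *	}
 *
 * plus the matching readl() and the __raw_/__relaxed_/__mem_ variants
 * with their respective barrier and byte-swapping policies.
 */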

#define __BUILD_IOPORT_PFX(bus, bwlq, type)				\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, 1, 0,)			\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, 1, 0, _p)

#define BUILDIO_IOPORT(bwlq, type)					\
	__BUILD_IOPORT_PFX(, bwlq, type)				\
	__BUILD_IOPORT_PFX(__mem_, bwlq, type)

BUILDIO_IOPORT(b, u8)
BUILDIO_IOPORT(w, u16)
BUILDIO_IOPORT(l, u32)
#ifdef CONFIG_64BIT
BUILDIO_IOPORT(q, u64)
#endif
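
/*
 * Usage sketch (illustrative legacy-port values, not a real driver):
 * the generated inb()/outb() turn port numbers into loads/stores at
 * mips_io_port_base + port.
 *
 *	outb(0x0c, 0x3f8 + 4);		// hypothetical 8250 UART MCR
 *	status = inb(0x3f8 + 5);	// hypothetical 8250 UART LSR
 *
 * The _p variants (inb_p() etc.) are generated with identical bodies
 * on MIPS.
 */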

#define __BUILDIO(bwlq, type)						\
									\
__BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 1, 0, 0)

__BUILDIO(q, u64)

#define readb_relaxed			__relaxed_readb
#define readw_relaxed			__relaxed_readw
#define readl_relaxed			__relaxed_readl
#ifdef CONFIG_64BIT
#define readq_relaxed			__relaxed_readq
#endif

#define writeb_relaxed			__relaxed_writeb
#define writew_relaxed			__relaxed_writew
#define writel_relaxed			__relaxed_writel
#ifdef CONFIG_64BIT
#define writeq_relaxed			__relaxed_writeq
#endif

#define readb_be(addr)							\
	__raw_readb((__force unsigned *)(addr))
#define readw_be(addr)							\
	be16_to_cpu(__raw_readw((__force unsigned *)(addr)))
#define readl_be(addr)							\
	be32_to_cpu(__raw_readl((__force unsigned *)(addr)))
#define readq_be(addr)							\
	be64_to_cpu(__raw_readq((__force unsigned *)(addr)))

#define writeb_be(val, addr)						\
	__raw_writeb((val), (__force unsigned *)(addr))
#define writew_be(val, addr)						\
	__raw_writew(cpu_to_be16((val)), (__force unsigned *)(addr))
#define writel_be(val, addr)						\
	__raw_writel(cpu_to_be32((val)), (__force unsigned *)(addr))
#define writeq_be(val, addr)						\
	__raw_writeq(cpu_to_be64((val)), (__force unsigned *)(addr))

/*
 * Some code tests for these symbols
 */
#ifdef CONFIG_64BIT
#define readq				readq
#define writeq				writeq
#endif

#define __BUILD_MEMORY_STRING(bwlq, type)				\
									\
static inline void writes##bwlq(volatile void __iomem *mem,		\
				const void *addr, unsigned int count)	\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__mem_write##bwlq(*__addr, mem);			\
		__addr++;						\
	}								\
}									\
									\
static inline void reads##bwlq(volatile void __iomem *mem, void *addr,	\
			       unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __mem_read##bwlq(mem);			\
		__addr++;						\
	}								\
}

#define __BUILD_IOPORT_STRING(bwlq, type)				\
									\
static inline void outs##bwlq(unsigned long port, const void *addr,	\
			      unsigned int count)			\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__mem_out##bwlq(*__addr, port);				\
		__addr++;						\
	}								\
}									\
									\
static inline void ins##bwlq(unsigned long port, void *addr,		\
			     unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __mem_in##bwlq(port);				\
		__addr++;						\
	}								\
}

#define BUILDSTRING(bwlq, type)						\
									\
__BUILD_MEMORY_STRING(bwlq, type)					\
__BUILD_IOPORT_STRING(bwlq, type)

BUILDSTRING(b, u8)
BUILDSTRING(w, u16)
BUILDSTRING(l, u32)
#ifdef CONFIG_64BIT
BUILDSTRING(q, u64)
#endif
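
/*
 * Sketch (device, FIFO offset and length are invented): the string ops
 * move a CPU-endian buffer to or from one fixed device location, e.g.
 * draining a 16-bit data FIFO:
 *
 *	u16 buf[64];
 *
 *	readsw(regs + FIFO_DATA, buf, ARRAY_SIZE(buf));
 *
 * Unlike memcpy_fromio(), the device address is not incremented; every
 * iteration re-reads the same register.
 */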

static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
{
	memset((void __force *)addr, val, count);
}

static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
{
	memcpy(dst, (void __force *)src, count);
}

static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
{
	memcpy((void __force *)dst, src, count);
}

/*
 * The caches on some architectures aren't dma-coherent, so coherence
 * has to be handled in software.  There are three types of operations
 * that can be applied to dma buffers.
 *
 * - dma_cache_wback_inv(start, size) makes caches and memory coherent by
 *	writing the content of the caches back to memory, if necessary.
 *	The function also invalidates the affected part of the caches as
 *	necessary before DMA transfers from outside to memory.
 * - dma_cache_wback(start, size) makes caches and memory coherent by
 *	writing the content of the caches back to memory, if necessary,
 *	before DMA transfers from memory to a device.
 * - dma_cache_inv(start, size) invalidates the affected parts of the
 *	caches.  Dirty lines of the caches may be written back or simply
 *	be discarded.  This operation is necessary before dma operations
 *	to the memory.
 *
 * This API used to be exported; it now is for arch code internal use only.
 */
#ifdef CONFIG_DMA_NONCOHERENT

extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
extern void (*_dma_cache_wback)(unsigned long start, unsigned long size);
extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);

#define dma_cache_wback_inv(start, size)	_dma_cache_wback_inv(start, size)
#define dma_cache_wback(start, size)		_dma_cache_wback(start, size)
#define dma_cache_inv(start, size)		_dma_cache_inv(start, size)

#else /* Sane hardware */

#define dma_cache_wback_inv(start, size)	\
	do { (void) (start); (void) (size); } while (0)
#define dma_cache_wback(start, size)		\
	do { (void) (start); (void) (size); } while (0)
#define dma_cache_inv(start, size)		\
	do { (void) (start); (void) (size); } while (0)

#endif /* CONFIG_DMA_NONCOHERENT */
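
/*
 * Illustrative only (this API is arch-internal; drivers use the DMA
 * API, which ends up here): before handing a cached buffer to a
 * device, arch code writes dirty lines back,
 *
 *	dma_cache_wback((unsigned long)buf, len);
 *
 * and invalidates with dma_cache_inv() before the CPU reads data the
 * device has written.
 */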

/*
 * Read a 32-bit register that requires a 64-bit read cycle on the bus.
 * Avoid interrupt mucking, just adjust the address for 4-byte access.
 * Assume the addresses are 8-byte aligned.
 */
#ifdef __MIPSEB__
#define __CSR_32_ADJUST 4
#else
#define __CSR_32_ADJUST 0
#endif

#define csr_out32(v, a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST) = (v))
#define csr_in32(a)	(*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST))
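
/*
 * Worked example (the address is made up): on a big-endian CPU a
 * 32-bit value read through a 64-bit register at 0x1f000010 lives in
 * the low-order word at byte offset 4, so
 *
 *	csr_in32(0x1f000010)
 *
 * actually loads from 0x1f000014; on little-endian the low word is at
 * offset 0 and no adjustment is applied.
 */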

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

void __ioread64_copy(void *to, const void __iomem *from, size_t count);

#endif /* _ASM_IO_H */