// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/highmem.h>

#include <asm/cache.h>
#include <asm/cpu-type.h>
#include <asm/io.h>

/*
 * The affected CPUs below in 'cpu_needs_post_dma_flush()' can speculatively
 * fill random cachelines with stale data at any time, requiring an extra
 * flush post-DMA.
 *
 * Warning on the terminology - Linux calls an uncached area coherent; MIPS
 * terminology calls memory areas with hardware maintained coherency coherent.
 *
 * Note that the R14000 and R16000 should also be checked for in this
 * condition. However, this function is only called on non-I/O-coherent
 * systems, and only the R10000 and R12000 are used in such systems: the
 * SGI IP28 aka Indigo² and the SGI IP32 aka O2, respectively.
 */
static inline bool cpu_needs_post_dma_flush(void)
{
	switch (boot_cpu_type()) {
	case CPU_R10000:
	case CPU_R12000:
	case CPU_BMIPS5000:
	case CPU_LOONGSON2EF:
	case CPU_XBURST:
		return true;
	default:
		/*
		 * Presence of MAARs suggests that the CPU supports
		 * speculatively prefetching data, and therefore requires
		 * the post-DMA flush/invalidate.
		 */
		return cpu_has_maar;
	}
}

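/*
 * Called by the generic DMA allocator: write back and invalidate the
 * caches over a freshly allocated buffer, so the uncached alias used
 * for coherent mappings starts out consistent with memory.
 */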
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	dma_cache_wback_inv((unsigned long)page_address(page), size);
}

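/*
 * Return the uncached alias of a kernel virtual address: UNCAC_BASE is
 * the base of the uncached segment (KSEG1 on 32-bit, uncached XKPHYS on
 * 64-bit), so adding the buffer's physical address to it yields a view
 * of the same memory that bypasses the caches.
 */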
void *arch_dma_set_uncached(void *addr, size_t size)
{
	return (void *)(__pa(addr) + UNCAC_BASE);
}

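/*
 * Cache maintenance before the device touches memory: write back dirty
 * lines so a reading device sees the CPU's data (DMA_TO_DEVICE),
 * invalidate so stale cachelines cannot be written back over the
 * device's incoming data (DMA_FROM_DEVICE), or both (DMA_BIDIRECTIONAL).
 */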
static inline void dma_sync_virt_for_device(void *addr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		dma_cache_wback((unsigned long)addr, size);
		break;
	case DMA_FROM_DEVICE:
		dma_cache_inv((unsigned long)addr, size);
		break;
	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv((unsigned long)addr, size);
		break;
	default:
		BUG();
	}
}

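/*
 * Cache maintenance after the device is done: nothing is needed for
 * DMA_TO_DEVICE since the CPU's copy is still valid, but data received
 * from the device must be invalidated in case speculation refilled
 * cachelines while the transfer was in flight.
 */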
static inline void dma_sync_virt_for_cpu(void *addr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		break;
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		dma_cache_inv((unsigned long)addr, size);
		break;
	default:
		BUG();
	}
}

/*
 * A single sg entry may refer to multiple physically contiguous pages.  But
 * we still need to process highmem pages individually.  If highmem is not
 * configured then the bulk of this loop gets optimized out.
 */
static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir, bool for_device)
{
	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
	unsigned long offset = paddr & ~PAGE_MASK;
	size_t left = size;

	do {
		size_t len = left;
		void *addr;

		if (PageHighMem(page)) {
			if (offset + len > PAGE_SIZE)
				len = PAGE_SIZE - offset;
		}

		addr = kmap_atomic(page);
		if (for_device)
			dma_sync_virt_for_device(addr + offset, len, dir);
		else
			dma_sync_virt_for_cpu(addr + offset, len, dir);
		kunmap_atomic(addr);

		offset = 0;
		page++;
		left -= len;
	} while (left);
}

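/*
 * Entry points called by the generic DMA mapping code (kernel/dma/) on
 * the streaming DMA paths, i.e. dma_map_*() and dma_sync_*().
 */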
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	dma_sync_phys(paddr, size, dir, true);
}

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	if (cpu_needs_post_dma_flush())
		dma_sync_phys(paddr, size, dir, false);
}
#endif

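/*
 * Record at probe time whether a given device is I/O-coherent, so the
 * DMA core can skip the cache maintenance above for coherent devices.
 */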
#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
		const struct iommu_ops *iommu, bool coherent)
{
	dev->dma_coherent = coherent;
}
#endif