/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <linux/dma-map-ops.h>
#include <linux/kernel.h>
#include <asm/cacheflush.h>

#ifndef CONFIG_COLDFIRE
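/*
 * Called on pages about to be handed out as coherent DMA memory: push
 * (write back) and invalidate any cache lines covering them so no stale
 * or dirty data remains once the CPU mapping is made non-cacheable.
 */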
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	cache_push(page_to_phys(page), size);
}

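/*
 * Page protection used for coherent DMA mappings: on '040/'060 select the
 * serialized non-cachable mode, otherwise ('020/'030) set the plain
 * non-cachable bit.
 */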
pgprot_t pgprot_dmacoherent(pgprot_t prot)
{
	if (CPU_IS_040_OR_060) {
		pgprot_val(prot) &= ~_PAGE_CACHE040;
		pgprot_val(prot) |= _PAGE_GLOBAL040 | _PAGE_NOCACHE_S;
	} else {
		pgprot_val(prot) |= _PAGE_NOCACHE030;
	}
	return prot;
}
#endif /* !CONFIG_COLDFIRE */

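/*
 * Cache maintenance before a streaming DMA transfer: push dirty lines
 * when the device is going to read the buffer, invalidate the lines when
 * the device is going to write to it.
 */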
void arch_sync_dma_for_device(phys_addr_t handle, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
	case DMA_TO_DEVICE:
		cache_push(handle, size);
		break;
	case DMA_FROM_DEVICE:
		cache_clear(handle, size);
		break;
	default:
		pr_err_ratelimited("dma_sync_single_for_device: unsupported dir %u\n",
				   dir);
		break;
	}
}
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

#undef DEBUG

#include <linux/dma-map-ops.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/export.h>

#include <asm/cacheflush.h>

#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	cache_push(page_to_phys(page), size);
}

pgprot_t pgprot_dmacoherent(pgprot_t prot)
{
	if (CPU_IS_040_OR_060) {
		pgprot_val(prot) &= ~_PAGE_CACHE040;
		pgprot_val(prot) |= _PAGE_GLOBAL040 | _PAGE_NOCACHE_S;
	} else {
		pgprot_val(prot) |= _PAGE_NOCACHE030;
	}
	return prot;
}
#else
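/*
 * No MMU (or ColdFire): nothing is remapped, so coherent buffers come
 * straight from the page allocator. The pages are zeroed and the DMA
 * handle is simply their physical address; devices that cannot address
 * the full 32-bit range get their pages from ZONE_DMA.
 */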
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	void *ret;

	if (dev == NULL || (*dev->dma_mask < 0xffffffff))
		gfp |= GFP_DMA;
	ret = (void *)__get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_phys(ret);
	}
	return ret;
}

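/* Free a buffer obtained from arch_dma_alloc() above. */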
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	free_pages((unsigned long)vaddr, get_order(size));
}

#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */

void arch_sync_dma_for_device(phys_addr_t handle, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
	case DMA_TO_DEVICE:
		cache_push(handle, size);
		break;
	case DMA_FROM_DEVICE:
		cache_clear(handle, size);
		break;
	default:
		pr_err_ratelimited("dma_sync_single_for_device: unsupported dir %u\n",
				   dir);
		break;
	}
}