Loading...
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
3
4#include <linux/cache.h>
5#include <linux/dma-map-ops.h>
6#include <linux/genalloc.h>
7#include <linux/highmem.h>
8#include <linux/io.h>
9#include <linux/mm.h>
10#include <linux/scatterlist.h>
11#include <linux/types.h>
12#include <asm/cache.h>
13
/*
 * cache_op - apply a cache maintenance routine to a physical range.
 * @paddr: physical start address of the buffer
 * @size:  length of the buffer in bytes
 * @fn:    maintenance routine taking a [start, end) virtual range
 *
 * Walks [paddr, paddr + size) one page at a time, since only one page
 * is guaranteed to have a usable kernel virtual mapping at a time.
 * Lowmem pages are reached through the linear map (__va); highmem
 * pages have no permanent mapping and are mapped temporarily with
 * kmap_atomic() for the duration of the call.
 *
 * NOTE(review): in the highmem branch 'start' is overwritten with the
 * kmap_atomic() cookie and is still advanced by PAGE_SIZE afterwards,
 * so its linear-map value is lost. This looks safe only if a
 * physically contiguous range never goes highmem -> lowmem (i.e. all
 * lowmem pages precede highmem pages) — confirm against the platform
 * memory layout.
 */
static inline void cache_op(phys_addr_t paddr, size_t size,
			    void (*fn)(unsigned long start, unsigned long end))
{
	struct page *page = phys_to_page(paddr);
	void *start = __va(page_to_phys(page));
	unsigned long offset = offset_in_page(paddr);	/* intra-page start */
	size_t left = size;

	do {
		size_t len = left;

		/* Clamp the chunk so it never crosses a page boundary. */
		if (offset + len > PAGE_SIZE)
			len = PAGE_SIZE - offset;

		if (PageHighMem(page)) {
			/* No permanent mapping: create a temporary one. */
			start = kmap_atomic(page);

			fn((unsigned long)start + offset,
					(unsigned long)start + offset + len);

			kunmap_atomic(start);
		} else {
			/* Lowmem: use the linear mapping directly. */
			fn((unsigned long)start + offset,
					(unsigned long)start + offset + len);
		}
		/* Only the first page can start mid-page. */
		offset = 0;

		page++;
		start += PAGE_SIZE;
		left -= len;
	} while (left);
}
46
/*
 * Zero-fill [start, end) and then write back + invalidate the cache
 * lines covering it, so the cleared bytes reach memory before the
 * buffer is handed to a device.
 */
static void dma_wbinv_set_zero_range(unsigned long start, unsigned long end)
{
	size_t len = end - start;

	memset((void *)start, 0, len);
	dma_wbinv_range(start, end);
}
52
53void arch_dma_prep_coherent(struct page *page, size_t size)
54{
55 cache_op(page_to_phys(page), size, dma_wbinv_set_zero_range);
56}
57
58void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
59 enum dma_data_direction dir)
60{
61 switch (dir) {
62 case DMA_TO_DEVICE:
63 cache_op(paddr, size, dma_wb_range);
64 break;
65 case DMA_FROM_DEVICE:
66 case DMA_BIDIRECTIONAL:
67 cache_op(paddr, size, dma_wbinv_range);
68 break;
69 default:
70 BUG();
71 }
72}
73
74void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
75 enum dma_data_direction dir)
76{
77 switch (dir) {
78 case DMA_TO_DEVICE:
79 return;
80 case DMA_FROM_DEVICE:
81 case DMA_BIDIRECTIONAL:
82 cache_op(paddr, size, dma_inv_range);
83 break;
84 default:
85 BUG();
86 }
87}
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
3
4#include <linux/cache.h>
5#include <linux/dma-map-ops.h>
6#include <linux/genalloc.h>
7#include <linux/highmem.h>
8#include <linux/io.h>
9#include <linux/mm.h>
10#include <linux/scatterlist.h>
11#include <linux/types.h>
12#include <linux/version.h>
13#include <asm/cache.h>
14
/*
 * cache_op - apply a cache maintenance routine to a physical range.
 * @paddr: physical start address of the buffer
 * @size:  length of the buffer in bytes
 * @fn:    maintenance routine taking a [start, end) virtual range
 *
 * Walks [paddr, paddr + size) one page at a time, since only one page
 * is guaranteed to have a usable kernel virtual mapping at a time.
 * Lowmem pages are reached through the linear map (__va); highmem
 * pages have no permanent mapping and are mapped temporarily with
 * kmap_atomic() for the duration of the call.
 *
 * NOTE(review): in the highmem branch 'start' is overwritten with the
 * kmap_atomic() cookie and is still advanced by PAGE_SIZE afterwards,
 * so its linear-map value is lost. This looks safe only if a
 * physically contiguous range never goes highmem -> lowmem (i.e. all
 * lowmem pages precede highmem pages) — confirm against the platform
 * memory layout.
 */
static inline void cache_op(phys_addr_t paddr, size_t size,
			    void (*fn)(unsigned long start, unsigned long end))
{
	struct page *page = phys_to_page(paddr);
	void *start = __va(page_to_phys(page));
	unsigned long offset = offset_in_page(paddr);	/* intra-page start */
	size_t left = size;

	do {
		size_t len = left;

		/* Clamp the chunk so it never crosses a page boundary. */
		if (offset + len > PAGE_SIZE)
			len = PAGE_SIZE - offset;

		if (PageHighMem(page)) {
			/* No permanent mapping: create a temporary one. */
			start = kmap_atomic(page);

			fn((unsigned long)start + offset,
					(unsigned long)start + offset + len);

			kunmap_atomic(start);
		} else {
			/* Lowmem: use the linear mapping directly. */
			fn((unsigned long)start + offset,
					(unsigned long)start + offset + len);
		}
		/* Only the first page can start mid-page. */
		offset = 0;

		page++;
		start += PAGE_SIZE;
		left -= len;
	} while (left);
}
47
/*
 * Zero-fill [start, end) and then write back + invalidate the cache
 * lines covering it, so the cleared bytes reach memory before the
 * buffer is handed to a device.
 */
static void dma_wbinv_set_zero_range(unsigned long start, unsigned long end)
{
	size_t len = end - start;

	memset((void *)start, 0, len);
	dma_wbinv_range(start, end);
}
53
54void arch_dma_prep_coherent(struct page *page, size_t size)
55{
56 cache_op(page_to_phys(page), size, dma_wbinv_set_zero_range);
57}
58
59void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
60 enum dma_data_direction dir)
61{
62 switch (dir) {
63 case DMA_TO_DEVICE:
64 cache_op(paddr, size, dma_wb_range);
65 break;
66 case DMA_FROM_DEVICE:
67 case DMA_BIDIRECTIONAL:
68 cache_op(paddr, size, dma_wbinv_range);
69 break;
70 default:
71 BUG();
72 }
73}
74
75void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
76 enum dma_data_direction dir)
77{
78 switch (dir) {
79 case DMA_TO_DEVICE:
80 return;
81 case DMA_FROM_DEVICE:
82 case DMA_BIDIRECTIONAL:
83 cache_op(paddr, size, dma_inv_range);
84 break;
85 default:
86 BUG();
87 }
88}