1// SPDX-License-Identifier: GPL-2.0
2// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
3
4#include <linux/spinlock.h>
5#include <linux/smp.h>
6#include <linux/mm.h>
7#include <asm/cache.h>
8#include <asm/barrier.h>
9
/*
 * for L1-cache
 *
 * Bit encodings written to control register cr17 (cache operation
 * register). Presumably per the C-SKY CPU manual — TODO confirm:
 *  - INS_CACHE / DATA_CACHE select which cache the operation targets
 *  - CACHE_INV requests invalidate, CACHE_CLR requests clear/write-back
 *  - CACHE_OMS selects "operate on one line by address" mode, with the
 *    address taken from cr22 (see cache_op_line() below)
 */
#define INS_CACHE (1 << 0)
#define DATA_CACHE (1 << 1)
#define CACHE_INV (1 << 4)
#define CACHE_CLR (1 << 5)
#define CACHE_OMS (1 << 6)
/*
 * Invalidate the entire L1 instruction cache of the *current* CPU.
 *
 * @priv: unused; the void* signature exists so this function can be
 *        passed directly to on_each_cpu() as an IPI callback.
 *
 * Writes INS_CACHE|CACHE_INV to cr17 to trigger the whole-icache
 * invalidate, then sync_is() to wait until the operation completes.
 */
void local_icache_inv_all(void *priv)
{
	mtcr("cr17", INS_CACHE|CACHE_INV);
	sync_is();
}
22
23#ifdef CONFIG_CPU_HAS_ICACHE_INS
24void icache_inv_range(unsigned long start, unsigned long end)
25{
26 unsigned long i = start & ~(L1_CACHE_BYTES - 1);
27
28 for (; i < end; i += L1_CACHE_BYTES)
29 asm volatile("icache.iva %0\n"::"r"(i):"memory");
30 sync_is();
31}
32#else
/* Argument bundle for local_icache_inv_range() when invoked via IPI. */
struct cache_range {
	unsigned long start;
	unsigned long end;
};

/* Serializes cr22/cr17 register pairs across the ops below. */
static DEFINE_SPINLOCK(cache_lock);

/*
 * Perform one cache-line operation: cr22 holds the target address,
 * cr17 the operation bits (see defines above). The two writes form a
 * non-atomic pair, hence the cache_lock held by callers.
 */
static inline void cache_op_line(unsigned long i, unsigned int val)
{
	mtcr("cr22", i);
	mtcr("cr17", val);
}
45
/*
 * Invalidate the local CPU's icache lines covering [param->start,
 * param->end), one line at a time via the cr22/cr17 register pair.
 *
 * @priv: pointer to a struct cache_range (void* for on_each_cpu()).
 *
 * cache_lock with IRQs disabled protects the non-atomic cr22/cr17
 * write pair in cache_op_line(); sync_is() afterwards waits for the
 * issued operations to complete.
 */
void local_icache_inv_range(void *priv)
{
	struct cache_range *param = priv;
	unsigned long i = param->start & ~(L1_CACHE_BYTES - 1);
	unsigned long flags;

	spin_lock_irqsave(&cache_lock, flags);

	for (; i < param->end; i += L1_CACHE_BYTES)
		cache_op_line(i, INS_CACHE | CACHE_INV | CACHE_OMS);

	spin_unlock_irqrestore(&cache_lock, flags);

	sync_is();
}
61
/*
 * Invalidate the icache lines covering [start, end) on every CPU.
 *
 * Without icache.iva the per-line invalidate only affects the local
 * core, so broadcast the operation with on_each_cpu(). If IRQs are
 * already off (e.g. called from atomic context) IPIs cannot be waited
 * on, so fall back to invalidating only the local CPU.
 */
void icache_inv_range(unsigned long start, unsigned long end)
{
	struct cache_range param = { start, end };

	if (irqs_disabled())
		local_icache_inv_range(&param);
	else
		on_each_cpu(local_icache_inv_range, &param, 1);
}
71#endif
72
/*
 * Write back (clean) the single L1 dcache line containing @start,
 * by virtual address, then wait for completion.
 */
inline void dcache_wb_line(unsigned long start)
{
	asm volatile("dcache.cval1 %0\n"::"r"(start):"memory");
	sync_is();
}
78
79void dcache_wb_range(unsigned long start, unsigned long end)
80{
81 unsigned long i = start & ~(L1_CACHE_BYTES - 1);
82
83 for (; i < end; i += L1_CACHE_BYTES)
84 asm volatile("dcache.cval1 %0\n"::"r"(i):"memory");
85 sync_is();
86}
87
/*
 * Make instruction fetches see freshly written code in [start, end):
 * write back the dcache lines first so memory is up to date, then
 * invalidate the (possibly cross-CPU, see icache_inv_range) icache.
 */
void cache_wbinv_range(unsigned long start, unsigned long end)
{
	dcache_wb_range(start, end);
	icache_inv_range(start, end);
}
EXPORT_SYMBOL(cache_wbinv_range);
94
95void dma_wbinv_range(unsigned long start, unsigned long end)
96{
97 unsigned long i = start & ~(L1_CACHE_BYTES - 1);
98
99 for (; i < end; i += L1_CACHE_BYTES)
100 asm volatile("dcache.civa %0\n"::"r"(i):"memory");
101 sync_is();
102}
103
104void dma_inv_range(unsigned long start, unsigned long end)
105{
106 unsigned long i = start & ~(L1_CACHE_BYTES - 1);
107
108 for (; i < end; i += L1_CACHE_BYTES)
109 asm volatile("dcache.iva %0\n"::"r"(i):"memory");
110 sync_is();
111}
112
113void dma_wb_range(unsigned long start, unsigned long end)
114{
115 unsigned long i = start & ~(L1_CACHE_BYTES - 1);
116
117 for (; i < end; i += L1_CACHE_BYTES)
118 asm volatile("dcache.cva %0\n"::"r"(i):"memory");
119 sync_is();
120}
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
3
4#include <linux/spinlock.h>
5#include <linux/smp.h>
6#include <asm/cache.h>
7#include <asm/barrier.h>
8
/*
 * Write back (clean) the single L1 dcache line containing @start,
 * by virtual address, then wait for completion.
 */
inline void dcache_wb_line(unsigned long start)
{
	asm volatile("dcache.cval1 %0\n"::"r"(start):"memory");
	sync_is();
}
14
15void icache_inv_range(unsigned long start, unsigned long end)
16{
17 unsigned long i = start & ~(L1_CACHE_BYTES - 1);
18
19 for (; i < end; i += L1_CACHE_BYTES)
20 asm volatile("icache.iva %0\n"::"r"(i):"memory");
21 sync_is();
22}
23
/*
 * Invalidate the whole L1 instruction cache via icache.ialls, then
 * wait for the operation to complete.
 */
void icache_inv_all(void)
{
	asm volatile("icache.ialls\n":::"memory");
	sync_is();
}
29
30void dcache_wb_range(unsigned long start, unsigned long end)
31{
32 unsigned long i = start & ~(L1_CACHE_BYTES - 1);
33
34 for (; i < end; i += L1_CACHE_BYTES)
35 asm volatile("dcache.cval1 %0\n"::"r"(i):"memory");
36 sync_is();
37}
38
/*
 * Invalidate the dcache lines covering [start, end).
 *
 * NOTE(review): despite the "inv" name this uses dcache.civa
 * (clean *and* invalidate), so dirty lines are written back rather
 * than discarded — presumably deliberate to avoid losing data on
 * partially covered lines; confirm against callers' expectations.
 */
void dcache_inv_range(unsigned long start, unsigned long end)
{
	unsigned long i = start & ~(L1_CACHE_BYTES - 1);

	for (; i < end; i += L1_CACHE_BYTES)
		asm volatile("dcache.civa %0\n"::"r"(i):"memory");
	sync_is();
}
47
/*
 * Make instruction fetches see freshly written code in [start, end):
 * write back the dcache lines so memory is up to date, then
 * invalidate the corresponding icache lines.
 *
 * The two loops this function previously open-coded were verbatim
 * copies of dcache_wb_range() and icache_inv_range() above; calling
 * the helpers keeps one copy of each instruction sequence and keeps
 * the three functions consistent.
 */
void cache_wbinv_range(unsigned long start, unsigned long end)
{
	dcache_wb_range(start, end);
	icache_inv_range(start, end);
}
EXPORT_SYMBOL(cache_wbinv_range);
62
63void dma_wbinv_range(unsigned long start, unsigned long end)
64{
65 unsigned long i = start & ~(L1_CACHE_BYTES - 1);
66
67 for (; i < end; i += L1_CACHE_BYTES)
68 asm volatile("dcache.civa %0\n"::"r"(i):"memory");
69 sync_is();
70}
71
72void dma_inv_range(unsigned long start, unsigned long end)
73{
74 unsigned long i = start & ~(L1_CACHE_BYTES - 1);
75
76 for (; i < end; i += L1_CACHE_BYTES)
77 asm volatile("dcache.iva %0\n"::"r"(i):"memory");
78 sync_is();
79}
80
81void dma_wb_range(unsigned long start, unsigned long end)
82{
83 unsigned long i = start & ~(L1_CACHE_BYTES - 1);
84
85 for (; i < end; i += L1_CACHE_BYTES)
86 asm volatile("dcache.cva %0\n"::"r"(i):"memory");
87 sync_is();
88}