// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/cache_insns.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>

/*
 * Write back the dirty D-caches, but do not invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
static void sh4__flush_wback_region(void *start, int size)
{
	reg_size_t aligned_start, v, cnt, end;

	/* Round the region out to whole cache lines. */
	aligned_start = register_align(start);
	v = aligned_start & ~(L1_CACHE_BYTES-1);
	end = (aligned_start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	cnt = (end - v) / L1_CACHE_BYTES;

	/* Write back one cache line per operation, unrolled by eight. */
	while (cnt >= 8) {
		__ocbwb(v); v += L1_CACHE_BYTES;
		__ocbwb(v); v += L1_CACHE_BYTES;
		__ocbwb(v); v += L1_CACHE_BYTES;
		__ocbwb(v); v += L1_CACHE_BYTES;
		__ocbwb(v); v += L1_CACHE_BYTES;
		__ocbwb(v); v += L1_CACHE_BYTES;
		__ocbwb(v); v += L1_CACHE_BYTES;
		__ocbwb(v); v += L1_CACHE_BYTES;
		cnt -= 8;
	}

	while (cnt) {
		__ocbwb(v); v += L1_CACHE_BYTES;
		cnt--;
	}
}
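
/*
 * Worked example of the region arithmetic shared by the flush routines in
 * this file (illustrative only, assuming L1_CACHE_BYTES == 32): flushing
 * 5 bytes starting at 0x1000001e touches two cache lines:
 *
 *	v   = 0x1000001e & ~31             = 0x10000000
 *	end = (0x1000001e + 5 + 31) & ~31  = 0x10000040
 *	cnt = (end - v) / 32               = 2
 *
 * so the lines holding both the first and the last byte of the region are
 * covered.
 */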

/*
 * Write back the dirty D-caches and invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
static void sh4__flush_purge_region(void *start, int size)
{
	reg_size_t aligned_start, v, cnt, end;

	aligned_start = register_align(start);
	v = aligned_start & ~(L1_CACHE_BYTES-1);
	end = (aligned_start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	cnt = (end - v) / L1_CACHE_BYTES;

	while (cnt >= 8) {
		__ocbp(v); v += L1_CACHE_BYTES;
		__ocbp(v); v += L1_CACHE_BYTES;
		__ocbp(v); v += L1_CACHE_BYTES;
		__ocbp(v); v += L1_CACHE_BYTES;
		__ocbp(v); v += L1_CACHE_BYTES;
		__ocbp(v); v += L1_CACHE_BYTES;
		__ocbp(v); v += L1_CACHE_BYTES;
		__ocbp(v); v += L1_CACHE_BYTES;
		cnt -= 8;
	}

	while (cnt) {
		__ocbp(v); v += L1_CACHE_BYTES;
		cnt--;
	}
}

/*
 * Invalidate the D-caches without writing them back.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
static void sh4__flush_invalidate_region(void *start, int size)
{
	reg_size_t aligned_start, v, cnt, end;

	aligned_start = register_align(start);
	v = aligned_start & ~(L1_CACHE_BYTES-1);
	end = (aligned_start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	cnt = (end - v) / L1_CACHE_BYTES;

	while (cnt >= 8) {
		__ocbi(v); v += L1_CACHE_BYTES;
		__ocbi(v); v += L1_CACHE_BYTES;
		__ocbi(v); v += L1_CACHE_BYTES;
		__ocbi(v); v += L1_CACHE_BYTES;
		__ocbi(v); v += L1_CACHE_BYTES;
		__ocbi(v); v += L1_CACHE_BYTES;
		__ocbi(v); v += L1_CACHE_BYTES;
		__ocbi(v); v += L1_CACHE_BYTES;
		cnt -= 8;
	}

	while (cnt) {
		__ocbi(v); v += L1_CACHE_BYTES;
		cnt--;
	}
}
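
/*
 * The three routines above differ only in the operation applied to each
 * cache line: __ocbwb() writes a line back, __ocbp() writes it back and
 * then invalidates it ("purge"), and __ocbi() invalidates it without any
 * write back. On SH-4 these helpers wrap the OCBWB, OCBP and OCBI
 * instructions (see <asm/cache_insns.h>).
 */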

void __init sh4__flush_region_init(void)
{
	__flush_wback_region = sh4__flush_wback_region;
	__flush_invalidate_region = sh4__flush_invalidate_region;
	__flush_purge_region = sh4__flush_purge_region;
}
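
/*
 * Illustrative usage sketch (not part of this file): once
 * sh4__flush_region_init() has installed the handlers above, callers flush
 * a region through the __flush_*_region function pointers assigned here,
 * e.g. to write back and invalidate a buffer before handing it to a
 * non-coherent device:
 *
 *	__flush_purge_region(buf, len);
 *
 * where 'buf' and 'len' are a hypothetical kernel-virtual buffer and its
 * length in bytes.
 */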