arch/arm/mm/cache-xsc3l2.c — Linux v6.13.7 (identical in v6.8)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/mm/cache-xsc3l2.c - XScale3 L2 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 */
#include <linux/init.h>
#include <linux/highmem.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>

#define CR_L2	(1 << 26)

#define CACHE_LINE_SIZE		32
#define CACHE_LINE_SHIFT	5
#define CACHE_WAY_PER_SET	8

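/*
 * Bits [11:8] of the L2 cache type register encode the way size as a
 * power-of-two multiple of 8 KiB; dividing a way by the 32-byte line
 * gives the number of sets walked by the set/way loops below.
 */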
#define CACHE_WAY_SIZE(l2ctype)	(8192 << (((l2ctype) >> 8) & 0xf))
#define CACHE_SET_SIZE(l2ctype)	(CACHE_WAY_SIZE(l2ctype) >> CACHE_LINE_SHIFT)

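/*
 * An L2 cache is fitted if the L2 cache type register reads back with
 * any of bits [7:3] set.
 */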
static inline int xsc3_l2_present(void)
{
	unsigned long l2ctype;

	__asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype));

	return !!(l2ctype & 0xf8);
}

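/* Clean (write back) one L2 cache line by virtual address. */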
static inline void xsc3_l2_clean_mva(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c7, c11, 1" : : "r" (addr));
}

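/* Invalidate one L2 cache line by virtual address. */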
static inline void xsc3_l2_inv_mva(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c7, c7, 1" : : "r" (addr));
}

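/*
 * Invalidate the entire L2 by visiting every set/way pair: the operand
 * packs the way into bits [31:29] (eight ways) and the set index from
 * bit 5 upwards, matching the 32-byte line size.
 */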
static inline void xsc3_l2_inv_all(void)
{
	unsigned long l2ctype, set_way;
	int set, way;

	__asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype));

	for (set = 0; set < CACHE_SET_SIZE(l2ctype); set++) {
		for (way = 0; way < CACHE_WAY_PER_SET; way++) {
			set_way = (way << 29) | (set << 5);
			__asm__("mcr p15, 1, %0, c7, c11, 2" : : "r"(set_way));
		}
	}

	dsb();
}

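/* Tear down the atomic kmap left behind by l2_map_va(), if any. */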
static inline void l2_unmap_va(unsigned long va)
{
#ifdef CONFIG_HIGHMEM
	if (va != -1)
		kunmap_atomic((void *)va);
#endif
}

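/*
 * Map the physical address 'pa', reusing the page already mapped for
 * 'prev_va' when possible.  Shifting left by (32 - PAGE_SHIFT) keeps
 * only the offset within the page, so the comparison below detects a
 * page crossing: ranges are walked upwards, and the offset can only
 * decrease when a new page is entered.  prev_va == -1 compares highest
 * and so forces the first mapping.
 */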
static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va)
{
#ifdef CONFIG_HIGHMEM
	unsigned long va = prev_va & PAGE_MASK;
	unsigned long pa_offset = pa << (32 - PAGE_SHIFT);
	if (unlikely(pa_offset < (prev_va << (32 - PAGE_SHIFT)))) {
		/*
		 * Switching to a new page.  Because cache ops are
		 * using virtual addresses only, we must put a mapping
		 * in place for it.
		 */
		l2_unmap_va(prev_va);
		va = (unsigned long)kmap_atomic_pfn(pa >> PAGE_SHIFT);
	}
	return va + (pa_offset >> (32 - PAGE_SHIFT));
#else
	return __phys_to_virt(pa);
#endif
}

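/*
 * Invalidate every L2 line in [start, end).  Partial lines at either
 * end are cleaned before invalidation so that neighbouring data that
 * shares the line is not lost; the (0, -1ul) range takes the
 * whole-cache set/way path instead.
 */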
static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
{
	unsigned long vaddr;

	if (start == 0 && end == -1ul) {
		xsc3_l2_inv_all();
		return;
	}

	vaddr = -1;  /* to force the first mapping */

	/*
	 * Clean and invalidate partial first cache line.
	 */
	if (start & (CACHE_LINE_SIZE - 1)) {
		vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr);
		xsc3_l2_clean_mva(vaddr);
		xsc3_l2_inv_mva(vaddr);
		start = (start | (CACHE_LINE_SIZE - 1)) + 1;
	}

	/*
	 * Invalidate all full cache lines between 'start' and 'end'.
	 */
	while (start < (end & ~(CACHE_LINE_SIZE - 1))) {
		vaddr = l2_map_va(start, vaddr);
		xsc3_l2_inv_mva(vaddr);
		start += CACHE_LINE_SIZE;
	}

	/*
	 * Clean and invalidate partial last cache line.
	 */
	if (start < end) {
		vaddr = l2_map_va(start, vaddr);
		xsc3_l2_clean_mva(vaddr);
		xsc3_l2_inv_mva(vaddr);
	}

	l2_unmap_va(vaddr);

	dsb();
}

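/* Clean (write back) every L2 line overlapping [start, end). */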
static void xsc3_l2_clean_range(unsigned long start, unsigned long end)
{
	unsigned long vaddr;

	vaddr = -1;  /* to force the first mapping */

	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		vaddr = l2_map_va(start, vaddr);
		xsc3_l2_clean_mva(vaddr);
		start += CACHE_LINE_SIZE;
	}

	l2_unmap_va(vaddr);

	dsb();
}

/*
 * Optimize the L2 flush-all operation by using the set/way format,
 * which avoids mapping and walking individual addresses.
 */
static inline void xsc3_l2_flush_all(void)
{
	unsigned long l2ctype, set_way;
	int set, way;

	__asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype));

	for (set = 0; set < CACHE_SET_SIZE(l2ctype); set++) {
		for (way = 0; way < CACHE_WAY_PER_SET; way++) {
			set_way = (way << 29) | (set << 5);
			__asm__("mcr p15, 1, %0, c7, c15, 2" : : "r"(set_way));
		}
	}

	dsb();
}

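/*
 * Clean and invalidate every L2 line overlapping [start, end); a
 * (0, -1ul) range takes the whole-cache set/way path above.
 */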
static void xsc3_l2_flush_range(unsigned long start, unsigned long end)
{
	unsigned long vaddr;

	if (start == 0 && end == -1ul) {
		xsc3_l2_flush_all();
		return;
	}

	vaddr = -1;  /* to force the first mapping */

	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		vaddr = l2_map_va(start, vaddr);
		xsc3_l2_clean_mva(vaddr);
		xsc3_l2_inv_mva(vaddr);
		start += CACHE_LINE_SIZE;
	}

	l2_unmap_va(vaddr);

	dsb();
}

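/*
 * Probe for an XScale3 L2 and, if it is enabled in the control
 * register, invalidate it and hook the range operations into the
 * outer-cache layer.
 */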
static int __init xsc3_l2_init(void)
{
	if (!cpu_is_xsc3() || !xsc3_l2_present())
		return 0;

	if (get_cr() & CR_L2) {
		pr_info("XScale3 L2 cache enabled.\n");
		xsc3_l2_inv_all();

		outer_cache.inv_range = xsc3_l2_inv_range;
		outer_cache.clean_range = xsc3_l2_clean_range;
		outer_cache.flush_range = xsc3_l2_flush_range;
	}

	return 0;
}
core_initcall(xsc3_l2_init);