v6.8
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * arch/sh/mm/cache.c
  4 *
  5 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
  6 * Copyright (C) 2002 - 2010  Paul Mundt
  7 */
  8#include <linux/mm.h>
  9#include <linux/init.h>
 10#include <linux/mutex.h>
 11#include <linux/fs.h>
 12#include <linux/smp.h>
 13#include <linux/highmem.h>
 14#include <linux/module.h>
 15#include <asm/mmu_context.h>
 16#include <asm/cacheflush.h>
 17
 18void (*local_flush_cache_all)(void *args) = cache_noop;
 19void (*local_flush_cache_mm)(void *args) = cache_noop;
 20void (*local_flush_cache_dup_mm)(void *args) = cache_noop;
 21void (*local_flush_cache_page)(void *args) = cache_noop;
 22void (*local_flush_cache_range)(void *args) = cache_noop;
 23void (*local_flush_dcache_folio)(void *args) = cache_noop;
 24void (*local_flush_icache_range)(void *args) = cache_noop;
 25void (*local_flush_icache_folio)(void *args) = cache_noop;
 26void (*local_flush_cache_sigtramp)(void *args) = cache_noop;
 27
 28void (*__flush_wback_region)(void *start, int size);
 29EXPORT_SYMBOL(__flush_wback_region);
 30void (*__flush_purge_region)(void *start, int size);
 31EXPORT_SYMBOL(__flush_purge_region);
 32void (*__flush_invalidate_region)(void *start, int size);
 33EXPORT_SYMBOL(__flush_invalidate_region);
 34
 35static inline void noop__flush_region(void *start, int size)
 36{
 37}
 38
 39static inline void cacheop_on_each_cpu(void (*func) (void *info), void *info,
 40                                   int wait)
 41{
 42	preempt_disable();
 43
 44	/* Needing IPI for cross-core flush is SHX3-specific. */
 45#ifdef CONFIG_CPU_SHX3
 46	/*
 47	 * It's possible that this gets called early on when IRQs are
 48	 * still disabled due to ioremapping by the boot CPU, so don't
 49	 * even attempt IPIs unless there are other CPUs online.
 50	 */
 51	if (num_online_cpus() > 1)
 52		smp_call_function(func, info, wait);
 53#endif
 54
 55	func(info);
 56
 57	preempt_enable();
 58}
 59
 60void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 61		       unsigned long vaddr, void *dst, const void *src,
 62		       unsigned long len)
 63{
 64	struct folio *folio = page_folio(page);
 65
 66	if (boot_cpu_data.dcache.n_aliases && folio_mapped(folio) &&
 67	    test_bit(PG_dcache_clean, &folio->flags)) {
 68		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
 69		memcpy(vto, src, len);
 70		kunmap_coherent(vto);
 71	} else {
 72		memcpy(dst, src, len);
 73		if (boot_cpu_data.dcache.n_aliases)
 74			clear_bit(PG_dcache_clean, &folio->flags);
 75	}
 76
 77	if (vma->vm_flags & VM_EXEC)
 78		flush_cache_page(vma, vaddr, page_to_pfn(page));
 79}
 80
 81void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
 82			 unsigned long vaddr, void *dst, const void *src,
 83			 unsigned long len)
 84{
 85	struct folio *folio = page_folio(page);
 86
 87	if (boot_cpu_data.dcache.n_aliases && page_mapcount(page) &&
 88	    test_bit(PG_dcache_clean, &folio->flags)) {
 89		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
 90		memcpy(dst, vfrom, len);
 91		kunmap_coherent(vfrom);
 92	} else {
 93		memcpy(dst, src, len);
 94		if (boot_cpu_data.dcache.n_aliases)
 95			clear_bit(PG_dcache_clean, &folio->flags);
 96	}
 97}
 98
 99void copy_user_highpage(struct page *to, struct page *from,
100			unsigned long vaddr, struct vm_area_struct *vma)
101{
102	struct folio *src = page_folio(from);
103	void *vfrom, *vto;
104
105	vto = kmap_atomic(to);
106
107	if (boot_cpu_data.dcache.n_aliases && folio_mapped(src) &&
108	    test_bit(PG_dcache_clean, &src->flags)) {
109		vfrom = kmap_coherent(from, vaddr);
110		copy_page(vto, vfrom);
111		kunmap_coherent(vfrom);
112	} else {
113		vfrom = kmap_atomic(from);
114		copy_page(vto, vfrom);
115		kunmap_atomic(vfrom);
116	}
117
118	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) ||
119	    (vma->vm_flags & VM_EXEC))
120		__flush_purge_region(vto, PAGE_SIZE);
121
122	kunmap_atomic(vto);
123	/* Make sure this page is cleared on other CPUs too before using it */
124	smp_wmb();
125}
126EXPORT_SYMBOL(copy_user_highpage);
127
128void clear_user_highpage(struct page *page, unsigned long vaddr)
129{
130	void *kaddr = kmap_atomic(page);
131
132	clear_page(kaddr);
133
134	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
135		__flush_purge_region(kaddr, PAGE_SIZE);
136
137	kunmap_atomic(kaddr);
138}
139EXPORT_SYMBOL(clear_user_highpage);
140
141void __update_cache(struct vm_area_struct *vma,
142		    unsigned long address, pte_t pte)
143{
144	unsigned long pfn = pte_pfn(pte);
145
146	if (!boot_cpu_data.dcache.n_aliases)
147		return;
148
149	if (pfn_valid(pfn)) {
150		struct folio *folio = page_folio(pfn_to_page(pfn));
151		int dirty = !test_and_set_bit(PG_dcache_clean, &folio->flags);
152		if (dirty)
153			__flush_purge_region(folio_address(folio),
154						folio_size(folio));
155	}
156}
157
158void __flush_anon_page(struct page *page, unsigned long vmaddr)
159{
160	struct folio *folio = page_folio(page);
161	unsigned long addr = (unsigned long) page_address(page);
162
163	if (pages_do_alias(addr, vmaddr)) {
164		if (boot_cpu_data.dcache.n_aliases && folio_mapped(folio) &&
165		    test_bit(PG_dcache_clean, &folio->flags)) {
166			void *kaddr;
167
168			kaddr = kmap_coherent(page, vmaddr);
169			/* XXX.. For now kunmap_coherent() does a purge */
170			/* __flush_purge_region((void *)kaddr, PAGE_SIZE); */
171			kunmap_coherent(kaddr);
172		} else
173			__flush_purge_region(folio_address(folio),
174						folio_size(folio));
175	}
176}
177
178void flush_cache_all(void)
179{
180	cacheop_on_each_cpu(local_flush_cache_all, NULL, 1);
181}
182EXPORT_SYMBOL(flush_cache_all);
183
184void flush_cache_mm(struct mm_struct *mm)
185{
186	if (boot_cpu_data.dcache.n_aliases == 0)
187		return;
188
189	cacheop_on_each_cpu(local_flush_cache_mm, mm, 1);
190}
191
192void flush_cache_dup_mm(struct mm_struct *mm)
193{
194	if (boot_cpu_data.dcache.n_aliases == 0)
195		return;
196
197	cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1);
198}
199
200void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
201		      unsigned long pfn)
202{
203	struct flusher_data data;
204
205	data.vma = vma;
206	data.addr1 = addr;
207	data.addr2 = pfn;
208
209	cacheop_on_each_cpu(local_flush_cache_page, (void *)&data, 1);
210}
211
212void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
213		       unsigned long end)
214{
215	struct flusher_data data;
216
217	data.vma = vma;
218	data.addr1 = start;
219	data.addr2 = end;
220
221	cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1);
222}
223EXPORT_SYMBOL(flush_cache_range);
224
225void flush_dcache_folio(struct folio *folio)
226{
227	cacheop_on_each_cpu(local_flush_dcache_folio, folio, 1);
228}
229EXPORT_SYMBOL(flush_dcache_folio);
230
231void flush_icache_range(unsigned long start, unsigned long end)
232{
233	struct flusher_data data;
234
235	data.vma = NULL;
236	data.addr1 = start;
237	data.addr2 = end;
238
239	cacheop_on_each_cpu(local_flush_icache_range, (void *)&data, 1);
240}
241EXPORT_SYMBOL(flush_icache_range);
242
243void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
244		unsigned int nr)
245{
246	/* Nothing uses the VMA, so just pass the folio along */
247	cacheop_on_each_cpu(local_flush_icache_folio, page_folio(page), 1);
248}
249
250void flush_cache_sigtramp(unsigned long address)
251{
252	cacheop_on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1);
253}
254
255static void compute_alias(struct cache_info *c)
256{
257#ifdef CONFIG_MMU
258	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
259#else
260	c->alias_mask = 0;
261#endif
262	c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
263}
264
265static void __init emit_cache_params(void)
266{
267	printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
268		boot_cpu_data.icache.ways,
269		boot_cpu_data.icache.sets,
270		boot_cpu_data.icache.way_incr);
271	printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
272		boot_cpu_data.icache.entry_mask,
273		boot_cpu_data.icache.alias_mask,
274		boot_cpu_data.icache.n_aliases);
275	printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
276		boot_cpu_data.dcache.ways,
277		boot_cpu_data.dcache.sets,
278		boot_cpu_data.dcache.way_incr);
279	printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
280		boot_cpu_data.dcache.entry_mask,
281		boot_cpu_data.dcache.alias_mask,
282		boot_cpu_data.dcache.n_aliases);
283
284	/*
285	 * Emit Secondary Cache parameters if the CPU has a probed L2.
286	 */
287	if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
288		printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
289			boot_cpu_data.scache.ways,
290			boot_cpu_data.scache.sets,
291			boot_cpu_data.scache.way_incr);
292		printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
293			boot_cpu_data.scache.entry_mask,
294			boot_cpu_data.scache.alias_mask,
295			boot_cpu_data.scache.n_aliases);
296	}
297}
298
299void __init cpu_cache_init(void)
300{
301	unsigned int cache_disabled = 0;
302
303#ifdef SH_CCR
304	cache_disabled = !(__raw_readl(SH_CCR) & CCR_CACHE_ENABLE);
305#endif
306
307	compute_alias(&boot_cpu_data.icache);
308	compute_alias(&boot_cpu_data.dcache);
309	compute_alias(&boot_cpu_data.scache);
310
311	__flush_wback_region		= noop__flush_region;
312	__flush_purge_region		= noop__flush_region;
313	__flush_invalidate_region	= noop__flush_region;
314
315	/*
316	 * No flushing is necessary in the disabled cache case so we can
317	 * just keep the noop functions in local_flush_..() and __flush_..()
318	 */
319	if (unlikely(cache_disabled))
320		goto skip;
321
322	if (boot_cpu_data.type == CPU_J2) {
323		extern void __weak j2_cache_init(void);
324
325		j2_cache_init();
326	} else if (boot_cpu_data.family == CPU_FAMILY_SH2) {
327		extern void __weak sh2_cache_init(void);
328
329		sh2_cache_init();
330	}
331
332	if (boot_cpu_data.family == CPU_FAMILY_SH2A) {
333		extern void __weak sh2a_cache_init(void);
334
335		sh2a_cache_init();
336	}
337
338	if (boot_cpu_data.family == CPU_FAMILY_SH3) {
339		extern void __weak sh3_cache_init(void);
340
341		sh3_cache_init();
342
343		if ((boot_cpu_data.type == CPU_SH7705) &&
344		    (boot_cpu_data.dcache.sets == 512)) {
345			extern void __weak sh7705_cache_init(void);
346
347			sh7705_cache_init();
348		}
349	}
350
351	if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
352	    (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
353	    (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
354		extern void __weak sh4_cache_init(void);
355
356		sh4_cache_init();
357
358		if ((boot_cpu_data.type == CPU_SH7786) ||
359		    (boot_cpu_data.type == CPU_SHX3)) {
360			extern void __weak shx3_cache_init(void);
361
362			shx3_cache_init();
363		}
364	}
365
366skip:
367	emit_cache_params();
368}
v3.15
 
  1/*
  2 * arch/sh/mm/cache.c
  3 *
  4 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
  5 * Copyright (C) 2002 - 2010  Paul Mundt
  6 *
  7 * Released under the terms of the GNU GPL v2.0.
  8 */
  9#include <linux/mm.h>
 10#include <linux/init.h>
 11#include <linux/mutex.h>
 12#include <linux/fs.h>
 13#include <linux/smp.h>
 14#include <linux/highmem.h>
 15#include <linux/module.h>
 16#include <asm/mmu_context.h>
 17#include <asm/cacheflush.h>
 18
 19void (*local_flush_cache_all)(void *args) = cache_noop;
 20void (*local_flush_cache_mm)(void *args) = cache_noop;
 21void (*local_flush_cache_dup_mm)(void *args) = cache_noop;
 22void (*local_flush_cache_page)(void *args) = cache_noop;
 23void (*local_flush_cache_range)(void *args) = cache_noop;
 24void (*local_flush_dcache_page)(void *args) = cache_noop;
 25void (*local_flush_icache_range)(void *args) = cache_noop;
 26void (*local_flush_icache_page)(void *args) = cache_noop;
 27void (*local_flush_cache_sigtramp)(void *args) = cache_noop;
 28
 29void (*__flush_wback_region)(void *start, int size);
 30EXPORT_SYMBOL(__flush_wback_region);
 31void (*__flush_purge_region)(void *start, int size);
 32EXPORT_SYMBOL(__flush_purge_region);
 33void (*__flush_invalidate_region)(void *start, int size);
 34EXPORT_SYMBOL(__flush_invalidate_region);
 35
 36static inline void noop__flush_region(void *start, int size)
 37{
 38}
 39
 40static inline void cacheop_on_each_cpu(void (*func) (void *info), void *info,
 41                                   int wait)
 42{
 43	preempt_disable();
 44
 45	/*
 46	 * It's possible that this gets called early on when IRQs are
 47	 * still disabled due to ioremapping by the boot CPU, so don't
 48	 * even attempt IPIs unless there are other CPUs online.
 49	 */
 50	if (num_online_cpus() > 1)
 51		smp_call_function(func, info, wait);
 52
 53	func(info);
 54
 55	preempt_enable();
 56}
 57
 58void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 59		       unsigned long vaddr, void *dst, const void *src,
 60		       unsigned long len)
 61{
 62	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
 63	    test_bit(PG_dcache_clean, &page->flags)) {
 64		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
 65		memcpy(vto, src, len);
 66		kunmap_coherent(vto);
 67	} else {
 68		memcpy(dst, src, len);
 69		if (boot_cpu_data.dcache.n_aliases)
 70			clear_bit(PG_dcache_clean, &page->flags);
 71	}
 72
 73	if (vma->vm_flags & VM_EXEC)
 74		flush_cache_page(vma, vaddr, page_to_pfn(page));
 75}
 76
 77void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
 78			 unsigned long vaddr, void *dst, const void *src,
 79			 unsigned long len)
 80{
 81	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
 82	    test_bit(PG_dcache_clean, &page->flags)) {
 83		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
 84		memcpy(dst, vfrom, len);
 85		kunmap_coherent(vfrom);
 86	} else {
 87		memcpy(dst, src, len);
 88		if (boot_cpu_data.dcache.n_aliases)
 89			clear_bit(PG_dcache_clean, &page->flags);
 90	}
 91}
 92
 93void copy_user_highpage(struct page *to, struct page *from,
 94			unsigned long vaddr, struct vm_area_struct *vma)
 95{
 96	void *vfrom, *vto;
 97
 98	vto = kmap_atomic(to);
 99
100	if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
101	    test_bit(PG_dcache_clean, &from->flags)) {
102		vfrom = kmap_coherent(from, vaddr);
103		copy_page(vto, vfrom);
104		kunmap_coherent(vfrom);
105	} else {
106		vfrom = kmap_atomic(from);
107		copy_page(vto, vfrom);
108		kunmap_atomic(vfrom);
109	}
110
111	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) ||
112	    (vma->vm_flags & VM_EXEC))
113		__flush_purge_region(vto, PAGE_SIZE);
114
115	kunmap_atomic(vto);
116	/* Make sure this page is cleared on other CPUs too before using it */
117	smp_wmb();
118}
119EXPORT_SYMBOL(copy_user_highpage);
120
121void clear_user_highpage(struct page *page, unsigned long vaddr)
122{
123	void *kaddr = kmap_atomic(page);
124
125	clear_page(kaddr);
126
127	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
128		__flush_purge_region(kaddr, PAGE_SIZE);
129
130	kunmap_atomic(kaddr);
131}
132EXPORT_SYMBOL(clear_user_highpage);
133
134void __update_cache(struct vm_area_struct *vma,
135		    unsigned long address, pte_t pte)
136{
137	struct page *page;
138	unsigned long pfn = pte_pfn(pte);
139
140	if (!boot_cpu_data.dcache.n_aliases)
141		return;
142
143	page = pfn_to_page(pfn);
144	if (pfn_valid(pfn)) {
145		int dirty = !test_and_set_bit(PG_dcache_clean, &page->flags);
146		if (dirty)
147			__flush_purge_region(page_address(page), PAGE_SIZE);
148	}
149}
150
151void __flush_anon_page(struct page *page, unsigned long vmaddr)
152{
153	unsigned long addr = (unsigned long) page_address(page);
154
155	if (pages_do_alias(addr, vmaddr)) {
156		if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
157		    test_bit(PG_dcache_clean, &page->flags)) {
158			void *kaddr;
159
160			kaddr = kmap_coherent(page, vmaddr);
161			/* XXX.. For now kunmap_coherent() does a purge */
162			/* __flush_purge_region((void *)kaddr, PAGE_SIZE); */
163			kunmap_coherent(kaddr);
164		} else
165			__flush_purge_region((void *)addr, PAGE_SIZE);
166	}
167}
168
169void flush_cache_all(void)
170{
171	cacheop_on_each_cpu(local_flush_cache_all, NULL, 1);
172}
173EXPORT_SYMBOL(flush_cache_all);
174
175void flush_cache_mm(struct mm_struct *mm)
176{
177	if (boot_cpu_data.dcache.n_aliases == 0)
178		return;
179
180	cacheop_on_each_cpu(local_flush_cache_mm, mm, 1);
181}
182
183void flush_cache_dup_mm(struct mm_struct *mm)
184{
185	if (boot_cpu_data.dcache.n_aliases == 0)
186		return;
187
188	cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1);
189}
190
191void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
192		      unsigned long pfn)
193{
194	struct flusher_data data;
195
196	data.vma = vma;
197	data.addr1 = addr;
198	data.addr2 = pfn;
199
200	cacheop_on_each_cpu(local_flush_cache_page, (void *)&data, 1);
201}
202
203void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
204		       unsigned long end)
205{
206	struct flusher_data data;
207
208	data.vma = vma;
209	data.addr1 = start;
210	data.addr2 = end;
211
212	cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1);
213}
214EXPORT_SYMBOL(flush_cache_range);
215
216void flush_dcache_page(struct page *page)
217{
218	cacheop_on_each_cpu(local_flush_dcache_page, page, 1);
219}
220EXPORT_SYMBOL(flush_dcache_page);
221
222void flush_icache_range(unsigned long start, unsigned long end)
223{
224	struct flusher_data data;
225
226	data.vma = NULL;
227	data.addr1 = start;
228	data.addr2 = end;
229
230	cacheop_on_each_cpu(local_flush_icache_range, (void *)&data, 1);
231}
232
233void flush_icache_page(struct vm_area_struct *vma, struct page *page)
234{
235	/* Nothing uses the VMA, so just pass the struct page along */
236	cacheop_on_each_cpu(local_flush_icache_page, page, 1);
237}
238
239void flush_cache_sigtramp(unsigned long address)
240{
241	cacheop_on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1);
242}
243
244static void compute_alias(struct cache_info *c)
245{
246	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
247	c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
248}
249
250static void __init emit_cache_params(void)
251{
252	printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
253		boot_cpu_data.icache.ways,
254		boot_cpu_data.icache.sets,
255		boot_cpu_data.icache.way_incr);
256	printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
257		boot_cpu_data.icache.entry_mask,
258		boot_cpu_data.icache.alias_mask,
259		boot_cpu_data.icache.n_aliases);
260	printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
261		boot_cpu_data.dcache.ways,
262		boot_cpu_data.dcache.sets,
263		boot_cpu_data.dcache.way_incr);
264	printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
265		boot_cpu_data.dcache.entry_mask,
266		boot_cpu_data.dcache.alias_mask,
267		boot_cpu_data.dcache.n_aliases);
268
269	/*
270	 * Emit Secondary Cache parameters if the CPU has a probed L2.
271	 */
272	if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
273		printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
274			boot_cpu_data.scache.ways,
275			boot_cpu_data.scache.sets,
276			boot_cpu_data.scache.way_incr);
277		printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
278			boot_cpu_data.scache.entry_mask,
279			boot_cpu_data.scache.alias_mask,
280			boot_cpu_data.scache.n_aliases);
281	}
282}
283
284void __init cpu_cache_init(void)
285{
286	unsigned int cache_disabled = 0;
287
288#ifdef SH_CCR
289	cache_disabled = !(__raw_readl(SH_CCR) & CCR_CACHE_ENABLE);
290#endif
291
292	compute_alias(&boot_cpu_data.icache);
293	compute_alias(&boot_cpu_data.dcache);
294	compute_alias(&boot_cpu_data.scache);
295
296	__flush_wback_region		= noop__flush_region;
297	__flush_purge_region		= noop__flush_region;
298	__flush_invalidate_region	= noop__flush_region;
299
300	/*
301	 * No flushing is necessary in the disabled cache case so we can
302	 * just keep the noop functions in local_flush_..() and __flush_..()
303	 */
304	if (unlikely(cache_disabled))
305		goto skip;
306
307	if (boot_cpu_data.family == CPU_FAMILY_SH2) {
308		extern void __weak sh2_cache_init(void);
309
310		sh2_cache_init();
311	}
312
313	if (boot_cpu_data.family == CPU_FAMILY_SH2A) {
314		extern void __weak sh2a_cache_init(void);
315
316		sh2a_cache_init();
317	}
318
319	if (boot_cpu_data.family == CPU_FAMILY_SH3) {
320		extern void __weak sh3_cache_init(void);
321
322		sh3_cache_init();
323
324		if ((boot_cpu_data.type == CPU_SH7705) &&
325		    (boot_cpu_data.dcache.sets == 512)) {
326			extern void __weak sh7705_cache_init(void);
327
328			sh7705_cache_init();
329		}
330	}
331
332	if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
333	    (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
334	    (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
335		extern void __weak sh4_cache_init(void);
336
337		sh4_cache_init();
338
339		if ((boot_cpu_data.type == CPU_SH7786) ||
340		    (boot_cpu_data.type == CPU_SHX3)) {
341			extern void __weak shx3_cache_init(void);
342
343			shx3_cache_init();
344		}
345	}
346
347	if (boot_cpu_data.family == CPU_FAMILY_SH5) {
348		extern void __weak sh5_cache_init(void);
349
350		sh5_cache_init();
351	}
352
353skip:
354	emit_cache_params();
355}