/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2005-2007 Cavium Networks
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/cpu.h>
#include <linux/io.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cacheops.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/traps.h>
#include <asm/mmu_context.h>
#include <asm/war.h>

#include <asm/octeon/octeon.h>

unsigned long long cache_err_dcache[NR_CPUS];
EXPORT_SYMBOL_GPL(cache_err_dcache);

/*
 * Octeon automatically flushes the dcache on TLB changes, so
 * from Linux's viewpoint it acts much like a physically
 * tagged cache. No flushing is needed.
 */
static void octeon_flush_data_cache_page(unsigned long addr)
{
	/* Nothing to do */
}

static inline void octeon_local_flush_icache(void)
{
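	/*
	 * On Octeon a single SYNCI invalidates the entire local
	 * icache, so no loop over individual cache lines is needed.
	 */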
	asm volatile ("synci 0($0)");
}
 49
 50/*
 51 * Flush local I-cache for the specified range.
 52 */
 53static void local_octeon_flush_icache_range(unsigned long start,
 54					    unsigned long end)
 55{
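	/* The whole icache is invalidated, so the range is intentionally ignored. */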
	octeon_local_flush_icache();
}

/**
 * octeon_flush_icache_all_cores - Flush caches as necessary for all cores
 * affected by a vma. If no vma is supplied, all cores are flushed.
 * @vma: VMA to flush, or NULL to flush all icaches.
 */
static void octeon_flush_icache_all_cores(struct vm_area_struct *vma)
{
	extern void octeon_send_ipi_single(int cpu, unsigned int action);
#ifdef CONFIG_SMP
	int cpu;
	cpumask_t mask;
#endif

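	/*
	 * Make sure any newly written instructions are globally
	 * visible before the local flush and before any IPIs trigger
	 * flushes on the other cores.
	 */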
	mb();
	octeon_local_flush_icache();
#ifdef CONFIG_SMP
	preempt_disable();
	cpu = smp_processor_id();

	/*
	 * If we have a vma structure, we only need to worry about
	 * cores it has been used on.
	 */
	if (vma)
		mask = *mm_cpumask(vma->vm_mm);
	else
		mask = *cpu_online_mask;
	cpumask_clear_cpu(cpu, &mask);
	for_each_cpu(cpu, &mask)
		octeon_send_ipi_single(cpu, SMP_ICACHE_FLUSH);

	preempt_enable();
#endif
}

/*
 * Flush the icache on all cores.
 */
static void octeon_flush_icache_all(void)
{
	octeon_flush_icache_all_cores(NULL);
}

/**
 * octeon_flush_cache_mm - Flush all memory associated with a memory context.
 * @mm: Memory context to flush
 */
static void octeon_flush_cache_mm(struct mm_struct *mm)
{
	/*
	 * According to the R4K version of this file, CPUs without
	 * dcache aliases don't need to do anything here.
	 */
}

/*
 * Flush a range of kernel addresses out of the icache.
 */
static void octeon_flush_icache_range(unsigned long start, unsigned long end)
{
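	/*
	 * Kernel text may execute on any core, so the conservative
	 * choice is to invalidate the icache everywhere rather than
	 * track the range.
	 */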
	octeon_flush_icache_all_cores(NULL);
}

/**
 * octeon_flush_cache_sigtramp - Flush the icache for a trampoline.
 * @addr: Address to flush
 *
 * These are used for interrupt and exception hooking.
 */
static void octeon_flush_cache_sigtramp(unsigned long addr)
{
	struct vm_area_struct *vma;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, addr);
	octeon_flush_icache_all_cores(vma);
	up_read(&current->mm->mmap_sem);
}

/**
 * octeon_flush_cache_range - Flush a range out of a vma.
 * @vma: VMA to flush
 * @start: Start of the range
 * @end: End of the range
 */
static void octeon_flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end)
{
	if (vma->vm_flags & VM_EXEC)
		octeon_flush_icache_all_cores(vma);
}

/**
 * octeon_flush_cache_page - Flush a specific page of a vma.
 * @vma: VMA to flush page for
 * @page: Page to flush
 * @pfn: Page frame number of the page
 */
static void octeon_flush_cache_page(struct vm_area_struct *vma,
				    unsigned long page, unsigned long pfn)
{
	if (vma->vm_flags & VM_EXEC)
		octeon_flush_icache_all_cores(vma);
}

static void octeon_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
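	/*
	 * The dcache needs no flushing for vmap ranges on Octeon (see
	 * octeon_flush_data_cache_page above), so reaching this
	 * function indicates a bug.
	 */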
	BUG();
}

/*
 * Probe Octeon's caches.
 */
static void probe_octeon(void)
{
	unsigned long icache_size;
	unsigned long dcache_size;
	unsigned int config1;
	struct cpuinfo_mips *c = &current_cpu_data;
	int cputype = current_cpu_type();

	config1 = read_c0_config1();
	switch (cputype) {
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
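		/*
		 * Decode the standard MIPS Config1 icache fields:
		 * IL (bits 21:19) gives the line size, IS (bits 24:22)
		 * the sets per way and IA (bits 18:16) the
		 * associativity minus one.
		 */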
		c->icache.linesz = 2 << ((config1 >> 19) & 7);
		c->icache.sets = 64 << ((config1 >> 22) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);
		c->icache.flags |= MIPS_CACHE_VTAG;
		icache_size =
			c->icache.sets * c->icache.ways * c->icache.linesz;
		c->icache.waybit = ffs(icache_size / c->icache.ways) - 1;
		c->dcache.linesz = 128;
		if (cputype == CPU_CAVIUM_OCTEON_PLUS)
			c->dcache.sets = 2; /* CN5XXX has two Dcache sets */
		else
			c->dcache.sets = 1; /* CN3XXX has one Dcache set */
		c->dcache.ways = 64;
		dcache_size =
			c->dcache.sets * c->dcache.ways * c->dcache.linesz;
		c->dcache.waybit = ffs(dcache_size / c->dcache.ways) - 1;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_CAVIUM_OCTEON2:
		c->icache.linesz = 2 << ((config1 >> 19) & 7);
		c->icache.sets = 8;
		c->icache.ways = 37;
		c->icache.flags |= MIPS_CACHE_VTAG;
		icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;

		c->dcache.linesz = 128;
		c->dcache.ways = 32;
		c->dcache.sets = 8;
		dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_CAVIUM_OCTEON3:
		c->icache.linesz = 128;
		c->icache.sets = 16;
		c->icache.ways = 39;
		c->icache.flags |= MIPS_CACHE_VTAG;
		icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;

		c->dcache.linesz = 128;
		c->dcache.ways = 32;
		c->dcache.sets = 8;
		dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	default:
		panic("Unsupported Cavium Networks CPU type");
		break;
	}

	/* Compute a couple of other cache variables. */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = icache_size / (c->icache.linesz * c->icache.ways);
	c->dcache.sets = dcache_size / (c->dcache.linesz * c->dcache.ways);

	if (smp_processor_id() == 0) {
		pr_notice("Primary instruction cache %ldkB, %s, %d way, "
			  "%d sets, linesize %d bytes.\n",
			  icache_size >> 10,
			  cpu_has_vtag_icache ?
				"virtually tagged" : "physically tagged",
			  c->icache.ways, c->icache.sets, c->icache.linesz);

		pr_notice("Primary data cache %ldkB, %d-way, %d sets, "
			  "linesize %d bytes.\n",
			  dcache_size >> 10, c->dcache.ways,
			  c->dcache.sets, c->dcache.linesz);
	}
}

static void octeon_cache_error_setup(void)
{
	extern char except_vec2_octeon;
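	/* Copy the low-level handler to the cache-error vector (EBASE + 0x100). */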
	set_handler(0x100, &except_vec2_octeon, 0x80);
}

/*
 * Set up the Octeon cache flush routines.
 */
void octeon_cache_init(void)
{
	probe_octeon();

	shm_align_mask = PAGE_SIZE - 1;

	flush_cache_all			= octeon_flush_icache_all;
	__flush_cache_all		= octeon_flush_icache_all;
	flush_cache_mm			= octeon_flush_cache_mm;
	flush_cache_page		= octeon_flush_cache_page;
	flush_cache_range		= octeon_flush_cache_range;
	flush_cache_sigtramp		= octeon_flush_cache_sigtramp;
	flush_icache_all		= octeon_flush_icache_all;
	flush_data_cache_page		= octeon_flush_data_cache_page;
	flush_icache_range		= octeon_flush_icache_range;
	local_flush_icache_range	= local_octeon_flush_icache_range;
	__flush_icache_user_range	= octeon_flush_icache_range;
	__local_flush_icache_user_range	= local_octeon_flush_icache_range;

	__flush_kernel_vmap_range	= octeon_flush_kernel_vmap_range;

	build_clear_page();
	build_copy_page();

	board_cache_error_setup = octeon_cache_error_setup;
}

/*
 * Handle a cache error exception.
 */
static RAW_NOTIFIER_HEAD(co_cache_error_chain);

int register_co_cache_error_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&co_cache_error_chain, nb);
}
EXPORT_SYMBOL_GPL(register_co_cache_error_notifier);

int unregister_co_cache_error_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_unregister(&co_cache_error_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_co_cache_error_notifier);

static void co_cache_error_call_notifiers(unsigned long val)
{
	int rv = raw_notifier_call_chain(&co_cache_error_chain, val, NULL);

	if ((rv & ~NOTIFY_STOP_MASK) != NOTIFY_OK) {
		u64 dcache_err;
		unsigned long coreid = cvmx_get_core_num();
		u64 icache_err = read_octeon_c0_icacheerr();

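		/*
		 * val is nonzero on the non-recoverable path, where the
		 * low-level handler has already saved the dcache error
		 * state in cache_err_dcache[]; on the recoverable path
		 * the CP0 register can still be read directly.
		 */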
		if (val) {
			dcache_err = cache_err_dcache[coreid];
			cache_err_dcache[coreid] = 0;
		} else {
			dcache_err = read_octeon_c0_dcacheerr();
		}

		pr_err("Core%lu: Cache error exception:\n", coreid);
		pr_err("cp0_errorepc == %lx\n", read_c0_errorepc());
		if (icache_err & 1) {
			pr_err("CacheErr (Icache) == %llx\n",
			       (unsigned long long)icache_err);
			write_octeon_c0_icacheerr(0);
		}
		if (dcache_err & 1) {
			pr_err("CacheErr (Dcache) == %llx\n",
			       (unsigned long long)dcache_err);
		}
	}
}

/*
 * Called when the exception is recoverable.
 */
asmlinkage void cache_parity_error_octeon_recoverable(void)
{
	co_cache_error_call_notifiers(0);
}

/*
 * Called when the exception is not recoverable.
 */
asmlinkage void cache_parity_error_octeon_non_recoverable(void)
{
	co_cache_error_call_notifiers(1);
	panic("Can't handle cache error: nested exception");
}