v4.17
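(This listing appears to be arch/nds32/mm/proc.c, the Andes nds32 cache and MMU maintenance code, captured at v4.17 here and again at v5.9 further below.)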
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/nds32.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/l2_cache.h>
#include <nds32_intrinsic.h>

#include <asm/cache_info.h>
extern struct cache_info L1_cache_info[2];

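/*
 * Return the pte value for a kernel virtual address if it is mapped,
 * or 0 if it is not.  pte_t is a plain integer type on this
 * architecture, so the pte can be returned (and tested) directly.
 */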
int va_kernel_present(unsigned long addr)
{
	pmd_t *pmd;
	pte_t *ptep, pte;

	pmd = pmd_offset(pgd_offset_k(addr), addr);
	if (!pmd_none(*pmd)) {
		ptep = pte_offset_map(pmd, addr);
		pte = *ptep;
		if (pte_present(pte))
			return pte;
	}
	return 0;
}

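/*
 * Walk a user mm's page tables (pgd -> pud -> pmd -> pte) and return
 * the pte value for @addr if it is present, or 0 otherwise.
 */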
pte_t va_present(struct mm_struct * mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					return pte;
			}
		}
	}
	return 0;
}

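/*
 * Test whether @addr is readable in the context of @regs: user
 * addresses are looked up in current->mm and checked for read
 * permission; for kernel mode, presence alone is enough.
 */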
int va_readable(struct pt_regs *regs, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	pte_t pte;
	int ret = 0;

	if (user_mode(regs)) {
		/* user mode */
		pte = va_present(mm, addr);
		if (pte && pte_read(pte))
			ret = 1;
	} else {
		/* superuser mode is always readable, so we only need to
		 * check whether the address is present */
		return !!va_kernel_present(addr);
	}
	return ret;
}

int va_writable(struct pt_regs *regs, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	pte_t pte;
	int ret = 0;

	if (user_mode(regs)) {
		/* user mode */
		pte = va_present(mm, addr);
		if (pte && pte_write(pte))
			ret = 1;
	} else {
		/* superuser mode */
		pte = va_kernel_present(addr);
		if (pte && pte_kernel_write(pte))
			ret = 1;
	}
	return ret;
}

/*
 * All
 */
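/*
 * Invalidate the whole L1 I-cache by cache index.  "end" starts at the
 * total cache size (line_size * ways * sets) and walks down to zero
 * one line per CCTL operation, unrolled four times per iteration.
 */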
void cpu_icache_inval_all(void)
{
	unsigned long end, line_size;

	line_size = L1_cache_info[ICACHE].line_size;
	end =
	    line_size * L1_cache_info[ICACHE].ways * L1_cache_info[ICACHE].sets;

	do {
		end -= line_size;
		__asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
		end -= line_size;
		__asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
		end -= line_size;
		__asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
		end -= line_size;
		__asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
	} while (end > 0);
	__nds32__isb();
}

void cpu_dcache_inval_all(void)
{
	__nds32__cctl_l1d_invalall();
}

#ifdef CONFIG_CACHE_L2
void dcache_wb_all_level(void)
{
	unsigned long flags, cmd;
	local_irq_save(flags);
	__nds32__cctl_l1d_wball_alvl();
	/* Section 1: Ensure that sections 2 and 3 execute only after the
	 * writeback above has completed */
	__nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);

	/* Section 2: Confirm the all-level writeback is done in both the
	 * CPU and the L2C */
	cmd = CCTL_CMD_L2_SYNC;
	L2_CMD_RDY();
	L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
	L2_CMD_RDY();

	/* Section 3: Write back the whole L2 cache */
	cmd = CCTL_ALL_CMD | CCTL_CMD_L2_IX_WB;
	L2_CMD_RDY();
	L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
	L2_CMD_RDY();
	__nds32__msync_all();
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dcache_wb_all_level);
#endif

void cpu_dcache_wb_all(void)
{
	__nds32__cctl_l1d_wball_one_lvl();
	__nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);
}

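/*
 * Write back, then invalidate, the whole L1 D-cache.  On write-back
 * caches interrupts are disabled so no line can be dirtied between the
 * two steps.
 */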
void cpu_dcache_wbinval_all(void)
{
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	unsigned long flags;
	local_irq_save(flags);
#endif
	cpu_dcache_wb_all();
	cpu_dcache_inval_all();
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	local_irq_restore(flags);
#endif
}

/*
 * Page
 */
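/*
 * Each per-page loop below covers one PAGE_SIZE region by virtual
 * address, one cache line at a time, unrolled four times; PAGE_SIZE is
 * therefore assumed to be a multiple of four cache lines.
 */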
void cpu_icache_inval_page(unsigned long start)
{
	unsigned long line_size, end;

	line_size = L1_cache_info[ICACHE].line_size;
	end = start + PAGE_SIZE;

	do {
		end -= line_size;
		__asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
		end -= line_size;
		__asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
		end -= line_size;
		__asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
		end -= line_size;
		__asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
	} while (end != start);
	__nds32__isb();
}

void cpu_dcache_inval_page(unsigned long start)
{
	unsigned long line_size, end;

	line_size = L1_cache_info[DCACHE].line_size;
	end = start + PAGE_SIZE;

	do {
		end -= line_size;
		__asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
		end -= line_size;
		__asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
		end -= line_size;
		__asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
		end -= line_size;
		__asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
	} while (end != start);
}

void cpu_dcache_wb_page(unsigned long start)
{
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	unsigned long line_size, end;

	line_size = L1_cache_info[DCACHE].line_size;
	end = start + PAGE_SIZE;

	do {
		end -= line_size;
		__asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
		end -= line_size;
		__asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
		end -= line_size;
		__asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
		end -= line_size;
		__asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
	} while (end != start);
	__nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);
#endif
}

void cpu_dcache_wbinval_page(unsigned long start)
{
	unsigned long line_size, end;

	line_size = L1_cache_info[DCACHE].line_size;
	end = start + PAGE_SIZE;

	do {
		end -= line_size;
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
		__asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
#endif
		__asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
		end -= line_size;
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
		__asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
#endif
		__asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
		end -= line_size;
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
		__asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
#endif
		__asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
		end -= line_size;
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
		__asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
#endif
		__asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
	} while (end != start);
	__nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);
}

void cpu_cache_wbinval_page(unsigned long page, int flushi)
{
	cpu_dcache_wbinval_page(page);
	if (flushi)
		cpu_icache_inval_page(page);
}

/*
 * Range
 */
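/*
 * The range operations below expect cache-line-aligned boundaries;
 * cpu_cache_wbinval_range() shows the alignment being applied before
 * the walk.
 */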
void cpu_icache_inval_range(unsigned long start, unsigned long end)
{
	unsigned long line_size;

	line_size = L1_cache_info[ICACHE].line_size;

	while (end > start) {
		__asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (start));
		start += line_size;
	}
	__nds32__isb();
}

void cpu_dcache_inval_range(unsigned long start, unsigned long end)
{
	unsigned long line_size;

	line_size = L1_cache_info[DCACHE].line_size;

	while (end > start) {
		__asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (start));
		start += line_size;
	}
}

void cpu_dcache_wb_range(unsigned long start, unsigned long end)
{
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	unsigned long line_size;

	line_size = L1_cache_info[DCACHE].line_size;

	while (end > start) {
		__asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (start));
		start += line_size;
	}
	__nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);
#endif
}

void cpu_dcache_wbinval_range(unsigned long start, unsigned long end)
{
	unsigned long line_size;

	line_size = L1_cache_info[DCACHE].line_size;

	while (end > start) {
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
		__asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (start));
#endif
		__asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (start));
		start += line_size;
	}
	__nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);
}

void cpu_cache_wbinval_range(unsigned long start, unsigned long end, int flushi)
{
	unsigned long line_size, align_start, align_end;

	line_size = L1_cache_info[DCACHE].line_size;
	align_start = start & ~(line_size - 1);
	align_end = (end + line_size - 1) & ~(line_size - 1);
	cpu_dcache_wbinval_range(align_start, align_end);

	if (flushi) {
		line_size = L1_cache_info[ICACHE].line_size;
		align_start = start & ~(line_size - 1);
		align_end = (end + line_size - 1) & ~(line_size - 1);
		cpu_icache_inval_range(align_start, align_end);
	}
}

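/*
 * As above, but for user ranges that may contain unmapped pages: every
 * page is checked with va_present() before it is touched, and ranges
 * larger than 8 pages fall back to whole-cache operations.
 */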
void cpu_cache_wbinval_range_check(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end,
				   bool flushi, bool wbd)
{
	unsigned long line_size, t_start, t_end;

	if (!flushi && !wbd)
		return;
	line_size = L1_cache_info[DCACHE].line_size;
	start = start & ~(line_size - 1);
	end = (end + line_size - 1) & ~(line_size - 1);

	if ((end - start) > (8 * PAGE_SIZE)) {
		if (wbd)
			cpu_dcache_wbinval_all();
		if (flushi)
			cpu_icache_inval_all();
		return;
	}

	t_start = (start + PAGE_SIZE) & PAGE_MASK;
	t_end = ((end - 1) & PAGE_MASK);

	if ((start & PAGE_MASK) == t_end) {
		if (va_present(vma->vm_mm, start)) {
			if (wbd)
				cpu_dcache_wbinval_range(start, end);
			if (flushi)
				cpu_icache_inval_range(start, end);
		}
		return;
	}

	if (va_present(vma->vm_mm, start)) {
		if (wbd)
			cpu_dcache_wbinval_range(start, t_start);
		if (flushi)
			cpu_icache_inval_range(start, t_start);
	}

	if (va_present(vma->vm_mm, end - 1)) {
		if (wbd)
			cpu_dcache_wbinval_range(t_end, end);
		if (flushi)
			cpu_icache_inval_range(t_end, end);
	}

	while (t_start < t_end) {
		if (va_present(vma->vm_mm, t_start)) {
			if (wbd)
				cpu_dcache_wbinval_page(t_start);
			if (flushi)
				cpu_icache_inval_page(t_start);
		}
		t_start += PAGE_SIZE;
	}
}

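/*
 * Issue one single-line L2 CCTL command per cache line in [start, end)
 * and then synchronize.  The L2 controller takes physical addresses,
 * hence the __pa() conversions.
 */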
#ifdef CONFIG_CACHE_L2
static inline void cpu_l2cache_op(unsigned long start, unsigned long end, unsigned long op)
{
	if (atl2c_base) {
		unsigned long p_start = __pa(start);
		unsigned long p_end = __pa(end);
		unsigned long cmd;
		unsigned long line_size;
		/* TODO: use page mode to optimize when the range is larger
		 * than PAGE_SIZE */
		line_size = L2_CACHE_LINE_SIZE();
		p_start = p_start & (~(line_size - 1));
		p_end = (p_end + line_size - 1) & (~(line_size - 1));
		cmd = (p_start & ~(line_size - 1)) | op | CCTL_SINGLE_CMD;
		do {
			L2_CMD_RDY();
			L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
			cmd += line_size;
			p_start += line_size;
		} while (p_end > p_start);
		cmd = CCTL_CMD_L2_SYNC;
		L2_CMD_RDY();
		L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
		L2_CMD_RDY();
	}
}
#else
#define cpu_l2cache_op(start, end, op) do { } while (0)
#endif
/*
 * DMA
 */
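/*
 * DMA helpers: write back before a device reads a buffer, invalidate
 * before the CPU reads what a device wrote, or both.  Each aligns the
 * range to D-cache lines and maintains L1 and, when configured, L2.
 */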
void cpu_dma_wb_range(unsigned long start, unsigned long end)
{
	unsigned long line_size;
	unsigned long flags;
	line_size = L1_cache_info[DCACHE].line_size;
	start = start & (~(line_size - 1));
	end = (end + line_size - 1) & (~(line_size - 1));
	if (unlikely(start == end))
		return;

	local_irq_save(flags);
	cpu_dcache_wb_range(start, end);
	cpu_l2cache_op(start, end, CCTL_CMD_L2_PA_WB);
	__nds32__msync_all();
	local_irq_restore(flags);
}

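/*
 * A line only partially covered by [start, end) is written back and
 * invalidated rather than just invalidated, so unrelated data sharing
 * that line is not destroyed.
 */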
void cpu_dma_inval_range(unsigned long start, unsigned long end)
{
	unsigned long line_size;
	unsigned long old_start = start;
	unsigned long old_end = end;
	unsigned long flags;
	line_size = L1_cache_info[DCACHE].line_size;
	start = start & (~(line_size - 1));
	end = (end + line_size - 1) & (~(line_size - 1));
	if (unlikely(start == end))
		return;
	local_irq_save(flags);
	if (start != old_start) {
		cpu_dcache_wbinval_range(start, start + line_size);
		cpu_l2cache_op(start, start + line_size, CCTL_CMD_L2_PA_WBINVAL);
	}
	if (end != old_end) {
		cpu_dcache_wbinval_range(end - line_size, end);
		cpu_l2cache_op(end - line_size, end, CCTL_CMD_L2_PA_WBINVAL);
	}
	cpu_dcache_inval_range(start, end);
	cpu_l2cache_op(start, end, CCTL_CMD_L2_PA_INVAL);
	__nds32__msync_all();
	local_irq_restore(flags);
}

void cpu_dma_wbinval_range(unsigned long start, unsigned long end)
{
	unsigned long line_size;
	unsigned long flags;
	line_size = L1_cache_info[DCACHE].line_size;
	start = start & (~(line_size - 1));
	end = (end + line_size - 1) & (~(line_size - 1));
	if (unlikely(start == end))
		return;

	local_irq_save(flags);
	cpu_dcache_wbinval_range(start, end);
	cpu_l2cache_op(start, end, CCTL_CMD_L2_PA_WBINVAL);
	__nds32__msync_all();
	local_irq_restore(flags);
}

void cpu_proc_init(void)
{
}

void cpu_proc_fin(void)
{
}

void cpu_do_idle(void)
{
	__nds32__standby_no_wake_grant();
}

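/*
 * Disable both L1 caches, flush them while they are off, then jump to
 * the given reset address.
 */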
void cpu_reset(unsigned long reset)
{
	u32 tmp;
	GIE_DISABLE();
	tmp = __nds32__mfsr(NDS32_SR_CACHE_CTL);
	tmp &= ~(CACHE_CTL_mskIC_EN | CACHE_CTL_mskDC_EN);
	__nds32__mtsr_isb(tmp, NDS32_SR_CACHE_CTL);
	cpu_dcache_wbinval_all();
	cpu_icache_inval_all();

	__asm__ __volatile__("jr.toff %0\n\t"::"r"(reset));
}

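/*
 * Switch address space: update the context ID (CID) field in TLB_MISC
 * and point the hardware page-table base register (L1_PPTB) at the new
 * mm's pgd.
 */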
void cpu_switch_mm(struct mm_struct *mm)
{
	unsigned long cid;
	cid = __nds32__mfsr(NDS32_SR_TLB_MISC);
	cid = (cid & ~TLB_MISC_mskCID) | mm->context.id;
	__nds32__mtsr_dsb(cid, NDS32_SR_TLB_MISC);
	__nds32__mtsr_isb(__pa(mm->pgd), NDS32_SR_L1_PPTB);
}
v5.9
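(The same file at v5.9. The code differs from the v4.17 listing above only in the page-table walks: va_kernel_present() uses the pmd_off_k() helper, va_present() gains the p4d level, and the <asm/pgtable.h> include is gone.)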
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/nds32.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/l2_cache.h>
#include <nds32_intrinsic.h>

#include <asm/cache_info.h>
extern struct cache_info L1_cache_info[2];

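/*
 * v5.9: pmd_off_k() replaces the open-coded
 * pmd_offset(pgd_offset_k(addr), addr) walk used in v4.17.
 */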
int va_kernel_present(unsigned long addr)
{
	pmd_t *pmd;
	pte_t *ptep, pte;

	pmd = pmd_off_k(addr);
	if (!pmd_none(*pmd)) {
		ptep = pte_offset_map(pmd, addr);
		pte = *ptep;
		if (pte_present(pte))
			return pte;
	}
	return 0;
}

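/*
 * v5.9: the walk gains the p4d level required by the generic 5-level
 * page-table API; p4d is folded away on two-level nds32.
 */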
pte_t va_present(struct mm_struct * mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd)) {
		p4d = p4d_offset(pgd, addr);
		if (!p4d_none(*p4d)) {
			pud = pud_offset(p4d, addr);
			if (!pud_none(*pud)) {
				pmd = pmd_offset(pud, addr);
				if (!pmd_none(*pmd)) {
					ptep = pte_offset_map(pmd, addr);
					pte = *ptep;
					if (pte_present(pte))
						return pte;
				}
			}
		}
	}
	return 0;
}

int va_readable(struct pt_regs *regs, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	pte_t pte;
	int ret = 0;

	if (user_mode(regs)) {
		/* user mode */
		pte = va_present(mm, addr);
		if (pte && pte_read(pte))
			ret = 1;
	} else {
		/* superuser mode is always readable, so we only need to
		 * check whether the address is present */
		return !!va_kernel_present(addr);
	}
	return ret;
}

int va_writable(struct pt_regs *regs, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	pte_t pte;
	int ret = 0;

	if (user_mode(regs)) {
		/* user mode */
		pte = va_present(mm, addr);
		if (pte && pte_write(pte))
			ret = 1;
	} else {
		/* superuser mode */
		pte = va_kernel_present(addr);
		if (pte && pte_kernel_write(pte))
			ret = 1;
	}
	return ret;
}

/*
 * All
 */
void cpu_icache_inval_all(void)
{
	unsigned long end, line_size;

	line_size = L1_cache_info[ICACHE].line_size;
	end =
	    line_size * L1_cache_info[ICACHE].ways * L1_cache_info[ICACHE].sets;

	do {
		end -= line_size;
		__asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
		end -= line_size;
		__asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
		end -= line_size;
		__asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
		end -= line_size;
		__asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
	} while (end > 0);
	__nds32__isb();
}

void cpu_dcache_inval_all(void)
{
	__nds32__cctl_l1d_invalall();
}

#ifdef CONFIG_CACHE_L2
void dcache_wb_all_level(void)
{
	unsigned long flags, cmd;
	local_irq_save(flags);
	__nds32__cctl_l1d_wball_alvl();
	/* Section 1: Ensure that sections 2 and 3 execute only after the
	 * writeback above has completed */
	__nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);

	/* Section 2: Confirm the all-level writeback is done in both the
	 * CPU and the L2C */
	cmd = CCTL_CMD_L2_SYNC;
	L2_CMD_RDY();
	L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
	L2_CMD_RDY();

	/* Section 3: Write back the whole L2 cache */
	cmd = CCTL_ALL_CMD | CCTL_CMD_L2_IX_WB;
	L2_CMD_RDY();
	L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
	L2_CMD_RDY();
	__nds32__msync_all();
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dcache_wb_all_level);
#endif

void cpu_dcache_wb_all(void)
{
	__nds32__cctl_l1d_wball_one_lvl();
	__nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);
}

void cpu_dcache_wbinval_all(void)
{
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	unsigned long flags;
	local_irq_save(flags);
#endif
	cpu_dcache_wb_all();
	cpu_dcache_inval_all();
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	local_irq_restore(flags);
#endif
}

/*
 * Page
 */
void cpu_icache_inval_page(unsigned long start)
{
	unsigned long line_size, end;

	line_size = L1_cache_info[ICACHE].line_size;
	end = start + PAGE_SIZE;

	do {
		end -= line_size;
		__asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
		end -= line_size;
		__asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
		end -= line_size;
		__asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
		end -= line_size;
		__asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
	} while (end != start);
	__nds32__isb();
}

void cpu_dcache_inval_page(unsigned long start)
{
	unsigned long line_size, end;

	line_size = L1_cache_info[DCACHE].line_size;
	end = start + PAGE_SIZE;

	do {
		end -= line_size;
		__asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
		end -= line_size;
		__asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
		end -= line_size;
		__asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
		end -= line_size;
		__asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
	} while (end != start);
}

void cpu_dcache_wb_page(unsigned long start)
{
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	unsigned long line_size, end;

	line_size = L1_cache_info[DCACHE].line_size;
	end = start + PAGE_SIZE;

	do {
		end -= line_size;
		__asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
		end -= line_size;
		__asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
		end -= line_size;
		__asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
		end -= line_size;
		__asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
	} while (end != start);
	__nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);
#endif
}

void cpu_dcache_wbinval_page(unsigned long start)
{
	unsigned long line_size, end;

	line_size = L1_cache_info[DCACHE].line_size;
	end = start + PAGE_SIZE;

	do {
		end -= line_size;
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
		__asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
#endif
		__asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
		end -= line_size;
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
		__asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
#endif
		__asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
		end -= line_size;
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
		__asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
#endif
		__asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
		end -= line_size;
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
		__asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
#endif
		__asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
	} while (end != start);
	__nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);
}

void cpu_cache_wbinval_page(unsigned long page, int flushi)
{
	cpu_dcache_wbinval_page(page);
	if (flushi)
		cpu_icache_inval_page(page);
}

/*
 * Range
 */
void cpu_icache_inval_range(unsigned long start, unsigned long end)
{
	unsigned long line_size;

	line_size = L1_cache_info[ICACHE].line_size;

	while (end > start) {
		__asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (start));
		start += line_size;
	}
	__nds32__isb();
}

void cpu_dcache_inval_range(unsigned long start, unsigned long end)
{
	unsigned long line_size;

	line_size = L1_cache_info[DCACHE].line_size;

	while (end > start) {
		__asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (start));
		start += line_size;
	}
}

void cpu_dcache_wb_range(unsigned long start, unsigned long end)
{
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	unsigned long line_size;

	line_size = L1_cache_info[DCACHE].line_size;

	while (end > start) {
		__asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (start));
		start += line_size;
	}
	__nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);
#endif
}

void cpu_dcache_wbinval_range(unsigned long start, unsigned long end)
{
	unsigned long line_size;

	line_size = L1_cache_info[DCACHE].line_size;

	while (end > start) {
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
		__asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (start));
#endif
		__asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (start));
		start += line_size;
	}
	__nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);
}

void cpu_cache_wbinval_range(unsigned long start, unsigned long end, int flushi)
{
	unsigned long line_size, align_start, align_end;

	line_size = L1_cache_info[DCACHE].line_size;
	align_start = start & ~(line_size - 1);
	align_end = (end + line_size - 1) & ~(line_size - 1);
	cpu_dcache_wbinval_range(align_start, align_end);

	if (flushi) {
		line_size = L1_cache_info[ICACHE].line_size;
		align_start = start & ~(line_size - 1);
		align_end = (end + line_size - 1) & ~(line_size - 1);
		cpu_icache_inval_range(align_start, align_end);
	}
}

void cpu_cache_wbinval_range_check(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end,
				   bool flushi, bool wbd)
{
	unsigned long line_size, t_start, t_end;

	if (!flushi && !wbd)
		return;
	line_size = L1_cache_info[DCACHE].line_size;
	start = start & ~(line_size - 1);
	end = (end + line_size - 1) & ~(line_size - 1);

	if ((end - start) > (8 * PAGE_SIZE)) {
		if (wbd)
			cpu_dcache_wbinval_all();
		if (flushi)
			cpu_icache_inval_all();
		return;
	}

	t_start = (start + PAGE_SIZE) & PAGE_MASK;
	t_end = ((end - 1) & PAGE_MASK);

	if ((start & PAGE_MASK) == t_end) {
		if (va_present(vma->vm_mm, start)) {
			if (wbd)
				cpu_dcache_wbinval_range(start, end);
			if (flushi)
				cpu_icache_inval_range(start, end);
		}
		return;
	}

	if (va_present(vma->vm_mm, start)) {
		if (wbd)
			cpu_dcache_wbinval_range(start, t_start);
		if (flushi)
			cpu_icache_inval_range(start, t_start);
	}

	if (va_present(vma->vm_mm, end - 1)) {
		if (wbd)
			cpu_dcache_wbinval_range(t_end, end);
		if (flushi)
			cpu_icache_inval_range(t_end, end);
	}

	while (t_start < t_end) {
		if (va_present(vma->vm_mm, t_start)) {
			if (wbd)
				cpu_dcache_wbinval_page(t_start);
			if (flushi)
				cpu_icache_inval_page(t_start);
		}
		t_start += PAGE_SIZE;
	}
}

#ifdef CONFIG_CACHE_L2
static inline void cpu_l2cache_op(unsigned long start, unsigned long end, unsigned long op)
{
	if (atl2c_base) {
		unsigned long p_start = __pa(start);
		unsigned long p_end = __pa(end);
		unsigned long cmd;
		unsigned long line_size;
		/* TODO: use page mode to optimize when the range is larger
		 * than PAGE_SIZE */
		line_size = L2_CACHE_LINE_SIZE();
		p_start = p_start & (~(line_size - 1));
		p_end = (p_end + line_size - 1) & (~(line_size - 1));
		cmd = (p_start & ~(line_size - 1)) | op | CCTL_SINGLE_CMD;
		do {
			L2_CMD_RDY();
			L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
			cmd += line_size;
			p_start += line_size;
		} while (p_end > p_start);
		cmd = CCTL_CMD_L2_SYNC;
		L2_CMD_RDY();
		L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
		L2_CMD_RDY();
	}
}
#else
#define cpu_l2cache_op(start, end, op) do { } while (0)
#endif
/*
 * DMA
 */
void cpu_dma_wb_range(unsigned long start, unsigned long end)
{
	unsigned long line_size;
	unsigned long flags;
	line_size = L1_cache_info[DCACHE].line_size;
	start = start & (~(line_size - 1));
	end = (end + line_size - 1) & (~(line_size - 1));
	if (unlikely(start == end))
		return;

	local_irq_save(flags);
	cpu_dcache_wb_range(start, end);
	cpu_l2cache_op(start, end, CCTL_CMD_L2_PA_WB);
	__nds32__msync_all();
	local_irq_restore(flags);
}

void cpu_dma_inval_range(unsigned long start, unsigned long end)
{
	unsigned long line_size;
	unsigned long old_start = start;
	unsigned long old_end = end;
	unsigned long flags;
	line_size = L1_cache_info[DCACHE].line_size;
	start = start & (~(line_size - 1));
	end = (end + line_size - 1) & (~(line_size - 1));
	if (unlikely(start == end))
		return;
	local_irq_save(flags);
	if (start != old_start) {
		cpu_dcache_wbinval_range(start, start + line_size);
		cpu_l2cache_op(start, start + line_size, CCTL_CMD_L2_PA_WBINVAL);
	}
	if (end != old_end) {
		cpu_dcache_wbinval_range(end - line_size, end);
		cpu_l2cache_op(end - line_size, end, CCTL_CMD_L2_PA_WBINVAL);
	}
	cpu_dcache_inval_range(start, end);
	cpu_l2cache_op(start, end, CCTL_CMD_L2_PA_INVAL);
	__nds32__msync_all();
	local_irq_restore(flags);
}

void cpu_dma_wbinval_range(unsigned long start, unsigned long end)
{
	unsigned long line_size;
	unsigned long flags;
	line_size = L1_cache_info[DCACHE].line_size;
	start = start & (~(line_size - 1));
	end = (end + line_size - 1) & (~(line_size - 1));
	if (unlikely(start == end))
		return;

	local_irq_save(flags);
	cpu_dcache_wbinval_range(start, end);
	cpu_l2cache_op(start, end, CCTL_CMD_L2_PA_WBINVAL);
	__nds32__msync_all();
	local_irq_restore(flags);
}

void cpu_proc_init(void)
{
}

void cpu_proc_fin(void)
{
}

void cpu_do_idle(void)
{
	__nds32__standby_no_wake_grant();
}

void cpu_reset(unsigned long reset)
{
	u32 tmp;
	GIE_DISABLE();
	tmp = __nds32__mfsr(NDS32_SR_CACHE_CTL);
	tmp &= ~(CACHE_CTL_mskIC_EN | CACHE_CTL_mskDC_EN);
	__nds32__mtsr_isb(tmp, NDS32_SR_CACHE_CTL);
	cpu_dcache_wbinval_all();
	cpu_icache_inval_all();

	__asm__ __volatile__("jr.toff %0\n\t"::"r"(reset));
}

void cpu_switch_mm(struct mm_struct *mm)
{
	unsigned long cid;
	cid = __nds32__mfsr(NDS32_SR_TLB_MISC);
	cid = (cid & ~TLB_MISC_mskCID) | mm->context.id;
	__nds32__mtsr_dsb(cid, NDS32_SR_TLB_MISC);
	__nds32__mtsr_isb(__pa(mm->pgd), NDS32_SR_L1_PPTB);
}