Linux Audio

Check our new training course

Loading...
v3.1
 
  1/*
  2 *  linux/arch/sparc/mm/leon_m.c
  3 *
  4 * Copyright (C) 2004 Konrad Eisele (eiselekd@web.de, konrad@gaisler.com) Gaisler Research
  5 * Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB
  6 * Copyright (C) 2009 Konrad Eisele (konrad@gaisler.com) Aeroflex Gaisler AB
  7 *
  8 * do srmmu probe in software
  9 *
 10 */
 11
 12#include <linux/kernel.h>
 13#include <linux/mm.h>
 14#include <asm/asi.h>
 15#include <asm/leon.h>
 16#include <asm/tlbflush.h>
 17
 
 
/* Non-zero => flush caches on every context switch (consulted by
 * leon_switch_mm() below).  Defaults to on; may be cleared at boot
 * when the cache geometry makes the flush unnecessary. */
int leon_flush_during_switch = 1;
/* Debug knob: non-zero enables printk tracing inside srmmu_swprobe(). */
int srmmu_swprobe_trace;
 
 
 
 
 
 
 
 
 
 
 
 
 20
/*
 * srmmu_swprobe() - software replay of the SRMMU hardware table walk.
 * @vaddr: virtual address to translate.
 * @paddr: if non-NULL, receives the physical address computed for @vaddr.
 *
 * Walks the context table / pgd / pmd / pte levels using MMU-bypass
 * physical loads, stopping at whichever level (3 down to 0) holds a
 * PTE.  Returns the raw PTE found, or 0 if any level is invalid or
 * lies outside valid physical memory.  Tracing is controlled by
 * srmmu_swprobe_trace.
 */
unsigned long srmmu_swprobe(unsigned long vaddr, unsigned long *paddr)
{

	unsigned int ctxtbl;
	unsigned int pgd, pmd, ped;
	unsigned int ptr;
	unsigned int lvl, pte, paddrbase;
	unsigned int ctx;
	unsigned int paddr_calc;

	paddrbase = 0;	/* NOTE(review): written on every path, never read */

	if (srmmu_swprobe_trace)
		printk(KERN_INFO "swprobe: trace on\n");

	/* Physical base of the context table; give up if the MMU has none
	 * configured or it lies outside valid physical memory. */
	ctxtbl = srmmu_get_ctable_ptr();
	if (!(ctxtbl)) {
		if (srmmu_swprobe_trace)
			printk(KERN_INFO "swprobe: srmmu_get_ctable_ptr returned 0=>0\n");
		return 0;
	}
	if (!_pfn_valid(PFN(ctxtbl))) {
		if (srmmu_swprobe_trace)
			printk(KERN_INFO
			       "swprobe: !_pfn_valid(%x)=>0\n",
			       PFN(ctxtbl));
		return 0;
	}

	/* Index the context table with the current MMU context number. */
	ctx = srmmu_get_context();
	if (srmmu_swprobe_trace)
		printk(KERN_INFO "swprobe:  --- ctx (%x) ---\n", ctx);

	pgd = LEON_BYPASS_LOAD_PA(ctxtbl + (ctx * 4));

	/* A PTE already at the context level maps the whole space (lvl 3). */
	if (((pgd & SRMMU_ET_MASK) == SRMMU_ET_PTE)) {
		if (srmmu_swprobe_trace)
			printk(KERN_INFO "swprobe: pgd is entry level 3\n");
		lvl = 3;
		pte = pgd;
		paddrbase = pgd & _SRMMU_PTE_PMASK_LEON;
		goto ready;
	}
	if (((pgd & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
		if (srmmu_swprobe_trace)
			printk(KERN_INFO "swprobe: pgd is invalid => 0\n");
		return 0;
	}

	if (srmmu_swprobe_trace)
		printk(KERN_INFO "swprobe:  --- pgd (%x) ---\n", pgd);

	/* Descend one level: a PTD stores the next table's physical
	 * address right-shifted by 4; index it with the pgd bits of vaddr. */
	ptr = (pgd & SRMMU_PTD_PMASK) << 4;
	ptr += ((((vaddr) >> LEON_PGD_SH) & LEON_PGD_M) * 4);
	if (!_pfn_valid(PFN(ptr)))
		return 0;

	pmd = LEON_BYPASS_LOAD_PA(ptr);
	/* A PTE here is a level-2 (region-sized) mapping. */
	if (((pmd & SRMMU_ET_MASK) == SRMMU_ET_PTE)) {
		if (srmmu_swprobe_trace)
			printk(KERN_INFO "swprobe: pmd is entry level 2\n");
		lvl = 2;
		pte = pmd;
		paddrbase = pmd & _SRMMU_PTE_PMASK_LEON;
		goto ready;
	}
	if (((pmd & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
		if (srmmu_swprobe_trace)
			printk(KERN_INFO "swprobe: pmd is invalid => 0\n");
		return 0;
	}

	if (srmmu_swprobe_trace)
		printk(KERN_INFO "swprobe:  --- pmd (%x) ---\n", pmd);

	/* Next level down, indexed by the pmd bits of vaddr. */
	ptr = (pmd & SRMMU_PTD_PMASK) << 4;
	ptr += (((vaddr >> LEON_PMD_SH) & LEON_PMD_M) * 4);
	if (!_pfn_valid(PFN(ptr))) {
		if (srmmu_swprobe_trace)
			printk(KERN_INFO "swprobe: !_pfn_valid(%x)=>0\n",
			       PFN(ptr));
		return 0;
	}

	ped = LEON_BYPASS_LOAD_PA(ptr);

	/* A PTE here is a level-1 (segment-sized) mapping. */
	if (((ped & SRMMU_ET_MASK) == SRMMU_ET_PTE)) {
		if (srmmu_swprobe_trace)
			printk(KERN_INFO "swprobe: ped is entry level 1\n");
		lvl = 1;
		pte = ped;
		paddrbase = ped & _SRMMU_PTE_PMASK_LEON;
		goto ready;
	}
	if (((ped & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
		if (srmmu_swprobe_trace)
			printk(KERN_INFO "swprobe: ped is invalid => 0\n");
		return 0;
	}

	if (srmmu_swprobe_trace)
		printk(KERN_INFO "swprobe:  --- ped (%x) ---\n", ped);

	/* Final level: page tables proper, indexed by the pte bits. */
	ptr = (ped & SRMMU_PTD_PMASK) << 4;
	ptr += (((vaddr >> LEON_PTE_SH) & LEON_PTE_M) * 4);
	if (!_pfn_valid(PFN(ptr)))
		return 0;

	ptr = LEON_BYPASS_LOAD_PA(ptr);
	if (((ptr & SRMMU_ET_MASK) == SRMMU_ET_PTE)) {
		if (srmmu_swprobe_trace)
			printk(KERN_INFO "swprobe: ptr is entry level 0\n");
		lvl = 0;
		pte = ptr;
		paddrbase = ptr & _SRMMU_PTE_PMASK_LEON;
		goto ready;
	}
	if (srmmu_swprobe_trace)
		printk(KERN_INFO "swprobe: ptr is invalid => 0\n");
	return 0;

ready:
	/* Rebuild the physical address: mask the low PTE byte (non-address
	 * bits), shift left 4 to recover the page base, and OR in the vaddr
	 * bits left untranslated at the level where the walk stopped. */
	switch (lvl) {
	case 0:
		paddr_calc =
		    (vaddr & ~(-1 << LEON_PTE_SH)) | ((pte & ~0xff) << 4);
		break;
	case 1:
		paddr_calc =
		    (vaddr & ~(-1 << LEON_PMD_SH)) | ((pte & ~0xff) << 4);
		break;
	case 2:
		paddr_calc =
		    (vaddr & ~(-1 << LEON_PGD_SH)) | ((pte & ~0xff) << 4);
		break;
	default:
	case 3:
		/* Level 3: the code treats the mapping as identity. */
		paddr_calc = vaddr;
		break;
	}
	if (srmmu_swprobe_trace)
		printk(KERN_INFO "swprobe: padde %x\n", paddr_calc);
	if (paddr)
		*paddr = paddr_calc;
	return pte;
}
167
/* Flush the entire instruction cache via the SPARC "flush" instruction. */
void leon_flush_icache_all(void)
{
	__asm__ __volatile__(" flush ");	/*iflush*/
}
172
/* Flush the entire data cache: any store through the LEON dcache-flush
 * ASI triggers a whole-cache flush. */
void leon_flush_dcache_all(void)
{
	__asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : :
			     "i"(ASI_LEON_DFLUSH) : "memory");
}
178
179void leon_flush_pcache_all(struct vm_area_struct *vma, unsigned long page)
180{
181	if (vma->vm_flags & VM_EXEC)
182		leon_flush_icache_all();
183	leon_flush_dcache_all();
184}
185
/* Flush both caches: "flush" empties the instruction cache, the store
 * through the dcache-flush ASI empties the data cache. */
void leon_flush_cache_all(void)
{
	__asm__ __volatile__(" flush ");	/*iflush*/
	__asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : :
			     "i"(ASI_LEON_DFLUSH) : "memory");
}
192
/* Flush the whole TLB.  Caches are flushed first since they are
 * virtually tagged; the store to address 0x400 in the MMU-flush ASI
 * requests an "entire flush" (SRMMU flush address format -- the type
 * field in bits 11:8). */
void leon_flush_tlb_all(void)
{
	leon_flush_cache_all();
	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : "r"(0x400),
			     "i"(ASI_LEON_MMUFLUSH) : "memory");
}
199
200/* get all cache regs */
/* get all cache regs */
/* Read the three LEON3 cache control registers (offsets 0x0, 0x8 and
 * 0xC in the cache-registers ASI) into @regs.  No-op if @regs is NULL.
 * The asm clobbers %g1, which it uses as the address scratch register. */
void leon3_getCacheRegs(struct leon3_cacheregs *regs)
{
	unsigned long ccr, iccr, dccr;

	if (!regs)
		return;
	/* Get Cache regs from "Cache ASI" address 0x0, 0x8 and 0xC */
	__asm__ __volatile__("lda [%%g0] %3, %0\n\t"
			     "mov 0x08, %%g1\n\t"
			     "lda [%%g1] %3, %1\n\t"
			     "mov 0x0c, %%g1\n\t"
			     "lda [%%g1] %3, %2\n\t"
			     : "=r"(ccr), "=r"(iccr), "=r"(dccr)
			       /* output */
			     : "i"(ASI_LEON_CACHEREGS)	/* input */
			     : "g1"	/* clobber list */
	    );
	regs->ccr = ccr;
	regs->iccr = iccr;
	regs->dccr = dccr;
}
222
223/* Due to virtual cache we need to check cache configuration if
224 * it is possible to skip flushing in some cases.
225 *
226 * Leon2 and Leon3 differ in their way of telling cache information
227 *
228 */
229int __init leon_flush_needed(void)
230{
231	int flush_needed = -1;
232	unsigned int ssize, sets;
233	char *setStr[4] =
234	    { "direct mapped", "2-way associative", "3-way associative",
235		"4-way associative"
236	};
237	/* leon 3 */
238	struct leon3_cacheregs cregs;
239	leon3_getCacheRegs(&cregs);
240	sets = (cregs.dccr & LEON3_XCCR_SETS_MASK) >> 24;
241	/* (ssize=>realsize) 0=>1k, 1=>2k, 2=>4k, 3=>8k ... */
242	ssize = 1 << ((cregs.dccr & LEON3_XCCR_SSIZE_MASK) >> 20);
243
244	printk(KERN_INFO "CACHE: %s cache, set size %dk\n",
245	       sets > 3 ? "unknown" : setStr[sets], ssize);
246	if ((ssize <= (PAGE_SIZE / 1024)) && (sets == 0)) {
247		/* Set Size <= Page size  ==>
248		   flush on every context switch not needed. */
249		flush_needed = 0;
250		printk(KERN_INFO "CACHE: not flushing on every context switch\n");
251	}
252	return flush_needed;
253}
254
/* MMU context-switch hook: always flush the TLB for the outgoing mm,
 * and flush the (virtually tagged) caches too unless boot-time probing
 * determined that is unnecessary (leon_flush_during_switch). */
void leon_switch_mm(void)
{
	flush_tlb_mm((void *)0);
	if (leon_flush_during_switch)
		leon_flush_cache_all();
}
v5.4
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  linux/arch/sparc/mm/leon_m.c
  4 *
  5 * Copyright (C) 2004 Konrad Eisele (eiselekd@web.de, konrad@gaisler.com) Gaisler Research
  6 * Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB
  7 * Copyright (C) 2009 Konrad Eisele (konrad@gaisler.com) Aeroflex Gaisler AB
  8 *
  9 * do srmmu probe in software
 10 *
 11 */
 12
 13#include <linux/kernel.h>
 14#include <linux/mm.h>
 15#include <asm/asi.h>
 16#include <asm/leon.h>
 17#include <asm/tlbflush.h>
 18
 19#include "mm_32.h"
 20
/* Non-zero => flush caches on every context switch (consulted by
 * leon_switch_mm()).  Defaults to on; init_leon() overrides it with
 * the result of leon_flush_needed(). */
int leon_flush_during_switch = 1;
/* Debug knob: non-zero enables printk tracing inside leon_swprobe(). */
static int srmmu_swprobe_trace;
 23
/* Read the SRMMU context table pointer register through the LEON MMU
 * register ASI and convert it to a physical address: the register
 * stores the table base right-shifted by 4, so mask with
 * SRMMU_CTX_PMASK and shift back. */
static inline unsigned long leon_get_ctable_ptr(void)
{
	unsigned int retval;

	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
			     "=r" (retval) :
			     "r" (SRMMU_CTXTBL_PTR),
			     "i" (ASI_LEON_MMUREGS));
	return (retval & SRMMU_CTX_PMASK) << 4;
}
 34
 35
 36unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr)
 37{
 38
 39	unsigned int ctxtbl;
 40	unsigned int pgd, pmd, ped;
 41	unsigned int ptr;
 42	unsigned int lvl, pte, paddrbase;
 43	unsigned int ctx;
 44	unsigned int paddr_calc;
 45
 46	paddrbase = 0;
 47
 48	if (srmmu_swprobe_trace)
 49		printk(KERN_INFO "swprobe: trace on\n");
 50
 51	ctxtbl = leon_get_ctable_ptr();
 52	if (!(ctxtbl)) {
 53		if (srmmu_swprobe_trace)
 54			printk(KERN_INFO "swprobe: leon_get_ctable_ptr returned 0=>0\n");
 55		return 0;
 56	}
 57	if (!_pfn_valid(PFN(ctxtbl))) {
 58		if (srmmu_swprobe_trace)
 59			printk(KERN_INFO
 60			       "swprobe: !_pfn_valid(%x)=>0\n",
 61			       PFN(ctxtbl));
 62		return 0;
 63	}
 64
 65	ctx = srmmu_get_context();
 66	if (srmmu_swprobe_trace)
 67		printk(KERN_INFO "swprobe:  --- ctx (%x) ---\n", ctx);
 68
 69	pgd = LEON_BYPASS_LOAD_PA(ctxtbl + (ctx * 4));
 70
 71	if (((pgd & SRMMU_ET_MASK) == SRMMU_ET_PTE)) {
 72		if (srmmu_swprobe_trace)
 73			printk(KERN_INFO "swprobe: pgd is entry level 3\n");
 74		lvl = 3;
 75		pte = pgd;
 76		paddrbase = pgd & _SRMMU_PTE_PMASK_LEON;
 77		goto ready;
 78	}
 79	if (((pgd & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
 80		if (srmmu_swprobe_trace)
 81			printk(KERN_INFO "swprobe: pgd is invalid => 0\n");
 82		return 0;
 83	}
 84
 85	if (srmmu_swprobe_trace)
 86		printk(KERN_INFO "swprobe:  --- pgd (%x) ---\n", pgd);
 87
 88	ptr = (pgd & SRMMU_PTD_PMASK) << 4;
 89	ptr += ((((vaddr) >> LEON_PGD_SH) & LEON_PGD_M) * 4);
 90	if (!_pfn_valid(PFN(ptr)))
 91		return 0;
 92
 93	pmd = LEON_BYPASS_LOAD_PA(ptr);
 94	if (((pmd & SRMMU_ET_MASK) == SRMMU_ET_PTE)) {
 95		if (srmmu_swprobe_trace)
 96			printk(KERN_INFO "swprobe: pmd is entry level 2\n");
 97		lvl = 2;
 98		pte = pmd;
 99		paddrbase = pmd & _SRMMU_PTE_PMASK_LEON;
100		goto ready;
101	}
102	if (((pmd & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
103		if (srmmu_swprobe_trace)
104			printk(KERN_INFO "swprobe: pmd is invalid => 0\n");
105		return 0;
106	}
107
108	if (srmmu_swprobe_trace)
109		printk(KERN_INFO "swprobe:  --- pmd (%x) ---\n", pmd);
110
111	ptr = (pmd & SRMMU_PTD_PMASK) << 4;
112	ptr += (((vaddr >> LEON_PMD_SH) & LEON_PMD_M) * 4);
113	if (!_pfn_valid(PFN(ptr))) {
114		if (srmmu_swprobe_trace)
115			printk(KERN_INFO "swprobe: !_pfn_valid(%x)=>0\n",
116			       PFN(ptr));
117		return 0;
118	}
119
120	ped = LEON_BYPASS_LOAD_PA(ptr);
121
122	if (((ped & SRMMU_ET_MASK) == SRMMU_ET_PTE)) {
123		if (srmmu_swprobe_trace)
124			printk(KERN_INFO "swprobe: ped is entry level 1\n");
125		lvl = 1;
126		pte = ped;
127		paddrbase = ped & _SRMMU_PTE_PMASK_LEON;
128		goto ready;
129	}
130	if (((ped & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
131		if (srmmu_swprobe_trace)
132			printk(KERN_INFO "swprobe: ped is invalid => 0\n");
133		return 0;
134	}
135
136	if (srmmu_swprobe_trace)
137		printk(KERN_INFO "swprobe:  --- ped (%x) ---\n", ped);
138
139	ptr = (ped & SRMMU_PTD_PMASK) << 4;
140	ptr += (((vaddr >> LEON_PTE_SH) & LEON_PTE_M) * 4);
141	if (!_pfn_valid(PFN(ptr)))
142		return 0;
143
144	ptr = LEON_BYPASS_LOAD_PA(ptr);
145	if (((ptr & SRMMU_ET_MASK) == SRMMU_ET_PTE)) {
146		if (srmmu_swprobe_trace)
147			printk(KERN_INFO "swprobe: ptr is entry level 0\n");
148		lvl = 0;
149		pte = ptr;
150		paddrbase = ptr & _SRMMU_PTE_PMASK_LEON;
151		goto ready;
152	}
153	if (srmmu_swprobe_trace)
154		printk(KERN_INFO "swprobe: ptr is invalid => 0\n");
155	return 0;
156
157ready:
158	switch (lvl) {
159	case 0:
160		paddr_calc =
161		    (vaddr & ~(-1 << LEON_PTE_SH)) | ((pte & ~0xff) << 4);
162		break;
163	case 1:
164		paddr_calc =
165		    (vaddr & ~(-1 << LEON_PMD_SH)) | ((pte & ~0xff) << 4);
166		break;
167	case 2:
168		paddr_calc =
169		    (vaddr & ~(-1 << LEON_PGD_SH)) | ((pte & ~0xff) << 4);
170		break;
171	default:
172	case 3:
173		paddr_calc = vaddr;
174		break;
175	}
176	if (srmmu_swprobe_trace)
177		printk(KERN_INFO "swprobe: padde %x\n", paddr_calc);
178	if (paddr)
179		*paddr = paddr_calc;
180	return pte;
181}
182
/* Flush the entire instruction cache via the SPARC "flush" instruction. */
void leon_flush_icache_all(void)
{
	__asm__ __volatile__(" flush ");	/*iflush*/
}
187
/* Flush the entire data cache: any store through the LEON dcache-flush
 * ASI triggers a whole-cache flush. */
void leon_flush_dcache_all(void)
{
	__asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : :
			     "i"(ASI_LEON_DFLUSH) : "memory");
}
193
194void leon_flush_pcache_all(struct vm_area_struct *vma, unsigned long page)
195{
196	if (vma->vm_flags & VM_EXEC)
197		leon_flush_icache_all();
198	leon_flush_dcache_all();
199}
200
/* Flush both caches: "flush" empties the instruction cache, the store
 * through the dcache-flush ASI empties the data cache. */
void leon_flush_cache_all(void)
{
	__asm__ __volatile__(" flush ");	/*iflush*/
	__asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : :
			     "i"(ASI_LEON_DFLUSH) : "memory");
}
207
/* Flush the whole TLB.  Caches are flushed first since they are
 * virtually tagged; the store to address 0x400 in the MMU-flush ASI
 * requests an "entire flush" (SRMMU flush address format -- the type
 * field in bits 11:8). */
void leon_flush_tlb_all(void)
{
	leon_flush_cache_all();
	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : "r"(0x400),
			     "i"(ASI_LEON_MMUFLUSH) : "memory");
}
214
215/* get all cache regs */
/* get all cache regs */
/* Read the three LEON3 cache control registers (offsets 0x0, 0x8 and
 * 0xC in the cache-registers ASI) into @regs.  No-op if @regs is NULL.
 * The asm clobbers %g1, which it uses as the address scratch register. */
void leon3_getCacheRegs(struct leon3_cacheregs *regs)
{
	unsigned long ccr, iccr, dccr;

	if (!regs)
		return;
	/* Get Cache regs from "Cache ASI" address 0x0, 0x8 and 0xC */
	__asm__ __volatile__("lda [%%g0] %3, %0\n\t"
			     "mov 0x08, %%g1\n\t"
			     "lda [%%g1] %3, %1\n\t"
			     "mov 0x0c, %%g1\n\t"
			     "lda [%%g1] %3, %2\n\t"
			     : "=r"(ccr), "=r"(iccr), "=r"(dccr)
			       /* output */
			     : "i"(ASI_LEON_CACHEREGS)	/* input */
			     : "g1"	/* clobber list */
	    );
	regs->ccr = ccr;
	regs->iccr = iccr;
	regs->dccr = dccr;
}
237
238/* Due to virtual cache we need to check cache configuration if
239 * it is possible to skip flushing in some cases.
240 *
241 * Leon2 and Leon3 differ in their way of telling cache information
242 *
243 */
244int __init leon_flush_needed(void)
245{
246	int flush_needed = -1;
247	unsigned int ssize, sets;
248	char *setStr[4] =
249	    { "direct mapped", "2-way associative", "3-way associative",
250		"4-way associative"
251	};
252	/* leon 3 */
253	struct leon3_cacheregs cregs;
254	leon3_getCacheRegs(&cregs);
255	sets = (cregs.dccr & LEON3_XCCR_SETS_MASK) >> 24;
256	/* (ssize=>realsize) 0=>1k, 1=>2k, 2=>4k, 3=>8k ... */
257	ssize = 1 << ((cregs.dccr & LEON3_XCCR_SSIZE_MASK) >> 20);
258
259	printk(KERN_INFO "CACHE: %s cache, set size %dk\n",
260	       sets > 3 ? "unknown" : setStr[sets], ssize);
261	if ((ssize <= (PAGE_SIZE / 1024)) && (sets == 0)) {
262		/* Set Size <= Page size  ==>
263		   flush on every context switch not needed. */
264		flush_needed = 0;
265		printk(KERN_INFO "CACHE: not flushing on every context switch\n");
266	}
267	return flush_needed;
268}
269
/* MMU context-switch hook: always flush the TLB for the outgoing mm,
 * and flush the (virtually tagged) caches too unless boot-time probing
 * determined that is unnecessary (leon_flush_during_switch, set in
 * init_leon() from leon_flush_needed()). */
void leon_switch_mm(void)
{
	flush_tlb_mm((void *)0);
	if (leon_flush_during_switch)
		leon_flush_cache_all();
}
276
/* sparc32_cachetlb_ops hook: per-mm flush falls back to a whole-cache
 * flush, since the LEON flush primitives have no finer granularity. */
static void leon_flush_cache_mm(struct mm_struct *mm)
{
	leon_flush_cache_all();
}
281
/* sparc32_cachetlb_ops hook: delegate per-page flush to
 * leon_flush_pcache_all (dcache always, icache if @vma is executable). */
static void leon_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	leon_flush_pcache_all(vma, page);
}
286
/* sparc32_cachetlb_ops hook: range flush falls back to a whole-cache
 * flush; @start/@end are ignored. */
static void leon_flush_cache_range(struct vm_area_struct *vma,
				   unsigned long start,
				   unsigned long end)
{
	leon_flush_cache_all();
}
293
/* sparc32_cachetlb_ops hook: per-mm TLB flush falls back to flushing
 * the entire TLB. */
static void leon_flush_tlb_mm(struct mm_struct *mm)
{
	leon_flush_tlb_all();
}
298
/* sparc32_cachetlb_ops hook: per-page TLB flush falls back to flushing
 * the entire TLB; @vma and @page are ignored. */
static void leon_flush_tlb_page(struct vm_area_struct *vma,
				unsigned long page)
{
	leon_flush_tlb_all();
}
304
/* sparc32_cachetlb_ops hook: range TLB flush falls back to flushing
 * the entire TLB; the range arguments are ignored. */
static void leon_flush_tlb_range(struct vm_area_struct *vma,
				 unsigned long start,
				 unsigned long end)
{
	leon_flush_tlb_all();
}
311
/* sparc32_cachetlb_ops hook: pushing one page to RAM is implemented as
 * a full cache flush; @page is ignored. */
static void leon_flush_page_to_ram(unsigned long page)
{
	leon_flush_cache_all();
}
316
/* sparc32_cachetlb_ops hook: make signal trampoline instructions
 * visible by flushing everything; @mm and @page are ignored. */
static void leon_flush_sig_insns(struct mm_struct *mm, unsigned long page)
{
	leon_flush_cache_all();
}
321
/* sparc32_cachetlb_ops hook: before DMA, only the data cache needs
 * flushing; @page is ignored (whole dcache is flushed). */
static void leon_flush_page_for_dma(unsigned long page)
{
	leon_flush_dcache_all();
}
326
/* Per-CPU srmmu poke hook (installed as poke_srmmu in init_leon());
 * intentionally empty -- LEON needs no per-CPU MMU setup here. */
void __init poke_leonsparc(void)
{
}
330
/* sparc32 cache/TLB operation table for LEON.  All per-mm, per-page
 * and per-range entries resolve to whole-cache / whole-TLB flushes via
 * the static wrappers above, since the LEON flush primitives operate
 * on the entire cache or TLB. */
static const struct sparc32_cachetlb_ops leon_ops = {
	.cache_all	= leon_flush_cache_all,
	.cache_mm	= leon_flush_cache_mm,
	.cache_page	= leon_flush_cache_page,
	.cache_range	= leon_flush_cache_range,
	.tlb_all	= leon_flush_tlb_all,
	.tlb_mm		= leon_flush_tlb_mm,
	.tlb_page	= leon_flush_tlb_page,
	.tlb_range	= leon_flush_tlb_range,
	.page_to_ram	= leon_flush_page_to_ram,
	.sig_insns	= leon_flush_sig_insns,
	.page_for_dma	= leon_flush_page_for_dma,
};
344
/* Boot-time MMU setup for LEON: name the srmmu implementation, install
 * the LEON cache/TLB operation table and the (empty) per-CPU poke hook,
 * then probe the cache geometry to decide whether caches must be
 * flushed on every context switch. */
void __init init_leon(void)
{
	srmmu_name = "LEON";
	sparc32_cachetlb_ops = &leon_ops;
	poke_srmmu = poke_leonsparc;

	leon_flush_during_switch = leon_flush_needed();
}