v6.2
// SPDX-License-Identifier: GPL-2.0-only
/*
 * TLB Management (flush/create/diagnostics) for MMUv3 and MMUv4
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 */

#include <linux/module.h>
#include <linux/bug.h>
#include <linux/mm_types.h>

#include <asm/arcregs.h>
#include <asm/setup.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>

/* A copy of the ASID from the PID reg is kept in asid_cache */
DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;

static int __read_mostly pae_exists;

/*
 * Utility Routine to erase a J-TLB entry
 * Caller needs to setup Index Reg (manually or via getIndex)
 */
static inline void __tlb_entry_erase(void)
{
	write_aux_reg(ARC_REG_TLBPD1, 0);

	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_TLBPD1HI, 0);

	write_aux_reg(ARC_REG_TLBPD0, 0);
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}

static void utlb_invalidate(void)
{
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB);
}

#ifdef CONFIG_ARC_MMU_V3

static inline unsigned int tlb_entry_lkup(unsigned long vaddr_n_asid)
{
	unsigned int idx;

	write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid);

	write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
	idx = read_aux_reg(ARC_REG_TLBINDEX);

	return idx;
}

static void tlb_entry_erase(unsigned int vaddr_n_asid)
{
	unsigned int idx;

	/* Locate the TLB entry for this vaddr + ASID */
	idx = tlb_entry_lkup(vaddr_n_asid);

	/* No error means entry found, zero it out */
	if (likely(!(idx & TLB_LKUP_ERR))) {
		__tlb_entry_erase();
	} else {
		/* Duplicate entry error */
		WARN(idx == TLB_DUP_ERR, "Probe returned Dup PD for %x\n",
					   vaddr_n_asid);
	}
}

static void tlb_entry_insert(unsigned int pd0, phys_addr_t pd1)
{
	unsigned int idx;

	/*
	 * First verify if entry for this vaddr+ASID already exists
	 * This also sets up PD0 (vaddr, ASID..) for final commit
	 */
	idx = tlb_entry_lkup(pd0);

	/*
	 * If not already present, get a free slot from MMU.
	 * Otherwise, Probe would have located the entry and set INDEX Reg
	 * with existing location. This will cause Write CMD to over-write
	 * existing entry with new PD0 and PD1
	 */
	if (likely(idx & TLB_LKUP_ERR))
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);

	/* setup the other half of TLB entry (pfn, rwx..) */
	write_aux_reg(ARC_REG_TLBPD1, pd1);

	/*
	 * Commit the Entry to MMU
	 * It doesn't sound safe to use the TLBWriteNI cmd here
	 * which doesn't flush uTLBs. I'd rather be safe than sorry.
	 */
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}

#else	/* MMUv4 */

static void tlb_entry_erase(unsigned int vaddr_n_asid)
{
	write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid | _PAGE_PRESENT);
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBDeleteEntry);
}

static void tlb_entry_insert(unsigned int pd0, phys_addr_t pd1)
{
	write_aux_reg(ARC_REG_TLBPD0, pd0);

	if (!is_pae40_enabled()) {
		write_aux_reg(ARC_REG_TLBPD1, pd1);
	} else {
		write_aux_reg(ARC_REG_TLBPD1, pd1 & 0xFFFFFFFF);
		write_aux_reg(ARC_REG_TLBPD1HI, (u64)pd1 >> 32);
	}

	write_aux_reg(ARC_REG_TLBCOMMAND, TLBInsertEntry);
}

#endif

/*
 * Un-conditionally (without lookup) erase the entire MMU contents
 */

noinline void local_flush_tlb_all(void)
{
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
	unsigned long flags;
	unsigned int entry;
	int num_tlb = mmu->sets * mmu->ways;

	local_irq_save(flags);

	/* Load PD0 and PD1 with template for a Blank Entry */
	write_aux_reg(ARC_REG_TLBPD1, 0);

	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_TLBPD1HI, 0);

	write_aux_reg(ARC_REG_TLBPD0, 0);

	for (entry = 0; entry < num_tlb; entry++) {
		/* write this entry to the TLB */
		write_aux_reg(ARC_REG_TLBINDEX, entry);
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);
	}

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
		const int stlb_idx = 0x800;

		/* Blank sTLB entry: the SZ bit in PD0 marks it as a super-page descriptor */
		write_aux_reg(ARC_REG_TLBPD0, _PAGE_HW_SZ);

		for (entry = stlb_idx; entry < stlb_idx + 16; entry++) {
			write_aux_reg(ARC_REG_TLBINDEX, entry);
			write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);
		}
	}

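	/*
	 * Note: a single uTLB invalidate here suffices because the loops
	 * above used TLBWriteNI, which writes the J-TLB without flushing
	 * the uTLBs on every iteration.
	 */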
	utlb_invalidate();

	local_irq_restore(flags);
}

/*
 * Flush the entire MM for userland. The fastest way is to move to Next ASID
 */
noinline void local_flush_tlb_mm(struct mm_struct *mm)
{
	/*
	 * Small optimisation courtesy IA64
	 * flush_mm called during fork,exit,munmap etc, multiple times as well.
	 * Only for fork( ) do we need to move parent to a new MMU ctxt,
	 * all other cases are NOPs, hence this check.
	 */
	if (atomic_read(&mm->mm_users) == 0)
		return;

	/*
	 * - Move to a new ASID, but only if the mm is still wired in
	 *   (Android Binder ended up calling this for vma->mm != tsk->mm,
	 *    causing h/w - s/w ASID to get out of sync)
	 * - Also get_new_mmu_context() new implementation allocates a new
	 *   ASID only if it is not allocated already - so unallocate first
	 */
	destroy_context(mm);
	if (current->mm == mm)
		get_new_mmu_context(mm);
}

/*
 * Flush a Range of TLB entries for userland.
 * @start is inclusive, while @end is exclusive
 * Difference between this and Kernel Range Flush is
 *  -Here the fastest way (if range is too large) is to move to next ASID
 *      without doing any explicit Shootdown
 *  -In case of kernel Flush, entry has to be shot down explicitly
 */
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	const unsigned int cpu = smp_processor_id();
	unsigned long flags;

	/* If range @start to @end is more than 32 TLB entries deep,
	 * it's better to move to a new ASID rather than searching for
	 * individual entries and then shooting them down
	 *
	 * The calc above is rough, doesn't account for unaligned parts,
	 * since this is heuristics based anyways
	 */
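	/*
	 * Worked example (assuming the typical 8K ARC page): 32 entries
	 * cover 32 * 8K = 256K, so any range of 256K or more takes the
	 * ASID-rollover path below instead of per-page shootdowns.
	 */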
	if (unlikely((end - start) >= PAGE_SIZE * 32)) {
		local_flush_tlb_mm(vma->vm_mm);
		return;
	}

	/*
	 * @start moved to page start: this alone suffices for checking
	 * loop end condition below, w/o need for aligning @end to end
	 * e.g. 2000 to 4001 will anyhow loop twice
	 */
	start &= PAGE_MASK;

	local_irq_save(flags);

	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
		while (start < end) {
			tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu));
			start += PAGE_SIZE;
		}
	}

	local_irq_restore(flags);
}

/* Flush the kernel TLB entries - vmalloc/modules (Global from MMU perspective)
 *  @start, @end interpreted as kvaddr
 * Interestingly, shared TLB entries can also be flushed using just
 * @start,@end alone (interpreted as user vaddr), although technically SASID
 * is also needed. However our smart TLBProbe lookup takes care of that.
 */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;

	/* exactly same as above, except for TLB entry not taking ASID */

	if (unlikely((end - start) >= PAGE_SIZE * 32)) {
		local_flush_tlb_all();
		return;
	}

	start &= PAGE_MASK;

	local_irq_save(flags);
	while (start < end) {
		tlb_entry_erase(start);
		start += PAGE_SIZE;
	}

	local_irq_restore(flags);
}

/*
 * Delete TLB entry in MMU for a given page (??? address)
 * NOTE One TLB entry contains translation for a single PAGE
 */

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	const unsigned int cpu = smp_processor_id();
	unsigned long flags;

	/* Note that it is critical that interrupts are DISABLED between
	 * checking the ASID and using it to flush the TLB entry
	 */
	local_irq_save(flags);

	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
		tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu));
	}

	local_irq_restore(flags);
}

#ifdef CONFIG_SMP

struct tlb_args {
	struct vm_area_struct *ta_vma;
	unsigned long ta_start;
	unsigned long ta_end;
};

static inline void ipi_flush_tlb_page(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ipi_flush_pmd_tlb_range(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_pmd_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}
#endif

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}

void flush_tlb_all(void)
{
	on_each_cpu((smp_call_func_t)local_flush_tlb_all, NULL, 1);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	on_each_cpu_mask(mm_cpumask(mm), (smp_call_func_t)local_flush_tlb_mm,
			 mm, 1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = uaddr
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = start,
		.ta_end = end
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			 unsigned long end)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = start,
		.ta_end = end
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_pmd_tlb_range, &ta, 1);
}
#endif

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct tlb_args ta = {
		.ta_start = start,
		.ta_end = end
	};

	on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
}
#endif
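
/*
 * On !SMP builds the flush_tlb_*() names presumably map straight onto the
 * local_*() routines above (via the arch tlbflush header), so the IPI
 * plumbing here is only compiled for CONFIG_SMP.
 */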

/*
 * Routine to create a TLB entry
 */
void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *ptep)
{
	unsigned long flags;
	unsigned int asid_or_sasid, rwx;
	unsigned long pd0;
	phys_addr_t pd1;

	/*
	 * create_tlb() assumes that current->mm == vma->mm, since
	 * -its ASID for the TLB entry is fetched from MMU ASID reg (valid for curr)
	 * -it completes the lazy write to SASID reg (again valid for curr tsk)
	 *
	 * Removing the assumption involves
	 * -Using vma->mm->context{ASID,SASID}, as opposed to MMU reg.
	 * -More importantly it makes this handler inconsistent with fast-path
	 *  TLB Refill handler which always deals with "current"
	 *
	 * Let's see the use cases when current->mm != vma->mm and we land here
	 *  1. execve->copy_strings()->__get_user_pages->handle_mm_fault
	 *     Here VM wants to pre-install a TLB entry for user stack while
	 *     current->mm still points to pre-execve mm (hence the condition).
	 *     However the stack vaddr is soon relocated (randomization) and
	 *     move_page_tables() tries to undo that TLB entry.
	 *     Thus not creating TLB entry is not any worse.
	 *
	 *  2. ptrace(POKETEXT) causes a CoW - debugger(current) inserting a
	 *     breakpoint in debugged task. Not creating a TLB now is not
	 *     performance critical.
	 *
	 * Both the cases above are not good enough for code churn.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	vaddr &= PAGE_MASK;

	/* update this PTE credentials */
	pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED);

	/* Create HW TLB(PD0,PD1) from PTE  */

	/* ASID for this task */
	asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;

	pd0 = vaddr | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0);

	/*
	 * ARC MMU provides fully orthogonal access bits for K/U mode,
	 * however Linux only saves 1 set to save PTE real-estate
	 * Here we convert 3 PTE bits into 6 MMU bits:
	 * -Kernel only entries have Kr Kw Kx 0 0 0
	 * -User entries have mirrored K and U bits
	 */
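	/*
	 * Illustration (exact bit positions are whatever PTE_BITS_RWX
	 * defines) - for an rw- page the 3 Linux bits expand as:
	 *   user page:   r w - => Kr Kw -  Ur Uw -
	 *   global page: r w - => Kr Kw -  0  0  0
	 */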
	rwx = pte_val(*ptep) & PTE_BITS_RWX;

	if (pte_val(*ptep) & _PAGE_GLOBAL)
		rwx <<= 3;		/* r w x => Kr Kw Kx 0 0 0 */
	else
		rwx |= (rwx << 3);	/* r w x => Kr Kw Kx Ur Uw Ux */

	pd1 = rwx | (pte_val(*ptep) & PTE_BITS_NON_RWX_IN_PD1);

	tlb_entry_insert(pd0, pd1);

	local_irq_restore(flags);
}

/*
 * Called at the end of pagefault, for a userspace mapped page
 *  -pre-install the corresponding TLB entry into MMU
 *  -Finalize the delayed D-cache flush of kernel mapping of page due to
 *  	flush_dcache_page(), copy_user_page()
 *
 * Note that flush (when done) involves both WBACK - so physical page is
 * in sync as well as INV - so any non-congruent aliases don't remain
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
		      pte_t *ptep)
{
	unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
	phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK_PHYS;
	struct page *page = pfn_to_page(pte_pfn(*ptep));

	create_tlb(vma, vaddr, ptep);

	if (page == ZERO_PAGE(0)) {
		return;
	}

	/*
	 * Exec page : Independent of aliasing/page-color considerations,
	 *	       since icache doesn't snoop dcache on ARC, any dirty
	 *	       K-mapping of a code page needs to be wback+inv so that
	 *	       icache fetch by userspace sees code correctly.
	 * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it
	 *	       so userspace sees the right data.
	 *  (Avoids the flush for Non-exec + congruent mapping case)
	 */
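	/*
	 * The same cases in table form (a flush only happens if the
	 * K-mapping is dirty, i.e. PG_dc_clean is not yet set - see below):
	 *   exec,  any congruence : wback+inv dcache (K-map), inv icache (U-map)
	 *   !exec, not congruent  : wback+inv dcache (K-map)
	 *   !exec, congruent      : no flush needed
	 */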
	if ((vma->vm_flags & VM_EXEC) ||
	     addr_not_cache_congruent(paddr, vaddr)) {

		int dirty = !test_and_set_bit(PG_dc_clean, &page->flags);
		if (dirty) {
			/* wback + inv dcache lines (K-mapping) */
			__flush_dcache_page(paddr, paddr);

			/* invalidate any existing icache lines (U-mapping) */
			if (vma->vm_flags & VM_EXEC)
				__inv_icache_page(paddr, vaddr);
		}
	}
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/*
 * MMUv4 in HS38x cores supports Super Pages which are basis for Linux THP
 * support.
 *
 * Normal and Super pages can co-exist (of course not overlap) in TLB with a
 * new bit "SZ" in TLB page descriptor to distinguish between them.
 * Super Page size is configurable in hardware (4K to 16M), but fixed once
 * the RTL is built.
 *
 * The exact THP size a Linux configuration will support is a function of:
 *  - MMU page size (typical 8K, RTL fixed)
 *  - software page walker address split between PGD:PTE:PFN (typical
 *    11:8:13, but can be changed with 1 line)
 * So for above default, THP size supported is 8K * (2^8) = 2M
 *
 * Default Page Walker is 2 levels, PGD:PTE:PFN, which in THP regime
 * reduces to 1 level (as PTE is folded into PGD and canonically referred
 * to as PMD).
 * Thus THP PMD accessors are implemented in terms of PTE (just like sparc)
 */

void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
				 pmd_t *pmd)
{
	pte_t pte = __pte(pmd_val(*pmd));
	update_mmu_cache(vma, addr, &pte);
}

void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			       unsigned long end)
{
	unsigned int cpu;
	unsigned long flags;

	local_irq_save(flags);

	cpu = smp_processor_id();

	if (likely(asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID)) {
		unsigned int asid = hw_pid(vma->vm_mm, cpu);

		/* No need to loop here: this will always be for 1 Huge Page */
		tlb_entry_erase(start | _PAGE_HW_SZ | asid);
	}

	local_irq_restore(flags);
}

#endif

/* Read the Cache Build Configuration Registers, Decode them and save into
 * the cpuinfo structure for later use.
 * No Validation is done here, simply read/convert the BCRs
 */
void read_decode_mmu_bcr(void)
{
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
	unsigned int tmp;
	struct bcr_mmu_3 {
#ifdef CONFIG_CPU_BIG_ENDIAN
	unsigned int ver:8, ways:4, sets:4, res:3, sasid:1, pg_sz:4,
		     u_itlb:4, u_dtlb:4;
#else
	unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, sasid:1, res:3, sets:4,
		     ways:4, ver:8;
#endif
	} *mmu3;

	struct bcr_mmu_4 {
#ifdef CONFIG_CPU_BIG_ENDIAN
	unsigned int ver:8, sasid:1, sz1:4, sz0:4, res:2, pae:1,
		     n_ways:2, n_entry:2, n_super:2, u_itlb:3, u_dtlb:3;
#else
	/*           DTLB      ITLB      JES        JE         JA      */
	unsigned int u_dtlb:3, u_itlb:3, n_super:2, n_entry:2, n_ways:2,
		     pae:1, res:2, sz0:4, sz1:4, sasid:1, ver:8;
#endif
	} *mmu4;

	tmp = read_aux_reg(ARC_REG_MMU_BCR);
	mmu->ver = (tmp >> 24);

	if (is_isa_arcompact() && mmu->ver == 3) {
		mmu3 = (struct bcr_mmu_3 *)&tmp;
		mmu->pg_sz_k = 1 << (mmu3->pg_sz - 1);
		mmu->sets = 1 << mmu3->sets;
		mmu->ways = 1 << mmu3->ways;
		mmu->u_dtlb = mmu3->u_dtlb;
		mmu->u_itlb = mmu3->u_itlb;
		mmu->sasid = mmu3->sasid;
	} else {
		mmu4 = (struct bcr_mmu_4 *)&tmp;
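		/*
		 * Decode examples, applying the formulas below:
		 *   sz0 = 4  -> pg_sz_k   = 1 << 3 = 8  (8K normal page)
		 *   sz1 = 12 -> s_pg_sz_m = 1 << 1 = 2  (2M super page)
		 *   n_entry = 2, n_ways = 2 -> 256 sets x 4 ways J-TLB
		 */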
		mmu->pg_sz_k = 1 << (mmu4->sz0 - 1);
		mmu->s_pg_sz_m = 1 << (mmu4->sz1 - 11);
		mmu->sets = 64 << mmu4->n_entry;
		mmu->ways = mmu4->n_ways * 2;
		mmu->u_dtlb = mmu4->u_dtlb * 4;
		mmu->u_itlb = mmu4->u_itlb * 4;
		mmu->sasid = mmu4->sasid;
		pae_exists = mmu->pae = mmu4->pae;
	}
}

char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
{
	int n = 0;
	struct cpuinfo_arc_mmu *p_mmu = &cpuinfo_arc700[cpu_id].mmu;
	char super_pg[64] = "";

	if (p_mmu->s_pg_sz_m)
		scnprintf(super_pg, 64, "%dM Super Page %s",
			  p_mmu->s_pg_sz_m,
			  IS_USED_CFG(CONFIG_TRANSPARENT_HUGEPAGE));

	n += scnprintf(buf + n, len - n,
		      "MMU [v%x]\t: %dk PAGE, %s, swalk %d lvl, JTLB %d (%dx%d), uDTLB %d, uITLB %d%s%s\n",
		       p_mmu->ver, p_mmu->pg_sz_k, super_pg, CONFIG_PGTABLE_LEVELS,
		       p_mmu->sets * p_mmu->ways, p_mmu->sets, p_mmu->ways,
		       p_mmu->u_dtlb, p_mmu->u_itlb,
		       IS_AVAIL2(p_mmu->pae, ", PAE40 ", CONFIG_ARC_HAS_PAE40));

	return buf;
}

int pae40_exist_but_not_enab(void)
{
	return pae_exists && !is_pae40_enabled();
}

void arc_mmu_init(void)
{
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
	char str[256];
	int compat = 0;

	pr_info("%s", arc_mmu_mumbojumbo(0, str, sizeof(str)));

	/*
	 * Can't be done in processor.h due to header include dependencies
	 */
	BUILD_BUG_ON(!IS_ALIGNED((CONFIG_ARC_KVADDR_SIZE << 20), PMD_SIZE));

	/*
	 * stack top size sanity check,
	 * Can't be done in processor.h due to header include dependencies
	 */
	BUILD_BUG_ON(!IS_ALIGNED(STACK_TOP, PMD_SIZE));

	/*
	 * Ensure that MMU features assumed by kernel exist in hardware.
	 *  - For older ARC700 cpus, only v3 supported
	 *  - For HS cpus, v4 was baseline and v5 is backwards compatible
	 *    (will run older software).
	 */
	if (is_isa_arcompact() && mmu->ver == 3)
		compat = 1;
	else if (is_isa_arcv2() && mmu->ver >= 4)
		compat = 1;

	if (!compat)
		panic("MMU ver %d doesn't match kernel built for\n", mmu->ver);

	if (mmu->pg_sz_k != TO_KB(PAGE_SIZE))
		panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE));

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    mmu->s_pg_sz_m != TO_MB(HPAGE_PMD_SIZE))
		panic("MMU Super pg size != Linux HPAGE_PMD_SIZE (%luM)\n",
		      (unsigned long)TO_MB(HPAGE_PMD_SIZE));

	if (IS_ENABLED(CONFIG_ARC_HAS_PAE40) && !mmu->pae)
		panic("Hardware doesn't support PAE40\n");

	/* Enable the MMU with ASID 0 */
	mmu_setup_asid(NULL, 0);

	/* cache the pgd pointer in MMU SCRATCH reg (ARCv2 only) */
	mmu_setup_pgd(NULL, swapper_pg_dir);

	if (pae40_exist_but_not_enab())
		write_aux_reg(ARC_REG_TLBPD1HI, 0);
}

/*
 * TLB Programmer's Model uses Linear Indexes: 0 to {255, 511} for 128 x {2,4}
 * The mapping is Column-first.
 *		---------------------	-----------
 *		|way0|way1|way2|way3|	|way0|way1|
 *		---------------------	-----------
 * [set0]	|  0 |  1 |  2 |  3 |	|  0 |  1 |
 * [set1]	|  4 |  5 |  6 |  7 |	|  2 |  3 |
 *		~		    ~	~	  ~
 * [set127]	| 508| 509| 510| 511|	| 254| 255|
 *		---------------------	-----------
 * For normal operations we don't (must not) care how the above works since
 * MMU cmd getIndex(vaddr) abstracts that out.
 * However for walking WAYS of a SET, we need to know this
 */
#define SET_WAY_TO_IDX(mmu, set, way)  ((set) * mmu->ways + (way))
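/* e.g. a 4-way MMU: (set 2, way 1) -> linear index 2 * 4 + 1 = 9 */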

/* Handling of Duplicate PD (TLB entry) in MMU.
 * -Could be due to buggy customer tapeouts or obscure kernel bugs
 * -MMU complains not at the time of duplicate PD installation, but at the
 *      time of lookup matching multiple ways.
 * -Ideally these should never happen - but if they do - workaround by deleting
 *      the duplicate one.
 * -Knob to be verbose about it. (TODO: hook them up to debugfs)
 */
volatile int dup_pd_silent; /* Be silent about it or complain (default) */

void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
			  struct pt_regs *regs)
{
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
	unsigned long flags;
	int set, n_ways = mmu->ways;

	n_ways = min(n_ways, 4);
	BUG_ON(mmu->ways > 4);

	local_irq_save(flags);

	/* loop thru all sets of TLB */
	for (set = 0; set < mmu->sets; set++) {

		int is_valid, way;
		unsigned int pd0[4];

		/* read out all the ways of current set */
		for (way = 0, is_valid = 0; way < n_ways; way++) {
			write_aux_reg(ARC_REG_TLBINDEX,
					  SET_WAY_TO_IDX(mmu, set, way));
			write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead);
			pd0[way] = read_aux_reg(ARC_REG_TLBPD0);
			is_valid |= pd0[way] & _PAGE_PRESENT;
			pd0[way] &= PAGE_MASK;
		}

		/* If all the WAYS in SET are empty, skip to next SET */
		if (!is_valid)
			continue;

		/* Scan the set for duplicate ways: needs a nested loop */
		for (way = 0; way < n_ways - 1; way++) {

			int n;

			if (!pd0[way])
				continue;

			for (n = way + 1; n < n_ways; n++) {
				if (pd0[way] != pd0[n])
					continue;

				if (!dup_pd_silent)
					pr_info("Dup TLB PD0 %08x @ set %d ways %d,%d\n",
						pd0[way], set, way, n);

				/*
				 * Clear entry @way and not @n: pd0[n] stays
				 * valid, so if a third way holds the same PD
				 * it is still caught when the outer loop
				 * reaches @n. This is critical to our
				 * optimised loop.
				 */
				pd0[way] = 0;
				write_aux_reg(ARC_REG_TLBINDEX,
						SET_WAY_TO_IDX(mmu, set, way));
				__tlb_entry_erase();
			}
		}
	}

	local_irq_restore(flags);
}
v3.15
 
/*
 * TLB Management (flush/create/diagnostics) for ARC700
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * vineetg: Aug 2011
 *  -Reintroduce duplicate PD fixup - some customer chips still have the issue
 *
 * vineetg: May 2011
 *  -No need to flush_cache_page( ) for each call to update_mmu_cache()
 *   some of the LMBench tests improved amazingly
 *      = page-fault thrice as fast (75 usec to 28 usec)
 *      = mmap twice as fast (9.6 msec to 4.6 msec),
 *      = fork (5.3 msec to 3.7 msec)
 *
 * vineetg: April 2011 :
 *  -MMU v3: PD{0,1} bits layout changed: They don't overlap anymore,
 *      helps avoid a shift when preparing PD0 from PTE
 *
 * vineetg: April 2011 : Preparing for MMU V3
 *  -MMU v2/v3 BCRs decoded differently
 *  -Remove TLB_SIZE hardcoding as it's variable now: 256 or 512
 *  -tlb_entry_erase( ) can be void
 *  -local_flush_tlb_range( ):
 *      = need not "ceil" @end
 *      = walks MMU only if range spans < 32 entries, as opposed to 256
 *
 * Vineetg: Sept 10th 2008
 *  -Changes related to MMU v2 (Rel 4.8)
 *
 * Vineetg: Aug 29th 2008
 *  -In TLB Flush operations (Metal Fix MMU) there is an explicit command to
 *    flush Micro-TLBs. If TLB Index Reg is invalid prior to TLBIVUTLB cmd,
 *    it fails. Thus need to load it with ANY valid value before invoking
 *    TLBIVUTLB cmd
 *
 * Vineetg: Aug 21st 2008:
 *  -Reduced the duration of IRQ lockouts in TLB Flush routines
 *  -Multiple copies of TLB erase code separated into a "single" function
 *  -In TLB Flush routines, interrupt disabling moved UP to retrieve ASID
 *       in interrupt-safe region.
 *
 * Vineetg: April 23rd Bug #93131
 *    Problem: tlb_flush_kernel_range() doesn't do anything if the range to
 *              flush is more than the size of TLB itself.
 *
 * Rahul Trivedi : Codito Technologies 2004
 */

#include <linux/module.h>
#include <linux/bug.h>
#include <asm/arcregs.h>
#include <asm/setup.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>

/*			Need for ARC MMU v2
 *
 * ARC700 MMU-v1 had a Joint-TLB for Code and Data that was 2 way set-assoc.
 * For a memcpy operation with 3 players (src/dst/code) such that all 3 pages
 * map into same set, there would be contention for the 2 ways causing severe
 * Thrashing.
 *
 * Although J-TLB is 2 way set assoc, ARC700 caches J-TLB into uTLBS which has
 * much higher associativity. u-D-TLB is 8 ways, u-I-TLB is 4 ways.
 * Given this, the thrashing problem should never happen because once the 3
 * J-TLB entries are created (even though 3rd will knock out one of the prev
 * two), the u-D-TLB and u-I-TLB will have what is required to accomplish memcpy
 *
 * Yet we still see the Thrashing because a J-TLB Write causes a flush of u-TLBs.
 * This is a simple design for keeping them in sync. So what do we do?
 * The solution which James came up with was pretty neat. It utilised the assoc
 * of uTLBs by not invalidating always but only when absolutely necessary.
 *
 * - Existing TLB commands work as before
 * - New command (TLBWriteNI) for TLB write without clearing uTLBs
 * - New command (TLBIVUTLB) to invalidate uTLBs.
 *
 * The uTLBs need only be invalidated when pages are being removed from the
 * OS page table. If a 'victim' TLB entry is being overwritten in the main TLB
 * as a result of a miss, the removed entry is still allowed to exist in the
 * uTLBs as it is still valid and present in the OS page table. This allows the
 * full associativity of the uTLBs to hide the limited associativity of the main
 * TLB.
 *
 * During a miss handler, the new "TLBWriteNI" command is used to load
 * entries without clearing the uTLBs.
 *
 * When the OS page table is updated, TLB entries that may be associated with a
 * removed page are removed (flushed) from the TLB using TLBWrite. In this
 * circumstance, the uTLBs must also be cleared. This is done by using the
 * existing TLBWrite command. An explicit IVUTLB is also required for those
 * corner cases when TLBWrite was not executed at all because the corresp
 * J-TLB entry got evicted/replaced.
 */
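
/*
 * Quick reference for the commands described above:
 *   TLBWrite   - install/overwrite an entry, also flushes the uTLBs
 *   TLBWriteNI - install an entry without touching the uTLBs (miss refill)
 *   TLBIVUTLB  - explicitly invalidate the uTLBs only
 */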

/* A copy of the ASID from the PID reg is kept in asid_cache */
DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;

/*
 * Utility Routine to erase a J-TLB entry
 * Caller needs to setup Index Reg (manually or via getIndex)
 */
static inline void __tlb_entry_erase(void)
{
	write_aux_reg(ARC_REG_TLBPD1, 0);
	write_aux_reg(ARC_REG_TLBPD0, 0);
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}

static inline unsigned int tlb_entry_lkup(unsigned long vaddr_n_asid)
{
	unsigned int idx;

	write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid);

	write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
	idx = read_aux_reg(ARC_REG_TLBINDEX);

	return idx;
}

static void tlb_entry_erase(unsigned int vaddr_n_asid)
{
	unsigned int idx;

	/* Locate the TLB entry for this vaddr + ASID */
	idx = tlb_entry_lkup(vaddr_n_asid);

	/* No error means entry found, zero it out */
	if (likely(!(idx & TLB_LKUP_ERR))) {
		__tlb_entry_erase();
	} else {
		/* Duplicate entry error */
		WARN(idx == TLB_DUP_ERR, "Probe returned Dup PD for %x\n",
					   vaddr_n_asid);
	}
}

/****************************************************************************
 * ARC700 MMU caches recently used J-TLB entries (RAM) as uTLBs (FLOPs)
 *
 * New IVUTLB cmd in MMU v2 explicitly invalidates the uTLB
 *
 * utlb_invalidate ( )
 *  -For v2 MMU calls Flush uTLB Cmd
 *  -For v1 MMU does nothing (except for Metal Fix v1 MMU)
 *      This is because in v1 TLBWrite itself invalidates uTLBs
 ***************************************************************************/

static void utlb_invalidate(void)
{
#if (CONFIG_ARC_MMU_VER >= 2)

#if (CONFIG_ARC_MMU_VER == 2)
	/* MMU v2 introduced the uTLB Flush command.
	 * There was however an obscure hardware bug, where uTLB flush would
	 * fail when a prior probe for J-TLB (both totally unrelated) would
	 * return lkup err - because the entry didn't exist in MMU.
	 * The workaround was to set Index reg with some valid value, prior to
	 * flush. This was fixed in MMU v3 hence it is not needed any more
	 */
	unsigned int idx;

	/* make sure INDEX Reg is valid */
	idx = read_aux_reg(ARC_REG_TLBINDEX);

	/* If not write some dummy val */
	if (unlikely(idx & TLB_LKUP_ERR))
		write_aux_reg(ARC_REG_TLBINDEX, 0xa);
#endif

	write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB);
#endif

}

static void tlb_entry_insert(unsigned int pd0, unsigned int pd1)
{
	unsigned int idx;

	/*
	 * First verify if entry for this vaddr+ASID already exists
	 * This also sets up PD0 (vaddr, ASID..) for final commit
	 */
	idx = tlb_entry_lkup(pd0);

	/*
	 * If not already present, get a free slot from MMU.
	 * Otherwise, Probe would have located the entry and set INDEX Reg
	 * with existing location. This will cause Write CMD to over-write
	 * existing entry with new PD0 and PD1
	 */
	if (likely(idx & TLB_LKUP_ERR))
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);

	/* setup the other half of TLB entry (pfn, rwx..) */
	write_aux_reg(ARC_REG_TLBPD1, pd1);

	/*
	 * Commit the Entry to MMU
	 * It doesn't sound safe to use the TLBWriteNI cmd here
	 * which doesn't flush uTLBs. I'd rather be safe than sorry.
	 */
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}

/*
 * Un-conditionally (without lookup) erase the entire MMU contents
 */

noinline void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned int entry;
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;

	local_irq_save(flags);

	/* Load PD0 and PD1 with template for a Blank Entry */
	write_aux_reg(ARC_REG_TLBPD1, 0);
	write_aux_reg(ARC_REG_TLBPD0, 0);

	for (entry = 0; entry < mmu->num_tlb; entry++) {
		/* write this entry to the TLB */
		write_aux_reg(ARC_REG_TLBINDEX, entry);
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
	}

	utlb_invalidate();

	local_irq_restore(flags);
}

/*
 * Flush the entire MM for userland. The fastest way is to move to Next ASID
 */
noinline void local_flush_tlb_mm(struct mm_struct *mm)
{
	/*
	 * Small optimisation courtesy IA64
	 * flush_mm called during fork,exit,munmap etc, multiple times as well.
	 * Only for fork( ) do we need to move parent to a new MMU ctxt,
	 * all other cases are NOPs, hence this check.
	 */
	if (atomic_read(&mm->mm_users) == 0)
		return;

	/*
	 * - Move to a new ASID, but only if the mm is still wired in
	 *   (Android Binder ended up calling this for vma->mm != tsk->mm,
	 *    causing h/w - s/w ASID to get out of sync)
	 * - Also get_new_mmu_context() new implementation allocates a new
	 *   ASID only if it is not allocated already - so unallocate first
	 */
	destroy_context(mm);
	if (current->mm == mm)
		get_new_mmu_context(mm);
}

/*
 * Flush a Range of TLB entries for userland.
 * @start is inclusive, while @end is exclusive
 * Difference between this and Kernel Range Flush is
 *  -Here the fastest way (if range is too large) is to move to next ASID
 *      without doing any explicit Shootdown
 *  -In case of kernel Flush, entry has to be shot down explicitly
 */
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	const unsigned int cpu = smp_processor_id();
	unsigned long flags;

	/* If range @start to @end is more than 32 TLB entries deep,
	 * it's better to move to a new ASID rather than searching for
	 * individual entries and then shooting them down
	 *
	 * The calc above is rough, doesn't account for unaligned parts,
	 * since this is heuristics based anyways
	 */
	if (unlikely((end - start) >= PAGE_SIZE * 32)) {
		local_flush_tlb_mm(vma->vm_mm);
		return;
	}

	/*
	 * @start moved to page start: this alone suffices for checking
	 * loop end condition below, w/o need for aligning @end to end
	 * e.g. 2000 to 4001 will anyhow loop twice
	 */
	start &= PAGE_MASK;

	local_irq_save(flags);

	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
		while (start < end) {
			tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu));
			start += PAGE_SIZE;
		}
	}

	utlb_invalidate();

	local_irq_restore(flags);
}

/* Flush the kernel TLB entries - vmalloc/modules (Global from MMU perspective)
 *  @start, @end interpreted as kvaddr
 * Interestingly, shared TLB entries can also be flushed using just
 * @start,@end alone (interpreted as user vaddr), although technically SASID
 * is also needed. However our smart TLBProbe lookup takes care of that.
 */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;

	/* exactly same as above, except for TLB entry not taking ASID */

	if (unlikely((end - start) >= PAGE_SIZE * 32)) {
		local_flush_tlb_all();
		return;
	}

	start &= PAGE_MASK;

	local_irq_save(flags);
	while (start < end) {
		tlb_entry_erase(start);
		start += PAGE_SIZE;
	}

	utlb_invalidate();

	local_irq_restore(flags);
}

/*
 * Delete TLB entry in MMU for a given page (??? address)
 * NOTE One TLB entry contains translation for a single PAGE
 */

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	const unsigned int cpu = smp_processor_id();
	unsigned long flags;

	/* Note that it is critical that interrupts are DISABLED between
	 * checking the ASID and using it to flush the TLB entry
	 */
	local_irq_save(flags);

	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
		tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu));
		utlb_invalidate();
	}

	local_irq_restore(flags);
}

#ifdef CONFIG_SMP

struct tlb_args {
	struct vm_area_struct *ta_vma;
	unsigned long ta_start;
	unsigned long ta_end;
};

static inline void ipi_flush_tlb_page(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}

void flush_tlb_all(void)
{
	on_each_cpu((smp_call_func_t)local_flush_tlb_all, NULL, 1);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	on_each_cpu_mask(mm_cpumask(mm), (smp_call_func_t)local_flush_tlb_mm,
			 mm, 1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = uaddr
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = start,
		.ta_end = end
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct tlb_args ta = {
		.ta_start = start,
		.ta_end = end
	};

	on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
}
#endif

/*
 * Routine to create a TLB entry
 */
void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	unsigned long flags;
	unsigned int asid_or_sasid, rwx;
	unsigned long pd0, pd1;

	/*
	 * create_tlb() assumes that current->mm == vma->mm, since
	 * -its ASID for the TLB entry is fetched from MMU ASID reg (valid for curr)
	 * -it completes the lazy write to SASID reg (again valid for curr tsk)
	 *
	 * Removing the assumption involves
	 * -Using vma->mm->context{ASID,SASID}, as opposed to MMU reg.
	 * -Fix the TLB paranoid debug code to not trigger false negatives.
	 * -More importantly it makes this handler inconsistent with fast-path
	 *  TLB Refill handler which always deals with "current"
	 *
	 * Let's see the use cases when current->mm != vma->mm and we land here
	 *  1. execve->copy_strings()->__get_user_pages->handle_mm_fault
	 *     Here VM wants to pre-install a TLB entry for user stack while
	 *     current->mm still points to pre-execve mm (hence the condition).
	 *     However the stack vaddr is soon relocated (randomization) and
	 *     move_page_tables() tries to undo that TLB entry.
	 *     Thus not creating TLB entry is not any worse.
	 *
	 *  2. ptrace(POKETEXT) causes a CoW - debugger(current) inserting a
	 *     breakpoint in debugged task. Not creating a TLB now is not
	 *     performance critical.
	 *
	 * Both the cases above are not good enough for code churn.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	tlb_paranoid_check(asid_mm(vma->vm_mm, smp_processor_id()), address);

	address &= PAGE_MASK;

	/* update this PTE credentials */
	pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED);

	/* Create HW TLB(PD0,PD1) from PTE  */

	/* ASID for this task */
	asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;

	pd0 = address | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0);

	/*
	 * ARC MMU provides fully orthogonal access bits for K/U mode,
	 * however Linux only saves 1 set to save PTE real-estate
	 * Here we convert 3 PTE bits into 6 MMU bits:
	 * -Kernel only entries have Kr Kw Kx 0 0 0
	 * -User entries have mirrored K and U bits
	 */
	rwx = pte_val(*ptep) & PTE_BITS_RWX;

	if (pte_val(*ptep) & _PAGE_GLOBAL)
		rwx <<= 3;		/* r w x => Kr Kw Kx 0 0 0 */
	else
		rwx |= (rwx << 3);	/* r w x => Kr Kw Kx Ur Uw Ux */

	pd1 = rwx | (pte_val(*ptep) & PTE_BITS_NON_RWX_IN_PD1);

	tlb_entry_insert(pd0, pd1);

	local_irq_restore(flags);
}

/*
 * Called at the end of pagefault, for a userspace mapped page
 *  -pre-install the corresponding TLB entry into MMU
 *  -Finalize the delayed D-cache flush of kernel mapping of page due to
 *  	flush_dcache_page(), copy_user_page()
 *
 * Note that flush (when done) involves both WBACK - so physical page is
 * in sync as well as INV - so any non-congruent aliases don't remain
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
		      pte_t *ptep)
{
	unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
	unsigned long paddr = pte_val(*ptep) & PAGE_MASK;
	struct page *page = pfn_to_page(pte_pfn(*ptep));

	create_tlb(vma, vaddr, ptep);

	if (page == ZERO_PAGE(0)) {
		return;
	}

	/*
	 * Exec page : Independent of aliasing/page-color considerations,
	 *	       since icache doesn't snoop dcache on ARC, any dirty
	 *	       K-mapping of a code page needs to be wback+inv so that
	 *	       icache fetch by userspace sees code correctly.
	 * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it
	 *	       so userspace sees the right data.
	 *  (Avoids the flush for Non-exec + congruent mapping case)
	 */
	if ((vma->vm_flags & VM_EXEC) ||
	     addr_not_cache_congruent(paddr, vaddr)) {

		int dirty = !test_and_set_bit(PG_dc_clean, &page->flags);
		if (dirty) {
			/* wback + inv dcache lines */
			__flush_dcache_page(paddr, paddr);

			/* invalidate any existing icache lines */
			if (vma->vm_flags & VM_EXEC)
				__inv_icache_page(paddr, vaddr);
		}
	}
}

/* Read the Cache Build Configuration Registers, Decode them and save into
 * the cpuinfo structure for later use.
 * No Validation is done here, simply read/convert the BCRs
 */
void read_decode_mmu_bcr(void)
{
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
	unsigned int tmp;
	struct bcr_mmu_1_2 {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int ver:8, ways:4, sets:4, u_itlb:8, u_dtlb:8;
#else
		unsigned int u_dtlb:8, u_itlb:8, sets:4, ways:4, ver:8;
#endif
	} *mmu2;

	struct bcr_mmu_3 {
#ifdef CONFIG_CPU_BIG_ENDIAN
	unsigned int ver:8, ways:4, sets:4, osm:1, reserv:3, pg_sz:4,
		     u_itlb:4, u_dtlb:4;
#else
	unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, reserv:3, osm:1, sets:4,
		     ways:4, ver:8;
#endif
	} *mmu3;

	tmp = read_aux_reg(ARC_REG_MMU_BCR);
	mmu->ver = (tmp >> 24);

	if (mmu->ver <= 2) {
		mmu2 = (struct bcr_mmu_1_2 *)&tmp;
		mmu->pg_sz = PAGE_SIZE;
		mmu->sets = 1 << mmu2->sets;
		mmu->ways = 1 << mmu2->ways;
		mmu->u_dtlb = mmu2->u_dtlb;
		mmu->u_itlb = mmu2->u_itlb;
	} else {
		mmu3 = (struct bcr_mmu_3 *)&tmp;
		mmu->pg_sz = 512 << mmu3->pg_sz;
		mmu->sets = 1 << mmu3->sets;
		mmu->ways = 1 << mmu3->ways;
		mmu->u_dtlb = mmu3->u_dtlb;
		mmu->u_itlb = mmu3->u_itlb;
	}

	mmu->num_tlb = mmu->sets * mmu->ways;
}

char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
{
	int n = 0;
	struct cpuinfo_arc_mmu *p_mmu = &cpuinfo_arc700[cpu_id].mmu;

	n += scnprintf(buf + n, len - n, "ARC700 MMU [v%x]\t: %dk PAGE, ",
		       p_mmu->ver, TO_KB(p_mmu->pg_sz));

	n += scnprintf(buf + n, len - n,
		       "J-TLB %d (%dx%d), uDTLB %d, uITLB %d, %s\n",
		       p_mmu->num_tlb, p_mmu->sets, p_mmu->ways,
		       p_mmu->u_dtlb, p_mmu->u_itlb,
		       IS_ENABLED(CONFIG_ARC_MMU_SASID) ? "SASID" : "");

	return buf;
}

void arc_mmu_init(void)
{
	char str[256];
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;

	printk(arc_mmu_mumbojumbo(0, str, sizeof(str)));

	/* For efficiency's sake, kernel is compile time built for a MMU ver
	 * This must match the hardware it is running on.
	 * Linux built for MMU V2, if run on MMU V1 will break down because V1
	 *  hardware doesn't understand cmds such as WriteNI, or IVUTLB
	 * On the other hand, Linux built for V1 if run on MMU V2 will do
	 *   un-needed workarounds to prevent memcpy thrashing.
	 * Similarly MMU V3 has new features which won't work on older MMU
	 */
	if (mmu->ver != CONFIG_ARC_MMU_VER) {
		panic("MMU ver %d doesn't match kernel built for %d...\n",
		      mmu->ver, CONFIG_ARC_MMU_VER);
	}

	if (mmu->pg_sz != PAGE_SIZE)
		panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE));

	/* Enable the MMU */
	write_aux_reg(ARC_REG_PID, MMU_ENABLE);

	/* In smp we use this reg for interrupt 1 scratch */
#ifndef CONFIG_SMP
	/* swapper_pg_dir is the pgd for the kernel, used by vmalloc */
	write_aux_reg(ARC_REG_SCRATCH_DATA0, swapper_pg_dir);
#endif
}

/*
 * TLB Programmer's Model uses Linear Indexes: 0 to {255, 511} for 128 x {2,4}
 * The mapping is Column-first.
 *		---------------------	-----------
 *		|way0|way1|way2|way3|	|way0|way1|
 *		---------------------	-----------
 * [set0]	|  0 |  1 |  2 |  3 |	|  0 |  1 |
 * [set1]	|  4 |  5 |  6 |  7 |	|  2 |  3 |
 *		~		    ~	~	  ~
 * [set127]	| 508| 509| 510| 511|	| 254| 255|
 *		---------------------	-----------
 * For normal operations we don't (must not) care how the above works since
 * MMU cmd getIndex(vaddr) abstracts that out.
 * However for walking WAYS of a SET, we need to know this
 */
#define SET_WAY_TO_IDX(mmu, set, way)  ((set) * mmu->ways + (way))

/* Handling of Duplicate PD (TLB entry) in MMU.
 * -Could be due to buggy customer tapeouts or obscure kernel bugs
 * -MMU complains not at the time of duplicate PD installation, but at the
 *      time of lookup matching multiple ways.
 * -Ideally these should never happen - but if they do - workaround by deleting
 *      the duplicate one.
 * -Knob to be verbose about it. (TODO: hook them up to debugfs)
 */
volatile int dup_pd_verbose = 1; /* Be silent about it or complain (default) */

void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
			  struct pt_regs *regs)
{
	int set, way, n;
	unsigned long flags, is_valid;
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
	unsigned int pd0[mmu->ways], pd1[mmu->ways];

	local_irq_save(flags);

	/* re-enable the MMU */
	write_aux_reg(ARC_REG_PID, MMU_ENABLE | read_aux_reg(ARC_REG_PID));

	/* loop thru all sets of TLB */
	for (set = 0; set < mmu->sets; set++) {

		/* read out all the ways of current set */
		for (way = 0, is_valid = 0; way < mmu->ways; way++) {
			write_aux_reg(ARC_REG_TLBINDEX,
					  SET_WAY_TO_IDX(mmu, set, way));
			write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead);
			pd0[way] = read_aux_reg(ARC_REG_TLBPD0);
			pd1[way] = read_aux_reg(ARC_REG_TLBPD1);
			is_valid |= pd0[way] & _PAGE_PRESENT;
		}

		/* If all the WAYS in SET are empty, skip to next SET */
		if (!is_valid)
			continue;

		/* Scan the set for duplicate ways: needs a nested loop */
		for (way = 0; way < mmu->ways - 1; way++) {
			if (!pd0[way])
				continue;

			for (n = way + 1; n < mmu->ways; n++) {
				if ((pd0[way] & PAGE_MASK) ==
				    (pd0[n] & PAGE_MASK)) {

					if (dup_pd_verbose) {
						pr_info("Duplicate PD's @"
							"[%d:%d]/[%d:%d]\n",
						     set, way, set, n);
						pr_info("TLBPD0[%u]: %08x\n",
						     way, pd0[way]);
					}

					/*
					 * clear entry @way and not @n. This is
					 * critical to our optimised loop
					 */
					pd0[way] = pd1[way] = 0;
					write_aux_reg(ARC_REG_TLBINDEX,
						SET_WAY_TO_IDX(mmu, set, way));
					__tlb_entry_erase();
				}
			}
		}
	}

	local_irq_restore(flags);
}

/***********************************************************************
 * Diagnostic Routines
 *  -Called from Low Level TLB Handlers if things don't look good
 **********************************************************************/

#ifdef CONFIG_ARC_DBG_TLB_PARANOIA

/*
 * Low Level ASM TLB handler calls this if it finds that HW and SW ASIDS
 * don't match
 */
void print_asid_mismatch(int mm_asid, int mmu_asid, int is_fast_path)
{
	pr_emerg("ASID Mismatch in %s Path Handler: sw-pid=0x%x hw-pid=0x%x\n",
	       is_fast_path ? "Fast" : "Slow", mm_asid, mmu_asid);

	__asm__ __volatile__("flag 1");
}

void tlb_paranoid_check(unsigned int mm_asid, unsigned long addr)
{
	unsigned int mmu_asid;

	mmu_asid = read_aux_reg(ARC_REG_PID) & 0xff;

	/*
	 * At the time of a TLB miss/installation
	 *   - HW version needs to match SW version
	 *   - SW needs to have a valid ASID
	 */
	if (addr < 0x70000000 &&
	    ((mm_asid == MM_CTXT_NO_ASID) ||
	      (mmu_asid != (mm_asid & MM_CTXT_ASID_MASK))))
		print_asid_mismatch(mm_asid, mmu_asid, 0);
}
#endif