/*
 * TLB Management (flush/create/diagnostics) for ARC700
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * vineetg: Aug 2011
 *  -Reintroduce duplicate PD fixup - some customer chips still have the issue
 *
 * vineetg: May 2011
 *  -No need to flush_cache_page( ) for each call to update_mmu_cache()
 *   some of the LMBench tests improved amazingly
 *	= page-fault thrice as fast (75 usec to 28 usec)
 *	= mmap twice as fast (9.6 msec to 4.6 msec),
 *	= fork (5.3 msec to 3.7 msec)
 *
 * vineetg: April 2011 :
 *  -MMU v3: PD{0,1} bits layout changed: They don't overlap anymore,
 *   helps avoid a shift when preparing PD0 from PTE
 *
 * vineetg: April 2011 : Preparing for MMU V3
 *  -MMU v2/v3 BCRs decoded differently
 *  -Remove TLB_SIZE hardcoding as it's variable now: 256 or 512
 *  -tlb_entry_erase( ) can be void
 *  -local_flush_tlb_range( ):
 *	= need not "ceil" @end
 *	= walks MMU only if range spans < 32 entries, as opposed to 256
 *
 * Vineetg: Sept 10th 2008
 *  -Changes related to MMU v2 (Rel 4.8)
 *
 * Vineetg: Aug 29th 2008
 *  -In TLB Flush operations (Metal Fix MMU) there is an explicit command to
 *   flush Micro-TLBs. If the TLB Index Reg is invalid prior to the TLBIVUTLB
 *   cmd, it fails. Thus it needs to be loaded with ANY valid value before
 *   invoking the TLBIVUTLB cmd
 *
 * Vineetg: Aug 21st 2008:
 *  -Reduced the duration of IRQ lockouts in TLB Flush routines
 *  -Multiple copies of TLB erase code separated into a "single" function
 *  -In TLB Flush routines, interrupt disabling moved UP to retrieve ASID
 *   in interrupt-safe region.
 *
 * Vineetg: April 23rd Bug #93131
 *    Problem: tlb_flush_kernel_range() doesn't do anything if the range to
 *             flush is more than the size of TLB itself.
 *
 * Rahul Trivedi : Codito Technologies 2004
 */

#include <linux/module.h>
#include <linux/bug.h>
#include <linux/mm_types.h>

#include <asm/arcregs.h>
#include <asm/setup.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>

/* Need for ARC MMU v2
 *
 * ARC700 MMU-v1 had a Joint-TLB for Code and Data and was 2-way set-assoc.
 * For a memcpy operation with 3 players (src/dst/code) such that all 3 pages
 * map into the same set, there would be contention for the 2 ways causing
 * severe Thrashing.
 *
 * Although J-TLB is 2 way set assoc, ARC700 caches J-TLB into uTLBs which
 * have much higher associativity. u-D-TLB is 8 ways, u-I-TLB is 4 ways.
 * Given this, the thrashing problem should never happen because once the 3
 * J-TLB entries are created (even though 3rd will knock out one of the prev
 * two), the u-D-TLB and u-I-TLB will have what is required to accomplish memcpy
 *
 * Yet we still see the Thrashing because a J-TLB Write causes a flush of
 * u-TLBs. This is a simple design for keeping them in sync. So what do we do?
 * The solution which James came up with was pretty neat. It utilised the assoc
 * of uTLBs by not invalidating always but only when absolutely necessary.
 *
 * - Existing TLB commands work as before
 * - New command (TLBWriteNI) for TLB write without clearing uTLBs
 * - New command (TLBIVUTLB) to invalidate uTLBs.
 *
 * The uTLBs need only be invalidated when pages are being removed from the
 * OS page table. If a 'victim' TLB entry is being overwritten in the main TLB
 * as a result of a miss, the removed entry is still allowed to exist in the
 * uTLBs as it is still valid and present in the OS page table. This allows the
 * full associativity of the uTLBs to hide the limited associativity of the main
 * TLB.
 *
 * During a miss handler, the new "TLBWriteNI" command is used to load
 * entries without clearing the uTLBs.
 *
 * When the OS page table is updated, TLB entries that may be associated with a
 * removed page are removed (flushed) from the TLB using TLBWrite. In this
 * circumstance, the uTLBs must also be cleared. This is done by using the
 * existing TLBWrite command. An explicit IVUTLB is also required for those
 * corner cases when TLBWrite was not executed at all because the corresponding
 * J-TLB entry got evicted/replaced.
 */
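
/*
 * Illustrative sketch (drawn from the scheme above, not a new API): on
 * MMU v2+ the fast-path TLB miss handler commits a freshly built entry with
 *
 *	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);
 *
 * leaving the uTLBs intact, while the unmap/flush paths below keep using
 * TLBWrite (which also clears the uTLBs) and, for the corner cases, an
 * explicit TLBIVUTLB.
 */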

/* A copy of the ASID from the PID reg is kept in asid_cache */
DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;

static int __read_mostly pae_exists;

/*
 * Utility Routine to erase a J-TLB entry
 * Caller needs to setup Index Reg (manually or via getIndex)
 */
static inline void __tlb_entry_erase(void)
{
	write_aux_reg(ARC_REG_TLBPD1, 0);

	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_TLBPD1HI, 0);

	write_aux_reg(ARC_REG_TLBPD0, 0);
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}

#if (CONFIG_ARC_MMU_VER < 4)

static inline unsigned int tlb_entry_lkup(unsigned long vaddr_n_asid)
{
	unsigned int idx;

	write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid);

	write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
	idx = read_aux_reg(ARC_REG_TLBINDEX);

	return idx;
}

static void tlb_entry_erase(unsigned int vaddr_n_asid)
{
	unsigned int idx;

	/* Locate the TLB entry for this vaddr + ASID */
	idx = tlb_entry_lkup(vaddr_n_asid);

	/* No error means entry found, zero it out */
	if (likely(!(idx & TLB_LKUP_ERR))) {
		__tlb_entry_erase();
	} else {
		/* Duplicate entry error */
		WARN(idx == TLB_DUP_ERR, "Probe returned Dup PD for %x\n",
					 vaddr_n_asid);
	}
}

/****************************************************************************
 * ARC700 MMU caches recently used J-TLB entries (RAM) as uTLBs (FLOPs)
 *
 * New IVUTLB cmd in MMU v2 explicitly invalidates the uTLB
 *
 * utlb_invalidate ( )
 *  -For v2 MMU calls Flush uTLB Cmd
 *  -For v1 MMU does nothing (except for Metal Fix v1 MMU)
 *      This is because in v1 TLBWrite itself invalidates uTLBs
 ***************************************************************************/

static void utlb_invalidate(void)
{
#if (CONFIG_ARC_MMU_VER >= 2)

#if (CONFIG_ARC_MMU_VER == 2)
	/* MMU v2 introduced the uTLB Flush command.
	 * There was however an obscure hardware bug, where uTLB flush would
	 * fail when a prior probe for J-TLB (both totally unrelated) would
	 * return lkup err - because the entry didn't exist in MMU.
	 * The workaround was to set the Index reg with some valid value,
	 * prior to flush. This was fixed in MMU v3 hence not needed any more
	 */
	unsigned int idx;

	/* make sure INDEX Reg is valid */
	idx = read_aux_reg(ARC_REG_TLBINDEX);

	/* If not, write some dummy val */
	if (unlikely(idx & TLB_LKUP_ERR))
		write_aux_reg(ARC_REG_TLBINDEX, 0xa);
#endif

	write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB);
#endif

}

static void tlb_entry_insert(unsigned int pd0, pte_t pd1)
{
	unsigned int idx;

	/*
	 * First verify if entry for this vaddr+ASID already exists
	 * This also sets up PD0 (vaddr, ASID..) for final commit
	 */
	idx = tlb_entry_lkup(pd0);

	/*
	 * If not already present, get a free slot from MMU.
	 * Otherwise, Probe would have located the entry and set INDEX Reg
	 * with existing location. This will cause Write CMD to over-write
	 * existing entry with new PD0 and PD1
	 */
	if (likely(idx & TLB_LKUP_ERR))
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);

	/* setup the other half of TLB entry (pfn, rwx..) */
	write_aux_reg(ARC_REG_TLBPD1, pd1);

	/*
	 * Commit the Entry to MMU
	 * It doesn't sound safe to use the TLBWriteNI cmd here
	 * which doesn't flush uTLBs. I'd rather be safe than sorry.
	 */
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}

#else	/* (CONFIG_ARC_MMU_VER >= 4) */

static void utlb_invalidate(void)
{
	/* No need since uTLB is always in sync with JTLB */
}

static void tlb_entry_erase(unsigned int vaddr_n_asid)
{
	write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid | _PAGE_PRESENT);
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBDeleteEntry);
}

static void tlb_entry_insert(unsigned int pd0, pte_t pd1)
{
	write_aux_reg(ARC_REG_TLBPD0, pd0);
	write_aux_reg(ARC_REG_TLBPD1, pd1);

	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_TLBPD1HI, (u64)pd1 >> 32);

	write_aux_reg(ARC_REG_TLBCOMMAND, TLBInsertEntry);
}

#endif

/*
 * Unconditionally (without lookup) erase the entire MMU contents
 */

noinline void local_flush_tlb_all(void)
{
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
	unsigned long flags;
	unsigned int entry;
	int num_tlb = mmu->sets * mmu->ways;

	local_irq_save(flags);

	/* Load PD0 and PD1 with template for a Blank Entry */
	write_aux_reg(ARC_REG_TLBPD1, 0);

	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_TLBPD1HI, 0);

	write_aux_reg(ARC_REG_TLBPD0, 0);

	for (entry = 0; entry < num_tlb; entry++) {
		/* write this entry to the TLB */
		write_aux_reg(ARC_REG_TLBINDEX, entry);
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
	}

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
		const int stlb_idx = 0x800;

		/* Blank sTLB entry */
		write_aux_reg(ARC_REG_TLBPD0, _PAGE_HW_SZ);

		for (entry = stlb_idx; entry < stlb_idx + 16; entry++) {
			write_aux_reg(ARC_REG_TLBINDEX, entry);
			write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
		}
	}

	utlb_invalidate();

	local_irq_restore(flags);
}
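
/*
 * Sizing example (illustrative): per the programmer's model described near
 * SET_WAY_TO_IDX() below, the J-TLB is organised as 128 sets x {2,4} ways,
 * so the loop above issues num_tlb = 256 or 512 blank writes; super-page
 * (sTLB) indexes start at 0x800 and only 16 of them are wiped.
 */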

/*
 * Flush the entire MM for userland. The fastest way is to move to Next ASID
 */
noinline void local_flush_tlb_mm(struct mm_struct *mm)
{
	/*
	 * Small optimisation courtesy IA64
	 * flush_mm called during fork,exit,munmap etc, multiple times as well.
	 * Only for fork( ) do we need to move parent to a new MMU ctxt,
	 * all other cases are NOPs, hence this check.
	 */
	if (atomic_read(&mm->mm_users) == 0)
		return;

	/*
	 * - Move to a new ASID, but only if the mm is still wired in
	 *   (Android Binder ended up calling this for vma->mm != tsk->mm,
	 *    causing h/w - s/w ASID to get out of sync)
	 * - Also get_new_mmu_context() new implementation allocates a new
	 *   ASID only if it is not allocated already - so unallocate first
	 */
	destroy_context(mm);
	if (current->mm == mm)
		get_new_mmu_context(mm);
}

/*
 * Flush a Range of TLB entries for userland.
 * @start is inclusive, while @end is exclusive
 * Difference between this and Kernel Range Flush is
 *  -Here the fastest way (if range is too large) is to move to next ASID
 *   without doing any explicit Shootdown
 *  -In case of kernel Flush, entry has to be shot down explicitly
 */
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	const unsigned int cpu = smp_processor_id();
	unsigned long flags;

	/* If range @start to @end is more than 32 TLB entries deep,
	 * it's better to move to a new ASID rather than searching for
	 * individual entries and then shooting them down
	 *
	 * The calc above is rough, doesn't account for unaligned parts,
	 * since this is heuristics based anyways
	 */
	if (unlikely((end - start) >= PAGE_SIZE * 32)) {
		local_flush_tlb_mm(vma->vm_mm);
		return;
	}

	/*
	 * @start moved to page start: this alone suffices for checking
	 * loop end condition below, w/o need for aligning @end to end
	 * e.g. 2000 to 4001 will anyhow loop twice
	 */
	start &= PAGE_MASK;

	local_irq_save(flags);

	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
		while (start < end) {
			tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu));
			start += PAGE_SIZE;
		}
	}

	utlb_invalidate();

	local_irq_restore(flags);
}
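
/*
 * Heuristic arithmetic (illustrative): with the typical 8K PAGE_SIZE the
 * 32-entry cutoff above means ranges of 32 * 8K = 256K or more take the
 * "new ASID" path instead of per-page shootdowns.
 */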

/* Flush the kernel TLB entries - vmalloc/modules (Global from MMU perspective)
 *  @start, @end interpreted as kvaddr
 * Interestingly, shared TLB entries can also be flushed using just
 * @start,@end alone (interpreted as user vaddr), although technically SASID
 * is also needed. However our smart TLBProbe lookup takes care of that.
 */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;

	/* exactly same as above, except for TLB entry not taking ASID */

	if (unlikely((end - start) >= PAGE_SIZE * 32)) {
		local_flush_tlb_all();
		return;
	}

	start &= PAGE_MASK;

	local_irq_save(flags);
	while (start < end) {
		tlb_entry_erase(start);
		start += PAGE_SIZE;
	}

	utlb_invalidate();

	local_irq_restore(flags);
}

/*
 * Delete TLB entry in MMU for a given page (??? address)
 * NOTE One TLB entry contains translation for single PAGE
 */

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	const unsigned int cpu = smp_processor_id();
	unsigned long flags;

	/* Note that it is critical that interrupts are DISABLED between
	 * checking the ASID and using it to flush the TLB entry
	 */
	local_irq_save(flags);

	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
		tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu));
		utlb_invalidate();
	}

	local_irq_restore(flags);
}

#ifdef CONFIG_SMP

struct tlb_args {
	struct vm_area_struct *ta_vma;
	unsigned long ta_start;
	unsigned long ta_end;
};

static inline void ipi_flush_tlb_page(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ipi_flush_pmd_tlb_range(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_pmd_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}
#endif

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}

void flush_tlb_all(void)
{
	on_each_cpu((smp_call_func_t)local_flush_tlb_all, NULL, 1);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	on_each_cpu_mask(mm_cpumask(mm), (smp_call_func_t)local_flush_tlb_mm,
			 mm, 1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = uaddr
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = start,
		.ta_end = end
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			 unsigned long end)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = start,
		.ta_end = end
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_pmd_tlb_range, &ta, 1);
}
#endif

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct tlb_args ta = {
		.ta_start = start,
		.ta_end = end
	};

	on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
}
#endif
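
/*
 * Usage sketch (illustrative, caller side in generic mm code - not part of
 * this file): after unmapping a user range, core code invokes the cross-CPU
 * wrapper, e.g.
 *
 *	flush_tlb_range(vma, vma->vm_start, vma->vm_end);
 *
 * which IPIs only the CPUs in mm_cpumask(vma->vm_mm) to run the local_*
 * variant above.
 */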

/*
 * Routine to create a TLB entry
 */
void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *ptep)
{
	unsigned long flags;
	unsigned int asid_or_sasid, rwx;
	unsigned long pd0;
	pte_t pd1;

	/*
	 * create_tlb() assumes that current->mm == vma->mm, since
	 * -the ASID for the TLB entry is fetched from MMU ASID reg (valid for curr)
	 * -it completes the lazy write to SASID reg (again valid for curr tsk)
	 *
	 * Removing the assumption involves
	 * -Using vma->mm->context{ASID,SASID}, as opposed to MMU reg.
	 * -Fix the TLB paranoid debug code to not trigger false negatives.
	 * -More importantly it makes this handler inconsistent with fast-path
	 *  TLB Refill handler which always deals with "current"
	 *
	 * Let's see the use cases when current->mm != vma->mm and we land here
	 *  1. execve->copy_strings()->__get_user_pages->handle_mm_fault
	 *     Here VM wants to pre-install a TLB entry for user stack while
	 *     current->mm still points to pre-execve mm (hence the condition).
	 *     However the stack vaddr is soon relocated (randomization) and
	 *     move_page_tables() tries to undo that TLB entry.
	 *     Thus not creating TLB entry is not any worse.
	 *
	 *  2. ptrace(POKETEXT) causes a CoW - debugger(current) inserting a
	 *     breakpoint in debugged task. Not creating a TLB now is not
	 *     performance critical.
	 *
	 * Both the cases above are not good enough for code churn.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	tlb_paranoid_check(asid_mm(vma->vm_mm, smp_processor_id()), vaddr);

	vaddr &= PAGE_MASK;

	/* update this PTE credentials */
	pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED);

	/* Create HW TLB(PD0,PD1) from PTE */

	/* ASID for this task */
	asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;

	pd0 = vaddr | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0);

	/*
	 * ARC MMU provides fully orthogonal access bits for K/U mode,
	 * however Linux only saves 1 set to save PTE real-estate
	 * Here we convert 3 PTE bits into 6 MMU bits:
	 * -Kernel only entries have Kr Kw Kx 0 0 0
	 * -User entries have mirrored K and U bits
	 */
	rwx = pte_val(*ptep) & PTE_BITS_RWX;

	if (pte_val(*ptep) & _PAGE_GLOBAL)
		rwx <<= 3;		/* r w x => Kr Kw Kx 0 0 0 */
	else
		rwx |= (rwx << 3);	/* r w x => Kr Kw Kx Ur Uw Ux */
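
	/*
	 * Worked example (illustrative): a readable/writable/executable
	 * page has rwx = 0b111 here; a non-global (user) mapping mirrors it
	 * to 0b111111 (Kr Kw Kx Ur Uw Ux) while a _PAGE_GLOBAL (kernel)
	 * mapping shifts it to 0b111000 (Kr Kw Kx 0 0 0).
	 */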

	pd1 = rwx | (pte_val(*ptep) & PTE_BITS_NON_RWX_IN_PD1);

	tlb_entry_insert(pd0, pd1);

	local_irq_restore(flags);
}

/*
 * Called at the end of pagefault, for a userspace mapped page
 *  -pre-install the corresponding TLB entry into MMU
 *  -Finalize the delayed D-cache flush of kernel mapping of page due to
 *	flush_dcache_page(), copy_user_page()
 *
 * Note that flush (when done) involves both WBACK - so physical page is
 * in sync as well as INV - so any non-congruent aliases don't remain
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
		      pte_t *ptep)
{
	unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
	phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK;
	struct page *page = pfn_to_page(pte_pfn(*ptep));

	create_tlb(vma, vaddr, ptep);

	if (page == ZERO_PAGE(0)) {
		return;
	}

	/*
	 * Exec page : Independent of aliasing/page-color considerations,
	 *	       since icache doesn't snoop dcache on ARC, any dirty
	 *	       K-mapping of a code page needs to be wback+inv so that
	 *	       icache fetch by userspace sees code correctly.
	 * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it
	 *	       so userspace sees the right data.
	 *  (Avoids the flush for Non-exec + congruent mapping case)
	 */
	if ((vma->vm_flags & VM_EXEC) ||
	    addr_not_cache_congruent(paddr, vaddr)) {

		int dirty = !test_and_set_bit(PG_dc_clean, &page->flags);
		if (dirty) {
			/* wback + inv dcache lines (K-mapping) */
			__flush_dcache_page(paddr, paddr);

			/* invalidate any existing icache lines (U-mapping) */
			if (vma->vm_flags & VM_EXEC)
				__inv_icache_page(paddr, vaddr);
		}
	}
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/*
 * MMUv4 in HS38x cores supports Super Pages which are basis for Linux THP
 * support.
 *
 * Normal and Super pages can co-exist (of course not overlap) in TLB with a
 * new bit "SZ" in TLB page descriptor to distinguish between them.
 * Super Page size is configurable in hardware (4K to 16M), but fixed once
 * the RTL is built.
 *
 * The exact THP size a Linux configuration will support is a function of:
 *  - MMU page size (typical 8K, RTL fixed)
 *  - software page walker address split between PGD:PTE:PFN (typical
 *    11:8:13, but can be changed with 1 line)
 * So for above default, THP size supported is 8K * (2^8) = 2M
 *
 * Default Page Walker is 2 levels, PGD:PTE:PFN, which in THP regime
 * reduces to 1 level (as PTE is folded into PGD and canonically referred
 * to as PMD).
 * Thus THP PMD accessors are implemented in terms of PTE (just like sparc)
 */
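
/*
 * Arithmetic check (illustrative): with the default split, the PTE level
 * gets 8 index bits, i.e. PTRS_PER_PTE = 256, so one PMD entry spans
 * 256 * 8K = 2M - which is what HPAGE_PMD_SIZE must equal (validated in
 * arc_mmu_init() below).
 */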

void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd)
{
	pte_t pte = __pte(pmd_val(*pmd));
	update_mmu_cache(vma, addr, &pte);
}

void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}

	pte_val(pgtable[0]) = 0;
	pte_val(pgtable[1]) = 0;

	return pgtable;
}
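
/*
 * Pairing sketch (illustrative, caller side in generic mm): a preallocated
 * pgtable is deposited when a huge pmd is installed and withdrawn when the
 * pmd is split or zapped, both under mm->page_table_lock (hence the asserts
 * above):
 *
 *	pgtable_trans_huge_deposit(mm, pmdp, pgtable);
 *	...
 *	pgtable = pgtable_trans_huge_withdraw(mm, pmdp);
 */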

void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			       unsigned long end)
{
	unsigned int cpu;
	unsigned long flags;

	local_irq_save(flags);

	cpu = smp_processor_id();

	if (likely(asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID)) {
		unsigned int asid = hw_pid(vma->vm_mm, cpu);

		/* No need to loop here: this will always be for 1 Huge Page */
		tlb_entry_erase(start | _PAGE_HW_SZ | asid);
	}

	local_irq_restore(flags);
}

#endif

/* Read the MMU Build Configuration Registers, Decode them and save into
 * the cpuinfo structure for later use.
 * No Validation is done here, simply read/convert the BCRs
 */
void read_decode_mmu_bcr(void)
{
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
	unsigned int tmp;
	struct bcr_mmu_1_2 {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int ver:8, ways:4, sets:4, u_itlb:8, u_dtlb:8;
#else
		unsigned int u_dtlb:8, u_itlb:8, sets:4, ways:4, ver:8;
#endif
	} *mmu2;

	struct bcr_mmu_3 {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int ver:8, ways:4, sets:4, res:3, sasid:1, pg_sz:4,
			     u_itlb:4, u_dtlb:4;
#else
		unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, sasid:1, res:3, sets:4,
			     ways:4, ver:8;
#endif
	} *mmu3;

	struct bcr_mmu_4 {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int ver:8, sasid:1, sz1:4, sz0:4, res:2, pae:1,
			     n_ways:2, n_entry:2, n_super:2, u_itlb:3, u_dtlb:3;
#else
		/*	  DTLB      ITLB      JES        JE         JA	*/
		unsigned int u_dtlb:3, u_itlb:3, n_super:2, n_entry:2, n_ways:2,
			     pae:1, res:2, sz0:4, sz1:4, sasid:1, ver:8;
#endif
	} *mmu4;

	tmp = read_aux_reg(ARC_REG_MMU_BCR);
	mmu->ver = (tmp >> 24);

	if (is_isa_arcompact()) {
		if (mmu->ver <= 2) {
			mmu2 = (struct bcr_mmu_1_2 *)&tmp;
			mmu->pg_sz_k = TO_KB(0x2000);
			mmu->sets = 1 << mmu2->sets;
			mmu->ways = 1 << mmu2->ways;
			mmu->u_dtlb = mmu2->u_dtlb;
			mmu->u_itlb = mmu2->u_itlb;
		} else {
			mmu3 = (struct bcr_mmu_3 *)&tmp;
			mmu->pg_sz_k = 1 << (mmu3->pg_sz - 1);
			mmu->sets = 1 << mmu3->sets;
			mmu->ways = 1 << mmu3->ways;
			mmu->u_dtlb = mmu3->u_dtlb;
			mmu->u_itlb = mmu3->u_itlb;
			mmu->sasid = mmu3->sasid;
		}
	} else {
		mmu4 = (struct bcr_mmu_4 *)&tmp;
		mmu->pg_sz_k = 1 << (mmu4->sz0 - 1);
		mmu->s_pg_sz_m = 1 << (mmu4->sz1 - 11);
		mmu->sets = 64 << mmu4->n_entry;
		mmu->ways = mmu4->n_ways * 2;
		mmu->u_dtlb = mmu4->u_dtlb * 4;
		mmu->u_itlb = mmu4->u_itlb * 4;
		mmu->sasid = mmu4->sasid;
		pae_exists = mmu->pae = mmu4->pae;
	}
}
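
/*
 * Decode example (illustrative): an MMUv3 BCR with pg_sz = 4, sets = 7,
 * ways = 1 yields 8K pages (1 << (4 - 1)), 128 sets and 2 ways, i.e. a
 * 256-entry J-TLB.
 */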

char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
{
	int n = 0;
	struct cpuinfo_arc_mmu *p_mmu = &cpuinfo_arc700[cpu_id].mmu;
	char super_pg[64] = "";

	if (p_mmu->s_pg_sz_m)
		scnprintf(super_pg, 64, "%dM Super Page %s",
			  p_mmu->s_pg_sz_m,
			  IS_USED_CFG(CONFIG_TRANSPARENT_HUGEPAGE));

	n += scnprintf(buf + n, len - n,
		      "MMU [v%x]\t: %dk PAGE, %sJTLB %d (%dx%d), uDTLB %d, uITLB %d%s%s\n",
		       p_mmu->ver, p_mmu->pg_sz_k, super_pg,
		       p_mmu->sets * p_mmu->ways, p_mmu->sets, p_mmu->ways,
		       p_mmu->u_dtlb, p_mmu->u_itlb,
		       IS_AVAIL2(p_mmu->pae, ", PAE40 ", CONFIG_ARC_HAS_PAE40));

	return buf;
}

int pae40_exist_but_not_enab(void)
{
	return pae_exists && !is_pae40_enabled();
}

void arc_mmu_init(void)
{
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
	char str[256];
	int compat = 0;

	pr_info("%s", arc_mmu_mumbojumbo(0, str, sizeof(str)));

	/*
	 * Can't be done in processor.h due to header include dependencies
	 */
	BUILD_BUG_ON(!IS_ALIGNED((CONFIG_ARC_KVADDR_SIZE << 20), PMD_SIZE));

	/*
	 * stack top size sanity check,
	 * Can't be done in processor.h due to header include dependencies
	 */
	BUILD_BUG_ON(!IS_ALIGNED(STACK_TOP, PMD_SIZE));

	/*
	 * Ensure that MMU features assumed by kernel exist in hardware.
	 * For older ARC700 cpus, it has to be an exact match, since the MMU
	 * revisions were not backwards compatible (MMUv3 TLB layout changed
	 * so even if a kernel built for v2 didn't use any new cmds of v3, it
	 * would still not work).
	 * For HS cpus, MMUv4 was baseline and v5 is backwards compatible
	 * (will run older software).
	 */
	if (is_isa_arcompact() && mmu->ver == CONFIG_ARC_MMU_VER)
		compat = 1;
	else if (is_isa_arcv2() && mmu->ver >= CONFIG_ARC_MMU_VER)
		compat = 1;

	if (!compat) {
		panic("MMU ver %d doesn't match kernel built for %d...\n",
		      mmu->ver, CONFIG_ARC_MMU_VER);
	}

	if (mmu->pg_sz_k != TO_KB(PAGE_SIZE))
		panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE));

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    mmu->s_pg_sz_m != TO_MB(HPAGE_PMD_SIZE))
		panic("MMU Super pg size != Linux HPAGE_PMD_SIZE (%luM)\n",
		      (unsigned long)TO_MB(HPAGE_PMD_SIZE));

	if (IS_ENABLED(CONFIG_ARC_HAS_PAE40) && !mmu->pae)
		panic("Hardware doesn't support PAE40\n");

	/* Enable the MMU */
	write_aux_reg(ARC_REG_PID, MMU_ENABLE);

	/* In SMP we use this reg for interrupt 1 scratch */
#ifndef CONFIG_SMP
	/* swapper_pg_dir is the pgd for the kernel, used by vmalloc */
	write_aux_reg(ARC_REG_SCRATCH_DATA0, swapper_pg_dir);
#endif

	if (pae40_exist_but_not_enab())
		write_aux_reg(ARC_REG_TLBPD1HI, 0);
}

/*
 * TLB Programmer's Model uses Linear Indexes: 0 to {255, 511} for 128 x {2,4}
 * The mapping is Column-first.
 *		---------------------	-----------
 *		|way0|way1|way2|way3|	|way0|way1|
 *		---------------------	-----------
 * [set0]	|  0 |  1 |  2 |  3 |	|  0 |  1 |
 * [set1]	|  4 |  5 |  6 |  7 |	|  2 |  3 |
 * ~		~	~	~	~	~
 * [set127]	| 508| 509| 510| 511|	| 254| 255|
 *		---------------------	-----------
 * For normal operations we don't (must not) care how the above works since
 * MMU cmd getIndex(vaddr) abstracts that out.
 * However for walking WAYS of a SET, we need to know this
 */
#define SET_WAY_TO_IDX(mmu, set, way)  ((set) * mmu->ways + (way))
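
/*
 * Worked example (illustrative): in a 4-way J-TLB,
 * SET_WAY_TO_IDX(mmu, 1, 2) = 1 * 4 + 2 = 6, matching the table above.
 */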

/* Handling of Duplicate PD (TLB entry) in MMU.
 * -Could be due to buggy customer tapeouts or obscure kernel bugs
 * -MMU complains not at the time of duplicate PD installation, but at the
 *  time of lookup matching multiple ways.
 * -Ideally these should never happen - but if they do - workaround by deleting
 *  the duplicate one.
 * -Knob to be verbose about it. (TODO: hook it up to debugfs)
 */
volatile int dup_pd_silent; /* Be silent about it or complain (default) */

void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
			  struct pt_regs *regs)
{
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
	unsigned int pd0[mmu->ways];
	unsigned long flags;
	int set;

	local_irq_save(flags);

	/* loop thru all sets of TLB */
	for (set = 0; set < mmu->sets; set++) {

		int is_valid, way;

		/* read out all the ways of current set */
		for (way = 0, is_valid = 0; way < mmu->ways; way++) {
			write_aux_reg(ARC_REG_TLBINDEX,
				      SET_WAY_TO_IDX(mmu, set, way));
			write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead);
			pd0[way] = read_aux_reg(ARC_REG_TLBPD0);
			is_valid |= pd0[way] & _PAGE_PRESENT;
			pd0[way] &= PAGE_MASK;
		}

		/* If all the WAYS in SET are empty, skip to next SET */
		if (!is_valid)
			continue;

		/* Scan the set for duplicate ways: needs a nested loop */
		for (way = 0; way < mmu->ways - 1; way++) {

			int n;

			if (!pd0[way])
				continue;

			for (n = way + 1; n < mmu->ways; n++) {
				if (pd0[way] != pd0[n])
					continue;

				if (!dup_pd_silent)
					pr_info("Dup TLB PD0 %08x @ set %d ways %d,%d\n",
						pd0[way], set, way, n);

				/*
				 * clear entry @way and not @n.
				 * This is critical to our optimised loop
				 */
				pd0[way] = 0;
				write_aux_reg(ARC_REG_TLBINDEX,
					      SET_WAY_TO_IDX(mmu, set, way));
				__tlb_entry_erase();
			}
		}
	}

	local_irq_restore(flags);
}

/***********************************************************************
 * Diagnostic Routines
 *  -Called from Low Level TLB Handlers if things don't look good
 **********************************************************************/

#ifdef CONFIG_ARC_DBG_TLB_PARANOIA

/*
 * Low Level ASM TLB handler calls this if it finds that HW and SW ASIDS
 * don't match
 */
void print_asid_mismatch(int mm_asid, int mmu_asid, int is_fast_path)
{
	pr_emerg("ASID Mismatch in %s Path Handler: sw-pid=0x%x hw-pid=0x%x\n",
		 is_fast_path ? "Fast" : "Slow", mm_asid, mmu_asid);

	__asm__ __volatile__("flag 1");
}

void tlb_paranoid_check(unsigned int mm_asid, unsigned long addr)
{
	unsigned int mmu_asid;

	mmu_asid = read_aux_reg(ARC_REG_PID) & 0xff;

	/*
	 * At the time of a TLB miss/installation
	 *   - HW version needs to match SW version
	 *   - SW needs to have a valid ASID
	 */
	if (addr < 0x70000000 &&
	    ((mm_asid == MM_CTXT_NO_ASID) ||
	     (mmu_asid != (mm_asid & MM_CTXT_ASID_MASK))))
		print_asid_mismatch(mm_asid, mmu_asid, 0);
}
#endif