/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/syscalls.h>
#include <linux/vmalloc.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>
#include <asm/mmu_context.h>
#include <asm/cachectl.h>

#define PTR_PAGE_ALIGN_DOWN(addr) PTR_ALIGN_DOWN(addr, PAGE_SIZE)

/*
 * When nonzero, use the _PAGE_ACCESSED bit to try to reduce the number
 * of page flushes done by flush_cache_page_if_present. There are some
 * pros and cons to using this option. It may increase the risk of
 * random segmentation faults.
 */
#define CONFIG_FLUSH_PAGE_ACCESSED	0
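
/*
 * With this left at 0, flush_cache_page_if_present() below resolves the
 * physical address with get_upa() instead of walking the page table, and
 * ptep_clear_flush_young() ages a PTE without flushing the page.
 */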

int split_tlb __ro_after_init;
int dcache_stride __ro_after_init;
int icache_stride __ro_after_init;
EXPORT_SYMBOL(dcache_stride);

/* Internal implementation in arch/parisc/kernel/pacache.S */
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_data_cache_local(void *);	/* flushes local data-cache only */
void flush_instruction_cache_local(void); /* flushes local code-cache only */

static void flush_kernel_dcache_page_addr(const void *addr);

/* On some machines (i.e., ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software. We need a spinlock around all TLB flushes to ensure
 * this.
 */
DEFINE_SPINLOCK(pa_tlb_flush_lock);
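/*
 * The lock above is not taken directly in this file; the
 * purge_tlb_start()/purge_tlb_end() helpers from <asm/tlbflush.h>
 * acquire it (when TLB flush serialization is required) around the
 * pdtlb/pitlb sequences used below.
 */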

#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
int pa_serialize_tlb_flushes __ro_after_init;
#endif

struct pdc_cache_info cache_info __ro_after_init;
#ifndef CONFIG_PA20
struct pdc_btlb_info btlb_info;
#endif

DEFINE_STATIC_KEY_TRUE(parisc_has_cache);
DEFINE_STATIC_KEY_TRUE(parisc_has_dcache);
DEFINE_STATIC_KEY_TRUE(parisc_has_icache);

static void cache_flush_local_cpu(void *dummy)
{
	if (static_branch_likely(&parisc_has_icache))
		flush_instruction_cache_local();
	if (static_branch_likely(&parisc_has_dcache))
		flush_data_cache_local(NULL);
}

void flush_cache_all_local(void)
{
	cache_flush_local_cpu(NULL);
}

void flush_cache_all(void)
{
	if (static_branch_likely(&parisc_has_cache))
		on_each_cpu(cache_flush_local_cpu, NULL, 1);
}

static inline void flush_data_cache(void)
{
	if (static_branch_likely(&parisc_has_dcache))
		on_each_cpu(flush_data_cache_local, NULL, 1);
}


/* Kernel virtual address of pfn. */
#define pfn_va(pfn)	__va(PFN_PHYS(pfn))

void __update_cache(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct folio *folio;
	unsigned int nr;

	/* We don't have pte special. As a result, we can be called with
	   an invalid pfn and we don't need to flush the kernel dcache page.
	   This occurs with the FireGL card in the C8000. */
	if (!pfn_valid(pfn))
		return;

	folio = page_folio(pfn_to_page(pfn));
	pfn = folio_pfn(folio);
	nr = folio_nr_pages(folio);
	if (folio_flush_mapping(folio) &&
	    test_bit(PG_dcache_dirty, &folio->flags)) {
		while (nr--)
			flush_kernel_dcache_page_addr(pfn_va(pfn + nr));
		clear_bit(PG_dcache_dirty, &folio->flags);
	} else if (parisc_requires_coherency())
		while (nr--)
			flush_kernel_dcache_page_addr(pfn_va(pfn + nr));
}

void
show_cache_info(struct seq_file *m)
{
	char buf[32];

	seq_printf(m, "I-cache\t\t: %ld KB\n",
		cache_info.ic_size/1024 );
	if (cache_info.dc_loop != 1)
		snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
	seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s, alias=%d)\n",
		cache_info.dc_size/1024,
		(cache_info.dc_conf.cc_wt ? "WT":"WB"),
		(cache_info.dc_conf.cc_sh ? ", shared I/D":""),
		((cache_info.dc_loop == 1) ? "direct mapped" : buf),
		cache_info.dc_conf.cc_alias
	);
	seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
		cache_info.it_size,
		cache_info.dt_size,
		cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
	);

#ifndef CONFIG_PA20
	/* BTLB - Block TLB */
	if (btlb_info.max_size==0) {
		seq_printf(m, "BTLB\t\t: not supported\n" );
	} else {
		seq_printf(m,
		"BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
		"BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
		"BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
		btlb_info.max_size, (int)4096,
		btlb_info.max_size>>8,
		btlb_info.fixed_range_info.num_i,
		btlb_info.fixed_range_info.num_d,
		btlb_info.fixed_range_info.num_comb,
		btlb_info.variable_range_info.num_i,
		btlb_info.variable_range_info.num_d,
		btlb_info.variable_range_info.num_comb
		);
	}
#endif
}

void __init
parisc_cache_init(void)
{
	if (pdc_cache_info(&cache_info) < 0)
		panic("parisc_cache_init: pdc_cache_info failed");

#if 0
	printk("ic_size %lx dc_size %lx it_size %lx\n",
		cache_info.ic_size,
		cache_info.dc_size,
		cache_info.it_size);

	printk("DC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.dc_base,
		cache_info.dc_stride,
		cache_info.dc_count,
		cache_info.dc_loop);

	printk("dc_conf = 0x%lx alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.dc_conf),
		cache_info.dc_conf.cc_alias,
		cache_info.dc_conf.cc_block,
		cache_info.dc_conf.cc_line,
		cache_info.dc_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.dc_conf.cc_wt,
		cache_info.dc_conf.cc_sh,
		cache_info.dc_conf.cc_cst,
		cache_info.dc_conf.cc_hv);

	printk("IC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.ic_base,
		cache_info.ic_stride,
		cache_info.ic_count,
		cache_info.ic_loop);

	printk("IT base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
		cache_info.it_sp_base,
		cache_info.it_sp_stride,
		cache_info.it_sp_count,
		cache_info.it_loop,
		cache_info.it_off_base,
		cache_info.it_off_stride,
		cache_info.it_off_count);

	printk("DT base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
		cache_info.dt_sp_base,
		cache_info.dt_sp_stride,
		cache_info.dt_sp_count,
		cache_info.dt_loop,
		cache_info.dt_off_base,
		cache_info.dt_off_stride,
		cache_info.dt_off_count);

	printk("ic_conf = 0x%lx alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.ic_conf),
		cache_info.ic_conf.cc_alias,
		cache_info.ic_conf.cc_block,
		cache_info.ic_conf.cc_line,
		cache_info.ic_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.ic_conf.cc_wt,
		cache_info.ic_conf.cc_sh,
		cache_info.ic_conf.cc_cst,
		cache_info.ic_conf.cc_hv);

	printk("D-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
		cache_info.dt_conf.tc_sh,
		cache_info.dt_conf.tc_page,
		cache_info.dt_conf.tc_cst,
		cache_info.dt_conf.tc_aid,
		cache_info.dt_conf.tc_sr);

	printk("I-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
		cache_info.it_conf.tc_sh,
		cache_info.it_conf.tc_page,
		cache_info.it_conf.tc_cst,
		cache_info.it_conf.tc_aid,
		cache_info.it_conf.tc_sr);
#endif

	split_tlb = 0;
	if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
		if (cache_info.dt_conf.tc_sh == 2)
			printk(KERN_WARNING "Unexpected TLB configuration. "
				"Will flush I/D separately (could be optimized).\n");

		split_tlb = 1;
	}

	/* "New and Improved" version from Jim Hull
	 *	(1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
	 * The following CAFL_STRIDE is an optimized version, see
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
	 */
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
	dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
	icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE
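	/*
	 * Worked example with made-up PDC values cc_line = 4,
	 * cc_block = 1, cc_shift = 2: Jim Hull's formula gives
	 * (1 << (1 - 1)) * (4 << (4 + 2)) = 256 and the optimized form
	 * gives 4 << (3 + 1 + 2) = 256 as well; in general
	 * (1 << (cc_block - 1)) * (cc_line << (4 + cc_shift))
	 *	== cc_line << (3 + cc_block + cc_shift).
	 */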

	/* stride needs to be non-zero, otherwise cache flushes will not work */
	WARN_ON(cache_info.dc_size && dcache_stride == 0);
	WARN_ON(cache_info.ic_size && icache_stride == 0);

	if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
						PDC_MODEL_NVA_UNSUPPORTED) {
		printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
		panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
	}
}

void disable_sr_hashing(void)
{
	int srhash_type, retval;
	unsigned long space_bits;

	switch (boot_cpu_data.cpu_type) {
	case pcx:	/* We shouldn't get this far. setup.c should prevent it. */
		BUG();
		return;

	case pcxs:
	case pcxt:
	case pcxt_:
		srhash_type = SRHASH_PCXST;
		break;

	case pcxl:
		srhash_type = SRHASH_PCXL;
		break;

	case pcxl2:	/* pcxl2 doesn't support space register hashing */
		return;

	default:	/* Currently all PA2.0 machines use the same ins. sequence */
		srhash_type = SRHASH_PA20;
		break;
	}

	disable_sr_hashing_asm(srhash_type);

	retval = pdc_spaceid_bits(&space_bits);
	/* If this procedure isn't implemented, don't panic. */
	if (retval < 0 && retval != PDC_BAD_OPTION)
		panic("pdc_spaceid_bits call failed.\n");
	if (space_bits != 0)
		panic("SpaceID hashing is still on!\n");
}

static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
{
	if (!static_branch_likely(&parisc_has_cache))
		return;

	/*
	 * The TLB is the engine of coherence on parisc. The CPU is
	 * entitled to speculate any page with a TLB mapping, so here
	 * we kill the mapping then flush the page along a special flush
	 * only alias mapping. This guarantees that the page is no longer
	 * in the cache for any process, nor may it be speculatively
	 * read in (until the user or kernel specifically accesses it,
	 * of course).
	 */
	flush_tlb_page(vma, vmaddr);

	preempt_disable();
	flush_dcache_page_asm(physaddr, vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
	preempt_enable();
}

static void flush_kernel_dcache_page_addr(const void *addr)
{
	unsigned long vaddr = (unsigned long)addr;
	unsigned long flags;

	/* Purge TLB entry to remove translation on all CPUs */
	purge_tlb_start(flags);
	pdtlb(SR_KERNEL, addr);
	purge_tlb_end(flags);

	/* Use tmpalias flush to prevent data cache move-in */
	preempt_disable();
	flush_dcache_page_asm(__pa(vaddr), vaddr);
	preempt_enable();
}

static void flush_kernel_icache_page_addr(const void *addr)
{
	unsigned long vaddr = (unsigned long)addr;
	unsigned long flags;

	/* Purge TLB entry to remove translation on all CPUs */
	purge_tlb_start(flags);
	pdtlb(SR_KERNEL, addr);
	purge_tlb_end(flags);

	/* Use tmpalias flush to prevent instruction cache move-in */
	preempt_disable();
	flush_icache_page_asm(__pa(vaddr), vaddr);
	preempt_enable();
}

void kunmap_flush_on_unmap(const void *addr)
{
	flush_kernel_dcache_page_addr(addr);
}
EXPORT_SYMBOL(kunmap_flush_on_unmap);

void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
		unsigned int nr)
{
	void *kaddr = page_address(page);

	for (;;) {
		flush_kernel_dcache_page_addr(kaddr);
		flush_kernel_icache_page_addr(kaddr);
		if (--nr == 0)
			break;
		kaddr += PAGE_SIZE;
	}
}

/*
 * Walk page directory for MM to find PTEP pointer for address ADDR.
 */
static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr)
{
	pte_t *ptep = NULL;
	pgd_t *pgd = mm->pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (!pgd_none(*pgd)) {
		p4d = p4d_offset(pgd, addr);
		if (!p4d_none(*p4d)) {
			pud = pud_offset(p4d, addr);
			if (!pud_none(*pud)) {
				pmd = pmd_offset(pud, addr);
				if (!pmd_none(*pmd))
					ptep = pte_offset_map(pmd, addr);
			}
		}
	}
	return ptep;
}

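/*
 * Only a PTE that is present and marked accessed can have cache lines
 * associated with it, and uncacheable (_PAGE_NO_CACHE) mappings never
 * allocate cache lines in the first place, so such pages need no flush.
 */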
static inline bool pte_needs_flush(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE))
		== (_PAGE_PRESENT | _PAGE_ACCESSED);
}

/*
 * Return user physical address. Returns 0 if page is not present.
 */
static inline unsigned long get_upa(struct mm_struct *mm, unsigned long addr)
{
	unsigned long flags, space, pgd, prot, pa;
#ifdef CONFIG_TLB_PTLOCK
	unsigned long pgd_lock;
#endif

	/* Save context */
	local_irq_save(flags);
	prot = mfctl(8);
	space = mfsp(SR_USER);
	pgd = mfctl(25);
#ifdef CONFIG_TLB_PTLOCK
	pgd_lock = mfctl(28);
#endif

	/* Set context for lpa_user */
	switch_mm_irqs_off(NULL, mm, NULL);
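	/*
	 * lpa_user() wraps the PA-RISC LPA (Load Physical Address)
	 * instruction, executed here with the target mm's context
	 * installed; its exception fixup yields 0 when no translation
	 * exists, hence "0 if page is not present" above.
	 */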
	pa = lpa_user(addr);

	/* Restore previous context */
#ifdef CONFIG_TLB_PTLOCK
	mtctl(pgd_lock, 28);
#endif
	mtctl(pgd, 25);
	mtsp(space, SR_USER);
	mtctl(prot, 8);
	local_irq_restore(flags);

	return pa;
}

void flush_dcache_folio(struct folio *folio)
{
	struct address_space *mapping = folio_flush_mapping(folio);
	struct vm_area_struct *vma;
	unsigned long addr, old_addr = 0;
	void *kaddr;
	unsigned long count = 0;
	unsigned long i, nr, flags;
	pgoff_t pgoff;

	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_dcache_dirty, &folio->flags);
		return;
	}

	nr = folio_nr_pages(folio);
	kaddr = folio_address(folio);
	for (i = 0; i < nr; i++)
		flush_kernel_dcache_page_addr(kaddr + i * PAGE_SIZE);

	if (!mapping)
		return;

	pgoff = folio->index;

	/*
	 * We have carefully arranged in arch_get_unmapped_area() that
	 * *any* mappings of a file are always congruently mapped (whether
	 * declared as MAP_PRIVATE or MAP_SHARED), so we only need
	 * to flush one address here for them all to become coherent
	 * on machines that support equivalent aliasing.
	 */
	flush_dcache_mmap_lock_irqsave(mapping, flags);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff + nr - 1) {
		unsigned long offset = pgoff - vma->vm_pgoff;
		unsigned long pfn = folio_pfn(folio);

		addr = vma->vm_start;
		nr = folio_nr_pages(folio);
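		/*
		 * offset is unsigned: when the folio begins before this
		 * VMA (a signed offset would be negative) it wraps to a
		 * huge value, so "offset > -nr" detects that partial
		 * overlap and the leading folio pages are skipped
		 * instead of advancing addr.
		 */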
		if (offset > -nr) {
			pfn -= offset;
			nr += offset;
		} else {
			addr += offset * PAGE_SIZE;
		}
		if (addr + nr * PAGE_SIZE > vma->vm_end)
			nr = (vma->vm_end - addr) / PAGE_SIZE;

		if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
					!= (addr & (SHM_COLOUR - 1))) {
			for (i = 0; i < nr; i++)
				__flush_cache_page(vma,
					addr + i * PAGE_SIZE,
					(pfn + i) * PAGE_SIZE);
			/*
			 * Software is allowed to have any number
			 * of private mappings to a page.
			 */
			if (!(vma->vm_flags & VM_SHARED))
				continue;
			if (old_addr)
				pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
					old_addr, addr, vma->vm_file);
			if (nr == folio_nr_pages(folio))
				old_addr = addr;
		}
		WARN_ON(++count == 4096);
	}
	flush_dcache_mmap_unlock_irqrestore(mapping, flags);
}
EXPORT_SYMBOL(flush_dcache_folio);

/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
static unsigned long parisc_cache_flush_threshold __ro_after_init = FLUSH_THRESHOLD;

#define FLUSH_TLB_THRESHOLD (16*1024) /* 16 KiB minimum TLB threshold */
static unsigned long parisc_tlb_flush_threshold __ro_after_init = ~0UL;

void __init parisc_setup_cache_timing(void)
{
	unsigned long rangetime, alltime;
	unsigned long size;
	unsigned long threshold, threshold2;

	alltime = mfctl(16);
	flush_data_cache();
	alltime = mfctl(16) - alltime;

	size = (unsigned long)(_end - _text);
	rangetime = mfctl(16);
	flush_kernel_dcache_range((unsigned long)_text, size);
	rangetime = mfctl(16) - rangetime;

	printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
		alltime, size, rangetime);

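	/*
	 * Break-even point: a range flush costs roughly rangetime/size
	 * cycles per byte while a full flush is a fixed alltime cycles,
	 * so ranges larger than size * alltime / rangetime bytes are
	 * cheaper to handle with a full flush.
	 */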
	threshold = L1_CACHE_ALIGN((unsigned long)((uint64_t)size * alltime / rangetime));
	pr_info("Calculated flush threshold is %lu KiB\n",
		threshold/1024);

	/*
	 * The threshold computed above isn't very reliable. The following
	 * heuristic works reasonably well on c8000/rp3440.
	 */
	threshold2 = cache_info.dc_size * num_online_cpus();
	parisc_cache_flush_threshold = threshold2;
	printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
		parisc_cache_flush_threshold/1024);

	/* calculate TLB flush threshold */

	/* On SMP machines, skip the TLB measure of kernel text which
	 * has been mapped as huge pages. */
	if (num_online_cpus() > 1 && !parisc_requires_coherency()) {
		threshold = max(cache_info.it_size, cache_info.dt_size);
		threshold *= PAGE_SIZE;
		threshold /= num_online_cpus();
		goto set_tlb_threshold;
	}

	size = (unsigned long)_end - (unsigned long)_text;
	rangetime = mfctl(16);
	flush_tlb_kernel_range((unsigned long)_text, (unsigned long)_end);
	rangetime = mfctl(16) - rangetime;

	alltime = mfctl(16);
	flush_tlb_all();
	alltime = mfctl(16) - alltime;

	printk(KERN_INFO "Whole TLB flush %lu cycles, Range flush %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	threshold = PAGE_ALIGN((num_online_cpus() * size * alltime) / rangetime);
	printk(KERN_INFO "Calculated TLB flush threshold %lu KiB\n",
		threshold/1024);

set_tlb_threshold:
	parisc_tlb_flush_threshold = max(threshold, FLUSH_TLB_THRESHOLD);
	printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
		parisc_tlb_flush_threshold/1024);
}

extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);

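/*
 * Flush a user cache page only when a flush is actually needed: with
 * CONFIG_FLUSH_PAGE_ACCESSED the (racy) PTE state decides, otherwise
 * the page is flushed whenever get_upa() reports a valid translation.
 */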
static void flush_cache_page_if_present(struct vm_area_struct *vma,
	unsigned long vmaddr)
{
#if CONFIG_FLUSH_PAGE_ACCESSED
	bool needs_flush = false;
	pte_t *ptep, pte;

	ptep = get_ptep(vma->vm_mm, vmaddr);
	if (ptep) {
		pte = ptep_get(ptep);
		needs_flush = pte_needs_flush(pte);
		pte_unmap(ptep);
	}
	if (needs_flush)
		__flush_cache_page(vma, vmaddr, PFN_PHYS(pte_pfn(pte)));
#else
	struct mm_struct *mm = vma->vm_mm;
	unsigned long physaddr = get_upa(mm, vmaddr);

	if (physaddr)
		__flush_cache_page(vma, vmaddr, PAGE_ALIGN_DOWN(physaddr));
#endif
}

void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

	kfrom = kmap_local_page(from);
	kto = kmap_local_page(to);
	__flush_cache_page(vma, vaddr, PFN_PHYS(page_to_pfn(from)));
	copy_page_asm(kto, kfrom);
	kunmap_local(kto);
	kunmap_local(kfrom);
}

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
	unsigned long user_vaddr, void *dst, void *src, int len)
{
	__flush_cache_page(vma, user_vaddr, PFN_PHYS(page_to_pfn(page)));
	memcpy(dst, src, len);
	flush_kernel_dcache_page_addr(PTR_PAGE_ALIGN_DOWN(dst));
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
	unsigned long user_vaddr, void *dst, void *src, int len)
{
	__flush_cache_page(vma, user_vaddr, PFN_PHYS(page_to_pfn(page)));
	memcpy(dst, src, len);
	flush_kernel_dcache_page_addr(PTR_PAGE_ALIGN_DOWN(src));
}

/* __flush_tlb_range()
 *
 * returns 1 if all TLBs were flushed.
 */
int __flush_tlb_range(unsigned long sid, unsigned long start,
		      unsigned long end)
{
	unsigned long flags;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    end - start >= parisc_tlb_flush_threshold) {
		flush_tlb_all();
		return 1;
	}

	/* Purge TLB entries for small ranges using the pdtlb and
	   pitlb instructions. These instructions execute locally
	   but cause a purge request to be broadcast to other TLBs. */
	while (start < end) {
		purge_tlb_start(flags);
		mtsp(sid, SR_TEMP1);
		pdtlb(SR_TEMP1, start);
		pitlb(SR_TEMP1, start);
		purge_tlb_end(flags);
		start += PAGE_SIZE;
	}
	return 0;
}

static void flush_cache_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE)
		flush_cache_page_if_present(vma, addr);
}

static inline unsigned long mm_total_size(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long usize = 0;
	VMA_ITERATOR(vmi, mm, 0);

	for_each_vma(vmi, vma) {
		if (usize >= parisc_cache_flush_threshold)
			break;
		usize += vma->vm_end - vma->vm_start;
	}
	return usize;
}

void flush_cache_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	/*
	 * Flushing the whole cache on each cpu takes forever on
	 * rp3440, etc. So, avoid it if the mm isn't too big.
	 *
	 * Note that we must flush the entire cache on machines
	 * with aliasing caches to prevent random segmentation
	 * faults.
	 */
	if (!parisc_requires_coherency()
	    || mm_total_size(mm) >= parisc_cache_flush_threshold) {
		if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
			return;
		flush_tlb_all();
		flush_cache_all();
		return;
	}

	/* Flush mm */
	for_each_vma(vmi, vma)
		flush_cache_pages(vma, vma->vm_start, vma->vm_end);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (!parisc_requires_coherency()
	    || end - start >= parisc_cache_flush_threshold) {
		if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
			return;
		flush_tlb_range(vma, start, end);
		if (vma->vm_flags & VM_EXEC)
			flush_cache_all();
		else
			flush_data_cache();
		return;
	}

	flush_cache_pages(vma, start & PAGE_MASK, end);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
	__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	if (!PageAnon(page))
		return;

	__flush_cache_page(vma, vmaddr, PFN_PHYS(page_to_pfn(page)));
}

int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long addr,
			   pte_t *ptep)
{
	pte_t pte = ptep_get(ptep);

	if (!pte_young(pte))
		return 0;
	set_pte(ptep, pte_mkold(pte));
#if CONFIG_FLUSH_PAGE_ACCESSED
	__flush_cache_page(vma, addr, PFN_PHYS(pte_pfn(pte)));
#endif
	return 1;
}

/*
 * After a PTE is cleared, we have no way to flush the cache for
 * the physical page. On PA8800 and PA8900 processors, these lines
 * can cause random cache corruption. Thus, we must flush the cache
 * as well as the TLB when clearing a PTE that's valid.
 */
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr,
		       pte_t *ptep)
{
	struct mm_struct *mm = (vma)->vm_mm;
	pte_t pte = ptep_get_and_clear(mm, addr, ptep);
	unsigned long pfn = pte_pfn(pte);

	if (pfn_valid(pfn))
		__flush_cache_page(vma, addr, PFN_PHYS(pfn));
	else if (pte_accessible(mm, pte))
		flush_tlb_page(vma, addr);

	return pte;
}

/*
 * The physical address for pages in the ioremap case can be obtained
 * from the vm_struct struct. I wasn't able to successfully handle the
 * vmalloc and vmap cases. We have an array of struct page pointers in
 * the uninitialized vmalloc case, but the flush failed when using
 * page_to_pfn.
 */
void flush_cache_vmap(unsigned long start, unsigned long end)
{
	unsigned long addr, physaddr;
	struct vm_struct *vm;

	/* Prevent cache move-in */
	flush_tlb_kernel_range(start, end);

	if (end - start >= parisc_cache_flush_threshold) {
		flush_cache_all();
		return;
	}

	if (WARN_ON_ONCE(!is_vmalloc_addr((void *)start))) {
		flush_cache_all();
		return;
	}

	vm = find_vm_area((void *)start);
	if (WARN_ON_ONCE(!vm)) {
		flush_cache_all();
		return;
	}

	/* The physical addresses of IOREMAP regions are contiguous */
	if (vm->flags & VM_IOREMAP) {
		physaddr = vm->phys_addr;
		for (addr = start; addr < end; addr += PAGE_SIZE) {
			preempt_disable();
			flush_dcache_page_asm(physaddr, addr);
			flush_icache_page_asm(physaddr, addr);
			preempt_enable();
			physaddr += PAGE_SIZE;
		}
		return;
	}

	flush_cache_all();
}
EXPORT_SYMBOL(flush_cache_vmap);

/*
 * The vm_struct has been retired and the page table is set up. The
 * last page in the range is a guard page. Its physical address can't
 * be determined using lpa, so there is no way to flush the range
 * using flush_dcache_page_asm.
 */
void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	/* Prevent cache move-in */
	flush_tlb_kernel_range(start, end);
	flush_data_cache();
}
EXPORT_SYMBOL(flush_cache_vunmap);

/*
 * On systems with PA8800/PA8900 processors, there is no way to flush
 * a vmap range other than using the architected loop to flush the
 * entire cache. The page directory is not set up, so we can't use
 * fdc, etc. FDCE/FICE don't work to flush a portion of the cache.
 * L2 is physically indexed but FDCE/FICE instructions in virtual
 * mode output their virtual address on the core bus, not their
 * real address. As a result, the L2 cache index formed from the
 * virtual address will most likely not be the same as the L2 index
 * formed from the real address.
 */
void flush_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	flush_tlb_kernel_range(start, end);

	if (!static_branch_likely(&parisc_has_dcache))
		return;

	/* If interrupts are disabled, we can only do local flush */
	if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) {
		flush_data_cache_local(NULL);
		return;
	}

	flush_data_cache();
}
EXPORT_SYMBOL(flush_kernel_vmap_range);

void invalidate_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	/* Ensure DMA is complete */
	asm_syncdma();

	flush_tlb_kernel_range(start, end);

	if (!static_branch_likely(&parisc_has_dcache))
		return;

	/* If interrupts are disabled, we can only do local flush */
	if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) {
		flush_data_cache_local(NULL);
		return;
	}

	flush_data_cache();
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);


SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
	unsigned int, cache)
{
	unsigned long start, end;
	ASM_EXCEPTIONTABLE_VAR(error);

	if (bytes == 0)
		return 0;
	if (!access_ok((void __user *) addr, bytes))
		return -EFAULT;

	end = addr + bytes;

	if (cache & DCACHE) {
		start = addr;
		__asm__ __volatile__ (
#ifdef CONFIG_64BIT
		"1: cmpb,*<<,n	%0,%2,1b\n"
#else
		"1: cmpb,<<,n	%0,%2,1b\n"
#endif
		"   fdc,m	%3(%4,%0)\n"
		"2: sync\n"
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b, "%1")
		: "+r" (start), "+r" (error)
		: "r" (end), "r" (dcache_stride), "i" (SR_USER));
	}

	if (cache & ICACHE && error == 0) {
		start = addr;
		__asm__ __volatile__ (
#ifdef CONFIG_64BIT
		"1: cmpb,*<<,n	%0,%2,1b\n"
#else
		"1: cmpb,<<,n	%0,%2,1b\n"
#endif
		"   fic,m	%3(%4,%0)\n"
		"2: sync\n"
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b, "%1")
		: "+r" (start), "+r" (error)
		: "r" (end), "r" (icache_stride), "i" (SR_USER));
	}

	return error;
}
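
/*
 * Illustrative userspace usage (a sketch, not part of this file): a
 * JIT that has written code into buf could request write-back of the
 * data cache and invalidation of the instruction cache for that range
 * with the DCACHE/ICACHE flags from <asm/cachectl.h>:
 *
 *	syscall(__NR_cacheflush, (unsigned long)buf, len, DCACHE | ICACHE);
 */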