/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>
#include <asm/mmu_context.h>

int split_tlb __ro_after_init;
int dcache_stride __ro_after_init;
int icache_stride __ro_after_init;
EXPORT_SYMBOL(dcache_stride);

void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);

/* Internal implementation in arch/parisc/kernel/pacache.S */
void flush_data_cache_local(void *);	/* flushes local data-cache only */
void flush_instruction_cache_local(void); /* flushes local code-cache only */

/* On some machines (i.e., ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software. We need a spinlock around all TLB flushes to ensure
 * this.
 */
DEFINE_SPINLOCK(pa_tlb_flush_lock);

#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
int pa_serialize_tlb_flushes __ro_after_init;
#endif

struct pdc_cache_info cache_info __ro_after_init;
#ifndef CONFIG_PA20
static struct pdc_btlb_info btlb_info __ro_after_init;
#endif

DEFINE_STATIC_KEY_TRUE(parisc_has_cache);
DEFINE_STATIC_KEY_TRUE(parisc_has_dcache);
DEFINE_STATIC_KEY_TRUE(parisc_has_icache);

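/*
 * Flush the instruction and then the data cache on the local CPU.
 * The parisc_has_* static keys default to true (DEFINE_STATIC_KEY_TRUE
 * above); machines that can skip a flush presumably disable them
 * during early setup, which is not shown in this file.
 */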
static void cache_flush_local_cpu(void *dummy)
{
	if (static_branch_likely(&parisc_has_icache))
		flush_instruction_cache_local();
	if (static_branch_likely(&parisc_has_dcache))
		flush_data_cache_local(NULL);
}

void flush_cache_all_local(void)
{
	cache_flush_local_cpu(NULL);
}

void flush_cache_all(void)
{
	if (static_branch_likely(&parisc_has_cache))
		on_each_cpu(cache_flush_local_cpu, NULL, 1);
}

static inline void flush_data_cache(void)
{
	if (static_branch_likely(&parisc_has_dcache))
		on_each_cpu(flush_data_cache_local, NULL, 1);
}


/* Kernel virtual address of pfn. */
#define pfn_va(pfn) __va(PFN_PHYS(pfn))

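/*
 * Called when the MM layer installs or updates a translation (the
 * exact call sites live in the parisc pgtable headers). If the page
 * was dirtied through a kernel mapping and deferred by
 * flush_dcache_page(), flush it now that a user mapping exists.
 */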
void
__update_cache(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct page *page;

	/* We don't have pte special. As a result, we can be called with
	   an invalid pfn and we don't need to flush the kernel dcache page.
	   This occurs with FireGL card in C8000. */
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (page_mapping_file(page) &&
	    test_bit(PG_dcache_dirty, &page->flags)) {
		flush_kernel_dcache_page_addr(pfn_va(pfn));
		clear_bit(PG_dcache_dirty, &page->flags);
	} else if (parisc_requires_coherency())
		flush_kernel_dcache_page_addr(pfn_va(pfn));
}

void
show_cache_info(struct seq_file *m)
{
	char buf[32];

	seq_printf(m, "I-cache\t\t: %ld KB\n",
		cache_info.ic_size/1024 );
	if (cache_info.dc_loop != 1)
		snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
	seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s, alias=%d)\n",
		cache_info.dc_size/1024,
		(cache_info.dc_conf.cc_wt ? "WT":"WB"),
		(cache_info.dc_conf.cc_sh ? ", shared I/D":""),
		((cache_info.dc_loop == 1) ? "direct mapped" : buf),
		cache_info.dc_conf.cc_alias
	);
	seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
		cache_info.it_size,
		cache_info.dt_size,
		cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
	);

#ifndef CONFIG_PA20
	/* BTLB - Block TLB */
	if (btlb_info.max_size==0) {
		seq_printf(m, "BTLB\t\t: not supported\n" );
	} else {
		seq_printf(m,
		"BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
		"BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
		"BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
		btlb_info.max_size, (int)4096,
		btlb_info.max_size>>8,
		btlb_info.fixed_range_info.num_i,
		btlb_info.fixed_range_info.num_d,
		btlb_info.fixed_range_info.num_comb,
		btlb_info.variable_range_info.num_i,
		btlb_info.variable_range_info.num_d,
		btlb_info.variable_range_info.num_comb
		);
	}
#endif
}

void __init
parisc_cache_init(void)
{
	if (pdc_cache_info(&cache_info) < 0)
		panic("parisc_cache_init: pdc_cache_info failed");

#if 0
	printk("ic_size %lx dc_size %lx it_size %lx\n",
		cache_info.ic_size,
		cache_info.dc_size,
		cache_info.it_size);

	printk("DC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.dc_base,
		cache_info.dc_stride,
		cache_info.dc_count,
		cache_info.dc_loop);

	printk("dc_conf = 0x%lx alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.dc_conf),
		cache_info.dc_conf.cc_alias,
		cache_info.dc_conf.cc_block,
		cache_info.dc_conf.cc_line,
		cache_info.dc_conf.cc_shift);
	printk(" wt %d sh %d cst %d hv %d\n",
		cache_info.dc_conf.cc_wt,
		cache_info.dc_conf.cc_sh,
		cache_info.dc_conf.cc_cst,
		cache_info.dc_conf.cc_hv);

	printk("IC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.ic_base,
		cache_info.ic_stride,
		cache_info.ic_count,
		cache_info.ic_loop);

	printk("IT base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
		cache_info.it_sp_base,
		cache_info.it_sp_stride,
		cache_info.it_sp_count,
		cache_info.it_loop,
		cache_info.it_off_base,
		cache_info.it_off_stride,
		cache_info.it_off_count);

	printk("DT base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
		cache_info.dt_sp_base,
		cache_info.dt_sp_stride,
		cache_info.dt_sp_count,
		cache_info.dt_loop,
		cache_info.dt_off_base,
		cache_info.dt_off_stride,
		cache_info.dt_off_count);

	printk("ic_conf = 0x%lx alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.ic_conf),
		cache_info.ic_conf.cc_alias,
		cache_info.ic_conf.cc_block,
		cache_info.ic_conf.cc_line,
		cache_info.ic_conf.cc_shift);
	printk(" wt %d sh %d cst %d hv %d\n",
		cache_info.ic_conf.cc_wt,
		cache_info.ic_conf.cc_sh,
		cache_info.ic_conf.cc_cst,
		cache_info.ic_conf.cc_hv);

	printk("D-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
		cache_info.dt_conf.tc_sh,
		cache_info.dt_conf.tc_page,
		cache_info.dt_conf.tc_cst,
		cache_info.dt_conf.tc_aid,
		cache_info.dt_conf.tc_sr);

	printk("I-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
		cache_info.it_conf.tc_sh,
		cache_info.it_conf.tc_page,
		cache_info.it_conf.tc_cst,
		cache_info.it_conf.tc_aid,
		cache_info.it_conf.tc_sr);
#endif

	split_tlb = 0;
	if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
		if (cache_info.dt_conf.tc_sh == 2)
			printk(KERN_WARNING "Unexpected TLB configuration. "
				"Will flush I/D separately (could be optimized).\n");

		split_tlb = 1;
	}

	/* "New and Improved" version from Jim Hull
	 *	(1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
	 * The following CAFL_STRIDE is an optimized version, see
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
	 */
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
	dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
	icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE

#ifndef CONFIG_PA20
	if (pdc_btlb_info(&btlb_info) < 0) {
		memset(&btlb_info, 0, sizeof btlb_info);
	}
#endif

	if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
					PDC_MODEL_NVA_UNSUPPORTED) {
		printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
		panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
	}
}

void disable_sr_hashing(void)
{
	int srhash_type, retval;
	unsigned long space_bits;

	switch (boot_cpu_data.cpu_type) {
	case pcx:	/* We shouldn't get this far. setup.c should prevent it. */
		BUG();
		return;

	case pcxs:
	case pcxt:
	case pcxt_:
		srhash_type = SRHASH_PCXST;
		break;

	case pcxl:
		srhash_type = SRHASH_PCXL;
		break;

	case pcxl2:	/* pcxl2 doesn't support space register hashing */
		return;

	default:	/* Currently all PA2.0 machines use the same ins. sequence */
		srhash_type = SRHASH_PA20;
		break;
	}

	disable_sr_hashing_asm(srhash_type);

	retval = pdc_spaceid_bits(&space_bits);
	/* If this procedure isn't implemented, don't panic. */
	if (retval < 0 && retval != PDC_BAD_OPTION)
		panic("pdc_spaceid_bits call failed.\n");
	if (space_bits != 0)
		panic("SpaceID hashing is still on!\n");
}

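/*
 * Flush one page through the flush routines implemented in assembly
 * (see the prototypes near the top of this file), given its physical
 * address and the virtual address it is mapped at; the I-cache is
 * only flushed for executable mappings.
 */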
static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
{
	if (!static_branch_likely(&parisc_has_cache))
		return;
	preempt_disable();
	flush_dcache_page_asm(physaddr, vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
	preempt_enable();
}

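/*
 * Flush a user page through its own user-space mapping. The target
 * mm's context (space register, page table pointer and protection
 * registers, plus the pgd lock register under CONFIG_TLB_PTLOCK) is
 * temporarily installed with interrupts disabled, the page is flushed,
 * and the previous context is restored. The control registers saved
 * here (8, 25, 28) appear to mirror what switch_mm_irqs_off() manages.
 */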
static void flush_user_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	unsigned long flags, space, pgd, prot;
#ifdef CONFIG_TLB_PTLOCK
	unsigned long pgd_lock;
#endif

	vmaddr &= PAGE_MASK;

	preempt_disable();

	/* Set context for flush */
	local_irq_save(flags);
	prot = mfctl(8);
	space = mfsp(SR_USER);
	pgd = mfctl(25);
#ifdef CONFIG_TLB_PTLOCK
	pgd_lock = mfctl(28);
#endif
	switch_mm_irqs_off(NULL, vma->vm_mm, NULL);
	local_irq_restore(flags);

	flush_user_dcache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
	if (vma->vm_flags & VM_EXEC)
		flush_user_icache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
	flush_tlb_page(vma, vmaddr);

	/* Restore previous context */
	local_irq_save(flags);
#ifdef CONFIG_TLB_PTLOCK
	mtctl(pgd_lock, 28);
#endif
	mtctl(pgd, 25);
	mtsp(space, SR_USER);
	mtctl(prot, 8);
	local_irq_restore(flags);

	preempt_enable();
}

static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr)
{
	pte_t *ptep = NULL;
	pgd_t *pgd = mm->pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (!pgd_none(*pgd)) {
		p4d = p4d_offset(pgd, addr);
		if (!p4d_none(*p4d)) {
			pud = pud_offset(p4d, addr);
			if (!pud_none(*pud)) {
				pmd = pmd_offset(pud, addr);
				if (!pmd_none(*pmd))
					ptep = pte_offset_map(pmd, addr);
			}
		}
	}
	return ptep;
}

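/*
 * A page needs flushing only when it is present, has been accessed
 * (so the hardware may actually have cached lines for it) and is
 * cacheable; pages mapped with _PAGE_NO_CACHE have nothing in the
 * cache to flush.
 */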
static inline bool pte_needs_flush(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE))
		== (_PAGE_PRESENT | _PAGE_ACCESSED);
}

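/*
 * Deferred D-cache flushing: if the page's mapping has no user
 * mappings yet, just mark it PG_dcache_dirty and let __update_cache()
 * flush it when a translation is installed. Otherwise flush the
 * kernel mapping now and walk the user mappings of the page; on
 * machines with equivalent aliasing a single flush per cache colour
 * suffices.
 */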
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping_file(page);
	struct vm_area_struct *mpnt;
	unsigned long offset;
	unsigned long addr, old_addr = 0;
	unsigned long count = 0;
	pgoff_t pgoff;

	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_dcache_dirty, &page->flags);
		return;
	}

	flush_kernel_dcache_page_addr(page_address(page));

	if (!mapping)
		return;

	pgoff = page->index;

	/*
	 * We have carefully arranged in arch_get_unmapped_area() that
	 * *any* mappings of a file are always congruently mapped (whether
	 * declared as MAP_PRIVATE or MAP_SHARED), so we only need
	 * to flush one address here for them all to become coherent
	 * on machines that support equivalent aliasing.
	 */
	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		addr = mpnt->vm_start + offset;
		if (parisc_requires_coherency()) {
			pte_t *ptep;

			ptep = get_ptep(mpnt->vm_mm, addr);
			if (ptep && pte_needs_flush(*ptep))
				flush_user_cache_page(mpnt, addr);
		} else {
			/*
			 * The TLB is the engine of coherence on parisc:
			 * The CPU is entitled to speculate any page
			 * with a TLB mapping, so here we kill the
			 * mapping then flush the page along a special
			 * flush-only alias mapping. This guarantees that
			 * the page is no longer in the cache for any
			 * process, nor may it be speculatively read
			 * in (until the user or kernel specifically
			 * accesses it, of course).
			 */
			flush_tlb_page(mpnt, addr);
			if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
					!= (addr & (SHM_COLOUR - 1))) {
				__flush_cache_page(mpnt, addr, page_to_phys(page));
				/*
				 * Software is allowed to have any number
				 * of private mappings to a page.
				 */
				if (!(mpnt->vm_flags & VM_SHARED))
					continue;
				if (old_addr)
					pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
						old_addr, addr, mpnt->vm_file);
				old_addr = addr;
			}
		}
		WARN_ON(++count == 4096);
	}
	flush_dcache_mmap_unlock(mapping);
}
EXPORT_SYMBOL(flush_dcache_page);

/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
static unsigned long parisc_cache_flush_threshold __ro_after_init = FLUSH_THRESHOLD;

#define FLUSH_TLB_THRESHOLD (16*1024) /* 16 KiB minimum TLB threshold */
static unsigned long parisc_tlb_flush_threshold __ro_after_init = ~0UL;

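/*
 * Size the flush thresholds at boot by timing a whole-cache flush
 * against a range flush over the kernel image, using the interval
 * timer (control register 16, read with mfctl(16)) as the clock.
 * Above the resulting thresholds a full cache/TLB flush is cheaper
 * than iterating over the range.
 */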
void __init parisc_setup_cache_timing(void)
{
	unsigned long rangetime, alltime;
	unsigned long size;
	unsigned long threshold, threshold2;

	alltime = mfctl(16);
	flush_data_cache();
	alltime = mfctl(16) - alltime;

	size = (unsigned long)(_end - _text);
	rangetime = mfctl(16);
	flush_kernel_dcache_range((unsigned long)_text, size);
	rangetime = mfctl(16) - rangetime;

	printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	threshold = L1_CACHE_ALIGN((unsigned long)((uint64_t)size * alltime / rangetime));
	pr_info("Calculated flush threshold is %lu KiB\n",
		threshold/1024);

	/*
	 * The threshold computed above isn't very reliable. The following
	 * heuristic works reasonably well on c8000/rp3440.
	 */
	threshold2 = cache_info.dc_size * num_online_cpus();
	parisc_cache_flush_threshold = threshold2;
	printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
		parisc_cache_flush_threshold/1024);

	/* calculate TLB flush threshold */

	/* On SMP machines, skip timing the TLB flush of the kernel text,
	 * which has been mapped with huge pages. */
	if (num_online_cpus() > 1 && !parisc_requires_coherency()) {
		threshold = max(cache_info.it_size, cache_info.dt_size);
		threshold *= PAGE_SIZE;
		threshold /= num_online_cpus();
		goto set_tlb_threshold;
	}

	size = (unsigned long)_end - (unsigned long)_text;
	rangetime = mfctl(16);
	flush_tlb_kernel_range((unsigned long)_text, (unsigned long)_end);
	rangetime = mfctl(16) - rangetime;

	alltime = mfctl(16);
	flush_tlb_all();
	alltime = mfctl(16) - alltime;

	printk(KERN_INFO "Whole TLB flush %lu cycles, Range flush %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	threshold = PAGE_ALIGN((num_online_cpus() * size * alltime) / rangetime);
	printk(KERN_INFO "Calculated TLB flush threshold %lu KiB\n",
		threshold/1024);

set_tlb_threshold:
	if (threshold > FLUSH_TLB_THRESHOLD)
		parisc_tlb_flush_threshold = threshold;
	else
		parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD;

	printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
		parisc_tlb_flush_threshold/1024);
}

extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);

void flush_kernel_dcache_page_addr(const void *addr)
{
	unsigned long flags;

	flush_kernel_dcache_page_asm(addr);
	purge_tlb_start(flags);
	pdtlb(SR_KERNEL, addr);
	purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);

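/*
 * Helper for the copy_*_user_page routines below: look the address up
 * in the mm's page table and flush only when a present, accessed and
 * cacheable translation exists.
 */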
static void flush_cache_page_if_present(struct vm_area_struct *vma,
	unsigned long vmaddr, unsigned long pfn)
{
	pte_t *ptep = get_ptep(vma->vm_mm, vmaddr);

	/*
	 * The pte check is racy and sometimes the flush will trigger
	 * a non-access TLB miss. Hopefully, the page has already been
	 * flushed.
	 */
	if (ptep && pte_needs_flush(*ptep))
		flush_cache_page(vma, vmaddr, pfn);
}

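/*
 * Copy a user page via kernel mappings. parisc has no highmem, so
 * kmap_local_page() presumably resolves to the page's kernel address;
 * the source page is flushed through its user mapping first so that
 * the copy reads current data.
 */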
void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

	kfrom = kmap_local_page(from);
	kto = kmap_local_page(to);
	flush_cache_page_if_present(vma, vaddr, page_to_pfn(from));
	copy_page_asm(kto, kfrom);
	kunmap_local(kto);
	kunmap_local(kfrom);
}

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
	unsigned long user_vaddr, void *dst, void *src, int len)
{
	flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len);
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
	unsigned long user_vaddr, void *dst, void *src, int len)
{
	flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
}

/* __flush_tlb_range()
 *
 * returns 1 if all TLBs were flushed.
 */
int __flush_tlb_range(unsigned long sid, unsigned long start,
		      unsigned long end)
{
	unsigned long flags;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    end - start >= parisc_tlb_flush_threshold) {
		flush_tlb_all();
		return 1;
	}

	/* Purge TLB entries for small ranges using the pdtlb and
	   pitlb instructions. These instructions execute locally
	   but cause a purge request to be broadcast to other TLBs. */
	while (start < end) {
		purge_tlb_start(flags);
		mtsp(sid, SR_TEMP1);
		pdtlb(SR_TEMP1, start);
		pitlb(SR_TEMP1, start);
		purge_tlb_end(flags);
		start += PAGE_SIZE;
	}
	return 0;
}

static void flush_cache_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	unsigned long addr, pfn;
	pte_t *ptep;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		/*
		 * The vma can contain pages that aren't present. Although
		 * the pte search is expensive, we need the pte to find the
		 * page pfn and to check whether the page should be flushed.
		 */
		ptep = get_ptep(vma->vm_mm, addr);
		if (ptep && pte_needs_flush(*ptep)) {
			if (parisc_requires_coherency()) {
				flush_user_cache_page(vma, addr);
			} else {
				pfn = pte_pfn(*ptep);
				if (WARN_ON(!pfn_valid(pfn)))
					return;
				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
			}
		}
	}
}

static inline unsigned long mm_total_size(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long usize = 0;
	VMA_ITERATOR(vmi, mm, 0);

	for_each_vma(vmi, vma) {
		if (usize >= parisc_cache_flush_threshold)
			break;
		usize += vma->vm_end - vma->vm_start;
	}
	return usize;
}

void flush_cache_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	/*
	 * Flushing the whole cache on each cpu takes forever on
	 * rp3440, etc. So, avoid it if the mm isn't too big.
	 *
	 * Note that we must flush the entire cache on machines
	 * with aliasing caches to prevent random segmentation
	 * faults.
	 */
	if (!parisc_requires_coherency()
	    || mm_total_size(mm) >= parisc_cache_flush_threshold) {
		if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
			return;
		flush_tlb_all();
		flush_cache_all();
		return;
	}

	/* Flush mm */
	for_each_vma(vmi, vma)
		flush_cache_pages(vma, vma->vm_start, vma->vm_end);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (!parisc_requires_coherency()
	    || end - start >= parisc_cache_flush_threshold) {
		if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
			return;
		flush_tlb_range(vma, start, end);
		flush_cache_all();
		return;
	}

	flush_cache_pages(vma, start, end);
}

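/*
 * Flush a single user page: through its user mapping on machines that
 * require coherency, otherwise via __flush_cache_page() using the
 * physical address.
 */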
void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
	if (WARN_ON(!pfn_valid(pfn)))
		return;
	if (parisc_requires_coherency())
		flush_user_cache_page(vma, vmaddr);
	else
		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}

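/*
 * Anonymous pages are not covered by the congruence guarantee that
 * arch_get_unmapped_area() provides for file mappings (see
 * flush_dcache_page() above), so they are flushed at their exact user
 * address, or, on coherent machines with a shared mapping, by
 * flushing the whole data cache.
 */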
void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	if (!PageAnon(page))
		return;

	if (parisc_requires_coherency()) {
		if (vma->vm_flags & VM_SHARED)
			flush_data_cache();
		else
			flush_user_cache_page(vma, vmaddr);
		return;
	}

	flush_tlb_page(vma, vmaddr);
	preempt_disable();
	flush_dcache_page_asm(page_to_phys(page), vmaddr);
	preempt_enable();
}

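/*
 * Cache maintenance for vmap/vmalloc aliases of kernel pages,
 * typically used around I/O on vmapped buffers: the flush variant
 * writes back dirty lines, while the invalidate variant discards
 * lines after inbound DMA (first waiting for the DMA to complete
 * with asm_syncdma()).
 */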
void flush_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    (unsigned long)size >= parisc_cache_flush_threshold) {
		flush_tlb_kernel_range(start, end);
		flush_data_cache();
		return;
	}

	flush_kernel_dcache_range_asm(start, end);
	flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(flush_kernel_vmap_range);

void invalidate_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	/* Ensure DMA is complete */
	asm_syncdma();

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    (unsigned long)size >= parisc_cache_flush_threshold) {
		flush_tlb_kernel_range(start, end);
		flush_data_cache();
		return;
	}

	purge_kernel_dcache_range_asm(start, end);
	flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);