/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>

int split_tlb __ro_after_init;
int dcache_stride __ro_after_init;
int icache_stride __ro_after_init;
EXPORT_SYMBOL(dcache_stride);

void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);

/* On some machines (i.e., ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software.  We need a spinlock around all TLB flushes to ensure
 * this.
 */
DEFINE_SPINLOCK(pa_tlb_flush_lock);

/* Swapper page setup lock. */
DEFINE_SPINLOCK(pa_swapper_pg_lock);

#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
int pa_serialize_tlb_flushes __ro_after_init;
#endif

struct pdc_cache_info cache_info __ro_after_init;
#ifndef CONFIG_PA20
static struct pdc_btlb_info btlb_info __ro_after_init;
#endif

#ifdef CONFIG_SMP
void
flush_data_cache(void)
{
	on_each_cpu(flush_data_cache_local, NULL, 1);
}

void
flush_instruction_cache(void)
{
	on_each_cpu(flush_instruction_cache_local, NULL, 1);
}
#endif

void
flush_cache_all_local(void)
{
	flush_instruction_cache_local(NULL);
	flush_data_cache_local(NULL);
}
EXPORT_SYMBOL(flush_cache_all_local);

/* Virtual address of pfn. */
#define pfn_va(pfn)	__va(PFN_PHYS(pfn))

void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct page *page;

	/* We don't have pte special.  As a result, we can be called with
	   an invalid pfn and we don't need to flush the kernel dcache page.
	   This occurs with the FireGL card in a C8000. */
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (page_mapping_file(page) &&
	    test_bit(PG_dcache_dirty, &page->flags)) {
		flush_kernel_dcache_page_addr(pfn_va(pfn));
		clear_bit(PG_dcache_dirty, &page->flags);
	} else if (parisc_requires_coherency())
		flush_kernel_dcache_page_addr(pfn_va(pfn));
}

void
show_cache_info(struct seq_file *m)
{
	char buf[32];

	seq_printf(m, "I-cache\t\t: %ld KB\n",
		cache_info.ic_size/1024);
	if (cache_info.dc_loop != 1)
		snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
	seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n",
		cache_info.dc_size/1024,
		(cache_info.dc_conf.cc_wt ? "WT":"WB"),
		(cache_info.dc_conf.cc_sh ? ", shared I/D":""),
		((cache_info.dc_loop == 1) ? "direct mapped" : buf));
	seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
		cache_info.it_size,
		cache_info.dt_size,
		cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
	);

#ifndef CONFIG_PA20
	/* BTLB - Block TLB */
	if (btlb_info.max_size == 0) {
		seq_printf(m, "BTLB\t\t: not supported\n");
	} else {
		seq_printf(m,
		"BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
		"BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
		"BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
		btlb_info.max_size, (int)4096,
		btlb_info.max_size>>8,
		btlb_info.fixed_range_info.num_i,
		btlb_info.fixed_range_info.num_d,
		btlb_info.fixed_range_info.num_comb,
		btlb_info.variable_range_info.num_i,
		btlb_info.variable_range_info.num_d,
		btlb_info.variable_range_info.num_comb
		);
	}
#endif
}

void __init
parisc_cache_init(void)
{
	if (pdc_cache_info(&cache_info) < 0)
		panic("parisc_cache_init: pdc_cache_info failed");

#if 0
	printk("ic_size %lx dc_size %lx it_size %lx\n",
		cache_info.ic_size,
		cache_info.dc_size,
		cache_info.it_size);

	printk("DC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.dc_base,
		cache_info.dc_stride,
		cache_info.dc_count,
		cache_info.dc_loop);

	printk("dc_conf = 0x%lx alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.dc_conf),
		cache_info.dc_conf.cc_alias,
		cache_info.dc_conf.cc_block,
		cache_info.dc_conf.cc_line,
		cache_info.dc_conf.cc_shift);
	printk(" wt %d sh %d cst %d hv %d\n",
		cache_info.dc_conf.cc_wt,
		cache_info.dc_conf.cc_sh,
		cache_info.dc_conf.cc_cst,
		cache_info.dc_conf.cc_hv);

	printk("IC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.ic_base,
		cache_info.ic_stride,
		cache_info.ic_count,
		cache_info.ic_loop);

	printk("IT base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
		cache_info.it_sp_base,
		cache_info.it_sp_stride,
		cache_info.it_sp_count,
		cache_info.it_loop,
		cache_info.it_off_base,
		cache_info.it_off_stride,
		cache_info.it_off_count);

	printk("DT base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
		cache_info.dt_sp_base,
		cache_info.dt_sp_stride,
		cache_info.dt_sp_count,
		cache_info.dt_loop,
		cache_info.dt_off_base,
		cache_info.dt_off_stride,
		cache_info.dt_off_count);

	printk("ic_conf = 0x%lx alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.ic_conf),
		cache_info.ic_conf.cc_alias,
		cache_info.ic_conf.cc_block,
		cache_info.ic_conf.cc_line,
		cache_info.ic_conf.cc_shift);
	printk(" wt %d sh %d cst %d hv %d\n",
		cache_info.ic_conf.cc_wt,
		cache_info.ic_conf.cc_sh,
		cache_info.ic_conf.cc_cst,
		cache_info.ic_conf.cc_hv);

	printk("D-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
		cache_info.dt_conf.tc_sh,
		cache_info.dt_conf.tc_page,
		cache_info.dt_conf.tc_cst,
		cache_info.dt_conf.tc_aid,
		cache_info.dt_conf.tc_sr);

	printk("I-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
		cache_info.it_conf.tc_sh,
		cache_info.it_conf.tc_page,
		cache_info.it_conf.tc_cst,
		cache_info.it_conf.tc_aid,
		cache_info.it_conf.tc_sr);
#endif

	split_tlb = 0;
	if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
		if (cache_info.dt_conf.tc_sh == 2)
			printk(KERN_WARNING "Unexpected TLB configuration. "
				"Will flush I/D separately (could be optimized).\n");

		split_tlb = 1;
	}

	/* "New and Improved" version from Jim Hull
	 *	(1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
	 * The following CAFL_STRIDE is an optimized version, see
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
	 */
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
	dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
	icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE

#ifndef CONFIG_PA20
	if (pdc_btlb_info(&btlb_info) < 0) {
		memset(&btlb_info, 0, sizeof btlb_info);
	}
#endif

	if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
						PDC_MODEL_NVA_UNSUPPORTED) {
		printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
		panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
	}
}

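/* Disable space-register hashing: turn it off via the CPU-specific
 * instruction sequence, then query PDC to verify that no space-ID bits
 * are still being hashed into addresses. */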
void __init disable_sr_hashing(void)
{
	int srhash_type, retval;
	unsigned long space_bits;

	switch (boot_cpu_data.cpu_type) {
	case pcx:	/* We shouldn't get this far.  setup.c should prevent it. */
		BUG();
		return;

	case pcxs:
	case pcxt:
	case pcxt_:
		srhash_type = SRHASH_PCXST;
		break;

	case pcxl:
		srhash_type = SRHASH_PCXL;
		break;

	case pcxl2:	/* pcxl2 doesn't support space register hashing */
		return;

	default:	/* Currently all PA2.0 machines use the same ins. sequence */
		srhash_type = SRHASH_PA20;
		break;
	}

	disable_sr_hashing_asm(srhash_type);

	retval = pdc_spaceid_bits(&space_bits);
	/* If this procedure isn't implemented, don't panic. */
	if (retval < 0 && retval != PDC_BAD_OPTION)
		panic("pdc_spaceid_bits call failed.\n");
	if (space_bits != 0)
		panic("SpaceID hashing is still on!\n");
}

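/* Flush or purge a single user page through an alias mapping congruent
 * with vmaddr.  Flush writes dirty lines back before invalidating them;
 * purge discards them without writeback.  Executable mappings also have
 * their icache copy flushed. */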
static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
{
	preempt_disable();
	flush_dcache_page_asm(physaddr, vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
	preempt_enable();
}

static inline void
__purge_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
{
	preempt_disable();
	purge_dcache_page_asm(physaddr, vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
	preempt_enable();
}

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping_file(page);
	struct vm_area_struct *mpnt;
	unsigned long offset;
	unsigned long addr, old_addr = 0;
	pgoff_t pgoff;

	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_dcache_dirty, &page->flags);
		return;
	}

	flush_kernel_dcache_page(page);

	if (!mapping)
		return;

	pgoff = page->index;

	/* We have carefully arranged in arch_get_unmapped_area() that
	 * *any* mappings of a file are always congruently mapped (whether
	 * declared as MAP_PRIVATE or MAP_SHARED), so we only need
	 * to flush one address here for them all to become coherent */

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		addr = mpnt->vm_start + offset;

		/* The TLB is the engine of coherence on parisc: the
		 * CPU is entitled to speculate any page with a TLB
		 * mapping, so here we kill the mapping then flush the
		 * page along a special flush-only alias mapping.
		 * This guarantees that the page is no longer in the
		 * cache for any process, nor may it be speculatively
		 * read in (until the user or kernel specifically
		 * accesses it, of course). */

		flush_tlb_page(mpnt, addr);
		if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
				      != (addr & (SHM_COLOUR - 1))) {
			__flush_cache_page(mpnt, addr, page_to_phys(page));
			if (parisc_requires_coherency() && old_addr)
				printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n", old_addr, addr, mpnt->vm_file);
			old_addr = addr;
		}
	}
	flush_dcache_mmap_unlock(mapping);
}
EXPORT_SYMBOL(flush_dcache_page);

/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_dcache_page_asm);
EXPORT_SYMBOL(flush_data_cache_local);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
static unsigned long parisc_cache_flush_threshold __ro_after_init = FLUSH_THRESHOLD;

#define FLUSH_TLB_THRESHOLD (16*1024) /* 16 KiB minimum TLB threshold */
static unsigned long parisc_tlb_flush_threshold __ro_after_init = FLUSH_TLB_THRESHOLD;

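/* Measure the cost of a whole-cache flush versus flushing the kernel
 * image by range, and derive the size above which a whole-cache flush
 * is cheaper; likewise for the TLB.  Cycle counts come from the
 * interval timer, control register %cr16, read via mfctl(16). */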
void __init parisc_setup_cache_timing(void)
{
	unsigned long rangetime, alltime;
	unsigned long size, start;
	unsigned long threshold;

	alltime = mfctl(16);
	flush_data_cache();
	alltime = mfctl(16) - alltime;

	size = (unsigned long)(_end - _text);
	rangetime = mfctl(16);
	flush_kernel_dcache_range((unsigned long)_text, size);
	rangetime = mfctl(16) - rangetime;

	printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	threshold = L1_CACHE_ALIGN(size * alltime / rangetime);
	if (threshold > cache_info.dc_size)
		threshold = cache_info.dc_size;
	if (threshold)
		parisc_cache_flush_threshold = threshold;
	printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
		parisc_cache_flush_threshold/1024);

	/* calculate TLB flush threshold */

	/* On SMP machines, skip the TLB measurement of kernel text,
	 * which has been mapped as huge pages. */
	if (num_online_cpus() > 1 && !parisc_requires_coherency()) {
		threshold = max(cache_info.it_size, cache_info.dt_size);
		threshold *= PAGE_SIZE;
		threshold /= num_online_cpus();
		goto set_tlb_threshold;
	}

	size = 0;
	start = (unsigned long) _text;
	rangetime = mfctl(16);
	while (start < (unsigned long) _end) {
		flush_tlb_kernel_range(start, start + PAGE_SIZE);
		start += PAGE_SIZE;
		size += PAGE_SIZE;
	}
	rangetime = mfctl(16) - rangetime;

	alltime = mfctl(16);
	flush_tlb_all();
	alltime = mfctl(16) - alltime;

	printk(KERN_INFO "Whole TLB flush %lu cycles, Range flush %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	threshold = PAGE_ALIGN((num_online_cpus() * size * alltime) / rangetime);
	printk(KERN_INFO "Calculated TLB flush threshold %lu KiB\n",
		threshold/1024);

set_tlb_threshold:
	if (threshold > parisc_tlb_flush_threshold)
		parisc_tlb_flush_threshold = threshold;
	printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
		parisc_tlb_flush_threshold/1024);
}

extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);

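/* Flush a page through its kernel mapping, then purge the matching
 * kernel TLB entry so the CPU cannot speculatively reload the stale
 * line via the kernel address. */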
void flush_kernel_dcache_page_addr(void *addr)
{
	unsigned long flags;

	flush_kernel_dcache_page_asm(addr);
	purge_tlb_start(flags);
	pdtlb_kernel(addr);
	purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
	struct page *pg)
{
	/* Copy using kernel mapping.  No coherency is needed (all in
	   kunmap) for the `to' page.  However, the `from' page needs to
	   be flushed through a mapping equivalent to the user mapping
	   before it can be accessed through the kernel mapping. */
	preempt_disable();
	flush_dcache_page_asm(__pa(vfrom), vaddr);
	copy_page_asm(vto, vfrom);
	preempt_enable();
}
EXPORT_SYMBOL(copy_user_page);

/* __flush_tlb_range()
 *
 * returns 1 if all TLBs were flushed.
 */
int __flush_tlb_range(unsigned long sid, unsigned long start,
		      unsigned long end)
{
	unsigned long flags;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    end - start >= parisc_tlb_flush_threshold) {
		flush_tlb_all();
		return 1;
	}

	/* Purge TLB entries for small ranges using the pdtlb and
	   pitlb instructions.  These instructions execute locally
	   but cause a purge request to be broadcast to other TLBs. */
	while (start < end) {
		purge_tlb_start(flags);
		mtsp(sid, 1);
		pdtlb(start);
		pitlb(start);
		purge_tlb_end(flags);
		start += PAGE_SIZE;
	}
	return 0;
}

static void cacheflush_h_tmp_function(void *dummy)
{
	flush_cache_all_local();
}

void flush_cache_all(void)
{
	on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
}

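/* Total size of all VMAs in the mm, used to decide between ranged and
   whole-cache flushes. */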
static inline unsigned long mm_total_size(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long usize = 0;

	for (vma = mm->mmap; vma; vma = vma->vm_next)
		usize += vma->vm_end - vma->vm_start;
	return usize;
}

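/* Walk the page table and return a pointer to the PTE for addr, or
   NULL if any intermediate level is empty. */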
static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
{
	pte_t *ptep = NULL;

	if (!pgd_none(*pgd)) {
		p4d_t *p4d = p4d_offset(pgd, addr);
		if (!p4d_none(*p4d)) {
			pud_t *pud = pud_offset(p4d, addr);
			if (!pud_none(*pud)) {
				pmd_t *pmd = pmd_offset(pud, addr);
				if (!pmd_none(*pmd))
					ptep = pte_offset_map(pmd, addr);
			}
		}
	}
	return ptep;
}

void flush_cache_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	pgd_t *pgd;

	/* Flushing the whole cache on each cpu takes forever on
	   rp3440, etc.  So, avoid it if the mm isn't too big.  */
	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    mm_total_size(mm) >= parisc_cache_flush_threshold) {
		if (mm->context)
			flush_tlb_all();
		flush_cache_all();
		return;
	}

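	/* If the mm is the one currently active in space register 3,
	   its pages can be flushed directly by user virtual address. */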
	if (mm->context == mfsp(3)) {
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
			if (vma->vm_flags & VM_EXEC)
				flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
			flush_tlb_range(vma, vma->vm_start, vma->vm_end);
		}
		return;
	}

	pgd = mm->pgd;
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		unsigned long addr;

		for (addr = vma->vm_start; addr < vma->vm_end;
		     addr += PAGE_SIZE) {
			unsigned long pfn;
			pte_t *ptep = get_ptep(pgd, addr);
			if (!ptep)
				continue;
			pfn = pte_pfn(*ptep);
			if (!pfn_valid(pfn))
				continue;
			if (unlikely(mm->context)) {
				flush_tlb_page(vma, addr);
				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
			} else {
				__purge_cache_page(vma, addr, PFN_PHYS(pfn));
			}
		}
	}
}

void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	pgd_t *pgd;
	unsigned long addr;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    end - start >= parisc_cache_flush_threshold) {
		if (vma->vm_mm->context)
			flush_tlb_range(vma, start, end);
		flush_cache_all();
		return;
	}

	if (vma->vm_mm->context == mfsp(3)) {
		flush_user_dcache_range_asm(start, end);
		if (vma->vm_flags & VM_EXEC)
			flush_user_icache_range_asm(start, end);
		flush_tlb_range(vma, start, end);
		return;
	}

	pgd = vma->vm_mm->pgd;
	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
		unsigned long pfn;
		pte_t *ptep = get_ptep(pgd, addr);
		if (!ptep)
			continue;
		pfn = pte_pfn(*ptep);
		if (pfn_valid(pfn)) {
			if (unlikely(vma->vm_mm->context)) {
				flush_tlb_page(vma, addr);
				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
			} else {
				__purge_cache_page(vma, addr, PFN_PHYS(pfn));
			}
		}
	}
}

void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
	if (pfn_valid(pfn)) {
		if (likely(vma->vm_mm->context)) {
			flush_tlb_page(vma, vmaddr);
			__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
		} else {
			__purge_cache_page(vma, vmaddr, PFN_PHYS(pfn));
		}
	}
}

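/* Coherence helpers for vmap ranges (e.g. around I/O to vmalloc'd
 * buffers): flush_kernel_vmap_range() writes dirty lines back, while
 * invalidate_kernel_vmap_range() discards them so fresh data is
 * re-read from memory. */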
void flush_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    (unsigned long)size >= parisc_cache_flush_threshold) {
		flush_tlb_kernel_range(start, end);
		flush_data_cache();
		return;
	}

	flush_kernel_dcache_range_asm(start, end);
	flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(flush_kernel_vmap_range);

void invalidate_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    (unsigned long)size >= parisc_cache_flush_threshold) {
		flush_tlb_kernel_range(start, end);
		flush_data_cache();
		return;
	}

	purge_kernel_dcache_range_asm(start, end);
	flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);