/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>

int split_tlb __read_mostly;
int dcache_stride __read_mostly;
int icache_stride __read_mostly;
EXPORT_SYMBOL(dcache_stride);

void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);


/* On some machines (e.g. ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software. We put a spinlock around all TLB flushes to
 * ensure this.
 */
DEFINE_SPINLOCK(pa_tlb_lock);
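
/*
 * Callers do not take this lock directly; the purge_tlb_start() and
 * purge_tlb_end() helpers from <asm/tlbflush.h> wrap it. A minimal
 * sketch of the expected pattern (illustrative only):
 *
 *	unsigned long flags;
 *
 *	purge_tlb_start(flags);		... acquires pa_tlb_lock
 *	pdtlb(addr);			... broadcast data-TLB purge
 *	purge_tlb_end(flags);		... releases pa_tlb_lock
 */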

struct pdc_cache_info cache_info __read_mostly;
#ifndef CONFIG_PA20
static struct pdc_btlb_info btlb_info __read_mostly;
#endif

#ifdef CONFIG_SMP
void
flush_data_cache(void)
{
	on_each_cpu(flush_data_cache_local, NULL, 1);
}
void
flush_instruction_cache(void)
{
	on_each_cpu(flush_instruction_cache_local, NULL, 1);
}
#endif

void
flush_cache_all_local(void)
{
	flush_instruction_cache_local(NULL);
	flush_data_cache_local(NULL);
}
EXPORT_SYMBOL(flush_cache_all_local);

void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	struct page *page = pte_page(*ptep);

	if (pfn_valid(page_to_pfn(page)) && page_mapping(page) &&
	    test_bit(PG_dcache_dirty, &page->flags)) {

		flush_kernel_dcache_page(page);
		clear_bit(PG_dcache_dirty, &page->flags);
	} else if (parisc_requires_coherency())
		flush_kernel_dcache_page(page);
}

void
show_cache_info(struct seq_file *m)
{
	char buf[32];

	seq_printf(m, "I-cache\t\t: %ld KB\n",
		cache_info.ic_size/1024 );
	if (cache_info.dc_loop != 1)
		snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
	seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n",
		cache_info.dc_size/1024,
		(cache_info.dc_conf.cc_wt ? "WT":"WB"),
		(cache_info.dc_conf.cc_sh ? ", shared I/D":""),
		((cache_info.dc_loop == 1) ? "direct mapped" : buf));
	seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
		cache_info.it_size,
		cache_info.dt_size,
		cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
	);

#ifndef CONFIG_PA20
	/* BTLB - Block TLB */
	if (btlb_info.max_size==0) {
		seq_printf(m, "BTLB\t\t: not supported\n" );
	} else {
		seq_printf(m,
		"BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
		"BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
		"BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
		btlb_info.max_size, (int)4096,
		btlb_info.max_size>>8,
		btlb_info.fixed_range_info.num_i,
		btlb_info.fixed_range_info.num_d,
		btlb_info.fixed_range_info.num_comb,
		btlb_info.variable_range_info.num_i,
		btlb_info.variable_range_info.num_d,
		btlb_info.variable_range_info.num_comb
		);
	}
#endif
}

void __init
parisc_cache_init(void)
{
	if (pdc_cache_info(&cache_info) < 0)
		panic("parisc_cache_init: pdc_cache_info failed");

#if 0
	printk("ic_size %lx dc_size %lx it_size %lx\n",
		cache_info.ic_size,
		cache_info.dc_size,
		cache_info.it_size);

	printk("DC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.dc_base,
		cache_info.dc_stride,
		cache_info.dc_count,
		cache_info.dc_loop);

	printk("dc_conf = 0x%lx alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.dc_conf),
		cache_info.dc_conf.cc_alias,
		cache_info.dc_conf.cc_block,
		cache_info.dc_conf.cc_line,
		cache_info.dc_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.dc_conf.cc_wt,
		cache_info.dc_conf.cc_sh,
		cache_info.dc_conf.cc_cst,
		cache_info.dc_conf.cc_hv);

	printk("IC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.ic_base,
		cache_info.ic_stride,
		cache_info.ic_count,
		cache_info.ic_loop);

	printk("ic_conf = 0x%lx alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.ic_conf),
		cache_info.ic_conf.cc_alias,
		cache_info.ic_conf.cc_block,
		cache_info.ic_conf.cc_line,
		cache_info.ic_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.ic_conf.cc_wt,
		cache_info.ic_conf.cc_sh,
		cache_info.ic_conf.cc_cst,
		cache_info.ic_conf.cc_hv);

	printk("D-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n",
		cache_info.dt_conf.tc_sh,
		cache_info.dt_conf.tc_page,
		cache_info.dt_conf.tc_cst,
		cache_info.dt_conf.tc_aid,
		cache_info.dt_conf.tc_pad1);

	printk("I-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n",
		cache_info.it_conf.tc_sh,
		cache_info.it_conf.tc_page,
		cache_info.it_conf.tc_cst,
		cache_info.it_conf.tc_aid,
		cache_info.it_conf.tc_pad1);
#endif

	split_tlb = 0;
	if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
		if (cache_info.dt_conf.tc_sh == 2)
			printk(KERN_WARNING "Unexpected TLB configuration. "
			"Will flush I/D separately (could be optimized).\n");

		split_tlb = 1;
	}

	/* "New and Improved" version from Jim Hull
	 *	(1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
	 * The following CAFL_STRIDE is an optimized version, see
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
	 */
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
	dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
	icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE
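
	/*
	 * Worked example (hypothetical PDC values, for illustration only):
	 * cc_line = 2, cc_block = 1, cc_shift = 0 gives
	 * CAFL_STRIDE = 2 << (3 + 1 + 0) = 32 bytes between cache lines,
	 * matching Jim Hull's form (1 << (1 - 1)) * (2 << (4 + 0)) = 32.
	 */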

#ifndef CONFIG_PA20
	if (pdc_btlb_info(&btlb_info) < 0) {
		memset(&btlb_info, 0, sizeof btlb_info);
	}
#endif

	if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
						PDC_MODEL_NVA_UNSUPPORTED) {
		printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
		panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
	}
}

void disable_sr_hashing(void)
{
	int srhash_type, retval;
	unsigned long space_bits;

	switch (boot_cpu_data.cpu_type) {
	case pcx:	/* We shouldn't get this far. setup.c should prevent it. */
		BUG();
		return;

	case pcxs:
	case pcxt:
	case pcxt_:
		srhash_type = SRHASH_PCXST;
		break;

	case pcxl:
		srhash_type = SRHASH_PCXL;
		break;

	case pcxl2:	/* pcxl2 doesn't support space register hashing */
		return;

	default:	/* Currently all PA2.0 machines use the same ins. sequence */
		srhash_type = SRHASH_PA20;
		break;
	}

	disable_sr_hashing_asm(srhash_type);

	retval = pdc_spaceid_bits(&space_bits);
	/* If this procedure isn't implemented, don't panic. */
	if (retval < 0 && retval != PDC_BAD_OPTION)
		panic("pdc_spaceid_bits call failed.\n");
	if (space_bits != 0)
		panic("SpaceID hashing is still on!\n");
}

static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
{
	flush_dcache_page_asm(physaddr, vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
}

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	struct vm_area_struct *mpnt;
	struct prio_tree_iter iter;
	unsigned long offset;
	unsigned long addr, old_addr = 0;
	pgoff_t pgoff;

	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_dcache_dirty, &page->flags);
		return;
	}

	flush_kernel_dcache_page(page);

	if (!mapping)
		return;

	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	/* We have carefully arranged in arch_get_unmapped_area() that
	 * *any* mappings of a file are always congruently mapped (whether
	 * declared as MAP_PRIVATE or MAP_SHARED), so we only need
	 * to flush one address here for them all to become coherent */
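
	/*
	 * Congruent here means equal modulo SHMLBA (assuming the 4 MB
	 * parisc SHMLBA), i.e. the addresses share a cache colour. For
	 * example (made-up addresses), user mappings at 0x00012000 and
	 * 0x00412000 alias the same cache lines, so one flush covers
	 * both; a mapping at 0x00013000 would be an inequivalent alias
	 * and trips the INEQUIVALENT ALIASES warning below.
	 */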

	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		addr = mpnt->vm_start + offset;

		/* The TLB is the engine of coherence on parisc: The
		 * CPU is entitled to speculate any page with a TLB
		 * mapping, so here we kill the mapping then flush the
		 * page along a special flush only alias mapping.
		 * This guarantees that the page is no longer in the
		 * cache for any process, nor may it be speculatively
		 * read in (until the user or kernel specifically
		 * accesses it, of course) */

		flush_tlb_page(mpnt, addr);
		if (old_addr == 0 || (old_addr & (SHMLBA - 1)) != (addr & (SHMLBA - 1))) {
			__flush_cache_page(mpnt, addr, page_to_phys(page));
			if (old_addr)
				printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %s\n",
					old_addr, addr,
					mpnt->vm_file ? (char *)mpnt->vm_file->f_path.dentry->d_name.name : "(null)");
			old_addr = addr;
		}
	}
	flush_dcache_mmap_unlock(mapping);
}
EXPORT_SYMBOL(flush_dcache_page);

/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_dcache_page_asm);
EXPORT_SYMBOL(flush_data_cache_local);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

void clear_user_page_asm(void *page, unsigned long vaddr)
{
	unsigned long flags;
	/* This function is implemented in assembly in pacache.S */
	extern void __clear_user_page_asm(void *page, unsigned long vaddr);

	purge_tlb_start(flags);
	__clear_user_page_asm(page, vaddr);
	purge_tlb_end(flags);
}

#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
int parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;

void __init parisc_setup_cache_timing(void)
{
	unsigned long rangetime, alltime;
	unsigned long size;

	alltime = mfctl(16);
	flush_data_cache();
	alltime = mfctl(16) - alltime;

	size = (unsigned long)(_end - _text);
	rangetime = mfctl(16);
	flush_kernel_dcache_range((unsigned long)_text, size);
	rangetime = mfctl(16) - rangetime;

	printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	/* Racy, but if we see an intermediate value, it's ok too... */
	parisc_cache_flush_threshold = size * alltime / rangetime;

	parisc_cache_flush_threshold = (parisc_cache_flush_threshold + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
	if (!parisc_cache_flush_threshold)
		parisc_cache_flush_threshold = FLUSH_THRESHOLD;

	if (parisc_cache_flush_threshold > cache_info.dc_size)
		parisc_cache_flush_threshold = cache_info.dc_size;

	printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n",
		parisc_cache_flush_threshold, num_online_cpus());
}
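
/*
 * Worked example of the threshold above (made-up cycle counts): if a
 * whole-cache flush took alltime = 120000 cycles and flushing a 2 MB
 * kernel range took rangetime = 480000 cycles, the break-even size is
 * 2 MB * 120000 / 480000 = 0.5 MB; ranges larger than that are cheaper
 * to handle with a full flush_data_cache().
 */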

extern void purge_kernel_dcache_page(unsigned long);
extern void clear_user_page_asm(void *page, unsigned long vaddr);

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	unsigned long flags;

	purge_kernel_dcache_page((unsigned long)page);
	purge_tlb_start(flags);
	pdtlb_kernel(page);
	purge_tlb_end(flags);
	clear_user_page_asm(page, vaddr);
}
EXPORT_SYMBOL(clear_user_page);

void flush_kernel_dcache_page_addr(void *addr)
{
	unsigned long flags;

	flush_kernel_dcache_page_asm(addr);
	purge_tlb_start(flags);
	pdtlb_kernel(addr);
	purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	/* no coherency needed (all in kmap/kunmap) */
	copy_user_page_asm(vto, vfrom);
	if (!parisc_requires_coherency())
		flush_kernel_dcache_page_asm(vto);
}
EXPORT_SYMBOL(copy_user_page);

#ifdef CONFIG_PA8X00

void kunmap_parisc(void *addr)
{
	if (parisc_requires_coherency())
		flush_kernel_dcache_page_addr(addr);
}
EXPORT_SYMBOL(kunmap_parisc);
#endif

void __flush_tlb_range(unsigned long sid, unsigned long start,
		       unsigned long end)
{
	unsigned long npages;

	npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	if (npages >= 512)  /* 2MB of space: arbitrary, should be tuned */
		flush_tlb_all();
	else {
		unsigned long flags;

		mtsp(sid, 1);
		purge_tlb_start(flags);
		if (split_tlb) {
			while (npages--) {
				pdtlb(start);
				pitlb(start);
				start += PAGE_SIZE;
			}
		} else {
			while (npages--) {
				pdtlb(start);
				start += PAGE_SIZE;
			}
		}
		purge_tlb_end(flags);
	}
}

static void cacheflush_h_tmp_function(void *dummy)
{
	flush_cache_all_local();
}

void flush_cache_all(void)
{
	on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
}

void flush_cache_mm(struct mm_struct *mm)
{
#ifdef CONFIG_SMP
	flush_cache_all();
#else
	flush_cache_all_local();
#endif
}

void
flush_user_dcache_range(unsigned long start, unsigned long end)
{
	if ((end - start) < parisc_cache_flush_threshold)
		flush_user_dcache_range_asm(start, end);
	else
		flush_data_cache();
}

void
flush_user_icache_range(unsigned long start, unsigned long end)
{
	if ((end - start) < parisc_cache_flush_threshold)
		flush_user_icache_range_asm(start, end);
	else
		flush_instruction_cache();
}


void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	int sr3;

	BUG_ON(!vma->vm_mm->context);

	sr3 = mfsp(3);
	if (vma->vm_mm->context == sr3) {
		flush_user_dcache_range(start, end);
		flush_user_icache_range(start, end);
	} else {
		flush_cache_all();
	}
}

void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
	BUG_ON(!vma->vm_mm->context);

	flush_tlb_page(vma, vmaddr);
	__flush_cache_page(vma, vmaddr, page_to_phys(pfn_to_page(pfn)));
}

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/syscalls.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>
#include <asm/mmu_context.h>
#include <asm/cachectl.h>

int split_tlb __ro_after_init;
int dcache_stride __ro_after_init;
int icache_stride __ro_after_init;
EXPORT_SYMBOL(dcache_stride);

void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);

/* Internal implementation in arch/parisc/kernel/pacache.S */
void flush_data_cache_local(void *);	/* flushes local data-cache only */
void flush_instruction_cache_local(void); /* flushes local code-cache only */

/* On some machines (i.e., ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software. We need a spinlock around all TLB flushes to ensure
 * this.
 */
DEFINE_SPINLOCK(pa_tlb_flush_lock);

#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
int pa_serialize_tlb_flushes __ro_after_init;
#endif
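
/*
 * A reading of <asm/tlbflush.h> (not restated here authoritatively):
 * on 64-bit SMP builds, purge_tlb_start() and purge_tlb_end() are
 * expected to take pa_tlb_flush_lock only when this flag is set, i.e.
 * on machines that actually need the Merced-bus serialization noted
 * above, and to skip the lock everywhere else.
 */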

struct pdc_cache_info cache_info __ro_after_init;
#ifndef CONFIG_PA20
struct pdc_btlb_info btlb_info;
#endif

DEFINE_STATIC_KEY_TRUE(parisc_has_cache);
DEFINE_STATIC_KEY_TRUE(parisc_has_dcache);
DEFINE_STATIC_KEY_TRUE(parisc_has_icache);

static void cache_flush_local_cpu(void *dummy)
{
	if (static_branch_likely(&parisc_has_icache))
		flush_instruction_cache_local();
	if (static_branch_likely(&parisc_has_dcache))
		flush_data_cache_local(NULL);
}

void flush_cache_all_local(void)
{
	cache_flush_local_cpu(NULL);
}

void flush_cache_all(void)
{
	if (static_branch_likely(&parisc_has_cache))
		on_each_cpu(cache_flush_local_cpu, NULL, 1);
}

static inline void flush_data_cache(void)
{
	if (static_branch_likely(&parisc_has_dcache))
		on_each_cpu(flush_data_cache_local, NULL, 1);
}


/* Kernel virtual address of pfn. */
#define pfn_va(pfn)	__va(PFN_PHYS(pfn))

void __update_cache(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct folio *folio;
	unsigned int nr;

	/* We don't have pte special. As a result, we can be called with
	   an invalid pfn and we don't need to flush the kernel dcache page.
	   This occurs with FireGL card in C8000. */
	if (!pfn_valid(pfn))
		return;

	folio = page_folio(pfn_to_page(pfn));
	pfn = folio_pfn(folio);
	nr = folio_nr_pages(folio);
	if (folio_flush_mapping(folio) &&
	    test_bit(PG_dcache_dirty, &folio->flags)) {
		while (nr--)
			flush_kernel_dcache_page_addr(pfn_va(pfn + nr));
		clear_bit(PG_dcache_dirty, &folio->flags);
	} else if (parisc_requires_coherency())
		while (nr--)
			flush_kernel_dcache_page_addr(pfn_va(pfn + nr));
}

void
show_cache_info(struct seq_file *m)
{
	char buf[32];

	seq_printf(m, "I-cache\t\t: %ld KB\n",
		cache_info.ic_size/1024 );
	if (cache_info.dc_loop != 1)
		snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
	seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s, alias=%d)\n",
		cache_info.dc_size/1024,
		(cache_info.dc_conf.cc_wt ? "WT":"WB"),
		(cache_info.dc_conf.cc_sh ? ", shared I/D":""),
		((cache_info.dc_loop == 1) ? "direct mapped" : buf),
		cache_info.dc_conf.cc_alias
	);
	seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
		cache_info.it_size,
		cache_info.dt_size,
		cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
	);

#ifndef CONFIG_PA20
	/* BTLB - Block TLB */
	if (btlb_info.max_size==0) {
		seq_printf(m, "BTLB\t\t: not supported\n" );
	} else {
		seq_printf(m,
		"BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
		"BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
		"BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
		btlb_info.max_size, (int)4096,
		btlb_info.max_size>>8,
		btlb_info.fixed_range_info.num_i,
		btlb_info.fixed_range_info.num_d,
		btlb_info.fixed_range_info.num_comb,
		btlb_info.variable_range_info.num_i,
		btlb_info.variable_range_info.num_d,
		btlb_info.variable_range_info.num_comb
		);
	}
#endif
}

void __init
parisc_cache_init(void)
{
	if (pdc_cache_info(&cache_info) < 0)
		panic("parisc_cache_init: pdc_cache_info failed");

#if 0
	printk("ic_size %lx dc_size %lx it_size %lx\n",
		cache_info.ic_size,
		cache_info.dc_size,
		cache_info.it_size);

	printk("DC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.dc_base,
		cache_info.dc_stride,
		cache_info.dc_count,
		cache_info.dc_loop);

	printk("dc_conf = 0x%lx alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.dc_conf),
		cache_info.dc_conf.cc_alias,
		cache_info.dc_conf.cc_block,
		cache_info.dc_conf.cc_line,
		cache_info.dc_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.dc_conf.cc_wt,
		cache_info.dc_conf.cc_sh,
		cache_info.dc_conf.cc_cst,
		cache_info.dc_conf.cc_hv);

	printk("IC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.ic_base,
		cache_info.ic_stride,
		cache_info.ic_count,
		cache_info.ic_loop);

	printk("IT base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
		cache_info.it_sp_base,
		cache_info.it_sp_stride,
		cache_info.it_sp_count,
		cache_info.it_loop,
		cache_info.it_off_base,
		cache_info.it_off_stride,
		cache_info.it_off_count);

	printk("DT base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
		cache_info.dt_sp_base,
		cache_info.dt_sp_stride,
		cache_info.dt_sp_count,
		cache_info.dt_loop,
		cache_info.dt_off_base,
		cache_info.dt_off_stride,
		cache_info.dt_off_count);

	printk("ic_conf = 0x%lx alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.ic_conf),
		cache_info.ic_conf.cc_alias,
		cache_info.ic_conf.cc_block,
		cache_info.ic_conf.cc_line,
		cache_info.ic_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.ic_conf.cc_wt,
		cache_info.ic_conf.cc_sh,
		cache_info.ic_conf.cc_cst,
		cache_info.ic_conf.cc_hv);

	printk("D-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
		cache_info.dt_conf.tc_sh,
		cache_info.dt_conf.tc_page,
		cache_info.dt_conf.tc_cst,
		cache_info.dt_conf.tc_aid,
		cache_info.dt_conf.tc_sr);

	printk("I-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
		cache_info.it_conf.tc_sh,
		cache_info.it_conf.tc_page,
		cache_info.it_conf.tc_cst,
		cache_info.it_conf.tc_aid,
		cache_info.it_conf.tc_sr);
#endif

	split_tlb = 0;
	if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
		if (cache_info.dt_conf.tc_sh == 2)
			printk(KERN_WARNING "Unexpected TLB configuration. "
			"Will flush I/D separately (could be optimized).\n");

		split_tlb = 1;
	}

	/* "New and Improved" version from Jim Hull
	 *	(1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
	 * The following CAFL_STRIDE is an optimized version, see
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
	 */
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
	dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
	icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE
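
	/*
	 * For example (hypothetical PDC values), cc_line = 1, cc_block = 1
	 * and cc_shift = 1 yield a stride of 1 << (3 + 1 + 1) = 32 bytes.
	 * A PDC that reported cc_line = 0 would yield a stride of 0, so a
	 * stride-based flush loop could never advance through the range;
	 * that is what the WARN_ONs below guard against.
	 */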

	/* stride needs to be non-zero, otherwise cache flushes will not work */
	WARN_ON(cache_info.dc_size && dcache_stride == 0);
	WARN_ON(cache_info.ic_size && icache_stride == 0);

	if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
						PDC_MODEL_NVA_UNSUPPORTED) {
		printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
		panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
	}
}

void disable_sr_hashing(void)
{
	int srhash_type, retval;
	unsigned long space_bits;

	switch (boot_cpu_data.cpu_type) {
	case pcx:	/* We shouldn't get this far. setup.c should prevent it. */
		BUG();
		return;

	case pcxs:
	case pcxt:
	case pcxt_:
		srhash_type = SRHASH_PCXST;
		break;

	case pcxl:
		srhash_type = SRHASH_PCXL;
		break;

	case pcxl2:	/* pcxl2 doesn't support space register hashing */
		return;

	default:	/* Currently all PA2.0 machines use the same ins. sequence */
		srhash_type = SRHASH_PA20;
		break;
	}

	disable_sr_hashing_asm(srhash_type);

	retval = pdc_spaceid_bits(&space_bits);
	/* If this procedure isn't implemented, don't panic. */
	if (retval < 0 && retval != PDC_BAD_OPTION)
		panic("pdc_spaceid_bits call failed.\n");
	if (space_bits != 0)
		panic("SpaceID hashing is still on!\n");
}

static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
{
	if (!static_branch_likely(&parisc_has_cache))
		return;
	preempt_disable();
	flush_dcache_page_asm(physaddr, vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
	preempt_enable();
}

static void flush_user_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	unsigned long flags, space, pgd, prot;
#ifdef CONFIG_TLB_PTLOCK
	unsigned long pgd_lock;
#endif

	vmaddr &= PAGE_MASK;

	preempt_disable();

	/* Set context for flush */
	local_irq_save(flags);
	prot = mfctl(8);
	space = mfsp(SR_USER);
	pgd = mfctl(25);
#ifdef CONFIG_TLB_PTLOCK
	pgd_lock = mfctl(28);
#endif
	switch_mm_irqs_off(NULL, vma->vm_mm, NULL);
	local_irq_restore(flags);

	flush_user_dcache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
	if (vma->vm_flags & VM_EXEC)
		flush_user_icache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
	flush_tlb_page(vma, vmaddr);

	/* Restore previous context */
	local_irq_save(flags);
#ifdef CONFIG_TLB_PTLOCK
	mtctl(pgd_lock, 28);
#endif
	mtctl(pgd, 25);
	mtsp(space, SR_USER);
	mtctl(prot, 8);
	local_irq_restore(flags);

	preempt_enable();
}

void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
			unsigned int nr)
{
	void *kaddr = page_address(page);

	for (;;) {
		flush_kernel_dcache_page_addr(kaddr);
		flush_kernel_icache_page(kaddr);
		if (--nr == 0)
			break;
		kaddr += PAGE_SIZE;
	}
}

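/*
 * Lockless walk of the user page table: returns a mapped pte pointer
 * (which the caller must release with pte_unmap()) or NULL if any
 * intermediate level is empty.
 */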
static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr)
{
	pte_t *ptep = NULL;
	pgd_t *pgd = mm->pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (!pgd_none(*pgd)) {
		p4d = p4d_offset(pgd, addr);
		if (!p4d_none(*p4d)) {
			pud = pud_offset(p4d, addr);
			if (!pud_none(*pud)) {
				pmd = pmd_offset(pud, addr);
				if (!pmd_none(*pmd))
					ptep = pte_offset_map(pmd, addr);
			}
		}
	}
	return ptep;
}

static inline bool pte_needs_flush(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE))
		== (_PAGE_PRESENT | _PAGE_ACCESSED);
}
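
/*
 * Example (illustrative pte bits): a pte carrying _PAGE_PRESENT |
 * _PAGE_ACCESSED | _PAGE_READ needs flushing, while one that also has
 * _PAGE_NO_CACHE set does not, since uncached mappings never allocate
 * cache lines; likewise a present pte whose accessed bit is still clear
 * has never been referenced through this mapping.
 */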

void flush_dcache_folio(struct folio *folio)
{
	struct address_space *mapping = folio_flush_mapping(folio);
	struct vm_area_struct *vma;
	unsigned long addr, old_addr = 0;
	void *kaddr;
	unsigned long count = 0;
	unsigned long i, nr, flags;
	pgoff_t pgoff;

	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_dcache_dirty, &folio->flags);
		return;
	}

	nr = folio_nr_pages(folio);
	kaddr = folio_address(folio);
	for (i = 0; i < nr; i++)
		flush_kernel_dcache_page_addr(kaddr + i * PAGE_SIZE);

	if (!mapping)
		return;

	pgoff = folio->index;

	/*
	 * We have carefully arranged in arch_get_unmapped_area() that
	 * *any* mappings of a file are always congruently mapped (whether
	 * declared as MAP_PRIVATE or MAP_SHARED), so we only need
	 * to flush one address here for them all to become coherent
	 * on machines that support equivalent aliasing
	 */
	flush_dcache_mmap_lock_irqsave(mapping, flags);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff + nr - 1) {
		unsigned long offset = pgoff - vma->vm_pgoff;
		unsigned long pfn = folio_pfn(folio);

		addr = vma->vm_start;
		nr = folio_nr_pages(folio);
		if (offset > -nr) {
			pfn -= offset;
			nr += offset;
		} else {
			addr += offset * PAGE_SIZE;
		}
		if (addr + nr * PAGE_SIZE > vma->vm_end)
			nr = (vma->vm_end - addr) / PAGE_SIZE;

		if (parisc_requires_coherency()) {
			for (i = 0; i < nr; i++) {
				pte_t *ptep = get_ptep(vma->vm_mm,
						       addr + i * PAGE_SIZE);
				if (!ptep)
					continue;
				if (pte_needs_flush(*ptep))
					flush_user_cache_page(vma,
							      addr + i * PAGE_SIZE);
				/* Optimise accesses to the same table? */
				pte_unmap(ptep);
			}
		} else {
			/*
			 * The TLB is the engine of coherence on parisc:
			 * The CPU is entitled to speculate any page
			 * with a TLB mapping, so here we kill the
			 * mapping then flush the page along a special
			 * flush only alias mapping. This guarantees that
			 * the page is no longer in the cache for any
			 * process, nor may it be speculatively read
			 * in (until the user or kernel specifically
			 * accesses it, of course)
			 */
			for (i = 0; i < nr; i++)
				flush_tlb_page(vma, addr + i * PAGE_SIZE);
			if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
					!= (addr & (SHM_COLOUR - 1))) {
				for (i = 0; i < nr; i++)
					__flush_cache_page(vma,
						addr + i * PAGE_SIZE,
						(pfn + i) * PAGE_SIZE);
				/*
				 * Software is allowed to have any number
				 * of private mappings to a page.
				 */
				if (!(vma->vm_flags & VM_SHARED))
					continue;
				if (old_addr)
					pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
						old_addr, addr, vma->vm_file);
				if (nr == folio_nr_pages(folio))
					old_addr = addr;
			}
		}
		WARN_ON(++count == 4096);
	}
	flush_dcache_mmap_unlock_irqrestore(mapping, flags);
}
EXPORT_SYMBOL(flush_dcache_folio);

/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
static unsigned long parisc_cache_flush_threshold __ro_after_init = FLUSH_THRESHOLD;

#define FLUSH_TLB_THRESHOLD (16*1024) /* 16 KiB minimum TLB threshold */
static unsigned long parisc_tlb_flush_threshold __ro_after_init = ~0UL;

void __init parisc_setup_cache_timing(void)
{
	unsigned long rangetime, alltime;
	unsigned long size;
	unsigned long threshold, threshold2;

	alltime = mfctl(16);
	flush_data_cache();
	alltime = mfctl(16) - alltime;

	size = (unsigned long)(_end - _text);
	rangetime = mfctl(16);
	flush_kernel_dcache_range((unsigned long)_text, size);
	rangetime = mfctl(16) - rangetime;

	printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	threshold = L1_CACHE_ALIGN((unsigned long)((uint64_t)size * alltime / rangetime));
	pr_info("Calculated flush threshold is %lu KiB\n",
		threshold/1024);

	/*
	 * The threshold computed above isn't very reliable. The following
	 * heuristic works reasonably well on c8000/rp3440.
	 */
	threshold2 = cache_info.dc_size * num_online_cpus();
	parisc_cache_flush_threshold = threshold2;
	printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
		parisc_cache_flush_threshold/1024);

	/* calculate TLB flush threshold */

	/* On SMP machines, skip the TLB measurement of kernel text, which
	 * has been mapped with huge pages. */
	if (num_online_cpus() > 1 && !parisc_requires_coherency()) {
		threshold = max(cache_info.it_size, cache_info.dt_size);
		threshold *= PAGE_SIZE;
		threshold /= num_online_cpus();
		goto set_tlb_threshold;
	}

	size = (unsigned long)_end - (unsigned long)_text;
	rangetime = mfctl(16);
	flush_tlb_kernel_range((unsigned long)_text, (unsigned long)_end);
	rangetime = mfctl(16) - rangetime;

	alltime = mfctl(16);
	flush_tlb_all();
	alltime = mfctl(16) - alltime;

	printk(KERN_INFO "Whole TLB flush %lu cycles, Range flush %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	threshold = PAGE_ALIGN((num_online_cpus() * size * alltime) / rangetime);
	printk(KERN_INFO "Calculated TLB flush threshold %lu KiB\n",
		threshold/1024);

set_tlb_threshold:
	if (threshold > FLUSH_TLB_THRESHOLD)
		parisc_tlb_flush_threshold = threshold;
	else
		parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD;

	printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
		parisc_tlb_flush_threshold/1024);
}
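
/*
 * Worked example of the TLB threshold (made-up cycle counts): on a
 * uniprocessor where a whole-TLB flush took alltime = 3000 cycles and
 * purging an 8 MB kernel range took rangetime = 96000 cycles, the
 * break-even is PAGE_ALIGN(1 * 8 MB * 3000 / 96000) = 256 KiB; ranges
 * larger than that are cheaper to handle with flush_tlb_all().
 */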

extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);

void flush_kernel_dcache_page_addr(const void *addr)
{
	unsigned long flags;

	flush_kernel_dcache_page_asm(addr);
	purge_tlb_start(flags);
	pdtlb(SR_KERNEL, addr);
	purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);

static void flush_cache_page_if_present(struct vm_area_struct *vma,
	unsigned long vmaddr, unsigned long pfn)
{
	bool needs_flush = false;
	pte_t *ptep;

	/*
	 * The pte check is racy and sometimes the flush will trigger
	 * a non-access TLB miss. Hopefully, the page has already been
	 * flushed.
	 */
	ptep = get_ptep(vma->vm_mm, vmaddr);
	if (ptep) {
		needs_flush = pte_needs_flush(*ptep);
		pte_unmap(ptep);
	}
	if (needs_flush)
		flush_cache_page(vma, vmaddr, pfn);
}

void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

	kfrom = kmap_local_page(from);
	kto = kmap_local_page(to);
	flush_cache_page_if_present(vma, vaddr, page_to_pfn(from));
	copy_page_asm(kto, kfrom);
	kunmap_local(kto);
	kunmap_local(kfrom);
}

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
	unsigned long user_vaddr, void *dst, void *src, int len)
{
	flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len);
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
	unsigned long user_vaddr, void *dst, void *src, int len)
{
	flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
}

/* __flush_tlb_range()
 *
 * returns 1 if all TLBs were flushed.
 */
int __flush_tlb_range(unsigned long sid, unsigned long start,
		      unsigned long end)
{
	unsigned long flags;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    end - start >= parisc_tlb_flush_threshold) {
		flush_tlb_all();
		return 1;
	}

	/* Purge TLB entries for small ranges using the pdtlb and
	   pitlb instructions. These instructions execute locally
	   but cause a purge request to be broadcast to other TLBs. */
	while (start < end) {
		purge_tlb_start(flags);
		mtsp(sid, SR_TEMP1);
		pdtlb(SR_TEMP1, start);
		pitlb(SR_TEMP1, start);
		purge_tlb_end(flags);
		start += PAGE_SIZE;
	}
	return 0;
}
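
/*
 * Example (illustrative): purging a single page, with sid being the
 * space id the flush_tlb_*() wrappers in <asm/tlbflush.h> supply,
 *
 *	__flush_tlb_range(sid, addr, addr + PAGE_SIZE);
 *
 * always takes the pdtlb/pitlb loop above, since one page is well
 * below the 16 KiB minimum enforced by FLUSH_TLB_THRESHOLD.
 */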

static void flush_cache_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	unsigned long addr, pfn;
	pte_t *ptep;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		bool needs_flush = false;
		/*
		 * The vma can contain pages that aren't present. Although
		 * the pte search is expensive, we need the pte to find the
		 * page pfn and to check whether the page should be flushed.
		 */
		ptep = get_ptep(vma->vm_mm, addr);
		if (ptep) {
			needs_flush = pte_needs_flush(*ptep);
			pfn = pte_pfn(*ptep);
			pte_unmap(ptep);
		}
		if (needs_flush) {
			if (parisc_requires_coherency()) {
				flush_user_cache_page(vma, addr);
			} else {
				if (WARN_ON(!pfn_valid(pfn)))
					return;
				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
			}
		}
	}
}

static inline unsigned long mm_total_size(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long usize = 0;
	VMA_ITERATOR(vmi, mm, 0);

	for_each_vma(vmi, vma) {
		if (usize >= parisc_cache_flush_threshold)
			break;
		usize += vma->vm_end - vma->vm_start;
	}
	return usize;
}

void flush_cache_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	/*
	 * Flushing the whole cache on each cpu takes forever on
	 * rp3440, etc. So, avoid it if the mm isn't too big.
	 *
	 * Note that we must flush the entire cache on machines
	 * with aliasing caches to prevent random segmentation
	 * faults.
	 */
	if (!parisc_requires_coherency()
	    || mm_total_size(mm) >= parisc_cache_flush_threshold) {
		if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
			return;
		flush_tlb_all();
		flush_cache_all();
		return;
	}

	/* Flush mm */
	for_each_vma(vmi, vma)
		flush_cache_pages(vma, vma->vm_start, vma->vm_end);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (!parisc_requires_coherency()
	    || end - start >= parisc_cache_flush_threshold) {
		if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
			return;
		flush_tlb_range(vma, start, end);
		flush_cache_all();
		return;
	}

	flush_cache_pages(vma, start, end);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
	if (WARN_ON(!pfn_valid(pfn)))
		return;
	if (parisc_requires_coherency())
		flush_user_cache_page(vma, vmaddr);
	else
		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	if (!PageAnon(page))
		return;

	if (parisc_requires_coherency()) {
		if (vma->vm_flags & VM_SHARED)
			flush_data_cache();
		else
			flush_user_cache_page(vma, vmaddr);
		return;
	}

	flush_tlb_page(vma, vmaddr);
	preempt_disable();
	flush_dcache_page_asm(page_to_phys(page), vmaddr);
	preempt_enable();
}

void flush_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    (unsigned long)size >= parisc_cache_flush_threshold) {
		flush_tlb_kernel_range(start, end);
		flush_data_cache();
		return;
	}

	flush_kernel_dcache_range_asm(start, end);
	flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(flush_kernel_vmap_range);

void invalidate_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	/* Ensure DMA is complete */
	asm_syncdma();

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    (unsigned long)size >= parisc_cache_flush_threshold) {
		flush_tlb_kernel_range(start, end);
		flush_data_cache();
		return;
	}

	purge_kernel_dcache_range_asm(start, end);
	flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);

SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
	unsigned int, cache)
{
	unsigned long start, end;
	ASM_EXCEPTIONTABLE_VAR(error);

	if (bytes == 0)
		return 0;
	if (!access_ok((void __user *) addr, bytes))
		return -EFAULT;

	end = addr + bytes;

	if (cache & DCACHE) {
		start = addr;
		__asm__ __volatile__ (
#ifdef CONFIG_64BIT
		"1: cmpb,*<<,n	%0,%2,1b\n"
#else
		"1: cmpb,<<,n	%0,%2,1b\n"
#endif
		/* fdc (not fic) flushes the data cache; it pairs with
		   dcache_stride below. */
		"   fdc,m	%3(%4,%0)\n"
		"2: sync\n"
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b, "%1")
		: "+r" (start), "+r" (error)
		: "r" (end), "r" (dcache_stride), "i" (SR_USER));
	}

	if (cache & ICACHE && error == 0) {
		start = addr;
		__asm__ __volatile__ (
#ifdef CONFIG_64BIT
		"1: cmpb,*<<,n	%0,%2,1b\n"
#else
		"1: cmpb,<<,n	%0,%2,1b\n"
#endif
		/* fic flushes the instruction cache; it pairs with
		   icache_stride below. */
		"   fic,m	%3(%4,%0)\n"
		"2: sync\n"
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b, "%1")
		: "+r" (start), "+r" (error)
		: "r" (end), "r" (icache_stride), "i" (SR_USER));
	}

	return error;
}
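
/*
 * Userspace usage example (illustrative): flush a buffer of freshly
 * written machine code before jumping to it, with DCACHE and ICACHE
 * taken from <asm/cachectl.h>:
 *
 *	syscall(__NR_cacheflush, (unsigned long)buf, len, DCACHE | ICACHE);
 */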