/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>

int split_tlb __read_mostly;
int dcache_stride __read_mostly;
int icache_stride __read_mostly;
EXPORT_SYMBOL(dcache_stride);

void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);


/* On some machines (e.g. ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software. We put a spinlock around all TLB flushes to
 * ensure this.
 */
DEFINE_SPINLOCK(pa_tlb_lock);
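
/* For reference, a minimal sketch of the purge_tlb_start()/purge_tlb_end()
 * helpers used throughout this file, built on pa_tlb_lock.  The real
 * definitions live in <asm/tlbflush.h>; treat this exact form as an
 * assumption, not the authoritative version:
 *
 *   #define purge_tlb_start(flags)  spin_lock_irqsave(&pa_tlb_lock, flags)
 *   #define purge_tlb_end(flags)    spin_unlock_irqrestore(&pa_tlb_lock, flags)
 *
 * Serializing all purges under one IRQ-disabled spinlock is what provides
 * the "single PxTLB broadcast at a time" guarantee described above.
 */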

struct pdc_cache_info cache_info __read_mostly;
#ifndef CONFIG_PA20
static struct pdc_btlb_info btlb_info __read_mostly;
#endif

#ifdef CONFIG_SMP
void
flush_data_cache(void)
{
        on_each_cpu(flush_data_cache_local, NULL, 1);
}
void
flush_instruction_cache(void)
{
        on_each_cpu(flush_instruction_cache_local, NULL, 1);
}
#endif

void
flush_cache_all_local(void)
{
        flush_instruction_cache_local(NULL);
        flush_data_cache_local(NULL);
}
EXPORT_SYMBOL(flush_cache_all_local);

/* Virtual address of pfn. */
#define pfn_va(pfn)     __va(PFN_PHYS(pfn))

void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
        unsigned long pfn = pte_pfn(*ptep);
        struct page *page;

        /* We don't have pte special. As a result, we can be called with
           an invalid pfn and we don't need to flush the kernel dcache page.
           This occurs with FireGL card in C8000. */
        if (!pfn_valid(pfn))
                return;

        page = pfn_to_page(pfn);
        if (page_mapping(page) && test_bit(PG_dcache_dirty, &page->flags)) {
                flush_kernel_dcache_page_addr(pfn_va(pfn));
                clear_bit(PG_dcache_dirty, &page->flags);
        } else if (parisc_requires_coherency())
                flush_kernel_dcache_page_addr(pfn_va(pfn));
}
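
/* The deferred-flush protocol in one place (a summary of code in this
 * file, not new behaviour): flush_dcache_page() below sets PG_dcache_dirty
 * instead of flushing when a page has no user mappings yet; once the first
 * user PTE is installed, update_mmu_cache() sees the bit and performs the
 * postponed kernel dcache flush.  One lazy flush at first fault replaces
 * an eager flush that might be wasted.
 */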

void
show_cache_info(struct seq_file *m)
{
        char buf[32];

        seq_printf(m, "I-cache\t\t: %ld KB\n",
                cache_info.ic_size/1024 );
        if (cache_info.dc_loop != 1)
                snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
        seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n",
                cache_info.dc_size/1024,
                (cache_info.dc_conf.cc_wt ? "WT":"WB"),
                (cache_info.dc_conf.cc_sh ? ", shared I/D":""),
                ((cache_info.dc_loop == 1) ? "direct mapped" : buf));
        seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
                cache_info.it_size,
                cache_info.dt_size,
                cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
                );

#ifndef CONFIG_PA20
        /* BTLB - Block TLB */
        if (btlb_info.max_size==0) {
                seq_printf(m, "BTLB\t\t: not supported\n" );
        } else {
                seq_printf(m,
                "BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
                "BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
                "BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
                btlb_info.max_size, (int)4096,
                btlb_info.max_size>>8,
                btlb_info.fixed_range_info.num_i,
                btlb_info.fixed_range_info.num_d,
                btlb_info.fixed_range_info.num_comb,
                btlb_info.variable_range_info.num_i,
                btlb_info.variable_range_info.num_d,
                btlb_info.variable_range_info.num_comb
                );
        }
#endif
}

void __init
parisc_cache_init(void)
{
        if (pdc_cache_info(&cache_info) < 0)
                panic("parisc_cache_init: pdc_cache_info failed");

#if 0
        printk("ic_size %lx dc_size %lx it_size %lx\n",
                cache_info.ic_size,
                cache_info.dc_size,
                cache_info.it_size);

        printk("DC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
                cache_info.dc_base,
                cache_info.dc_stride,
                cache_info.dc_count,
                cache_info.dc_loop);

        printk("dc_conf = 0x%lx alias %d blk %d line %d shift %d\n",
                *(unsigned long *) (&cache_info.dc_conf),
                cache_info.dc_conf.cc_alias,
                cache_info.dc_conf.cc_block,
                cache_info.dc_conf.cc_line,
                cache_info.dc_conf.cc_shift);
        printk(" wt %d sh %d cst %d hv %d\n",
                cache_info.dc_conf.cc_wt,
                cache_info.dc_conf.cc_sh,
                cache_info.dc_conf.cc_cst,
                cache_info.dc_conf.cc_hv);

        printk("IC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
                cache_info.ic_base,
                cache_info.ic_stride,
                cache_info.ic_count,
                cache_info.ic_loop);

        printk("IT base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
                cache_info.it_sp_base,
                cache_info.it_sp_stride,
                cache_info.it_sp_count,
                cache_info.it_loop,
                cache_info.it_off_base,
                cache_info.it_off_stride,
                cache_info.it_off_count);

        printk("DT base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
                cache_info.dt_sp_base,
                cache_info.dt_sp_stride,
                cache_info.dt_sp_count,
                cache_info.dt_loop,
                cache_info.dt_off_base,
                cache_info.dt_off_stride,
                cache_info.dt_off_count);

        printk("ic_conf = 0x%lx alias %d blk %d line %d shift %d\n",
                *(unsigned long *) (&cache_info.ic_conf),
                cache_info.ic_conf.cc_alias,
                cache_info.ic_conf.cc_block,
                cache_info.ic_conf.cc_line,
                cache_info.ic_conf.cc_shift);
        printk(" wt %d sh %d cst %d hv %d\n",
                cache_info.ic_conf.cc_wt,
                cache_info.ic_conf.cc_sh,
                cache_info.ic_conf.cc_cst,
                cache_info.ic_conf.cc_hv);

        printk("D-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
                cache_info.dt_conf.tc_sh,
                cache_info.dt_conf.tc_page,
                cache_info.dt_conf.tc_cst,
                cache_info.dt_conf.tc_aid,
                cache_info.dt_conf.tc_sr);

        printk("I-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
                cache_info.it_conf.tc_sh,
                cache_info.it_conf.tc_page,
                cache_info.it_conf.tc_cst,
                cache_info.it_conf.tc_aid,
                cache_info.it_conf.tc_sr);
#endif

        split_tlb = 0;
        if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
                if (cache_info.dt_conf.tc_sh == 2)
                        printk(KERN_WARNING "Unexpected TLB configuration. "
                                "Will flush I/D separately (could be optimized).\n");

                split_tlb = 1;
        }

        /* "New and Improved" version from Jim Hull
         *      (1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
         * The following CAFL_STRIDE is an optimized version, see
         * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
         * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
         */
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
        dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
        icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE
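
/* Worked example of the stride computation (hypothetical PDC values chosen
 * only to show the arithmetic): with cc_line = 4, cc_block = 1 and
 * cc_shift = 2, CAFL_STRIDE yields 4 << (3 + 1 + 2) = 256, i.e. the flush
 * loops in pacache.S step through the cache 256 bytes at a time.  The
 * optimized form agrees with Jim Hull's original, since
 * (1 << (cc_block-1)) * (cc_line << (4 + cc_shift))
 *     = cc_line << (4 + cc_shift + cc_block - 1)
 *     = cc_line << (3 + cc_block + cc_shift).
 */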

#ifndef CONFIG_PA20
        if (pdc_btlb_info(&btlb_info) < 0) {
                memset(&btlb_info, 0, sizeof btlb_info);
        }
#endif

        if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
                                        PDC_MODEL_NVA_UNSUPPORTED) {
                printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
                panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
        }
}

void disable_sr_hashing(void)
{
        int srhash_type, retval;
        unsigned long space_bits;

        switch (boot_cpu_data.cpu_type) {
        case pcx:       /* We shouldn't get this far. setup.c should prevent it. */
                BUG();
                return;

        case pcxs:
        case pcxt:
        case pcxt_:
                srhash_type = SRHASH_PCXST;
                break;

        case pcxl:
                srhash_type = SRHASH_PCXL;
                break;

        case pcxl2:     /* pcxl2 doesn't support space register hashing */
                return;

        default:        /* Currently all PA2.0 machines use the same ins. sequence */
                srhash_type = SRHASH_PA20;
                break;
        }

        disable_sr_hashing_asm(srhash_type);

        retval = pdc_spaceid_bits(&space_bits);
        /* If this procedure isn't implemented, don't panic. */
        if (retval < 0 && retval != PDC_BAD_OPTION)
                panic("pdc_spaceid_bits call failed.\n");
        if (space_bits != 0)
                panic("SpaceID hashing is still on!\n");
}

static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
                   unsigned long physaddr)
{
        preempt_disable();
        flush_dcache_page_asm(physaddr, vmaddr);
        if (vma->vm_flags & VM_EXEC)
                flush_icache_page_asm(physaddr, vmaddr);
        preempt_enable();
}

void flush_dcache_page(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        struct vm_area_struct *mpnt;
        unsigned long offset;
        unsigned long addr, old_addr = 0;
        pgoff_t pgoff;

        if (mapping && !mapping_mapped(mapping)) {
                set_bit(PG_dcache_dirty, &page->flags);
                return;
        }

        flush_kernel_dcache_page(page);

        if (!mapping)
                return;

        pgoff = page->index;

        /* We have carefully arranged in arch_get_unmapped_area() that
         * *any* mappings of a file are always congruently mapped (whether
         * declared as MAP_PRIVATE or MAP_SHARED), so we only need
         * to flush one address here for them all to become coherent */

        flush_dcache_mmap_lock(mapping);
        vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
                offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
                addr = mpnt->vm_start + offset;

                /* The TLB is the engine of coherence on parisc: The
                 * CPU is entitled to speculate any page with a TLB
                 * mapping, so here we kill the mapping then flush the
                 * page along a special flush-only alias mapping.
                 * This guarantees that the page is no longer in the
                 * cache for any process, nor may it be
                 * speculatively read in (until the user or kernel
                 * specifically accesses it, of course) */

                flush_tlb_page(mpnt, addr);
                if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
                                      != (addr & (SHM_COLOUR - 1))) {
                        __flush_cache_page(mpnt, addr, page_to_phys(page));
                        if (old_addr)
                                printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n", old_addr, addr, mpnt->vm_file);
                        old_addr = addr;
                }
        }
        flush_dcache_mmap_unlock(mapping);
}
EXPORT_SYMBOL(flush_dcache_page);
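
/* A concrete view of the SHM_COLOUR congruence test above (assuming the
 * usual parisc SHM_COLOUR of 0x00400000, i.e. 4 MB - see <asm/shmparam.h>):
 * user addresses 0x00500000 and 0x02900000 share offset 0x100000 within
 * their 4 MB colour, so they are equivalent aliases and one
 * __flush_cache_page() covers both.  An address such as 0x02980000 differs
 * in the low 22 bits, would be flushed separately, and would trigger the
 * "INEQUIVALENT ALIASES" diagnostic.
 */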

/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_dcache_page_asm);
EXPORT_SYMBOL(flush_data_cache_local);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
static unsigned long parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;

#define FLUSH_TLB_THRESHOLD (2*1024*1024) /* 2MB initial TLB threshold */
static unsigned long parisc_tlb_flush_threshold __read_mostly = FLUSH_TLB_THRESHOLD;

void __init parisc_setup_cache_timing(void)
{
        unsigned long rangetime, alltime;
        unsigned long size, start;
        unsigned long threshold;

        alltime = mfctl(16);
        flush_data_cache();
        alltime = mfctl(16) - alltime;

        size = (unsigned long)(_end - _text);
        rangetime = mfctl(16);
        flush_kernel_dcache_range((unsigned long)_text, size);
        rangetime = mfctl(16) - rangetime;

        printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
                alltime, size, rangetime);

        threshold = L1_CACHE_ALIGN(size * alltime / rangetime);
        if (threshold > cache_info.dc_size)
                threshold = cache_info.dc_size;
        if (threshold)
                parisc_cache_flush_threshold = threshold;
        printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
                parisc_cache_flush_threshold/1024);

        /* calculate TLB flush threshold */

        /* On SMP machines, skip the TLB measure of kernel text which
         * has been mapped as huge pages. */
        if (num_online_cpus() > 1 && !parisc_requires_coherency()) {
                threshold = max(cache_info.it_size, cache_info.dt_size);
                threshold *= PAGE_SIZE;
                threshold /= num_online_cpus();
                goto set_tlb_threshold;
        }

        alltime = mfctl(16);
        flush_tlb_all();
        alltime = mfctl(16) - alltime;

        size = 0;
        start = (unsigned long) _text;
        rangetime = mfctl(16);
        while (start < (unsigned long) _end) {
                flush_tlb_kernel_range(start, start + PAGE_SIZE);
                start += PAGE_SIZE;
                size += PAGE_SIZE;
        }
        rangetime = mfctl(16) - rangetime;

        printk(KERN_DEBUG "Whole TLB flush %lu cycles, flushing %lu bytes %lu cycles\n",
                alltime, size, rangetime);

        threshold = PAGE_ALIGN(num_online_cpus() * size * alltime / rangetime);

set_tlb_threshold:
        if (threshold)
                parisc_tlb_flush_threshold = threshold;
        printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
                parisc_tlb_flush_threshold/1024);
}
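
/* Break-even arithmetic behind the threshold, with made-up numbers purely
 * for illustration: if the whole-cache flush costs alltime = 100000 cycles
 * and flushing the size = 2 MB kernel image by range costs rangetime =
 * 400000 cycles, then ranges larger than size * alltime / rangetime =
 * 512 KB are cheaper to handle as a full flush.  That quotient, cache-line
 * aligned and clamped to dc_size, is what lands in
 * parisc_cache_flush_threshold; the TLB threshold above is derived the
 * same way, scaled by num_online_cpus().
 */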

extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);

void flush_kernel_dcache_page_addr(void *addr)
{
        unsigned long flags;

        flush_kernel_dcache_page_asm(addr);
        purge_tlb_start(flags);
        pdtlb_kernel(addr);
        purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
        struct page *pg)
{
        /* Copy using kernel mapping. No coherency is needed (all in
           kunmap) for the `to' page. However, the `from' page needs to
           be flushed through a mapping equivalent to the user mapping
           before it can be accessed through the kernel mapping. */
        preempt_disable();
        flush_dcache_page_asm(__pa(vfrom), vaddr);
        preempt_enable();
        copy_page_asm(vto, vfrom);
}
EXPORT_SYMBOL(copy_user_page);

/* __flush_tlb_range()
 *
 * returns 1 if all TLBs were flushed.
 */
int __flush_tlb_range(unsigned long sid, unsigned long start,
                      unsigned long end)
{
        unsigned long flags, size;

        size = (end - start);
        if (size >= parisc_tlb_flush_threshold) {
                flush_tlb_all();
                return 1;
        }

        /* Purge TLB entries for small ranges using the pdtlb and
           pitlb instructions. These instructions execute locally
           but cause a purge request to be broadcast to other TLBs. */
        if (likely(!split_tlb)) {
                while (start < end) {
                        purge_tlb_start(flags);
                        mtsp(sid, 1);
                        pdtlb(start);
                        purge_tlb_end(flags);
                        start += PAGE_SIZE;
                }
                return 0;
        }

        /* split TLB case */
        while (start < end) {
                purge_tlb_start(flags);
                mtsp(sid, 1);
                pdtlb(start);
                pitlb(start);
                purge_tlb_end(flags);
                start += PAGE_SIZE;
        }
        return 0;
}
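
/* How callers reach __flush_tlb_range(): the wrapper macros live in
 * <asm/tlbflush.h>, so treat the exact form quoted here as an assumption
 * rather than the canonical definition:
 *
 *   #define flush_tlb_range(vma, start, end) \
 *           __flush_tlb_range((vma)->vm_mm->context, start, end)
 *
 * flush_tlb_kernel_range() passes the kernel space id the same way.  The
 * sid argument selects whose translations are purged, and the return
 * value tells the caller that a full flush already covered the range.
 */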

static void cacheflush_h_tmp_function(void *dummy)
{
        flush_cache_all_local();
}

void flush_cache_all(void)
{
        on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
}

static inline unsigned long mm_total_size(struct mm_struct *mm)
{
        struct vm_area_struct *vma;
        unsigned long usize = 0;

        for (vma = mm->mmap; vma; vma = vma->vm_next)
                usize += vma->vm_end - vma->vm_start;
        return usize;
}

static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
{
        pte_t *ptep = NULL;

        if (!pgd_none(*pgd)) {
                pud_t *pud = pud_offset(pgd, addr);
                if (!pud_none(*pud)) {
                        pmd_t *pmd = pmd_offset(pud, addr);
                        if (!pmd_none(*pmd))
                                ptep = pte_offset_map(pmd, addr);
                }
        }
        return ptep;
}

void flush_cache_mm(struct mm_struct *mm)
{
        struct vm_area_struct *vma;
        pgd_t *pgd;

        /* Flushing the whole cache on each cpu takes forever on
           rp3440, etc. So, avoid it if the mm isn't too big. */
        if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
                flush_cache_all();
                return;
        }

        if (mm->context == mfsp(3)) {
                for (vma = mm->mmap; vma; vma = vma->vm_next) {
                        flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
                        if ((vma->vm_flags & VM_EXEC) == 0)
                                continue;
                        flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
                }
                return;
        }

        pgd = mm->pgd;
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                unsigned long addr;

                for (addr = vma->vm_start; addr < vma->vm_end;
                     addr += PAGE_SIZE) {
                        unsigned long pfn;
                        pte_t *ptep = get_ptep(pgd, addr);
                        if (!ptep)
                                continue;
                        pfn = pte_pfn(*ptep);
                        if (!pfn_valid(pfn))
                                continue;
                        __flush_cache_page(vma, addr, PFN_PHYS(pfn));
                }
        }
}

void
flush_user_dcache_range(unsigned long start, unsigned long end)
{
        if ((end - start) < parisc_cache_flush_threshold)
                flush_user_dcache_range_asm(start, end);
        else
                flush_data_cache();
}

void
flush_user_icache_range(unsigned long start, unsigned long end)
{
        if ((end - start) < parisc_cache_flush_threshold)
                flush_user_icache_range_asm(start, end);
        else
                flush_instruction_cache();
}

void flush_cache_range(struct vm_area_struct *vma,
                unsigned long start, unsigned long end)
{
        unsigned long addr;
        pgd_t *pgd;

        BUG_ON(!vma->vm_mm->context);

        if ((end - start) >= parisc_cache_flush_threshold) {
                flush_cache_all();
                return;
        }

        if (vma->vm_mm->context == mfsp(3)) {
                flush_user_dcache_range_asm(start, end);
                if (vma->vm_flags & VM_EXEC)
                        flush_user_icache_range_asm(start, end);
                return;
        }

        pgd = vma->vm_mm->pgd;
        for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
                unsigned long pfn;
                pte_t *ptep = get_ptep(pgd, addr);
                if (!ptep)
                        continue;
                pfn = pte_pfn(*ptep);
                if (pfn_valid(pfn))
                        __flush_cache_page(vma, addr, PFN_PHYS(pfn));
        }
}

void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
        BUG_ON(!vma->vm_mm->context);

        if (pfn_valid(pfn)) {
                flush_tlb_page(vma, vmaddr);
                __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
        }
}

void flush_kernel_vmap_range(void *vaddr, int size)
{
        unsigned long start = (unsigned long)vaddr;

        if ((unsigned long)size > parisc_cache_flush_threshold)
                flush_data_cache();
        else
                flush_kernel_dcache_range_asm(start, start + size);
}
EXPORT_SYMBOL(flush_kernel_vmap_range);

void invalidate_kernel_vmap_range(void *vaddr, int size)
{
        unsigned long start = (unsigned long)vaddr;

        if ((unsigned long)size > parisc_cache_flush_threshold)
                flush_data_cache();
        else
                flush_kernel_dcache_range_asm(start, start + size);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);