arch/parisc/kernel/cache.c (v5.4)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>

int split_tlb __ro_after_init;
int dcache_stride __ro_after_init;
int icache_stride __ro_after_init;
EXPORT_SYMBOL(dcache_stride);

void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);

/* On some machines (i.e., ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software. We need a spinlock around all TLB flushes to ensure
 * this.
 */
DEFINE_SPINLOCK(pa_tlb_flush_lock);
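
/*
 * A minimal usage sketch: TLB purges in this file go through the
 * purge_tlb_start()/purge_tlb_end() helper pair (see <asm/tlbflush.h>),
 * which serializes the broadcast by taking this lock, with IRQs off,
 * when the machine requires it:
 *
 *	unsigned long flags;
 *
 *	purge_tlb_start(flags);
 *	pdtlb_kernel(addr);
 *	purge_tlb_end(flags);
 */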

/* Swapper page setup lock. */
DEFINE_SPINLOCK(pa_swapper_pg_lock);

#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
int pa_serialize_tlb_flushes __ro_after_init;
#endif

struct pdc_cache_info cache_info __ro_after_init;
#ifndef CONFIG_PA20
static struct pdc_btlb_info btlb_info __ro_after_init;
#endif

#ifdef CONFIG_SMP
void
flush_data_cache(void)
{
	on_each_cpu(flush_data_cache_local, NULL, 1);
}
void
flush_instruction_cache(void)
{
	on_each_cpu(flush_instruction_cache_local, NULL, 1);
}
#endif

void
flush_cache_all_local(void)
{
	flush_instruction_cache_local(NULL);
	flush_data_cache_local(NULL);
}
EXPORT_SYMBOL(flush_cache_all_local);

/* Virtual address of pfn.  */
#define pfn_va(pfn)	__va(PFN_PHYS(pfn))

void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct page *page;

	/* We don't have pte special.  As a result, we can be called with
	   an invalid pfn and we don't need to flush the kernel dcache page.
	   This occurs with FireGL card in C8000.  */
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (page_mapping_file(page) &&
	    test_bit(PG_dcache_dirty, &page->flags)) {
		flush_kernel_dcache_page_addr(pfn_va(pfn));
		clear_bit(PG_dcache_dirty, &page->flags);
	} else if (parisc_requires_coherency())
		flush_kernel_dcache_page_addr(pfn_va(pfn));
}

void
show_cache_info(struct seq_file *m)
{
	char buf[32];

	seq_printf(m, "I-cache\t\t: %ld KB\n",
		cache_info.ic_size/1024 );
	if (cache_info.dc_loop != 1)
		snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
	seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n",
		cache_info.dc_size/1024,
		(cache_info.dc_conf.cc_wt ? "WT":"WB"),
		(cache_info.dc_conf.cc_sh ? ", shared I/D":""),
		((cache_info.dc_loop == 1) ? "direct mapped" : buf));
	seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
		cache_info.it_size,
		cache_info.dt_size,
		cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
	);

#ifndef CONFIG_PA20
	/* BTLB - Block TLB */
	if (btlb_info.max_size==0) {
		seq_printf(m, "BTLB\t\t: not supported\n" );
	} else {
		seq_printf(m,
		"BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
		"BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
		"BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
		btlb_info.max_size, (int)4096,
		btlb_info.max_size>>8,
		btlb_info.fixed_range_info.num_i,
		btlb_info.fixed_range_info.num_d,
		btlb_info.fixed_range_info.num_comb,
		btlb_info.variable_range_info.num_i,
		btlb_info.variable_range_info.num_d,
		btlb_info.variable_range_info.num_comb
		);
	}
#endif
}

void __init
parisc_cache_init(void)
{
	if (pdc_cache_info(&cache_info) < 0)
		panic("parisc_cache_init: pdc_cache_info failed");

#if 0
	printk("ic_size %lx dc_size %lx it_size %lx\n",
		cache_info.ic_size,
		cache_info.dc_size,
		cache_info.it_size);

	printk("DC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.dc_base,
		cache_info.dc_stride,
		cache_info.dc_count,
		cache_info.dc_loop);

	printk("dc_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.dc_conf),
		cache_info.dc_conf.cc_alias,
		cache_info.dc_conf.cc_block,
		cache_info.dc_conf.cc_line,
		cache_info.dc_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.dc_conf.cc_wt,
		cache_info.dc_conf.cc_sh,
		cache_info.dc_conf.cc_cst,
		cache_info.dc_conf.cc_hv);

	printk("IC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.ic_base,
		cache_info.ic_stride,
		cache_info.ic_count,
		cache_info.ic_loop);

	printk("IT  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
		cache_info.it_sp_base,
		cache_info.it_sp_stride,
		cache_info.it_sp_count,
		cache_info.it_loop,
		cache_info.it_off_base,
		cache_info.it_off_stride,
		cache_info.it_off_count);

	printk("DT  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
		cache_info.dt_sp_base,
		cache_info.dt_sp_stride,
		cache_info.dt_sp_count,
		cache_info.dt_loop,
		cache_info.dt_off_base,
		cache_info.dt_off_stride,
		cache_info.dt_off_count);

	printk("ic_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.ic_conf),
		cache_info.ic_conf.cc_alias,
		cache_info.ic_conf.cc_block,
		cache_info.ic_conf.cc_line,
		cache_info.ic_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.ic_conf.cc_wt,
		cache_info.ic_conf.cc_sh,
		cache_info.ic_conf.cc_cst,
		cache_info.ic_conf.cc_hv);

	printk("D-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
		cache_info.dt_conf.tc_sh,
		cache_info.dt_conf.tc_page,
		cache_info.dt_conf.tc_cst,
		cache_info.dt_conf.tc_aid,
		cache_info.dt_conf.tc_sr);

	printk("I-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
		cache_info.it_conf.tc_sh,
		cache_info.it_conf.tc_page,
		cache_info.it_conf.tc_cst,
		cache_info.it_conf.tc_aid,
		cache_info.it_conf.tc_sr);
#endif

	split_tlb = 0;
	if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
		if (cache_info.dt_conf.tc_sh == 2)
			printk(KERN_WARNING "Unexpected TLB configuration. "
			"Will flush I/D separately (could be optimized).\n");

		split_tlb = 1;
	}

	/* "New and Improved" version from Jim Hull
	 *	(1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
	 * The following CAFL_STRIDE is an optimized version, see
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
	 */
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
	dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
	icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE
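
	/*
	 * Worked example, with illustrative field values rather than a
	 * real PDC report: cc_line = 4, cc_block = 1, cc_shift = 2 gives
	 * a stride of 4 << (3 + 1 + 2) = 256 bytes, i.e. the flush loops
	 * in pacache.S advance 256 bytes per iteration.
	 */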

#ifndef CONFIG_PA20
	if (pdc_btlb_info(&btlb_info) < 0) {
		memset(&btlb_info, 0, sizeof btlb_info);
	}
#endif

	if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
						PDC_MODEL_NVA_UNSUPPORTED) {
		printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
		panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
	}
}

void __init disable_sr_hashing(void)
{
	int srhash_type, retval;
	unsigned long space_bits;

	switch (boot_cpu_data.cpu_type) {
	case pcx: /* We shouldn't get this far.  setup.c should prevent it. */
		BUG();
		return;

	case pcxs:
	case pcxt:
	case pcxt_:
		srhash_type = SRHASH_PCXST;
		break;

	case pcxl:
		srhash_type = SRHASH_PCXL;
		break;

	case pcxl2: /* pcxl2 doesn't support space register hashing */
		return;

	default: /* Currently all PA2.0 machines use the same ins. sequence */
		srhash_type = SRHASH_PA20;
		break;
	}

	disable_sr_hashing_asm(srhash_type);

	retval = pdc_spaceid_bits(&space_bits);
	/* If this procedure isn't implemented, don't panic. */
	if (retval < 0 && retval != PDC_BAD_OPTION)
		panic("pdc_spaceid_bits call failed.\n");
	if (space_bits != 0)
		panic("SpaceID hashing is still on!\n");
}

static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
{
	preempt_disable();
	flush_dcache_page_asm(physaddr, vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
	preempt_enable();
}
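
/*
 * The preempt_disable() above (and in __purge_cache_page() below)
 * brackets the assembly routines because they flush the page through a
 * temporary alias mapping; the thread must stay on one CPU so the alias
 * set up there is the one actually flushed. This rationale is an
 * assumption drawn from the pacache.S implementation, not spelled out
 * in this file.
 */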

static inline void
__purge_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
{
	preempt_disable();
	purge_dcache_page_asm(physaddr, vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
	preempt_enable();
}
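
/*
 * Flush vs. purge, as used above: in PA-RISC terms a data-cache flush
 * (fdc) writes dirty lines back to memory before invalidating them,
 * while a purge (pdc) may discard them without write-back. Hence
 * __purge_cache_page() is reserved for pages whose contents no longer
 * matter, e.g. when the mm has no context (see flush_cache_mm() below).
 */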

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping_file(page);
	struct vm_area_struct *mpnt;
	unsigned long offset;
	unsigned long addr, old_addr = 0;
	pgoff_t pgoff;

	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_dcache_dirty, &page->flags);
		return;
	}

	flush_kernel_dcache_page(page);

	if (!mapping)
		return;

	pgoff = page->index;

	/* We have carefully arranged in arch_get_unmapped_area() that
	 * *any* mappings of a file are always congruently mapped (whether
	 * declared as MAP_PRIVATE or MAP_SHARED), so we only need
	 * to flush one address here for them all to become coherent */

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		addr = mpnt->vm_start + offset;

		/* The TLB is the engine of coherence on parisc: The
		 * CPU is entitled to speculate any page with a TLB
		 * mapping, so here we kill the mapping then flush the
		 * page along a special flush only alias mapping.
		 * This guarantees that the page is no-longer in the
		 * cache for any process and nor may it be
		 * speculatively read in (until the user or kernel
		 * specifically accesses it, of course) */

		flush_tlb_page(mpnt, addr);
		if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
				      != (addr & (SHM_COLOUR - 1))) {
			__flush_cache_page(mpnt, addr, page_to_phys(page));
			if (old_addr)
				printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n", old_addr, addr, mpnt->vm_file);
			old_addr = addr;
		}
	}
	flush_dcache_mmap_unlock(mapping);
}
EXPORT_SYMBOL(flush_dcache_page);

/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_dcache_page_asm);
EXPORT_SYMBOL(flush_data_cache_local);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
static unsigned long parisc_cache_flush_threshold __ro_after_init = FLUSH_THRESHOLD;

#define FLUSH_TLB_THRESHOLD (16*1024) /* 16 KiB minimum TLB threshold */
static unsigned long parisc_tlb_flush_threshold __ro_after_init = FLUSH_TLB_THRESHOLD;

void __init parisc_setup_cache_timing(void)
{
	unsigned long rangetime, alltime;
	unsigned long size, start;
	unsigned long threshold;

	alltime = mfctl(16);
	flush_data_cache();
	alltime = mfctl(16) - alltime;

	size = (unsigned long)(_end - _text);
	rangetime = mfctl(16);
	flush_kernel_dcache_range((unsigned long)_text, size);
	rangetime = mfctl(16) - rangetime;

	printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	threshold = L1_CACHE_ALIGN(size * alltime / rangetime);
	if (threshold > cache_info.dc_size)
		threshold = cache_info.dc_size;
	if (threshold)
		parisc_cache_flush_threshold = threshold;
	printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
		parisc_cache_flush_threshold/1024);

	/* calculate TLB flush threshold */

	/* On SMP machines, skip the TLB measure of kernel text which
	 * has been mapped as huge pages. */
	if (num_online_cpus() > 1 && !parisc_requires_coherency()) {
		threshold = max(cache_info.it_size, cache_info.dt_size);
		threshold *= PAGE_SIZE;
		threshold /= num_online_cpus();
		goto set_tlb_threshold;
	}

	size = 0;
	start = (unsigned long) _text;
	rangetime = mfctl(16);
	while (start < (unsigned long) _end) {
		flush_tlb_kernel_range(start, start + PAGE_SIZE);
		start += PAGE_SIZE;
		size += PAGE_SIZE;
	}
	rangetime = mfctl(16) - rangetime;

	alltime = mfctl(16);
	flush_tlb_all();
	alltime = mfctl(16) - alltime;

	printk(KERN_INFO "Whole TLB flush %lu cycles, Range flush %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	threshold = PAGE_ALIGN((num_online_cpus() * size * alltime) / rangetime);
	printk(KERN_INFO "Calculated TLB flush threshold %lu KiB\n",
		threshold/1024);

set_tlb_threshold:
	if (threshold > parisc_tlb_flush_threshold)
		parisc_tlb_flush_threshold = threshold;
	printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
		parisc_tlb_flush_threshold/1024);
}
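
/*
 * Worked example of the heuristic above, with made-up timings: if the
 * whole-cache flush costs 100000 cycles and flushing the 4 MiB of
 * kernel text costs 800000 cycles, the break-even point is about
 * 4 MiB * 100000 / 800000 = 512 KiB; ranges larger than that are
 * cheaper to handle as a full cache flush.
 */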

extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);

void flush_kernel_dcache_page_addr(void *addr)
{
	unsigned long flags;

	flush_kernel_dcache_page_asm(addr);
	purge_tlb_start(flags);
	pdtlb_kernel(addr);
	purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
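
/*
 * The TLB purge following the dcache flush above is what makes the
 * flush stick: as the comment in flush_dcache_page() notes, the TLB is
 * the engine of coherence on parisc, so dropping the kernel translation
 * prevents the CPU from speculatively refilling the cache line after it
 * has been written back.
 */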

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
	struct page *pg)
{
	/* Copy using kernel mapping.  No coherency is needed (all in
	   kunmap) for the `to' page.  However, the `from' page needs to
	   be flushed through a mapping equivalent to the user mapping
	   before it can be accessed through the kernel mapping. */
	preempt_disable();
	flush_dcache_page_asm(__pa(vfrom), vaddr);
	copy_page_asm(vto, vfrom);
	preempt_enable();
}
EXPORT_SYMBOL(copy_user_page);

/* __flush_tlb_range()
 *
 * returns 1 if all TLBs were flushed.
 */
int __flush_tlb_range(unsigned long sid, unsigned long start,
		      unsigned long end)
{
	unsigned long flags;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    end - start >= parisc_tlb_flush_threshold) {
		flush_tlb_all();
		return 1;
	}

	/* Purge TLB entries for small ranges using the pdtlb and
	   pitlb instructions.  These instructions execute locally
	   but cause a purge request to be broadcast to other TLBs.  */
	while (start < end) {
		purge_tlb_start(flags);
		mtsp(sid, 1);
		pdtlb(start);
		pitlb(start);
		purge_tlb_end(flags);
		start += PAGE_SIZE;
	}
	return 0;
}

static void cacheflush_h_tmp_function(void *dummy)
{
	flush_cache_all_local();
}

void flush_cache_all(void)
{
	on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
}

static inline unsigned long mm_total_size(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long usize = 0;

	for (vma = mm->mmap; vma; vma = vma->vm_next)
		usize += vma->vm_end - vma->vm_start;
	return usize;
}

static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
{
	pte_t *ptep = NULL;

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd))
				ptep = pte_offset_map(pmd, addr);
		}
	}
	return ptep;
}

void flush_cache_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	pgd_t *pgd;

	/* Flushing the whole cache on each cpu takes forever on
	   rp3440, etc.  So, avoid it if the mm isn't too big.  */
	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    mm_total_size(mm) >= parisc_cache_flush_threshold) {
		if (mm->context)
			flush_tlb_all();
		flush_cache_all();
		return;
	}

	if (mm->context == mfsp(3)) {
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
			if (vma->vm_flags & VM_EXEC)
				flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
			flush_tlb_range(vma, vma->vm_start, vma->vm_end);
		}
		return;
	}

	pgd = mm->pgd;
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		unsigned long addr;

		for (addr = vma->vm_start; addr < vma->vm_end;
		     addr += PAGE_SIZE) {
			unsigned long pfn;
			pte_t *ptep = get_ptep(pgd, addr);
			if (!ptep)
				continue;
			pfn = pte_pfn(*ptep);
			if (!pfn_valid(pfn))
				continue;
			if (unlikely(mm->context)) {
				flush_tlb_page(vma, addr);
				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
			} else {
				__purge_cache_page(vma, addr, PFN_PHYS(pfn));
			}
		}
	}
}

void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	pgd_t *pgd;
	unsigned long addr;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    end - start >= parisc_cache_flush_threshold) {
		if (vma->vm_mm->context)
			flush_tlb_range(vma, start, end);
		flush_cache_all();
		return;
	}

	if (vma->vm_mm->context == mfsp(3)) {
		flush_user_dcache_range_asm(start, end);
		if (vma->vm_flags & VM_EXEC)
			flush_user_icache_range_asm(start, end);
		flush_tlb_range(vma, start, end);
		return;
	}

	pgd = vma->vm_mm->pgd;
	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
		unsigned long pfn;
		pte_t *ptep = get_ptep(pgd, addr);
		if (!ptep)
			continue;
		pfn = pte_pfn(*ptep);
		if (pfn_valid(pfn)) {
			if (unlikely(vma->vm_mm->context)) {
				flush_tlb_page(vma, addr);
				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
			} else {
				__purge_cache_page(vma, addr, PFN_PHYS(pfn));
			}
		}
	}
}

void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
	if (pfn_valid(pfn)) {
		if (likely(vma->vm_mm->context)) {
			flush_tlb_page(vma, vmaddr);
			__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
		} else {
			__purge_cache_page(vma, vmaddr, PFN_PHYS(pfn));
		}
	}
}

void flush_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    (unsigned long)size >= parisc_cache_flush_threshold) {
		flush_tlb_kernel_range(start, end);
		flush_data_cache();
		return;
	}

	flush_kernel_dcache_range_asm(start, end);
	flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(flush_kernel_vmap_range);

void invalidate_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    (unsigned long)size >= parisc_cache_flush_threshold) {
		flush_tlb_kernel_range(start, end);
		flush_data_cache();
		return;
	}

	purge_kernel_dcache_range_asm(start, end);
	flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);
arch/parisc/kernel/cache.c (v6.8)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/syscalls.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>
#include <asm/mmu_context.h>
#include <asm/cachectl.h>

int split_tlb __ro_after_init;
int dcache_stride __ro_after_init;
int icache_stride __ro_after_init;
EXPORT_SYMBOL(dcache_stride);

void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);

/* Internal implementation in arch/parisc/kernel/pacache.S */
void flush_data_cache_local(void *);  /* flushes local data-cache only */
void flush_instruction_cache_local(void); /* flushes local code-cache only */

/* On some machines (i.e., ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software. We need a spinlock around all TLB flushes to ensure
 * this.
 */
DEFINE_SPINLOCK(pa_tlb_flush_lock);

#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
int pa_serialize_tlb_flushes __ro_after_init;
#endif

struct pdc_cache_info cache_info __ro_after_init;
#ifndef CONFIG_PA20
struct pdc_btlb_info btlb_info;
#endif

DEFINE_STATIC_KEY_TRUE(parisc_has_cache);
DEFINE_STATIC_KEY_TRUE(parisc_has_dcache);
DEFINE_STATIC_KEY_TRUE(parisc_has_icache);

static void cache_flush_local_cpu(void *dummy)
{
	if (static_branch_likely(&parisc_has_icache))
		flush_instruction_cache_local();
	if (static_branch_likely(&parisc_has_dcache))
		flush_data_cache_local(NULL);
}

void flush_cache_all_local(void)
{
	cache_flush_local_cpu(NULL);
}

void flush_cache_all(void)
{
	if (static_branch_likely(&parisc_has_cache))
		on_each_cpu(cache_flush_local_cpu, NULL, 1);
}

static inline void flush_data_cache(void)
{
	if (static_branch_likely(&parisc_has_dcache))
		on_each_cpu(flush_data_cache_local, NULL, 1);
}


/* Kernel virtual address of pfn.  */
#define pfn_va(pfn)	__va(PFN_PHYS(pfn))

void __update_cache(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct folio *folio;
	unsigned int nr;

	/* We don't have pte special.  As a result, we can be called with
	   an invalid pfn and we don't need to flush the kernel dcache page.
	   This occurs with FireGL card in C8000.  */
	if (!pfn_valid(pfn))
		return;

	folio = page_folio(pfn_to_page(pfn));
	pfn = folio_pfn(folio);
	nr = folio_nr_pages(folio);
	if (folio_flush_mapping(folio) &&
	    test_bit(PG_dcache_dirty, &folio->flags)) {
		while (nr--)
			flush_kernel_dcache_page_addr(pfn_va(pfn + nr));
		clear_bit(PG_dcache_dirty, &folio->flags);
	} else if (parisc_requires_coherency())
		while (nr--)
			flush_kernel_dcache_page_addr(pfn_va(pfn + nr));
}

void
show_cache_info(struct seq_file *m)
{
	char buf[32];

	seq_printf(m, "I-cache\t\t: %ld KB\n",
		cache_info.ic_size/1024 );
	if (cache_info.dc_loop != 1)
		snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
	seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s, alias=%d)\n",
		cache_info.dc_size/1024,
		(cache_info.dc_conf.cc_wt ? "WT":"WB"),
		(cache_info.dc_conf.cc_sh ? ", shared I/D":""),
		((cache_info.dc_loop == 1) ? "direct mapped" : buf),
		cache_info.dc_conf.cc_alias
	);
	seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
		cache_info.it_size,
		cache_info.dt_size,
		cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
	);

#ifndef CONFIG_PA20
	/* BTLB - Block TLB */
	if (btlb_info.max_size==0) {
		seq_printf(m, "BTLB\t\t: not supported\n" );
	} else {
		seq_printf(m,
		"BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
		"BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
		"BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
		btlb_info.max_size, (int)4096,
		btlb_info.max_size>>8,
		btlb_info.fixed_range_info.num_i,
		btlb_info.fixed_range_info.num_d,
		btlb_info.fixed_range_info.num_comb,
		btlb_info.variable_range_info.num_i,
		btlb_info.variable_range_info.num_d,
		btlb_info.variable_range_info.num_comb
		);
	}
#endif
}

void __init
parisc_cache_init(void)
{
	if (pdc_cache_info(&cache_info) < 0)
		panic("parisc_cache_init: pdc_cache_info failed");

#if 0
	printk("ic_size %lx dc_size %lx it_size %lx\n",
		cache_info.ic_size,
		cache_info.dc_size,
		cache_info.it_size);

	printk("DC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.dc_base,
		cache_info.dc_stride,
		cache_info.dc_count,
		cache_info.dc_loop);

	printk("dc_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.dc_conf),
		cache_info.dc_conf.cc_alias,
		cache_info.dc_conf.cc_block,
		cache_info.dc_conf.cc_line,
		cache_info.dc_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.dc_conf.cc_wt,
		cache_info.dc_conf.cc_sh,
		cache_info.dc_conf.cc_cst,
		cache_info.dc_conf.cc_hv);

	printk("IC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.ic_base,
		cache_info.ic_stride,
		cache_info.ic_count,
		cache_info.ic_loop);

	printk("IT  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
		cache_info.it_sp_base,
		cache_info.it_sp_stride,
		cache_info.it_sp_count,
		cache_info.it_loop,
		cache_info.it_off_base,
		cache_info.it_off_stride,
		cache_info.it_off_count);

	printk("DT  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
		cache_info.dt_sp_base,
		cache_info.dt_sp_stride,
		cache_info.dt_sp_count,
		cache_info.dt_loop,
		cache_info.dt_off_base,
		cache_info.dt_off_stride,
		cache_info.dt_off_count);

	printk("ic_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.ic_conf),
		cache_info.ic_conf.cc_alias,
		cache_info.ic_conf.cc_block,
		cache_info.ic_conf.cc_line,
		cache_info.ic_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.ic_conf.cc_wt,
		cache_info.ic_conf.cc_sh,
		cache_info.ic_conf.cc_cst,
		cache_info.ic_conf.cc_hv);

	printk("D-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
		cache_info.dt_conf.tc_sh,
		cache_info.dt_conf.tc_page,
		cache_info.dt_conf.tc_cst,
		cache_info.dt_conf.tc_aid,
		cache_info.dt_conf.tc_sr);

	printk("I-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
		cache_info.it_conf.tc_sh,
		cache_info.it_conf.tc_page,
		cache_info.it_conf.tc_cst,
		cache_info.it_conf.tc_aid,
		cache_info.it_conf.tc_sr);
#endif

	split_tlb = 0;
	if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
		if (cache_info.dt_conf.tc_sh == 2)
			printk(KERN_WARNING "Unexpected TLB configuration. "
			"Will flush I/D separately (could be optimized).\n");

		split_tlb = 1;
	}

	/* "New and Improved" version from Jim Hull
	 *	(1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
	 * The following CAFL_STRIDE is an optimized version, see
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
	 */
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
	dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
	icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE

	/* stride needs to be non-zero, otherwise cache flushes will not work */
	WARN_ON(cache_info.dc_size && dcache_stride == 0);
	WARN_ON(cache_info.ic_size && icache_stride == 0);

	if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
						PDC_MODEL_NVA_UNSUPPORTED) {
		printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
		panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
	}
}

void disable_sr_hashing(void)
{
	int srhash_type, retval;
	unsigned long space_bits;

	switch (boot_cpu_data.cpu_type) {
	case pcx: /* We shouldn't get this far.  setup.c should prevent it. */
		BUG();
		return;

	case pcxs:
	case pcxt:
	case pcxt_:
		srhash_type = SRHASH_PCXST;
		break;

	case pcxl:
		srhash_type = SRHASH_PCXL;
		break;

	case pcxl2: /* pcxl2 doesn't support space register hashing */
		return;

	default: /* Currently all PA2.0 machines use the same ins. sequence */
		srhash_type = SRHASH_PA20;
		break;
	}

	disable_sr_hashing_asm(srhash_type);

	retval = pdc_spaceid_bits(&space_bits);
	/* If this procedure isn't implemented, don't panic. */
	if (retval < 0 && retval != PDC_BAD_OPTION)
		panic("pdc_spaceid_bits call failed.\n");
	if (space_bits != 0)
		panic("SpaceID hashing is still on!\n");
}

static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
{
	if (!static_branch_likely(&parisc_has_cache))
		return;
	preempt_disable();
	flush_dcache_page_asm(physaddr, vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
	preempt_enable();
}

static void flush_user_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	unsigned long flags, space, pgd, prot;
#ifdef CONFIG_TLB_PTLOCK
	unsigned long pgd_lock;
#endif

	vmaddr &= PAGE_MASK;

	preempt_disable();

	/* Set context for flush */
	local_irq_save(flags);
	prot = mfctl(8);
	space = mfsp(SR_USER);
	pgd = mfctl(25);
#ifdef CONFIG_TLB_PTLOCK
	pgd_lock = mfctl(28);
#endif
	switch_mm_irqs_off(NULL, vma->vm_mm, NULL);
	local_irq_restore(flags);

	flush_user_dcache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
	if (vma->vm_flags & VM_EXEC)
		flush_user_icache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
	flush_tlb_page(vma, vmaddr);

	/* Restore previous context */
	local_irq_save(flags);
#ifdef CONFIG_TLB_PTLOCK
	mtctl(pgd_lock, 28);
#endif
	mtctl(pgd, 25);
	mtsp(space, SR_USER);
	mtctl(prot, 8);
	local_irq_restore(flags);

	preempt_enable();
}

void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
		unsigned int nr)
{
	void *kaddr = page_address(page);

	for (;;) {
		flush_kernel_dcache_page_addr(kaddr);
		flush_kernel_icache_page(kaddr);
		if (--nr == 0)
			break;
		kaddr += PAGE_SIZE;
	}
}

static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr)
{
	pte_t *ptep = NULL;
	pgd_t *pgd = mm->pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (!pgd_none(*pgd)) {
		p4d = p4d_offset(pgd, addr);
		if (!p4d_none(*p4d)) {
			pud = pud_offset(p4d, addr);
			if (!pud_none(*pud)) {
				pmd = pmd_offset(pud, addr);
				if (!pmd_none(*pmd))
					ptep = pte_offset_map(pmd, addr);
			}
		}
	}
	return ptep;
}

static inline bool pte_needs_flush(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE))
		== (_PAGE_PRESENT | _PAGE_ACCESSED);
}
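
/*
 * In other words: flush only pages that are present and accessed and
 * not mapped uncached. For example, a PTE with _PAGE_PRESENT and
 * _PAGE_ACCESSED set and _PAGE_NO_CACHE clear masks to exactly
 * (_PAGE_PRESENT | _PAGE_ACCESSED) and needs a flush, while an uncached
 * mapping keeps _PAGE_NO_CACHE in the masked value, fails the
 * comparison, and is skipped.
 */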

void flush_dcache_folio(struct folio *folio)
{
	struct address_space *mapping = folio_flush_mapping(folio);
	struct vm_area_struct *vma;
	unsigned long addr, old_addr = 0;
	void *kaddr;
	unsigned long count = 0;
	unsigned long i, nr, flags;
	pgoff_t pgoff;

	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_dcache_dirty, &folio->flags);
		return;
	}

	nr = folio_nr_pages(folio);
	kaddr = folio_address(folio);
	for (i = 0; i < nr; i++)
		flush_kernel_dcache_page_addr(kaddr + i * PAGE_SIZE);

	if (!mapping)
		return;

	pgoff = folio->index;

	/*
	 * We have carefully arranged in arch_get_unmapped_area() that
	 * *any* mappings of a file are always congruently mapped (whether
	 * declared as MAP_PRIVATE or MAP_SHARED), so we only need
	 * to flush one address here for them all to become coherent
	 * on machines that support equivalent aliasing
	 */
	flush_dcache_mmap_lock_irqsave(mapping, flags);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff + nr - 1) {
		unsigned long offset = pgoff - vma->vm_pgoff;
		unsigned long pfn = folio_pfn(folio);

		addr = vma->vm_start;
		nr = folio_nr_pages(folio);
		if (offset > -nr) {
			pfn -= offset;
			nr += offset;
		} else {
			addr += offset * PAGE_SIZE;
		}
		if (addr + nr * PAGE_SIZE > vma->vm_end)
			nr = (vma->vm_end - addr) / PAGE_SIZE;

		if (parisc_requires_coherency()) {
			for (i = 0; i < nr; i++) {
				pte_t *ptep = get_ptep(vma->vm_mm,
							addr + i * PAGE_SIZE);
				if (!ptep)
					continue;
				if (pte_needs_flush(*ptep))
					flush_user_cache_page(vma,
							addr + i * PAGE_SIZE);
				/* Optimise accesses to the same table? */
				pte_unmap(ptep);
			}
		} else {
			/*
			 * The TLB is the engine of coherence on parisc:
			 * The CPU is entitled to speculate any page
			 * with a TLB mapping, so here we kill the
			 * mapping then flush the page along a special
			 * flush only alias mapping. This guarantees that
			 * the page is no-longer in the cache for any
			 * process and nor may it be speculatively read
			 * in (until the user or kernel specifically
			 * accesses it, of course)
			 */
			for (i = 0; i < nr; i++)
				flush_tlb_page(vma, addr + i * PAGE_SIZE);
			if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
					!= (addr & (SHM_COLOUR - 1))) {
				for (i = 0; i < nr; i++)
					__flush_cache_page(vma,
						addr + i * PAGE_SIZE,
						(pfn + i) * PAGE_SIZE);
				/*
				 * Software is allowed to have any number
				 * of private mappings to a page.
				 */
				if (!(vma->vm_flags & VM_SHARED))
					continue;
				if (old_addr)
					pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
						old_addr, addr, vma->vm_file);
				if (nr == folio_nr_pages(folio))
					old_addr = addr;
			}
		}
		WARN_ON(++count == 4096);
	}
	flush_dcache_mmap_unlock_irqrestore(mapping, flags);
}
EXPORT_SYMBOL(flush_dcache_folio);

/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
static unsigned long parisc_cache_flush_threshold __ro_after_init = FLUSH_THRESHOLD;

#define FLUSH_TLB_THRESHOLD (16*1024) /* 16 KiB minimum TLB threshold */
static unsigned long parisc_tlb_flush_threshold __ro_after_init = ~0UL;

void __init parisc_setup_cache_timing(void)
{
	unsigned long rangetime, alltime;
	unsigned long size;
	unsigned long threshold, threshold2;

	alltime = mfctl(16);
	flush_data_cache();
	alltime = mfctl(16) - alltime;

	size = (unsigned long)(_end - _text);
	rangetime = mfctl(16);
	flush_kernel_dcache_range((unsigned long)_text, size);
	rangetime = mfctl(16) - rangetime;

	printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	threshold = L1_CACHE_ALIGN((unsigned long)((uint64_t)size * alltime / rangetime));
	pr_info("Calculated flush threshold is %lu KiB\n",
		threshold/1024);

	/*
	 * The threshold computed above isn't very reliable. The following
	 * heuristic works reasonably well on c8000/rp3440.
	 */
	threshold2 = cache_info.dc_size * num_online_cpus();
	parisc_cache_flush_threshold = threshold2;
	printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
		parisc_cache_flush_threshold/1024);

	/* calculate TLB flush threshold */

	/* On SMP machines, skip the TLB measure of kernel text which
	 * has been mapped as huge pages. */
	if (num_online_cpus() > 1 && !parisc_requires_coherency()) {
		threshold = max(cache_info.it_size, cache_info.dt_size);
		threshold *= PAGE_SIZE;
		threshold /= num_online_cpus();
		goto set_tlb_threshold;
	}

	size = (unsigned long)_end - (unsigned long)_text;
	rangetime = mfctl(16);
	flush_tlb_kernel_range((unsigned long)_text, (unsigned long)_end);
	rangetime = mfctl(16) - rangetime;

	alltime = mfctl(16);
	flush_tlb_all();
	alltime = mfctl(16) - alltime;

	printk(KERN_INFO "Whole TLB flush %lu cycles, Range flush %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	threshold = PAGE_ALIGN((num_online_cpus() * size * alltime) / rangetime);
	printk(KERN_INFO "Calculated TLB flush threshold %lu KiB\n",
		threshold/1024);

set_tlb_threshold:
	if (threshold > FLUSH_TLB_THRESHOLD)
		parisc_tlb_flush_threshold = threshold;
	else
		parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD;

	printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
		parisc_tlb_flush_threshold/1024);
}

extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);

void flush_kernel_dcache_page_addr(const void *addr)
{
	unsigned long flags;

	flush_kernel_dcache_page_asm(addr);
	purge_tlb_start(flags);
	pdtlb(SR_KERNEL, addr);
	purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);

static void flush_cache_page_if_present(struct vm_area_struct *vma,
	unsigned long vmaddr, unsigned long pfn)
{
	bool needs_flush = false;
	pte_t *ptep;

	/*
	 * The pte check is racy and sometimes the flush will trigger
	 * a non-access TLB miss. Hopefully, the page has already been
	 * flushed.
	 */
	ptep = get_ptep(vma->vm_mm, vmaddr);
	if (ptep) {
		needs_flush = pte_needs_flush(*ptep);
		pte_unmap(ptep);
	}
	if (needs_flush)
		flush_cache_page(vma, vmaddr, pfn);
}

void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

	kfrom = kmap_local_page(from);
	kto = kmap_local_page(to);
	flush_cache_page_if_present(vma, vaddr, page_to_pfn(from));
	copy_page_asm(kto, kfrom);
	kunmap_local(kto);
	kunmap_local(kfrom);
}

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		unsigned long user_vaddr, void *dst, void *src, int len)
{
	flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len);
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
		unsigned long user_vaddr, void *dst, void *src, int len)
{
	flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
}

/* __flush_tlb_range()
 *
 * returns 1 if all TLBs were flushed.
 */
int __flush_tlb_range(unsigned long sid, unsigned long start,
		      unsigned long end)
{
	unsigned long flags;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    end - start >= parisc_tlb_flush_threshold) {
		flush_tlb_all();
		return 1;
	}

	/* Purge TLB entries for small ranges using the pdtlb and
	   pitlb instructions.  These instructions execute locally
	   but cause a purge request to be broadcast to other TLBs.  */
	while (start < end) {
		purge_tlb_start(flags);
		mtsp(sid, SR_TEMP1);
		pdtlb(SR_TEMP1, start);
		pitlb(SR_TEMP1, start);
		purge_tlb_end(flags);
		start += PAGE_SIZE;
	}
	return 0;
}

static void flush_cache_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	unsigned long addr, pfn;
	pte_t *ptep;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		bool needs_flush = false;
		/*
		 * The vma can contain pages that aren't present. Although
		 * the pte search is expensive, we need the pte to find the
		 * page pfn and to check whether the page should be flushed.
		 */
		ptep = get_ptep(vma->vm_mm, addr);
		if (ptep) {
			needs_flush = pte_needs_flush(*ptep);
			pfn = pte_pfn(*ptep);
			pte_unmap(ptep);
		}
		if (needs_flush) {
			if (parisc_requires_coherency()) {
				flush_user_cache_page(vma, addr);
			} else {
				if (WARN_ON(!pfn_valid(pfn)))
					return;
				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
			}
		}
	}
}

static inline unsigned long mm_total_size(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long usize = 0;
	VMA_ITERATOR(vmi, mm, 0);

	for_each_vma(vmi, vma) {
		if (usize >= parisc_cache_flush_threshold)
			break;
		usize += vma->vm_end - vma->vm_start;
	}
	return usize;
}

void flush_cache_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	/*
	 * Flushing the whole cache on each cpu takes forever on
	 * rp3440, etc. So, avoid it if the mm isn't too big.
	 *
	 * Note that we must flush the entire cache on machines
	 * with aliasing caches to prevent random segmentation
	 * faults.
	 */
	if (!parisc_requires_coherency()
	    ||  mm_total_size(mm) >= parisc_cache_flush_threshold) {
		if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
			return;
		flush_tlb_all();
		flush_cache_all();
		return;
	}

	/* Flush mm */
	for_each_vma(vmi, vma)
		flush_cache_pages(vma, vma->vm_start, vma->vm_end);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (!parisc_requires_coherency()
	    || end - start >= parisc_cache_flush_threshold) {
		if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
			return;
		flush_tlb_range(vma, start, end);
		flush_cache_all();
		return;
	}

	flush_cache_pages(vma, start, end);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
	if (WARN_ON(!pfn_valid(pfn)))
		return;
	if (parisc_requires_coherency())
		flush_user_cache_page(vma, vmaddr);
	else
		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	if (!PageAnon(page))
		return;

	if (parisc_requires_coherency()) {
		if (vma->vm_flags & VM_SHARED)
			flush_data_cache();
		else
			flush_user_cache_page(vma, vmaddr);
		return;
	}

	flush_tlb_page(vma, vmaddr);
	preempt_disable();
	flush_dcache_page_asm(page_to_phys(page), vmaddr);
	preempt_enable();
}

void flush_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    (unsigned long)size >= parisc_cache_flush_threshold) {
		flush_tlb_kernel_range(start, end);
		flush_data_cache();
		return;
	}

	flush_kernel_dcache_range_asm(start, end);
	flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(flush_kernel_vmap_range);

void invalidate_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	/* Ensure DMA is complete */
	asm_syncdma();

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    (unsigned long)size >= parisc_cache_flush_threshold) {
		flush_tlb_kernel_range(start, end);
		flush_data_cache();
		return;
	}

	purge_kernel_dcache_range_asm(start, end);
	flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);

SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
	unsigned int, cache)
{
	unsigned long start, end;
	ASM_EXCEPTIONTABLE_VAR(error);

	if (bytes == 0)
		return 0;
	if (!access_ok((void __user *) addr, bytes))
		return -EFAULT;

	end = addr + bytes;

	if (cache & DCACHE) {
		start = addr;
		__asm__ __volatile__ (
#ifdef CONFIG_64BIT
			"1: cmpb,*<<,n	%0,%2,1b\n"
#else
			"1: cmpb,<<,n	%0,%2,1b\n"
#endif
			"   fdc,m	%3(%4,%0)\n"
			"2: sync\n"
			ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b, "%1")
			: "+r" (start), "+r" (error)
			: "r" (end), "r" (dcache_stride), "i" (SR_USER));
	}

	if (cache & ICACHE && error == 0) {
		start = addr;
		__asm__ __volatile__ (
#ifdef CONFIG_64BIT
			"1: cmpb,*<<,n	%0,%2,1b\n"
#else
			"1: cmpb,<<,n	%0,%2,1b\n"
#endif
			"   fic,m	%3(%4,%0)\n"
			"2: sync\n"
			ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b, "%1")
			: "+r" (start), "+r" (error)
			: "r" (end), "r" (icache_stride), "i" (SR_USER));
	}

	return error;
}
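
/*
 * Userspace usage sketch (illustrative, not part of this file): the
 * syscall takes a user address range plus a mask of DCACHE and/or
 * ICACHE (constants from <asm/cachectl.h>). A JIT that has just written
 * code to `buf' might do, assuming the parisc uapi headers export
 * __NR_cacheflush:
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	syscall(__NR_cacheflush, (unsigned long)buf, len, DCACHE | ICACHE);
 *
 * The kernel then walks the range with fdc (data) and fic (instruction)
 * at the strides computed in parisc_cache_init().
 */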