v5.4
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>

int split_tlb __ro_after_init;
int dcache_stride __ro_after_init;
int icache_stride __ro_after_init;
EXPORT_SYMBOL(dcache_stride);

void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);


/* On some machines (i.e., ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software. We need a spinlock around all TLB flushes to ensure
 * this.
 */
DEFINE_SPINLOCK(pa_tlb_flush_lock);

/* Swapper page setup lock. */
DEFINE_SPINLOCK(pa_swapper_pg_lock);

#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
int pa_serialize_tlb_flushes __ro_after_init;
#endif

struct pdc_cache_info cache_info __ro_after_init;
#ifndef CONFIG_PA20
static struct pdc_btlb_info btlb_info __ro_after_init;
#endif

#ifdef CONFIG_SMP
void
flush_data_cache(void)
{
	on_each_cpu(flush_data_cache_local, NULL, 1);
}
void
flush_instruction_cache(void)
{
	on_each_cpu(flush_instruction_cache_local, NULL, 1);
}
#endif

void
flush_cache_all_local(void)
{
	flush_instruction_cache_local(NULL);
	flush_data_cache_local(NULL);
}
EXPORT_SYMBOL(flush_cache_all_local);

/* Virtual address of pfn.  */
#define pfn_va(pfn)	__va(PFN_PHYS(pfn))

void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct page *page;

	/* We don't have pte special.  As a result, we can be called with
	   an invalid pfn and we don't need to flush the kernel dcache page.
	   This occurs with FireGL card in C8000.  */
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (page_mapping_file(page) &&
	    test_bit(PG_dcache_dirty, &page->flags)) {
		flush_kernel_dcache_page_addr(pfn_va(pfn));
		clear_bit(PG_dcache_dirty, &page->flags);
	} else if (parisc_requires_coherency())
		flush_kernel_dcache_page_addr(pfn_va(pfn));
}
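/*
 * Note that update_mmu_cache() pairs with flush_dcache_page() below:
 * when a page has no user mappings yet, flush_dcache_page() merely
 * sets PG_dcache_dirty, and the deferred flush is performed here once
 * the first user PTE for the page is installed.
 */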

void
show_cache_info(struct seq_file *m)
{
	char buf[32];

	seq_printf(m, "I-cache\t\t: %ld KB\n",
		cache_info.ic_size/1024 );
	if (cache_info.dc_loop != 1)
		snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
	seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n",
		cache_info.dc_size/1024,
		(cache_info.dc_conf.cc_wt ? "WT":"WB"),
		(cache_info.dc_conf.cc_sh ? ", shared I/D":""),
		((cache_info.dc_loop == 1) ? "direct mapped" : buf));
	seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
		cache_info.it_size,
		cache_info.dt_size,
		cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
	);

#ifndef CONFIG_PA20
	/* BTLB - Block TLB */
	if (btlb_info.max_size==0) {
		seq_printf(m, "BTLB\t\t: not supported\n" );
	} else {
		seq_printf(m,
		"BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
		"BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
		"BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
		btlb_info.max_size, (int)4096,
		btlb_info.max_size>>8,
		btlb_info.fixed_range_info.num_i,
		btlb_info.fixed_range_info.num_d,
		btlb_info.fixed_range_info.num_comb,
		btlb_info.variable_range_info.num_i,
		btlb_info.variable_range_info.num_d,
		btlb_info.variable_range_info.num_comb
		);
	}
#endif
}

void __init
parisc_cache_init(void)
{
	if (pdc_cache_info(&cache_info) < 0)
		panic("parisc_cache_init: pdc_cache_info failed");

#if 0
	printk("ic_size %lx dc_size %lx it_size %lx\n",
		cache_info.ic_size,
		cache_info.dc_size,
		cache_info.it_size);

	printk("DC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.dc_base,
		cache_info.dc_stride,
		cache_info.dc_count,
		cache_info.dc_loop);

	printk("dc_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.dc_conf),
		cache_info.dc_conf.cc_alias,
		cache_info.dc_conf.cc_block,
		cache_info.dc_conf.cc_line,
		cache_info.dc_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.dc_conf.cc_wt,
		cache_info.dc_conf.cc_sh,
		cache_info.dc_conf.cc_cst,
		cache_info.dc_conf.cc_hv);

	printk("IC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.ic_base,
		cache_info.ic_stride,
		cache_info.ic_count,
		cache_info.ic_loop);

	printk("IT  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
		cache_info.it_sp_base,
		cache_info.it_sp_stride,
		cache_info.it_sp_count,
		cache_info.it_loop,
		cache_info.it_off_base,
		cache_info.it_off_stride,
		cache_info.it_off_count);

	printk("DT  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
		cache_info.dt_sp_base,
		cache_info.dt_sp_stride,
		cache_info.dt_sp_count,
		cache_info.dt_loop,
		cache_info.dt_off_base,
		cache_info.dt_off_stride,
		cache_info.dt_off_count);

	printk("ic_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.ic_conf),
		cache_info.ic_conf.cc_alias,
		cache_info.ic_conf.cc_block,
		cache_info.ic_conf.cc_line,
		cache_info.ic_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.ic_conf.cc_wt,
		cache_info.ic_conf.cc_sh,
		cache_info.ic_conf.cc_cst,
		cache_info.ic_conf.cc_hv);

	printk("D-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
		cache_info.dt_conf.tc_sh,
		cache_info.dt_conf.tc_page,
		cache_info.dt_conf.tc_cst,
		cache_info.dt_conf.tc_aid,
		cache_info.dt_conf.tc_sr);

	printk("I-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
		cache_info.it_conf.tc_sh,
		cache_info.it_conf.tc_page,
		cache_info.it_conf.tc_cst,
		cache_info.it_conf.tc_aid,
		cache_info.it_conf.tc_sr);
#endif

	split_tlb = 0;
	if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
		if (cache_info.dt_conf.tc_sh == 2)
			printk(KERN_WARNING "Unexpected TLB configuration. "
			"Will flush I/D separately (could be optimized).\n");

		split_tlb = 1;
	}

	/* "New and Improved" version from Jim Hull
	 *	(1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
	 * The following CAFL_STRIDE is an optimized version, see
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
	 */
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
	dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
	icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE
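	/*
	 * Worked example with assumed values: for cc_line = 2,
	 * cc_block = 1 and cc_shift = 0, CAFL_STRIDE yields
	 * 2 << (3 + 1 + 0) = 32, i.e. the flush loops in pacache.S
	 * advance 32 bytes per fdc/fic instruction.
	 */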

#ifndef CONFIG_PA20
	if (pdc_btlb_info(&btlb_info) < 0) {
		memset(&btlb_info, 0, sizeof btlb_info);
	}
#endif

	if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
						PDC_MODEL_NVA_UNSUPPORTED) {
		printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
		panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
	}
}

void __init disable_sr_hashing(void)
{
	int srhash_type, retval;
	unsigned long space_bits;

	switch (boot_cpu_data.cpu_type) {
	case pcx: /* We shouldn't get this far.  setup.c should prevent it. */
		BUG();
		return;

	case pcxs:
	case pcxt:
	case pcxt_:
		srhash_type = SRHASH_PCXST;
		break;

	case pcxl:
		srhash_type = SRHASH_PCXL;
		break;

	case pcxl2: /* pcxl2 doesn't support space register hashing */
		return;

	default: /* Currently all PA2.0 machines use the same ins. sequence */
		srhash_type = SRHASH_PA20;
		break;
	}

	disable_sr_hashing_asm(srhash_type);

	retval = pdc_spaceid_bits(&space_bits);
	/* If this procedure isn't implemented, don't panic. */
	if (retval < 0 && retval != PDC_BAD_OPTION)
		panic("pdc_spaceid_bits call failed.\n");
	if (space_bits != 0)
		panic("SpaceID hashing is still on!\n");
}

static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
{
	preempt_disable();
	flush_dcache_page_asm(physaddr, vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
	preempt_enable();
}

static inline void
__purge_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
{
	preempt_disable();
	purge_dcache_page_asm(physaddr, vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
	preempt_enable();
}
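/*
 * The two helpers above mirror the PA-RISC cache instructions they
 * wrap: flushing (fdc) writes dirty lines back to memory before
 * invalidating them, whereas purging (pdc) invalidates without
 * write-back and discards dirty data.  Purging is therefore only used
 * when the contents no longer matter, e.g. for an mm whose context
 * (space id) is already gone.
 */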

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping_file(page);
	struct vm_area_struct *mpnt;
	unsigned long offset;
	unsigned long addr, old_addr = 0;
	pgoff_t pgoff;

	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_dcache_dirty, &page->flags);
		return;
	}

	flush_kernel_dcache_page(page);

	if (!mapping)
		return;

	pgoff = page->index;

	/* We have carefully arranged in arch_get_unmapped_area() that
	 * *any* mappings of a file are always congruently mapped (whether
	 * declared as MAP_PRIVATE or MAP_SHARED), so we only need
	 * to flush one address here for them all to become coherent */

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		addr = mpnt->vm_start + offset;

		/* The TLB is the engine of coherence on parisc: The
		 * CPU is entitled to speculate any page with a TLB
		 * mapping, so here we kill the mapping then flush the
		 * page along a special flush only alias mapping.
		 * This guarantees that the page is no-longer in the
		 * cache for any process and nor may it be
		 * speculatively read in (until the user or kernel
		 * specifically accesses it, of course) */

		flush_tlb_page(mpnt, addr);
		if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
				      != (addr & (SHM_COLOUR - 1))) {
			__flush_cache_page(mpnt, addr, page_to_phys(page));
			if (old_addr)
				printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n", old_addr, addr, mpnt->vm_file);
			old_addr = addr;
		}
	}
	flush_dcache_mmap_unlock(mapping);
}
EXPORT_SYMBOL(flush_dcache_page);

/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_dcache_page_asm);
EXPORT_SYMBOL(flush_data_cache_local);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
static unsigned long parisc_cache_flush_threshold __ro_after_init = FLUSH_THRESHOLD;

#define FLUSH_TLB_THRESHOLD (16*1024) /* 16 KiB minimum TLB threshold */
static unsigned long parisc_tlb_flush_threshold __ro_after_init = FLUSH_TLB_THRESHOLD;

void __init parisc_setup_cache_timing(void)
{
	unsigned long rangetime, alltime;
	unsigned long size, start;
	unsigned long threshold;

	alltime = mfctl(16);
	flush_data_cache();
	alltime = mfctl(16) - alltime;

	size = (unsigned long)(_end - _text);
	rangetime = mfctl(16);
	flush_kernel_dcache_range((unsigned long)_text, size);
	rangetime = mfctl(16) - rangetime;

	printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	threshold = L1_CACHE_ALIGN(size * alltime / rangetime);
	if (threshold > cache_info.dc_size)
		threshold = cache_info.dc_size;
	if (threshold)
		parisc_cache_flush_threshold = threshold;
	printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
		parisc_cache_flush_threshold/1024);

	/* calculate TLB flush threshold */

	/* On SMP machines, skip the TLB measure of kernel text which
	 * has been mapped as huge pages. */
	if (num_online_cpus() > 1 && !parisc_requires_coherency()) {
		threshold = max(cache_info.it_size, cache_info.dt_size);
		threshold *= PAGE_SIZE;
		threshold /= num_online_cpus();
		goto set_tlb_threshold;
	}

	size = 0;
	start = (unsigned long) _text;
	rangetime = mfctl(16);
	while (start < (unsigned long) _end) {
		flush_tlb_kernel_range(start, start + PAGE_SIZE);
		start += PAGE_SIZE;
		size += PAGE_SIZE;
	}
	rangetime = mfctl(16) - rangetime;

	alltime = mfctl(16);
	flush_tlb_all();
	alltime = mfctl(16) - alltime;

	printk(KERN_INFO "Whole TLB flush %lu cycles, Range flush %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	threshold = PAGE_ALIGN((num_online_cpus() * size * alltime) / rangetime);
	printk(KERN_INFO "Calculated TLB flush threshold %lu KiB\n",
		threshold/1024);

set_tlb_threshold:
	if (threshold > parisc_tlb_flush_threshold)
		parisc_tlb_flush_threshold = threshold;
	printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
		parisc_tlb_flush_threshold/1024);
}
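/*
 * The thresholds above answer a break-even question: a range flush
 * costs roughly rangetime/size cycles per byte while a whole-cache or
 * whole-TLB flush costs a fixed alltime cycles, so once a range grows
 * beyond about size * alltime / rangetime bytes it is cheaper to
 * flush everything.
 */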

extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);

void flush_kernel_dcache_page_addr(void *addr)
{
	unsigned long flags;

	flush_kernel_dcache_page_asm(addr);
	purge_tlb_start(flags);
	pdtlb_kernel(addr);
	purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
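/*
 * Purging the kernel TLB entry after the dcache flush matters because
 * the TLB is what licenses speculation: while a translation exists,
 * the CPU may prefetch the page straight back into the cache.
 */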

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
	struct page *pg)
{
       /* Copy using kernel mapping.  No coherency is needed (all in
	  kunmap) for the `to' page.  However, the `from' page needs to
	  be flushed through a mapping equivalent to the user mapping
	  before it can be accessed through the kernel mapping. */
	preempt_disable();
	flush_dcache_page_asm(__pa(vfrom), vaddr);
	copy_page_asm(vto, vfrom);
	preempt_enable();
}
EXPORT_SYMBOL(copy_user_page);

/* __flush_tlb_range()
 *
 * returns 1 if all TLBs were flushed.
 */
int __flush_tlb_range(unsigned long sid, unsigned long start,
		      unsigned long end)
{
	unsigned long flags;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    end - start >= parisc_tlb_flush_threshold) {
		flush_tlb_all();
		return 1;
	}

	/* Purge TLB entries for small ranges using the pdtlb and
	   pitlb instructions.  These instructions execute locally
	   but cause a purge request to be broadcast to other TLBs.  */
	while (start < end) {
		purge_tlb_start(flags);
		mtsp(sid, 1);
		pdtlb(start);
		pitlb(start);
		purge_tlb_end(flags);
		start += PAGE_SIZE;
	}
	return 0;
}

static void cacheflush_h_tmp_function(void *dummy)
{
	flush_cache_all_local();
}

void flush_cache_all(void)
{
	on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
}

static inline unsigned long mm_total_size(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long usize = 0;

	for (vma = mm->mmap; vma; vma = vma->vm_next)
		usize += vma->vm_end - vma->vm_start;
	return usize;
}

static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
{
	pte_t *ptep = NULL;

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd))
				ptep = pte_offset_map(pmd, addr);
		}
	}
	return ptep;
}
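/*
 * get_ptep() is a tolerant software walk of the page tables: it
 * returns NULL as soon as any intermediate level is empty, letting
 * callers simply skip addresses that were never mapped.
 */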

void flush_cache_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	pgd_t *pgd;

	/* Flushing the whole cache on each cpu takes forever on
	   rp3440, etc.  So, avoid it if the mm isn't too big.  */
	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    mm_total_size(mm) >= parisc_cache_flush_threshold) {
		if (mm->context)
			flush_tlb_all();
		flush_cache_all();
		return;
	}

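	/*
	 * If this mm is the context currently live on the CPU (its space
	 * id is loaded in sr3), the user-virtual flush routines can reach
	 * it directly; otherwise we must walk its page tables and flush
	 * each present page via its physical address.
	 */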
	if (mm->context == mfsp(3)) {
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
			if (vma->vm_flags & VM_EXEC)
				flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
			flush_tlb_range(vma, vma->vm_start, vma->vm_end);
		}
		return;
	}

	pgd = mm->pgd;
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		unsigned long addr;

		for (addr = vma->vm_start; addr < vma->vm_end;
		     addr += PAGE_SIZE) {
			unsigned long pfn;
			pte_t *ptep = get_ptep(pgd, addr);
			if (!ptep)
				continue;
			pfn = pte_pfn(*ptep);
			if (!pfn_valid(pfn))
				continue;
			if (unlikely(mm->context)) {
				flush_tlb_page(vma, addr);
				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
			} else {
				__purge_cache_page(vma, addr, PFN_PHYS(pfn));
			}
		}
	}
}

void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	pgd_t *pgd;
	unsigned long addr;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    end - start >= parisc_cache_flush_threshold) {
		if (vma->vm_mm->context)
			flush_tlb_range(vma, start, end);
		flush_cache_all();
		return;
	}

	if (vma->vm_mm->context == mfsp(3)) {
		flush_user_dcache_range_asm(start, end);
		if (vma->vm_flags & VM_EXEC)
			flush_user_icache_range_asm(start, end);
		flush_tlb_range(vma, start, end);
		return;
	}

	pgd = vma->vm_mm->pgd;
	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
		unsigned long pfn;
		pte_t *ptep = get_ptep(pgd, addr);
		if (!ptep)
			continue;
		pfn = pte_pfn(*ptep);
		if (pfn_valid(pfn)) {
			if (unlikely(vma->vm_mm->context)) {
				flush_tlb_page(vma, addr);
				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
			} else {
				__purge_cache_page(vma, addr, PFN_PHYS(pfn));
			}
		}
	}
}

void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
	if (pfn_valid(pfn)) {
		if (likely(vma->vm_mm->context)) {
			flush_tlb_page(vma, vmaddr);
			__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
		} else {
			__purge_cache_page(vma, vmaddr, PFN_PHYS(pfn));
		}
	}
}

void flush_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    (unsigned long)size >= parisc_cache_flush_threshold) {
		flush_tlb_kernel_range(start, end);
		flush_data_cache();
		return;
	}

	flush_kernel_dcache_range_asm(start, end);
	flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(flush_kernel_vmap_range);
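/*
 * flush_kernel_vmap_range() and invalidate_kernel_vmap_range() below
 * form the usual pair around DMA to a vmalloc/vmap area: flush (write
 * back) before a device reads the buffer, invalidate (purge) after a
 * device has written it, so the CPU cannot keep serving stale lines.
 */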

void invalidate_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    (unsigned long)size >= parisc_cache_flush_threshold) {
		flush_tlb_kernel_range(start, end);
		flush_data_cache();
		return;
	}

	purge_kernel_dcache_range_asm(start, end);
	flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);
v6.2
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>
#include <asm/mmu_context.h>

int split_tlb __ro_after_init;
int dcache_stride __ro_after_init;
int icache_stride __ro_after_init;
EXPORT_SYMBOL(dcache_stride);

void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);

/* Internal implementation in arch/parisc/kernel/pacache.S */
void flush_data_cache_local(void *);  /* flushes local data-cache only */
void flush_instruction_cache_local(void); /* flushes local code-cache only */

/* On some machines (i.e., ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software. We need a spinlock around all TLB flushes to ensure
 * this.
 */
DEFINE_SPINLOCK(pa_tlb_flush_lock);

#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
int pa_serialize_tlb_flushes __ro_after_init;
#endif

struct pdc_cache_info cache_info __ro_after_init;
#ifndef CONFIG_PA20
static struct pdc_btlb_info btlb_info __ro_after_init;
#endif

DEFINE_STATIC_KEY_TRUE(parisc_has_cache);
DEFINE_STATIC_KEY_TRUE(parisc_has_dcache);
DEFINE_STATIC_KEY_TRUE(parisc_has_icache);

static void cache_flush_local_cpu(void *dummy)
{
	if (static_branch_likely(&parisc_has_icache))
		flush_instruction_cache_local();
	if (static_branch_likely(&parisc_has_dcache))
		flush_data_cache_local(NULL);
}

void flush_cache_all_local(void)
{
	cache_flush_local_cpu(NULL);
}

void flush_cache_all(void)
{
	if (static_branch_likely(&parisc_has_cache))
		on_each_cpu(cache_flush_local_cpu, NULL, 1);
}

static inline void flush_data_cache(void)
{
	if (static_branch_likely(&parisc_has_dcache))
		on_each_cpu(flush_data_cache_local, NULL, 1);
}
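/*
 * The static keys above let every flush helper compile down to a
 * near-free branch when the corresponding cache is absent.  They
 * default to true; presumably they are disabled during boot, outside
 * this file, when firmware reports no d-cache or i-cache to maintain.
 */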


/* Kernel virtual address of pfn.  */
#define pfn_va(pfn)	__va(PFN_PHYS(pfn))

void
__update_cache(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct page *page;

	/* We don't have pte special.  As a result, we can be called with
	   an invalid pfn and we don't need to flush the kernel dcache page.
	   This occurs with FireGL card in C8000.  */
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (page_mapping_file(page) &&
	    test_bit(PG_dcache_dirty, &page->flags)) {
		flush_kernel_dcache_page_addr(pfn_va(pfn));
		clear_bit(PG_dcache_dirty, &page->flags);
	} else if (parisc_requires_coherency())
		flush_kernel_dcache_page_addr(pfn_va(pfn));
}

void
show_cache_info(struct seq_file *m)
{
	char buf[32];

	seq_printf(m, "I-cache\t\t: %ld KB\n",
		cache_info.ic_size/1024 );
	if (cache_info.dc_loop != 1)
		snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
	seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s, alias=%d)\n",
		cache_info.dc_size/1024,
		(cache_info.dc_conf.cc_wt ? "WT":"WB"),
		(cache_info.dc_conf.cc_sh ? ", shared I/D":""),
		((cache_info.dc_loop == 1) ? "direct mapped" : buf),
		cache_info.dc_conf.cc_alias
	);
	seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
		cache_info.it_size,
		cache_info.dt_size,
		cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
	);

#ifndef CONFIG_PA20
	/* BTLB - Block TLB */
	if (btlb_info.max_size==0) {
		seq_printf(m, "BTLB\t\t: not supported\n" );
	} else {
		seq_printf(m,
		"BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
		"BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
		"BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
		btlb_info.max_size, (int)4096,
		btlb_info.max_size>>8,
		btlb_info.fixed_range_info.num_i,
		btlb_info.fixed_range_info.num_d,
		btlb_info.fixed_range_info.num_comb,
		btlb_info.variable_range_info.num_i,
		btlb_info.variable_range_info.num_d,
		btlb_info.variable_range_info.num_comb
		);
	}
#endif
}

void __init
parisc_cache_init(void)
{
	if (pdc_cache_info(&cache_info) < 0)
		panic("parisc_cache_init: pdc_cache_info failed");

#if 0
	printk("ic_size %lx dc_size %lx it_size %lx\n",
		cache_info.ic_size,
		cache_info.dc_size,
		cache_info.it_size);

	printk("DC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.dc_base,
		cache_info.dc_stride,
		cache_info.dc_count,
		cache_info.dc_loop);

	printk("dc_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.dc_conf),
		cache_info.dc_conf.cc_alias,
		cache_info.dc_conf.cc_block,
		cache_info.dc_conf.cc_line,
		cache_info.dc_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.dc_conf.cc_wt,
		cache_info.dc_conf.cc_sh,
		cache_info.dc_conf.cc_cst,
		cache_info.dc_conf.cc_hv);

	printk("IC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.ic_base,
		cache_info.ic_stride,
		cache_info.ic_count,
		cache_info.ic_loop);

	printk("IT  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
		cache_info.it_sp_base,
		cache_info.it_sp_stride,
		cache_info.it_sp_count,
		cache_info.it_loop,
		cache_info.it_off_base,
		cache_info.it_off_stride,
		cache_info.it_off_count);

	printk("DT  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
		cache_info.dt_sp_base,
		cache_info.dt_sp_stride,
		cache_info.dt_sp_count,
		cache_info.dt_loop,
		cache_info.dt_off_base,
		cache_info.dt_off_stride,
		cache_info.dt_off_count);

	printk("ic_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.ic_conf),
		cache_info.ic_conf.cc_alias,
		cache_info.ic_conf.cc_block,
		cache_info.ic_conf.cc_line,
		cache_info.ic_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.ic_conf.cc_wt,
		cache_info.ic_conf.cc_sh,
		cache_info.ic_conf.cc_cst,
		cache_info.ic_conf.cc_hv);

	printk("D-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
		cache_info.dt_conf.tc_sh,
		cache_info.dt_conf.tc_page,
		cache_info.dt_conf.tc_cst,
		cache_info.dt_conf.tc_aid,
		cache_info.dt_conf.tc_sr);

	printk("I-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
		cache_info.it_conf.tc_sh,
		cache_info.it_conf.tc_page,
		cache_info.it_conf.tc_cst,
		cache_info.it_conf.tc_aid,
		cache_info.it_conf.tc_sr);
#endif

	split_tlb = 0;
	if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
		if (cache_info.dt_conf.tc_sh == 2)
			printk(KERN_WARNING "Unexpected TLB configuration. "
			"Will flush I/D separately (could be optimized).\n");

		split_tlb = 1;
	}

	/* "New and Improved" version from Jim Hull
	 *	(1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
	 * The following CAFL_STRIDE is an optimized version, see
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
	 */
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
	dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
	icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE

#ifndef CONFIG_PA20
	if (pdc_btlb_info(&btlb_info) < 0) {
		memset(&btlb_info, 0, sizeof btlb_info);
	}
#endif

	if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
						PDC_MODEL_NVA_UNSUPPORTED) {
		printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
		panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
	}
}

void disable_sr_hashing(void)
{
	int srhash_type, retval;
	unsigned long space_bits;

	switch (boot_cpu_data.cpu_type) {
	case pcx: /* We shouldn't get this far.  setup.c should prevent it. */
		BUG();
		return;

	case pcxs:
	case pcxt:
	case pcxt_:
		srhash_type = SRHASH_PCXST;
		break;

	case pcxl:
		srhash_type = SRHASH_PCXL;
		break;

	case pcxl2: /* pcxl2 doesn't support space register hashing */
		return;

	default: /* Currently all PA2.0 machines use the same ins. sequence */
		srhash_type = SRHASH_PA20;
		break;
	}

	disable_sr_hashing_asm(srhash_type);

	retval = pdc_spaceid_bits(&space_bits);
	/* If this procedure isn't implemented, don't panic. */
	if (retval < 0 && retval != PDC_BAD_OPTION)
		panic("pdc_spaceid_bits call failed.\n");
	if (space_bits != 0)
		panic("SpaceID hashing is still on!\n");
}

static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
{
	if (!static_branch_likely(&parisc_has_cache))
		return;
	preempt_disable();
	flush_dcache_page_asm(physaddr, vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
	preempt_enable();
}

static void flush_user_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	unsigned long flags, space, pgd, prot;
#ifdef CONFIG_TLB_PTLOCK
	unsigned long pgd_lock;
#endif

	vmaddr &= PAGE_MASK;

	preempt_disable();

	/* Set context for flush */
	local_irq_save(flags);
	prot = mfctl(8);
	space = mfsp(SR_USER);
	pgd = mfctl(25);
#ifdef CONFIG_TLB_PTLOCK
	pgd_lock = mfctl(28);
#endif
	switch_mm_irqs_off(NULL, vma->vm_mm, NULL);
	local_irq_restore(flags);

	flush_user_dcache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
	if (vma->vm_flags & VM_EXEC)
		flush_user_icache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
	flush_tlb_page(vma, vmaddr);

	/* Restore previous context */
	local_irq_save(flags);
#ifdef CONFIG_TLB_PTLOCK
	mtctl(pgd_lock, 28);
#endif
	mtctl(pgd, 25);
	mtsp(space, SR_USER);
	mtctl(prot, 8);
	local_irq_restore(flags);

	preempt_enable();
}
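/*
 * flush_user_cache_page() briefly makes the target mm the live context
 * so that the user-virtual flush instructions resolve in the right
 * address space: it saves and swaps the protection ids (cr8), the user
 * space register (sr3), the pgd pointer (cr25) and, with
 * CONFIG_TLB_PTLOCK, the page-table lock word (cr28), then restores
 * them all once the flush is done.
 */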

static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr)
{
	pte_t *ptep = NULL;
	pgd_t *pgd = mm->pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (!pgd_none(*pgd)) {
		p4d = p4d_offset(pgd, addr);
		if (!p4d_none(*p4d)) {
			pud = pud_offset(p4d, addr);
			if (!pud_none(*pud)) {
				pmd = pmd_offset(pud, addr);
				if (!pmd_none(*pmd))
					ptep = pte_offset_map(pmd, addr);
			}
		}
	}
	return ptep;
}

static inline bool pte_needs_flush(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE))
		== (_PAGE_PRESENT | _PAGE_ACCESSED);
}
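/*
 * A pte is only worth flushing if it is present, has been accessed
 * (so a cache line may actually exist) and is not marked no-cache:
 * uncached mappings such as I/O pages never have lines to write back.
 */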

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping_file(page);
	struct vm_area_struct *mpnt;
	unsigned long offset;
	unsigned long addr, old_addr = 0;
	unsigned long count = 0;
	pgoff_t pgoff;

	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_dcache_dirty, &page->flags);
		return;
	}

	flush_kernel_dcache_page_addr(page_address(page));

	if (!mapping)
		return;

	pgoff = page->index;

	/*
	 * We have carefully arranged in arch_get_unmapped_area() that
	 * *any* mappings of a file are always congruently mapped (whether
	 * declared as MAP_PRIVATE or MAP_SHARED), so we only need
	 * to flush one address here for them all to become coherent
	 * on machines that support equivalent aliasing
	 */
	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		addr = mpnt->vm_start + offset;
		if (parisc_requires_coherency()) {
			pte_t *ptep;

			ptep = get_ptep(mpnt->vm_mm, addr);
			if (ptep && pte_needs_flush(*ptep))
				flush_user_cache_page(mpnt, addr);
		} else {
			/*
			 * The TLB is the engine of coherence on parisc:
			 * The CPU is entitled to speculate any page
			 * with a TLB mapping, so here we kill the
			 * mapping then flush the page along a special
			 * flush only alias mapping. This guarantees that
			 * the page is no-longer in the cache for any
			 * process and nor may it be speculatively read
			 * in (until the user or kernel specifically
			 * accesses it, of course)
			 */
			flush_tlb_page(mpnt, addr);
			if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
					!= (addr & (SHM_COLOUR - 1))) {
				__flush_cache_page(mpnt, addr, page_to_phys(page));
				/*
				 * Software is allowed to have any number
				 * of private mappings to a page.
				 */
				if (!(mpnt->vm_flags & VM_SHARED))
					continue;
				if (old_addr)
					pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
						old_addr, addr, mpnt->vm_file);
				old_addr = addr;
			}
		}
		WARN_ON(++count == 4096);
	}
	flush_dcache_mmap_unlock(mapping);
}
EXPORT_SYMBOL(flush_dcache_page);
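/*
 * The WARN_ON above trips once a single page turns out to be mapped
 * 4096 times; presumably a canary for pathological mapping counts
 * rather than a hard correctness limit.
 */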

/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
static unsigned long parisc_cache_flush_threshold __ro_after_init = FLUSH_THRESHOLD;

#define FLUSH_TLB_THRESHOLD (16*1024) /* 16 KiB minimum TLB threshold */
static unsigned long parisc_tlb_flush_threshold __ro_after_init = ~0UL;

void __init parisc_setup_cache_timing(void)
{
	unsigned long rangetime, alltime;
	unsigned long size;
	unsigned long threshold, threshold2;

	alltime = mfctl(16);
	flush_data_cache();
	alltime = mfctl(16) - alltime;

	size = (unsigned long)(_end - _text);
	rangetime = mfctl(16);
	flush_kernel_dcache_range((unsigned long)_text, size);
	rangetime = mfctl(16) - rangetime;

	printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	threshold = L1_CACHE_ALIGN((unsigned long)((uint64_t)size * alltime / rangetime));
	pr_info("Calculated flush threshold is %lu KiB\n",
		threshold/1024);

	/*
	 * The threshold computed above isn't very reliable. The following
	 * heuristic works reasonably well on c8000/rp3440.
	 */
	threshold2 = cache_info.dc_size * num_online_cpus();
	parisc_cache_flush_threshold = threshold2;
	printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
		parisc_cache_flush_threshold/1024);

	/* calculate TLB flush threshold */

	/* On SMP machines, skip the TLB measure of kernel text which
	 * has been mapped as huge pages. */
	if (num_online_cpus() > 1 && !parisc_requires_coherency()) {
		threshold = max(cache_info.it_size, cache_info.dt_size);
		threshold *= PAGE_SIZE;
		threshold /= num_online_cpus();
		goto set_tlb_threshold;
	}

	size = (unsigned long)_end - (unsigned long)_text;
	rangetime = mfctl(16);
	flush_tlb_kernel_range((unsigned long)_text, (unsigned long)_end);
	rangetime = mfctl(16) - rangetime;

	alltime = mfctl(16);
	flush_tlb_all();
	alltime = mfctl(16) - alltime;

	printk(KERN_INFO "Whole TLB flush %lu cycles, Range flush %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	threshold = PAGE_ALIGN((num_online_cpus() * size * alltime) / rangetime);
	printk(KERN_INFO "Calculated TLB flush threshold %lu KiB\n",
		threshold/1024);

set_tlb_threshold:
	if (threshold > FLUSH_TLB_THRESHOLD)
		parisc_tlb_flush_threshold = threshold;
	else
		parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD;

	printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
		parisc_tlb_flush_threshold/1024);
}

extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);

void flush_kernel_dcache_page_addr(const void *addr)
{
	unsigned long flags;

	flush_kernel_dcache_page_asm(addr);
	purge_tlb_start(flags);
	pdtlb(SR_KERNEL, addr);
	purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);

static void flush_cache_page_if_present(struct vm_area_struct *vma,
	unsigned long vmaddr, unsigned long pfn)
{
	pte_t *ptep = get_ptep(vma->vm_mm, vmaddr);

	/*
	 * The pte check is racy and sometimes the flush will trigger
	 * a non-access TLB miss. Hopefully, the page has already been
	 * flushed.
	 */
	if (ptep && pte_needs_flush(*ptep))
		flush_cache_page(vma, vmaddr, pfn);
}

void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

	kfrom = kmap_local_page(from);
	kto = kmap_local_page(to);
	flush_cache_page_if_present(vma, vaddr, page_to_pfn(from));
	copy_page_asm(kto, kfrom);
	kunmap_local(kto);
	kunmap_local(kfrom);
}
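/*
 * Only the source page is flushed before the copy: the user may have
 * dirtied it through its own (possibly differently coloured) mapping,
 * and the kernel alias used by copy_page_asm() must observe that
 * data.  The destination is left to the kunmap side, which on parisc
 * flushes the kernel mapping of the page on unmap.
 */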

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		unsigned long user_vaddr, void *dst, void *src, int len)
{
	flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len);
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
		unsigned long user_vaddr, void *dst, void *src, int len)
{
	flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
}

/* __flush_tlb_range()
 *
 * returns 1 if all TLBs were flushed.
 */
int __flush_tlb_range(unsigned long sid, unsigned long start,
		      unsigned long end)
{
	unsigned long flags;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    end - start >= parisc_tlb_flush_threshold) {
		flush_tlb_all();
		return 1;
	}

	/* Purge TLB entries for small ranges using the pdtlb and
	   pitlb instructions.  These instructions execute locally
	   but cause a purge request to be broadcast to other TLBs.  */
	while (start < end) {
		purge_tlb_start(flags);
		mtsp(sid, SR_TEMP1);
		pdtlb(SR_TEMP1, start);
		pitlb(SR_TEMP1, start);
		purge_tlb_end(flags);
		start += PAGE_SIZE;
	}
	return 0;
}

static void flush_cache_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	unsigned long addr, pfn;
	pte_t *ptep;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		/*
		 * The vma can contain pages that aren't present. Although
		 * the pte search is expensive, we need the pte to find the
		 * page pfn and to check whether the page should be flushed.
		 */
		ptep = get_ptep(vma->vm_mm, addr);
		if (ptep && pte_needs_flush(*ptep)) {
			if (parisc_requires_coherency()) {
				flush_user_cache_page(vma, addr);
			} else {
				pfn = pte_pfn(*ptep);
				if (WARN_ON(!pfn_valid(pfn)))
					return;
				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
			}
		}
	}
}

static inline unsigned long mm_total_size(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long usize = 0;
	VMA_ITERATOR(vmi, mm, 0);

	for_each_vma(vmi, vma) {
		if (usize >= parisc_cache_flush_threshold)
			break;
		usize += vma->vm_end - vma->vm_start;
	}
	return usize;
}
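/*
 * The loop above bails out as soon as the running total reaches the
 * flush threshold: the caller only compares the result against that
 * bound, so the exact size of a large mm is never needed.
 */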

void flush_cache_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	/*
	 * Flushing the whole cache on each cpu takes forever on
	 * rp3440, etc. So, avoid it if the mm isn't too big.
	 *
	 * Note that we must flush the entire cache on machines
	 * with aliasing caches to prevent random segmentation
	 * faults.
	 */
	if (!parisc_requires_coherency()
	    ||  mm_total_size(mm) >= parisc_cache_flush_threshold) {
		if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
			return;
		flush_tlb_all();
		flush_cache_all();
		return;
	}

	/* Flush mm */
	for_each_vma(vmi, vma)
		flush_cache_pages(vma, vma->vm_start, vma->vm_end);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (!parisc_requires_coherency()
	    || end - start >= parisc_cache_flush_threshold) {
		if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
			return;
		flush_tlb_range(vma, start, end);
		flush_cache_all();
		return;
	}

	flush_cache_pages(vma, start, end);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
	if (WARN_ON(!pfn_valid(pfn)))
		return;
	if (parisc_requires_coherency())
		flush_user_cache_page(vma, vmaddr);
	else
		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	if (!PageAnon(page))
		return;

	if (parisc_requires_coherency()) {
		if (vma->vm_flags & VM_SHARED)
			flush_data_cache();
		else
			flush_user_cache_page(vma, vmaddr);
		return;
	}

	flush_tlb_page(vma, vmaddr);
	preempt_disable();
	flush_dcache_page_asm(page_to_phys(page), vmaddr);
	preempt_enable();
}
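/*
 * Anonymous pages have no file offset, so unlike page-cache pages they
 * are not guaranteed a single congruent mapping; the flush therefore
 * goes through the user's own virtual address (or the equivalent
 * physical alias) instead of one representative file mapping.
 */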

void flush_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    (unsigned long)size >= parisc_cache_flush_threshold) {
		flush_tlb_kernel_range(start, end);
		flush_data_cache();
		return;
	}

	flush_kernel_dcache_range_asm(start, end);
	flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(flush_kernel_vmap_range);

void invalidate_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	/* Ensure DMA is complete */
	asm_syncdma();

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    (unsigned long)size >= parisc_cache_flush_threshold) {
		flush_tlb_kernel_range(start, end);
		flush_data_cache();
		return;
	}

	purge_kernel_dcache_range_asm(start, end);
	flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);