v5.4: arch/parisc/kernel/cache.c
  1/*
  2 * This file is subject to the terms and conditions of the GNU General Public
  3 * License.  See the file "COPYING" in the main directory of this archive
  4 * for more details.
  5 *
  6 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
  7 * Copyright (C) 1999 SuSE GmbH Nuernberg
  8 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
  9 *
 10 * Cache and TLB management
 11 *
 12 */
 13 
 14#include <linux/init.h>
 15#include <linux/kernel.h>
 16#include <linux/mm.h>
 17#include <linux/module.h>
 18#include <linux/seq_file.h>
 19#include <linux/pagemap.h>
 20#include <linux/sched.h>
 21#include <linux/sched/mm.h>
 22#include <asm/pdc.h>
 23#include <asm/cache.h>
 24#include <asm/cacheflush.h>
 25#include <asm/tlbflush.h>
 26#include <asm/page.h>
 27#include <asm/pgalloc.h>
 28#include <asm/processor.h>
 29#include <asm/sections.h>
 30#include <asm/shmparam.h>
 31
 32int split_tlb __ro_after_init;
 33int dcache_stride __ro_after_init;
 34int icache_stride __ro_after_init;
 35EXPORT_SYMBOL(dcache_stride);
 36
 37void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
 38EXPORT_SYMBOL(flush_dcache_page_asm);
 39void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
 40void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);
 41
 42
 43/* On some machines (i.e., ones with the Merced bus), there can be
 44 * only a single PxTLB broadcast at a time; this must be guaranteed
 45 * by software. We need a spinlock around all TLB flushes to ensure
 46 * this.
 47 */
 48DEFINE_SPINLOCK(pa_tlb_flush_lock);
 49
 50/* Swapper page setup lock. */
 51DEFINE_SPINLOCK(pa_swapper_pg_lock);
 52
 53#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
 54int pa_serialize_tlb_flushes __ro_after_init;
 55#endif
 56
 57struct pdc_cache_info cache_info __ro_after_init;
 58#ifndef CONFIG_PA20
 59static struct pdc_btlb_info btlb_info __ro_after_init;
 60#endif
 61
 62#ifdef CONFIG_SMP
 63void
 64flush_data_cache(void)
 65{
 66	on_each_cpu(flush_data_cache_local, NULL, 1);
 67}
 68void 
 69flush_instruction_cache(void)
 70{
 71	on_each_cpu(flush_instruction_cache_local, NULL, 1);
 72}
 73#endif
 74
 75void
 76flush_cache_all_local(void)
 77{
 78	flush_instruction_cache_local(NULL);
 79	flush_data_cache_local(NULL);
 80}
 81EXPORT_SYMBOL(flush_cache_all_local);
 82
 83/* Virtual address of pfn.  */
 84#define pfn_va(pfn)	__va(PFN_PHYS(pfn))
 85
 86void
 87update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 88{
 89	unsigned long pfn = pte_pfn(*ptep);
 90	struct page *page;
 91
 92	/* We don't have pte special.  As a result, we can be called with
 93	   an invalid pfn and we don't need to flush the kernel dcache page.
 94	   This occurs with FireGL card in C8000.  */
 95	if (!pfn_valid(pfn))
 96		return;
 97
 98	page = pfn_to_page(pfn);
 99	if (page_mapping_file(page) &&
100	    test_bit(PG_dcache_dirty, &page->flags)) {
101		flush_kernel_dcache_page_addr(pfn_va(pfn));
102		clear_bit(PG_dcache_dirty, &page->flags);
103	} else if (parisc_requires_coherency())
104		flush_kernel_dcache_page_addr(pfn_va(pfn));
105}
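/*
 * Editor's note: the PG_dcache_dirty handling above pairs with the
 * deferred-flush scheme in flush_dcache_page() below. When a page has a
 * mapping but no user mappings yet, that function merely sets the bit,
 * and the actual kernel-mapping flush is done here once a PTE is
 * installed and the page becomes visible to userspace.
 */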
106
107void
108show_cache_info(struct seq_file *m)
109{
110	char buf[32];
111
112	seq_printf(m, "I-cache\t\t: %ld KB\n", 
113		cache_info.ic_size/1024 );
114	if (cache_info.dc_loop != 1)
115		snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
116	seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n",
117		cache_info.dc_size/1024,
118		(cache_info.dc_conf.cc_wt ? "WT":"WB"),
119		(cache_info.dc_conf.cc_sh ? ", shared I/D":""),
120		((cache_info.dc_loop == 1) ? "direct mapped" : buf));
121	seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
122		cache_info.it_size,
123		cache_info.dt_size,
124		cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
125	);
126		
127#ifndef CONFIG_PA20
128	/* BTLB - Block TLB */
129	if (btlb_info.max_size==0) {
130		seq_printf(m, "BTLB\t\t: not supported\n" );
131	} else {
132		seq_printf(m, 
133		"BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
134		"BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
135		"BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
136		btlb_info.max_size, (int)4096,
137		btlb_info.max_size>>8,
138		btlb_info.fixed_range_info.num_i,
139		btlb_info.fixed_range_info.num_d,
140		btlb_info.fixed_range_info.num_comb, 
141		btlb_info.variable_range_info.num_i,
142		btlb_info.variable_range_info.num_d,
143		btlb_info.variable_range_info.num_comb
144		);
145	}
146#endif
147}
148
149void __init 
150parisc_cache_init(void)
151{
152	if (pdc_cache_info(&cache_info) < 0)
153		panic("parisc_cache_init: pdc_cache_info failed");
154
155#if 0
156	printk("ic_size %lx dc_size %lx it_size %lx\n",
157		cache_info.ic_size,
158		cache_info.dc_size,
159		cache_info.it_size);
160
161	printk("DC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
162		cache_info.dc_base,
163		cache_info.dc_stride,
164		cache_info.dc_count,
165		cache_info.dc_loop);
166
167	printk("dc_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
168		*(unsigned long *) (&cache_info.dc_conf),
169		cache_info.dc_conf.cc_alias,
170		cache_info.dc_conf.cc_block,
171		cache_info.dc_conf.cc_line,
172		cache_info.dc_conf.cc_shift);
173	printk("	wt %d sh %d cst %d hv %d\n",
174		cache_info.dc_conf.cc_wt,
175		cache_info.dc_conf.cc_sh,
176		cache_info.dc_conf.cc_cst,
177		cache_info.dc_conf.cc_hv);
178
179	printk("IC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
180		cache_info.ic_base,
181		cache_info.ic_stride,
182		cache_info.ic_count,
183		cache_info.ic_loop);
184
185	printk("IT  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
186		cache_info.it_sp_base,
187		cache_info.it_sp_stride,
188		cache_info.it_sp_count,
189		cache_info.it_loop,
190		cache_info.it_off_base,
191		cache_info.it_off_stride,
192		cache_info.it_off_count);
193
194	printk("DT  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
195		cache_info.dt_sp_base,
196		cache_info.dt_sp_stride,
197		cache_info.dt_sp_count,
198		cache_info.dt_loop,
199		cache_info.dt_off_base,
200		cache_info.dt_off_stride,
201		cache_info.dt_off_count);
202
203	printk("ic_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
204		*(unsigned long *) (&cache_info.ic_conf),
205		cache_info.ic_conf.cc_alias,
206		cache_info.ic_conf.cc_block,
207		cache_info.ic_conf.cc_line,
208		cache_info.ic_conf.cc_shift);
209	printk("	wt %d sh %d cst %d hv %d\n",
210		cache_info.ic_conf.cc_wt,
211		cache_info.ic_conf.cc_sh,
212		cache_info.ic_conf.cc_cst,
213		cache_info.ic_conf.cc_hv);
214
215	printk("D-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
216		cache_info.dt_conf.tc_sh,
217		cache_info.dt_conf.tc_page,
218		cache_info.dt_conf.tc_cst,
219		cache_info.dt_conf.tc_aid,
220		cache_info.dt_conf.tc_sr);
221
222	printk("I-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
223		cache_info.it_conf.tc_sh,
224		cache_info.it_conf.tc_page,
225		cache_info.it_conf.tc_cst,
226		cache_info.it_conf.tc_aid,
227		cache_info.it_conf.tc_sr);
228#endif
229
230	split_tlb = 0;
231	if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
232		if (cache_info.dt_conf.tc_sh == 2)
233			printk(KERN_WARNING "Unexpected TLB configuration. "
234			"Will flush I/D separately (could be optimized).\n");
235
236		split_tlb = 1;
237	}
238
239	/* "New and Improved" version from Jim Hull 
240	 *	(1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
241	 * The following CAFL_STRIDE is an optimized version, see
242	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
243	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
244	 */
245#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
246	dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
247	icache_stride = CAFL_STRIDE(cache_info.ic_conf);
248#undef CAFL_STRIDE
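	/*
	 * Worked example (hypothetical PDC values, for illustration only):
	 * with cc_line = 2, cc_block = 1 and cc_shift = 2, the stride is
	 * 2 << (3 + 1 + 2) = 128 bytes, i.e. the flush loops in pacache.S
	 * advance 128 bytes per cache-flush instruction.
	 */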
249
250#ifndef CONFIG_PA20
251	if (pdc_btlb_info(&btlb_info) < 0) {
252		memset(&btlb_info, 0, sizeof btlb_info);
253	}
254#endif
255
256	if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
257						PDC_MODEL_NVA_UNSUPPORTED) {
258		printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
259#if 0
260		panic("SMP kernel required to avoid non-equivalent aliasing");
261#endif
262	}
263}
264
265void __init disable_sr_hashing(void)
266{
267	int srhash_type, retval;
268	unsigned long space_bits;
269
270	switch (boot_cpu_data.cpu_type) {
271	case pcx: /* We shouldn't get this far.  setup.c should prevent it. */
272		BUG();
273		return;
274
275	case pcxs:
276	case pcxt:
277	case pcxt_:
278		srhash_type = SRHASH_PCXST;
279		break;
280
281	case pcxl:
282		srhash_type = SRHASH_PCXL;
283		break;
284
285	case pcxl2: /* pcxl2 doesn't support space register hashing */
286		return;
287
288	default: /* Currently all PA2.0 machines use the same ins. sequence */
289		srhash_type = SRHASH_PA20;
290		break;
291	}
292
293	disable_sr_hashing_asm(srhash_type);
294
295	retval = pdc_spaceid_bits(&space_bits);
296	/* If this procedure isn't implemented, don't panic. */
297	if (retval < 0 && retval != PDC_BAD_OPTION)
298		panic("pdc_spaceid_bits call failed.\n");
299	if (space_bits != 0)
300		panic("SpaceID hashing is still on!\n");
301}
302
303static inline void
304__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
305		   unsigned long physaddr)
306{
307	preempt_disable();
308	flush_dcache_page_asm(physaddr, vmaddr);
309	if (vma->vm_flags & VM_EXEC)
310		flush_icache_page_asm(physaddr, vmaddr);
311	preempt_enable();
312}
313
314static inline void
315__purge_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
316		   unsigned long physaddr)
317{
318	preempt_disable();
319	purge_dcache_page_asm(physaddr, vmaddr);
320	if (vma->vm_flags & VM_EXEC)
321		flush_icache_page_asm(physaddr, vmaddr);
322	preempt_enable();
323}
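/*
 * The two helpers above mirror the PA-RISC fdc and pdc instructions:
 * __flush_cache_page() writes dirty lines back before invalidating,
 * while __purge_cache_page() discards them outright, which is only safe
 * when no context can still depend on the data (see the !mm->context
 * callers below).
 */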
324
325void flush_dcache_page(struct page *page)
326{
327	struct address_space *mapping = page_mapping_file(page);
328	struct vm_area_struct *mpnt;
329	unsigned long offset;
330	unsigned long addr, old_addr = 0;
331	pgoff_t pgoff;
332
333	if (mapping && !mapping_mapped(mapping)) {
334		set_bit(PG_dcache_dirty, &page->flags);
335		return;
336	}
337
338	flush_kernel_dcache_page(page);
339
340	if (!mapping)
341		return;
342
343	pgoff = page->index;
344
345	/* We have carefully arranged in arch_get_unmapped_area() that
346	 * *any* mappings of a file are always congruently mapped (whether
347	 * declared as MAP_PRIVATE or MAP_SHARED), so we only need
348	 * to flush one address here for them all to become coherent */
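	/* Illustration with made-up addresses: assuming SHM_COLOUR is 4 MB,
	 * two mappings of the same file offset at 0x40000000 and 0x40400000
	 * are congruent (equal modulo SHM_COLOUR), so flushing one virtual
	 * alias cleans the line for both; only inequivalent aliases hit the
	 * error printk below. */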
349
350	flush_dcache_mmap_lock(mapping);
351	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
352		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
353		addr = mpnt->vm_start + offset;
354
355		/* The TLB is the engine of coherence on parisc: The
356		 * CPU is entitled to speculate any page with a TLB
357		 * mapping, so here we kill the mapping then flush the
 358		 * page along a special flush-only alias mapping.
 359		 * This guarantees that the page is no longer in the
 360		 * cache for any process, nor may it be
361		 * speculatively read in (until the user or kernel
362		 * specifically accesses it, of course) */
363
364		flush_tlb_page(mpnt, addr);
365		if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
366				      != (addr & (SHM_COLOUR - 1))) {
367			__flush_cache_page(mpnt, addr, page_to_phys(page));
368			if (old_addr)
369				printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n", old_addr, addr, mpnt->vm_file);
370			old_addr = addr;
371		}
372	}
373	flush_dcache_mmap_unlock(mapping);
374}
375EXPORT_SYMBOL(flush_dcache_page);
376
377/* Defined in arch/parisc/kernel/pacache.S */
378EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
379EXPORT_SYMBOL(flush_kernel_dcache_page_asm);
380EXPORT_SYMBOL(flush_data_cache_local);
381EXPORT_SYMBOL(flush_kernel_icache_range_asm);
382
383#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
384static unsigned long parisc_cache_flush_threshold __ro_after_init = FLUSH_THRESHOLD;
385
386#define FLUSH_TLB_THRESHOLD (16*1024) /* 16 KiB minimum TLB threshold */
387static unsigned long parisc_tlb_flush_threshold __ro_after_init = FLUSH_TLB_THRESHOLD;
388
389void __init parisc_setup_cache_timing(void)
390{
391	unsigned long rangetime, alltime;
392	unsigned long size, start;
393	unsigned long threshold;
394
395	alltime = mfctl(16);
396	flush_data_cache();
397	alltime = mfctl(16) - alltime;
398
399	size = (unsigned long)(_end - _text);
400	rangetime = mfctl(16);
401	flush_kernel_dcache_range((unsigned long)_text, size);
402	rangetime = mfctl(16) - rangetime;
403
404	printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
405		alltime, size, rangetime);
406
407	threshold = L1_CACHE_ALIGN(size * alltime / rangetime);
408	if (threshold > cache_info.dc_size)
409		threshold = cache_info.dc_size;
410	if (threshold)
411		parisc_cache_flush_threshold = threshold;
412	printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
413		parisc_cache_flush_threshold/1024);
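	/*
	 * Example with made-up timings: if the whole-cache flush above took
	 * 100k cycles and flushing the 4 MB kernel text/data range took
	 * 800k cycles, the break-even threshold is 4 MB * 100k / 800k =
	 * 512 KB (capped at dc_size); larger ranges are flushed wholesale.
	 */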
414
415	/* calculate TLB flush threshold */
416
 417	/* On SMP machines, skip the TLB measurement of kernel text, which
 418	 * has been mapped as huge pages. */
419	if (num_online_cpus() > 1 && !parisc_requires_coherency()) {
420		threshold = max(cache_info.it_size, cache_info.dt_size);
421		threshold *= PAGE_SIZE;
422		threshold /= num_online_cpus();
423		goto set_tlb_threshold;
424	}
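	/*
	 * Illustrative numbers for the SMP shortcut above: with 256-entry
	 * I/D TLBs, 4 KB pages and 4 online CPUs, the threshold becomes
	 * 256 * 4096 / 4 = 256 KB, i.e. ranges beyond one CPU's share of
	 * TLB reach fall back to flush_tlb_all().
	 */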
425
426	size = 0;
427	start = (unsigned long) _text;
428	rangetime = mfctl(16);
429	while (start < (unsigned long) _end) {
430		flush_tlb_kernel_range(start, start + PAGE_SIZE);
431		start += PAGE_SIZE;
432		size += PAGE_SIZE;
433	}
434	rangetime = mfctl(16) - rangetime;
435
436	alltime = mfctl(16);
437	flush_tlb_all();
438	alltime = mfctl(16) - alltime;
439
440	printk(KERN_INFO "Whole TLB flush %lu cycles, Range flush %lu bytes %lu cycles\n",
441		alltime, size, rangetime);
442
443	threshold = PAGE_ALIGN((num_online_cpus() * size * alltime) / rangetime);
444	printk(KERN_INFO "Calculated TLB flush threshold %lu KiB\n",
445		threshold/1024);
446
447set_tlb_threshold:
448	if (threshold > parisc_tlb_flush_threshold)
449		parisc_tlb_flush_threshold = threshold;
450	printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
451		parisc_tlb_flush_threshold/1024);
452}
453
454extern void purge_kernel_dcache_page_asm(unsigned long);
455extern void clear_user_page_asm(void *, unsigned long);
456extern void copy_user_page_asm(void *, void *, unsigned long);
457
458void flush_kernel_dcache_page_addr(void *addr)
459{
460	unsigned long flags;
461
462	flush_kernel_dcache_page_asm(addr);
463	purge_tlb_start(flags);
464	pdtlb_kernel(addr);
465	purge_tlb_end(flags);
466}
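/*
 * The pdtlb_kernel() above is not incidental: as the comment in
 * flush_dcache_page() notes, the TLB is the engine of coherence here,
 * so dropping the translation keeps the CPU from speculatively pulling
 * the page back into the cache right after the flush.
 */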
467EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
468
469void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
470	struct page *pg)
471{
472       /* Copy using kernel mapping.  No coherency is needed (all in
473	  kunmap) for the `to' page.  However, the `from' page needs to
474	  be flushed through a mapping equivalent to the user mapping
475	  before it can be accessed through the kernel mapping. */
476	preempt_disable();
477	flush_dcache_page_asm(__pa(vfrom), vaddr);
478	copy_page_asm(vto, vfrom);
479	preempt_enable();
480}
481EXPORT_SYMBOL(copy_user_page);
482
483/* __flush_tlb_range()
484 *
485 * returns 1 if all TLBs were flushed.
486 */
487int __flush_tlb_range(unsigned long sid, unsigned long start,
488		      unsigned long end)
489{
490	unsigned long flags;
491
492	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
493	    end - start >= parisc_tlb_flush_threshold) {
494		flush_tlb_all();
495		return 1;
496	}
497
498	/* Purge TLB entries for small ranges using the pdtlb and
499	   pitlb instructions.  These instructions execute locally
500	   but cause a purge request to be broadcast to other TLBs.  */
501	while (start < end) {
502		purge_tlb_start(flags);
503		mtsp(sid, 1);
504		pdtlb(start);
505		pitlb(start);
506		purge_tlb_end(flags);
507		start += PAGE_SIZE;
508	}
509	return 0;
510}
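/*
 * Usage sketch (illustrative, not a real caller): the return value lets
 * callers skip per-page work when a global flush already covered them:
 *
 *	if (__flush_tlb_range(mm->context, start, end))
 *		return;		// flush_tlb_all() handled everything
 */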
511
512static void cacheflush_h_tmp_function(void *dummy)
513{
514	flush_cache_all_local();
515}
516
517void flush_cache_all(void)
518{
519	on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
520}
521
522static inline unsigned long mm_total_size(struct mm_struct *mm)
523{
524	struct vm_area_struct *vma;
525	unsigned long usize = 0;
526
527	for (vma = mm->mmap; vma; vma = vma->vm_next)
528		usize += vma->vm_end - vma->vm_start;
529	return usize;
530}
531
532static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
533{
534	pte_t *ptep = NULL;
535
536	if (!pgd_none(*pgd)) {
537		pud_t *pud = pud_offset(pgd, addr);
538		if (!pud_none(*pud)) {
539			pmd_t *pmd = pmd_offset(pud, addr);
540			if (!pmd_none(*pmd))
541				ptep = pte_offset_map(pmd, addr);
542		}
543	}
544	return ptep;
545}
546
547void flush_cache_mm(struct mm_struct *mm)
548{
549	struct vm_area_struct *vma;
550	pgd_t *pgd;
551
552	/* Flushing the whole cache on each cpu takes forever on
553	   rp3440, etc.  So, avoid it if the mm isn't too big.  */
554	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
555	    mm_total_size(mm) >= parisc_cache_flush_threshold) {
556		if (mm->context)
557			flush_tlb_all();
558		flush_cache_all();
559		return;
560	}
561
562	if (mm->context == mfsp(3)) {
563		for (vma = mm->mmap; vma; vma = vma->vm_next) {
564			flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
565			if (vma->vm_flags & VM_EXEC)
566				flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
567			flush_tlb_range(vma, vma->vm_start, vma->vm_end);
568		}
569		return;
570	}
571
572	pgd = mm->pgd;
573	for (vma = mm->mmap; vma; vma = vma->vm_next) {
574		unsigned long addr;
575
576		for (addr = vma->vm_start; addr < vma->vm_end;
577		     addr += PAGE_SIZE) {
578			unsigned long pfn;
579			pte_t *ptep = get_ptep(pgd, addr);
580			if (!ptep)
581				continue;
582			pfn = pte_pfn(*ptep);
583			if (!pfn_valid(pfn))
584				continue;
585			if (unlikely(mm->context)) {
586				flush_tlb_page(vma, addr);
587				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
588			} else {
589				__purge_cache_page(vma, addr, PFN_PHYS(pfn));
590			}
591		}
592	}
593}
594
595void flush_cache_range(struct vm_area_struct *vma,
596		unsigned long start, unsigned long end)
597{
598	pgd_t *pgd;
599	unsigned long addr;
600
601	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
602	    end - start >= parisc_cache_flush_threshold) {
603		if (vma->vm_mm->context)
604			flush_tlb_range(vma, start, end);
605		flush_cache_all();
606		return;
607	}
608
609	if (vma->vm_mm->context == mfsp(3)) {
610		flush_user_dcache_range_asm(start, end);
611		if (vma->vm_flags & VM_EXEC)
612			flush_user_icache_range_asm(start, end);
613		flush_tlb_range(vma, start, end);
614		return;
615	}
616
617	pgd = vma->vm_mm->pgd;
618	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
619		unsigned long pfn;
620		pte_t *ptep = get_ptep(pgd, addr);
621		if (!ptep)
622			continue;
623		pfn = pte_pfn(*ptep);
624		if (pfn_valid(pfn)) {
625			if (unlikely(vma->vm_mm->context)) {
626				flush_tlb_page(vma, addr);
627				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
628			} else {
629				__purge_cache_page(vma, addr, PFN_PHYS(pfn));
630			}
631		}
632	}
633}
634
635void
636flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
637{
638	if (pfn_valid(pfn)) {
639		if (likely(vma->vm_mm->context)) {
640			flush_tlb_page(vma, vmaddr);
641			__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
642		} else {
643			__purge_cache_page(vma, vmaddr, PFN_PHYS(pfn));
644		}
645	}
646}
647
648void flush_kernel_vmap_range(void *vaddr, int size)
649{
650	unsigned long start = (unsigned long)vaddr;
651	unsigned long end = start + size;
652
653	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
654	    (unsigned long)size >= parisc_cache_flush_threshold) {
655		flush_tlb_kernel_range(start, end);
656		flush_data_cache();
657		return;
658	}
659
660	flush_kernel_dcache_range_asm(start, end);
661	flush_tlb_kernel_range(start, end);
662}
663EXPORT_SYMBOL(flush_kernel_vmap_range);
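/*
 * flush_kernel_vmap_range() above vs invalidate_kernel_vmap_range()
 * below: the former writes dirty lines back (e.g. before a device reads
 * a buffer), the latter purges possibly stale lines (e.g. after a
 * device has written one); both drop the kernel TLB entries covering
 * the range.
 */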
664
665void invalidate_kernel_vmap_range(void *vaddr, int size)
666{
667	unsigned long start = (unsigned long)vaddr;
668	unsigned long end = start + size;
669
670	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
671	    (unsigned long)size >= parisc_cache_flush_threshold) {
672		flush_tlb_kernel_range(start, end);
673		flush_data_cache();
674		return;
675	}
676
677	purge_kernel_dcache_range_asm(start, end);
678	flush_tlb_kernel_range(start, end);
679}
680EXPORT_SYMBOL(invalidate_kernel_vmap_range);
v4.10.11: arch/parisc/kernel/cache.c
  1/*
  2 * This file is subject to the terms and conditions of the GNU General Public
  3 * License.  See the file "COPYING" in the main directory of this archive
  4 * for more details.
  5 *
  6 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
  7 * Copyright (C) 1999 SuSE GmbH Nuernberg
  8 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
  9 *
 10 * Cache and TLB management
 11 *
 12 */
 13 
 14#include <linux/init.h>
 15#include <linux/kernel.h>
 16#include <linux/mm.h>
 17#include <linux/module.h>
 18#include <linux/seq_file.h>
 19#include <linux/pagemap.h>
 20#include <linux/sched.h>
 21#include <asm/pdc.h>
 22#include <asm/cache.h>
 23#include <asm/cacheflush.h>
 24#include <asm/tlbflush.h>
 25#include <asm/page.h>
 26#include <asm/pgalloc.h>
 27#include <asm/processor.h>
 28#include <asm/sections.h>
 29#include <asm/shmparam.h>
 30
 31int split_tlb __read_mostly;
 32int dcache_stride __read_mostly;
 33int icache_stride __read_mostly;
 34EXPORT_SYMBOL(dcache_stride);
 35
 36void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
 37EXPORT_SYMBOL(flush_dcache_page_asm);
 38void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);
 39
 40
 41/* On some machines (e.g. ones with the Merced bus), there can be
 42 * only a single PxTLB broadcast at a time; this must be guaranteed
 43 * by software.  We put a spinlock around all TLB flushes  to
 44 * ensure this.
 45 */
 46DEFINE_SPINLOCK(pa_tlb_lock);
 47
 48struct pdc_cache_info cache_info __read_mostly;
 49#ifndef CONFIG_PA20
 50static struct pdc_btlb_info btlb_info __read_mostly;
 51#endif
 52
 53#ifdef CONFIG_SMP
 54void
 55flush_data_cache(void)
 56{
 57	on_each_cpu(flush_data_cache_local, NULL, 1);
 58}
 59void 
 60flush_instruction_cache(void)
 61{
 62	on_each_cpu(flush_instruction_cache_local, NULL, 1);
 63}
 64#endif
 65
 66void
 67flush_cache_all_local(void)
 68{
 69	flush_instruction_cache_local(NULL);
 70	flush_data_cache_local(NULL);
 71}
 72EXPORT_SYMBOL(flush_cache_all_local);
 73
 74/* Virtual address of pfn.  */
 75#define pfn_va(pfn)	__va(PFN_PHYS(pfn))
 76
 77void
 78update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 79{
 80	unsigned long pfn = pte_pfn(*ptep);
 81	struct page *page;
 82
 83	/* We don't have pte special.  As a result, we can be called with
 84	   an invalid pfn and we don't need to flush the kernel dcache page.
 85	   This occurs with FireGL card in C8000.  */
 86	if (!pfn_valid(pfn))
 87		return;
 88
 89	page = pfn_to_page(pfn);
 90	if (page_mapping(page) && test_bit(PG_dcache_dirty, &page->flags)) {
 91		flush_kernel_dcache_page_addr(pfn_va(pfn));
 92		clear_bit(PG_dcache_dirty, &page->flags);
 93	} else if (parisc_requires_coherency())
 94		flush_kernel_dcache_page_addr(pfn_va(pfn));
 95}
 96
 97void
 98show_cache_info(struct seq_file *m)
 99{
100	char buf[32];
101
102	seq_printf(m, "I-cache\t\t: %ld KB\n", 
103		cache_info.ic_size/1024 );
104	if (cache_info.dc_loop != 1)
105		snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
106	seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n",
107		cache_info.dc_size/1024,
108		(cache_info.dc_conf.cc_wt ? "WT":"WB"),
109		(cache_info.dc_conf.cc_sh ? ", shared I/D":""),
110		((cache_info.dc_loop == 1) ? "direct mapped" : buf));
111	seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
112		cache_info.it_size,
113		cache_info.dt_size,
114		cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
115	);
116		
117#ifndef CONFIG_PA20
118	/* BTLB - Block TLB */
119	if (btlb_info.max_size==0) {
120		seq_printf(m, "BTLB\t\t: not supported\n" );
121	} else {
122		seq_printf(m, 
123		"BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
124		"BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
125		"BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
126		btlb_info.max_size, (int)4096,
127		btlb_info.max_size>>8,
128		btlb_info.fixed_range_info.num_i,
129		btlb_info.fixed_range_info.num_d,
130		btlb_info.fixed_range_info.num_comb, 
131		btlb_info.variable_range_info.num_i,
132		btlb_info.variable_range_info.num_d,
133		btlb_info.variable_range_info.num_comb
134		);
135	}
136#endif
137}
138
139void __init 
140parisc_cache_init(void)
141{
142	if (pdc_cache_info(&cache_info) < 0)
143		panic("parisc_cache_init: pdc_cache_info failed");
144
145#if 0
146	printk("ic_size %lx dc_size %lx it_size %lx\n",
147		cache_info.ic_size,
148		cache_info.dc_size,
149		cache_info.it_size);
150
151	printk("DC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
152		cache_info.dc_base,
153		cache_info.dc_stride,
154		cache_info.dc_count,
155		cache_info.dc_loop);
156
157	printk("dc_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
158		*(unsigned long *) (&cache_info.dc_conf),
159		cache_info.dc_conf.cc_alias,
160		cache_info.dc_conf.cc_block,
161		cache_info.dc_conf.cc_line,
162		cache_info.dc_conf.cc_shift);
163	printk("	wt %d sh %d cst %d hv %d\n",
164		cache_info.dc_conf.cc_wt,
165		cache_info.dc_conf.cc_sh,
166		cache_info.dc_conf.cc_cst,
167		cache_info.dc_conf.cc_hv);
168
169	printk("IC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
170		cache_info.ic_base,
171		cache_info.ic_stride,
172		cache_info.ic_count,
173		cache_info.ic_loop);
174
175	printk("IT  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
176		cache_info.it_sp_base,
177		cache_info.it_sp_stride,
178		cache_info.it_sp_count,
179		cache_info.it_loop,
180		cache_info.it_off_base,
181		cache_info.it_off_stride,
182		cache_info.it_off_count);
183
184	printk("DT  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
185		cache_info.dt_sp_base,
186		cache_info.dt_sp_stride,
187		cache_info.dt_sp_count,
188		cache_info.dt_loop,
189		cache_info.dt_off_base,
190		cache_info.dt_off_stride,
191		cache_info.dt_off_count);
192
193	printk("ic_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
194		*(unsigned long *) (&cache_info.ic_conf),
195		cache_info.ic_conf.cc_alias,
196		cache_info.ic_conf.cc_block,
197		cache_info.ic_conf.cc_line,
198		cache_info.ic_conf.cc_shift);
199	printk("	wt %d sh %d cst %d hv %d\n",
200		cache_info.ic_conf.cc_wt,
201		cache_info.ic_conf.cc_sh,
202		cache_info.ic_conf.cc_cst,
203		cache_info.ic_conf.cc_hv);
204
205	printk("D-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
206		cache_info.dt_conf.tc_sh,
207		cache_info.dt_conf.tc_page,
208		cache_info.dt_conf.tc_cst,
209		cache_info.dt_conf.tc_aid,
210		cache_info.dt_conf.tc_sr);
211
212	printk("I-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
213		cache_info.it_conf.tc_sh,
214		cache_info.it_conf.tc_page,
215		cache_info.it_conf.tc_cst,
216		cache_info.it_conf.tc_aid,
217		cache_info.it_conf.tc_sr);
218#endif
219
220	split_tlb = 0;
221	if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
222		if (cache_info.dt_conf.tc_sh == 2)
223			printk(KERN_WARNING "Unexpected TLB configuration. "
224			"Will flush I/D separately (could be optimized).\n");
225
226		split_tlb = 1;
227	}
228
229	/* "New and Improved" version from Jim Hull 
230	 *	(1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
231	 * The following CAFL_STRIDE is an optimized version, see
232	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
233	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
234	 */
235#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
236	dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
237	icache_stride = CAFL_STRIDE(cache_info.ic_conf);
238#undef CAFL_STRIDE
239
240#ifndef CONFIG_PA20
241	if (pdc_btlb_info(&btlb_info) < 0) {
242		memset(&btlb_info, 0, sizeof btlb_info);
243	}
244#endif
245
246	if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
247						PDC_MODEL_NVA_UNSUPPORTED) {
248		printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
249#if 0
250		panic("SMP kernel required to avoid non-equivalent aliasing");
251#endif
252	}
253}
254
255void disable_sr_hashing(void)
256{
257	int srhash_type, retval;
258	unsigned long space_bits;
259
260	switch (boot_cpu_data.cpu_type) {
261	case pcx: /* We shouldn't get this far.  setup.c should prevent it. */
262		BUG();
263		return;
264
265	case pcxs:
266	case pcxt:
267	case pcxt_:
268		srhash_type = SRHASH_PCXST;
269		break;
270
271	case pcxl:
272		srhash_type = SRHASH_PCXL;
273		break;
274
275	case pcxl2: /* pcxl2 doesn't support space register hashing */
276		return;
277
278	default: /* Currently all PA2.0 machines use the same ins. sequence */
279		srhash_type = SRHASH_PA20;
280		break;
281	}
282
283	disable_sr_hashing_asm(srhash_type);
284
285	retval = pdc_spaceid_bits(&space_bits);
286	/* If this procedure isn't implemented, don't panic. */
287	if (retval < 0 && retval != PDC_BAD_OPTION)
288		panic("pdc_spaceid_bits call failed.\n");
289	if (space_bits != 0)
290		panic("SpaceID hashing is still on!\n");
291}
292
293static inline void
294__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
295		   unsigned long physaddr)
296{
297	preempt_disable();
298	flush_dcache_page_asm(physaddr, vmaddr);
299	if (vma->vm_flags & VM_EXEC)
300		flush_icache_page_asm(physaddr, vmaddr);
301	preempt_enable();
302}
303
304void flush_dcache_page(struct page *page)
305{
306	struct address_space *mapping = page_mapping(page);
307	struct vm_area_struct *mpnt;
308	unsigned long offset;
309	unsigned long addr, old_addr = 0;
310	pgoff_t pgoff;
311
312	if (mapping && !mapping_mapped(mapping)) {
313		set_bit(PG_dcache_dirty, &page->flags);
314		return;
315	}
316
317	flush_kernel_dcache_page(page);
318
319	if (!mapping)
320		return;
321
322	pgoff = page->index;
323
324	/* We have carefully arranged in arch_get_unmapped_area() that
325	 * *any* mappings of a file are always congruently mapped (whether
326	 * declared as MAP_PRIVATE or MAP_SHARED), so we only need
327	 * to flush one address here for them all to become coherent */
328
329	flush_dcache_mmap_lock(mapping);
330	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
331		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
332		addr = mpnt->vm_start + offset;
333
334		/* The TLB is the engine of coherence on parisc: The
335		 * CPU is entitled to speculate any page with a TLB
336		 * mapping, so here we kill the mapping then flush the
 337		 * page along a special flush-only alias mapping.
 338		 * This guarantees that the page is no longer in the
 339		 * cache for any process, nor may it be
340		 * speculatively read in (until the user or kernel
341		 * specifically accesses it, of course) */
342
343		flush_tlb_page(mpnt, addr);
344		if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
345				      != (addr & (SHM_COLOUR - 1))) {
346			__flush_cache_page(mpnt, addr, page_to_phys(page));
347			if (old_addr)
348				printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n", old_addr, addr, mpnt->vm_file);
349			old_addr = addr;
350		}
351	}
352	flush_dcache_mmap_unlock(mapping);
353}
354EXPORT_SYMBOL(flush_dcache_page);
355
356/* Defined in arch/parisc/kernel/pacache.S */
357EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
358EXPORT_SYMBOL(flush_kernel_dcache_page_asm);
359EXPORT_SYMBOL(flush_data_cache_local);
360EXPORT_SYMBOL(flush_kernel_icache_range_asm);
361
362#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
363static unsigned long parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;
364
365#define FLUSH_TLB_THRESHOLD (2*1024*1024) /* 2MB initial TLB threshold */
366static unsigned long parisc_tlb_flush_threshold __read_mostly = FLUSH_TLB_THRESHOLD;
367
368void __init parisc_setup_cache_timing(void)
369{
370	unsigned long rangetime, alltime;
371	unsigned long size, start;
372	unsigned long threshold;
373
374	alltime = mfctl(16);
375	flush_data_cache();
376	alltime = mfctl(16) - alltime;
377
378	size = (unsigned long)(_end - _text);
379	rangetime = mfctl(16);
380	flush_kernel_dcache_range((unsigned long)_text, size);
381	rangetime = mfctl(16) - rangetime;
382
383	printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
384		alltime, size, rangetime);
385
386	threshold = L1_CACHE_ALIGN(size * alltime / rangetime);
387	if (threshold > cache_info.dc_size)
388		threshold = cache_info.dc_size;
389	if (threshold)
390		parisc_cache_flush_threshold = threshold;
391	printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
392		parisc_cache_flush_threshold/1024);
393
394	/* calculate TLB flush threshold */
395
 396	/* On SMP machines, skip the TLB measurement of kernel text, which
 397	 * has been mapped as huge pages. */
398	if (num_online_cpus() > 1 && !parisc_requires_coherency()) {
399		threshold = max(cache_info.it_size, cache_info.dt_size);
400		threshold *= PAGE_SIZE;
401		threshold /= num_online_cpus();
402		goto set_tlb_threshold;
403	}
404
405	alltime = mfctl(16);
406	flush_tlb_all();
407	alltime = mfctl(16) - alltime;
408
409	size = 0;
410	start = (unsigned long) _text;
411	rangetime = mfctl(16);
412	while (start < (unsigned long) _end) {
413		flush_tlb_kernel_range(start, start + PAGE_SIZE);
414		start += PAGE_SIZE;
415		size += PAGE_SIZE;
416	}
417	rangetime = mfctl(16) - rangetime;
418
419	printk(KERN_DEBUG "Whole TLB flush %lu cycles, flushing %lu bytes %lu cycles\n",
420		alltime, size, rangetime);
421
422	threshold = PAGE_ALIGN(num_online_cpus() * size * alltime / rangetime);
423
424set_tlb_threshold:
425	if (threshold)
426		parisc_tlb_flush_threshold = threshold;
427	printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
428		parisc_tlb_flush_threshold/1024);
429}
430
431extern void purge_kernel_dcache_page_asm(unsigned long);
432extern void clear_user_page_asm(void *, unsigned long);
433extern void copy_user_page_asm(void *, void *, unsigned long);
434
435void flush_kernel_dcache_page_addr(void *addr)
436{
437	unsigned long flags;
438
439	flush_kernel_dcache_page_asm(addr);
440	purge_tlb_start(flags);
441	pdtlb_kernel(addr);
442	purge_tlb_end(flags);
443}
444EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
445
446void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
447	struct page *pg)
448{
449       /* Copy using kernel mapping.  No coherency is needed (all in
450	  kunmap) for the `to' page.  However, the `from' page needs to
451	  be flushed through a mapping equivalent to the user mapping
452	  before it can be accessed through the kernel mapping. */
453	preempt_disable();
454	flush_dcache_page_asm(__pa(vfrom), vaddr);
455	preempt_enable();
456	copy_page_asm(vto, vfrom);
457}
458EXPORT_SYMBOL(copy_user_page);
459
460/* __flush_tlb_range()
461 *
462 * returns 1 if all TLBs were flushed.
463 */
464int __flush_tlb_range(unsigned long sid, unsigned long start,
465		      unsigned long end)
466{
467	unsigned long flags, size;
468
469	size = (end - start);
470	if (size >= parisc_tlb_flush_threshold) {
471		flush_tlb_all();
472		return 1;
473	}
474
475	/* Purge TLB entries for small ranges using the pdtlb and
476	   pitlb instructions.  These instructions execute locally
477	   but cause a purge request to be broadcast to other TLBs.  */
478	if (likely(!split_tlb)) {
479		while (start < end) {
480			purge_tlb_start(flags);
481			mtsp(sid, 1);
482			pdtlb(start);
483			purge_tlb_end(flags);
484			start += PAGE_SIZE;
485		}
486		return 0;
487	}
488
489	/* split TLB case */
490	while (start < end) {
491		purge_tlb_start(flags);
492		mtsp(sid, 1);
493		pdtlb(start);
494		pitlb(start);
495		purge_tlb_end(flags);
496		start += PAGE_SIZE;
497	}
498	return 0;
499}
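/*
 * Unlike the v5.4 version of this function above, this one still
 * special-cases split_tlb: a unified TLB needs only pdtlb() per page,
 * while split I/D TLBs need both pdtlb() and pitlb(), hence the two
 * loops.
 */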
500
501static void cacheflush_h_tmp_function(void *dummy)
502{
503	flush_cache_all_local();
504}
505
506void flush_cache_all(void)
507{
508	on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
509}
510
511static inline unsigned long mm_total_size(struct mm_struct *mm)
512{
513	struct vm_area_struct *vma;
514	unsigned long usize = 0;
515
516	for (vma = mm->mmap; vma; vma = vma->vm_next)
517		usize += vma->vm_end - vma->vm_start;
518	return usize;
519}
520
521static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
522{
523	pte_t *ptep = NULL;
524
525	if (!pgd_none(*pgd)) {
526		pud_t *pud = pud_offset(pgd, addr);
527		if (!pud_none(*pud)) {
528			pmd_t *pmd = pmd_offset(pud, addr);
529			if (!pmd_none(*pmd))
530				ptep = pte_offset_map(pmd, addr);
531		}
532	}
533	return ptep;
534}
535
536void flush_cache_mm(struct mm_struct *mm)
537{
538	struct vm_area_struct *vma;
539	pgd_t *pgd;
540
541	/* Flushing the whole cache on each cpu takes forever on
542	   rp3440, etc.  So, avoid it if the mm isn't too big.  */
543	if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
544		flush_cache_all();
545		return;
546	}
547
548	if (mm->context == mfsp(3)) {
549		for (vma = mm->mmap; vma; vma = vma->vm_next) {
550			flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
551			if ((vma->vm_flags & VM_EXEC) == 0)
552				continue;
553			flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
554		}
555		return;
556	}
557
558	pgd = mm->pgd;
559	for (vma = mm->mmap; vma; vma = vma->vm_next) {
560		unsigned long addr;
561
562		for (addr = vma->vm_start; addr < vma->vm_end;
563		     addr += PAGE_SIZE) {
564			unsigned long pfn;
565			pte_t *ptep = get_ptep(pgd, addr);
566			if (!ptep)
567				continue;
568			pfn = pte_pfn(*ptep);
569			if (!pfn_valid(pfn))
570				continue;
571			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
572		}
573	}
574}
575
576void
577flush_user_dcache_range(unsigned long start, unsigned long end)
578{
579	if ((end - start) < parisc_cache_flush_threshold)
580		flush_user_dcache_range_asm(start,end);
581	else
582		flush_data_cache();
583}
584
585void
586flush_user_icache_range(unsigned long start, unsigned long end)
587{
588	if ((end - start) < parisc_cache_flush_threshold)
589		flush_user_icache_range_asm(start,end);
590	else
591		flush_instruction_cache();
592}
593
594void flush_cache_range(struct vm_area_struct *vma,
595		unsigned long start, unsigned long end)
596{
597	unsigned long addr;
598	pgd_t *pgd;
599
600	BUG_ON(!vma->vm_mm->context);
601
602	if ((end - start) >= parisc_cache_flush_threshold) {
603		flush_cache_all();
604		return;
605	}
606
607	if (vma->vm_mm->context == mfsp(3)) {
608		flush_user_dcache_range_asm(start, end);
609		if (vma->vm_flags & VM_EXEC)
610			flush_user_icache_range_asm(start, end);
611		return;
612	}
613
614	pgd = vma->vm_mm->pgd;
615	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
616		unsigned long pfn;
617		pte_t *ptep = get_ptep(pgd, addr);
618		if (!ptep)
619			continue;
620		pfn = pte_pfn(*ptep);
621		if (pfn_valid(pfn))
622			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
623	}
624}
625
626void
627flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
628{
629	BUG_ON(!vma->vm_mm->context);
630
631	if (pfn_valid(pfn)) {
632		flush_tlb_page(vma, vmaddr);
633		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
634	}
635}
636
637void flush_kernel_vmap_range(void *vaddr, int size)
638{
639	unsigned long start = (unsigned long)vaddr;
640
641	if ((unsigned long)size > parisc_cache_flush_threshold)
642		flush_data_cache();
643	else
644		flush_kernel_dcache_range_asm(start, start + size);
645}
646EXPORT_SYMBOL(flush_kernel_vmap_range);
647
648void invalidate_kernel_vmap_range(void *vaddr, int size)
649{
650	unsigned long start = (unsigned long)vaddr;
651
652	if ((unsigned long)size > parisc_cache_flush_threshold)
653		flush_data_cache();
654	else
655		flush_kernel_dcache_range_asm(start, start + size);
656}
657EXPORT_SYMBOL(invalidate_kernel_vmap_range);