// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/cache.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <asm/cache.h>

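/*
 * Implementation of the flush_icache_page() hook (see the contract in
 * Documentation/core-api/cachetlb.rst): keep the I-cache coherent when a
 * page is mapped into userspace. The page may live in highmem, so it is
 * temporarily mapped with kmap_atomic() and the whole page is written
 * back and invalidated through that kernel alias.
 */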
void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	unsigned long start;

	start = (unsigned long) kmap_atomic(page);

	cache_wbinv_range(start, start + PAGE_SIZE);

	kunmap_atomic((void *)start);
}

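/*
 * Flush only the bytes the kernel just stored into a user page; per
 * cachetlb.rst this hook typically runs on the copy_to_user_page() /
 * ptrace-write path. vaddr's offset within the page is applied to the
 * kmap_atomic() alias so the writeback-invalidate covers exactly
 * [vaddr, vaddr + len).
 */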
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
		unsigned long vaddr, int len)
{
	unsigned long kaddr;

	kaddr = (unsigned long) kmap_atomic(page) + (vaddr & ~PAGE_MASK);

	cache_wbinv_range(kaddr, kaddr + len);

	kunmap_atomic((void *)kaddr);
}

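/*
 * Arch hook run after a PTE is installed. This older, pre-folio variant
 * flushes unconditionally: it skips invalid PFNs and the shared zero
 * page, then writes back and invalidates the whole page through a
 * temporary kernel mapping.
 */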
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		pte_t *pte)
{
	unsigned long addr, pfn;
	struct page *page;

	pfn = pte_pfn(*pte);
	if (unlikely(!pfn_valid(pfn)))
		return;

	page = pfn_to_page(pfn);
	if (page == ZERO_PAGE(0))
		return;

	addr = (unsigned long) kmap_atomic(page);

	cache_wbinv_range(addr, addr + PAGE_SIZE);

	kunmap_atomic((void *) addr);
}

// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/cache.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <asm/cache.h>
#include <asm/tlbflush.h>

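/*
 * Folio-based replacement for update_mmu_cache(): called once for a
 * range of nr contiguous pages mapped by a single fault. The TLB entry
 * for the faulting address is dropped first so the hardware refetches
 * the just-written PTE on the next access.
 */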
void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
		unsigned long address, pte_t *pte, unsigned int nr)
{
	unsigned long pfn = pte_pfn(*pte);
	struct folio *folio;
	unsigned int i;

	flush_tlb_page(vma, address);

	if (!pfn_valid(pfn))
		return;

	folio = page_folio(pfn_to_page(pfn));

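	/*
	 * PG_dcache_clean marks the folio as already flushed; the atomic
	 * test-and-set means only the first mapper after the bit was
	 * cleared (presumably by flush_dcache_folio()) pays for the
	 * cache maintenance below.
	 */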
	if (test_and_set_bit(PG_dcache_clean, &folio->flags))
		return;

	icache_inv_range(address, address + nr*PAGE_SIZE);
	for (i = 0; i < folio_nr_pages(folio); i++) {
		unsigned long addr = (unsigned long) kmap_local_folio(folio,
								i * PAGE_SIZE);

		dcache_wb_range(addr, addr + PAGE_SIZE);
		if (vma->vm_flags & VM_EXEC)
			icache_inv_range(addr, addr + PAGE_SIZE);
		kunmap_local((void *) addr);
	}
}

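/*
 * Perform a deferred I-cache invalidation for @mm on the local CPU.
 * flush_icache_mm_range() below marks CPUs it did not IPI as stale;
 * this is presumably run when switching to @mm (the "hart" wording is
 * inherited from the RISC-V code this scheme mirrors).
 */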
void flush_icache_deferred(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();
	cpumask_t *mask = &mm->context.icache_stale_mask;

	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		/*
		 * Ensure the remote hart's writes are visible to this hart.
		 * This pairs with a barrier in flush_icache_mm.
		 */
		smp_mb();
		local_icache_inv_all(NULL);
	}
}

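/*
 * Invalidate the I-cache for an mm across all CPUs: flush locally, IPI
 * the CPUs currently running this mm, and leave the stale-mask bits set
 * for everyone else so flush_icache_deferred() catches them when they
 * next switch to this mm.
 */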
void flush_icache_mm_range(struct mm_struct *mm,
		unsigned long start, unsigned long end)
{
	unsigned int cpu;
	cpumask_t others, *mask;

	preempt_disable();

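	/*
	 * Fast path: if this core can invalidate the I-cache by virtual
	 * address (CONFIG_CPU_HAS_ICACHE_INS) and the range belongs to
	 * the current mm, do a ranged invalidate here instead of the
	 * flush-all fallback below (on C-SKY SMP parts these invalidates
	 * are presumably broadcast to the other cores, which is why no
	 * stale-mask bookkeeping is needed on this path).
	 */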
#ifdef CONFIG_CPU_HAS_ICACHE_INS
	if (mm == current->mm) {
		icache_inv_range(start, end);
		preempt_enable();
		return;
	}
#endif

	/* Mark every hart's icache as needing a flush for this MM. */
	mask = &mm->context.icache_stale_mask;
	cpumask_setall(mask);

	/* Flush this hart's I$ now, and mark it as flushed. */
	cpu = smp_processor_id();
	cpumask_clear_cpu(cpu, mask);
	local_icache_inv_all(NULL);

	/*
	 * Flush the I$ of other harts concurrently executing, and mark them as
	 * flushed.
	 */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));

	if (mm != current->active_mm || !cpumask_empty(&others)) {
		on_each_cpu_mask(&others, local_icache_inv_all, NULL, 1);
		cpumask_clear(mask);
	}

	preempt_enable();
}