// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/spinlock.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#include <asm/tlbflush.h>

#define PG_dcache_clean PG_arch_1

18void flush_dcache_folio(struct folio *folio)
19{
20 struct address_space *mapping;
21
22 if (is_zero_pfn(folio_pfn(folio)))
23 return;
24
25 mapping = folio_flush_mapping(folio);
26
27 if (mapping && !folio_mapped(folio))
28 clear_bit(PG_dcache_clean, &folio->flags);
29 else {
30 dcache_wbinv_all();
31 if (mapping)
32 icache_inv_all();
33 set_bit(PG_dcache_clean, &folio->flags);
34 }
35}
36EXPORT_SYMBOL(flush_dcache_folio);
37
/* Legacy page-based entry point; delegates to the folio variant. */
void flush_dcache_page(struct page *page)
{
	struct folio *folio = page_folio(page);

	flush_dcache_folio(folio);
}
EXPORT_SYMBOL(flush_dcache_page);

44void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
45 unsigned long addr, pte_t *ptep, unsigned int nr)
46{
47 unsigned long pfn = pte_pfn(*ptep);
48 struct folio *folio;
49
50 flush_tlb_page(vma, addr);
51
52 if (!pfn_valid(pfn))
53 return;
54
55 if (is_zero_pfn(pfn))
56 return;
57
58 folio = page_folio(pfn_to_page(pfn));
59 if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
60 dcache_wbinv_all();
61
62 if (folio_flush_mapping(folio)) {
63 if (vma->vm_flags & VM_EXEC)
64 icache_inv_all();
65 }
66}
67
68void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
69 unsigned long end)
70{
71 dcache_wbinv_all();
72
73 if (vma->vm_flags & VM_EXEC)
74 icache_inv_all();
75}
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/syscalls.h>
#include <linux/spinlock.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>

#define PG_dcache_clean PG_arch_1

16void flush_dcache_page(struct page *page)
17{
18 struct address_space *mapping;
19
20 if (page == ZERO_PAGE(0))
21 return;
22
23 mapping = page_mapping_file(page);
24
25 if (mapping && !page_mapcount(page))
26 clear_bit(PG_dcache_clean, &page->flags);
27 else {
28 dcache_wbinv_all();
29 if (mapping)
30 icache_inv_all();
31 set_bit(PG_dcache_clean, &page->flags);
32 }
33}
34EXPORT_SYMBOL(flush_dcache_page);
35
36void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
37 pte_t *ptep)
38{
39 unsigned long pfn = pte_pfn(*ptep);
40 struct page *page;
41
42 if (!pfn_valid(pfn))
43 return;
44
45 page = pfn_to_page(pfn);
46 if (page == ZERO_PAGE(0))
47 return;
48
49 if (!test_and_set_bit(PG_dcache_clean, &page->flags))
50 dcache_wbinv_all();
51
52 if (page_mapping_file(page)) {
53 if (vma->vm_flags & VM_EXEC)
54 icache_inv_all();
55 }
56}
57
void flush_kernel_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping_file(page);

	/* Only page-cache pages with no user mappings can skip the flush. */
	if (mapping && !mapping_mapped(mapping))
		return;

	dcache_wbinv_all();
}
EXPORT_SYMBOL(flush_kernel_dcache_page);

69void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
70 unsigned long end)
71{
72 dcache_wbinv_all();
73
74 if (vma->vm_flags & VM_EXEC)
75 icache_inv_all();
76}