1// SPDX-License-Identifier: GPL-2.0
2// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
3
4#include <linux/kernel.h>
5#include <linux/mm.h>
6#include <linux/fs.h>
7#include <linux/pagemap.h>
8#include <linux/syscalls.h>
9#include <linux/spinlock.h>
10#include <asm/page.h>
11#include <asm/cache.h>
12#include <asm/cacheflush.h>
13#include <asm/cachectl.h>
14
15#define PG_dcache_clean PG_arch_1
16
17void flush_dcache_page(struct page *page)
18{
19 struct address_space *mapping;
20
21 if (page == ZERO_PAGE(0))
22 return;
23
24 mapping = page_mapping_file(page);
25
26 if (mapping && !page_mapcount(page))
27 clear_bit(PG_dcache_clean, &page->flags);
28 else {
29 dcache_wbinv_all();
30 if (mapping)
31 icache_inv_all();
32 set_bit(PG_dcache_clean, &page->flags);
33 }
34}
35EXPORT_SYMBOL(flush_dcache_page);
36
37void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
38 pte_t *ptep)
39{
40 unsigned long pfn = pte_pfn(*ptep);
41 struct page *page;
42
43 if (!pfn_valid(pfn))
44 return;
45
46 page = pfn_to_page(pfn);
47 if (page == ZERO_PAGE(0))
48 return;
49
50 if (!test_and_set_bit(PG_dcache_clean, &page->flags))
51 dcache_wbinv_all();
52
53 if (page_mapping_file(page)) {
54 if (vma->vm_flags & VM_EXEC)
55 icache_inv_all();
56 }
57}
58
59void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
60 unsigned long end)
61{
62 dcache_wbinv_all();
63
64 if (vma->vm_flags & VM_EXEC)
65 icache_inv_all();
66}
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
3
4#include <linux/kernel.h>
5#include <linux/mm.h>
6#include <linux/fs.h>
7#include <linux/syscalls.h>
8#include <linux/spinlock.h>
9#include <asm/page.h>
10#include <asm/cache.h>
11#include <asm/cacheflush.h>
12#include <asm/cachectl.h>
13
14#define PG_dcache_clean PG_arch_1
15
16void flush_dcache_page(struct page *page)
17{
18 struct address_space *mapping;
19
20 if (page == ZERO_PAGE(0))
21 return;
22
23 mapping = page_mapping_file(page);
24
25 if (mapping && !page_mapcount(page))
26 clear_bit(PG_dcache_clean, &page->flags);
27 else {
28 dcache_wbinv_all();
29 if (mapping)
30 icache_inv_all();
31 set_bit(PG_dcache_clean, &page->flags);
32 }
33}
34EXPORT_SYMBOL(flush_dcache_page);
35
36void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
37 pte_t *ptep)
38{
39 unsigned long pfn = pte_pfn(*ptep);
40 struct page *page;
41
42 if (!pfn_valid(pfn))
43 return;
44
45 page = pfn_to_page(pfn);
46 if (page == ZERO_PAGE(0))
47 return;
48
49 if (!test_and_set_bit(PG_dcache_clean, &page->flags))
50 dcache_wbinv_all();
51
52 if (page_mapping_file(page)) {
53 if (vma->vm_flags & VM_EXEC)
54 icache_inv_all();
55 }
56}
57
/*
 * Flush a page the kernel has written through its own mapping.
 * The flush is skipped only for file pages whose mapping has no
 * userspace mmap()s — nobody else can observe stale lines there.
 */
void flush_kernel_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping_file(page);

	if (mapping && !mapping_mapped(mapping))
		return;

	dcache_wbinv_all();
}
EXPORT_SYMBOL(flush_kernel_dcache_page);
68
69void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
70 unsigned long end)
71{
72 dcache_wbinv_all();
73
74 if (vma->vm_flags & VM_EXEC)
75 icache_inv_all();
76}