// SPDX-License-Identifier: GPL-2.0

#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <asm/sbi.h>

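/* Flush the entire TLB on all harts via the SBI remote fence. */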
void flush_tlb_all(void)
{
        sbi_remote_sfence_vma(NULL, 0, -1);
}

/*
 * This function must not be called with a NULL cmask; the kernel may
 * panic if it is.
 */
static void __sbi_tlb_flush_range(struct cpumask *cmask, unsigned long start,
                                  unsigned long size)
{
        struct cpumask hmask;
        unsigned int cpuid;

        if (cpumask_empty(cmask))
                return;

        cpuid = get_cpu();

        if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
                /* local cpu is the only cpu present in cpumask */
                if (size <= PAGE_SIZE)
                        local_flush_tlb_page(start);
                else
                        local_flush_tlb_all();
        } else {
                riscv_cpuid_to_hartid_mask(cmask, &hmask);
                sbi_remote_sfence_vma(cpumask_bits(&hmask), start, size);
        }

        put_cpu();
}

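/* Flush all entries for this mm; a size of -1 requests a full flush. */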
void flush_tlb_mm(struct mm_struct *mm)
{
        __sbi_tlb_flush_range(mm_cpumask(mm), 0, -1);
}

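/* Flush the mapping of the single page at @addr for the VMA's mm. */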
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
        __sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), addr, PAGE_SIZE);
}

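/* Flush the mappings covering [start, end) for the VMA's mm. */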
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                     unsigned long end)
{
        __sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), start, end - start);
}

// SPDX-License-Identifier: GPL-2.0

#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <asm/sbi.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

void flush_tlb_all(void)
{
        sbi_remote_sfence_vma(NULL, 0, -1);
}

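/*
 * Flush the TLB entries covering [start, start + size) for @mm. The
 * flush is done locally when the local hart is the only one running
 * this mm and is broadcast through the SBI otherwise; a range no
 * larger than @stride is flushed as a single page on the local path.
 * With the ASID allocator the flush is limited to the mm's ASID, and
 * harts not currently running the mm are recorded in tlb_stale_mask
 * so their flush is deferred until the mm migrates there.
 */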
static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
                                  unsigned long size, unsigned long stride)
{
        struct cpumask *pmask = &mm->context.tlb_stale_mask;
        struct cpumask *cmask = mm_cpumask(mm);
        unsigned int cpuid;
        bool broadcast;

        if (cpumask_empty(cmask))
                return;

        cpuid = get_cpu();
        /* check if the tlbflush needs to be sent to other CPUs */
        broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
        if (static_branch_unlikely(&use_asid_allocator)) {
                unsigned long asid = atomic_long_read(&mm->context.id);

                /*
                 * TLB will be immediately flushed on harts concurrently
                 * executing this MM context. TLB flush on other harts
                 * is deferred until this MM context migrates there.
                 */
                cpumask_setall(pmask);
                cpumask_clear_cpu(cpuid, pmask);
                cpumask_andnot(pmask, pmask, cmask);

                if (broadcast) {
                        sbi_remote_sfence_vma_asid(cmask, start, size, asid);
                } else if (size <= stride) {
                        local_flush_tlb_page_asid(start, asid);
                } else {
                        local_flush_tlb_all_asid(asid);
                }
        } else {
                if (broadcast) {
                        sbi_remote_sfence_vma(cmask, start, size);
                } else if (size <= stride) {
                        local_flush_tlb_page(start);
                } else {
                        local_flush_tlb_all();
                }
        }

        put_cpu();
}

void flush_tlb_mm(struct mm_struct *mm)
{
        __sbi_tlb_flush_range(mm, 0, -1, PAGE_SIZE);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
        __sbi_tlb_flush_range(vma->vm_mm, addr, PAGE_SIZE, PAGE_SIZE);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                     unsigned long end)
{
        __sbi_tlb_flush_range(vma->vm_mm, start, end - start, PAGE_SIZE);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
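/* Flush a PMD-granular range, used for transparent huge page mappings. */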
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
                         unsigned long end)
{
        __sbi_tlb_flush_range(vma->vm_mm, start, end - start, PMD_SIZE);
}
#endif