// SPDX-License-Identifier: GPL-2.0

#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <asm/sbi.h>

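/*
 * Flush all TLB entries on all harts: the SBI call with a NULL hart
 * mask targets every hart, and a size of -1 covers the whole address
 * space.
 */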
void flush_tlb_all(void)
{
	sbi_remote_sfence_vma(NULL, 0, -1);
}

/*
 * This function must not be called with a NULL cmask; the kernel may
 * panic if it is.
 */
static void __sbi_tlb_flush_range(struct cpumask *cmask, unsigned long start,
				  unsigned long size)
{
	struct cpumask hmask;
	unsigned int cpuid;

	if (cpumask_empty(cmask))
		return;

	cpuid = get_cpu();

	if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
		/* local cpu is the only cpu present in cpumask */
		if (size <= PAGE_SIZE)
			local_flush_tlb_page(start);
		else
			local_flush_tlb_all();
	} else {
		riscv_cpuid_to_hartid_mask(cmask, &hmask);
		sbi_remote_sfence_vma(cpumask_bits(&hmask), start, size);
	}

	put_cpu();
}

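/*
 * A start of 0 with size -1 asks __sbi_tlb_flush_range() for a full
 * flush on every CPU that has used this mm.
 */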
void flush_tlb_mm(struct mm_struct *mm)
{
	__sbi_tlb_flush_range(mm_cpumask(mm), 0, -1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	__sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), addr, PAGE_SIZE);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	__sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), start, end - start);
}
// SPDX-License-Identifier: GPL-2.0

#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>
#include <asm/sbi.h>
#include <asm/mmu_context.h>

/*
 * Flush the entire TLB if the number of entries to be flushed exceeds
 * the threshold below.
 */
unsigned long tlb_flush_all_threshold __read_mostly = 64;

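/*
 * Flush a range one stride at a time, falling back to a full flush of
 * the given ASID once the number of entries exceeds the threshold.
 */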
static void local_flush_tlb_range_threshold_asid(unsigned long start,
						 unsigned long size,
						 unsigned long stride,
						 unsigned long asid)
{
	unsigned long nr_ptes_in_range = DIV_ROUND_UP(size, stride);
	int i;

	if (nr_ptes_in_range > tlb_flush_all_threshold) {
		local_flush_tlb_all_asid(asid);
		return;
	}

	for (i = 0; i < nr_ptes_in_range; ++i) {
		local_flush_tlb_page_asid(start, asid);
		start += stride;
	}
}

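/*
 * Local flush dispatch: a single page, a full ASID flush when the
 * FLUSH_TLB_MAX_SIZE sentinel is passed, or the threshold-based loop.
 */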
static inline void local_flush_tlb_range_asid(unsigned long start,
		unsigned long size, unsigned long stride, unsigned long asid)
{
	if (size <= stride)
		local_flush_tlb_page_asid(start, asid);
	else if (size == FLUSH_TLB_MAX_SIZE)
		local_flush_tlb_all_asid(asid);
	else
		local_flush_tlb_range_threshold_asid(start, size, stride, asid);
}

/* Flush a range of kernel pages without broadcasting */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	local_flush_tlb_range_asid(start, end - start, PAGE_SIZE, FLUSH_TLB_NO_ASID);
}

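/* IPI callback: flush the whole local TLB on the receiving CPU. */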
static void __ipi_flush_tlb_all(void *info)
{
	local_flush_tlb_all();
}

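/*
 * Flush everything everywhere: stay local on a single-CPU system, use
 * the SBI remote fence when available, otherwise IPI all online CPUs.
 */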
void flush_tlb_all(void)
{
	if (num_online_cpus() < 2)
		local_flush_tlb_all();
	else if (riscv_use_sbi_for_rfence())
		sbi_remote_sfence_vma_asid(NULL, 0, FLUSH_TLB_MAX_SIZE, FLUSH_TLB_NO_ASID);
	else
		on_each_cpu(__ipi_flush_tlb_all, NULL, 1);
}

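/* Arguments forwarded to the IPI callback below. */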
struct flush_tlb_range_data {
	unsigned long asid;
	unsigned long start;
	unsigned long size;
	unsigned long stride;
};

static void __ipi_flush_tlb_range_asid(void *info)
{
	struct flush_tlb_range_data *d = info;

	local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
}

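/*
 * Flush a range for the CPUs in @cmask: purely local when no other CPU
 * is in the mask, via the SBI remote fence when available, otherwise by
 * sending IPIs that carry a flush_tlb_range_data payload.
 */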
static void __flush_tlb_range(const struct cpumask *cmask, unsigned long asid,
			      unsigned long start, unsigned long size,
			      unsigned long stride)
{
	unsigned int cpu;

	if (cpumask_empty(cmask))
		return;

	cpu = get_cpu();

	/* Check if the TLB flush needs to be sent to other CPUs. */
	if (cpumask_any_but(cmask, cpu) >= nr_cpu_ids) {
		local_flush_tlb_range_asid(start, size, stride, asid);
	} else if (riscv_use_sbi_for_rfence()) {
		sbi_remote_sfence_vma_asid(cmask, start, size, asid);
	} else {
		struct flush_tlb_range_data ftd;

		ftd.asid = asid;
		ftd.start = start;
		ftd.size = size;
		ftd.stride = stride;
		on_each_cpu_mask(cmask, __ipi_flush_tlb_range_asid, &ftd, 1);
	}

	put_cpu();
}

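/* Extract the hardware ASID from the mm's context id. */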
static inline unsigned long get_mm_asid(struct mm_struct *mm)
{
	return cntx2asid(atomic_long_read(&mm->context.id));
}

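/* FLUSH_TLB_MAX_SIZE requests a full flush of this mm's ASID. */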
void flush_tlb_mm(struct mm_struct *mm)
{
	__flush_tlb_range(mm_cpumask(mm), get_mm_asid(mm),
			  0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
}

void flush_tlb_mm_range(struct mm_struct *mm,
			unsigned long start, unsigned long end,
			unsigned int page_size)
{
	__flush_tlb_range(mm_cpumask(mm), get_mm_asid(mm),
			  start, end - start, page_size);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	__flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
			  addr, PAGE_SIZE, PAGE_SIZE);
}

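/*
 * Flush a user range; hugetlb VMAs use the huge page size as the
 * stride, adjusted below when Svnapot NAPOT mappings may be in use.
 */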
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	unsigned long stride_size;

	if (!is_vm_hugetlb_page(vma)) {
		stride_size = PAGE_SIZE;
	} else {
		stride_size = huge_page_size(hstate_vma(vma));

		/*
		 * As stated in the privileged specification, every PTE in a
		 * NAPOT region must be invalidated, so reset the stride in that
		 * case.
		 */
		if (has_svnapot()) {
			if (stride_size >= PGDIR_SIZE)
				stride_size = PGDIR_SIZE;
			else if (stride_size >= P4D_SIZE)
				stride_size = P4D_SIZE;
			else if (stride_size >= PUD_SIZE)
				stride_size = PUD_SIZE;
			else if (stride_size >= PMD_SIZE)
				stride_size = PMD_SIZE;
			else
				stride_size = PAGE_SIZE;
		}
	}

	__flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
			  start, end - start, stride_size);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	__flush_tlb_range(cpu_online_mask, FLUSH_TLB_NO_ASID,
			  start, end - start, PAGE_SIZE);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			 unsigned long end)
{
	__flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
			  start, end - start, PMD_SIZE);
}
#endif

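/*
 * Hooks for the generic mm batched TLB flush machinery: flushing is
 * always deferred, pending CPUs accumulate in the batch cpumask, and
 * the final flush covers the whole address space with no ASID.
 */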
bool arch_tlbbatch_should_defer(struct mm_struct *mm)
{
	return true;
}

void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
			       struct mm_struct *mm,
			       unsigned long uaddr)
{
	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}

void arch_flush_tlb_batched_pending(struct mm_struct *mm)
{
	flush_tlb_mm(mm);
}

void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
	__flush_tlb_range(&batch->cpumask, FLUSH_TLB_NO_ASID, 0,
			  FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
	cpumask_clear(&batch->cpumask);
}