// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched/signal.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <as-layout.h>
#include <mem_user.h>
#include <os.h>
#include <skas.h>
#include <kern_util.h>

struct vm_ops {
	struct mm_id *mm_idp;

	int (*mmap)(struct mm_id *mm_idp,
		    unsigned long virt, unsigned long len, int prot,
		    int phys_fd, unsigned long long offset);
	int (*unmap)(struct mm_id *mm_idp,
		     unsigned long virt, unsigned long len);
};

static int kern_map(struct mm_id *mm_idp,
		    unsigned long virt, unsigned long len, int prot,
		    int phys_fd, unsigned long long offset)
{
	/* TODO: Why is executable needed to be always set in the kernel? */
	return os_map_memory((void *)virt, phys_fd, offset, len,
			     prot & UM_PROT_READ, prot & UM_PROT_WRITE,
			     1);
}

static int kern_unmap(struct mm_id *mm_idp,
		      unsigned long virt, unsigned long len)
{
	return os_unmap_memory((void *)virt, len);
}

void report_enomem(void)
{
	printk(KERN_ERR "UML ran out of memory on the host side! "
	       "This can happen due to a memory limitation or "
	       "vm.max_map_count has been reached.\n");
}

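/*
 * Walk the PTEs covering [addr, end) under one PMD and push every entry
 * marked as needing a sync out to the host: present pages are mapped with
 * protections derived from the young/dirty bits, non-present ones are
 * unmapped.  Each entry is marked up to date once handled.
 */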
static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
				   unsigned long end,
				   struct vm_ops *ops)
{
	pte_t *pte;
	int ret = 0;

	pte = pte_offset_kernel(pmd, addr);
	do {
		if (!pte_needsync(*pte))
			continue;

		if (pte_present(*pte)) {
			__u64 offset;
			unsigned long phys = pte_val(*pte) & PAGE_MASK;
			int fd = phys_mapping(phys, &offset);
			int r, w, x, prot;

			r = pte_read(*pte);
			w = pte_write(*pte);
			x = pte_exec(*pte);
			if (!pte_young(*pte)) {
				r = 0;
				w = 0;
			} else if (!pte_dirty(*pte))
				w = 0;

			prot = (r ? UM_PROT_READ : 0) |
			       (w ? UM_PROT_WRITE : 0) |
			       (x ? UM_PROT_EXEC : 0);

			ret = ops->mmap(ops->mm_idp, addr, PAGE_SIZE,
					prot, fd, offset);
		} else
			ret = ops->unmap(ops->mm_idp, addr, PAGE_SIZE);

		*pte = pte_mkuptodate(*pte);
	} while (pte++, addr += PAGE_SIZE, ((addr < end) && !ret));
	return ret;
}

static inline int update_pmd_range(pud_t *pud, unsigned long addr,
				   unsigned long end,
				   struct vm_ops *ops)
{
	pmd_t *pmd;
	unsigned long next;
	int ret = 0;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_present(*pmd)) {
			if (pmd_needsync(*pmd)) {
				ret = ops->unmap(ops->mm_idp, addr,
						 next - addr);
				pmd_mkuptodate(*pmd);
			}
		} else
			ret = update_pte_range(pmd, addr, next, ops);
	} while (pmd++, addr = next, ((addr < end) && !ret));
	return ret;
}

static inline int update_pud_range(p4d_t *p4d, unsigned long addr,
				   unsigned long end,
				   struct vm_ops *ops)
{
	pud_t *pud;
	unsigned long next;
	int ret = 0;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (!pud_present(*pud)) {
			if (pud_needsync(*pud)) {
				ret = ops->unmap(ops->mm_idp, addr,
						 next - addr);
				pud_mkuptodate(*pud);
			}
		} else
			ret = update_pmd_range(pud, addr, next, ops);
	} while (pud++, addr = next, ((addr < end) && !ret));
	return ret;
}

static inline int update_p4d_range(pgd_t *pgd, unsigned long addr,
				   unsigned long end,
				   struct vm_ops *ops)
{
	p4d_t *p4d;
	unsigned long next;
	int ret = 0;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (!p4d_present(*p4d)) {
			if (p4d_needsync(*p4d)) {
				ret = ops->unmap(ops->mm_idp, addr,
						 next - addr);
				p4d_mkuptodate(*p4d);
			}
		} else
			ret = update_pud_range(p4d, addr, next, ops);
	} while (p4d++, addr = next, ((addr < end) && !ret));
	return ret;
}

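/*
 * Flush the range recorded in the mm context out to the host.  Kernel
 * mappings (init_mm) go straight through os_map_memory()/os_unmap_memory();
 * userspace mms go through the skas map()/unmap() calls.  The pending
 * range is cleared once the walk finishes.
 */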
int um_tlb_sync(struct mm_struct *mm)
{
	pgd_t *pgd;
	struct vm_ops ops;
	unsigned long addr = mm->context.sync_tlb_range_from, next;
	int ret = 0;

	if (mm->context.sync_tlb_range_to == 0)
		return 0;

	ops.mm_idp = &mm->context.id;
	if (mm == &init_mm) {
		ops.mmap = kern_map;
		ops.unmap = kern_unmap;
	} else {
		ops.mmap = map;
		ops.unmap = unmap;
	}

	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, mm->context.sync_tlb_range_to);
		if (!pgd_present(*pgd)) {
			if (pgd_needsync(*pgd)) {
				ret = ops.unmap(ops.mm_idp, addr,
						next - addr);
				pgd_mkuptodate(*pgd);
			}
		} else
			ret = update_p4d_range(pgd, addr, next, &ops);
	} while (pgd++, addr = next,
		 ((addr < mm->context.sync_tlb_range_to) && !ret));

	if (ret == -ENOMEM)
		report_enomem();

	mm->context.sync_tlb_range_from = 0;
	mm->context.sync_tlb_range_to = 0;

	return ret;
}

void flush_tlb_all(void)
{
	/*
	 * Don't bother flushing if this address space is about to be
	 * destroyed.
	 */
	if (atomic_read(&current->mm->mm_users) == 0)
		return;

	flush_tlb_mm(current->mm);
}

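/*
 * Mark every VMA in the mm as needing a sync; the actual host-side work is
 * deferred until um_tlb_sync() runs.
 */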
void flush_tlb_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	for_each_vma(vmi, vma)
		um_tlb_mark_sync(mm, vma->vm_start, vma->vm_end);
}

/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include "as-layout.h"
#include "mem_user.h"
#include "os.h"
#include "skas.h"
#include "tlb.h"

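/*
 * A small batch of pending host operations (mmap/munmap/mprotect) collected
 * while walking the page tables, and flushed via do_ops() whenever the ops
 * array fills up or the walk finishes.
 */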
struct host_vm_change {
	struct host_vm_op {
		enum { NONE, MMAP, MUNMAP, MPROTECT } type;
		union {
			struct {
				unsigned long addr;
				unsigned long len;
				unsigned int prot;
				int fd;
				__u64 offset;
			} mmap;
			struct {
				unsigned long addr;
				unsigned long len;
			} munmap;
			struct {
				unsigned long addr;
				unsigned long len;
				unsigned int prot;
			} mprotect;
		} u;
	} ops[1];
	int index;
	struct mm_id *id;
	void *data;
	int force;
};

#define INIT_HVC(mm, force) \
	((struct host_vm_change) \
	 { .ops = { { .type = NONE } }, \
	   .id = &mm->context.id, \
	   .data = NULL, \
	   .index = 0, \
	   .force = force })

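/*
 * Replay the queued operations against the host address space.  "finished"
 * is passed through to the lower layer so it knows whether this is the last
 * batch of the current flush.
 */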
static int do_ops(struct host_vm_change *hvc, int end,
		  int finished)
{
	struct host_vm_op *op;
	int i, ret = 0;

	for (i = 0; i < end && !ret; i++) {
		op = &hvc->ops[i];
		switch (op->type) {
		case MMAP:
			ret = map(hvc->id, op->u.mmap.addr, op->u.mmap.len,
				  op->u.mmap.prot, op->u.mmap.fd,
				  op->u.mmap.offset, finished, &hvc->data);
			break;
		case MUNMAP:
			ret = unmap(hvc->id, op->u.munmap.addr,
				    op->u.munmap.len, finished, &hvc->data);
			break;
		case MPROTECT:
			ret = protect(hvc->id, op->u.mprotect.addr,
				      op->u.mprotect.len, op->u.mprotect.prot,
				      finished, &hvc->data);
			break;
		default:
			printk(KERN_ERR "Unknown op type %d in do_ops\n",
			       op->type);
			break;
		}
	}

	return ret;
}

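/*
 * Queue a host mmap for one physical page.  If the request simply extends
 * the previously queued mmap (same protection and fd, contiguous virtual
 * address and file offset), the two are merged so a run of pages becomes a
 * single host call.
 */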
static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
		    unsigned int prot, struct host_vm_change *hvc)
{
	__u64 offset;
	struct host_vm_op *last;
	int fd, ret = 0;

	fd = phys_mapping(phys, &offset);
	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MMAP) &&
		    (last->u.mmap.addr + last->u.mmap.len == virt) &&
		    (last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) &&
		    (last->u.mmap.offset + last->u.mmap.len == offset)) {
			last->u.mmap.len += len;
			return 0;
		}
	}

	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
		hvc->index = 0;
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type = MMAP,
				    .u = { .mmap = { .addr = virt,
						     .len = len,
						     .prot = prot,
						     .fd = fd,
						     .offset = offset }
					 } });
	return ret;
}

static int add_munmap(unsigned long addr, unsigned long len,
		      struct host_vm_change *hvc)
{
	struct host_vm_op *last;
	int ret = 0;

	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MUNMAP) &&
		    (last->u.munmap.addr + last->u.munmap.len == addr)) {
			last->u.munmap.len += len;
			return 0;
		}
	}

	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
		hvc->index = 0;
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type = MUNMAP,
				    .u = { .munmap = { .addr = addr,
						       .len = len } } });
	return ret;
}

static int add_mprotect(unsigned long addr, unsigned long len,
			unsigned int prot, struct host_vm_change *hvc)
{
	struct host_vm_op *last;
	int ret = 0;

	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MPROTECT) &&
		    (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
		    (last->u.mprotect.prot == prot)) {
			last->u.mprotect.len += len;
			return 0;
		}
	}

	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
		hvc->index = 0;
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type = MPROTECT,
				    .u = { .mprotect = { .addr = addr,
							 .len = len,
							 .prot = prot } } });
	return ret;
}

#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))

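/*
 * Walk the PTEs for [addr, end), skipping the stub pages, and queue the
 * host operations needed to bring them up to date: a fresh map or unmap
 * when the page changed (or "force" is set), or just a protection change
 * when only the permissions moved.
 */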
static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pte_t *pte;
	int r, w, x, prot, ret = 0;

	pte = pte_offset_kernel(pmd, addr);
	do {
		if ((addr >= STUB_START) && (addr < STUB_END))
			continue;

		r = pte_read(*pte);
		w = pte_write(*pte);
		x = pte_exec(*pte);
		if (!pte_young(*pte)) {
			r = 0;
			w = 0;
		} else if (!pte_dirty(*pte))
			w = 0;

		prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
			(x ? UM_PROT_EXEC : 0));
		if (hvc->force || pte_newpage(*pte)) {
			if (pte_present(*pte))
				ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
					       PAGE_SIZE, prot, hvc);
			else
				ret = add_munmap(addr, PAGE_SIZE, hvc);
		} else if (pte_newprot(*pte))
			ret = add_mprotect(addr, PAGE_SIZE, prot, hvc);
		*pte = pte_mkuptodate(*pte);
	} while (pte++, addr += PAGE_SIZE, ((addr < end) && !ret));
	return ret;
}

static inline int update_pmd_range(pud_t *pud, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pmd_t *pmd;
	unsigned long next;
	int ret = 0;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_present(*pmd)) {
			if (hvc->force || pmd_newpage(*pmd)) {
				ret = add_munmap(addr, next - addr, hvc);
				pmd_mkuptodate(*pmd);
			}
		} else
			ret = update_pte_range(pmd, addr, next, hvc);
	} while (pmd++, addr = next, ((addr < end) && !ret));
	return ret;
}

static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pud_t *pud;
	unsigned long next;
	int ret = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (!pud_present(*pud)) {
			if (hvc->force || pud_newpage(*pud)) {
				ret = add_munmap(addr, next - addr, hvc);
				pud_mkuptodate(*pud);
			}
		} else
			ret = update_pmd_range(pud, addr, next, hvc);
	} while (pud++, addr = next, ((addr < end) && !ret));
	return ret;
}

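/*
 * Bring the host mappings for [start_addr, end_addr) into line with the
 * page tables of "mm".  A failure here leaves the host and guest views
 * inconsistent, so the current process is killed rather than left running
 * on stale mappings.
 */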
void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force)
{
	pgd_t *pgd;
	struct host_vm_change hvc;
	unsigned long addr = start_addr, next;
	int ret = 0;

	hvc = INIT_HVC(mm, force);
	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end_addr);
		if (!pgd_present(*pgd)) {
			if (force || pgd_newpage(*pgd)) {
				ret = add_munmap(addr, next - addr, &hvc);
				pgd_mkuptodate(*pgd);
			}
		} else
			ret = update_pud_range(pgd, addr, next, &hvc);
	} while (pgd++, addr = next, ((addr < end_addr) && !ret));

	if (!ret)
		ret = do_ops(&hvc, hvc.index, 1);

	/* This is not an else because ret is modified above */
	if (ret) {
		printk(KERN_ERR "fix_range_common: failed, killing current "
		       "process\n");
		force_sig(SIGKILL, current);
	}
}

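/*
 * The same idea for kernel (init_mm) mappings, operating directly on the
 * UML kernel's own host address space.  Returns nonzero if any host mapping
 * was actually changed.
 */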
int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
{
	struct mm_struct *mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, last;
	int updated = 0, err;

	mm = &init_mm;
	for (addr = start; addr < end;) {
		pgd = pgd_offset(mm, addr);
		if (!pgd_present(*pgd)) {
			last = ADD_ROUND(addr, PGDIR_SIZE);
			if (last > end)
				last = end;
			if (pgd_newpage(*pgd)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pud = pud_offset(pgd, addr);
		if (!pud_present(*pud)) {
			last = ADD_ROUND(addr, PUD_SIZE);
			if (last > end)
				last = end;
			if (pud_newpage(*pud)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (!pmd_present(*pmd)) {
			last = ADD_ROUND(addr, PMD_SIZE);
			if (last > end)
				last = end;
			if (pmd_newpage(*pmd)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pte = pte_offset_kernel(pmd, addr);
		if (!pte_present(*pte) || pte_newpage(*pte)) {
			updated = 1;
			err = os_unmap_memory((void *) addr,
					      PAGE_SIZE);
			if (err < 0)
				panic("munmap failed, errno = %d\n",
				      -err);
			if (pte_present(*pte))
				map_memory(addr,
					   pte_val(*pte) & PAGE_MASK,
					   PAGE_SIZE, 1, 1, 1);
		} else if (pte_newprot(*pte)) {
			updated = 1;
			os_protect_memory((void *) addr, PAGE_SIZE, 1, 1, 1);
		}
		addr += PAGE_SIZE;
	}
	return updated;
}

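/*
 * Flush a single user page: look the address up in the page tables and
 * issue the matching map/unmap/protect call immediately.  If the host call
 * fails, the mapping cannot be trusted and the process is killed.
 */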
void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	struct mm_struct *mm = vma->vm_mm;
	void *flush = NULL;
	int r, w, x, prot, err = 0;
	struct mm_id *mm_id;

	address &= PAGE_MASK;
	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto kill;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		goto kill;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		goto kill;

	pte = pte_offset_kernel(pmd, address);

	r = pte_read(*pte);
	w = pte_write(*pte);
	x = pte_exec(*pte);
	if (!pte_young(*pte)) {
		r = 0;
		w = 0;
	} else if (!pte_dirty(*pte)) {
		w = 0;
	}

	mm_id = &mm->context.id;
	prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
		(x ? UM_PROT_EXEC : 0));
	if (pte_newpage(*pte)) {
		if (pte_present(*pte)) {
			unsigned long long offset;
			int fd;

			fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
			err = map(mm_id, address, PAGE_SIZE, prot, fd, offset,
				  1, &flush);
		} else
			err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
	} else if (pte_newprot(*pte))
		err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);

	if (err)
		goto kill;

	*pte = pte_mkuptodate(*pte);

	return;

kill:
	printk(KERN_ERR "Failed to flush page for address 0x%lx\n", address);
	force_sig(SIGKILL, current);
}

pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
{
	return pgd_offset(mm, address);
}

pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
{
	return pud_offset(pgd, address);
}

pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
{
	return pmd_offset(pud, address);
}

pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
{
	return pte_offset_kernel(pmd, address);
}

pte_t *addr_pte(struct task_struct *task, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(task->mm, addr);
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pte_offset_map(pmd, addr);
}

void flush_tlb_all(void)
{
	flush_tlb_mm(current->mm);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_tlb_kernel_range_common(start, end);
}

void flush_tlb_kernel_vm(void)
{
	flush_tlb_kernel_range_common(start_vm, end_vm);
}

void __flush_tlb_one(unsigned long addr)
{
	flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
}

static void fix_range(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force)
{
	fix_range_common(mm, start_addr, end_addr, force);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	if (vma->vm_mm == NULL)
		flush_tlb_kernel_range_common(start, end);
	else
		fix_range(vma->vm_mm, start, end, 0);
}

void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
			unsigned long end)
{
	/*
	 * Don't bother flushing if this address space is about to be
	 * destroyed.
	 */
	if (atomic_read(&mm->mm_users) == 0)
		return;

	fix_range(mm, start, end, 0);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma = mm->mmap;

	while (vma != NULL) {
		fix_range(mm, vma->vm_start, vma->vm_end, 0);
		vma = vma->vm_next;
	}
}

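/*
 * Remap every VMA of the current process with force set, regenerating all
 * host mappings regardless of the new-page/new-prot bits.
 */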
void force_flush_all(void)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = mm->mmap;

	while (vma != NULL) {
		fix_range(mm, vma->vm_start, vma->vm_end, 1);
		vma = vma->vm_next;
	}
}