// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched/signal.h>

#include <asm/tlbflush.h>
#include <as-layout.h>
#include <mem_user.h>
#include <os.h>
#include <skas.h>
#include <kern_util.h>

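/*
 * A host_vm_change batches up pending mmap/munmap/mprotect operations on
 * the host so that adjacent, compatible operations can be merged before
 * being issued - either to a userspace address space through its mm_id
 * (userspace == 1) or directly to the kernel mapping.
 */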
struct host_vm_change {
        struct host_vm_op {
                enum { NONE, MMAP, MUNMAP, MPROTECT } type;
                union {
                        struct {
                                unsigned long addr;
                                unsigned long len;
                                unsigned int prot;
                                int fd;
                                __u64 offset;
                        } mmap;
                        struct {
                                unsigned long addr;
                                unsigned long len;
                        } munmap;
                        struct {
                                unsigned long addr;
                                unsigned long len;
                                unsigned int prot;
                        } mprotect;
                } u;
        } ops[1];
        int userspace;
        int index;
        struct mm_struct *mm;
        void *data;
        int force;
};

#define INIT_HVC(mm, force, userspace) \
        ((struct host_vm_change) \
         { .ops = { { .type = NONE } }, \
           .mm = mm, \
           .data = NULL, \
           .userspace = userspace, \
           .index = 0, \
           .force = force })

static void report_enomem(void)
{
        printk(KERN_ERR "UML ran out of memory on the host side! "
               "This can happen due to a memory limitation or "
               "vm.max_map_count has been reached.\n");
}

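/*
 * Flush the first 'end' queued operations to the host.  For a userspace
 * address space they go through the skas interface (map, unmap, protect)
 * using the mm's mm_id; for the kernel mapping they are applied directly
 * with the os_* helpers.  'finished' tells the lower layer that this is
 * the last batch of the current flush.
 */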
static int do_ops(struct host_vm_change *hvc, int end,
                  int finished)
{
        struct host_vm_op *op;
        int i, ret = 0;

        for (i = 0; i < end && !ret; i++) {
                op = &hvc->ops[i];
                switch (op->type) {
                case MMAP:
                        if (hvc->userspace)
                                ret = map(&hvc->mm->context.id, op->u.mmap.addr,
                                          op->u.mmap.len, op->u.mmap.prot,
                                          op->u.mmap.fd,
                                          op->u.mmap.offset, finished,
                                          &hvc->data);
                        else
                                map_memory(op->u.mmap.addr, op->u.mmap.offset,
                                           op->u.mmap.len, 1, 1, 1);
                        break;
                case MUNMAP:
                        if (hvc->userspace)
                                ret = unmap(&hvc->mm->context.id,
                                            op->u.munmap.addr,
                                            op->u.munmap.len, finished,
                                            &hvc->data);
                        else
                                ret = os_unmap_memory(
                                        (void *) op->u.munmap.addr,
                                        op->u.munmap.len);

                        break;
                case MPROTECT:
                        if (hvc->userspace)
                                ret = protect(&hvc->mm->context.id,
                                              op->u.mprotect.addr,
                                              op->u.mprotect.len,
                                              op->u.mprotect.prot,
                                              finished, &hvc->data);
                        else
                                ret = os_protect_memory(
                                        (void *) op->u.mprotect.addr,
                                        op->u.mprotect.len,
                                        1, 1, 1);
                        break;
                default:
                        printk(KERN_ERR "Unknown op type %d in do_ops\n",
                               op->type);
                        BUG();
                        break;
                }
        }

        if (ret == -ENOMEM)
                report_enomem();

        return ret;
}

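/*
 * Queue an mmap of the host mapping backing 'phys' at virtual address
 * 'virt'.  If it simply extends the previously queued mmap it is merged
 * into it; otherwise, if the op array is already full, the pending ops
 * are flushed first with do_ops().
 */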
static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
                    unsigned int prot, struct host_vm_change *hvc)
{
        __u64 offset;
        struct host_vm_op *last;
        int fd = -1, ret = 0;

        if (hvc->userspace)
                fd = phys_mapping(phys, &offset);
        else
                offset = phys;
        if (hvc->index != 0) {
                last = &hvc->ops[hvc->index - 1];
                if ((last->type == MMAP) &&
                    (last->u.mmap.addr + last->u.mmap.len == virt) &&
                    (last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) &&
                    (last->u.mmap.offset + last->u.mmap.len == offset)) {
                        last->u.mmap.len += len;
                        return 0;
                }
        }

        if (hvc->index == ARRAY_SIZE(hvc->ops)) {
                ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
                hvc->index = 0;
        }

        hvc->ops[hvc->index++] = ((struct host_vm_op)
                                  { .type = MMAP,
                                    .u = { .mmap = { .addr = virt,
                                                     .len = len,
                                                     .prot = prot,
                                                     .fd = fd,
                                                     .offset = offset }
                                         } });
        return ret;
}

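/*
 * Queue an munmap of [addr, addr + len).  Unmapping the stub pages is
 * refused, and, as in add_mmap(), an operation that extends the previous
 * munmap is merged into it.
 */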
static int add_munmap(unsigned long addr, unsigned long len,
                      struct host_vm_change *hvc)
{
        struct host_vm_op *last;
        int ret = 0;

        if ((addr >= STUB_START) && (addr < STUB_END))
                return -EINVAL;

        if (hvc->index != 0) {
                last = &hvc->ops[hvc->index - 1];
                if ((last->type == MUNMAP) &&
                    (last->u.munmap.addr + last->u.munmap.len == addr)) {
                        last->u.munmap.len += len;
                        return 0;
                }
        }

        if (hvc->index == ARRAY_SIZE(hvc->ops)) {
                ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
                hvc->index = 0;
        }

        hvc->ops[hvc->index++] = ((struct host_vm_op)
                                  { .type = MUNMAP,
                                    .u = { .munmap = { .addr = addr,
                                                       .len = len } } });
        return ret;
}

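/*
 * Queue an mprotect of [addr, addr + len) to 'prot', merging with the
 * previously queued mprotect when the ranges are adjacent and the
 * protections match.
 */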
static int add_mprotect(unsigned long addr, unsigned long len,
                        unsigned int prot, struct host_vm_change *hvc)
{
        struct host_vm_op *last;
        int ret = 0;

        if (hvc->index != 0) {
                last = &hvc->ops[hvc->index - 1];
                if ((last->type == MPROTECT) &&
                    (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
                    (last->u.mprotect.prot == prot)) {
                        last->u.mprotect.len += len;
                        return 0;
                }
        }

        if (hvc->index == ARRAY_SIZE(hvc->ops)) {
                ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
                hvc->index = 0;
        }

        hvc->ops[hvc->index++] = ((struct host_vm_op)
                                  { .type = MPROTECT,
                                    .u = { .mprotect = { .addr = addr,
                                                         .len = len,
                                                         .prot = prot } } });
        return ret;
}

#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))

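/*
 * Walk the PTEs under one pmd and queue the host operations needed to
 * bring the host mappings in sync: new present pages are remapped, absent
 * ones are unmapped, and protection-only changes become mprotects.  The
 * accessed/dirty bits are folded into the protection, and the stub pages
 * are skipped.
 */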
static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
                                   unsigned long end,
                                   struct host_vm_change *hvc)
{
        pte_t *pte;
        int r, w, x, prot, ret = 0;

        pte = pte_offset_kernel(pmd, addr);
        do {
                if ((addr >= STUB_START) && (addr < STUB_END))
                        continue;

                r = pte_read(*pte);
                w = pte_write(*pte);
                x = pte_exec(*pte);
                if (!pte_young(*pte)) {
                        r = 0;
                        w = 0;
                } else if (!pte_dirty(*pte))
                        w = 0;

                prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
                        (x ? UM_PROT_EXEC : 0));
                if (hvc->force || pte_newpage(*pte)) {
                        if (pte_present(*pte)) {
                                if (pte_newpage(*pte))
                                        ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
                                                       PAGE_SIZE, prot, hvc);
                        } else
                                ret = add_munmap(addr, PAGE_SIZE, hvc);
                } else if (pte_newprot(*pte))
                        ret = add_mprotect(addr, PAGE_SIZE, prot, hvc);
                *pte = pte_mkuptodate(*pte);
        } while (pte++, addr += PAGE_SIZE, ((addr < end) && !ret));
        return ret;
}

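/*
 * The pmd/pud/p4d walkers below each descend one page table level; a
 * missing entry that is marked new (or a forced flush) turns into an
 * munmap of the whole range that entry would have covered.
 */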
static inline int update_pmd_range(pud_t *pud, unsigned long addr,
                                   unsigned long end,
                                   struct host_vm_change *hvc)
{
        pmd_t *pmd;
        unsigned long next;
        int ret = 0;

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if (!pmd_present(*pmd)) {
                        if (hvc->force || pmd_newpage(*pmd)) {
                                ret = add_munmap(addr, next - addr, hvc);
                                pmd_mkuptodate(*pmd);
                        }
                } else
                        ret = update_pte_range(pmd, addr, next, hvc);
        } while (pmd++, addr = next, ((addr < end) && !ret));
        return ret;
}

static inline int update_pud_range(p4d_t *p4d, unsigned long addr,
                                   unsigned long end,
                                   struct host_vm_change *hvc)
{
        pud_t *pud;
        unsigned long next;
        int ret = 0;

        pud = pud_offset(p4d, addr);
        do {
                next = pud_addr_end(addr, end);
                if (!pud_present(*pud)) {
                        if (hvc->force || pud_newpage(*pud)) {
                                ret = add_munmap(addr, next - addr, hvc);
                                pud_mkuptodate(*pud);
                        }
                } else
                        ret = update_pmd_range(pud, addr, next, hvc);
        } while (pud++, addr = next, ((addr < end) && !ret));
        return ret;
}

static inline int update_p4d_range(pgd_t *pgd, unsigned long addr,
                                   unsigned long end,
                                   struct host_vm_change *hvc)
{
        p4d_t *p4d;
        unsigned long next;
        int ret = 0;

        p4d = p4d_offset(pgd, addr);
        do {
                next = p4d_addr_end(addr, end);
                if (!p4d_present(*p4d)) {
                        if (hvc->force || p4d_newpage(*p4d)) {
                                ret = add_munmap(addr, next - addr, hvc);
                                p4d_mkuptodate(*p4d);
                        }
                } else
                        ret = update_pud_range(p4d, addr, next, hvc);
        } while (p4d++, addr = next, ((addr < end) && !ret));
        return ret;
}

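/*
 * Bring the host mappings for [start_addr, end_addr) of a userspace mm in
 * sync with its page tables.  On failure the host address space can no
 * longer be trusted, so the current process is killed.
 */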
void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
                      unsigned long end_addr, int force)
{
        pgd_t *pgd;
        struct host_vm_change hvc;
        unsigned long addr = start_addr, next;
        int ret = 0, userspace = 1;

        hvc = INIT_HVC(mm, force, userspace);
        pgd = pgd_offset(mm, addr);
        do {
                next = pgd_addr_end(addr, end_addr);
                if (!pgd_present(*pgd)) {
                        if (force || pgd_newpage(*pgd)) {
                                ret = add_munmap(addr, next - addr, &hvc);
                                pgd_mkuptodate(*pgd);
                        }
                } else
                        ret = update_p4d_range(pgd, addr, next, &hvc);
        } while (pgd++, addr = next, ((addr < end_addr) && !ret));

        if (!ret)
                ret = do_ops(&hvc, hvc.index, 1);

        /* This is not an else because ret is modified above */
        if (ret) {
                printk(KERN_ERR "fix_range_common: failed, killing current "
                       "process: %d\n", task_tgid_vnr(current));
                /* We are under mmap_lock, release it such that current can terminate */
                mmap_write_unlock(current->mm);
                force_sig(SIGKILL);
                do_signal(&current->thread.regs);
        }
}

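/*
 * The same idea for the kernel mapping (init_mm): walk the page tables
 * directly and queue unmap/map/mprotect operations for every entry marked
 * as needing a new page or new protection.  Returns nonzero if anything
 * was updated.
 */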
static int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
{
        struct mm_struct *mm;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long addr, last;
        int updated = 0, err = 0, force = 0, userspace = 0;
        struct host_vm_change hvc;

        mm = &init_mm;
        hvc = INIT_HVC(mm, force, userspace);
        for (addr = start; addr < end;) {
                pgd = pgd_offset(mm, addr);
                if (!pgd_present(*pgd)) {
                        last = ADD_ROUND(addr, PGDIR_SIZE);
                        if (last > end)
                                last = end;
                        if (pgd_newpage(*pgd)) {
                                updated = 1;
                                err = add_munmap(addr, last - addr, &hvc);
                                if (err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                        }
                        addr = last;
                        continue;
                }

                p4d = p4d_offset(pgd, addr);
                if (!p4d_present(*p4d)) {
                        last = ADD_ROUND(addr, P4D_SIZE);
                        if (last > end)
                                last = end;
                        if (p4d_newpage(*p4d)) {
                                updated = 1;
                                err = add_munmap(addr, last - addr, &hvc);
                                if (err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                        }
                        addr = last;
                        continue;
                }

                pud = pud_offset(p4d, addr);
                if (!pud_present(*pud)) {
                        last = ADD_ROUND(addr, PUD_SIZE);
                        if (last > end)
                                last = end;
                        if (pud_newpage(*pud)) {
                                updated = 1;
                                err = add_munmap(addr, last - addr, &hvc);
                                if (err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                        }
                        addr = last;
                        continue;
                }

                pmd = pmd_offset(pud, addr);
                if (!pmd_present(*pmd)) {
                        last = ADD_ROUND(addr, PMD_SIZE);
                        if (last > end)
                                last = end;
                        if (pmd_newpage(*pmd)) {
                                updated = 1;
                                err = add_munmap(addr, last - addr, &hvc);
                                if (err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                        }
                        addr = last;
                        continue;
                }

                pte = pte_offset_kernel(pmd, addr);
                if (!pte_present(*pte) || pte_newpage(*pte)) {
                        updated = 1;
                        err = add_munmap(addr, PAGE_SIZE, &hvc);
                        if (err < 0)
                                panic("munmap failed, errno = %d\n",
                                      -err);
                        if (pte_present(*pte))
                                err = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
                                               PAGE_SIZE, 0, &hvc);
                } else if (pte_newprot(*pte)) {
                        updated = 1;
                        err = add_mprotect(addr, PAGE_SIZE, 0, &hvc);
                }
                addr += PAGE_SIZE;
        }
        if (!err)
                err = do_ops(&hvc, hvc.index, 1);

        if (err < 0)
                panic("flush_tlb_kernel failed, errno = %d\n", err);
        return updated;
}

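/*
 * Flush a single page of a userspace address space.  The host mapping is
 * remapped, unmapped or reprotected to match the PTE; any failure leaves
 * the address space inconsistent, so the process is killed.
 */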
void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        struct mm_struct *mm = vma->vm_mm;
        void *flush = NULL;
        int r, w, x, prot, err = 0;
        struct mm_id *mm_id;

        address &= PAGE_MASK;
        pgd = pgd_offset(mm, address);
        if (!pgd_present(*pgd))
                goto kill;

        p4d = p4d_offset(pgd, address);
        if (!p4d_present(*p4d))
                goto kill;

        pud = pud_offset(p4d, address);
        if (!pud_present(*pud))
                goto kill;

        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
                goto kill;

        pte = pte_offset_kernel(pmd, address);

        r = pte_read(*pte);
        w = pte_write(*pte);
        x = pte_exec(*pte);
        if (!pte_young(*pte)) {
                r = 0;
                w = 0;
        } else if (!pte_dirty(*pte)) {
                w = 0;
        }

        mm_id = &mm->context.id;
        prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
                (x ? UM_PROT_EXEC : 0));
        if (pte_newpage(*pte)) {
                if (pte_present(*pte)) {
                        unsigned long long offset;
                        int fd;

                        fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
                        err = map(mm_id, address, PAGE_SIZE, prot, fd, offset,
                                  1, &flush);
                } else
                        err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
        } else if (pte_newprot(*pte))
                err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);

        if (err) {
                if (err == -ENOMEM)
                        report_enomem();

                goto kill;
        }

        *pte = pte_mkuptodate(*pte);

        return;

kill:
        printk(KERN_ERR "Failed to flush page for address 0x%lx\n", address);
        force_sig(SIGKILL);
}

void flush_tlb_all(void)
{
        /*
         * Don't bother flushing if this address space is about to be
         * destroyed.
         */
        if (atomic_read(&current->mm->mm_users) == 0)
                return;

        flush_tlb_mm(current->mm);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        flush_tlb_kernel_range_common(start, end);
}

void flush_tlb_kernel_vm(void)
{
        flush_tlb_kernel_range_common(start_vm, end_vm);
}

void __flush_tlb_one(unsigned long addr)
{
        flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
}

static void fix_range(struct mm_struct *mm, unsigned long start_addr,
                      unsigned long end_addr, int force)
{
        /*
         * Don't bother flushing if this address space is about to be
         * destroyed.
         */
        if (atomic_read(&mm->mm_users) == 0)
                return;

        fix_range_common(mm, start_addr, end_addr, force);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                     unsigned long end)
{
        if (vma->vm_mm == NULL)
                flush_tlb_kernel_range_common(start, end);
        else
                fix_range(vma->vm_mm, start, end, 0);
}
EXPORT_SYMBOL(flush_tlb_range);

void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                        unsigned long end)
{
        fix_range(mm, start, end, 0);
}

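/* Flush an entire address space, one VMA at a time. */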
void flush_tlb_mm(struct mm_struct *mm)
{
        struct vm_area_struct *vma = mm->mmap;

        while (vma != NULL) {
                fix_range(mm, vma->vm_start, vma->vm_end, 0);
                vma = vma->vm_next;
        }
}

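/*
 * Re-sync all of current's address space with force set, so that ranges
 * whose page table entries are not present are unmapped on the host even
 * if they are not marked as changed.
 */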
void force_flush_all(void)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma = mm->mmap;

        while (vma != NULL) {
                fix_range(mm, vma->vm_start, vma->vm_end, 1);
                vma = vma->vm_next;
        }
}