/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#ifndef __UM_TLBFLUSH_H
#define __UM_TLBFLUSH_H

#include <linux/mm.h>

/*
 * TLB flushing:
 *
 * - flush_tlb() flushes the current mm struct TLBs
 * - flush_tlb_all() flushes all processes' TLBs
 * - flush_tlb_mm(mm) flushes the specified mm context TLBs
 * - flush_tlb_page(vma, vmaddr) flushes one page
 * - flush_tlb_kernel_vm() flushes the kernel vm area
 * - flush_tlb_range(vma, start, end) flushes a range of pages
 *
 * See the usage sketch after this header for a typical caller.
 */

extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long address);
extern void flush_tlb_kernel_vm(void);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
extern void __flush_tlb_one(unsigned long addr);

#endif
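
/*
 * Illustrative usage sketch, not part of the header above: a typical caller
 * first updates the page table entries for a VMA and then flushes the
 * affected range so no stale translations survive.  The function name below
 * is hypothetical.
 */
static inline void example_change_vma_range(struct vm_area_struct *vma,
					    unsigned long start,
					    unsigned long end)
{
	/* ... modify the PTEs covering [start, end) here ... */

	/* Throw away any cached translations for the modified range. */
	flush_tlb_range(vma, start, end);
}
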
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#ifndef __UM_TLBFLUSH_H
#define __UM_TLBFLUSH_H

#include <linux/mm.h>

/*
 * In UML, we need to sync the TLB by issuing mmap/munmap syscalls from
 * the process handling the MM (which can be the kernel itself).
 *
 * To track updates, we can hook into set_ptes and flush_tlb_*. With set_ptes
 * we catch all PTE transitions where memory that was unusable becomes usable.
 * With flush_tlb_* we catch any memory that becomes unusable, even if a
 * higher layer of the page table was modified.
 *
 * So, we simply track updates using both methods and mark the memory area to
 * be synced later on. The only special case is that flush_tlb_kernel_* needs
 * to be executed immediately as there is no good synchronization point in
 * that case. In contrast, in the set_ptes case we can wait for the next
 * kernel segfault before we do the synchronization.
 *
 * - flush_tlb_all() flushes all processes' TLBs
 * - flush_tlb_mm(mm) flushes the specified mm context TLBs
 * - flush_tlb_page(vma, vmaddr) flushes one page
 * - flush_tlb_range(vma, start, end) flushes a range of pages
 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 */
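
/*
 * Illustrative sketch, not part of the UML sources: the kind of bookkeeping
 * the "mark now, sync later" scheme above relies on.  It assumes each mm
 * keeps a single pending [start, end) range that um_tlb_sync() later replays
 * with mmap/munmap; the structure and field names here are hypothetical, and
 * the real um_tlb_mark_sync() is declared elsewhere in the UML headers.
 */
struct example_pending_range {
	unsigned long start;	/* lowest address still to be synced */
	unsigned long end;	/* one past the highest address to be synced */
};

static inline void example_mark_pending(struct example_pending_range *r,
					unsigned long start, unsigned long end)
{
	if (r->end == 0) {
		/* Nothing pending yet: record a fresh range. */
		r->start = start;
		r->end = end;
	} else {
		/* Widen the pending range to cover the new update. */
		if (start < r->start)
			r->start = start;
		if (end > r->end)
			r->end = end;
	}
}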

extern int um_tlb_sync(struct mm_struct *mm);

extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long address)
{
	um_tlb_mark_sync(vma->vm_mm, address, address + PAGE_SIZE);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	um_tlb_mark_sync(vma->vm_mm, start, end);
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	um_tlb_mark_sync(&init_mm, start, end);

	/* Kernel needs to be synced immediately */
	um_tlb_sync(&init_mm);
}

#endif
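
/*
 * Illustrative sketch, not part of the header above: one possible shape of
 * the deferred synchronization point described in the comment at the top.
 * A flush of a user address range only marks it; the pending range is then
 * replayed (via mmap/munmap in the process handling the MM) at the next
 * suitable point, e.g. while handling a kernel-mode fault for that mm.  The
 * function name below is hypothetical.
 */
static inline void example_kernel_segfault_sync_point(struct mm_struct *mm)
{
	/* Replay anything marked by flush_tlb_page()/flush_tlb_range(). */
	um_tlb_sync(mm);
}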