// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014 Davidlohr Bueso.
 */
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/mm.h>
#include <linux/vmacache.h>

/*
 * Hash based on the pmd of addr if configured with MMU, which provides a good
 * hit rate for workloads with spatial locality. Otherwise, use pages.
 */
#ifdef CONFIG_MMU
#define VMACACHE_SHIFT	PMD_SHIFT
#else
#define VMACACHE_SHIFT	PAGE_SHIFT
#endif
#define VMACACHE_HASH(addr) ((addr >> VMACACHE_SHIFT) & VMACACHE_MASK)
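
/*
 * Worked example (illustrative, not from the original file): assuming x86-64,
 * where PMD_SHIFT is 21 and VMACACHE_MASK is 3 (VMACACHE_SIZE - 1), the
 * addresses 0x7f0000200000 and 0x7f00003fffff lie in the same 2 MiB region,
 * so (addr >> 21) & 3 yields the same slot (1) for both, while 0x7f0000400000
 * hashes to the next slot (2).  Nearby addresses therefore tend to share a
 * cache entry, which is the spatial locality mentioned above.
 */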

/*
 * This task may be accessing a foreign mm via (for example)
 * get_user_pages()->find_vma(). The vmacache is task-local and this
 * task's vmacache pertains to a different mm (ie, its own). There is
 * nothing we can do here.
 *
 * Also handle the case where a kernel thread has adopted this mm via
 * kthread_use_mm(). That kernel thread's vmacache is not applicable to this mm.
 */
static inline bool vmacache_valid_mm(struct mm_struct *mm)
{
	return current->mm == mm && !(current->flags & PF_KTHREAD);
}

void vmacache_update(unsigned long addr, struct vm_area_struct *newvma)
{
	if (vmacache_valid_mm(newvma->vm_mm))
		current->vmacache.vmas[VMACACHE_HASH(addr)] = newvma;
}
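
/*
 * Illustrative sketch (not part of the original file): the typical consumer
 * of this cache is an address lookup such as find_vma(), which probes the
 * per-task cache first and refills it after a miss.  The helper
 * __find_vma_slowpath() below is hypothetical and stands in for whatever
 * full VMA lookup the caller performs.
 */
#if 0
static struct vm_area_struct *example_find_vma(struct mm_struct *mm,
					       unsigned long addr)
{
	struct vm_area_struct *vma;

	/* Fast path: per-task cache, only useful when mm == current->mm. */
	vma = vmacache_find(mm, addr);
	if (likely(vma))
		return vma;

	/* Slow path: full lookup (hypothetical helper), then refill cache. */
	vma = __find_vma_slowpath(mm, addr);
	if (vma)
		vmacache_update(addr, vma);
	return vma;
}
#endif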

static bool vmacache_valid(struct mm_struct *mm)
{
	struct task_struct *curr;

	if (!vmacache_valid_mm(mm))
		return false;

	curr = current;
	if (mm->vmacache_seqnum != curr->vmacache.seqnum) {
		/*
		 * First attempt will always be invalid, initialize
		 * the new cache for this task here.
		 */
		curr->vmacache.seqnum = mm->vmacache_seqnum;
		vmacache_flush(curr);
		return false;
	}
	return true;
}
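
/*
 * For reference (a sketch, not part of this file): the other half of the
 * seqnum protocol lives in include/linux/vmacache.h.  Writers that modify
 * the VMA set bump the mm-wide sequence number under the mmap lock, roughly:
 *
 *	static inline void vmacache_invalidate(struct mm_struct *mm)
 *	{
 *		mm->vmacache_seqnum++;
 *	}
 *
 * which makes every thread's cached seqnum stale, so the comparison above
 * fails and the stale entries are flushed lazily on the next lookup.
 */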

struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr)
{
	int idx = VMACACHE_HASH(addr);
	int i;

	count_vm_vmacache_event(VMACACHE_FIND_CALLS);

	if (!vmacache_valid(mm))
		return NULL;

	for (i = 0; i < VMACACHE_SIZE; i++) {
		struct vm_area_struct *vma = current->vmacache.vmas[idx];

		if (vma) {
#ifdef CONFIG_DEBUG_VM_VMACACHE
			if (WARN_ON_ONCE(vma->vm_mm != mm))
				break;
#endif
			if (vma->vm_start <= addr && vma->vm_end > addr) {
				count_vm_vmacache_event(VMACACHE_FIND_HITS);
				return vma;
			}
		}
		if (++idx == VMACACHE_SIZE)
			idx = 0;
	}

	return NULL;
}

#ifndef CONFIG_MMU
struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	int idx = VMACACHE_HASH(start);
	int i;

	count_vm_vmacache_event(VMACACHE_FIND_CALLS);

	if (!vmacache_valid(mm))
		return NULL;

	for (i = 0; i < VMACACHE_SIZE; i++) {
		struct vm_area_struct *vma = current->vmacache.vmas[idx];

		if (vma && vma->vm_start == start && vma->vm_end == end) {
			count_vm_vmacache_event(VMACACHE_FIND_HITS);
			return vma;
		}
		if (++idx == VMACACHE_SIZE)
			idx = 0;
	}

	return NULL;
}
#endif

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014 Davidlohr Bueso.
 */
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/mm.h>
#include <linux/vmacache.h>

/*
 * Flush vma caches for threads that share a given mm.
 *
 * The operation is safe because the caller holds the mmap_sem
 * exclusively and other threads accessing the vma cache will
 * have mmap_sem held at least for read, so no extra locking
 * is required to maintain the vma cache.
 */
void vmacache_flush_all(struct mm_struct *mm)
{
	struct task_struct *g, *p;

	count_vm_vmacache_event(VMACACHE_FULL_FLUSHES);

	/*
	 * Single threaded tasks need not iterate the entire
	 * list of processes. We can also skip the flushing:
	 * the mm's seqnum has already been increased, and there
	 * are no other threads' seqnums to worry about. Current's
	 * flush will occur upon the next lookup.
	 */
	if (atomic_read(&mm->mm_users) == 1)
		return;

	rcu_read_lock();
	for_each_process_thread(g, p) {
		/*
		 * Only flush the vmacache pointers; the mm's seqnum
		 * is already set, and each thread's own seqnum will
		 * be updated when its next lookup finds the cache
		 * invalid.
		 */
		if (mm == p->mm)
			vmacache_flush(p);
	}
	rcu_read_unlock();
}
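
/*
 * For reference (a sketch, not part of this file): in kernels of this
 * vintage the full flush above was reached from vmacache_invalidate() in
 * include/linux/vmacache.h, which only needs it when the sequence number
 * wraps around to zero, roughly:
 *
 *	static inline void vmacache_invalidate(struct mm_struct *mm)
 *	{
 *		mm->vmacache_seqnum++;
 *
 *		if (unlikely(mm->vmacache_seqnum == 0))
 *			vmacache_flush_all(mm);
 *	}
 *
 * Without this, a wrapped seqnum could make a long-stale per-thread cache
 * appear valid again.
 */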

/*
 * This task may be accessing a foreign mm via (for example)
 * get_user_pages()->find_vma(). The vmacache is task-local and this
 * task's vmacache pertains to a different mm (ie, its own). There is
 * nothing we can do here.
 *
 * Also handle the case where a kernel thread has adopted this mm via use_mm().
 * That kernel thread's vmacache is not applicable to this mm.
 */
static inline bool vmacache_valid_mm(struct mm_struct *mm)
{
	return current->mm == mm && !(current->flags & PF_KTHREAD);
}

void vmacache_update(unsigned long addr, struct vm_area_struct *newvma)
{
	if (vmacache_valid_mm(newvma->vm_mm))
		current->vmacache.vmas[VMACACHE_HASH(addr)] = newvma;
}

static bool vmacache_valid(struct mm_struct *mm)
{
	struct task_struct *curr;

	if (!vmacache_valid_mm(mm))
		return false;

	curr = current;
	if (mm->vmacache_seqnum != curr->vmacache.seqnum) {
		/*
		 * First attempt will always be invalid, initialize
		 * the new cache for this task here.
		 */
		curr->vmacache.seqnum = mm->vmacache_seqnum;
		vmacache_flush(curr);
		return false;
	}
	return true;
}

struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr)
{
	int i;

	count_vm_vmacache_event(VMACACHE_FIND_CALLS);

	if (!vmacache_valid(mm))
		return NULL;

	for (i = 0; i < VMACACHE_SIZE; i++) {
		struct vm_area_struct *vma = current->vmacache.vmas[i];

		if (!vma)
			continue;
		if (WARN_ON_ONCE(vma->vm_mm != mm))
			break;
		if (vma->vm_start <= addr && vma->vm_end > addr) {
			count_vm_vmacache_event(VMACACHE_FIND_HITS);
			return vma;
		}
	}

	return NULL;
}

#ifndef CONFIG_MMU
struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	int i;

	count_vm_vmacache_event(VMACACHE_FIND_CALLS);

	if (!vmacache_valid(mm))
		return NULL;

	for (i = 0; i < VMACACHE_SIZE; i++) {
		struct vm_area_struct *vma = current->vmacache.vmas[i];

		if (vma && vma->vm_start == start && vma->vm_end == end) {
			count_vm_vmacache_event(VMACACHE_FIND_HITS);
			return vma;
		}
	}

	return NULL;
}
#endif