// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/mm/mmu_notifier.c
 *
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright (C) 2008 SGI
 *	Christoph Lameter <cl@linux.com>
 */

#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/interval_tree.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

#include "vma.h"

/* global SRCU for all MMs */
DEFINE_STATIC_SRCU(srcu);

#ifdef CONFIG_LOCKDEP
struct lockdep_map __mmu_notifier_invalidate_range_start_map = {
	.name = "mmu_notifier_invalidate_range_start"
};
#endif

/*
 * The mmu_notifier_subscriptions structure is allocated and installed in
 * mm->notifier_subscriptions inside the mm_take_all_locks() protected
 * critical section and it's released only when mm_count reaches zero
 * in mmdrop().
 */
struct mmu_notifier_subscriptions {
	/* all mmu notifiers registered in this mm are queued in this list */
	struct hlist_head list;
	bool has_itree;
	/* to serialize the list modifications and hlist_unhashed */
	spinlock_t lock;
	unsigned long invalidate_seq;
	unsigned long active_invalidate_ranges;
	struct rb_root_cached itree;
	wait_queue_head_t wq;
	struct hlist_head deferred_list;
};

/*
 * This is a collision-retry read-side/write-side 'lock', a lot like a
 * seqcount. However, this allows multiple write-sides to hold it at
 * once. Conceptually the write side is protecting the values of the PTEs in
 * this mm, such that PTEs cannot be read into SPTEs (shadow PTEs) while any
 * writer exists.
 *
 * Note that the core mm creates nested invalidate_range_start()/end() regions
 * within the same thread, and runs invalidate_range_start()/end() in parallel
 * on multiple CPUs. This is designed to not reduce concurrency or block
 * progress on the mm side.
 *
 * As a secondary function, holding the full write side also serves to prevent
 * writers for the itree; this is an optimization to avoid extra locking
 * during invalidate_range_start/end notifiers.
 *
 * The write side has two states, fully excluded:
 *  - mm->active_invalidate_ranges != 0
 *  - subscriptions->invalidate_seq & 1 == True (odd)
 *  - some range on the mm_struct is being invalidated
 *  - the itree is not allowed to change
 *
 * And partially excluded:
 *  - mm->active_invalidate_ranges != 0
 *  - subscriptions->invalidate_seq & 1 == False (even)
 *  - some range on the mm_struct is being invalidated
 *  - the itree is allowed to change
 *
 * Operations on notifier_subscriptions->invalidate_seq (under spinlock):
 *    seq |= 1  # Begin writing
 *    seq++     # Release the writing state
 *    seq & 1   # True if a writer exists
 *
 * The latter state avoids some expensive work on inv_end in the common case of
 * no mmu_interval_notifier monitoring the VA.
 */
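
/*
 * Illustrative walk-through of the sequence counter above (a hypothetical
 * example; the numbers are made up, not taken from the original sources):
 * starting from invalidate_seq == 2 (even, idle), the first overlapping
 * invalidate_range_start() makes it 3 (odd, fully excluded); further
 * overlapping starts only bump active_invalidate_ranges and leave it at 3.
 * When the last of those ranges ends, mn_itree_inv_end() increments it to 4
 * (even again) and wakes the waitqueue, so a reader that sampled 3 in
 * mmu_interval_read_begin() sees the change and knows the collision window
 * has closed.
 */
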
static bool
mn_itree_is_invalidating(struct mmu_notifier_subscriptions *subscriptions)
{
	lockdep_assert_held(&subscriptions->lock);
	return subscriptions->invalidate_seq & 1;
}

static struct mmu_interval_notifier *
mn_itree_inv_start_range(struct mmu_notifier_subscriptions *subscriptions,
			 const struct mmu_notifier_range *range,
			 unsigned long *seq)
{
	struct interval_tree_node *node;
	struct mmu_interval_notifier *res = NULL;

	spin_lock(&subscriptions->lock);
	subscriptions->active_invalidate_ranges++;
	node = interval_tree_iter_first(&subscriptions->itree, range->start,
					range->end - 1);
	if (node) {
		subscriptions->invalidate_seq |= 1;
		res = container_of(node, struct mmu_interval_notifier,
				   interval_tree);
	}

	*seq = subscriptions->invalidate_seq;
	spin_unlock(&subscriptions->lock);
	return res;
}

static struct mmu_interval_notifier *
mn_itree_inv_next(struct mmu_interval_notifier *interval_sub,
		  const struct mmu_notifier_range *range)
{
	struct interval_tree_node *node;

	node = interval_tree_iter_next(&interval_sub->interval_tree,
				       range->start, range->end - 1);
	if (!node)
		return NULL;
	return container_of(node, struct mmu_interval_notifier, interval_tree);
}

static void mn_itree_inv_end(struct mmu_notifier_subscriptions *subscriptions)
{
	struct mmu_interval_notifier *interval_sub;
	struct hlist_node *next;

	spin_lock(&subscriptions->lock);
	if (--subscriptions->active_invalidate_ranges ||
	    !mn_itree_is_invalidating(subscriptions)) {
		spin_unlock(&subscriptions->lock);
		return;
	}

	/* Make invalidate_seq even */
	subscriptions->invalidate_seq++;

	/*
	 * The inv_end incorporates a deferred mechanism like rtnl_unlock().
	 * Adds and removes are queued until the final inv_end happens then
	 * they are progressed. This arrangement for tree updates is used to
	 * avoid using a blocking lock during invalidate_range_start.
	 */
	hlist_for_each_entry_safe(interval_sub, next,
				  &subscriptions->deferred_list,
				  deferred_item) {
		if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb))
			interval_tree_insert(&interval_sub->interval_tree,
					     &subscriptions->itree);
		else
			interval_tree_remove(&interval_sub->interval_tree,
					     &subscriptions->itree);
		hlist_del(&interval_sub->deferred_item);
	}
	spin_unlock(&subscriptions->lock);

	wake_up_all(&subscriptions->wq);
}

/**
 * mmu_interval_read_begin - Begin a read side critical section against a VA
 *                           range
 * @interval_sub: The interval subscription
 *
 * mmu_interval_read_begin()/mmu_interval_read_retry() implement a
 * collision-retry scheme similar to seqcount for the VA range under
 * subscription. If the mm invokes invalidation during the critical section
 * then mmu_interval_read_retry() will return true.
 *
 * This is useful to obtain shadow PTEs where teardown or setup of the SPTEs
 * require a blocking context. The critical region formed by this can sleep,
 * and the required 'user_lock' can also be a sleeping lock.
 *
 * The caller is required to provide a 'user_lock' to serialize both teardown
 * and setup.
 *
 * The return value should be passed to mmu_interval_read_retry().
 */
unsigned long
mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub)
{
	struct mmu_notifier_subscriptions *subscriptions =
		interval_sub->mm->notifier_subscriptions;
	unsigned long seq;
	bool is_invalidating;

	/*
	 * If the subscription has a different seq value under the user_lock
	 * than we started with then it has collided.
	 *
	 * If the subscription currently has the same seq value as the
	 * subscriptions seq, then it is currently between
	 * invalidate_start/end and is colliding.
	 *
	 * The locking looks broadly like this:
	 *   mn_itree_inv_start():               mmu_interval_read_begin():
	 *                                        spin_lock
	 *                                         seq = READ_ONCE(interval_sub->invalidate_seq);
	 *                                         seq == subs->invalidate_seq
	 *                                        spin_unlock
	 *    spin_lock
	 *     seq = ++subscriptions->invalidate_seq
	 *    spin_unlock
	 *     op->invalidate():
	 *       user_lock
	 *        mmu_interval_set_seq()
	 *         interval_sub->invalidate_seq = seq
	 *       user_unlock
	 *
	 *                     [Required: mmu_interval_read_retry() == true]
	 *
	 *   mn_itree_inv_end():
	 *    spin_lock
	 *     seq = ++subscriptions->invalidate_seq
	 *    spin_unlock
	 *
	 *                                        user_lock
	 *                                         mmu_interval_read_retry():
	 *                                          interval_sub->invalidate_seq != seq
	 *                                        user_unlock
	 *
	 * Barriers are not needed here as any races here are closed by an
	 * eventual mmu_interval_read_retry(), which provides a barrier via the
	 * user_lock.
	 */
	spin_lock(&subscriptions->lock);
	/* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
	seq = READ_ONCE(interval_sub->invalidate_seq);
	is_invalidating = seq == subscriptions->invalidate_seq;
	spin_unlock(&subscriptions->lock);

	/*
	 * interval_sub->invalidate_seq must always be set to an odd value via
	 * mmu_interval_set_seq() using the provided cur_seq from
	 * mn_itree_inv_start_range(). This ensures that if seq does wrap we
	 * will always clear the below sleep in some reasonable time as
	 * subscriptions->invalidate_seq is even in the idle state.
	 */
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
	if (is_invalidating)
		wait_event(subscriptions->wq,
			   READ_ONCE(subscriptions->invalidate_seq) != seq);

	/*
	 * Notice that mmu_interval_read_retry() can already be true at this
	 * point; avoiding loops here allows the caller to provide a global
	 * time bound.
	 */

	return seq;
}
EXPORT_SYMBOL_GPL(mmu_interval_read_begin);
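
/*
 * Illustrative driver-side sketch of the collision-retry pattern described
 * above (a hypothetical example; ctx, driver_lock, driver_fault_range() and
 * driver_program_sptes() are made-up driver symbols, not kernel APIs):
 *
 *	again:
 *		seq = mmu_interval_read_begin(&ctx->interval_sub);
 *		driver_fault_range(ctx);		// may sleep, no locks held
 *		mutex_lock(&ctx->driver_lock);
 *		if (mmu_interval_read_retry(&ctx->interval_sub, seq)) {
 *			mutex_unlock(&ctx->driver_lock);
 *			goto again;			// an invalidation collided
 *		}
 *		driver_program_sptes(ctx);		// safe: no invalidation raced
 *		mutex_unlock(&ctx->driver_lock);
 *
 * The same driver_lock must be taken by the ops->invalidate() callback before
 * it calls mmu_interval_set_seq(), which is what makes the retry test above
 * race-free.
 */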

static void mn_itree_release(struct mmu_notifier_subscriptions *subscriptions,
			     struct mm_struct *mm)
{
	struct mmu_notifier_range range = {
		.flags = MMU_NOTIFIER_RANGE_BLOCKABLE,
		.event = MMU_NOTIFY_RELEASE,
		.mm = mm,
		.start = 0,
		.end = ULONG_MAX,
	};
	struct mmu_interval_notifier *interval_sub;
	unsigned long cur_seq;
	bool ret;

	for (interval_sub =
		     mn_itree_inv_start_range(subscriptions, &range, &cur_seq);
	     interval_sub;
	     interval_sub = mn_itree_inv_next(interval_sub, &range)) {
		ret = interval_sub->ops->invalidate(interval_sub, &range,
						    cur_seq);
		WARN_ON(!ret);
	}

	mn_itree_inv_end(subscriptions);
}

/*
 * This function can't run concurrently against mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel despite there being no task using this mm any more,
 * through the vmas outside of the exit_mmap context, such as with
 * vmtruncate. This serializes against mmu_notifier_unregister with
 * the notifier_subscriptions->lock in addition to SRCU and it serializes
 * against the other mmu notifiers with SRCU. struct mmu_notifier_subscriptions
 * can't go away from under us as exit_mmap holds an mm_count pin
 * itself.
 */
static void mn_hlist_release(struct mmu_notifier_subscriptions *subscriptions,
			     struct mm_struct *mm)
{
	struct mmu_notifier *subscription;
	int id;

	/*
	 * SRCU here will block mmu_notifier_unregister until
	 * ->release returns.
	 */
	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu))
		/*
		 * If ->release runs before mmu_notifier_unregister it must be
		 * handled, as it's the only way for the driver to flush all
		 * existing sptes and stop the driver from establishing any more
		 * sptes before all the pages in the mm are freed.
		 */
		if (subscription->ops->release)
			subscription->ops->release(subscription, mm);

	spin_lock(&subscriptions->lock);
	while (unlikely(!hlist_empty(&subscriptions->list))) {
		subscription = hlist_entry(subscriptions->list.first,
					   struct mmu_notifier, hlist);
		/*
		 * We arrived before mmu_notifier_unregister so
		 * mmu_notifier_unregister will do nothing other than to wait
		 * for ->release to finish and for mmu_notifier_unregister to
		 * return.
		 */
		hlist_del_init_rcu(&subscription->hlist);
	}
	spin_unlock(&subscriptions->lock);
	srcu_read_unlock(&srcu, id);

	/*
	 * synchronize_srcu here prevents mmu_notifier_release from returning to
	 * exit_mmap (which would proceed with freeing all pages in the mm)
	 * until the ->release method returns, if it was invoked by
	 * mmu_notifier_unregister.
	 *
	 * The notifier_subscriptions can't go away from under us because
	 * one mm_count is held by exit_mmap.
	 */
	synchronize_srcu(&srcu);
}

void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier_subscriptions *subscriptions =
		mm->notifier_subscriptions;

	if (subscriptions->has_itree)
		mn_itree_release(subscriptions, mm);

	if (!hlist_empty(&subscriptions->list))
		mn_hlist_release(subscriptions, mm);
}

/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young can
 * unmap the address range and return 1 or 0 depending on whether the mapping
 * previously existed or not.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
				     unsigned long start,
				     unsigned long end)
{
	struct mmu_notifier *subscription;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->clear_flush_young)
			young |= subscription->ops->clear_flush_young(
				subscription, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}
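
/*
 * Illustrative sketch of a ->clear_flush_young implementation for hardware
 * without a young/accessed bit, as described above (a hypothetical example;
 * struct my_dev and my_dev_unmap_range() are made-up driver symbols):
 *
 *	static int my_clear_flush_young(struct mmu_notifier *subscription,
 *					struct mm_struct *mm,
 *					unsigned long start, unsigned long end)
 *	{
 *		struct my_dev *dev = container_of(subscription,
 *						  struct my_dev, notifier);
 *
 *		// Tearing the SPTEs down forces a device re-fault, which is
 *		// how the range becomes young again if it is still in use.
 *		return my_dev_unmap_range(dev, start, end) ? 1 : 0;
 *	}
 */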

int __mmu_notifier_clear_young(struct mm_struct *mm,
			       unsigned long start,
			       unsigned long end)
{
	struct mmu_notifier *subscription;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->clear_young)
			young |= subscription->ops->clear_young(subscription,
								mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

int __mmu_notifier_test_young(struct mm_struct *mm,
			      unsigned long address)
{
	struct mmu_notifier *subscription;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->test_young) {
			young = subscription->ops->test_young(subscription, mm,
							      address);
			if (young)
				break;
		}
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

static int mn_itree_invalidate(struct mmu_notifier_subscriptions *subscriptions,
			       const struct mmu_notifier_range *range)
{
	struct mmu_interval_notifier *interval_sub;
	unsigned long cur_seq;

	for (interval_sub =
		     mn_itree_inv_start_range(subscriptions, range, &cur_seq);
	     interval_sub;
	     interval_sub = mn_itree_inv_next(interval_sub, range)) {
		bool ret;

		ret = interval_sub->ops->invalidate(interval_sub, range,
						    cur_seq);
		if (!ret) {
			if (WARN_ON(mmu_notifier_range_blockable(range)))
				continue;
			goto out_would_block;
		}
	}
	return 0;

out_would_block:
	/*
	 * On -EAGAIN the non-blocking caller is not allowed to call
	 * invalidate_range_end()
	 */
	mn_itree_inv_end(subscriptions);
	return -EAGAIN;
}

static int mn_hlist_invalidate_range_start(
	struct mmu_notifier_subscriptions *subscriptions,
	struct mmu_notifier_range *range)
{
	struct mmu_notifier *subscription;
	int ret = 0;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		const struct mmu_notifier_ops *ops = subscription->ops;

		if (ops->invalidate_range_start) {
			int _ret;

			if (!mmu_notifier_range_blockable(range))
				non_block_start();
			_ret = ops->invalidate_range_start(subscription, range);
			if (!mmu_notifier_range_blockable(range))
				non_block_end();
			if (_ret) {
				pr_info("%pS callback failed with %d in %sblockable context.\n",
					ops->invalidate_range_start, _ret,
					!mmu_notifier_range_blockable(range) ?
						"non-" :
						"");
				WARN_ON(mmu_notifier_range_blockable(range) ||
					_ret != -EAGAIN);
				/*
				 * We call all the notifiers on any EAGAIN;
				 * there is no way for a notifier to know if
				 * its start method failed, thus a start that
				 * does EAGAIN can't also do end.
				 */
				WARN_ON(ops->invalidate_range_end);
				ret = _ret;
			}
		}
	}

	if (ret) {
		/*
		 * Must be non-blocking to get here. If there are multiple
		 * notifiers and one or more failed start, any that succeeded
		 * start are expecting their end to be called. Do so now.
		 */
		hlist_for_each_entry_rcu(subscription, &subscriptions->list,
					 hlist, srcu_read_lock_held(&srcu)) {
			if (!subscription->ops->invalidate_range_end)
				continue;

			subscription->ops->invalidate_range_end(subscription,
								range);
		}
	}
	srcu_read_unlock(&srcu, id);

	return ret;
}

int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
	struct mmu_notifier_subscriptions *subscriptions =
		range->mm->notifier_subscriptions;
	int ret;

	if (subscriptions->has_itree) {
		ret = mn_itree_invalidate(subscriptions, range);
		if (ret)
			return ret;
	}
	if (!hlist_empty(&subscriptions->list))
		return mn_hlist_invalidate_range_start(subscriptions, range);
	return 0;
}

static void
mn_hlist_invalidate_end(struct mmu_notifier_subscriptions *subscriptions,
			struct mmu_notifier_range *range)
{
	struct mmu_notifier *subscription;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->invalidate_range_end) {
			if (!mmu_notifier_range_blockable(range))
				non_block_start();
			subscription->ops->invalidate_range_end(subscription,
								range);
			if (!mmu_notifier_range_blockable(range))
				non_block_end();
		}
	}
	srcu_read_unlock(&srcu, id);
}

void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
	struct mmu_notifier_subscriptions *subscriptions =
		range->mm->notifier_subscriptions;

	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	if (subscriptions->has_itree)
		mn_itree_inv_end(subscriptions);

	if (!hlist_empty(&subscriptions->list))
		mn_hlist_invalidate_end(subscriptions, range);
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
}

void __mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
	struct mmu_notifier *subscription;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->arch_invalidate_secondary_tlbs)
			subscription->ops->arch_invalidate_secondary_tlbs(
				subscription, mm,
				start, end);
	}
	srcu_read_unlock(&srcu, id);
}

/*
 * Same as mmu_notifier_register but here the caller must hold the mmap_lock in
 * write mode. A NULL mn signals the notifier is being registered for itree
 * mode.
 */
int __mmu_notifier_register(struct mmu_notifier *subscription,
			    struct mm_struct *mm)
{
	struct mmu_notifier_subscriptions *subscriptions = NULL;
	int ret;

	mmap_assert_write_locked(mm);
	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	/*
	 * Subsystems should only register for invalidate_secondary_tlbs() or
	 * invalidate_range_start()/end() callbacks, not both.
	 */
	if (WARN_ON_ONCE(subscription &&
			 (subscription->ops->arch_invalidate_secondary_tlbs &&
			  (subscription->ops->invalidate_range_start ||
			   subscription->ops->invalidate_range_end))))
		return -EINVAL;

	if (!mm->notifier_subscriptions) {
		/*
		 * kmalloc cannot be called under mm_take_all_locks(), but we
		 * know that mm->notifier_subscriptions can't change while we
		 * hold the write side of the mmap_lock.
		 */
		subscriptions = kzalloc(
			sizeof(struct mmu_notifier_subscriptions), GFP_KERNEL);
		if (!subscriptions)
			return -ENOMEM;

		INIT_HLIST_HEAD(&subscriptions->list);
		spin_lock_init(&subscriptions->lock);
		subscriptions->invalidate_seq = 2;
		subscriptions->itree = RB_ROOT_CACHED;
		init_waitqueue_head(&subscriptions->wq);
		INIT_HLIST_HEAD(&subscriptions->deferred_list);
	}

	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
		goto out_clean;

	/*
	 * Serialize the update against mmu_notifier_unregister. A
	 * side note: mmu_notifier_release can't run concurrently with
	 * us because we hold the mm_users pin (either implicitly as
	 * current->mm or explicitly with get_task_mm() or similar).
	 * We can't race against any other mmu notifier method either
	 * thanks to mm_take_all_locks().
	 *
	 * Release semantics on the initialization of the
	 * mmu_notifier_subscriptions's contents are provided for unlocked
	 * readers. Acquire can only be used while holding the mmgrab or
	 * mmget, and is safe because once created the
	 * mmu_notifier_subscriptions is not freed until the mm is destroyed.
	 * As above, users holding the mmap_lock or one of the
	 * mm_take_all_locks() do not need to use acquire semantics.
	 */
	if (subscriptions)
		smp_store_release(&mm->notifier_subscriptions, subscriptions);

	if (subscription) {
		/* Pairs with the mmdrop in mmu_notifier_unregister_* */
		mmgrab(mm);
		subscription->mm = mm;
		subscription->users = 1;

		spin_lock(&mm->notifier_subscriptions->lock);
		hlist_add_head_rcu(&subscription->hlist,
				   &mm->notifier_subscriptions->list);
		spin_unlock(&mm->notifier_subscriptions->lock);
	} else
		mm->notifier_subscriptions->has_itree = true;

	mm_drop_all_locks(mm);
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return 0;

out_clean:
	kfree(subscriptions);
	return ret;
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);

/**
 * mmu_notifier_register - Register a notifier on a mm
 * @subscription: The notifier to attach
 * @mm: The mm to attach the notifier to
 *
 * Must not hold mmap_lock nor any other VM related lock when calling
 * this registration function. Must also ensure mm_users can't go down
 * to zero while this runs to avoid races with mmu_notifier_release,
 * so mm has to be current->mm or the mm should be pinned safely such
 * as with get_task_mm(). If the mm is not current->mm, the mm_users
 * pin should be released by calling mmput after mmu_notifier_register
 * returns.
 *
 * mmu_notifier_unregister() or mmu_notifier_put() must always be called to
 * unregister the notifier.
 *
 * While the caller holds a get on the mmu_notifier, the subscription->mm
 * pointer will remain valid, and can be converted to an active mm pointer
 * via mmget_not_zero().
 */
int mmu_notifier_register(struct mmu_notifier *subscription,
			  struct mm_struct *mm)
{
	int ret;

	mmap_write_lock(mm);
	ret = __mmu_notifier_register(subscription, mm);
	mmap_write_unlock(mm);
	return ret;
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);
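
/*
 * Illustrative registration sketch (a hypothetical example; the my_* names
 * are made-up driver symbols, not kernel APIs). Registration is done from a
 * context that pins mm_users, typically current->mm:
 *
 *	static const struct mmu_notifier_ops my_notifier_ops = {
 *		.release                = my_release,
 *		.invalidate_range_start = my_invalidate_range_start,
 *		.invalidate_range_end   = my_invalidate_range_end,
 *	};
 *	static struct mmu_notifier my_notifier = { .ops = &my_notifier_ops };
 *
 *	int my_attach(void)
 *	{
 *		return mmu_notifier_register(&my_notifier, current->mm);
 *	}
 *
 * The matching teardown calls mmu_notifier_unregister(&my_notifier, mm) (or
 * uses the get/put flow below); either is safe before or after exit_mmap
 * because registration took an mm_count reference.
 */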

static struct mmu_notifier *
find_get_mmu_notifier(struct mm_struct *mm, const struct mmu_notifier_ops *ops)
{
	struct mmu_notifier *subscription;

	spin_lock(&mm->notifier_subscriptions->lock);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 lockdep_is_held(&mm->notifier_subscriptions->lock)) {
		if (subscription->ops != ops)
			continue;

		if (likely(subscription->users != UINT_MAX))
			subscription->users++;
		else
			subscription = ERR_PTR(-EOVERFLOW);
		spin_unlock(&mm->notifier_subscriptions->lock);
		return subscription;
	}
	spin_unlock(&mm->notifier_subscriptions->lock);
	return NULL;
}

/**
 * mmu_notifier_get_locked - Return the single struct mmu_notifier for
 *                           the mm & ops
 * @ops: The operations struct being subscribed with
 * @mm: The mm to attach notifiers to
 *
 * This function either allocates a new mmu_notifier via
 * ops->alloc_notifier(), or returns an already existing notifier on the
 * list. The value of the ops pointer is used to determine when two notifiers
 * are the same.
 *
 * Each call to mmu_notifier_get() must be paired with a call to
 * mmu_notifier_put(). The caller must hold the write side of mm->mmap_lock.
 *
 * While the caller holds a get on the mmu_notifier, the mm pointer will remain
 * valid, and can be converted to an active mm pointer via mmget_not_zero().
 */
struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
					     struct mm_struct *mm)
{
	struct mmu_notifier *subscription;
	int ret;

	mmap_assert_write_locked(mm);

	if (mm->notifier_subscriptions) {
		subscription = find_get_mmu_notifier(mm, ops);
		if (subscription)
			return subscription;
	}

	subscription = ops->alloc_notifier(mm);
	if (IS_ERR(subscription))
		return subscription;
	subscription->ops = ops;
	ret = __mmu_notifier_register(subscription, mm);
	if (ret)
		goto out_free;
	return subscription;
out_free:
	subscription->ops->free_notifier(subscription);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(mmu_notifier_get_locked);
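
/*
 * Illustrative get/put sketch (a hypothetical example; struct my_ctx, my_ops,
 * and the alloc/free helpers are made-up). The get/put flow shares one
 * notifier per (mm, ops) pair instead of registering a new one for every
 * user:
 *
 *	struct my_ctx {
 *		struct mmu_notifier notifier;
 *		...
 *	};
 *
 *	mmap_write_lock(current->mm);
 *	mn = mmu_notifier_get_locked(&my_ops, current->mm);
 *	mmap_write_unlock(current->mm);
 *	if (IS_ERR(mn))
 *		return PTR_ERR(mn);
 *	ctx = container_of(mn, struct my_ctx, notifier);
 *
 * my_ops must provide .alloc_notifier() (returning &ctx->notifier of a
 * freshly allocated my_ctx) and .free_notifier() (freeing it); the reference
 * obtained here is dropped later with mmu_notifier_put(&ctx->notifier).
 */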

/* this is called after the last mmu_notifier_unregister() returned */
void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
	BUG_ON(!hlist_empty(&mm->notifier_subscriptions->list));
	kfree(mm->notifier_subscriptions);
	mm->notifier_subscriptions = LIST_POISON1; /* debug */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
 * running mmu notifiers with SRCU and against mmu_notifier_unregister
 * with the unregister lock + SRCU. All sptes must be dropped before
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister,
 * and only after mmu_notifier_unregister returned we're guaranteed
 * that ->release or any other method can't run anymore.
 */
void mmu_notifier_unregister(struct mmu_notifier *subscription,
			     struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	if (!hlist_unhashed(&subscription->hlist)) {
		/*
		 * SRCU here will force exit_mmap to wait for ->release to
		 * finish before freeing the pages.
		 */
		int id;

		id = srcu_read_lock(&srcu);
		/*
		 * exit_mmap will block in mmu_notifier_release to guarantee
		 * that ->release is called before freeing the pages.
		 */
		if (subscription->ops->release)
			subscription->ops->release(subscription, mm);
		srcu_read_unlock(&srcu, id);

		spin_lock(&mm->notifier_subscriptions->lock);
		/*
		 * Can not use list_del_rcu() since __mmu_notifier_release
		 * can delete it before we hold the lock.
		 */
		hlist_del_init_rcu(&subscription->hlist);
		spin_unlock(&mm->notifier_subscriptions->lock);
	}

	/*
	 * Wait for any running method to finish, of course including
	 * ->release if it was run by mmu_notifier_release instead of us.
	 */
	synchronize_srcu(&srcu);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);

static void mmu_notifier_free_rcu(struct rcu_head *rcu)
{
	struct mmu_notifier *subscription =
		container_of(rcu, struct mmu_notifier, rcu);
	struct mm_struct *mm = subscription->mm;

	subscription->ops->free_notifier(subscription);
	/* Pairs with the get in __mmu_notifier_register() */
	mmdrop(mm);
}

/**
 * mmu_notifier_put - Release the reference on the notifier
 * @subscription: The notifier to act on
 *
 * This function must be paired with each mmu_notifier_get(); it releases the
 * reference obtained by the get. If this is the last reference then the work
 * to free the notifier will run asynchronously.
 *
 * Unlike mmu_notifier_unregister() the get/put flow only calls ops->release
 * when the mm_struct is destroyed. Instead free_notifier is always called to
 * release any resources held by the user.
 *
 * As ops->release is not guaranteed to be called, the user must ensure that
 * all sptes are dropped, and no new sptes can be established before
 * mmu_notifier_put() is called.
 *
 * This function can be called from the ops->release callback, however the
 * caller must still ensure it is called pairwise with mmu_notifier_get().
 *
 * Modules calling this function must call mmu_notifier_synchronize() in
 * their __exit functions to ensure the async work is completed.
 */
void mmu_notifier_put(struct mmu_notifier *subscription)
{
	struct mm_struct *mm = subscription->mm;

	spin_lock(&mm->notifier_subscriptions->lock);
	if (WARN_ON(!subscription->users) || --subscription->users)
		goto out_unlock;
	hlist_del_init_rcu(&subscription->hlist);
	spin_unlock(&mm->notifier_subscriptions->lock);

	call_srcu(&srcu, &subscription->rcu, mmu_notifier_free_rcu);
	return;

out_unlock:
	spin_unlock(&mm->notifier_subscriptions->lock);
}
EXPORT_SYMBOL_GPL(mmu_notifier_put);
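
/*
 * Illustrative teardown sketch for the get/put flow (a hypothetical example;
 * struct my_ctx and my_detach() are made-up). The put may be issued from
 * ops->release or from ordinary driver teardown, once all SPTEs are gone:
 *
 *	void my_detach(struct my_ctx *ctx)
 *	{
 *		// all SPTEs for this mm must already be dropped
 *		mmu_notifier_put(&ctx->notifier);
 *	}
 *
 * The actual free happens asynchronously via call_srcu(); a module using this
 * flow flushes that work in its __exit path, as sketched after
 * mmu_notifier_synchronize() below.
 */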

static int __mmu_interval_notifier_insert(
	struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
	struct mmu_notifier_subscriptions *subscriptions, unsigned long start,
	unsigned long length, const struct mmu_interval_notifier_ops *ops)
{
	interval_sub->mm = mm;
	interval_sub->ops = ops;
	RB_CLEAR_NODE(&interval_sub->interval_tree.rb);
	interval_sub->interval_tree.start = start;
	/*
	 * Note that the representation of the intervals in the interval tree
	 * considers the ending point as contained in the interval.
	 */
	if (length == 0 ||
	    check_add_overflow(start, length - 1,
			       &interval_sub->interval_tree.last))
		return -EOVERFLOW;

	/* Must call with a mmget() held */
	if (WARN_ON(atomic_read(&mm->mm_users) <= 0))
		return -EINVAL;

	/* pairs with mmdrop in mmu_interval_notifier_remove() */
	mmgrab(mm);

	/*
	 * If some invalidate_range_start/end region is going on in parallel
	 * we don't know what VA ranges are affected, so we must assume this
	 * new range is included.
	 *
	 * If the itree is invalidating then we are not allowed to change
	 * it. Retrying until invalidation is done is tricky due to the
	 * possibility for live lock; instead defer the add to
	 * mn_itree_inv_end() so this algorithm is deterministic.
	 *
	 * In all cases the value for the interval_sub->invalidate_seq should be
	 * odd, see mmu_interval_read_begin().
	 */
	spin_lock(&subscriptions->lock);
	if (subscriptions->active_invalidate_ranges) {
		if (mn_itree_is_invalidating(subscriptions))
			hlist_add_head(&interval_sub->deferred_item,
				       &subscriptions->deferred_list);
		else {
			subscriptions->invalidate_seq |= 1;
			interval_tree_insert(&interval_sub->interval_tree,
					     &subscriptions->itree);
		}
		interval_sub->invalidate_seq = subscriptions->invalidate_seq;
	} else {
		WARN_ON(mn_itree_is_invalidating(subscriptions));
		/*
		 * The starting seq for a subscription not under invalidation
		 * should be odd, not equal to the current invalidate_seq and
		 * invalidate_seq should not 'wrap' to the new seq any time
		 * soon.
		 */
		interval_sub->invalidate_seq =
			subscriptions->invalidate_seq - 1;
		interval_tree_insert(&interval_sub->interval_tree,
				     &subscriptions->itree);
	}
	spin_unlock(&subscriptions->lock);
	return 0;
}

/**
 * mmu_interval_notifier_insert - Insert an interval notifier
 * @interval_sub: Interval subscription to register
 * @start: Starting virtual address to monitor
 * @length: Length of the range to monitor
 * @mm: mm_struct to attach to
 * @ops: Interval notifier operations to be called on matching events
 *
 * This function subscribes the interval notifier for notifications from the
 * mm. Upon return the ops related to mmu_interval_notifier will be called
 * whenever an event that intersects with the given range occurs.
 *
 * Upon return the range_notifier may not be present in the interval tree yet.
 * The caller must use the normal interval notifier read flow via
 * mmu_interval_read_begin() to establish SPTEs for this range.
 */
int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long length,
				 const struct mmu_interval_notifier_ops *ops)
{
	struct mmu_notifier_subscriptions *subscriptions;
	int ret;

	might_lock(&mm->mmap_lock);

	subscriptions = smp_load_acquire(&mm->notifier_subscriptions);
	if (!subscriptions || !subscriptions->has_itree) {
		ret = mmu_notifier_register(NULL, mm);
		if (ret)
			return ret;
		subscriptions = mm->notifier_subscriptions;
	}
	return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
					      start, length, ops);
}
EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert);
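
/*
 * Illustrative interval-notifier sketch (a hypothetical example; struct
 * my_range, my_lock and my_unmap_span() are made-up driver symbols). The
 * ops->invalidate callback must take the same lock the read side uses and
 * record cur_seq with mmu_interval_set_seq():
 *
 *	static bool my_invalidate(struct mmu_interval_notifier *interval_sub,
 *				  const struct mmu_notifier_range *range,
 *				  unsigned long cur_seq)
 *	{
 *		struct my_range *r = container_of(interval_sub,
 *						  struct my_range, notifier);
 *
 *		if (!mmu_notifier_range_blockable(range))
 *			return false;	// cannot sleep; core returns -EAGAIN
 *		mutex_lock(&r->my_lock);
 *		mmu_interval_set_seq(interval_sub, cur_seq);
 *		my_unmap_span(r, range->start, range->end);
 *		mutex_unlock(&r->my_lock);
 *		return true;
 *	}
 *
 *	static const struct mmu_interval_notifier_ops my_interval_ops = {
 *		.invalidate = my_invalidate,
 *	};
 *
 *	err = mmu_interval_notifier_insert(&r->notifier, current->mm,
 *					   start, length, &my_interval_ops);
 */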

int mmu_interval_notifier_insert_locked(
	struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
	unsigned long start, unsigned long length,
	const struct mmu_interval_notifier_ops *ops)
{
	struct mmu_notifier_subscriptions *subscriptions =
		mm->notifier_subscriptions;
	int ret;

	mmap_assert_write_locked(mm);

	if (!subscriptions || !subscriptions->has_itree) {
		ret = __mmu_notifier_register(NULL, mm);
		if (ret)
			return ret;
		subscriptions = mm->notifier_subscriptions;
	}
	return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
					      start, length, ops);
}
EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert_locked);

static bool
mmu_interval_seq_released(struct mmu_notifier_subscriptions *subscriptions,
			  unsigned long seq)
{
	bool ret;

	spin_lock(&subscriptions->lock);
	ret = subscriptions->invalidate_seq != seq;
	spin_unlock(&subscriptions->lock);
	return ret;
}

/**
 * mmu_interval_notifier_remove - Remove an interval notifier
 * @interval_sub: Interval subscription to unregister
 *
 * This function must be paired with mmu_interval_notifier_insert(). It cannot
 * be called from any ops callback.
 *
 * Once this returns ops callbacks are no longer running on other CPUs and
 * will not be called in future.
 */
void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub)
{
	struct mm_struct *mm = interval_sub->mm;
	struct mmu_notifier_subscriptions *subscriptions =
		mm->notifier_subscriptions;
	unsigned long seq = 0;

	might_sleep();

	spin_lock(&subscriptions->lock);
	if (mn_itree_is_invalidating(subscriptions)) {
		/*
		 * remove is being called after insert put this on the
		 * deferred list, but before the deferred list was processed.
		 */
		if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb)) {
			hlist_del(&interval_sub->deferred_item);
		} else {
			hlist_add_head(&interval_sub->deferred_item,
				       &subscriptions->deferred_list);
			seq = subscriptions->invalidate_seq;
		}
	} else {
		WARN_ON(RB_EMPTY_NODE(&interval_sub->interval_tree.rb));
		interval_tree_remove(&interval_sub->interval_tree,
				     &subscriptions->itree);
	}
	spin_unlock(&subscriptions->lock);

	/*
	 * The possible sleep on progress in the invalidation requires the
	 * caller not to hold any locks held by invalidation callbacks.
	 */
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
	if (seq)
		wait_event(subscriptions->wq,
			   mmu_interval_seq_released(subscriptions, seq));

	/* pairs with mmgrab in mmu_interval_notifier_insert() */
	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_interval_notifier_remove);

/**
 * mmu_notifier_synchronize - Ensure all mmu_notifiers are freed
 *
 * This function ensures that all outstanding async SRCU work from
 * mmu_notifier_put() is completed. After it returns any mmu_notifier_ops
 * associated with an unused mmu_notifier will no longer be called.
 *
 * Before using the caller must ensure that all of its mmu_notifiers have been
 * fully released via mmu_notifier_put().
 *
 * Modules using the mmu_notifier_put() API should call this in their __exit
 * function to avoid module unloading races.
 */
void mmu_notifier_synchronize(void)
{
	synchronize_srcu(&srcu);
}
EXPORT_SYMBOL_GPL(mmu_notifier_synchronize);
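
/*
 * Illustrative module-exit sketch for users of mmu_notifier_put() (a
 * hypothetical example; my_exit() is a made-up module exit function):
 *
 *	static void __exit my_exit(void)
 *	{
 *		// All notifiers were already dropped with mmu_notifier_put();
 *		// this flushes the pending free_notifier() SRCU callbacks so
 *		// the module text can safely go away.
 *		mmu_notifier_synchronize();
 *	}
 *	module_exit(my_exit);
 */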