1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * linux/mm/mmu_notifier.c
4 *
5 * Copyright (C) 2008 Qumranet, Inc.
6 * Copyright (C) 2008 SGI
7 * Christoph Lameter <cl@linux.com>
8 */
9
10#include <linux/rculist.h>
11#include <linux/mmu_notifier.h>
12#include <linux/export.h>
13#include <linux/mm.h>
14#include <linux/err.h>
15#include <linux/interval_tree.h>
16#include <linux/srcu.h>
17#include <linux/rcupdate.h>
18#include <linux/sched.h>
19#include <linux/sched/mm.h>
20#include <linux/slab.h>
21
22/* global SRCU for all MMs */
23DEFINE_STATIC_SRCU(srcu);
24
25#ifdef CONFIG_LOCKDEP
26struct lockdep_map __mmu_notifier_invalidate_range_start_map = {
27 .name = "mmu_notifier_invalidate_range_start"
28};
29#endif
30
31/*
32 * The mmu_notifier_subscriptions structure is allocated and installed in
33 * mm->notifier_subscriptions inside the mm_take_all_locks() protected
34 * critical section and it's released only when mm_count reaches zero
35 * in mmdrop().
36 */
37struct mmu_notifier_subscriptions {
38 /* all mmu notifiers registered in this mm are queued in this list */
39 struct hlist_head list;
40 bool has_itree;
41 /* to serialize the list modifications and hlist_unhashed */
42 spinlock_t lock;
43 unsigned long invalidate_seq;
44 unsigned long active_invalidate_ranges;
45 struct rb_root_cached itree;
46 wait_queue_head_t wq;
47 struct hlist_head deferred_list;
48};
49
50/*
51 * This is a collision-retry read-side/write-side 'lock', a lot like a
52 * seqcount, however this allows multiple write-sides to hold it at
53 * once. Conceptually the write side is protecting the values of the PTEs in
 * this mm, such that PTEs cannot be read into SPTEs (shadow PTEs) while any
55 * writer exists.
56 *
57 * Note that the core mm creates nested invalidate_range_start()/end() regions
58 * within the same thread, and runs invalidate_range_start()/end() in parallel
59 * on multiple CPUs. This is designed to not reduce concurrency or block
60 * progress on the mm side.
61 *
62 * As a secondary function, holding the full write side also serves to prevent
63 * writers for the itree, this is an optimization to avoid extra locking
64 * during invalidate_range_start/end notifiers.
65 *
66 * The write side has two states, fully excluded:
67 * - mm->active_invalidate_ranges != 0
68 * - subscriptions->invalidate_seq & 1 == True (odd)
69 * - some range on the mm_struct is being invalidated
70 * - the itree is not allowed to change
71 *
72 * And partially excluded:
73 * - mm->active_invalidate_ranges != 0
74 * - subscriptions->invalidate_seq & 1 == False (even)
75 * - some range on the mm_struct is being invalidated
76 * - the itree is allowed to change
77 *
78 * Operations on notifier_subscriptions->invalidate_seq (under spinlock):
79 * seq |= 1 # Begin writing
80 * seq++ # Release the writing state
81 * seq & 1 # True if a writer exists
82 *
 * The latter state avoids some expensive work on inv_end in the common case of
 * no mmu_interval_notifier monitoring the VA.
85 */
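
/*
 * An illustrative walk-through of the scheme above, assuming the initial
 * invalidate_seq of 2 set at registration time: the first overlapping
 * invalidate_range_start() marks the write side with seq |= 1 (seq == 3,
 * odd, a writer exists); further nested or parallel starts only bump
 * active_invalidate_ranges; the last matching invalidate_range_end() does
 * seq++ (seq == 4, even, idle again). A reader detects a collision because
 * its subscription's invalidate_seq was updated to the odd value by the
 * ops->invalidate() callback, which makes mmu_interval_read_retry() return
 * true.
 */
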
86static bool
87mn_itree_is_invalidating(struct mmu_notifier_subscriptions *subscriptions)
88{
89 lockdep_assert_held(&subscriptions->lock);
90 return subscriptions->invalidate_seq & 1;
91}
92
93static struct mmu_interval_notifier *
94mn_itree_inv_start_range(struct mmu_notifier_subscriptions *subscriptions,
95 const struct mmu_notifier_range *range,
96 unsigned long *seq)
97{
98 struct interval_tree_node *node;
99 struct mmu_interval_notifier *res = NULL;
100
101 spin_lock(&subscriptions->lock);
102 subscriptions->active_invalidate_ranges++;
103 node = interval_tree_iter_first(&subscriptions->itree, range->start,
104 range->end - 1);
105 if (node) {
106 subscriptions->invalidate_seq |= 1;
107 res = container_of(node, struct mmu_interval_notifier,
108 interval_tree);
109 }
110
111 *seq = subscriptions->invalidate_seq;
112 spin_unlock(&subscriptions->lock);
113 return res;
114}
115
116static struct mmu_interval_notifier *
117mn_itree_inv_next(struct mmu_interval_notifier *interval_sub,
118 const struct mmu_notifier_range *range)
119{
120 struct interval_tree_node *node;
121
122 node = interval_tree_iter_next(&interval_sub->interval_tree,
123 range->start, range->end - 1);
124 if (!node)
125 return NULL;
126 return container_of(node, struct mmu_interval_notifier, interval_tree);
127}
128
129static void mn_itree_inv_end(struct mmu_notifier_subscriptions *subscriptions)
130{
131 struct mmu_interval_notifier *interval_sub;
132 struct hlist_node *next;
133
134 spin_lock(&subscriptions->lock);
135 if (--subscriptions->active_invalidate_ranges ||
136 !mn_itree_is_invalidating(subscriptions)) {
137 spin_unlock(&subscriptions->lock);
138 return;
139 }
140
141 /* Make invalidate_seq even */
142 subscriptions->invalidate_seq++;
143
144 /*
145 * The inv_end incorporates a deferred mechanism like rtnl_unlock().
146 * Adds and removes are queued until the final inv_end happens then
147 * they are progressed. This arrangement for tree updates is used to
148 * avoid using a blocking lock during invalidate_range_start.
149 */
150 hlist_for_each_entry_safe(interval_sub, next,
151 &subscriptions->deferred_list,
152 deferred_item) {
153 if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb))
154 interval_tree_insert(&interval_sub->interval_tree,
155 &subscriptions->itree);
156 else
157 interval_tree_remove(&interval_sub->interval_tree,
158 &subscriptions->itree);
159 hlist_del(&interval_sub->deferred_item);
160 }
161 spin_unlock(&subscriptions->lock);
162
163 wake_up_all(&subscriptions->wq);
164}
165
166/**
167 * mmu_interval_read_begin - Begin a read side critical section against a VA
168 * range
169 * @interval_sub: The interval subscription
170 *
 * mmu_interval_read_begin()/mmu_interval_read_retry() implement a
172 * collision-retry scheme similar to seqcount for the VA range under
173 * subscription. If the mm invokes invalidation during the critical section
174 * then mmu_interval_read_retry() will return true.
175 *
176 * This is useful to obtain shadow PTEs where teardown or setup of the SPTEs
 * requires a blocking context. The critical region formed by this can sleep,
178 * and the required 'user_lock' can also be a sleeping lock.
179 *
180 * The caller is required to provide a 'user_lock' to serialize both teardown
181 * and setup.
182 *
183 * The return value should be passed to mmu_interval_read_retry().
184 */
185unsigned long
186mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub)
187{
188 struct mmu_notifier_subscriptions *subscriptions =
189 interval_sub->mm->notifier_subscriptions;
190 unsigned long seq;
191 bool is_invalidating;
192
193 /*
194 * If the subscription has a different seq value under the user_lock
195 * than we started with then it has collided.
196 *
197 * If the subscription currently has the same seq value as the
198 * subscriptions seq, then it is currently between
199 * invalidate_start/end and is colliding.
200 *
201 * The locking looks broadly like this:
202 * mn_tree_invalidate_start(): mmu_interval_read_begin():
203 * spin_lock
204 * seq = READ_ONCE(interval_sub->invalidate_seq);
205 * seq == subs->invalidate_seq
206 * spin_unlock
207 * spin_lock
208 * seq = ++subscriptions->invalidate_seq
209 * spin_unlock
210 * op->invalidate_range():
211 * user_lock
212 * mmu_interval_set_seq()
213 * interval_sub->invalidate_seq = seq
214 * user_unlock
215 *
216 * [Required: mmu_interval_read_retry() == true]
217 *
218 * mn_itree_inv_end():
219 * spin_lock
220 * seq = ++subscriptions->invalidate_seq
221 * spin_unlock
222 *
223 * user_lock
224 * mmu_interval_read_retry():
225 * interval_sub->invalidate_seq != seq
226 * user_unlock
227 *
228 * Barriers are not needed here as any races here are closed by an
229 * eventual mmu_interval_read_retry(), which provides a barrier via the
230 * user_lock.
231 */
232 spin_lock(&subscriptions->lock);
233 /* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
234 seq = READ_ONCE(interval_sub->invalidate_seq);
235 is_invalidating = seq == subscriptions->invalidate_seq;
236 spin_unlock(&subscriptions->lock);
237
238 /*
239 * interval_sub->invalidate_seq must always be set to an odd value via
240 * mmu_interval_set_seq() using the provided cur_seq from
241 * mn_itree_inv_start_range(). This ensures that if seq does wrap we
242 * will always clear the below sleep in some reasonable time as
243 * subscriptions->invalidate_seq is even in the idle state.
244 */
245 lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
246 lock_map_release(&__mmu_notifier_invalidate_range_start_map);
247 if (is_invalidating)
248 wait_event(subscriptions->wq,
249 READ_ONCE(subscriptions->invalidate_seq) != seq);
250
251 /*
252 * Notice that mmu_interval_read_retry() can already be true at this
253 * point, avoiding loops here allows the caller to provide a global
254 * time bound.
255 */
256
257 return seq;
258}
259EXPORT_SYMBOL_GPL(mmu_interval_read_begin);
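
/*
 * A minimal sketch of the intended read side, assuming a hypothetical driver
 * with its own 'driver->update_lock' and page-table update routine (the
 * driver names are illustrative, not taken from this file):
 *
 *	again:
 *		seq = mmu_interval_read_begin(&interval_sub);
 *		... walk the CPU page tables / fault pages as needed ...
 *		mutex_lock(&driver->update_lock);
 *		if (mmu_interval_read_retry(&interval_sub, seq)) {
 *			mutex_unlock(&driver->update_lock);
 *			goto again;
 *		}
 *		... establish the SPTEs under driver->update_lock ...
 *		mutex_unlock(&driver->update_lock);
 *
 * The driver's ops->invalidate() must take the same lock and call
 * mmu_interval_set_seq() with the cur_seq it is given; that is what makes
 * the retry check reliable.
 */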
260
261static void mn_itree_release(struct mmu_notifier_subscriptions *subscriptions,
262 struct mm_struct *mm)
263{
264 struct mmu_notifier_range range = {
265 .flags = MMU_NOTIFIER_RANGE_BLOCKABLE,
266 .event = MMU_NOTIFY_RELEASE,
267 .mm = mm,
268 .start = 0,
269 .end = ULONG_MAX,
270 };
271 struct mmu_interval_notifier *interval_sub;
272 unsigned long cur_seq;
273 bool ret;
274
275 for (interval_sub =
276 mn_itree_inv_start_range(subscriptions, &range, &cur_seq);
277 interval_sub;
278 interval_sub = mn_itree_inv_next(interval_sub, &range)) {
279 ret = interval_sub->ops->invalidate(interval_sub, &range,
280 cur_seq);
281 WARN_ON(!ret);
282 }
283
284 mn_itree_inv_end(subscriptions);
285}
286
287/*
288 * This function can't run concurrently against mmu_notifier_register
289 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
290 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
291 * in parallel despite there being no task using this mm any more,
292 * through the vmas outside of the exit_mmap context, such as with
293 * vmtruncate. This serializes against mmu_notifier_unregister with
294 * the notifier_subscriptions->lock in addition to SRCU and it serializes
295 * against the other mmu notifiers with SRCU. struct mmu_notifier_subscriptions
296 * can't go away from under us as exit_mmap holds an mm_count pin
297 * itself.
298 */
299static void mn_hlist_release(struct mmu_notifier_subscriptions *subscriptions,
300 struct mm_struct *mm)
301{
302 struct mmu_notifier *subscription;
303 int id;
304
305 /*
306 * SRCU here will block mmu_notifier_unregister until
307 * ->release returns.
308 */
309 id = srcu_read_lock(&srcu);
310 hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
311 srcu_read_lock_held(&srcu))
312 /*
313 * If ->release runs before mmu_notifier_unregister it must be
314 * handled, as it's the only way for the driver to flush all
315 * existing sptes and stop the driver from establishing any more
316 * sptes before all the pages in the mm are freed.
317 */
318 if (subscription->ops->release)
319 subscription->ops->release(subscription, mm);
320
321 spin_lock(&subscriptions->lock);
322 while (unlikely(!hlist_empty(&subscriptions->list))) {
323 subscription = hlist_entry(subscriptions->list.first,
324 struct mmu_notifier, hlist);
325 /*
326 * We arrived before mmu_notifier_unregister so
327 * mmu_notifier_unregister will do nothing other than to wait
328 * for ->release to finish and for mmu_notifier_unregister to
329 * return.
330 */
331 hlist_del_init_rcu(&subscription->hlist);
332 }
333 spin_unlock(&subscriptions->lock);
334 srcu_read_unlock(&srcu, id);
335
336 /*
337 * synchronize_srcu here prevents mmu_notifier_release from returning to
338 * exit_mmap (which would proceed with freeing all pages in the mm)
339 * until the ->release method returns, if it was invoked by
340 * mmu_notifier_unregister.
341 *
342 * The notifier_subscriptions can't go away from under us because
343 * one mm_count is held by exit_mmap.
344 */
345 synchronize_srcu(&srcu);
346}
347
348void __mmu_notifier_release(struct mm_struct *mm)
349{
350 struct mmu_notifier_subscriptions *subscriptions =
351 mm->notifier_subscriptions;
352
353 if (subscriptions->has_itree)
354 mn_itree_release(subscriptions, mm);
355
356 if (!hlist_empty(&subscriptions->list))
357 mn_hlist_release(subscriptions, mm);
358}
359
360/*
 * unmap the address and return 1 or 0 depending on whether the mapping
 * previously existed.
363 * existed or not.
364 */
365int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
366 unsigned long start,
367 unsigned long end)
368{
369 struct mmu_notifier *subscription;
370 int young = 0, id;
371
372 id = srcu_read_lock(&srcu);
373 hlist_for_each_entry_rcu(subscription,
374 &mm->notifier_subscriptions->list, hlist,
375 srcu_read_lock_held(&srcu)) {
376 if (subscription->ops->clear_flush_young)
377 young |= subscription->ops->clear_flush_young(
378 subscription, mm, start, end);
379 }
380 srcu_read_unlock(&srcu, id);
381
382 return young;
383}
384
385int __mmu_notifier_clear_young(struct mm_struct *mm,
386 unsigned long start,
387 unsigned long end)
388{
389 struct mmu_notifier *subscription;
390 int young = 0, id;
391
392 id = srcu_read_lock(&srcu);
393 hlist_for_each_entry_rcu(subscription,
394 &mm->notifier_subscriptions->list, hlist,
395 srcu_read_lock_held(&srcu)) {
396 if (subscription->ops->clear_young)
397 young |= subscription->ops->clear_young(subscription,
398 mm, start, end);
399 }
400 srcu_read_unlock(&srcu, id);
401
402 return young;
403}
404
405int __mmu_notifier_test_young(struct mm_struct *mm,
406 unsigned long address)
407{
408 struct mmu_notifier *subscription;
409 int young = 0, id;
410
411 id = srcu_read_lock(&srcu);
412 hlist_for_each_entry_rcu(subscription,
413 &mm->notifier_subscriptions->list, hlist,
414 srcu_read_lock_held(&srcu)) {
415 if (subscription->ops->test_young) {
416 young = subscription->ops->test_young(subscription, mm,
417 address);
418 if (young)
419 break;
420 }
421 }
422 srcu_read_unlock(&srcu, id);
423
424 return young;
425}
426
427void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
428 pte_t pte)
429{
430 struct mmu_notifier *subscription;
431 int id;
432
433 id = srcu_read_lock(&srcu);
434 hlist_for_each_entry_rcu(subscription,
435 &mm->notifier_subscriptions->list, hlist,
436 srcu_read_lock_held(&srcu)) {
437 if (subscription->ops->change_pte)
438 subscription->ops->change_pte(subscription, mm, address,
439 pte);
440 }
441 srcu_read_unlock(&srcu, id);
442}
443
444static int mn_itree_invalidate(struct mmu_notifier_subscriptions *subscriptions,
445 const struct mmu_notifier_range *range)
446{
447 struct mmu_interval_notifier *interval_sub;
448 unsigned long cur_seq;
449
450 for (interval_sub =
451 mn_itree_inv_start_range(subscriptions, range, &cur_seq);
452 interval_sub;
453 interval_sub = mn_itree_inv_next(interval_sub, range)) {
454 bool ret;
455
456 ret = interval_sub->ops->invalidate(interval_sub, range,
457 cur_seq);
458 if (!ret) {
459 if (WARN_ON(mmu_notifier_range_blockable(range)))
460 continue;
461 goto out_would_block;
462 }
463 }
464 return 0;
465
466out_would_block:
467 /*
468 * On -EAGAIN the non-blocking caller is not allowed to call
469 * invalidate_range_end()
470 */
471 mn_itree_inv_end(subscriptions);
472 return -EAGAIN;
473}
474
475static int mn_hlist_invalidate_range_start(
476 struct mmu_notifier_subscriptions *subscriptions,
477 struct mmu_notifier_range *range)
478{
479 struct mmu_notifier *subscription;
480 int ret = 0;
481 int id;
482
483 id = srcu_read_lock(&srcu);
484 hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
485 srcu_read_lock_held(&srcu)) {
486 const struct mmu_notifier_ops *ops = subscription->ops;
487
488 if (ops->invalidate_range_start) {
489 int _ret;
490
491 if (!mmu_notifier_range_blockable(range))
492 non_block_start();
493 _ret = ops->invalidate_range_start(subscription, range);
494 if (!mmu_notifier_range_blockable(range))
495 non_block_end();
496 if (_ret) {
497 pr_info("%pS callback failed with %d in %sblockable context.\n",
498 ops->invalidate_range_start, _ret,
499 !mmu_notifier_range_blockable(range) ?
500 "non-" :
501 "");
502 WARN_ON(mmu_notifier_range_blockable(range) ||
503 _ret != -EAGAIN);
504 ret = _ret;
505 }
506 }
507 }
508 srcu_read_unlock(&srcu, id);
509
510 return ret;
511}
512
513int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
514{
515 struct mmu_notifier_subscriptions *subscriptions =
516 range->mm->notifier_subscriptions;
517 int ret;
518
519 if (subscriptions->has_itree) {
520 ret = mn_itree_invalidate(subscriptions, range);
521 if (ret)
522 return ret;
523 }
524 if (!hlist_empty(&subscriptions->list))
525 return mn_hlist_invalidate_range_start(subscriptions, range);
526 return 0;
527}
528
529static void
530mn_hlist_invalidate_end(struct mmu_notifier_subscriptions *subscriptions,
531 struct mmu_notifier_range *range, bool only_end)
532{
533 struct mmu_notifier *subscription;
534 int id;
535
536 id = srcu_read_lock(&srcu);
537 hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
538 srcu_read_lock_held(&srcu)) {
		 * Call invalidate_range() here too, to avoid the need for the
		 * subsystem to register an invalidate_range_end() call-back
		 * when it already has invalidate_range(). Usually a subsystem
		 * registers either invalidate_range_start()/end() or
		 * invalidate_range(), so this adds no overhead beyond the
		 * pointer check.
		 *
		 * We skip the call to invalidate_range() if we know it is
		 * safe, i.e. the call site used
		 * mmu_notifier_invalidate_range_only_end(), which is safe when
		 * a call to invalidate_range() has already happened under the
		 * page table lock.
550 * already happen under page table lock.
551 */
552 if (!only_end && subscription->ops->invalidate_range)
553 subscription->ops->invalidate_range(subscription,
554 range->mm,
555 range->start,
556 range->end);
557 if (subscription->ops->invalidate_range_end) {
558 if (!mmu_notifier_range_blockable(range))
559 non_block_start();
560 subscription->ops->invalidate_range_end(subscription,
561 range);
562 if (!mmu_notifier_range_blockable(range))
563 non_block_end();
564 }
565 }
566 srcu_read_unlock(&srcu, id);
567}
568
569void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range,
570 bool only_end)
571{
572 struct mmu_notifier_subscriptions *subscriptions =
573 range->mm->notifier_subscriptions;
574
575 lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
576 if (subscriptions->has_itree)
577 mn_itree_inv_end(subscriptions);
578
579 if (!hlist_empty(&subscriptions->list))
580 mn_hlist_invalidate_end(subscriptions, range, only_end);
581 lock_map_release(&__mmu_notifier_invalidate_range_start_map);
582}
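
/*
 * For reference, a core-mm caller typically brackets PTE changes with the
 * wrappers from mmu_notifier.h. A simplified sketch; the exact
 * mmu_notifier_range_init() arguments (event type, flags, vma) depend on the
 * call site:
 *
 *	struct mmu_notifier_range range;
 *
 *	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
 *				start, end);
 *	mmu_notifier_invalidate_range_start(&range);
 *	... clear or modify the PTEs under the page table lock ...
 *	mmu_notifier_invalidate_range_end(&range);
 */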
583
584void __mmu_notifier_invalidate_range(struct mm_struct *mm,
585 unsigned long start, unsigned long end)
586{
587 struct mmu_notifier *subscription;
588 int id;
589
590 id = srcu_read_lock(&srcu);
591 hlist_for_each_entry_rcu(subscription,
592 &mm->notifier_subscriptions->list, hlist,
593 srcu_read_lock_held(&srcu)) {
594 if (subscription->ops->invalidate_range)
595 subscription->ops->invalidate_range(subscription, mm,
596 start, end);
597 }
598 srcu_read_unlock(&srcu, id);
599}
600
601/*
602 * Same as mmu_notifier_register but here the caller must hold the mmap_lock in
 * write mode. A NULL subscription signals that the notifier is being
 * registered for itree mode.
605 */
606int __mmu_notifier_register(struct mmu_notifier *subscription,
607 struct mm_struct *mm)
608{
609 struct mmu_notifier_subscriptions *subscriptions = NULL;
610 int ret;
611
612 mmap_assert_write_locked(mm);
613 BUG_ON(atomic_read(&mm->mm_users) <= 0);
614
615 if (IS_ENABLED(CONFIG_LOCKDEP)) {
616 fs_reclaim_acquire(GFP_KERNEL);
617 lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
618 lock_map_release(&__mmu_notifier_invalidate_range_start_map);
619 fs_reclaim_release(GFP_KERNEL);
620 }
621
622 if (!mm->notifier_subscriptions) {
623 /*
624 * kmalloc cannot be called under mm_take_all_locks(), but we
625 * know that mm->notifier_subscriptions can't change while we
626 * hold the write side of the mmap_lock.
627 */
628 subscriptions = kzalloc(
629 sizeof(struct mmu_notifier_subscriptions), GFP_KERNEL);
630 if (!subscriptions)
631 return -ENOMEM;
632
633 INIT_HLIST_HEAD(&subscriptions->list);
634 spin_lock_init(&subscriptions->lock);
635 subscriptions->invalidate_seq = 2;
636 subscriptions->itree = RB_ROOT_CACHED;
637 init_waitqueue_head(&subscriptions->wq);
638 INIT_HLIST_HEAD(&subscriptions->deferred_list);
639 }
640
641 ret = mm_take_all_locks(mm);
642 if (unlikely(ret))
643 goto out_clean;
644
645 /*
646 * Serialize the update against mmu_notifier_unregister. A
647 * side note: mmu_notifier_release can't run concurrently with
648 * us because we hold the mm_users pin (either implicitly as
649 * current->mm or explicitly with get_task_mm() or similar).
650 * We can't race against any other mmu notifier method either
651 * thanks to mm_take_all_locks().
652 *
653 * release semantics on the initialization of the
654 * mmu_notifier_subscriptions's contents are provided for unlocked
655 * readers. acquire can only be used while holding the mmgrab or
656 * mmget, and is safe because once created the
657 * mmu_notifier_subscriptions is not freed until the mm is destroyed.
658 * As above, users holding the mmap_lock or one of the
659 * mm_take_all_locks() do not need to use acquire semantics.
660 */
661 if (subscriptions)
662 smp_store_release(&mm->notifier_subscriptions, subscriptions);
663
664 if (subscription) {
665 /* Pairs with the mmdrop in mmu_notifier_unregister_* */
666 mmgrab(mm);
667 subscription->mm = mm;
668 subscription->users = 1;
669
670 spin_lock(&mm->notifier_subscriptions->lock);
671 hlist_add_head_rcu(&subscription->hlist,
672 &mm->notifier_subscriptions->list);
673 spin_unlock(&mm->notifier_subscriptions->lock);
674 } else
675 mm->notifier_subscriptions->has_itree = true;
676
677 mm_drop_all_locks(mm);
678 BUG_ON(atomic_read(&mm->mm_users) <= 0);
679 return 0;
680
681out_clean:
682 kfree(subscriptions);
683 return ret;
684}
685EXPORT_SYMBOL_GPL(__mmu_notifier_register);
686
687/**
688 * mmu_notifier_register - Register a notifier on a mm
689 * @subscription: The notifier to attach
690 * @mm: The mm to attach the notifier to
691 *
692 * Must not hold mmap_lock nor any other VM related lock when calling
693 * this registration function. Must also ensure mm_users can't go down
694 * to zero while this runs to avoid races with mmu_notifier_release,
695 * so mm has to be current->mm or the mm should be pinned safely such
696 * as with get_task_mm(). If the mm is not current->mm, the mm_users
697 * pin should be released by calling mmput after mmu_notifier_register
698 * returns.
699 *
 * mmu_notifier_unregister() or mmu_notifier_put() must always be called to
701 * unregister the notifier.
702 *
 * While the caller holds a mmu_notifier get, the subscription->mm pointer will
 * remain valid, and can be converted to an active mm pointer via
 * mmget_not_zero().
705 */
706int mmu_notifier_register(struct mmu_notifier *subscription,
707 struct mm_struct *mm)
708{
709 int ret;
710
711 mmap_write_lock(mm);
712 ret = __mmu_notifier_register(subscription, mm);
713 mmap_write_unlock(mm);
714 return ret;
715}
716EXPORT_SYMBOL_GPL(mmu_notifier_register);
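
/*
 * A minimal usage sketch, assuming a hypothetical driver context that embeds
 * the subscription (the ops and the embedding structure are illustrative
 * placeholders, not definitions from this file):
 *
 *	static const struct mmu_notifier_ops driver_mn_ops = {
 *		.release		= driver_mn_release,
 *		.invalidate_range_start	= driver_mn_invalidate_range_start,
 *	};
 *
 *	ctx->notifier.ops = &driver_mn_ops;
 *	ret = mmu_notifier_register(&ctx->notifier, current->mm);
 *	if (ret)
 *		return ret;
 *	...
 *	mmu_notifier_unregister(&ctx->notifier, current->mm);
 */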
717
718static struct mmu_notifier *
719find_get_mmu_notifier(struct mm_struct *mm, const struct mmu_notifier_ops *ops)
720{
721 struct mmu_notifier *subscription;
722
723 spin_lock(&mm->notifier_subscriptions->lock);
724 hlist_for_each_entry_rcu(subscription,
725 &mm->notifier_subscriptions->list, hlist,
726 lockdep_is_held(&mm->notifier_subscriptions->lock)) {
727 if (subscription->ops != ops)
728 continue;
729
730 if (likely(subscription->users != UINT_MAX))
731 subscription->users++;
732 else
733 subscription = ERR_PTR(-EOVERFLOW);
734 spin_unlock(&mm->notifier_subscriptions->lock);
735 return subscription;
736 }
737 spin_unlock(&mm->notifier_subscriptions->lock);
738 return NULL;
739}
740
741/**
742 * mmu_notifier_get_locked - Return the single struct mmu_notifier for
743 * the mm & ops
 * @ops: The operations struct being subscribed with
 * @mm: The mm to attach notifiers to
746 *
747 * This function either allocates a new mmu_notifier via
748 * ops->alloc_notifier(), or returns an already existing notifier on the
749 * list. The value of the ops pointer is used to determine when two notifiers
750 * are the same.
751 *
752 * Each call to mmu_notifier_get() must be paired with a call to
753 * mmu_notifier_put(). The caller must hold the write side of mm->mmap_lock.
754 *
 * While the caller holds a mmu_notifier get, the mm pointer will remain valid,
756 * and can be converted to an active mm pointer via mmget_not_zero().
757 */
758struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
759 struct mm_struct *mm)
760{
761 struct mmu_notifier *subscription;
762 int ret;
763
764 mmap_assert_write_locked(mm);
765
766 if (mm->notifier_subscriptions) {
767 subscription = find_get_mmu_notifier(mm, ops);
768 if (subscription)
769 return subscription;
770 }
771
772 subscription = ops->alloc_notifier(mm);
773 if (IS_ERR(subscription))
774 return subscription;
775 subscription->ops = ops;
776 ret = __mmu_notifier_register(subscription, mm);
777 if (ret)
778 goto out_free;
779 return subscription;
780out_free:
781 subscription->ops->free_notifier(subscription);
782 return ERR_PTR(ret);
783}
784EXPORT_SYMBOL_GPL(mmu_notifier_get_locked);
785
786/* this is called after the last mmu_notifier_unregister() returned */
787void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
788{
789 BUG_ON(!hlist_empty(&mm->notifier_subscriptions->list));
790 kfree(mm->notifier_subscriptions);
791 mm->notifier_subscriptions = LIST_POISON1; /* debug */
792}
793
794/*
795 * This releases the mm_count pin automatically and frees the mm
796 * structure if it was the last user of it. It serializes against
797 * running mmu notifiers with SRCU and against mmu_notifier_unregister
798 * with the unregister lock + SRCU. All sptes must be dropped before
799 * calling mmu_notifier_unregister. ->release or any other notifier
800 * method may be invoked concurrently with mmu_notifier_unregister,
801 * and only after mmu_notifier_unregister returned we're guaranteed
802 * that ->release or any other method can't run anymore.
803 */
804void mmu_notifier_unregister(struct mmu_notifier *subscription,
805 struct mm_struct *mm)
806{
807 BUG_ON(atomic_read(&mm->mm_count) <= 0);
808
809 if (!hlist_unhashed(&subscription->hlist)) {
810 /*
811 * SRCU here will force exit_mmap to wait for ->release to
812 * finish before freeing the pages.
813 */
814 int id;
815
816 id = srcu_read_lock(&srcu);
817 /*
818 * exit_mmap will block in mmu_notifier_release to guarantee
819 * that ->release is called before freeing the pages.
820 */
821 if (subscription->ops->release)
822 subscription->ops->release(subscription, mm);
823 srcu_read_unlock(&srcu, id);
824
825 spin_lock(&mm->notifier_subscriptions->lock);
826 /*
827 * Can not use list_del_rcu() since __mmu_notifier_release
828 * can delete it before we hold the lock.
829 */
830 hlist_del_init_rcu(&subscription->hlist);
831 spin_unlock(&mm->notifier_subscriptions->lock);
832 }
833
834 /*
835 * Wait for any running method to finish, of course including
836 * ->release if it was run by mmu_notifier_release instead of us.
837 */
838 synchronize_srcu(&srcu);
839
840 BUG_ON(atomic_read(&mm->mm_count) <= 0);
841
842 mmdrop(mm);
843}
844EXPORT_SYMBOL_GPL(mmu_notifier_unregister);
845
846static void mmu_notifier_free_rcu(struct rcu_head *rcu)
847{
848 struct mmu_notifier *subscription =
849 container_of(rcu, struct mmu_notifier, rcu);
850 struct mm_struct *mm = subscription->mm;
851
852 subscription->ops->free_notifier(subscription);
853 /* Pairs with the get in __mmu_notifier_register() */
854 mmdrop(mm);
855}
856
857/**
858 * mmu_notifier_put - Release the reference on the notifier
859 * @subscription: The notifier to act on
860 *
 * This function must be paired with each mmu_notifier_get(); it releases the
 * reference obtained by the get. If this is the last reference then the
 * process to free the notifier will run asynchronously.
864 *
865 * Unlike mmu_notifier_unregister() the get/put flow only calls ops->release
866 * when the mm_struct is destroyed. Instead free_notifier is always called to
867 * release any resources held by the user.
868 *
869 * As ops->release is not guaranteed to be called, the user must ensure that
870 * all sptes are dropped, and no new sptes can be established before
871 * mmu_notifier_put() is called.
872 *
873 * This function can be called from the ops->release callback, however the
874 * caller must still ensure it is called pairwise with mmu_notifier_get().
875 *
876 * Modules calling this function must call mmu_notifier_synchronize() in
877 * their __exit functions to ensure the async work is completed.
878 */
879void mmu_notifier_put(struct mmu_notifier *subscription)
880{
881 struct mm_struct *mm = subscription->mm;
882
883 spin_lock(&mm->notifier_subscriptions->lock);
884 if (WARN_ON(!subscription->users) || --subscription->users)
885 goto out_unlock;
886 hlist_del_init_rcu(&subscription->hlist);
887 spin_unlock(&mm->notifier_subscriptions->lock);
888
889 call_srcu(&srcu, &subscription->rcu, mmu_notifier_free_rcu);
890 return;
891
892out_unlock:
893 spin_unlock(&mm->notifier_subscriptions->lock);
894}
895EXPORT_SYMBOL_GPL(mmu_notifier_put);
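
/*
 * A sketch of the get/put flow described above, assuming an illustrative
 * driver_mn_ops that provides alloc_notifier()/free_notifier() (the driver
 * names are assumptions):
 *
 *	subscription = mmu_notifier_get(&driver_mn_ops, current->mm);
 *	if (IS_ERR(subscription))
 *		return PTR_ERR(subscription);
 *	...
 *	mmu_notifier_put(subscription);
 *
 * and, in the module's __exit path:
 *
 *	mmu_notifier_synchronize();
 */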
896
897static int __mmu_interval_notifier_insert(
898 struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
899 struct mmu_notifier_subscriptions *subscriptions, unsigned long start,
900 unsigned long length, const struct mmu_interval_notifier_ops *ops)
901{
902 interval_sub->mm = mm;
903 interval_sub->ops = ops;
904 RB_CLEAR_NODE(&interval_sub->interval_tree.rb);
905 interval_sub->interval_tree.start = start;
906 /*
907 * Note that the representation of the intervals in the interval tree
908 * considers the ending point as contained in the interval.
909 */
910 if (length == 0 ||
911 check_add_overflow(start, length - 1,
912 &interval_sub->interval_tree.last))
913 return -EOVERFLOW;
914
915 /* Must call with a mmget() held */
916 if (WARN_ON(atomic_read(&mm->mm_count) <= 0))
917 return -EINVAL;
918
919 /* pairs with mmdrop in mmu_interval_notifier_remove() */
920 mmgrab(mm);
921
922 /*
923 * If some invalidate_range_start/end region is going on in parallel
924 * we don't know what VA ranges are affected, so we must assume this
925 * new range is included.
926 *
927 * If the itree is invalidating then we are not allowed to change
928 * it. Retrying until invalidation is done is tricky due to the
 * possibility of livelock; instead, defer the add to
930 * mn_itree_inv_end() so this algorithm is deterministic.
931 *
932 * In all cases the value for the interval_sub->invalidate_seq should be
933 * odd, see mmu_interval_read_begin()
934 */
935 spin_lock(&subscriptions->lock);
936 if (subscriptions->active_invalidate_ranges) {
937 if (mn_itree_is_invalidating(subscriptions))
938 hlist_add_head(&interval_sub->deferred_item,
939 &subscriptions->deferred_list);
940 else {
941 subscriptions->invalidate_seq |= 1;
942 interval_tree_insert(&interval_sub->interval_tree,
943 &subscriptions->itree);
944 }
945 interval_sub->invalidate_seq = subscriptions->invalidate_seq;
946 } else {
947 WARN_ON(mn_itree_is_invalidating(subscriptions));
948 /*
949 * The starting seq for a subscription not under invalidation
950 * should be odd, not equal to the current invalidate_seq and
951 * invalidate_seq should not 'wrap' to the new seq any time
952 * soon.
953 */
954 interval_sub->invalidate_seq =
955 subscriptions->invalidate_seq - 1;
956 interval_tree_insert(&interval_sub->interval_tree,
957 &subscriptions->itree);
958 }
959 spin_unlock(&subscriptions->lock);
960 return 0;
961}
962
963/**
964 * mmu_interval_notifier_insert - Insert an interval notifier
965 * @interval_sub: Interval subscription to register
966 * @start: Starting virtual address to monitor
967 * @length: Length of the range to monitor
968 * @mm: mm_struct to attach to
969 * @ops: Interval notifier operations to be called on matching events
970 *
971 * This function subscribes the interval notifier for notifications from the
972 * mm. Upon return the ops related to mmu_interval_notifier will be called
973 * whenever an event that intersects with the given range occurs.
974 *
 * Upon return, the interval notifier may not be present in the interval tree yet.
976 * The caller must use the normal interval notifier read flow via
977 * mmu_interval_read_begin() to establish SPTEs for this range.
978 */
979int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
980 struct mm_struct *mm, unsigned long start,
981 unsigned long length,
982 const struct mmu_interval_notifier_ops *ops)
983{
984 struct mmu_notifier_subscriptions *subscriptions;
985 int ret;
986
987 might_lock(&mm->mmap_lock);
988
989 subscriptions = smp_load_acquire(&mm->notifier_subscriptions);
990 if (!subscriptions || !subscriptions->has_itree) {
991 ret = mmu_notifier_register(NULL, mm);
992 if (ret)
993 return ret;
994 subscriptions = mm->notifier_subscriptions;
995 }
996 return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
997 start, length, ops);
998}
999EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert);
1000
1001int mmu_interval_notifier_insert_locked(
1002 struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
1003 unsigned long start, unsigned long length,
1004 const struct mmu_interval_notifier_ops *ops)
1005{
1006 struct mmu_notifier_subscriptions *subscriptions =
1007 mm->notifier_subscriptions;
1008 int ret;
1009
1010 mmap_assert_write_locked(mm);
1011
1012 if (!subscriptions || !subscriptions->has_itree) {
1013 ret = __mmu_notifier_register(NULL, mm);
1014 if (ret)
1015 return ret;
1016 subscriptions = mm->notifier_subscriptions;
1017 }
1018 return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
1019 start, length, ops);
1020}
1021EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert_locked);
1022
1023/**
 * mmu_interval_notifier_remove - Remove an interval notifier
1025 * @interval_sub: Interval subscription to unregister
1026 *
1027 * This function must be paired with mmu_interval_notifier_insert(). It cannot
1028 * be called from any ops callback.
1029 *
1030 * Once this returns ops callbacks are no longer running on other CPUs and
1031 * will not be called in future.
1032 */
1033void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub)
1034{
1035 struct mm_struct *mm = interval_sub->mm;
1036 struct mmu_notifier_subscriptions *subscriptions =
1037 mm->notifier_subscriptions;
1038 unsigned long seq = 0;
1039
1040 might_sleep();
1041
1042 spin_lock(&subscriptions->lock);
1043 if (mn_itree_is_invalidating(subscriptions)) {
1044 /*
1045 * remove is being called after insert put this on the
1046 * deferred list, but before the deferred list was processed.
1047 */
1048 if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb)) {
1049 hlist_del(&interval_sub->deferred_item);
1050 } else {
1051 hlist_add_head(&interval_sub->deferred_item,
1052 &subscriptions->deferred_list);
1053 seq = subscriptions->invalidate_seq;
1054 }
1055 } else {
1056 WARN_ON(RB_EMPTY_NODE(&interval_sub->interval_tree.rb));
1057 interval_tree_remove(&interval_sub->interval_tree,
1058 &subscriptions->itree);
1059 }
1060 spin_unlock(&subscriptions->lock);
1061
1062 /*
	 * The possible sleep waiting for an in-progress invalidation requires
	 * that the caller not hold any locks taken by the invalidation
	 * callbacks.
1065 */
1066 lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
1067 lock_map_release(&__mmu_notifier_invalidate_range_start_map);
1068 if (seq)
1069 wait_event(subscriptions->wq,
1070 READ_ONCE(subscriptions->invalidate_seq) != seq);
1071
1072 /* pairs with mmgrab in mmu_interval_notifier_insert() */
1073 mmdrop(mm);
1074}
1075EXPORT_SYMBOL_GPL(mmu_interval_notifier_remove);
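
/*
 * Putting the interval API together, a sketch of one subscription lifecycle;
 * the driver structure, lock and ops names are illustrative assumptions, not
 * definitions from this file:
 *
 *	static bool driver_invalidate(struct mmu_interval_notifier *sub,
 *				      const struct mmu_notifier_range *range,
 *				      unsigned long cur_seq)
 *	{
 *		if (mmu_notifier_range_blockable(range))
 *			mutex_lock(&driver->update_lock);
 *		else if (!mutex_trylock(&driver->update_lock))
 *			return false;
 *		mmu_interval_set_seq(sub, cur_seq);
 *		... tear down the SPTEs covering the overlap with range ...
 *		mutex_unlock(&driver->update_lock);
 *		return true;
 *	}
 *
 *	static const struct mmu_interval_notifier_ops driver_itree_ops = {
 *		.invalidate = driver_invalidate,
 *	};
 *
 *	mmu_interval_notifier_insert(&sub, mm, start, length, &driver_itree_ops);
 *	... fault/fill using mmu_interval_read_begin()/read_retry() ...
 *	mmu_interval_notifier_remove(&sub);
 */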
1076
1077/**
1078 * mmu_notifier_synchronize - Ensure all mmu_notifiers are freed
1079 *
 * This function ensures that all outstanding async SRCU work from
1081 * mmu_notifier_put() is completed. After it returns any mmu_notifier_ops
1082 * associated with an unused mmu_notifier will no longer be called.
1083 *
 * Before using this, the caller must ensure that all of its mmu_notifiers have
 * been fully released via mmu_notifier_put().
1086 *
1087 * Modules using the mmu_notifier_put() API should call this in their __exit
1088 * function to avoid module unloading races.
1089 */
1090void mmu_notifier_synchronize(void)
1091{
1092 synchronize_srcu(&srcu);
1093}
1094EXPORT_SYMBOL_GPL(mmu_notifier_synchronize);
1095
1096bool
1097mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range)
1098{
1099 if (!range->vma || range->event != MMU_NOTIFY_PROTECTION_VMA)
1100 return false;
	/* Return true if the vma still has the read flag set. */
1102 return range->vma->vm_flags & VM_READ;
1103}
1104EXPORT_SYMBOL_GPL(mmu_notifier_range_update_to_read_only);