v4.6

/*
 *  linux/mm/mmu_notifier.c
 *
 *  Copyright (C) 2008  Qumranet, Inc.
 *  Copyright (C) 2008  SGI
 *             Christoph Lameter <cl@linux.com>
 *
 *  This work is licensed under the terms of the GNU GPL, version 2. See
 *  the COPYING file in the top-level directory.
 */

#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/slab.h>

/* global SRCU for all MMs */
static struct srcu_struct srcu;

/*
 * This function allows the mmu_notifier::release callback to delay a call to
 * a function that will free appropriate resources. The function must be
 * quick and must not block.
 */
void mmu_notifier_call_srcu(struct rcu_head *rcu,
			    void (*func)(struct rcu_head *rcu))
{
	call_srcu(&srcu, rcu, func);
}
EXPORT_SYMBOL_GPL(mmu_notifier_call_srcu);
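
/*
 * Example (not part of this file): a minimal sketch of how a driver's
 * ->release callback might use mmu_notifier_call_srcu() to defer work
 * until all SRCU readers are done. struct my_notifier, my_free_state()
 * and my_free_page_tables() are hypothetical names, not kernel APIs.
 */
struct my_notifier {
	struct mmu_notifier mn;
	struct rcu_head rcu;
	/* ... hypothetical driver state (shadow page tables etc.) ... */
};

static void my_free_state(struct rcu_head *rcu)
{
	/* Runs after the SRCU grace period, outside ->release context. */
	my_free_page_tables(container_of(rcu, struct my_notifier, rcu));
}

static void my_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct my_notifier *p = container_of(mn, struct my_notifier, mn);

	/* ->release must be quick and must not block: defer the real work. */
	mmu_notifier_call_srcu(&p->rcu, my_free_state);
}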

void mmu_notifier_synchronize(void)
{
	/* Wait for any running method to finish. */
	srcu_barrier(&srcu);
}
EXPORT_SYMBOL_GPL(mmu_notifier_synchronize);

/*
 * This function can't run concurrently against mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel despite there being no task using this mm any more,
 * through the vmas outside of the exit_mmap context, such as with
 * vmtruncate. This serializes against mmu_notifier_unregister with
 * the mmu_notifier_mm->lock in addition to SRCU and it serializes
 * against the other mmu notifiers with SRCU. struct mmu_notifier_mm
 * can't go away from under us as exit_mmap holds an mm_count pin
 * itself.
 */
void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier *mn;
	int id;

	/*
	 * SRCU here will block mmu_notifier_unregister until
	 * ->release returns.
	 */
	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist)
		/*
		 * If ->release runs before mmu_notifier_unregister it must be
		 * handled, as it's the only way for the driver to flush all
		 * existing sptes and stop the driver from establishing any more
		 * sptes before all the pages in the mm are freed.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);

	spin_lock(&mm->mmu_notifier_mm->lock);
	while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
		mn = hlist_entry(mm->mmu_notifier_mm->list.first,
				 struct mmu_notifier,
				 hlist);
		/*
		 * We arrived before mmu_notifier_unregister so
		 * mmu_notifier_unregister will do nothing other than to wait
		 * for ->release to finish and for mmu_notifier_unregister to
		 * return.
		 */
		hlist_del_init_rcu(&mn->hlist);
	}
	spin_unlock(&mm->mmu_notifier_mm->lock);
	srcu_read_unlock(&srcu, id);

	/*
	 * synchronize_srcu here prevents mmu_notifier_release from returning to
	 * exit_mmap (which would proceed with freeing all pages in the mm)
	 * until the ->release method returns, if it was invoked by
	 * mmu_notifier_unregister.
	 *
	 * The mmu_notifier_mm can't go away from under us because one mm_count
	 * is held by exit_mmap.
	 */
	synchronize_srcu(&srcu);
}
/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young can
 * unmap the address and return 1 or 0 depending on whether the mapping
 * previously existed or not.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
					unsigned long start,
					unsigned long end)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_flush_young)
			young |= mn->ops->clear_flush_young(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}
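
/*
 * Example (not part of this file): a hypothetical driver whose hardware
 * lacks an accessed ("young") bit can implement ->clear_flush_young as the
 * comment above suggests: tear down the secondary mapping and report
 * whether one existed. my_unmap_range() is an assumed helper.
 */
static int my_clear_flush_young(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	/* Hypothetical: returns 1 if a secondary mapping existed, else 0. */
	return my_unmap_range(start, end);
}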

int __mmu_notifier_clear_young(struct mm_struct *mm,
			       unsigned long start,
			       unsigned long end)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_young)
			young |= mn->ops->clear_young(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

int __mmu_notifier_test_young(struct mm_struct *mm,
			      unsigned long address)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->test_young) {
			young = mn->ops->test_young(mn, mm, address);
			if (young)
				break;
		}
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
			       pte_t pte)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->change_pte)
			mn->ops->change_pte(mn, mm, address, pte);
	}
	srcu_read_unlock(&srcu, id);
}

void __mmu_notifier_invalidate_page(struct mm_struct *mm,
					  unsigned long address)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_page)
			mn->ops->invalidate_page(mn, mm, address);
	}
	srcu_read_unlock(&srcu, id);
}

void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_start)
			mn->ops->invalidate_range_start(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_start);

void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		/*
		 * Call invalidate_range here too, to avoid the need for the
		 * subsystem to register an invalidate_range_end call-back
		 * when there is invalidate_range already. Usually a
		 * subsystem registers either invalidate_range_start()/end() or
		 * invalidate_range(), so this will be no additional overhead
		 * (besides the pointer check).
		 */
		if (mn->ops->invalidate_range)
			mn->ops->invalidate_range(mn, mm, start, end);
		if (mn->ops->invalidate_range_end)
			mn->ops->invalidate_range_end(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_end);

void __mmu_notifier_invalidate_range(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range)
			mn->ops->invalidate_range(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range);
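
/*
 * Example (not part of this file): core mm code reaches the hooks above
 * through inline wrappers in <linux/mmu_notifier.h>, bracketing the page
 * table work. A minimal sketch of the calling pattern, with the
 * hypothetical my_zap_page_range() standing in for real teardown:
 */
static void my_zap(struct mm_struct *mm, unsigned long start,
		   unsigned long end)
{
	mmu_notifier_invalidate_range_start(mm, start, end);
	my_zap_page_range(mm, start, end);	/* hypothetical teardown */
	mmu_notifier_invalidate_range_end(mm, start, end);
}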

static int do_mmu_notifier_register(struct mmu_notifier *mn,
				    struct mm_struct *mm,
				    int take_mmap_sem)
{
	struct mmu_notifier_mm *mmu_notifier_mm;
	int ret;

	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	/*
	 * Verify that mmu_notifier_init() has already run and the global
	 * srcu is initialized.
	 */
	BUG_ON(!srcu.per_cpu_ref);

	ret = -ENOMEM;
	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
	if (unlikely(!mmu_notifier_mm))
		goto out;

	if (take_mmap_sem)
		down_write(&mm->mmap_sem);
	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
		goto out_clean;

	if (!mm_has_notifiers(mm)) {
		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
		spin_lock_init(&mmu_notifier_mm->lock);

		mm->mmu_notifier_mm = mmu_notifier_mm;
		mmu_notifier_mm = NULL;
	}
	atomic_inc(&mm->mm_count);

	/*
	 * Serialize the update against mmu_notifier_unregister. A
	 * side note: mmu_notifier_release can't run concurrently with
	 * us because we hold the mm_users pin (either implicitly as
	 * current->mm or explicitly with get_task_mm() or similar).
	 * We can't race against any other mmu notifier method either
	 * thanks to mm_take_all_locks().
	 */
	spin_lock(&mm->mmu_notifier_mm->lock);
	hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	mm_drop_all_locks(mm);
out_clean:
	if (take_mmap_sem)
		up_write(&mm->mmap_sem);
	kfree(mmu_notifier_mm);
out:
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return ret;
}

/*
 * Must not hold mmap_sem nor any other VM related lock when calling
 * this registration function. Must also ensure mm_users can't go down
 * to zero while this runs to avoid races with mmu_notifier_release,
 * so mm has to be current->mm or the mm should be pinned safely such
 * as with get_task_mm(). If the mm is not current->mm, the mm_users
 * pin should be released by calling mmput after mmu_notifier_register
 * returns. mmu_notifier_unregister must always be called to
 * unregister the notifier. mm_count is automatically pinned to allow
 * mmu_notifier_unregister to safely run at any time later, before or
 * after exit_mmap. ->release will always be called before exit_mmap
 * frees the pages.
 */
int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 1);
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);
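
/*
 * Example (not part of this file): a minimal registration sketch wiring up
 * the hypothetical callbacks from the earlier sketches. Only the callbacks
 * a driver needs have to be filled in; NULL entries are skipped by the
 * dispatch loops above.
 */
static const struct mmu_notifier_ops my_ops = {
	.release		= my_release,		/* sketched above */
	.clear_flush_young	= my_clear_flush_young,	/* sketched above */
};

static int my_attach(void)
{
	struct my_notifier *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (!p)
		return -ENOMEM;
	p->mn.ops = &my_ops;
	/* current->mm carries an implicit mm_users pin for us. */
	return mmu_notifier_register(&p->mn, current->mm);
}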

/*
 * Same as mmu_notifier_register but here the caller must hold the
 * mmap_sem in write mode.
 */
int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 0);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);

/* this is called after the last mmu_notifier_unregister() returned */
void __mmu_notifier_mm_destroy(struct mm_struct *mm)
{
	BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list));
	kfree(mm->mmu_notifier_mm);
	mm->mmu_notifier_mm = LIST_POISON1; /* debug */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
 * running mmu notifiers with SRCU and against mmu_notifier_unregister
 * with the unregister lock + SRCU. All sptes must be dropped before
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister,
 * and only after mmu_notifier_unregister returned we're guaranteed
 * that ->release or any other method can't run anymore.
 */
void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	if (!hlist_unhashed(&mn->hlist)) {
		/*
		 * SRCU here will force exit_mmap to wait for ->release to
		 * finish before freeing the pages.
		 */
		int id;

		id = srcu_read_lock(&srcu);
		/*
		 * exit_mmap will block in mmu_notifier_release to guarantee
		 * that ->release is called before freeing the pages.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);
		srcu_read_unlock(&srcu, id);

		spin_lock(&mm->mmu_notifier_mm->lock);
		/*
		 * Can not use list_del_rcu() since __mmu_notifier_release
		 * can delete it before we hold the lock.
		 */
		hlist_del_init_rcu(&mn->hlist);
		spin_unlock(&mm->mmu_notifier_mm->lock);
	}

	/*
	 * Wait for any running method to finish, of course including
	 * ->release if it was run by mmu_notifier_release instead of us.
	 */
	synchronize_srcu(&srcu);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);
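
/*
 * Example (not part of this file): teardown for the sketch above. All
 * secondary PTEs must be gone before unregistering, per the comment on
 * mmu_notifier_unregister(); my_flush_all_sptes() is hypothetical.
 */
static void my_detach(struct my_notifier *p, struct mm_struct *mm)
{
	my_flush_all_sptes();	/* hypothetical: drop every secondary pte */
	mmu_notifier_unregister(&p->mn, mm);
	/* After this returns, no notifier method can still be running. */
}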

/*
 * Same as mmu_notifier_unregister but no callback and no srcu synchronization.
 */
void mmu_notifier_unregister_no_release(struct mmu_notifier *mn,
					struct mm_struct *mm)
{
	spin_lock(&mm->mmu_notifier_mm->lock);
	/*
	 * Can not use list_del_rcu() since __mmu_notifier_release
	 * can delete it before we hold the lock.
	 */
	hlist_del_init_rcu(&mn->hlist);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);
	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister_no_release);

static int __init mmu_notifier_init(void)
{
	return init_srcu_struct(&srcu);
}
subsys_initcall(mmu_notifier_init);
v5.9
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/mmu_notifier.c
 *
 *  Copyright (C) 2008  Qumranet, Inc.
 *  Copyright (C) 2008  SGI
 *             Christoph Lameter <cl@linux.com>
 */

#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/interval_tree.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

/* global SRCU for all MMs */
DEFINE_STATIC_SRCU(srcu);

#ifdef CONFIG_LOCKDEP
struct lockdep_map __mmu_notifier_invalidate_range_start_map = {
	.name = "mmu_notifier_invalidate_range_start"
};
#endif

/*
 * The mmu_notifier_subscriptions structure is allocated and installed in
 * mm->notifier_subscriptions inside the mm_take_all_locks() protected
 * critical section and it's released only when mm_count reaches zero
 * in mmdrop().
 */
struct mmu_notifier_subscriptions {
	/* all mmu notifiers registered in this mm are queued in this list */
	struct hlist_head list;
	bool has_itree;
	/* to serialize the list modifications and hlist_unhashed */
	spinlock_t lock;
	unsigned long invalidate_seq;
	unsigned long active_invalidate_ranges;
	struct rb_root_cached itree;
	wait_queue_head_t wq;
	struct hlist_head deferred_list;
};

/*
 * This is a collision-retry read-side/write-side 'lock', a lot like a
 * seqcount, however this allows multiple write-sides to hold it at
 * once. Conceptually the write side is protecting the values of the PTEs in
 * this mm, such that PTES cannot be read into SPTEs (shadow PTEs) while any
 * writer exists.
 *
 * Note that the core mm creates nested invalidate_range_start()/end() regions
 * within the same thread, and runs invalidate_range_start()/end() in parallel
 * on multiple CPUs. This is designed to not reduce concurrency or block
 * progress on the mm side.
 *
 * As a secondary function, holding the full write side also serves to prevent
 * writers for the itree, this is an optimization to avoid extra locking
 * during invalidate_range_start/end notifiers.
 *
 * The write side has two states, fully excluded:
 *  - mm->active_invalidate_ranges != 0
 *  - subscriptions->invalidate_seq & 1 == True (odd)
 *  - some range on the mm_struct is being invalidated
 *  - the itree is not allowed to change
 *
 * And partially excluded:
 *  - mm->active_invalidate_ranges != 0
 *  - subscriptions->invalidate_seq & 1 == False (even)
 *  - some range on the mm_struct is being invalidated
 *  - the itree is allowed to change
 *
 * Operations on notifier_subscriptions->invalidate_seq (under spinlock):
 *    seq |= 1  # Begin writing
 *    seq++     # Release the writing state
 *    seq & 1   # True if a writer exists
 *
 * The latter state avoids some expensive work on inv_end in the common case of
 * no mmu_interval_notifier monitoring the VA.
 */
static bool
mn_itree_is_invalidating(struct mmu_notifier_subscriptions *subscriptions)
{
	lockdep_assert_held(&subscriptions->lock);
	return subscriptions->invalidate_seq & 1;
}

static struct mmu_interval_notifier *
mn_itree_inv_start_range(struct mmu_notifier_subscriptions *subscriptions,
			 const struct mmu_notifier_range *range,
			 unsigned long *seq)
{
	struct interval_tree_node *node;
	struct mmu_interval_notifier *res = NULL;

	spin_lock(&subscriptions->lock);
	subscriptions->active_invalidate_ranges++;
	node = interval_tree_iter_first(&subscriptions->itree, range->start,
					range->end - 1);
	if (node) {
		subscriptions->invalidate_seq |= 1;
		res = container_of(node, struct mmu_interval_notifier,
				   interval_tree);
	}

	*seq = subscriptions->invalidate_seq;
	spin_unlock(&subscriptions->lock);
	return res;
}

static struct mmu_interval_notifier *
mn_itree_inv_next(struct mmu_interval_notifier *interval_sub,
		  const struct mmu_notifier_range *range)
{
	struct interval_tree_node *node;

	node = interval_tree_iter_next(&interval_sub->interval_tree,
				       range->start, range->end - 1);
	if (!node)
		return NULL;
	return container_of(node, struct mmu_interval_notifier, interval_tree);
}

static void mn_itree_inv_end(struct mmu_notifier_subscriptions *subscriptions)
{
	struct mmu_interval_notifier *interval_sub;
	struct hlist_node *next;

	spin_lock(&subscriptions->lock);
	if (--subscriptions->active_invalidate_ranges ||
	    !mn_itree_is_invalidating(subscriptions)) {
		spin_unlock(&subscriptions->lock);
		return;
	}

	/* Make invalidate_seq even */
	subscriptions->invalidate_seq++;

	/*
	 * The inv_end incorporates a deferred mechanism like rtnl_unlock().
	 * Adds and removes are queued until the final inv_end happens then
	 * they are progressed. This arrangement for tree updates is used to
	 * avoid using a blocking lock during invalidate_range_start.
	 */
	hlist_for_each_entry_safe(interval_sub, next,
				  &subscriptions->deferred_list,
				  deferred_item) {
		if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb))
			interval_tree_insert(&interval_sub->interval_tree,
					     &subscriptions->itree);
		else
			interval_tree_remove(&interval_sub->interval_tree,
					     &subscriptions->itree);
		hlist_del(&interval_sub->deferred_item);
	}
	spin_unlock(&subscriptions->lock);

	wake_up_all(&subscriptions->wq);
}

/**
 * mmu_interval_read_begin - Begin a read side critical section against a VA
 *                           range
 * @interval_sub: The interval subscription
 *
 * mmu_interval_read_begin()/mmu_interval_read_retry() implement a
 * collision-retry scheme similar to seqcount for the VA range under
 * subscription. If the mm invokes invalidation during the critical section
 * then mmu_interval_read_retry() will return true.
 *
 * This is useful to obtain shadow PTEs where teardown or setup of the SPTEs
 * require a blocking context.  The critical region formed by this can sleep,
 * and the required 'user_lock' can also be a sleeping lock.
 *
 * The caller is required to provide a 'user_lock' to serialize both teardown
 * and setup.
 *
 * The return value should be passed to mmu_interval_read_retry().
 */
unsigned long
mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub)
{
	struct mmu_notifier_subscriptions *subscriptions =
		interval_sub->mm->notifier_subscriptions;
	unsigned long seq;
	bool is_invalidating;

	/*
	 * If the subscription has a different seq value under the user_lock
	 * than we started with then it has collided.
	 *
	 * If the subscription currently has the same seq value as the
	 * subscriptions seq, then it is currently between
	 * invalidate_start/end and is colliding.
	 *
	 * The locking looks broadly like this:
	 *   mn_tree_invalidate_start():          mmu_interval_read_begin():
	 *                                         spin_lock
	 *                                          seq = READ_ONCE(interval_sub->invalidate_seq);
	 *                                          seq == subs->invalidate_seq
	 *                                         spin_unlock
	 *    spin_lock
	 *     seq = ++subscriptions->invalidate_seq
	 *    spin_unlock
	 *     op->invalidate_range():
	 *       user_lock
	 *        mmu_interval_set_seq()
	 *         interval_sub->invalidate_seq = seq
	 *       user_unlock
	 *
	 *                          [Required: mmu_interval_read_retry() == true]
	 *
	 *   mn_itree_inv_end():
	 *    spin_lock
	 *     seq = ++subscriptions->invalidate_seq
	 *    spin_unlock
	 *
	 *                                        user_lock
	 *                                         mmu_interval_read_retry():
	 *                                          interval_sub->invalidate_seq != seq
	 *                                        user_unlock
	 *
	 * Barriers are not needed here as any races here are closed by an
	 * eventual mmu_interval_read_retry(), which provides a barrier via the
	 * user_lock.
	 */
	spin_lock(&subscriptions->lock);
	/* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
	seq = READ_ONCE(interval_sub->invalidate_seq);
	is_invalidating = seq == subscriptions->invalidate_seq;
	spin_unlock(&subscriptions->lock);

	/*
	 * interval_sub->invalidate_seq must always be set to an odd value via
	 * mmu_interval_set_seq() using the provided cur_seq from
	 * mn_itree_inv_start_range(). This ensures that if seq does wrap we
	 * will always clear the below sleep in some reasonable time as
	 * subscriptions->invalidate_seq is even in the idle state.
	 */
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
	if (is_invalidating)
		wait_event(subscriptions->wq,
			   READ_ONCE(subscriptions->invalidate_seq) != seq);

	/*
	 * Notice that mmu_interval_read_retry() can already be true at this
	 * point, avoiding loops here allows the caller to provide a global
	 * time bound.
	 */

	return seq;
}
EXPORT_SYMBOL_GPL(mmu_interval_read_begin);
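
/*
 * Example (not part of this file): the consumer-side retry loop the
 * kernel-doc above describes, as a minimal sketch. my_driver_lock plays
 * the role of 'user_lock'; my_fault_pages() and my_establish_sptes() are
 * hypothetical helpers.
 */
static DEFINE_MUTEX(my_driver_lock);

static void my_populate_range(struct mmu_interval_notifier *interval_sub)
{
	unsigned long seq;

again:
	seq = mmu_interval_read_begin(interval_sub);

	my_fault_pages();		/* hypothetical, may sleep */

	mutex_lock(&my_driver_lock);
	if (mmu_interval_read_retry(interval_sub, seq)) {
		/* An invalidation collided; drop the lock and start over. */
		mutex_unlock(&my_driver_lock);
		goto again;
	}
	my_establish_sptes();		/* hypothetical */
	mutex_unlock(&my_driver_lock);
}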

static void mn_itree_release(struct mmu_notifier_subscriptions *subscriptions,
			     struct mm_struct *mm)
{
	struct mmu_notifier_range range = {
		.flags = MMU_NOTIFIER_RANGE_BLOCKABLE,
		.event = MMU_NOTIFY_RELEASE,
		.mm = mm,
		.start = 0,
		.end = ULONG_MAX,
	};
	struct mmu_interval_notifier *interval_sub;
	unsigned long cur_seq;
	bool ret;

	for (interval_sub =
		     mn_itree_inv_start_range(subscriptions, &range, &cur_seq);
	     interval_sub;
	     interval_sub = mn_itree_inv_next(interval_sub, &range)) {
		ret = interval_sub->ops->invalidate(interval_sub, &range,
						    cur_seq);
		WARN_ON(!ret);
	}

	mn_itree_inv_end(subscriptions);
}

/*
 * This function can't run concurrently against mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel despite there being no task using this mm any more,
 * through the vmas outside of the exit_mmap context, such as with
 * vmtruncate. This serializes against mmu_notifier_unregister with
 * the notifier_subscriptions->lock in addition to SRCU and it serializes
 * against the other mmu notifiers with SRCU. struct mmu_notifier_subscriptions
 * can't go away from under us as exit_mmap holds an mm_count pin
 * itself.
 */
static void mn_hlist_release(struct mmu_notifier_subscriptions *subscriptions,
			     struct mm_struct *mm)
{
	struct mmu_notifier *subscription;
	int id;

	/*
	 * SRCU here will block mmu_notifier_unregister until
	 * ->release returns.
	 */
	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu))
		/*
		 * If ->release runs before mmu_notifier_unregister it must be
		 * handled, as it's the only way for the driver to flush all
		 * existing sptes and stop the driver from establishing any more
		 * sptes before all the pages in the mm are freed.
		 */
		if (subscription->ops->release)
			subscription->ops->release(subscription, mm);

	spin_lock(&subscriptions->lock);
	while (unlikely(!hlist_empty(&subscriptions->list))) {
		subscription = hlist_entry(subscriptions->list.first,
					   struct mmu_notifier, hlist);
		/*
		 * We arrived before mmu_notifier_unregister so
		 * mmu_notifier_unregister will do nothing other than to wait
		 * for ->release to finish and for mmu_notifier_unregister to
		 * return.
		 */
		hlist_del_init_rcu(&subscription->hlist);
	}
	spin_unlock(&subscriptions->lock);
	srcu_read_unlock(&srcu, id);

	/*
	 * synchronize_srcu here prevents mmu_notifier_release from returning to
	 * exit_mmap (which would proceed with freeing all pages in the mm)
	 * until the ->release method returns, if it was invoked by
	 * mmu_notifier_unregister.
	 *
	 * The notifier_subscriptions can't go away from under us because
	 * one mm_count is held by exit_mmap.
	 */
	synchronize_srcu(&srcu);
}

void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier_subscriptions *subscriptions =
		mm->notifier_subscriptions;

	if (subscriptions->has_itree)
		mn_itree_release(subscriptions, mm);

	if (!hlist_empty(&subscriptions->list))
		mn_hlist_release(subscriptions, mm);
}

/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young can
 * unmap the address and return 1 or 0 depending on whether the mapping
 * previously existed or not.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
					unsigned long start,
					unsigned long end)
{
	struct mmu_notifier *subscription;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->clear_flush_young)
			young |= subscription->ops->clear_flush_young(
				subscription, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

int __mmu_notifier_clear_young(struct mm_struct *mm,
			       unsigned long start,
			       unsigned long end)
{
	struct mmu_notifier *subscription;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->clear_young)
			young |= subscription->ops->clear_young(subscription,
								mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

int __mmu_notifier_test_young(struct mm_struct *mm,
			      unsigned long address)
{
	struct mmu_notifier *subscription;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->test_young) {
			young = subscription->ops->test_young(subscription, mm,
							      address);
			if (young)
				break;
		}
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
			       pte_t pte)
{
	struct mmu_notifier *subscription;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->change_pte)
			subscription->ops->change_pte(subscription, mm, address,
						      pte);
	}
	srcu_read_unlock(&srcu, id);
}

static int mn_itree_invalidate(struct mmu_notifier_subscriptions *subscriptions,
			       const struct mmu_notifier_range *range)
{
	struct mmu_interval_notifier *interval_sub;
	unsigned long cur_seq;

	for (interval_sub =
		     mn_itree_inv_start_range(subscriptions, range, &cur_seq);
	     interval_sub;
	     interval_sub = mn_itree_inv_next(interval_sub, range)) {
		bool ret;

		ret = interval_sub->ops->invalidate(interval_sub, range,
						    cur_seq);
		if (!ret) {
			if (WARN_ON(mmu_notifier_range_blockable(range)))
				continue;
			goto out_would_block;
		}
	}
	return 0;

out_would_block:
	/*
	 * On -EAGAIN the non-blocking caller is not allowed to call
	 * invalidate_range_end()
	 */
	mn_itree_inv_end(subscriptions);
	return -EAGAIN;
}

static int mn_hlist_invalidate_range_start(
	struct mmu_notifier_subscriptions *subscriptions,
	struct mmu_notifier_range *range)
{
	struct mmu_notifier *subscription;
	int ret = 0;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		const struct mmu_notifier_ops *ops = subscription->ops;

		if (ops->invalidate_range_start) {
			int _ret;

			if (!mmu_notifier_range_blockable(range))
				non_block_start();
			_ret = ops->invalidate_range_start(subscription, range);
			if (!mmu_notifier_range_blockable(range))
				non_block_end();
			if (_ret) {
				pr_info("%pS callback failed with %d in %sblockable context.\n",
					ops->invalidate_range_start, _ret,
					!mmu_notifier_range_blockable(range) ?
						"non-" :
						"");
				WARN_ON(mmu_notifier_range_blockable(range) ||
					_ret != -EAGAIN);
				ret = _ret;
			}
		}
	}
	srcu_read_unlock(&srcu, id);

	return ret;
}

int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
	struct mmu_notifier_subscriptions *subscriptions =
		range->mm->notifier_subscriptions;
	int ret;

	if (subscriptions->has_itree) {
		ret = mn_itree_invalidate(subscriptions, range);
		if (ret)
			return ret;
	}
	if (!hlist_empty(&subscriptions->list))
		return mn_hlist_invalidate_range_start(subscriptions, range);
	return 0;
}

static void
mn_hlist_invalidate_end(struct mmu_notifier_subscriptions *subscriptions,
			struct mmu_notifier_range *range, bool only_end)
{
	struct mmu_notifier *subscription;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		/*
		 * Call invalidate_range here too, to avoid the need for the
		 * subsystem to register an invalidate_range_end call-back
		 * when there is invalidate_range already. Usually a
		 * subsystem registers either invalidate_range_start()/end() or
		 * invalidate_range(), so this will be no additional overhead
		 * (besides the pointer check).
		 *
		 * We skip the call to invalidate_range() if we know it is
		 * safe, i.e. the call site used
		 * mmu_notifier_invalidate_range_only_end(), which is safe
		 * when a call to invalidate_range() already happened under
		 * the page table lock.
		 */
		if (!only_end && subscription->ops->invalidate_range)
			subscription->ops->invalidate_range(subscription,
							    range->mm,
							    range->start,
							    range->end);
		if (subscription->ops->invalidate_range_end) {
			if (!mmu_notifier_range_blockable(range))
				non_block_start();
			subscription->ops->invalidate_range_end(subscription,
								range);
			if (!mmu_notifier_range_blockable(range))
				non_block_end();
		}
	}
	srcu_read_unlock(&srcu, id);
}

void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range,
					 bool only_end)
{
	struct mmu_notifier_subscriptions *subscriptions =
		range->mm->notifier_subscriptions;

	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	if (subscriptions->has_itree)
		mn_itree_inv_end(subscriptions);

	if (!hlist_empty(&subscriptions->list))
		mn_hlist_invalidate_end(subscriptions, range, only_end);
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
}

void __mmu_notifier_invalidate_range(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	struct mmu_notifier *subscription;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->invalidate_range)
			subscription->ops->invalidate_range(subscription, mm,
							    start, end);
	}
	srcu_read_unlock(&srcu, id);
}

/*
 * Same as mmu_notifier_register but here the caller must hold the mmap_lock in
 * write mode. A NULL mn signals the notifier is being registered for itree
 * mode.
 */
int __mmu_notifier_register(struct mmu_notifier *subscription,
			    struct mm_struct *mm)
{
	struct mmu_notifier_subscriptions *subscriptions = NULL;
	int ret;

	mmap_assert_write_locked(mm);
	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		fs_reclaim_acquire(GFP_KERNEL);
		lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
		lock_map_release(&__mmu_notifier_invalidate_range_start_map);
		fs_reclaim_release(GFP_KERNEL);
	}

	if (!mm->notifier_subscriptions) {
		/*
		 * kmalloc cannot be called under mm_take_all_locks(), but we
		 * know that mm->notifier_subscriptions can't change while we
		 * hold the write side of the mmap_lock.
		 */
		subscriptions = kzalloc(
			sizeof(struct mmu_notifier_subscriptions), GFP_KERNEL);
		if (!subscriptions)
			return -ENOMEM;

		INIT_HLIST_HEAD(&subscriptions->list);
		spin_lock_init(&subscriptions->lock);
		subscriptions->invalidate_seq = 2;
		subscriptions->itree = RB_ROOT_CACHED;
		init_waitqueue_head(&subscriptions->wq);
		INIT_HLIST_HEAD(&subscriptions->deferred_list);
	}

	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
		goto out_clean;

	/*
	 * Serialize the update against mmu_notifier_unregister. A
	 * side note: mmu_notifier_release can't run concurrently with
	 * us because we hold the mm_users pin (either implicitly as
	 * current->mm or explicitly with get_task_mm() or similar).
	 * We can't race against any other mmu notifier method either
	 * thanks to mm_take_all_locks().
	 *
	 * release semantics on the initialization of the
	 * mmu_notifier_subscriptions's contents are provided for unlocked
	 * readers.  acquire can only be used while holding the mmgrab or
	 * mmget, and is safe because once created the
	 * mmu_notifier_subscriptions is not freed until the mm is destroyed.
	 * As above, users holding the mmap_lock or one of the
	 * mm_take_all_locks() do not need to use acquire semantics.
	 */
	if (subscriptions)
		smp_store_release(&mm->notifier_subscriptions, subscriptions);

	if (subscription) {
		/* Pairs with the mmdrop in mmu_notifier_unregister_* */
		mmgrab(mm);
		subscription->mm = mm;
		subscription->users = 1;

		spin_lock(&mm->notifier_subscriptions->lock);
		hlist_add_head_rcu(&subscription->hlist,
				   &mm->notifier_subscriptions->list);
		spin_unlock(&mm->notifier_subscriptions->lock);
	} else
		mm->notifier_subscriptions->has_itree = true;

	mm_drop_all_locks(mm);
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return 0;

out_clean:
	kfree(subscriptions);
	return ret;
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);

/**
 * mmu_notifier_register - Register a notifier on a mm
 * @subscription: The notifier to attach
 * @mm: The mm to attach the notifier to
 *
 * Must not hold mmap_lock nor any other VM related lock when calling
 * this registration function. Must also ensure mm_users can't go down
 * to zero while this runs to avoid races with mmu_notifier_release,
 * so mm has to be current->mm or the mm should be pinned safely such
 * as with get_task_mm(). If the mm is not current->mm, the mm_users
 * pin should be released by calling mmput after mmu_notifier_register
 * returns.
 *
 * mmu_notifier_unregister() or mmu_notifier_put() must always be called to
 * unregister the notifier.
 *
 * While the caller has a mmu_notifier get, the subscription->mm pointer will
 * remain valid, and can be converted to an active mm pointer via
 * mmget_not_zero().
 */
int mmu_notifier_register(struct mmu_notifier *subscription,
			  struct mm_struct *mm)
{
	int ret;

	mmap_write_lock(mm);
	ret = __mmu_notifier_register(subscription, mm);
	mmap_write_unlock(mm);
	return ret;
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);

static struct mmu_notifier *
find_get_mmu_notifier(struct mm_struct *mm, const struct mmu_notifier_ops *ops)
{
	struct mmu_notifier *subscription;

	spin_lock(&mm->notifier_subscriptions->lock);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 lockdep_is_held(&mm->notifier_subscriptions->lock)) {
		if (subscription->ops != ops)
			continue;

		if (likely(subscription->users != UINT_MAX))
			subscription->users++;
		else
			subscription = ERR_PTR(-EOVERFLOW);
		spin_unlock(&mm->notifier_subscriptions->lock);
		return subscription;
	}
	spin_unlock(&mm->notifier_subscriptions->lock);
	return NULL;
}

/**
 * mmu_notifier_get_locked - Return the single struct mmu_notifier for
 *                           the mm & ops
 * @ops: The operations struct being subscribed with
 * @mm: The mm to attach notifiers to
 *
 * This function either allocates a new mmu_notifier via
 * ops->alloc_notifier(), or returns an already existing notifier on the
 * list. The value of the ops pointer is used to determine when two notifiers
 * are the same.
 *
 * Each call to mmu_notifier_get() must be paired with a call to
 * mmu_notifier_put(). The caller must hold the write side of mm->mmap_lock.
 *
 * While the caller has a mmu_notifier get, the mm pointer will remain valid,
 * and can be converted to an active mm pointer via mmget_not_zero().
 */
struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
					     struct mm_struct *mm)
{
	struct mmu_notifier *subscription;
	int ret;

	mmap_assert_write_locked(mm);

	if (mm->notifier_subscriptions) {
		subscription = find_get_mmu_notifier(mm, ops);
		if (subscription)
			return subscription;
	}

	subscription = ops->alloc_notifier(mm);
	if (IS_ERR(subscription))
		return subscription;
	subscription->ops = ops;
	ret = __mmu_notifier_register(subscription, mm);
	if (ret)
		goto out_free;
	return subscription;
out_free:
	subscription->ops->free_notifier(subscription);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(mmu_notifier_get_locked);
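
/*
 * Example (not part of this file): a sketch of the get/put flow. The ops
 * must provide alloc_notifier()/free_notifier(); struct my_subscription
 * and both helpers are hypothetical.
 */
struct my_subscription {
	struct mmu_notifier mn;
};

static struct mmu_notifier *my_alloc_notifier(struct mm_struct *mm)
{
	struct my_subscription *p = kzalloc(sizeof(*p), GFP_KERNEL);

	return p ? &p->mn : ERR_PTR(-ENOMEM);
}

static void my_free_notifier(struct mmu_notifier *subscription)
{
	kfree(container_of(subscription, struct my_subscription, mn));
}

static const struct mmu_notifier_ops my_get_ops = {
	.alloc_notifier	= my_alloc_notifier,
	.free_notifier	= my_free_notifier,
};

/*
 * Returns the single notifier for this mm & ops, allocating it on first
 * use. Caller holds mmap_write_lock(mm) and later pairs this with
 * mmu_notifier_put().
 */
static struct mmu_notifier *my_get(struct mm_struct *mm)
{
	return mmu_notifier_get_locked(&my_get_ops, mm);
}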

/* this is called after the last mmu_notifier_unregister() returned */
void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
	BUG_ON(!hlist_empty(&mm->notifier_subscriptions->list));
	kfree(mm->notifier_subscriptions);
	mm->notifier_subscriptions = LIST_POISON1; /* debug */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
 * running mmu notifiers with SRCU and against mmu_notifier_unregister
 * with the unregister lock + SRCU. All sptes must be dropped before
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister,
 * and only after mmu_notifier_unregister returned we're guaranteed
 * that ->release or any other method can't run anymore.
 */
void mmu_notifier_unregister(struct mmu_notifier *subscription,
			     struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	if (!hlist_unhashed(&subscription->hlist)) {
		/*
		 * SRCU here will force exit_mmap to wait for ->release to
		 * finish before freeing the pages.
		 */
		int id;

		id = srcu_read_lock(&srcu);
		/*
		 * exit_mmap will block in mmu_notifier_release to guarantee
		 * that ->release is called before freeing the pages.
		 */
		if (subscription->ops->release)
			subscription->ops->release(subscription, mm);
		srcu_read_unlock(&srcu, id);

		spin_lock(&mm->notifier_subscriptions->lock);
		/*
		 * Can not use list_del_rcu() since __mmu_notifier_release
		 * can delete it before we hold the lock.
		 */
		hlist_del_init_rcu(&subscription->hlist);
		spin_unlock(&mm->notifier_subscriptions->lock);
	}

	/*
	 * Wait for any running method to finish, of course including
	 * ->release if it was run by mmu_notifier_release instead of us.
	 */
	synchronize_srcu(&srcu);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);

static void mmu_notifier_free_rcu(struct rcu_head *rcu)
{
	struct mmu_notifier *subscription =
		container_of(rcu, struct mmu_notifier, rcu);
	struct mm_struct *mm = subscription->mm;

	subscription->ops->free_notifier(subscription);
	/* Pairs with the get in __mmu_notifier_register() */
	mmdrop(mm);
}

/**
 * mmu_notifier_put - Release the reference on the notifier
 * @subscription: The notifier to act on
 *
 * This function must be paired with each mmu_notifier_get(), it releases the
 * reference obtained by the get. If this is the last reference then the
 * process to free the notifier will be run asynchronously.
 *
 * Unlike mmu_notifier_unregister() the get/put flow only calls ops->release
 * when the mm_struct is destroyed. Instead free_notifier is always called to
 * release any resources held by the user.
 *
 * As ops->release is not guaranteed to be called, the user must ensure that
 * all sptes are dropped, and no new sptes can be established before
 * mmu_notifier_put() is called.
 *
 * This function can be called from the ops->release callback, however the
 * caller must still ensure it is called pairwise with mmu_notifier_get().
 *
 * Modules calling this function must call mmu_notifier_synchronize() in
 * their __exit functions to ensure the async work is completed.
 */
void mmu_notifier_put(struct mmu_notifier *subscription)
{
	struct mm_struct *mm = subscription->mm;

	spin_lock(&mm->notifier_subscriptions->lock);
	if (WARN_ON(!subscription->users) || --subscription->users)
		goto out_unlock;
	hlist_del_init_rcu(&subscription->hlist);
	spin_unlock(&mm->notifier_subscriptions->lock);

	call_srcu(&srcu, &subscription->rcu, mmu_notifier_free_rcu);
	return;

out_unlock:
	spin_unlock(&mm->notifier_subscriptions->lock);
}
EXPORT_SYMBOL_GPL(mmu_notifier_put);

static int __mmu_interval_notifier_insert(
	struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
	struct mmu_notifier_subscriptions *subscriptions, unsigned long start,
	unsigned long length, const struct mmu_interval_notifier_ops *ops)
{
	interval_sub->mm = mm;
	interval_sub->ops = ops;
	RB_CLEAR_NODE(&interval_sub->interval_tree.rb);
	interval_sub->interval_tree.start = start;
	/*
	 * Note that the representation of the intervals in the interval tree
	 * considers the ending point as contained in the interval.
	 */
	if (length == 0 ||
	    check_add_overflow(start, length - 1,
			       &interval_sub->interval_tree.last))
		return -EOVERFLOW;

	/* Must call with a mmget() held */
	if (WARN_ON(atomic_read(&mm->mm_count) <= 0))
		return -EINVAL;

	/* pairs with mmdrop in mmu_interval_notifier_remove() */
	mmgrab(mm);

	/*
	 * If some invalidate_range_start/end region is going on in parallel
	 * we don't know what VA ranges are affected, so we must assume this
	 * new range is included.
	 *
	 * If the itree is invalidating then we are not allowed to change
	 * it. Retrying until invalidation is done is tricky due to the
	 * possibility for live lock, instead defer the add to
	 * mn_itree_inv_end() so this algorithm is deterministic.
	 *
	 * In all cases the value for the interval_sub->invalidate_seq should be
	 * odd, see mmu_interval_read_begin()
	 */
	spin_lock(&subscriptions->lock);
	if (subscriptions->active_invalidate_ranges) {
		if (mn_itree_is_invalidating(subscriptions))
			hlist_add_head(&interval_sub->deferred_item,
				       &subscriptions->deferred_list);
		else {
			subscriptions->invalidate_seq |= 1;
			interval_tree_insert(&interval_sub->interval_tree,
					     &subscriptions->itree);
		}
		interval_sub->invalidate_seq = subscriptions->invalidate_seq;
	} else {
		WARN_ON(mn_itree_is_invalidating(subscriptions));
		/*
		 * The starting seq for a subscription not under invalidation
		 * should be odd, not equal to the current invalidate_seq and
		 * invalidate_seq should not 'wrap' to the new seq any time
		 * soon.
		 */
		interval_sub->invalidate_seq =
			subscriptions->invalidate_seq - 1;
		interval_tree_insert(&interval_sub->interval_tree,
				     &subscriptions->itree);
	}
	spin_unlock(&subscriptions->lock);
	return 0;
}

/**
 * mmu_interval_notifier_insert - Insert an interval notifier
 * @interval_sub: Interval subscription to register
 * @start: Starting virtual address to monitor
 * @length: Length of the range to monitor
 * @mm: mm_struct to attach to
 * @ops: Interval notifier operations to be called on matching events
 *
 * This function subscribes the interval notifier for notifications from the
 * mm.  Upon return the ops related to mmu_interval_notifier will be called
 * whenever an event that intersects with the given range occurs.
 *
 * Upon return the range_notifier may not be present in the interval tree yet.
 * The caller must use the normal interval notifier read flow via
 * mmu_interval_read_begin() to establish SPTEs for this range.
 */
int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long length,
				 const struct mmu_interval_notifier_ops *ops)
{
	struct mmu_notifier_subscriptions *subscriptions;
	int ret;

	might_lock(&mm->mmap_lock);

	subscriptions = smp_load_acquire(&mm->notifier_subscriptions);
	if (!subscriptions || !subscriptions->has_itree) {
		ret = mmu_notifier_register(NULL, mm);
		if (ret)
			return ret;
		subscriptions = mm->notifier_subscriptions;
	}
	return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
					      start, length, ops);
}
EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert);
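
/*
 * Example (not part of this file): a sketch of an interval subscription.
 * ->invalidate must publish the new sequence with mmu_interval_set_seq()
 * under the same 'user_lock' the read side uses (my_driver_lock from the
 * earlier sketch); my_drop_sptes() is hypothetical.
 */
static bool my_invalidate(struct mmu_interval_notifier *interval_sub,
			  const struct mmu_notifier_range *range,
			  unsigned long cur_seq)
{
	if (!mmu_notifier_range_blockable(range))
		return false;	/* the caller turns this into -EAGAIN */

	mutex_lock(&my_driver_lock);
	mmu_interval_set_seq(interval_sub, cur_seq);
	my_drop_sptes(range->start, range->end);	/* hypothetical */
	mutex_unlock(&my_driver_lock);
	return true;
}

static const struct mmu_interval_notifier_ops my_interval_ops = {
	.invalidate = my_invalidate,
};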

int mmu_interval_notifier_insert_locked(
	struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
	unsigned long start, unsigned long length,
	const struct mmu_interval_notifier_ops *ops)
{
	struct mmu_notifier_subscriptions *subscriptions =
		mm->notifier_subscriptions;
	int ret;

	mmap_assert_write_locked(mm);

	if (!subscriptions || !subscriptions->has_itree) {
		ret = __mmu_notifier_register(NULL, mm);
		if (ret)
			return ret;
		subscriptions = mm->notifier_subscriptions;
	}
	return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
					      start, length, ops);
}
EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert_locked);

/**
 * mmu_interval_notifier_remove - Remove an interval notifier
 * @interval_sub: Interval subscription to unregister
 *
 * This function must be paired with mmu_interval_notifier_insert(). It cannot
 * be called from any ops callback.
 *
 * Once this returns ops callbacks are no longer running on other CPUs and
 * will not be called in the future.
 */
void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub)
{
	struct mm_struct *mm = interval_sub->mm;
	struct mmu_notifier_subscriptions *subscriptions =
		mm->notifier_subscriptions;
	unsigned long seq = 0;

	might_sleep();

	spin_lock(&subscriptions->lock);
	if (mn_itree_is_invalidating(subscriptions)) {
		/*
		 * remove is being called after insert put this on the
		 * deferred list, but before the deferred list was processed.
		 */
		if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb)) {
			hlist_del(&interval_sub->deferred_item);
		} else {
			hlist_add_head(&interval_sub->deferred_item,
				       &subscriptions->deferred_list);
			seq = subscriptions->invalidate_seq;
		}
	} else {
		WARN_ON(RB_EMPTY_NODE(&interval_sub->interval_tree.rb));
		interval_tree_remove(&interval_sub->interval_tree,
				     &subscriptions->itree);
	}
	spin_unlock(&subscriptions->lock);

	/*
	 * The possible sleep on progress in the invalidation requires that
	 * the caller not hold any locks taken by the invalidation callbacks.
	 */
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
	if (seq)
		wait_event(subscriptions->wq,
			   READ_ONCE(subscriptions->invalidate_seq) != seq);

	/* pairs with mmgrab in mmu_interval_notifier_insert() */
	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_interval_notifier_remove);

/**
 * mmu_notifier_synchronize - Ensure all mmu_notifiers are freed
 *
 * This function ensures that all outstanding async SRCU work from
 * mmu_notifier_put() is completed. After it returns any mmu_notifier_ops
 * associated with an unused mmu_notifier will no longer be called.
 *
 * Before using the caller must ensure that all of its mmu_notifiers have been
 * fully released via mmu_notifier_put().
 *
 * Modules using the mmu_notifier_put() API should call this in their __exit
 * function to avoid module unloading races.
 */
void mmu_notifier_synchronize(void)
{
	synchronize_srcu(&srcu);
}
EXPORT_SYMBOL_GPL(mmu_notifier_synchronize);
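
/*
 * Example (not part of this file): per the kernel-doc above, a module that
 * used mmu_notifier_put() pairs it with mmu_notifier_synchronize() in its
 * __exit path, so the deferred free_notifier() work has finished before
 * the module text goes away.
 */
static void __exit my_module_exit(void)
{
	/* All mmu_notifier_put() calls were issued before this point. */
	mmu_notifier_synchronize();
}
module_exit(my_module_exit);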

bool
mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range)
{
	if (!range->vma || range->event != MMU_NOTIFY_PROTECTION_VMA)
		return false;
	/* Return true if the vma still has the read flag set. */
	return range->vma->vm_flags & VM_READ;
}
EXPORT_SYMBOL_GPL(mmu_notifier_range_update_to_read_only);