v5.14.15
  1/* SPDX-License-Identifier: GPL-2.0 */
  2#ifndef _LINUX_MMU_NOTIFIER_H
  3#define _LINUX_MMU_NOTIFIER_H
  4
  5#include <linux/list.h>
  6#include <linux/spinlock.h>
  7#include <linux/mm_types.h>
  8#include <linux/mmap_lock.h>
  9#include <linux/srcu.h>
 10#include <linux/interval_tree.h>
 11
 12struct mmu_notifier_subscriptions;
 13struct mmu_notifier;
 14struct mmu_notifier_range;
 15struct mmu_interval_notifier;
 16
 17/**
 18 * enum mmu_notifier_event - reason for the mmu notifier callback
  19 * @MMU_NOTIFY_UNMAP: either a munmap() that unmaps the range or a mremap() that
  20 * moves the range
 21 *
 22 * @MMU_NOTIFY_CLEAR: clear page table entry (many reasons for this like
 23 * madvise() or replacing a page by another one, ...).
 24 *
  25 * @MMU_NOTIFY_PROTECTION_VMA: update is due to a protection change for the range,
  26 * i.e. using the vma access permission (vm_page_prot) to update the whole range
  27 * is enough; there is no need to inspect changes to the CPU page table (mprotect()
  28 * syscall).
 29 *
 30 * @MMU_NOTIFY_PROTECTION_PAGE: update is due to change in read/write flag for
 31 * pages in the range so to mirror those changes the user must inspect the CPU
 32 * page table (from the end callback).
 33 *
 34 * @MMU_NOTIFY_SOFT_DIRTY: soft dirty accounting (still same page and same
  35 * access flags). The user should soft-dirty the page in the end callback to make
  36 * sure that anyone relying on soft dirtiness catches pages that might be written
  37 * through non-CPU mappings.
 38 *
 39 * @MMU_NOTIFY_RELEASE: used during mmu_interval_notifier invalidate to signal
 40 * that the mm refcount is zero and the range is no longer accessible.
 41 *
 42 * @MMU_NOTIFY_MIGRATE: used during migrate_vma_collect() invalidate to signal
 43 * a device driver to possibly ignore the invalidation if the
 44 * owner field matches the driver's device private pgmap owner.
 45 *
 46 * @MMU_NOTIFY_EXCLUSIVE: to signal a device driver that the device will no
 47 * longer have exclusive access to the page. When sent during creation of an
 48 * exclusive range the owner will be initialised to the value provided by the
 49 * caller of make_device_exclusive_range(), otherwise the owner will be NULL.
 50 */
 51enum mmu_notifier_event {
 52	MMU_NOTIFY_UNMAP = 0,
 53	MMU_NOTIFY_CLEAR,
 54	MMU_NOTIFY_PROTECTION_VMA,
 55	MMU_NOTIFY_PROTECTION_PAGE,
 56	MMU_NOTIFY_SOFT_DIRTY,
 57	MMU_NOTIFY_RELEASE,
 58	MMU_NOTIFY_MIGRATE,
 59	MMU_NOTIFY_EXCLUSIVE,
 60};
 61
 62#define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0)
 63
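/*
 * Illustrative sketch, not part of this header: one way a driver's
 * invalidate_range_start() callback might use the event and owner fields
 * documented above, assuming the full <linux/mmu_notifier.h> is available.
 * struct my_mirror, my_mirror_unmap() and the pgmap_owner value are
 * hypothetical.
 */
struct my_mirror {
	struct mmu_notifier notifier;
	void *pgmap_owner;
};

static int my_mirror_invalidate_start(struct mmu_notifier *subscription,
				      const struct mmu_notifier_range *range)
{
	struct my_mirror *mirror =
		container_of(subscription, struct my_mirror, notifier);

	/*
	 * A MMU_NOTIFY_MIGRATE invalidation that this driver triggered
	 * itself (owner matches) needs no further action; anything else
	 * tears down the device mappings covering [start, end).
	 */
	if (range->event == MMU_NOTIFY_MIGRATE &&
	    range->owner == mirror->pgmap_owner)
		return 0;

	my_mirror_unmap(mirror, range->start, range->end);
	return 0;
}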
 64struct mmu_notifier_ops {
 65	/*
 66	 * Called either by mmu_notifier_unregister or when the mm is
 67	 * being destroyed by exit_mmap, always before all pages are
 68	 * freed. This can run concurrently with other mmu notifier
 69	 * methods (the ones invoked outside the mm context) and it
 70	 * should tear down all secondary mmu mappings and freeze the
  71	 * secondary mmu. If this method isn't implemented you have to
 72	 * be sure that nothing could possibly write to the pages
 73	 * through the secondary mmu by the time the last thread with
 74	 * tsk->mm == mm exits.
 75	 *
  76	 * As a side note: the pages freed after ->release returns could
 77	 * be immediately reallocated by the gart at an alias physical
 78	 * address with a different cache model, so if ->release isn't
 79	 * implemented because all _software_ driven memory accesses
 80	 * through the secondary mmu are terminated by the time the
  81	 * last thread of this mm quits, you also have to be sure that
 82	 * speculative _hardware_ operations can't allocate dirty
 83	 * cachelines in the cpu that could not be snooped and made
 84	 * coherent with the other read and write operations happening
 85	 * through the gart alias address, so leading to memory
 86	 * corruption.
 87	 */
 88	void (*release)(struct mmu_notifier *subscription,
 89			struct mm_struct *mm);
 90
 91	/*
 92	 * clear_flush_young is called after the VM is
 93	 * test-and-clearing the young/accessed bitflag in the
 94	 * pte. This way the VM will provide proper aging to the
 95	 * accesses to the page through the secondary MMUs and not
 96	 * only to the ones through the Linux pte.
 97	 * Start-end is necessary in case the secondary MMU is mapping the page
 98	 * at a smaller granularity than the primary MMU.
 99	 */
100	int (*clear_flush_young)(struct mmu_notifier *subscription,
101				 struct mm_struct *mm,
102				 unsigned long start,
103				 unsigned long end);
104
105	/*
106	 * clear_young is a lightweight version of clear_flush_young. Like the
107	 * latter, it is supposed to test-and-clear the young/accessed bitflag
108	 * in the secondary pte, but it may omit flushing the secondary tlb.
109	 */
110	int (*clear_young)(struct mmu_notifier *subscription,
111			   struct mm_struct *mm,
112			   unsigned long start,
113			   unsigned long end);
114
115	/*
116	 * test_young is called to check the young/accessed bitflag in
117	 * the secondary pte. This is used to know if the page is
118	 * frequently used without actually clearing the flag or tearing
119	 * down the secondary mapping on the page.
120	 */
121	int (*test_young)(struct mmu_notifier *subscription,
122			  struct mm_struct *mm,
123			  unsigned long address);
124
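/*
 * Illustrative sketch, not part of this header: a clear_young()
 * implementation for a secondary MMU that keeps its own accessed bits.
 * The hypothetical my_mirror_test_and_clear_accessed() clears the bit in
 * the secondary pte; no secondary TLB flush is done, which clear_young()
 * explicitly permits.
 */
static int my_mirror_clear_young(struct mmu_notifier *subscription,
				 struct mm_struct *mm,
				 unsigned long start, unsigned long end)
{
	struct my_mirror *mirror =
		container_of(subscription, struct my_mirror, notifier);
	unsigned long addr;
	int young = 0;

	for (addr = start; addr < end; addr += PAGE_SIZE)
		young |= my_mirror_test_and_clear_accessed(mirror, addr);

	return young;
}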
125	/*
 126	 * change_pte is called when the pte mapping a page is changed:
 127	 * for example, when ksm remaps a pte to point to a new shared page.
128	 */
129	void (*change_pte)(struct mmu_notifier *subscription,
130			   struct mm_struct *mm,
131			   unsigned long address,
132			   pte_t pte);
133
134	/*
135	 * invalidate_range_start() and invalidate_range_end() must be
136	 * paired and are called only when the mmap_lock and/or the
137	 * locks protecting the reverse maps are held. If the subsystem
138	 * can't guarantee that no additional references are taken to
139	 * the pages in the range, it has to implement the
140	 * invalidate_range() notifier to remove any references taken
141	 * after invalidate_range_start().
142	 *
143	 * Invalidation of multiple concurrent ranges may be
144	 * optionally permitted by the driver. Either way the
145	 * establishment of sptes is forbidden in the range passed to
 146	 * invalidate_range_start/end for the whole duration of the
 147	 * invalidate_range_start/end critical section.
148	 *
149	 * invalidate_range_start() is called when all pages in the
150	 * range are still mapped and have at least a refcount of one.
151	 *
152	 * invalidate_range_end() is called when all pages in the
153	 * range have been unmapped and the pages have been freed by
154	 * the VM.
155	 *
156	 * The VM will remove the page table entries and potentially
157	 * the page between invalidate_range_start() and
158	 * invalidate_range_end(). If the page must not be freed
159	 * because of pending I/O or other circumstances then the
160	 * invalidate_range_start() callback (or the initial mapping
161	 * by the driver) must make sure that the refcount is kept
162	 * elevated.
163	 *
164	 * If the driver increases the refcount when the pages are
165	 * initially mapped into an address space then either
166	 * invalidate_range_start() or invalidate_range_end() may
167	 * decrease the refcount. If the refcount is decreased on
168	 * invalidate_range_start() then the VM can free pages as page
169	 * table entries are removed.  If the refcount is only
170	 * dropped on invalidate_range_end() then the driver itself
171	 * will drop the last refcount but it must take care to flush
172	 * any secondary tlb before doing the final free on the
173	 * page. Pages will no longer be referenced by the linux
174	 * address space but may still be referenced by sptes until
175	 * the last refcount is dropped.
176	 *
177	 * If blockable argument is set to false then the callback cannot
178	 * sleep and has to return with -EAGAIN if sleeping would be required.
179	 * 0 should be returned otherwise. Please note that notifiers that can
180	 * fail invalidate_range_start are not allowed to implement
181	 * invalidate_range_end, as there is no mechanism for informing the
182	 * notifier that its start failed.
183	 */
184	int (*invalidate_range_start)(struct mmu_notifier *subscription,
185				      const struct mmu_notifier_range *range);
186	void (*invalidate_range_end)(struct mmu_notifier *subscription,
187				     const struct mmu_notifier_range *range);
188
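/*
 * Illustrative sketch, not part of this header: honouring the blockable
 * rule described above.  A notifier that needs a sleeping lock must bail
 * out with -EAGAIN when the range is not blockable, and such a notifier
 * must not implement invalidate_range_end().  The my_mirror structure and
 * helpers are the hypothetical ones from the sketches above, extended
 * with a mutex.
 */
static int my_mirror_invalidate_start_sleeping(struct mmu_notifier *subscription,
					       const struct mmu_notifier_range *range)
{
	struct my_mirror *mirror =
		container_of(subscription, struct my_mirror, notifier);

	if (!mmu_notifier_range_blockable(range))
		return -EAGAIN;		/* can't take mirror->lock atomically */

	mutex_lock(&mirror->lock);
	my_mirror_unmap(mirror, range->start, range->end);
	mutex_unlock(&mirror->lock);
	return 0;
}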
189	/*
190	 * invalidate_range() is either called between
191	 * invalidate_range_start() and invalidate_range_end() when the
 192	 * VM has to free pages that were unmapped, but before the
193	 * pages are actually freed, or outside of _start()/_end() when
 194	 * a (remote) TLB flush is necessary.
195	 *
196	 * If invalidate_range() is used to manage a non-CPU TLB with
 197	 * shared page-tables, it is not necessary to implement the
198	 * invalidate_range_start()/end() notifiers, as
199	 * invalidate_range() already catches the points in time when an
200	 * external TLB range needs to be flushed. For more in depth
201	 * discussion on this see Documentation/vm/mmu_notifier.rst
202	 *
203	 * Note that this function might be called with just a sub-range
204	 * of what was passed to invalidate_range_start()/end(), if
205	 * called between those functions.
206	 */
207	void (*invalidate_range)(struct mmu_notifier *subscription,
208				 struct mm_struct *mm,
209				 unsigned long start,
210				 unsigned long end);
211
212	/*
213	 * These callbacks are used with the get/put interface to manage the
214	 * lifetime of the mmu_notifier memory. alloc_notifier() returns a new
215	 * notifier for use with the mm.
216	 *
217	 * free_notifier() is only called after the mmu_notifier has been
218	 * fully put, calls to any ops callback are prevented and no ops
219	 * callbacks are currently running. It is called from a SRCU callback
220	 * and cannot sleep.
221	 */
222	struct mmu_notifier *(*alloc_notifier)(struct mm_struct *mm);
223	void (*free_notifier)(struct mmu_notifier *subscription);
224};
225
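/*
 * Illustrative sketch, not part of this header: the get/put interface.
 * With alloc_notifier()/free_notifier() provided, a single mmu_notifier
 * is shared per mm and reference counted by mmu_notifier_get()/put().
 * struct my_mirror is the hypothetical structure from the sketches above;
 * my_mirror_open() is equally hypothetical.
 */
static struct mmu_notifier *my_mirror_alloc_notifier(struct mm_struct *mm)
{
	struct my_mirror *mirror = kzalloc(sizeof(*mirror), GFP_KERNEL);

	if (!mirror)
		return ERR_PTR(-ENOMEM);
	return &mirror->notifier;
}

static void my_mirror_free_notifier(struct mmu_notifier *subscription)
{
	/* Runs from an SRCU callback once fully put; must not sleep. */
	kfree(container_of(subscription, struct my_mirror, notifier));
}

static const struct mmu_notifier_ops my_mirror_ops = {
	.invalidate_range_start	= my_mirror_invalidate_start,
	.alloc_notifier		= my_mirror_alloc_notifier,
	.free_notifier		= my_mirror_free_notifier,
};

static struct my_mirror *my_mirror_open(void)
{
	/* Returns the existing notifier for current->mm or allocates one. */
	struct mmu_notifier *mn = mmu_notifier_get(&my_mirror_ops, current->mm);

	if (IS_ERR(mn))
		return ERR_CAST(mn);
	return container_of(mn, struct my_mirror, notifier);
}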
226/*
227 * The notifier chains are protected by mmap_lock and/or the reverse map
228 * semaphores. Notifier chains are only changed when all reverse maps and
229 * the mmap_lock locks are taken.
230 *
231 * Therefore notifier chains can only be traversed when either
232 *
233 * 1. mmap_lock is held.
234 * 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem).
235 * 3. No other concurrent thread can access the list (release)
236 */
237struct mmu_notifier {
238	struct hlist_node hlist;
239	const struct mmu_notifier_ops *ops;
240	struct mm_struct *mm;
241	struct rcu_head rcu;
242	unsigned int users;
243};
244
245/**
246 * struct mmu_interval_notifier_ops
247 * @invalidate: Upon return the caller must stop using any SPTEs within this
248 *              range. This function can sleep. Return false only if sleeping
249 *              was required but mmu_notifier_range_blockable(range) is false.
250 */
251struct mmu_interval_notifier_ops {
252	bool (*invalidate)(struct mmu_interval_notifier *interval_sub,
253			   const struct mmu_notifier_range *range,
254			   unsigned long cur_seq);
255};
256
257struct mmu_interval_notifier {
258	struct interval_tree_node interval_tree;
259	const struct mmu_interval_notifier_ops *ops;
260	struct mm_struct *mm;
261	struct hlist_node deferred_item;
262	unsigned long invalidate_seq;
263};
264
265#ifdef CONFIG_MMU_NOTIFIER
266
267#ifdef CONFIG_LOCKDEP
268extern struct lockdep_map __mmu_notifier_invalidate_range_start_map;
269#endif
270
271struct mmu_notifier_range {
272	struct vm_area_struct *vma;
273	struct mm_struct *mm;
274	unsigned long start;
275	unsigned long end;
276	unsigned flags;
277	enum mmu_notifier_event event;
278	void *owner;
279};
280
281static inline int mm_has_notifiers(struct mm_struct *mm)
282{
283	return unlikely(mm->notifier_subscriptions);
284}
285
286struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
287					     struct mm_struct *mm);
288static inline struct mmu_notifier *
289mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm)
290{
291	struct mmu_notifier *ret;
292
293	mmap_write_lock(mm);
294	ret = mmu_notifier_get_locked(ops, mm);
295	mmap_write_unlock(mm);
296	return ret;
297}
298void mmu_notifier_put(struct mmu_notifier *subscription);
299void mmu_notifier_synchronize(void);
300
301extern int mmu_notifier_register(struct mmu_notifier *subscription,
302				 struct mm_struct *mm);
303extern int __mmu_notifier_register(struct mmu_notifier *subscription,
304				   struct mm_struct *mm);
305extern void mmu_notifier_unregister(struct mmu_notifier *subscription,
306				    struct mm_struct *mm);
307
308unsigned long
309mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub);
310int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
311				 struct mm_struct *mm, unsigned long start,
312				 unsigned long length,
313				 const struct mmu_interval_notifier_ops *ops);
314int mmu_interval_notifier_insert_locked(
315	struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
316	unsigned long start, unsigned long length,
317	const struct mmu_interval_notifier_ops *ops);
318void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub);
319
320/**
321 * mmu_interval_set_seq - Save the invalidation sequence
322 * @interval_sub - The subscription passed to invalidate
323 * @cur_seq - The cur_seq passed to the invalidate() callback
324 *
325 * This must be called unconditionally from the invalidate callback of a
326 * struct mmu_interval_notifier_ops under the same lock that is used to call
327 * mmu_interval_read_retry(). It updates the sequence number for later use by
328 * mmu_interval_read_retry(). The provided cur_seq will always be odd.
329 *
330 * If the caller does not call mmu_interval_read_begin() or
331 * mmu_interval_read_retry() then this call is not required.
332 */
333static inline void
334mmu_interval_set_seq(struct mmu_interval_notifier *interval_sub,
335		     unsigned long cur_seq)
336{
337	WRITE_ONCE(interval_sub->invalidate_seq, cur_seq);
338}
339
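/*
 * Illustrative sketch, not part of this header: an interval notifier
 * invalidate() callback.  It takes the same driver lock that the read
 * side uses around mmu_interval_read_retry(), records cur_seq with
 * mmu_interval_set_seq(), and returns false only when sleeping was
 * required but the range is not blockable.  struct my_range and
 * my_range_unmap_device() are hypothetical; the matching read side is
 * sketched after mmu_interval_read_retry() below.
 */
struct my_range {
	struct mmu_interval_notifier notifier;
	struct mutex lock;		/* also held by the read side */
};

static bool my_range_invalidate(struct mmu_interval_notifier *interval_sub,
				const struct mmu_notifier_range *range,
				unsigned long cur_seq)
{
	struct my_range *mrange =
		container_of(interval_sub, struct my_range, notifier);

	if (mmu_notifier_range_blockable(range))
		mutex_lock(&mrange->lock);
	else if (!mutex_trylock(&mrange->lock))
		return false;

	mmu_interval_set_seq(interval_sub, cur_seq);
	my_range_unmap_device(mrange, range->start, range->end);
	mutex_unlock(&mrange->lock);
	return true;
}

static const struct mmu_interval_notifier_ops my_range_ops = {
	.invalidate = my_range_invalidate,
};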
340/**
341 * mmu_interval_read_retry - End a read side critical section against a VA range
342 * interval_sub: The subscription
343 * seq: The return of the paired mmu_interval_read_begin()
344 *
345 * This MUST be called under a user provided lock that is also held
346 * unconditionally by op->invalidate() when it calls mmu_interval_set_seq().
347 *
348 * Each call should be paired with a single mmu_interval_read_begin() and
349 * should be used to conclude the read side.
350 *
351 * Returns true if an invalidation collided with this critical section, and
352 * the caller should retry.
353 */
354static inline bool
355mmu_interval_read_retry(struct mmu_interval_notifier *interval_sub,
356			unsigned long seq)
357{
358	return interval_sub->invalidate_seq != seq;
359}
360
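/*
 * Illustrative sketch, not part of this header: the read side that pairs
 * with the invalidate() sketch above.  The sequence from
 * mmu_interval_read_begin() is rechecked under the driver lock with
 * mmu_interval_read_retry(); on a collision the work is thrown away and
 * retried.  my_range_populate() and my_range_program_device() are
 * hypothetical.
 */
static int my_range_fault(struct my_range *mrange)
{
	unsigned long seq;
	int ret;

again:
	seq = mmu_interval_read_begin(&mrange->notifier);

	/* Build the device mapping outside the lock; this may sleep. */
	ret = my_range_populate(mrange);
	if (ret)
		return ret;

	mutex_lock(&mrange->lock);
	if (mmu_interval_read_retry(&mrange->notifier, seq)) {
		/* An invalidation raced with us; discard and start over. */
		mutex_unlock(&mrange->lock);
		goto again;
	}
	my_range_program_device(mrange);
	mutex_unlock(&mrange->lock);
	return 0;
}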
361/**
362 * mmu_interval_check_retry - Test if a collision has occurred
363 * interval_sub: The subscription
364 * seq: The return of the matching mmu_interval_read_begin()
365 *
366 * This can be used in the critical section between mmu_interval_read_begin()
367 * and mmu_interval_read_retry().  A return of true indicates an invalidation
368 * has collided with this critical region and a future
369 * mmu_interval_read_retry() will return true.
370 *
371 * False is not reliable and only suggests a collision may not have
372 * occurred. It can be called many times and does not have to hold the user
373 * provided lock.
374 *
375 * This call can be used as part of loops and other expensive operations to
376 * expedite a retry.
377 */
378static inline bool
379mmu_interval_check_retry(struct mmu_interval_notifier *interval_sub,
380			 unsigned long seq)
381{
382	/* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
383	return READ_ONCE(interval_sub->invalidate_seq) != seq;
384}
385
386extern void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm);
387extern void __mmu_notifier_release(struct mm_struct *mm);
388extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
389					  unsigned long start,
390					  unsigned long end);
391extern int __mmu_notifier_clear_young(struct mm_struct *mm,
392				      unsigned long start,
393				      unsigned long end);
394extern int __mmu_notifier_test_young(struct mm_struct *mm,
395				     unsigned long address);
396extern void __mmu_notifier_change_pte(struct mm_struct *mm,
397				      unsigned long address, pte_t pte);
398extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r);
399extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r,
400				  bool only_end);
401extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
402				  unsigned long start, unsigned long end);
403extern bool
404mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range);
405
406static inline bool
407mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
408{
409	return (range->flags & MMU_NOTIFIER_RANGE_BLOCKABLE);
410}
411
412static inline void mmu_notifier_release(struct mm_struct *mm)
413{
414	if (mm_has_notifiers(mm))
415		__mmu_notifier_release(mm);
416}
417
418static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
419					  unsigned long start,
420					  unsigned long end)
421{
422	if (mm_has_notifiers(mm))
423		return __mmu_notifier_clear_flush_young(mm, start, end);
424	return 0;
425}
426
427static inline int mmu_notifier_clear_young(struct mm_struct *mm,
428					   unsigned long start,
429					   unsigned long end)
430{
431	if (mm_has_notifiers(mm))
432		return __mmu_notifier_clear_young(mm, start, end);
433	return 0;
434}
435
436static inline int mmu_notifier_test_young(struct mm_struct *mm,
437					  unsigned long address)
438{
439	if (mm_has_notifiers(mm))
440		return __mmu_notifier_test_young(mm, address);
441	return 0;
442}
443
444static inline void mmu_notifier_change_pte(struct mm_struct *mm,
445					   unsigned long address, pte_t pte)
446{
447	if (mm_has_notifiers(mm))
448		__mmu_notifier_change_pte(mm, address, pte);
449}
450
451static inline void
452mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
453{
454	might_sleep();
455
456	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
457	if (mm_has_notifiers(range->mm)) {
458		range->flags |= MMU_NOTIFIER_RANGE_BLOCKABLE;
459		__mmu_notifier_invalidate_range_start(range);
460	}
461	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
462}
463
464static inline int
465mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
466{
467	int ret = 0;
468
469	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
470	if (mm_has_notifiers(range->mm)) {
471		range->flags &= ~MMU_NOTIFIER_RANGE_BLOCKABLE;
472		ret = __mmu_notifier_invalidate_range_start(range);
473	}
474	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
475	return ret;
476}
477
478static inline void
479mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
480{
481	if (mmu_notifier_range_blockable(range))
482		might_sleep();
483
484	if (mm_has_notifiers(range->mm))
485		__mmu_notifier_invalidate_range_end(range, false);
486}
487
488static inline void
489mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
490{
491	if (mm_has_notifiers(range->mm))
492		__mmu_notifier_invalidate_range_end(range, true);
493}
494
495static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
496				  unsigned long start, unsigned long end)
497{
498	if (mm_has_notifiers(mm))
499		__mmu_notifier_invalidate_range(mm, start, end);
500}
501
502static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
503{
504	mm->notifier_subscriptions = NULL;
505}
506
507static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
508{
509	if (mm_has_notifiers(mm))
510		__mmu_notifier_subscriptions_destroy(mm);
511}
512
513
514static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
515					   enum mmu_notifier_event event,
516					   unsigned flags,
517					   struct vm_area_struct *vma,
518					   struct mm_struct *mm,
519					   unsigned long start,
520					   unsigned long end)
521{
522	range->vma = vma;
523	range->event = event;
524	range->mm = mm;
525	range->start = start;
526	range->end = end;
527	range->flags = flags;
528}
529
530static inline void mmu_notifier_range_init_owner(
531			struct mmu_notifier_range *range,
532			enum mmu_notifier_event event, unsigned int flags,
533			struct vm_area_struct *vma, struct mm_struct *mm,
534			unsigned long start, unsigned long end, void *owner)
535{
536	mmu_notifier_range_init(range, event, flags, vma, mm, start, end);
537	range->owner = owner;
538}
539
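/*
 * Illustrative sketch, not part of this header: the caller side in mm
 * code.  Page table changes are bracketed by an
 * invalidate_range_start()/end() pair built with
 * mmu_notifier_range_init().  my_clear_range() is hypothetical; vma, mm,
 * start and end are assumed to be provided by its caller.
 */
static void my_clear_range(struct vm_area_struct *vma, struct mm_struct *mm,
			   unsigned long start, unsigned long end)
{
	struct mmu_notifier_range range;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
				start, end);
	mmu_notifier_invalidate_range_start(&range);

	/* ... clear or rewrite the CPU page table entries here ... */

	mmu_notifier_invalidate_range_end(&range);
}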
540#define ptep_clear_flush_young_notify(__vma, __address, __ptep)		\
541({									\
542	int __young;							\
543	struct vm_area_struct *___vma = __vma;				\
544	unsigned long ___address = __address;				\
545	__young = ptep_clear_flush_young(___vma, ___address, __ptep);	\
546	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
547						  ___address,		\
548						  ___address +		\
549							PAGE_SIZE);	\
550	__young;							\
551})
552
553#define pmdp_clear_flush_young_notify(__vma, __address, __pmdp)		\
554({									\
555	int __young;							\
556	struct vm_area_struct *___vma = __vma;				\
557	unsigned long ___address = __address;				\
558	__young = pmdp_clear_flush_young(___vma, ___address, __pmdp);	\
559	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
560						  ___address,		\
561						  ___address +		\
562							PMD_SIZE);	\
563	__young;							\
564})
565
566#define ptep_clear_young_notify(__vma, __address, __ptep)		\
567({									\
568	int __young;							\
569	struct vm_area_struct *___vma = __vma;				\
570	unsigned long ___address = __address;				\
571	__young = ptep_test_and_clear_young(___vma, ___address, __ptep);\
572	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
573					    ___address + PAGE_SIZE);	\
574	__young;							\
575})
576
577#define pmdp_clear_young_notify(__vma, __address, __pmdp)		\
578({									\
579	int __young;							\
580	struct vm_area_struct *___vma = __vma;				\
581	unsigned long ___address = __address;				\
582	__young = pmdp_test_and_clear_young(___vma, ___address, __pmdp);\
583	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
584					    ___address + PMD_SIZE);	\
585	__young;							\
586})
587
588#define	ptep_clear_flush_notify(__vma, __address, __ptep)		\
589({									\
590	unsigned long ___addr = __address & PAGE_MASK;			\
591	struct mm_struct *___mm = (__vma)->vm_mm;			\
592	pte_t ___pte;							\
593									\
594	___pte = ptep_clear_flush(__vma, __address, __ptep);		\
595	mmu_notifier_invalidate_range(___mm, ___addr,			\
596					___addr + PAGE_SIZE);		\
597									\
598	___pte;								\
599})
600
601#define pmdp_huge_clear_flush_notify(__vma, __haddr, __pmd)		\
602({									\
603	unsigned long ___haddr = __haddr & HPAGE_PMD_MASK;		\
604	struct mm_struct *___mm = (__vma)->vm_mm;			\
605	pmd_t ___pmd;							\
606									\
607	___pmd = pmdp_huge_clear_flush(__vma, __haddr, __pmd);		\
608	mmu_notifier_invalidate_range(___mm, ___haddr,			\
609				      ___haddr + HPAGE_PMD_SIZE);	\
610									\
611	___pmd;								\
612})
613
614#define pudp_huge_clear_flush_notify(__vma, __haddr, __pud)		\
615({									\
616	unsigned long ___haddr = __haddr & HPAGE_PUD_MASK;		\
617	struct mm_struct *___mm = (__vma)->vm_mm;			\
618	pud_t ___pud;							\
619									\
620	___pud = pudp_huge_clear_flush(__vma, __haddr, __pud);		\
621	mmu_notifier_invalidate_range(___mm, ___haddr,			\
622				      ___haddr + HPAGE_PUD_SIZE);	\
623									\
624	___pud;								\
625})
626
627/*
628 * set_pte_at_notify() sets the pte _after_ running the notifier.
 629 * It is safe to start by updating the secondary MMUs, because the primary MMU
630 * pte invalidate must have already happened with a ptep_clear_flush() before
631 * set_pte_at_notify() has been invoked.  Updating the secondary MMUs first is
632 * required when we change both the protection of the mapping from read-only to
633 * read-write and the pfn (like during copy on write page faults). Otherwise the
634 * old page would remain mapped readonly in the secondary MMUs after the new
635 * page is already writable by some CPU through the primary MMU.
636 */
637#define set_pte_at_notify(__mm, __address, __ptep, __pte)		\
638({									\
639	struct mm_struct *___mm = __mm;					\
640	unsigned long ___address = __address;				\
641	pte_t ___pte = __pte;						\
642									\
643	mmu_notifier_change_pte(___mm, ___address, ___pte);		\
644	set_pte_at(___mm, ___address, __ptep, ___pte);			\
645})
646
647#else /* CONFIG_MMU_NOTIFIER */
648
649struct mmu_notifier_range {
650	unsigned long start;
651	unsigned long end;
652};
653
654static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
655					    unsigned long start,
656					    unsigned long end)
657{
658	range->start = start;
659	range->end = end;
660}
661
662#define mmu_notifier_range_init(range,event,flags,vma,mm,start,end)  \
663	_mmu_notifier_range_init(range, start, end)
664#define mmu_notifier_range_init_owner(range, event, flags, vma, mm, start, \
665					end, owner) \
666	_mmu_notifier_range_init(range, start, end)
667
668static inline bool
669mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
670{
671	return true;
672}
673
674static inline int mm_has_notifiers(struct mm_struct *mm)
675{
676	return 0;
677}
678
679static inline void mmu_notifier_release(struct mm_struct *mm)
680{
681}
682
683static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
684					  unsigned long start,
685					  unsigned long end)
686{
687	return 0;
688}
689
690static inline int mmu_notifier_test_young(struct mm_struct *mm,
691					  unsigned long address)
692{
693	return 0;
694}
695
696static inline void mmu_notifier_change_pte(struct mm_struct *mm,
697					   unsigned long address, pte_t pte)
698{
699}
700
701static inline void
702mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
703{
704}
705
706static inline int
707mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
708{
709	return 0;
710}
711
712static inline
713void mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
714{
715}
716
717static inline void
718mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
719{
720}
721
722static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
723				  unsigned long start, unsigned long end)
724{
725}
726
727static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
728{
729}
730
731static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
732{
733}
734
735#define mmu_notifier_range_update_to_read_only(r) false
736
737#define ptep_clear_flush_young_notify ptep_clear_flush_young
738#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
739#define ptep_clear_young_notify ptep_test_and_clear_young
740#define pmdp_clear_young_notify pmdp_test_and_clear_young
741#define	ptep_clear_flush_notify ptep_clear_flush
742#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
743#define pudp_huge_clear_flush_notify pudp_huge_clear_flush
744#define set_pte_at_notify set_pte_at
745
746static inline void mmu_notifier_synchronize(void)
747{
748}
749
750#endif /* CONFIG_MMU_NOTIFIER */
751
752#endif /* _LINUX_MMU_NOTIFIER_H */
v6.9.4
  1/* SPDX-License-Identifier: GPL-2.0 */
  2#ifndef _LINUX_MMU_NOTIFIER_H
  3#define _LINUX_MMU_NOTIFIER_H
  4
  5#include <linux/list.h>
  6#include <linux/spinlock.h>
  7#include <linux/mm_types.h>
  8#include <linux/mmap_lock.h>
  9#include <linux/srcu.h>
 10#include <linux/interval_tree.h>
 11
 12struct mmu_notifier_subscriptions;
 13struct mmu_notifier;
 14struct mmu_notifier_range;
 15struct mmu_interval_notifier;
 16
 17/**
 18 * enum mmu_notifier_event - reason for the mmu notifier callback
  19 * @MMU_NOTIFY_UNMAP: either a munmap() that unmaps the range or a mremap() that
  20 * moves the range
 21 *
 22 * @MMU_NOTIFY_CLEAR: clear page table entry (many reasons for this like
 23 * madvise() or replacing a page by another one, ...).
 24 *
  25 * @MMU_NOTIFY_PROTECTION_VMA: update is due to a protection change for the range,
  26 * i.e. using the vma access permission (vm_page_prot) to update the whole range
  27 * is enough; there is no need to inspect changes to the CPU page table (mprotect()
  28 * syscall).
 29 *
 30 * @MMU_NOTIFY_PROTECTION_PAGE: update is due to change in read/write flag for
 31 * pages in the range so to mirror those changes the user must inspect the CPU
 32 * page table (from the end callback).
 33 *
 34 * @MMU_NOTIFY_SOFT_DIRTY: soft dirty accounting (still same page and same
  35 * access flags). The user should soft-dirty the page in the end callback to make
  36 * sure that anyone relying on soft dirtiness catches pages that might be written
  37 * through non-CPU mappings.
 38 *
 39 * @MMU_NOTIFY_RELEASE: used during mmu_interval_notifier invalidate to signal
 40 * that the mm refcount is zero and the range is no longer accessible.
 41 *
 42 * @MMU_NOTIFY_MIGRATE: used during migrate_vma_collect() invalidate to signal
 43 * a device driver to possibly ignore the invalidation if the
 44 * owner field matches the driver's device private pgmap owner.
 45 *
 46 * @MMU_NOTIFY_EXCLUSIVE: to signal a device driver that the device will no
 47 * longer have exclusive access to the page. When sent during creation of an
 48 * exclusive range the owner will be initialised to the value provided by the
 49 * caller of make_device_exclusive_range(), otherwise the owner will be NULL.
 50 */
 51enum mmu_notifier_event {
 52	MMU_NOTIFY_UNMAP = 0,
 53	MMU_NOTIFY_CLEAR,
 54	MMU_NOTIFY_PROTECTION_VMA,
 55	MMU_NOTIFY_PROTECTION_PAGE,
 56	MMU_NOTIFY_SOFT_DIRTY,
 57	MMU_NOTIFY_RELEASE,
 58	MMU_NOTIFY_MIGRATE,
 59	MMU_NOTIFY_EXCLUSIVE,
 60};
 61
 62#define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0)
 63
 64struct mmu_notifier_ops {
 65	/*
 66	 * Called either by mmu_notifier_unregister or when the mm is
 67	 * being destroyed by exit_mmap, always before all pages are
 68	 * freed. This can run concurrently with other mmu notifier
 69	 * methods (the ones invoked outside the mm context) and it
 70	 * should tear down all secondary mmu mappings and freeze the
  71	 * secondary mmu. If this method isn't implemented you have to
 72	 * be sure that nothing could possibly write to the pages
 73	 * through the secondary mmu by the time the last thread with
 74	 * tsk->mm == mm exits.
 75	 *
  76	 * As a side note: the pages freed after ->release returns could
 77	 * be immediately reallocated by the gart at an alias physical
 78	 * address with a different cache model, so if ->release isn't
 79	 * implemented because all _software_ driven memory accesses
 80	 * through the secondary mmu are terminated by the time the
  81	 * last thread of this mm quits, you also have to be sure that
 82	 * speculative _hardware_ operations can't allocate dirty
 83	 * cachelines in the cpu that could not be snooped and made
 84	 * coherent with the other read and write operations happening
 85	 * through the gart alias address, so leading to memory
 86	 * corruption.
 87	 */
 88	void (*release)(struct mmu_notifier *subscription,
 89			struct mm_struct *mm);
 90
 91	/*
 92	 * clear_flush_young is called after the VM is
 93	 * test-and-clearing the young/accessed bitflag in the
 94	 * pte. This way the VM will provide proper aging to the
 95	 * accesses to the page through the secondary MMUs and not
 96	 * only to the ones through the Linux pte.
 97	 * Start-end is necessary in case the secondary MMU is mapping the page
 98	 * at a smaller granularity than the primary MMU.
 99	 */
100	int (*clear_flush_young)(struct mmu_notifier *subscription,
101				 struct mm_struct *mm,
102				 unsigned long start,
103				 unsigned long end);
104
105	/*
106	 * clear_young is a lightweight version of clear_flush_young. Like the
107	 * latter, it is supposed to test-and-clear the young/accessed bitflag
108	 * in the secondary pte, but it may omit flushing the secondary tlb.
109	 */
110	int (*clear_young)(struct mmu_notifier *subscription,
111			   struct mm_struct *mm,
112			   unsigned long start,
113			   unsigned long end);
114
115	/*
116	 * test_young is called to check the young/accessed bitflag in
117	 * the secondary pte. This is used to know if the page is
118	 * frequently used without actually clearing the flag or tearing
119	 * down the secondary mapping on the page.
120	 */
121	int (*test_young)(struct mmu_notifier *subscription,
122			  struct mm_struct *mm,
123			  unsigned long address);
124
125	/*
 126	 * change_pte is called when the pte mapping a page is changed:
 127	 * for example, when ksm remaps a pte to point to a new shared page.
128	 */
129	void (*change_pte)(struct mmu_notifier *subscription,
130			   struct mm_struct *mm,
131			   unsigned long address,
132			   pte_t pte);
133
134	/*
135	 * invalidate_range_start() and invalidate_range_end() must be
136	 * paired and are called only when the mmap_lock and/or the
137	 * locks protecting the reverse maps are held. If the subsystem
138	 * can't guarantee that no additional references are taken to
139	 * the pages in the range, it has to implement the
140	 * invalidate_range() notifier to remove any references taken
141	 * after invalidate_range_start().
142	 *
143	 * Invalidation of multiple concurrent ranges may be
144	 * optionally permitted by the driver. Either way the
145	 * establishment of sptes is forbidden in the range passed to
 146	 * invalidate_range_start/end for the whole duration of the
 147	 * invalidate_range_start/end critical section.
148	 *
149	 * invalidate_range_start() is called when all pages in the
150	 * range are still mapped and have at least a refcount of one.
151	 *
152	 * invalidate_range_end() is called when all pages in the
153	 * range have been unmapped and the pages have been freed by
154	 * the VM.
155	 *
156	 * The VM will remove the page table entries and potentially
157	 * the page between invalidate_range_start() and
158	 * invalidate_range_end(). If the page must not be freed
159	 * because of pending I/O or other circumstances then the
160	 * invalidate_range_start() callback (or the initial mapping
161	 * by the driver) must make sure that the refcount is kept
162	 * elevated.
163	 *
164	 * If the driver increases the refcount when the pages are
165	 * initially mapped into an address space then either
166	 * invalidate_range_start() or invalidate_range_end() may
167	 * decrease the refcount. If the refcount is decreased on
168	 * invalidate_range_start() then the VM can free pages as page
169	 * table entries are removed.  If the refcount is only
170	 * dropped on invalidate_range_end() then the driver itself
171	 * will drop the last refcount but it must take care to flush
172	 * any secondary tlb before doing the final free on the
173	 * page. Pages will no longer be referenced by the linux
174	 * address space but may still be referenced by sptes until
175	 * the last refcount is dropped.
176	 *
177	 * If blockable argument is set to false then the callback cannot
178	 * sleep and has to return with -EAGAIN if sleeping would be required.
179	 * 0 should be returned otherwise. Please note that notifiers that can
180	 * fail invalidate_range_start are not allowed to implement
181	 * invalidate_range_end, as there is no mechanism for informing the
182	 * notifier that its start failed.
183	 */
184	int (*invalidate_range_start)(struct mmu_notifier *subscription,
185				      const struct mmu_notifier_range *range);
186	void (*invalidate_range_end)(struct mmu_notifier *subscription,
187				     const struct mmu_notifier_range *range);
188
189	/*
190	 * arch_invalidate_secondary_tlbs() is used to manage a non-CPU TLB
191	 * which shares page-tables with the CPU. The
192	 * invalidate_range_start()/end() callbacks should not be implemented as
193	 * invalidate_secondary_tlbs() already catches the points in time when
 194	 * arch_invalidate_secondary_tlbs() already catches the points in time when
195	 *
196	 * This requires arch_invalidate_secondary_tlbs() to be called while
197	 * holding the ptl spin-lock and therefore this callback is not allowed
198	 * to sleep.
199	 *
200	 * This is called by architecture code whenever invalidating a TLB
201	 * entry. It is assumed that any secondary TLB has the same rules for
202	 * when invalidations are required. If this is not the case architecture
203	 * code will need to call this explicitly when required for secondary
204	 * TLB invalidation.
205	 */
206	void (*arch_invalidate_secondary_tlbs)(
207					struct mmu_notifier *subscription,
208					struct mm_struct *mm,
209					unsigned long start,
210					unsigned long end);
211
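/*
 * Illustrative sketch, not part of this header: a notifier for a device
 * TLB that shares the CPU page tables (e.g. SVA).  Only the TLB
 * shootdown hook is needed, and because it may run under the ptl
 * spin-lock it must not sleep.  struct my_sva and my_iotlb_inv_range()
 * are hypothetical; the helper is assumed to be safe in atomic context.
 */
struct my_sva {
	struct mmu_notifier notifier;
};

static void my_sva_invalidate_secondary_tlbs(struct mmu_notifier *subscription,
					     struct mm_struct *mm,
					     unsigned long start,
					     unsigned long end)
{
	struct my_sva *sva = container_of(subscription, struct my_sva, notifier);

	my_iotlb_inv_range(sva, start, end);
}

static const struct mmu_notifier_ops my_sva_ops = {
	.arch_invalidate_secondary_tlbs = my_sva_invalidate_secondary_tlbs,
};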
212	/*
213	 * These callbacks are used with the get/put interface to manage the
214	 * lifetime of the mmu_notifier memory. alloc_notifier() returns a new
215	 * notifier for use with the mm.
216	 *
217	 * free_notifier() is only called after the mmu_notifier has been
218	 * fully put, calls to any ops callback are prevented and no ops
219	 * callbacks are currently running. It is called from a SRCU callback
220	 * and cannot sleep.
221	 */
222	struct mmu_notifier *(*alloc_notifier)(struct mm_struct *mm);
223	void (*free_notifier)(struct mmu_notifier *subscription);
224};
225
226/*
227 * The notifier chains are protected by mmap_lock and/or the reverse map
228 * semaphores. Notifier chains are only changed when all reverse maps and
229 * the mmap_lock locks are taken.
230 *
231 * Therefore notifier chains can only be traversed when either
232 *
233 * 1. mmap_lock is held.
234 * 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem).
235 * 3. No other concurrent thread can access the list (release)
236 */
237struct mmu_notifier {
238	struct hlist_node hlist;
239	const struct mmu_notifier_ops *ops;
240	struct mm_struct *mm;
241	struct rcu_head rcu;
242	unsigned int users;
243};
244
245/**
246 * struct mmu_interval_notifier_ops
247 * @invalidate: Upon return the caller must stop using any SPTEs within this
248 *              range. This function can sleep. Return false only if sleeping
249 *              was required but mmu_notifier_range_blockable(range) is false.
250 */
251struct mmu_interval_notifier_ops {
252	bool (*invalidate)(struct mmu_interval_notifier *interval_sub,
253			   const struct mmu_notifier_range *range,
254			   unsigned long cur_seq);
255};
256
257struct mmu_interval_notifier {
258	struct interval_tree_node interval_tree;
259	const struct mmu_interval_notifier_ops *ops;
260	struct mm_struct *mm;
261	struct hlist_node deferred_item;
262	unsigned long invalidate_seq;
263};
264
265#ifdef CONFIG_MMU_NOTIFIER
266
267#ifdef CONFIG_LOCKDEP
268extern struct lockdep_map __mmu_notifier_invalidate_range_start_map;
269#endif
270
271struct mmu_notifier_range {
272	struct mm_struct *mm;
273	unsigned long start;
274	unsigned long end;
275	unsigned flags;
276	enum mmu_notifier_event event;
277	void *owner;
278};
279
280static inline int mm_has_notifiers(struct mm_struct *mm)
281{
282	return unlikely(mm->notifier_subscriptions);
283}
284
285struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
286					     struct mm_struct *mm);
287static inline struct mmu_notifier *
288mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm)
289{
290	struct mmu_notifier *ret;
291
292	mmap_write_lock(mm);
293	ret = mmu_notifier_get_locked(ops, mm);
294	mmap_write_unlock(mm);
295	return ret;
296}
297void mmu_notifier_put(struct mmu_notifier *subscription);
298void mmu_notifier_synchronize(void);
299
300extern int mmu_notifier_register(struct mmu_notifier *subscription,
301				 struct mm_struct *mm);
302extern int __mmu_notifier_register(struct mmu_notifier *subscription,
303				   struct mm_struct *mm);
304extern void mmu_notifier_unregister(struct mmu_notifier *subscription,
305				    struct mm_struct *mm);
306
307unsigned long
308mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub);
309int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
310				 struct mm_struct *mm, unsigned long start,
311				 unsigned long length,
312				 const struct mmu_interval_notifier_ops *ops);
313int mmu_interval_notifier_insert_locked(
314	struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
315	unsigned long start, unsigned long length,
316	const struct mmu_interval_notifier_ops *ops);
317void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub);
318
319/**
320 * mmu_interval_set_seq - Save the invalidation sequence
321 * @interval_sub - The subscription passed to invalidate
322 * @cur_seq - The cur_seq passed to the invalidate() callback
323 *
324 * This must be called unconditionally from the invalidate callback of a
325 * struct mmu_interval_notifier_ops under the same lock that is used to call
326 * mmu_interval_read_retry(). It updates the sequence number for later use by
327 * mmu_interval_read_retry(). The provided cur_seq will always be odd.
328 *
329 * If the caller does not call mmu_interval_read_begin() or
330 * mmu_interval_read_retry() then this call is not required.
331 */
332static inline void
333mmu_interval_set_seq(struct mmu_interval_notifier *interval_sub,
334		     unsigned long cur_seq)
335{
336	WRITE_ONCE(interval_sub->invalidate_seq, cur_seq);
337}
338
339/**
340 * mmu_interval_read_retry - End a read side critical section against a VA range
341 * interval_sub: The subscription
342 * seq: The return of the paired mmu_interval_read_begin()
343 *
344 * This MUST be called under a user provided lock that is also held
345 * unconditionally by op->invalidate() when it calls mmu_interval_set_seq().
346 *
347 * Each call should be paired with a single mmu_interval_read_begin() and
348 * should be used to conclude the read side.
349 *
350 * Returns true if an invalidation collided with this critical section, and
351 * the caller should retry.
352 */
353static inline bool
354mmu_interval_read_retry(struct mmu_interval_notifier *interval_sub,
355			unsigned long seq)
356{
357	return interval_sub->invalidate_seq != seq;
358}
359
360/**
361 * mmu_interval_check_retry - Test if a collision has occurred
362 * interval_sub: The subscription
363 * seq: The return of the matching mmu_interval_read_begin()
364 *
365 * This can be used in the critical section between mmu_interval_read_begin()
366 * and mmu_interval_read_retry().  A return of true indicates an invalidation
367 * has collided with this critical region and a future
368 * mmu_interval_read_retry() will return true.
369 *
370 * False is not reliable and only suggests a collision may not have
371 * occurred. It can be called many times and does not have to hold the user
372 * provided lock.
373 *
374 * This call can be used as part of loops and other expensive operations to
375 * expedite a retry.
376 */
377static inline bool
378mmu_interval_check_retry(struct mmu_interval_notifier *interval_sub,
379			 unsigned long seq)
380{
381	/* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
382	return READ_ONCE(interval_sub->invalidate_seq) != seq;
383}
384
385extern void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm);
386extern void __mmu_notifier_release(struct mm_struct *mm);
387extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
388					  unsigned long start,
389					  unsigned long end);
390extern int __mmu_notifier_clear_young(struct mm_struct *mm,
391				      unsigned long start,
392				      unsigned long end);
393extern int __mmu_notifier_test_young(struct mm_struct *mm,
394				     unsigned long address);
395extern void __mmu_notifier_change_pte(struct mm_struct *mm,
396				      unsigned long address, pte_t pte);
397extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r);
398extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r);
399extern void __mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
400					unsigned long start, unsigned long end);
401extern bool
402mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range);
403
404static inline bool
405mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
406{
407	return (range->flags & MMU_NOTIFIER_RANGE_BLOCKABLE);
408}
409
410static inline void mmu_notifier_release(struct mm_struct *mm)
411{
412	if (mm_has_notifiers(mm))
413		__mmu_notifier_release(mm);
414}
415
416static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
417					  unsigned long start,
418					  unsigned long end)
419{
420	if (mm_has_notifiers(mm))
421		return __mmu_notifier_clear_flush_young(mm, start, end);
422	return 0;
423}
424
425static inline int mmu_notifier_clear_young(struct mm_struct *mm,
426					   unsigned long start,
427					   unsigned long end)
428{
429	if (mm_has_notifiers(mm))
430		return __mmu_notifier_clear_young(mm, start, end);
431	return 0;
432}
433
434static inline int mmu_notifier_test_young(struct mm_struct *mm,
435					  unsigned long address)
436{
437	if (mm_has_notifiers(mm))
438		return __mmu_notifier_test_young(mm, address);
439	return 0;
440}
441
442static inline void mmu_notifier_change_pte(struct mm_struct *mm,
443					   unsigned long address, pte_t pte)
444{
445	if (mm_has_notifiers(mm))
446		__mmu_notifier_change_pte(mm, address, pte);
447}
448
449static inline void
450mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
451{
452	might_sleep();
453
454	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
455	if (mm_has_notifiers(range->mm)) {
456		range->flags |= MMU_NOTIFIER_RANGE_BLOCKABLE;
457		__mmu_notifier_invalidate_range_start(range);
458	}
459	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
460}
461
462/*
463 * This version of mmu_notifier_invalidate_range_start() avoids blocking, but it
464 * can return an error if a notifier can't proceed without blocking, in which
465 * case you're not allowed to modify PTEs in the specified range.
466 *
467 * This is mainly intended for OOM handling.
468 */
469static inline int __must_check
470mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
471{
472	int ret = 0;
473
474	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
475	if (mm_has_notifiers(range->mm)) {
476		range->flags &= ~MMU_NOTIFIER_RANGE_BLOCKABLE;
477		ret = __mmu_notifier_invalidate_range_start(range);
478	}
479	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
480	return ret;
481}
482
483static inline void
484mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
485{
486	if (mmu_notifier_range_blockable(range))
487		might_sleep();
488
489	if (mm_has_notifiers(range->mm))
490		__mmu_notifier_invalidate_range_end(range);
491}
492
493static inline void mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
494					unsigned long start, unsigned long end)
495{
496	if (mm_has_notifiers(mm))
497		__mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end);
498}
499
500static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
501{
502	mm->notifier_subscriptions = NULL;
503}
504
505static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
506{
507	if (mm_has_notifiers(mm))
508		__mmu_notifier_subscriptions_destroy(mm);
509}
510
511
512static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
513					   enum mmu_notifier_event event,
514					   unsigned flags,
515					   struct mm_struct *mm,
516					   unsigned long start,
517					   unsigned long end)
518{
519	range->event = event;
520	range->mm = mm;
521	range->start = start;
522	range->end = end;
523	range->flags = flags;
524}
525
526static inline void mmu_notifier_range_init_owner(
527			struct mmu_notifier_range *range,
528			enum mmu_notifier_event event, unsigned int flags,
529			struct mm_struct *mm, unsigned long start,
530			unsigned long end, void *owner)
531{
532	mmu_notifier_range_init(range, event, flags, mm, start, end);
533	range->owner = owner;
534}
535
536#define ptep_clear_flush_young_notify(__vma, __address, __ptep)		\
537({									\
538	int __young;							\
539	struct vm_area_struct *___vma = __vma;				\
540	unsigned long ___address = __address;				\
541	__young = ptep_clear_flush_young(___vma, ___address, __ptep);	\
542	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
543						  ___address,		\
544						  ___address +		\
545							PAGE_SIZE);	\
546	__young;							\
547})
548
549#define pmdp_clear_flush_young_notify(__vma, __address, __pmdp)		\
550({									\
551	int __young;							\
552	struct vm_area_struct *___vma = __vma;				\
553	unsigned long ___address = __address;				\
554	__young = pmdp_clear_flush_young(___vma, ___address, __pmdp);	\
555	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
556						  ___address,		\
557						  ___address +		\
558							PMD_SIZE);	\
559	__young;							\
560})
561
562#define ptep_clear_young_notify(__vma, __address, __ptep)		\
563({									\
564	int __young;							\
565	struct vm_area_struct *___vma = __vma;				\
566	unsigned long ___address = __address;				\
567	__young = ptep_test_and_clear_young(___vma, ___address, __ptep);\
568	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
569					    ___address + PAGE_SIZE);	\
570	__young;							\
571})
572
573#define pmdp_clear_young_notify(__vma, __address, __pmdp)		\
574({									\
575	int __young;							\
576	struct vm_area_struct *___vma = __vma;				\
577	unsigned long ___address = __address;				\
578	__young = pmdp_test_and_clear_young(___vma, ___address, __pmdp);\
579	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
580					    ___address + PMD_SIZE);	\
581	__young;							\
582})
583
584/*
585 * set_pte_at_notify() sets the pte _after_ running the notifier.
 586 * It is safe to start by updating the secondary MMUs, because the primary MMU
587 * pte invalidate must have already happened with a ptep_clear_flush() before
588 * set_pte_at_notify() has been invoked.  Updating the secondary MMUs first is
589 * required when we change both the protection of the mapping from read-only to
590 * read-write and the pfn (like during copy on write page faults). Otherwise the
591 * old page would remain mapped readonly in the secondary MMUs after the new
592 * page is already writable by some CPU through the primary MMU.
593 */
594#define set_pte_at_notify(__mm, __address, __ptep, __pte)		\
595({									\
596	struct mm_struct *___mm = __mm;					\
597	unsigned long ___address = __address;				\
598	pte_t ___pte = __pte;						\
599									\
600	mmu_notifier_change_pte(___mm, ___address, ___pte);		\
601	set_pte_at(___mm, ___address, __ptep, ___pte);			\
602})
603
604#else /* CONFIG_MMU_NOTIFIER */
605
606struct mmu_notifier_range {
607	unsigned long start;
608	unsigned long end;
609};
610
611static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
612					    unsigned long start,
613					    unsigned long end)
614{
615	range->start = start;
616	range->end = end;
617}
618
619#define mmu_notifier_range_init(range,event,flags,mm,start,end)  \
620	_mmu_notifier_range_init(range, start, end)
621#define mmu_notifier_range_init_owner(range, event, flags, mm, start, \
622					end, owner) \
623	_mmu_notifier_range_init(range, start, end)
624
625static inline bool
626mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
627{
628	return true;
629}
630
631static inline int mm_has_notifiers(struct mm_struct *mm)
632{
633	return 0;
634}
635
636static inline void mmu_notifier_release(struct mm_struct *mm)
637{
638}
639
640static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
641					  unsigned long start,
642					  unsigned long end)
643{
644	return 0;
645}
646
647static inline int mmu_notifier_test_young(struct mm_struct *mm,
648					  unsigned long address)
649{
650	return 0;
651}
652
653static inline void mmu_notifier_change_pte(struct mm_struct *mm,
654					   unsigned long address, pte_t pte)
655{
656}
657
658static inline void
659mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
660{
661}
662
663static inline int
664mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
665{
666	return 0;
667}
668
669static inline
670void mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
671{
672}
673
674static inline void mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
675				  unsigned long start, unsigned long end)
676{
677}
678
679static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
680{
681}
682
683static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
684{
685}
686
687#define mmu_notifier_range_update_to_read_only(r) false
688
689#define ptep_clear_flush_young_notify ptep_clear_flush_young
690#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
691#define ptep_clear_young_notify ptep_test_and_clear_young
692#define pmdp_clear_young_notify pmdp_test_and_clear_young
693#define	ptep_clear_flush_notify ptep_clear_flush
694#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
695#define pudp_huge_clear_flush_notify pudp_huge_clear_flush
696#define set_pte_at_notify set_pte_at
697
698static inline void mmu_notifier_synchronize(void)
699{
700}
701
702#endif /* CONFIG_MMU_NOTIFIER */
703
704#endif /* _LINUX_MMU_NOTIFIER_H */