/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * vma_internal.h
 *
 * Header providing userland wrappers and shims for the functionality provided
 * by mm/vma_internal.h.
 *
 * We make the header guard the same as mm/vma_internal.h, so if this shim
 * header is included, it precludes the inclusion of the kernel one.
 */

#ifndef __MM_VMA_INTERNAL_H
#define __MM_VMA_INTERNAL_H

#define __private
#define __bitwise
#define __randomize_layout

#define CONFIG_MMU
#define CONFIG_PER_VMA_LOCK

#include <stdlib.h>

#include <linux/list.h>
#include <linux/maple_tree.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>

#define VM_WARN_ON(_expr) (WARN_ON(_expr))
#define VM_WARN_ON_ONCE(_expr) (WARN_ON_ONCE(_expr))
#define VM_BUG_ON(_expr) (BUG_ON(_expr))
#define VM_BUG_ON_VMA(_expr, _vma) (BUG_ON(_expr))

#define VM_NONE		0x00000000
#define VM_READ		0x00000001
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008
#define VM_MAYREAD	0x00000010
#define VM_MAYWRITE	0x00000020
#define VM_GROWSDOWN	0x00000100
#define VM_PFNMAP	0x00000400
#define VM_LOCKED	0x00002000
#define VM_IO		0x00004000
#define VM_DONTEXPAND	0x00040000
#define VM_LOCKONFAULT	0x00080000
#define VM_ACCOUNT	0x00100000
#define VM_NORESERVE	0x00200000
#define VM_MIXEDMAP	0x10000000
#define VM_STACK	VM_GROWSDOWN
#define VM_SHADOW_STACK	VM_NONE
#define VM_SOFTDIRTY	0

#define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)

/* This mask represents all the VMA flag bits used by mlock */
#define VM_LOCKED_MASK	(VM_LOCKED | VM_LOCKONFAULT)

#ifdef CONFIG_64BIT
/* VM is sealed, in vm_flags */
#define VM_SEALED	_BITUL(63)
#endif

#define FIRST_USER_ADDRESS	0UL
#define USER_PGTABLES_CEILING	0UL

#define vma_policy(vma) NULL

#define down_write_nest_lock(sem, nest_lock)

#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) } )

#define for_each_vma(__vmi, __vma)					\
	while (((__vma) = vma_next(&(__vmi))) != NULL)

/* The MM code likes to work with exclusive end addresses */
#define for_each_vma_range(__vmi, __vma, __end)				\
	while (((__vma) = vma_find(&(__vmi), (__end))) != NULL)

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

#define PHYS_PFN(x)	((unsigned long)((x) >> PAGE_SHIFT))

#define test_and_set_bit(nr, addr) __test_and_set_bit(nr, addr)
#define test_and_clear_bit(nr, addr) __test_and_clear_bit(nr, addr)

#define TASK_SIZE ((1ul << 47)-PAGE_SIZE)

#define AS_MM_ALL_LOCKS 2

/* We hardcode this for now. */
#define sysctl_max_map_count 0x1000000UL

#define pgoff_t unsigned long
typedef unsigned long	pgprotval_t;
typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
typedef unsigned long	vm_flags_t;
typedef __bitwise unsigned int vm_fault_t;

/*
 * The shared stubs do not implement this; it amounts to an fprintf(STDERR,...)
 * either way :)
 */
#define pr_warn_once pr_err

typedef struct refcount_struct {
	atomic_t refs;
} refcount_t;

struct kref {
	refcount_t refcount;
};

/*
 * Define the task command name length as an enum, so it can be visible to
 * BPF programs.
 */
enum {
	TASK_COMM_LEN = 16,
};

struct task_struct {
	char comm[TASK_COMM_LEN];
	pid_t pid;
	struct mm_struct *mm;
};

struct task_struct *get_current(void);
#define current get_current()

struct anon_vma {
	struct anon_vma *root;
	struct rb_root_cached rb_root;

	/* Test fields. */
	bool was_cloned;
	bool was_unlinked;
};

struct anon_vma_chain {
	struct anon_vma *anon_vma;
	struct list_head same_vma;
};

struct anon_vma_name {
	struct kref kref;
	/* The name needs to be at the end because it is dynamically sized. */
	char name[];
};

struct vma_iterator {
	struct ma_state mas;
};

#define VMA_ITERATOR(name, __mm, __addr)				\
	struct vma_iterator name = {					\
		.mas = {						\
			.tree = &(__mm)->mm_mt,				\
			.index = __addr,				\
			.node = NULL,					\
			.status = ma_start,				\
		},							\
	}
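
/*
 * Illustrative note (not part of the original header): e.g.,
 * VMA_ITERATOR(vmi, mm, 0) declares an iterator "vmi" over mm->mm_mt
 * starting at address 0, ready for the vma_next()/vma_find() shims below.
 */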

struct address_space {
	struct rb_root_cached i_mmap;
	unsigned long flags;
	atomic_t i_mmap_writable;
};

struct vm_userfaultfd_ctx {};
struct mempolicy {};
struct mmu_gather {};
struct mutex {};
#define DEFINE_MUTEX(mutexname) \
	struct mutex mutexname = {}

struct mm_struct {
	struct maple_tree mm_mt;
	int map_count;			/* number of VMAs */
	unsigned long total_vm;		/* Total pages mapped */
	unsigned long locked_vm;	/* Pages that have PG_mlocked set */
	unsigned long data_vm;		/* VM_WRITE & ~VM_SHARED & ~VM_STACK */
	unsigned long exec_vm;		/* VM_EXEC & ~VM_WRITE & ~VM_STACK */
	unsigned long stack_vm;		/* VM_STACK */
};

struct vma_lock {
	struct rw_semaphore lock;
};

struct file {
	struct address_space *f_mapping;
};

struct vm_area_struct {
	/* The first cache line has the info for VMA tree walking. */

	union {
		struct {
			/* VMA covers [vm_start; vm_end) addresses within mm */
			unsigned long vm_start;
			unsigned long vm_end;
		};
#ifdef CONFIG_PER_VMA_LOCK
		struct rcu_head vm_rcu;	/* Used for deferred freeing. */
#endif
	};

	struct mm_struct *vm_mm;	/* The address space we belong to. */
	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */

	/*
	 * Flags, see mm.h.
	 * To modify use vm_flags_{init|reset|set|clear|mod} functions.
	 */
	union {
		const vm_flags_t vm_flags;
		vm_flags_t __private __vm_flags;
	};

#ifdef CONFIG_PER_VMA_LOCK
	/* Flag to indicate areas detached from the mm->mm_mt tree */
	bool detached;

	/*
	 * Can only be written (using WRITE_ONCE()) while holding both:
	 *  - mmap_lock (in write mode)
	 *  - vm_lock->lock (in write mode)
	 * Can be read reliably while holding one of:
	 *  - mmap_lock (in read or write mode)
	 *  - vm_lock->lock (in read or write mode)
	 * Can be read unreliably (using READ_ONCE()) for pessimistic bailout
	 * while holding nothing (except RCU to keep the VMA struct allocated).
	 *
	 * This sequence counter is explicitly allowed to overflow; sequence
	 * counter reuse can only lead to occasional unnecessary use of the
	 * slowpath.
	 */
	int vm_lock_seq;
	struct vma_lock *vm_lock;
#endif

	/*
	 * For areas with an address space and backing store,
	 * linkage into the address_space->i_mmap interval tree.
	 */
	struct {
		struct rb_node rb;
		unsigned long rb_subtree_last;
	} shared;

	/*
	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
	 * list, after a COW of one of the file pages. A MAP_SHARED vma
	 * can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack
	 * or brk vma (with NULL file) can only be in an anon_vma list.
	 */
	struct list_head anon_vma_chain; /* Serialized by mmap_lock &
					  * page_table_lock */
	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */

	/* Function pointers to deal with this struct. */
	const struct vm_operations_struct *vm_ops;

	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
					   units */
	struct file *vm_file;		/* File we map to (can be NULL). */
	void *vm_private_data;		/* was vm_pte (shared mem) */

#ifdef CONFIG_ANON_VMA_NAME
	/*
	 * For private and shared anonymous mappings, a pointer to a null
	 * terminated string containing the name given to the vma, or NULL if
	 * unnamed. Serialized by mmap_lock. Use anon_vma_name to access.
	 */
	struct anon_vma_name *anon_name;
#endif
#ifdef CONFIG_SWAP
	atomic_long_t swap_readahead_info;
#endif
#ifndef CONFIG_MMU
	struct vm_region *vm_region;	/* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
#endif
#ifdef CONFIG_NUMA_BALANCING
	struct vma_numab_state *numab_state;	/* NUMA Balancing state */
#endif
	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
} __randomize_layout;

struct vm_fault {};

struct vm_operations_struct {
	void (*open)(struct vm_area_struct *area);
	/**
	 * @close: Called when the VMA is being removed from the MM.
	 * Context: User context. May sleep. Caller holds mmap_lock.
	 */
	void (*close)(struct vm_area_struct *area);
	/* Called any time before splitting to check if it's allowed */
	int (*may_split)(struct vm_area_struct *area, unsigned long addr);
	int (*mremap)(struct vm_area_struct *area);
	/*
	 * Called by mprotect() to make driver-specific permission
	 * checks before mprotect() is finalised. The VMA must not
	 * be modified. Returns 0 if mprotect() can proceed.
	 */
	int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
			unsigned long end, unsigned long newflags);
	vm_fault_t (*fault)(struct vm_fault *vmf);
	vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order);
	vm_fault_t (*map_pages)(struct vm_fault *vmf,
			pgoff_t start_pgoff, pgoff_t end_pgoff);
	unsigned long (*pagesize)(struct vm_area_struct *area);

	/* Notification that a previously read-only page is about to become
	 * writable; if an error is returned it will cause a SIGBUS. */
	vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);

	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);

	/* called by access_process_vm when get_user_pages() fails, typically
	 * for use by special VMAs. See also generic_access_phys() for a generic
	 * implementation useful for any iomem mapping.
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);

	/* Called by the /proc/PID/maps code to ask the vma whether it
	 * has a special name. Returning non-NULL will also cause this
	 * vma to be dumped unconditionally. */
	const char *(*name)(struct vm_area_struct *vma);

#ifdef CONFIG_NUMA
	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return. Caller should pass NULL @new to
	 * remove a policy and fall back to surrounding context--i.e. do not
	 * install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure
	 * in mm/mempolicy.c will do this automatically.
	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
	 * marked as MPOL_SHARED. vma policies are protected by the mmap_lock.
	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
	 * must return NULL--i.e., do not "fallback" to task or system default
	 * policy.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr, pgoff_t *ilx);
#endif
	/*
	 * Called by vm_normal_page() for special PTEs to find the
	 * page for @addr. This is useful if the default behavior
	 * (using pte_page()) would not find the correct page.
	 */
	struct page *(*find_special_page)(struct vm_area_struct *vma,
					  unsigned long addr);
};

static inline void vma_iter_invalidate(struct vma_iterator *vmi)
{
	mas_pause(&vmi->mas);
}

static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	return __pgprot(pgprot_val(oldprot) | pgprot_val(newprot));
}
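
/*
 * Illustrative note (not part of the original header): in this shim a
 * pgprot_t is just the raw flag word, so e.g.
 * pgprot_modify(__pgprot(VM_READ), __pgprot(VM_WRITE)) yields a pgprot_t
 * whose value is VM_READ | VM_WRITE. The kernel's real helper is more
 * involved; this simplified OR is enough for the tests.
 */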

static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return __pgprot(vm_flags);
}

static inline bool is_shared_maywrite(vm_flags_t vm_flags)
{
	return (vm_flags & (VM_SHARED | VM_MAYWRITE)) ==
		(VM_SHARED | VM_MAYWRITE);
}

static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma)
{
	return is_shared_maywrite(vma->vm_flags);
}

static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
{
	/*
	 * Uses mas_find() to get the first VMA when the iterator starts.
	 * Calling mas_next() could skip the first entry.
	 */
	return mas_find(&vmi->mas, ULONG_MAX);
}
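
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): how a userland test might walk every VMA in an mm using the
 * shims above. Assumes the maple tree has been populated elsewhere.
 */
static inline int example_count_vmas(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	int count = 0;
	VMA_ITERATOR(vmi, mm, 0);

	for_each_vma(vmi, vma)
		count++;

	return count;
}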

static inline bool vma_lock_alloc(struct vm_area_struct *vma)
{
	vma->vm_lock = calloc(1, sizeof(struct vma_lock));

	if (!vma->vm_lock)
		return false;

	init_rwsem(&vma->vm_lock->lock);
	vma->vm_lock_seq = -1;

	return true;
}

static inline void vma_assert_write_locked(struct vm_area_struct *);
static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached)
{
	/* When detaching, the VMA should be write-locked. */
	if (detached)
		vma_assert_write_locked(vma);
	vma->detached = detached;
}

extern const struct vm_operations_struct vma_dummy_vm_ops;

static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
{
	memset(vma, 0, sizeof(*vma));
	vma->vm_mm = mm;
	vma->vm_ops = &vma_dummy_vm_ops;
	INIT_LIST_HEAD(&vma->anon_vma_chain);
	vma_mark_detached(vma, false);
}

static inline struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
{
	struct vm_area_struct *vma = calloc(1, sizeof(struct vm_area_struct));

	if (!vma)
		return NULL;

	vma_init(vma, mm);
	if (!vma_lock_alloc(vma)) {
		free(vma);
		return NULL;
	}

	return vma;
}

static inline struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
{
	struct vm_area_struct *new = calloc(1, sizeof(struct vm_area_struct));

	if (!new)
		return NULL;

	memcpy(new, orig, sizeof(*new));
	if (!vma_lock_alloc(new)) {
		free(new);
		return NULL;
	}
	INIT_LIST_HEAD(&new->anon_vma_chain);

	return new;
}

/*
 * These are defined in vma.h, but sadly vm_stat_account() is referenced by
 * kernel/fork.c, so we have to make these broadly available there, and
 * temporarily define them here to resolve the dependency cycle.
 */

#define is_exec_mapping(flags) \
	((flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC)

#define is_stack_mapping(flags) \
	(((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK))

#define is_data_mapping(flags) \
	((flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE)

static inline void vm_stat_account(struct mm_struct *mm, vm_flags_t flags,
				   long npages)
{
	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm)+npages);

	if (is_exec_mapping(flags))
		mm->exec_vm += npages;
	else if (is_stack_mapping(flags))
		mm->stack_vm += npages;
	else if (is_data_mapping(flags))
		mm->data_vm += npages;
}

#undef is_exec_mapping
#undef is_stack_mapping
#undef is_data_mapping
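
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): vm_stat_account() classifies a mapping exactly once. For
 * example, accounting four pages of a private writable mapping grows
 * both total_vm and data_vm by four.
 */
static inline void example_account_data_pages(struct mm_struct *mm)
{
	/* VM_READ | VM_WRITE is a data mapping: writable, not shared, not stack. */
	vm_stat_account(mm, VM_READ | VM_WRITE, 4);
	/* mm->total_vm and mm->data_vm have each grown by 4 pages. */
}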

/* Currently stubbed but we may later wish to un-stub. */
static inline void vm_acct_memory(long pages);
static inline void vm_unacct_memory(long pages)
{
	vm_acct_memory(-pages);
}

static inline void mapping_allow_writable(struct address_space *mapping)
{
	atomic_inc(&mapping->i_mmap_writable);
}

static inline void vma_set_range(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end,
				 pgoff_t pgoff)
{
	vma->vm_start = start;
	vma->vm_end = end;
	vma->vm_pgoff = pgoff;
}

static inline
struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
{
	return mas_find(&vmi->mas, max - 1);
}

static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
			unsigned long start, unsigned long end, gfp_t gfp)
{
	__mas_set_range(&vmi->mas, start, end - 1);
	mas_store_gfp(&vmi->mas, NULL, gfp);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	return 0;
}

static inline void mmap_assert_locked(struct mm_struct *);
static inline struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
						unsigned long start_addr,
						unsigned long end_addr)
{
	unsigned long index = start_addr;

	mmap_assert_locked(mm);
	return mt_find(&mm->mm_mt, &index, end_addr - 1);
}

static inline
struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
{
	return mtree_load(&mm->mm_mt, addr);
}

static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
{
	return mas_prev(&vmi->mas, 0);
}

static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
{
	mas_set(&vmi->mas, addr);
}

static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
	return !vma->vm_ops;
}

/* Defined in vma.h, so temporarily define here to avoid circular dependency. */
#define vma_iter_load(vmi) \
	mas_walk(&(vmi)->mas)

static inline struct vm_area_struct *
find_vma_prev(struct mm_struct *mm, unsigned long addr,
	      struct vm_area_struct **pprev)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, addr);

	vma = vma_iter_load(&vmi);
	*pprev = vma_prev(&vmi);
	if (!vma)
		vma = vma_next(&vmi);
	return vma;
}
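
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): find_vma_prev() returns the VMA covering or following addr
 * and reports its predecessor through *pprev in the same walk.
 */
static inline bool example_has_predecessor(struct mm_struct *mm,
					   unsigned long addr)
{
	struct vm_area_struct *prev;

	find_vma_prev(mm, addr, &prev);
	return prev != NULL;
}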

#undef vma_iter_load

static inline void vma_iter_init(struct vma_iterator *vmi,
		struct mm_struct *mm, unsigned long addr)
{
	mas_init(&vmi->mas, &mm->mm_mt, addr);
}

/* Stubbed functions. */

static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
					struct vm_userfaultfd_ctx vm_ctx)
{
	return true;
}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	return true;
}

static inline void might_sleep(void)
{
}

static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}

static inline void fput(struct file *)
{
}

static inline void mpol_put(struct mempolicy *)
{
}

static inline void vma_lock_free(struct vm_area_struct *vma)
{
	free(vma->vm_lock);
}

static inline void __vm_area_free(struct vm_area_struct *vma)
{
	vma_lock_free(vma);
	free(vma);
}

static inline void vm_area_free(struct vm_area_struct *vma)
{
	__vm_area_free(vma);
}
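
/*
 * Illustrative lifecycle sketch (hypothetical helper, not part of the
 * original header): vm_area_alloc() also allocates the per-VMA lock,
 * which vm_area_free() releases again via vma_lock_free().
 */
static inline int example_vma_lifecycle(struct mm_struct *mm)
{
	struct vm_area_struct *vma = vm_area_alloc(mm);

	if (!vma)
		return -ENOMEM;

	/* Covers [0x1000, 0x3000) with a file offset of zero pages. */
	vma_set_range(vma, 0x1000, 0x3000, 0);
	vm_area_free(vma);
	return 0;
}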

static inline void lru_add_drain(void)
{
}

static inline void tlb_gather_mmu(struct mmu_gather *, struct mm_struct *)
{
}

static inline void update_hiwater_rss(struct mm_struct *)
{
}

static inline void update_hiwater_vm(struct mm_struct *)
{
}

static inline void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
		      struct vm_area_struct *vma, unsigned long start_addr,
		      unsigned long end_addr, unsigned long tree_end,
		      bool mm_wr_locked)
{
	(void)tlb;
	(void)mas;
	(void)vma;
	(void)start_addr;
	(void)end_addr;
	(void)tree_end;
	(void)mm_wr_locked;
}

static inline void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
		   struct vm_area_struct *vma, unsigned long floor,
		   unsigned long ceiling, bool mm_wr_locked)
{
	(void)tlb;
	(void)mas;
	(void)vma;
	(void)floor;
	(void)ceiling;
	(void)mm_wr_locked;
}

static inline void mapping_unmap_writable(struct address_space *)
{
}

static inline void flush_dcache_mmap_lock(struct address_space *)
{
}

static inline void tlb_finish_mmu(struct mmu_gather *)
{
}

static inline struct file *get_file(struct file *f)
{
	return f;
}

static inline int vma_dup_policy(struct vm_area_struct *, struct vm_area_struct *)
{
	return 0;
}

static inline int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
	/* For testing purposes. We indicate that an anon_vma has been cloned. */
	if (src->anon_vma != NULL) {
		dst->anon_vma = src->anon_vma;
		dst->anon_vma->was_cloned = true;
	}

	return 0;
}

static inline void vma_start_write(struct vm_area_struct *vma)
{
	/* Used to indicate to tests that a write operation has begun. */
	vma->vm_lock_seq++;
}
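
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): because the vma_start_write() stub just bumps vm_lock_seq,
 * a test can snapshot the counter to detect that a VMA was written to.
 */
static inline bool example_vma_was_written(struct vm_area_struct *vma,
					   int seq_before)
{
	return vma->vm_lock_seq != seq_before;
}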

static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
	(void)vma;
	(void)start;
	(void)end;
	(void)adjust_next;
}

static inline void vma_iter_free(struct vma_iterator *vmi)
{
	mas_destroy(&vmi->mas);
}

static inline
struct vm_area_struct *vma_iter_next_range(struct vma_iterator *vmi)
{
	return mas_next_range(&vmi->mas, ULONG_MAX);
}

static inline void vm_acct_memory(long pages)
{
}

static inline void vma_interval_tree_insert(struct vm_area_struct *,
					    struct rb_root_cached *)
{
}

static inline void vma_interval_tree_remove(struct vm_area_struct *,
					    struct rb_root_cached *)
{
}

static inline void flush_dcache_mmap_unlock(struct address_space *)
{
}

static inline void anon_vma_interval_tree_insert(struct anon_vma_chain *,
						 struct rb_root_cached *)
{
}

static inline void anon_vma_interval_tree_remove(struct anon_vma_chain *,
						 struct rb_root_cached *)
{
}

static inline void uprobe_mmap(struct vm_area_struct *)
{
}

static inline void uprobe_munmap(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end)
{
	(void)vma;
	(void)start;
	(void)end;
}

static inline void i_mmap_lock_write(struct address_space *)
{
}

static inline void anon_vma_lock_write(struct anon_vma *)
{
}

static inline void vma_assert_write_locked(struct vm_area_struct *)
{
}

static inline void unlink_anon_vmas(struct vm_area_struct *vma)
{
	/* For testing purposes, indicate that the anon_vma was unlinked. */
	vma->anon_vma->was_unlinked = true;
}

static inline void anon_vma_unlock_write(struct anon_vma *)
{
}

static inline void i_mmap_unlock_write(struct address_space *)
{
}

static inline void anon_vma_merge(struct vm_area_struct *,
				  struct vm_area_struct *)
{
}

static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 struct list_head *unmaps)
{
	(void)vma;
	(void)start;
	(void)end;
	(void)unmaps;

	return 0;
}

static inline void mmap_write_downgrade(struct mm_struct *)
{
}

static inline void mmap_read_unlock(struct mm_struct *)
{
}

static inline void mmap_write_unlock(struct mm_struct *)
{
}

static inline bool can_modify_mm(struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end)
{
	(void)mm;
	(void)start;
	(void)end;

	return true;
}

static inline void arch_unmap(struct mm_struct *mm,
			      unsigned long start,
			      unsigned long end)
{
	(void)mm;
	(void)start;
	(void)end;
}

static inline void mmap_assert_locked(struct mm_struct *)
{
}

static inline bool mpol_equal(struct mempolicy *, struct mempolicy *)
{
	return true;
}

static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
					unsigned long vm_flags)
{
	(void)vma;
	(void)vm_flags;
}

static inline bool mapping_can_writeback(struct address_space *)
{
	return true;
}

static inline bool is_vm_hugetlb_page(struct vm_area_struct *)
{
	return false;
}

static inline bool vma_soft_dirty_enabled(struct vm_area_struct *)
{
	return false;
}

static inline bool userfaultfd_wp(struct vm_area_struct *)
{
	return false;
}

static inline void mmap_assert_write_locked(struct mm_struct *)
{
}

static inline void mutex_lock(struct mutex *)
{
}

static inline void mutex_unlock(struct mutex *)
{
}

static inline bool mutex_is_locked(struct mutex *)
{
	return true;
}

static inline bool signal_pending(void *)
{
	return false;
}

static inline bool is_file_hugepages(struct file *)
{
	return false;
}

static inline int security_vm_enough_memory_mm(struct mm_struct *, long)
{
	return true;
}

static inline bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long)
{
	return true;
}

static inline void vm_flags_init(struct vm_area_struct *vma,
				 vm_flags_t flags)
{
	vma->__vm_flags = flags;
}

static inline void vm_flags_set(struct vm_area_struct *vma,
				vm_flags_t flags)
{
	vma_start_write(vma);
	vma->__vm_flags |= flags;
}

static inline void vm_flags_clear(struct vm_area_struct *vma,
				  vm_flags_t flags)
{
	vma_start_write(vma);
	vma->__vm_flags &= ~flags;
}
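
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): flags are modified through the vm_flags_*() accessors so the
 * const-qualified vm_flags view stays read-only and the write is
 * recorded via vma_start_write().
 */
static inline void example_make_vma_readonly(struct vm_area_struct *vma)
{
	vm_flags_clear(vma, VM_WRITE | VM_MAYWRITE);
}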

static inline int call_mmap(struct file *, struct vm_area_struct *)
{
	return 0;
}

static inline int shmem_zero_setup(struct vm_area_struct *)
{
	return 0;
}

static inline void vma_set_anonymous(struct vm_area_struct *vma)
{
	vma->vm_ops = NULL;
}

static inline void ksm_add_vma(struct vm_area_struct *)
{
}

static inline void perf_event_mmap(struct vm_area_struct *)
{
}

static inline bool vma_is_dax(struct vm_area_struct *)
{
	return false;
}

static inline struct vm_area_struct *get_gate_vma(struct mm_struct *)
{
	return NULL;
}

bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

/* Update vma->vm_page_prot to reflect vma->vm_flags. */
static inline void vma_set_page_prot(struct vm_area_struct *vma)
{
	unsigned long vm_flags = vma->vm_flags;
	pgprot_t vm_page_prot;

	/* testing: we inline vm_pgprot_modify() to avoid clash with vma.h. */
	vm_page_prot = pgprot_modify(vma->vm_page_prot, vm_get_page_prot(vm_flags));

	if (vma_wants_writenotify(vma, vm_page_prot)) {
		vm_flags &= ~VM_SHARED;
		/* testing: we inline vm_pgprot_modify() to avoid clash with vma.h. */
		vm_page_prot = pgprot_modify(vm_page_prot, vm_get_page_prot(vm_flags));
	}
	/* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
	WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
}

static inline bool arch_validate_flags(unsigned long)
{
	return true;
}

static inline void vma_close(struct vm_area_struct *)
{
}

static inline int mmap_file(struct file *, struct vm_area_struct *)
{
	return 0;
}

#endif /* __MM_VMA_INTERNAL_H */