/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * vma.h
 *
 * Core VMA manipulation API implemented in vma.c.
 */
#ifndef __MM_VMA_H
#define __MM_VMA_H

/*
 * VMA lock generalization
 */
struct vma_prepare {
	struct vm_area_struct *vma;
	struct vm_area_struct *adj_next;
	struct file *file;
	struct address_space *mapping;
	struct anon_vma *anon_vma;
	struct vm_area_struct *insert;
	struct vm_area_struct *remove;
	struct vm_area_struct *remove2;
};

struct unlink_vma_file_batch {
	int count;
	struct vm_area_struct *vmas[8];
};

/*
 * vma munmap operation
 */
struct vma_munmap_struct {
	struct vma_iterator *vmi;
	struct vm_area_struct *vma;     /* The first vma to munmap */
	struct vm_area_struct *prev;    /* vma before the munmap area */
	struct vm_area_struct *next;    /* vma after the munmap area */
	struct list_head *uf;           /* Userfaultfd list_head */
	unsigned long start;            /* Aligned start addr (inclusive) */
	unsigned long end;              /* Aligned end addr (exclusive) */
	unsigned long unmap_start;      /* Unmap PTE start */
	unsigned long unmap_end;        /* Unmap PTE end */
	int vma_count;                  /* Number of vmas that will be removed */
	bool unlock;                    /* Unlock after the munmap */
	bool clear_ptes;                /* If there are outstanding PTEs to be cleared */
	/* 2 byte hole */
	unsigned long nr_pages;         /* Number of pages being removed */
	unsigned long locked_vm;        /* Number of locked pages */
	unsigned long nr_accounted;     /* Number of VM_ACCOUNT pages */
	unsigned long exec_vm;
	unsigned long stack_vm;
	unsigned long data_vm;
};

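/*
 * Illustrative note (added commentary, not part of the original header): the
 * struct above accumulates per-munmap state -- the affected range, the VMAs
 * bordering it, and the page-accounting counters -- while the VMAs being
 * removed are gathered and detached, so the totals can be applied to the
 * mm_struct in one step once the unmap completes.
 */
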
enum vma_merge_state {
	VMA_MERGE_START,
	VMA_MERGE_ERROR_NOMEM,
	VMA_MERGE_NOMERGE,
	VMA_MERGE_SUCCESS,
};

enum vma_merge_flags {
	VMG_FLAG_DEFAULT = 0,
	/*
	 * If we can expand, simply do so. We know there is nothing to merge to
	 * the right. Does not reset state upon failure to merge. The VMA
	 * iterator is assumed to be positioned at the previous VMA, rather than
	 * at the gap.
	 */
	VMG_FLAG_JUST_EXPAND = 1 << 0,
};

/* Represents a VMA merge operation. */
struct vma_merge_struct {
	struct mm_struct *mm;
	struct vma_iterator *vmi;
	pgoff_t pgoff;
	struct vm_area_struct *prev;
	struct vm_area_struct *next; /* Modified by vma_merge(). */
	struct vm_area_struct *vma; /* Either a new VMA or the one being modified. */
	unsigned long start;
	unsigned long end;
	unsigned long flags;
	struct file *file;
	struct anon_vma *anon_vma;
	struct mempolicy *policy;
	struct vm_userfaultfd_ctx uffd_ctx;
	struct anon_vma_name *anon_name;
	enum vma_merge_flags merge_flags;
	enum vma_merge_state state;
};

static inline bool vmg_nomem(struct vma_merge_struct *vmg)
{
	return vmg->state == VMA_MERGE_ERROR_NOMEM;
}

/* Assumes addr >= vma->vm_start. */
static inline pgoff_t vma_pgoff_offset(struct vm_area_struct *vma,
				       unsigned long addr)
{
	return vma->vm_pgoff + PHYS_PFN(addr - vma->vm_start);
}

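/*
 * Worked example (illustrative, assuming 4 KiB pages): for a VMA with
 * vm_start == 0x1000 and vm_pgoff == 10, vma_pgoff_offset(vma, 0x3000)
 * returns 10 + PHYS_PFN(0x2000) == 10 + 2 == 12, i.e. the page offset
 * corresponding to the given virtual address.
 */
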
#define VMG_STATE(name, mm_, vmi_, start_, end_, flags_, pgoff_)	\
	struct vma_merge_struct name = {				\
		.mm = mm_,						\
		.vmi = vmi_,						\
		.start = start_,					\
		.end = end_,						\
		.flags = flags_,					\
		.pgoff = pgoff_,					\
		.state = VMA_MERGE_START,				\
		.merge_flags = VMG_FLAG_DEFAULT,			\
	}

#define VMG_VMA_STATE(name, vmi_, prev_, vma_, start_, end_)	\
	struct vma_merge_struct name = {			\
		.mm = vma_->vm_mm,				\
		.vmi = vmi_,					\
		.prev = prev_,					\
		.next = NULL,					\
		.vma = vma_,					\
		.start = start_,				\
		.end = end_,					\
		.flags = vma_->vm_flags,			\
		.pgoff = vma_pgoff_offset(vma_, start_),	\
		.file = vma_->vm_file,				\
		.anon_vma = vma_->anon_vma,			\
		.policy = vma_policy(vma_),			\
		.uffd_ctx = vma_->vm_userfaultfd_ctx,		\
		.anon_name = anon_vma_name(vma_),		\
		.state = VMA_MERGE_START,			\
		.merge_flags = VMG_FLAG_DEFAULT,		\
	}

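/*
 * Illustrative usage sketch (added commentary, not part of this header; the
 * locals addr, len, vm_flags and pgoff are hypothetical, and the exact
 * preconditions -- e.g. how vmg.prev/vmg.next must be primed -- are defined
 * by the implementation in vma.c): a caller mapping a new range builds the
 * merge descriptor with VMG_STATE() and then tries to merge with the
 * surrounding VMAs before allocating a fresh VMA:
 *
 *	VMA_ITERATOR(vmi, mm, addr);
 *	VMG_STATE(vmg, mm, &vmi, addr, addr + len, vm_flags, pgoff);
 *
 *	vma = vma_merge_new_range(&vmg);
 *	if (!vma) {
 *		if (vmg_nomem(&vmg))
 *			... fail with -ENOMEM ...
 *		... otherwise no merge was possible; allocate a new VMA ...
 *	}
 */
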
#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
void validate_mm(struct mm_struct *mm);
#else
#define validate_mm(mm) do { } while (0)
#endif

/* Required for expand_downwards(). */
void anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma);

/* Required for expand_downwards(). */
void anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma);

int vma_expand(struct vma_merge_struct *vmg);
int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
	       unsigned long start, unsigned long end, pgoff_t pgoff);

static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
			struct vm_area_struct *vma, gfp_t gfp)
{
	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_gfp(&vmi->mas, vma, gfp);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	return 0;
}
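
/*
 * Note (added for clarity, not in the original header): vma_iter_store_gfp()
 * above allocates maple tree nodes on demand with the given gfp flags and can
 * therefore fail with -ENOMEM, whereas vma_iter_store() further down expects
 * the nodes to have been set up beforehand via vma_iter_prealloc().
 */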

int
do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
		    struct mm_struct *mm, unsigned long start,
		    unsigned long end, struct list_head *uf, bool unlock);

int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
		  unsigned long start, size_t len, struct list_head *uf,
		  bool unlock);

void remove_vma(struct vm_area_struct *vma, bool unreachable);

void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct vm_area_struct *next);

/* We are about to modify the VMA's flags. */
struct vm_area_struct *vma_modify_flags(struct vma_iterator *vmi,
		struct vm_area_struct *prev, struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		unsigned long new_flags);

/* We are about to modify the VMA's flags and/or anon_name. */
struct vm_area_struct
*vma_modify_flags_name(struct vma_iterator *vmi,
		       struct vm_area_struct *prev,
		       struct vm_area_struct *vma,
		       unsigned long start,
		       unsigned long end,
		       unsigned long new_flags,
		       struct anon_vma_name *new_name);

/* We are about to modify the VMA's memory policy. */
struct vm_area_struct
*vma_modify_policy(struct vma_iterator *vmi,
		   struct vm_area_struct *prev,
		   struct vm_area_struct *vma,
		   unsigned long start, unsigned long end,
		   struct mempolicy *new_pol);

/* We are about to modify the VMA's flags and/or uffd context. */
struct vm_area_struct
*vma_modify_flags_uffd(struct vma_iterator *vmi,
		       struct vm_area_struct *prev,
		       struct vm_area_struct *vma,
		       unsigned long start, unsigned long end,
		       unsigned long new_flags,
		       struct vm_userfaultfd_ctx new_ctx);

struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg);

struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
					struct vm_area_struct *vma,
					unsigned long delta);

void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb);

void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb);

void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
			       struct vm_area_struct *vma);

void unlink_file_vma(struct vm_area_struct *vma);

void vma_link_file(struct vm_area_struct *vma);

int vma_link(struct mm_struct *mm, struct vm_area_struct *vma);

struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
	unsigned long addr, unsigned long len, pgoff_t pgoff,
	bool *need_rmap_locks);

struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma);

bool vma_needs_dirty_tracking(struct vm_area_struct *vma);
bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

int mm_take_all_locks(struct mm_struct *mm);
void mm_drop_all_locks(struct mm_struct *mm);

unsigned long __mmap_region(struct file *file, unsigned long addr,
		unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
		struct list_head *uf);

static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
{
	/*
	 * We want to check manually if we can change individual PTEs writable
	 * if we can't do that automatically for all PTEs in a mapping. For
	 * private mappings, that's always the case when we have write
	 * permissions as we properly have to handle COW.
	 */
	if (vma->vm_flags & VM_SHARED)
		return vma_wants_writenotify(vma, vma->vm_page_prot);
	return !!(vma->vm_flags & VM_WRITE);
}
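
/*
 * Illustrative example (added commentary, not part of the original header):
 * a MAP_PRIVATE mapping created with PROT_READ|PROT_WRITE has VM_WRITE set
 * but not VM_SHARED, so the helper above returns true -- such mappings keep
 * write-protected page protections for COW, and individual PTEs may only be
 * upgraded to writable once the underlying page is known to be exclusively
 * owned. For VM_SHARED mappings the decision defers to
 * vma_wants_writenotify().
 */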

#ifdef CONFIG_MMU
static inline pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
{
	return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
}
#endif

static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi,
						    unsigned long min)
{
	return mas_prev(&vmi->mas, min);
}

/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area (including shadow stacks)
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return ((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK);
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}
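
/*
 * Worked example (illustrative, not from the original header): a private
 * anonymous mapping created with PROT_READ|PROT_WRITE carries VM_WRITE but
 * neither VM_SHARED nor VM_STACK, so is_data_mapping() is true; a read-only
 * PROT_EXEC file mapping satisfies is_exec_mapping(); and a thread stack
 * mapped with VM_STACK (VM_GROWSDOWN on most architectures) counts as a
 * stack mapping. These predicates feed the exec_vm, stack_vm and data_vm
 * counters in mm_struct.
 */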

static inline void vma_iter_config(struct vma_iterator *vmi,
		unsigned long index, unsigned long last)
{
	__mas_set_range(&vmi->mas, index, last - 1);
}

static inline void vma_iter_reset(struct vma_iterator *vmi)
{
	mas_reset(&vmi->mas);
}

static inline
struct vm_area_struct *vma_iter_prev_range_limit(struct vma_iterator *vmi, unsigned long min)
{
	return mas_prev_range(&vmi->mas, min);
}

static inline
struct vm_area_struct *vma_iter_next_range_limit(struct vma_iterator *vmi, unsigned long max)
{
	return mas_next_range(&vmi->mas, max);
}

static inline int vma_iter_area_lowest(struct vma_iterator *vmi, unsigned long min,
				       unsigned long max, unsigned long size)
{
	return mas_empty_area(&vmi->mas, min, max - 1, size);
}

static inline int vma_iter_area_highest(struct vma_iterator *vmi, unsigned long min,
					unsigned long max, unsigned long size)
{
	return mas_empty_area_rev(&vmi->mas, min, max - 1, size);
}

/*
 * VMA Iterator functions shared between nommu and mmap
 */
static inline int vma_iter_prealloc(struct vma_iterator *vmi,
		struct vm_area_struct *vma)
{
	return mas_preallocate(&vmi->mas, vma, GFP_KERNEL);
}

static inline void vma_iter_clear(struct vma_iterator *vmi)
{
	mas_store_prealloc(&vmi->mas, NULL);
}

static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
{
	return mas_walk(&vmi->mas);
}

/* Store a VMA with preallocated memory */
static inline void vma_iter_store(struct vma_iterator *vmi,
				  struct vm_area_struct *vma)
{
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.index > vma->vm_start)) {
		pr_warn("%lx > %lx\n store vma %lx-%lx\n into slot %lx-%lx\n",
			vmi->mas.index, vma->vm_start, vma->vm_start,
			vma->vm_end, vmi->mas.index, vmi->mas.last);
	}
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.last <  vma->vm_start)) {
		pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
		       vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,
		       vmi->mas.index, vmi->mas.last);
	}
#endif

	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_prealloc(&vmi->mas, vma);
}

static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
{
	return vmi->mas.index;
}

static inline unsigned long vma_iter_end(struct vma_iterator *vmi)
{
	return vmi->mas.last + 1;
}

static inline int vma_iter_bulk_alloc(struct vma_iterator *vmi,
				      unsigned long count)
{
	return mas_expected_entries(&vmi->mas, count);
}

static inline
struct vm_area_struct *vma_iter_prev_range(struct vma_iterator *vmi)
{
	return mas_prev_range(&vmi->mas, 0);
}

/*
 * Retrieve the next VMA and rewind the iterator to the end of the previous
 * VMA, or, if there is no previous VMA, to index 0.
 */
static inline
struct vm_area_struct *vma_iter_next_rewind(struct vma_iterator *vmi,
		struct vm_area_struct **pprev)
{
	struct vm_area_struct *next = vma_next(vmi);
	struct vm_area_struct *prev = vma_prev(vmi);

	/*
	 * Consider the case where no previous VMA exists. We advance to the
	 * next VMA, skipping any gap, then rewind to the start of the range.
	 *
	 * If we were to unconditionally advance to the next range we'd wind up
	 * at the next VMA again, so we check to ensure there is a previous VMA
	 * to skip over.
	 */
	if (prev)
		vma_iter_next_range(vmi);

	if (pprev)
		*pprev = prev;

	return next;
}

#ifdef CONFIG_64BIT

static inline bool vma_is_sealed(struct vm_area_struct *vma)
{
	return (vma->vm_flags & VM_SEALED);
}

/*
 * Check whether a VMA is sealed against modification.
 * Returns true if modification is allowed.
 */
static inline bool can_modify_vma(struct vm_area_struct *vma)
{
	if (unlikely(vma_is_sealed(vma)))
		return false;

	return true;
}
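
/*
 * Illustrative note (added commentary, not part of the original header):
 * callers that are about to munmap(), mprotect() or otherwise alter a VMA
 * are expected to bail out, typically with -EPERM, when can_modify_vma()
 * reports that the VMA has been sealed with mseal().
 */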

bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior);

#else

static inline bool can_modify_vma(struct vm_area_struct *vma)
{
	return true;
}

static inline bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior)
{
	return true;
}

#endif

#endif	/* __MM_VMA_H */