   1/* SPDX-License-Identifier: GPL-2.0-or-later */
   2/* internal.h: mm/ internal definitions
   3 *
   4 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
   5 * Written by David Howells (dhowells@redhat.com)
   6 */
   7#ifndef __MM_INTERNAL_H
   8#define __MM_INTERNAL_H
   9
  10#include <linux/fs.h>
  11#include <linux/mm.h>
  12#include <linux/pagemap.h>
  13#include <linux/rmap.h>
  14#include <linux/tracepoint-defs.h>
  15
  16struct folio_batch;
  17
  18/*
  19 * The set of flags that only affect watermark checking and reclaim
  20 * behaviour. This is used by the MM to obey the caller constraints
  21 * about IO, FS and watermark checking while ignoring placement
  22 * hints such as HIGHMEM usage.
  23 */
  24#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
  25			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
  26			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
  27			__GFP_NOLOCKDEP)
  28
  29/* The GFP flags allowed during early boot */
  30#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))
  31
  32/* Control allocation cpuset and node placement constraints */
  33#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
  34
  35/* Do not use these with a slab allocator */
  36#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
  37
  38/*
  39 * Unlike WARN_ON_ONCE(), no warning will be issued when the caller
  40 * specifies __GFP_NOWARN.
  41 */
  42#define WARN_ON_ONCE_GFP(cond, gfp)	({				\
  43	static bool __section(".data.once") __warned;			\
  44	int __ret_warn_once = !!(cond);					\
  45									\
  46	if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
  47		__warned = true;					\
  48		WARN_ON(1);						\
  49	}								\
  50	unlikely(__ret_warn_once);					\
  51})
  52
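/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * how an allocation path might use WARN_ON_ONCE_GFP() so that callers passing
 * __GFP_NOWARN stay silent. The helper name and the order check are
 * hypothetical.
 */
static inline bool example_order_too_large(unsigned int order, gfp_t gfp)
{
	/* Warns at most once, and only if the caller did not pass __GFP_NOWARN */
	return WARN_ON_ONCE_GFP(order > MAX_PAGE_ORDER, gfp);
}
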
  53void page_writeback_init(void);
  54
  55/*
  56 * If a 16GB hugetlb folio were mapped by PTEs of all of its 4kB pages,
  57 * its nr_pages_mapped would be 0x400000: choose the ENTIRELY_MAPPED bit
  58 * above that range, instead of 2*(PMD_SIZE/PAGE_SIZE).  Hugetlb currently
  59 * leaves nr_pages_mapped at 0, but avoid surprise if it participates later.
  60 */
  61#define ENTIRELY_MAPPED		0x800000
  62#define FOLIO_PAGES_MAPPED	(ENTIRELY_MAPPED - 1)
  63
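/*
 * Worked example (editor's note): a 16GB hugetlb folio consists of
 * (16 << 30) / (4 << 10) = 0x400000 4kB pages, so a fully PTE-mapped one
 * would have nr_pages_mapped == 0x400000; ENTIRELY_MAPPED (0x800000) is the
 * next bit above that range.
 */
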
  64/*
  65 * Flags passed to __show_mem() and show_free_areas() to suppress output in
  66 * various contexts.
  67 */
  68#define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */
  69
  70/*
  71 * How many individual pages have an elevated _mapcount.  Excludes
  72 * the folio's entire_mapcount.
  73 */
  74static inline int folio_nr_pages_mapped(struct folio *folio)
  75{
  76	return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED;
  77}
  78
  79static inline void *folio_raw_mapping(struct folio *folio)
  80{
  81	unsigned long mapping = (unsigned long)folio->mapping;
  82
  83	return (void *)(mapping & ~PAGE_MAPPING_FLAGS);
  84}
  85
  86void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
  87						int nr_throttled);
  88static inline void acct_reclaim_writeback(struct folio *folio)
  89{
  90	pg_data_t *pgdat = folio_pgdat(folio);
  91	int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled);
  92
  93	if (nr_throttled)
  94		__acct_reclaim_writeback(pgdat, folio, nr_throttled);
  95}
  96
  97static inline void wake_throttle_isolated(pg_data_t *pgdat)
  98{
  99	wait_queue_head_t *wqh;
 100
 101	wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED];
 102	if (waitqueue_active(wqh))
 103		wake_up(wqh);
 104}
 105
 106vm_fault_t do_swap_page(struct vm_fault *vmf);
 107void folio_rotate_reclaimable(struct folio *folio);
 108bool __folio_end_writeback(struct folio *folio);
 109void deactivate_file_folio(struct folio *folio);
 110void folio_activate(struct folio *folio);
 111
 112void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
 113		   struct vm_area_struct *start_vma, unsigned long floor,
 114		   unsigned long ceiling, bool mm_wr_locked);
 115void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
 116
 117struct zap_details;
 118void unmap_page_range(struct mmu_gather *tlb,
 119			     struct vm_area_struct *vma,
 120			     unsigned long addr, unsigned long end,
 121			     struct zap_details *details);
 122
 123void page_cache_ra_order(struct readahead_control *, struct file_ra_state *,
 124		unsigned int order);
 125void force_page_cache_ra(struct readahead_control *, unsigned long nr);
 126static inline void force_page_cache_readahead(struct address_space *mapping,
 127		struct file *file, pgoff_t index, unsigned long nr_to_read)
 128{
 129	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
 130	force_page_cache_ra(&ractl, nr_to_read);
 131}
 132
 133unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
 134		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
 135unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
 136		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
 137void filemap_free_folio(struct address_space *mapping, struct folio *folio);
 138int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
 139bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
 140		loff_t end);
 141long mapping_evict_folio(struct address_space *mapping, struct folio *folio);
 142unsigned long mapping_try_invalidate(struct address_space *mapping,
 143		pgoff_t start, pgoff_t end, unsigned long *nr_failed);
 144
 145/**
 146 * folio_evictable - Test whether a folio is evictable.
 147 * @folio: The folio to test.
 148 *
 149 * Test whether @folio is evictable -- i.e., should be placed on
 150 * active/inactive lists vs unevictable list.
 151 *
 152 * Reasons folio might not be evictable:
 153 * 1. folio's mapping marked unevictable
 154 * 2. One of the pages in the folio is part of an mlocked VMA
 155 */
 156static inline bool folio_evictable(struct folio *folio)
 157{
 158	bool ret;
 159
 160	/* Prevent address_space of inode and swap cache from being freed */
 161	rcu_read_lock();
 162	ret = !mapping_unevictable(folio_mapping(folio)) &&
 163			!folio_test_mlocked(folio);
 164	rcu_read_unlock();
 165	return ret;
 166}
 167
 168/*
 169 * Turn a non-refcounted page (->_refcount == 0) into refcounted with
 170 * a count of one.
 171 */
 172static inline void set_page_refcounted(struct page *page)
 173{
 174	VM_BUG_ON_PAGE(PageTail(page), page);
 175	VM_BUG_ON_PAGE(page_ref_count(page), page);
 176	set_page_count(page, 1);
 177}
 178
 179/*
 180 * Return true if a folio needs ->release_folio() calling upon it.
 181 */
 182static inline bool folio_needs_release(struct folio *folio)
 183{
 184	struct address_space *mapping = folio_mapping(folio);
 185
 186	return folio_has_private(folio) ||
 187		(mapping && mapping_release_always(mapping));
 188}
 189
 190extern unsigned long highest_memmap_pfn;
 191
 192/*
 193 * Maximum number of reclaim retries without progress before the OOM
 194 * killer is considered the only way forward.
 195 */
 196#define MAX_RECLAIM_RETRIES 16
 197
 198/*
 199 * in mm/vmscan.c:
 200 */
 201bool isolate_lru_page(struct page *page);
 202bool folio_isolate_lru(struct folio *folio);
 203void putback_lru_page(struct page *page);
 204void folio_putback_lru(struct folio *folio);
 205extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);
 206
 207/*
 208 * in mm/rmap.c:
 209 */
 210pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
 211
 212/*
 213 * in mm/page_alloc.c
 214 */
 215#define K(x) ((x) << (PAGE_SHIFT-10))
 216
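/*
 * Worked example (editor's note): with 4kB pages (PAGE_SHIFT == 12),
 * K(x) == x << 2, so K(256) pages == 1024 kB.
 */
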
 217extern char * const zone_names[MAX_NR_ZONES];
 218
 219/* perform sanity checks on struct pages being allocated or freed */
 220DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
 221
 222extern int min_free_kbytes;
 223
 224void setup_per_zone_wmarks(void);
 225void calculate_min_free_kbytes(void);
 226int __meminit init_per_zone_wmark_min(void);
 227void page_alloc_sysctl_init(void);
 228
 229/*
 230 * Structure for holding the mostly immutable allocation parameters passed
 231 * between functions involved in allocations, including the alloc_pages*
 232 * family of functions.
 233 *
 234 * nodemask, migratetype and highest_zoneidx are initialized only once in
 235 * __alloc_pages() and then never change.
 236 *
 237 * zonelist, preferred_zone and highest_zoneidx are set first in
 238 * __alloc_pages() for the fast path, and might be later changed
 239 * in __alloc_pages_slowpath(). All other functions pass the whole structure
 240 * by a const pointer.
 241 */
 242struct alloc_context {
 243	struct zonelist *zonelist;
 244	nodemask_t *nodemask;
 245	struct zoneref *preferred_zoneref;
 246	int migratetype;
 247
 248	/*
 249	 * highest_zoneidx represents the highest usable zone index for
 250	 * the allocation request. Due to the nature of the zones,
 251	 * memory in zones lower than highest_zoneidx will be
 252	 * protected by lowmem_reserve[highest_zoneidx].
 253	 *
 254	 * highest_zoneidx is also used by reclaim/compaction to limit
 255	 * the target zone, since zones higher than this index cannot
 256	 * be used for this allocation request.
 257	 */
 258	enum zone_type highest_zoneidx;
 259	bool spread_dirty_pages;
 260};
 261
 262/*
 263 * This function returns the order of a free page in the buddy system. In
 264 * general, page_zone(page)->lock must be held by the caller to prevent the
 265 * page from being allocated in parallel and returning garbage as the order.
 266 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
 267 * page cannot be allocated or merged in parallel. Alternatively, it must
 268 * handle invalid values gracefully, and use buddy_order_unsafe() below.
 269 */
 270static inline unsigned int buddy_order(struct page *page)
 271{
 272	/* PageBuddy() must be checked by the caller */
 273	return page_private(page);
 274}
 275
 276/*
 277 * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
 278 * PageBuddy() should be checked first by the caller to minimize race window,
 279 * and invalid values must be handled gracefully.
 280 *
 281 * READ_ONCE is used so that if the caller assigns the result into a local
 282 * variable and e.g. tests it for valid range before using, the compiler cannot
 283 * decide to remove the variable and inline the page_private(page) multiple
 284 * times, potentially observing different values in the tests and the actual
 285 * use of the result.
 286 */
 287#define buddy_order_unsafe(page)	READ_ONCE(page_private(page))
 288
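/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * the intended usage pattern for buddy_order_unsafe() in a lockless scanner.
 * The helper name is hypothetical; the point is that the value is read once
 * into a local and range-checked before it is trusted.
 */
static inline unsigned int example_freepage_order(struct page *page)
{
	unsigned int order;

	if (!PageBuddy(page))
		return 0;

	order = buddy_order_unsafe(page);
	/* The racily-read value may be garbage: validate before use */
	if (order > MAX_PAGE_ORDER)
		return 0;

	return order;
}
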
 289/*
 290 * This function checks whether a page is free && is the buddy.
 291 * We can coalesce a page and its buddy if
 292 * (a) the buddy is not in a hole (check before calling!) &&
 293 * (b) the buddy is in the buddy system &&
 294 * (c) a page and its buddy have the same order &&
 295 * (d) a page and its buddy are in the same zone.
 296 *
 297 * For recording whether a page is in the buddy system, we set PageBuddy.
 298 * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
 299 *
 300 * For recording page's order, we use page_private(page).
 301 */
 302static inline bool page_is_buddy(struct page *page, struct page *buddy,
 303				 unsigned int order)
 304{
 305	if (!page_is_guard(buddy) && !PageBuddy(buddy))
 306		return false;
 307
 308	if (buddy_order(buddy) != order)
 309		return false;
 310
 311	/*
 312	 * zone check is done late to avoid uselessly calculating
 313	 * zone/node ids for pages that could never merge.
 314	 */
 315	if (page_zone_id(page) != page_zone_id(buddy))
 316		return false;
 317
 318	VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
 319
 320	return true;
 321}
 322
 323/*
 324 * Locate the struct page for both the matching buddy in our
 325 * pair (buddy1) and the combined O(n+1) page they form (page).
 326 *
 327 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 328 * the following equation:
 329 *     B2 = B1 ^ (1 << O)
 330 * For example, if the starting buddy (buddy2) is #8 its order
 331 * 1 buddy is #10:
 332 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 333 *
 334 * 2) Any buddy B will have an order O+1 parent P which
 335 * satisfies the following equation:
 336 *     P = B & ~(1 << O)
 337 *
 338 * Assumption: *_mem_map is contiguous at least up to MAX_PAGE_ORDER
 339 */
 340static inline unsigned long
 341__find_buddy_pfn(unsigned long page_pfn, unsigned int order)
 342{
 343	return page_pfn ^ (1 << order);
 344}
 345
 346/*
 347 * Find the buddy of @page and validate it.
 348 * @page: The input page
 349 * @pfn: The pfn of the page, it saves a call to page_to_pfn() when the
 350 *       function is used in the performance-critical __free_one_page().
 351 * @order: The order of the page
 352 * @buddy_pfn: The output pointer to the buddy pfn, it also saves a call to
 353 *             page_to_pfn().
 354 *
 355 * The found buddy can be a non-PageBuddy page, be outside @page's zone, or
 356 * have an order different from @page's. Validation is necessary before using it.
 357 *
 358 * Return: the found buddy page or NULL if not found.
 359 */
 360static inline struct page *find_buddy_page_pfn(struct page *page,
 361			unsigned long pfn, unsigned int order, unsigned long *buddy_pfn)
 362{
 363	unsigned long __buddy_pfn = __find_buddy_pfn(pfn, order);
 364	struct page *buddy;
 365
 366	buddy = page + (__buddy_pfn - pfn);
 367	if (buddy_pfn)
 368		*buddy_pfn = __buddy_pfn;
 369
 370	if (page_is_buddy(page, buddy, order))
 371		return buddy;
 372	return NULL;
 373}
 374
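/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * a minimal "can these two merge?" check built on find_buddy_page_pfn().
 * The helper name is hypothetical; zone->lock is assumed to be held by the
 * caller so the buddy state is stable.
 */
static inline bool example_buddy_mergeable(struct page *page, unsigned long pfn,
					   unsigned int order)
{
	unsigned long buddy_pfn;

	/* NULL means the neighbour is not a free buddy of matching order/zone */
	return find_buddy_page_pfn(page, pfn, order, &buddy_pfn) != NULL;
}
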
 375extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
 376				unsigned long end_pfn, struct zone *zone);
 377
 378static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
 379				unsigned long end_pfn, struct zone *zone)
 380{
 381	if (zone->contiguous)
 382		return pfn_to_page(start_pfn);
 383
 384	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
 385}
 386
 387void set_zone_contiguous(struct zone *zone);
 388
 389static inline void clear_zone_contiguous(struct zone *zone)
 390{
 391	zone->contiguous = false;
 392}
 393
 394extern int __isolate_free_page(struct page *page, unsigned int order);
 395extern void __putback_isolated_page(struct page *page, unsigned int order,
 396				    int mt);
 397extern void memblock_free_pages(struct page *page, unsigned long pfn,
 398					unsigned int order);
 399extern void __free_pages_core(struct page *page, unsigned int order);
 400
 401/*
 402 * This will have no effect, other than possibly generating a warning, if the
 403 * caller passes in a non-large folio.
 404 */
 405static inline void folio_set_order(struct folio *folio, unsigned int order)
 406{
 407	if (WARN_ON_ONCE(!order || !folio_test_large(folio)))
 408		return;
 409
 410	folio->_flags_1 = (folio->_flags_1 & ~0xffUL) | order;
 411#ifdef CONFIG_64BIT
 412	folio->_folio_nr_pages = 1U << order;
 413#endif
 414}
 415
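/*
 * Worked example (editor's note): folio_set_order(folio, 9) stores 9 in the
 * low byte of _flags_1 and, on 64-bit, sets _folio_nr_pages to 1 << 9 = 512,
 * i.e. a 2MB folio with 4kB pages.
 */
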
 416void folio_undo_large_rmappable(struct folio *folio);
 417
 418static inline struct folio *page_rmappable_folio(struct page *page)
 419{
 420	struct folio *folio = (struct folio *)page;
 421
 422	if (folio && folio_order(folio) > 1)
 423		folio_prep_large_rmappable(folio);
 424	return folio;
 425}
 426
 427static inline void prep_compound_head(struct page *page, unsigned int order)
 428{
 429	struct folio *folio = (struct folio *)page;
 430
 431	folio_set_order(folio, order);
 432	atomic_set(&folio->_entire_mapcount, -1);
 433	atomic_set(&folio->_nr_pages_mapped, 0);
 434	atomic_set(&folio->_pincount, 0);
 435}
 436
 437static inline void prep_compound_tail(struct page *head, int tail_idx)
 438{
 439	struct page *p = head + tail_idx;
 440
 441	p->mapping = TAIL_MAPPING;
 442	set_compound_head(p, head);
 443	set_page_private(p, 0);
 444}
 445
 446extern void prep_compound_page(struct page *page, unsigned int order);
 447
 448extern void post_alloc_hook(struct page *page, unsigned int order,
 449					gfp_t gfp_flags);
 450extern int user_min_free_kbytes;
 451
 452extern void free_unref_page(struct page *page, unsigned int order);
 453extern void free_unref_page_list(struct list_head *list);
 454
 455extern void zone_pcp_reset(struct zone *zone);
 456extern void zone_pcp_disable(struct zone *zone);
 457extern void zone_pcp_enable(struct zone *zone);
 458extern void zone_pcp_init(struct zone *zone);
 459
 460extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
 461			  phys_addr_t min_addr,
 462			  int nid, bool exact_nid);
 463
 464void memmap_init_range(unsigned long, int, unsigned long, unsigned long,
 465		unsigned long, enum meminit_context, struct vmem_altmap *, int);
 466
 467
 468int split_free_page(struct page *free_page,
 469			unsigned int order, unsigned long split_pfn_offset);
 470
 471#if defined CONFIG_COMPACTION || defined CONFIG_CMA
 472
 473/*
 474 * in mm/compaction.c
 475 */
 476/*
 477 * compact_control is used to track pages being migrated and the free pages
 478 * they are being migrated to during memory compaction. The free_pfn starts
 479 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 480 * are moved to the end of a zone during a compaction run and the run
 481 * completes when free_pfn <= migrate_pfn
 482 */
 483struct compact_control {
 484	struct list_head freepages;	/* List of free pages to migrate to */
 485	struct list_head migratepages;	/* List of pages being migrated */
 486	unsigned int nr_freepages;	/* Number of isolated free pages */
 487	unsigned int nr_migratepages;	/* Number of pages to migrate */
 488	unsigned long free_pfn;		/* isolate_freepages search base */
 489	/*
 490	 * Acts as an in/out parameter to page isolation for migration.
 491	 * isolate_migratepages uses it as a search base.
 492	 * isolate_migratepages_block will update the value to the next pfn
 493	 * after the last isolated one.
 494	 */
 495	unsigned long migrate_pfn;
 496	unsigned long fast_start_pfn;	/* a pfn to start linear scan from */
 497	struct zone *zone;
 498	unsigned long total_migrate_scanned;
 499	unsigned long total_free_scanned;
 500	unsigned short fast_search_fail;/* failures to use free list searches */
 501	short search_order;		/* order to start a fast search at */
 502	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
 503	int order;			/* order a direct compactor needs */
 504	int migratetype;		/* migratetype of direct compactor */
 505	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
 506	const int highest_zoneidx;	/* zone index of a direct compactor */
 507	enum migrate_mode mode;		/* Async or sync migration mode */
 508	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
 509	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
 510	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
 511	bool direct_compaction;		/* False from kcompactd or /proc/... */
 512	bool proactive_compaction;	/* kcompactd proactive compaction */
 513	bool whole_zone;		/* Whole zone should/has been scanned */
 514	bool contended;			/* Signal lock contention */
 515	bool finish_pageblock;		/* Scan the remainder of a pageblock. Used
 516					 * when there are potentially transient
 517					 * isolation or migration failures to
 518					 * ensure forward progress.
 519					 */
 520	bool alloc_contig;		/* alloc_contig_range allocation */
 521};
 522
 523/*
 524 * Used in direct compaction when a page should be taken from the freelists
 525 * immediately when one is created during the free path.
 526 */
 527struct capture_control {
 528	struct compact_control *cc;
 529	struct page *page;
 530};
 531
 532unsigned long
 533isolate_freepages_range(struct compact_control *cc,
 534			unsigned long start_pfn, unsigned long end_pfn);
 535int
 536isolate_migratepages_range(struct compact_control *cc,
 537			   unsigned long low_pfn, unsigned long end_pfn);
 538
 539int __alloc_contig_migrate_range(struct compact_control *cc,
 540					unsigned long start, unsigned long end);
 541
 542/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
 543void init_cma_reserved_pageblock(struct page *page);
 544
 545#endif /* CONFIG_COMPACTION || CONFIG_CMA */
 546
 547int find_suitable_fallback(struct free_area *area, unsigned int order,
 548			int migratetype, bool only_stealable, bool *can_steal);
 549
 550static inline bool free_area_empty(struct free_area *area, int migratetype)
 551{
 552	return list_empty(&area->free_list[migratetype]);
 553}
 554
 555/*
 556 * These three helpers classify VMAs for virtual memory accounting.
 557 */
 558
 559/*
 560 * Executable code area - executable, not writable, not stack
 561 */
 562static inline bool is_exec_mapping(vm_flags_t flags)
 563{
 564	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
 565}
 566
 567/*
 568 * Stack area (including shadow stacks)
 569 *
 570 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 571 * do_mmap() forbids all other combinations.
 572 */
 573static inline bool is_stack_mapping(vm_flags_t flags)
 574{
 575	return ((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK);
 576}
 577
 578/*
 579 * Data area - private, writable, not stack
 580 */
 581static inline bool is_data_mapping(vm_flags_t flags)
 582{
 583	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
 584}
 585
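/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * how the three classification helpers above partition a vma's pages among
 * the mm counters, in the style of vm_stat_account(). The helper name is
 * hypothetical.
 */
static inline void example_vm_account(struct mm_struct *mm, vm_flags_t flags,
				      long npages)
{
	if (is_exec_mapping(flags))
		mm->exec_vm += npages;
	else if (is_stack_mapping(flags))
		mm->stack_vm += npages;
	else if (is_data_mapping(flags))
		mm->data_vm += npages;
}
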
 586/* mm/util.c */
 587struct anon_vma *folio_anon_vma(struct folio *folio);
 588
 589#ifdef CONFIG_MMU
 590void unmap_mapping_folio(struct folio *folio);
 591extern long populate_vma_page_range(struct vm_area_struct *vma,
 592		unsigned long start, unsigned long end, int *locked);
 593extern long faultin_vma_page_range(struct vm_area_struct *vma,
 594				   unsigned long start, unsigned long end,
 595				   bool write, int *locked);
 596extern bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
 597			       unsigned long bytes);
 598
 599/*
 600 * NOTE: This function can't tell whether the folio is "fully mapped" in the
 601 * range.
 602 * "fully mapped" means all the pages of folio is associated with the page
 603 * table of range while this function just check whether the folio range is
 604 * within the range [start, end). Function caller needs to do page table
 605 * check if it cares about the page table association.
 606 *
 607 * Typical usage (like mlock or madvise) is:
 608 * Caller knows at least 1 page of folio is associated with page table of VMA
 609 * and the range [start, end) is intersect with the VMA range. Caller wants
 610 * to know whether the folio is fully associated with the range. It calls
 611 * this function to check whether the folio is in the range first. Then checks
 612 * the page table to know whether the folio is fully mapped to the range.
 613 */
 614static inline bool
 615folio_within_range(struct folio *folio, struct vm_area_struct *vma,
 616		unsigned long start, unsigned long end)
 617{
 618	pgoff_t pgoff, addr;
 619	unsigned long vma_pglen = vma_pages(vma);
 620
 621	VM_WARN_ON_FOLIO(folio_test_ksm(folio), folio);
 622	if (start > end)
 623		return false;
 624
 625	if (start < vma->vm_start)
 626		start = vma->vm_start;
 627
 628	if (end > vma->vm_end)
 629		end = vma->vm_end;
 630
 631	pgoff = folio_pgoff(folio);
 632
 633	/* if folio start address is not in vma range */
 634	if (!in_range(pgoff, vma->vm_pgoff, vma_pglen))
 635		return false;
 636
 637	addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
 638
 639	return !(addr < start || end - addr < folio_size(folio));
 640}
 641
 642static inline bool
 643folio_within_vma(struct folio *folio, struct vm_area_struct *vma)
 644{
 645	return folio_within_range(folio, vma, vma->vm_start, vma->vm_end);
 646}
 647
 648/*
 649 * mlock_vma_folio() and munlock_vma_folio():
 650 * should be called with vma's mmap_lock held for read or write,
 651 * under page table lock for the pte/pmd being added or removed.
 652 *
 653 * mlock is usually called at the end of folio_add_*_rmap_*(), munlock at
 654 * the end of folio_remove_rmap_*(); but new anon folios are managed by
 655 * folio_add_lru_vma() calling mlock_new_folio().
 656 */
 657void mlock_folio(struct folio *folio);
 658static inline void mlock_vma_folio(struct folio *folio,
 659				struct vm_area_struct *vma)
 660{
 661	/*
 662	 * The VM_SPECIAL check here serves two purposes.
 663	 * 1) VM_IO check prevents migration from double-counting during mlock.
 664	 * 2) Although mmap_region() and mlock_fixup() take care that VM_LOCKED
 665	 *    is never left set on a VM_SPECIAL vma, there is an interval while
 666	 *    file->f_op->mmap() is using vm_insert_page(s), when VM_LOCKED may
 667	 *    still be set while VM_SPECIAL bits are added: so ignore it then.
 668	 */
 669	if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED))
 670		mlock_folio(folio);
 671}
 672
 673void munlock_folio(struct folio *folio);
 674static inline void munlock_vma_folio(struct folio *folio,
 675					struct vm_area_struct *vma)
 676{
 677	/*
 678	 * Munlock whenever the function is called. Ideally, we should
 679	 * only munlock if a page of the folio is unmapped from the VMA,
 680	 * leaving the folio no longer fully mapped to the VMA.
 681	 *
 682	 * But it's not easy to confirm that's the situation. So we
 683	 * always munlock the folio and let page reclaim correct it
 684	 * if it's wrong.
 685	 */
 686	if (unlikely(vma->vm_flags & VM_LOCKED))
 687		munlock_folio(folio);
 688}
 689
 690void mlock_new_folio(struct folio *folio);
 691bool need_mlock_drain(int cpu);
 692void mlock_drain_local(void);
 693void mlock_drain_remote(int cpu);
 694
 695extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
 696
 697/*
 698 * Return the start of user virtual address at the specific offset within
 699 * a vma.
 700 */
 701static inline unsigned long
 702vma_pgoff_address(pgoff_t pgoff, unsigned long nr_pages,
 703		  struct vm_area_struct *vma)
 704{
 705	unsigned long address;
 706
 707	if (pgoff >= vma->vm_pgoff) {
 708		address = vma->vm_start +
 709			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
 710		/* Check for address beyond vma (or wrapped through 0?) */
 711		if (address < vma->vm_start || address >= vma->vm_end)
 712			address = -EFAULT;
 713	} else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) {
 714		/* Test above avoids possibility of wrap to 0 on 32-bit */
 715		address = vma->vm_start;
 716	} else {
 717		address = -EFAULT;
 718	}
 719	return address;
 720}
 721
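/*
 * Worked example (editor's note): for a vma with vm_start == 0x7f0000000000
 * and vm_pgoff == 0x10, asking for pgoff == 0x12 with nr_pages == 1 returns
 * 0x7f0000000000 + (0x2 << PAGE_SHIFT), i.e. two pages into the vma,
 * provided that address is still below vm_end; otherwise -EFAULT.
 */
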
 722/*
 723 * Return the start of user virtual address of a page within a vma.
 724 * Returns -EFAULT if all of the page is outside the range of vma.
 725 * If page is a compound head, the entire compound page is considered.
 726 */
 727static inline unsigned long
 728vma_address(struct page *page, struct vm_area_struct *vma)
 729{
 730	VM_BUG_ON_PAGE(PageKsm(page), page);	/* KSM page->index unusable */
 731	return vma_pgoff_address(page_to_pgoff(page), compound_nr(page), vma);
 732}
 733
 734/*
 735 * Then at what user virtual address will none of the range be found in vma?
 736 * Assumes that vma_address() already returned a good starting address.
 737 */
 738static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)
 739{
 740	struct vm_area_struct *vma = pvmw->vma;
 741	pgoff_t pgoff;
 742	unsigned long address;
 743
 744	/* Common case, plus ->pgoff is invalid for KSM */
 745	if (pvmw->nr_pages == 1)
 746		return pvmw->address + PAGE_SIZE;
 747
 748	pgoff = pvmw->pgoff + pvmw->nr_pages;
 749	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
 750	/* Check for address beyond vma (or wrapped through 0?) */
 751	if (address < vma->vm_start || address > vma->vm_end)
 752		address = vma->vm_end;
 753	return address;
 754}
 755
 756static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
 757						    struct file *fpin)
 758{
 759	int flags = vmf->flags;
 760
 761	if (fpin)
 762		return fpin;
 763
 764	/*
 765	 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
 766	 * anything, so we only pin the file and drop the mmap_lock if only
 767	 * FAULT_FLAG_ALLOW_RETRY is set, while this is the first attempt.
 768	 */
 769	if (fault_flag_allow_retry_first(flags) &&
 770	    !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
 771		fpin = get_file(vmf->vma->vm_file);
 772		release_fault_lock(vmf);
 773	}
 774	return fpin;
 775}
 776#else /* !CONFIG_MMU */
 777static inline void unmap_mapping_folio(struct folio *folio) { }
 778static inline void mlock_new_folio(struct folio *folio) { }
 779static inline bool need_mlock_drain(int cpu) { return false; }
 780static inline void mlock_drain_local(void) { }
 781static inline void mlock_drain_remote(int cpu) { }
 782static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
 783{
 784}
 785#endif /* !CONFIG_MMU */
 786
 787/* Memory initialisation debug and verification */
 788#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
 789DECLARE_STATIC_KEY_TRUE(deferred_pages);
 790
 791bool __init deferred_grow_zone(struct zone *zone, unsigned int order);
 792#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
 793
 794enum mminit_level {
 795	MMINIT_WARNING,
 796	MMINIT_VERIFY,
 797	MMINIT_TRACE
 798};
 799
 800#ifdef CONFIG_DEBUG_MEMORY_INIT
 801
 802extern int mminit_loglevel;
 803
 804#define mminit_dprintk(level, prefix, fmt, arg...) \
 805do { \
 806	if (level < mminit_loglevel) { \
 807		if (level <= MMINIT_WARNING) \
 808			pr_warn("mminit::" prefix " " fmt, ##arg);	\
 809		else \
 810			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
 811	} \
 812} while (0)
 813
 814extern void mminit_verify_pageflags_layout(void);
 815extern void mminit_verify_zonelist(void);
 816#else
 817
 818static inline void mminit_dprintk(enum mminit_level level,
 819				const char *prefix, const char *fmt, ...)
 820{
 821}
 822
 823static inline void mminit_verify_pageflags_layout(void)
 824{
 825}
 826
 827static inline void mminit_verify_zonelist(void)
 828{
 829}
 830#endif /* CONFIG_DEBUG_MEMORY_INIT */
 831
 832#define NODE_RECLAIM_NOSCAN	-2
 833#define NODE_RECLAIM_FULL	-1
 834#define NODE_RECLAIM_SOME	0
 835#define NODE_RECLAIM_SUCCESS	1
 836
 837#ifdef CONFIG_NUMA
 838extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
 839extern int find_next_best_node(int node, nodemask_t *used_node_mask);
 840#else
 841static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
 842				unsigned int order)
 843{
 844	return NODE_RECLAIM_NOSCAN;
 845}
 846static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
 847{
 848	return NUMA_NO_NODE;
 849}
 850#endif
 851
 852/*
 853 * mm/memory-failure.c
 854 */
 855extern int hwpoison_filter(struct page *p);
 856
 857extern u32 hwpoison_filter_dev_major;
 858extern u32 hwpoison_filter_dev_minor;
 859extern u64 hwpoison_filter_flags_mask;
 860extern u64 hwpoison_filter_flags_value;
 861extern u64 hwpoison_filter_memcg;
 862extern u32 hwpoison_filter_enable;
 863
 864extern unsigned long  __must_check vm_mmap_pgoff(struct file *, unsigned long,
 865        unsigned long, unsigned long,
 866        unsigned long, unsigned long);
 867
 868extern void set_pageblock_order(void);
 869unsigned long reclaim_pages(struct list_head *folio_list);
 870unsigned int reclaim_clean_pages_from_list(struct zone *zone,
 871					    struct list_head *folio_list);
 872/* The ALLOC_WMARK bits are used as an index to zone->watermark */
 873#define ALLOC_WMARK_MIN		WMARK_MIN
 874#define ALLOC_WMARK_LOW		WMARK_LOW
 875#define ALLOC_WMARK_HIGH	WMARK_HIGH
 876#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */
 877
 878/* Mask to get the watermark bits */
 879#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
 880
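/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * the low ALLOC_WMARK_* bits index straight into the zone watermarks, so the
 * value to test against can be picked with the mask above. The helper name
 * is hypothetical; wmark_pages() is the accessor assumed here.
 */
static inline unsigned long example_pick_wmark(struct zone *z,
					       unsigned int alloc_flags)
{
	return wmark_pages(z, alloc_flags & ALLOC_WMARK_MASK);
}
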
 881/*
 882 * Only MMU archs have async OOM victim reclaim (aka the oom_reaper), so we
 883 * cannot assume that reduced access to memory reserves is sufficient for
 884 * !MMU.
 885 */
 886#ifdef CONFIG_MMU
 887#define ALLOC_OOM		0x08
 888#else
 889#define ALLOC_OOM		ALLOC_NO_WATERMARKS
 890#endif
 891
 892#define ALLOC_NON_BLOCK		 0x10 /* Caller cannot block. Allow access
 893				       * to 25% of the min watermark or
 894				       * 62.5% if __GFP_HIGH is set.
 895				       */
 896#define ALLOC_MIN_RESERVE	 0x20 /* __GFP_HIGH set. Allow access to 50%
 897				       * of the min watermark.
 898				       */
 899#define ALLOC_CPUSET		 0x40 /* check for correct cpuset */
 900#define ALLOC_CMA		 0x80 /* allow allocations from CMA areas */
 901#ifdef CONFIG_ZONE_DMA32
 902#define ALLOC_NOFRAGMENT	0x100 /* avoid mixing pageblock types */
 903#else
 904#define ALLOC_NOFRAGMENT	  0x0
 905#endif
 906#define ALLOC_HIGHATOMIC	0x200 /* Allows access to MIGRATE_HIGHATOMIC */
 907#define ALLOC_KSWAPD		0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */
 908
 909/* Flags that allow allocations below the min watermark. */
 910#define ALLOC_RESERVES (ALLOC_NON_BLOCK|ALLOC_MIN_RESERVE|ALLOC_HIGHATOMIC|ALLOC_OOM)
 911
 912enum ttu_flags;
 913struct tlbflush_unmap_batch;
 914
 915
 916/*
 917 * only for MM internal work items which do not depend on
 918 * any allocations or locks which might depend on allocations
 919 */
 920extern struct workqueue_struct *mm_percpu_wq;
 921
 922#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
 923void try_to_unmap_flush(void);
 924void try_to_unmap_flush_dirty(void);
 925void flush_tlb_batched_pending(struct mm_struct *mm);
 926#else
 927static inline void try_to_unmap_flush(void)
 928{
 929}
 930static inline void try_to_unmap_flush_dirty(void)
 931{
 932}
 933static inline void flush_tlb_batched_pending(struct mm_struct *mm)
 934{
 935}
 936#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
 937
 938extern const struct trace_print_flags pageflag_names[];
 939extern const struct trace_print_flags pagetype_names[];
 940extern const struct trace_print_flags vmaflag_names[];
 941extern const struct trace_print_flags gfpflag_names[];
 942
 943static inline bool is_migrate_highatomic(enum migratetype migratetype)
 944{
 945	return migratetype == MIGRATE_HIGHATOMIC;
 946}
 947
 948static inline bool is_migrate_highatomic_page(struct page *page)
 949{
 950	return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
 951}
 952
 953void setup_zone_pageset(struct zone *zone);
 954
 955struct migration_target_control {
 956	int nid;		/* preferred node id */
 957	nodemask_t *nmask;
 958	gfp_t gfp_mask;
 959};
 960
 961/*
 962 * mm/filemap.c
 963 */
 964size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
 965			      struct folio *folio, loff_t fpos, size_t size);
 966
 967/*
 968 * mm/vmalloc.c
 969 */
 970#ifdef CONFIG_MMU
 971void __init vmalloc_init(void);
 972int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
 973                pgprot_t prot, struct page **pages, unsigned int page_shift);
 974#else
 975static inline void vmalloc_init(void)
 976{
 977}
 978
 979static inline
 980int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
 981                pgprot_t prot, struct page **pages, unsigned int page_shift)
 982{
 983	return -EINVAL;
 984}
 985#endif
 986
 987int __must_check __vmap_pages_range_noflush(unsigned long addr,
 988			       unsigned long end, pgprot_t prot,
 989			       struct page **pages, unsigned int page_shift);
 990
 991void vunmap_range_noflush(unsigned long start, unsigned long end);
 992
 993void __vunmap_range_noflush(unsigned long start, unsigned long end);
 994
 995int numa_migrate_prep(struct folio *folio, struct vm_area_struct *vma,
 996		      unsigned long addr, int page_nid, int *flags);
 997
 998void free_zone_device_page(struct page *page);
 999int migrate_device_coherent_page(struct page *page);
1000
1001/*
1002 * mm/gup.c
1003 */
1004struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags);
1005int __must_check try_grab_page(struct page *page, unsigned int flags);
1006
1007/*
1008 * mm/huge_memory.c
1009 */
1010struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
1011				   unsigned long addr, pmd_t *pmd,
1012				   unsigned int flags);
1013
1014/*
1015 * mm/mmap.c
1016 */
1017struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
1018					struct vm_area_struct *vma,
1019					unsigned long delta);
1020
1021enum {
1022	/* mark page accessed */
1023	FOLL_TOUCH = 1 << 16,
1024	/* a retry, previous pass started an IO */
1025	FOLL_TRIED = 1 << 17,
1026	/* we are working on non-current tsk/mm */
1027	FOLL_REMOTE = 1 << 18,
1028	/* pages must be released via unpin_user_page */
1029	FOLL_PIN = 1 << 19,
1030	/* gup_fast: prevent fall-back to slow gup */
1031	FOLL_FAST_ONLY = 1 << 20,
1032	/* allow unlocking the mmap lock */
1033	FOLL_UNLOCKABLE = 1 << 21,
1034};
1035
1036#define INTERNAL_GUP_FLAGS (FOLL_TOUCH | FOLL_TRIED | FOLL_REMOTE | FOLL_PIN | \
1037			    FOLL_FAST_ONLY | FOLL_UNLOCKABLE)
1038
1039/*
1040 * Indicates, for pages that are write-protected in the page table, whether
1041 * GUP has to trigger unsharing via FAULT_FLAG_UNSHARE such that the
1042 * GUP pin will remain consistent with the pages mapped into the page tables
1043 * of the MM.
1044 *
1045 * Temporary unmapping of PageAnonExclusive() pages or clearing of
1046 * PageAnonExclusive() has to protect against concurrent GUP:
1047 * * Ordinary GUP: Using the PT lock
1048 * * GUP-fast and fork(): mm->write_protect_seq
1049 * * GUP-fast and KSM or temporary unmapping (swap, migration): see
1050 *    folio_try_share_anon_rmap_*()
1051 *
1052 * Must be called with the (sub)page that's actually referenced via the
1053 * page table entry, which might not necessarily be the head page for a
1054 * PTE-mapped THP.
1055 *
1056 * If the vma is NULL, we're coming from the GUP-fast path and might have
1057 * to fallback to the slow path just to lookup the vma.
1058 */
1059static inline bool gup_must_unshare(struct vm_area_struct *vma,
1060				    unsigned int flags, struct page *page)
1061{
1062	/*
1063	 * FOLL_WRITE is implicitly handled correctly as the page table entry
1064	 * has to be writable -- and if it references (part of) an anonymous
1065	 * folio, that part is required to be marked exclusive.
1066	 */
1067	if ((flags & (FOLL_WRITE | FOLL_PIN)) != FOLL_PIN)
1068		return false;
1069	/*
1070	 * Note: PageAnon(page) is stable until the page is actually getting
1071	 * freed.
1072	 */
1073	if (!PageAnon(page)) {
1074		/*
1075		 * We only care about R/O long-term pinning: R/O short-term
1076		 * pinning does not have the semantics to observe successive
1077		 * changes through the process page tables.
1078		 */
1079		if (!(flags & FOLL_LONGTERM))
1080			return false;
1081
1082		/* We really need the vma ... */
1083		if (!vma)
1084			return true;
1085
1086		/*
1087		 * ... because we only care about writable private ("COW")
1088		 * mappings where we have to break COW early.
1089		 */
1090		return is_cow_mapping(vma->vm_flags);
1091	}
1092
1093	/* Paired with a memory barrier in folio_try_share_anon_rmap_*(). */
1094	if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
1095		smp_rmb();
1096
1097	/*
1098	 * During GUP-fast we might not get called on the head page for a
1099	 * hugetlb page that is mapped using cont-PTE, because GUP-fast does
1100	 * not work with the abstracted hugetlb PTEs that always point at the
1101	 * head page. For hugetlb, PageAnonExclusive only applies on the head
1102	 * page (as it cannot be partially COW-shared), so lookup the head page.
1103	 */
1104	if (unlikely(!PageHead(page) && PageHuge(page)))
1105		page = compound_head(page);
1106
1107	/*
1108	 * Note that PageKsm() pages cannot be exclusive, and consequently,
1109	 * cannot get pinned.
1110	 */
1111	return !PageAnonExclusive(page);
1112}
1113
1114extern bool mirrored_kernelcore;
1115extern bool memblock_has_mirror(void);
1116
1117static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
1118{
1119	/*
1120	 * NOTE: we must check this before checking VM_SOFTDIRTY, because
1121	 * when soft-dirty is not compiled in, VM_SOFTDIRTY is defined as
1122	 * 0x0, and then !(vm_flags & VM_SOFTDIRTY) will always be
1123	 * true.
1124	 */
1125	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
1126		return false;
1127
1128	/*
1129	 * Soft-dirty is kind of special: its tracking is enabled when the
1130	 * vma flag is not set.
1131	 */
1132	return !(vma->vm_flags & VM_SOFTDIRTY);
1133}
1134
1135static inline void vma_iter_config(struct vma_iterator *vmi,
1136		unsigned long index, unsigned long last)
1137{
1138	__mas_set_range(&vmi->mas, index, last - 1);
1139}
1140
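/*
 * Worked example (editor's note): for a vma spanning [0x1000, 0x3000),
 * vma_iter_config(vmi, 0x1000, 0x3000) sets the maple state range to
 * index 0x1000, last 0x2fff, i.e. the inclusive span of the vma.
 */
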
1141/*
1142 * VMA Iterator functions shared between nommu and mmap
1143 */
1144static inline int vma_iter_prealloc(struct vma_iterator *vmi,
1145		struct vm_area_struct *vma)
1146{
1147	return mas_preallocate(&vmi->mas, vma, GFP_KERNEL);
1148}
1149
1150static inline void vma_iter_clear(struct vma_iterator *vmi)
1151{
1152	mas_store_prealloc(&vmi->mas, NULL);
1153}
1154
1155static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
1156{
1157	return mas_walk(&vmi->mas);
1158}
1159
1160/* Store a VMA with preallocated memory */
1161static inline void vma_iter_store(struct vma_iterator *vmi,
1162				  struct vm_area_struct *vma)
1163{
1164
1165#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
1166	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
1167			vmi->mas.index > vma->vm_start)) {
1168		pr_warn("%lx > %lx\n store vma %lx-%lx\n into slot %lx-%lx\n",
1169			vmi->mas.index, vma->vm_start, vma->vm_start,
1170			vma->vm_end, vmi->mas.index, vmi->mas.last);
1171	}
1172	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
1173			vmi->mas.last <  vma->vm_start)) {
1174		pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
1175		       vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,
1176		       vmi->mas.index, vmi->mas.last);
1177	}
1178#endif
1179
1180	if (vmi->mas.status != ma_start &&
1181	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
1182		vma_iter_invalidate(vmi);
1183
1184	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
1185	mas_store_prealloc(&vmi->mas, vma);
1186}
1187
1188static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
1189			struct vm_area_struct *vma, gfp_t gfp)
1190{
1191	if (vmi->mas.status != ma_start &&
1192	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
1193		vma_iter_invalidate(vmi);
1194
1195	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
1196	mas_store_gfp(&vmi->mas, vma, gfp);
1197	if (unlikely(mas_is_err(&vmi->mas)))
1198		return -ENOMEM;
1199
1200	return 0;
1201}
1202
1203/*
1204 * VMA lock generalization
1205 */
1206struct vma_prepare {
1207	struct vm_area_struct *vma;
1208	struct vm_area_struct *adj_next;
1209	struct file *file;
1210	struct address_space *mapping;
1211	struct anon_vma *anon_vma;
1212	struct vm_area_struct *insert;
1213	struct vm_area_struct *remove;
1214	struct vm_area_struct *remove2;
1215};
1216
1217void __meminit __init_single_page(struct page *page, unsigned long pfn,
1218				unsigned long zone, int nid);
1219
1220/* shrinker related functions */
1221unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
1222			  int priority);
1223
1224#ifdef CONFIG_SHRINKER_DEBUG
1225static inline __printf(2, 0) int shrinker_debugfs_name_alloc(
1226			struct shrinker *shrinker, const char *fmt, va_list ap)
1227{
1228	shrinker->name = kvasprintf_const(GFP_KERNEL, fmt, ap);
1229
1230	return shrinker->name ? 0 : -ENOMEM;
1231}
1232
1233static inline void shrinker_debugfs_name_free(struct shrinker *shrinker)
1234{
1235	kfree_const(shrinker->name);
1236	shrinker->name = NULL;
1237}
1238
1239extern int shrinker_debugfs_add(struct shrinker *shrinker);
1240extern struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
1241					      int *debugfs_id);
1242extern void shrinker_debugfs_remove(struct dentry *debugfs_entry,
1243				    int debugfs_id);
1244#else /* CONFIG_SHRINKER_DEBUG */
1245static inline int shrinker_debugfs_add(struct shrinker *shrinker)
1246{
1247	return 0;
1248}
1249static inline int shrinker_debugfs_name_alloc(struct shrinker *shrinker,
1250					      const char *fmt, va_list ap)
1251{
1252	return 0;
1253}
1254static inline void shrinker_debugfs_name_free(struct shrinker *shrinker)
1255{
1256}
1257static inline struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
1258						     int *debugfs_id)
1259{
1260	*debugfs_id = -1;
1261	return NULL;
1262}
1263static inline void shrinker_debugfs_remove(struct dentry *debugfs_entry,
1264					   int debugfs_id)
1265{
1266}
1267#endif /* CONFIG_SHRINKER_DEBUG */
1268
1269#endif	/* __MM_INTERNAL_H */