/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/tracepoint-defs.h>

/*
 * The set of flags that only affect watermark checking and reclaim
 * behaviour. This is used by the MM to obey the caller constraints
 * about IO, FS and watermark checking while ignoring placement
 * hints such as HIGHMEM usage.
 */
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
			__GFP_ATOMIC)

/* The GFP flags allowed during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))

/* Control allocation cpuset and node placement constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
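
/*
 * Illustrative sketch (not part of this header): callers apply these masks
 * with a bitwise AND, e.g. a slab-style consumer might do:
 *
 *	gfp_t reclaim_gfp = gfp_mask & GFP_RECLAIM_MASK; // keep only reclaim behaviour
 *	if (unlikely(gfp_mask & GFP_SLAB_BUG_MASK))	 // placement hints invalid for slab
 *		BUG();
 */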

void page_writeback_init(void);

int do_swap_page(struct vm_fault *vmf);

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);

static inline bool can_madv_dontneed_vma(struct vm_area_struct *vma)
{
	return !(vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP));
}

void unmap_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end,
			     struct zap_details *details);

extern int __do_page_cache_readahead(struct address_space *mapping,
		struct file *filp, pgoff_t offset, unsigned long nr_to_read,
		unsigned long lookahead_size);

/*
 * Submit IO for the read-ahead request in file_ra_state.
 */
static inline unsigned long ra_submit(struct file_ra_state *ra,
		struct address_space *mapping, struct file *filp)
{
	return __do_page_cache_readahead(mapping, filp,
					ra->start, ra->size, ra->async_size);
}

/*
 * Turn a non-refcounted page (->_refcount == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_ref_count(page), page);
	set_page_count(page, 1);
}

extern unsigned long highest_memmap_pfn;

/*
 * Maximum number of reclaim retries without progress before the OOM
 * killer is considered the only way forward.
 */
#define MAX_RECLAIM_RETRIES 16

/*
 * in mm/vmscan.c:
 */
extern int isolate_lru_page(struct page *page);
extern void putback_lru_page(struct page *page);

/*
 * in mm/rmap.c:
 */
extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);

/*
 * in mm/page_alloc.c
 */

/*
 * Structure for holding the mostly immutable allocation parameters passed
 * between functions involved in allocations, including the alloc_pages*
 * family of functions.
 *
 * nodemask, migratetype and high_zoneidx are initialized only once in
 * __alloc_pages_nodemask() and then never change.
 *
 * zonelist, preferred_zone and classzone_idx are set first in
 * __alloc_pages_nodemask() for the fast path, and might be later changed
 * in __alloc_pages_slowpath(). All other functions pass the whole structure
 * by a const pointer.
 */
struct alloc_context {
	struct zonelist *zonelist;
	nodemask_t *nodemask;
	struct zoneref *preferred_zoneref;
	int migratetype;
	enum zone_type high_zoneidx;
	bool spread_dirty_pages;
};

#define ac_classzone_idx(ac) zonelist_zone_idx(ac->preferred_zoneref)

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline unsigned long
__find_buddy_pfn(unsigned long page_pfn, unsigned int order)
{
	return page_pfn ^ (1 << order);
}
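
/*
 * Worked example (illustrative): the order-1 buddy of pfn 12 is
 * __find_buddy_pfn(12, 1) == 12 ^ 2 == 14, and the merged order-2
 * parent of either buddy starts at 12 & ~2 == 12.
 */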

extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone);

static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone)
{
	if (zone->contiguous)
		return pfn_to_page(start_pfn);

	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
}

extern int __isolate_free_page(struct page *page, unsigned int order);
extern void __free_pages_bootmem(struct page *page, unsigned long pfn,
					unsigned int order);
extern void prep_compound_page(struct page *page, unsigned int order);
extern void post_alloc_hook(struct page *page, unsigned int order,
					gfp_t gfp_flags);
extern int user_min_free_kbytes;

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

/*
 * in mm/compaction.c
 */
/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn
 */
struct compact_control {
	struct list_head freepages;	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	struct zone *zone;
	unsigned long nr_freepages;	/* Number of isolated free pages */
	unsigned long nr_migratepages;	/* Number of pages to migrate */
	unsigned long total_migrate_scanned;
	unsigned long total_free_scanned;
	unsigned long free_pfn;		/* isolate_freepages search base */
	unsigned long migrate_pfn;	/* isolate_migratepages search base */
	unsigned long last_migrated_pfn;/* Not yet flushed page being freed */
	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
	int order;			/* order a direct compactor needs */
	int migratetype;		/* migratetype of direct compactor */
	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
	const int classzone_idx;	/* zone index of a direct compactor */
	enum migrate_mode mode;		/* Async or sync migration mode */
	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
	bool direct_compaction;		/* False from kcompactd or /proc/... */
	bool whole_zone;		/* Whole zone should/has been scanned */
	bool contended;			/* Signal lock or sched contention */
	bool finishing_block;		/* Finishing current pageblock */
};

unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn);
unsigned long
isolate_migratepages_range(struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn);
int find_suitable_fallback(struct free_area *area, unsigned int order,
			int migratetype, bool only_stealable, bool *can_steal);

#endif

/*
 * This function returns the order of a free page in the buddy system. In
 * general, page_zone(page)->lock must be held by the caller to prevent the
 * page from being allocated in parallel and returning garbage as the order.
 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
 * page cannot be allocated or merged in parallel. Alternatively, it must
 * handle invalid values gracefully, and use page_order_unsafe() below.
 */
static inline unsigned int page_order(struct page *page)
{
	/* PageBuddy() must be checked by the caller */
	return page_private(page);
}

/*
 * Like page_order(), but for callers who cannot afford to hold the zone lock.
 * PageBuddy() should be checked first by the caller to minimize race window,
 * and invalid values must be handled gracefully.
 *
 * READ_ONCE is used so that if the caller assigns the result into a local
 * variable and e.g. tests it for valid range before using, the compiler cannot
 * decide to remove the variable and inline the page_private(page) multiple
 * times, potentially observing different values in the tests and the actual
 * use of the result.
 */
#define page_order_unsafe(page)		READ_ONCE(page_private(page))
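
/*
 * Hypothetical use (modelled on the compaction scanner), tolerating a racy
 * value by range-checking it before acting on it:
 *
 *	if (PageBuddy(page)) {
 *		unsigned long freepage_order = page_order_unsafe(page);
 *
 *		if (freepage_order < MAX_ORDER)
 *			pfn += (1UL << freepage_order) - 1; // skip the free block
 *	}
 */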

static inline bool is_cow_mapping(vm_flags_t flags)
{
	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}
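
/*
 * Example: a MAP_PRIVATE mapping has VM_MAYWRITE set but VM_SHARED clear,
 * so is_cow_mapping() returns true even while the mapping is currently
 * PROT_READ only; a MAP_SHARED mapping has VM_SHARED set and is never COW.
 */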

/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area - automatically grows in one direction
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return (flags & VM_STACK) == VM_STACK;
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}

/* mm/util.c */
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent);

#ifdef CONFIG_MMU
extern long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *nonblocking);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
{
	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
}

/*
 * must be called with vma's mmap_sem held for read or write, and page locked.
 */
extern void mlock_vma_page(struct page *page);
extern unsigned int munlock_vma_page(struct page *page);

/*
 * Clear the page's PageMlocked(). This can be useful in a situation where
 * we want to unconditionally remove a page from the pagecache -- e.g.,
 * on truncation or freeing.
 *
 * It is legal to call this function for any page, mlocked or not.
 * If called for a page that is still mapped by mlocked vmas, all we do
 * is revert to lazy LRU behaviour -- semantics are not broken.
 */
extern void clear_page_mlock(struct page *page);

/*
 * mlock_migrate_page - called only from migrate_misplaced_transhuge_page()
 * (because that does not go through the full procedure of migration ptes):
 * to migrate the Mlocked page flag; update statistics.
 */
static inline void mlock_migrate_page(struct page *newpage, struct page *page)
{
	if (TestClearPageMlocked(page)) {
		int nr_pages = hpage_nr_pages(page);

		/* Holding pmd lock, no change in irq context: __mod is safe */
		__mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
		SetPageMlocked(newpage);
		__mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages);
	}
}

extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);

/*
 * At what user virtual address is page expected in @vma?
 */
static inline unsigned long
__vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page_to_pgoff(page);
	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
}

static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	unsigned long start, end;

	start = __vma_address(page, vma);
	end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);

	/* page should be within @vma mapping range */
	VM_BUG_ON_VMA(end < vma->vm_start || start >= vma->vm_end, vma);

	return max(start, vma->vm_start);
}
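
/*
 * Worked example (illustrative, 4KiB pages): for a vma with
 * vm_start == 0x7f0000000000 and vm_pgoff == 0x10, a page at pgoff 0x12
 * yields __vma_address() == 0x7f0000000000 + (0x2 << 12) == 0x7f0000002000.
 */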

#else /* !CONFIG_MMU */
static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
static inline void mlock_migrate_page(struct page *new, struct page *old) { }

#endif /* !CONFIG_MMU */

/*
 * Return the mem_map entry representing the 'offset' subpage within
 * the maximally aligned gigantic page 'base'. Handle any discontiguity
 * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
 */
static inline struct page *mem_map_offset(struct page *base, int offset)
{
	if (unlikely(offset >= MAX_ORDER_NR_PAGES))
		return nth_page(base, offset);
	return base + offset;
}

/*
 * Iterator over all subpages within the maximally aligned gigantic
 * page 'base'. Handle any discontiguity in the mem_map.
 */
static inline struct page *mem_map_next(struct page *iter,
						struct page *base, int offset)
{
	if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
		unsigned long pfn = page_to_pfn(base) + offset;
		if (!pfn_valid(pfn))
			return NULL;
		return pfn_to_page(pfn);
	}
	return iter + 1;
}
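
/*
 * Sketch of the intended iteration pattern (hypothetical caller), matching
 * the copy loops used for gigantic hugetlb pages:
 *
 *	struct page *p = base;
 *	for (i = 0; i < nr_subpages; i++) {
 *		// ... operate on p ...
 *		p = mem_map_next(p, base, i + 1);
 *	}
 */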

/*
 * FLATMEM and DISCONTIGMEM configurations use alloc_bootmem_node,
 * so all functions starting at paging_init should be marked __init
 * in those cases. SPARSEMEM, however, allows for memory hotplug,
 * and alloc_bootmem_node is not used.
 */
#ifdef CONFIG_SPARSEMEM
#define __paginginit __meminit
#else
#define __paginginit __init
#endif

/* Memory initialisation debug and verification */
enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		if (level <= MMINIT_WARNING) \
			pr_warn("mminit::" prefix " " fmt, ##arg); \
		else \
			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_zonelist(void);
#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
#if defined(CONFIG_SPARSEMEM)
extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn);
#else
static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn)
{
}
#endif /* CONFIG_SPARSEMEM */

#define NODE_RECLAIM_NOSCAN	-2
#define NODE_RECLAIM_FULL	-1
#define NODE_RECLAIM_SOME	0
#define NODE_RECLAIM_SUCCESS	1

extern int hwpoison_filter(struct page *p);

extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;

extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long,
		unsigned long, unsigned long,
		unsigned long, unsigned long);

extern void set_pageblock_order(void);
unsigned long reclaim_clean_pages_from_list(struct zone *zone,
					    struct list_head *page_list);
/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
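
/*
 * Example: the allocator derives the watermark to check from the low bits,
 * e.g. zone->watermark[alloc_flags & ALLOC_WMARK_MASK] selects the
 * min/low/high threshold passed to the watermark check.
 */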

/*
 * Only MMU archs have async oom victim reclaim - aka oom_reaper so we
 * cannot assume a reduced access to memory reserves is sufficient for
 * !MMU
 */
#ifdef CONFIG_MMU
#define ALLOC_OOM		0x08
#else
#define ALLOC_OOM		ALLOC_NO_WATERMARKS
#endif

#define ALLOC_HARDER		0x10 /* try to alloc harder */
#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
#define ALLOC_CMA		0x80 /* allow allocations from CMA areas */

enum ttu_flags;
struct tlbflush_unmap_batch;


/*
 * only for MM internal work items which do not depend on
 * any allocations or locks which might depend on allocations
 */
extern struct workqueue_struct *mm_percpu_wq;

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
#else
static inline void try_to_unmap_flush(void)
{
}
static inline void try_to_unmap_flush_dirty(void)
{
}
static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

extern const struct trace_print_flags pageflag_names[];
extern const struct trace_print_flags vmaflag_names[];
extern const struct trace_print_flags gfpflag_names[];

static inline bool is_migrate_highatomic(enum migratetype migratetype)
{
	return migratetype == MIGRATE_HIGHATOMIC;
}

static inline bool is_migrate_highatomic_page(struct page *page)
{
	return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
}

void setup_zone_pageset(struct zone *zone);
extern struct page *alloc_new_node_page(struct page *page, unsigned long node);
#endif /* __MM_INTERNAL_H */
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/tracepoint-defs.h>

struct folio_batch;

/*
 * The set of flags that only affect watermark checking and reclaim
 * behaviour. This is used by the MM to obey the caller constraints
 * about IO, FS and watermark checking while ignoring placement
 * hints such as HIGHMEM usage.
 */
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
			__GFP_NOLOCKDEP)

/* The GFP flags allowed during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))

/* Control allocation cpuset and node placement constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)

/*
 * Different from WARN_ON_ONCE(), no warning will be issued
 * when we specify __GFP_NOWARN.
 */
#define WARN_ON_ONCE_GFP(cond, gfp)	({				\
	static bool __section(".data.once") __warned;			\
	int __ret_warn_once = !!(cond);					\
									\
	if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
		__warned = true;					\
		WARN_ON(1);						\
	}								\
	unlikely(__ret_warn_once);					\
})
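
/*
 * Example (hypothetical caller): warn once about a too-large order unless
 * the caller passed __GFP_NOWARN:
 *
 *	if (WARN_ON_ONCE_GFP(order > MAX_PAGE_ORDER, gfp))
 *		return NULL;
 */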

void page_writeback_init(void);

/*
 * If a 16GB hugetlb folio were mapped by PTEs of all of its 4kB pages,
 * its nr_pages_mapped would be 0x400000: choose the ENTIRELY_MAPPED bit
 * above that range, instead of 2*(PMD_SIZE/PAGE_SIZE). Hugetlb currently
 * leaves nr_pages_mapped at 0, but avoid surprise if it participates later.
 */
#define ENTIRELY_MAPPED		0x800000
#define FOLIO_PAGES_MAPPED	(ENTIRELY_MAPPED - 1)

/*
 * Flags passed to __show_mem() and show_free_areas() to suppress output in
 * various contexts.
 */
#define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */

/*
 * How many individual pages have an elevated _mapcount. Excludes
 * the folio's entire_mapcount.
 */
static inline int folio_nr_pages_mapped(struct folio *folio)
{
	return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED;
}

static inline void *folio_raw_mapping(struct folio *folio)
{
	unsigned long mapping = (unsigned long)folio->mapping;

	return (void *)(mapping & ~PAGE_MAPPING_FLAGS);
}

#ifdef CONFIG_MMU

/* Flags for folio_pte_batch(). */
typedef int __bitwise fpb_t;

/* Compare PTEs after pte_mkclean(), ignoring the dirty bit. */
#define FPB_IGNORE_DIRTY		((__force fpb_t)BIT(0))

/* Compare PTEs after pte_clear_soft_dirty(), ignoring the soft-dirty bit. */
#define FPB_IGNORE_SOFT_DIRTY		((__force fpb_t)BIT(1))

static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
{
	if (flags & FPB_IGNORE_DIRTY)
		pte = pte_mkclean(pte);
	if (likely(flags & FPB_IGNORE_SOFT_DIRTY))
		pte = pte_clear_soft_dirty(pte);
	return pte_wrprotect(pte_mkold(pte));
}

/**
 * folio_pte_batch - detect a PTE batch for a large folio
 * @folio: The large folio to detect a PTE batch for.
 * @addr: The user virtual address the first page is mapped at.
 * @start_ptep: Page table pointer for the first entry.
 * @pte: Page table entry for the first page.
 * @max_nr: The maximum number of table entries to consider.
 * @flags: Flags to modify the PTE batch semantics.
 * @any_writable: Optional pointer to indicate whether any entry except the
 *		  first one is writable.
 *
 * Detect a PTE batch: consecutive (present) PTEs that map consecutive
 * pages of the same large folio.
 *
 * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN,
 * the accessed bit, writable bit, dirty bit (with FPB_IGNORE_DIRTY) and
 * soft-dirty bit (with FPB_IGNORE_SOFT_DIRTY).
 *
 * start_ptep must map any page of the folio. max_nr must be at least one and
 * must be limited by the caller so scanning cannot exceed a single page table.
 *
 * Return: the number of table entries in the batch.
 */
static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
		pte_t *start_ptep, pte_t pte, int max_nr, fpb_t flags,
		bool *any_writable)
{
	unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio);
	const pte_t *end_ptep = start_ptep + max_nr;
	pte_t expected_pte, *ptep;
	bool writable;
	int nr;

	if (any_writable)
		*any_writable = false;

	VM_WARN_ON_FOLIO(!pte_present(pte), folio);
	VM_WARN_ON_FOLIO(!folio_test_large(folio) || max_nr < 1, folio);
	VM_WARN_ON_FOLIO(page_folio(pfn_to_page(pte_pfn(pte))) != folio, folio);

	nr = pte_batch_hint(start_ptep, pte);
	expected_pte = __pte_batch_clear_ignored(pte_advance_pfn(pte, nr), flags);
	ptep = start_ptep + nr;

	while (ptep < end_ptep) {
		pte = ptep_get(ptep);
		if (any_writable)
			writable = !!pte_write(pte);
		pte = __pte_batch_clear_ignored(pte, flags);

		if (!pte_same(pte, expected_pte))
			break;

		/*
		 * Stop immediately once we reached the end of the folio. In
		 * corner cases the next PFN might fall into a different
		 * folio.
		 */
		if (pte_pfn(pte) >= folio_end_pfn)
			break;

		if (any_writable)
			*any_writable |= writable;

		nr = pte_batch_hint(ptep, pte);
		expected_pte = pte_advance_pfn(expected_pte, nr);
		ptep += nr;
	}

	return min(ptep - start_ptep, max_nr);
}
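
/*
 * Hypothetical caller sketch: consume PTEs mapping one large folio in
 * batches while walking a single page table:
 *
 *	nr = folio_pte_batch(folio, addr, ptep, ptent, max_nr,
 *			     FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY, NULL);
 *	// ... operate on nr consecutive pages of the folio ...
 *	addr += nr * PAGE_SIZE;
 *	ptep += nr;
 */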
#endif /* CONFIG_MMU */

void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
						int nr_throttled);
static inline void acct_reclaim_writeback(struct folio *folio)
{
	pg_data_t *pgdat = folio_pgdat(folio);
	int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled);

	if (nr_throttled)
		__acct_reclaim_writeback(pgdat, folio, nr_throttled);
}

static inline void wake_throttle_isolated(pg_data_t *pgdat)
{
	wait_queue_head_t *wqh;

	wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED];
	if (waitqueue_active(wqh))
		wake_up(wqh);
}

vm_fault_t vmf_anon_prepare(struct vm_fault *vmf);
vm_fault_t do_swap_page(struct vm_fault *vmf);
void folio_rotate_reclaimable(struct folio *folio);
bool __folio_end_writeback(struct folio *folio);
void deactivate_file_folio(struct folio *folio);
void folio_activate(struct folio *folio);

void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
		   struct vm_area_struct *start_vma, unsigned long floor,
		   unsigned long ceiling, bool mm_wr_locked);
void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);

struct zap_details;
void unmap_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end,
			     struct zap_details *details);

void page_cache_ra_order(struct readahead_control *, struct file_ra_state *,
		unsigned int order);
void force_page_cache_ra(struct readahead_control *, unsigned long nr);
static inline void force_page_cache_readahead(struct address_space *mapping,
		struct file *file, pgoff_t index, unsigned long nr_to_read)
{
	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
	force_page_cache_ra(&ractl, nr_to_read);
}

unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
void filemap_free_folio(struct address_space *mapping, struct folio *folio);
int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
		loff_t end);
long mapping_evict_folio(struct address_space *mapping, struct folio *folio);
unsigned long mapping_try_invalidate(struct address_space *mapping,
		pgoff_t start, pgoff_t end, unsigned long *nr_failed);

/**
 * folio_evictable - Test whether a folio is evictable.
 * @folio: The folio to test.
 *
 * Test whether @folio is evictable -- i.e., should be placed on
 * active/inactive lists vs unevictable list.
 *
 * Reasons folio might not be evictable:
 * 1. folio's mapping marked unevictable
 * 2. One of the pages in the folio is part of an mlocked VMA
 */
static inline bool folio_evictable(struct folio *folio)
{
	bool ret;

	/* Prevent address_space of inode and swap cache from being freed */
	rcu_read_lock();
	ret = !mapping_unevictable(folio_mapping(folio)) &&
			!folio_test_mlocked(folio);
	rcu_read_unlock();
	return ret;
}

/*
 * Turn a non-refcounted page (->_refcount == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_ref_count(page), page);
	set_page_count(page, 1);
}

/*
 * Return true if a folio needs ->release_folio() calling upon it.
 */
static inline bool folio_needs_release(struct folio *folio)
{
	struct address_space *mapping = folio_mapping(folio);

	return folio_has_private(folio) ||
		(mapping && mapping_release_always(mapping));
}

extern unsigned long highest_memmap_pfn;

/*
 * Maximum number of reclaim retries without progress before the OOM
 * killer is considered the only way forward.
 */
#define MAX_RECLAIM_RETRIES 16

/*
 * in mm/vmscan.c:
 */
bool isolate_lru_page(struct page *page);
bool folio_isolate_lru(struct folio *folio);
void putback_lru_page(struct page *page);
void folio_putback_lru(struct folio *folio);
extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);

/*
 * in mm/rmap.c:
 */
pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);

/*
 * in mm/page_alloc.c
 */
#define K(x) ((x) << (PAGE_SHIFT-10))
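
/*
 * Example: K() converts a page count to KiB for reporting; with 4KiB
 * pages, K(x) == x << 2, so K(32) == 128.
 */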

extern char * const zone_names[MAX_NR_ZONES];

/* perform sanity checks on struct pages being allocated or freed */
DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);

extern int min_free_kbytes;

void setup_per_zone_wmarks(void);
void calculate_min_free_kbytes(void);
int __meminit init_per_zone_wmark_min(void);
void page_alloc_sysctl_init(void);

/*
 * Structure for holding the mostly immutable allocation parameters passed
 * between functions involved in allocations, including the alloc_pages*
 * family of functions.
 *
 * nodemask, migratetype and highest_zoneidx are initialized only once in
 * __alloc_pages() and then never change.
 *
 * zonelist, preferred_zone and highest_zoneidx are set first in
 * __alloc_pages() for the fast path, and might be later changed
 * in __alloc_pages_slowpath(). All other functions pass the whole structure
 * by a const pointer.
 */
struct alloc_context {
	struct zonelist *zonelist;
	nodemask_t *nodemask;
	struct zoneref *preferred_zoneref;
	int migratetype;

	/*
	 * highest_zoneidx represents highest usable zone index of
	 * the allocation request. Due to the nature of the zone,
	 * memory on lower zone than the highest_zoneidx will be
	 * protected by lowmem_reserve[highest_zoneidx].
	 *
	 * highest_zoneidx is also used by reclaim/compaction to limit
	 * the target zone since higher zone than this index cannot be
	 * usable for this allocation request.
	 */
	enum zone_type highest_zoneidx;
	bool spread_dirty_pages;
};

/*
 * This function returns the order of a free page in the buddy system. In
 * general, page_zone(page)->lock must be held by the caller to prevent the
 * page from being allocated in parallel and returning garbage as the order.
 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
 * page cannot be allocated or merged in parallel. Alternatively, it must
 * handle invalid values gracefully, and use buddy_order_unsafe() below.
 */
static inline unsigned int buddy_order(struct page *page)
{
	/* PageBuddy() must be checked by the caller */
	return page_private(page);
}

/*
 * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
 * PageBuddy() should be checked first by the caller to minimize race window,
 * and invalid values must be handled gracefully.
 *
 * READ_ONCE is used so that if the caller assigns the result into a local
 * variable and e.g. tests it for valid range before using, the compiler cannot
 * decide to remove the variable and inline the page_private(page) multiple
 * times, potentially observing different values in the tests and the actual
 * use of the result.
 */
#define buddy_order_unsafe(page)	READ_ONCE(page_private(page))

/*
 * This function checks whether a page is free && is the buddy
 * we can coalesce a page and its buddy if
 * (a) the buddy is not in a hole (check before calling!) &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set PageBuddy.
 * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline bool page_is_buddy(struct page *page, struct page *buddy,
				 unsigned int order)
{
	if (!page_is_guard(buddy) && !PageBuddy(buddy))
		return false;

	if (buddy_order(buddy) != order)
		return false;

	/*
	 * zone check is done late to avoid uselessly calculating
	 * zone/node ids for pages that could never merge.
	 */
	if (page_zone_id(page) != page_zone_id(buddy))
		return false;

	VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

	return true;
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_PAGE_ORDER
 */
static inline unsigned long
__find_buddy_pfn(unsigned long page_pfn, unsigned int order)
{
	return page_pfn ^ (1 << order);
}

/*
 * Find the buddy of @page and validate it.
 * @page: The input page
 * @pfn: The pfn of the page, it saves a call to page_to_pfn() when the
 *       function is used in the performance-critical __free_one_page().
 * @order: The order of the page
 * @buddy_pfn: The output pointer to the buddy pfn, it also saves a call to
 *             page_to_pfn().
 *
 * The found buddy may not be PageBuddy, may lie outside @page's zone, or its
 * order may differ from @page's. Validation is necessary before using it.
 *
 * Return: the found buddy page or NULL if not found.
 */
static inline struct page *find_buddy_page_pfn(struct page *page,
			unsigned long pfn, unsigned int order, unsigned long *buddy_pfn)
{
	unsigned long __buddy_pfn = __find_buddy_pfn(pfn, order);
	struct page *buddy;

	buddy = page + (__buddy_pfn - pfn);
	if (buddy_pfn)
		*buddy_pfn = __buddy_pfn;

	if (page_is_buddy(page, buddy, order))
		return buddy;
	return NULL;
}
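
/*
 * Sketch of a buddy merge step (hypothetical, modelled on __free_one_page()):
 *
 *	buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn);
 *	if (buddy) {
 *		// detach buddy from its free list, then combine:
 *		combined_pfn = buddy_pfn & pfn;
 *		page = page + (combined_pfn - pfn);
 *		pfn = combined_pfn;
 *		order++;
 *	}
 */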

extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone);

static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone)
{
	if (zone->contiguous)
		return pfn_to_page(start_pfn);

	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
}

void set_zone_contiguous(struct zone *zone);

static inline void clear_zone_contiguous(struct zone *zone)
{
	zone->contiguous = false;
}

extern int __isolate_free_page(struct page *page, unsigned int order);
extern void __putback_isolated_page(struct page *page, unsigned int order,
				    int mt);
extern void memblock_free_pages(struct page *page, unsigned long pfn,
					unsigned int order);
extern void __free_pages_core(struct page *page, unsigned int order);

/*
 * This will have no effect, other than possibly generating a warning, if the
 * caller passes in a non-large folio.
 */
static inline void folio_set_order(struct folio *folio, unsigned int order)
{
	if (WARN_ON_ONCE(!order || !folio_test_large(folio)))
		return;

	folio->_flags_1 = (folio->_flags_1 & ~0xffUL) | order;
#ifdef CONFIG_64BIT
	folio->_folio_nr_pages = 1U << order;
#endif
}

void folio_undo_large_rmappable(struct folio *folio);

static inline struct folio *page_rmappable_folio(struct page *page)
{
	struct folio *folio = (struct folio *)page;

	folio_prep_large_rmappable(folio);
	return folio;
}

static inline void prep_compound_head(struct page *page, unsigned int order)
{
	struct folio *folio = (struct folio *)page;

	folio_set_order(folio, order);
	atomic_set(&folio->_entire_mapcount, -1);
	atomic_set(&folio->_nr_pages_mapped, 0);
	atomic_set(&folio->_pincount, 0);
}

static inline void prep_compound_tail(struct page *head, int tail_idx)
{
	struct page *p = head + tail_idx;

	p->mapping = TAIL_MAPPING;
	set_compound_head(p, head);
	set_page_private(p, 0);
}

extern void prep_compound_page(struct page *page, unsigned int order);

extern void post_alloc_hook(struct page *page, unsigned int order,
					gfp_t gfp_flags);
extern bool free_pages_prepare(struct page *page, unsigned int order);

extern int user_min_free_kbytes;

void free_unref_page(struct page *page, unsigned int order);
void free_unref_folios(struct folio_batch *fbatch);

extern void zone_pcp_reset(struct zone *zone);
extern void zone_pcp_disable(struct zone *zone);
extern void zone_pcp_enable(struct zone *zone);
extern void zone_pcp_init(struct zone *zone);

extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
			  phys_addr_t min_addr,
			  int nid, bool exact_nid);

void memmap_init_range(unsigned long, int, unsigned long, unsigned long,
		unsigned long, enum meminit_context, struct vmem_altmap *, int);


int split_free_page(struct page *free_page,
			unsigned int order, unsigned long split_pfn_offset);

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

/*
 * in mm/compaction.c
 */
/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn
 */
struct compact_control {
	struct list_head freepages[NR_PAGE_ORDERS];	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned int nr_freepages;	/* Number of isolated free pages */
	unsigned int nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */
	/*
	 * Acts as an in/out parameter to page isolation for migration.
	 * isolate_migratepages uses it as a search base.
	 * isolate_migratepages_block will update the value to the next pfn
	 * after the last isolated one.
	 */
	unsigned long migrate_pfn;
	unsigned long fast_start_pfn;	/* a pfn to start linear scan from */
	struct zone *zone;
	unsigned long total_migrate_scanned;
	unsigned long total_free_scanned;
	unsigned short fast_search_fail;/* failures to use free list searches */
	short search_order;		/* order to start a fast search at */
	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
	int order;			/* order a direct compactor needs */
	int migratetype;		/* migratetype of direct compactor */
	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
	const int highest_zoneidx;	/* zone index of a direct compactor */
	enum migrate_mode mode;		/* Async or sync migration mode */
	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
	bool direct_compaction;		/* False from kcompactd or /proc/... */
	bool proactive_compaction;	/* kcompactd proactive compaction */
	bool whole_zone;		/* Whole zone should/has been scanned */
	bool contended;			/* Signal lock contention */
	bool finish_pageblock;		/* Scan the remainder of a pageblock. Used
					 * when there are potentially transient
					 * isolation or migration failures to
					 * ensure forward progress.
					 */
	bool alloc_contig;		/* alloc_contig_range allocation */
};

/*
 * Used in direct compaction when a page should be taken from the freelists
 * immediately when one is created during the free path.
 */
struct capture_control {
	struct compact_control *cc;
	struct page *page;
};

unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn);
int
isolate_migratepages_range(struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn);

int __alloc_contig_migrate_range(struct compact_control *cc,
					unsigned long start, unsigned long end,
					int migratetype);

/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
void init_cma_reserved_pageblock(struct page *page);

#endif /* CONFIG_COMPACTION || CONFIG_CMA */

int find_suitable_fallback(struct free_area *area, unsigned int order,
			int migratetype, bool only_stealable, bool *can_steal);

static inline bool free_area_empty(struct free_area *area, int migratetype)
{
	return list_empty(&area->free_list[migratetype]);
}

/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area (including shadow stacks)
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return ((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK);
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}

/* mm/util.c */
struct anon_vma *folio_anon_vma(struct folio *folio);

#ifdef CONFIG_MMU
void unmap_mapping_folio(struct folio *folio);
extern long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *locked);
extern long faultin_page_range(struct mm_struct *mm, unsigned long start,
		unsigned long end, bool write, int *locked);
extern bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
			       unsigned long bytes);

/*
 * NOTE: This function can't tell whether the folio is "fully mapped" in the
 * range.
 * "fully mapped" means all the pages of the folio are associated with the
 * page table of the range, while this function just checks whether the
 * folio range is within the range [start, end). Callers who care about the
 * page table association need to check it themselves.
 *
 * Typical usage (like mlock or madvise) is:
 * Caller knows at least 1 page of folio is associated with page table of VMA
 * and the range [start, end) intersects with the VMA range. Caller wants
 * to know whether the folio is fully associated with the range. It calls
 * this function to check whether the folio is in the range first. Then checks
 * the page table to know whether the folio is fully mapped to the range.
 */
static inline bool
folio_within_range(struct folio *folio, struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	pgoff_t pgoff, addr;
	unsigned long vma_pglen = vma_pages(vma);

	VM_WARN_ON_FOLIO(folio_test_ksm(folio), folio);
	if (start > end)
		return false;

	if (start < vma->vm_start)
		start = vma->vm_start;

	if (end > vma->vm_end)
		end = vma->vm_end;

	pgoff = folio_pgoff(folio);

	/* if folio start address is not in vma range */
	if (!in_range(pgoff, vma->vm_pgoff, vma_pglen))
		return false;

	addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);

	return !(addr < start || end - addr < folio_size(folio));
}

static inline bool
folio_within_vma(struct folio *folio, struct vm_area_struct *vma)
{
	return folio_within_range(folio, vma, vma->vm_start, vma->vm_end);
}

/*
 * mlock_vma_folio() and munlock_vma_folio():
 * should be called with vma's mmap_lock held for read or write,
 * under page table lock for the pte/pmd being added or removed.
 *
 * mlock is usually called at the end of folio_add_*_rmap_*(), munlock at
 * the end of folio_remove_rmap_*(); but new anon folios are managed by
 * folio_add_lru_vma() calling mlock_new_folio().
 */
void mlock_folio(struct folio *folio);
static inline void mlock_vma_folio(struct folio *folio,
				struct vm_area_struct *vma)
{
	/*
	 * The VM_SPECIAL check here serves two purposes.
	 * 1) VM_IO check prevents migration from double-counting during mlock.
	 * 2) Although mmap_region() and mlock_fixup() take care that VM_LOCKED
	 *    is never left set on a VM_SPECIAL vma, there is an interval while
	 *    file->f_op->mmap() is using vm_insert_page(s), when VM_LOCKED may
	 *    still be set while VM_SPECIAL bits are added: so ignore it then.
	 */
	if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED))
		mlock_folio(folio);
}

void munlock_folio(struct folio *folio);
static inline void munlock_vma_folio(struct folio *folio,
					struct vm_area_struct *vma)
{
	/*
	 * munlock if the function is called. Ideally, we should only
	 * do munlock if any page of the folio is unmapped from the VMA,
	 * leaving the folio not fully mapped to the VMA.
	 *
	 * But it's not easy to confirm that's the situation. So we
	 * always munlock the folio and page reclaim will correct it
	 * if it's wrong.
	 */
	if (unlikely(vma->vm_flags & VM_LOCKED))
		munlock_folio(folio);
}

void mlock_new_folio(struct folio *folio);
bool need_mlock_drain(int cpu);
void mlock_drain_local(void);
void mlock_drain_remote(int cpu);

extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);

/*
 * Return the start of user virtual address at the specific offset within
 * a vma.
 */
static inline unsigned long
vma_pgoff_address(pgoff_t pgoff, unsigned long nr_pages,
		  struct vm_area_struct *vma)
{
	unsigned long address;

	if (pgoff >= vma->vm_pgoff) {
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		/* Check for address beyond vma (or wrapped through 0?) */
		if (address < vma->vm_start || address >= vma->vm_end)
			address = -EFAULT;
	} else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) {
		/* Test above avoids possibility of wrap to 0 on 32-bit */
		address = vma->vm_start;
	} else {
		address = -EFAULT;
	}
	return address;
}
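
/*
 * Worked example (illustrative, 4KiB pages): for a vma covering
 * [0x1000, 0x5000) with vm_pgoff == 8 (i.e. pgoffs 8..11),
 * vma_pgoff_address(10, 1, vma) == 0x1000 + ((10 - 8) << 12) == 0x3000,
 * while pgoff 12 falls past vm_end and yields -EFAULT.
 */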

/*
 * Return the start of user virtual address of a page within a vma.
 * Returns -EFAULT if all of the page is outside the range of vma.
 * If page is a compound head, the entire compound page is considered.
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	VM_BUG_ON_PAGE(PageKsm(page), page);	/* KSM page->index unusable */
	return vma_pgoff_address(page_to_pgoff(page), compound_nr(page), vma);
}

/*
 * Then at what user virtual address will none of the range be found in vma?
 * Assumes that vma_address() already returned a good starting address.
 */
static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)
{
	struct vm_area_struct *vma = pvmw->vma;
	pgoff_t pgoff;
	unsigned long address;

	/* Common case, plus ->pgoff is invalid for KSM */
	if (pvmw->nr_pages == 1)
		return pvmw->address + PAGE_SIZE;

	pgoff = pvmw->pgoff + pvmw->nr_pages;
	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	/* Check for address beyond vma (or wrapped through 0?) */
	if (address < vma->vm_start || address > vma->vm_end)
		address = vma->vm_end;
	return address;
}

static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
						    struct file *fpin)
{
	int flags = vmf->flags;

	if (fpin)
		return fpin;

	/*
	 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
	 * anything, so we only pin the file and drop the mmap_lock if only
	 * FAULT_FLAG_ALLOW_RETRY is set, while this is the first attempt.
	 */
	if (fault_flag_allow_retry_first(flags) &&
	    !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
		fpin = get_file(vmf->vma->vm_file);
		release_fault_lock(vmf);
	}
	return fpin;
}
#else /* !CONFIG_MMU */
static inline void unmap_mapping_folio(struct folio *folio) { }
static inline void mlock_new_folio(struct folio *folio) { }
static inline bool need_mlock_drain(int cpu) { return false; }
static inline void mlock_drain_local(void) { }
static inline void mlock_drain_remote(int cpu) { }
static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
{
}
#endif /* !CONFIG_MMU */

/* Memory initialisation debug and verification */
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
DECLARE_STATIC_KEY_TRUE(deferred_pages);

bool __init deferred_grow_zone(struct zone *zone, unsigned int order);
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		if (level <= MMINIT_WARNING) \
			pr_warn("mminit::" prefix " " fmt, ##arg); \
		else \
			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)
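
/*
 * Example (hypothetical): a verification message that is only emitted
 * when mminit_loglevel has been raised above MMINIT_VERIFY:
 *
 *	mminit_dprintk(MMINIT_VERIFY, "zonelist", "node %d order %d\n",
 *		       nid, order);
 */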

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_zonelist(void);
#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

#define NODE_RECLAIM_NOSCAN	-2
#define NODE_RECLAIM_FULL	-1
#define NODE_RECLAIM_SOME	0
#define NODE_RECLAIM_SUCCESS	1

#ifdef CONFIG_NUMA
extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
extern int find_next_best_node(int node, nodemask_t *used_node_mask);
#else
static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
				unsigned int order)
{
	return NODE_RECLAIM_NOSCAN;
}
static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
{
	return NUMA_NO_NODE;
}
#endif

/*
 * mm/memory-failure.c
 */
extern int hwpoison_filter(struct page *p);

extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;

extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long,
		unsigned long, unsigned long,
		unsigned long, unsigned long);

extern void set_pageblock_order(void);
unsigned long reclaim_pages(struct list_head *folio_list, bool ignore_references);
unsigned int reclaim_clean_pages_from_list(struct zone *zone,
					    struct list_head *folio_list);
/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)

/*
 * Only MMU archs have async oom victim reclaim - aka oom_reaper so we
 * cannot assume a reduced access to memory reserves is sufficient for
 * !MMU
 */
#ifdef CONFIG_MMU
#define ALLOC_OOM		0x08
#else
#define ALLOC_OOM		ALLOC_NO_WATERMARKS
#endif

#define ALLOC_NON_BLOCK		 0x10 /* Caller cannot block. Allow access
				       * to 25% of the min watermark or
				       * 62.5% if __GFP_HIGH is set.
				       */
#define ALLOC_MIN_RESERVE	 0x20 /* __GFP_HIGH set. Allow access to 50%
				       * of the min watermark.
				       */
#define ALLOC_CPUSET		 0x40 /* check for correct cpuset */
#define ALLOC_CMA		 0x80 /* allow allocations from CMA areas */
#ifdef CONFIG_ZONE_DMA32
#define ALLOC_NOFRAGMENT	0x100 /* avoid mixing pageblock types */
#else
#define ALLOC_NOFRAGMENT	  0x0
#endif
#define ALLOC_HIGHATOMIC	0x200 /* Allows access to MIGRATE_HIGHATOMIC */
#define ALLOC_KSWAPD		0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */

/* Flags that allow allocations below the min watermark. */
#define ALLOC_RESERVES (ALLOC_NON_BLOCK|ALLOC_MIN_RESERVE|ALLOC_HIGHATOMIC|ALLOC_OOM)
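
/*
 * Example (sketch): the watermark check tests these bits to decide whether
 * a request may dip below the min watermark, roughly:
 *
 *	if (unlikely(alloc_flags & ALLOC_RESERVES))
 *		/* lower the effective min watermark accordingly */
 */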

enum ttu_flags;
struct tlbflush_unmap_batch;


/*
 * only for MM internal work items which do not depend on
 * any allocations or locks which might depend on allocations
 */
extern struct workqueue_struct *mm_percpu_wq;

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
#else
static inline void try_to_unmap_flush(void)
{
}
static inline void try_to_unmap_flush_dirty(void)
{
}
static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

extern const struct trace_print_flags pageflag_names[];
extern const struct trace_print_flags pagetype_names[];
extern const struct trace_print_flags vmaflag_names[];
extern const struct trace_print_flags gfpflag_names[];

static inline bool is_migrate_highatomic(enum migratetype migratetype)
{
	return migratetype == MIGRATE_HIGHATOMIC;
}

static inline bool is_migrate_highatomic_page(struct page *page)
{
	return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
}

void setup_zone_pageset(struct zone *zone);

struct migration_target_control {
	int nid;		/* preferred node id */
	nodemask_t *nmask;
	gfp_t gfp_mask;
};

/*
 * mm/filemap.c
 */
size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
			      struct folio *folio, loff_t fpos, size_t size);

/*
 * mm/vmalloc.c
 */
#ifdef CONFIG_MMU
void __init vmalloc_init(void);
int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift);
#else
static inline void vmalloc_init(void)
{
}

static inline
int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	return -EINVAL;
}
#endif

int __must_check __vmap_pages_range_noflush(unsigned long addr,
			       unsigned long end, pgprot_t prot,
			       struct page **pages, unsigned int page_shift);

void vunmap_range_noflush(unsigned long start, unsigned long end);

void __vunmap_range_noflush(unsigned long start, unsigned long end);

int numa_migrate_prep(struct folio *folio, struct vm_area_struct *vma,
		      unsigned long addr, int page_nid, int *flags);

void free_zone_device_page(struct page *page);
int migrate_device_coherent_page(struct page *page);

/*
 * mm/gup.c
 */
struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags);
int __must_check try_grab_page(struct page *page, unsigned int flags);

/*
 * mm/huge_memory.c
 */
struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
				   unsigned long addr, pmd_t *pmd,
				   unsigned int flags);

/*
 * mm/mmap.c
 */
struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
					struct vm_area_struct *vma,
					unsigned long delta);

enum {
	/* mark page accessed */
	FOLL_TOUCH = 1 << 16,
	/* a retry, previous pass started an IO */
	FOLL_TRIED = 1 << 17,
	/* we are working on non-current tsk/mm */
	FOLL_REMOTE = 1 << 18,
	/* pages must be released via unpin_user_page */
	FOLL_PIN = 1 << 19,
	/* gup_fast: prevent fall-back to slow gup */
	FOLL_FAST_ONLY = 1 << 20,
	/* allow unlocking the mmap lock */
	FOLL_UNLOCKABLE = 1 << 21,
	/* VMA lookup+checks compatible with MADV_POPULATE_(READ|WRITE) */
	FOLL_MADV_POPULATE = 1 << 22,
};
1132
1133#define INTERNAL_GUP_FLAGS (FOLL_TOUCH | FOLL_TRIED | FOLL_REMOTE | FOLL_PIN | \
1134 FOLL_FAST_ONLY | FOLL_UNLOCKABLE | \
1135 FOLL_MADV_POPULATE)
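
/*
 * Assumed usage sketch (following the argument sanitization done in
 * mm/gup.c): these flags are reserved for internal use, so the public
 * entry points reject any caller that tries to pass one in:
 *
 *	if (WARN_ON_ONCE(gup_flags & INTERNAL_GUP_FLAGS))
 *		return -EINVAL;
 */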

/*
 * Indicates whether GUP has to trigger unsharing via FAULT_FLAG_UNSHARE for
 * pages that are write-protected in the page table, such that the GUP pin
 * will remain consistent with the pages mapped into the page tables of the
 * MM.
 *
 * Temporary unmapping of PageAnonExclusive() pages or clearing of
 * PageAnonExclusive() has to protect against concurrent GUP:
 * * Ordinary GUP: Using the PT lock
 * * GUP-fast and fork(): mm->write_protect_seq
 * * GUP-fast and KSM or temporary unmapping (swap, migration): see
 *   folio_try_share_anon_rmap_*()
 *
 * Must be called with the (sub)page that's actually referenced via the
 * page table entry, which might not necessarily be the head page for a
 * PTE-mapped THP.
 *
 * If the vma is NULL, we're coming from the GUP-fast path and might have
 * to fall back to the slow path just to look up the vma.
 */
static inline bool gup_must_unshare(struct vm_area_struct *vma,
				    unsigned int flags, struct page *page)
{
	/*
	 * FOLL_WRITE is implicitly handled correctly as the page table entry
	 * has to be writable -- and if it references (part of) an anonymous
	 * folio, that part is required to be marked exclusive.
	 */
	if ((flags & (FOLL_WRITE | FOLL_PIN)) != FOLL_PIN)
		return false;
	/*
	 * Note: PageAnon(page) is stable until the page is actually getting
	 * freed.
	 */
	if (!PageAnon(page)) {
		/*
		 * We only care about R/O long-term pinning: R/O short-term
		 * pinning does not have the semantics to observe successive
		 * changes through the process page tables.
		 */
		if (!(flags & FOLL_LONGTERM))
			return false;

		/* We really need the vma ... */
		if (!vma)
			return true;

		/*
		 * ... because we only care about writable private ("COW")
		 * mappings where we have to break COW early.
		 */
		return is_cow_mapping(vma->vm_flags);
	}

	/* Paired with a memory barrier in folio_try_share_anon_rmap_*(). */
	if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
		smp_rmb();

	/*
	 * During GUP-fast we might not get called on the head page for a
	 * hugetlb page that is mapped using cont-PTE, because GUP-fast does
	 * not work with the abstracted hugetlb PTEs that always point at the
	 * head page. For hugetlb, PageAnonExclusive only applies on the head
	 * page (as it cannot be partially COW-shared), so look up the head
	 * page.
	 */
	if (unlikely(!PageHead(page) && PageHuge(page)))
		page = compound_head(page);

	/*
	 * Note that PageKsm() pages cannot be exclusive, and consequently,
	 * cannot get pinned.
	 */
	return !PageAnonExclusive(page);
}
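
/*
 * Hedged caller sketch (based on the follow_page_pte() pattern in
 * mm/gup.c): for a write-protected PTE, a pin that requires unsharing
 * fails with -EMLINK so the caller can retry via a FAULT_FLAG_UNSHARE
 * fault:
 *
 *	if (!pte_write(pte) && gup_must_unshare(vma, flags, page)) {
 *		page = ERR_PTR(-EMLINK);
 *		goto out;
 *	}
 */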

extern bool mirrored_kernelcore;
extern bool memblock_has_mirror(void);

static __always_inline void vma_set_range(struct vm_area_struct *vma,
					  unsigned long start, unsigned long end,
					  pgoff_t pgoff)
{
	vma->vm_start = start;
	vma->vm_end = end;
	vma->vm_pgoff = pgoff;
}

static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
{
	/*
	 * NOTE: we must check this before VM_SOFTDIRTY on soft-dirty
	 * enablements, because if soft-dirty is not compiled in,
	 * VM_SOFTDIRTY is defined as 0x0, and then !(vm_flags & VM_SOFTDIRTY)
	 * would be constantly true.
	 */
	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return false;

	/*
	 * Soft-dirty is kind of special: its tracking is enabled when the
	 * vma flag is *not* set.
	 */
	return !(vma->vm_flags & VM_SOFTDIRTY);
}

static inline void vma_iter_config(struct vma_iterator *vmi,
		unsigned long index, unsigned long last)
{
	__mas_set_range(&vmi->mas, index, last - 1);
}

/*
 * VMA Iterator functions shared between nommu and mmap
 */
static inline int vma_iter_prealloc(struct vma_iterator *vmi,
				    struct vm_area_struct *vma)
{
	return mas_preallocate(&vmi->mas, vma, GFP_KERNEL);
}

static inline void vma_iter_clear(struct vma_iterator *vmi)
{
	mas_store_prealloc(&vmi->mas, NULL);
}

static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
{
	return mas_walk(&vmi->mas);
}

/* Store a VMA with preallocated memory */
static inline void vma_iter_store(struct vma_iterator *vmi,
				  struct vm_area_struct *vma)
{
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.index > vma->vm_start)) {
		pr_warn("%lx > %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
			vmi->mas.index, vma->vm_start, vma->vm_start,
			vma->vm_end, vmi->mas.index, vmi->mas.last);
	}
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.last < vma->vm_start)) {
		pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
			vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,
			vmi->mas.index, vmi->mas.last);
	}
#endif

	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_prealloc(&vmi->mas, vma);
}

static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
			struct vm_area_struct *vma, gfp_t gfp)
{
	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_gfp(&vmi->mas, vma, gfp);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	return 0;
}
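
/*
 * Minimal sketch of the preallocate/store protocol above (locking and
 * surrounding error handling omitted): node allocations happen while
 * failure is still recoverable, so the store itself cannot fail:
 *
 *	vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
 *	if (vma_iter_prealloc(&vmi, vma))
 *		return -ENOMEM;
 *	vma_iter_store(&vmi, vma);
 */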

/*
 * VMA lock generalization
 */
struct vma_prepare {
	struct vm_area_struct *vma;
	struct vm_area_struct *adj_next;
	struct file *file;
	struct address_space *mapping;
	struct anon_vma *anon_vma;
	struct vm_area_struct *insert;
	struct vm_area_struct *remove;
	struct vm_area_struct *remove2;
};
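
/*
 * Hedged sketch of how this descriptor is consumed; the helper names are
 * assumed from mm/mmap.c (vma_prepare()/vma_complete()) and simplified:
 * the caller records every VMA touched by a modification, takes the
 * rmap/file locks in the prepare step, edits the tree, then unwinds:
 *
 *	struct vma_prepare vp = { .vma = vma };
 *
 *	vma_prepare(&vp);
 *	... modify the maple tree and the vma ranges ...
 *	vma_complete(&vp, vmi, vma->vm_mm);
 */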

void __meminit __init_single_page(struct page *page, unsigned long pfn,
				unsigned long zone, int nid);

/* shrinker related functions */
unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
			  int priority);

#ifdef CONFIG_SHRINKER_DEBUG
static inline __printf(2, 0) int shrinker_debugfs_name_alloc(
			struct shrinker *shrinker, const char *fmt, va_list ap)
{
	shrinker->name = kvasprintf_const(GFP_KERNEL, fmt, ap);

	return shrinker->name ? 0 : -ENOMEM;
}

static inline void shrinker_debugfs_name_free(struct shrinker *shrinker)
{
	kfree_const(shrinker->name);
	shrinker->name = NULL;
}

extern int shrinker_debugfs_add(struct shrinker *shrinker);
extern struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
					      int *debugfs_id);
extern void shrinker_debugfs_remove(struct dentry *debugfs_entry,
				    int debugfs_id);
#else /* CONFIG_SHRINKER_DEBUG */
static inline int shrinker_debugfs_add(struct shrinker *shrinker)
{
	return 0;
}
static inline int shrinker_debugfs_name_alloc(struct shrinker *shrinker,
					      const char *fmt, va_list ap)
{
	return 0;
}
static inline void shrinker_debugfs_name_free(struct shrinker *shrinker)
{
}
static inline struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
						     int *debugfs_id)
{
	*debugfs_id = -1;
	return NULL;
}
static inline void shrinker_debugfs_remove(struct dentry *debugfs_entry,
					   int debugfs_id)
{
}
#endif /* CONFIG_SHRINKER_DEBUG */
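
/*
 * Assumed registration flow (sketch only; mirrors the register_shrinker
 * era helpers): the debugfs name is captured from the printf-style
 * registration arguments, and teardown detaches the entry before the
 * shrinker goes away so the debugfs removal can happen last:
 *
 *	va_start(ap, fmt);
 *	err = shrinker_debugfs_name_alloc(shrinker, fmt, ap);
 *	va_end(ap);
 *	...
 *	shrinker_debugfs_add(shrinker);
 *	...
 *	debugfs_entry = shrinker_debugfs_detach(shrinker, &debugfs_id);
 *	shrinker_debugfs_remove(debugfs_entry, debugfs_id);
 */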

/* Only track the nodes of mappings with shadow entries */
void workingset_update_node(struct xa_node *node);
extern struct list_lru shadow_nodes;

#endif /* __MM_INTERNAL_H */