// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include <linux/page_owner.h>
#include <linux/migrate.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/page_isolation.h>

static int set_migratetype_isolate(struct page *page, int migratetype,
					bool skip_hwpoisoned_pages)
{
	struct zone *zone;
	unsigned long flags, pfn;
	struct memory_isolate_notify arg;
	int notifier_ret;
	int ret = -EBUSY;

	zone = page_zone(page);

	spin_lock_irqsave(&zone->lock, flags);

	/*
	 * We assume the caller intended to SET migrate type to isolate.
	 * If it is already set, then someone else must have raced and
	 * set it before us.  Return -EBUSY
	 */
	if (is_migrate_isolate_page(page))
		goto out;

	pfn = page_to_pfn(page);
	arg.start_pfn = pfn;
	arg.nr_pages = pageblock_nr_pages;
	arg.pages_found = 0;

	/*
	 * It may be possible to isolate a pageblock even if the
	 * migratetype is not MIGRATE_MOVABLE. The memory isolation
	 * notifier chain is used by balloon drivers to return the
	 * number of pages in a range that are held by the balloon
	 * driver to shrink memory. If all the pages are accounted for
	 * by balloons, are free, or on the LRU, isolation can continue.
	 * Later, for example, when memory hotplug notifier runs, these
	 * pages reported as "can be isolated" should be isolated(freed)
	 * by the balloon driver through the memory notifier chain.
	 */
	notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
	notifier_ret = notifier_to_errno(notifier_ret);
	if (notifier_ret)
		goto out;
	/*
	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
	 * We just check MOVABLE pages.
	 */
	if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype,
				 skip_hwpoisoned_pages))
		ret = 0;

	/*
	 * immobile means "not-on-lru" pages. If immobile is larger than
	 * removable-by-driver pages reported by notifier, we'll fail.
	 */

out:
	if (!ret) {
		unsigned long nr_pages;
		int mt = get_pageblock_migratetype(page);

		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		zone->nr_isolate_pageblock++;
		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE,
						NULL);

		__mod_zone_freepage_state(zone, -nr_pages, mt);
	}

	spin_unlock_irqrestore(&zone->lock, flags);
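	/*
	 * If isolation succeeded, drain the per-cpu pagesets so that pages
	 * already sitting there are returned to the buddy allocator and end
	 * up on the MIGRATE_ISOLATE freelist instead of being handed out
	 * again.
	 */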
	if (!ret)
		drain_all_pages(zone);
	return ret;
}

static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
	struct zone *zone;
	unsigned long flags, nr_pages;
	bool isolated_page = false;
	unsigned int order;
	unsigned long pfn, buddy_pfn;
	struct page *buddy;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (!is_migrate_isolate_page(page))
		goto out;

	/*
	 * Because a free page with order >= pageblock_order on an isolated
	 * pageblock is not allowed to merge (to keep the freepage counting
	 * correct), its buddy may still be a separate free page.
	 * move_freepages_block() does not handle merging, so we take a
	 * different approach: isolate the page and free it again, which
	 * lets the buddies merge.
	 */
	if (PageBuddy(page)) {
		order = page_order(page);
		if (order >= pageblock_order) {
			pfn = page_to_pfn(page);
			buddy_pfn = __find_buddy_pfn(pfn, order);
			buddy = page + (buddy_pfn - pfn);

			if (pfn_valid_within(buddy_pfn) &&
			    !is_migrate_isolate_page(buddy)) {
				__isolate_free_page(page, order);
				isolated_page = true;
			}
		}
	}

	/*
	 * If we isolated a free page of order >= pageblock_order above,
	 * there should be no other free pages in the range, so we can
	 * skip the costly pageblock scan for moving free pages.
	 */
	if (!isolated_page) {
		nr_pages = move_freepages_block(zone, page, migratetype, NULL);
		__mod_zone_freepage_state(zone, nr_pages, migratetype);
	}
	set_pageblock_migratetype(page, migratetype);
	zone->nr_isolate_pageblock--;
out:
	spin_unlock_irqrestore(&zone->lock, flags);
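	/*
	 * If a high-order page was taken off the free list above, prepare it
	 * as an allocated page and free it again now that the pageblock is
	 * no longer MIGRATE_ISOLATE, so it can merge with its buddy.
	 */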
	if (isolated_page) {
		post_alloc_hook(page, order, __GFP_MOVABLE);
		__free_pages(page, order);
	}
}

static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		struct page *page;

		if (!pfn_valid_within(pfn + i))
			continue;
		page = pfn_to_online_page(pfn + i);
		if (!page)
			continue;
		return page;
	}
	return NULL;
}

/*
 * start_isolate_page_range() -- make page-allocation-type of range of pages
 * to be MIGRATE_ISOLATE.
 * @start_pfn:		The lower PFN of the range to be isolated.
 * @end_pfn:		The upper PFN of the range to be isolated.
 * @migratetype:	migrate type to set in error recovery.
 *
 * Making page-allocation-type to be MIGRATE_ISOLATE means free pages in
 * the range will never be allocated. Any free pages and pages freed in the
 * future will not be allocated again.
 *
 * start_pfn/end_pfn must be aligned to pageblock_order.
 * Return 0 on success and -EBUSY if any part of range cannot be isolated.
 *
 * There is no high level synchronization mechanism that prevents two threads
 * from trying to isolate overlapping ranges. If this happens, one thread
 * will notice pageblocks in the overlapping range already set to isolate.
 * This happens in set_migratetype_isolate, and set_migratetype_isolate
 * returns an error. We then clean up by restoring the migration type on
 * pageblocks we may have modified and return -EBUSY to caller. This
 * prevents two threads from simultaneously working on overlapping ranges.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype, bool skip_hwpoisoned_pages)
{
	unsigned long pfn;
	unsigned long undo_pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page &&
		    set_migratetype_isolate(page, migratetype, skip_hwpoisoned_pages)) {
			undo_pfn = pfn;
			goto undo;
		}
	}
	return 0;
undo:
	for (pfn = start_pfn;
	     pfn < undo_pfn;
	     pfn += pageblock_nr_pages) {
		struct page *page = pfn_to_online_page(pfn);
		if (!page)
			continue;
		unset_migratetype_isolate(page, migratetype);
	}

	return -EBUSY;
}
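
/*
 * A typical user of this API, such as alloc_contig_range() or memory
 * hotplug offlining, is expected to follow roughly this sequence
 * (simplified; local names are illustrative):
 *
 *	ret = start_isolate_page_range(start_pfn, end_pfn,
 *				       MIGRATE_MOVABLE, false);
 *	if (ret)
 *		return ret;
 *	... migrate or reclaim the in-use pages in [start_pfn, end_pfn) ...
 *	if (test_pages_isolated(start_pfn, end_pfn, false))
 *		ret = -EBUSY;
 *	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
 */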

/*
 * Make isolated pages available again.
 */
int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			    unsigned migratetype)
{
	unsigned long pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (!page || !is_migrate_isolate_page(page))
			continue;
		unset_migratetype_isolate(page, migratetype);
	}
	return 0;
}
/*
 * Test whether all pages in the range are free (i.e. isolated).
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns the last tested pfn.
 */
static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
				  bool skip_hwpoisoned_pages)
{
	struct page *page;

	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		if (PageBuddy(page))
			/*
			 * If the page is on a free list, it has to be on
			 * the correct MIGRATE_ISOLATE freelist. There is no
			 * simple way to verify that as VM_BUG_ON(), though.
			 */
			pfn += 1 << page_order(page);
		else if (skip_hwpoisoned_pages && PageHWPoison(page))
			/* A HWPoisoned page cannot be also PageBuddy */
			pfn++;
		else
			break;
	}

	return pfn;
}

/* Caller should ensure that requested range is in a single zone */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			bool skip_hwpoisoned_pages)
{
	unsigned long pfn, flags;
	struct page *page;
	struct zone *zone;

	/*
	 * Note: pageblock_nr_pages != MAX_ORDER_NR_PAGES in general, so a
	 * chunk of free pages is not necessarily aligned to
	 * pageblock_nr_pages. Check the migratetype of each pageblock
	 * first.
	 */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && !is_migrate_isolate_page(page))
			break;
	}
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	if ((pfn < end_pfn) || !page)
		return -EBUSY;
	/* Check all pages are free or marked as ISOLATED */
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
						skip_hwpoisoned_pages);
	spin_unlock_irqrestore(&zone->lock, flags);

	trace_test_pages_isolated(start_pfn, end_pfn, pfn);

	return pfn < end_pfn ? -EBUSY : 0;
}

struct page *alloc_migrate_target(struct page *page, unsigned long private)
{
	return new_page_nodemask(page, numa_node_id(), &node_states[N_MEMORY]);
}
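
/*
 * alloc_migrate_target() is intended to be used as the new_page_t
 * allocation callback of migrate_pages() when draining pages out of an
 * isolated range, roughly (list name and migration reason are
 * illustrative):
 *
 *	err = migrate_pages(&pagelist, alloc_migrate_target, NULL, 0,
 *			    MIGRATE_SYNC, reason);
 */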