v4.17
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include <linux/page_owner.h>
#include <linux/migrate.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/page_isolation.h>

static int set_migratetype_isolate(struct page *page, int migratetype,
				bool skip_hwpoisoned_pages)
{
	struct zone *zone;
	unsigned long flags, pfn;
	struct memory_isolate_notify arg;
	int notifier_ret;
	int ret = -EBUSY;

	zone = page_zone(page);

	spin_lock_irqsave(&zone->lock, flags);

	/*
	 * We assume the caller intended to SET the migrate type to isolate.
	 * If it is already set, then someone else must have raced with us
	 * and set it first. Return -EBUSY.
	 */
	if (is_migrate_isolate_page(page))
		goto out;

	pfn = page_to_pfn(page);
	arg.start_pfn = pfn;
	arg.nr_pages = pageblock_nr_pages;
	arg.pages_found = 0;

	/*
	 * It may be possible to isolate a pageblock even if the
	 * migratetype is not MIGRATE_MOVABLE. The memory isolation
	 * notifier chain is used by balloon drivers to return the
	 * number of pages in a range that are held by the balloon
	 * driver to shrink memory. If all the pages are accounted for
	 * by balloons, are free, or are on the LRU, isolation can
	 * continue. Later, for example, when the memory hotplug notifier
	 * runs, the pages reported as "can be isolated" should be
	 * isolated (freed) by the balloon driver through the memory
	 * notifier chain.
	 */
	notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
	notifier_ret = notifier_to_errno(notifier_ret);
	if (notifier_ret)
		goto out;
	/*
	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
	 * We just check MOVABLE pages.
	 */
	if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype,
				 skip_hwpoisoned_pages))
		ret = 0;

	/*
	 * Here, "immobile" means pages that are not on the LRU. If there
	 * are more immobile pages than removable-by-driver pages reported
	 * by the notifier, isolation fails.
	 */

out:
	if (!ret) {
		unsigned long nr_pages;
		int mt = get_pageblock_migratetype(page);

		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		zone->nr_isolate_pageblock++;
		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE,
									NULL);

		__mod_zone_freepage_state(zone, -nr_pages, mt);
	}

	spin_unlock_irqrestore(&zone->lock, flags);
	if (!ret)
		drain_all_pages(zone);
	return ret;
}

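For context on the notifier round-trip above: a balloon driver answers MEM_ISOLATE_COUNT by reporting, via arg->pages_found, how many pages in the queried pageblock it holds. A minimal sketch, assuming the v4.17-era register_memory_isolate_notifier() interface; balloon_pages_in_range() is a hypothetical driver helper, not a kernel API:

#include <linux/memory.h>
#include <linux/notifier.h>

/*
 * Hypothetical helper: how many pages in
 * [start_pfn, start_pfn + nr_pages) does this balloon driver hold?
 */
static unsigned long balloon_pages_in_range(unsigned long start_pfn,
					    unsigned long nr_pages);

static int balloon_count_pages(struct notifier_block *nb,
			       unsigned long action, void *data)
{
	struct memory_isolate_notify *arg = data;

	/*
	 * Report pages we hold so that set_migratetype_isolate() can
	 * account for them through arg->pages_found.
	 */
	if (action == MEM_ISOLATE_COUNT)
		arg->pages_found += balloon_pages_in_range(arg->start_pfn,
							   arg->nr_pages);
	return NOTIFY_OK;
}

static struct notifier_block balloon_isolate_nb = {
	.notifier_call = balloon_count_pages,
};

/* At driver init: register_memory_isolate_notifier(&balloon_isolate_nb); */
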
static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
	struct zone *zone;
	unsigned long flags, nr_pages;
	bool isolated_page = false;
	unsigned int order;
	unsigned long pfn, buddy_pfn;
	struct page *buddy;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (!is_migrate_isolate_page(page))
		goto out;

	/*
	 * Because a free page with an order above pageblock_order on an
	 * isolated pageblock is restricted from merging due to the freepage
	 * counting problem, it is possible that there is a free buddy page.
	 * move_freepages_block() does not handle merging, so we need another
	 * approach: isolating and then freeing the page will let these
	 * pages merge.
	 */
	if (PageBuddy(page)) {
		order = page_order(page);
		if (order >= pageblock_order) {
			pfn = page_to_pfn(page);
			buddy_pfn = __find_buddy_pfn(pfn, order);
			buddy = page + (buddy_pfn - pfn);

			if (pfn_valid_within(buddy_pfn) &&
			    !is_migrate_isolate_page(buddy)) {
				__isolate_free_page(page, order);
				isolated_page = true;
			}
		}
	}

	/*
	 * If we isolated a free page with an order above pageblock_order,
	 * there should be no other free pages in the range, so we can skip
	 * the costly pageblock scan for moving free pages.
	 */
	if (!isolated_page) {
		nr_pages = move_freepages_block(zone, page, migratetype, NULL);
		__mod_zone_freepage_state(zone, nr_pages, migratetype);
	}
	set_pageblock_migratetype(page, migratetype);
	zone->nr_isolate_pageblock--;
out:
	spin_unlock_irqrestore(&zone->lock, flags);
	if (isolated_page) {
		post_alloc_hook(page, order, __GFP_MOVABLE);
		__free_pages(page, order);
	}
}

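The buddy lookup above is plain XOR arithmetic; a worked example, assuming the usual __find_buddy_pfn() definition from mm/internal.h (pfn ^ (1 << order)):

/*
 * Worked example (illustrative values):
 *   pfn = 0x1000, order = 9 (pageblock_order on x86-64 with 2MB hugepages)
 *   buddy_pfn = __find_buddy_pfn(0x1000, 9)
 *             = 0x1000 ^ (1 << 9)
 *             = 0x1000 ^ 0x200 = 0x1200,
 * so "buddy = page + (buddy_pfn - pfn)" advances page by 0x200 pages.
 */
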
static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		struct page *page;

		if (!pfn_valid_within(pfn + i))
			continue;
		page = pfn_to_online_page(pfn + i);
		if (!page)
			continue;
		return page;
	}
	return NULL;
}

/*
 * start_isolate_page_range() -- make the page-allocation-type of a range
 * of pages MIGRATE_ISOLATE.
 * @start_pfn: The lower PFN of the range to be isolated.
 * @end_pfn: The upper PFN of the range to be isolated.
 * @migratetype: migrate type to set in error recovery.
 *
 * Making the page-allocation-type MIGRATE_ISOLATE means free pages in
 * the range will never be allocated. Any free pages and pages freed in the
 * future will not be allocated again.
 *
 * start_pfn/end_pfn must be aligned to pageblock_order.
 * Return 0 on success and -EBUSY if any part of the range cannot be isolated.
 *
 * There is no high level synchronization mechanism that prevents two threads
 * from trying to isolate overlapping ranges. If this happens, one thread
 * will notice pageblocks in the overlapping range already set to isolate.
 * This happens in set_migratetype_isolate, and set_migratetype_isolate
 * returns an error. We then clean up by restoring the migration type on
 * pageblocks we may have modified and return -EBUSY to the caller. This
 * prevents two threads from simultaneously working on overlapping ranges.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype, bool skip_hwpoisoned_pages)
{
	unsigned long pfn;
	unsigned long undo_pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page &&
		    set_migratetype_isolate(page, migratetype, skip_hwpoisoned_pages)) {
			undo_pfn = pfn;
			goto undo;
		}
	}
	return 0;
undo:
	for (pfn = start_pfn;
	     pfn < undo_pfn;
	     pfn += pageblock_nr_pages) {
		struct page *page = pfn_to_online_page(pfn);
		if (!page)
			continue;
		unset_migratetype_isolate(page, migratetype);
	}

	return -EBUSY;
}

/*
 * Make isolated pages available again.
 */
int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			    unsigned migratetype)
{
	unsigned long pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (!page || !is_migrate_isolate_page(page))
			continue;
		unset_migratetype_isolate(page, migratetype);
	}
	return 0;
}
/*
 * Test whether all pages in the range are free (i.e., isolated) or not.
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns the last tested pfn.
 */
static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
				  bool skip_hwpoisoned_pages)
{
	struct page *page;

	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		if (PageBuddy(page))
			/*
			 * If the page is on a free list, it has to be on
			 * the correct MIGRATE_ISOLATE freelist. There is no
			 * simple way to verify that as VM_BUG_ON(), though.
			 */
			pfn += 1 << page_order(page);
		else if (skip_hwpoisoned_pages && PageHWPoison(page))
			/* A HWPoisoned page cannot also be PageBuddy */
			pfn++;
		else
			break;
	}

	return pfn;
}

/* Caller should ensure that the requested range is in a single zone */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			bool skip_hwpoisoned_pages)
{
	unsigned long pfn, flags;
	struct page *page;
	struct zone *zone;

	/*
	 * Note: pageblock_nr_pages != MAX_ORDER, so chunks of free pages
	 * are not necessarily aligned to pageblock_nr_pages.
	 * Hence, we just check the migratetype first.
	 */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && !is_migrate_isolate_page(page))
			break;
	}
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	if ((pfn < end_pfn) || !page)
		return -EBUSY;
	/* Check all pages are free or marked as ISOLATED */
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
						skip_hwpoisoned_pages);
	spin_unlock_irqrestore(&zone->lock, flags);

	trace_test_pages_isolated(start_pfn, end_pfn, pfn);

	return pfn < end_pfn ? -EBUSY : 0;
}

struct page *alloc_migrate_target(struct page *page, unsigned long private)
{
	return new_page_nodemask(page, numa_node_id(), &node_states[N_MEMORY]);
}
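
Taken together, a caller drives the v4.17 API in three steps: isolate, check, undo. A minimal sketch against the signatures above (the helper name and error handling are illustrative, not from the kernel source):

static int isolate_range_and_check(unsigned long start_pfn,
				   unsigned long end_pfn)
{
	int ret;

	/*
	 * Mark every pageblock in [start_pfn, end_pfn) MIGRATE_ISOLATE;
	 * skip_hwpoisoned_pages = true, as memory offlining would pass.
	 */
	ret = start_isolate_page_range(start_pfn, end_pfn,
				       MIGRATE_MOVABLE, true);
	if (ret)
		return ret;	/* -EBUSY: an unmovable page was found */

	/* ... migrate or free any remaining used pages in the range ... */

	/* Verify the whole range is now free and on ISOLATE freelists. */
	ret = test_pages_isolated(start_pfn, end_pfn, true);
	if (ret) {
		/* Something was still in use: give the pageblocks back. */
		undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
		return ret;
	}

	/*
	 * Success: the range is isolated. The caller offlines or allocates
	 * it, then calls undo_isolate_page_range() when finished.
	 */
	return 0;
}
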
v5.9
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include <linux/page_owner.h>
#include <linux/migrate.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/page_isolation.h>

static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags)
{
	struct page *unmovable = NULL;
	struct zone *zone;
	unsigned long flags;
	int ret = -EBUSY;

	zone = page_zone(page);

	spin_lock_irqsave(&zone->lock, flags);

	/*
	 * We assume the caller intended to SET the migrate type to isolate.
	 * If it is already set, then someone else must have raced with us
	 * and set it first. Return -EBUSY.
	 */
	if (is_migrate_isolate_page(page))
		goto out;

	/*
	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
	 * We just check MOVABLE pages.
	 */
	unmovable = has_unmovable_pages(zone, page, migratetype, isol_flags);
	if (!unmovable) {
		unsigned long nr_pages;
		int mt = get_pageblock_migratetype(page);

		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		zone->nr_isolate_pageblock++;
		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE,
									NULL);

		__mod_zone_freepage_state(zone, -nr_pages, mt);
		ret = 0;
	}

out:
	spin_unlock_irqrestore(&zone->lock, flags);
	if (!ret) {
		drain_all_pages(zone);
	} else {
		WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE);

		if ((isol_flags & REPORT_FAILURE) && unmovable)
			/*
			 * printk() with zone->lock held will likely trigger a
			 * lockdep splat, so defer it here.
			 */
			dump_page(unmovable, "unmovable page");
	}

	return ret;
}

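In v5.9 the bool parameter has become a bit mask. Illustrative call sites, from my reading of the v5.9 tree (paraphrased, not verbatim):

/*
 * - Memory hot-unplug (mm/memory_hotplug.c) wants failures explained:
 *       start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
 *                                MEMORY_OFFLINE | REPORT_FAILURE);
 *
 * - alloc_contig_range() (mm/page_alloc.c) passes no isolation flags:
 *       start_isolate_page_range(pfn_max_align_down(start),
 *                                pfn_max_align_up(end), migratetype, 0);
 */
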
static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
	struct zone *zone;
	unsigned long flags, nr_pages;
	bool isolated_page = false;
	unsigned int order;
	unsigned long pfn, buddy_pfn;
	struct page *buddy;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (!is_migrate_isolate_page(page))
		goto out;

	/*
	 * Because a free page with an order above pageblock_order on an
	 * isolated pageblock is restricted from merging due to the freepage
	 * counting problem, it is possible that there is a free buddy page.
	 * move_freepages_block() does not handle merging, so we need another
	 * approach: isolating and then freeing the page will let these
	 * pages merge.
	 */
	if (PageBuddy(page)) {
		order = page_order(page);
		if (order >= pageblock_order) {
			pfn = page_to_pfn(page);
			buddy_pfn = __find_buddy_pfn(pfn, order);
			buddy = page + (buddy_pfn - pfn);

			if (pfn_valid_within(buddy_pfn) &&
			    !is_migrate_isolate_page(buddy)) {
				__isolate_free_page(page, order);
				isolated_page = true;
			}
		}
	}

	/*
	 * If we isolated a free page with an order above pageblock_order,
	 * there should be no other free pages in the range, so we can skip
	 * the costly pageblock scan for moving free pages.
	 */
	if (!isolated_page) {
		nr_pages = move_freepages_block(zone, page, migratetype, NULL);
		__mod_zone_freepage_state(zone, nr_pages, migratetype);
	}
	set_pageblock_migratetype(page, migratetype);
	if (isolated_page)
		__putback_isolated_page(page, order, migratetype);
	zone->nr_isolate_pageblock--;
out:
	spin_unlock_irqrestore(&zone->lock, flags);
}

static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		struct page *page;

		page = pfn_to_online_page(pfn + i);
		if (!page)
			continue;
		return page;
	}
	return NULL;
}

/**
 * start_isolate_page_range() - make the page-allocation-type of a range of
 * pages MIGRATE_ISOLATE.
 * @start_pfn:		The lower PFN of the range to be isolated.
 * @end_pfn:		The upper PFN of the range to be isolated.
 *			start_pfn/end_pfn must be aligned to pageblock_order.
 * @migratetype:	Migrate type to set in error recovery.
 * @flags:		The following flags are allowed (they can be combined
 *			in a bit mask):
 *			MEMORY_OFFLINE - isolate to offline (!allocate) memory
 *					 e.g., skip over PageHWPoison() pages
 *					 and PageOffline() pages.
 *			REPORT_FAILURE - report details about the failure to
 *			isolate the range
 *
 * Making the page-allocation-type MIGRATE_ISOLATE means free pages in
 * the range will never be allocated. Any free pages and pages freed in the
 * future will not be allocated again. If the specified range includes
 * migrate types other than MOVABLE or CMA, this will fail with -EBUSY. To
 * finally isolate all pages in the range, the caller has to free all pages
 * in the range; test_pages_isolated() can be used to test it.
 *
 * There is no high level synchronization mechanism that prevents two threads
 * from trying to isolate overlapping ranges. If this happens, one thread
 * will notice pageblocks in the overlapping range already set to isolate.
 * This happens in set_migratetype_isolate, and set_migratetype_isolate
 * returns an error. We then clean up by restoring the migration type on
 * pageblocks we may have modified and return -EBUSY to the caller. This
 * prevents two threads from simultaneously working on overlapping ranges.
 *
 * Please note that there is no strong synchronization with the page allocator
 * either. Pages might be freed while their page blocks are marked ISOLATED.
 * In some cases pages might still end up on pcp lists and that would allow
 * for their allocation even when they are in fact isolated already. Depending
 * on how strong a guarantee the caller needs, drain_all_pages() might be
 * needed (e.g., __offline_pages() will need to call it after checking for an
 * isolated range before the next retry).
 *
 * Return: the number of isolated pageblocks on success and -EBUSY if any part
 * of the range cannot be isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype, int flags)
{
	unsigned long pfn;
	unsigned long undo_pfn;
	struct page *page;
	int nr_isolate_pageblock = 0;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page) {
			if (set_migratetype_isolate(page, migratetype, flags)) {
				undo_pfn = pfn;
				goto undo;
			}
			nr_isolate_pageblock++;
		}
	}
	return nr_isolate_pageblock;
undo:
	for (pfn = start_pfn;
	     pfn < undo_pfn;
	     pfn += pageblock_nr_pages) {
		struct page *page = pfn_to_online_page(pfn);
		if (!page)
			continue;
		unset_migratetype_isolate(page, migratetype);
	}

	return -EBUSY;
}

/*
 * Make isolated pages available again.
 */
void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			    unsigned migratetype)
{
	unsigned long pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (!page || !is_migrate_isolate_page(page))
			continue;
		unset_migratetype_isolate(page, migratetype);
	}
}
/*
 * Test whether all pages in the range are free (i.e., isolated) or not.
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns the last tested pfn.
 */
static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
				  int flags)
{
	struct page *page;

	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		if (PageBuddy(page))
			/*
			 * If the page is on a free list, it has to be on
			 * the correct MIGRATE_ISOLATE freelist. There is no
			 * simple way to verify that as VM_BUG_ON(), though.
			 */
			pfn += 1 << page_order(page);
		else if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
			/* A HWPoisoned page cannot also be PageBuddy */
			pfn++;
		else if ((flags & MEMORY_OFFLINE) && PageOffline(page) &&
			 !page_count(page))
			/*
			 * The responsible driver agreed to skip PageOffline()
			 * pages when offlining memory by dropping its
			 * reference in MEM_GOING_OFFLINE.
			 */
			pfn++;
		else
			break;
	}

	return pfn;
}

/* Caller should ensure that the requested range is in a single zone */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			int isol_flags)
{
	unsigned long pfn, flags;
	struct page *page;
	struct zone *zone;

	/*
	 * Note: pageblock_nr_pages != MAX_ORDER, so chunks of free pages
	 * are not necessarily aligned to pageblock_nr_pages.
	 * Hence, we just check the migratetype first.
	 */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && !is_migrate_isolate_page(page))
			break;
	}
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	if ((pfn < end_pfn) || !page)
		return -EBUSY;
	/* Check all pages are free or marked as ISOLATED */
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn, isol_flags);
	spin_unlock_irqrestore(&zone->lock, flags);

	trace_test_pages_isolated(start_pfn, end_pfn, pfn);

	return pfn < end_pfn ? -EBUSY : 0;
}
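
A minimal end-to-end sketch of the v5.9 contract, following the kerneldoc above (note the pcp-list caveat: drain_all_pages() before re-checking). The helper name, retry budget, and success-path handling are illustrative, not from the kernel source:

static int offline_range_sketch(unsigned long start_pfn,
				unsigned long end_pfn)
{
	struct zone *zone = page_zone(pfn_to_page(start_pfn));
	int retries = 5;
	int ret;

	/* Returns the number of isolated pageblocks, or -EBUSY. */
	ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
				       MEMORY_OFFLINE | REPORT_FAILURE);
	if (ret < 0)
		return ret;

	/* ... migrate every movable page out of the range ... */

	/*
	 * Pages freed while the blocks were marked ISOLATE may still sit
	 * on pcp lists, so drain before each re-check.
	 */
	do {
		drain_all_pages(zone);
		ret = test_pages_isolated(start_pfn, end_pfn, MEMORY_OFFLINE);
	} while (ret == -EBUSY && --retries);

	if (ret)
		undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
	/*
	 * On success the caller proceeds to actually offline the range,
	 * undoing isolation only if that later step fails.
	 */
	return ret;
}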