v5.14.15
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include <linux/page_owner.h>
#include <linux/migrate.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/page_isolation.h>

static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags)
{
	struct zone *zone = page_zone(page);
	struct page *unmovable;
	unsigned long flags;

	spin_lock_irqsave(&zone->lock, flags);

	/*
	 * We assume the caller intended to SET migrate type to isolate.
	 * If it is already set, then someone else must have raced and
	 * set it before us.
	 */
	if (is_migrate_isolate_page(page)) {
		spin_unlock_irqrestore(&zone->lock, flags);
		return -EBUSY;
	}

	/*
	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
	 * We just check MOVABLE pages.
	 */
	unmovable = has_unmovable_pages(zone, page, migratetype, isol_flags);
	if (!unmovable) {
		unsigned long nr_pages;
		int mt = get_pageblock_migratetype(page);

		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		zone->nr_isolate_pageblock++;
		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE,
						NULL);

		__mod_zone_freepage_state(zone, -nr_pages, mt);
		spin_unlock_irqrestore(&zone->lock, flags);
		return 0;
	}

	spin_unlock_irqrestore(&zone->lock, flags);
	if (isol_flags & REPORT_FAILURE) {
		/*
		 * printk() with zone->lock held will likely trigger a
		 * lockdep splat, so defer it here.
		 */
		dump_page(unmovable, "unmovable page");
	}

	return -EBUSY;
}

static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
	struct zone *zone;
	unsigned long flags, nr_pages;
	bool isolated_page = false;
	unsigned int order;
	unsigned long pfn, buddy_pfn;
	struct page *buddy;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (!is_migrate_isolate_page(page))
		goto out;

	/*
	 * Because a free page with order >= pageblock_order on an isolated
	 * pageblock is restricted from merging due to the freepage counting
	 * problem, it is possible that a free buddy page exists.
	 * move_freepages_block() does not handle merging, so we need another
	 * approach: isolating the page and freeing it back will get these
	 * pages merged.
	 */
	if (PageBuddy(page)) {
		order = buddy_order(page);
		if (order >= pageblock_order && order < MAX_ORDER - 1) {
			pfn = page_to_pfn(page);
			buddy_pfn = __find_buddy_pfn(pfn, order);
			buddy = page + (buddy_pfn - pfn);

			if (pfn_valid_within(buddy_pfn) &&
			    !is_migrate_isolate_page(buddy)) {
				__isolate_free_page(page, order);
				isolated_page = true;
			}
		}
	}

	/*
	 * If we isolated a free page with order >= pageblock_order, there
	 * should be no other free pages in the range, so we can avoid a
	 * costly pageblock scan for freepage moving.
	 *
	 * We didn't actually touch any of the isolated pages, so place them
	 * at the tail of the freelist. This is an optimization for memory
	 * onlining - just onlined memory won't immediately be considered for
	 * allocation.
	 */
	if (!isolated_page) {
		nr_pages = move_freepages_block(zone, page, migratetype, NULL);
		__mod_zone_freepage_state(zone, nr_pages, migratetype);
	}
	set_pageblock_migratetype(page, migratetype);
	if (isolated_page)
		__putback_isolated_page(page, order, migratetype);
	zone->nr_isolate_pageblock--;
out:
	spin_unlock_irqrestore(&zone->lock, flags);
}

static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		struct page *page;

		page = pfn_to_online_page(pfn + i);
		if (!page)
			continue;
		return page;
	}
	return NULL;
}

/**
 * start_isolate_page_range() - make the page-allocation type of a range of
 * pages MIGRATE_ISOLATE.
 * @start_pfn:		The lower PFN of the range to be isolated.
 * @end_pfn:		The upper PFN of the range to be isolated.
 *			start_pfn/end_pfn must be aligned to pageblock_order.
 * @migratetype:	Migrate type to set in error recovery.
 * @flags:		The following flags are allowed (they can be combined in
 *			a bit mask)
 *			MEMORY_OFFLINE - isolate to offline (!allocate) memory
 *					 e.g., skip over PageHWPoison() pages
 *					 and PageOffline() pages.
 *			REPORT_FAILURE - report details about the failure to
 *			isolate the range
 *
 * Setting the page-allocation type of the range to MIGRATE_ISOLATE means
 * free pages in the range will never be allocated. Any free pages and pages
 * freed in the future will not be allocated again. If the specified range
 * includes migrate types other than MOVABLE or CMA, this will fail with
 * -EBUSY. To finally isolate all pages in the range, the caller has to free
 * all pages in the range; test_pages_isolated() can be used to test this.
 *
 * There is no high-level synchronization mechanism that prevents two threads
 * from trying to isolate overlapping ranges. If this happens, one thread
 * will notice pageblocks in the overlapping range already set to isolate.
 * This happens in set_migratetype_isolate(), and set_migratetype_isolate()
 * returns an error. We then clean up by restoring the migration type on
 * pageblocks we may have modified and return -EBUSY to the caller. This
 * prevents two threads from simultaneously working on overlapping ranges.
 *
 * Please note that there is no strong synchronization with the page allocator
 * either. Pages might be freed while their page blocks are marked ISOLATED.
 * A call to drain_all_pages() after isolation can flush most of them. However,
 * in some cases pages might still end up on pcp lists and that would allow
 * for their allocation even when they are in fact isolated already. Depending
 * on how strong of a guarantee the caller needs, zone_pcp_disable/enable()
 * might be used to flush and disable the pcplist before isolation and enable
 * it after unisolation.
 *
 * Return: 0 on success and -EBUSY if any part of range cannot be isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype, int flags)
{
	unsigned long pfn;
	unsigned long undo_pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page) {
			if (set_migratetype_isolate(page, migratetype, flags)) {
				undo_pfn = pfn;
				goto undo;
			}
		}
	}
	return 0;
undo:
	for (pfn = start_pfn;
	     pfn < undo_pfn;
	     pfn += pageblock_nr_pages) {
		struct page *page = pfn_to_online_page(pfn);
		if (!page)
			continue;
		unset_migratetype_isolate(page, migratetype);
	}

	return -EBUSY;
}
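
For reference, a typical caller drives this API as an isolate -> drain ->
migrate/free -> test -> undo sequence (this is, in spirit, what
alloc_contig_range() and memory offlining do). A minimal sketch, not part of
this file; the helper empty_range_somehow() is hypothetical and stands in for
the caller's own migration/freeing logic:

	/* Sketch: take a pageblock-aligned range out of the allocator. */
	static int isolate_range_example(unsigned long start_pfn,
					 unsigned long end_pfn)
	{
		int ret;

		/* Mark every pageblock in the range MIGRATE_ISOLATE. */
		ret = start_isolate_page_range(start_pfn, end_pfn,
					       MIGRATE_MOVABLE, REPORT_FAILURE);
		if (ret)
			return ret;	/* -EBUSY: an unmovable page was found */

		/* Flush per-cpu free lists so stray pages reach the buddy. */
		drain_all_pages(page_zone(pfn_to_page(start_pfn)));

		/* Caller-specific: migrate or free every page in the range. */
		ret = empty_range_somehow(start_pfn, end_pfn);	/* hypothetical */

		/* Returns 0 only if every page in the range is free/isolated. */
		if (!ret)
			ret = test_pages_isolated(start_pfn, end_pfn, 0);

		/* Always restore the previous migratetype. */
		undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
		return ret;
	}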

/*
 * Make isolated pages available again.
 */
void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			    unsigned migratetype)
{
	unsigned long pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (!page || !is_migrate_isolate_page(page))
			continue;
		unset_migratetype_isolate(page, migratetype);
	}
}
/*
 * Test whether all pages in the range are free (i.e. isolated).
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns the last tested pfn.
 */
static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
				  int flags)
{
	struct page *page;

	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		if (PageBuddy(page))
			/*
			 * If the page is on a free list, it has to be on
			 * the correct MIGRATE_ISOLATE freelist. There is no
			 * simple way to verify that as VM_BUG_ON(), though.
			 */
			pfn += 1 << buddy_order(page);
		else if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
			/* A HWPoisoned page cannot also be PageBuddy */
			pfn++;
		else if ((flags & MEMORY_OFFLINE) && PageOffline(page) &&
			 !page_count(page))
			/*
			 * The responsible driver agreed to skip PageOffline()
			 * pages when offlining memory by dropping its
			 * reference in MEM_GOING_OFFLINE.
			 */
			pfn++;
		else
			break;
	}

	return pfn;
}

/* Caller should ensure that requested range is in a single zone */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			int isol_flags)
{
	unsigned long pfn, flags;
	struct page *page;
	struct zone *zone;

	/*
	 * Note: pageblock_nr_pages != MAX_ORDER, so chunks of free pages are
	 * not necessarily aligned to pageblock_nr_pages. Hence, check the
	 * migratetype of each pageblock first.
	 */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && !is_migrate_isolate_page(page))
			break;
	}
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	if ((pfn < end_pfn) || !page)
		return -EBUSY;
	/* Check all pages are free or marked as ISOLATED */
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn, isol_flags);
	spin_unlock_irqrestore(&zone->lock, flags);

	trace_test_pages_isolated(start_pfn, end_pfn, pfn);

	return pfn < end_pfn ? -EBUSY : 0;
}
v4.6
 
/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/page_isolation.h>

static int set_migratetype_isolate(struct page *page,
				bool skip_hwpoisoned_pages)
{
	struct zone *zone;
	unsigned long flags, pfn;
	struct memory_isolate_notify arg;
	int notifier_ret;
	int ret = -EBUSY;

	zone = page_zone(page);

	spin_lock_irqsave(&zone->lock, flags);

	pfn = page_to_pfn(page);
	arg.start_pfn = pfn;
	arg.nr_pages = pageblock_nr_pages;
	arg.pages_found = 0;

	/*
	 * It may be possible to isolate a pageblock even if the
	 * migratetype is not MIGRATE_MOVABLE. The memory isolation
	 * notifier chain is used by balloon drivers to return the
	 * number of pages in a range that are held by the balloon
	 * driver to shrink memory. If all the pages are accounted for
	 * by balloons, are free, or are on the LRU, isolation can continue.
	 * Later, for example, when the memory hotplug notifier runs, these
	 * pages reported as "can be isolated" should be isolated (freed)
	 * by the balloon driver through the memory notifier chain.
	 */
	notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
	notifier_ret = notifier_to_errno(notifier_ret);
	if (notifier_ret)
		goto out;
	/*
	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
	 * We just check MOVABLE pages.
	 */
	if (!has_unmovable_pages(zone, page, arg.pages_found,
				 skip_hwpoisoned_pages))
		ret = 0;

	/*
	 * Immobile means "not-on-LRU" pages. If the number of immobile pages
	 * is larger than the number of removable-by-driver pages reported by
	 * the notifier, we'll fail.
	 */

out:
	if (!ret) {
		unsigned long nr_pages;
		int migratetype = get_pageblock_migratetype(page);

		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		zone->nr_isolate_pageblock++;
		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);

		__mod_zone_freepage_state(zone, -nr_pages, migratetype);
	}

	spin_unlock_irqrestore(&zone->lock, flags);
	if (!ret)
		drain_all_pages(zone);
	return ret;
}
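
For context on the notifier handshake above: in this era a balloon driver
(e.g. the pseries CMM driver) answers MEM_ISOLATE_COUNT by reporting how many
pages it holds in the queried range, which set_migratetype_isolate() then
feeds to has_unmovable_pages() via arg.pages_found. A minimal sketch; the
helper my_balloon_pages_in_range() is hypothetical:

	#include <linux/memory.h>
	#include <linux/notifier.h>

	static int my_balloon_isolate_cb(struct notifier_block *self,
					 unsigned long action, void *arg)
	{
		struct memory_isolate_notify *marg = arg;

		if (action != MEM_ISOLATE_COUNT)
			return NOTIFY_OK;

		/* Report balloon-owned pages in [start_pfn, start_pfn + nr_pages). */
		marg->pages_found += my_balloon_pages_in_range(marg->start_pfn,
							       marg->nr_pages);
		return NOTIFY_OK;
	}

	static struct notifier_block my_balloon_isolate_nb = {
		.notifier_call = my_balloon_isolate_cb,
	};

	/* Registered once at driver init:
	 * register_memory_isolate_notifier(&my_balloon_isolate_nb);
	 */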

static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
	struct zone *zone;
	unsigned long flags, nr_pages;
	struct page *isolated_page = NULL;
	unsigned int order;
	unsigned long page_idx, buddy_idx;
	struct page *buddy;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
		goto out;

	/*
	 * Because a free page with order >= pageblock_order on an isolated
	 * pageblock is restricted from merging due to the freepage counting
	 * problem, it is possible that a free buddy page exists.
	 * move_freepages_block() does not handle merging, so we need another
	 * approach: isolating the page and freeing it back will get these
	 * pages merged.
	 */
	if (PageBuddy(page)) {
		order = page_order(page);
		if (order >= pageblock_order) {
			page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
			buddy_idx = __find_buddy_index(page_idx, order);
			buddy = page + (buddy_idx - page_idx);

			if (pfn_valid_within(page_to_pfn(buddy)) &&
			    !is_migrate_isolate_page(buddy)) {
				__isolate_free_page(page, order);
				kernel_map_pages(page, (1 << order), 1);
				set_page_refcounted(page);
				isolated_page = page;
			}
		}
	}

	/*
	 * If we isolated a free page with order >= pageblock_order, there
	 * should be no other free pages in the range, so we can avoid a
	 * costly pageblock scan for freepage moving.
	 */
	if (!isolated_page) {
		nr_pages = move_freepages_block(zone, page, migratetype);
		__mod_zone_freepage_state(zone, nr_pages, migratetype);
	}
	set_pageblock_migratetype(page, migratetype);
	zone->nr_isolate_pageblock--;
out:
	spin_unlock_irqrestore(&zone->lock, flags);
	if (isolated_page)
		__free_pages(isolated_page, order);
}

static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
	int i;
	for (i = 0; i < nr_pages; i++)
		if (pfn_valid_within(pfn + i))
			break;
	if (unlikely(i == nr_pages))
		return NULL;
	return pfn_to_page(pfn + i);
}

/*
 * start_isolate_page_range() -- make the page-allocation type of a range
 * of pages MIGRATE_ISOLATE.
 * @start_pfn: The lower PFN of the range to be isolated.
 * @end_pfn: The upper PFN of the range to be isolated.
 * @migratetype: migrate type to set in error recovery.
 *
 * Setting the page-allocation type of the range to MIGRATE_ISOLATE means
 * free pages in the range will never be allocated. Any free pages and pages
 * freed in the future will not be allocated again.
 *
 * start_pfn/end_pfn must be aligned to pageblock_order.
 * Returns 0 on success and -EBUSY if any part of range cannot be isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype, bool skip_hwpoisoned_pages)
{
	unsigned long pfn;
	unsigned long undo_pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page &&
		    set_migratetype_isolate(page, skip_hwpoisoned_pages)) {
			undo_pfn = pfn;
			goto undo;
		}
	}
	return 0;
undo:
	for (pfn = start_pfn;
	     pfn < undo_pfn;
	     pfn += pageblock_nr_pages)
		unset_migratetype_isolate(pfn_to_page(pfn), migratetype);

	return -EBUSY;
}

/*
 * Make isolated pages available again.
 */
int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			    unsigned migratetype)
{
	unsigned long pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (!page || get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
			continue;
		unset_migratetype_isolate(page, migratetype);
	}
	return 0;
}
/*
 * Test whether all pages in the range are free (i.e. isolated).
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns the last tested pfn.
 */
static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
				  bool skip_hwpoisoned_pages)
{
	struct page *page;

	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		if (PageBuddy(page))
			/*
			 * If the page is on a free list, it has to be on
			 * the correct MIGRATE_ISOLATE freelist. There is no
			 * simple way to verify that as VM_BUG_ON(), though.
			 */
			pfn += 1 << page_order(page);
		else if (skip_hwpoisoned_pages && PageHWPoison(page))
			/* A HWPoisoned page cannot also be PageBuddy */
			pfn++;
		else
			break;
	}

	return pfn;
}

int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			bool skip_hwpoisoned_pages)
{
	unsigned long pfn, flags;
	struct page *page;
	struct zone *zone;

	/*
	 * Note: pageblock_nr_pages != MAX_ORDER, so chunks of free pages are
	 * not necessarily aligned to pageblock_nr_pages. Hence, check the
	 * migratetype of each pageblock first.
	 */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
			break;
	}
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	if ((pfn < end_pfn) || !page)
		return -EBUSY;
	/* Check all pages are free or marked as ISOLATED */
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
						skip_hwpoisoned_pages);
	spin_unlock_irqrestore(&zone->lock, flags);

	trace_test_pages_isolated(start_pfn, end_pfn, pfn);

	return pfn < end_pfn ? -EBUSY : 0;
}

struct page *alloc_migrate_target(struct page *page, unsigned long private,
				  int **resultp)
{
	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;

	/*
	 * TODO: allocate a destination hugepage from the nearest neighbor
	 * node, in accordance with the memory policy of the user process,
	 * if possible. For now, as a simple workaround, we use the next
	 * node as the destination.
	 */
	if (PageHuge(page)) {
		int node = next_online_node(page_to_nid(page));
		if (node == MAX_NUMNODES)
			node = first_online_node;
		return alloc_huge_page_node(page_hstate(compound_head(page)),
					    node);
	}

	if (PageHighMem(page))
		gfp_mask |= __GFP_HIGHMEM;

	return alloc_page(gfp_mask);
}
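
alloc_migrate_target() matches this kernel's new_page_t callback signature,
so isolation users hand it to migrate_pages() as the allocator for pages
being moved out of an isolated range. A minimal sketch under the v4.6-era
migrate_pages() signature; the list source_pages is assumed to have been
filled by the caller (e.g. via isolate_lru_page()):

	/* Migrate a list of isolated LRU pages to freshly allocated targets. */
	static int migrate_isolated_list(struct list_head *source_pages)
	{
		int ret;

		ret = migrate_pages(source_pages, alloc_migrate_target, NULL,
				    0, MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
		if (ret)
			/* Whatever could not be migrated goes back to the LRU. */
			putback_movable_pages(source_pages);
		return ret;
	}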