v5.4
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include <linux/page_owner.h>
#include <linux/migrate.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/page_isolation.h>

static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags)
{
	struct zone *zone;
	unsigned long flags, pfn;
	struct memory_isolate_notify arg;
	int notifier_ret;
	int ret = -EBUSY;

	zone = page_zone(page);

	spin_lock_irqsave(&zone->lock, flags);

	/*
	 * We assume the caller intended to SET the migrate type to isolate.
	 * If it is already set, then someone else must have raced and
	 * set it before us. Return -EBUSY.
	 */
	if (is_migrate_isolate_page(page))
		goto out;

	pfn = page_to_pfn(page);
	arg.start_pfn = pfn;
	arg.nr_pages = pageblock_nr_pages;
	arg.pages_found = 0;

	/*
	 * It may be possible to isolate a pageblock even if the
	 * migratetype is not MIGRATE_MOVABLE. The memory isolation
	 * notifier chain is used by balloon drivers to return the
	 * number of pages in a range that are held by the balloon
	 * driver to shrink memory. If all the pages are accounted for
	 * by balloons, are free, or on the LRU, isolation can continue.
	 * Later, for example, when the memory hotplug notifier runs, these
	 * pages reported as "can be isolated" should be isolated (freed)
	 * by the balloon driver through the memory notifier chain.
	 */
	notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
	notifier_ret = notifier_to_errno(notifier_ret);
	if (notifier_ret)
		goto out;
	/*
	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
	 * We just check MOVABLE pages.
	 */
	if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype,
				 isol_flags))
		ret = 0;

	/*
	 * "immobile" means not-on-LRU pages. If immobile is larger than
	 * removable-by-driver pages reported by the notifier, we'll fail.
	 */

out:
	if (!ret) {
		unsigned long nr_pages;
		int mt = get_pageblock_migratetype(page);

		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		zone->nr_isolate_pageblock++;
		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE,
								NULL);

		__mod_zone_freepage_state(zone, -nr_pages, mt);
	}

	spin_unlock_irqrestore(&zone->lock, flags);
	if (!ret)
		drain_all_pages(zone);
	return ret;
}
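
A note on the -EBUSY early exit above: is_migrate_isolate_page() is a thin predicate over the pageblock flags. As a rough sketch of what it likely reduces to (an assumption about the helper in include/linux/page-isolation.h, written here under a hypothetical name rather than quoting the real definition):

	/* Sketch, assuming CONFIG_MEMORY_ISOLATION: the check is likely just a
	 * pageblock-migratetype comparison. Hypothetical name. */
	static inline bool page_is_isolated(struct page *page)
	{
		return get_pageblock_migratetype(page) == MIGRATE_ISOLATE;
	}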

static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
	struct zone *zone;
	unsigned long flags, nr_pages;
	bool isolated_page = false;
	unsigned int order;
	unsigned long pfn, buddy_pfn;
	struct page *buddy;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (!is_migrate_isolate_page(page))
		goto out;

	/*
	 * Because a free page with order >= pageblock_order on an isolated
	 * pageblock is restricted from merging due to a freepage counting
	 * problem, it is possible that its buddy page is also free.
	 * move_freepages_block() does not handle merging, so we need another
	 * approach: isolating and then freeing the page allows the buddy
	 * pages to be merged.
	 */
	if (PageBuddy(page)) {
		order = page_order(page);
		if (order >= pageblock_order) {
			pfn = page_to_pfn(page);
			buddy_pfn = __find_buddy_pfn(pfn, order);
			buddy = page + (buddy_pfn - pfn);

			if (pfn_valid_within(buddy_pfn) &&
			    !is_migrate_isolate_page(buddy)) {
				__isolate_free_page(page, order);
				isolated_page = true;
			}
		}
	}

	/*
	 * If we isolated a free page with order >= pageblock_order, there
	 * should be no other free page in the range, so we can avoid the
	 * costly pageblock scan that moving the free pages would require.
	 */
	if (!isolated_page) {
		nr_pages = move_freepages_block(zone, page, migratetype, NULL);
		__mod_zone_freepage_state(zone, nr_pages, migratetype);
	}
	set_pageblock_migratetype(page, migratetype);
	zone->nr_isolate_pageblock--;
out:
	spin_unlock_irqrestore(&zone->lock, flags);
	if (isolated_page) {
		post_alloc_hook(page, order, __GFP_MOVABLE);
		__free_pages(page, order);
	}
}

static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		struct page *page;

		page = pfn_to_online_page(pfn + i);
		if (!page)
			continue;
		return page;
	}
	return NULL;
}

/**
 * start_isolate_page_range() - set the page-allocation type of a range of
 * pages to MIGRATE_ISOLATE.
 * @start_pfn:		The lower PFN of the range to be isolated.
 * @end_pfn:		The upper PFN of the range to be isolated.
 *			start_pfn/end_pfn must be aligned to pageblock_order.
 * @migratetype:	Migrate type to set in error recovery.
 * @flags:		The following flags are allowed (they can be combined in
 *			a bit mask):
 *			SKIP_HWPOISON - ignore hwpoison pages
 *			REPORT_FAILURE - report details about the failure to
 *			isolate the range
 *
 * Setting the page-allocation type to MIGRATE_ISOLATE means that free pages
 * in the range will never be allocated. Any free pages and pages freed in
 * the future will not be allocated again. If the specified range includes
 * migrate types other than MOVABLE or CMA, this will fail with -EBUSY. To
 * finally isolate all pages in the range, the caller has to free all pages
 * in the range; test_pages_isolated() can be used to verify this.
 *
 * There is no high-level synchronization mechanism that prevents two threads
 * from trying to isolate overlapping ranges. If this happens, one thread
 * will notice pageblocks in the overlapping range already set to isolate:
 * set_migratetype_isolate() returns an error, and we then clean up by
 * restoring the migration type on any pageblocks we may have modified and
 * return -EBUSY to the caller. This prevents two threads from simultaneously
 * working on overlapping ranges.
 *
 * Return: the number of isolated pageblocks on success, and -EBUSY if any
 * part of the range cannot be isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype, int flags)
{
	unsigned long pfn;
	unsigned long undo_pfn;
	struct page *page;
	int nr_isolate_pageblock = 0;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page) {
			if (set_migratetype_isolate(page, migratetype, flags)) {
				undo_pfn = pfn;
				goto undo;
			}
			nr_isolate_pageblock++;
		}
	}
	return nr_isolate_pageblock;
undo:
	for (pfn = start_pfn;
	     pfn < undo_pfn;
	     pfn += pageblock_nr_pages) {
		struct page *page = pfn_to_online_page(pfn);
		if (!page)
			continue;
		unset_migratetype_isolate(page, migratetype);
	}

	return -EBUSY;
}
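
For context, callers pair this with test_pages_isolated() and undo_isolate_page_range(). A minimal sketch of that flow, loosely modeled on how alloc_contig_range()-style callers drive it (the migration step is elided, and isolate_and_verify() is a hypothetical name, not a kernel function):

	/* Hypothetical caller: isolate a pageblock-aligned range, verify that
	 * it is fully free, then release it again. */
	static int isolate_and_verify(unsigned long start_pfn,
				      unsigned long end_pfn)
	{
		int ret;

		ret = start_isolate_page_range(start_pfn, end_pfn,
					       MIGRATE_MOVABLE, 0);
		if (ret < 0)
			return ret;	/* raced with another isolation: -EBUSY */

		/* ... migrate any in-use pages out of the range here ... */

		ret = test_pages_isolated(start_pfn, end_pfn, false);

		undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
		return ret;
	}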

/*
 * Make isolated pages available again.
 */
void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			    unsigned migratetype)
{
	unsigned long pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (!page || !is_migrate_isolate_page(page))
			continue;
		unset_migratetype_isolate(page, migratetype);
	}
}
/*
 * Test whether all pages in the range are free (i.e. isolated).
 * All pages in [start_pfn...end_pfn) must be in the same zone, and
 * zone->lock must be held before calling this.
 *
 * Returns the last tested pfn.
 */
static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
				  bool skip_hwpoisoned_pages)
{
	struct page *page;

	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		if (PageBuddy(page))
			/*
			 * If the page is on a free list, it has to be on
			 * the correct MIGRATE_ISOLATE freelist. There is no
			 * simple way to verify that as VM_BUG_ON(), though.
			 */
			pfn += 1 << page_order(page);
		else if (skip_hwpoisoned_pages && PageHWPoison(page))
			/* A HWPoisoned page cannot also be PageBuddy */
			pfn++;
		else
			break;
	}

	return pfn;
}

/* Caller should ensure that the requested range is in a single zone */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			bool skip_hwpoisoned_pages)
{
	unsigned long pfn, flags;
	struct page *page;
	struct zone *zone;

	/*
	 * Note: pageblock_nr_pages != MAX_ORDER, so chunks of free pages
	 * are not necessarily aligned to pageblock_nr_pages.
	 * Just check the migratetype first.
	 */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && !is_migrate_isolate_page(page))
			break;
	}
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	if ((pfn < end_pfn) || !page)
		return -EBUSY;
	/* Check that all pages are free or marked as ISOLATED */
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
						skip_hwpoisoned_pages);
	spin_unlock_irqrestore(&zone->lock, flags);

	trace_test_pages_isolated(start_pfn, end_pfn, pfn);

	return pfn < end_pfn ? -EBUSY : 0;
}

struct page *alloc_migrate_target(struct page *page, unsigned long private)
{
	return new_page_nodemask(page, numa_node_id(), &node_states[N_MEMORY]);
}
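
alloc_migrate_target() matches the new_page_t callback type, so it can be handed to migrate_pages() as the destination-page allocator; in this kernel the contiguous-range allocation path uses it roughly that way. A sketch of such a call site (the 'source' list and reason code here are illustrative, not a quote of a real caller):

	/* Illustrative only: migrate every page on 'source', using
	 * alloc_migrate_target() to allocate destination pages. */
	ret = migrate_pages(&source, alloc_migrate_target, NULL, 0,
			    MIGRATE_SYNC, MR_CONTIG_RANGE);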
v3.15
 
/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include "internal.h"

int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages)
{
	struct zone *zone;
	unsigned long flags, pfn;
	struct memory_isolate_notify arg;
	int notifier_ret;
	int ret = -EBUSY;

	zone = page_zone(page);

	spin_lock_irqsave(&zone->lock, flags);

	pfn = page_to_pfn(page);
	arg.start_pfn = pfn;
	arg.nr_pages = pageblock_nr_pages;
	arg.pages_found = 0;

	/*
	 * It may be possible to isolate a pageblock even if the
	 * migratetype is not MIGRATE_MOVABLE. The memory isolation
	 * notifier chain is used by balloon drivers to return the
	 * number of pages in a range that are held by the balloon
	 * driver to shrink memory. If all the pages are accounted for
	 * by balloons, are free, or on the LRU, isolation can continue.
	 * Later, for example, when the memory hotplug notifier runs, these
	 * pages reported as "can be isolated" should be isolated (freed)
	 * by the balloon driver through the memory notifier chain.
	 */
	notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
	notifier_ret = notifier_to_errno(notifier_ret);
	if (notifier_ret)
		goto out;
	/*
	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
	 * We just check MOVABLE pages.
	 */
	if (!has_unmovable_pages(zone, page, arg.pages_found,
				 skip_hwpoisoned_pages))
		ret = 0;

	/*
	 * "immobile" means not-on-LRU pages. If immobile is larger than
	 * removable-by-driver pages reported by the notifier, we'll fail.
	 */

out:
	if (!ret) {
		unsigned long nr_pages;
		int migratetype = get_pageblock_migratetype(page);

		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);

		__mod_zone_freepage_state(zone, -nr_pages, migratetype);
	}

	spin_unlock_irqrestore(&zone->lock, flags);
	if (!ret)
		drain_all_pages();
	return ret;
}

void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
	struct zone *zone;
	unsigned long flags, nr_pages;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
		goto out;
	nr_pages = move_freepages_block(zone, page, migratetype);
	__mod_zone_freepage_state(zone, nr_pages, migratetype);
	set_pageblock_migratetype(page, migratetype);
out:
	spin_unlock_irqrestore(&zone->lock, flags);
}

static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
	int i;
	for (i = 0; i < nr_pages; i++)
		if (pfn_valid_within(pfn + i))
			break;
	if (unlikely(i == nr_pages))
		return NULL;
	return pfn_to_page(pfn + i);
}

/*
 * start_isolate_page_range() -- set the page-allocation type of a range of
 * pages to MIGRATE_ISOLATE.
 * @start_pfn: The lower PFN of the range to be isolated.
 * @end_pfn: The upper PFN of the range to be isolated.
 * @migratetype: migrate type to set in error recovery.
 *
 * Setting the page-allocation type to MIGRATE_ISOLATE means that free pages
 * in the range will never be allocated. Any free pages and pages freed in
 * the future will not be allocated again.
 *
 * start_pfn/end_pfn must be aligned to pageblock_order.
 * Returns 0 on success and -EBUSY if any part of the range cannot be
 * isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype, bool skip_hwpoisoned_pages)
{
	unsigned long pfn;
	unsigned long undo_pfn;
	struct page *page;

	BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
	BUG_ON((end_pfn) & (pageblock_nr_pages - 1));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page &&
		    set_migratetype_isolate(page, skip_hwpoisoned_pages)) {
			undo_pfn = pfn;
			goto undo;
		}
	}
	return 0;
undo:
	for (pfn = start_pfn;
	     pfn < undo_pfn;
	     pfn += pageblock_nr_pages)
		unset_migratetype_isolate(pfn_to_page(pfn), migratetype);

	return -EBUSY;
}

/*
 * Make isolated pages available again.
 */
int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			    unsigned migratetype)
{
	unsigned long pfn;
	struct page *page;
	BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
	BUG_ON((end_pfn) & (pageblock_nr_pages - 1));
	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (!page || get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
			continue;
		unset_migratetype_isolate(page, migratetype);
	}
	return 0;
}
/*
 * Test whether all pages in the range are free (i.e. isolated).
 * All pages in [start_pfn...end_pfn) must be in the same zone, and
 * zone->lock must be held before calling this.
 *
 * Returns 1 if all pages in the range are isolated.
 */
static int
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
				  bool skip_hwpoisoned_pages)
{
	struct page *page;

	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		if (PageBuddy(page)) {
			/*
			 * If a race between isolation and allocation happens,
			 * some free pages could be on the MIGRATE_MOVABLE list
			 * although the pageblock's migration type is
			 * MIGRATE_ISOLATE. Catch it and move the page onto the
			 * MIGRATE_ISOLATE list.
			 */
			if (get_freepage_migratetype(page) != MIGRATE_ISOLATE) {
				struct page *end_page;

				end_page = page + (1 << page_order(page)) - 1;
				move_freepages(page_zone(page), page, end_page,
						MIGRATE_ISOLATE);
			}
			pfn += 1 << page_order(page);
		}
		else if (page_count(page) == 0 &&
			get_freepage_migratetype(page) == MIGRATE_ISOLATE)
			pfn += 1;
		else if (skip_hwpoisoned_pages && PageHWPoison(page)) {
			/*
			 * The HWPoisoned page may not be in the buddy
			 * system, and page_count() is not 0.
			 */
			pfn++;
			continue;
		}
		else
			break;
	}
	if (pfn < end_pfn)
		return 0;
	return 1;
}

int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			bool skip_hwpoisoned_pages)
{
	unsigned long pfn, flags;
	struct page *page;
	struct zone *zone;
	int ret;

	/*
	 * Note: pageblock_nr_pages != MAX_ORDER, so chunks of free pages
	 * are not necessarily aligned to pageblock_nr_pages.
	 * Just check the migratetype first.
	 */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
			break;
	}
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	if ((pfn < end_pfn) || !page)
		return -EBUSY;
	/* Check that all pages are free or marked as ISOLATED */
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
						skip_hwpoisoned_pages);
	spin_unlock_irqrestore(&zone->lock, flags);
	return ret ? 0 : -EBUSY;
}

struct page *alloc_migrate_target(struct page *page, unsigned long private,
				  int **resultp)
{
	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;

	/*
	 * TODO: allocate a destination hugepage from the nearest neighbor
	 * node, in accordance with the memory policy of the user process if
	 * possible. For now, as a simple work-around, we use the next node
	 * for the destination.
	 */
	if (PageHuge(page)) {
		nodemask_t src = nodemask_of_node(page_to_nid(page));
		nodemask_t dst;
		nodes_complement(dst, src);
		return alloc_huge_page_node(page_hstate(compound_head(page)),
					    next_node(page_to_nid(page), dst));
	}

	if (PageHighMem(page))
		gfp_mask |= __GFP_HIGHMEM;

	return alloc_page(gfp_mask);
}
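
Note the third parameter here: in this era the new_page_t callback type still carried an int **result argument (dropped in later kernels), and migrate_pages() did not yet take a separate free-page callback. A sketch of a v3.15-style call site, roughly as in the memory-hotplug offlining path (the 'source' list is illustrative):

	/* Illustrative only: pre-3.16 migrate_pages() signature, with no
	 * put_new_page callback and a private value of 0. */
	ret = migrate_pages(&source, alloc_migrate_target, 0,
			    MIGRATE_SYNC, MR_MEMORY_HOTPLUG);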