mm/page_ext.c (v4.17)
  1// SPDX-License-Identifier: GPL-2.0
  2#include <linux/mm.h>
  3#include <linux/mmzone.h>
  4#include <linux/bootmem.h>
  5#include <linux/page_ext.h>
  6#include <linux/memory.h>
  7#include <linux/vmalloc.h>
  8#include <linux/kmemleak.h>
  9#include <linux/page_owner.h>
 10#include <linux/page_idle.h>
 11
 12/*
 13 * struct page extension
 14 *
 15 * This is the feature to manage memory for extended data per page.
 16 *
  17 * Until now, we had to modify struct page itself to store extra data per page.
  18 * This requires rebuilding the kernel, which is a really time-consuming process.
  19 * Sometimes a rebuild is even impossible due to third-party module dependencies.
  20 * Finally, enlarging struct page could cause unwanted changes in system behaviour.
  21 *
  22 * This feature is intended to overcome the problems mentioned above. It
  23 * allocates memory for extended data per page in a separate place rather
  24 * than in struct page itself. This memory can be accessed by the accessor
  25 * functions provided by this code. During the boot process, it checks whether
  26 * allocation of a huge chunk of memory is needed or not. If not, it avoids
  27 * allocating memory at all. With this advantage, we can include this feature
  28 * in the kernel by default, avoiding rebuilds and solving the related problems.
  29 *
  30 * To help these things work well, there are two callbacks for clients. One
  31 * is the need callback, which is mandatory if the user wants to avoid useless
  32 * memory allocation at boot time. The other is the optional init callback,
  33 * which is used to do proper initialization after memory is allocated.
  34 *
  35 * The need callback is used to decide whether extended memory allocation is
  36 * needed or not. Sometimes users want to deactivate some features for this
  37 * boot and the extra memory would be unnecessary. In this case, to avoid
  38 * allocating a huge chunk of memory, each client reports its need for
  39 * extra memory through the need callback. If one of the need callbacks
  40 * returns true, it means that someone needs extra memory, so the page
  41 * extension core should allocate memory for page extensions. If none of
  42 * the need callbacks returns true, the memory isn't needed at all for this
  43 * boot and the page extension core can skip the allocation. As a result,
  44 * no memory is wasted.
  45 *
  46 * When a need callback returns true, page_ext checks if there is a request
  47 * for extra memory through the size field in struct page_ext_operations.
  48 * If it is non-zero, extra space is allocated for each page_ext entry and
  49 * the offset is returned to the user through the offset field in the same struct.
  50 *
  51 * The init callback is used to do proper initialization after page extension
  52 * is completely initialized. In a sparse memory system, the extra memory is
  53 * allocated some time later than the memmap. In other words, the lifetime of
  54 * the memory for page extensions isn't the same as that of the memmap for
  55 * struct page. Therefore, clients can't store extra data until page extension
  56 * is initialized, even though pages may already be allocated and freely used.
  57 * This could leave the per-page extra data in an inconsistent state, so, to
  58 * prevent that, a client can use this callback to initialize it correctly.
 59 */
 60
 61static struct page_ext_operations *page_ext_ops[] = {
 62#ifdef CONFIG_DEBUG_PAGEALLOC
 63	&debug_guardpage_ops,
 64#endif
 65#ifdef CONFIG_PAGE_OWNER
 66	&page_owner_ops,
 67#endif
 68#if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
 69	&page_idle_ops,
 70#endif
 71};
 72
 73static unsigned long total_usage;
 74static unsigned long extra_mem;
 75
 76static bool __init invoke_need_callbacks(void)
 77{
 78	int i;
 79	int entries = ARRAY_SIZE(page_ext_ops);
 80	bool need = false;
 81
 82	for (i = 0; i < entries; i++) {
 83		if (page_ext_ops[i]->need && page_ext_ops[i]->need()) {
 84			page_ext_ops[i]->offset = sizeof(struct page_ext) +
 85						extra_mem;
 86			extra_mem += page_ext_ops[i]->size;
 87			need = true;
 88		}
 89	}
 90
 91	return need;
 92}
 93
 94static void __init invoke_init_callbacks(void)
 95{
 96	int i;
 97	int entries = ARRAY_SIZE(page_ext_ops);
 98
 99	for (i = 0; i < entries; i++) {
100		if (page_ext_ops[i]->init)
101			page_ext_ops[i]->init();
102	}
103}
104
105static unsigned long get_entry_size(void)
106{
107	return sizeof(struct page_ext) + extra_mem;
108}
109
110static inline struct page_ext *get_entry(void *base, unsigned long index)
111{
112	return base + get_entry_size() * index;
113}
114
115#if !defined(CONFIG_SPARSEMEM)
116
117
118void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
119{
120	pgdat->node_page_ext = NULL;
121}
122
123struct page_ext *lookup_page_ext(struct page *page)
124{
125	unsigned long pfn = page_to_pfn(page);
126	unsigned long index;
127	struct page_ext *base;
128
129	base = NODE_DATA(page_to_nid(page))->node_page_ext;
130	/*
131	 * The sanity checks the page allocator does upon freeing a
132	 * page can reach here before the page_ext arrays are
133	 * allocated when feeding a range of pages to the allocator
134	 * for the first time during bootup or memory hotplug.
135	 */
136	if (unlikely(!base))
137		return NULL;
138	index = pfn - round_down(node_start_pfn(page_to_nid(page)),
139					MAX_ORDER_NR_PAGES);
140	return get_entry(base, index);
141}
142
143static int __init alloc_node_page_ext(int nid)
144{
145	struct page_ext *base;
146	unsigned long table_size;
147	unsigned long nr_pages;
148
149	nr_pages = NODE_DATA(nid)->node_spanned_pages;
150	if (!nr_pages)
151		return 0;
152
153	/*
154	 * Need extra space if node range is not aligned with
155	 * MAX_ORDER_NR_PAGES. When page allocator's buddy algorithm
156	 * checks buddy's status, range could be out of exact node range.
157	 */
158	if (!IS_ALIGNED(node_start_pfn(nid), MAX_ORDER_NR_PAGES) ||
159		!IS_ALIGNED(node_end_pfn(nid), MAX_ORDER_NR_PAGES))
160		nr_pages += MAX_ORDER_NR_PAGES;
161
162	table_size = get_entry_size() * nr_pages;
163
164	base = memblock_virt_alloc_try_nid_nopanic(
165			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
166			BOOTMEM_ALLOC_ACCESSIBLE, nid);
167	if (!base)
168		return -ENOMEM;
169	NODE_DATA(nid)->node_page_ext = base;
170	total_usage += table_size;
171	return 0;
172}
173
174void __init page_ext_init_flatmem(void)
175{
176
177	int nid, fail;
178
179	if (!invoke_need_callbacks())
180		return;
181
182	for_each_online_node(nid)  {
183		fail = alloc_node_page_ext(nid);
184		if (fail)
185			goto fail;
186	}
187	pr_info("allocated %ld bytes of page_ext\n", total_usage);
188	invoke_init_callbacks();
189	return;
190
191fail:
192	pr_crit("allocation of page_ext failed.\n");
193	panic("Out of memory");
194}
195
196#else /* CONFIG_FLAT_NODE_MEM_MAP */
197
198struct page_ext *lookup_page_ext(struct page *page)
199{
200	unsigned long pfn = page_to_pfn(page);
201	struct mem_section *section = __pfn_to_section(pfn);
202	/*
203	 * The sanity checks the page allocator does upon freeing a
204	 * page can reach here before the page_ext arrays are
205	 * allocated when feeding a range of pages to the allocator
206	 * for the first time during bootup or memory hotplug.
207	 */
208	if (!section->page_ext)
209		return NULL;
210	return get_entry(section->page_ext, pfn);
211}
212
213static void *__meminit alloc_page_ext(size_t size, int nid)
214{
215	gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN;
216	void *addr = NULL;
217
218	addr = alloc_pages_exact_nid(nid, size, flags);
219	if (addr) {
220		kmemleak_alloc(addr, size, 1, flags);
221		return addr;
222	}
223
224	addr = vzalloc_node(size, nid);
225
226	return addr;
227}
228
229static int __meminit init_section_page_ext(unsigned long pfn, int nid)
230{
231	struct mem_section *section;
232	struct page_ext *base;
233	unsigned long table_size;
234
235	section = __pfn_to_section(pfn);
236
237	if (section->page_ext)
238		return 0;
239
240	table_size = get_entry_size() * PAGES_PER_SECTION;
241	base = alloc_page_ext(table_size, nid);
242
243	/*
244	 * The value stored in section->page_ext is (base - pfn)
245	 * and it does not point to the memory block allocated above,
246	 * causing kmemleak false positives.
247	 */
248	kmemleak_not_leak(base);
249
250	if (!base) {
251		pr_err("page ext allocation failure\n");
252		return -ENOMEM;
253	}
254
255	/*
256	 * The passed "pfn" may not be aligned to SECTION.  For the calculation
257	 * we need to apply a mask.
258	 */
259	pfn &= PAGE_SECTION_MASK;
260	section->page_ext = (void *)base - get_entry_size() * pfn;
261	total_usage += table_size;
262	return 0;
263}
264#ifdef CONFIG_MEMORY_HOTPLUG
265static void free_page_ext(void *addr)
266{
267	if (is_vmalloc_addr(addr)) {
268		vfree(addr);
269	} else {
270		struct page *page = virt_to_page(addr);
271		size_t table_size;
272
273		table_size = get_entry_size() * PAGES_PER_SECTION;
274
275		BUG_ON(PageReserved(page));
276		free_pages_exact(addr, table_size);
277	}
278}
279
280static void __free_page_ext(unsigned long pfn)
281{
282	struct mem_section *ms;
283	struct page_ext *base;
284
285	ms = __pfn_to_section(pfn);
286	if (!ms || !ms->page_ext)
287		return;
288	base = get_entry(ms->page_ext, pfn);
289	free_page_ext(base);
290	ms->page_ext = NULL;
291}
292
293static int __meminit online_page_ext(unsigned long start_pfn,
294				unsigned long nr_pages,
295				int nid)
296{
297	unsigned long start, end, pfn;
298	int fail = 0;
299
300	start = SECTION_ALIGN_DOWN(start_pfn);
301	end = SECTION_ALIGN_UP(start_pfn + nr_pages);
302
303	if (nid == -1) {
304		/*
305		 * In this case, "nid" already exists and contains valid memory.
306		 * "start_pfn" passed to us is a pfn which is an arg for
 307		 * online_pages(), and start_pfn should exist.
308		 */
309		nid = pfn_to_nid(start_pfn);
310		VM_BUG_ON(!node_state(nid, N_ONLINE));
311	}
312
313	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
314		if (!pfn_present(pfn))
315			continue;
316		fail = init_section_page_ext(pfn, nid);
317	}
318	if (!fail)
319		return 0;
320
321	/* rollback */
322	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
323		__free_page_ext(pfn);
324
325	return -ENOMEM;
326}
327
328static int __meminit offline_page_ext(unsigned long start_pfn,
329				unsigned long nr_pages, int nid)
330{
331	unsigned long start, end, pfn;
332
333	start = SECTION_ALIGN_DOWN(start_pfn);
334	end = SECTION_ALIGN_UP(start_pfn + nr_pages);
335
336	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
337		__free_page_ext(pfn);
338	return 0;
339
340}
341
342static int __meminit page_ext_callback(struct notifier_block *self,
343			       unsigned long action, void *arg)
344{
345	struct memory_notify *mn = arg;
346	int ret = 0;
347
348	switch (action) {
349	case MEM_GOING_ONLINE:
350		ret = online_page_ext(mn->start_pfn,
351				   mn->nr_pages, mn->status_change_nid);
352		break;
353	case MEM_OFFLINE:
354		offline_page_ext(mn->start_pfn,
355				mn->nr_pages, mn->status_change_nid);
356		break;
357	case MEM_CANCEL_ONLINE:
358		offline_page_ext(mn->start_pfn,
359				mn->nr_pages, mn->status_change_nid);
360		break;
361	case MEM_GOING_OFFLINE:
362		break;
363	case MEM_ONLINE:
364	case MEM_CANCEL_OFFLINE:
365		break;
366	}
367
368	return notifier_from_errno(ret);
369}
370
371#endif
372
373void __init page_ext_init(void)
374{
375	unsigned long pfn;
376	int nid;
377
378	if (!invoke_need_callbacks())
379		return;
380
381	for_each_node_state(nid, N_MEMORY) {
382		unsigned long start_pfn, end_pfn;
383
384		start_pfn = node_start_pfn(nid);
385		end_pfn = node_end_pfn(nid);
386		/*
387		 * start_pfn and end_pfn may not be aligned to SECTION and the
388		 * page->flags of out of node pages are not initialized.  So we
389		 * scan [start_pfn, the biggest section's pfn < end_pfn) here.
390		 */
391		for (pfn = start_pfn; pfn < end_pfn;
392			pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {
393
394			if (!pfn_valid(pfn))
395				continue;
396			/*
 397			 * Nodes' pfns can overlap.
 398			 * We know some arches can have a node layout such as
399			 * -------------pfn-------------->
400			 * N0 | N1 | N2 | N0 | N1 | N2|....
401			 *
402			 * Take into account DEFERRED_STRUCT_PAGE_INIT.
403			 */
404			if (early_pfn_to_nid(pfn) != nid)
405				continue;
406			if (init_section_page_ext(pfn, nid))
407				goto oom;
408			cond_resched();
409		}
410	}
411	hotplug_memory_notifier(page_ext_callback, 0);
412	pr_info("allocated %ld bytes of page_ext\n", total_usage);
413	invoke_init_callbacks();
414	return;
415
416oom:
417	panic("Out of memory");
418}
419
420void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
421{
422}
423
424#endif
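
The header comment at the top of this listing describes the client contract only in the abstract. The sketch below shows what a minimal, hypothetical client of the v4.17 interface could look like, mirroring the pattern used by page_owner: declare a struct page_ext_operations with size/need/init, let the core place the private data behind sizeof(struct page_ext), and reach it through the offset field the core filled in. The names my_tag, my_tag_ops, need_my_tag, init_my_tag and get_my_tag are invented for illustration; a real client would also have to be listed in the page_ext_ops[] array near the top of the file.

#include <linux/mm.h>
#include <linux/page_ext.h>

/* Hypothetical per-page payload; not part of mm/page_ext.c. */
struct my_tag {
	unsigned long value;
};

static bool need_my_tag(void)
{
	/*
	 * Returning false here (e.g. when a boot parameter disables the
	 * feature) keeps the core from allocating any extra memory.
	 */
	return true;
}

static void init_my_tag(void)
{
	/* Runs once page_ext itself is fully set up. */
}

struct page_ext_operations my_tag_ops = {
	.size = sizeof(struct my_tag),	/* extra bytes per page_ext entry */
	.need = need_my_tag,
	.init = init_my_tag,
	/* .offset is filled in by invoke_need_callbacks() */
};

/* Reach this client's slice of a page's extension entry. */
static struct my_tag *get_my_tag(struct page *page)
{
	struct page_ext *page_ext = lookup_page_ext(page);

	if (unlikely(!page_ext))
		return NULL;
	return (void *)page_ext + my_tag_ops.offset;
}
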
mm/page_ext.c (v5.14.15)
  1// SPDX-License-Identifier: GPL-2.0
  2#include <linux/mm.h>
  3#include <linux/mmzone.h>
  4#include <linux/memblock.h>
  5#include <linux/page_ext.h>
  6#include <linux/memory.h>
  7#include <linux/vmalloc.h>
  8#include <linux/kmemleak.h>
  9#include <linux/page_owner.h>
 10#include <linux/page_idle.h>
 11
 12/*
 13 * struct page extension
 14 *
 15 * This is the feature to manage memory for extended data per page.
 16 *
  17 * Until now, we had to modify struct page itself to store extra data per page.
  18 * This requires rebuilding the kernel, which is a really time-consuming process.
  19 * Sometimes a rebuild is even impossible due to third-party module dependencies.
  20 * Finally, enlarging struct page could cause unwanted changes in system behaviour.
  21 *
  22 * This feature is intended to overcome the problems mentioned above. It
  23 * allocates memory for extended data per page in a separate place rather
  24 * than in struct page itself. This memory can be accessed by the accessor
  25 * functions provided by this code. During the boot process, it checks whether
  26 * allocation of a huge chunk of memory is needed or not. If not, it avoids
  27 * allocating memory at all. With this advantage, we can include this feature
  28 * in the kernel by default, avoiding rebuilds and solving the related problems.
  29 *
  30 * To help these things work well, there are two callbacks for clients. One
  31 * is the need callback, which is mandatory if the user wants to avoid useless
  32 * memory allocation at boot time. The other is the optional init callback,
  33 * which is used to do proper initialization after memory is allocated.
  34 *
  35 * The need callback is used to decide whether extended memory allocation is
  36 * needed or not. Sometimes users want to deactivate some features for this
  37 * boot and the extra memory would be unnecessary. In this case, to avoid
  38 * allocating a huge chunk of memory, each client reports its need for
  39 * extra memory through the need callback. If one of the need callbacks
  40 * returns true, it means that someone needs extra memory, so the page
  41 * extension core should allocate memory for page extensions. If none of
  42 * the need callbacks returns true, the memory isn't needed at all for this
  43 * boot and the page extension core can skip the allocation. As a result,
  44 * no memory is wasted.
  45 *
  46 * When a need callback returns true, page_ext checks if there is a request
  47 * for extra memory through the size field in struct page_ext_operations.
  48 * If it is non-zero, extra space is allocated for each page_ext entry and
  49 * the offset is returned to the user through the offset field in the same struct.
  50 *
  51 * The init callback is used to do proper initialization after page extension
  52 * is completely initialized. In a sparse memory system, the extra memory is
  53 * allocated some time later than the memmap. In other words, the lifetime of
  54 * the memory for page extensions isn't the same as that of the memmap for
  55 * struct page. Therefore, clients can't store extra data until page extension
  56 * is initialized, even though pages may already be allocated and freely used.
  57 * This could leave the per-page extra data in an inconsistent state, so, to
  58 * prevent that, a client can use this callback to initialize it correctly.
 59 */
 60
 61static struct page_ext_operations *page_ext_ops[] = {
 62#ifdef CONFIG_PAGE_OWNER
 63	&page_owner_ops,
 64#endif
 65#if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
 66	&page_idle_ops,
 67#endif
 68};
 69
 70unsigned long page_ext_size = sizeof(struct page_ext);
 71
 72static unsigned long total_usage;
 73
 74static bool __init invoke_need_callbacks(void)
 75{
 76	int i;
 77	int entries = ARRAY_SIZE(page_ext_ops);
 78	bool need = false;
 79
 80	for (i = 0; i < entries; i++) {
 81		if (page_ext_ops[i]->need && page_ext_ops[i]->need()) {
 82			page_ext_ops[i]->offset = page_ext_size;
 83			page_ext_size += page_ext_ops[i]->size;
 84			need = true;
 85		}
 86	}
 87
 88	return need;
 89}
 90
 91static void __init invoke_init_callbacks(void)
 92{
 93	int i;
 94	int entries = ARRAY_SIZE(page_ext_ops);
 95
 96	for (i = 0; i < entries; i++) {
 97		if (page_ext_ops[i]->init)
 98			page_ext_ops[i]->init();
 99	}
100}
101
102#ifndef CONFIG_SPARSEMEM
103void __init page_ext_init_flatmem_late(void)
104{
105	invoke_init_callbacks();
106}
107#endif
108
109static inline struct page_ext *get_entry(void *base, unsigned long index)
110{
111	return base + page_ext_size * index;
112}
113
114#ifndef CONFIG_SPARSEMEM
115
116
117void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
118{
119	pgdat->node_page_ext = NULL;
120}
121
122struct page_ext *lookup_page_ext(const struct page *page)
123{
124	unsigned long pfn = page_to_pfn(page);
125	unsigned long index;
126	struct page_ext *base;
127
128	base = NODE_DATA(page_to_nid(page))->node_page_ext;
129	/*
130	 * The sanity checks the page allocator does upon freeing a
131	 * page can reach here before the page_ext arrays are
132	 * allocated when feeding a range of pages to the allocator
133	 * for the first time during bootup or memory hotplug.
134	 */
135	if (unlikely(!base))
136		return NULL;
137	index = pfn - round_down(node_start_pfn(page_to_nid(page)),
138					MAX_ORDER_NR_PAGES);
139	return get_entry(base, index);
140}
141
142static int __init alloc_node_page_ext(int nid)
143{
144	struct page_ext *base;
145	unsigned long table_size;
146	unsigned long nr_pages;
147
148	nr_pages = NODE_DATA(nid)->node_spanned_pages;
149	if (!nr_pages)
150		return 0;
151
152	/*
153	 * Need extra space if node range is not aligned with
154	 * MAX_ORDER_NR_PAGES. When page allocator's buddy algorithm
155	 * checks buddy's status, range could be out of exact node range.
156	 */
157	if (!IS_ALIGNED(node_start_pfn(nid), MAX_ORDER_NR_PAGES) ||
158		!IS_ALIGNED(node_end_pfn(nid), MAX_ORDER_NR_PAGES))
159		nr_pages += MAX_ORDER_NR_PAGES;
160
161	table_size = page_ext_size * nr_pages;
162
163	base = memblock_alloc_try_nid(
164			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
165			MEMBLOCK_ALLOC_ACCESSIBLE, nid);
166	if (!base)
167		return -ENOMEM;
168	NODE_DATA(nid)->node_page_ext = base;
169	total_usage += table_size;
170	return 0;
171}
172
173void __init page_ext_init_flatmem(void)
174{
175
176	int nid, fail;
177
178	if (!invoke_need_callbacks())
179		return;
180
181	for_each_online_node(nid)  {
182		fail = alloc_node_page_ext(nid);
183		if (fail)
184			goto fail;
185	}
186	pr_info("allocated %ld bytes of page_ext\n", total_usage);
187	return;
188
189fail:
190	pr_crit("allocation of page_ext failed.\n");
191	panic("Out of memory");
192}
193
194#else /* CONFIG_FLATMEM */
195
196struct page_ext *lookup_page_ext(const struct page *page)
197{
198	unsigned long pfn = page_to_pfn(page);
199	struct mem_section *section = __pfn_to_section(pfn);
200	/*
201	 * The sanity checks the page allocator does upon freeing a
202	 * page can reach here before the page_ext arrays are
203	 * allocated when feeding a range of pages to the allocator
204	 * for the first time during bootup or memory hotplug.
205	 */
206	if (!section->page_ext)
207		return NULL;
208	return get_entry(section->page_ext, pfn);
209}
210
211static void *__meminit alloc_page_ext(size_t size, int nid)
212{
213	gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN;
214	void *addr = NULL;
215
216	addr = alloc_pages_exact_nid(nid, size, flags);
217	if (addr) {
218		kmemleak_alloc(addr, size, 1, flags);
219		return addr;
220	}
221
222	addr = vzalloc_node(size, nid);
223
224	return addr;
225}
226
227static int __meminit init_section_page_ext(unsigned long pfn, int nid)
228{
229	struct mem_section *section;
230	struct page_ext *base;
231	unsigned long table_size;
232
233	section = __pfn_to_section(pfn);
234
235	if (section->page_ext)
236		return 0;
237
238	table_size = page_ext_size * PAGES_PER_SECTION;
239	base = alloc_page_ext(table_size, nid);
240
241	/*
242	 * The value stored in section->page_ext is (base - pfn)
243	 * and it does not point to the memory block allocated above,
244	 * causing kmemleak false positives.
245	 */
246	kmemleak_not_leak(base);
247
248	if (!base) {
249		pr_err("page ext allocation failure\n");
250		return -ENOMEM;
251	}
252
253	/*
254	 * The passed "pfn" may not be aligned to SECTION.  For the calculation
255	 * we need to apply a mask.
256	 */
257	pfn &= PAGE_SECTION_MASK;
258	section->page_ext = (void *)base - page_ext_size * pfn;
259	total_usage += table_size;
260	return 0;
261}
262#ifdef CONFIG_MEMORY_HOTPLUG
263static void free_page_ext(void *addr)
264{
265	if (is_vmalloc_addr(addr)) {
266		vfree(addr);
267	} else {
268		struct page *page = virt_to_page(addr);
269		size_t table_size;
270
271		table_size = page_ext_size * PAGES_PER_SECTION;
272
273		BUG_ON(PageReserved(page));
274		kmemleak_free(addr);
275		free_pages_exact(addr, table_size);
276	}
277}
278
279static void __free_page_ext(unsigned long pfn)
280{
281	struct mem_section *ms;
282	struct page_ext *base;
283
284	ms = __pfn_to_section(pfn);
285	if (!ms || !ms->page_ext)
286		return;
287	base = get_entry(ms->page_ext, pfn);
288	free_page_ext(base);
289	ms->page_ext = NULL;
290}
291
292static int __meminit online_page_ext(unsigned long start_pfn,
293				unsigned long nr_pages,
294				int nid)
295{
296	unsigned long start, end, pfn;
297	int fail = 0;
298
299	start = SECTION_ALIGN_DOWN(start_pfn);
300	end = SECTION_ALIGN_UP(start_pfn + nr_pages);
301
302	if (nid == NUMA_NO_NODE) {
303		/*
304		 * In this case, "nid" already exists and contains valid memory.
305		 * "start_pfn" passed to us is a pfn which is an arg for
 306		 * online_pages(), and start_pfn should exist.
307		 */
308		nid = pfn_to_nid(start_pfn);
309		VM_BUG_ON(!node_state(nid, N_ONLINE));
310	}
311
312	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION)
313		fail = init_section_page_ext(pfn, nid);
314	if (!fail)
315		return 0;
316
317	/* rollback */
318	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
319		__free_page_ext(pfn);
320
321	return -ENOMEM;
322}
323
324static int __meminit offline_page_ext(unsigned long start_pfn,
325				unsigned long nr_pages, int nid)
326{
327	unsigned long start, end, pfn;
328
329	start = SECTION_ALIGN_DOWN(start_pfn);
330	end = SECTION_ALIGN_UP(start_pfn + nr_pages);
331
332	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
333		__free_page_ext(pfn);
334	return 0;
335
336}
337
338static int __meminit page_ext_callback(struct notifier_block *self,
339			       unsigned long action, void *arg)
340{
341	struct memory_notify *mn = arg;
342	int ret = 0;
343
344	switch (action) {
345	case MEM_GOING_ONLINE:
346		ret = online_page_ext(mn->start_pfn,
347				   mn->nr_pages, mn->status_change_nid);
348		break;
349	case MEM_OFFLINE:
350		offline_page_ext(mn->start_pfn,
351				mn->nr_pages, mn->status_change_nid);
352		break;
353	case MEM_CANCEL_ONLINE:
354		offline_page_ext(mn->start_pfn,
355				mn->nr_pages, mn->status_change_nid);
356		break;
357	case MEM_GOING_OFFLINE:
358		break;
359	case MEM_ONLINE:
360	case MEM_CANCEL_OFFLINE:
361		break;
362	}
363
364	return notifier_from_errno(ret);
365}
366
367#endif
368
369void __init page_ext_init(void)
370{
371	unsigned long pfn;
372	int nid;
373
374	if (!invoke_need_callbacks())
375		return;
376
377	for_each_node_state(nid, N_MEMORY) {
378		unsigned long start_pfn, end_pfn;
379
380		start_pfn = node_start_pfn(nid);
381		end_pfn = node_end_pfn(nid);
382		/*
383		 * start_pfn and end_pfn may not be aligned to SECTION and the
384		 * page->flags of out of node pages are not initialized.  So we
385		 * scan [start_pfn, the biggest section's pfn < end_pfn) here.
386		 */
387		for (pfn = start_pfn; pfn < end_pfn;
388			pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {
389
390			if (!pfn_valid(pfn))
391				continue;
392			/*
 393			 * Nodes' pfns can overlap.
 394			 * We know some arches can have a node layout such as
395			 * -------------pfn-------------->
396			 * N0 | N1 | N2 | N0 | N1 | N2|....
397			 */
398			if (pfn_to_nid(pfn) != nid)
399				continue;
400			if (init_section_page_ext(pfn, nid))
401				goto oom;
402			cond_resched();
403		}
404	}
405	hotplug_memory_notifier(page_ext_callback, 0);
406	pr_info("allocated %ld bytes of page_ext\n", total_usage);
407	invoke_init_callbacks();
408	return;
409
410oom:
411	panic("Out of memory");
412}
413
414void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
415{
416}
417
418#endif
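
Relative to v4.17, the entry stride in this version is the exported global page_ext_size instead of the static get_entry_size() helper, lookup_page_ext() takes a const struct page *, and the FLATMEM init callbacks moved to page_ext_init_flatmem_late(). The following hedged sketch reuses the invented my_tag client from the earlier example and shows how such a client could walk the extension entries of every page in a 2^order block on v5.14.15; it assumes the page_ext_next() helper from include/linux/page_ext.h, which advances by page_ext_size.

#include <linux/mm.h>
#include <linux/page_ext.h>

/*
 * Stamp the hypothetical my_tag payload (see the earlier sketch) on each
 * page of a 2^order block. page_ext entries are page_ext_size bytes apart,
 * so page_ext_next() is used to step from one entry to the next.
 */
static void tag_block(struct page *page, unsigned int order, unsigned long val)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	struct my_tag *tag;
	int i;

	if (unlikely(!page_ext))
		return;

	for (i = 0; i < (1 << order); i++) {
		tag = (void *)page_ext + my_tag_ops.offset;
		tag->value = val;
		page_ext = page_ext_next(page_ext);
	}
}
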