v4.17
  1// SPDX-License-Identifier: GPL-2.0
  2#include <linux/mm.h>
  3#include <linux/mmzone.h>
  4#include <linux/bootmem.h>
  5#include <linux/page_ext.h>
  6#include <linux/memory.h>
  7#include <linux/vmalloc.h>
  8#include <linux/kmemleak.h>
  9#include <linux/page_owner.h>
 10#include <linux/page_idle.h>
 11
 12/*
 13 * struct page extension
 14 *
 15 * This is the feature to manage memory for extended data per page.
 16 *
 17 * Until now, we had to modify struct page itself to store extra data per page.
 18 * This requires rebuilding the kernel, which is a really time-consuming process.
 19 * And sometimes a rebuild is impossible due to third-party module dependencies.
 20 * Finally, enlarging struct page could cause unwanted system behaviour changes.
 21 *
 22 * This feature is intended to overcome the above-mentioned problems. It
 23 * allocates memory for extended data per page in a separate place rather than
 24 * in struct page itself. This memory can be accessed by the accessor
 25 * functions provided by this code. During the boot process, it checks whether
 26 * allocation of a huge chunk of memory is needed or not. If not, it avoids
 27 * allocating memory at all. With this advantage, we can include this feature
 28 * in the kernel by default and avoid both the rebuild and the related problems.
 29 *
 30 * To make this work well, there are two callbacks for clients. One is the
 31 * need callback, which is mandatory if the user wants to avoid useless
 32 * memory allocation at boot time. The other is the optional init callback,
 33 * which is used to do proper initialization after memory is allocated.
 34 *
 35 * The need callback is used to decide whether extended memory allocation is
 36 * needed or not. Sometimes users want to deactivate some features for a
 37 * given boot, making the extra memory unnecessary. In this case, to avoid
 38 * allocating a huge chunk of memory, each client expresses its need for
 39 * extra memory through the need callback. If one of the need callbacks
 40 * returns true, it means that someone needs extra memory, so the
 41 * page extension core should allocate memory for page extension. If
 42 * none of the need callbacks return true, memory isn't needed at all for this
 43 * boot and the page extension core can skip the allocation. As a result,
 44 * no memory is wasted.
 45 *
 46 * When a need callback returns true, page_ext checks if there is a request
 47 * for extra memory via the size field of struct page_ext_operations. If it is
 48 * non-zero, extra space is allocated for each page_ext entry and the offset
 49 * is returned to the user through the offset field of the same struct.
 50 *
 51 * The init callback is used to do proper initialization after page extension
 52 * is completely initialized. In a sparse memory system, the extra memory is
 53 * allocated some time later than the memmap. In other words, the lifetime of
 54 * the page extension memory isn't the same as that of the memmap for struct
 55 * page. Therefore, clients can't store extra data until page extension is
 56 * initialized, even if pages are already allocated and freely used. This could
 57 * leave the extra data per page in an inconsistent state, so, to prevent it, a
 58 * client can use this callback to initialize that state correctly.
 59 */
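/*
 * Editor's note: the following is a minimal, hypothetical client sketch for
 * illustration only; it is not part of this file. "foo_enabled", "foo_need",
 * "foo_init", "struct foo_data" and "foo_ops" are invented names. A real
 * client additionally wires its ops into the page_ext_ops[] array below.
 */
#if 0	/* illustrative sketch, not built */
struct foo_data {
	unsigned long acct;		/* hypothetical per-page data */
};

static bool foo_enabled __initdata;	/* e.g. set from a boot parameter */

static bool foo_need(void)
{
	/* Only pay the per-page memory cost when the feature is in use. */
	return foo_enabled;
}

static void foo_init(void)
{
	/* Called once page_ext is fully set up; entries are now usable. */
}

static struct page_ext_operations foo_ops = {
	.size	= sizeof(struct foo_data),	/* extra bytes per entry */
	.need	= foo_need,	/* skip the allocation when it returns false */
	.init	= foo_init,	/* optional late initialization */
};

/*
 * Given a struct page_ext *ext, the client reaches its private area via the
 * offset that invoke_need_callbacks() fills in:
 *	struct foo_data *data = (void *)ext + foo_ops.offset;
 */
#endif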
 60
 61static struct page_ext_operations *page_ext_ops[] = {
 62#ifdef CONFIG_DEBUG_PAGEALLOC
 63	&debug_guardpage_ops,
 64#endif
 65#ifdef CONFIG_PAGE_OWNER
 66	&page_owner_ops,
 67#endif
 68#if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
 69	&page_idle_ops,
 70#endif
 71};
 72
 73static unsigned long total_usage;
 74static unsigned long extra_mem;
 75
 76static bool __init invoke_need_callbacks(void)
 77{
 78	int i;
 79	int entries = ARRAY_SIZE(page_ext_ops);
 80	bool need = false;
 81
 82	for (i = 0; i < entries; i++) {
 83		if (page_ext_ops[i]->need && page_ext_ops[i]->need()) {
 84			page_ext_ops[i]->offset = sizeof(struct page_ext) +
 85						extra_mem;
 86			extra_mem += page_ext_ops[i]->size;
 87			need = true;
 88		}
 89	}
 90
 91	return need;
 92}
 93
 94static void __init invoke_init_callbacks(void)
 95{
 96	int i;
 97	int entries = ARRAY_SIZE(page_ext_ops);
 98
 99	for (i = 0; i < entries; i++) {
100		if (page_ext_ops[i]->init)
101			page_ext_ops[i]->init();
102	}
103}
104
105static unsigned long get_entry_size(void)
106{
107	return sizeof(struct page_ext) + extra_mem;
108}
109
110static inline struct page_ext *get_entry(void *base, unsigned long index)
111{
112	return base + get_entry_size() * index;
113}
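/*
 * Editor's note: worked example, not part of this file. With two hypothetical
 * clients of size 8 and 16 whose need() returned true, the flat array laid
 * out by invoke_need_callbacks() above looks like:
 *
 *	get_entry_size() == sizeof(struct page_ext) + 8 + 16
 *	client A data at entry + sizeof(struct page_ext)	(its ->offset)
 *	client B data at entry + sizeof(struct page_ext) + 8	(its ->offset)
 *
 * and get_entry() simply strides through it: entry i lives at
 * base + i * get_entry_size().
 */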
114
115#if !defined(CONFIG_SPARSEMEM)
116
117
118void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
119{
120	pgdat->node_page_ext = NULL;
121}
122
123struct page_ext *lookup_page_ext(struct page *page)
124{
125	unsigned long pfn = page_to_pfn(page);
126	unsigned long index;
127	struct page_ext *base;
128
129	base = NODE_DATA(page_to_nid(page))->node_page_ext;
130	/*
131	 * The sanity checks the page allocator does upon freeing a
132	 * page can reach here before the page_ext arrays are
133	 * allocated when feeding a range of pages to the allocator
134	 * for the first time during bootup or memory hotplug.
135	 */
136	if (unlikely(!base))
137		return NULL;
138	index = pfn - round_down(node_start_pfn(page_to_nid(page)),
139					MAX_ORDER_NR_PAGES);
140	return get_entry(base, index);
141}
142
143static int __init alloc_node_page_ext(int nid)
144{
145	struct page_ext *base;
146	unsigned long table_size;
147	unsigned long nr_pages;
148
149	nr_pages = NODE_DATA(nid)->node_spanned_pages;
150	if (!nr_pages)
151		return 0;
152
153	/*
154	 * Need extra space if node range is not aligned with
155	 * MAX_ORDER_NR_PAGES. When page allocator's buddy algorithm
156	 * checks buddy's status, range could be out of exact node range.
157	 */
158	if (!IS_ALIGNED(node_start_pfn(nid), MAX_ORDER_NR_PAGES) ||
159		!IS_ALIGNED(node_end_pfn(nid), MAX_ORDER_NR_PAGES))
160		nr_pages += MAX_ORDER_NR_PAGES;
161
162	table_size = get_entry_size() * nr_pages;
163
164	base = memblock_virt_alloc_try_nid_nopanic(
165			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
166			BOOTMEM_ALLOC_ACCESSIBLE, nid);
167	if (!base)
168		return -ENOMEM;
169	NODE_DATA(nid)->node_page_ext = base;
170	total_usage += table_size;
171	return 0;
172}
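/*
 * Editor's note: worked example, not part of this file, assuming a
 * hypothetical MAX_ORDER_NR_PAGES of 1024 and a node spanning pfns
 * [512, 2048). node_spanned_pages is 1536, but the start pfn is not
 * 1024-aligned, so one extra MAX_ORDER block is allocated: 2560 entries in
 * total. lookup_page_ext() above then indexes from round_down(512, 1024)
 * == 0, so pfn 512 maps to entry 512 and buddy checks just below the node
 * start stay in range.
 */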
173
174void __init page_ext_init_flatmem(void)
175{
176
177	int nid, fail;
178
179	if (!invoke_need_callbacks())
180		return;
181
182	for_each_online_node(nid)  {
183		fail = alloc_node_page_ext(nid);
184		if (fail)
185			goto fail;
186	}
187	pr_info("allocated %ld bytes of page_ext\n", total_usage);
188	invoke_init_callbacks();
189	return;
190
191fail:
192	pr_crit("allocation of page_ext failed.\n");
193	panic("Out of memory");
194}
195
196#else /* CONFIG_SPARSEMEM */
197
198struct page_ext *lookup_page_ext(struct page *page)
199{
200	unsigned long pfn = page_to_pfn(page);
201	struct mem_section *section = __pfn_to_section(pfn);
202	/*
203	 * The sanity checks the page allocator does upon freeing a
204	 * page can reach here before the page_ext arrays are
205	 * allocated when feeding a range of pages to the allocator
206	 * for the first time during bootup or memory hotplug.
207	 */
208	if (!section->page_ext)
209		return NULL;
210	return get_entry(section->page_ext, pfn);
211}
212
213static void *__meminit alloc_page_ext(size_t size, int nid)
214{
215	gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN;
216	void *addr = NULL;
217
218	addr = alloc_pages_exact_nid(nid, size, flags);
219	if (addr) {
220		kmemleak_alloc(addr, size, 1, flags);
221		return addr;
222	}
223
224	addr = vzalloc_node(size, nid);
225
226	return addr;
227}
228
229static int __meminit init_section_page_ext(unsigned long pfn, int nid)
230{
231	struct mem_section *section;
232	struct page_ext *base;
233	unsigned long table_size;
234
235	section = __pfn_to_section(pfn);
236
237	if (section->page_ext)
238		return 0;
239
240	table_size = get_entry_size() * PAGES_PER_SECTION;
241	base = alloc_page_ext(table_size, nid);
242
243	/*
244	 * The value stored in section->page_ext is (base - pfn)
245	 * and it does not point to the memory block allocated above,
246	 * causing kmemleak false positives.
247	 */
248	kmemleak_not_leak(base);
249
250	if (!base) {
251		pr_err("page ext allocation failure\n");
252		return -ENOMEM;
253	}
254
255	/*
256	 * The passed "pfn" may not be aligned to SECTION.  For the calculation
257	 * we need to apply a mask.
258	 */
259	pfn &= PAGE_SECTION_MASK;
260	section->page_ext = (void *)base - get_entry_size() * pfn;
261	total_usage += table_size;
262	return 0;
263}
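/*
 * Editor's note: illustration, not part of this file. Storing
 * (base - section_start_pfn * entry_size) lets get_entry() take a raw pfn.
 * For a section covering pfns [S, S + PAGES_PER_SECTION):
 *	section->page_ext + pfn * entry_size
 *	  == base + (pfn - S) * entry_size
 * so no per-lookup subtraction of the section start is needed.
 */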
264#ifdef CONFIG_MEMORY_HOTPLUG
265static void free_page_ext(void *addr)
266{
267	if (is_vmalloc_addr(addr)) {
268		vfree(addr);
269	} else {
270		struct page *page = virt_to_page(addr);
271		size_t table_size;
272
273		table_size = get_entry_size() * PAGES_PER_SECTION;
274
275		BUG_ON(PageReserved(page));
276		free_pages_exact(addr, table_size);
277	}
278}
279
280static void __free_page_ext(unsigned long pfn)
281{
282	struct mem_section *ms;
283	struct page_ext *base;
284
285	ms = __pfn_to_section(pfn);
286	if (!ms || !ms->page_ext)
287		return;
288	base = get_entry(ms->page_ext, pfn);
289	free_page_ext(base);
290	ms->page_ext = NULL;
291}
292
293static int __meminit online_page_ext(unsigned long start_pfn,
294				unsigned long nr_pages,
295				int nid)
296{
297	unsigned long start, end, pfn;
298	int fail = 0;
299
300	start = SECTION_ALIGN_DOWN(start_pfn);
301	end = SECTION_ALIGN_UP(start_pfn + nr_pages);
302
303	if (nid == -1) {
304		/*
305		 * In this case, "nid" already exists and contains valid memory.
306		 * "start_pfn" passed to us is the pfn that was an argument to
307		 * online_pages(), and start_pfn should exist.
308		 */
309		nid = pfn_to_nid(start_pfn);
310		VM_BUG_ON(!node_state(nid, N_ONLINE));
311	}
312
313	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
314		if (!pfn_present(pfn))
315			continue;
316		fail = init_section_page_ext(pfn, nid);
317	}
318	if (!fail)
319		return 0;
320
321	/* rollback */
322	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
323		__free_page_ext(pfn);
324
325	return -ENOMEM;
326}
327
328static int __meminit offline_page_ext(unsigned long start_pfn,
329				unsigned long nr_pages, int nid)
330{
331	unsigned long start, end, pfn;
332
333	start = SECTION_ALIGN_DOWN(start_pfn);
334	end = SECTION_ALIGN_UP(start_pfn + nr_pages);
335
336	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
337		__free_page_ext(pfn);
338	return 0;
339
340}
341
342static int __meminit page_ext_callback(struct notifier_block *self,
343			       unsigned long action, void *arg)
344{
345	struct memory_notify *mn = arg;
346	int ret = 0;
347
348	switch (action) {
349	case MEM_GOING_ONLINE:
350		ret = online_page_ext(mn->start_pfn,
351				   mn->nr_pages, mn->status_change_nid);
352		break;
353	case MEM_OFFLINE:
354		offline_page_ext(mn->start_pfn,
355				mn->nr_pages, mn->status_change_nid);
356		break;
357	case MEM_CANCEL_ONLINE:
358		offline_page_ext(mn->start_pfn,
359				mn->nr_pages, mn->status_change_nid);
360		break;
361	case MEM_GOING_OFFLINE:
362		break;
363	case MEM_ONLINE:
364	case MEM_CANCEL_OFFLINE:
365		break;
366	}
367
368	return notifier_from_errno(ret);
369}
370
371#endif
372
373void __init page_ext_init(void)
374{
375	unsigned long pfn;
376	int nid;
377
378	if (!invoke_need_callbacks())
379		return;
380
381	for_each_node_state(nid, N_MEMORY) {
382		unsigned long start_pfn, end_pfn;
383
384		start_pfn = node_start_pfn(nid);
385		end_pfn = node_end_pfn(nid);
386		/*
387		 * start_pfn and end_pfn may not be aligned to SECTION and the
388		 * page->flags of out-of-node pages are not initialized.  So we
389		 * scan [start_pfn, the biggest section's pfn < end_pfn) here.
390		 */
391		for (pfn = start_pfn; pfn < end_pfn;
392			pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {
393
394			if (!pfn_valid(pfn))
395				continue;
396			/*
397			 * Nodes' pfns can overlap.
398			 * We know some arch can have a nodes layout such as
399			 * -------------pfn-------------->
400			 * N0 | N1 | N2 | N0 | N1 | N2|....
401			 *
402			 * Take into account DEFERRED_STRUCT_PAGE_INIT.
403			 */
404			if (early_pfn_to_nid(pfn) != nid)
405				continue;
406			if (init_section_page_ext(pfn, nid))
407				goto oom;
408			cond_resched();
409		}
410	}
411	hotplug_memory_notifier(page_ext_callback, 0);
412	pr_info("allocated %ld bytes of page_ext\n", total_usage);
413	invoke_init_callbacks();
414	return;
415
416oom:
417	panic("Out of memory");
418}
419
420void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
421{
422}
423
424#endif
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0
  2#include <linux/mm.h>
  3#include <linux/mmzone.h>
  4#include <linux/memblock.h>
  5#include <linux/page_ext.h>
  6#include <linux/memory.h>
  7#include <linux/vmalloc.h>
  8#include <linux/kmemleak.h>
  9#include <linux/page_owner.h>
 10#include <linux/page_idle.h>
 11#include <linux/page_table_check.h>
 12#include <linux/rcupdate.h>
 13#include <linux/pgalloc_tag.h>
 14
 15/*
 16 * struct page extension
 17 *
 18 * This is the feature to manage memory for extended data per page.
 19 *
 20 * Until now, we had to modify struct page itself to store extra data per page.
 21 * This requires rebuilding the kernel, which is a really time-consuming process.
 22 * And sometimes a rebuild is impossible due to third-party module dependencies.
 23 * Finally, enlarging struct page could cause unwanted system behaviour changes.
 24 *
 25 * This feature is intended to overcome the above-mentioned problems. It
 26 * allocates memory for extended data per page in a separate place rather than
 27 * in struct page itself. This memory can be accessed by the accessor
 28 * functions provided by this code. During the boot process, it checks whether
 29 * allocation of a huge chunk of memory is needed or not. If not, it avoids
 30 * allocating memory at all. With this advantage, we can include this feature
 31 * in the kernel by default and avoid both the rebuild and the related problems.
 32 *
 33 * To make this work well, there are two callbacks for clients. One is the
 34 * need callback, which is mandatory if the user wants to avoid useless
 35 * memory allocation at boot time. The other is the optional init callback,
 36 * which is used to do proper initialization after memory is allocated.
 37 *
 38 * The need callback is used to decide whether extended memory allocation is
 39 * needed or not. Sometimes users want to deactivate some features for a
 40 * given boot, making the extra memory unnecessary. In this case, to avoid
 41 * allocating a huge chunk of memory, each client expresses its need for
 42 * extra memory through the need callback. If one of the need callbacks
 43 * returns true, it means that someone needs extra memory, so the
 44 * page extension core should allocate memory for page extension. If
 45 * none of the need callbacks return true, memory isn't needed at all for this
 46 * boot and the page extension core can skip the allocation. As a result,
 47 * no memory is wasted.
 48 *
 49 * When a need callback returns true, page_ext checks if there is a request
 50 * for extra memory via the size field of struct page_ext_operations. If it is
 51 * non-zero, extra space is allocated for each page_ext entry and the offset
 52 * is returned to the user through the offset field of the same struct.
 53 *
 54 * The init callback is used to do proper initialization after page extension
 55 * is completely initialized. In a sparse memory system, the extra memory is
 56 * allocated some time later than the memmap. In other words, the lifetime of
 57 * the page extension memory isn't the same as that of the memmap for struct
 58 * page. Therefore, clients can't store extra data until page extension is
 59 * initialized, even if pages are already allocated and freely used. This could
 60 * leave the extra data per page in an inconsistent state, so, to prevent it, a
 61 * client can use this callback to initialize that state correctly.
 62 */
 63
 64#ifdef CONFIG_SPARSEMEM
 65#define PAGE_EXT_INVALID       (0x1)
 66#endif
 67
 68#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
 69static bool need_page_idle(void)
 70{
 71	return true;
 72}
 73static struct page_ext_operations page_idle_ops __initdata = {
 74	.need = need_page_idle,
 75	.need_shared_flags = true,
 76};
 77#endif
 78
 79static struct page_ext_operations *page_ext_ops[] __initdata = {
 80#ifdef CONFIG_PAGE_OWNER
 81	&page_owner_ops,
 82#endif
 83#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
 84	&page_idle_ops,
 85#endif
 86#ifdef CONFIG_MEM_ALLOC_PROFILING
 87	&page_alloc_tagging_ops,
 88#endif
 89#ifdef CONFIG_PAGE_TABLE_CHECK
 90	&page_table_check_ops,
 91#endif
 92};
 93
 94unsigned long page_ext_size;
 95
 96static unsigned long total_usage;
 97
 98#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
 99/*
100 * To ensure correct allocation tagging for pages, page_ext should be available
101 * before the first page allocation. Otherwise early task stacks will be
102 * allocated before page_ext initialization and missing tags will be flagged.
103 */
104bool early_page_ext __meminitdata = true;
105#else
106bool early_page_ext __meminitdata;
107#endif
108static int __init setup_early_page_ext(char *str)
109{
110	early_page_ext = true;
111	return 0;
112}
113early_param("early_page_ext", setup_early_page_ext);
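/*
 * Editor's note: illustration, not part of this file. Booting with
 * "early_page_ext" on the kernel command line runs the handler above and
 * forces early_page_ext to true, making page_ext usable before the first
 * page allocation.
 */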
114
115static bool __init invoke_need_callbacks(void)
116{
117	int i;
118	int entries = ARRAY_SIZE(page_ext_ops);
119	bool need = false;
120
121	for (i = 0; i < entries; i++) {
122		if (page_ext_ops[i]->need()) {
123			if (page_ext_ops[i]->need_shared_flags) {
124				page_ext_size = sizeof(struct page_ext);
125				break;
126			}
127		}
128	}
129
130	for (i = 0; i < entries; i++) {
131		if (page_ext_ops[i]->need()) {
132			page_ext_ops[i]->offset = page_ext_size;
133			page_ext_size += page_ext_ops[i]->size;
134			need = true;
135		}
136	}
137
138	return need;
139}
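/*
 * Editor's note: illustration, not part of this file. The first pass only
 * decides whether the shared struct page_ext header (its flags word) is
 * needed at all; if no enabled client sets need_shared_flags, page_ext_size
 * stays 0 going into the second pass, which hands out offsets. With two
 * hypothetical clients A (size 8, need_shared_flags) and B (size 16):
 *
 *	pass 1:	page_ext_size = sizeof(struct page_ext)
 *	pass 2:	A.offset = sizeof(struct page_ext); page_ext_size += 8
 *		B.offset = sizeof(struct page_ext) + 8; page_ext_size += 16
 */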
140
141static void __init invoke_init_callbacks(void)
142{
143	int i;
144	int entries = ARRAY_SIZE(page_ext_ops);
145
146	for (i = 0; i < entries; i++) {
147		if (page_ext_ops[i]->init)
148			page_ext_ops[i]->init();
149	}
150}
151
152static inline struct page_ext *get_entry(void *base, unsigned long index)
153{
154	return base + page_ext_size * index;
155}
156
157#ifndef CONFIG_SPARSEMEM
158void __init page_ext_init_flatmem_late(void)
159{
160	invoke_init_callbacks();
161}
162
163void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
164{
165	pgdat->node_page_ext = NULL;
166}
167
168static struct page_ext *lookup_page_ext(const struct page *page)
169{
170	unsigned long pfn = page_to_pfn(page);
171	unsigned long index;
172	struct page_ext *base;
173
174	WARN_ON_ONCE(!rcu_read_lock_held());
175	base = NODE_DATA(page_to_nid(page))->node_page_ext;
176	/*
177	 * The sanity checks the page allocator does upon freeing a
178	 * page can reach here before the page_ext arrays are
179	 * allocated when feeding a range of pages to the allocator
180	 * for the first time during bootup or memory hotplug.
181	 */
182	if (unlikely(!base))
183		return NULL;
184	index = pfn - round_down(node_start_pfn(page_to_nid(page)),
185					MAX_ORDER_NR_PAGES);
186	return get_entry(base, index);
187}
188
189static int __init alloc_node_page_ext(int nid)
190{
191	struct page_ext *base;
192	unsigned long table_size;
193	unsigned long nr_pages;
194
195	nr_pages = NODE_DATA(nid)->node_spanned_pages;
196	if (!nr_pages)
197		return 0;
198
199	/*
200	 * Need extra space if node range is not aligned with
201	 * MAX_ORDER_NR_PAGES. When page allocator's buddy algorithm
202	 * checks buddy's status, range could be out of exact node range.
203	 */
204	if (!IS_ALIGNED(node_start_pfn(nid), MAX_ORDER_NR_PAGES) ||
205		!IS_ALIGNED(node_end_pfn(nid), MAX_ORDER_NR_PAGES))
206		nr_pages += MAX_ORDER_NR_PAGES;
207
208	table_size = page_ext_size * nr_pages;
209
210	base = memblock_alloc_try_nid(
211			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
212			MEMBLOCK_ALLOC_ACCESSIBLE, nid);
213	if (!base)
214		return -ENOMEM;
215	NODE_DATA(nid)->node_page_ext = base;
216	total_usage += table_size;
217	memmap_boot_pages_add(DIV_ROUND_UP(table_size, PAGE_SIZE));
218	return 0;
219}
220
221void __init page_ext_init_flatmem(void)
222{
223
224	int nid, fail;
225
226	if (!invoke_need_callbacks())
227		return;
228
229	for_each_online_node(nid)  {
230		fail = alloc_node_page_ext(nid);
231		if (fail)
232			goto fail;
233	}
234	pr_info("allocated %ld bytes of page_ext\n", total_usage);
235	return;
236
237fail:
238	pr_crit("allocation of page_ext failed.\n");
239	panic("Out of memory");
240}
241
242#else /* CONFIG_SPARSEMEM */
243static bool page_ext_invalid(struct page_ext *page_ext)
244{
245	return !page_ext || (((unsigned long)page_ext & PAGE_EXT_INVALID) == PAGE_EXT_INVALID);
246}
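/*
 * Editor's note: illustration, not part of this file. PAGE_EXT_INVALID uses
 * bit 0 of the (at least word-aligned) page_ext pointer as a "being torn
 * down" tag:
 *	val = (void *)ms->page_ext + PAGE_EXT_INVALID;	 tag, in __invalidate_page_ext()
 *	base = (void *)base - PAGE_EXT_INVALID;	 untag, in __free_page_ext()
 * A reader that sees the tagged value through lookup_page_ext() gets NULL
 * back instead of a pointer into memory that is about to be freed.
 */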
247
248static struct page_ext *lookup_page_ext(const struct page *page)
249{
250	unsigned long pfn = page_to_pfn(page);
251	struct mem_section *section = __pfn_to_section(pfn);
252	struct page_ext *page_ext = READ_ONCE(section->page_ext);
253
254	WARN_ON_ONCE(!rcu_read_lock_held());
255	/*
256	 * The sanity checks the page allocator does upon freeing a
257	 * page can reach here before the page_ext arrays are
258	 * allocated when feeding a range of pages to the allocator
259	 * for the first time during bootup or memory hotplug.
260	 */
261	if (page_ext_invalid(page_ext))
262		return NULL;
263	return get_entry(page_ext, pfn);
264}
265
266static void *__meminit alloc_page_ext(size_t size, int nid)
267{
268	gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN;
269	void *addr = NULL;
270
271	addr = alloc_pages_exact_nid(nid, size, flags);
272	if (addr)
273		kmemleak_alloc(addr, size, 1, flags);
274	else
275		addr = vzalloc_node(size, nid);
276
277	if (addr)
278		memmap_pages_add(DIV_ROUND_UP(size, PAGE_SIZE));
279
280	return addr;
281}
282
283static int __meminit init_section_page_ext(unsigned long pfn, int nid)
284{
285	struct mem_section *section;
286	struct page_ext *base;
287	unsigned long table_size;
288
289	section = __pfn_to_section(pfn);
290
291	if (section->page_ext)
292		return 0;
293
294	table_size = page_ext_size * PAGES_PER_SECTION;
295	base = alloc_page_ext(table_size, nid);
296
297	/*
298	 * The value stored in section->page_ext is (base - pfn)
299	 * and it does not point to the memory block allocated above,
300	 * causing kmemleak false positives.
301	 */
302	kmemleak_not_leak(base);
303
304	if (!base) {
305		pr_err("page ext allocation failure\n");
306		return -ENOMEM;
307	}
308
309	/*
310	 * The passed "pfn" may not be aligned to SECTION.  For the calculation
311	 * we need to apply a mask.
312	 */
313	pfn &= PAGE_SECTION_MASK;
314	section->page_ext = (void *)base - page_ext_size * pfn;
315	total_usage += table_size;
316	return 0;
317}
318
319static void free_page_ext(void *addr)
320{
321	size_t table_size;
322	struct page *page;
323
324	table_size = page_ext_size * PAGES_PER_SECTION;
325	memmap_pages_add(-1L * (DIV_ROUND_UP(table_size, PAGE_SIZE)));
326
327	if (is_vmalloc_addr(addr)) {
328		vfree(addr);
329	} else {
330		page = virt_to_page(addr);
331		BUG_ON(PageReserved(page));
332		kmemleak_free(addr);
333		free_pages_exact(addr, table_size);
334	}
335}
336
337static void __free_page_ext(unsigned long pfn)
338{
339	struct mem_section *ms;
340	struct page_ext *base;
341
342	ms = __pfn_to_section(pfn);
343	if (!ms || !ms->page_ext)
344		return;
345
346	base = READ_ONCE(ms->page_ext);
347	/*
348	 * page_ext here can be valid while doing the roll back
349	 * operation in online_page_ext().
350	 */
351	if (page_ext_invalid(base))
352		base = (void *)base - PAGE_EXT_INVALID;
353	WRITE_ONCE(ms->page_ext, NULL);
354
355	base = get_entry(base, pfn);
356	free_page_ext(base);
357}
358
359static void __invalidate_page_ext(unsigned long pfn)
360{
361	struct mem_section *ms;
362	void *val;
363
364	ms = __pfn_to_section(pfn);
365	if (!ms || !ms->page_ext)
366		return;
367	val = (void *)ms->page_ext + PAGE_EXT_INVALID;
368	WRITE_ONCE(ms->page_ext, val);
369}
370
371static int __meminit online_page_ext(unsigned long start_pfn,
372				unsigned long nr_pages,
373				int nid)
374{
375	unsigned long start, end, pfn;
376	int fail = 0;
377
378	start = SECTION_ALIGN_DOWN(start_pfn);
379	end = SECTION_ALIGN_UP(start_pfn + nr_pages);
380
381	if (nid == NUMA_NO_NODE) {
382		/*
383		 * In this case, "nid" already exists and contains valid memory.
384		 * "start_pfn" passed to us is the pfn that was an argument to
385		 * online_pages(), and start_pfn should exist.
386		 */
387		nid = pfn_to_nid(start_pfn);
388		VM_BUG_ON(!node_online(nid));
389	}
390
391	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION)
392		fail = init_section_page_ext(pfn, nid);
393	if (!fail)
394		return 0;
395
396	/* rollback */
397	end = pfn - PAGES_PER_SECTION;
398	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
399		__free_page_ext(pfn);
400
401	return -ENOMEM;
402}
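/*
 * Editor's note: illustration, not part of this file. If
 * init_section_page_ext() fails for the section starting at pfn P, the loop
 * above exits with pfn == P + PAGES_PER_SECTION, so resetting end to
 * pfn - PAGES_PER_SECTION makes the rollback free exactly the sections
 * [start, P) that did succeed; the failed section itself allocated nothing.
 */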
403
404static void __meminit offline_page_ext(unsigned long start_pfn,
405				unsigned long nr_pages)
406{
407	unsigned long start, end, pfn;
408
409	start = SECTION_ALIGN_DOWN(start_pfn);
410	end = SECTION_ALIGN_UP(start_pfn + nr_pages);
411
412	/*
413	 * Freeing of page_ext is done in 3 steps to avoid
414	 * use-after-free of it:
415	 * 1) Traverse all the sections and mark their page_ext
416	 *    as invalid.
417	 * 2) Wait for all the existing users of page_ext who
418	 *    started before invalidation to finish.
419	 * 3) Free the page_ext.
420	 */
421	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
422		__invalidate_page_ext(pfn);
423
424	synchronize_rcu();
425
426	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
427		__free_page_ext(pfn);
428}
429
430static int __meminit page_ext_callback(struct notifier_block *self,
431			       unsigned long action, void *arg)
432{
433	struct memory_notify *mn = arg;
434	int ret = 0;
435
436	switch (action) {
437	case MEM_GOING_ONLINE:
438		ret = online_page_ext(mn->start_pfn,
439				   mn->nr_pages, mn->status_change_nid);
440		break;
441	case MEM_OFFLINE:
442		offline_page_ext(mn->start_pfn,
443				mn->nr_pages);
444		break;
445	case MEM_CANCEL_ONLINE:
446		offline_page_ext(mn->start_pfn,
447				mn->nr_pages);
448		break;
449	case MEM_GOING_OFFLINE:
450		break;
451	case MEM_ONLINE:
452	case MEM_CANCEL_OFFLINE:
453		break;
454	}
455
456	return notifier_from_errno(ret);
457}
458
459void __init page_ext_init(void)
460{
461	unsigned long pfn;
462	int nid;
463
464	if (!invoke_need_callbacks())
465		return;
466
467	for_each_node_state(nid, N_MEMORY) {
468		unsigned long start_pfn, end_pfn;
469
470		start_pfn = node_start_pfn(nid);
471		end_pfn = node_end_pfn(nid);
472		/*
473		 * start_pfn and end_pfn may not be aligned to SECTION and the
474		 * page->flags of out-of-node pages are not initialized.  So we
475		 * scan [start_pfn, the biggest section's pfn < end_pfn) here.
476		 */
477		for (pfn = start_pfn; pfn < end_pfn;
478			pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {
479
480			if (!pfn_valid(pfn))
481				continue;
482			/*
483			 * Nodes' pfns can overlap.
484			 * We know some arch can have a nodes layout such as
485			 * -------------pfn-------------->
486			 * N0 | N1 | N2 | N0 | N1 | N2|....
487			 */
488			if (pfn_to_nid(pfn) != nid)
489				continue;
490			if (init_section_page_ext(pfn, nid))
491				goto oom;
492			cond_resched();
493		}
494	}
495	hotplug_memory_notifier(page_ext_callback, DEFAULT_CALLBACK_PRI);
496	pr_info("allocated %ld bytes of page_ext\n", total_usage);
497	invoke_init_callbacks();
498	return;
499
500oom:
501	panic("Out of memory");
502}
503
504void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
505{
506}
507
508#endif
509
510/**
511 * page_ext_get() - Get the extended information for a page.
512 * @page: The page we're interested in.
513 *
514 * Ensures that the page_ext will remain valid until page_ext_put()
515 * is called.
516 *
517 * Return: NULL if no page_ext exists for this page.
518 * Context: Any context.  Caller may not sleep until they have called
519 * page_ext_put().
520 */
521struct page_ext *page_ext_get(const struct page *page)
522{
523	struct page_ext *page_ext;
524
525	rcu_read_lock();
526	page_ext = lookup_page_ext(page);
527	if (!page_ext) {
528		rcu_read_unlock();
529		return NULL;
530	}
531
532	return page_ext;
533}
534
535/**
536 * page_ext_put() - Working with page extended information is done.
537 * @page_ext: Page extended information received from page_ext_get().
538 *
539 * The page extended information of the page may not be valid after this
540 * function is called.
541 *
542 * Return: None.
543 * Context: Any context in which the corresponding page_ext_get() was called.
544 */
545void page_ext_put(struct page_ext *page_ext)
546{
547	if (unlikely(!page_ext))
548		return;
549
550	rcu_read_unlock();
551}
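/*
 * Editor's note: a minimal, hypothetical usage sketch, not part of this file.
 * "foo_mark_page" is an invented name. A client brackets every access with
 * page_ext_get()/page_ext_put() so that the RCU-based teardown in
 * offline_page_ext() cannot free the array underneath it.
 */
#if 0	/* illustrative sketch, not built */
static void foo_mark_page(struct page *page)
{
	struct page_ext *ext = page_ext_get(page);	/* takes rcu_read_lock() */

	if (!ext)
		return;			/* page_ext not (yet) allocated */

	/* ... access the client's private area via its registered offset ... */

	page_ext_put(ext);		/* drops rcu_read_lock() */
}
#endif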