// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/page_ext.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/kmemleak.h>
#include <linux/page_owner.h>
#include <linux/page_idle.h>
#include <linux/page_table_check.h>
#include <linux/rcupdate.h>

/*
 * struct page extension
 *
 * This is the feature that manages memory for extended data per page.
 *
 * Until now, we had to modify struct page itself to store extra data per
 * page. This requires rebuilding the kernel, which is a really time
 * consuming process. And, sometimes, a rebuild is impossible due to third
 * party module dependencies. Finally, enlarging struct page could cause
 * unwanted changes in system behaviour.
 *
 * This feature is intended to overcome the problems mentioned above. It
 * allocates memory for extended data per page in a place other than struct
 * page itself. This memory can be accessed through the accessor functions
 * provided by this code. During the boot process, it checks whether
 * allocation of a huge chunk of memory is needed or not. If not, it avoids
 * allocating memory at all. Thanks to this, we can include this feature in
 * the kernel by default and avoid rebuilds and the related problems.
 *
 * To make this work well, there are two callbacks for clients. One is the
 * need callback, which is mandatory if the user wants to avoid useless
 * memory allocation at boot time. The other is the optional init callback,
 * which is used to do proper initialization after memory is allocated.
 *
 * The need callback is used to decide whether extended memory allocation is
 * needed or not. Sometimes users want to deactivate some features in a
 * given boot and the extra memory would be unnecessary. In this case, to
 * avoid allocating a huge chunk of memory, each client declares its need
 * for extra memory through the need callback. If one of the need callbacks
 * returns true, it means that someone needs extra memory, so the page
 * extension core allocates memory for page extension. If none of the need
 * callbacks return true, memory isn't needed at all in this boot and the
 * page extension core can skip the allocation. As a result, no memory is
 * wasted.
 *
 * When a need callback returns true, page_ext checks if there is a request
 * for extra memory through the size field in struct page_ext_operations. If
 * it is non-zero, extra space is allocated for each page_ext entry and the
 * offset is returned to the user through the offset field in struct
 * page_ext_operations.
 *
 * The init callback is used to do proper initialization after page extension
 * is completely initialized. On sparse memory systems, the extra memory is
 * allocated some time later than the memmap. In other words, the lifetime of
 * the memory for page extension isn't the same as that of the memmap for
 * struct page. Therefore, clients can't store extra data until page
 * extension is initialized, even if pages are already allocated and freely
 * used. This could leave the extra data per page in an inadequate state, so,
 * to prevent that, clients can use this callback to initialize that state
 * correctly.
 */
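
/*
 * Minimal sketch of a hypothetical client, for illustration only: the names
 * "struct my_data", "need_my_feature" and "my_feature_ops" are made up and
 * are not part of this file. A real client must also be listed in the
 * page_ext_ops[] array below so the core walks it at boot.
 */
struct my_data {
        unsigned long stamp;            /* per-page payload, lives at ->offset */
};

static bool need_my_feature(void)
{
        /* e.g. consult a boot parameter; returning false costs no memory */
        return true;
}

static struct page_ext_operations my_feature_ops = {
        .size = sizeof(struct my_data), /* extra bytes per page_ext entry */
        .need = need_my_feature,        /* mandatory to allow skipping the allocation */
        /* .init is optional and runs once page_ext is fully initialized */
};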

#ifdef CONFIG_SPARSEMEM
#define PAGE_EXT_INVALID (0x1)
#endif

#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
static bool need_page_idle(void)
{
        return true;
}
static struct page_ext_operations page_idle_ops __initdata = {
        .need = need_page_idle,
};
#endif

static struct page_ext_operations *page_ext_ops[] __initdata = {
#ifdef CONFIG_PAGE_OWNER
        &page_owner_ops,
#endif
#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
        &page_idle_ops,
#endif
#ifdef CONFIG_PAGE_TABLE_CHECK
        &page_table_check_ops,
#endif
};

unsigned long page_ext_size = sizeof(struct page_ext);

static unsigned long total_usage;
static struct page_ext *lookup_page_ext(const struct page *page);

bool early_page_ext;
static int __init setup_early_page_ext(char *str)
{
        early_page_ext = true;
        return 0;
}
early_param("early_page_ext", setup_early_page_ext);

static bool __init invoke_need_callbacks(void)
{
        int i;
        int entries = ARRAY_SIZE(page_ext_ops);
        bool need = false;

        for (i = 0; i < entries; i++) {
                if (page_ext_ops[i]->need && page_ext_ops[i]->need()) {
                        page_ext_ops[i]->offset = page_ext_size;
                        page_ext_size += page_ext_ops[i]->size;
                        need = true;
                }
        }

        return need;
}

static void __init invoke_init_callbacks(void)
{
        int i;
        int entries = ARRAY_SIZE(page_ext_ops);

        for (i = 0; i < entries; i++) {
                if (page_ext_ops[i]->init)
                        page_ext_ops[i]->init();
        }
}

#ifndef CONFIG_SPARSEMEM
void __init page_ext_init_flatmem_late(void)
{
        invoke_init_callbacks();
}
#endif

static inline struct page_ext *get_entry(void *base, unsigned long index)
{
        return base + page_ext_size * index;
}
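
/*
 * Sketch of how a client reaches its private area, assuming the hypothetical
 * "my_feature_ops"/"struct my_data" from the example near the top of this
 * file; ->offset was filled in by invoke_need_callbacks() at boot. This
 * mirrors what page_owner does with page_owner_ops.offset.
 */
static inline struct my_data *get_my_data(struct page_ext *page_ext)
{
        return (void *)page_ext + my_feature_ops.offset;
}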

/**
 * page_ext_get() - Get the extended information for a page.
 * @page: The page we're interested in.
 *
 * Ensures that the page_ext will remain valid until page_ext_put()
 * is called.
 *
 * Return: NULL if no page_ext exists for this page.
 * Context: Any context. Caller may not sleep until they have called
 * page_ext_put().
 */
struct page_ext *page_ext_get(struct page *page)
{
        struct page_ext *page_ext;

        rcu_read_lock();
        page_ext = lookup_page_ext(page);
        if (!page_ext) {
                rcu_read_unlock();
                return NULL;
        }

        return page_ext;
}

/**
 * page_ext_put() - Indicate that work with a page's extended information
 * is done.
 * @page_ext: Page extended information received from page_ext_get().
 *
 * The page extended information of the page may not be valid after this
 * function is called.
 *
 * Return: None.
 * Context: Any context in which the matching page_ext_get() was called.
 */
void page_ext_put(struct page_ext *page_ext)
{
        if (unlikely(!page_ext))
                return;

        rcu_read_unlock();
}
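
/*
 * Typical usage of the pair above, as a sketch (get_my_data() is the
 * hypothetical helper from the earlier example):
 *
 *	struct page_ext *page_ext = page_ext_get(page);
 *
 *	if (page_ext) {
 *		get_my_data(page_ext)->stamp = jiffies;
 *		page_ext_put(page_ext);
 *	}
 *
 * No sleeping is allowed between the two calls: the RCU read lock is held.
 */
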
#ifndef CONFIG_SPARSEMEM

void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
{
        pgdat->node_page_ext = NULL;
}

static struct page_ext *lookup_page_ext(const struct page *page)
{
        unsigned long pfn = page_to_pfn(page);
        unsigned long index;
        struct page_ext *base;

        WARN_ON_ONCE(!rcu_read_lock_held());
        base = NODE_DATA(page_to_nid(page))->node_page_ext;
        /*
         * The sanity checks the page allocator does upon freeing a
         * page can reach here before the page_ext arrays are
         * allocated when feeding a range of pages to the allocator
         * for the first time during bootup or memory hotplug.
         */
        if (unlikely(!base))
                return NULL;
        index = pfn - round_down(node_start_pfn(page_to_nid(page)),
                                 MAX_ORDER_NR_PAGES);
        return get_entry(base, index);
}

static int __init alloc_node_page_ext(int nid)
{
        struct page_ext *base;
        unsigned long table_size;
        unsigned long nr_pages;

        nr_pages = NODE_DATA(nid)->node_spanned_pages;
        if (!nr_pages)
                return 0;

        /*
         * Need extra space if node range is not aligned with
         * MAX_ORDER_NR_PAGES. When page allocator's buddy algorithm
         * checks buddy's status, range could be out of exact node range.
         */
        if (!IS_ALIGNED(node_start_pfn(nid), MAX_ORDER_NR_PAGES) ||
            !IS_ALIGNED(node_end_pfn(nid), MAX_ORDER_NR_PAGES))
                nr_pages += MAX_ORDER_NR_PAGES;

        table_size = page_ext_size * nr_pages;

        base = memblock_alloc_try_nid(
                        table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
                        MEMBLOCK_ALLOC_ACCESSIBLE, nid);
        if (!base)
                return -ENOMEM;
        NODE_DATA(nid)->node_page_ext = base;
        total_usage += table_size;
        return 0;
}

void __init page_ext_init_flatmem(void)
{
        int nid, fail;

        if (!invoke_need_callbacks())
                return;

        for_each_online_node(nid) {
                fail = alloc_node_page_ext(nid);
                if (fail)
                        goto fail;
        }
        pr_info("allocated %ld bytes of page_ext\n", total_usage);
        return;

fail:
        pr_crit("allocation of page_ext failed.\n");
        panic("Out of memory");
}

#else /* CONFIG_SPARSEMEM */
static bool page_ext_invalid(struct page_ext *page_ext)
{
        return !page_ext || (((unsigned long)page_ext & PAGE_EXT_INVALID) == PAGE_EXT_INVALID);
}
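
/*
 * The tag round-trip, spelled out; this matches __invalidate_page_ext() and
 * __free_page_ext() below:
 *
 *	val  = (void *)page_ext + PAGE_EXT_INVALID;	tag: set bit 0
 *	dead = (unsigned long)val & PAGE_EXT_INVALID;	what readers test
 *	base = (void *)val - PAGE_EXT_INVALID;		untag before freeing
 *
 * Bit 0 is available for tagging because the stored pointer is derived from
 * a page-aligned allocation minus a multiple of page_ext_size, which in
 * practice is a multiple of the word size.
 */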

static struct page_ext *lookup_page_ext(const struct page *page)
{
        unsigned long pfn = page_to_pfn(page);
        struct mem_section *section = __pfn_to_section(pfn);
        struct page_ext *page_ext = READ_ONCE(section->page_ext);

        WARN_ON_ONCE(!rcu_read_lock_held());
        /*
         * The sanity checks the page allocator does upon freeing a
         * page can reach here before the page_ext arrays are
         * allocated when feeding a range of pages to the allocator
         * for the first time during bootup or memory hotplug.
         */
        if (page_ext_invalid(page_ext))
                return NULL;
        return get_entry(page_ext, pfn);
}

static void *__meminit alloc_page_ext(size_t size, int nid)
{
        gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN;
        void *addr = NULL;

        addr = alloc_pages_exact_nid(nid, size, flags);
        if (addr) {
                kmemleak_alloc(addr, size, 1, flags);
                return addr;
        }

        addr = vzalloc_node(size, nid);

        return addr;
}

static int __meminit init_section_page_ext(unsigned long pfn, int nid)
{
        struct mem_section *section;
        struct page_ext *base;
        unsigned long table_size;

        section = __pfn_to_section(pfn);

        if (section->page_ext)
                return 0;

        table_size = page_ext_size * PAGES_PER_SECTION;
        base = alloc_page_ext(table_size, nid);

        /*
         * The value stored in section->page_ext is (base - pfn * page_ext_size)
         * and it does not point to the memory block allocated above,
         * causing kmemleak false positives.
         */
        kmemleak_not_leak(base);

        if (!base) {
                pr_err("page ext allocation failure\n");
                return -ENOMEM;
        }

        /*
         * The passed "pfn" may not be aligned to SECTION. For the calculation
         * we need to apply a mask.
         */
        pfn &= PAGE_SECTION_MASK;
        section->page_ext = (void *)base - page_ext_size * pfn;
        total_usage += table_size;
        return 0;
}
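
/*
 * Worked example of the encoding above, with made-up numbers: assume
 * PAGES_PER_SECTION == 0x8000 and a section covering pfns [0x18000, 0x20000).
 * We store
 *
 *	section->page_ext = (void *)base - page_ext_size * 0x18000
 *
 * so a later get_entry(section->page_ext, 0x18003) computes
 *
 *	base + page_ext_size * (0x18003 - 0x18000)
 *
 * i.e. the fourth entry of this section's table, without get_entry() having
 * to know the section's start pfn.
 */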

static void free_page_ext(void *addr)
{
        if (is_vmalloc_addr(addr)) {
                vfree(addr);
        } else {
                struct page *page = virt_to_page(addr);
                size_t table_size;

                table_size = page_ext_size * PAGES_PER_SECTION;

                BUG_ON(PageReserved(page));
                kmemleak_free(addr);
                free_pages_exact(addr, table_size);
        }
}

static void __free_page_ext(unsigned long pfn)
{
        struct mem_section *ms;
        struct page_ext *base;

        ms = __pfn_to_section(pfn);
        if (!ms || !ms->page_ext)
                return;

        base = READ_ONCE(ms->page_ext);
        /*
         * The page_ext here can still be valid (untagged) while doing the
         * rollback operation in online_page_ext().
         */
        if (page_ext_invalid(base))
                base = (void *)base - PAGE_EXT_INVALID;
        WRITE_ONCE(ms->page_ext, NULL);

        base = get_entry(base, pfn);
        free_page_ext(base);
}

static void __invalidate_page_ext(unsigned long pfn)
{
        struct mem_section *ms;
        void *val;

        ms = __pfn_to_section(pfn);
        if (!ms || !ms->page_ext)
                return;
        val = (void *)ms->page_ext + PAGE_EXT_INVALID;
        WRITE_ONCE(ms->page_ext, val);
}

static int __meminit online_page_ext(unsigned long start_pfn,
                                     unsigned long nr_pages,
                                     int nid)
{
        unsigned long start, end, pfn;
        int fail = 0;

        start = SECTION_ALIGN_DOWN(start_pfn);
        end = SECTION_ALIGN_UP(start_pfn + nr_pages);

        if (nid == NUMA_NO_NODE) {
                /*
                 * In this case, "nid" already exists and contains valid memory.
                 * "start_pfn" passed to us is a pfn which is an arg for
                 * online_pages(), and start_pfn should exist.
                 */
                nid = pfn_to_nid(start_pfn);
                VM_BUG_ON(!node_online(nid));
        }

        for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION)
                fail = init_section_page_ext(pfn, nid);
        if (!fail)
                return 0;

        /* rollback */
        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
                __free_page_ext(pfn);

        return -ENOMEM;
}

static int __meminit offline_page_ext(unsigned long start_pfn,
                                      unsigned long nr_pages)
{
        unsigned long start, end, pfn;

        start = SECTION_ALIGN_DOWN(start_pfn);
        end = SECTION_ALIGN_UP(start_pfn + nr_pages);

        /*
         * Freeing of page_ext is done in 3 steps to avoid
         * use-after-free of it:
         * 1) Traverse all the sections and mark their page_ext
         *    as invalid.
         * 2) Wait for all the existing users of page_ext who
         *    started before invalidation to finish.
         * 3) Free the page_ext.
         */
        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
                __invalidate_page_ext(pfn);

        synchronize_rcu();

        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
                __free_page_ext(pfn);
        return 0;
}
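
/*
 * Sketch of the race the three steps above close, with a reader running
 * concurrently (the reader side is page_ext_get()/page_ext_put() above):
 *
 *	reader				offliner
 *	------				--------
 *	rcu_read_lock()
 *	lookup_page_ext()
 *					__invalidate_page_ext()
 *					(new lookups now return NULL)
 *					synchronize_rcu() blocks...
 *	...uses page_ext...
 *	rcu_read_unlock()
 *					...until here
 *					__free_page_ext() is now safe
 */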

static int __meminit page_ext_callback(struct notifier_block *self,
                                       unsigned long action, void *arg)
{
        struct memory_notify *mn = arg;
        int ret = 0;

        switch (action) {
        case MEM_GOING_ONLINE:
                ret = online_page_ext(mn->start_pfn,
                                      mn->nr_pages, mn->status_change_nid);
                break;
        case MEM_OFFLINE:
                offline_page_ext(mn->start_pfn,
                                 mn->nr_pages);
                break;
        case MEM_CANCEL_ONLINE:
                offline_page_ext(mn->start_pfn,
                                 mn->nr_pages);
                break;
        case MEM_GOING_OFFLINE:
                break;
        case MEM_ONLINE:
        case MEM_CANCEL_OFFLINE:
                break;
        }

        return notifier_from_errno(ret);
}

void __init page_ext_init(void)
{
        unsigned long pfn;
        int nid;

        if (!invoke_need_callbacks())
                return;

        for_each_node_state(nid, N_MEMORY) {
                unsigned long start_pfn, end_pfn;

                start_pfn = node_start_pfn(nid);
                end_pfn = node_end_pfn(nid);
                /*
                 * start_pfn and end_pfn may not be aligned to SECTION and the
                 * page->flags of pages outside the node are not initialized.
                 * So we scan [start_pfn, the biggest section's pfn < end_pfn)
                 * here.
                 */
                for (pfn = start_pfn; pfn < end_pfn;
                     pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {

                        if (!pfn_valid(pfn))
                                continue;
                        /*
                         * Nodes' pfn ranges can overlap.
                         * We know some arches can have a node layout such as
                         * -------------pfn-------------->
                         * N0 | N1 | N2 | N0 | N1 | N2 | ....
                         */
                        if (pfn_to_nid(pfn) != nid)
                                continue;
                        if (init_section_page_ext(pfn, nid))
                                goto oom;
                        cond_resched();
                }
        }
        hotplug_memory_notifier(page_ext_callback, DEFAULT_CALLBACK_PRI);
        pr_info("allocated %ld bytes of page_ext\n", total_usage);
        invoke_init_callbacks();
        return;

oom:
        panic("Out of memory");
}

void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
{
}

#endif