#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/page_ext.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/kmemleak.h>
#include <linux/page_owner.h>
#include <linux/page_idle.h>

/*
 * struct page extension
 *
 * This is a facility for managing memory for extended data per page.
 *
 * Until now, we had to modify struct page itself to store extra per-page
 * data. This requires rebuilding the kernel, which is a really
 * time-consuming process, and sometimes a rebuild is impossible due to
 * third-party module dependencies. Finally, enlarging struct page could
 * cause unwanted changes in system behaviour.
 *
 * This feature is intended to overcome the above problems. It allocates
 * memory for extended per-page data in a separate place rather than in
 * struct page itself. This memory can be accessed through the accessor
 * functions provided by this code. During the boot process, it checks
 * whether allocating a huge chunk of memory is needed or not. If not, it
 * avoids allocating memory at all. Thanks to this, the feature can be
 * built into the kernel by default, avoiding rebuilds and the problems
 * described above.
 *
 * To make this work, two callbacks are provided for clients. One is the
 * need callback, which is mandatory if the client wants to avoid useless
 * memory allocation at boot time. The other, the init callback, is
 * optional and is used to do proper initialization after memory is
 * allocated.
 *
 * The need callback is used to decide whether the extended memory
 * allocation is needed or not. Sometimes users want to deactivate some
 * features for a given boot, making the extra memory unnecessary. In this
 * case, to avoid allocating a huge chunk of memory, each client declares
 * its need for extra memory through the need callback. If one of the need
 * callbacks returns true, it means that someone needs extra memory, so
 * the page extension core allocates memory for page extension. If none of
 * the need callbacks returns true, memory isn't needed at all in this
 * boot and the page extension core can skip the allocation. As a result,
 * no memory is wasted.
 *
 * When a need callback returns true, page_ext checks whether the client
 * requested extra memory through the size field in struct
 * page_ext_operations. If it is non-zero, extra space is allocated for
 * each page_ext entry and the offset of that space is returned to the
 * client through the offset field in struct page_ext_operations.
 *
 * The init callback is used to do proper initialization after page
 * extension is completely initialized. In a sparse memory system, the
 * extra memory is allocated some time later than the memmap, i.e. the
 * lifetime of the page extension memory is not the same as that of the
 * memmap for struct page. Therefore, clients can't store extra data until
 * page extension is initialized, even though pages may already be
 * allocated and in use. This could leave the per-page extra data in an
 * inconsistent state, so a client can use this callback to initialize it
 * correctly.
 */
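
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * client "foo" that wants one unsigned long of per-page data would look
 * roughly like this (foo_enabled would be set by a boot parameter):
 *
 *	static bool need_foo(void)
 *	{
 *		return foo_enabled;
 *	}
 *
 *	static void init_foo(void)
 *	{
 *		... page_ext tables exist now; set up initial per-page state ...
 *	}
 *
 *	struct page_ext_operations foo_ops = {
 *		.size = sizeof(unsigned long),
 *		.need = need_foo,
 *		.init = init_foo,
 *	};
 *
 * foo_ops would also have to be added to the page_ext_ops[] array below.
 * After boot, the client reaches its data through the offset filled in by
 * invoke_need_callbacks():
 *
 *	unsigned long *foo_data = (void *)lookup_page_ext(page) + foo_ops.offset;
 *
 * (after checking for a NULL return from lookup_page_ext(); see below).
 */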

static struct page_ext_operations *page_ext_ops[] = {
	&debug_guardpage_ops,
#ifdef CONFIG_PAGE_POISONING
	&page_poisoning_ops,
#endif
#ifdef CONFIG_PAGE_OWNER
	&page_owner_ops,
#endif
#if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
	&page_idle_ops,
#endif
};

static unsigned long total_usage;
static unsigned long extra_mem;

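/*
 * Ask every registered client whether it needs page_ext in this boot.
 * For each client that does, record the byte offset of its private data
 * within a page_ext entry and grow the per-entry footprint by the
 * requested size. Returns true if at least one client needs the tables.
 */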
static bool __init invoke_need_callbacks(void)
{
	int i;
	int entries = ARRAY_SIZE(page_ext_ops);
	bool need = false;

	for (i = 0; i < entries; i++) {
		if (page_ext_ops[i]->need && page_ext_ops[i]->need()) {
			page_ext_ops[i]->offset = sizeof(struct page_ext) +
						extra_mem;
			extra_mem += page_ext_ops[i]->size;
			need = true;
		}
	}

	return need;
}

static void __init invoke_init_callbacks(void)
{
	int i;
	int entries = ARRAY_SIZE(page_ext_ops);

	for (i = 0; i < entries; i++) {
		if (page_ext_ops[i]->init)
			page_ext_ops[i]->init();
	}
}

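/*
 * A page_ext entry is sizeof(struct page_ext) plus whatever extra space
 * the enabled clients requested, so entries form a flat array of
 * variable-sized records and must be indexed with byte arithmetic rather
 * than plain struct page_ext pointer arithmetic.
 */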
static unsigned long get_entry_size(void)
{
	return sizeof(struct page_ext) + extra_mem;
}

static inline struct page_ext *get_entry(void *base, unsigned long index)
{
	return base + get_entry_size() * index;
}

#if !defined(CONFIG_SPARSEMEM)


void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
{
	pgdat->node_page_ext = NULL;
}

struct page_ext *lookup_page_ext(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long index;
	struct page_ext *base;

	base = NODE_DATA(page_to_nid(page))->node_page_ext;
#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAGE_POISONING)
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_ext arrays are
	 * allocated, when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 *
	 * This check is also necessary to ensure that page poisoning
	 * works as expected when it is enabled.
	 */
	if (unlikely(!base))
		return NULL;
#endif
	index = pfn - round_down(node_start_pfn(page_to_nid(page)),
					MAX_ORDER_NR_PAGES);
	return get_entry(base, index);
}
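
/*
 * Illustrative sketch, not part of the original file: because of the
 * early-boot/hotplug window described above, any caller that dereferences
 * the result must tolerate a NULL return, e.g. a hypothetical client:
 *
 *	struct page_ext *page_ext = lookup_page_ext(page);
 *
 *	if (unlikely(!page_ext))
 *		return;
 *	... use page_ext (or page_ext plus the client's offset) here ...
 */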

static int __init alloc_node_page_ext(int nid)
{
	struct page_ext *base;
	unsigned long table_size;
	unsigned long nr_pages;

	nr_pages = NODE_DATA(nid)->node_spanned_pages;
	if (!nr_pages)
		return 0;

	/*
	 * Extra space is needed if the node range is not aligned to
	 * MAX_ORDER_NR_PAGES. When the page allocator's buddy algorithm
	 * checks a buddy's status, the range it touches may fall outside
	 * the exact node range.
	 */
	if (!IS_ALIGNED(node_start_pfn(nid), MAX_ORDER_NR_PAGES) ||
		!IS_ALIGNED(node_end_pfn(nid), MAX_ORDER_NR_PAGES))
		nr_pages += MAX_ORDER_NR_PAGES;

	table_size = get_entry_size() * nr_pages;

	base = memblock_virt_alloc_try_nid_nopanic(
			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
			BOOTMEM_ALLOC_ACCESSIBLE, nid);
	if (!base)
		return -ENOMEM;
	NODE_DATA(nid)->node_page_ext = base;
	total_usage += table_size;
	return 0;
}

void __init page_ext_init_flatmem(void)
{
	int nid, fail;

	if (!invoke_need_callbacks())
		return;

	for_each_online_node(nid) {
		fail = alloc_node_page_ext(nid);
		if (fail)
			goto fail;
	}
	pr_info("allocated %ld bytes of page_ext\n", total_usage);
	invoke_init_callbacks();
	return;

fail:
	pr_crit("allocation of page_ext failed.\n");
	panic("Out of memory");
}

#else /* CONFIG_FLAT_NODE_MEM_MAP */

struct page_ext *lookup_page_ext(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	struct mem_section *section = __pfn_to_section(pfn);
#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAGE_POISONING)
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_ext arrays are
	 * allocated, when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 *
	 * This check is also necessary to ensure that page poisoning
	 * works as expected when it is enabled.
	 */
	if (!section->page_ext)
		return NULL;
#endif
	return get_entry(section->page_ext, pfn);
}

static void *__meminit alloc_page_ext(size_t size, int nid)
{
	gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN;
	void *addr = NULL;

	addr = alloc_pages_exact_nid(nid, size, flags);
	if (addr) {
		kmemleak_alloc(addr, size, 1, flags);
		return addr;
	}

	if (node_state(nid, N_HIGH_MEMORY))
		addr = vzalloc_node(size, nid);
	else
		addr = vzalloc(size);

	return addr;
}

static int __meminit init_section_page_ext(unsigned long pfn, int nid)
{
	struct mem_section *section;
	struct page_ext *base;
	unsigned long table_size;

	section = __pfn_to_section(pfn);

	if (section->page_ext)
		return 0;

	table_size = get_entry_size() * PAGES_PER_SECTION;
	base = alloc_page_ext(table_size, nid);

	/*
	 * The value stored in section->page_ext is the table base minus
	 * the byte offset of the section start pfn; it does not point to
	 * the memory block allocated above, which would otherwise cause
	 * kmemleak false positives.
	 */
	kmemleak_not_leak(base);

	if (!base) {
		pr_err("page ext allocation failure\n");
		return -ENOMEM;
	}

	/*
	 * The passed "pfn" may not be aligned to a section. For the
	 * calculation we need to apply a mask.
	 */
	pfn &= PAGE_SECTION_MASK;
	section->page_ext = (void *)base - get_entry_size() * pfn;
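	/*
	 * Worked example of the encoding above (added for clarity):
	 * get_entry(section->page_ext, pfn) computes
	 *	section->page_ext + pfn * entry_size
	 *	= base - section_start_pfn * entry_size + pfn * entry_size
	 *	= base + (pfn - section_start_pfn) * entry_size
	 * i.e. the entry for "pfn" within the freshly allocated table,
	 * without lookup_page_ext() having to know the section start.
	 */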
	total_usage += table_size;
	return 0;
}
#ifdef CONFIG_MEMORY_HOTPLUG
static void free_page_ext(void *addr)
{
	if (is_vmalloc_addr(addr)) {
		vfree(addr);
	} else {
		struct page *page = virt_to_page(addr);
		size_t table_size;

		table_size = get_entry_size() * PAGES_PER_SECTION;

		BUG_ON(PageReserved(page));
		free_pages_exact(addr, table_size);
	}
}

static void __free_page_ext(unsigned long pfn)
{
	struct mem_section *ms;
	struct page_ext *base;

	ms = __pfn_to_section(pfn);
	if (!ms || !ms->page_ext)
		return;
	base = get_entry(ms->page_ext, pfn);
	free_page_ext(base);
	ms->page_ext = NULL;
}

static int __meminit online_page_ext(unsigned long start_pfn,
				unsigned long nr_pages,
				int nid)
{
	unsigned long start, end, pfn;
	int fail = 0;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	if (nid == -1) {
		/*
		 * In this case, the node already exists and contains valid
		 * memory. "start_pfn" passed to us is a pfn which is an
		 * argument for online_pages(), and it must exist.
		 */
		nid = pfn_to_nid(start_pfn);
		VM_BUG_ON(!node_state(nid, N_ONLINE));
	}

	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
		if (!pfn_present(pfn))
			continue;
		fail = init_section_page_ext(pfn, nid);
	}
	if (!fail)
		return 0;

	/* rollback */
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_ext(pfn);

	return -ENOMEM;
}

static int __meminit offline_page_ext(unsigned long start_pfn,
				unsigned long nr_pages, int nid)
{
	unsigned long start, end, pfn;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_ext(pfn);
	return 0;
}

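/*
 * Memory hotplug notifier: allocate page_ext for sections that are about
 * to come online and free it again when onlining is cancelled or the
 * memory goes offline, mirroring the lifetime of the section's memmap.
 */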
static int __meminit page_ext_callback(struct notifier_block *self,
			       unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		ret = online_page_ext(mn->start_pfn,
				   mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_OFFLINE:
		offline_page_ext(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_CANCEL_ONLINE:
		offline_page_ext(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_GOING_OFFLINE:
		break;
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}

	return notifier_from_errno(ret);
}

#endif

void __init page_ext_init(void)
{
	unsigned long pfn;
	int nid;

	if (!invoke_need_callbacks())
		return;

	for_each_node_state(nid, N_MEMORY) {
		unsigned long start_pfn, end_pfn;

		start_pfn = node_start_pfn(nid);
		end_pfn = node_end_pfn(nid);
		/*
		 * start_pfn and end_pfn may not be aligned to SECTION, and
		 * the page->flags of out-of-node pages are not initialized.
		 * So we scan [start_pfn, the biggest section's pfn < end_pfn)
		 * here.
		 */
		for (pfn = start_pfn; pfn < end_pfn;
			pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {

			if (!pfn_valid(pfn))
				continue;
			/*
			 * Nodes' pfn ranges can overlap. We know some
			 * architectures can have a node layout such as
			 * -------------pfn-------------->
			 * N0 | N1 | N2 | N0 | N1 | N2|....
			 *
			 * Take DEFERRED_STRUCT_PAGE_INIT into account.
			 */
			if (early_pfn_to_nid(pfn) != nid)
				continue;
			if (init_section_page_ext(pfn, nid))
				goto oom;
		}
	}
	hotplug_memory_notifier(page_ext_callback, 0);
	pr_info("allocated %ld bytes of page_ext\n", total_usage);
	invoke_init_callbacks();
	return;

oom:
	panic("Out of memory");
}

void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
{
}

#endif