drivers/xen/balloon.c (v5.9)
  1/******************************************************************************
  2 * Xen balloon driver - enables returning/claiming memory to/from Xen.
  3 *
  4 * Copyright (c) 2003, B Dragovic
  5 * Copyright (c) 2003-2004, M Williamson, K Fraser
  6 * Copyright (c) 2005 Dan M. Smith, IBM Corporation
  7 * Copyright (c) 2010 Daniel Kiper
  8 *
  9 * Memory hotplug support was written by Daniel Kiper. Work on
 10 * it was sponsored by Google under Google Summer of Code 2010
 11 * program. Jeremy Fitzhardinge from Citrix was the mentor for
 12 * this project.
 13 *
 14 * This program is free software; you can redistribute it and/or
 15 * modify it under the terms of the GNU General Public License version 2
 16 * as published by the Free Software Foundation; or, when distributed
 17 * separately from the Linux kernel or incorporated into other
 18 * software packages, subject to the following license:
 19 *
 20 * Permission is hereby granted, free of charge, to any person obtaining a copy
 21 * of this source file (the "Software"), to deal in the Software without
 22 * restriction, including without limitation the rights to use, copy, modify,
 23 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 24 * and to permit persons to whom the Software is furnished to do so, subject to
 25 * the following conditions:
 26 *
 27 * The above copyright notice and this permission notice shall be included in
 28 * all copies or substantial portions of the Software.
 29 *
 30 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 31 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 32 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 33 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 34 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 35 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 36 * IN THE SOFTWARE.
 37 */
 38
 39#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
 40
 41#include <linux/cpu.h>
 42#include <linux/kernel.h>
 43#include <linux/sched.h>
 44#include <linux/cred.h>
 45#include <linux/errno.h>
 46#include <linux/mm.h>
 47#include <linux/memblock.h>
 48#include <linux/pagemap.h>
 49#include <linux/highmem.h>
 50#include <linux/mutex.h>
 51#include <linux/list.h>
 52#include <linux/gfp.h>
 53#include <linux/notifier.h>
 54#include <linux/memory.h>
 55#include <linux/memory_hotplug.h>
 56#include <linux/percpu-defs.h>
 57#include <linux/slab.h>
 58#include <linux/sysctl.h>
 59
 60#include <asm/page.h>
 61#include <asm/tlb.h>
 62
 63#include <asm/xen/hypervisor.h>
 64#include <asm/xen/hypercall.h>
 65
 66#include <xen/xen.h>
 67#include <xen/interface/xen.h>
 68#include <xen/interface/memory.h>
 69#include <xen/balloon.h>
 70#include <xen/features.h>
 71#include <xen/page.h>
 72#include <xen/mem-reservation.h>
 73
 74static int xen_hotplug_unpopulated;
 75
 76#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
 77
 78static struct ctl_table balloon_table[] = {
 79	{
 80		.procname	= "hotplug_unpopulated",
 81		.data		= &xen_hotplug_unpopulated,
 82		.maxlen		= sizeof(int),
 83		.mode		= 0644,
 84		.proc_handler	= proc_dointvec_minmax,
 85		.extra1         = SYSCTL_ZERO,
 86		.extra2         = SYSCTL_ONE,
 87	},
 88	{ }
 89};
 90
 91static struct ctl_table balloon_root[] = {
 92	{
 93		.procname	= "balloon",
 94		.mode		= 0555,
 95		.child		= balloon_table,
 96	},
 97	{ }
 98};
 99
100static struct ctl_table xen_root[] = {
101	{
102		.procname	= "xen",
103		.mode		= 0555,
104		.child		= balloon_root,
105	},
106	{ }
107};
108
109#endif
110
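/*
 * Editorial aside (not part of balloon.c): the nested ctl_table arrays
 * above publish one 0/1 tunable at
 * /proc/sys/xen/balloon/hotplug_unpopulated, clamped by
 * proc_dointvec_minmax. A minimal userspace sketch that toggles it;
 * only the path comes from the tables above, the rest is illustrative:
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/xen/balloon/hotplug_unpopulated", "w");

	if (!f)
		return 1;
	fputs("1\n", f);	/* prefer hotplugging memory for unpopulated pages */
	return fclose(f) ? 1 : 0;
}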
111/*
112 * Use one extent per PAGE_SIZE to avoid breaking the page down into
113 * multiple frames.
114 */
115#define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)
116
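/*
 * Editorial aside: worked example of the macro above. With 4 KiB Xen
 * pages and a 4 KiB kernel PAGE_SIZE, XEN_PFN_PER_PAGE == 1, so
 * EXTENT_ORDER == fls(1) - 1 == 0: one extent per page. On a 64 KiB
 * page arm64 kernel, XEN_PFN_PER_PAGE == 16 and EXTENT_ORDER ==
 * fls(16) - 1 == 4, so one order-4 extent still covers exactly one
 * kernel page.
 */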
117/*
118 * balloon_process() state:
119 *
120 * BP_DONE: done or nothing to do,
121 * BP_WAIT: wait to be rescheduled,
122 * BP_EAGAIN: error, go to sleep,
123 * BP_ECANCELED: error, balloon operation canceled.
124 */
125
126enum bp_state {
127	BP_DONE,
128	BP_WAIT,
129	BP_EAGAIN,
130	BP_ECANCELED
131};
132
133
134static DEFINE_MUTEX(balloon_mutex);
135
136struct balloon_stats balloon_stats;
137EXPORT_SYMBOL_GPL(balloon_stats);
138
139/* We increase/decrease in batches which fit in a page */
140static xen_pfn_t frame_list[PAGE_SIZE / sizeof(xen_pfn_t)];
141
142
143/* List of ballooned pages, threaded through the mem_map array. */
144static LIST_HEAD(ballooned_pages);
145static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);
146
147/* Main work function, always executed in process context. */
148static void balloon_process(struct work_struct *work);
149static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
150
151/* When ballooning out (allocating memory to return to Xen) we don't really
152   want the kernel to try too hard since that can trigger the oom killer. */
153#define GFP_BALLOON \
154	(GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)
155
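/*
 * Editorial aside: GFP_HIGHUSER lets the balloon take highmem pages,
 * while __GFP_NOWARN, __GFP_NORETRY and __GFP_NOMEMALLOC make a failed
 * allocation silent, non-retrying and barred from emergency reserves,
 * so inflating the balloon backs off (BP_EAGAIN) rather than pushing
 * the guest toward the OOM killer.
 */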
156/* balloon_append: add the given page to the balloon. */
157static void balloon_append(struct page *page)
158{
159	__SetPageOffline(page);
160
161	/* Lowmem is re-populated first, so highmem pages go at list tail. */
162	if (PageHighMem(page)) {
163		list_add_tail(&page->lru, &ballooned_pages);
164		balloon_stats.balloon_high++;
165	} else {
166		list_add(&page->lru, &ballooned_pages);
167		balloon_stats.balloon_low++;
168	}
169	wake_up(&balloon_wq);
170}
171
172/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
173static struct page *balloon_retrieve(bool require_lowmem)
174{
175	struct page *page;
176
177	if (list_empty(&ballooned_pages))
178		return NULL;
179
180	page = list_entry(ballooned_pages.next, struct page, lru);
181	if (require_lowmem && PageHighMem(page))
182		return NULL;
183	list_del(&page->lru);
184
185	if (PageHighMem(page))
186		balloon_stats.balloon_high--;
187	else
188		balloon_stats.balloon_low--;
189
190	__ClearPageOffline(page);
191	return page;
192}
193
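/*
 * Editorial aside: balloon_append() above queues lowmem pages at the
 * list head and highmem pages at the tail, so balloon_retrieve()
 * always hands back lowmem first; with require_lowmem it simply gives
 * up once only highmem pages remain at the head.
 */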
194static struct page *balloon_next_page(struct page *page)
195{
196	struct list_head *next = page->lru.next;
197	if (next == &ballooned_pages)
198		return NULL;
199	return list_entry(next, struct page, lru);
200}
201
202static enum bp_state update_schedule(enum bp_state state)
203{
204	if (state == BP_WAIT)
205		return BP_WAIT;
206
207	if (state == BP_ECANCELED)
208		return BP_ECANCELED;
209
210	if (state == BP_DONE) {
211		balloon_stats.schedule_delay = 1;
212		balloon_stats.retry_count = 1;
213		return BP_DONE;
214	}
215
216	++balloon_stats.retry_count;
217
218	if (balloon_stats.max_retry_count != RETRY_UNLIMITED &&
219			balloon_stats.retry_count > balloon_stats.max_retry_count) {
220		balloon_stats.schedule_delay = 1;
221		balloon_stats.retry_count = 1;
222		return BP_ECANCELED;
223	}
224
225	balloon_stats.schedule_delay <<= 1;
226
227	if (balloon_stats.schedule_delay > balloon_stats.max_schedule_delay)
228		balloon_stats.schedule_delay = balloon_stats.max_schedule_delay;
229
230	return BP_EAGAIN;
231}
232
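/*
 * Editorial aside: with the defaults set in balloon_init() below
 * (schedule_delay = 1, max_schedule_delay = 32), each consecutive
 * BP_EAGAIN doubles the worker's back-off (2, 4, 8, 16, 32 s, then
 * capped), and any BP_DONE resets it. In this version max_retry_count
 * defaults to 4, so a persistently failing operation is canceled after
 * four retries; the v4.10.11 listing below defaults to RETRY_UNLIMITED
 * instead.
 */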
233#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
234static void release_memory_resource(struct resource *resource)
235{
236	if (!resource)
237		return;
238
239	/*
240	 * No need to reset region to identity mapped since we now
241	 * know that no I/O can be in this region
242	 */
243	release_resource(resource);
244	kfree(resource);
245}
246
247static struct resource *additional_memory_resource(phys_addr_t size)
248{
249	struct resource *res;
250	int ret;
251
252	res = kzalloc(sizeof(*res), GFP_KERNEL);
253	if (!res)
254		return NULL;
255
256	res->name = "System RAM";
257	res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
258
259	ret = allocate_resource(&iomem_resource, res,
260				size, 0, -1,
261				PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
262	if (ret < 0) {
263		pr_err("Cannot allocate new System RAM resource\n");
264		kfree(res);
265		return NULL;
266	}
267
268	return res;
269}
270
271static enum bp_state reserve_additional_memory(void)
272{
273	long credit;
274	struct resource *resource;
275	int nid, rc;
276	unsigned long balloon_hotplug;
277
278	credit = balloon_stats.target_pages + balloon_stats.target_unpopulated
279		- balloon_stats.total_pages;
280
281	/*
282	 * Already hotplugged enough pages?  Wait for them to be
283	 * onlined.
284	 */
285	if (credit <= 0)
286		return BP_WAIT;
287
288	balloon_hotplug = round_up(credit, PAGES_PER_SECTION);
289
290	resource = additional_memory_resource(balloon_hotplug * PAGE_SIZE);
291	if (!resource)
292		goto err;
293
294	nid = memory_add_physaddr_to_nid(resource->start);
295
296#ifdef CONFIG_XEN_HAVE_PVMMU
297	/*
298	 * We don't support PV MMU when Linux and Xen are using
299	 * different page granularity.
300	 */
301	BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
302
303	/*
304	 * add_memory() will build page tables for the new memory so
305	 * the p2m must contain invalid entries so the correct
306	 * non-present PTEs will be written.
307	 *
308	 * If a failure occurs, the original (identity) p2m entries
309	 * are not restored since this region is now known not to
310	 * conflict with any devices.
311	 */
312	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
313		unsigned long pfn, i;
314
315		pfn = PFN_DOWN(resource->start);
316		for (i = 0; i < balloon_hotplug; i++) {
317			if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
318				pr_warn("set_phys_to_machine() failed, no memory added\n");
319				goto err;
320			}
321		}
322	}
323#endif
324
325	/*
326	 * add_memory_resource() will call online_pages() which in its turn
327	 * will call xen_online_page() callback causing deadlock if we don't
328	 * release balloon_mutex here. Unlocking here is safe because the
329	 * callers drop the mutex before trying again.
330	 */
331	mutex_unlock(&balloon_mutex);
332	/* add_memory_resource() requires the device_hotplug lock */
333	lock_device_hotplug();
334	rc = add_memory_resource(nid, resource);
335	unlock_device_hotplug();
336	mutex_lock(&balloon_mutex);
337
338	if (rc) {
339		pr_warn("Cannot add additional memory (%i)\n", rc);
340		goto err;
341	}
342
343	balloon_stats.total_pages += balloon_hotplug;
344
345	return BP_WAIT;
346  err:
347	release_memory_resource(resource);
348	return BP_ECANCELED;
349}
350
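/*
 * Editorial aside, illustrative numbers only: with target_pages =
 * 262144, target_unpopulated = 512 and total_pages = 229376, credit is
 * 33280 pages; round_up() to PAGES_PER_SECTION (32768 on x86-64)
 * yields a 65536-page (256 MiB) hotplug request, and the function
 * returns BP_WAIT until the new sections are onlined.
 */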
351static void xen_online_page(struct page *page, unsigned int order)
352{
353	unsigned long i, size = (1 << order);
354	unsigned long start_pfn = page_to_pfn(page);
355	struct page *p;
356
357	pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn);
358	mutex_lock(&balloon_mutex);
359	for (i = 0; i < size; i++) {
360		p = pfn_to_page(start_pfn + i);
361		balloon_append(p);
362	}
363	mutex_unlock(&balloon_mutex);
364}
365
366static int xen_memory_notifier(struct notifier_block *nb, unsigned long val, void *v)
367{
368	if (val == MEM_ONLINE)
369		schedule_delayed_work(&balloon_worker, 0);
370
371	return NOTIFY_OK;
372}
373
374static struct notifier_block xen_memory_nb = {
375	.notifier_call = xen_memory_notifier,
376	.priority = 0
377};
378#else
379static enum bp_state reserve_additional_memory(void)
380{
381	balloon_stats.target_pages = balloon_stats.current_pages +
382				     balloon_stats.target_unpopulated;
383	return BP_ECANCELED;
384}
385#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */
386
387static long current_credit(void)
388{
389	return balloon_stats.target_pages - balloon_stats.current_pages;
390}
391
392static bool balloon_is_inflated(void)
393{
394	return balloon_stats.balloon_low || balloon_stats.balloon_high;
395}
396
397static enum bp_state increase_reservation(unsigned long nr_pages)
398{
399	int rc;
400	unsigned long i;
401	struct page   *page;
402
403	if (nr_pages > ARRAY_SIZE(frame_list))
404		nr_pages = ARRAY_SIZE(frame_list);
405
406	page = list_first_entry_or_null(&ballooned_pages, struct page, lru);
407	for (i = 0; i < nr_pages; i++) {
408		if (!page) {
409			nr_pages = i;
410			break;
411		}
412
413		frame_list[i] = page_to_xen_pfn(page);
414		page = balloon_next_page(page);
415	}
416
417	rc = xenmem_reservation_increase(nr_pages, frame_list);
418	if (rc <= 0)
419		return BP_EAGAIN;
420
421	for (i = 0; i < rc; i++) {
422		page = balloon_retrieve(false);
423		BUG_ON(page == NULL);
424
425		xenmem_reservation_va_mapping_update(1, &page, &frame_list[i]);
426
427		/* Relinquish the page back to the allocator. */
428		free_reserved_page(page);
429	}
430
431	balloon_stats.current_pages += rc;
432
433	return BP_DONE;
434}
435
436static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
437{
438	enum bp_state state = BP_DONE;
439	unsigned long i;
440	struct page *page, *tmp;
441	int ret;
442	LIST_HEAD(pages);
443
444	if (nr_pages > ARRAY_SIZE(frame_list))
445		nr_pages = ARRAY_SIZE(frame_list);
446
447	for (i = 0; i < nr_pages; i++) {
448		page = alloc_page(gfp);
449		if (page == NULL) {
450			nr_pages = i;
451			state = BP_EAGAIN;
452			break;
453		}
454		adjust_managed_page_count(page, -1);
455		xenmem_reservation_scrub_page(page);
456		list_add(&page->lru, &pages);
457	}
458
459	/*
460	 * Ensure that ballooned highmem pages don't have kmaps.
461	 *
462	 * Do this before changing the p2m as kmap_flush_unused()
463	 * reads PTEs to obtain pages (and hence needs the original
464	 * p2m entry).
465	 */
466	kmap_flush_unused();
467
468	/*
469	 * Setup the frame, update direct mapping, invalidate P2M,
470	 * and add to balloon.
471	 */
472	i = 0;
473	list_for_each_entry_safe(page, tmp, &pages, lru) {
474		frame_list[i++] = xen_page_to_gfn(page);
475
476		xenmem_reservation_va_mapping_reset(1, &page);
477
478		list_del(&page->lru);
479
480		balloon_append(page);
481	}
482
483	flush_tlb_all();
484
485	ret = xenmem_reservation_decrease(nr_pages, frame_list);
486	BUG_ON(ret != nr_pages);
487
488	balloon_stats.current_pages -= nr_pages;
489
490	return state;
491}
492
493/*
494 * As this is a work item it is guaranteed to run as a single instance only.
495 * We may of course race updates of the target counts (which are protected
496 * by the balloon lock), or with changes to the Xen hard limit, but we will
497 * recover from these in time.
498 */
499static void balloon_process(struct work_struct *work)
500{
501	enum bp_state state = BP_DONE;
502	long credit;
503
504
505	do {
506		mutex_lock(&balloon_mutex);
507
508		credit = current_credit();
509
510		if (credit > 0) {
511			if (balloon_is_inflated())
512				state = increase_reservation(credit);
513			else
514				state = reserve_additional_memory();
515		}
516
517		if (credit < 0) {
518			long n_pages;
519
520			n_pages = min(-credit, si_mem_available());
521			state = decrease_reservation(n_pages, GFP_BALLOON);
522			if (state == BP_DONE && n_pages != -credit &&
523			    n_pages < totalreserve_pages)
524				state = BP_EAGAIN;
525		}
526
527		state = update_schedule(state);
528
529		mutex_unlock(&balloon_mutex);
530
531		cond_resched();
532
533	} while (credit && state == BP_DONE);
534
535	/* Schedule more work if there is some still to be done. */
536	if (state == BP_EAGAIN)
537		schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);
538}
539
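/*
 * Editorial aside, assumed numbers: if the target drops 25600 pages
 * below current_pages, credit == -25600. Each loop iteration balloons
 * out at most ARRAY_SIZE(frame_list) pages (PAGE_SIZE /
 * sizeof(xen_pfn_t) == 512 on x86-64), clamped further by
 * si_mem_available(), and the do/while keeps iterating while credit
 * remains and the state stays BP_DONE.
 */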
540/* Resets the Xen limit, sets new target, and kicks off processing. */
541void balloon_set_new_target(unsigned long target)
542{
543	/* No need for lock. Not read-modify-write updates. */
544	balloon_stats.target_pages = target;
545	schedule_delayed_work(&balloon_worker, 0);
546}
547EXPORT_SYMBOL_GPL(balloon_set_new_target);
548
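/*
 * Editorial aside: a hypothetical, simplified version of the caller in
 * drivers/xen/xen-balloon.c. The toolstack writes the new size to the
 * memory/target xenstore node in KiB; the watch converts it to pages
 * before kicking the worker (name and structure here are illustrative):
 */
static void watch_target_sketch(unsigned long new_target_kib)
{
	/* KiB -> pages: shift away (PAGE_SHIFT - 10) bits */
	balloon_set_new_target(new_target_kib >> (PAGE_SHIFT - 10));
}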
549static int add_ballooned_pages(int nr_pages)
550{
551	enum bp_state st;
552
553	if (xen_hotplug_unpopulated) {
554		st = reserve_additional_memory();
555		if (st != BP_ECANCELED) {
556			int rc;
557
558			mutex_unlock(&balloon_mutex);
559			rc = wait_event_interruptible(balloon_wq,
560				   !list_empty(&ballooned_pages));
561			mutex_lock(&balloon_mutex);
562			return rc ? -ENOMEM : 0;
563		}
564	}
565
566	if (si_mem_available() < nr_pages)
567		return -ENOMEM;
568
569	st = decrease_reservation(nr_pages, GFP_USER);
570	if (st != BP_DONE)
571		return -ENOMEM;
572
573	return 0;
574}
575
576/**
577 * alloc_xenballooned_pages - get pages that have been ballooned out
578 * @nr_pages: Number of pages to get
579 * @pages: pages returned
580 * @return 0 on success, error otherwise
581 */
582int alloc_xenballooned_pages(int nr_pages, struct page **pages)
583{
584	int pgno = 0;
585	struct page *page;
586	int ret;
587
588	mutex_lock(&balloon_mutex);
589
590	balloon_stats.target_unpopulated += nr_pages;
591
592	while (pgno < nr_pages) {
593		page = balloon_retrieve(true);
594		if (page) {
595			pages[pgno++] = page;
596#ifdef CONFIG_XEN_HAVE_PVMMU
597			/*
598			 * We don't support PV MMU when Linux and Xen are using
599			 * different page granularity.
600			 */
601			BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
602
603			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
604				ret = xen_alloc_p2m_entry(page_to_pfn(page));
605				if (ret < 0)
606					goto out_undo;
607			}
608#endif
609		} else {
610			ret = add_ballooned_pages(nr_pages - pgno);
611			if (ret < 0)
612				goto out_undo;
613		}
614	}
615	mutex_unlock(&balloon_mutex);
616	return 0;
617 out_undo:
618	mutex_unlock(&balloon_mutex);
619	free_xenballooned_pages(pgno, pages);
620	/*
621	 * NB: free_xenballooned_pages will only subtract pgno pages, but since
622	 * target_unpopulated is incremented with nr_pages at the start we need
623	 * to remove the remaining ones also, or accounting will be screwed.
624	 */
625	balloon_stats.target_unpopulated -= nr_pages - pgno;
626	return ret;
627}
628EXPORT_SYMBOL(alloc_xenballooned_pages);
629
630/**
631 * free_xenballooned_pages - return pages retrieved with alloc_xenballooned_pages
632 * @nr_pages: Number of pages
633 * @pages: pages to return
634 */
635void free_xenballooned_pages(int nr_pages, struct page **pages)
636{
637	int i;
638
639	mutex_lock(&balloon_mutex);
640
641	for (i = 0; i < nr_pages; i++) {
642		if (pages[i])
643			balloon_append(pages[i]);
644	}
645
646	balloon_stats.target_unpopulated -= nr_pages;
647
648	/* The balloon may be too large now. Shrink it if needed. */
649	if (current_credit())
650		schedule_delayed_work(&balloon_worker, 0);
651
652	mutex_unlock(&balloon_mutex);
653}
654EXPORT_SYMBOL(free_xenballooned_pages);
655
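/*
 * Editorial aside: hypothetical usage sketch (all names invented). A
 * backend driver can borrow ballooned-out frames to map another
 * domain's grants instead of consuming populated RAM:
 */
static int borrow_two_frames_sketch(struct page *pages[2])
{
	int rc = alloc_xenballooned_pages(2, pages);

	if (rc)
		return rc;	/* typically -ENOMEM */
	/* ... map grant references onto pages[0] and pages[1] here ... */
	free_xenballooned_pages(2, pages);
	return 0;
}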
656#if defined(CONFIG_XEN_PV) && !defined(CONFIG_XEN_UNPOPULATED_ALLOC)
657static void __init balloon_add_region(unsigned long start_pfn,
658				      unsigned long pages)
659{
660	unsigned long pfn, extra_pfn_end;
661
662	/*
663	 * If the amount of usable memory has been limited (e.g., with
664	 * the 'mem' command line parameter), don't add pages beyond
665	 * this limit.
666	 */
667	extra_pfn_end = min(max_pfn, start_pfn + pages);
668
669	for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) {
670		/* totalram_pages and totalhigh_pages do not
671		   include the boot-time balloon extension, so
672		   don't subtract from them. */
673		balloon_append(pfn_to_page(pfn));
674	}
675
676	balloon_stats.total_pages += extra_pfn_end - start_pfn;
677}
678#endif
679
680static int __init balloon_init(void)
681{
682	if (!xen_domain())
683		return -ENODEV;
684
685	pr_info("Initialising balloon driver\n");
686
687#ifdef CONFIG_XEN_PV
688	balloon_stats.current_pages = xen_pv_domain()
689		? min(xen_start_info->nr_pages - xen_released_pages, max_pfn)
690		: get_num_physpages();
691#else
692	balloon_stats.current_pages = get_num_physpages();
693#endif
694	balloon_stats.target_pages  = balloon_stats.current_pages;
695	balloon_stats.balloon_low   = 0;
696	balloon_stats.balloon_high  = 0;
697	balloon_stats.total_pages   = balloon_stats.current_pages;
698
699	balloon_stats.schedule_delay = 1;
700	balloon_stats.max_schedule_delay = 32;
701	balloon_stats.retry_count = 1;
702	balloon_stats.max_retry_count = 4;
703
704#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
705	set_online_page_callback(&xen_online_page);
706	register_memory_notifier(&xen_memory_nb);
707	register_sysctl_table(xen_root);
708#endif
709
710#if defined(CONFIG_XEN_PV) && !defined(CONFIG_XEN_UNPOPULATED_ALLOC)
711	{
712		int i;
713
714		/*
715		 * Initialize the balloon with pages from the extra memory
716		 * regions (see arch/x86/xen/setup.c).
717		 */
718		for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++)
719			if (xen_extra_mem[i].n_pfns)
720				balloon_add_region(xen_extra_mem[i].start_pfn,
721						   xen_extra_mem[i].n_pfns);
722	}
723#endif
724
725	/* Init the xen-balloon driver. */
726	xen_balloon_init();
727
728	return 0;
729}
730subsys_initcall(balloon_init);
drivers/xen/balloon.c (v4.10.11)
  1/******************************************************************************
  2 * Xen balloon driver - enables returning/claiming memory to/from Xen.
  3 *
  4 * Copyright (c) 2003, B Dragovic
  5 * Copyright (c) 2003-2004, M Williamson, K Fraser
  6 * Copyright (c) 2005 Dan M. Smith, IBM Corporation
  7 * Copyright (c) 2010 Daniel Kiper
  8 *
  9 * Memory hotplug support was written by Daniel Kiper. Work on
 10 * it was sponsored by Google under Google Summer of Code 2010
 11 * program. Jeremy Fitzhardinge from Citrix was the mentor for
 12 * this project.
 13 *
 14 * This program is free software; you can redistribute it and/or
 15 * modify it under the terms of the GNU General Public License version 2
 16 * as published by the Free Software Foundation; or, when distributed
 17 * separately from the Linux kernel or incorporated into other
 18 * software packages, subject to the following license:
 19 *
 20 * Permission is hereby granted, free of charge, to any person obtaining a copy
 21 * of this source file (the "Software"), to deal in the Software without
 22 * restriction, including without limitation the rights to use, copy, modify,
 23 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 24 * and to permit persons to whom the Software is furnished to do so, subject to
 25 * the following conditions:
 26 *
 27 * The above copyright notice and this permission notice shall be included in
 28 * all copies or substantial portions of the Software.
 29 *
 30 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 31 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 32 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 33 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 34 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 35 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 36 * IN THE SOFTWARE.
 37 */
 38
 39#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
 40
 41#include <linux/cpu.h>
 42#include <linux/kernel.h>
 43#include <linux/sched.h>
 44#include <linux/errno.h>
 45#include <linux/mm.h>
 46#include <linux/bootmem.h>
 47#include <linux/pagemap.h>
 48#include <linux/highmem.h>
 49#include <linux/mutex.h>
 50#include <linux/list.h>
 51#include <linux/gfp.h>
 52#include <linux/notifier.h>
 53#include <linux/memory.h>
 54#include <linux/memory_hotplug.h>
 55#include <linux/percpu-defs.h>
 56#include <linux/slab.h>
 57#include <linux/sysctl.h>
 58
 59#include <asm/page.h>
 60#include <asm/pgalloc.h>
 61#include <asm/pgtable.h>
 62#include <asm/tlb.h>
 63
 64#include <asm/xen/hypervisor.h>
 65#include <asm/xen/hypercall.h>
 66
 67#include <xen/xen.h>
 68#include <xen/interface/xen.h>
 69#include <xen/interface/memory.h>
 70#include <xen/balloon.h>
 71#include <xen/features.h>
 72#include <xen/page.h>
 73
 74static int xen_hotplug_unpopulated;
 75
 76#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
 77
 78static int zero;
 79static int one = 1;
 80
 81static struct ctl_table balloon_table[] = {
 82	{
 83		.procname	= "hotplug_unpopulated",
 84		.data		= &xen_hotplug_unpopulated,
 85		.maxlen		= sizeof(int),
 86		.mode		= 0644,
 87		.proc_handler	= proc_dointvec_minmax,
 88		.extra1         = &zero,
 89		.extra2         = &one,
 90	},
 91	{ }
 92};
 93
 94static struct ctl_table balloon_root[] = {
 95	{
 96		.procname	= "balloon",
 97		.mode		= 0555,
 98		.child		= balloon_table,
 99	},
100	{ }
101};
102
103static struct ctl_table xen_root[] = {
104	{
105		.procname	= "xen",
106		.mode		= 0555,
107		.child		= balloon_root,
108	},
109	{ }
110};
111
112#endif
113
114/*
115 * Use one extent per PAGE_SIZE to avoid breaking the page down into
116 * multiple frames.
117 */
118#define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)
119
120/*
121 * balloon_process() state:
122 *
123 * BP_DONE: done or nothing to do,
124 * BP_WAIT: wait to be rescheduled,
125 * BP_EAGAIN: error, go to sleep,
126 * BP_ECANCELED: error, balloon operation canceled.
127 */
128
129enum bp_state {
130	BP_DONE,
131	BP_WAIT,
132	BP_EAGAIN,
133	BP_ECANCELED
134};
135
136
137static DEFINE_MUTEX(balloon_mutex);
138
139struct balloon_stats balloon_stats;
140EXPORT_SYMBOL_GPL(balloon_stats);
141
142/* We increase/decrease in batches which fit in a page */
143static xen_pfn_t frame_list[PAGE_SIZE / sizeof(xen_pfn_t)];
144
145
146/* List of ballooned pages, threaded through the mem_map array. */
147static LIST_HEAD(ballooned_pages);
148static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);
149
150/* Main work function, always executed in process context. */
151static void balloon_process(struct work_struct *work);
152static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
153
154/* When ballooning out (allocating memory to return to Xen) we don't really
155   want the kernel to try too hard since that can trigger the oom killer. */
156#define GFP_BALLOON \
157	(GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)
158
159static void scrub_page(struct page *page)
160{
161#ifdef CONFIG_XEN_SCRUB_PAGES
162	clear_highpage(page);
163#endif
164}
165
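/*
 * Editorial aside: scrubbing here is a build-time choice
 * (CONFIG_XEN_SCRUB_PAGES); clear_highpage() zeroes each page before
 * its frame is returned to Xen so old contents cannot leak to whatever
 * domain receives the frame next. The v5.9 listing above uses
 * xenmem_reservation_scrub_page() for the same purpose instead.
 */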
166/* balloon_append: add the given page to the balloon. */
167static void __balloon_append(struct page *page)
168{
169	/* Lowmem is re-populated first, so highmem pages go at list tail. */
170	if (PageHighMem(page)) {
171		list_add_tail(&page->lru, &ballooned_pages);
172		balloon_stats.balloon_high++;
173	} else {
174		list_add(&page->lru, &ballooned_pages);
175		balloon_stats.balloon_low++;
176	}
177	wake_up(&balloon_wq);
178}
179
180static void balloon_append(struct page *page)
181{
182	__balloon_append(page);
183}
184
185/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
186static struct page *balloon_retrieve(bool require_lowmem)
187{
188	struct page *page;
189
190	if (list_empty(&ballooned_pages))
191		return NULL;
192
193	page = list_entry(ballooned_pages.next, struct page, lru);
194	if (require_lowmem && PageHighMem(page))
195		return NULL;
196	list_del(&page->lru);
197
198	if (PageHighMem(page))
199		balloon_stats.balloon_high--;
200	else
201		balloon_stats.balloon_low--;
202
203	return page;
204}
205
206static struct page *balloon_next_page(struct page *page)
207{
208	struct list_head *next = page->lru.next;
209	if (next == &ballooned_pages)
210		return NULL;
211	return list_entry(next, struct page, lru);
212}
213
214static enum bp_state update_schedule(enum bp_state state)
215{
216	if (state == BP_WAIT)
217		return BP_WAIT;
218
219	if (state == BP_ECANCELED)
220		return BP_ECANCELED;
221
222	if (state == BP_DONE) {
223		balloon_stats.schedule_delay = 1;
224		balloon_stats.retry_count = 1;
225		return BP_DONE;
226	}
227
228	++balloon_stats.retry_count;
229
230	if (balloon_stats.max_retry_count != RETRY_UNLIMITED &&
231			balloon_stats.retry_count > balloon_stats.max_retry_count) {
232		balloon_stats.schedule_delay = 1;
233		balloon_stats.retry_count = 1;
234		return BP_ECANCELED;
235	}
236
237	balloon_stats.schedule_delay <<= 1;
238
239	if (balloon_stats.schedule_delay > balloon_stats.max_schedule_delay)
240		balloon_stats.schedule_delay = balloon_stats.max_schedule_delay;
241
242	return BP_EAGAIN;
243}
244
245#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
246static void release_memory_resource(struct resource *resource)
247{
248	if (!resource)
249		return;
250
251	/*
252	 * No need to reset region to identity mapped since we now
253	 * know that no I/O can be in this region
254	 */
255	release_resource(resource);
256	kfree(resource);
257}
258
259static struct resource *additional_memory_resource(phys_addr_t size)
260{
261	struct resource *res;
262	int ret;
263
264	res = kzalloc(sizeof(*res), GFP_KERNEL);
265	if (!res)
266		return NULL;
267
268	res->name = "System RAM";
269	res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
270
271	ret = allocate_resource(&iomem_resource, res,
272				size, 0, -1,
273				PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
274	if (ret < 0) {
275		pr_err("Cannot allocate new System RAM resource\n");
276		kfree(res);
277		return NULL;
278	}
279
280#ifdef CONFIG_SPARSEMEM
281	{
282		unsigned long limit = 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT);
283		unsigned long pfn = res->start >> PAGE_SHIFT;
284
285		if (pfn > limit) {
286			pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n",
287			       pfn, limit);
288			release_memory_resource(res);
289			return NULL;
290		}
291	}
292#endif
293
294	return res;
295}
296
297static enum bp_state reserve_additional_memory(void)
298{
299	long credit;
300	struct resource *resource;
301	int nid, rc;
302	unsigned long balloon_hotplug;
303
304	credit = balloon_stats.target_pages + balloon_stats.target_unpopulated
305		- balloon_stats.total_pages;
306
307	/*
308	 * Already hotplugged enough pages?  Wait for them to be
309	 * onlined.
310	 */
311	if (credit <= 0)
312		return BP_WAIT;
313
314	balloon_hotplug = round_up(credit, PAGES_PER_SECTION);
315
316	resource = additional_memory_resource(balloon_hotplug * PAGE_SIZE);
317	if (!resource)
318		goto err;
319
320	nid = memory_add_physaddr_to_nid(resource->start);
321
322#ifdef CONFIG_XEN_HAVE_PVMMU
323	/*
324	 * We don't support PV MMU when Linux and Xen are using
325	 * different page granularity.
326	 */
327	BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
328
329	/*
330	 * add_memory() will build page tables for the new memory so
331	 * the p2m must contain invalid entries so the correct
332	 * non-present PTEs will be written.
333	 *
334	 * If a failure occurs, the original (identity) p2m entries
335	 * are not restored since this region is now known not to
336	 * conflict with any devices.
337	 */
338	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
339		unsigned long pfn, i;
340
341		pfn = PFN_DOWN(resource->start);
342		for (i = 0; i < balloon_hotplug; i++) {
343			if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
344				pr_warn("set_phys_to_machine() failed, no memory added\n");
345				goto err;
346			}
347		}
348	}
349#endif
350
351	/*
352	 * add_memory_resource() will call online_pages() which in its turn
353	 * will call xen_online_page() callback causing deadlock if we don't
354	 * release balloon_mutex here. Unlocking here is safe because the
355	 * callers drop the mutex before trying again.
356	 */
357	mutex_unlock(&balloon_mutex);
358	rc = add_memory_resource(nid, resource, memhp_auto_online);
359	mutex_lock(&balloon_mutex);
360
361	if (rc) {
362		pr_warn("Cannot add additional memory (%i)\n", rc);
363		goto err;
364	}
365
366	balloon_stats.total_pages += balloon_hotplug;
367
368	return BP_WAIT;
369  err:
370	release_memory_resource(resource);
371	return BP_ECANCELED;
372}
373
374static void xen_online_page(struct page *page)
375{
376	__online_page_set_limits(page);
377
378	mutex_lock(&balloon_mutex);
379
380	__balloon_append(page);
381
382	mutex_unlock(&balloon_mutex);
383}
384
385static int xen_memory_notifier(struct notifier_block *nb, unsigned long val, void *v)
386{
387	if (val == MEM_ONLINE)
388		schedule_delayed_work(&balloon_worker, 0);
389
390	return NOTIFY_OK;
391}
392
393static struct notifier_block xen_memory_nb = {
394	.notifier_call = xen_memory_notifier,
395	.priority = 0
396};
397#else
398static enum bp_state reserve_additional_memory(void)
399{
400	balloon_stats.target_pages = balloon_stats.current_pages;
401	return BP_ECANCELED;
402}
403#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */
404
405static long current_credit(void)
406{
407	return balloon_stats.target_pages - balloon_stats.current_pages;
408}
409
410static bool balloon_is_inflated(void)
411{
412	return balloon_stats.balloon_low || balloon_stats.balloon_high;
413}
414
415static enum bp_state increase_reservation(unsigned long nr_pages)
416{
417	int rc;
418	unsigned long i;
419	struct page   *page;
420	struct xen_memory_reservation reservation = {
421		.address_bits = 0,
422		.extent_order = EXTENT_ORDER,
423		.domid        = DOMID_SELF
424	};
425
426	if (nr_pages > ARRAY_SIZE(frame_list))
427		nr_pages = ARRAY_SIZE(frame_list);
428
429	page = list_first_entry_or_null(&ballooned_pages, struct page, lru);
430	for (i = 0; i < nr_pages; i++) {
431		if (!page) {
432			nr_pages = i;
433			break;
434		}
435
436		/* XENMEM_populate_physmap requires a PFN based on Xen
437		 * granularity.
438		 */
439		frame_list[i] = page_to_xen_pfn(page);
440		page = balloon_next_page(page);
441	}
442
443	set_xen_guest_handle(reservation.extent_start, frame_list);
444	reservation.nr_extents = nr_pages;
445	rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
446	if (rc <= 0)
447		return BP_EAGAIN;
448
449	for (i = 0; i < rc; i++) {
450		page = balloon_retrieve(false);
451		BUG_ON(page == NULL);
452
453#ifdef CONFIG_XEN_HAVE_PVMMU
454		/*
455		 * We don't support PV MMU when Linux and Xen are using
456		 * different page granularity.
457		 */
458		BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
459
460		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
461			unsigned long pfn = page_to_pfn(page);
462
463			set_phys_to_machine(pfn, frame_list[i]);
464
465			/* Link back into the page tables if not highmem. */
466			if (!PageHighMem(page)) {
467				int ret;
468				ret = HYPERVISOR_update_va_mapping(
469						(unsigned long)__va(pfn << PAGE_SHIFT),
470						mfn_pte(frame_list[i], PAGE_KERNEL),
471						0);
472				BUG_ON(ret);
473			}
474		}
475#endif
476
477		/* Relinquish the page back to the allocator. */
478		free_reserved_page(page);
479	}
480
481	balloon_stats.current_pages += rc;
482
483	return BP_DONE;
484}
485
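/*
 * Editorial aside: unlike the v5.9 listing, which hides these
 * sequences behind xenmem_reservation_increase(),
 * xenmem_reservation_decrease() and the va_mapping helpers, this
 * version's reservation paths issue the XENMEM hypercalls directly
 * and, on PV MMU guests, rewire the p2m and the kernel linear mapping
 * by hand for each frame.
 */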
486static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
487{
488	enum bp_state state = BP_DONE;
489	unsigned long i;
490	struct page *page, *tmp;
491	int ret;
492	struct xen_memory_reservation reservation = {
493		.address_bits = 0,
494		.extent_order = EXTENT_ORDER,
495		.domid        = DOMID_SELF
496	};
497	LIST_HEAD(pages);
498
499	if (nr_pages > ARRAY_SIZE(frame_list))
500		nr_pages = ARRAY_SIZE(frame_list);
501
502	for (i = 0; i < nr_pages; i++) {
503		page = alloc_page(gfp);
504		if (page == NULL) {
505			nr_pages = i;
506			state = BP_EAGAIN;
507			break;
508		}
509		adjust_managed_page_count(page, -1);
510		scrub_page(page);
511		list_add(&page->lru, &pages);
512	}
513
514	/*
515	 * Ensure that ballooned highmem pages don't have kmaps.
516	 *
517	 * Do this before changing the p2m as kmap_flush_unused()
518	 * reads PTEs to obtain pages (and hence needs the original
519	 * p2m entry).
520	 */
521	kmap_flush_unused();
522
523	/*
524	 * Setup the frame, update direct mapping, invalidate P2M,
525	 * and add to balloon.
526	 */
527	i = 0;
528	list_for_each_entry_safe(page, tmp, &pages, lru) {
529		/* XENMEM_decrease_reservation requires a GFN */
530		frame_list[i++] = xen_page_to_gfn(page);
531
532#ifdef CONFIG_XEN_HAVE_PVMMU
533		/*
534		 * We don't support PV MMU when Linux and Xen are using
535		 * different page granularity.
536		 */
537		BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
538
539		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
540			unsigned long pfn = page_to_pfn(page);
541
542			if (!PageHighMem(page)) {
543				ret = HYPERVISOR_update_va_mapping(
544						(unsigned long)__va(pfn << PAGE_SHIFT),
545						__pte_ma(0), 0);
546				BUG_ON(ret);
547			}
548			__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
549		}
550#endif
551		list_del(&page->lru);
552
553		balloon_append(page);
554	}
555
556	flush_tlb_all();
557
558	set_xen_guest_handle(reservation.extent_start, frame_list);
559	reservation.nr_extents   = nr_pages;
560	ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
561	BUG_ON(ret != nr_pages);
562
563	balloon_stats.current_pages -= nr_pages;
564
565	return state;
566}
567
568/*
569 * As this is a work item it is guaranteed to run as a single instance only.
570 * We may of course race updates of the target counts (which are protected
571 * by the balloon lock), or with changes to the Xen hard limit, but we will
572 * recover from these in time.
573 */
574static void balloon_process(struct work_struct *work)
575{
576	enum bp_state state = BP_DONE;
577	long credit;
578
579
580	do {
581		mutex_lock(&balloon_mutex);
582
583		credit = current_credit();
584
585		if (credit > 0) {
586			if (balloon_is_inflated())
587				state = increase_reservation(credit);
588			else
589				state = reserve_additional_memory();
590		}
591
592		if (credit < 0)
593			state = decrease_reservation(-credit, GFP_BALLOON);
594
595		state = update_schedule(state);
596
597		mutex_unlock(&balloon_mutex);
598
599		cond_resched();
600
601	} while (credit && state == BP_DONE);
602
603	/* Schedule more work if there is some still to be done. */
604	if (state == BP_EAGAIN)
605		schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);
606}
607
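/*
 * Editorial aside: this version passes -credit straight to
 * decrease_reservation(); the v5.9 listing above additionally clamps
 * the request to si_mem_available() and returns BP_EAGAIN when it
 * cannot reach the full target, so a large target drop degrades more
 * gracefully there.
 */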
608/* Resets the Xen limit, sets new target, and kicks off processing. */
609void balloon_set_new_target(unsigned long target)
610{
611	/* No need for lock. Not read-modify-write updates. */
612	balloon_stats.target_pages = target;
613	schedule_delayed_work(&balloon_worker, 0);
614}
615EXPORT_SYMBOL_GPL(balloon_set_new_target);
616
617static int add_ballooned_pages(int nr_pages)
618{
619	enum bp_state st;
620
621	if (xen_hotplug_unpopulated) {
622		st = reserve_additional_memory();
623		if (st != BP_ECANCELED) {
624			mutex_unlock(&balloon_mutex);
625			wait_event(balloon_wq,
626				   !list_empty(&ballooned_pages));
627			mutex_lock(&balloon_mutex);
628			return 0;
629		}
630	}
631
632	st = decrease_reservation(nr_pages, GFP_USER);
633	if (st != BP_DONE)
634		return -ENOMEM;
635
636	return 0;
637}
638
639/**
640 * alloc_xenballooned_pages - get pages that have been ballooned out
641 * @nr_pages: Number of pages to get
642 * @pages: pages returned
643 * @return 0 on success, error otherwise
644 */
645int alloc_xenballooned_pages(int nr_pages, struct page **pages)
646{
647	int pgno = 0;
648	struct page *page;
649	int ret;
650
651	mutex_lock(&balloon_mutex);
652
653	balloon_stats.target_unpopulated += nr_pages;
654
655	while (pgno < nr_pages) {
656		page = balloon_retrieve(true);
657		if (page) {
658			pages[pgno++] = page;
659#ifdef CONFIG_XEN_HAVE_PVMMU
660			/*
661			 * We don't support PV MMU when Linux and Xen are using
662			 * different page granularity.
663			 */
664			BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
665
666			ret = xen_alloc_p2m_entry(page_to_pfn(page));
667			if (ret < 0)
668				goto out_undo;
669#endif
670		} else {
671			ret = add_ballooned_pages(nr_pages - pgno);
672			if (ret < 0)
673				goto out_undo;
674		}
675	}
676	mutex_unlock(&balloon_mutex);
677	return 0;
678 out_undo:
679	mutex_unlock(&balloon_mutex);
680	free_xenballooned_pages(pgno, pages);
681	return ret;
682}
683EXPORT_SYMBOL(alloc_xenballooned_pages);
684
685/**
686 * free_xenballooned_pages - return pages retrieved with alloc_xenballooned_pages
687 * @nr_pages: Number of pages
688 * @pages: pages to return
689 */
690void free_xenballooned_pages(int nr_pages, struct page **pages)
691{
692	int i;
693
694	mutex_lock(&balloon_mutex);
695
696	for (i = 0; i < nr_pages; i++) {
697		if (pages[i])
698			balloon_append(pages[i]);
699	}
700
701	balloon_stats.target_unpopulated -= nr_pages;
702
703	/* The balloon may be too large now. Shrink it if needed. */
704	if (current_credit())
705		schedule_delayed_work(&balloon_worker, 0);
706
707	mutex_unlock(&balloon_mutex);
708}
709EXPORT_SYMBOL(free_xenballooned_pages);
710
711static void __init balloon_add_region(unsigned long start_pfn,
712				      unsigned long pages)
713{
714	unsigned long pfn, extra_pfn_end;
715	struct page *page;
716
717	/*
718	 * If the amount of usable memory has been limited (e.g., with
719	 * the 'mem' command line parameter), don't add pages beyond
720	 * this limit.
721	 */
722	extra_pfn_end = min(max_pfn, start_pfn + pages);
723
724	for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) {
725		page = pfn_to_page(pfn);
726		/* totalram_pages and totalhigh_pages do not
727		   include the boot-time balloon extension, so
728		   don't subtract from them. */
729		__balloon_append(page);
730	}
731
732	balloon_stats.total_pages += extra_pfn_end - start_pfn;
733}
734
735static int __init balloon_init(void)
736{
737	int i;
738
739	if (!xen_domain())
740		return -ENODEV;
741
742	pr_info("Initialising balloon driver\n");
743
744	balloon_stats.current_pages = xen_pv_domain()
745		? min(xen_start_info->nr_pages - xen_released_pages, max_pfn)
746		: get_num_physpages();
747	balloon_stats.target_pages  = balloon_stats.current_pages;
748	balloon_stats.balloon_low   = 0;
749	balloon_stats.balloon_high  = 0;
750	balloon_stats.total_pages   = balloon_stats.current_pages;
751
752	balloon_stats.schedule_delay = 1;
753	balloon_stats.max_schedule_delay = 32;
754	balloon_stats.retry_count = 1;
755	balloon_stats.max_retry_count = RETRY_UNLIMITED;
756
757#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
758	set_online_page_callback(&xen_online_page);
759	register_memory_notifier(&xen_memory_nb);
760	register_sysctl_table(xen_root);
761#endif
762
763	/*
764	 * Initialize the balloon with pages from the extra memory
765	 * regions (see arch/x86/xen/setup.c).
766	 */
767	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++)
768		if (xen_extra_mem[i].n_pfns)
769			balloon_add_region(xen_extra_mem[i].start_pfn,
770					   xen_extra_mem[i].n_pfns);
771
772	return 0;
773}
774subsys_initcall(balloon_init);