v4.6
  1/******************************************************************************
  2 * Xen balloon driver - enables returning/claiming memory to/from Xen.
  3 *
  4 * Copyright (c) 2003, B Dragovic
  5 * Copyright (c) 2003-2004, M Williamson, K Fraser
  6 * Copyright (c) 2005 Dan M. Smith, IBM Corporation
  7 * Copyright (c) 2010 Daniel Kiper
  8 *
  9 * Memory hotplug support was written by Daniel Kiper. Work on
 10 * it was sponsored by Google under Google Summer of Code 2010
 11 * program. Jeremy Fitzhardinge from Citrix was the mentor for
 12 * this project.
 13 *
 14 * This program is free software; you can redistribute it and/or
 15 * modify it under the terms of the GNU General Public License version 2
 16 * as published by the Free Software Foundation; or, when distributed
 17 * separately from the Linux kernel or incorporated into other
 18 * software packages, subject to the following license:
 19 *
 20 * Permission is hereby granted, free of charge, to any person obtaining a copy
 21 * of this source file (the "Software"), to deal in the Software without
 22 * restriction, including without limitation the rights to use, copy, modify,
 23 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 24 * and to permit persons to whom the Software is furnished to do so, subject to
 25 * the following conditions:
 26 *
 27 * The above copyright notice and this permission notice shall be included in
 28 * all copies or substantial portions of the Software.
 29 *
 30 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 31 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 32 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 33 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 34 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 35 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 36 * IN THE SOFTWARE.
 37 */
 38
 39#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
 40
 41#include <linux/cpu.h>
 42#include <linux/kernel.h>
 43#include <linux/sched.h>
 44#include <linux/errno.h>
 45#include <linux/mm.h>
 46#include <linux/bootmem.h>
 47#include <linux/pagemap.h>
 48#include <linux/highmem.h>
 49#include <linux/mutex.h>
 50#include <linux/list.h>
 51#include <linux/gfp.h>
 52#include <linux/notifier.h>
 53#include <linux/memory.h>
 54#include <linux/memory_hotplug.h>
 55#include <linux/percpu-defs.h>
 56#include <linux/slab.h>
 57#include <linux/sysctl.h>
 58
 59#include <asm/page.h>
 60#include <asm/pgalloc.h>
 61#include <asm/pgtable.h>
 62#include <asm/tlb.h>
 63
 64#include <asm/xen/hypervisor.h>
 65#include <asm/xen/hypercall.h>
 66
 67#include <xen/xen.h>
 68#include <xen/interface/xen.h>
 69#include <xen/interface/memory.h>
 70#include <xen/balloon.h>
 71#include <xen/features.h>
 72#include <xen/page.h>
 73
 74static int xen_hotplug_unpopulated;
 75
 76#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
 77
 78static int zero;
 79static int one = 1;
 80
 81static struct ctl_table balloon_table[] = {
 82	{
 83		.procname	= "hotplug_unpopulated",
 84		.data		= &xen_hotplug_unpopulated,
 85		.maxlen		= sizeof(int),
 86		.mode		= 0644,
 87		.proc_handler	= proc_dointvec_minmax,
 88		.extra1         = &zero,
 89		.extra2         = &one,
 90	},
 91	{ }
 92};
 93
 94static struct ctl_table balloon_root[] = {
 95	{
 96		.procname	= "balloon",
 97		.mode		= 0555,
 98		.child		= balloon_table,
 99	},
100	{ }
101};
102
103static struct ctl_table xen_root[] = {
104	{
105		.procname	= "xen",
106		.mode		= 0555,
107		.child		= balloon_root,
108	},
109	{ }
110};
111
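/*
 * Together these tables (registered in balloon_init()) expose
 * /proc/sys/xen/balloon/hotplug_unpopulated.  For example,
 * "echo 1 > /proc/sys/xen/balloon/hotplug_unpopulated" makes
 * alloc_xenballooned_pages() hotplug new unpopulated memory sections
 * instead of ballooning out existing pages.
 */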
112#endif
113
114/*
115 * Use one extent per PAGE_SIZE to avoid breaking down the page into
116 * multiple frames.
117 */
118#define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)
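/*
 * E.g. with 4 KiB kernel pages XEN_PFN_PER_PAGE is 1 and the extent
 * order is 0; a 64 KiB-page arm64 build would have XEN_PFN_PER_PAGE = 16
 * and extent order fls(16) - 1 = 4.
 */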
119
120/*
121 * balloon_process() state:
122 *
123 * BP_DONE: done or nothing to do,
124 * BP_WAIT: wait to be rescheduled,
125 * BP_EAGAIN: error, go to sleep,
126 * BP_ECANCELED: error, balloon operation canceled.
127 */
128
129enum bp_state {
130	BP_DONE,
131	BP_WAIT,
132	BP_EAGAIN,
133	BP_ECANCELED
134};
135
136
137static DEFINE_MUTEX(balloon_mutex);
138
139struct balloon_stats balloon_stats;
140EXPORT_SYMBOL_GPL(balloon_stats);
141
142/* We increase/decrease in batches which fit in a page */
143static xen_pfn_t frame_list[PAGE_SIZE / sizeof(xen_pfn_t)];
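/* E.g. 4096 / 8 = 512 frames (2 MiB of 4 KiB pages) per batch on x86-64. */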
144
145
146/* List of ballooned pages, threaded through the mem_map array. */
147static LIST_HEAD(ballooned_pages);
148static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);
149
150/* Main work function, always executed in process context. */
151static void balloon_process(struct work_struct *work);
152static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
153
154static void release_memory_resource(struct resource *resource);
155
156/* When ballooning out (allocating memory to return to Xen) we don't really
157   want the kernel to try too hard since that can trigger the oom killer. */
158#define GFP_BALLOON \
159	(GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)
160
161static void scrub_page(struct page *page)
162{
163#ifdef CONFIG_XEN_SCRUB_PAGES
164	clear_highpage(page);
165#endif
166}
167
168/* balloon_append: add the given page to the balloon. */
169static void __balloon_append(struct page *page)
170{
171	/* Lowmem is re-populated first, so highmem pages go at list tail. */
172	if (PageHighMem(page)) {
173		list_add_tail(&page->lru, &ballooned_pages);
174		balloon_stats.balloon_high++;
175	} else {
176		list_add(&page->lru, &ballooned_pages);
177		balloon_stats.balloon_low++;
178	}
179	wake_up(&balloon_wq);
180}
181
182static void balloon_append(struct page *page)
183{
184	__balloon_append(page);
185	adjust_managed_page_count(page, -1);
186}
187
188/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
189static struct page *balloon_retrieve(bool require_lowmem)
190{
191	struct page *page;
192
193	if (list_empty(&ballooned_pages))
194		return NULL;
195
196	page = list_entry(ballooned_pages.next, struct page, lru);
197	if (require_lowmem && PageHighMem(page))
198		return NULL;
199	list_del(&page->lru);
200
201	if (PageHighMem(page))
202		balloon_stats.balloon_high--;
203	else
204		balloon_stats.balloon_low--;
205
206	adjust_managed_page_count(page, 1);
207
208	return page;
209}
210
211static struct page *balloon_next_page(struct page *page)
212{
213	struct list_head *next = page->lru.next;
214	if (next == &ballooned_pages)
215		return NULL;
216	return list_entry(next, struct page, lru);
217}
218
219static enum bp_state update_schedule(enum bp_state state)
220{
221	if (state == BP_WAIT)
222		return BP_WAIT;
223
224	if (state == BP_ECANCELED)
225		return BP_ECANCELED;
226
227	if (state == BP_DONE) {
228		balloon_stats.schedule_delay = 1;
229		balloon_stats.retry_count = 1;
230		return BP_DONE;
231	}
232
233	++balloon_stats.retry_count;
234
235	if (balloon_stats.max_retry_count != RETRY_UNLIMITED &&
236			balloon_stats.retry_count > balloon_stats.max_retry_count) {
237		balloon_stats.schedule_delay = 1;
238		balloon_stats.retry_count = 1;
239		return BP_ECANCELED;
240	}
241
242	balloon_stats.schedule_delay <<= 1;
243
244	if (balloon_stats.schedule_delay > balloon_stats.max_schedule_delay)
245		balloon_stats.schedule_delay = balloon_stats.max_schedule_delay;
246
247	return BP_EAGAIN;
248}
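/*
 * With the defaults set in balloon_init() (schedule_delay = 1,
 * max_schedule_delay = 32), repeated BP_EAGAIN results retry the
 * operation after 1, 2, 4, 8, 16, 32, 32, ... seconds.
 */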
249
250#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
251static struct resource *additional_memory_resource(phys_addr_t size)
252{
253	struct resource *res;
254	int ret;
255
256	res = kzalloc(sizeof(*res), GFP_KERNEL);
257	if (!res)
258		return NULL;
259
260	res->name = "System RAM";
261	res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
262
263	ret = allocate_resource(&iomem_resource, res,
264				size, 0, -1,
265				PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
266	if (ret < 0) {
267		pr_err("Cannot allocate new System RAM resource\n");
268		kfree(res);
269		return NULL;
270	}
271
272#ifdef CONFIG_SPARSEMEM
273	{
274		unsigned long limit = 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT);
275		unsigned long pfn = res->start >> PAGE_SHIFT;
276
277		if (pfn > limit) {
278			pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n",
279			       pfn, limit);
280			release_memory_resource(res);
281			return NULL;
282		}
283	}
284#endif
285
286	return res;
287}
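/*
 * The new resource is aligned to a whole memory section
 * (PAGES_PER_SECTION * PAGE_SIZE; 128 MiB on x86-64) so that it can be
 * hotplugged with add_memory_resource() below.
 */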
288
289static void release_memory_resource(struct resource *resource)
290{
291	if (!resource)
292		return;
293
294	/*
295	 * No need to reset region to identity mapped since we now
296	 * know that no I/O can be in this region
297	 */
298	release_resource(resource);
299	kfree(resource);
300}
301
302static enum bp_state reserve_additional_memory(void)
303{
304	long credit;
305	struct resource *resource;
306	int nid, rc;
307	unsigned long balloon_hotplug;
308
309	credit = balloon_stats.target_pages + balloon_stats.target_unpopulated
310		- balloon_stats.total_pages;
311
312	/*
313	 * Already hotplugged enough pages?  Wait for them to be
314	 * onlined.
315	 */
316	if (credit <= 0)
317		return BP_WAIT;
318
319	balloon_hotplug = round_up(credit, PAGES_PER_SECTION);
320
321	resource = additional_memory_resource(balloon_hotplug * PAGE_SIZE);
322	if (!resource)
323		goto err;
324
325	nid = memory_add_physaddr_to_nid(resource->start);
326
327#ifdef CONFIG_XEN_HAVE_PVMMU
328	/*
329	 * We don't support PV MMU when Linux and Xen are using
330	 * different page granularities.
331	 */
332	BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
333
334	/*
335	 * add_memory() will build page tables for the new memory so
336	 * the p2m must contain invalid entries so the correct
337	 * non-present PTEs will be written.
338	 *
339	 * If a failure occurs, the original (identity) p2m entries
340	 * are not restored since this region is now known not to
341	 * conflict with any devices.
342	 */
343	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
344		unsigned long pfn, i;
345
346		pfn = PFN_DOWN(resource->start);
347		for (i = 0; i < balloon_hotplug; i++) {
348			if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
349				pr_warn("set_phys_to_machine() failed, no memory added\n");
350				goto err;
351			}
352		}
353	}
354#endif
355
356	/*
357	 * add_memory_resource() will call online_pages() which in its turn
358	 * will call xen_online_page() callback causing deadlock if we don't
359	 * release balloon_mutex here. Unlocking here is safe because the
360	 * callers drop the mutex before trying again.
361	 */
362	mutex_unlock(&balloon_mutex);
363	rc = add_memory_resource(nid, resource, memhp_auto_online);
364	mutex_lock(&balloon_mutex);
365
366	if (rc) {
367		pr_warn("Cannot add additional memory (%i)\n", rc);
368		goto err;
369	}
370
371	balloon_stats.total_pages += balloon_hotplug;
372
373	return BP_WAIT;
374  err:
375	release_memory_resource(resource);
376	return BP_ECANCELED;
377}
378
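/*
 * Pages from a section hotplugged above arrive here once user space (or
 * memhp_auto_online) onlines them.  They are parked in the balloon with
 * no backing frames yet; a later increase_reservation() asks Xen to
 * populate them and only then releases them to the page allocator.
 */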
379static void xen_online_page(struct page *page)
380{
381	__online_page_set_limits(page);
382
383	mutex_lock(&balloon_mutex);
384
385	__balloon_append(page);
386
387	mutex_unlock(&balloon_mutex);
388}
389
390static int xen_memory_notifier(struct notifier_block *nb, unsigned long val, void *v)
391{
392	if (val == MEM_ONLINE)
393		schedule_delayed_work(&balloon_worker, 0);
394
395	return NOTIFY_OK;
396}
397
398static struct notifier_block xen_memory_nb = {
399	.notifier_call = xen_memory_notifier,
400	.priority = 0
401};
402#else
403static enum bp_state reserve_additional_memory(void)
404{
405	balloon_stats.target_pages = balloon_stats.current_pages;
406	return BP_ECANCELED;
407}
408#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */
409
410static long current_credit(void)
411{
412	return balloon_stats.target_pages - balloon_stats.current_pages;
413}
414
415static bool balloon_is_inflated(void)
416{
417	return balloon_stats.balloon_low || balloon_stats.balloon_high;
418}
419
420static enum bp_state increase_reservation(unsigned long nr_pages)
421{
422	int rc;
423	unsigned long i;
424	struct page   *page;
425	struct xen_memory_reservation reservation = {
426		.address_bits = 0,
427		.extent_order = EXTENT_ORDER,
428		.domid        = DOMID_SELF
429	};
430
431	if (nr_pages > ARRAY_SIZE(frame_list))
432		nr_pages = ARRAY_SIZE(frame_list);
433
434	page = list_first_entry_or_null(&ballooned_pages, struct page, lru);
435	for (i = 0; i < nr_pages; i++) {
436		if (!page) {
437			nr_pages = i;
438			break;
439		}
440
441		/* XENMEM_populate_physmap requires a PFN based on Xen
442		 * granularity.
443		 */
444		frame_list[i] = page_to_xen_pfn(page);
445		page = balloon_next_page(page);
446	}
447
448	set_xen_guest_handle(reservation.extent_start, frame_list);
449	reservation.nr_extents = nr_pages;
450	rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
451	if (rc <= 0)
452		return BP_EAGAIN;
453
454	for (i = 0; i < rc; i++) {
455		page = balloon_retrieve(false);
456		BUG_ON(page == NULL);
457
458#ifdef CONFIG_XEN_HAVE_PVMMU
459		/*
460		 * We don't support PV MMU when Linux and Xen are using
461		 * different page granularities.
462		 */
463		BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
464
465		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
466			unsigned long pfn = page_to_pfn(page);
467
468			set_phys_to_machine(pfn, frame_list[i]);
469
470			/* Link back into the page tables if not highmem. */
471			if (!PageHighMem(page)) {
472				int ret;
473				ret = HYPERVISOR_update_va_mapping(
474						(unsigned long)__va(pfn << PAGE_SHIFT),
475						mfn_pte(frame_list[i], PAGE_KERNEL),
476						0);
477				BUG_ON(ret);
478			}
479		}
480#endif
481
482		/* Relinquish the page back to the allocator. */
483		__free_reserved_page(page);
484	}
485
486	balloon_stats.current_pages += rc;
487
488	return BP_DONE;
489}
490
491static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
492{
493	enum bp_state state = BP_DONE;
494	unsigned long i;
495	struct page *page, *tmp;
496	int ret;
497	struct xen_memory_reservation reservation = {
498		.address_bits = 0,
499		.extent_order = EXTENT_ORDER,
500		.domid        = DOMID_SELF
501	};
502	LIST_HEAD(pages);
503
504	if (nr_pages > ARRAY_SIZE(frame_list))
505		nr_pages = ARRAY_SIZE(frame_list);
506
507	for (i = 0; i < nr_pages; i++) {
508		page = alloc_page(gfp);
509		if (page == NULL) {
510			nr_pages = i;
511			state = BP_EAGAIN;
512			break;
513		}
514		scrub_page(page);
515		list_add(&page->lru, &pages);
516	}
517
518	/*
519	 * Ensure that ballooned highmem pages don't have kmaps.
520	 *
521	 * Do this before changing the p2m as kmap_flush_unused()
522	 * reads PTEs to obtain pages (and hence needs the original
523	 * p2m entry).
524	 */
525	kmap_flush_unused();
526
527	/*
528	 * Setup the frame, update direct mapping, invalidate P2M,
529	 * and add to balloon.
530	 */
531	i = 0;
532	list_for_each_entry_safe(page, tmp, &pages, lru) {
533		/* XENMEM_decrease_reservation requires a GFN */
534		frame_list[i++] = xen_page_to_gfn(page);
535
536#ifdef CONFIG_XEN_HAVE_PVMMU
537		/*
538		 * We don't support PV MMU when Linux and Xen are using
539		 * different page granularities.
540		 */
541		BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
542
543		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
544			unsigned long pfn = page_to_pfn(page);
545
546			if (!PageHighMem(page)) {
547				ret = HYPERVISOR_update_va_mapping(
548						(unsigned long)__va(pfn << PAGE_SHIFT),
549						__pte_ma(0), 0);
550				BUG_ON(ret);
551			}
552			__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
553		}
554#endif
555		list_del(&page->lru);
556
557		balloon_append(page);
558	}
559
560	flush_tlb_all();
561
562	set_xen_guest_handle(reservation.extent_start, frame_list);
563	reservation.nr_extents   = nr_pages;
564	ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
565	BUG_ON(ret != nr_pages);
566
567	balloon_stats.current_pages -= nr_pages;
568
569	return state;
570}
571
572/*
573 * As this is a work item it is guaranteed to run as a single instance only.
574 * We may of course race updates of the target counts (which are protected
575 * by the balloon lock), or with changes to the Xen hard limit, but we will
576 * recover from these in time.
577 */
578static void balloon_process(struct work_struct *work)
579{
580	enum bp_state state = BP_DONE;
581	long credit;
582
583
584	do {
585		mutex_lock(&balloon_mutex);
586
587		credit = current_credit();
588
589		if (credit > 0) {
590			if (balloon_is_inflated())
591				state = increase_reservation(credit);
592			else
593				state = reserve_additional_memory();
594		}
595
596		if (credit < 0)
597			state = decrease_reservation(-credit, GFP_BALLOON);
598
599		state = update_schedule(state);
600
601		mutex_unlock(&balloon_mutex);
602
603		cond_resched();
604
605	} while (credit && state == BP_DONE);
606
607	/* Schedule more work if there is some still to be done. */
608	if (state == BP_EAGAIN)
609		schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);
610}
611
612/* Resets the Xen limit, sets new target, and kicks off processing. */
613void balloon_set_new_target(unsigned long target)
614{
615	/* No need for lock. Not read-modify-write updates. */
616	balloon_stats.target_pages = target;
617	schedule_delayed_work(&balloon_worker, 0);
618}
619EXPORT_SYMBOL_GPL(balloon_set_new_target);
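/*
 * This is normally invoked from the xenstore watch on memory/target
 * (see drivers/xen/xen-balloon.c), e.g. after the toolstack runs
 * something like "xl mem-set <domain> 1024".
 */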
620
621static int add_ballooned_pages(int nr_pages)
622{
623	enum bp_state st;
624
625	if (xen_hotplug_unpopulated) {
626		st = reserve_additional_memory();
627		if (st != BP_ECANCELED) {
628			mutex_unlock(&balloon_mutex);
629			wait_event(balloon_wq,
630				   !list_empty(&ballooned_pages));
631			mutex_lock(&balloon_mutex);
632			return 0;
633		}
634	}
635
636	st = decrease_reservation(nr_pages, GFP_USER);
637	if (st != BP_DONE)
638		return -ENOMEM;
639
640	return 0;
641}
642
643/**
644 * alloc_xenballooned_pages - get pages that have been ballooned out
645 * @nr_pages: Number of pages to get
646 * @pages: pages returned
647 * @return 0 on success, error otherwise
648 */
649int alloc_xenballooned_pages(int nr_pages, struct page **pages)
650{
651	int pgno = 0;
652	struct page *page;
653	int ret;
654
655	mutex_lock(&balloon_mutex);
656
657	balloon_stats.target_unpopulated += nr_pages;
658
659	while (pgno < nr_pages) {
660		page = balloon_retrieve(true);
661		if (page) {
662			pages[pgno++] = page;
663#ifdef CONFIG_XEN_HAVE_PVMMU
664			/*
665			 * We don't support PV MMU when Linux and Xen are using
666			 * different page granularities.
667			 */
668			BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
669
670			ret = xen_alloc_p2m_entry(page_to_pfn(page));
671			if (ret < 0)
672				goto out_undo;
673#endif
674		} else {
675			ret = add_ballooned_pages(nr_pages - pgno);
676			if (ret < 0)
677				goto out_undo;
678		}
679	}
680	mutex_unlock(&balloon_mutex);
681	return 0;
682 out_undo:
683	mutex_unlock(&balloon_mutex);
684	free_xenballooned_pages(pgno, pages);
685	return ret;
686}
687EXPORT_SYMBOL(alloc_xenballooned_pages);
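/*
 * Usage sketch (hypothetical caller): grant-mapping code, e.g. via
 * gnttab_alloc_pages(), uses these helpers to obtain frames that are
 * empty from Xen's point of view:
 *
 *	struct page *pages[8];
 *	int rc = alloc_xenballooned_pages(8, pages);
 *
 *	if (rc)
 *		return rc;
 *	... map foreign grants onto pages ...
 *	free_xenballooned_pages(8, pages);
 */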
688
689/**
690 * free_xenballooned_pages - return pages retrieved with alloc_xenballooned_pages
691 * @nr_pages: Number of pages
692 * @pages: pages to return
693 */
694void free_xenballooned_pages(int nr_pages, struct page **pages)
695{
696	int i;
697
698	mutex_lock(&balloon_mutex);
699
700	for (i = 0; i < nr_pages; i++) {
701		if (pages[i])
702			balloon_append(pages[i]);
703	}
704
705	balloon_stats.target_unpopulated -= nr_pages;
706
707	/* The balloon may be too large now. Shrink it if needed. */
708	if (current_credit())
709		schedule_delayed_work(&balloon_worker, 0);
710
711	mutex_unlock(&balloon_mutex);
712}
713EXPORT_SYMBOL(free_xenballooned_pages);
714
715static void __init balloon_add_region(unsigned long start_pfn,
716				      unsigned long pages)
717{
718	unsigned long pfn, extra_pfn_end;
719	struct page *page;
720
721	/*
722	 * If the amount of usable memory has been limited (e.g., with
723	 * the 'mem' command line parameter), don't add pages beyond
724	 * this limit.
725	 */
726	extra_pfn_end = min(max_pfn, start_pfn + pages);
727
728	for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) {
729		page = pfn_to_page(pfn);
730		/* totalram_pages and totalhigh_pages do not
731		   include the boot-time balloon extension, so
732		   don't subtract from it. */
733		__balloon_append(page);
734	}
735
736	balloon_stats.total_pages += extra_pfn_end - start_pfn;
737}
738
739static int __init balloon_init(void)
740{
741	int i;
742
743	if (!xen_domain())
744		return -ENODEV;
745
746	pr_info("Initialising balloon driver\n");
747
748	balloon_stats.current_pages = xen_pv_domain()
749		? min(xen_start_info->nr_pages - xen_released_pages, max_pfn)
750		: get_num_physpages();
751	balloon_stats.target_pages  = balloon_stats.current_pages;
752	balloon_stats.balloon_low   = 0;
753	balloon_stats.balloon_high  = 0;
754	balloon_stats.total_pages   = balloon_stats.current_pages;
755
756	balloon_stats.schedule_delay = 1;
757	balloon_stats.max_schedule_delay = 32;
758	balloon_stats.retry_count = 1;
759	balloon_stats.max_retry_count = RETRY_UNLIMITED;
760
761#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
762	set_online_page_callback(&xen_online_page);
763	register_memory_notifier(&xen_memory_nb);
764	register_sysctl_table(xen_root);
765#endif
766
767	/*
768	 * Initialize the balloon with pages from the extra memory
769	 * regions (see arch/x86/xen/setup.c).
770	 */
771	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++)
772		if (xen_extra_mem[i].n_pfns)
773			balloon_add_region(xen_extra_mem[i].start_pfn,
774					   xen_extra_mem[i].n_pfns);
775
776	return 0;
777}
778subsys_initcall(balloon_init);
v6.2
  1/******************************************************************************
  2 * Xen balloon driver - enables returning/claiming memory to/from Xen.
  3 *
  4 * Copyright (c) 2003, B Dragovic
  5 * Copyright (c) 2003-2004, M Williamson, K Fraser
  6 * Copyright (c) 2005 Dan M. Smith, IBM Corporation
  7 * Copyright (c) 2010 Daniel Kiper
  8 *
  9 * Memory hotplug support was written by Daniel Kiper. Work on
 10 * it was sponsored by Google under Google Summer of Code 2010
 11 * program. Jeremy Fitzhardinge from Citrix was the mentor for
 12 * this project.
 13 *
 14 * This program is free software; you can redistribute it and/or
 15 * modify it under the terms of the GNU General Public License version 2
 16 * as published by the Free Software Foundation; or, when distributed
 17 * separately from the Linux kernel or incorporated into other
 18 * software packages, subject to the following license:
 19 *
 20 * Permission is hereby granted, free of charge, to any person obtaining a copy
 21 * of this source file (the "Software"), to deal in the Software without
 22 * restriction, including without limitation the rights to use, copy, modify,
 23 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 24 * and to permit persons to whom the Software is furnished to do so, subject to
 25 * the following conditions:
 26 *
 27 * The above copyright notice and this permission notice shall be included in
 28 * all copies or substantial portions of the Software.
 29 *
 30 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 31 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 32 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 33 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 34 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 35 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 36 * IN THE SOFTWARE.
 37 */
 38
 39#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
 40
 41#include <linux/cpu.h>
 42#include <linux/kernel.h>
 43#include <linux/sched.h>
 44#include <linux/cred.h>
 45#include <linux/errno.h>
 46#include <linux/freezer.h>
 47#include <linux/kthread.h>
 48#include <linux/mm.h>
 49#include <linux/memblock.h>
 50#include <linux/pagemap.h>
 51#include <linux/highmem.h>
 52#include <linux/mutex.h>
 53#include <linux/list.h>
 54#include <linux/gfp.h>
 55#include <linux/notifier.h>
 56#include <linux/memory.h>
 57#include <linux/memory_hotplug.h>
 58#include <linux/percpu-defs.h>
 59#include <linux/slab.h>
 60#include <linux/sysctl.h>
 61#include <linux/moduleparam.h>
 62#include <linux/jiffies.h>
 63
 64#include <asm/page.h>
 65#include <asm/tlb.h>
 66
 67#include <asm/xen/hypervisor.h>
 68#include <asm/xen/hypercall.h>
 69
 70#include <xen/xen.h>
 71#include <xen/interface/xen.h>
 72#include <xen/interface/memory.h>
 73#include <xen/balloon.h>
 74#include <xen/features.h>
 75#include <xen/page.h>
 76#include <xen/mem-reservation.h>
 77
 78#undef MODULE_PARAM_PREFIX
 79#define MODULE_PARAM_PREFIX "xen."
 80
 81static uint __read_mostly balloon_boot_timeout = 180;
 82module_param(balloon_boot_timeout, uint, 0444);
 83
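/*
 * With MODULE_PARAM_PREFIX "xen." this can be set on the kernel command
 * line, e.g. "xen.balloon_boot_timeout=300".  balloon_wait_finish()
 * below panics if initial ballooning has failed and made no progress
 * for this many seconds.
 */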
 84#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
 85static int xen_hotplug_unpopulated;
 86
 87static struct ctl_table balloon_table[] = {
 88	{
 89		.procname	= "hotplug_unpopulated",
 90		.data		= &xen_hotplug_unpopulated,
 91		.maxlen		= sizeof(int),
 92		.mode		= 0644,
 93		.proc_handler	= proc_dointvec_minmax,
 94		.extra1         = SYSCTL_ZERO,
 95		.extra2         = SYSCTL_ONE,
 96	},
 97	{ }
 98};
 99
100static struct ctl_table balloon_root[] = {
101	{
102		.procname	= "balloon",
103		.mode		= 0555,
104		.child		= balloon_table,
105	},
106	{ }
107};
108
109static struct ctl_table xen_root[] = {
110	{
111		.procname	= "xen",
112		.mode		= 0555,
113		.child		= balloon_root,
114	},
115	{ }
116};
117
118#else
119#define xen_hotplug_unpopulated 0
120#endif
121
122/*
123 * Use one extent per PAGE_SIZE to avoid breaking down the page into
124 * multiple frames.
125 */
126#define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)
127
128/*
129 * balloon_thread() state:
130 *
131 * BP_DONE: done or nothing to do,
132 * BP_WAIT: wait to be rescheduled,
133 * BP_EAGAIN: error, go to sleep,
134 * BP_ECANCELED: error, balloon operation canceled.
135 */
136
137static enum bp_state {
138	BP_DONE,
139	BP_WAIT,
140	BP_EAGAIN,
141	BP_ECANCELED
142} balloon_state = BP_DONE;
143
144/* Main waiting point for xen-balloon thread. */
145static DECLARE_WAIT_QUEUE_HEAD(balloon_thread_wq);
146
147static DEFINE_MUTEX(balloon_mutex);
148
149struct balloon_stats balloon_stats;
150EXPORT_SYMBOL_GPL(balloon_stats);
151
152/* We increase/decrease in batches which fit in a page */
153static xen_pfn_t frame_list[PAGE_SIZE / sizeof(xen_pfn_t)];
154
155
156/* List of ballooned pages, threaded through the mem_map array. */
157static LIST_HEAD(ballooned_pages);
158static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);
159
160/* When ballooning out (allocating memory to return to Xen) we don't really
161   want the kernel to try too hard since that can trigger the oom killer. */
162#define GFP_BALLOON \
163	(GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)
164
165/* balloon_append: add the given page to the balloon. */
166static void balloon_append(struct page *page)
167{
168	__SetPageOffline(page);
169
170	/* Lowmem is re-populated first, so highmem pages go at list tail. */
171	if (PageHighMem(page)) {
172		list_add_tail(&page->lru, &ballooned_pages);
173		balloon_stats.balloon_high++;
174	} else {
175		list_add(&page->lru, &ballooned_pages);
176		balloon_stats.balloon_low++;
177	}
178	wake_up(&balloon_wq);
179}
180
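/*
 * balloon_append() marks pages PG_offline so that e.g. dump tools such
 * as makedumpfile know their contents are stale and can be skipped.
 */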
181/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
182static struct page *balloon_retrieve(bool require_lowmem)
183{
184	struct page *page;
185
186	if (list_empty(&ballooned_pages))
187		return NULL;
188
189	page = list_entry(ballooned_pages.next, struct page, lru);
190	if (require_lowmem && PageHighMem(page))
191		return NULL;
192	list_del(&page->lru);
193
194	if (PageHighMem(page))
195		balloon_stats.balloon_high--;
196	else
197		balloon_stats.balloon_low--;
198
199	__ClearPageOffline(page);
200	return page;
201}
202
203static struct page *balloon_next_page(struct page *page)
204{
205	struct list_head *next = page->lru.next;
206	if (next == &ballooned_pages)
207		return NULL;
208	return list_entry(next, struct page, lru);
209}
210
211static void update_schedule(void)
212{
213	if (balloon_state == BP_WAIT || balloon_state == BP_ECANCELED)
214		return;
215
216	if (balloon_state == BP_DONE) {
217		balloon_stats.schedule_delay = 1;
218		balloon_stats.retry_count = 1;
219		return;
220	}
221
222	++balloon_stats.retry_count;
223
224	if (balloon_stats.max_retry_count != RETRY_UNLIMITED &&
225			balloon_stats.retry_count > balloon_stats.max_retry_count) {
226		balloon_stats.schedule_delay = 1;
227		balloon_stats.retry_count = 1;
228		balloon_state = BP_ECANCELED;
229		return;
230	}
231
232	balloon_stats.schedule_delay <<= 1;
233
234	if (balloon_stats.schedule_delay > balloon_stats.max_schedule_delay)
235		balloon_stats.schedule_delay = balloon_stats.max_schedule_delay;
236
237	balloon_state = BP_EAGAIN;
238}
239
240#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
241static void release_memory_resource(struct resource *resource)
242{
243	if (!resource)
244		return;
245
246	/*
247	 * No need to reset region to identity mapped since we now
248	 * know that no I/O can be in this region
249	 */
250	release_resource(resource);
251	kfree(resource);
252}
253
254static struct resource *additional_memory_resource(phys_addr_t size)
255{
256	struct resource *res;
257	int ret;
258
259	res = kzalloc(sizeof(*res), GFP_KERNEL);
260	if (!res)
261		return NULL;
262
263	res->name = "System RAM";
264	res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
265
266	ret = allocate_resource(&iomem_resource, res,
267				size, 0, -1,
268				PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
269	if (ret < 0) {
270		pr_err("Cannot allocate new System RAM resource\n");
271		kfree(res);
272		return NULL;
273	}
274
275	return res;
276}
277
278static enum bp_state reserve_additional_memory(void)
279{
280	long credit;
281	struct resource *resource;
282	int nid, rc;
283	unsigned long balloon_hotplug;
284
285	credit = balloon_stats.target_pages + balloon_stats.target_unpopulated
286		- balloon_stats.total_pages;
287
288	/*
289	 * Already hotplugged enough pages?  Wait for them to be
290	 * onlined.
291	 */
292	if (credit <= 0)
293		return BP_WAIT;
294
295	balloon_hotplug = round_up(credit, PAGES_PER_SECTION);
296
297	resource = additional_memory_resource(balloon_hotplug * PAGE_SIZE);
298	if (!resource)
299		goto err;
300
301	nid = memory_add_physaddr_to_nid(resource->start);
302
303#ifdef CONFIG_XEN_HAVE_PVMMU
304	/*
305	 * We don't support PV MMU when Linux and Xen are using
306	 * different page granularities.
307	 */
308	BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
309
310	/*
311	 * add_memory() will build page tables for the new memory so
312	 * the p2m must contain invalid entries so the correct
313	 * non-present PTEs will be written.
314	 *
315	 * If a failure occurs, the original (identity) p2m entries
316	 * are not restored since this region is now known not to
317	 * conflict with any devices.
318	 */
319	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
320		unsigned long pfn, i;
321
322		pfn = PFN_DOWN(resource->start);
323		for (i = 0; i < balloon_hotplug; i++) {
324			if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
325				pr_warn("set_phys_to_machine() failed, no memory added\n");
326				goto err;
327			}
328		}
329	}
330#endif
331
332	/*
333	 * add_memory_resource() will call online_pages() which in its turn
334	 * will call xen_online_page() callback causing deadlock if we don't
335	 * release balloon_mutex here. Unlocking here is safe because the
336	 * callers drop the mutex before trying again.
337	 */
338	mutex_unlock(&balloon_mutex);
339	/* add_memory_resource() requires the device_hotplug lock */
340	lock_device_hotplug();
341	rc = add_memory_resource(nid, resource, MHP_MERGE_RESOURCE);
342	unlock_device_hotplug();
343	mutex_lock(&balloon_mutex);
344
345	if (rc) {
346		pr_warn("Cannot add additional memory (%i)\n", rc);
347		goto err;
348	}
349
350	balloon_stats.total_pages += balloon_hotplug;
351
352	return BP_WAIT;
353  err:
354	release_memory_resource(resource);
355	return BP_ECANCELED;
356}
357
358static void xen_online_page(struct page *page, unsigned int order)
359{
360	unsigned long i, size = (1 << order);
361	unsigned long start_pfn = page_to_pfn(page);
362	struct page *p;
363
364	pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn);
365	mutex_lock(&balloon_mutex);
366	for (i = 0; i < size; i++) {
367		p = pfn_to_page(start_pfn + i);
368		balloon_append(p);
369	}
370	mutex_unlock(&balloon_mutex);
371}
372
373static int xen_memory_notifier(struct notifier_block *nb, unsigned long val, void *v)
374{
375	if (val == MEM_ONLINE)
376		wake_up(&balloon_thread_wq);
377
378	return NOTIFY_OK;
379}
380
381static struct notifier_block xen_memory_nb = {
382	.notifier_call = xen_memory_notifier,
383	.priority = 0
384};
385#else
386static enum bp_state reserve_additional_memory(void)
387{
388	balloon_stats.target_pages = balloon_stats.current_pages +
389				     balloon_stats.target_unpopulated;
390	return BP_ECANCELED;
391}
392#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */
393
394static long current_credit(void)
395{
396	return balloon_stats.target_pages - balloon_stats.current_pages;
397}
398
399static bool balloon_is_inflated(void)
400{
401	return balloon_stats.balloon_low || balloon_stats.balloon_high;
402}
403
404static enum bp_state increase_reservation(unsigned long nr_pages)
405{
406	int rc;
407	unsigned long i;
408	struct page   *page;
409
410	if (nr_pages > ARRAY_SIZE(frame_list))
411		nr_pages = ARRAY_SIZE(frame_list);
412
413	page = list_first_entry_or_null(&ballooned_pages, struct page, lru);
414	for (i = 0; i < nr_pages; i++) {
415		if (!page) {
416			nr_pages = i;
417			break;
418		}
419
420		frame_list[i] = page_to_xen_pfn(page);
421		page = balloon_next_page(page);
422	}
423
424	rc = xenmem_reservation_increase(nr_pages, frame_list);
425	if (rc <= 0)
426		return BP_EAGAIN;
427
428	for (i = 0; i < rc; i++) {
429		page = balloon_retrieve(false);
430		BUG_ON(page == NULL);
431
432		xenmem_reservation_va_mapping_update(1, &page, &frame_list[i]);
433
434		/* Relinquish the page back to the allocator. */
435		free_reserved_page(page);
436	}
437
438	balloon_stats.current_pages += rc;
439
440	return BP_DONE;
441}
442
443static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
444{
445	enum bp_state state = BP_DONE;
446	unsigned long i;
447	struct page *page, *tmp;
448	int ret;
449	LIST_HEAD(pages);
450
451	if (nr_pages > ARRAY_SIZE(frame_list))
452		nr_pages = ARRAY_SIZE(frame_list);
453
454	for (i = 0; i < nr_pages; i++) {
455		page = alloc_page(gfp);
456		if (page == NULL) {
457			nr_pages = i;
458			state = BP_EAGAIN;
459			break;
460		}
461		adjust_managed_page_count(page, -1);
462		xenmem_reservation_scrub_page(page);
463		list_add(&page->lru, &pages);
464	}
465
466	/*
467	 * Ensure that ballooned highmem pages don't have kmaps.
468	 *
469	 * Do this before changing the p2m as kmap_flush_unused()
470	 * reads PTEs to obtain pages (and hence needs the original
471	 * p2m entry).
472	 */
473	kmap_flush_unused();
474
475	/*
476	 * Setup the frame, update direct mapping, invalidate P2M,
477	 * and add to balloon.
478	 */
479	i = 0;
480	list_for_each_entry_safe(page, tmp, &pages, lru) {
481		frame_list[i++] = xen_page_to_gfn(page);
482
483		xenmem_reservation_va_mapping_reset(1, &page);
484
485		list_del(&page->lru);
486
487		balloon_append(page);
488	}
489
490	flush_tlb_all();
491
492	ret = xenmem_reservation_decrease(nr_pages, frame_list);
493	BUG_ON(ret != nr_pages);
494
495	balloon_stats.current_pages -= nr_pages;
496
497	return state;
498}
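/*
 * The xenmem_reservation_*() helpers (xen/mem-reservation.h) used above
 * wrap the XENMEM_populate_physmap / XENMEM_decrease_reservation
 * hypercalls together with the PV-MMU p2m and direct-map fixups that
 * older kernels open-coded here.
 */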
499
500/*
501 * Stop waiting if either state is BP_DONE and ballooning action is
502 * needed, or if the credit has changed while state is not BP_DONE.
503 */
504static bool balloon_thread_cond(long credit)
505{
506	if (balloon_state == BP_DONE)
507		credit = 0;
508
509	return current_credit() != credit || kthread_should_stop();
510}
511
512/*
513 * As this is a kthread it is guaranteed to run as a single instance only.
514 * We may of course race updates of the target counts (which are protected
515 * by the balloon lock), or with changes to the Xen hard limit, but we will
516 * recover from these in time.
517 */
518static int balloon_thread(void *unused)
519{
520	long credit;
521	unsigned long timeout;
522
523	set_freezable();
524	for (;;) {
525		switch (balloon_state) {
526		case BP_DONE:
527		case BP_ECANCELED:
528			timeout = 3600 * HZ;
529			break;
530		case BP_EAGAIN:
531			timeout = balloon_stats.schedule_delay * HZ;
532			break;
533		case BP_WAIT:
534			timeout = HZ;
535			break;
536		}
537
538		credit = current_credit();
539
540		wait_event_freezable_timeout(balloon_thread_wq,
541			balloon_thread_cond(credit), timeout);
542
543		if (kthread_should_stop())
544			return 0;
545
546		mutex_lock(&balloon_mutex);
547
548		credit = current_credit();
549
550		if (credit > 0) {
551			if (balloon_is_inflated())
552				balloon_state = increase_reservation(credit);
553			else
554				balloon_state = reserve_additional_memory();
555		}
556
557		if (credit < 0) {
558			long n_pages;
559
560			n_pages = min(-credit, si_mem_available());
561			balloon_state = decrease_reservation(n_pages,
562							     GFP_BALLOON);
563			if (balloon_state == BP_DONE && n_pages != -credit &&
564			    n_pages < totalreserve_pages)
565				balloon_state = BP_EAGAIN;
566		}
567
568		update_schedule();
569
570		mutex_unlock(&balloon_mutex);
571
572		cond_resched();
573	}
574}
575
576/* Resets the Xen limit, sets new target, and kicks off processing. */
577void balloon_set_new_target(unsigned long target)
578{
579	/* No need for lock. Not read-modify-write updates. */
580	balloon_stats.target_pages = target;
581	wake_up(&balloon_thread_wq);
582}
583EXPORT_SYMBOL_GPL(balloon_set_new_target);
584
585static int add_ballooned_pages(unsigned int nr_pages)
586{
587	enum bp_state st;
588
589	if (xen_hotplug_unpopulated) {
590		st = reserve_additional_memory();
591		if (st != BP_ECANCELED) {
592			int rc;
593
594			mutex_unlock(&balloon_mutex);
595			rc = wait_event_interruptible(balloon_wq,
596				   !list_empty(&ballooned_pages));
597			mutex_lock(&balloon_mutex);
598			return rc ? -ENOMEM : 0;
599		}
600	}
601
602	if (si_mem_available() < nr_pages)
603		return -ENOMEM;
604
605	st = decrease_reservation(nr_pages, GFP_USER);
606	if (st != BP_DONE)
607		return -ENOMEM;
608
609	return 0;
610}
611
612/**
613 * xen_alloc_ballooned_pages - get pages that have been ballooned out
614 * @nr_pages: Number of pages to get
615 * @pages: pages returned
616 * @return 0 on success, error otherwise
617 */
618int xen_alloc_ballooned_pages(unsigned int nr_pages, struct page **pages)
619{
620	unsigned int pgno = 0;
621	struct page *page;
622	int ret;
623
624	mutex_lock(&balloon_mutex);
625
626	balloon_stats.target_unpopulated += nr_pages;
627
628	while (pgno < nr_pages) {
629		page = balloon_retrieve(true);
630		if (page) {
631			pages[pgno++] = page;
632#ifdef CONFIG_XEN_HAVE_PVMMU
633			/*
634			 * We don't support PV MMU when Linux and Xen are using
635			 * different page granularities.
636			 */
637			BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
638
639			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
640				ret = xen_alloc_p2m_entry(page_to_pfn(page));
641				if (ret < 0)
642					goto out_undo;
643			}
644#endif
645		} else {
646			ret = add_ballooned_pages(nr_pages - pgno);
647			if (ret < 0)
648				goto out_undo;
649		}
650	}
651	mutex_unlock(&balloon_mutex);
652	return 0;
653 out_undo:
654	mutex_unlock(&balloon_mutex);
655	xen_free_ballooned_pages(pgno, pages);
656	/*
657	 * NB: xen_free_ballooned_pages will only subtract pgno pages, but since
658	 * target_unpopulated is incremented with nr_pages at the start we need
659	 * to remove the remaining ones also, or accounting will be screwed.
660	 */
661	balloon_stats.target_unpopulated -= nr_pages - pgno;
662	return ret;
663}
664EXPORT_SYMBOL(xen_alloc_ballooned_pages);
665
666/**
667 * xen_free_ballooned_pages - return pages retrieved with xen_alloc_ballooned_pages
668 * @nr_pages: Number of pages
669 * @pages: pages to return
670 */
671void xen_free_ballooned_pages(unsigned int nr_pages, struct page **pages)
672{
673	unsigned int i;
674
675	mutex_lock(&balloon_mutex);
676
677	for (i = 0; i < nr_pages; i++) {
678		if (pages[i])
679			balloon_append(pages[i]);
680	}
681
682	balloon_stats.target_unpopulated -= nr_pages;
683
684	/* The balloon may be too large now. Shrink it if needed. */
685	if (current_credit())
686		wake_up(&balloon_thread_wq);
687
688	mutex_unlock(&balloon_mutex);
689}
690EXPORT_SYMBOL(xen_free_ballooned_pages);
691
692static void __init balloon_add_regions(void)
693{
694#if defined(CONFIG_XEN_PV)
695	unsigned long start_pfn, pages;
696	unsigned long pfn, extra_pfn_end;
697	unsigned int i;
698
699	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
700		pages = xen_extra_mem[i].n_pfns;
701		if (!pages)
702			continue;
703
704		start_pfn = xen_extra_mem[i].start_pfn;
705
706		/*
707		 * If the amount of usable memory has been limited (e.g., with
708		 * the 'mem' command line parameter), don't add pages beyond
709		 * this limit.
710		 */
711		extra_pfn_end = min(max_pfn, start_pfn + pages);
712
713		for (pfn = start_pfn; pfn < extra_pfn_end; pfn++)
714			balloon_append(pfn_to_page(pfn));
715
716		balloon_stats.total_pages += extra_pfn_end - start_pfn;
717	}
718#endif
719}
720
721static int __init balloon_init(void)
722{
723	struct task_struct *task;
724
725	if (!xen_domain())
726		return -ENODEV;
727
728	pr_info("Initialising balloon driver\n");
729
730#ifdef CONFIG_XEN_PV
731	balloon_stats.current_pages = xen_pv_domain()
732		? min(xen_start_info->nr_pages - xen_released_pages, max_pfn)
733		: get_num_physpages();
734#else
735	balloon_stats.current_pages = get_num_physpages();
736#endif
737	balloon_stats.target_pages  = balloon_stats.current_pages;
738	balloon_stats.balloon_low   = 0;
739	balloon_stats.balloon_high  = 0;
740	balloon_stats.total_pages   = balloon_stats.current_pages;
741
742	balloon_stats.schedule_delay = 1;
743	balloon_stats.max_schedule_delay = 32;
744	balloon_stats.retry_count = 1;
745	balloon_stats.max_retry_count = 4;
746
747#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
748	set_online_page_callback(&xen_online_page);
749	register_memory_notifier(&xen_memory_nb);
750	register_sysctl_table(xen_root);
751#endif
752
753	balloon_add_regions();
754
755	task = kthread_run(balloon_thread, NULL, "xen-balloon");
756	if (IS_ERR(task)) {
757		pr_err("xen-balloon thread could not be started, ballooning will not work!\n");
758		return PTR_ERR(task);
759	}
760
761	/* Init the xen-balloon driver. */
762	xen_balloon_init();
763
764	return 0;
765}
766subsys_initcall(balloon_init);
767
768static int __init balloon_wait_finish(void)
769{
770	long credit, last_credit = 0;
771	unsigned long last_changed = 0;
772
773	if (!xen_domain())
774		return -ENODEV;
775
776	/* PV guests don't need to wait. */
777	if (xen_pv_domain() || !current_credit())
778		return 0;
779
780	pr_notice("Waiting for initial ballooning down to finish.\n");
781
782	while ((credit = current_credit()) < 0) {
783		if (credit != last_credit) {
784			last_changed = jiffies;
785			last_credit = credit;
786		}
787		if (balloon_state == BP_ECANCELED) {
788			pr_warn_once("Initial ballooning failed, %ld pages need to be freed.\n",
789				     -credit);
790			if (time_is_before_eq_jiffies(last_changed + HZ * balloon_boot_timeout))
791				panic("Initial ballooning failed!\n");
792		}
793
794		schedule_timeout_interruptible(HZ / 10);
795	}
796
797	pr_notice("Initial ballooning down finished.\n");
798
799	return 0;
800}
801late_initcall_sync(balloon_wait_finish);
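/*
 * HVM/PVH guests can be started with more pages than their target and
 * must balloon down early; waiting here (late_initcall_sync) keeps the
 * resulting memory pressure from hitting user space before the balloon
 * has settled.
 */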