v6.13.7
/******************************************************************************
 * Xen balloon driver - enables returning/claiming memory to/from Xen.
 *
 * Copyright (c) 2003, B Dragovic
 * Copyright (c) 2003-2004, M Williamson, K Fraser
 * Copyright (c) 2005 Dan M. Smith, IBM Corporation
 * Copyright (c) 2010 Daniel Kiper
 *
 * Memory hotplug support was written by Daniel Kiper. Work on
 * it was sponsored by Google under Google Summer of Code 2010
 * program. Jeremy Fitzhardinge from Citrix was the mentor for
 * this project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/notifier.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/moduleparam.h>
#include <linux/jiffies.h>

#include <asm/page.h>
#include <asm/tlb.h>

#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/balloon.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/mem-reservation.h>

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "xen."

static uint __read_mostly balloon_boot_timeout = 180;
module_param(balloon_boot_timeout, uint, 0444);

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
static int xen_hotplug_unpopulated;

static struct ctl_table balloon_table[] = {
	{
		.procname	= "hotplug_unpopulated",
		.data		= &xen_hotplug_unpopulated,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1         = SYSCTL_ZERO,
		.extra2         = SYSCTL_ONE,
	},
};

#else
#define xen_hotplug_unpopulated 0
#endif

/*
 * Use one extent per PAGE_SIZE to avoid breaking the page down into
 * multiple frames.
 */
#define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)

/*
 * balloon_thread() state:
 *
 * BP_DONE: done or nothing to do,
 * BP_WAIT: wait to be rescheduled,
 * BP_EAGAIN: error, go to sleep,
 * BP_ECANCELED: error, balloon operation canceled.
 */

static enum bp_state {
	BP_DONE,
	BP_WAIT,
	BP_EAGAIN,
	BP_ECANCELED
} balloon_state = BP_DONE;

/* Main waiting point for xen-balloon thread. */
static DECLARE_WAIT_QUEUE_HEAD(balloon_thread_wq);

static DEFINE_MUTEX(balloon_mutex);

struct balloon_stats balloon_stats;
EXPORT_SYMBOL_GPL(balloon_stats);

/* We increase/decrease in batches which fit in a page */
static xen_pfn_t frame_list[PAGE_SIZE / sizeof(xen_pfn_t)];

/* List of ballooned pages, threaded through the mem_map array. */
static LIST_HEAD(ballooned_pages);
static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);

/*
 * When ballooning out (allocating memory to return to Xen) we don't really
 * want the kernel to try too hard since that can trigger the oom killer.
 */
#define GFP_BALLOON \
	(GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)

/* balloon_append: add the given page to the balloon. */
static void balloon_append(struct page *page)
{
	if (!PageOffline(page))
		__SetPageOffline(page);

	/* Lowmem is re-populated first, so highmem pages go at list tail. */
	if (PageHighMem(page)) {
		list_add_tail(&page->lru, &ballooned_pages);
		balloon_stats.balloon_high++;
	} else {
		list_add(&page->lru, &ballooned_pages);
		balloon_stats.balloon_low++;
	}
	wake_up(&balloon_wq);
}

/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
static struct page *balloon_retrieve(bool require_lowmem)
{
	struct page *page;

	if (list_empty(&ballooned_pages))
		return NULL;

	page = list_entry(ballooned_pages.next, struct page, lru);
	if (require_lowmem && PageHighMem(page))
		return NULL;
	list_del(&page->lru);

	if (PageHighMem(page))
		balloon_stats.balloon_high--;
	else
		balloon_stats.balloon_low--;

	__ClearPageOffline(page);
	return page;
}

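/*
 * balloon_next_page: step to the next page in the ballooned_pages list,
 * or return NULL once the end of the list has been reached.
 */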
static struct page *balloon_next_page(struct page *page)
{
	struct list_head *next = page->lru.next;

	if (next == &ballooned_pages)
		return NULL;
	return list_entry(next, struct page, lru);
}

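/*
 * Update the retry bookkeeping after a balloon operation. On repeated
 * failures the delay doubles each round (exponential backoff): with the
 * default max_schedule_delay of 32, retries run 1, 2, 4, ..., 32 seconds
 * apart until max_retry_count is exceeded.
 */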
static void update_schedule(void)
{
	if (balloon_state == BP_WAIT || balloon_state == BP_ECANCELED)
		return;

	if (balloon_state == BP_DONE) {
		balloon_stats.schedule_delay = 1;
		balloon_stats.retry_count = 1;
		return;
	}

	++balloon_stats.retry_count;

	if (balloon_stats.max_retry_count != RETRY_UNLIMITED &&
			balloon_stats.retry_count > balloon_stats.max_retry_count) {
		balloon_stats.schedule_delay = 1;
		balloon_stats.retry_count = 1;
		balloon_state = BP_ECANCELED;
		return;
	}

	balloon_stats.schedule_delay <<= 1;

	if (balloon_stats.schedule_delay > balloon_stats.max_schedule_delay)
		balloon_stats.schedule_delay = balloon_stats.max_schedule_delay;

	balloon_state = BP_EAGAIN;
}

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
static void release_memory_resource(struct resource *resource)
{
	if (!resource)
		return;

	/*
	 * No need to reset region to identity mapped since we now
	 * know that no I/O can be in this region
	 */
	release_resource(resource);
	kfree(resource);
}

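/*
 * Allocate a new, section-aligned "System RAM" resource of the given size
 * from the iomem tree; it becomes the target region for memory hotplug.
 */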
static struct resource *additional_memory_resource(phys_addr_t size)
{
	struct resource *res;
	int ret;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return NULL;

	res->name = "System RAM";
	res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	ret = allocate_resource(&iomem_resource, res,
				size, 0, -1,
				PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
	if (ret < 0) {
		pr_err("Cannot allocate new System RAM resource\n");
		kfree(res);
		return NULL;
	}

	return res;
}

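/*
 * Hotplug enough section-aligned memory to cover the current credit.
 * Returns BP_WAIT while waiting for the new sections to come online,
 * or BP_ECANCELED if no memory could be added.
 */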
static enum bp_state reserve_additional_memory(void)
{
	long credit;
	struct resource *resource;
	int nid, rc;
	unsigned long balloon_hotplug;

	credit = balloon_stats.target_pages + balloon_stats.target_unpopulated
		- balloon_stats.total_pages;

	/*
	 * Already hotplugged enough pages?  Wait for them to be
	 * onlined.
	 */
	if (credit <= 0)
		return BP_WAIT;

	balloon_hotplug = round_up(credit, PAGES_PER_SECTION);

	resource = additional_memory_resource(balloon_hotplug * PAGE_SIZE);
	if (!resource)
		goto err;

	nid = memory_add_physaddr_to_nid(resource->start);

#ifdef CONFIG_XEN_HAVE_PVMMU
	/*
	 * We don't support PV MMU when Linux and Xen are using
	 * different page granularities.
	 */
	BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);

	/*
	 * add_memory() will build page tables for the new memory so
	 * the p2m must contain invalid entries so the correct
	 * non-present PTEs will be written.
	 *
	 * If a failure occurs, the original (identity) p2m entries
	 * are not restored since this region is now known not to
	 * conflict with any devices.
	 */
	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		unsigned long pfn, i;

		pfn = PFN_DOWN(resource->start);
		for (i = 0; i < balloon_hotplug; i++) {
			if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
				pr_warn("set_phys_to_machine() failed, no memory added\n");
				goto err;
			}
		}
	}
#endif

	/*
	 * add_memory_resource() will call online_pages() which in its turn
	 * will call xen_online_page() callback causing deadlock if we don't
	 * release balloon_mutex here. Unlocking here is safe because the
	 * callers drop the mutex before trying again.
	 */
	mutex_unlock(&balloon_mutex);
	/* add_memory_resource() requires the device_hotplug lock */
	lock_device_hotplug();
	rc = add_memory_resource(nid, resource, MHP_MERGE_RESOURCE);
	unlock_device_hotplug();
	mutex_lock(&balloon_mutex);

	if (rc) {
		pr_warn("Cannot add additional memory (%i)\n", rc);
		goto err;
	}

	balloon_stats.total_pages += balloon_hotplug;

	return BP_WAIT;
  err:
	release_memory_resource(resource);
	return BP_ECANCELED;
}

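/*
 * Callback invoked when hotplugged memory is onlined: hand every page in
 * the onlined range to the balloon instead of the page allocator.
 */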
static void xen_online_page(struct page *page, unsigned int order)
{
	unsigned long i, size = (1 << order);
	unsigned long start_pfn = page_to_pfn(page);
	struct page *p;

	pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn);
	mutex_lock(&balloon_mutex);
	for (i = 0; i < size; i++) {
		p = pfn_to_page(start_pfn + i);
		balloon_append(p);
	}
	mutex_unlock(&balloon_mutex);
}

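/*
 * Memory-hotplug notifier: kick the balloon thread once the newly added
 * sections have been onlined.
 */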
static int xen_memory_notifier(struct notifier_block *nb, unsigned long val, void *v)
{
	if (val == MEM_ONLINE)
		wake_up(&balloon_thread_wq);

	return NOTIFY_OK;
}

static struct notifier_block xen_memory_nb = {
	.notifier_call = xen_memory_notifier,
	.priority = 0
};
#else
static enum bp_state reserve_additional_memory(void)
{
	balloon_stats.target_pages = balloon_stats.current_pages +
				     balloon_stats.target_unpopulated;
	return BP_ECANCELED;
}
#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */

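/*
 * Pages still to be given back to the guest (credit > 0) or to be
 * ballooned out to Xen (credit < 0).
 */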
static long current_credit(void)
{
	return balloon_stats.target_pages - balloon_stats.current_pages;
}

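/* True if any pages are currently held in the balloon. */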
static bool balloon_is_inflated(void)
{
	return balloon_stats.balloon_low || balloon_stats.balloon_high;
}

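/*
 * Give up to nr_pages ballooned-out pages back to the kernel: ask Xen to
 * populate their frames again and release them to the page allocator.
 */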
static enum bp_state increase_reservation(unsigned long nr_pages)
{
	int rc;
	unsigned long i;
	struct page   *page;

	if (nr_pages > ARRAY_SIZE(frame_list))
		nr_pages = ARRAY_SIZE(frame_list);

	page = list_first_entry_or_null(&ballooned_pages, struct page, lru);
	for (i = 0; i < nr_pages; i++) {
		if (!page) {
			nr_pages = i;
			break;
		}

		frame_list[i] = page_to_xen_pfn(page);
		page = balloon_next_page(page);
	}

	rc = xenmem_reservation_increase(nr_pages, frame_list);
	if (rc <= 0)
		return BP_EAGAIN;

	for (i = 0; i < rc; i++) {
		page = balloon_retrieve(false);
		BUG_ON(page == NULL);

		xenmem_reservation_va_mapping_update(1, &page, &frame_list[i]);

		/*
		 * Relinquish the page back to the allocator. Note that
		 * some pages, including ones added via xen_online_page(), might
		 * not be marked reserved; free_reserved_page() will handle that.
		 */
		free_reserved_page(page);
	}

	balloon_stats.current_pages += rc;

	return BP_DONE;
}

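/*
 * Balloon out nr_pages pages: allocate them from the kernel, scrub them,
 * tear down their mappings, and hand the underlying frames back to Xen.
 */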
static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
{
	enum bp_state state = BP_DONE;
	unsigned long i;
	struct page *page, *tmp;
	int ret;
	LIST_HEAD(pages);

	if (nr_pages > ARRAY_SIZE(frame_list))
		nr_pages = ARRAY_SIZE(frame_list);

	for (i = 0; i < nr_pages; i++) {
		page = alloc_page(gfp);
		if (page == NULL) {
			nr_pages = i;
			state = BP_EAGAIN;
			break;
		}
		adjust_managed_page_count(page, -1);
		xenmem_reservation_scrub_page(page);
		list_add(&page->lru, &pages);
	}

	/*
	 * Ensure that ballooned highmem pages don't have kmaps.
	 *
	 * Do this before changing the p2m as kmap_flush_unused()
	 * reads PTEs to obtain pages (and hence needs the original
	 * p2m entry).
	 */
	kmap_flush_unused();

	/*
	 * Set up the frame, update the direct mapping, invalidate the P2M,
	 * and add the page to the balloon.
	 */
	i = 0;
	list_for_each_entry_safe(page, tmp, &pages, lru) {
		frame_list[i++] = xen_page_to_gfn(page);

		xenmem_reservation_va_mapping_reset(1, &page);

		list_del(&page->lru);

		balloon_append(page);
	}

	flush_tlb_all();

	ret = xenmem_reservation_decrease(nr_pages, frame_list);
	BUG_ON(ret != nr_pages);

	balloon_stats.current_pages -= nr_pages;

	return state;
}

/*
 * Stop waiting if either state is BP_DONE and ballooning action is
 * needed, or if the credit has changed while state is not BP_DONE.
 */
static bool balloon_thread_cond(long credit)
{
	if (balloon_state == BP_DONE)
		credit = 0;

	return current_credit() != credit || kthread_should_stop();
}

/*
 * As this is a kthread it is guaranteed to run as a single instance only.
 * We may of course race updates of the target counts (which are protected
 * by the balloon lock), or with changes to the Xen hard limit, but we will
 * recover from these in time.
 */
static int balloon_thread(void *unused)
{
	long credit;
	unsigned long timeout;

	set_freezable();
	for (;;) {
		switch (balloon_state) {
		case BP_DONE:
		case BP_ECANCELED:
			timeout = 3600 * HZ;
			break;
		case BP_EAGAIN:
			timeout = balloon_stats.schedule_delay * HZ;
			break;
		case BP_WAIT:
			timeout = HZ;
			break;
		}

		credit = current_credit();

		wait_event_freezable_timeout(balloon_thread_wq,
			balloon_thread_cond(credit), timeout);

		if (kthread_should_stop())
			return 0;

		mutex_lock(&balloon_mutex);

		credit = current_credit();

		if (credit > 0) {
			if (balloon_is_inflated())
				balloon_state = increase_reservation(credit);
			else
				balloon_state = reserve_additional_memory();
		}

		if (credit < 0) {
			long n_pages;

			n_pages = min(-credit, si_mem_available());
			balloon_state = decrease_reservation(n_pages,
							     GFP_BALLOON);
			if (balloon_state == BP_DONE && n_pages != -credit &&
			    n_pages < totalreserve_pages)
				balloon_state = BP_EAGAIN;
		}

		update_schedule();

		mutex_unlock(&balloon_mutex);

		cond_resched();
	}
}

/* Resets the Xen limit, sets new target, and kicks off processing. */
void balloon_set_new_target(unsigned long target)
{
	/* No need for lock. Not read-modify-write updates. */
	balloon_stats.target_pages = target;
	wake_up(&balloon_thread_wq);
}
EXPORT_SYMBOL_GPL(balloon_set_new_target);

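/*
 * Grow the pool of ballooned pages by nr_pages, either by hotplugging
 * unpopulated memory (if enabled) or by ballooning out freshly allocated
 * pages. Called with balloon_mutex held.
 */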
static int add_ballooned_pages(unsigned int nr_pages)
{
	enum bp_state st;

	if (xen_hotplug_unpopulated) {
		st = reserve_additional_memory();
		if (st != BP_ECANCELED) {
			int rc;

			mutex_unlock(&balloon_mutex);
			rc = wait_event_interruptible(balloon_wq,
				   !list_empty(&ballooned_pages));
			mutex_lock(&balloon_mutex);
			return rc ? -ENOMEM : 0;
		}
	}

	if (si_mem_available() < nr_pages)
		return -ENOMEM;

	st = decrease_reservation(nr_pages, GFP_USER);
	if (st != BP_DONE)
		return -ENOMEM;

	return 0;
}

/**
 * xen_alloc_ballooned_pages - get pages that have been ballooned out
 * @nr_pages: Number of pages to get
 * @pages: pages returned
 *
 * Return: 0 on success, error otherwise.
 */
int xen_alloc_ballooned_pages(unsigned int nr_pages, struct page **pages)
{
	unsigned int pgno = 0;
	struct page *page;
	int ret;

	mutex_lock(&balloon_mutex);

	balloon_stats.target_unpopulated += nr_pages;

	while (pgno < nr_pages) {
		page = balloon_retrieve(true);
		if (page) {
			pages[pgno++] = page;
#ifdef CONFIG_XEN_HAVE_PVMMU
			/*
			 * We don't support PV MMU when Linux and Xen are using
			 * different page granularities.
			 */
			BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);

			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
				ret = xen_alloc_p2m_entry(page_to_pfn(page));
				if (ret < 0)
					goto out_undo;
			}
#endif
		} else {
			ret = add_ballooned_pages(nr_pages - pgno);
			if (ret < 0)
				goto out_undo;
		}
	}
	mutex_unlock(&balloon_mutex);
	return 0;
 out_undo:
	mutex_unlock(&balloon_mutex);
	xen_free_ballooned_pages(pgno, pages);
	/*
	 * NB: xen_free_ballooned_pages will only subtract pgno pages, but since
	 * target_unpopulated is incremented with nr_pages at the start we need
	 * to remove the remaining ones also, or accounting will be screwed.
	 */
	balloon_stats.target_unpopulated -= nr_pages - pgno;
	return ret;
}
EXPORT_SYMBOL(xen_alloc_ballooned_pages);

/**
 * xen_free_ballooned_pages - return pages retrieved with xen_alloc_ballooned_pages
 * @nr_pages: Number of pages
 * @pages: pages to return
 */
void xen_free_ballooned_pages(unsigned int nr_pages, struct page **pages)
{
	unsigned int i;

	mutex_lock(&balloon_mutex);

	for (i = 0; i < nr_pages; i++) {
		if (pages[i])
			balloon_append(pages[i]);
	}

	balloon_stats.target_unpopulated -= nr_pages;

	/* The balloon may be too large now. Shrink it if needed. */
	if (current_credit())
		wake_up(&balloon_thread_wq);

	mutex_unlock(&balloon_mutex);
}
EXPORT_SYMBOL(xen_free_ballooned_pages);

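/*
 * Seed the balloon with the pages of the extra memory regions declared by
 * Xen at boot, clipped to the usable memory limit.
 */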
static void __init balloon_add_regions(void)
{
	unsigned long start_pfn, pages;
	unsigned long pfn, extra_pfn_end;
	unsigned int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		pages = xen_extra_mem[i].n_pfns;
		if (!pages)
			continue;

		start_pfn = xen_extra_mem[i].start_pfn;

		/*
		 * If the amount of usable memory has been limited (e.g., with
		 * the 'mem' command line parameter), don't add pages beyond
		 * this limit.
		 */
		extra_pfn_end = min(max_pfn, start_pfn + pages);

		for (pfn = start_pfn; pfn < extra_pfn_end; pfn++)
			balloon_append(pfn_to_page(pfn));

		balloon_stats.total_pages += extra_pfn_end - start_pfn;
	}
}

static int __init balloon_init(void)
{
	struct task_struct *task;

	if (!xen_domain())
		return -ENODEV;

	pr_info("Initialising balloon driver\n");

#ifdef CONFIG_XEN_PV
	balloon_stats.current_pages = xen_pv_domain()
		? min(xen_start_info->nr_pages - xen_released_pages, max_pfn)
		: get_num_physpages();
#else
	balloon_stats.current_pages = get_num_physpages();
#endif
	balloon_stats.target_pages  = balloon_stats.current_pages;
	balloon_stats.balloon_low   = 0;
	balloon_stats.balloon_high  = 0;
	balloon_stats.total_pages   = balloon_stats.current_pages;

	balloon_stats.schedule_delay = 1;
	balloon_stats.max_schedule_delay = 32;
	balloon_stats.retry_count = 1;
	balloon_stats.max_retry_count = 4;

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
	set_online_page_callback(&xen_online_page);
	register_memory_notifier(&xen_memory_nb);
	register_sysctl_init("xen/balloon", balloon_table);
#endif

	balloon_add_regions();

	task = kthread_run(balloon_thread, NULL, "xen-balloon");
	if (IS_ERR(task)) {
		pr_err("xen-balloon thread could not be started, ballooning will not work!\n");
		return PTR_ERR(task);
	}

	/* Init the xen-balloon driver. */
	xen_balloon_init();

	return 0;
}
subsys_initcall(balloon_init);

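/*
 * On HVM/PVH guests, block late boot until the initial balloon-down has
 * completed, panicking if no progress is made within balloon_boot_timeout
 * seconds.
 */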
static int __init balloon_wait_finish(void)
{
	long credit, last_credit = 0;
	unsigned long last_changed = 0;

	if (!xen_domain())
		return -ENODEV;

	/* PV guests don't need to wait. */
	if (xen_pv_domain() || !current_credit())
		return 0;

	pr_notice("Waiting for initial ballooning down to finish.\n");

	while ((credit = current_credit()) < 0) {
		if (credit != last_credit) {
			last_changed = jiffies;
			last_credit = credit;
		}
		if (balloon_state == BP_ECANCELED) {
			pr_warn_once("Initial ballooning failed, %ld pages need to be freed.\n",
				     -credit);
			if (time_is_before_eq_jiffies(last_changed + HZ * balloon_boot_timeout))
				panic("Initial ballooning failed!\n");
		}

		schedule_timeout_interruptible(HZ / 10);
	}

	pr_notice("Initial ballooning down finished.\n");

	return 0;
}
late_initcall_sync(balloon_wait_finish);

v3.15
/******************************************************************************
 * Xen balloon driver - enables returning/claiming memory to/from Xen.
 *
 * Copyright (c) 2003, B Dragovic
 * Copyright (c) 2003-2004, M Williamson, K Fraser
 * Copyright (c) 2005 Dan M. Smith, IBM Corporation
 * Copyright (c) 2010 Daniel Kiper
 *
 * Memory hotplug support was written by Daniel Kiper. Work on
 * it was sponsored by Google under Google Summer of Code 2010
 * program. Jeremy Fitzhardinge from Citrix was the mentor for
 * this project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/notifier.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/percpu-defs.h>

#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/balloon.h>
#include <xen/features.h>
#include <xen/page.h>

/*
 * balloon_process() state:
 *
 * BP_DONE: done or nothing to do,
 * BP_EAGAIN: error, go to sleep,
 * BP_ECANCELED: error, balloon operation canceled.
 */

enum bp_state {
	BP_DONE,
	BP_EAGAIN,
	BP_ECANCELED
};

static DEFINE_MUTEX(balloon_mutex);

struct balloon_stats balloon_stats;
EXPORT_SYMBOL_GPL(balloon_stats);

/* We increase/decrease in batches which fit in a page */
static xen_pfn_t frame_list[PAGE_SIZE / sizeof(unsigned long)];
static DEFINE_PER_CPU(struct page *, balloon_scratch_page);

/* List of ballooned pages, threaded through the mem_map array. */
static LIST_HEAD(ballooned_pages);

/* Main work function, always executed in process context. */
static void balloon_process(struct work_struct *work);
static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);

/*
 * When ballooning out (allocating memory to return to Xen) we don't really
 * want the kernel to try too hard since that can trigger the oom killer.
 */
#define GFP_BALLOON \
	(GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)

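/* Zero a page before it is handed back to Xen, if scrubbing is enabled. */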
static void scrub_page(struct page *page)
{
#ifdef CONFIG_XEN_SCRUB_PAGES
	clear_highpage(page);
#endif
}

/* balloon_append: add the given page to the balloon. */
static void __balloon_append(struct page *page)
{
	/* Lowmem is re-populated first, so highmem pages go at list tail. */
	if (PageHighMem(page)) {
		list_add_tail(&page->lru, &ballooned_pages);
		balloon_stats.balloon_high++;
	} else {
		list_add(&page->lru, &ballooned_pages);
		balloon_stats.balloon_low++;
	}
}

static void balloon_append(struct page *page)
{
	__balloon_append(page);
	adjust_managed_page_count(page, -1);
}

/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
static struct page *balloon_retrieve(bool prefer_highmem)
{
	struct page *page;

	if (list_empty(&ballooned_pages))
		return NULL;

	if (prefer_highmem)
		page = list_entry(ballooned_pages.prev, struct page, lru);
	else
		page = list_entry(ballooned_pages.next, struct page, lru);
	list_del(&page->lru);

	if (PageHighMem(page))
		balloon_stats.balloon_high--;
	else
		balloon_stats.balloon_low--;

	adjust_managed_page_count(page, 1);

	return page;
}

static struct page *balloon_next_page(struct page *page)
{
	struct list_head *next = page->lru.next;

	if (next == &ballooned_pages)
		return NULL;
	return list_entry(next, struct page, lru);
}

static enum bp_state update_schedule(enum bp_state state)
{
	if (state == BP_DONE) {
		balloon_stats.schedule_delay = 1;
		balloon_stats.retry_count = 1;
		return BP_DONE;
	}

	++balloon_stats.retry_count;

	if (balloon_stats.max_retry_count != RETRY_UNLIMITED &&
			balloon_stats.retry_count > balloon_stats.max_retry_count) {
		balloon_stats.schedule_delay = 1;
		balloon_stats.retry_count = 1;
		return BP_ECANCELED;
	}

	balloon_stats.schedule_delay <<= 1;

	if (balloon_stats.schedule_delay > balloon_stats.max_schedule_delay)
		balloon_stats.schedule_delay = balloon_stats.max_schedule_delay;

	return BP_EAGAIN;
}

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
static long current_credit(void)
{
	return balloon_stats.target_pages - balloon_stats.current_pages -
		balloon_stats.hotplug_pages;
}

static bool balloon_is_inflated(void)
{
	return balloon_stats.balloon_low || balloon_stats.balloon_high ||
	       balloon_stats.balloon_hotplug;
}

/*
 * reserve_additional_memory() adds a memory region of size >= credit above
 * max_pfn. The new region is section aligned and its size is rounded up to
 * a multiple of the section size. Those features allow optimal use of the
 * address space and establish proper alignment when this function is called
 * for the first time after boot (the last section not fully populated at
 * boot time contains unused memory pages with the PG_reserved bit not set;
 * online_pages_range() does not allow page onlining in a whole range if the
 * first onlined page does not have the PG_reserved bit set). The real size
 * of the added memory is established at the page onlining stage.
 */
static enum bp_state reserve_additional_memory(long credit)
{
	int nid, rc;
	u64 hotplug_start_paddr;
	unsigned long balloon_hotplug = credit;

	hotplug_start_paddr = PFN_PHYS(SECTION_ALIGN_UP(max_pfn));
	balloon_hotplug = round_up(balloon_hotplug, PAGES_PER_SECTION);
	nid = memory_add_physaddr_to_nid(hotplug_start_paddr);

	rc = add_memory(nid, hotplug_start_paddr, balloon_hotplug << PAGE_SHIFT);

	if (rc) {
		pr_info("%s: add_memory() failed: %i\n", __func__, rc);
		return BP_EAGAIN;
	}

	balloon_hotplug -= credit;

	balloon_stats.hotplug_pages += credit;
	balloon_stats.balloon_hotplug = balloon_hotplug;

	return BP_DONE;
}

static void xen_online_page(struct page *page)
{
	__online_page_set_limits(page);

	mutex_lock(&balloon_mutex);

	__balloon_append(page);

	if (balloon_stats.hotplug_pages)
		--balloon_stats.hotplug_pages;
	else
		--balloon_stats.balloon_hotplug;

	mutex_unlock(&balloon_mutex);
}

static int xen_memory_notifier(struct notifier_block *nb, unsigned long val, void *v)
{
	if (val == MEM_ONLINE)
		schedule_delayed_work(&balloon_worker, 0);

	return NOTIFY_OK;
}

static struct notifier_block xen_memory_nb = {
	.notifier_call = xen_memory_notifier,
	.priority = 0
};
#else
static long current_credit(void)
{
	unsigned long target = balloon_stats.target_pages;

	target = min(target,
		     balloon_stats.current_pages +
		     balloon_stats.balloon_low +
		     balloon_stats.balloon_high);

	return target - balloon_stats.current_pages;
}

static bool balloon_is_inflated(void)
{
	return balloon_stats.balloon_low || balloon_stats.balloon_high;
}

static enum bp_state reserve_additional_memory(long credit)
{
	balloon_stats.target_pages = balloon_stats.current_pages;
	return BP_DONE;
}
#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */

static enum bp_state increase_reservation(unsigned long nr_pages)
{
	int rc;
	unsigned long  pfn, i;
	struct page   *page;
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
	if (!balloon_stats.balloon_low && !balloon_stats.balloon_high) {
		nr_pages = min(nr_pages, balloon_stats.balloon_hotplug);
		balloon_stats.hotplug_pages += nr_pages;
		balloon_stats.balloon_hotplug -= nr_pages;
		return BP_DONE;
	}
#endif

	if (nr_pages > ARRAY_SIZE(frame_list))
		nr_pages = ARRAY_SIZE(frame_list);

	page = list_first_entry_or_null(&ballooned_pages, struct page, lru);
	for (i = 0; i < nr_pages; i++) {
		if (!page) {
			nr_pages = i;
			break;
		}
		frame_list[i] = page_to_pfn(page);
		page = balloon_next_page(page);
	}

	set_xen_guest_handle(reservation.extent_start, frame_list);
	reservation.nr_extents = nr_pages;
	rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
	if (rc <= 0)
		return BP_EAGAIN;

	for (i = 0; i < rc; i++) {
		page = balloon_retrieve(false);
		BUG_ON(page == NULL);

		pfn = page_to_pfn(page);

#ifdef CONFIG_XEN_HAVE_PVMMU
		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			set_phys_to_machine(pfn, frame_list[i]);

			/* Link back into the page tables if not highmem. */
			if (!PageHighMem(page)) {
				int ret;

				ret = HYPERVISOR_update_va_mapping(
						(unsigned long)__va(pfn << PAGE_SHIFT),
						mfn_pte(frame_list[i], PAGE_KERNEL),
						0);
				BUG_ON(ret);
			}
		}
#endif

		/* Relinquish the page back to the allocator. */
		__free_reserved_page(page);
	}

	balloon_stats.current_pages += rc;

	return BP_DONE;
}

static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
{
	enum bp_state state = BP_DONE;
	unsigned long  pfn, i;
	struct page   *page;
	int ret;
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
	if (balloon_stats.hotplug_pages) {
		nr_pages = min(nr_pages, balloon_stats.hotplug_pages);
		balloon_stats.hotplug_pages -= nr_pages;
		balloon_stats.balloon_hotplug += nr_pages;
		return BP_DONE;
	}
#endif

	if (nr_pages > ARRAY_SIZE(frame_list))
		nr_pages = ARRAY_SIZE(frame_list);

	for (i = 0; i < nr_pages; i++) {
		page = alloc_page(gfp);
		if (page == NULL) {
			nr_pages = i;
			state = BP_EAGAIN;
			break;
		}
		scrub_page(page);

		frame_list[i] = page_to_pfn(page);
	}

	/*
	 * Ensure that ballooned highmem pages don't have kmaps.
	 *
	 * Do this before changing the p2m as kmap_flush_unused()
	 * reads PTEs to obtain pages (and hence needs the original
	 * p2m entry).
	 */
	kmap_flush_unused();

	/* Update direct mapping, invalidate P2M, and add to balloon. */
	for (i = 0; i < nr_pages; i++) {
		pfn = frame_list[i];
		frame_list[i] = pfn_to_mfn(pfn);
		page = pfn_to_page(pfn);

#ifdef CONFIG_XEN_HAVE_PVMMU
		/*
		 * Ballooned out frames are effectively replaced with
		 * a scratch frame.  Ensure direct mappings and the
		 * p2m are consistent.
		 */
		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			unsigned long p;
			struct page   *scratch_page = get_balloon_scratch_page();

			if (!PageHighMem(page)) {
				ret = HYPERVISOR_update_va_mapping(
						(unsigned long)__va(pfn << PAGE_SHIFT),
						pfn_pte(page_to_pfn(scratch_page),
							PAGE_KERNEL_RO), 0);
				BUG_ON(ret);
			}
			p = page_to_pfn(scratch_page);
			__set_phys_to_machine(pfn, pfn_to_mfn(p));

			put_balloon_scratch_page();
		}
#endif

		balloon_append(page);
	}

	flush_tlb_all();

	set_xen_guest_handle(reservation.extent_start, frame_list);
	reservation.nr_extents   = nr_pages;
	ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
	BUG_ON(ret != nr_pages);

	balloon_stats.current_pages -= nr_pages;

	return state;
}

/*
 * We avoid multiple worker processes conflicting via the balloon mutex.
 * We may of course race updates of the target counts (which are protected
 * by the balloon lock), or with changes to the Xen hard limit, but we will
 * recover from these in time.
 */
static void balloon_process(struct work_struct *work)
{
	enum bp_state state = BP_DONE;
	long credit;

	mutex_lock(&balloon_mutex);

	do {
		credit = current_credit();

		if (credit > 0) {
			if (balloon_is_inflated())
				state = increase_reservation(credit);
			else
				state = reserve_additional_memory(credit);
		}

		if (credit < 0)
			state = decrease_reservation(-credit, GFP_BALLOON);

		state = update_schedule(state);

#ifndef CONFIG_PREEMPT
		if (need_resched())
			schedule();
#endif
	} while (credit && state == BP_DONE);

	/* Schedule more work if there is some still to be done. */
	if (state == BP_EAGAIN)
		schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);

	mutex_unlock(&balloon_mutex);
}

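/*
 * Pin and return this CPU's scratch page, used as a stand-in mapping for
 * ballooned-out frames; must be released with put_balloon_scratch_page().
 */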
struct page *get_balloon_scratch_page(void)
{
	struct page *ret = get_cpu_var(balloon_scratch_page);

	BUG_ON(ret == NULL);
	return ret;
}

void put_balloon_scratch_page(void)
{
	put_cpu_var(balloon_scratch_page);
}

/* Resets the Xen limit, sets new target, and kicks off processing. */
void balloon_set_new_target(unsigned long target)
{
	/* No need for lock. Not read-modify-write updates. */
	balloon_stats.target_pages = target;
	schedule_delayed_work(&balloon_worker, 0);
}
EXPORT_SYMBOL_GPL(balloon_set_new_target);

/**
 * alloc_xenballooned_pages - get pages that have been ballooned out
 * @nr_pages: Number of pages to get
 * @pages: pages returned
 * @highmem: allow highmem pages
 *
 * Return: 0 on success, error otherwise.
 */
int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem)
{
	int pgno = 0;
	struct page *page;

	mutex_lock(&balloon_mutex);
	while (pgno < nr_pages) {
		page = balloon_retrieve(highmem);
		if (page && (highmem || !PageHighMem(page))) {
			pages[pgno++] = page;
		} else {
			enum bp_state st;

			if (page)
				balloon_append(page);
			st = decrease_reservation(nr_pages - pgno,
					highmem ? GFP_HIGHUSER : GFP_USER);
			if (st != BP_DONE)
				goto out_undo;
		}
	}
	mutex_unlock(&balloon_mutex);
	return 0;
 out_undo:
	while (pgno)
		balloon_append(pages[--pgno]);
	/* Free the memory back to the kernel soon */
	schedule_delayed_work(&balloon_worker, 0);
	mutex_unlock(&balloon_mutex);
	return -ENOMEM;
}
EXPORT_SYMBOL(alloc_xenballooned_pages);

/**
 * free_xenballooned_pages - return pages retrieved with alloc_xenballooned_pages
 * @nr_pages: Number of pages
 * @pages: pages to return
 */
void free_xenballooned_pages(int nr_pages, struct page **pages)
{
	int i;

	mutex_lock(&balloon_mutex);

	for (i = 0; i < nr_pages; i++) {
		if (pages[i])
			balloon_append(pages[i]);
	}

	/* The balloon may be too large now. Shrink it if needed. */
	if (current_credit())
		schedule_delayed_work(&balloon_worker, 0);

	mutex_unlock(&balloon_mutex);
}
EXPORT_SYMBOL(free_xenballooned_pages);

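/*
 * Seed the balloon with the usable pages of one extra memory region
 * provided by Xen at boot.
 */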
static void __init balloon_add_region(unsigned long start_pfn,
				      unsigned long pages)
{
	unsigned long pfn, extra_pfn_end;
	struct page *page;

	/*
	 * If the amount of usable memory has been limited (e.g., with
	 * the 'mem' command line parameter), don't add pages beyond
	 * this limit.
	 */
	extra_pfn_end = min(max_pfn, start_pfn + pages);

	for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) {
		page = pfn_to_page(pfn);
		/*
		 * totalram_pages and totalhigh_pages do not include the
		 * boot-time balloon extension, so don't subtract from them.
		 */
		__balloon_append(page);
	}
}

static int alloc_balloon_scratch_page(int cpu)
{
	if (per_cpu(balloon_scratch_page, cpu) != NULL)
		return 0;

	per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
	if (per_cpu(balloon_scratch_page, cpu) == NULL) {
		pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
		return -ENOMEM;
	}

	return 0;
}

static int balloon_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
		if (alloc_balloon_scratch_page(cpu))
			return NOTIFY_BAD;
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block balloon_cpu_notifier = {
	.notifier_call	= balloon_cpu_notify,
};

static int __init balloon_init(void)
{
	int i, cpu;

	if (!xen_domain())
		return -ENODEV;

	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		register_cpu_notifier(&balloon_cpu_notifier);

		get_online_cpus();
		for_each_online_cpu(cpu) {
			if (alloc_balloon_scratch_page(cpu)) {
				put_online_cpus();
				unregister_cpu_notifier(&balloon_cpu_notifier);
				return -ENOMEM;
			}
		}
		put_online_cpus();
	}

	pr_info("Initialising balloon driver\n");

	balloon_stats.current_pages = xen_pv_domain()
		? min(xen_start_info->nr_pages - xen_released_pages, max_pfn)
		: get_num_physpages();
	balloon_stats.target_pages  = balloon_stats.current_pages;
	balloon_stats.balloon_low   = 0;
	balloon_stats.balloon_high  = 0;

	balloon_stats.schedule_delay = 1;
	balloon_stats.max_schedule_delay = 32;
	balloon_stats.retry_count = 1;
	balloon_stats.max_retry_count = RETRY_UNLIMITED;

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
	balloon_stats.hotplug_pages = 0;
	balloon_stats.balloon_hotplug = 0;

	set_online_page_callback(&xen_online_page);
	register_memory_notifier(&xen_memory_nb);
#endif

	/*
	 * Initialize the balloon with pages from the extra memory
	 * regions (see arch/x86/xen/setup.c).
	 */
	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++)
		if (xen_extra_mem[i].size)
			balloon_add_region(PFN_UP(xen_extra_mem[i].start),
					   PFN_DOWN(xen_extra_mem[i].size));

	return 0;
}

subsys_initcall(balloon_init);

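/*
 * Clear the per-CPU scratch-page pointers early in boot, before
 * balloon_init() (a subsys_initcall) allocates any scratch pages.
 */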
static int __init balloon_clear(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		per_cpu(balloon_scratch_page, cpu) = NULL;

	return 0;
}
early_initcall(balloon_clear);

MODULE_LICENSE("GPL");