/******************************************************************************
 * Xen balloon driver - enables returning/claiming memory to/from Xen.
 *
 * Copyright (c) 2003, B Dragovic
 * Copyright (c) 2003-2004, M Williamson, K Fraser
 * Copyright (c) 2005 Dan M. Smith, IBM Corporation
 * Copyright (c) 2010 Daniel Kiper
 *
 * Memory hotplug support was written by Daniel Kiper. Work on
 * it was sponsored by Google under Google Summer of Code 2010
 * program. Jeremy Fitzhardinge from Citrix was the mentor for
 * this project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/notifier.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/moduleparam.h>
#include <linux/jiffies.h>

#include <asm/page.h>
#include <asm/tlb.h>

#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/balloon.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/mem-reservation.h>

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "xen."

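/*
 * Grace period, in seconds, without ballooning progress before
 * balloon_wait_finish() gives up and panics during boot. Overridable
 * on the kernel command line as xen.balloon_boot_timeout.
 */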
static uint __read_mostly balloon_boot_timeout = 180;
module_param(balloon_boot_timeout, uint, 0444);

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
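/*
 * When non-zero (toggled via /proc/sys/xen/balloon/hotplug_unpopulated,
 * registered in balloon_init()), requests for unpopulated pages are
 * satisfied by hotplugging new memory sections instead of ballooning
 * out existing RAM.
 */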
static int xen_hotplug_unpopulated;

static struct ctl_table balloon_table[] = {
	{
		.procname	= "hotplug_unpopulated",
		.data		= &xen_hotplug_unpopulated,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
};

#else
#define xen_hotplug_unpopulated 0
#endif

/*
 * Use one extent per PAGE_SIZE to avoid breaking the page down into
 * multiple frames.
 */
#define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)
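/*
 * For example, assuming 4 KiB Xen pages: with 4 KiB kernel pages
 * XEN_PFN_PER_PAGE is 1 and extents are order 0 (fls(1) - 1); with
 * 64 KiB kernel pages it is 16 and extents are order 4 (fls(16) - 1).
 */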

/*
 * balloon_thread() state:
 *
 * BP_DONE: done or nothing to do,
 * BP_WAIT: wait to be rescheduled,
 * BP_EAGAIN: error, go to sleep,
 * BP_ECANCELED: error, balloon operation canceled.
 */

static enum bp_state {
	BP_DONE,
	BP_WAIT,
	BP_EAGAIN,
	BP_ECANCELED
} balloon_state = BP_DONE;

/* Main waiting point for xen-balloon thread. */
static DECLARE_WAIT_QUEUE_HEAD(balloon_thread_wq);

static DEFINE_MUTEX(balloon_mutex);

struct balloon_stats balloon_stats;
EXPORT_SYMBOL_GPL(balloon_stats);

/* We increase/decrease in batches which fit in a page */
static xen_pfn_t frame_list[PAGE_SIZE / sizeof(xen_pfn_t)];


/* List of ballooned pages, threaded through the mem_map array. */
static LIST_HEAD(ballooned_pages);
static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);

/* When ballooning out (allocating memory to return to Xen) we don't really
   want the kernel to try too hard since that can trigger the oom killer. */
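/*
 * __GFP_NORETRY fails the allocation rather than retrying hard or
 * invoking the OOM killer, __GFP_NOMEMALLOC keeps us out of the
 * emergency reserves, and __GFP_NOWARN silences failure warnings.
 */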
#define GFP_BALLOON \
	(GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)

/* balloon_append: add the given page to the balloon. */
static void balloon_append(struct page *page)
{
	if (!PageOffline(page))
		__SetPageOffline(page);

	/* Lowmem is re-populated first, so highmem pages go at list tail. */
	if (PageHighMem(page)) {
		list_add_tail(&page->lru, &ballooned_pages);
		balloon_stats.balloon_high++;
	} else {
		list_add(&page->lru, &ballooned_pages);
		balloon_stats.balloon_low++;
	}
	wake_up(&balloon_wq);
}

/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
static struct page *balloon_retrieve(bool require_lowmem)
{
	struct page *page;

	if (list_empty(&ballooned_pages))
		return NULL;

	page = list_entry(ballooned_pages.next, struct page, lru);
	if (require_lowmem && PageHighMem(page))
		return NULL;
	list_del(&page->lru);

	if (PageHighMem(page))
		balloon_stats.balloon_high--;
	else
		balloon_stats.balloon_low--;

	__ClearPageOffline(page);
	return page;
}

static struct page *balloon_next_page(struct page *page)
{
	struct list_head *next = page->lru.next;

	if (next == &ballooned_pages)
		return NULL;
	return list_entry(next, struct page, lru);
}

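/*
 * Feed the result of the last balloon operation back into balloon_state:
 * success resets the exponential backoff, a transient failure doubles
 * schedule_delay (capped at max_schedule_delay), and after
 * max_retry_count consecutive failures the operation is canceled.
 */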
static void update_schedule(void)
{
	if (balloon_state == BP_WAIT || balloon_state == BP_ECANCELED)
		return;

	if (balloon_state == BP_DONE) {
		balloon_stats.schedule_delay = 1;
		balloon_stats.retry_count = 1;
		return;
	}

	++balloon_stats.retry_count;

	if (balloon_stats.max_retry_count != RETRY_UNLIMITED &&
	    balloon_stats.retry_count > balloon_stats.max_retry_count) {
		balloon_stats.schedule_delay = 1;
		balloon_stats.retry_count = 1;
		balloon_state = BP_ECANCELED;
		return;
	}

	balloon_stats.schedule_delay <<= 1;

	if (balloon_stats.schedule_delay > balloon_stats.max_schedule_delay)
		balloon_stats.schedule_delay = balloon_stats.max_schedule_delay;

	balloon_state = BP_EAGAIN;
}

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
static void release_memory_resource(struct resource *resource)
{
	if (!resource)
		return;

	/*
	 * No need to reset region to identity mapped since we now
	 * know that no I/O can be in this region
	 */
	release_resource(resource);
	kfree(resource);
}

static struct resource *additional_memory_resource(phys_addr_t size)
{
	struct resource *res;
	int ret;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return NULL;

	res->name = "System RAM";
	res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	ret = allocate_resource(&iomem_resource, res,
				size, 0, -1,
				PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
	if (ret < 0) {
		pr_err("Cannot allocate new System RAM resource\n");
		kfree(res);
		return NULL;
	}

	return res;
}

static enum bp_state reserve_additional_memory(void)
{
	long credit;
	struct resource *resource;
	int nid, rc;
	unsigned long balloon_hotplug;

	credit = balloon_stats.target_pages + balloon_stats.target_unpopulated
		- balloon_stats.total_pages;

	/*
	 * Already hotplugged enough pages? Wait for them to be
	 * onlined.
	 */
	if (credit <= 0)
		return BP_WAIT;

	balloon_hotplug = round_up(credit, PAGES_PER_SECTION);

	resource = additional_memory_resource(balloon_hotplug * PAGE_SIZE);
	if (!resource)
		goto err;

	nid = memory_add_physaddr_to_nid(resource->start);

#ifdef CONFIG_XEN_HAVE_PVMMU
	/*
	 * We don't support PV MMU when Linux and Xen are using
	 * different page granularities.
	 */
	BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);

	/*
	 * add_memory() will build page tables for the new memory so
	 * the p2m must contain invalid entries so the correct
	 * non-present PTEs will be written.
	 *
	 * If a failure occurs, the original (identity) p2m entries
	 * are not restored since this region is now known not to
	 * conflict with any devices.
	 */
	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		unsigned long pfn, i;

		pfn = PFN_DOWN(resource->start);
		for (i = 0; i < balloon_hotplug; i++) {
			if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
				pr_warn("set_phys_to_machine() failed, no memory added\n");
				goto err;
			}
		}
	}
#endif

	/*
	 * add_memory_resource() will call online_pages(), which in turn
	 * will call the xen_online_page() callback, causing a deadlock
	 * if we don't release balloon_mutex here. Unlocking here is safe
	 * because the callers drop the mutex before trying again.
	 */
	mutex_unlock(&balloon_mutex);
	/* add_memory_resource() requires the device_hotplug lock */
	lock_device_hotplug();
	rc = add_memory_resource(nid, resource, MHP_MERGE_RESOURCE);
	unlock_device_hotplug();
	mutex_lock(&balloon_mutex);

	if (rc) {
		pr_warn("Cannot add additional memory (%i)\n", rc);
		goto err;
	}

	balloon_stats.total_pages += balloon_hotplug;

	return BP_WAIT;
 err:
	release_memory_resource(resource);
	return BP_ECANCELED;
}

static void xen_online_page(struct page *page, unsigned int order)
{
	unsigned long i, size = (1 << order);
	unsigned long start_pfn = page_to_pfn(page);
	struct page *p;

	pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn);
	mutex_lock(&balloon_mutex);
	for (i = 0; i < size; i++) {
		p = pfn_to_page(start_pfn + i);
		balloon_append(p);
	}
	mutex_unlock(&balloon_mutex);
}

static int xen_memory_notifier(struct notifier_block *nb, unsigned long val, void *v)
{
	if (val == MEM_ONLINE)
		wake_up(&balloon_thread_wq);

	return NOTIFY_OK;
}

static struct notifier_block xen_memory_nb = {
	.notifier_call = xen_memory_notifier,
	.priority = 0
};
#else
static enum bp_state reserve_additional_memory(void)
{
	balloon_stats.target_pages = balloon_stats.current_pages +
				     balloon_stats.target_unpopulated;
	return BP_ECANCELED;
}
#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */

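/*
 * Positive credit: the domain is below target and pages should be
 * reclaimed into it; negative credit: pages must be ballooned out
 * back to Xen.
 */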
static long current_credit(void)
{
	return balloon_stats.target_pages - balloon_stats.current_pages;
}

static bool balloon_is_inflated(void)
{
	return balloon_stats.balloon_low || balloon_stats.balloon_high;
}

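/*
 * Ask Xen to re-populate up to nr_pages ballooned frames (at most one
 * frame_list batch per call) and release the backing pages to the
 * page allocator.
 */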
static enum bp_state increase_reservation(unsigned long nr_pages)
{
	int rc;
	unsigned long i;
	struct page *page;

	if (nr_pages > ARRAY_SIZE(frame_list))
		nr_pages = ARRAY_SIZE(frame_list);

	page = list_first_entry_or_null(&ballooned_pages, struct page, lru);
	for (i = 0; i < nr_pages; i++) {
		if (!page) {
			nr_pages = i;
			break;
		}

		frame_list[i] = page_to_xen_pfn(page);
		page = balloon_next_page(page);
	}

	rc = xenmem_reservation_increase(nr_pages, frame_list);
	if (rc <= 0)
		return BP_EAGAIN;

	for (i = 0; i < rc; i++) {
		page = balloon_retrieve(false);
		BUG_ON(page == NULL);

		xenmem_reservation_va_mapping_update(1, &page, &frame_list[i]);

		/*
		 * Relinquish the page back to the allocator. Note that
		 * some pages, including ones added via xen_online_page(),
		 * might not be marked reserved; free_reserved_page() will
		 * handle that.
		 */
		free_reserved_page(page);
	}

	balloon_stats.current_pages += rc;

	return BP_DONE;
}

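/*
 * Balloon out up to nr_pages pages: allocate them, scrub them, tear
 * down their VA/p2m mappings, queue them on the balloon list and hand
 * the underlying frames back to Xen.
 */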
static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
{
	enum bp_state state = BP_DONE;
	unsigned long i;
	struct page *page, *tmp;
	int ret;
	LIST_HEAD(pages);

	if (nr_pages > ARRAY_SIZE(frame_list))
		nr_pages = ARRAY_SIZE(frame_list);

	for (i = 0; i < nr_pages; i++) {
		page = alloc_page(gfp);
		if (page == NULL) {
			nr_pages = i;
			state = BP_EAGAIN;
			break;
		}
		adjust_managed_page_count(page, -1);
		xenmem_reservation_scrub_page(page);
		list_add(&page->lru, &pages);
	}

	/*
	 * Ensure that ballooned highmem pages don't have kmaps.
	 *
	 * Do this before changing the p2m as kmap_flush_unused()
	 * reads PTEs to obtain pages (and hence needs the original
	 * p2m entry).
	 */
	kmap_flush_unused();

	/*
	 * Set up the frame, update the direct mapping, invalidate the
	 * p2m entry, and add the page to the balloon.
	 */
	i = 0;
	list_for_each_entry_safe(page, tmp, &pages, lru) {
		frame_list[i++] = xen_page_to_gfn(page);

		xenmem_reservation_va_mapping_reset(1, &page);

		list_del(&page->lru);

		balloon_append(page);
	}

	flush_tlb_all();

	ret = xenmem_reservation_decrease(nr_pages, frame_list);
	BUG_ON(ret != nr_pages);

	balloon_stats.current_pages -= nr_pages;

	return state;
}

/*
 * Stop waiting if either state is BP_DONE and ballooning action is
 * needed, or if the credit has changed while state is not BP_DONE.
 */
static bool balloon_thread_cond(long credit)
{
	if (balloon_state == BP_DONE)
		credit = 0;

	return current_credit() != credit || kthread_should_stop();
}

/*
 * As this is a kthread, it is guaranteed to run as a single instance only.
 * We may of course race updates of the target counts (which are protected
 * by the balloon lock), or with changes to the Xen hard limit, but we will
 * recover from these in time.
 */
static int balloon_thread(void *unused)
{
	long credit;
	unsigned long timeout;

	set_freezable();
	for (;;) {
		switch (balloon_state) {
		case BP_DONE:
		case BP_ECANCELED:
			timeout = 3600 * HZ;
			break;
		case BP_EAGAIN:
			timeout = balloon_stats.schedule_delay * HZ;
			break;
		case BP_WAIT:
			timeout = HZ;
			break;
		}

		credit = current_credit();

		wait_event_freezable_timeout(balloon_thread_wq,
					     balloon_thread_cond(credit),
					     timeout);

		if (kthread_should_stop())
			return 0;

		mutex_lock(&balloon_mutex);

		credit = current_credit();

		if (credit > 0) {
			if (balloon_is_inflated())
				balloon_state = increase_reservation(credit);
			else
				balloon_state = reserve_additional_memory();
		}

		if (credit < 0) {
			long n_pages;

			n_pages = min(-credit, si_mem_available());
			balloon_state = decrease_reservation(n_pages,
							     GFP_BALLOON);
			if (balloon_state == BP_DONE && n_pages != -credit &&
			    n_pages < totalreserve_pages)
				balloon_state = BP_EAGAIN;
		}

		update_schedule();

		mutex_unlock(&balloon_mutex);

		cond_resched();
	}
}

/* Resets the Xen limit, sets new target, and kicks off processing. */
void balloon_set_new_target(unsigned long target)
{
	/* No need for a lock: these are not read-modify-write updates. */
	balloon_stats.target_pages = target;
	wake_up(&balloon_thread_wq);
}
EXPORT_SYMBOL_GPL(balloon_set_new_target);
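/*
 * The new target is expressed in pages; it is typically driven by the
 * toolstack, e.g. via the xenstore "memory/target" watch in
 * xen-balloon.c.
 */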

static int add_ballooned_pages(unsigned int nr_pages)
{
	enum bp_state st;

	if (xen_hotplug_unpopulated) {
		st = reserve_additional_memory();
		if (st != BP_ECANCELED) {
			int rc;

			mutex_unlock(&balloon_mutex);
			rc = wait_event_interruptible(balloon_wq,
					!list_empty(&ballooned_pages));
			mutex_lock(&balloon_mutex);
			return rc ? -ENOMEM : 0;
		}
	}

	if (si_mem_available() < nr_pages)
		return -ENOMEM;

	st = decrease_reservation(nr_pages, GFP_USER);
	if (st != BP_DONE)
		return -ENOMEM;

	return 0;
}

/**
 * xen_alloc_ballooned_pages - get pages that have been ballooned out
 * @nr_pages: Number of pages to get
 * @pages: pages returned
 * @return 0 on success, error otherwise
 */
int xen_alloc_ballooned_pages(unsigned int nr_pages, struct page **pages)
{
	unsigned int pgno = 0;
	struct page *page;
	int ret;

	mutex_lock(&balloon_mutex);

	balloon_stats.target_unpopulated += nr_pages;

	while (pgno < nr_pages) {
		page = balloon_retrieve(true);
		if (page) {
			pages[pgno++] = page;
#ifdef CONFIG_XEN_HAVE_PVMMU
			/*
			 * We don't support PV MMU when Linux and Xen are
			 * using different page granularities.
			 */
			BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);

			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
				ret = xen_alloc_p2m_entry(page_to_pfn(page));
				if (ret < 0)
					goto out_undo;
			}
#endif
		} else {
			ret = add_ballooned_pages(nr_pages - pgno);
			if (ret < 0)
				goto out_undo;
		}
	}
	mutex_unlock(&balloon_mutex);
	return 0;
 out_undo:
	mutex_unlock(&balloon_mutex);
	xen_free_ballooned_pages(pgno, pages);
	/*
	 * NB: xen_free_ballooned_pages will only subtract pgno pages, but
	 * since target_unpopulated is incremented with nr_pages at the start
	 * we need to remove the remaining ones as well, or the accounting
	 * will be wrong.
	 */
	balloon_stats.target_unpopulated -= nr_pages - pgno;
	return ret;
}
EXPORT_SYMBOL(xen_alloc_ballooned_pages);
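/*
 * Example usage (an illustrative sketch only, not an in-tree caller):
 * a driver that needs guest frames without backing RAM, e.g. as targets
 * for mappings of foreign grants, could do:
 *
 *	struct page *pages[8];
 *	int rc = xen_alloc_ballooned_pages(ARRAY_SIZE(pages), pages);
 *
 *	if (!rc) {
 *		map_foreign_frames(pages);	// hypothetical consumer
 *		xen_free_ballooned_pages(ARRAY_SIZE(pages), pages);
 *	}
 */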

/**
 * xen_free_ballooned_pages - return pages retrieved with xen_alloc_ballooned_pages()
 * @nr_pages: Number of pages
 * @pages: pages to return
 */
void xen_free_ballooned_pages(unsigned int nr_pages, struct page **pages)
{
	unsigned int i;

	mutex_lock(&balloon_mutex);

	for (i = 0; i < nr_pages; i++) {
		if (pages[i])
			balloon_append(pages[i]);
	}

	balloon_stats.target_unpopulated -= nr_pages;

	/* The balloon may be too large now. Shrink it if needed. */
	if (current_credit())
		wake_up(&balloon_thread_wq);

	mutex_unlock(&balloon_mutex);
}
EXPORT_SYMBOL(xen_free_ballooned_pages);

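/*
 * Initialize the balloon with pages from the extra memory regions
 * declared by the hypervisor at boot (see arch/x86/xen/setup.c).
 */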
static void __init balloon_add_regions(void)
{
	unsigned long start_pfn, pages;
	unsigned long pfn, extra_pfn_end;
	unsigned int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		pages = xen_extra_mem[i].n_pfns;
		if (!pages)
			continue;

		start_pfn = xen_extra_mem[i].start_pfn;

		/*
		 * If the amount of usable memory has been limited (e.g.,
		 * with the 'mem' command line parameter), don't add pages
		 * beyond this limit.
		 */
		extra_pfn_end = min(max_pfn, start_pfn + pages);

		for (pfn = start_pfn; pfn < extra_pfn_end; pfn++)
			balloon_append(pfn_to_page(pfn));

		balloon_stats.total_pages += extra_pfn_end - start_pfn;
	}
}

static int __init balloon_init(void)
{
	struct task_struct *task;

	if (!xen_domain())
		return -ENODEV;

	pr_info("Initialising balloon driver\n");

#ifdef CONFIG_XEN_PV
	balloon_stats.current_pages = xen_pv_domain()
		? min(xen_start_info->nr_pages - xen_released_pages, max_pfn)
		: get_num_physpages();
#else
	balloon_stats.current_pages = get_num_physpages();
#endif
	balloon_stats.target_pages = balloon_stats.current_pages;
	balloon_stats.balloon_low = 0;
	balloon_stats.balloon_high = 0;
	balloon_stats.total_pages = balloon_stats.current_pages;

	balloon_stats.schedule_delay = 1;
	balloon_stats.max_schedule_delay = 32;
	balloon_stats.retry_count = 1;
	balloon_stats.max_retry_count = 4;

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
	set_online_page_callback(&xen_online_page);
	register_memory_notifier(&xen_memory_nb);
	register_sysctl_init("xen/balloon", balloon_table);
#endif

	balloon_add_regions();

	task = kthread_run(balloon_thread, NULL, "xen-balloon");
	if (IS_ERR(task)) {
		pr_err("xen-balloon thread could not be started, ballooning will not work!\n");
		return PTR_ERR(task);
	}

	/* Init the xen-balloon driver. */
	xen_balloon_init();

	return 0;
}
subsys_initcall(balloon_init);

static int __init balloon_wait_finish(void)
{
	long credit, last_credit = 0;
	unsigned long last_changed = 0;

	if (!xen_domain())
		return -ENODEV;

	/* PV guests don't need to wait. */
	if (xen_pv_domain() || !current_credit())
		return 0;

	pr_notice("Waiting for initial ballooning down to finish.\n");

	while ((credit = current_credit()) < 0) {
		if (credit != last_credit) {
			last_changed = jiffies;
			last_credit = credit;
		}
		if (balloon_state == BP_ECANCELED) {
			pr_warn_once("Initial ballooning failed, %ld pages need to be freed.\n",
				     -credit);
			if (time_is_before_eq_jiffies(last_changed + HZ * balloon_boot_timeout))
				panic("Initial ballooning failed!\n");
		}

		schedule_timeout_interruptible(HZ / 10);
	}

	pr_notice("Initial ballooning down finished.\n");

	return 0;
}
late_initcall_sync(balloon_wait_finish);