/******************************************************************************
 * Xen balloon driver - enables returning/claiming memory to/from Xen.
 *
 * Copyright (c) 2003, B Dragovic
 * Copyright (c) 2003-2004, M Williamson, K Fraser
 * Copyright (c) 2005 Dan M. Smith, IBM Corporation
 * Copyright (c) 2010 Daniel Kiper
 *
 * Memory hotplug support was written by Daniel Kiper. Work on
 * it was sponsored by Google under Google Summer of Code 2010
 * program. Jeremy Fitzhardinge from Citrix was the mentor for
 * this project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/notifier.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/moduleparam.h>
#include <linux/jiffies.h>

#include <asm/page.h>
#include <asm/tlb.h>

#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/balloon.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/mem-reservation.h>

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "xen."

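/*
 * With MODULE_PARAM_PREFIX set to "xen." above, this is tunable on the
 * kernel command line as xen.balloon_boot_timeout=<seconds>; the 0444
 * permissions make it read-only at runtime.
 */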
static uint __read_mostly balloon_boot_timeout = 180;
module_param(balloon_boot_timeout, uint, 0444);

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
static int xen_hotplug_unpopulated;

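/*
 * Exposed via sysctl as /proc/sys/xen/balloon/hotplug_unpopulated
 * (registered with register_sysctl_init() in balloon_init() below).
 */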
static struct ctl_table balloon_table[] = {
        {
                .procname = "hotplug_unpopulated",
                .data = &xen_hotplug_unpopulated,
                .maxlen = sizeof(int),
                .mode = 0644,
                .proc_handler = proc_dointvec_minmax,
                .extra1 = SYSCTL_ZERO,
                .extra2 = SYSCTL_ONE,
        },
};

#else
#define xen_hotplug_unpopulated 0
#endif

/*
 * Use one extent per PAGE_SIZE to avoid breaking down the page into
 * multiple frames.
 */
#define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)
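/*
 * E.g. with 4 KiB Linux pages on 4 KiB Xen pages, XEN_PFN_PER_PAGE == 1
 * and the extent order is fls(1) - 1 == 0; with 64 KiB Linux pages on
 * 4 KiB Xen pages it is fls(16) - 1 == 4.
 */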

/*
 * balloon_thread() state:
 *
 * BP_DONE: done or nothing to do,
 * BP_WAIT: wait to be rescheduled,
 * BP_EAGAIN: error, go to sleep,
 * BP_ECANCELED: error, balloon operation canceled.
 */

static enum bp_state {
        BP_DONE,
        BP_WAIT,
        BP_EAGAIN,
        BP_ECANCELED
} balloon_state = BP_DONE;

/* Main waiting point for xen-balloon thread. */
static DECLARE_WAIT_QUEUE_HEAD(balloon_thread_wq);

static DEFINE_MUTEX(balloon_mutex);

struct balloon_stats balloon_stats;
EXPORT_SYMBOL_GPL(balloon_stats);

/* We increase/decrease in batches which fit in a page */
static xen_pfn_t frame_list[PAGE_SIZE / sizeof(xen_pfn_t)];
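/* With 4 KiB pages and 8-byte xen_pfn_t entries this holds 512 frames per batch. */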


/* List of ballooned pages, threaded through the mem_map array. */
static LIST_HEAD(ballooned_pages);
static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);

/*
 * When ballooning out (allocating memory to return to Xen) we don't really
 * want the kernel to try too hard since that can trigger the oom killer.
 */
#define GFP_BALLOON \
        (GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)

/* balloon_append: add the given page to the balloon. */
static void balloon_append(struct page *page)
{
        __SetPageOffline(page);

        /* Lowmem is re-populated first, so highmem pages go at list tail. */
        if (PageHighMem(page)) {
                list_add_tail(&page->lru, &ballooned_pages);
                balloon_stats.balloon_high++;
        } else {
                list_add(&page->lru, &ballooned_pages);
                balloon_stats.balloon_low++;
        }
        wake_up(&balloon_wq);
}

/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
static struct page *balloon_retrieve(bool require_lowmem)
{
        struct page *page;

        if (list_empty(&ballooned_pages))
                return NULL;

        page = list_entry(ballooned_pages.next, struct page, lru);
        if (require_lowmem && PageHighMem(page))
                return NULL;
        list_del(&page->lru);

        if (PageHighMem(page))
                balloon_stats.balloon_high--;
        else
                balloon_stats.balloon_low--;

        __ClearPageOffline(page);
        return page;
}

static struct page *balloon_next_page(struct page *page)
{
        struct list_head *next = page->lru.next;

        if (next == &ballooned_pages)
                return NULL;
        return list_entry(next, struct page, lru);
}

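/*
 * Fold the result of the last balloon operation into balloon_state and
 * the retry bookkeeping: reset the backoff on success, otherwise double
 * schedule_delay (capped at max_schedule_delay) and cancel the operation
 * once retry_count exceeds max_retry_count.
 */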
static void update_schedule(void)
{
        if (balloon_state == BP_WAIT || balloon_state == BP_ECANCELED)
                return;

        if (balloon_state == BP_DONE) {
                balloon_stats.schedule_delay = 1;
                balloon_stats.retry_count = 1;
                return;
        }

        ++balloon_stats.retry_count;

        if (balloon_stats.max_retry_count != RETRY_UNLIMITED &&
            balloon_stats.retry_count > balloon_stats.max_retry_count) {
                balloon_stats.schedule_delay = 1;
                balloon_stats.retry_count = 1;
                balloon_state = BP_ECANCELED;
                return;
        }

        balloon_stats.schedule_delay <<= 1;

        if (balloon_stats.schedule_delay > balloon_stats.max_schedule_delay)
                balloon_stats.schedule_delay = balloon_stats.max_schedule_delay;

        balloon_state = BP_EAGAIN;
}

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
static void release_memory_resource(struct resource *resource)
{
        if (!resource)
                return;

        /*
         * No need to reset region to identity mapped since we now
         * know that no I/O can be in this region
         */
        release_resource(resource);
        kfree(resource);
}

static struct resource *additional_memory_resource(phys_addr_t size)
{
        struct resource *res;
        int ret;

        res = kzalloc(sizeof(*res), GFP_KERNEL);
        if (!res)
                return NULL;

        res->name = "System RAM";
        res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

        ret = allocate_resource(&iomem_resource, res,
                                size, 0, -1,
                                PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
        if (ret < 0) {
                pr_err("Cannot allocate new System RAM resource\n");
                kfree(res);
                return NULL;
        }

        return res;
}

static enum bp_state reserve_additional_memory(void)
{
        long credit;
        struct resource *resource;
        int nid, rc;
        unsigned long balloon_hotplug;

        credit = balloon_stats.target_pages + balloon_stats.target_unpopulated
                - balloon_stats.total_pages;

        /*
         * Already hotplugged enough pages? Wait for them to be
         * onlined.
         */
        if (credit <= 0)
                return BP_WAIT;

        balloon_hotplug = round_up(credit, PAGES_PER_SECTION);

        resource = additional_memory_resource(balloon_hotplug * PAGE_SIZE);
        if (!resource)
                goto err;

        nid = memory_add_physaddr_to_nid(resource->start);

#ifdef CONFIG_XEN_HAVE_PVMMU
        /*
         * We don't support PV MMU when Linux and Xen are using
         * different page granularity.
         */
        BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);

        /*
         * add_memory() will build page tables for the new memory so
         * the p2m must contain invalid entries so the correct
         * non-present PTEs will be written.
         *
         * If a failure occurs, the original (identity) p2m entries
         * are not restored since this region is now known not to
         * conflict with any devices.
         */
        if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                unsigned long pfn, i;

                pfn = PFN_DOWN(resource->start);
                for (i = 0; i < balloon_hotplug; i++) {
                        if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
                                pr_warn("set_phys_to_machine() failed, no memory added\n");
                                goto err;
                        }
                }
        }
#endif

        /*
         * add_memory_resource() will call online_pages(), which in turn
         * calls the xen_online_page() callback, causing a deadlock if we
         * don't release balloon_mutex here. Unlocking here is safe because
         * the callers drop the mutex before trying again.
         */
        mutex_unlock(&balloon_mutex);
        /* add_memory_resource() requires the device_hotplug lock */
        lock_device_hotplug();
        rc = add_memory_resource(nid, resource, MHP_MERGE_RESOURCE);
        unlock_device_hotplug();
        mutex_lock(&balloon_mutex);

        if (rc) {
                pr_warn("Cannot add additional memory (%i)\n", rc);
                goto err;
        }

        balloon_stats.total_pages += balloon_hotplug;

        return BP_WAIT;
 err:
        release_memory_resource(resource);
        return BP_ECANCELED;
}

static void xen_online_page(struct page *page, unsigned int order)
{
        unsigned long i, size = (1 << order);
        unsigned long start_pfn = page_to_pfn(page);
        struct page *p;

        pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn);
        mutex_lock(&balloon_mutex);
        for (i = 0; i < size; i++) {
                p = pfn_to_page(start_pfn + i);
                balloon_append(p);
        }
        mutex_unlock(&balloon_mutex);
}

static int xen_memory_notifier(struct notifier_block *nb, unsigned long val, void *v)
{
        if (val == MEM_ONLINE)
                wake_up(&balloon_thread_wq);

        return NOTIFY_OK;
}

static struct notifier_block xen_memory_nb = {
        .notifier_call = xen_memory_notifier,
        .priority = 0
};
#else
static enum bp_state reserve_additional_memory(void)
{
        balloon_stats.target_pages = balloon_stats.current_pages +
                                     balloon_stats.target_unpopulated;
        return BP_ECANCELED;
}
#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */

static long current_credit(void)
{
        return balloon_stats.target_pages - balloon_stats.current_pages;
}

static bool balloon_is_inflated(void)
{
        return balloon_stats.balloon_low || balloon_stats.balloon_high;
}

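/*
 * Deflate the balloon: ask Xen to populate up to nr_pages ballooned-out
 * frames again and hand the backing pages back to the page allocator.
 * At most one frame_list batch is processed per call.
 */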
static enum bp_state increase_reservation(unsigned long nr_pages)
{
        int rc;
        unsigned long i;
        struct page *page;

        if (nr_pages > ARRAY_SIZE(frame_list))
                nr_pages = ARRAY_SIZE(frame_list);

        page = list_first_entry_or_null(&ballooned_pages, struct page, lru);
        for (i = 0; i < nr_pages; i++) {
                if (!page) {
                        nr_pages = i;
                        break;
                }

                frame_list[i] = page_to_xen_pfn(page);
                page = balloon_next_page(page);
        }

        rc = xenmem_reservation_increase(nr_pages, frame_list);
        if (rc <= 0)
                return BP_EAGAIN;

        for (i = 0; i < rc; i++) {
                page = balloon_retrieve(false);
                BUG_ON(page == NULL);

                xenmem_reservation_va_mapping_update(1, &page, &frame_list[i]);

                /* Relinquish the page back to the allocator. */
                free_reserved_page(page);
        }

        balloon_stats.current_pages += rc;

        return BP_DONE;
}

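/*
 * Inflate the balloon: allocate up to nr_pages pages, scrub them, tear
 * down their mappings and return the underlying frames to Xen. As with
 * increase_reservation(), at most one frame_list batch per call.
 */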
static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
{
        enum bp_state state = BP_DONE;
        unsigned long i;
        struct page *page, *tmp;
        int ret;
        LIST_HEAD(pages);

        if (nr_pages > ARRAY_SIZE(frame_list))
                nr_pages = ARRAY_SIZE(frame_list);

        for (i = 0; i < nr_pages; i++) {
                page = alloc_page(gfp);
                if (page == NULL) {
                        nr_pages = i;
                        state = BP_EAGAIN;
                        break;
                }
                adjust_managed_page_count(page, -1);
                xenmem_reservation_scrub_page(page);
                list_add(&page->lru, &pages);
        }

        /*
         * Ensure that ballooned highmem pages don't have kmaps.
         *
         * Do this before changing the p2m as kmap_flush_unused()
         * reads PTEs to obtain pages (and hence needs the original
         * p2m entry).
         */
        kmap_flush_unused();

        /*
         * Set up the frame list, update the direct mapping, invalidate the
         * P2M, and add the pages to the balloon.
         */
        i = 0;
        list_for_each_entry_safe(page, tmp, &pages, lru) {
                frame_list[i++] = xen_page_to_gfn(page);

                xenmem_reservation_va_mapping_reset(1, &page);

                list_del(&page->lru);

                balloon_append(page);
        }

        flush_tlb_all();

        ret = xenmem_reservation_decrease(nr_pages, frame_list);
        BUG_ON(ret != nr_pages);

        balloon_stats.current_pages -= nr_pages;

        return state;
}

/*
 * Stop waiting if either state is BP_DONE and ballooning action is
 * needed, or if the credit has changed while state is not BP_DONE.
 */
static bool balloon_thread_cond(long credit)
{
        if (balloon_state == BP_DONE)
                credit = 0;

        return current_credit() != credit || kthread_should_stop();
}

/*
 * As this is a kthread it is guaranteed to run as a single instance only.
 * We may of course race updates of the target counts (which are protected
 * by the balloon lock), or with changes to the Xen hard limit, but we will
 * recover from these in time.
 */
static int balloon_thread(void *unused)
{
        long credit;
        unsigned long timeout;

        set_freezable();
        for (;;) {
                switch (balloon_state) {
                case BP_DONE:
                case BP_ECANCELED:
                        timeout = 3600 * HZ;
                        break;
                case BP_EAGAIN:
                        timeout = balloon_stats.schedule_delay * HZ;
                        break;
                case BP_WAIT:
                        timeout = HZ;
                        break;
                }

                credit = current_credit();

                wait_event_freezable_timeout(balloon_thread_wq,
                                             balloon_thread_cond(credit), timeout);

                if (kthread_should_stop())
                        return 0;

                mutex_lock(&balloon_mutex);

                credit = current_credit();

                if (credit > 0) {
                        if (balloon_is_inflated())
                                balloon_state = increase_reservation(credit);
                        else
                                balloon_state = reserve_additional_memory();
                }

                if (credit < 0) {
                        long n_pages;

                        n_pages = min(-credit, si_mem_available());
                        balloon_state = decrease_reservation(n_pages,
                                                             GFP_BALLOON);
                        if (balloon_state == BP_DONE && n_pages != -credit &&
                            n_pages < totalreserve_pages)
                                balloon_state = BP_EAGAIN;
                }

                update_schedule();

                mutex_unlock(&balloon_mutex);

                cond_resched();
        }
}

/* Resets the Xen limit, sets new target, and kicks off processing. */
void balloon_set_new_target(unsigned long target)
{
        /* No need for lock. Not read-modify-write updates. */
        balloon_stats.target_pages = target;
        wake_up(&balloon_thread_wq);
}
EXPORT_SYMBOL_GPL(balloon_set_new_target);

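/*
 * Make more ballooned pages available: either hotplug unpopulated memory
 * (if xen_hotplug_unpopulated is set) and wait for it to be onlined, or
 * balloon out nr_pages freshly allocated pages.
 */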
static int add_ballooned_pages(unsigned int nr_pages)
{
        enum bp_state st;

        if (xen_hotplug_unpopulated) {
                st = reserve_additional_memory();
                if (st != BP_ECANCELED) {
                        int rc;

                        mutex_unlock(&balloon_mutex);
                        rc = wait_event_interruptible(balloon_wq,
                                        !list_empty(&ballooned_pages));
                        mutex_lock(&balloon_mutex);
                        return rc ? -ENOMEM : 0;
                }
        }

        if (si_mem_available() < nr_pages)
                return -ENOMEM;

        st = decrease_reservation(nr_pages, GFP_USER);
        if (st != BP_DONE)
                return -ENOMEM;

        return 0;
}

/**
 * xen_alloc_ballooned_pages - get pages that have been ballooned out
 * @nr_pages: Number of pages to get
 * @pages: pages returned
 * @return 0 on success, error otherwise
 */
int xen_alloc_ballooned_pages(unsigned int nr_pages, struct page **pages)
{
        unsigned int pgno = 0;
        struct page *page;
        int ret;

        mutex_lock(&balloon_mutex);

        balloon_stats.target_unpopulated += nr_pages;

        while (pgno < nr_pages) {
                page = balloon_retrieve(true);
                if (page) {
                        pages[pgno++] = page;
#ifdef CONFIG_XEN_HAVE_PVMMU
                        /*
                         * We don't support PV MMU when Linux and Xen are
                         * using different page granularity.
                         */
                        BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);

                        if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                                ret = xen_alloc_p2m_entry(page_to_pfn(page));
                                if (ret < 0)
                                        goto out_undo;
                        }
#endif
                } else {
                        ret = add_ballooned_pages(nr_pages - pgno);
                        if (ret < 0)
                                goto out_undo;
                }
        }
        mutex_unlock(&balloon_mutex);
        return 0;
 out_undo:
        mutex_unlock(&balloon_mutex);
        xen_free_ballooned_pages(pgno, pages);
        /*
         * NB: xen_free_ballooned_pages will only subtract pgno pages, but since
         * target_unpopulated is incremented with nr_pages at the start we need
         * to remove the remaining ones also, or accounting will be screwed.
         */
        balloon_stats.target_unpopulated -= nr_pages - pgno;
        return ret;
}
EXPORT_SYMBOL(xen_alloc_ballooned_pages);

/**
 * xen_free_ballooned_pages - return pages retrieved with xen_alloc_ballooned_pages
 * @nr_pages: Number of pages
 * @pages: pages to return
 */
void xen_free_ballooned_pages(unsigned int nr_pages, struct page **pages)
{
        unsigned int i;

        mutex_lock(&balloon_mutex);

        for (i = 0; i < nr_pages; i++) {
                if (pages[i])
                        balloon_append(pages[i]);
        }

        balloon_stats.target_unpopulated -= nr_pages;

        /* The balloon may be too large now. Shrink it if needed. */
        if (current_credit())
                wake_up(&balloon_thread_wq);

        mutex_unlock(&balloon_mutex);
}
EXPORT_SYMBOL(xen_free_ballooned_pages);

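/*
 * Seed the balloon with the pages of the extra memory regions set up at
 * boot (see arch/x86/xen/setup.c); those frames are present in the
 * memory map but not populated.
 */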
static void __init balloon_add_regions(void)
{
#if defined(CONFIG_XEN_PV)
        unsigned long start_pfn, pages;
        unsigned long pfn, extra_pfn_end;
        unsigned int i;

        for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
                pages = xen_extra_mem[i].n_pfns;
                if (!pages)
                        continue;

                start_pfn = xen_extra_mem[i].start_pfn;

                /*
                 * If the amount of usable memory has been limited (e.g., with
                 * the 'mem' command line parameter), don't add pages beyond
                 * this limit.
                 */
                extra_pfn_end = min(max_pfn, start_pfn + pages);

                for (pfn = start_pfn; pfn < extra_pfn_end; pfn++)
                        balloon_append(pfn_to_page(pfn));

                balloon_stats.total_pages += extra_pfn_end - start_pfn;
        }
#endif
}

static int __init balloon_init(void)
{
        struct task_struct *task;

        if (!xen_domain())
                return -ENODEV;

        pr_info("Initialising balloon driver\n");

#ifdef CONFIG_XEN_PV
        balloon_stats.current_pages = xen_pv_domain()
                ? min(xen_start_info->nr_pages - xen_released_pages, max_pfn)
                : get_num_physpages();
#else
        balloon_stats.current_pages = get_num_physpages();
#endif
        balloon_stats.target_pages = balloon_stats.current_pages;
        balloon_stats.balloon_low = 0;
        balloon_stats.balloon_high = 0;
        balloon_stats.total_pages = balloon_stats.current_pages;

        balloon_stats.schedule_delay = 1;
        balloon_stats.max_schedule_delay = 32;
        balloon_stats.retry_count = 1;
        balloon_stats.max_retry_count = 4;

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
        set_online_page_callback(&xen_online_page);
        register_memory_notifier(&xen_memory_nb);
        register_sysctl_init("xen/balloon", balloon_table);
#endif

        balloon_add_regions();

        task = kthread_run(balloon_thread, NULL, "xen-balloon");
        if (IS_ERR(task)) {
                pr_err("xen-balloon thread could not be started, ballooning will not work!\n");
                return PTR_ERR(task);
        }

        /* Init the xen-balloon driver. */
        xen_balloon_init();

        return 0;
}
subsys_initcall(balloon_init);

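/*
 * Wait for the initial balloon-down on non-PV guests whose boot target is
 * below the boot allocation; panic if ballooning was canceled and no
 * progress has been made for balloon_boot_timeout seconds.
 */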
static int __init balloon_wait_finish(void)
{
        long credit, last_credit = 0;
        unsigned long last_changed = 0;

        if (!xen_domain())
                return -ENODEV;

        /* PV guests don't need to wait. */
        if (xen_pv_domain() || !current_credit())
                return 0;

        pr_notice("Waiting for initial ballooning down to finish.\n");

        while ((credit = current_credit()) < 0) {
                if (credit != last_credit) {
                        last_changed = jiffies;
                        last_credit = credit;
                }
                if (balloon_state == BP_ECANCELED) {
                        pr_warn_once("Initial ballooning failed, %ld pages need to be freed.\n",
                                     -credit);
                        if (time_is_before_eq_jiffies(last_changed + HZ * balloon_boot_timeout))
                                panic("Initial ballooning failed!\n");
                }

                schedule_timeout_interruptible(HZ / 10);
        }

        pr_notice("Initial ballooning down finished.\n");

        return 0;
}
late_initcall_sync(balloon_wait_finish);