v4.17
  1/******************************************************************************
  2 * Xen balloon driver - enables returning/claiming memory to/from Xen.
  3 *
  4 * Copyright (c) 2003, B Dragovic
  5 * Copyright (c) 2003-2004, M Williamson, K Fraser
  6 * Copyright (c) 2005 Dan M. Smith, IBM Corporation
  7 * Copyright (c) 2010 Daniel Kiper
  8 *
  9 * Memory hotplug support was written by Daniel Kiper. Work on
 10 * it was sponsored by Google under Google Summer of Code 2010
 11 * program. Jeremy Fitzhardinge from Citrix was the mentor for
 12 * this project.
 13 *
 14 * This program is free software; you can redistribute it and/or
 15 * modify it under the terms of the GNU General Public License version 2
 16 * as published by the Free Software Foundation; or, when distributed
 17 * separately from the Linux kernel or incorporated into other
 18 * software packages, subject to the following license:
 19 *
 20 * Permission is hereby granted, free of charge, to any person obtaining a copy
 21 * of this source file (the "Software"), to deal in the Software without
 22 * restriction, including without limitation the rights to use, copy, modify,
 23 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 24 * and to permit persons to whom the Software is furnished to do so, subject to
 25 * the following conditions:
 26 *
 27 * The above copyright notice and this permission notice shall be included in
 28 * all copies or substantial portions of the Software.
 29 *
 30 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 31 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 32 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 33 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 34 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 35 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 36 * IN THE SOFTWARE.
 37 */
 38
 39#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
 40
 41#include <linux/cpu.h>
 42#include <linux/kernel.h>
 43#include <linux/sched.h>
 44#include <linux/cred.h>
 45#include <linux/errno.h>
 46#include <linux/mm.h>
 47#include <linux/bootmem.h>
 48#include <linux/pagemap.h>
 49#include <linux/highmem.h>
 50#include <linux/mutex.h>
 51#include <linux/list.h>
 52#include <linux/gfp.h>
 53#include <linux/notifier.h>
 54#include <linux/memory.h>
 55#include <linux/memory_hotplug.h>
 56#include <linux/percpu-defs.h>
 57#include <linux/slab.h>
 58#include <linux/sysctl.h>
 59
 60#include <asm/page.h>
 61#include <asm/pgalloc.h>
 62#include <asm/pgtable.h>
 63#include <asm/tlb.h>
 64
 65#include <asm/xen/hypervisor.h>
 66#include <asm/xen/hypercall.h>
 67
 68#include <xen/xen.h>
 69#include <xen/interface/xen.h>
 70#include <xen/interface/memory.h>
 71#include <xen/balloon.h>
 72#include <xen/features.h>
 73#include <xen/page.h>
 74
 75static int xen_hotplug_unpopulated;
 76
 77#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
 78
 79static int zero;
 80static int one = 1;
 81
 82static struct ctl_table balloon_table[] = {
 83	{
 84		.procname	= "hotplug_unpopulated",
 85		.data		= &xen_hotplug_unpopulated,
 86		.maxlen		= sizeof(int),
 87		.mode		= 0644,
 88		.proc_handler	= proc_dointvec_minmax,
 89		.extra1         = &zero,
 90		.extra2         = &one,
 91	},
 92	{ }
 93};
 94
 95static struct ctl_table balloon_root[] = {
 96	{
 97		.procname	= "balloon",
 98		.mode		= 0555,
 99		.child		= balloon_table,
100	},
101	{ }
102};
103
104static struct ctl_table xen_root[] = {
105	{
106		.procname	= "xen",
107		.mode		= 0555,
108		.child		= balloon_root,
109	},
110	{ }
111};
112
113#endif
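/*
 * The chained tables above surface the knob as
 * /proc/sys/xen/balloon/hotplug_unpopulated; extra1/extra2 clamp the
 * accepted values to 0 and 1.
 */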
114
115/*
 116 * Use one extent per PAGE_SIZE to avoid breaking the page down into
 117 * multiple frames.
118 */
119#define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)
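/*
 * On x86 both Linux and Xen use 4 KiB pages, so XEN_PFN_PER_PAGE is 1
 * and EXTENT_ORDER evaluates to 0 (single-frame extents). With a
 * 64 KiB kernel page size on arm64, XEN_PFN_PER_PAGE is 16 and this
 * yields order-4 extents, keeping one extent per kernel page.
 */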
120
121/*
122 * balloon_process() state:
123 *
124 * BP_DONE: done or nothing to do,
125 * BP_WAIT: wait to be rescheduled,
126 * BP_EAGAIN: error, go to sleep,
127 * BP_ECANCELED: error, balloon operation canceled.
128 */
129
130enum bp_state {
131	BP_DONE,
132	BP_WAIT,
133	BP_EAGAIN,
134	BP_ECANCELED
135};
136
137
138static DEFINE_MUTEX(balloon_mutex);
139
140struct balloon_stats balloon_stats;
141EXPORT_SYMBOL_GPL(balloon_stats);
142
143/* We increase/decrease in batches which fit in a page */
144static xen_pfn_t frame_list[PAGE_SIZE / sizeof(xen_pfn_t)];
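/*
 * With 4 KiB pages and an 8-byte xen_pfn_t that is 512 frames, so the
 * balloon is inflated or deflated in batches of at most 2 MiB.
 */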
145
146
147/* List of ballooned pages, threaded through the mem_map array. */
148static LIST_HEAD(ballooned_pages);
149static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);
150
151/* Main work function, always executed in process context. */
152static void balloon_process(struct work_struct *work);
153static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
154
155/* When ballooning out (allocating memory to return to Xen) we don't really
156   want the kernel to try too hard since that can trigger the oom killer. */
157#define GFP_BALLOON \
158	(GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)
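/*
 * GFP_HIGHUSER lets ballooned-out allocations come from highmem first;
 * __GFP_NORETRY and __GFP_NOMEMALLOC keep the allocator from retrying
 * hard or dipping into emergency reserves, and __GFP_NOWARN suppresses
 * the resulting allocation-failure warnings.
 */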
159
160static void scrub_page(struct page *page)
161{
162#ifdef CONFIG_XEN_SCRUB_PAGES
163	clear_highpage(page);
164#endif
165}
166
167/* balloon_append: add the given page to the balloon. */
168static void __balloon_append(struct page *page)
169{
170	/* Lowmem is re-populated first, so highmem pages go at list tail. */
171	if (PageHighMem(page)) {
172		list_add_tail(&page->lru, &ballooned_pages);
173		balloon_stats.balloon_high++;
174	} else {
175		list_add(&page->lru, &ballooned_pages);
176		balloon_stats.balloon_low++;
177	}
178	wake_up(&balloon_wq);
179}
180
181static void balloon_append(struct page *page)
182{
183	__balloon_append(page);
184}
185
186/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
187static struct page *balloon_retrieve(bool require_lowmem)
188{
189	struct page *page;
190
191	if (list_empty(&ballooned_pages))
192		return NULL;
193
194	page = list_entry(ballooned_pages.next, struct page, lru);
195	if (require_lowmem && PageHighMem(page))
196		return NULL;
197	list_del(&page->lru);
198
199	if (PageHighMem(page))
200		balloon_stats.balloon_high--;
201	else
202		balloon_stats.balloon_low--;
203
204	return page;
205}
206
207static struct page *balloon_next_page(struct page *page)
208{
209	struct list_head *next = page->lru.next;
210	if (next == &ballooned_pages)
211		return NULL;
212	return list_entry(next, struct page, lru);
213}
214
215static enum bp_state update_schedule(enum bp_state state)
216{
217	if (state == BP_WAIT)
218		return BP_WAIT;
219
220	if (state == BP_ECANCELED)
221		return BP_ECANCELED;
222
223	if (state == BP_DONE) {
224		balloon_stats.schedule_delay = 1;
225		balloon_stats.retry_count = 1;
226		return BP_DONE;
227	}
228
229	++balloon_stats.retry_count;
230
231	if (balloon_stats.max_retry_count != RETRY_UNLIMITED &&
232			balloon_stats.retry_count > balloon_stats.max_retry_count) {
233		balloon_stats.schedule_delay = 1;
234		balloon_stats.retry_count = 1;
235		return BP_ECANCELED;
236	}
237
238	balloon_stats.schedule_delay <<= 1;
239
240	if (balloon_stats.schedule_delay > balloon_stats.max_schedule_delay)
241		balloon_stats.schedule_delay = balloon_stats.max_schedule_delay;
242
243	return BP_EAGAIN;
244}
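/*
 * With the defaults set in balloon_init() (schedule_delay = 1,
 * max_schedule_delay = 32), consecutive BP_EAGAIN results double the
 * delay before each retry: 2, 4, 8, 16, then a capped 32 seconds.
 * Any successful pass (BP_DONE) resets the delay to 1 second.
 */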
245
246#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
247static void release_memory_resource(struct resource *resource)
248{
249	if (!resource)
250		return;
251
252	/*
253	 * No need to reset region to identity mapped since we now
254	 * know that no I/O can be in this region
255	 */
256	release_resource(resource);
257	kfree(resource);
258}
259
260/*
261 * Host memory not allocated to dom0. We can use this range for hotplug-based
262 * ballooning.
263 *
264 * It's a type-less resource. Setting IORESOURCE_MEM will make resource
265 * management algorithms (arch_remove_reservations()) look into guest e820,
266 * which we don't want.
267 */
268static struct resource hostmem_resource = {
269	.name   = "Host RAM",
270};
271
272void __attribute__((weak)) __init arch_xen_balloon_init(struct resource *res)
273{}
274
275static struct resource *additional_memory_resource(phys_addr_t size)
276{
277	struct resource *res, *res_hostmem;
278	int ret = -ENOMEM;
279
280	res = kzalloc(sizeof(*res), GFP_KERNEL);
281	if (!res)
282		return NULL;
283
284	res->name = "System RAM";
285	res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
286
287	res_hostmem = kzalloc(sizeof(*res), GFP_KERNEL);
288	if (res_hostmem) {
289		/* Try to grab a range from hostmem */
290		res_hostmem->name = "Host memory";
291		ret = allocate_resource(&hostmem_resource, res_hostmem,
292					size, 0, -1,
293					PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
294	}
295
296	if (!ret) {
297		/*
 298		 * Insert this resource into iomem. Because hostmem_resource
 299		 * tracks the portion of the guest e820 marked as UNUSABLE,
 300		 * no one else should try to use it.
301		 */
302		res->start = res_hostmem->start;
303		res->end = res_hostmem->end;
304		ret = insert_resource(&iomem_resource, res);
305		if (ret < 0) {
306			pr_err("Can't insert iomem_resource [%llx - %llx]\n",
307				res->start, res->end);
308			release_memory_resource(res_hostmem);
309			res_hostmem = NULL;
310			res->start = res->end = 0;
311		}
312	}
313
314	if (ret) {
315		ret = allocate_resource(&iomem_resource, res,
316					size, 0, -1,
317					PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
318		if (ret < 0) {
319			pr_err("Cannot allocate new System RAM resource\n");
320			kfree(res);
321			return NULL;
322		}
323	}
324
325#ifdef CONFIG_SPARSEMEM
326	{
327		unsigned long limit = 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT);
328		unsigned long pfn = res->start >> PAGE_SHIFT;
329
330		if (pfn > limit) {
331			pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n",
332			       pfn, limit);
333			release_memory_resource(res);
334			release_memory_resource(res_hostmem);
335			return NULL;
336		}
337	}
338#endif
339
340	return res;
341}
342
343static enum bp_state reserve_additional_memory(void)
344{
345	long credit;
346	struct resource *resource;
347	int nid, rc;
348	unsigned long balloon_hotplug;
349
350	credit = balloon_stats.target_pages + balloon_stats.target_unpopulated
351		- balloon_stats.total_pages;
352
353	/*
354	 * Already hotplugged enough pages?  Wait for them to be
355	 * onlined.
356	 */
357	if (credit <= 0)
358		return BP_WAIT;
359
360	balloon_hotplug = round_up(credit, PAGES_PER_SECTION);
361
362	resource = additional_memory_resource(balloon_hotplug * PAGE_SIZE);
363	if (!resource)
364		goto err;
365
366	nid = memory_add_physaddr_to_nid(resource->start);
367
368#ifdef CONFIG_XEN_HAVE_PVMMU
369	/*
 370	 * We don't support PV MMU when Linux and Xen are using
 371	 * different page granularities.
372	 */
373	BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
374
 375	/*
 376	 * add_memory() will build page tables for the new memory, so the
 377	 * p2m must contain invalid entries in order for the correct
 378	 * non-present PTEs to be written.
 379	 *
 380	 * If a failure occurs, the original (identity) p2m entries are
 381	 * not restored, since this region is now known not to conflict
 382	 * with any devices.
 383	 */
384	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
385		unsigned long pfn, i;
386
387		pfn = PFN_DOWN(resource->start);
388		for (i = 0; i < balloon_hotplug; i++) {
389			if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
390				pr_warn("set_phys_to_machine() failed, no memory added\n");
391				goto err;
392			}
 393		}
394	}
395#endif
396
397	/*
398	 * add_memory_resource() will call online_pages() which in its turn
399	 * will call xen_online_page() callback causing deadlock if we don't
400	 * release balloon_mutex here. Unlocking here is safe because the
401	 * callers drop the mutex before trying again.
402	 */
403	mutex_unlock(&balloon_mutex);
404	rc = add_memory_resource(nid, resource, memhp_auto_online);
405	mutex_lock(&balloon_mutex);
406
407	if (rc) {
408		pr_warn("Cannot add additional memory (%i)\n", rc);
409		goto err;
410	}
411
412	balloon_stats.total_pages += balloon_hotplug;
413
414	return BP_WAIT;
415  err:
416	release_memory_resource(resource);
417	return BP_ECANCELED;
418}
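/*
 * Note that success is reported as BP_WAIT rather than BP_DONE: the
 * hotplugged section still has to be onlined, at which point
 * xen_online_page() hands the new pages to the balloon and
 * xen_memory_notifier() reschedules balloon_process().
 */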
419
420static void xen_online_page(struct page *page)
421{
422	__online_page_set_limits(page);
423
424	mutex_lock(&balloon_mutex);
425
426	__balloon_append(page);
427
428	mutex_unlock(&balloon_mutex);
429}
430
431static int xen_memory_notifier(struct notifier_block *nb, unsigned long val, void *v)
432{
433	if (val == MEM_ONLINE)
434		schedule_delayed_work(&balloon_worker, 0);
435
436	return NOTIFY_OK;
437}
438
439static struct notifier_block xen_memory_nb = {
440	.notifier_call = xen_memory_notifier,
441	.priority = 0
442};
443#else
444static enum bp_state reserve_additional_memory(void)
445{
446	balloon_stats.target_pages = balloon_stats.current_pages;
447	return BP_ECANCELED;
448}
449#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */
450
451static long current_credit(void)
452{
453	return balloon_stats.target_pages - balloon_stats.current_pages;
454}
455
456static bool balloon_is_inflated(void)
457{
458	return balloon_stats.balloon_low || balloon_stats.balloon_high;
459}
460
461static enum bp_state increase_reservation(unsigned long nr_pages)
462{
463	int rc;
464	unsigned long i;
465	struct page   *page;
466	struct xen_memory_reservation reservation = {
467		.address_bits = 0,
468		.extent_order = EXTENT_ORDER,
469		.domid        = DOMID_SELF
470	};
471
472	if (nr_pages > ARRAY_SIZE(frame_list))
473		nr_pages = ARRAY_SIZE(frame_list);
474
475	page = list_first_entry_or_null(&ballooned_pages, struct page, lru);
476	for (i = 0; i < nr_pages; i++) {
477		if (!page) {
478			nr_pages = i;
479			break;
480		}
481
482		/* XENMEM_populate_physmap requires a PFN based on Xen
483		 * granularity.
484		 */
485		frame_list[i] = page_to_xen_pfn(page);
486		page = balloon_next_page(page);
487	}
488
489	set_xen_guest_handle(reservation.extent_start, frame_list);
490	reservation.nr_extents = nr_pages;
491	rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
492	if (rc <= 0)
493		return BP_EAGAIN;
494
495	for (i = 0; i < rc; i++) {
496		page = balloon_retrieve(false);
497		BUG_ON(page == NULL);
498
499#ifdef CONFIG_XEN_HAVE_PVMMU
500		/*
 501		 * We don't support PV MMU when Linux and Xen are using
 502		 * different page granularities.
503		 */
504		BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
505
506		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
507			unsigned long pfn = page_to_pfn(page);
508
509			set_phys_to_machine(pfn, frame_list[i]);
510
511			/* Link back into the page tables if not highmem. */
512			if (!PageHighMem(page)) {
513				int ret;
514				ret = HYPERVISOR_update_va_mapping(
515						(unsigned long)__va(pfn << PAGE_SHIFT),
516						mfn_pte(frame_list[i], PAGE_KERNEL),
517						0);
518				BUG_ON(ret);
519			}
520		}
521#endif
522
523		/* Relinquish the page back to the allocator. */
524		free_reserved_page(page);
525	}
526
527	balloon_stats.current_pages += rc;
528
529	return BP_DONE;
530}
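/*
 * XENMEM_populate_physmap may succeed only partially: rc is the number
 * of extents actually populated, so only rc pages are pulled back out
 * of the balloon and current_pages grows by rc, not by nr_pages.
 */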
531
532static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
533{
534	enum bp_state state = BP_DONE;
535	unsigned long i;
536	struct page *page, *tmp;
537	int ret;
538	struct xen_memory_reservation reservation = {
539		.address_bits = 0,
540		.extent_order = EXTENT_ORDER,
541		.domid        = DOMID_SELF
542	};
543	LIST_HEAD(pages);
544
545	if (nr_pages > ARRAY_SIZE(frame_list))
546		nr_pages = ARRAY_SIZE(frame_list);
547
548	for (i = 0; i < nr_pages; i++) {
549		page = alloc_page(gfp);
550		if (page == NULL) {
551			nr_pages = i;
552			state = BP_EAGAIN;
553			break;
554		}
555		adjust_managed_page_count(page, -1);
556		scrub_page(page);
557		list_add(&page->lru, &pages);
558	}
559
560	/*
561	 * Ensure that ballooned highmem pages don't have kmaps.
562	 *
563	 * Do this before changing the p2m as kmap_flush_unused()
564	 * reads PTEs to obtain pages (and hence needs the original
565	 * p2m entry).
566	 */
567	kmap_flush_unused();
568
569	/*
570	 * Setup the frame, update direct mapping, invalidate P2M,
571	 * and add to balloon.
572	 */
573	i = 0;
574	list_for_each_entry_safe(page, tmp, &pages, lru) {
575		/* XENMEM_decrease_reservation requires a GFN */
576		frame_list[i++] = xen_page_to_gfn(page);
577
578#ifdef CONFIG_XEN_HAVE_PVMMU
579		/*
 580		 * We don't support PV MMU when Linux and Xen are using
 581		 * different page granularities.
582		 */
583		BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
584
585		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
586			unsigned long pfn = page_to_pfn(page);
587
588			if (!PageHighMem(page)) {
589				ret = HYPERVISOR_update_va_mapping(
590						(unsigned long)__va(pfn << PAGE_SHIFT),
591						__pte_ma(0), 0);
592				BUG_ON(ret);
593			}
594			__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
595		}
596#endif
597		list_del(&page->lru);
598
599		balloon_append(page);
600	}
601
602	flush_tlb_all();
603
604	set_xen_guest_handle(reservation.extent_start, frame_list);
605	reservation.nr_extents   = nr_pages;
606	ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
607	BUG_ON(ret != nr_pages);
608
609	balloon_stats.current_pages -= nr_pages;
610
611	return state;
612}
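/*
 * The BUG_ON(ret != nr_pages) above reflects that every frame passed
 * to XENMEM_decrease_reservation is one this guest owns; a partial
 * failure would mean corrupted state, not a recoverable condition.
 */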
613
614/*
615 * As this is a work item it is guaranteed to run as a single instance only.
616 * We may of course race updates of the target counts (which are protected
617 * by the balloon lock), or with changes to the Xen hard limit, but we will
618 * recover from these in time.
619 */
620static void balloon_process(struct work_struct *work)
621{
622	enum bp_state state = BP_DONE;
623	long credit;
624
625
626	do {
627		mutex_lock(&balloon_mutex);
628
629		credit = current_credit();
630
631		if (credit > 0) {
632			if (balloon_is_inflated())
633				state = increase_reservation(credit);
634			else
635				state = reserve_additional_memory();
636		}
637
638		if (credit < 0)
639			state = decrease_reservation(-credit, GFP_BALLOON);
640
641		state = update_schedule(state);
642
643		mutex_unlock(&balloon_mutex);
644
645		cond_resched();
646
647	} while (credit && state == BP_DONE);
648
649	/* Schedule more work if there is some still to be done. */
650	if (state == BP_EAGAIN)
651		schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);
652}
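/*
 * Credit is target minus current: a positive credit means the domain
 * should grow, either by re-populating ballooned pages or by
 * hotplugging new sections, while a negative credit means pages must
 * be handed back to Xen via decrease_reservation().
 */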
653
654/* Resets the Xen limit, sets new target, and kicks off processing. */
655void balloon_set_new_target(unsigned long target)
656{
657	/* No need for lock. Not read-modify-write updates. */
658	balloon_stats.target_pages = target;
659	schedule_delayed_work(&balloon_worker, 0);
660}
661EXPORT_SYMBOL_GPL(balloon_set_new_target);
662
663static int add_ballooned_pages(int nr_pages)
664{
665	enum bp_state st;
666
667	if (xen_hotplug_unpopulated) {
668		st = reserve_additional_memory();
669		if (st != BP_ECANCELED) {
670			mutex_unlock(&balloon_mutex);
671			wait_event(balloon_wq,
672				   !list_empty(&ballooned_pages));
673			mutex_lock(&balloon_mutex);
674			return 0;
675		}
676	}
677
678	st = decrease_reservation(nr_pages, GFP_USER);
679	if (st != BP_DONE)
680		return -ENOMEM;
681
682	return 0;
683}
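/*
 * With /proc/sys/xen/balloon/hotplug_unpopulated set to 1 the function
 * above prefers hotplugging fresh, unpopulated sections, sleeping on
 * balloon_wq until they come online; otherwise (or if the hotplug is
 * cancelled) it falls back to ballooning out existing pages.
 */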
684
685/**
686 * alloc_xenballooned_pages - get pages that have been ballooned out
687 * @nr_pages: Number of pages to get
688 * @pages: pages returned
689 * @return 0 on success, error otherwise
690 */
691int alloc_xenballooned_pages(int nr_pages, struct page **pages)
692{
693	int pgno = 0;
694	struct page *page;
695	int ret;
696
697	mutex_lock(&balloon_mutex);
698
699	balloon_stats.target_unpopulated += nr_pages;
700
701	while (pgno < nr_pages) {
702		page = balloon_retrieve(true);
703		if (page) {
704			pages[pgno++] = page;
705#ifdef CONFIG_XEN_HAVE_PVMMU
706			/*
 707			 * We don't support PV MMU when Linux and Xen are using
 708			 * different page granularities.
709			 */
710			BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
711
712			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
713				ret = xen_alloc_p2m_entry(page_to_pfn(page));
714				if (ret < 0)
715					goto out_undo;
716			}
717#endif
718		} else {
719			ret = add_ballooned_pages(nr_pages - pgno);
720			if (ret < 0)
721				goto out_undo;
722		}
723	}
724	mutex_unlock(&balloon_mutex);
725	return 0;
726 out_undo:
727	mutex_unlock(&balloon_mutex);
728	free_xenballooned_pages(pgno, pages);
729	return ret;
730}
731EXPORT_SYMBOL(alloc_xenballooned_pages);
732
733/**
 734 * free_xenballooned_pages - return pages retrieved with alloc_xenballooned_pages
735 * @nr_pages: Number of pages
736 * @pages: pages to return
737 */
738void free_xenballooned_pages(int nr_pages, struct page **pages)
739{
740	int i;
741
742	mutex_lock(&balloon_mutex);
743
744	for (i = 0; i < nr_pages; i++) {
745		if (pages[i])
746			balloon_append(pages[i]);
747	}
748
749	balloon_stats.target_unpopulated -= nr_pages;
750
751	/* The balloon may be too large now. Shrink it if needed. */
752	if (current_credit())
753		schedule_delayed_work(&balloon_worker, 0);
754
755	mutex_unlock(&balloon_mutex);
756}
757EXPORT_SYMBOL(free_xenballooned_pages);
758
759#ifdef CONFIG_XEN_PV
760static void __init balloon_add_region(unsigned long start_pfn,
761				      unsigned long pages)
762{
763	unsigned long pfn, extra_pfn_end;
764	struct page *page;
765
766	/*
767	 * If the amount of usable memory has been limited (e.g., with
768	 * the 'mem' command line parameter), don't add pages beyond
769	 * this limit.
770	 */
771	extra_pfn_end = min(max_pfn, start_pfn + pages);
772
773	for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) {
774		page = pfn_to_page(pfn);
775		/* totalram_pages and totalhigh_pages do not
776		   include the boot-time balloon extension, so
 777		   don't subtract from them. */
778		__balloon_append(page);
779	}
780
781	balloon_stats.total_pages += extra_pfn_end - start_pfn;
782}
783#endif
784
785static int __init balloon_init(void)
786{
787	if (!xen_domain())
788		return -ENODEV;
789
790	pr_info("Initialising balloon driver\n");
791
792#ifdef CONFIG_XEN_PV
793	balloon_stats.current_pages = xen_pv_domain()
794		? min(xen_start_info->nr_pages - xen_released_pages, max_pfn)
795		: get_num_physpages();
796#else
797	balloon_stats.current_pages = get_num_physpages();
798#endif
799	balloon_stats.target_pages  = balloon_stats.current_pages;
800	balloon_stats.balloon_low   = 0;
801	balloon_stats.balloon_high  = 0;
802	balloon_stats.total_pages   = balloon_stats.current_pages;
803
804	balloon_stats.schedule_delay = 1;
805	balloon_stats.max_schedule_delay = 32;
806	balloon_stats.retry_count = 1;
807	balloon_stats.max_retry_count = RETRY_UNLIMITED;
808
809#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
810	set_online_page_callback(&xen_online_page);
811	register_memory_notifier(&xen_memory_nb);
812	register_sysctl_table(xen_root);
813
814	arch_xen_balloon_init(&hostmem_resource);
815#endif
816
817#ifdef CONFIG_XEN_PV
818	{
819		int i;
820
821		/*
822		 * Initialize the balloon with pages from the extra memory
823		 * regions (see arch/x86/xen/setup.c).
824		 */
825		for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++)
826			if (xen_extra_mem[i].n_pfns)
827				balloon_add_region(xen_extra_mem[i].start_pfn,
828						   xen_extra_mem[i].n_pfns);
829	}
830#endif
831
832	/* Init the xen-balloon driver. */
833	xen_balloon_init();
834
835	return 0;
836}
837subsys_initcall(balloon_init);
 
 
v3.15
  1/******************************************************************************
  2 * Xen balloon driver - enables returning/claiming memory to/from Xen.
  3 *
  4 * Copyright (c) 2003, B Dragovic
  5 * Copyright (c) 2003-2004, M Williamson, K Fraser
  6 * Copyright (c) 2005 Dan M. Smith, IBM Corporation
  7 * Copyright (c) 2010 Daniel Kiper
  8 *
  9 * Memory hotplug support was written by Daniel Kiper. Work on
 10 * it was sponsored by Google under Google Summer of Code 2010
 11 * program. Jeremy Fitzhardinge from Citrix was the mentor for
 12 * this project.
 13 *
 14 * This program is free software; you can redistribute it and/or
 15 * modify it under the terms of the GNU General Public License version 2
 16 * as published by the Free Software Foundation; or, when distributed
 17 * separately from the Linux kernel or incorporated into other
 18 * software packages, subject to the following license:
 19 *
 20 * Permission is hereby granted, free of charge, to any person obtaining a copy
 21 * of this source file (the "Software"), to deal in the Software without
 22 * restriction, including without limitation the rights to use, copy, modify,
 23 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 24 * and to permit persons to whom the Software is furnished to do so, subject to
 25 * the following conditions:
 26 *
 27 * The above copyright notice and this permission notice shall be included in
 28 * all copies or substantial portions of the Software.
 29 *
 30 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 31 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 32 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 33 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 34 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 35 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 36 * IN THE SOFTWARE.
 37 */
 38
 39#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
 40
 41#include <linux/cpu.h>
 42#include <linux/kernel.h>
 43#include <linux/sched.h>
 44#include <linux/errno.h>
 45#include <linux/module.h>
 46#include <linux/mm.h>
 47#include <linux/bootmem.h>
 48#include <linux/pagemap.h>
 49#include <linux/highmem.h>
 50#include <linux/mutex.h>
 51#include <linux/list.h>
 52#include <linux/gfp.h>
 53#include <linux/notifier.h>
 54#include <linux/memory.h>
 55#include <linux/memory_hotplug.h>
 56#include <linux/percpu-defs.h>
 57
 58#include <asm/page.h>
 59#include <asm/pgalloc.h>
 60#include <asm/pgtable.h>
 61#include <asm/tlb.h>
 62
 63#include <asm/xen/hypervisor.h>
 64#include <asm/xen/hypercall.h>
 65
 66#include <xen/xen.h>
 67#include <xen/interface/xen.h>
 68#include <xen/interface/memory.h>
 69#include <xen/balloon.h>
 70#include <xen/features.h>
 71#include <xen/page.h>
 72
 73/*
 74 * balloon_process() state:
 75 *
 76 * BP_DONE: done or nothing to do,
 77 * BP_EAGAIN: error, go to sleep,
 78 * BP_ECANCELED: error, balloon operation canceled.
 79 */
 80
 81enum bp_state {
 82	BP_DONE,
 83	BP_EAGAIN,
 84	BP_ECANCELED
 85};
 86
 87
 88static DEFINE_MUTEX(balloon_mutex);
 89
 90struct balloon_stats balloon_stats;
 91EXPORT_SYMBOL_GPL(balloon_stats);
 92
 93/* We increase/decrease in batches which fit in a page */
 94static xen_pfn_t frame_list[PAGE_SIZE / sizeof(unsigned long)];
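/*
 * Note the sizing by sizeof(unsigned long); the v4.17 listing above
 * sizes this array by sizeof(xen_pfn_t) instead, which differs on
 * 32-bit Arm where xen_pfn_t is 64 bits wide.
 */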
 95static DEFINE_PER_CPU(struct page *, balloon_scratch_page);
 96
 97
 98/* List of ballooned pages, threaded through the mem_map array. */
 99static LIST_HEAD(ballooned_pages);
100
101/* Main work function, always executed in process context. */
102static void balloon_process(struct work_struct *work);
103static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
104
105/* When ballooning out (allocating memory to return to Xen) we don't really
106   want the kernel to try too hard since that can trigger the oom killer. */
107#define GFP_BALLOON \
108	(GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)
109
110static void scrub_page(struct page *page)
111{
112#ifdef CONFIG_XEN_SCRUB_PAGES
113	clear_highpage(page);
114#endif
115}
116
117/* balloon_append: add the given page to the balloon. */
118static void __balloon_append(struct page *page)
119{
120	/* Lowmem is re-populated first, so highmem pages go at list tail. */
121	if (PageHighMem(page)) {
122		list_add_tail(&page->lru, &ballooned_pages);
123		balloon_stats.balloon_high++;
124	} else {
125		list_add(&page->lru, &ballooned_pages);
126		balloon_stats.balloon_low++;
127	}
128}
129
130static void balloon_append(struct page *page)
131{
132	__balloon_append(page);
133	adjust_managed_page_count(page, -1);
134}
135
136/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
137static struct page *balloon_retrieve(bool prefer_highmem)
138{
139	struct page *page;
140
141	if (list_empty(&ballooned_pages))
142		return NULL;
143
144	if (prefer_highmem)
145		page = list_entry(ballooned_pages.prev, struct page, lru);
146	else
147		page = list_entry(ballooned_pages.next, struct page, lru);
148	list_del(&page->lru);
149
150	if (PageHighMem(page))
151		balloon_stats.balloon_high--;
152	else
153		balloon_stats.balloon_low--;
154
155	adjust_managed_page_count(page, 1);
156
157	return page;
158}
159
160static struct page *balloon_next_page(struct page *page)
161{
162	struct list_head *next = page->lru.next;
163	if (next == &ballooned_pages)
164		return NULL;
165	return list_entry(next, struct page, lru);
166}
167
168static enum bp_state update_schedule(enum bp_state state)
169{
170	if (state == BP_DONE) {
171		balloon_stats.schedule_delay = 1;
172		balloon_stats.retry_count = 1;
173		return BP_DONE;
174	}
175
176	++balloon_stats.retry_count;
177
178	if (balloon_stats.max_retry_count != RETRY_UNLIMITED &&
179			balloon_stats.retry_count > balloon_stats.max_retry_count) {
180		balloon_stats.schedule_delay = 1;
181		balloon_stats.retry_count = 1;
182		return BP_ECANCELED;
183	}
184
185	balloon_stats.schedule_delay <<= 1;
186
187	if (balloon_stats.schedule_delay > balloon_stats.max_schedule_delay)
188		balloon_stats.schedule_delay = balloon_stats.max_schedule_delay;
189
190	return BP_EAGAIN;
191}
192
193#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
194static long current_credit(void)
195{
196	return balloon_stats.target_pages - balloon_stats.current_pages -
197		balloon_stats.hotplug_pages;
198}
199
200static bool balloon_is_inflated(void)
201{
202	if (balloon_stats.balloon_low || balloon_stats.balloon_high ||
203			balloon_stats.balloon_hotplug)
204		return true;
205	else
206		return false;
207}
208
209/*
 210 * reserve_additional_memory() adds a memory region of size >= credit above
 211 * max_pfn. The new region is section-aligned and its size is rounded up to a
 212 * multiple of the section size. Those features allow optimal use of the address
 213 * space and establish proper alignment when this function is first called after
 214 * boot (the last section, not fully populated at boot time, contains unused
 215 * memory pages with the PG_reserved bit not set; online_pages_range() does not
 216 * allow onlining a whole range if the first onlined page does not have
 217 * PG_reserved set). The real size of the added memory is established at the page-onlining stage.
218 */
219
220static enum bp_state reserve_additional_memory(long credit)
221{
222	int nid, rc;
223	u64 hotplug_start_paddr;
224	unsigned long balloon_hotplug = credit;
225
226	hotplug_start_paddr = PFN_PHYS(SECTION_ALIGN_UP(max_pfn));
227	balloon_hotplug = round_up(balloon_hotplug, PAGES_PER_SECTION);
228	nid = memory_add_physaddr_to_nid(hotplug_start_paddr);
229
230	rc = add_memory(nid, hotplug_start_paddr, balloon_hotplug << PAGE_SHIFT);
231
232	if (rc) {
233		pr_info("%s: add_memory() failed: %i\n", __func__, rc);
234		return BP_EAGAIN;
235	}
236
237	balloon_hotplug -= credit;
238
239	balloon_stats.hotplug_pages += credit;
240	balloon_stats.balloon_hotplug = balloon_hotplug;
241
242	return BP_DONE;
243}
244
245static void xen_online_page(struct page *page)
246{
247	__online_page_set_limits(page);
248
249	mutex_lock(&balloon_mutex);
250
251	__balloon_append(page);
252
253	if (balloon_stats.hotplug_pages)
254		--balloon_stats.hotplug_pages;
255	else
256		--balloon_stats.balloon_hotplug;
257
258	mutex_unlock(&balloon_mutex);
259}
260
261static int xen_memory_notifier(struct notifier_block *nb, unsigned long val, void *v)
262{
263	if (val == MEM_ONLINE)
264		schedule_delayed_work(&balloon_worker, 0);
265
266	return NOTIFY_OK;
267}
268
269static struct notifier_block xen_memory_nb = {
270	.notifier_call = xen_memory_notifier,
271	.priority = 0
272};
273#else
274static long current_credit(void)
275{
276	unsigned long target = balloon_stats.target_pages;
277
278	target = min(target,
279		     balloon_stats.current_pages +
280		     balloon_stats.balloon_low +
281		     balloon_stats.balloon_high);
282
283	return target - balloon_stats.current_pages;
284}
285
286static bool balloon_is_inflated(void)
287{
288	if (balloon_stats.balloon_low || balloon_stats.balloon_high)
289		return true;
290	else
291		return false;
292}
293
294static enum bp_state reserve_additional_memory(long credit)
295{
296	balloon_stats.target_pages = balloon_stats.current_pages;
297	return BP_DONE;
298}
299#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */
300
301static enum bp_state increase_reservation(unsigned long nr_pages)
302{
303	int rc;
304	unsigned long  pfn, i;
305	struct page   *page;
306	struct xen_memory_reservation reservation = {
307		.address_bits = 0,
308		.extent_order = 0,
309		.domid        = DOMID_SELF
310	};
311
312#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
313	if (!balloon_stats.balloon_low && !balloon_stats.balloon_high) {
314		nr_pages = min(nr_pages, balloon_stats.balloon_hotplug);
315		balloon_stats.hotplug_pages += nr_pages;
316		balloon_stats.balloon_hotplug -= nr_pages;
317		return BP_DONE;
318	}
319#endif
320
321	if (nr_pages > ARRAY_SIZE(frame_list))
322		nr_pages = ARRAY_SIZE(frame_list);
323
324	page = list_first_entry_or_null(&ballooned_pages, struct page, lru);
325	for (i = 0; i < nr_pages; i++) {
326		if (!page) {
327			nr_pages = i;
328			break;
329		}
330		frame_list[i] = page_to_pfn(page);
331		page = balloon_next_page(page);
332	}
333
334	set_xen_guest_handle(reservation.extent_start, frame_list);
335	reservation.nr_extents = nr_pages;
336	rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
337	if (rc <= 0)
338		return BP_EAGAIN;
339
340	for (i = 0; i < rc; i++) {
341		page = balloon_retrieve(false);
342		BUG_ON(page == NULL);
343
344		pfn = page_to_pfn(page);
345
346#ifdef CONFIG_XEN_HAVE_PVMMU
347		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
348			set_phys_to_machine(pfn, frame_list[i]);
349
350			/* Link back into the page tables if not highmem. */
351			if (!PageHighMem(page)) {
352				int ret;
353				ret = HYPERVISOR_update_va_mapping(
354						(unsigned long)__va(pfn << PAGE_SHIFT),
355						mfn_pte(frame_list[i], PAGE_KERNEL),
356						0);
357				BUG_ON(ret);
358			}
359		}
360#endif
361
362		/* Relinquish the page back to the allocator. */
363		__free_reserved_page(page);
364	}
365
366	balloon_stats.current_pages += rc;
367
368	return BP_DONE;
369}
370
371static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
372{
373	enum bp_state state = BP_DONE;
374	unsigned long  pfn, i;
375	struct page   *page;
376	int ret;
377	struct xen_memory_reservation reservation = {
378		.address_bits = 0,
379		.extent_order = 0,
380		.domid        = DOMID_SELF
381	};
382
383#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
384	if (balloon_stats.hotplug_pages) {
385		nr_pages = min(nr_pages, balloon_stats.hotplug_pages);
386		balloon_stats.hotplug_pages -= nr_pages;
387		balloon_stats.balloon_hotplug += nr_pages;
388		return BP_DONE;
389	}
390#endif
391
392	if (nr_pages > ARRAY_SIZE(frame_list))
393		nr_pages = ARRAY_SIZE(frame_list);
394
395	for (i = 0; i < nr_pages; i++) {
396		page = alloc_page(gfp);
397		if (page == NULL) {
398			nr_pages = i;
399			state = BP_EAGAIN;
400			break;
401		}
402		scrub_page(page);
403
404		frame_list[i] = page_to_pfn(page);
405	}
406
407	/*
408	 * Ensure that ballooned highmem pages don't have kmaps.
409	 *
410	 * Do this before changing the p2m as kmap_flush_unused()
411	 * reads PTEs to obtain pages (and hence needs the original
412	 * p2m entry).
413	 */
414	kmap_flush_unused();
415
416	/* Update direct mapping, invalidate P2M, and add to balloon. */
417	for (i = 0; i < nr_pages; i++) {
418		pfn = frame_list[i];
419		frame_list[i] = pfn_to_mfn(pfn);
420		page = pfn_to_page(pfn);
421
422#ifdef CONFIG_XEN_HAVE_PVMMU
423		/*
424		 * Ballooned out frames are effectively replaced with
425		 * a scratch frame.  Ensure direct mappings and the
426		 * p2m are consistent.
427		 */
428		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
429			unsigned long p;
430			struct page   *scratch_page = get_balloon_scratch_page();
431
432			if (!PageHighMem(page)) {
433				ret = HYPERVISOR_update_va_mapping(
434						(unsigned long)__va(pfn << PAGE_SHIFT),
435						pfn_pte(page_to_pfn(scratch_page),
436							PAGE_KERNEL_RO), 0);
437				BUG_ON(ret);
438			}
439			p = page_to_pfn(scratch_page);
440			__set_phys_to_machine(pfn, pfn_to_mfn(p));
441
442			put_balloon_scratch_page();
443		}
444#endif
445
446		balloon_append(page);
447	}
448
449	flush_tlb_all();
450
451	set_xen_guest_handle(reservation.extent_start, frame_list);
452	reservation.nr_extents   = nr_pages;
453	ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
454	BUG_ON(ret != nr_pages);
455
456	balloon_stats.current_pages -= nr_pages;
457
458	return state;
459}
460
461/*
462 * We avoid multiple worker processes conflicting via the balloon mutex.
463 * We may of course race updates of the target counts (which are protected
464 * by the balloon lock), or with changes to the Xen hard limit, but we will
465 * recover from these in time.
466 */
467static void balloon_process(struct work_struct *work)
468{
469	enum bp_state state = BP_DONE;
470	long credit;
471
472	mutex_lock(&balloon_mutex);
473
474	do {
475		credit = current_credit();
476
477		if (credit > 0) {
478			if (balloon_is_inflated())
479				state = increase_reservation(credit);
480			else
481				state = reserve_additional_memory(credit);
482		}
483
484		if (credit < 0)
485			state = decrease_reservation(-credit, GFP_BALLOON);
486
487		state = update_schedule(state);
488
489#ifndef CONFIG_PREEMPT
490		if (need_resched())
491			schedule();
492#endif
493	} while (credit && state == BP_DONE);
494
495	/* Schedule more work if there is some still to be done. */
496	if (state == BP_EAGAIN)
497		schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);
498
499	mutex_unlock(&balloon_mutex);
500}
501
502struct page *get_balloon_scratch_page(void)
503{
504	struct page *ret = get_cpu_var(balloon_scratch_page);
505	BUG_ON(ret == NULL);
506	return ret;
507}
508
509void put_balloon_scratch_page(void)
510{
511	put_cpu_var(balloon_scratch_page);
512}
513
514/* Resets the Xen limit, sets new target, and kicks off processing. */
515void balloon_set_new_target(unsigned long target)
516{
517	/* No need for lock. Not read-modify-write updates. */
518	balloon_stats.target_pages = target;
519	schedule_delayed_work(&balloon_worker, 0);
520}
521EXPORT_SYMBOL_GPL(balloon_set_new_target);
522
523/**
524 * alloc_xenballooned_pages - get pages that have been ballooned out
525 * @nr_pages: Number of pages to get
526 * @pages: pages returned
527 * @highmem: allow highmem pages
528 * @return 0 on success, error otherwise
529 */
530int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem)
531{
532	int pgno = 0;
533	struct page *page;
534	mutex_lock(&balloon_mutex);
535	while (pgno < nr_pages) {
536		page = balloon_retrieve(highmem);
537		if (page && (highmem || !PageHighMem(page))) {
538			pages[pgno++] = page;
539		} else {
540			enum bp_state st;
541			if (page)
542				balloon_append(page);
543			st = decrease_reservation(nr_pages - pgno,
544					highmem ? GFP_HIGHUSER : GFP_USER);
545			if (st != BP_DONE)
546				goto out_undo;
547		}
548	}
549	mutex_unlock(&balloon_mutex);
550	return 0;
551 out_undo:
552	while (pgno)
553		balloon_append(pages[--pgno]);
554	/* Free the memory back to the kernel soon */
555	schedule_delayed_work(&balloon_worker, 0);
556	mutex_unlock(&balloon_mutex);
557	return -ENOMEM;
558}
559EXPORT_SYMBOL(alloc_xenballooned_pages);
560
561/**
 562 * free_xenballooned_pages - return pages retrieved with alloc_xenballooned_pages
563 * @nr_pages: Number of pages
564 * @pages: pages to return
565 */
566void free_xenballooned_pages(int nr_pages, struct page **pages)
567{
568	int i;
569
570	mutex_lock(&balloon_mutex);
571
572	for (i = 0; i < nr_pages; i++) {
573		if (pages[i])
574			balloon_append(pages[i]);
575	}
576
577	/* The balloon may be too large now. Shrink it if needed. */
578	if (current_credit())
579		schedule_delayed_work(&balloon_worker, 0);
580
581	mutex_unlock(&balloon_mutex);
582}
583EXPORT_SYMBOL(free_xenballooned_pages);
584
585static void __init balloon_add_region(unsigned long start_pfn,
586				      unsigned long pages)
587{
588	unsigned long pfn, extra_pfn_end;
589	struct page *page;
590
591	/*
592	 * If the amount of usable memory has been limited (e.g., with
593	 * the 'mem' command line parameter), don't add pages beyond
594	 * this limit.
595	 */
596	extra_pfn_end = min(max_pfn, start_pfn + pages);
597
598	for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) {
599		page = pfn_to_page(pfn);
600		/* totalram_pages and totalhigh_pages do not
601		   include the boot-time balloon extension, so
 602		   don't subtract from them. */
603		__balloon_append(page);
604	}
605}
606
607static int alloc_balloon_scratch_page(int cpu)
608{
609	if (per_cpu(balloon_scratch_page, cpu) != NULL)
610		return 0;
611
612	per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
613	if (per_cpu(balloon_scratch_page, cpu) == NULL) {
614		pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
615		return -ENOMEM;
616	}
617
618	return 0;
619}
620
621
622static int balloon_cpu_notify(struct notifier_block *self,
623				    unsigned long action, void *hcpu)
624{
625	int cpu = (long)hcpu;
626	switch (action) {
627	case CPU_UP_PREPARE:
628		if (alloc_balloon_scratch_page(cpu))
629			return NOTIFY_BAD;
630		break;
631	default:
632		break;
633	}
634	return NOTIFY_OK;
635}
636
637static struct notifier_block balloon_cpu_notifier = {
638	.notifier_call	= balloon_cpu_notify,
639};
640
641static int __init balloon_init(void)
642{
643	int i, cpu;
644
645	if (!xen_domain())
646		return -ENODEV;
647
648	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
649		register_cpu_notifier(&balloon_cpu_notifier);
650
651		get_online_cpus();
652		for_each_online_cpu(cpu) {
653			if (alloc_balloon_scratch_page(cpu)) {
654				put_online_cpus();
655				unregister_cpu_notifier(&balloon_cpu_notifier);
656				return -ENOMEM;
657			}
658		}
659		put_online_cpus();
660	}
661
662	pr_info("Initialising balloon driver\n");
663
664	balloon_stats.current_pages = xen_pv_domain()
665		? min(xen_start_info->nr_pages - xen_released_pages, max_pfn)
666		: get_num_physpages();
667	balloon_stats.target_pages  = balloon_stats.current_pages;
668	balloon_stats.balloon_low   = 0;
669	balloon_stats.balloon_high  = 0;
670
671	balloon_stats.schedule_delay = 1;
672	balloon_stats.max_schedule_delay = 32;
673	balloon_stats.retry_count = 1;
674	balloon_stats.max_retry_count = RETRY_UNLIMITED;
675
676#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
677	balloon_stats.hotplug_pages = 0;
678	balloon_stats.balloon_hotplug = 0;
679
680	set_online_page_callback(&xen_online_page);
681	register_memory_notifier(&xen_memory_nb);
682#endif
683
684	/*
685	 * Initialize the balloon with pages from the extra memory
686	 * regions (see arch/x86/xen/setup.c).
687	 */
688	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++)
689		if (xen_extra_mem[i].size)
690			balloon_add_region(PFN_UP(xen_extra_mem[i].start),
691					   PFN_DOWN(xen_extra_mem[i].size));
692
693	return 0;
694}
695
696subsys_initcall(balloon_init);
697
698static int __init balloon_clear(void)
699{
700	int cpu;
701
702	for_each_possible_cpu(cpu)
703		per_cpu(balloon_scratch_page, cpu) = NULL;
704
705	return 0;
706}
707early_initcall(balloon_clear);
708
709MODULE_LICENSE("GPL");