/*
 * VMware Balloon driver.
 *
 * Copyright (C) 2000-2010, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained by: Dmitry Torokhov <dtor@vmware.com>
 */

/*
 * This is the VMware physical memory management driver for Linux. The
 * driver acts like a "balloon" that can be inflated to reclaim physical
 * pages by reserving them in the guest and invalidating them in the
 * monitor, freeing up the underlying machine pages so they can be
 * allocated to other guests. The balloon can also be deflated to allow
 * the guest to use more physical memory. Higher level policies can
 * control the sizes of balloons in VMs in order to manage physical
 * memory resources.
 */

//#define DEBUG
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <asm/hypervisor.h>

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
MODULE_VERSION("1.2.1.3-k");
MODULE_ALIAS("dmi:*:svnVMware*:*");
MODULE_ALIAS("vmware_vmmemctl");
MODULE_LICENSE("GPL");

/*
 * Various constants controlling rate of inflating/deflating balloon,
 * measured in pages.
 */

/*
 * Rate of allocating memory when there is no memory pressure
 * (driver performs non-sleeping allocations).
 */
#define VMW_BALLOON_NOSLEEP_ALLOC_MAX	16384U

/*
 * Rates of memory allocation when the guest experiences memory pressure
 * (driver performs sleeping allocations).
 */
#define VMW_BALLOON_RATE_ALLOC_MIN	512U
#define VMW_BALLOON_RATE_ALLOC_MAX	2048U
#define VMW_BALLOON_RATE_ALLOC_INC	16U

/*
 * Rates for releasing pages while deflating balloon.
 */
#define VMW_BALLOON_RATE_FREE_MIN	512U
#define VMW_BALLOON_RATE_FREE_MAX	16384U
#define VMW_BALLOON_RATE_FREE_INC	16U

/*
 * When the guest is under memory pressure, use a reduced page allocation
 * rate for the next several cycles.
 */
#define VMW_BALLOON_SLOW_CYCLES		4

/*
 * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We don't
 * allow wait (__GFP_WAIT) for NOSLEEP page allocations. Use
 * __GFP_NOWARN to suppress page allocation failure warnings.
 */
#define VMW_PAGE_ALLOC_NOSLEEP		(__GFP_HIGHMEM|__GFP_NOWARN)

/*
 * Use GFP_HIGHUSER when executing in a separate kernel thread
 * context and allocation can sleep. This is less stressful to
 * the guest memory system, since it allows the thread to block
 * while memory is reclaimed, and won't take pages from emergency
 * low-memory pools.
 */
#define VMW_PAGE_ALLOC_CANSLEEP		(GFP_HIGHUSER)

/* Maximum number of page allocations without yielding processor */
#define VMW_BALLOON_YIELD_THRESHOLD	1024

/* Maximum number of refused pages we accumulate during inflation cycle */
#define VMW_BALLOON_MAX_REFUSED		16

/*
 * Hypervisor communication port definitions.
 */
#define VMW_BALLOON_HV_PORT		0x5670
#define VMW_BALLOON_HV_MAGIC		0x456c6d6f
#define VMW_BALLOON_PROTOCOL_VERSION	2
#define VMW_BALLOON_GUEST_ID		1	/* Linux */

#define VMW_BALLOON_CMD_START		0
#define VMW_BALLOON_CMD_GET_TARGET	1
#define VMW_BALLOON_CMD_LOCK		2
#define VMW_BALLOON_CMD_UNLOCK		3
#define VMW_BALLOON_CMD_GUEST_ID	4

/* error codes */
#define VMW_BALLOON_SUCCESS		0
#define VMW_BALLOON_FAILURE		-1
#define VMW_BALLOON_ERROR_CMD_INVALID	1
#define VMW_BALLOON_ERROR_PPN_INVALID	2
#define VMW_BALLOON_ERROR_PPN_LOCKED	3
#define VMW_BALLOON_ERROR_PPN_UNLOCKED	4
#define VMW_BALLOON_ERROR_PPN_PINNED	5
#define VMW_BALLOON_ERROR_PPN_NOTNEEDED	6
#define VMW_BALLOON_ERROR_RESET		7
#define VMW_BALLOON_ERROR_BUSY		8

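/*
 * Issue a single "backdoor" command to the hypervisor: the magic number
 * goes in %eax, the command in %ecx, the port number in %dx and the
 * command argument in %ebx. On return, %eax holds the status and %ebx
 * the command result (e.g. the balloon target for GET_TARGET).
 */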
#define VMWARE_BALLOON_CMD(cmd, data, result)		\
({							\
	unsigned long __stat, __dummy1, __dummy2;	\
	__asm__ __volatile__ ("inl %%dx" :		\
		"=a"(__stat),				\
		"=c"(__dummy1),				\
		"=d"(__dummy2),				\
		"=b"(result) :				\
		"0"(VMW_BALLOON_HV_MAGIC),		\
		"1"(VMW_BALLOON_CMD_##cmd),		\
		"2"(VMW_BALLOON_HV_PORT),		\
		"3"(data) :				\
		"memory");				\
	result &= -1UL;					\
	__stat & -1UL;					\
})

#ifdef CONFIG_DEBUG_FS
struct vmballoon_stats {
	unsigned int timer;

	/* allocation statistics */
	unsigned int alloc;
	unsigned int alloc_fail;
	unsigned int sleep_alloc;
	unsigned int sleep_alloc_fail;
	unsigned int refused_alloc;
	unsigned int refused_free;
	unsigned int free;

	/* monitor operations */
	unsigned int lock;
	unsigned int lock_fail;
	unsigned int unlock;
	unsigned int unlock_fail;
	unsigned int target;
	unsigned int target_fail;
	unsigned int start;
	unsigned int start_fail;
	unsigned int guest_type;
	unsigned int guest_type_fail;
};

#define STATS_INC(stat) (stat)++
#else
#define STATS_INC(stat)
#endif

struct vmballoon {

	/* list of reserved physical pages */
	struct list_head pages;

	/* transient list of non-balloonable pages */
	struct list_head refused_pages;
	unsigned int n_refused_pages;

	/* balloon size in pages */
	unsigned int size;
	unsigned int target;

	/* reset flag */
	bool reset_required;

	/* adjustment rates (pages per second) */
	unsigned int rate_alloc;
	unsigned int rate_free;

	/* slowdown page allocations for next few cycles */
	unsigned int slow_allocation_cycles;

#ifdef CONFIG_DEBUG_FS
	/* statistics */
	struct vmballoon_stats stats;

	/* debugfs file exporting statistics */
	struct dentry *dbg_entry;
#endif

	struct sysinfo sysinfo;

	struct delayed_work dwork;
};

static struct vmballoon balloon;

/*
 * Send "start" command to the host, communicating supported version
 * of the protocol.
 */
static bool vmballoon_send_start(struct vmballoon *b)
{
	unsigned long status, dummy;

	STATS_INC(b->stats.start);

	status = VMWARE_BALLOON_CMD(START, VMW_BALLOON_PROTOCOL_VERSION, dummy);
	if (status == VMW_BALLOON_SUCCESS)
		return true;

	pr_debug("%s - failed, hv returns %ld\n", __func__, status);
	STATS_INC(b->stats.start_fail);
	return false;
}

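/*
 * Check the status returned by the hypervisor. If the host asked for a
 * reset, remember it so that the next work cycle restarts the protocol.
 */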
static bool vmballoon_check_status(struct vmballoon *b, unsigned long status)
{
	switch (status) {
	case VMW_BALLOON_SUCCESS:
		return true;

	case VMW_BALLOON_ERROR_RESET:
		b->reset_required = true;
		/* fall through */

	default:
		return false;
	}
}

/*
 * Communicate guest type to the host so that it can adjust the ballooning
 * algorithm to the one most appropriate for the guest. This command
 * is normally issued after sending the "start" command and is part of
 * the standard reset sequence.
 */
static bool vmballoon_send_guest_id(struct vmballoon *b)
{
	unsigned long status, dummy;

	status = VMWARE_BALLOON_CMD(GUEST_ID, VMW_BALLOON_GUEST_ID, dummy);

	STATS_INC(b->stats.guest_type);

	if (vmballoon_check_status(b, status))
		return true;

	pr_debug("%s - failed, hv returns %ld\n", __func__, status);
	STATS_INC(b->stats.guest_type_fail);
	return false;
}

/*
 * Retrieve desired balloon size from the host.
 */
static bool vmballoon_send_get_target(struct vmballoon *b, u32 *new_target)
{
	unsigned long status;
	unsigned long target;
	unsigned long limit;
	u32 limit32;

	/*
	 * si_meminfo() is cheap. Moreover, we want to provide dynamic
	 * max balloon size later. So let us call si_meminfo() every
	 * iteration.
	 */
	si_meminfo(&b->sysinfo);
	limit = b->sysinfo.totalram;

	/* Ensure limit fits in 32-bits */
	limit32 = (u32)limit;
	if (limit != limit32)
		return false;

	/* update stats */
	STATS_INC(b->stats.target);

	status = VMWARE_BALLOON_CMD(GET_TARGET, limit, target);
	if (vmballoon_check_status(b, status)) {
		*new_target = target;
		return true;
	}

	pr_debug("%s - failed, hv returns %ld\n", __func__, status);
	STATS_INC(b->stats.target_fail);
	return false;
}

/*
 * Notify the host about an allocated page so that the host can use it
 * without fear that the guest will need it. The host may reject some
 * pages; we need to check the return value and maybe submit a different
 * page.
 */
static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
				    unsigned int *hv_status)
{
	unsigned long status, dummy;
	u32 pfn32;

	pfn32 = (u32)pfn;
	if (pfn32 != pfn)
		return -1;

	STATS_INC(b->stats.lock);

	*hv_status = status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy);
	if (vmballoon_check_status(b, status))
		return 0;

	pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
	STATS_INC(b->stats.lock_fail);
	return 1;
}

/*
 * Notify the host that the guest intends to release the given page back
 * into the pool of available (to the guest) pages.
 */
static bool vmballoon_send_unlock_page(struct vmballoon *b, unsigned long pfn)
{
	unsigned long status, dummy;
	u32 pfn32;

	pfn32 = (u32)pfn;
	if (pfn32 != pfn)
		return false;

	STATS_INC(b->stats.unlock);

	status = VMWARE_BALLOON_CMD(UNLOCK, pfn, dummy);
	if (vmballoon_check_status(b, status))
		return true;

	pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
	STATS_INC(b->stats.unlock_fail);
	return false;
}

/*
 * Quickly release all pages allocated for the balloon. This function is
 * called when the host decides to "reset" the balloon for one reason or
 * another. Unlike normal "deflate" we do not (shall not) notify the host
 * of the pages being released.
 */
static void vmballoon_pop(struct vmballoon *b)
{
	struct page *page, *next;
	unsigned int count = 0;

	list_for_each_entry_safe(page, next, &b->pages, lru) {
		list_del(&page->lru);
		__free_page(page);
		STATS_INC(b->stats.free);
		b->size--;

		if (++count >= b->rate_free) {
			count = 0;
			cond_resched();
		}
	}
}

/*
 * Perform the standard reset sequence by popping the balloon (in case it
 * is not empty) and then restarting the protocol. This operation normally
 * happens when the host responds with VMW_BALLOON_ERROR_RESET to a command.
 */
static void vmballoon_reset(struct vmballoon *b)
{
	/* free all pages, skipping monitor unlock */
	vmballoon_pop(b);

	if (vmballoon_send_start(b)) {
		b->reset_required = false;
		if (!vmballoon_send_guest_id(b))
			pr_err("failed to send guest ID to the host\n");
	}
}

/*
 * Allocate (or reserve) a page for the balloon and notify the host. If the
 * host refuses the page, put it on the "refused" list and allocate another
 * one until the host is satisfied. "Refused" pages are released at the end
 * of the inflation cycle (when we allocate b->rate_alloc pages).
 */
static int vmballoon_reserve_page(struct vmballoon *b, bool can_sleep)
{
	struct page *page;
	gfp_t flags;
	unsigned int hv_status;
	int locked;

	flags = can_sleep ? VMW_PAGE_ALLOC_CANSLEEP : VMW_PAGE_ALLOC_NOSLEEP;

	do {
		if (!can_sleep)
			STATS_INC(b->stats.alloc);
		else
			STATS_INC(b->stats.sleep_alloc);

		page = alloc_page(flags);
		if (!page) {
			if (!can_sleep)
				STATS_INC(b->stats.alloc_fail);
			else
				STATS_INC(b->stats.sleep_alloc_fail);
			return -ENOMEM;
		}

		/* inform monitor */
		locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status);
		if (locked > 0) {
			STATS_INC(b->stats.refused_alloc);

			if (hv_status == VMW_BALLOON_ERROR_RESET ||
			    hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) {
				__free_page(page);
				return -EIO;
			}

			/*
			 * Place page on the list of non-balloonable pages
			 * and retry allocation, unless we already accumulated
			 * too many of them, in which case take a breather.
			 */
			list_add(&page->lru, &b->refused_pages);
			if (++b->n_refused_pages >= VMW_BALLOON_MAX_REFUSED)
				return -EIO;
		}
	} while (locked != 0);

	/* track allocated page */
	list_add(&page->lru, &b->pages);

	/* update balloon size */
	b->size++;

	return 0;
}

/*
 * Release the page allocated for the balloon. Note that we first notify
 * the host so it can make sure the page will be available for the guest
 * to use, if needed.
 */
static int vmballoon_release_page(struct vmballoon *b, struct page *page)
{
	if (!vmballoon_send_unlock_page(b, page_to_pfn(page)))
		return -EIO;

	list_del(&page->lru);

	/* deallocate page */
	__free_page(page);
	STATS_INC(b->stats.free);

	/* update balloon size */
	b->size--;

	return 0;
}

/*
 * Release pages that were allocated while attempting to inflate the
 * balloon but were refused by the host for one reason or another.
 */
static void vmballoon_release_refused_pages(struct vmballoon *b)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, &b->refused_pages, lru) {
		list_del(&page->lru);
		__free_page(page);
		STATS_INC(b->stats.refused_free);
	}

	b->n_refused_pages = 0;
}

/*
 * Inflate the balloon towards its target size. Note that we try to limit
 * the rate of allocation to make sure we are not choking the rest of the
 * system.
 */
static void vmballoon_inflate(struct vmballoon *b)
{
	unsigned int goal;
	unsigned int rate;
	unsigned int i;
	unsigned int allocations = 0;
	int error = 0;
	bool alloc_can_sleep = false;

	pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);

	/*
	 * First try NOSLEEP page allocations to inflate balloon.
	 *
	 * If we do not throttle nosleep allocations, we can drain all
	 * free pages in the guest quickly (if the balloon target is high).
	 * As a side-effect, draining free pages helps to inform (force)
	 * the guest to start swapping if balloon target is not met yet,
	 * which is a desired behavior. However, the balloon driver can
	 * consume all available CPU cycles if too many pages are allocated
	 * in a second. Therefore, we throttle nosleep allocations even when
	 * the guest is not under memory pressure. OTOH, if we have already
	 * predicted that the guest is under memory pressure, then we
	 * slow down page allocations considerably.
	 */

	goal = b->target - b->size;
	/*
	 * Start with the no-sleep allocation rate, which may be higher
	 * than the sleeping allocation rate.
	 */
	rate = b->slow_allocation_cycles ?
			b->rate_alloc : VMW_BALLOON_NOSLEEP_ALLOC_MAX;

	pr_debug("%s - goal: %d, no-sleep rate: %d, sleep rate: %d\n",
		 __func__, goal, rate, b->rate_alloc);

	for (i = 0; i < goal; i++) {

		error = vmballoon_reserve_page(b, alloc_can_sleep);
		if (error) {
			if (error != -ENOMEM) {
				/*
				 * Not a page allocation failure, stop this
				 * cycle. Maybe we'll get new target from
				 * the host soon.
				 */
				break;
			}

			if (alloc_can_sleep) {
				/*
				 * CANSLEEP page allocation failed, so guest
				 * is under severe memory pressure. Quickly
				 * decrease allocation rate.
				 */
				b->rate_alloc = max(b->rate_alloc / 2,
						    VMW_BALLOON_RATE_ALLOC_MIN);
				break;
			}

			/*
			 * NOSLEEP page allocation failed, so the guest is
			 * under memory pressure. Let us slow down page
			 * allocations for next few cycles so that the guest
			 * gets out of memory pressure. Also, if we already
			 * allocated b->rate_alloc pages, let's pause,
			 * otherwise switch to sleeping allocations.
			 */
			b->slow_allocation_cycles = VMW_BALLOON_SLOW_CYCLES;

			if (i >= b->rate_alloc)
				break;

			alloc_can_sleep = true;
			/* Lower rate for sleeping allocations. */
			rate = b->rate_alloc;
		}

		if (++allocations > VMW_BALLOON_YIELD_THRESHOLD) {
			cond_resched();
			allocations = 0;
		}

		if (i >= rate) {
			/* We allocated enough pages, let's take a break. */
			break;
		}
	}

	/*
	 * We reached our goal without failures so try increasing the
	 * allocation rate.
	 */
	if (error == 0 && i >= b->rate_alloc) {
		unsigned int mult = i / b->rate_alloc;

		b->rate_alloc =
			min(b->rate_alloc + mult * VMW_BALLOON_RATE_ALLOC_INC,
			    VMW_BALLOON_RATE_ALLOC_MAX);
	}

	vmballoon_release_refused_pages(b);
}

/*
 * Decrease the size of the balloon allowing the guest to use more memory.
 */
static void vmballoon_deflate(struct vmballoon *b)
{
	struct page *page, *next;
	unsigned int i = 0;
	unsigned int goal;
	int error;

	pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);

	/* limit deallocation rate */
	goal = min(b->size - b->target, b->rate_free);

	pr_debug("%s - goal: %d, rate: %d\n", __func__, goal, b->rate_free);

	/* free pages to reach target */
	list_for_each_entry_safe(page, next, &b->pages, lru) {
		error = vmballoon_release_page(b, page);
		if (error) {
			/* quickly decrease rate in case of error */
			b->rate_free = max(b->rate_free / 2,
					   VMW_BALLOON_RATE_FREE_MIN);
			return;
		}

		if (++i >= goal)
			break;
	}

	/* slowly increase rate if there were no errors */
	b->rate_free = min(b->rate_free + VMW_BALLOON_RATE_FREE_INC,
			   VMW_BALLOON_RATE_FREE_MAX);
}

/*
 * Balloon work function: reset the protocol, if needed, get the new
 * balloon target and adjust the balloon size as needed. Repeat every
 * 1 second.
 */
static void vmballoon_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct vmballoon *b = container_of(dwork, struct vmballoon, dwork);
	unsigned int target;

	STATS_INC(b->stats.timer);

	if (b->reset_required)
		vmballoon_reset(b);

	if (b->slow_allocation_cycles > 0)
		b->slow_allocation_cycles--;

	if (vmballoon_send_get_target(b, &target)) {
		/* update target, adjust size */
		b->target = target;

		if (b->size < target)
			vmballoon_inflate(b);
		else if (b->size > target)
			vmballoon_deflate(b);
	}

	/*
	 * We are using a freezable workqueue so that balloon operations are
	 * stopped while the system transitions to/from sleep/hibernation.
	 */
	queue_delayed_work(system_freezable_wq,
			   dwork, round_jiffies_relative(HZ));
}

/*
 * DEBUGFS Interface
 */
#ifdef CONFIG_DEBUG_FS

static int vmballoon_debug_show(struct seq_file *f, void *offset)
{
	struct vmballoon *b = f->private;
	struct vmballoon_stats *stats = &b->stats;

	/* format size info */
	seq_printf(f,
		   "target:             %8d pages\n"
		   "current:            %8d pages\n",
		   b->target, b->size);

	/* format rate info */
	seq_printf(f,
		   "rateNoSleepAlloc:   %8d pages/sec\n"
		   "rateSleepAlloc:     %8d pages/sec\n"
		   "rateFree:           %8d pages/sec\n",
		   VMW_BALLOON_NOSLEEP_ALLOC_MAX,
		   b->rate_alloc, b->rate_free);

	seq_printf(f,
		   "\n"
		   "timer:              %8u\n"
		   "start:              %8u (%4u failed)\n"
		   "guestType:          %8u (%4u failed)\n"
		   "lock:               %8u (%4u failed)\n"
		   "unlock:             %8u (%4u failed)\n"
		   "target:             %8u (%4u failed)\n"
		   "primNoSleepAlloc:   %8u (%4u failed)\n"
		   "primCanSleepAlloc:  %8u (%4u failed)\n"
		   "primFree:           %8u\n"
		   "errAlloc:           %8u\n"
		   "errFree:            %8u\n",
		   stats->timer,
		   stats->start, stats->start_fail,
		   stats->guest_type, stats->guest_type_fail,
		   stats->lock, stats->lock_fail,
		   stats->unlock, stats->unlock_fail,
		   stats->target, stats->target_fail,
		   stats->alloc, stats->alloc_fail,
		   stats->sleep_alloc, stats->sleep_alloc_fail,
		   stats->free,
		   stats->refused_alloc, stats->refused_free);

	return 0;
}

static int vmballoon_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, vmballoon_debug_show, inode->i_private);
}

static const struct file_operations vmballoon_debug_fops = {
	.owner		= THIS_MODULE,
	.open		= vmballoon_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init vmballoon_debugfs_init(struct vmballoon *b)
{
	int error;

	b->dbg_entry = debugfs_create_file("vmmemctl", S_IRUGO, NULL, b,
					   &vmballoon_debug_fops);
	if (IS_ERR(b->dbg_entry)) {
		error = PTR_ERR(b->dbg_entry);
		pr_err("failed to create debugfs entry, error: %d\n", error);
		return error;
	}

	return 0;
}

static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
{
	debugfs_remove(b->dbg_entry);
}

#else

static inline int vmballoon_debugfs_init(struct vmballoon *b)
{
	return 0;
}

static inline void vmballoon_debugfs_exit(struct vmballoon *b)
{
}

#endif	/* CONFIG_DEBUG_FS */

static int __init vmballoon_init(void)
{
	int error;

	/*
	 * Check if we are running on VMware's hypervisor and bail out
	 * if we are not.
	 */
	if (x86_hyper != &x86_hyper_vmware)
		return -ENODEV;

	INIT_LIST_HEAD(&balloon.pages);
	INIT_LIST_HEAD(&balloon.refused_pages);

	/* initialize rates */
	balloon.rate_alloc = VMW_BALLOON_RATE_ALLOC_MAX;
	balloon.rate_free = VMW_BALLOON_RATE_FREE_MAX;

	INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);

	/*
	 * Start balloon.
	 */
	if (!vmballoon_send_start(&balloon)) {
		pr_err("failed to send start command to the host\n");
		return -EIO;
	}

	if (!vmballoon_send_guest_id(&balloon)) {
		pr_err("failed to send guest ID to the host\n");
		return -EIO;
	}

	error = vmballoon_debugfs_init(&balloon);
	if (error)
		return error;

	queue_delayed_work(system_freezable_wq, &balloon.dwork, 0);

	return 0;
}
module_init(vmballoon_init);

static void __exit vmballoon_exit(void)
{
	cancel_delayed_work_sync(&balloon.dwork);

	vmballoon_debugfs_exit(&balloon);

	/*
	 * Deallocate all reserved memory, and reset connection with monitor.
	 * Reset connection before deallocating memory to avoid potential for
	 * additional spurious resets from guest touching deallocated pages.
	 */
	vmballoon_send_start(&balloon);
	vmballoon_pop(&balloon);
}
module_exit(vmballoon_exit);
// SPDX-License-Identifier: GPL-2.0
/*
 * VMware Balloon driver.
 *
 * Copyright (C) 2000-2018, VMware, Inc. All Rights Reserved.
 *
 * This is the VMware physical memory management driver for Linux. The
 * driver acts like a "balloon" that can be inflated to reclaim physical
 * pages by reserving them in the guest and invalidating them in the
 * monitor, freeing up the underlying machine pages so they can be
 * allocated to other guests. The balloon can also be deflated to allow
 * the guest to use more physical memory. Higher level policies can
 * control the sizes of balloons in VMs in order to manage physical
 * memory resources.
 */

//#define DEBUG
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/balloon_compaction.h>
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <asm/hypervisor.h>

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
MODULE_ALIAS("dmi:*:svnVMware*:*");
MODULE_ALIAS("vmware_vmmemctl");
MODULE_LICENSE("GPL");

static bool __read_mostly vmwballoon_shrinker_enable;
module_param(vmwballoon_shrinker_enable, bool, 0444);
MODULE_PARM_DESC(vmwballoon_shrinker_enable,
	"Enable non-cooperative out-of-memory protection. Disabled by default as it may degrade performance.");

/* Delay in seconds after shrink before inflation. */
#define VMBALLOON_SHRINK_DELAY		(5)

/* Maximum number of refused pages we accumulate during inflation cycle */
#define VMW_BALLOON_MAX_REFUSED		16

/* Magic number for the balloon mount-point */
#define BALLOON_VMW_MAGIC		0x0ba11007

/*
 * Hypervisor communication port definitions.
 */
#define VMW_BALLOON_HV_PORT		0x5670
#define VMW_BALLOON_HV_MAGIC		0x456c6d6f
#define VMW_BALLOON_GUEST_ID		1	/* Linux */

enum vmwballoon_capabilities {
	/*
	 * Bit 0 is reserved and not associated to any capability.
	 */
	VMW_BALLOON_BASIC_CMDS			= (1 << 1),
	VMW_BALLOON_BATCHED_CMDS		= (1 << 2),
	VMW_BALLOON_BATCHED_2M_CMDS		= (1 << 3),
	VMW_BALLOON_SIGNALLED_WAKEUP_CMD	= (1 << 4),
	VMW_BALLOON_64_BIT_TARGET		= (1 << 5)
};

#define VMW_BALLOON_CAPABILITIES_COMMON	(VMW_BALLOON_BASIC_CMDS \
					| VMW_BALLOON_BATCHED_CMDS \
					| VMW_BALLOON_BATCHED_2M_CMDS \
					| VMW_BALLOON_SIGNALLED_WAKEUP_CMD)

#define VMW_BALLOON_2M_ORDER		(PMD_SHIFT - PAGE_SHIFT)
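
/*
 * With 4KB base pages on x86-64, PMD_SHIFT is 21 and PAGE_SHIFT is 12,
 * so a "2MB" balloon page is order 9, i.e. 512 basic frames.
 */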

/*
 * 64-bit targets are only supported in 64-bit builds.
 */
#ifdef CONFIG_64BIT
#define VMW_BALLOON_CAPABILITIES	(VMW_BALLOON_CAPABILITIES_COMMON \
					| VMW_BALLOON_64_BIT_TARGET)
#else
#define VMW_BALLOON_CAPABILITIES	VMW_BALLOON_CAPABILITIES_COMMON
#endif

enum vmballoon_page_size_type {
	VMW_BALLOON_4K_PAGE,
	VMW_BALLOON_2M_PAGE,
	VMW_BALLOON_LAST_SIZE = VMW_BALLOON_2M_PAGE
};

#define VMW_BALLOON_NUM_PAGE_SIZES	(VMW_BALLOON_LAST_SIZE + 1)

static const char * const vmballoon_page_size_names[] = {
	[VMW_BALLOON_4K_PAGE]			= "4k",
	[VMW_BALLOON_2M_PAGE]			= "2M"
};

enum vmballoon_op {
	VMW_BALLOON_INFLATE,
	VMW_BALLOON_DEFLATE
};

enum vmballoon_op_stat_type {
	VMW_BALLOON_OP_STAT,
	VMW_BALLOON_OP_FAIL_STAT
};

#define VMW_BALLOON_OP_STAT_TYPES	(VMW_BALLOON_OP_FAIL_STAT + 1)

/**
 * enum vmballoon_cmd_type - backdoor commands.
 *
 * Availability of the commands is as follows:
 *
 * %VMW_BALLOON_CMD_START, %VMW_BALLOON_CMD_GET_TARGET and
 * %VMW_BALLOON_CMD_GUEST_ID are always available.
 *
 * If the host reports %VMW_BALLOON_BASIC_CMDS are supported then
 * %VMW_BALLOON_CMD_LOCK and %VMW_BALLOON_CMD_UNLOCK commands are available.
 *
 * If the host reports %VMW_BALLOON_BATCHED_CMDS are supported then
 * %VMW_BALLOON_CMD_BATCHED_LOCK and %VMW_BALLOON_CMD_BATCHED_UNLOCK commands
 * are available.
 *
 * If the host reports %VMW_BALLOON_BATCHED_2M_CMDS are supported then
 * %VMW_BALLOON_CMD_BATCHED_2M_LOCK and %VMW_BALLOON_CMD_BATCHED_2M_UNLOCK
 * are supported.
 *
 * If the host reports %VMW_BALLOON_SIGNALLED_WAKEUP_CMD is supported then
 * the %VMW_BALLOON_CMD_VMCI_DOORBELL_SET command is available.
 *
 * @VMW_BALLOON_CMD_START: Communicating supported version with the hypervisor.
 * @VMW_BALLOON_CMD_GET_TARGET: Gets the balloon target size.
 * @VMW_BALLOON_CMD_LOCK: Informs the hypervisor about a ballooned page.
 * @VMW_BALLOON_CMD_UNLOCK: Informs the hypervisor about a page that is about
 *			    to be deflated from the balloon.
 * @VMW_BALLOON_CMD_GUEST_ID: Informs the hypervisor about the type of OS that
 *			      runs in the VM.
 * @VMW_BALLOON_CMD_BATCHED_LOCK: Inform the hypervisor about a batch of
 *				  ballooned pages (up to 512).
 * @VMW_BALLOON_CMD_BATCHED_UNLOCK: Inform the hypervisor about a batch of
 *				    pages that are about to be deflated from
 *				    the balloon (up to 512).
 * @VMW_BALLOON_CMD_BATCHED_2M_LOCK: Similar to @VMW_BALLOON_CMD_BATCHED_LOCK
 *				     for 2MB pages.
 * @VMW_BALLOON_CMD_BATCHED_2M_UNLOCK: Similar to
 *				       @VMW_BALLOON_CMD_BATCHED_UNLOCK for 2MB
 *				       pages.
 * @VMW_BALLOON_CMD_VMCI_DOORBELL_SET: A command to set doorbell notification
 *				       that would be invoked when the balloon
 *				       size changes.
 * @VMW_BALLOON_CMD_LAST: Value of the last command.
 */
enum vmballoon_cmd_type {
	VMW_BALLOON_CMD_START,
	VMW_BALLOON_CMD_GET_TARGET,
	VMW_BALLOON_CMD_LOCK,
	VMW_BALLOON_CMD_UNLOCK,
	VMW_BALLOON_CMD_GUEST_ID,
	/* No command 5 */
	VMW_BALLOON_CMD_BATCHED_LOCK = 6,
	VMW_BALLOON_CMD_BATCHED_UNLOCK,
	VMW_BALLOON_CMD_BATCHED_2M_LOCK,
	VMW_BALLOON_CMD_BATCHED_2M_UNLOCK,
	VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
	VMW_BALLOON_CMD_LAST = VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
};

#define VMW_BALLOON_CMD_NUM	(VMW_BALLOON_CMD_LAST + 1)

enum vmballoon_error_codes {
	VMW_BALLOON_SUCCESS,
	VMW_BALLOON_ERROR_CMD_INVALID,
	VMW_BALLOON_ERROR_PPN_INVALID,
	VMW_BALLOON_ERROR_PPN_LOCKED,
	VMW_BALLOON_ERROR_PPN_UNLOCKED,
	VMW_BALLOON_ERROR_PPN_PINNED,
	VMW_BALLOON_ERROR_PPN_NOTNEEDED,
	VMW_BALLOON_ERROR_RESET,
	VMW_BALLOON_ERROR_BUSY
};

#define VMW_BALLOON_SUCCESS_WITH_CAPABILITIES	(0x03000000)

#define VMW_BALLOON_CMD_WITH_TARGET_MASK			\
	((1UL << VMW_BALLOON_CMD_GET_TARGET)		|	\
	 (1UL << VMW_BALLOON_CMD_LOCK)			|	\
	 (1UL << VMW_BALLOON_CMD_UNLOCK)		|	\
	 (1UL << VMW_BALLOON_CMD_BATCHED_LOCK)		|	\
	 (1UL << VMW_BALLOON_CMD_BATCHED_UNLOCK)	|	\
	 (1UL << VMW_BALLOON_CMD_BATCHED_2M_LOCK)	|	\
	 (1UL << VMW_BALLOON_CMD_BATCHED_2M_UNLOCK))

static const char * const vmballoon_cmd_names[] = {
	[VMW_BALLOON_CMD_START]			= "start",
	[VMW_BALLOON_CMD_GET_TARGET]		= "target",
	[VMW_BALLOON_CMD_LOCK]			= "lock",
	[VMW_BALLOON_CMD_UNLOCK]		= "unlock",
	[VMW_BALLOON_CMD_GUEST_ID]		= "guestType",
	[VMW_BALLOON_CMD_BATCHED_LOCK]		= "batchLock",
	[VMW_BALLOON_CMD_BATCHED_UNLOCK]	= "batchUnlock",
	[VMW_BALLOON_CMD_BATCHED_2M_LOCK]	= "2m-lock",
	[VMW_BALLOON_CMD_BATCHED_2M_UNLOCK]	= "2m-unlock",
	[VMW_BALLOON_CMD_VMCI_DOORBELL_SET]	= "doorbellSet"
};

enum vmballoon_stat_page {
	VMW_BALLOON_PAGE_STAT_ALLOC,
	VMW_BALLOON_PAGE_STAT_ALLOC_FAIL,
	VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC,
	VMW_BALLOON_PAGE_STAT_REFUSED_FREE,
	VMW_BALLOON_PAGE_STAT_FREE,
	VMW_BALLOON_PAGE_STAT_LAST = VMW_BALLOON_PAGE_STAT_FREE
};

#define VMW_BALLOON_PAGE_STAT_NUM	(VMW_BALLOON_PAGE_STAT_LAST + 1)

enum vmballoon_stat_general {
	VMW_BALLOON_STAT_TIMER,
	VMW_BALLOON_STAT_DOORBELL,
	VMW_BALLOON_STAT_RESET,
	VMW_BALLOON_STAT_SHRINK,
	VMW_BALLOON_STAT_SHRINK_FREE,
	VMW_BALLOON_STAT_LAST = VMW_BALLOON_STAT_SHRINK_FREE
};

#define VMW_BALLOON_STAT_NUM		(VMW_BALLOON_STAT_LAST + 1)

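/*
 * Batching is assumed to be supported (likely) and statistics gathering
 * is assumed to be off (unlikely) until proven otherwise, so the common
 * code paths pay only for a patched-out branch.
 */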
static DEFINE_STATIC_KEY_TRUE(vmw_balloon_batching);
static DEFINE_STATIC_KEY_FALSE(balloon_stat_enabled);

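/*
 * struct vmballoon_ctl - control structure for a single inflation or
 * deflation cycle: the pages being processed, the pages the host refused,
 * 4KB pages pre-allocated by splitting refused 2MB pages, their counts,
 * and the page size and operation type for this cycle.
 */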
struct vmballoon_ctl {
	struct list_head pages;
	struct list_head refused_pages;
	struct list_head prealloc_pages;
	unsigned int n_refused_pages;
	unsigned int n_pages;
	enum vmballoon_page_size_type page_size;
	enum vmballoon_op op;
};

/**
 * struct vmballoon_batch_entry - a batch entry for lock or unlock.
 *
 * @status: the status of the operation, which is written by the hypervisor.
 * @reserved: reserved for future use. Must be set to zero.
 * @pfn: the physical frame number of the page to be locked or unlocked.
 */
struct vmballoon_batch_entry {
	u64 status : 5;
	u64 reserved : PAGE_SHIFT - 5;
	u64 pfn : 52;
} __packed;
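
/*
 * Each entry is a packed 64-bit value, so a single 4KB batch page holds
 * PAGE_SIZE / sizeof(u64) = 512 entries, matching the batch sizes noted
 * in enum vmballoon_cmd_type above.
 */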

struct vmballoon {
	/**
	 * @max_page_size: maximum supported page size for ballooning.
	 *
	 * Protected by @conf_sem
	 */
	enum vmballoon_page_size_type max_page_size;

	/**
	 * @size: balloon actual size in basic page size (frames).
	 *
	 * While we currently do not support a size which is bigger than
	 * 32-bit, in preparation for future support, use 64-bits.
	 */
	atomic64_t size;

	/**
	 * @target: balloon target size in basic page size (frames).
	 *
	 * We do not protect the target under the assumption that setting the
	 * value is always done through a single write. If this assumption
	 * ever breaks, we would have to use X_ONCE for accesses, and suffer
	 * the less optimized code. Although we may read a stale target value
	 * if multiple accesses happen at once, the performance impact should
	 * be minor.
	 */
	unsigned long target;

	/**
	 * @reset_required: reset flag
	 *
	 * Setting this flag may introduce races, but the code is expected to
	 * handle them gracefully. In the worst case, another operation will
	 * fail as reset did not take place. Clearing the flag is done while
	 * holding @conf_sem for write.
	 */
	bool reset_required;

	/**
	 * @capabilities: hypervisor balloon capabilities.
	 *
	 * Protected by @conf_sem.
	 */
	unsigned long capabilities;

	/**
	 * @batch_page: pointer to communication batch page.
	 *
	 * When batching is used, batch_page points to a page, which holds up
	 * to %VMW_BALLOON_BATCH_MAX_PAGES entries for locking or unlocking.
	 */
	struct vmballoon_batch_entry *batch_page;

	/**
	 * @batch_max_pages: maximum pages that can be locked/unlocked.
	 *
	 * Indicates the number of pages that the hypervisor can lock or
	 * unlock at once, according to whether batching is enabled. If
	 * batching is disabled, only a single page can be locked/unlocked on
	 * each operation.
	 *
	 * Protected by @conf_sem.
	 */
	unsigned int batch_max_pages;

	/**
	 * @page: page to be locked/unlocked by the hypervisor
	 *
	 * @page is only used when batching is disabled and a single page is
	 * reclaimed on each iteration.
	 *
	 * Protected by @comm_lock.
	 */
	struct page *page;

	/**
	 * @shrink_timeout: timeout until the next inflation.
	 *
	 * After a shrink event, indicates the time in jiffies after which
	 * inflation is allowed again. Can be written concurrently with
	 * reads, so must use READ_ONCE/WRITE_ONCE when accessing.
	 */
	unsigned long shrink_timeout;

	/* statistics */
	struct vmballoon_stats *stats;

#ifdef CONFIG_DEBUG_FS
	/* debugfs file exporting statistics */
	struct dentry *dbg_entry;
#endif

	/**
	 * @b_dev_info: balloon device information descriptor.
	 */
	struct balloon_dev_info b_dev_info;

	struct delayed_work dwork;

	/**
	 * @huge_pages: list of the inflated 2MB pages.
	 *
	 * Protected by @b_dev_info.pages_lock .
	 */
	struct list_head huge_pages;

	/**
	 * @vmci_doorbell: handle of the VMCI doorbell used for notifications.
	 *
	 * Protected by @conf_sem.
	 */
	struct vmci_handle vmci_doorbell;

	/**
	 * @conf_sem: semaphore to protect the configuration and the
	 * statistics.
	 */
	struct rw_semaphore conf_sem;

	/**
	 * @comm_lock: lock to protect the communication with the host.
	 *
	 * Lock ordering: @conf_sem -> @comm_lock .
	 */
	spinlock_t comm_lock;

	/**
	 * @shrinker: shrinker interface that is used to avoid over-inflation.
	 */
	struct shrinker shrinker;

	/**
	 * @shrinker_registered: whether the shrinker was registered.
	 *
	 * The shrinker interface does not gracefully handle the removal of a
	 * shrinker that was never registered. This indication simplifies the
	 * unregistration process.
	 */
	bool shrinker_registered;
};

static struct vmballoon balloon;

struct vmballoon_stats {
	/* timer / doorbell operations */
	atomic64_t general_stat[VMW_BALLOON_STAT_NUM];

	/* allocation statistics for huge and small pages */
	atomic64_t
	       page_stat[VMW_BALLOON_PAGE_STAT_NUM][VMW_BALLOON_NUM_PAGE_SIZES];

	/* Monitor operations: total operations, and failures */
	atomic64_t ops[VMW_BALLOON_CMD_NUM][VMW_BALLOON_OP_STAT_TYPES];
};

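/*
 * Statistics updates go through the helpers below; when statistics are
 * disabled (or debugfs is not compiled in), each helper reduces to a
 * patched-out branch via the balloon_stat_enabled static key.
 */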
static inline bool is_vmballoon_stats_on(void)
{
	return IS_ENABLED(CONFIG_DEBUG_FS) &&
		static_branch_unlikely(&balloon_stat_enabled);
}

static inline void vmballoon_stats_op_inc(struct vmballoon *b, unsigned int op,
					  enum vmballoon_op_stat_type type)
{
	if (is_vmballoon_stats_on())
		atomic64_inc(&b->stats->ops[op][type]);
}

static inline void vmballoon_stats_gen_inc(struct vmballoon *b,
					   enum vmballoon_stat_general stat)
{
	if (is_vmballoon_stats_on())
		atomic64_inc(&b->stats->general_stat[stat]);
}

static inline void vmballoon_stats_gen_add(struct vmballoon *b,
					   enum vmballoon_stat_general stat,
					   unsigned int val)
{
	if (is_vmballoon_stats_on())
		atomic64_add(val, &b->stats->general_stat[stat]);
}

static inline void vmballoon_stats_page_inc(struct vmballoon *b,
					    enum vmballoon_stat_page stat,
					    enum vmballoon_page_size_type size)
{
	if (is_vmballoon_stats_on())
		atomic64_inc(&b->stats->page_stat[stat][size]);
}

static inline void vmballoon_stats_page_add(struct vmballoon *b,
					    enum vmballoon_stat_page stat,
					    enum vmballoon_page_size_type size,
					    unsigned int val)
{
	if (is_vmballoon_stats_on())
		atomic64_add(val, &b->stats->page_stat[stat][size]);
}

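/**
 * __vmballoon_cmd() - issue a command to the hypervisor via the backdoor.
 * @b: pointer to the balloon.
 * @cmd: command to issue (one of &enum vmballoon_cmd_type).
 * @arg1: first command argument.
 * @arg2: second command argument.
 * @result: pointer for the command result; still written for commands that
 *          do not report one, so callers may pass a dummy.
 *
 * Also updates the balloon target when the command reports one, and flags
 * that a reset is required when the hypervisor requests it.
 *
 * Return: the status reported by the hypervisor.
 */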
static inline unsigned long
__vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
		unsigned long arg2, unsigned long *result)
{
	unsigned long status, dummy1, dummy2, dummy3, local_result;

	vmballoon_stats_op_inc(b, cmd, VMW_BALLOON_OP_STAT);

	asm volatile ("inl %%dx" :
		"=a"(status),
		"=c"(dummy1),
		"=d"(dummy2),
		"=b"(local_result),
		"=S"(dummy3) :
		"0"(VMW_BALLOON_HV_MAGIC),
		"1"(cmd),
		"2"(VMW_BALLOON_HV_PORT),
		"3"(arg1),
		"4"(arg2) :
		"memory");

	/* update the result if needed */
	if (result)
		*result = (cmd == VMW_BALLOON_CMD_START) ? dummy1 :
							   local_result;

	/* update target when applicable */
	if (status == VMW_BALLOON_SUCCESS &&
	    ((1ul << cmd) & VMW_BALLOON_CMD_WITH_TARGET_MASK))
		WRITE_ONCE(b->target, local_result);

	if (status != VMW_BALLOON_SUCCESS &&
	    status != VMW_BALLOON_SUCCESS_WITH_CAPABILITIES) {
		vmballoon_stats_op_inc(b, cmd, VMW_BALLOON_OP_FAIL_STAT);
		pr_debug("%s: %s [0x%lx,0x%lx) failed, returned %ld\n",
			 __func__, vmballoon_cmd_names[cmd], arg1, arg2,
			 status);
	}

	/* mark reset required accordingly */
	if (status == VMW_BALLOON_ERROR_RESET)
		b->reset_required = true;

	return status;
}

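/*
 * vmballoon_cmd() - convenience wrapper around __vmballoon_cmd() for
 * commands whose result value is not needed by the caller.
 */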
static __always_inline unsigned long
vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
	      unsigned long arg2)
{
	unsigned long dummy;

	return __vmballoon_cmd(b, cmd, arg1, arg2, &dummy);
}

/*
 * Send "start" command to the host, communicating supported version
 * of the protocol.
 */
static int vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
{
	unsigned long status, capabilities;

	status = __vmballoon_cmd(b, VMW_BALLOON_CMD_START, req_caps, 0,
				 &capabilities);

	switch (status) {
	case VMW_BALLOON_SUCCESS_WITH_CAPABILITIES:
		b->capabilities = capabilities;
		break;
	case VMW_BALLOON_SUCCESS:
		b->capabilities = VMW_BALLOON_BASIC_CMDS;
		break;
	default:
		return -EIO;
	}

	/*
	 * 2MB pages are only supported with batching. If batching is for some
	 * reason disabled, do not use 2MB pages, since otherwise the legacy
	 * mechanism is used with 2MB pages, causing a failure.
	 */
	b->max_page_size = VMW_BALLOON_4K_PAGE;
	if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) &&
	    (b->capabilities & VMW_BALLOON_BATCHED_CMDS))
		b->max_page_size = VMW_BALLOON_2M_PAGE;

	return 0;
}

/**
 * vmballoon_send_guest_id - communicate guest type to the host.
 *
 * @b: pointer to the balloon.
 *
 * Communicate guest type to the host so that it can adjust the ballooning
 * algorithm to the one most appropriate for the guest. This command
 * is normally issued after sending the "start" command and is part of
 * the standard reset sequence.
 *
 * Return: zero on success or appropriate error code.
 */
static int vmballoon_send_guest_id(struct vmballoon *b)
{
	unsigned long status;

	status = vmballoon_cmd(b, VMW_BALLOON_CMD_GUEST_ID,
			       VMW_BALLOON_GUEST_ID, 0);

	return status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
}

/**
 * vmballoon_page_order() - return the order of the page
 * @page_size: the size of the page.
 *
 * Return: the allocation order.
 */
static inline
unsigned int vmballoon_page_order(enum vmballoon_page_size_type page_size)
{
	return page_size == VMW_BALLOON_2M_PAGE ? VMW_BALLOON_2M_ORDER : 0;
}

/**
 * vmballoon_page_in_frames() - returns the number of frames in a page.
 * @page_size: the size of the page.
 *
 * Return: the number of 4k frames.
 */
static inline unsigned int
vmballoon_page_in_frames(enum vmballoon_page_size_type page_size)
{
	return 1 << vmballoon_page_order(page_size);
}

/**
 * vmballoon_mark_page_offline() - mark a page as offline
 * @page: pointer for the page.
 * @page_size: the size of the page.
 */
static void
vmballoon_mark_page_offline(struct page *page,
			    enum vmballoon_page_size_type page_size)
{
	int i;

	for (i = 0; i < vmballoon_page_in_frames(page_size); i++)
		__SetPageOffline(page + i);
}

/**
 * vmballoon_mark_page_online() - mark a page as online
 * @page: pointer for the page.
 * @page_size: the size of the page.
 */
static void
vmballoon_mark_page_online(struct page *page,
			   enum vmballoon_page_size_type page_size)
{
	int i;

	for (i = 0; i < vmballoon_page_in_frames(page_size); i++)
		__ClearPageOffline(page + i);
}

/**
 * vmballoon_send_get_target() - Retrieve desired balloon size from the host.
 *
 * @b: pointer to the balloon.
 *
 * Return: zero on success; -EINVAL if the limit does not fit in 32-bit, as
 * required by the host-guest protocol; -EIO if an error occurred in
 * communicating with the host.
 */
static int vmballoon_send_get_target(struct vmballoon *b)
{
	unsigned long status;
	unsigned long limit;

	limit = totalram_pages();

	/* Ensure limit fits in 32-bits if 64-bit targets are not supported */
	if (!(b->capabilities & VMW_BALLOON_64_BIT_TARGET) &&
	    limit != (u32)limit)
		return -EINVAL;

	status = vmballoon_cmd(b, VMW_BALLOON_CMD_GET_TARGET, limit, 0);

	return status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
}

/**
 * vmballoon_alloc_page_list - allocates a list of pages.
 *
 * @b: pointer to the balloon.
 * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
 * @req_n_pages: the number of requested pages.
 *
 * Tries to allocate @req_n_pages. Adds them to the list of balloon pages in
 * @ctl.pages and updates @ctl.n_pages to reflect the number of pages.
 *
 * Return: zero on success or error code otherwise.
 */
static int vmballoon_alloc_page_list(struct vmballoon *b,
				     struct vmballoon_ctl *ctl,
				     unsigned int req_n_pages)
{
	struct page *page;
	unsigned int i;

	for (i = 0; i < req_n_pages; i++) {
		/*
		 * First check if we happen to have pages that were allocated
		 * before. This happens when a 2MB page is rejected by the
		 * hypervisor during inflation and then split into 4KB pages.
		 */
		if (!list_empty(&ctl->prealloc_pages)) {
			page = list_first_entry(&ctl->prealloc_pages,
						struct page, lru);
			list_del(&page->lru);
		} else {
			if (ctl->page_size == VMW_BALLOON_2M_PAGE)
				page = alloc_pages(__GFP_HIGHMEM|__GFP_NOWARN|
					__GFP_NOMEMALLOC, VMW_BALLOON_2M_ORDER);
			else
				page = balloon_page_alloc();

			vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC,
						 ctl->page_size);
		}

		if (page) {
			/* Success. Add the page to the list and continue. */
			list_add(&page->lru, &ctl->pages);
			continue;
		}

		/* Allocation failed. Update statistics and stop. */
		vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC_FAIL,
					 ctl->page_size);
		break;
	}

	ctl->n_pages = i;

	return req_n_pages == ctl->n_pages ? 0 : -ENOMEM;
}

/**
 * vmballoon_handle_one_result - handle lock/unlock result for a single page.
 *
 * @b: pointer for %struct vmballoon.
 * @page: pointer for the page whose result should be handled.
 * @page_size: size of the page.
 * @status: status of the operation as provided by the hypervisor.
 *
 * Return: zero on success, -EIO if the hypervisor refused the page.
 */
static int vmballoon_handle_one_result(struct vmballoon *b, struct page *page,
				       enum vmballoon_page_size_type page_size,
				       unsigned long status)
{
	/* On success do nothing. The page is already on the balloon list. */
	if (likely(status == VMW_BALLOON_SUCCESS))
		return 0;

	pr_debug("%s: failed comm pfn %lx status %lu page_size %s\n", __func__,
		 page_to_pfn(page), status,
		 vmballoon_page_size_names[page_size]);

	/* Error occurred */
	vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC,
				 page_size);

	return -EIO;
}

/**
 * vmballoon_status_page - returns the status of (un)lock operation
 *
 * @b: pointer to the balloon.
 * @idx: index for the page for which the operation is performed.
 * @p: pointer to where the page struct is returned.
 *
 * Following a lock or unlock operation, returns the status of the operation
 * for an individual page. Provides the page that the operation was performed
 * on via the @p argument.
 *
 * Returns: The status of a lock or unlock operation for an individual page.
 */
static unsigned long vmballoon_status_page(struct vmballoon *b, int idx,
					   struct page **p)
{
	if (static_branch_likely(&vmw_balloon_batching)) {
		/* batching mode */
		*p = pfn_to_page(b->batch_page[idx].pfn);
		return b->batch_page[idx].status;
	}

	/* non-batching mode */
	*p = b->page;

	/*
	 * If a failure occurs, the indication will be provided in the status
	 * of the entire operation, which is considered before the individual
	 * page status. So for non-batching mode, the indication is always of
	 * success.
	 */
	return VMW_BALLOON_SUCCESS;
}

/**
 * vmballoon_lock_op - notifies the host about inflated/deflated pages.
 * @b: pointer to the balloon.
 * @num_pages: number of inflated/deflated pages.
 * @page_size: size of the page.
 * @op: the type of operation (lock or unlock).
 *
 * Notify the host about page(s) that were ballooned (or removed from the
 * balloon) so that the host can use them without fear that the guest will
 * need them (or stop using them since the VM does). The host may reject
 * some pages; we need to check the return value and maybe submit a
 * different page. The pages that are inflated/deflated are pointed to by
 * @b->page.
 *
 * Return: result as provided by the hypervisor.
 */
static unsigned long vmballoon_lock_op(struct vmballoon *b,
				       unsigned int num_pages,
				       enum vmballoon_page_size_type page_size,
				       enum vmballoon_op op)
{
	unsigned long cmd, pfn;

	lockdep_assert_held(&b->comm_lock);

	if (static_branch_likely(&vmw_balloon_batching)) {
		if (op == VMW_BALLOON_INFLATE)
			cmd = page_size == VMW_BALLOON_2M_PAGE ?
				VMW_BALLOON_CMD_BATCHED_2M_LOCK :
				VMW_BALLOON_CMD_BATCHED_LOCK;
		else
			cmd = page_size == VMW_BALLOON_2M_PAGE ?
				VMW_BALLOON_CMD_BATCHED_2M_UNLOCK :
				VMW_BALLOON_CMD_BATCHED_UNLOCK;

		pfn = PHYS_PFN(virt_to_phys(b->batch_page));
	} else {
		cmd = op == VMW_BALLOON_INFLATE ? VMW_BALLOON_CMD_LOCK :
						  VMW_BALLOON_CMD_UNLOCK;
		pfn = page_to_pfn(b->page);

		/* In non-batching mode, PFNs must fit in 32-bit */
		if (unlikely(pfn != (u32)pfn))
			return VMW_BALLOON_ERROR_PPN_INVALID;
	}

	return vmballoon_cmd(b, cmd, pfn, num_pages);
}

/**
 * vmballoon_add_page - adds a page towards lock/unlock operation.
 *
 * @b: pointer to the balloon.
 * @idx: index of the page to be ballooned in this batch.
 * @p: pointer to the page that is about to be ballooned.
 *
 * Adds the page to be ballooned. Must be called while holding @comm_lock.
 */
static void vmballoon_add_page(struct vmballoon *b, unsigned int idx,
			       struct page *p)
{
	lockdep_assert_held(&b->comm_lock);

	if (static_branch_likely(&vmw_balloon_batching))
		b->batch_page[idx] = (struct vmballoon_batch_entry)
					{ .pfn = page_to_pfn(p) };
	else
		b->page = p;
}

/**
 * vmballoon_lock - lock or unlock a batch of pages.
 *
 * @b: pointer to the balloon.
 * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
 *
 * Notifies the host about the ballooned pages (after inflation or deflation,
 * according to @ctl). If the host rejects a page, it is put on the @ctl
 * refused-pages list. These refused pages are then released when moving to
 * the next size of pages.
 *
 * Note that we neither free the pages here nor put them back on the
 * ballooned pages list. Instead we queue them for later processing. We do
 * that for several reasons. First, we do not want to free a page under the
 * lock. Second, it allows us to unify the handling of lock and unlock. In
 * the inflate case, the caller will check if there are too many refused
 * pages and release them. Although it is not identical to the past
 * behavior, it should not affect performance.
 */
static int vmballoon_lock(struct vmballoon *b, struct vmballoon_ctl *ctl)
{
	unsigned long batch_status;
	struct page *page;
	unsigned int i, num_pages;

	num_pages = ctl->n_pages;
	if (num_pages == 0)
		return 0;

	/* communication with the host is done under the communication lock */
	spin_lock(&b->comm_lock);

	i = 0;
	list_for_each_entry(page, &ctl->pages, lru)
		vmballoon_add_page(b, i++, page);

	batch_status = vmballoon_lock_op(b, ctl->n_pages, ctl->page_size,
					 ctl->op);

	/*
	 * Iterate over the pages in the provided list. Since we are changing
	 * @ctl->n_pages we are saving the original value in @num_pages and
	 * use this value to bound the loop.
	 */
	for (i = 0; i < num_pages; i++) {
		unsigned long status;

		status = vmballoon_status_page(b, i, &page);

		/*
		 * Failure of the whole batch overrides a single operation
		 * results.
		 */
		if (batch_status != VMW_BALLOON_SUCCESS)
			status = batch_status;

		/* Continue if no error happened */
		if (!vmballoon_handle_one_result(b, page, ctl->page_size,
						 status))
			continue;

		/*
		 * Error happened. Move the pages to the refused list and
		 * update the pages number.
		 */
		list_move(&page->lru, &ctl->refused_pages);
		ctl->n_pages--;
		ctl->n_refused_pages++;
	}

	spin_unlock(&b->comm_lock);

	return batch_status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
}

/**
 * vmballoon_release_page_list() - Releases a page list
 *
 * @page_list: list of pages to release.
 * @n_pages: pointer to the number of pages.
 * @page_size: whether the pages in the list are 2MB (or else 4KB).
 *
 * Releases the list of pages and zeros the number of pages.
 */
static void vmballoon_release_page_list(struct list_head *page_list,
					int *n_pages,
					enum vmballoon_page_size_type page_size)
{
	struct page *page, *tmp;

	list_for_each_entry_safe(page, tmp, page_list, lru) {
		list_del(&page->lru);
		__free_pages(page, vmballoon_page_order(page_size));
	}

	if (n_pages)
		*n_pages = 0;
}

/*
 * Release pages that were allocated while attempting to inflate the
 * balloon but were refused by the host for one reason or another.
 */
static void vmballoon_release_refused_pages(struct vmballoon *b,
					    struct vmballoon_ctl *ctl)
{
	vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_REFUSED_FREE,
				 ctl->page_size);

	vmballoon_release_page_list(&ctl->refused_pages, &ctl->n_refused_pages,
				    ctl->page_size);
}

/**
 * vmballoon_change - retrieve the required balloon change
 *
 * @b: pointer for the balloon.
 *
 * Return: the required change for the balloon size. A positive number
 * indicates inflation, a negative number indicates a deflation.
 */
static int64_t vmballoon_change(struct vmballoon *b)
{
	int64_t size, target;

	size = atomic64_read(&b->size);
	target = READ_ONCE(b->target);

	/*
	 * We must cast first because of int sizes; otherwise we might get
	 * huge positives instead of negatives.
	 */

	if (b->reset_required)
		return 0;

	/* consider a 2MB slack on deflate, unless the balloon is emptied */
	if (target < size && target != 0 &&
	    size - target < vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE))
		return 0;

	/* If an out-of-memory recently occurred, inflation is disallowed. */
	if (target > size && time_before(jiffies, READ_ONCE(b->shrink_timeout)))
		return 0;

	return target - size;
}

/**
 * vmballoon_enqueue_page_list() - Enqueues list of pages after inflation.
 *
 * @b: pointer to balloon.
 * @pages: list of pages to enqueue.
 * @n_pages: pointer to number of pages in list. The value is zeroed.
 * @page_size: whether the pages are 2MB or 4KB pages.
 *
 * Enqueues the provided list of pages in the ballooned page list, clears the
 * list and zeroes the number of pages that was provided.
 */
static void vmballoon_enqueue_page_list(struct vmballoon *b,
					struct list_head *pages,
					unsigned int *n_pages,
					enum vmballoon_page_size_type page_size)
{
	unsigned long flags;
	struct page *page;

	if (page_size == VMW_BALLOON_4K_PAGE) {
		balloon_page_list_enqueue(&b->b_dev_info, pages);
	} else {
		/*
		 * Keep the huge pages in a local list which is not available
		 * for the balloon compaction mechanism.
		 */
		spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);

		list_for_each_entry(page, pages, lru) {
			vmballoon_mark_page_offline(page, VMW_BALLOON_2M_PAGE);
		}

		list_splice_init(pages, &b->huge_pages);
		__count_vm_events(BALLOON_INFLATE, *n_pages *
				  vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE));
		spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
	}

	*n_pages = 0;
}

1030/**
1031 * vmballoon_dequeue_page_list() - Dequeues page lists for deflation.
1032 *
1033 * @b: pointer to balloon.
1034 * @pages: list of pages to enqueue.
1035 * @n_pages: pointer to number of pages in list. The value is zeroed.
1036 * @page_size: whether the pages are 2MB or 4KB pages.
1037 * @n_req_pages: the number of requested pages.
1038 *
1039 * Dequeues the number of requested pages from the balloon for deflation. The
1040 * number of dequeued pages may be lower, if not enough pages in the requested
1041 * size are available.
1042 */
1043static void vmballoon_dequeue_page_list(struct vmballoon *b,
1044 struct list_head *pages,
1045 unsigned int *n_pages,
1046 enum vmballoon_page_size_type page_size,
1047 unsigned int n_req_pages)
1048{
1049 struct page *page, *tmp;
1050 unsigned int i = 0;
1051 unsigned long flags;
1052
1053 /* In the case of 4k pages, use the compaction infrastructure */
1054 if (page_size == VMW_BALLOON_4K_PAGE) {
1055 *n_pages = balloon_page_list_dequeue(&b->b_dev_info, pages,
1056 n_req_pages);
1057 return;
1058 }
1059
1060 /* 2MB pages */
1061 spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
1062 list_for_each_entry_safe(page, tmp, &b->huge_pages, lru) {
1063 vmballoon_mark_page_online(page, VMW_BALLOON_2M_PAGE);
1064
1065 list_move(&page->lru, pages);
1066 if (++i == n_req_pages)
1067 break;
1068 }
1069
1070 __count_vm_events(BALLOON_DEFLATE,
1071 i * vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE));
1072 spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
1073 *n_pages = i;
1074}

/**
 * vmballoon_split_refused_pages() - Split the 2MB refused pages to 4k.
 *
 * @ctl: pointer to the &struct vmballoon_ctl which defines the operation.
 *
 * If inflation of 2MB pages was denied by the hypervisor, it is likely to be
 * due to one or a few 4KB pages within them. Such 2MB pages may keep being
 * allocated and then refused. To prevent this case, this function splits the
 * refused pages into 4KB pages and adds them to the @prealloc_pages list.
 */
static void vmballoon_split_refused_pages(struct vmballoon_ctl *ctl)
{
	struct page *page, *tmp;
	unsigned int i, order;

	order = vmballoon_page_order(ctl->page_size);

	list_for_each_entry_safe(page, tmp, &ctl->refused_pages, lru) {
		list_del(&page->lru);
		split_page(page, order);
		for (i = 0; i < (1 << order); i++)
			list_add(&page[i].lru, &ctl->prealloc_pages);
	}
	ctl->n_refused_pages = 0;
}

/**
 * vmballoon_inflate() - Inflate the balloon towards its target size.
 *
 * @b: pointer to the balloon.
 */
static void vmballoon_inflate(struct vmballoon *b)
{
	int64_t to_inflate_frames;
	struct vmballoon_ctl ctl = {
		.pages = LIST_HEAD_INIT(ctl.pages),
		.refused_pages = LIST_HEAD_INIT(ctl.refused_pages),
		.prealloc_pages = LIST_HEAD_INIT(ctl.prealloc_pages),
		.page_size = b->max_page_size,
		.op = VMW_BALLOON_INFLATE
	};

	while ((to_inflate_frames = vmballoon_change(b)) > 0) {
		unsigned int to_inflate_pages, page_in_frames;
		int alloc_error, lock_error = 0;

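		/* The page list must be empty at the start of each iteration. */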
		VM_BUG_ON(!list_empty(&ctl.pages));
		VM_BUG_ON(ctl.n_pages != 0);

		page_in_frames = vmballoon_page_in_frames(ctl.page_size);

		to_inflate_pages = min_t(unsigned long, b->batch_max_pages,
					 DIV_ROUND_UP_ULL(to_inflate_frames,
							  page_in_frames));

		/* Start by allocating */
		alloc_error = vmballoon_alloc_page_list(b, &ctl,
							to_inflate_pages);

		/* Actually lock the pages by telling the hypervisor */
		lock_error = vmballoon_lock(b, &ctl);

		/*
		 * If an error indicates that something serious went wrong,
		 * stop the inflation.
		 */
		if (lock_error)
			break;

		/* Update the balloon size */
		atomic64_add(ctl.n_pages * page_in_frames, &b->size);

		vmballoon_enqueue_page_list(b, &ctl.pages, &ctl.n_pages,
					    ctl.page_size);

		/*
		 * If allocation failed or the number of refused pages exceeds
		 * the maximum allowed, move to the next page size.
		 */
		if (alloc_error ||
		    ctl.n_refused_pages >= VMW_BALLOON_MAX_REFUSED) {
			if (ctl.page_size == VMW_BALLOON_4K_PAGE)
				break;

			/*
			 * Split the refused pages to 4k. This will also empty
			 * the refused pages list.
			 */
			vmballoon_split_refused_pages(&ctl);
			ctl.page_size--;
		}

		cond_resched();
	}

	/*
	 * Release pages that were allocated while attempting to inflate the
	 * balloon but were refused by the host for one reason or another,
	 * and update the statistics.
	 */
	if (ctl.n_refused_pages != 0)
		vmballoon_release_refused_pages(b, &ctl);

	vmballoon_release_page_list(&ctl.prealloc_pages, NULL, ctl.page_size);
}

/**
 * vmballoon_deflate() - Decrease the size of the balloon.
 *
 * @b: pointer to the balloon
 * @n_frames: the number of frames to deflate. If zero, automatically
 *	      calculated according to the target size.
 * @coordinated: whether to coordinate with the host
 *
 * Decrease the size of the balloon allowing guest to use more memory.
 *
 * Return: The number of deflated frames (i.e., basic page size units)
 */
static unsigned long vmballoon_deflate(struct vmballoon *b, uint64_t n_frames,
				       bool coordinated)
{
	unsigned long deflated_frames = 0;
	unsigned long tried_frames = 0;
	struct vmballoon_ctl ctl = {
		.pages = LIST_HEAD_INIT(ctl.pages),
		.refused_pages = LIST_HEAD_INIT(ctl.refused_pages),
		.page_size = VMW_BALLOON_4K_PAGE,
		.op = VMW_BALLOON_DEFLATE
	};

	/* free pages to reach target */
	while (true) {
		unsigned int to_deflate_pages, n_unlocked_frames;
		unsigned int page_in_frames;
		int64_t to_deflate_frames;
		bool deflated_all;

		page_in_frames = vmballoon_page_in_frames(ctl.page_size);

		VM_BUG_ON(!list_empty(&ctl.pages));
		VM_BUG_ON(ctl.n_pages);
		VM_BUG_ON(!list_empty(&ctl.refused_pages));
		VM_BUG_ON(ctl.n_refused_pages);

		/*
		 * If a specific number of frames was requested, try to
		 * deflate that many frames. Otherwise, deflation is performed
		 * according to the target and the current balloon size.
		 */
		to_deflate_frames = n_frames ? n_frames - tried_frames :
					       -vmballoon_change(b);

		/* break if no work to do */
		if (to_deflate_frames <= 0)
			break;

		/*
		 * Calculate the number of pages based on the current page
		 * size, but limit the deflated frames to a single chunk.
		 */
		to_deflate_pages = min_t(unsigned long, b->batch_max_pages,
					 DIV_ROUND_UP_ULL(to_deflate_frames,
							  page_in_frames));

		/* First take the pages from the balloon pages. */
		vmballoon_dequeue_page_list(b, &ctl.pages, &ctl.n_pages,
					    ctl.page_size, to_deflate_pages);

		/*
		 * Before any pages move to the refused list, count their
		 * frames as frames that we tried to deflate.
		 */
		tried_frames += ctl.n_pages * page_in_frames;

		/*
		 * Unlock the pages by communicating with the hypervisor if
		 * the communication is coordinated (i.e., not pop). We ignore
		 * the return code; instead we check whether we managed to
		 * unlock all the pages. If we failed, we will move to the
		 * next page size, and will eventually try again later.
		 */
		if (coordinated)
			vmballoon_lock(b, &ctl);

		/*
		 * Check whether we deflated enough. We will move to the next
		 * page size if we did not manage to do so. This calculation
		 * takes place now, as once the pages are released, the number
		 * of pages is zeroed.
		 */
		deflated_all = (ctl.n_pages == to_deflate_pages);

		/* Update local and global counters */
		n_unlocked_frames = ctl.n_pages * page_in_frames;
		atomic64_sub(n_unlocked_frames, &b->size);
		deflated_frames += n_unlocked_frames;

		vmballoon_stats_page_add(b, VMW_BALLOON_PAGE_STAT_FREE,
					 ctl.page_size, ctl.n_pages);

		/* free the ballooned pages */
		vmballoon_release_page_list(&ctl.pages, &ctl.n_pages,
					    ctl.page_size);

		/* Return the refused pages to the ballooned list. */
		vmballoon_enqueue_page_list(b, &ctl.refused_pages,
					    &ctl.n_refused_pages,
					    ctl.page_size);

		/* If we failed to unlock all the pages, move to next size. */
		if (!deflated_all) {
			if (ctl.page_size == b->max_page_size)
				break;
			ctl.page_size++;
		}

		cond_resched();
	}

	return deflated_frames;
}

/**
 * vmballoon_deinit_batching() - disables batching mode.
 *
 * @b: pointer to &struct vmballoon.
 *
 * Disables batching by deallocating the page used for communication with the
 * hypervisor and disabling the static key to indicate that batching is off.
 */
static void vmballoon_deinit_batching(struct vmballoon *b)
{
	free_page((unsigned long)b->batch_page);
	b->batch_page = NULL;
	static_branch_disable(&vmw_balloon_batching);
	b->batch_max_pages = 1;
}

/**
 * vmballoon_init_batching() - enable batching mode.
 *
 * @b: pointer to &struct vmballoon.
 *
 * Enables batching by allocating a page for communication with the hypervisor
 * and enabling the static_key to use batching.
 *
 * Return: zero on success or an appropriate error code.
 */
static int vmballoon_init_batching(struct vmballoon *b)
{
	struct page *page;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return -ENOMEM;

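	/* The communication page holds as many batch entries as fit in one page. */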
	b->batch_page = page_address(page);
	b->batch_max_pages = PAGE_SIZE / sizeof(struct vmballoon_batch_entry);

	static_branch_enable(&vmw_balloon_batching);

	return 0;
}

/*
 * Receive notification and resize balloon
 */
static void vmballoon_doorbell(void *client_data)
{
	struct vmballoon *b = client_data;

	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_DOORBELL);

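	/* Run the balloon worker immediately to act on the new target. */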
	mod_delayed_work(system_freezable_wq, &b->dwork, 0);
}

/*
 * Clean up vmci doorbell
 */
static void vmballoon_vmci_cleanup(struct vmballoon *b)
{
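	/* Ask the host to stop using the doorbell before it is destroyed. */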
	vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
		      VMCI_INVALID_ID, VMCI_INVALID_ID);

	if (!vmci_handle_is_invalid(b->vmci_doorbell)) {
		vmci_doorbell_destroy(b->vmci_doorbell);
		b->vmci_doorbell = VMCI_INVALID_HANDLE;
	}
}

/**
 * vmballoon_vmci_init - Initialize vmci doorbell.
 *
 * @b: pointer to the balloon.
 *
 * Return: zero on success or when the wakeup command is not supported; an
 * error code otherwise.
 *
 * Initializes the vmci doorbell, to get notified as soon as the balloon target
 * changes.
 */
static int vmballoon_vmci_init(struct vmballoon *b)
{
	unsigned long error;

	if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0)
		return 0;

	error = vmci_doorbell_create(&b->vmci_doorbell, VMCI_FLAG_DELAYED_CB,
				     VMCI_PRIVILEGE_FLAG_RESTRICTED,
				     vmballoon_doorbell, b);

	if (error != VMCI_SUCCESS)
		goto fail;

	error = __vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
				b->vmci_doorbell.context,
				b->vmci_doorbell.resource, NULL);

	if (error != VMW_BALLOON_SUCCESS)
		goto fail;

	return 0;
fail:
	vmballoon_vmci_cleanup(b);
	return -EIO;
}

/**
 * vmballoon_pop - Quickly release all pages allocated for the balloon.
 *
 * @b: pointer to the balloon.
 *
 * This function is called when the host decides to "reset" the balloon for
 * one reason or another. Unlike a normal "deflate", we do not (and must not)
 * notify the host of the pages being released.
 */
static void vmballoon_pop(struct vmballoon *b)
{
	unsigned long size;

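	/* Repeat uncoordinated deflation until the balloon is empty. */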
	while ((size = atomic64_read(&b->size)))
		vmballoon_deflate(b, size, false);
}

/*
 * Perform the standard reset sequence by popping the balloon (in case it
 * is not empty) and then restarting the protocol. This operation normally
 * happens when the host responds with VMW_BALLOON_ERROR_RESET to a command.
 */
static void vmballoon_reset(struct vmballoon *b)
{
	int error;

	down_write(&b->conf_sem);

	vmballoon_vmci_cleanup(b);

	/* free all pages, skipping monitor unlock */
	vmballoon_pop(b);

	if (vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
		goto unlock;

	if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
		if (vmballoon_init_batching(b)) {
			/*
			 * We failed to initialize batching, inform the monitor
			 * about it by sending a null capability.
			 *
			 * The guest will retry in one second.
			 */
			vmballoon_send_start(b, 0);
			goto unlock;
		}
	} else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) {
		vmballoon_deinit_batching(b);
	}

	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_RESET);
	b->reset_required = false;

	error = vmballoon_vmci_init(b);
	if (error)
		pr_err("failed to initialize vmci doorbell\n");

	if (vmballoon_send_guest_id(b))
		pr_err("failed to send guest ID to the host\n");

unlock:
	up_write(&b->conf_sem);
}

/**
 * vmballoon_work - periodic balloon worker for reset, inflation and deflation.
 *
 * @work: pointer to the &work_struct which is provided by the workqueue.
 *
 * Resets the protocol if needed, gets the new size and adjusts the balloon as
 * needed. Repeats every second.
 */
static void vmballoon_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct vmballoon *b = container_of(dwork, struct vmballoon, dwork);
	int64_t change = 0;

	if (b->reset_required)
		vmballoon_reset(b);

	down_read(&b->conf_sem);

	/*
	 * Update the stats while holding the semaphore to ensure that
	 * @stats_enabled is consistent with whether the stats are actually
	 * enabled.
	 */
	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_TIMER);

	if (!vmballoon_send_get_target(b))
		change = vmballoon_change(b);

	if (change != 0) {
		pr_debug("%s - size: %llu, target %lu\n", __func__,
			 atomic64_read(&b->size), READ_ONCE(b->target));

		if (change > 0)
			vmballoon_inflate(b);
		else /* (change < 0) */
			vmballoon_deflate(b, 0, true);
	}

	up_read(&b->conf_sem);

	/*
	 * We are using a freezable workqueue so that balloon operations are
	 * stopped while the system transitions to/from sleep/hibernation.
	 */
	queue_delayed_work(system_freezable_wq,
			   dwork, round_jiffies_relative(HZ));
}

/**
 * vmballoon_shrinker_scan() - deflate the balloon due to memory pressure.
 * @shrinker: pointer to the balloon shrinker.
 * @sc: page reclaim information.
 *
 * Return: number of pages that were freed during deflation.
 */
static unsigned long vmballoon_shrinker_scan(struct shrinker *shrinker,
					     struct shrink_control *sc)
{
	struct vmballoon *b = &balloon;
	unsigned long deflated_frames;

	pr_debug("%s - size: %llu", __func__, atomic64_read(&b->size));

	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_SHRINK);

	/*
	 * If the semaphore cannot be taken even for read, a configuration
	 * change is in progress and we cannot easily reclaim, so bail out.
	 */
	if (!down_read_trylock(&b->conf_sem))
		return 0;

	deflated_frames = vmballoon_deflate(b, sc->nr_to_scan, true);

	vmballoon_stats_gen_add(b, VMW_BALLOON_STAT_SHRINK_FREE,
				deflated_frames);

	/*
	 * Delay future inflation for some time to mitigate the situations in
	 * which the balloon continuously grows and shrinks. Use WRITE_ONCE()
	 * since the access is asynchronous.
	 */
	WRITE_ONCE(b->shrink_timeout, jiffies + HZ * VMBALLOON_SHRINK_DELAY);

	up_read(&b->conf_sem);

	return deflated_frames;
}

/**
 * vmballoon_shrinker_count() - return the number of ballooned pages.
 * @shrinker: pointer to the balloon shrinker.
 * @sc: page reclaim information.
 *
 * Return: number of 4k pages that are allocated for the balloon and can
 * therefore be reclaimed under pressure.
 */
static unsigned long vmballoon_shrinker_count(struct shrinker *shrinker,
					      struct shrink_control *sc)
{
	struct vmballoon *b = &balloon;

	return atomic64_read(&b->size);
}

static void vmballoon_unregister_shrinker(struct vmballoon *b)
{
	if (b->shrinker_registered)
		unregister_shrinker(&b->shrinker);
	b->shrinker_registered = false;
}

static int vmballoon_register_shrinker(struct vmballoon *b)
{
	int r;

	/* Do nothing if the shrinker is not enabled */
	if (!vmwballoon_shrinker_enable)
		return 0;

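	/* The shrinker counts and reclaims in units of basic 4KB frames. */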
	b->shrinker.scan_objects = vmballoon_shrinker_scan;
	b->shrinker.count_objects = vmballoon_shrinker_count;
	b->shrinker.seeks = DEFAULT_SEEKS;

	r = register_shrinker(&b->shrinker);

	if (r == 0)
		b->shrinker_registered = true;

	return r;
}

/*
 * DEBUGFS Interface
 */
#ifdef CONFIG_DEBUG_FS

static const char * const vmballoon_stat_page_names[] = {
	[VMW_BALLOON_PAGE_STAT_ALLOC]		= "alloc",
	[VMW_BALLOON_PAGE_STAT_ALLOC_FAIL]	= "allocFail",
	[VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC]	= "errAlloc",
	[VMW_BALLOON_PAGE_STAT_REFUSED_FREE]	= "errFree",
	[VMW_BALLOON_PAGE_STAT_FREE]		= "free"
};

static const char * const vmballoon_stat_names[] = {
	[VMW_BALLOON_STAT_TIMER]		= "timer",
	[VMW_BALLOON_STAT_DOORBELL]		= "doorbell",
	[VMW_BALLOON_STAT_RESET]		= "reset",
	[VMW_BALLOON_STAT_SHRINK]		= "shrink",
	[VMW_BALLOON_STAT_SHRINK_FREE]		= "shrinkFree"
};

static int vmballoon_enable_stats(struct vmballoon *b)
{
	int r = 0;

	down_write(&b->conf_sem);

	/* did we somehow race with another reader which enabled stats? */
	if (b->stats)
		goto out;

	b->stats = kzalloc(sizeof(*b->stats), GFP_KERNEL);

	if (!b->stats) {
		/* allocation failed */
		r = -ENOMEM;
		goto out;
	}
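	/* Flip the static key so the hot paths start recording statistics. */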
	static_key_enable(&balloon_stat_enabled.key);
out:
	up_write(&b->conf_sem);
	return r;
}

/**
 * vmballoon_debug_show - shows statistics of balloon operations.
 * @f: pointer to the &struct seq_file.
 * @offset: ignored.
 *
 * Provides the statistics that can be accessed in vmmemctl in the debugfs.
 * To avoid the overhead (mainly memory) of collecting the statistics, we only
 * collect statistics after the first time the counters are read.
 *
 * Return: zero on success or an error code.
 */
static int vmballoon_debug_show(struct seq_file *f, void *offset)
{
	struct vmballoon *b = f->private;
	int i, j;

	/* enable stats if they are disabled */
	if (!b->stats) {
		int r = vmballoon_enable_stats(b);

		if (r)
			return r;
	}

	/* format capabilities info */
	seq_printf(f, "%-22s: %#16x\n", "balloon capabilities",
		   VMW_BALLOON_CAPABILITIES);
	seq_printf(f, "%-22s: %#16lx\n", "used capabilities", b->capabilities);
	seq_printf(f, "%-22s: %16s\n", "is resetting",
		   b->reset_required ? "y" : "n");

	/* format size info */
	seq_printf(f, "%-22s: %16lu\n", "target", READ_ONCE(b->target));
	seq_printf(f, "%-22s: %16llu\n", "current", atomic64_read(&b->size));

	for (i = 0; i < VMW_BALLOON_CMD_NUM; i++) {
		if (vmballoon_cmd_names[i] == NULL)
			continue;

		seq_printf(f, "%-22s: %16llu (%llu failed)\n",
			   vmballoon_cmd_names[i],
			   atomic64_read(&b->stats->ops[i][VMW_BALLOON_OP_STAT]),
			   atomic64_read(&b->stats->ops[i][VMW_BALLOON_OP_FAIL_STAT]));
	}

	for (i = 0; i < VMW_BALLOON_STAT_NUM; i++)
		seq_printf(f, "%-22s: %16llu\n",
			   vmballoon_stat_names[i],
			   atomic64_read(&b->stats->general_stat[i]));

	for (i = 0; i < VMW_BALLOON_PAGE_STAT_NUM; i++) {
		for (j = 0; j < VMW_BALLOON_NUM_PAGE_SIZES; j++)
			seq_printf(f, "%-18s(%s): %16llu\n",
				   vmballoon_stat_page_names[i],
				   vmballoon_page_size_names[j],
				   atomic64_read(&b->stats->page_stat[i][j]));
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(vmballoon_debug);

static void __init vmballoon_debugfs_init(struct vmballoon *b)
{
	b->dbg_entry = debugfs_create_file("vmmemctl", S_IRUGO, NULL, b,
					   &vmballoon_debug_fops);
}

static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
{
	static_key_disable(&balloon_stat_enabled.key);
	debugfs_remove(b->dbg_entry);
	kfree(b->stats);
	b->stats = NULL;
}

#else

static inline void vmballoon_debugfs_init(struct vmballoon *b)
{
}

static inline void vmballoon_debugfs_exit(struct vmballoon *b)
{
}

#endif /* CONFIG_DEBUG_FS */


#ifdef CONFIG_BALLOON_COMPACTION

static int vmballoon_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, BALLOON_VMW_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type vmballoon_fs = {
	.name			= "balloon-vmware",
	.init_fs_context	= vmballoon_init_fs_context,
	.kill_sb		= kill_anon_super,
};

static struct vfsmount *vmballoon_mnt;

/**
 * vmballoon_migratepage() - migrates a balloon page.
 * @b_dev_info: balloon device information descriptor.
 * @newpage: the page to which @page should be migrated.
 * @page: a ballooned page that should be migrated.
 * @mode: migration mode, ignored.
 *
 * This function is largely open-coded, as dictated by the interface that
 * balloon_compaction provides.
 *
 * Return: zero on success, -EAGAIN when migration cannot be performed
 *	   momentarily, and -EBUSY if migration failed and should be retried
 *	   with that specific page.
 */
static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
				 struct page *newpage, struct page *page,
				 enum migrate_mode mode)
{
	unsigned long status, flags;
	struct vmballoon *b;
	int ret;

	b = container_of(b_dev_info, struct vmballoon, b_dev_info);

	/*
	 * If the semaphore is taken, there is an ongoing configuration change
	 * (i.e., balloon reset), so try again.
	 */
	if (!down_read_trylock(&b->conf_sem))
		return -EAGAIN;

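	/* Serialize access to the communication channel with the host. */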
	spin_lock(&b->comm_lock);
	/*
	 * We must start by deflating and not inflating, as otherwise the
	 * hypervisor may tell us that it has enough memory and the new page is
	 * not needed. Since the old page is isolated, we cannot use the list
	 * interface to unlock it, as the LRU field is used for isolation.
	 * Instead, we use the native interface directly.
	 */
	vmballoon_add_page(b, 0, page);
	status = vmballoon_lock_op(b, 1, VMW_BALLOON_4K_PAGE,
				   VMW_BALLOON_DEFLATE);

	if (status == VMW_BALLOON_SUCCESS)
		status = vmballoon_status_page(b, 0, &page);

	/*
	 * If a failure happened, let the migration mechanism know that it
	 * should not retry.
	 */
	if (status != VMW_BALLOON_SUCCESS) {
		spin_unlock(&b->comm_lock);
		ret = -EBUSY;
		goto out_unlock;
	}

	/*
	 * The page is isolated, so it is safe to delete it without holding
	 * @pages_lock. We keep holding @comm_lock since we will need it
	 * again shortly.
	 */
	balloon_page_delete(page);

	put_page(page);

	/* Inflate */
	vmballoon_add_page(b, 0, newpage);
	status = vmballoon_lock_op(b, 1, VMW_BALLOON_4K_PAGE,
				   VMW_BALLOON_INFLATE);

	if (status == VMW_BALLOON_SUCCESS)
		status = vmballoon_status_page(b, 0, &newpage);

	spin_unlock(&b->comm_lock);

	if (status != VMW_BALLOON_SUCCESS) {
		/*
		 * A failure happened. While we can deflate the page we just
		 * inflated, this deflation can also encounter an error.
		 * Instead we will decrease the size of the balloon to reflect
		 * the change and report failure.
		 */
		atomic64_dec(&b->size);
		ret = -EBUSY;
	} else {
		/*
		 * Success. Take a reference for the page, and we will add it
		 * to the list after acquiring the lock.
		 */
		get_page(newpage);
		ret = MIGRATEPAGE_SUCCESS;
	}

	/* Update the balloon list under the @pages_lock */
	spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);

	/*
	 * On inflation success, we already took a reference for the @newpage.
	 * If we succeeded, just insert it into the list and update the
	 * statistics under the lock.
	 */
	if (ret == MIGRATEPAGE_SUCCESS) {
		balloon_page_insert(&b->b_dev_info, newpage);
		__count_vm_event(BALLOON_MIGRATE);
	}

	/*
	 * We deflated successfully, so regardless of the inflation success,
	 * we need to reduce the number of isolated_pages.
	 */
	b->b_dev_info.isolated_pages--;
	spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);

out_unlock:
	up_read(&b->conf_sem);
	return ret;
}

/**
 * vmballoon_compaction_deinit() - removes compaction related data.
 *
 * @b: pointer to the balloon.
 */
static void vmballoon_compaction_deinit(struct vmballoon *b)
{
	if (!IS_ERR(b->b_dev_info.inode))
		iput(b->b_dev_info.inode);

	b->b_dev_info.inode = NULL;
	kern_unmount(vmballoon_mnt);
	vmballoon_mnt = NULL;
}

/**
 * vmballoon_compaction_init() - initializes compaction for the balloon.
 *
 * @b: pointer to the balloon.
 *
 * If a failure occurs during initialization, this function does not perform
 * cleanup. The caller must call vmballoon_compaction_deinit() in that case.
 *
 * Return: zero on success or error code on failure.
 */
static __init int vmballoon_compaction_init(struct vmballoon *b)
{
	vmballoon_mnt = kern_mount(&vmballoon_fs);
	if (IS_ERR(vmballoon_mnt))
		return PTR_ERR(vmballoon_mnt);

	b->b_dev_info.migratepage = vmballoon_migratepage;
	b->b_dev_info.inode = alloc_anon_inode(vmballoon_mnt->mnt_sb);

	if (IS_ERR(b->b_dev_info.inode))
		return PTR_ERR(b->b_dev_info.inode);

	b->b_dev_info.inode->i_mapping->a_ops = &balloon_aops;
	return 0;
}

#else /* CONFIG_BALLOON_COMPACTION */

static void vmballoon_compaction_deinit(struct vmballoon *b)
{
}

static int vmballoon_compaction_init(struct vmballoon *b)
{
	return 0;
}

#endif /* CONFIG_BALLOON_COMPACTION */

static int __init vmballoon_init(void)
{
	int error;

	/*
	 * Check if we are running on VMware's hypervisor and bail out
	 * if we are not.
	 */
	if (x86_hyper_type != X86_HYPER_VMWARE)
		return -ENODEV;

	INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);

	error = vmballoon_register_shrinker(&balloon);
	if (error)
		goto fail;

	/*
	 * Compaction must be initialized after balloon_devinfo_init() has
	 * been called.
	 */
	balloon_devinfo_init(&balloon.b_dev_info);
	error = vmballoon_compaction_init(&balloon);
	if (error)
		goto fail;

	INIT_LIST_HEAD(&balloon.huge_pages);
	spin_lock_init(&balloon.comm_lock);
	init_rwsem(&balloon.conf_sem);
	balloon.vmci_doorbell = VMCI_INVALID_HANDLE;
	balloon.batch_page = NULL;
	balloon.page = NULL;
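	/* Force a reset on the first worker run to (re)start the protocol with the host. */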
	balloon.reset_required = true;

	queue_delayed_work(system_freezable_wq, &balloon.dwork, 0);

	vmballoon_debugfs_init(&balloon);

	return 0;
fail:
	vmballoon_unregister_shrinker(&balloon);
	vmballoon_compaction_deinit(&balloon);
	return error;
}

/*
 * Using late_initcall() instead of module_init() allows the balloon to use the
 * VMCI doorbell even when the balloon is built into the kernel. Otherwise the
 * VMCI is probed only after the balloon is initialized. If the balloon is used
 * as a module, late_initcall() is equivalent to module_init().
 */
late_initcall(vmballoon_init);

static void __exit vmballoon_exit(void)
{
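	/* Stop shrinker callbacks, doorbell notifications and the periodic worker. */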
	vmballoon_unregister_shrinker(&balloon);
	vmballoon_vmci_cleanup(&balloon);
	cancel_delayed_work_sync(&balloon.dwork);

	vmballoon_debugfs_exit(&balloon);

	/*
	 * Deallocate all reserved memory, and reset connection with monitor.
	 * Reset connection before deallocating memory to avoid potential for
	 * additional spurious resets from guest touching deallocated pages.
	 */
	vmballoon_send_start(&balloon, 0);
	vmballoon_pop(&balloon);

	/* Compaction can be deinitialized only once the balloon was popped. */
	vmballoon_compaction_deinit(&balloon);
}
module_exit(vmballoon_exit);