// SPDX-License-Identifier: GPL-2.0
/*
 * VMware Balloon driver.
 *
 * Copyright (C) 2000-2018, VMware, Inc. All Rights Reserved.
 *
 * This is the VMware physical memory management driver for Linux. The driver
 * acts like a "balloon" that can be inflated to reclaim physical pages by
 * reserving them in the guest and invalidating them in the monitor,
 * freeing up the underlying machine pages so they can be allocated to
 * other guests. The balloon can also be deflated to allow the guest to
 * use more physical memory. Higher level policies can control the sizes
 * of balloons in VMs in order to manage physical memory resources.
 */

//#define DEBUG
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/balloon_compaction.h>
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <asm/hypervisor.h>

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
MODULE_ALIAS("dmi:*:svnVMware*:*");
MODULE_ALIAS("vmware_vmmemctl");
MODULE_LICENSE("GPL");

static bool __read_mostly vmwballoon_shrinker_enable;
module_param(vmwballoon_shrinker_enable, bool, 0444);
MODULE_PARM_DESC(vmwballoon_shrinker_enable,
	"Enable non-cooperative out-of-memory protection. Disabled by default as it may degrade performance.");

/* Delay in seconds after shrink before inflation. */
#define VMBALLOON_SHRINK_DELAY		(5)

/* Maximum number of refused pages we accumulate during inflation cycle */
#define VMW_BALLOON_MAX_REFUSED		16

/* Magic number for the balloon mount-point */
#define BALLOON_VMW_MAGIC		0x0ba11007

/*
 * Hypervisor communication port definitions.
 */
#define VMW_BALLOON_HV_PORT		0x5670
#define VMW_BALLOON_HV_MAGIC		0x456c6d6f
#define VMW_BALLOON_GUEST_ID		1	/* Linux */

enum vmwballoon_capabilities {
	/*
	 * Bit 0 is reserved and not associated to any capability.
	 */
	VMW_BALLOON_BASIC_CMDS			= (1 << 1),
	VMW_BALLOON_BATCHED_CMDS		= (1 << 2),
	VMW_BALLOON_BATCHED_2M_CMDS		= (1 << 3),
	VMW_BALLOON_SIGNALLED_WAKEUP_CMD	= (1 << 4),
	VMW_BALLOON_64_BIT_TARGET		= (1 << 5)
};

#define VMW_BALLOON_CAPABILITIES_COMMON	(VMW_BALLOON_BASIC_CMDS \
					| VMW_BALLOON_BATCHED_CMDS \
					| VMW_BALLOON_BATCHED_2M_CMDS \
					| VMW_BALLOON_SIGNALLED_WAKEUP_CMD)

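/* Order of a 2MB allocation: on x86 with 4KB pages this is 9 (512 pages). */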
#define VMW_BALLOON_2M_ORDER		(PMD_SHIFT - PAGE_SHIFT)

/*
 * 64-bit targets are only supported in 64-bit
 */
#ifdef CONFIG_64BIT
#define VMW_BALLOON_CAPABILITIES	(VMW_BALLOON_CAPABILITIES_COMMON \
					| VMW_BALLOON_64_BIT_TARGET)
#else
#define VMW_BALLOON_CAPABILITIES	VMW_BALLOON_CAPABILITIES_COMMON
#endif

enum vmballoon_page_size_type {
	VMW_BALLOON_4K_PAGE,
	VMW_BALLOON_2M_PAGE,
	VMW_BALLOON_LAST_SIZE = VMW_BALLOON_2M_PAGE
};

#define VMW_BALLOON_NUM_PAGE_SIZES	(VMW_BALLOON_LAST_SIZE + 1)

static const char * const vmballoon_page_size_names[] = {
	[VMW_BALLOON_4K_PAGE]			= "4k",
	[VMW_BALLOON_2M_PAGE]			= "2M"
};

enum vmballoon_op {
	VMW_BALLOON_INFLATE,
	VMW_BALLOON_DEFLATE
};

enum vmballoon_op_stat_type {
	VMW_BALLOON_OP_STAT,
	VMW_BALLOON_OP_FAIL_STAT
};

#define VMW_BALLOON_OP_STAT_TYPES	(VMW_BALLOON_OP_FAIL_STAT + 1)

/**
 * enum vmballoon_cmd_type - backdoor commands.
 *
 * Availability of the commands is as follows:
 *
 * %VMW_BALLOON_CMD_START, %VMW_BALLOON_CMD_GET_TARGET and
 * %VMW_BALLOON_CMD_GUEST_ID are always available.
 *
 * If the host reports %VMW_BALLOON_BASIC_CMDS are supported then
 * %VMW_BALLOON_CMD_LOCK and %VMW_BALLOON_CMD_UNLOCK commands are available.
 *
 * If the host reports %VMW_BALLOON_BATCHED_CMDS are supported then
 * %VMW_BALLOON_CMD_BATCHED_LOCK and %VMW_BALLOON_CMD_BATCHED_UNLOCK commands
 * are available.
 *
 * If the host reports %VMW_BALLOON_BATCHED_2M_CMDS are supported then
 * %VMW_BALLOON_CMD_BATCHED_2M_LOCK and %VMW_BALLOON_CMD_BATCHED_2M_UNLOCK
 * are supported.
 *
 * If the host reports %VMW_BALLOON_SIGNALLED_WAKEUP_CMD is supported then
 * the %VMW_BALLOON_CMD_VMCI_DOORBELL_SET command is supported.
 *
 * @VMW_BALLOON_CMD_START: Communicating supported version with the hypervisor.
 * @VMW_BALLOON_CMD_GET_TARGET: Gets the balloon target size.
 * @VMW_BALLOON_CMD_LOCK: Informs the hypervisor about a ballooned page.
 * @VMW_BALLOON_CMD_UNLOCK: Informs the hypervisor about a page that is about
 *			    to be deflated from the balloon.
 * @VMW_BALLOON_CMD_GUEST_ID: Informs the hypervisor about the type of OS that
 *			      runs in the VM.
 * @VMW_BALLOON_CMD_BATCHED_LOCK: Inform the hypervisor about a batch of
 *				  ballooned pages (up to 512).
 * @VMW_BALLOON_CMD_BATCHED_UNLOCK: Inform the hypervisor about a batch of
 *				    pages that are about to be deflated from the
 *				    balloon (up to 512).
 * @VMW_BALLOON_CMD_BATCHED_2M_LOCK: Similar to @VMW_BALLOON_CMD_BATCHED_LOCK
 *				     for 2MB pages.
 * @VMW_BALLOON_CMD_BATCHED_2M_UNLOCK: Similar to
 *					@VMW_BALLOON_CMD_BATCHED_UNLOCK for 2MB
 *					pages.
 * @VMW_BALLOON_CMD_VMCI_DOORBELL_SET: A command to set doorbell notification
 *				       that would be invoked when the balloon
 *				       size changes.
 * @VMW_BALLOON_CMD_LAST: Value of the last command.
 */
enum vmballoon_cmd_type {
	VMW_BALLOON_CMD_START,
	VMW_BALLOON_CMD_GET_TARGET,
	VMW_BALLOON_CMD_LOCK,
	VMW_BALLOON_CMD_UNLOCK,
	VMW_BALLOON_CMD_GUEST_ID,
	/* No command 5 */
	VMW_BALLOON_CMD_BATCHED_LOCK = 6,
	VMW_BALLOON_CMD_BATCHED_UNLOCK,
	VMW_BALLOON_CMD_BATCHED_2M_LOCK,
	VMW_BALLOON_CMD_BATCHED_2M_UNLOCK,
	VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
	VMW_BALLOON_CMD_LAST = VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
};

#define VMW_BALLOON_CMD_NUM	(VMW_BALLOON_CMD_LAST + 1)

enum vmballoon_error_codes {
	VMW_BALLOON_SUCCESS,
	VMW_BALLOON_ERROR_CMD_INVALID,
	VMW_BALLOON_ERROR_PPN_INVALID,
	VMW_BALLOON_ERROR_PPN_LOCKED,
	VMW_BALLOON_ERROR_PPN_UNLOCKED,
	VMW_BALLOON_ERROR_PPN_PINNED,
	VMW_BALLOON_ERROR_PPN_NOTNEEDED,
	VMW_BALLOON_ERROR_RESET,
	VMW_BALLOON_ERROR_BUSY
};

#define VMW_BALLOON_SUCCESS_WITH_CAPABILITIES	(0x03000000)

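/*
 * Mask of the commands whose result carries the current balloon target;
 * __vmballoon_cmd() updates @b->target whenever one of them succeeds.
 */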
#define VMW_BALLOON_CMD_WITH_TARGET_MASK			\
	((1UL << VMW_BALLOON_CMD_GET_TARGET)		|	\
	 (1UL << VMW_BALLOON_CMD_LOCK)			|	\
	 (1UL << VMW_BALLOON_CMD_UNLOCK)		|	\
	 (1UL << VMW_BALLOON_CMD_BATCHED_LOCK)		|	\
	 (1UL << VMW_BALLOON_CMD_BATCHED_UNLOCK)	|	\
	 (1UL << VMW_BALLOON_CMD_BATCHED_2M_LOCK)	|	\
	 (1UL << VMW_BALLOON_CMD_BATCHED_2M_UNLOCK))

static const char * const vmballoon_cmd_names[] = {
	[VMW_BALLOON_CMD_START]			= "start",
	[VMW_BALLOON_CMD_GET_TARGET]		= "target",
	[VMW_BALLOON_CMD_LOCK]			= "lock",
	[VMW_BALLOON_CMD_UNLOCK]		= "unlock",
	[VMW_BALLOON_CMD_GUEST_ID]		= "guestType",
	[VMW_BALLOON_CMD_BATCHED_LOCK]		= "batchLock",
	[VMW_BALLOON_CMD_BATCHED_UNLOCK]	= "batchUnlock",
	[VMW_BALLOON_CMD_BATCHED_2M_LOCK]	= "2m-lock",
	[VMW_BALLOON_CMD_BATCHED_2M_UNLOCK]	= "2m-unlock",
	[VMW_BALLOON_CMD_VMCI_DOORBELL_SET]	= "doorbellSet"
};

enum vmballoon_stat_page {
	VMW_BALLOON_PAGE_STAT_ALLOC,
	VMW_BALLOON_PAGE_STAT_ALLOC_FAIL,
	VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC,
	VMW_BALLOON_PAGE_STAT_REFUSED_FREE,
	VMW_BALLOON_PAGE_STAT_FREE,
	VMW_BALLOON_PAGE_STAT_LAST = VMW_BALLOON_PAGE_STAT_FREE
};

#define VMW_BALLOON_PAGE_STAT_NUM	(VMW_BALLOON_PAGE_STAT_LAST + 1)

enum vmballoon_stat_general {
	VMW_BALLOON_STAT_TIMER,
	VMW_BALLOON_STAT_DOORBELL,
	VMW_BALLOON_STAT_RESET,
	VMW_BALLOON_STAT_SHRINK,
	VMW_BALLOON_STAT_SHRINK_FREE,
	VMW_BALLOON_STAT_LAST = VMW_BALLOON_STAT_SHRINK_FREE
};

#define VMW_BALLOON_STAT_NUM		(VMW_BALLOON_STAT_LAST + 1)

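/*
 * Static keys for the two fast paths: batching is assumed to be available and
 * is disabled during reset if the host does not support batched commands;
 * statistics collection stays off until the debugfs counters are read for the
 * first time.
 */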
static DEFINE_STATIC_KEY_TRUE(vmw_balloon_batching);
static DEFINE_STATIC_KEY_FALSE(balloon_stat_enabled);

struct vmballoon_ctl {
	struct list_head pages;
	struct list_head refused_pages;
	struct list_head prealloc_pages;
	unsigned int n_refused_pages;
	unsigned int n_pages;
	enum vmballoon_page_size_type page_size;
	enum vmballoon_op op;
};

/**
 * struct vmballoon_batch_entry - a batch entry for lock or unlock.
 *
 * @status: the status of the operation, which is written by the hypervisor.
 * @reserved: reserved for future use. Must be set to zero.
 * @pfn: the physical frame number of the page to be locked or unlocked.
 */
struct vmballoon_batch_entry {
	u64 status : 5;
	u64 reserved : PAGE_SHIFT - 5;
	u64 pfn : 52;
} __packed;
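/*
 * Illustrative layout of a batch entry, assuming PAGE_SHIFT == 12:
 *
 *	 63             12 11          5 4        0
 *	+-----------------+-------------+----------+
 *	|       pfn       |  reserved   |  status  |
 *	+-----------------+-------------+----------+
 */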

struct vmballoon {
	/**
	 * @max_page_size: maximum supported page size for ballooning.
	 *
	 * Protected by @conf_sem
	 */
	enum vmballoon_page_size_type max_page_size;

	/**
	 * @size: balloon actual size in basic page size (frames).
	 *
	 * While we currently do not support size which is bigger than 32-bit,
	 * in preparation for future support, use 64-bits.
	 */
	atomic64_t size;

	/**
	 * @target: balloon target size in basic page size (frames).
	 *
	 * We do not protect the target under the assumption that setting the
	 * value is always done through a single write. If this assumption ever
	 * breaks, we would have to use READ_ONCE()/WRITE_ONCE() for accesses,
	 * and suffer the less optimized code. Although we may read a stale
	 * target value if multiple accesses happen at once, the performance
	 * impact should be minor.
	 */
	unsigned long target;

	/**
	 * @reset_required: reset flag
	 *
	 * Setting this flag may introduce races, but the code is expected to
	 * handle them gracefully. In the worst case, another operation will
	 * fail as reset did not take place. Clearing the flag is done while
	 * holding @conf_sem for write.
	 */
	bool reset_required;

	/**
	 * @capabilities: hypervisor balloon capabilities.
	 *
	 * Protected by @conf_sem.
	 */
	unsigned long capabilities;

	/**
	 * @batch_page: pointer to communication batch page.
	 *
	 * When batching is used, batch_page points to a page, which holds up to
	 * %VMW_BALLOON_BATCH_MAX_PAGES entries for locking or unlocking.
	 */
	struct vmballoon_batch_entry *batch_page;

	/**
	 * @batch_max_pages: maximum pages that can be locked/unlocked.
	 *
	 * Indicates the number of pages that the hypervisor can lock or unlock
	 * at once, according to whether batching is enabled. If batching is
	 * disabled, only a single page can be locked/unlocked on each operation.
	 *
	 * Protected by @conf_sem.
	 */
	unsigned int batch_max_pages;

	/**
	 * @page: page to be locked/unlocked by the hypervisor
	 *
	 * @page is only used when batching is disabled and a single page is
	 * reclaimed on each iteration.
	 *
	 * Protected by @comm_lock.
	 */
	struct page *page;

	/**
	 * @shrink_timeout: timeout until the next inflation.
	 *
	 * After a shrink event, indicates the time in jiffies after which
	 * inflation is allowed again. Can be written concurrently with reads,
	 * so must use READ_ONCE/WRITE_ONCE when accessing.
	 */
	unsigned long shrink_timeout;

	/* statistics */
	struct vmballoon_stats *stats;

	/**
	 * @b_dev_info: balloon device information descriptor.
	 */
	struct balloon_dev_info b_dev_info;

	struct delayed_work dwork;

	/**
	 * @huge_pages: list of the inflated 2MB pages.
	 *
	 * Protected by @b_dev_info.pages_lock .
	 */
	struct list_head huge_pages;

	/**
	 * @vmci_doorbell: handle of the VMCI doorbell.
	 *
	 * Protected by @conf_sem.
	 */
	struct vmci_handle vmci_doorbell;

	/**
	 * @conf_sem: semaphore to protect the configuration and the statistics.
	 */
	struct rw_semaphore conf_sem;

	/**
	 * @comm_lock: lock to protect the communication with the host.
	 *
	 * Lock ordering: @conf_sem -> @comm_lock .
	 */
	spinlock_t comm_lock;

	/**
	 * @shrinker: shrinker interface that is used to avoid over-inflation.
	 */
	struct shrinker *shrinker;
};

static struct vmballoon balloon;

struct vmballoon_stats {
	/* timer / doorbell operations */
	atomic64_t general_stat[VMW_BALLOON_STAT_NUM];

	/* allocation statistics for huge and small pages */
	atomic64_t
		page_stat[VMW_BALLOON_PAGE_STAT_NUM][VMW_BALLOON_NUM_PAGE_SIZES];

	/* Monitor operations: total operations, and failures */
	atomic64_t ops[VMW_BALLOON_CMD_NUM][VMW_BALLOON_OP_STAT_TYPES];
};

static inline bool is_vmballoon_stats_on(void)
{
	return IS_ENABLED(CONFIG_DEBUG_FS) &&
		static_branch_unlikely(&balloon_stat_enabled);
}

static inline void vmballoon_stats_op_inc(struct vmballoon *b, unsigned int op,
					  enum vmballoon_op_stat_type type)
{
	if (is_vmballoon_stats_on())
		atomic64_inc(&b->stats->ops[op][type]);
}

static inline void vmballoon_stats_gen_inc(struct vmballoon *b,
					   enum vmballoon_stat_general stat)
{
	if (is_vmballoon_stats_on())
		atomic64_inc(&b->stats->general_stat[stat]);
}

static inline void vmballoon_stats_gen_add(struct vmballoon *b,
					   enum vmballoon_stat_general stat,
					   unsigned int val)
{
	if (is_vmballoon_stats_on())
		atomic64_add(val, &b->stats->general_stat[stat]);
}

static inline void vmballoon_stats_page_inc(struct vmballoon *b,
					    enum vmballoon_stat_page stat,
					    enum vmballoon_page_size_type size)
{
	if (is_vmballoon_stats_on())
		atomic64_inc(&b->stats->page_stat[stat][size]);
}

static inline void vmballoon_stats_page_add(struct vmballoon *b,
					    enum vmballoon_stat_page stat,
					    enum vmballoon_page_size_type size,
					    unsigned int val)
{
	if (is_vmballoon_stats_on())
		atomic64_add(val, &b->stats->page_stat[stat][size]);
}

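/**
 * __vmballoon_cmd() - issue a command to the hypervisor over the backdoor.
 * @b: pointer to the balloon.
 * @cmd: command to issue, one of &enum vmballoon_cmd_type.
 * @arg1: first command argument, passed in %ebx.
 * @arg2: second command argument, passed in %esi.
 * @result: pointer to where the result should be written, or NULL.
 *
 * Performs an "inl" on port %VMW_BALLOON_HV_PORT with %VMW_BALLOON_HV_MAGIC
 * in %eax and the command in %ecx. The status is returned in %eax; the result
 * is read from %ecx for %VMW_BALLOON_CMD_START (the capabilities) and from
 * %ebx otherwise. When a command in %VMW_BALLOON_CMD_WITH_TARGET_MASK
 * succeeds, the balloon target is updated from the result.
 *
 * Return: the status as provided by the hypervisor.
 */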
static inline unsigned long
__vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
		unsigned long arg2, unsigned long *result)
{
	unsigned long status, dummy1, dummy2, dummy3, local_result;

	vmballoon_stats_op_inc(b, cmd, VMW_BALLOON_OP_STAT);

	asm volatile ("inl %%dx" :
		"=a"(status),
		"=c"(dummy1),
		"=d"(dummy2),
		"=b"(local_result),
		"=S"(dummy3) :
		"0"(VMW_BALLOON_HV_MAGIC),
		"1"(cmd),
		"2"(VMW_BALLOON_HV_PORT),
		"3"(arg1),
		"4"(arg2) :
		"memory");

	/* update the result if needed */
	if (result)
		*result = (cmd == VMW_BALLOON_CMD_START) ? dummy1 :
							   local_result;

	/* update target when applicable */
	if (status == VMW_BALLOON_SUCCESS &&
	    ((1ul << cmd) & VMW_BALLOON_CMD_WITH_TARGET_MASK))
		WRITE_ONCE(b->target, local_result);

	if (status != VMW_BALLOON_SUCCESS &&
	    status != VMW_BALLOON_SUCCESS_WITH_CAPABILITIES) {
		vmballoon_stats_op_inc(b, cmd, VMW_BALLOON_OP_FAIL_STAT);
		pr_debug("%s: %s [0x%lx,0x%lx) failed, returned %ld\n",
			 __func__, vmballoon_cmd_names[cmd], arg1, arg2,
			 status);
	}

	/* mark reset required accordingly */
	if (status == VMW_BALLOON_ERROR_RESET)
		b->reset_required = true;

	return status;
}

static __always_inline unsigned long
vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
	      unsigned long arg2)
{
	unsigned long dummy;

	return __vmballoon_cmd(b, cmd, arg1, arg2, &dummy);
}

/*
 * Send "start" command to the host, communicating supported version
 * of the protocol.
 */
static int vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
{
	unsigned long status, capabilities;

	status = __vmballoon_cmd(b, VMW_BALLOON_CMD_START, req_caps, 0,
				 &capabilities);

	switch (status) {
	case VMW_BALLOON_SUCCESS_WITH_CAPABILITIES:
		b->capabilities = capabilities;
		break;
	case VMW_BALLOON_SUCCESS:
		b->capabilities = VMW_BALLOON_BASIC_CMDS;
		break;
	default:
		return -EIO;
	}

	/*
	 * 2MB pages are only supported with batching. If batching is for some
	 * reason disabled, do not use 2MB pages, since otherwise the legacy
	 * mechanism is used with 2MB pages, causing a failure.
	 */
	b->max_page_size = VMW_BALLOON_4K_PAGE;
	if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) &&
	    (b->capabilities & VMW_BALLOON_BATCHED_CMDS))
		b->max_page_size = VMW_BALLOON_2M_PAGE;

	return 0;
}

/**
 * vmballoon_send_guest_id - communicate guest type to the host.
 *
 * @b: pointer to the balloon.
 *
 * Communicate guest type to the host so that it can adjust ballooning
 * algorithm to the one most appropriate for the guest. This command
 * is normally issued after sending "start" command and is part of
 * standard reset sequence.
 *
 * Return: zero on success or appropriate error code.
 */
static int vmballoon_send_guest_id(struct vmballoon *b)
{
	unsigned long status;

	status = vmballoon_cmd(b, VMW_BALLOON_CMD_GUEST_ID,
			       VMW_BALLOON_GUEST_ID, 0);

	return status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
}

/**
 * vmballoon_page_order() - return the order of the page
 * @page_size: the size of the page.
 *
 * Return: the allocation order.
 */
static inline
unsigned int vmballoon_page_order(enum vmballoon_page_size_type page_size)
{
	return page_size == VMW_BALLOON_2M_PAGE ? VMW_BALLOON_2M_ORDER : 0;
}

/**
 * vmballoon_page_in_frames() - returns the number of frames in a page.
 * @page_size: the size of the page.
 *
 * Return: the number of 4k frames.
 */
static inline unsigned int
vmballoon_page_in_frames(enum vmballoon_page_size_type page_size)
{
	return 1 << vmballoon_page_order(page_size);
}

/**
 * vmballoon_mark_page_offline() - mark a page as offline
 * @page: pointer for the page.
 * @page_size: the size of the page.
 */
static void
vmballoon_mark_page_offline(struct page *page,
			    enum vmballoon_page_size_type page_size)
{
	int i;

	for (i = 0; i < vmballoon_page_in_frames(page_size); i++)
		__SetPageOffline(page + i);
}

/**
 * vmballoon_mark_page_online() - mark a page as online
 * @page: pointer for the page.
 * @page_size: the size of the page.
 */
static void
vmballoon_mark_page_online(struct page *page,
			   enum vmballoon_page_size_type page_size)
{
	int i;

	for (i = 0; i < vmballoon_page_in_frames(page_size); i++)
		__ClearPageOffline(page + i);
}

/**
 * vmballoon_send_get_target() - Retrieve desired balloon size from the host.
 *
 * @b: pointer to the balloon.
 *
 * Return: zero on success, -EINVAL if the limit does not fit in 32 bits, as
 * required by the host-guest protocol, and -EIO if an error occurred in
 * communicating with the host.
 */
static int vmballoon_send_get_target(struct vmballoon *b)
{
	unsigned long status;
	unsigned long limit;

	limit = totalram_pages();

	/* Ensure limit fits in 32-bits if 64-bit targets are not supported */
	if (!(b->capabilities & VMW_BALLOON_64_BIT_TARGET) &&
	    limit != (u32)limit)
		return -EINVAL;

	status = vmballoon_cmd(b, VMW_BALLOON_CMD_GET_TARGET, limit, 0);

	return status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
}

/**
 * vmballoon_alloc_page_list - allocates a list of pages.
 *
 * @b: pointer to the balloon.
 * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
 * @req_n_pages: the number of requested pages.
 *
 * Tries to allocate @req_n_pages, adds them to the list of balloon pages in
 * @ctl.pages, and updates @ctl.n_pages to reflect the number of pages.
 *
 * Return: zero on success or error code otherwise.
 */
static int vmballoon_alloc_page_list(struct vmballoon *b,
				     struct vmballoon_ctl *ctl,
				     unsigned int req_n_pages)
{
	struct page *page;
	unsigned int i;

	for (i = 0; i < req_n_pages; i++) {
		/*
		 * First check if we happen to have pages that were allocated
		 * before. This happens when a 2MB page is rejected during
		 * inflation by the hypervisor, and then split into 4KB pages.
		 */
		if (!list_empty(&ctl->prealloc_pages)) {
			page = list_first_entry(&ctl->prealloc_pages,
						struct page, lru);
			list_del(&page->lru);
		} else {
			if (ctl->page_size == VMW_BALLOON_2M_PAGE)
				page = alloc_pages(__GFP_HIGHMEM|__GFP_NOWARN|
					__GFP_NOMEMALLOC, VMW_BALLOON_2M_ORDER);
			else
				page = balloon_page_alloc();

			vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC,
						 ctl->page_size);
		}

		if (page) {
			/* Success. Add the page to the list and continue. */
			list_add(&page->lru, &ctl->pages);
			continue;
		}

		/* Allocation failed. Update statistics and stop. */
		vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC_FAIL,
					 ctl->page_size);
		break;
	}

	ctl->n_pages = i;

	return req_n_pages == ctl->n_pages ? 0 : -ENOMEM;
}

/**
 * vmballoon_handle_one_result - Handle lock/unlock result for a single page.
 *
 * @b: pointer for %struct vmballoon.
 * @page: pointer for the page whose result should be handled.
 * @page_size: size of the page.
 * @status: status of the operation as provided by the hypervisor.
 *
 * Return: zero on success, -EIO if the hypervisor refused the page.
 */
static int vmballoon_handle_one_result(struct vmballoon *b, struct page *page,
				       enum vmballoon_page_size_type page_size,
				       unsigned long status)
{
	/* On success do nothing. The page is already on the balloon list. */
	if (likely(status == VMW_BALLOON_SUCCESS))
		return 0;

	pr_debug("%s: failed comm pfn %lx status %lu page_size %s\n", __func__,
		 page_to_pfn(page), status,
		 vmballoon_page_size_names[page_size]);

	/* Error occurred */
	vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC,
				 page_size);

	return -EIO;
}

/**
 * vmballoon_status_page - returns the status of (un)lock operation
 *
 * @b: pointer to the balloon.
 * @idx: index for the page for which the operation is performed.
 * @p: pointer to where the page struct is returned.
 *
 * Following a lock or unlock operation, returns the status of the operation
 * for an individual page. Provides the page that the operation was performed
 * on in the @p argument.
 *
 * Returns: The status of a lock or unlock operation for an individual page.
 */
static unsigned long vmballoon_status_page(struct vmballoon *b, int idx,
					   struct page **p)
{
	if (static_branch_likely(&vmw_balloon_batching)) {
		/* batching mode */
		*p = pfn_to_page(b->batch_page[idx].pfn);
		return b->batch_page[idx].status;
	}

	/* non-batching mode */
	*p = b->page;

	/*
	 * If a failure occurs, the indication will be provided in the status
	 * of the entire operation, which is considered before the individual
	 * page status. So for non-batching mode, the indication is always of
	 * success.
	 */
	return VMW_BALLOON_SUCCESS;
}

/**
 * vmballoon_lock_op - notifies the host about inflated/deflated pages.
 * @b: pointer to the balloon.
 * @num_pages: number of inflated/deflated pages.
 * @page_size: size of the page.
 * @op: the type of operation (lock or unlock).
 *
 * Notify the host about page(s) that were ballooned (or removed from the
 * balloon) so that the host can use them without fear that the guest will
 * need them (or stop using them since the VM does). The host may reject some
 * pages; we need to check the return value and maybe submit different pages.
 * In non-batching mode, the page that is inflated/deflated is pointed to by
 * @b->page.
 *
 * Return: result as provided by the hypervisor.
 */
static unsigned long vmballoon_lock_op(struct vmballoon *b,
				       unsigned int num_pages,
				       enum vmballoon_page_size_type page_size,
				       enum vmballoon_op op)
{
	unsigned long cmd, pfn;

	lockdep_assert_held(&b->comm_lock);

	if (static_branch_likely(&vmw_balloon_batching)) {
		if (op == VMW_BALLOON_INFLATE)
			cmd = page_size == VMW_BALLOON_2M_PAGE ?
				VMW_BALLOON_CMD_BATCHED_2M_LOCK :
				VMW_BALLOON_CMD_BATCHED_LOCK;
		else
			cmd = page_size == VMW_BALLOON_2M_PAGE ?
				VMW_BALLOON_CMD_BATCHED_2M_UNLOCK :
				VMW_BALLOON_CMD_BATCHED_UNLOCK;

		pfn = PHYS_PFN(virt_to_phys(b->batch_page));
	} else {
		cmd = op == VMW_BALLOON_INFLATE ? VMW_BALLOON_CMD_LOCK :
						  VMW_BALLOON_CMD_UNLOCK;
		pfn = page_to_pfn(b->page);

		/* In non-batching mode, PFNs must fit in 32-bit */
		if (unlikely(pfn != (u32)pfn))
			return VMW_BALLOON_ERROR_PPN_INVALID;
	}

	return vmballoon_cmd(b, cmd, pfn, num_pages);
}

/**
 * vmballoon_add_page - adds a page towards lock/unlock operation.
 *
 * @b: pointer to the balloon.
 * @idx: index of the page to be ballooned in this batch.
 * @p: pointer to the page that is about to be ballooned.
 *
 * Adds the page to be ballooned. Must be called while holding @comm_lock.
 */
static void vmballoon_add_page(struct vmballoon *b, unsigned int idx,
			       struct page *p)
{
	lockdep_assert_held(&b->comm_lock);

	if (static_branch_likely(&vmw_balloon_batching))
		b->batch_page[idx] = (struct vmballoon_batch_entry)
					{ .pfn = page_to_pfn(p) };
	else
		b->page = p;
}

/**
 * vmballoon_lock - lock or unlock a batch of pages.
 *
 * @b: pointer to the balloon.
 * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
 *
 * Notifies the host about ballooned pages (after inflation or deflation,
 * according to @ctl). If the host rejects a page, it is put on the @ctl
 * refused-pages list; these refused pages are then released when moving to
 * the next size of pages.
 *
 * Note that we neither free any page here nor put it back on the ballooned
 * pages list. Instead we queue it for later processing. We do that for several
 * reasons. First, we do not want to free the page under the lock. Second, it
 * allows us to unify the handling of lock and unlock. In the inflate case, the
 * caller will check if there are too many refused pages and release them.
 * Although it is not identical to the past behavior, it should not affect
 * performance.
 */
static int vmballoon_lock(struct vmballoon *b, struct vmballoon_ctl *ctl)
{
	unsigned long batch_status;
	struct page *page;
	unsigned int i, num_pages;

	num_pages = ctl->n_pages;
	if (num_pages == 0)
		return 0;

	/* communication with the host is done under the communication lock */
	spin_lock(&b->comm_lock);

	i = 0;
	list_for_each_entry(page, &ctl->pages, lru)
		vmballoon_add_page(b, i++, page);

	batch_status = vmballoon_lock_op(b, ctl->n_pages, ctl->page_size,
					 ctl->op);

	/*
	 * Iterate over the pages in the provided list. Since we are changing
	 * @ctl->n_pages, we saved the original value in @num_pages and use it
	 * to bound the loop.
	 */
	for (i = 0; i < num_pages; i++) {
		unsigned long status;

		status = vmballoon_status_page(b, i, &page);

		/*
		 * Failure of the whole batch overrides the result of a
		 * single operation.
		 */
		if (batch_status != VMW_BALLOON_SUCCESS)
			status = batch_status;

		/* Continue if no error happened */
		if (!vmballoon_handle_one_result(b, page, ctl->page_size,
						 status))
			continue;

		/*
		 * An error happened. Move the page to the refused list and
		 * update the page counts.
		 */
		list_move(&page->lru, &ctl->refused_pages);
		ctl->n_pages--;
		ctl->n_refused_pages++;
	}

	spin_unlock(&b->comm_lock);

	return batch_status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
}

/**
 * vmballoon_release_page_list() - Releases a page list
 *
 * @page_list: list of pages to release.
 * @n_pages: pointer to the number of pages.
 * @page_size: whether the pages in the list are 2MB (or else 4KB).
 *
 * Releases the list of pages and zeros the number of pages.
 */
static void vmballoon_release_page_list(struct list_head *page_list,
					int *n_pages,
					enum vmballoon_page_size_type page_size)
{
	struct page *page, *tmp;

	list_for_each_entry_safe(page, tmp, page_list, lru) {
		list_del(&page->lru);
		__free_pages(page, vmballoon_page_order(page_size));
	}

	if (n_pages)
		*n_pages = 0;
}

/*
 * Release pages that were allocated while attempting to inflate the
 * balloon but were refused by the host for one reason or another.
 */
static void vmballoon_release_refused_pages(struct vmballoon *b,
					    struct vmballoon_ctl *ctl)
{
	vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_REFUSED_FREE,
				 ctl->page_size);

	vmballoon_release_page_list(&ctl->refused_pages, &ctl->n_refused_pages,
				    ctl->page_size);
}

/**
 * vmballoon_change - retrieve the required balloon change
 *
 * @b: pointer for the balloon.
 *
 * Return: the required change for the balloon size. A positive number
 * indicates inflation, a negative number indicates a deflation.
 */
static int64_t vmballoon_change(struct vmballoon *b)
{
	int64_t size, target;

	size = atomic64_read(&b->size);
	target = READ_ONCE(b->target);

	/*
	 * We must read both values into signed 64-bit variables before
	 * subtracting; otherwise the unsigned arithmetic might yield huge
	 * positives instead of negatives.
	 */

	if (b->reset_required)
		return 0;

	/* consider a 2MB slack on deflate, unless the balloon is emptied */
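	/*
	 * For example, with 4KB frames the 2MB slack is 512 frames: if @size
	 * is 1000 frames and @target is 600, the 400-frame difference is
	 * within the slack and no deflation takes place.
	 */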
	if (target < size && target != 0 &&
	    size - target < vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE))
		return 0;

	/* If an out-of-memory recently occurred, inflation is disallowed. */
	if (target > size && time_before(jiffies, READ_ONCE(b->shrink_timeout)))
		return 0;

	return target - size;
}

/**
 * vmballoon_enqueue_page_list() - Enqueues list of pages after inflation.
 *
 * @b: pointer to balloon.
 * @pages: list of pages to enqueue.
 * @n_pages: pointer to number of pages in list. The value is zeroed.
 * @page_size: whether the pages are 2MB or 4KB pages.
 *
 * Enqueues the provided list of pages on the ballooned page list, clears the
 * list and zeroes the number of pages that was provided.
 */
static void vmballoon_enqueue_page_list(struct vmballoon *b,
					struct list_head *pages,
					unsigned int *n_pages,
					enum vmballoon_page_size_type page_size)
{
	unsigned long flags;
	struct page *page;

	if (page_size == VMW_BALLOON_4K_PAGE) {
		balloon_page_list_enqueue(&b->b_dev_info, pages);
	} else {
		/*
		 * Keep the huge pages in a local list which is not available
		 * for the balloon compaction mechanism.
		 */
		spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);

		list_for_each_entry(page, pages, lru) {
			vmballoon_mark_page_offline(page, VMW_BALLOON_2M_PAGE);
		}

		list_splice_init(pages, &b->huge_pages);
		__count_vm_events(BALLOON_INFLATE, *n_pages *
				  vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE));
		spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
	}

	*n_pages = 0;
}

/**
 * vmballoon_dequeue_page_list() - Dequeues page lists for deflation.
 *
 * @b: pointer to balloon.
 * @pages: list to which the dequeued pages are added.
 * @n_pages: pointer to the number of pages in the list. Set to the number of
 *	     dequeued pages.
 * @page_size: whether the pages are 2MB or 4KB pages.
 * @n_req_pages: the number of requested pages.
 *
 * Dequeues the number of requested pages from the balloon for deflation. The
 * number of dequeued pages may be lower, if not enough pages in the requested
 * size are available.
 */
static void vmballoon_dequeue_page_list(struct vmballoon *b,
					struct list_head *pages,
					unsigned int *n_pages,
					enum vmballoon_page_size_type page_size,
					unsigned int n_req_pages)
{
	struct page *page, *tmp;
	unsigned int i = 0;
	unsigned long flags;

	/* In the case of 4k pages, use the compaction infrastructure */
	if (page_size == VMW_BALLOON_4K_PAGE) {
		*n_pages = balloon_page_list_dequeue(&b->b_dev_info, pages,
						     n_req_pages);
		return;
	}

	/* 2MB pages */
	spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
	list_for_each_entry_safe(page, tmp, &b->huge_pages, lru) {
		vmballoon_mark_page_online(page, VMW_BALLOON_2M_PAGE);

		list_move(&page->lru, pages);
		if (++i == n_req_pages)
			break;
	}

	__count_vm_events(BALLOON_DEFLATE,
			  i * vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE));
	spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
	*n_pages = i;
}

/**
 * vmballoon_split_refused_pages() - Split the 2MB refused pages to 4k.
 *
 * If inflation of 2MB pages was denied by the hypervisor, it is likely to be
 * due to one or a few 4KB pages. These 2MB pages may keep being allocated and
 * then being refused. To prevent this case, this function splits the refused
 * pages into 4KB pages and adds them into @prealloc_pages list.
 *
 * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
 */
static void vmballoon_split_refused_pages(struct vmballoon_ctl *ctl)
{
	struct page *page, *tmp;
	unsigned int i, order;

	order = vmballoon_page_order(ctl->page_size);

	list_for_each_entry_safe(page, tmp, &ctl->refused_pages, lru) {
		list_del(&page->lru);
		split_page(page, order);
		for (i = 0; i < (1 << order); i++)
			list_add(&page[i].lru, &ctl->prealloc_pages);
	}
	ctl->n_refused_pages = 0;
}

/**
 * vmballoon_inflate() - Inflate the balloon towards its target size.
 *
 * @b: pointer to the balloon.
 */
static void vmballoon_inflate(struct vmballoon *b)
{
	int64_t to_inflate_frames;
	struct vmballoon_ctl ctl = {
		.pages = LIST_HEAD_INIT(ctl.pages),
		.refused_pages = LIST_HEAD_INIT(ctl.refused_pages),
		.prealloc_pages = LIST_HEAD_INIT(ctl.prealloc_pages),
		.page_size = b->max_page_size,
		.op = VMW_BALLOON_INFLATE
	};

	while ((to_inflate_frames = vmballoon_change(b)) > 0) {
		unsigned int to_inflate_pages, page_in_frames;
		int alloc_error, lock_error = 0;

		VM_BUG_ON(!list_empty(&ctl.pages));
		VM_BUG_ON(ctl.n_pages != 0);

		page_in_frames = vmballoon_page_in_frames(ctl.page_size);

		to_inflate_pages = min_t(unsigned long, b->batch_max_pages,
					 DIV_ROUND_UP_ULL(to_inflate_frames,
							  page_in_frames));

		/* Start by allocating */
		alloc_error = vmballoon_alloc_page_list(b, &ctl,
							to_inflate_pages);

		/* Actually lock the pages by telling the hypervisor */
		lock_error = vmballoon_lock(b, &ctl);

		/*
		 * If an error indicates that something serious went wrong,
		 * stop the inflation.
		 */
		if (lock_error)
			break;

		/* Update the balloon size */
		atomic64_add(ctl.n_pages * page_in_frames, &b->size);

		vmballoon_enqueue_page_list(b, &ctl.pages, &ctl.n_pages,
					    ctl.page_size);

		/*
		 * If allocation failed or the number of refused pages exceeds
		 * the maximum allowed, move to the next page size.
		 */
		if (alloc_error ||
		    ctl.n_refused_pages >= VMW_BALLOON_MAX_REFUSED) {
			if (ctl.page_size == VMW_BALLOON_4K_PAGE)
				break;

			/*
			 * Split the refused pages to 4k. This will also empty
			 * the refused pages list.
			 */
			vmballoon_split_refused_pages(&ctl);
			ctl.page_size--;
		}

		cond_resched();
	}

	/*
	 * Release pages that were allocated while attempting to inflate the
	 * balloon but were refused by the host for one reason or another,
	 * and update the statistics.
	 */
	if (ctl.n_refused_pages != 0)
		vmballoon_release_refused_pages(b, &ctl);

	vmballoon_release_page_list(&ctl.prealloc_pages, NULL, ctl.page_size);
}

/**
 * vmballoon_deflate() - Decrease the size of the balloon.
 *
 * @b: pointer to the balloon
 * @n_frames: the number of frames to deflate. If zero, automatically
 *	      calculated according to the target size.
 * @coordinated: whether to coordinate with the host
 *
 * Decrease the size of the balloon allowing guest to use more memory.
 *
 * Return: The number of deflated frames (i.e., basic page size units)
 */
static unsigned long vmballoon_deflate(struct vmballoon *b, uint64_t n_frames,
				       bool coordinated)
{
	unsigned long deflated_frames = 0;
	unsigned long tried_frames = 0;
	struct vmballoon_ctl ctl = {
		.pages = LIST_HEAD_INIT(ctl.pages),
		.refused_pages = LIST_HEAD_INIT(ctl.refused_pages),
		.page_size = VMW_BALLOON_4K_PAGE,
		.op = VMW_BALLOON_DEFLATE
	};

	/* free pages to reach target */
	while (true) {
		unsigned int to_deflate_pages, n_unlocked_frames;
		unsigned int page_in_frames;
		int64_t to_deflate_frames;
		bool deflated_all;

		page_in_frames = vmballoon_page_in_frames(ctl.page_size);

		VM_BUG_ON(!list_empty(&ctl.pages));
		VM_BUG_ON(ctl.n_pages);
		VM_BUG_ON(!list_empty(&ctl.refused_pages));
		VM_BUG_ON(ctl.n_refused_pages);

		/*
		 * If a specific number of frames was requested, we try to
		 * deflate that many frames. Otherwise, deflation is performed
		 * according to the target and balloon size.
		 */
		to_deflate_frames = n_frames ? n_frames - tried_frames :
					       -vmballoon_change(b);

		/* break if no work to do */
		if (to_deflate_frames <= 0)
			break;

		/*
		 * Calculate the number of frames based on current page size,
		 * but limit the deflated frames to a single chunk
		 */
		to_deflate_pages = min_t(unsigned long, b->batch_max_pages,
					 DIV_ROUND_UP_ULL(to_deflate_frames,
							  page_in_frames));

		/* First take the pages from the balloon pages. */
		vmballoon_dequeue_page_list(b, &ctl.pages, &ctl.n_pages,
					    ctl.page_size, to_deflate_pages);

		/*
		 * Before pages are moved to the refused list, count their
		 * frames as frames that we tried to deflate.
		 */
		tried_frames += ctl.n_pages * page_in_frames;

		/*
		 * Unlock the pages by communicating with the hypervisor if the
		 * communication is coordinated (i.e., not pop). We ignore the
		 * return code. Instead we check whether we managed to unlock
		 * all the pages. If we failed, we will move to the next page
		 * size, and eventually try again later.
		 */
		if (coordinated)
			vmballoon_lock(b, &ctl);

		/*
		 * Check if we deflated enough. We will move to the next page
		 * size if we did not manage to do so. This calculation takes
		 * place now, as once the pages are released, the number of
		 * pages is zeroed.
		 */
		deflated_all = (ctl.n_pages == to_deflate_pages);

		/* Update local and global counters */
		n_unlocked_frames = ctl.n_pages * page_in_frames;
		atomic64_sub(n_unlocked_frames, &b->size);
		deflated_frames += n_unlocked_frames;

		vmballoon_stats_page_add(b, VMW_BALLOON_PAGE_STAT_FREE,
					 ctl.page_size, ctl.n_pages);

		/* free the ballooned pages */
		vmballoon_release_page_list(&ctl.pages, &ctl.n_pages,
					    ctl.page_size);

		/* Return the refused pages to the ballooned list. */
		vmballoon_enqueue_page_list(b, &ctl.refused_pages,
					    &ctl.n_refused_pages,
					    ctl.page_size);

		/* If we failed to unlock all the pages, move to next size. */
		if (!deflated_all) {
			if (ctl.page_size == b->max_page_size)
				break;
			ctl.page_size++;
		}

		cond_resched();
	}

	return deflated_frames;
}

/**
 * vmballoon_deinit_batching - disables batching mode.
 *
 * @b: pointer to &struct vmballoon.
 *
 * Disables batching, by deallocating the page for communication with the
 * hypervisor and disabling the static key to indicate that batching is off.
 */
static void vmballoon_deinit_batching(struct vmballoon *b)
{
	free_page((unsigned long)b->batch_page);
	b->batch_page = NULL;
	static_branch_disable(&vmw_balloon_batching);
	b->batch_max_pages = 1;
}

/**
 * vmballoon_init_batching - enable batching mode.
 *
 * @b: pointer to &struct vmballoon.
 *
 * Enables batching, by allocating a page for communication with the hypervisor
 * and enabling the static_key to use batching.
 *
 * Return: zero on success or an appropriate error-code.
 */
static int vmballoon_init_batching(struct vmballoon *b)
{
	struct page *page;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return -ENOMEM;

	b->batch_page = page_address(page);
	b->batch_max_pages = PAGE_SIZE / sizeof(struct vmballoon_batch_entry);
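	/*
	 * With 4KB pages and 8-byte batch entries this yields 512 entries,
	 * matching the "up to 512" batch size that the lock/unlock commands
	 * document.
	 */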

	static_branch_enable(&vmw_balloon_batching);

	return 0;
}

/*
 * Receive notification and resize balloon
 */
static void vmballoon_doorbell(void *client_data)
{
	struct vmballoon *b = client_data;

	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_DOORBELL);

	mod_delayed_work(system_freezable_wq, &b->dwork, 0);
}

/*
 * Clean up vmci doorbell
 */
static void vmballoon_vmci_cleanup(struct vmballoon *b)
{
	vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
		      VMCI_INVALID_ID, VMCI_INVALID_ID);

	if (!vmci_handle_is_invalid(b->vmci_doorbell)) {
		vmci_doorbell_destroy(b->vmci_doorbell);
		b->vmci_doorbell = VMCI_INVALID_HANDLE;
	}
}

/**
 * vmballoon_vmci_init - Initialize vmci doorbell.
 *
 * @b: pointer to the balloon.
 *
 * Return: zero on success or when the wakeup command is not supported;
 * an error code otherwise.
 *
 * Initialize the VMCI doorbell, to get notified as soon as the balloon size
 * changes.
 */
static int vmballoon_vmci_init(struct vmballoon *b)
{
	unsigned long error;

	if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0)
		return 0;

	error = vmci_doorbell_create(&b->vmci_doorbell, VMCI_FLAG_DELAYED_CB,
				     VMCI_PRIVILEGE_FLAG_RESTRICTED,
				     vmballoon_doorbell, b);

	if (error != VMCI_SUCCESS)
		goto fail;

	error = __vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
				b->vmci_doorbell.context,
				b->vmci_doorbell.resource, NULL);

	if (error != VMW_BALLOON_SUCCESS)
		goto fail;

	return 0;
fail:
	vmballoon_vmci_cleanup(b);
	return -EIO;
}

/**
 * vmballoon_pop - Quickly release all pages allocated for the balloon.
 *
 * @b: pointer to the balloon.
 *
 * This function is called when the host decides to "reset" the balloon for one
 * reason or another. Unlike a normal "deflate" we do not (shall not) notify the
 * host of the pages being released.
 */
static void vmballoon_pop(struct vmballoon *b)
{
	unsigned long size;

	while ((size = atomic64_read(&b->size)))
		vmballoon_deflate(b, size, false);
}

/*
 * Perform standard reset sequence by popping the balloon (in case it
 * is not empty) and then restarting protocol. This operation normally
 * happens when host responds with VMW_BALLOON_ERROR_RESET to a command.
 */
static void vmballoon_reset(struct vmballoon *b)
{
	int error;

	down_write(&b->conf_sem);

	vmballoon_vmci_cleanup(b);

	/* free all pages, skipping monitor unlock */
	vmballoon_pop(b);

	if (vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
		goto unlock;

	if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
		if (vmballoon_init_batching(b)) {
			/*
			 * We failed to initialize batching, inform the monitor
			 * about it by sending a null capability.
			 *
			 * The guest will retry in one second.
			 */
			vmballoon_send_start(b, 0);
			goto unlock;
		}
	} else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) {
		vmballoon_deinit_batching(b);
	}

	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_RESET);
	b->reset_required = false;

	error = vmballoon_vmci_init(b);
	if (error)
		pr_err_once("failed to initialize vmci doorbell\n");

	if (vmballoon_send_guest_id(b))
		pr_err_once("failed to send guest ID to the host\n");

unlock:
	up_write(&b->conf_sem);
}

/**
 * vmballoon_work - periodic balloon worker for reset, inflation and deflation.
 *
 * @work: pointer to the &work_struct which is provided by the workqueue.
 *
 * Resets the protocol if needed, gets the new size and adjusts the balloon as
 * needed. Repeats every second.
 */
static void vmballoon_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct vmballoon *b = container_of(dwork, struct vmballoon, dwork);
	int64_t change = 0;

	if (b->reset_required)
		vmballoon_reset(b);

	down_read(&b->conf_sem);

	/*
	 * Update the stats while holding the semaphore to ensure that
	 * @balloon_stat_enabled is consistent with whether the stats are
	 * actually enabled.
	 */
	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_TIMER);

	if (!vmballoon_send_get_target(b))
		change = vmballoon_change(b);

	if (change != 0) {
		pr_debug("%s - size: %llu, target %lu\n", __func__,
			 atomic64_read(&b->size), READ_ONCE(b->target));

		if (change > 0)
			vmballoon_inflate(b);
		else  /* (change < 0) */
			vmballoon_deflate(b, 0, true);
	}

	up_read(&b->conf_sem);

	/*
	 * We are using a freezable workqueue so that balloon operations are
	 * stopped while the system transitions to/from sleep/hibernation.
	 */
	queue_delayed_work(system_freezable_wq,
			   dwork, round_jiffies_relative(HZ));
}

/**
 * vmballoon_shrinker_scan() - deflate the balloon due to memory pressure.
 * @shrinker: pointer to the balloon shrinker.
 * @sc: page reclaim information.
 *
 * Returns: number of pages that were freed during deflation.
 */
static unsigned long vmballoon_shrinker_scan(struct shrinker *shrinker,
					     struct shrink_control *sc)
{
	struct vmballoon *b = &balloon;
	unsigned long deflated_frames;

	pr_debug("%s - size: %llu", __func__, atomic64_read(&b->size));

	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_SHRINK);

	/*
	 * If the lock is also contended for read, we cannot easily reclaim and
	 * we bail out.
	 */
	if (!down_read_trylock(&b->conf_sem))
		return 0;

	deflated_frames = vmballoon_deflate(b, sc->nr_to_scan, true);

	vmballoon_stats_gen_add(b, VMW_BALLOON_STAT_SHRINK_FREE,
				deflated_frames);

	/*
	 * Delay future inflation for some time to mitigate the situations in
	 * which balloon continuously grows and shrinks. Use WRITE_ONCE() since
	 * the access is asynchronous.
	 */
	WRITE_ONCE(b->shrink_timeout, jiffies + HZ * VMBALLOON_SHRINK_DELAY);

	up_read(&b->conf_sem);

	return deflated_frames;
}

/**
 * vmballoon_shrinker_count() - return the number of ballooned pages.
 * @shrinker: pointer to the balloon shrinker.
 * @sc: page reclaim information.
 *
 * Returns: number of 4k pages that are allocated for the balloon and can
 *	    therefore be reclaimed under pressure.
 */
static unsigned long vmballoon_shrinker_count(struct shrinker *shrinker,
					      struct shrink_control *sc)
{
	struct vmballoon *b = &balloon;

	return atomic64_read(&b->size);
}

static void vmballoon_unregister_shrinker(struct vmballoon *b)
{
	shrinker_free(b->shrinker);
	b->shrinker = NULL;
}

static int vmballoon_register_shrinker(struct vmballoon *b)
{
	/* Do nothing if the shrinker is not enabled */
	if (!vmwballoon_shrinker_enable)
		return 0;

	b->shrinker = shrinker_alloc(0, "vmw-balloon");
	if (!b->shrinker)
		return -ENOMEM;

	b->shrinker->scan_objects = vmballoon_shrinker_scan;
	b->shrinker->count_objects = vmballoon_shrinker_count;
	b->shrinker->private_data = b;

	shrinker_register(b->shrinker);

	return 0;
}

/*
 * DEBUGFS Interface
 */
#ifdef CONFIG_DEBUG_FS

static const char * const vmballoon_stat_page_names[] = {
	[VMW_BALLOON_PAGE_STAT_ALLOC]		= "alloc",
	[VMW_BALLOON_PAGE_STAT_ALLOC_FAIL]	= "allocFail",
	[VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC]	= "errAlloc",
	[VMW_BALLOON_PAGE_STAT_REFUSED_FREE]	= "errFree",
	[VMW_BALLOON_PAGE_STAT_FREE]		= "free"
};

static const char * const vmballoon_stat_names[] = {
	[VMW_BALLOON_STAT_TIMER]		= "timer",
	[VMW_BALLOON_STAT_DOORBELL]		= "doorbell",
	[VMW_BALLOON_STAT_RESET]		= "reset",
	[VMW_BALLOON_STAT_SHRINK]		= "shrink",
	[VMW_BALLOON_STAT_SHRINK_FREE]		= "shrinkFree"
};

static int vmballoon_enable_stats(struct vmballoon *b)
{
	int r = 0;

	down_write(&b->conf_sem);

	/* did we somehow race with another reader which enabled stats? */
	if (b->stats)
		goto out;

	b->stats = kzalloc(sizeof(*b->stats), GFP_KERNEL);

	if (!b->stats) {
		/* allocation failed */
		r = -ENOMEM;
		goto out;
	}
	static_key_enable(&balloon_stat_enabled.key);
out:
	up_write(&b->conf_sem);
	return r;
}

/**
 * vmballoon_debug_show - shows statistics of balloon operations.
 * @f: pointer to the &struct seq_file.
 * @offset: ignored.
 *
 * Provides the statistics that can be accessed in the vmmemctl debugfs file.
 * To avoid the overhead - mainly that of memory - of collecting the statistics,
 * we only collect statistics after the first time the counters are read.
 *
 * Return: zero on success or an error code.
 */
static int vmballoon_debug_show(struct seq_file *f, void *offset)
{
	struct vmballoon *b = f->private;
	int i, j;

	/* enables stats if they are disabled */
	if (!b->stats) {
		int r = vmballoon_enable_stats(b);

		if (r)
			return r;
	}

	/* format capabilities info */
	seq_printf(f, "%-22s: %#16x\n", "balloon capabilities",
		   VMW_BALLOON_CAPABILITIES);
	seq_printf(f, "%-22s: %#16lx\n", "used capabilities", b->capabilities);
	seq_printf(f, "%-22s: %16s\n", "is resetting",
		   b->reset_required ? "y" : "n");

	/* format size info */
	seq_printf(f, "%-22s: %16lu\n", "target", READ_ONCE(b->target));
	seq_printf(f, "%-22s: %16llu\n", "current", atomic64_read(&b->size));

	for (i = 0; i < VMW_BALLOON_CMD_NUM; i++) {
		if (vmballoon_cmd_names[i] == NULL)
			continue;

		seq_printf(f, "%-22s: %16llu (%llu failed)\n",
			   vmballoon_cmd_names[i],
			   atomic64_read(&b->stats->ops[i][VMW_BALLOON_OP_STAT]),
			   atomic64_read(&b->stats->ops[i][VMW_BALLOON_OP_FAIL_STAT]));
	}

	for (i = 0; i < VMW_BALLOON_STAT_NUM; i++)
		seq_printf(f, "%-22s: %16llu\n",
			   vmballoon_stat_names[i],
			   atomic64_read(&b->stats->general_stat[i]));

	for (i = 0; i < VMW_BALLOON_PAGE_STAT_NUM; i++) {
		for (j = 0; j < VMW_BALLOON_NUM_PAGE_SIZES; j++)
			seq_printf(f, "%-18s(%s): %16llu\n",
				   vmballoon_stat_page_names[i],
				   vmballoon_page_size_names[j],
				   atomic64_read(&b->stats->page_stat[i][j]));
	}

	return 0;
}

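/* Generates vmballoon_debug_fops, wiring it to vmballoon_debug_show(). */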
DEFINE_SHOW_ATTRIBUTE(vmballoon_debug);

static void __init vmballoon_debugfs_init(struct vmballoon *b)
{
	debugfs_create_file("vmmemctl", S_IRUGO, NULL, b,
			    &vmballoon_debug_fops);
}

static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
{
	static_key_disable(&balloon_stat_enabled.key);
	debugfs_lookup_and_remove("vmmemctl", NULL);
	kfree(b->stats);
	b->stats = NULL;
}

#else

static inline void vmballoon_debugfs_init(struct vmballoon *b)
{
}

static inline void vmballoon_debugfs_exit(struct vmballoon *b)
{
}

#endif	/* CONFIG_DEBUG_FS */


#ifdef CONFIG_BALLOON_COMPACTION
/**
 * vmballoon_migratepage() - migrates a balloon page.
 * @b_dev_info: balloon device information descriptor.
 * @newpage: the page to which @page should be migrated.
 * @page: a ballooned page that should be migrated.
 * @mode: migration mode, ignored.
 *
 * This function is really open-coded, but that is according to the interface
 * that balloon_compaction provides.
 *
 * Return: zero on success, -EAGAIN when migration cannot be performed
 *	   momentarily, and -EBUSY if migration failed and should be retried
 *	   with that specific page.
 */
static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
				 struct page *newpage, struct page *page,
				 enum migrate_mode mode)
{
	unsigned long status, flags;
	struct vmballoon *b;
	int ret;

	b = container_of(b_dev_info, struct vmballoon, b_dev_info);

	/*
	 * If the semaphore is taken, there is an ongoing configuration change
	 * (i.e., a balloon reset), so try again.
	 */
	if (!down_read_trylock(&b->conf_sem))
		return -EAGAIN;

	spin_lock(&b->comm_lock);
	/*
	 * We must start by deflating and not inflating, as otherwise the
	 * hypervisor may tell us that it has enough memory and the new page is
	 * not needed. Since the old page is isolated, we cannot use the list
	 * interface to unlock it, as the LRU field is used for isolation.
	 * Instead, we use the native interface directly.
	 */
	vmballoon_add_page(b, 0, page);
	status = vmballoon_lock_op(b, 1, VMW_BALLOON_4K_PAGE,
				   VMW_BALLOON_DEFLATE);

	if (status == VMW_BALLOON_SUCCESS)
		status = vmballoon_status_page(b, 0, &page);

	/*
	 * If a failure happened, let the migration mechanism know that it
	 * should not retry.
	 */
	if (status != VMW_BALLOON_SUCCESS) {
		spin_unlock(&b->comm_lock);
		ret = -EBUSY;
		goto out_unlock;
	}

	/*
	 * The page is isolated, so it is safe to delete it without holding
	 * @pages_lock . We keep holding @comm_lock since we will need it in a
	 * second.
	 */
	balloon_page_delete(page);

	put_page(page);

	/* Inflate */
	vmballoon_add_page(b, 0, newpage);
	status = vmballoon_lock_op(b, 1, VMW_BALLOON_4K_PAGE,
				   VMW_BALLOON_INFLATE);

	if (status == VMW_BALLOON_SUCCESS)
		status = vmballoon_status_page(b, 0, &newpage);

	spin_unlock(&b->comm_lock);

	if (status != VMW_BALLOON_SUCCESS) {
		/*
		 * A failure happened. While we can deflate the page we just
		 * inflated, this deflation can also encounter an error. Instead
		 * we will decrease the size of the balloon to reflect the
		 * change and report failure.
		 */
		atomic64_dec(&b->size);
		ret = -EBUSY;
	} else {
		/*
		 * Success. Take a reference for the page, and we will add it to
		 * the list after acquiring the lock.
		 */
		get_page(newpage);
		ret = MIGRATEPAGE_SUCCESS;
	}

	/* Update the balloon list under the @pages_lock */
	spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);

	/*
	 * On inflation success, we already took a reference for the @newpage.
	 * If we succeed just insert it to the list and update the statistics
	 * under the lock.
	 */
	if (ret == MIGRATEPAGE_SUCCESS) {
		balloon_page_insert(&b->b_dev_info, newpage);
		__count_vm_event(BALLOON_MIGRATE);
	}

	/*
	 * We deflated successfully, so regardless of the inflation success, we
1828 * need to reduce the number of isolated_pages.
1829 */
	b->b_dev_info.isolated_pages--;
	spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);

out_unlock:
	up_read(&b->conf_sem);
	return ret;
}

/**
 * vmballoon_compaction_init() - initializes compaction for the balloon.
 *
 * @b: pointer to the balloon.
 *
 * Registers the migration callback with the balloon device descriptor. This
 * function cannot fail, so the caller does not need to perform any cleanup.
 */
static __init void vmballoon_compaction_init(struct vmballoon *b)
{
	b->b_dev_info.migratepage = vmballoon_migratepage;
}

#else /* CONFIG_BALLOON_COMPACTION */
static inline void vmballoon_compaction_init(struct vmballoon *b)
{
}
#endif /* CONFIG_BALLOON_COMPACTION */

static int __init vmballoon_init(void)
{
	int error;

	/*
	 * Check if we are running on VMware's hypervisor and bail out
	 * if we are not.
	 */
	if (x86_hyper_type != X86_HYPER_VMWARE)
		return -ENODEV;

	INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);

	error = vmballoon_register_shrinker(&balloon);
	if (error)
		return error;

	/*
	 * Initialization of compaction must be done after the call to
	 * balloon_devinfo_init().
	 */
	balloon_devinfo_init(&balloon.b_dev_info);
	vmballoon_compaction_init(&balloon);

	INIT_LIST_HEAD(&balloon.huge_pages);
	spin_lock_init(&balloon.comm_lock);
	init_rwsem(&balloon.conf_sem);
	balloon.vmci_doorbell = VMCI_INVALID_HANDLE;
	balloon.batch_page = NULL;
	balloon.page = NULL;
	balloon.reset_required = true;

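	/*
	 * Since @reset_required is set above, the first invocation of the
	 * worker performs the initial reset and handshake with the host.
	 */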
	queue_delayed_work(system_freezable_wq, &balloon.dwork, 0);

	vmballoon_debugfs_init(&balloon);

	return 0;
}

/*
 * Using late_initcall() instead of module_init() allows the balloon to use the
 * VMCI doorbell even when the balloon is built into the kernel. Otherwise the
 * VMCI is probed only after the balloon is initialized. If the balloon is used
 * as a module, late_initcall() is equivalent to module_init().
 */
late_initcall(vmballoon_init);

static void __exit vmballoon_exit(void)
{
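	/*
	 * Unregister the shrinker and tear down the doorbell before canceling
	 * the worker, so that the doorbell cannot re-arm the delayed work.
	 */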
	vmballoon_unregister_shrinker(&balloon);
	vmballoon_vmci_cleanup(&balloon);
	cancel_delayed_work_sync(&balloon.dwork);

	vmballoon_debugfs_exit(&balloon);

	/*
	 * Deallocate all reserved memory, and reset connection with monitor.
	 * Reset connection before deallocating memory to avoid potential for
	 * additional spurious resets from guest touching deallocated pages.
	 */
	vmballoon_send_start(&balloon, 0);
	vmballoon_pop(&balloon);
}
module_exit(vmballoon_exit);
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * VMware Balloon driver.
4 *
5 * Copyright (C) 2000-2018, VMware, Inc. All Rights Reserved.
6 *
7 * This is VMware physical memory management driver for Linux. The driver
8 * acts like a "balloon" that can be inflated to reclaim physical pages by
9 * reserving them in the guest and invalidating them in the monitor,
10 * freeing up the underlying machine pages so they can be allocated to
11 * other guests. The balloon can also be deflated to allow the guest to
12 * use more physical memory. Higher level policies can control the sizes
13 * of balloons in VMs in order to manage physical memory resources.
14 */
15
16//#define DEBUG
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19#include <linux/types.h>
20#include <linux/io.h>
21#include <linux/kernel.h>
22#include <linux/mm.h>
23#include <linux/vmalloc.h>
24#include <linux/sched.h>
25#include <linux/module.h>
26#include <linux/workqueue.h>
27#include <linux/debugfs.h>
28#include <linux/seq_file.h>
29#include <linux/rwsem.h>
30#include <linux/slab.h>
31#include <linux/spinlock.h>
32#include <linux/mount.h>
33#include <linux/pseudo_fs.h>
34#include <linux/balloon_compaction.h>
35#include <linux/vmw_vmci_defs.h>
36#include <linux/vmw_vmci_api.h>
37#include <asm/hypervisor.h>
38
39MODULE_AUTHOR("VMware, Inc.");
40MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
41MODULE_ALIAS("dmi:*:svnVMware*:*");
42MODULE_ALIAS("vmware_vmmemctl");
43MODULE_LICENSE("GPL");
44
45static bool __read_mostly vmwballoon_shrinker_enable;
46module_param(vmwballoon_shrinker_enable, bool, 0444);
47MODULE_PARM_DESC(vmwballoon_shrinker_enable,
48 "Enable non-cooperative out-of-memory protection. Disabled by default as it may degrade performance.");
49
50/* Delay in seconds after shrink before inflation. */
51#define VMBALLOON_SHRINK_DELAY (5)
52
53/* Maximum number of refused pages we accumulate during inflation cycle */
54#define VMW_BALLOON_MAX_REFUSED 16
55
56/* Magic number for the balloon mount-point */
57#define BALLOON_VMW_MAGIC 0x0ba11007
58
59/*
60 * Hypervisor communication port definitions.
61 */
62#define VMW_BALLOON_HV_PORT 0x5670
63#define VMW_BALLOON_HV_MAGIC 0x456c6d6f
64#define VMW_BALLOON_GUEST_ID 1 /* Linux */
65
66enum vmwballoon_capabilities {
67 /*
68 * Bit 0 is reserved and not associated to any capability.
69 */
70 VMW_BALLOON_BASIC_CMDS = (1 << 1),
71 VMW_BALLOON_BATCHED_CMDS = (1 << 2),
72 VMW_BALLOON_BATCHED_2M_CMDS = (1 << 3),
73 VMW_BALLOON_SIGNALLED_WAKEUP_CMD = (1 << 4),
74 VMW_BALLOON_64_BIT_TARGET = (1 << 5)
75};
76
77#define VMW_BALLOON_CAPABILITIES_COMMON (VMW_BALLOON_BASIC_CMDS \
78 | VMW_BALLOON_BATCHED_CMDS \
79 | VMW_BALLOON_BATCHED_2M_CMDS \
80 | VMW_BALLOON_SIGNALLED_WAKEUP_CMD)
81
82#define VMW_BALLOON_2M_ORDER (PMD_SHIFT - PAGE_SHIFT)
83
84/*
85 * 64-bit targets are only supported in 64-bit
86 */
87#ifdef CONFIG_64BIT
88#define VMW_BALLOON_CAPABILITIES (VMW_BALLOON_CAPABILITIES_COMMON \
89 | VMW_BALLOON_64_BIT_TARGET)
90#else
91#define VMW_BALLOON_CAPABILITIES VMW_BALLOON_CAPABILITIES_COMMON
92#endif
93
94enum vmballoon_page_size_type {
95 VMW_BALLOON_4K_PAGE,
96 VMW_BALLOON_2M_PAGE,
97 VMW_BALLOON_LAST_SIZE = VMW_BALLOON_2M_PAGE
98};
99
100#define VMW_BALLOON_NUM_PAGE_SIZES (VMW_BALLOON_LAST_SIZE + 1)
101
102static const char * const vmballoon_page_size_names[] = {
103 [VMW_BALLOON_4K_PAGE] = "4k",
104 [VMW_BALLOON_2M_PAGE] = "2M"
105};
106
107enum vmballoon_op {
108 VMW_BALLOON_INFLATE,
109 VMW_BALLOON_DEFLATE
110};
111
112enum vmballoon_op_stat_type {
113 VMW_BALLOON_OP_STAT,
114 VMW_BALLOON_OP_FAIL_STAT
115};
116
117#define VMW_BALLOON_OP_STAT_TYPES (VMW_BALLOON_OP_FAIL_STAT + 1)
118
119/**
120 * enum vmballoon_cmd_type - backdoor commands.
121 *
122 * Availability of the commands is as followed:
123 *
124 * %VMW_BALLOON_CMD_START, %VMW_BALLOON_CMD_GET_TARGET and
125 * %VMW_BALLOON_CMD_GUEST_ID are always available.
126 *
127 * If the host reports %VMW_BALLOON_BASIC_CMDS are supported then
128 * %VMW_BALLOON_CMD_LOCK and %VMW_BALLOON_CMD_UNLOCK commands are available.
129 *
130 * If the host reports %VMW_BALLOON_BATCHED_CMDS are supported then
131 * %VMW_BALLOON_CMD_BATCHED_LOCK and VMW_BALLOON_CMD_BATCHED_UNLOCK commands
132 * are available.
133 *
134 * If the host reports %VMW_BALLOON_BATCHED_2M_CMDS are supported then
135 * %VMW_BALLOON_CMD_BATCHED_2M_LOCK and %VMW_BALLOON_CMD_BATCHED_2M_UNLOCK
136 * are supported.
137 *
138 * If the host reports VMW_BALLOON_SIGNALLED_WAKEUP_CMD is supported then
139 * VMW_BALLOON_CMD_VMCI_DOORBELL_SET command is supported.
140 *
141 * @VMW_BALLOON_CMD_START: Communicating supported version with the hypervisor.
142 * @VMW_BALLOON_CMD_GET_TARGET: Gets the balloon target size.
143 * @VMW_BALLOON_CMD_LOCK: Informs the hypervisor about a ballooned page.
144 * @VMW_BALLOON_CMD_UNLOCK: Informs the hypervisor about a page that is about
145 * to be deflated from the balloon.
146 * @VMW_BALLOON_CMD_GUEST_ID: Informs the hypervisor about the type of OS that
147 * runs in the VM.
148 * @VMW_BALLOON_CMD_BATCHED_LOCK: Inform the hypervisor about a batch of
149 * ballooned pages (up to 512).
150 * @VMW_BALLOON_CMD_BATCHED_UNLOCK: Inform the hypervisor about a batch of
151 * pages that are about to be deflated from the
152 * balloon (up to 512).
153 * @VMW_BALLOON_CMD_BATCHED_2M_LOCK: Similar to @VMW_BALLOON_CMD_BATCHED_LOCK
154 * for 2MB pages.
155 * @VMW_BALLOON_CMD_BATCHED_2M_UNLOCK: Similar to
156 * @VMW_BALLOON_CMD_BATCHED_UNLOCK for 2MB
157 * pages.
158 * @VMW_BALLOON_CMD_VMCI_DOORBELL_SET: A command to set doorbell notification
159 * that would be invoked when the balloon
160 * size changes.
161 * @VMW_BALLOON_CMD_LAST: Value of the last command.
162 */
163enum vmballoon_cmd_type {
164 VMW_BALLOON_CMD_START,
165 VMW_BALLOON_CMD_GET_TARGET,
166 VMW_BALLOON_CMD_LOCK,
167 VMW_BALLOON_CMD_UNLOCK,
168 VMW_BALLOON_CMD_GUEST_ID,
169 /* No command 5 */
170 VMW_BALLOON_CMD_BATCHED_LOCK = 6,
171 VMW_BALLOON_CMD_BATCHED_UNLOCK,
172 VMW_BALLOON_CMD_BATCHED_2M_LOCK,
173 VMW_BALLOON_CMD_BATCHED_2M_UNLOCK,
174 VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
175 VMW_BALLOON_CMD_LAST = VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
176};
177
178#define VMW_BALLOON_CMD_NUM (VMW_BALLOON_CMD_LAST + 1)
179
180enum vmballoon_error_codes {
181 VMW_BALLOON_SUCCESS,
182 VMW_BALLOON_ERROR_CMD_INVALID,
183 VMW_BALLOON_ERROR_PPN_INVALID,
184 VMW_BALLOON_ERROR_PPN_LOCKED,
185 VMW_BALLOON_ERROR_PPN_UNLOCKED,
186 VMW_BALLOON_ERROR_PPN_PINNED,
187 VMW_BALLOON_ERROR_PPN_NOTNEEDED,
188 VMW_BALLOON_ERROR_RESET,
189 VMW_BALLOON_ERROR_BUSY
190};
191
192#define VMW_BALLOON_SUCCESS_WITH_CAPABILITIES (0x03000000)
193
194#define VMW_BALLOON_CMD_WITH_TARGET_MASK \
195 ((1UL << VMW_BALLOON_CMD_GET_TARGET) | \
196 (1UL << VMW_BALLOON_CMD_LOCK) | \
197 (1UL << VMW_BALLOON_CMD_UNLOCK) | \
198 (1UL << VMW_BALLOON_CMD_BATCHED_LOCK) | \
199 (1UL << VMW_BALLOON_CMD_BATCHED_UNLOCK) | \
200 (1UL << VMW_BALLOON_CMD_BATCHED_2M_LOCK) | \
201 (1UL << VMW_BALLOON_CMD_BATCHED_2M_UNLOCK))
202
203static const char * const vmballoon_cmd_names[] = {
204 [VMW_BALLOON_CMD_START] = "start",
205 [VMW_BALLOON_CMD_GET_TARGET] = "target",
206 [VMW_BALLOON_CMD_LOCK] = "lock",
207 [VMW_BALLOON_CMD_UNLOCK] = "unlock",
208 [VMW_BALLOON_CMD_GUEST_ID] = "guestType",
209 [VMW_BALLOON_CMD_BATCHED_LOCK] = "batchLock",
210 [VMW_BALLOON_CMD_BATCHED_UNLOCK] = "batchUnlock",
211 [VMW_BALLOON_CMD_BATCHED_2M_LOCK] = "2m-lock",
212 [VMW_BALLOON_CMD_BATCHED_2M_UNLOCK] = "2m-unlock",
213 [VMW_BALLOON_CMD_VMCI_DOORBELL_SET] = "doorbellSet"
214};
215
216enum vmballoon_stat_page {
217 VMW_BALLOON_PAGE_STAT_ALLOC,
218 VMW_BALLOON_PAGE_STAT_ALLOC_FAIL,
219 VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC,
220 VMW_BALLOON_PAGE_STAT_REFUSED_FREE,
221 VMW_BALLOON_PAGE_STAT_FREE,
222 VMW_BALLOON_PAGE_STAT_LAST = VMW_BALLOON_PAGE_STAT_FREE
223};
224
225#define VMW_BALLOON_PAGE_STAT_NUM (VMW_BALLOON_PAGE_STAT_LAST + 1)
226
227enum vmballoon_stat_general {
228 VMW_BALLOON_STAT_TIMER,
229 VMW_BALLOON_STAT_DOORBELL,
230 VMW_BALLOON_STAT_RESET,
231 VMW_BALLOON_STAT_SHRINK,
232 VMW_BALLOON_STAT_SHRINK_FREE,
233 VMW_BALLOON_STAT_LAST = VMW_BALLOON_STAT_SHRINK_FREE
234};
235
236#define VMW_BALLOON_STAT_NUM (VMW_BALLOON_STAT_LAST + 1)
237
238static DEFINE_STATIC_KEY_TRUE(vmw_balloon_batching);
239static DEFINE_STATIC_KEY_FALSE(balloon_stat_enabled);
240
241struct vmballoon_ctl {
242 struct list_head pages;
243 struct list_head refused_pages;
244 struct list_head prealloc_pages;
245 unsigned int n_refused_pages;
246 unsigned int n_pages;
247 enum vmballoon_page_size_type page_size;
248 enum vmballoon_op op;
249};
250
251/**
252 * struct vmballoon_batch_entry - a batch entry for lock or unlock.
253 *
254 * @status: the status of the operation, which is written by the hypervisor.
255 * @reserved: reserved for future use. Must be set to zero.
256 * @pfn: the physical frame number of the page to be locked or unlocked.
257 */
258struct vmballoon_batch_entry {
259 u64 status : 5;
260 u64 reserved : PAGE_SHIFT - 5;
261 u64 pfn : 52;
262} __packed;
263
264struct vmballoon {
265 /**
266 * @max_page_size: maximum supported page size for ballooning.
267 *
268 * Protected by @conf_sem
269 */
270 enum vmballoon_page_size_type max_page_size;
271
272 /**
273 * @size: balloon actual size in basic page size (frames).
274 *
275 * While we currently do not support size which is bigger than 32-bit,
276 * in preparation for future support, use 64-bits.
277 */
278 atomic64_t size;
279
280 /**
281 * @target: balloon target size in basic page size (frames).
282 *
283 * We do not protect the target under the assumption that setting the
284 * value is always done through a single write. If this assumption ever
285 * breaks, we would have to use X_ONCE for accesses, and suffer the less
286 * optimized code. Although we may read stale target value if multiple
287 * accesses happen at once, the performance impact should be minor.
288 */
289 unsigned long target;
290
291 /**
292 * @reset_required: reset flag
293 *
294 * Setting this flag may introduce races, but the code is expected to
295 * handle them gracefully. In the worst case, another operation will
296 * fail as reset did not take place. Clearing the flag is done while
297 * holding @conf_sem for write.
298 */
299 bool reset_required;
300
301 /**
302 * @capabilities: hypervisor balloon capabilities.
303 *
304 * Protected by @conf_sem.
305 */
306 unsigned long capabilities;
307
308 /**
309 * @batch_page: pointer to communication batch page.
310 *
311 * When batching is used, batch_page points to a page, which holds up to
312 * %VMW_BALLOON_BATCH_MAX_PAGES entries for locking or unlocking.
313 */
314 struct vmballoon_batch_entry *batch_page;
315
316 /**
317 * @batch_max_pages: maximum pages that can be locked/unlocked.
318 *
319 * Indicates the number of pages that the hypervisor can lock or unlock
320 * at once, according to whether batching is enabled. If batching is
321 * disabled, only a single page can be locked/unlock on each operation.
322 *
323 * Protected by @conf_sem.
324 */
325 unsigned int batch_max_pages;
326
327 /**
328 * @page: page to be locked/unlocked by the hypervisor
329 *
330 * @page is only used when batching is disabled and a single page is
331 * reclaimed on each iteration.
332 *
333 * Protected by @comm_lock.
334 */
335 struct page *page;
336
337 /**
338 * @shrink_timeout: timeout until the next inflation.
339 *
340 * After an shrink event, indicates the time in jiffies after which
341 * inflation is allowed again. Can be written concurrently with reads,
342 * so must use READ_ONCE/WRITE_ONCE when accessing.
343 */
344 unsigned long shrink_timeout;
345
346 /* statistics */
347 struct vmballoon_stats *stats;
348
349#ifdef CONFIG_DEBUG_FS
350 /* debugfs file exporting statistics */
351 struct dentry *dbg_entry;
352#endif
353
354 /**
355 * @b_dev_info: balloon device information descriptor.
356 */
357 struct balloon_dev_info b_dev_info;
358
359 struct delayed_work dwork;
360
361 /**
362 * @huge_pages - list of the inflated 2MB pages.
363 *
364 * Protected by @b_dev_info.pages_lock .
365 */
366 struct list_head huge_pages;
367
368 /**
369 * @vmci_doorbell.
370 *
371 * Protected by @conf_sem.
372 */
373 struct vmci_handle vmci_doorbell;
374
375 /**
376 * @conf_sem: semaphore to protect the configuration and the statistics.
377 */
378 struct rw_semaphore conf_sem;
379
380 /**
381 * @comm_lock: lock to protect the communication with the host.
382 *
383 * Lock ordering: @conf_sem -> @comm_lock .
384 */
385 spinlock_t comm_lock;
386
387 /**
388 * @shrinker: shrinker interface that is used to avoid over-inflation.
389 */
390 struct shrinker shrinker;
391
392 /**
393 * @shrinker_registered: whether the shrinker was registered.
394 *
395 * The shrinker interface does not handle gracefully the removal of
396 * shrinker that was not registered before. This indication allows to
397 * simplify the unregistration process.
398 */
399 bool shrinker_registered;
400};
401
402static struct vmballoon balloon;
403
404struct vmballoon_stats {
405 /* timer / doorbell operations */
406 atomic64_t general_stat[VMW_BALLOON_STAT_NUM];
407
408 /* allocation statistics for huge and small pages */
409 atomic64_t
410 page_stat[VMW_BALLOON_PAGE_STAT_NUM][VMW_BALLOON_NUM_PAGE_SIZES];
411
412 /* Monitor operations: total operations, and failures */
413 atomic64_t ops[VMW_BALLOON_CMD_NUM][VMW_BALLOON_OP_STAT_TYPES];
414};
415
416static inline bool is_vmballoon_stats_on(void)
417{
418 return IS_ENABLED(CONFIG_DEBUG_FS) &&
419 static_branch_unlikely(&balloon_stat_enabled);
420}
421
422static inline void vmballoon_stats_op_inc(struct vmballoon *b, unsigned int op,
423 enum vmballoon_op_stat_type type)
424{
425 if (is_vmballoon_stats_on())
426 atomic64_inc(&b->stats->ops[op][type]);
427}
428
429static inline void vmballoon_stats_gen_inc(struct vmballoon *b,
430 enum vmballoon_stat_general stat)
431{
432 if (is_vmballoon_stats_on())
433 atomic64_inc(&b->stats->general_stat[stat]);
434}
435
436static inline void vmballoon_stats_gen_add(struct vmballoon *b,
437 enum vmballoon_stat_general stat,
438 unsigned int val)
439{
440 if (is_vmballoon_stats_on())
441 atomic64_add(val, &b->stats->general_stat[stat]);
442}
443
444static inline void vmballoon_stats_page_inc(struct vmballoon *b,
445 enum vmballoon_stat_page stat,
446 enum vmballoon_page_size_type size)
447{
448 if (is_vmballoon_stats_on())
449 atomic64_inc(&b->stats->page_stat[stat][size]);
450}
451
452static inline void vmballoon_stats_page_add(struct vmballoon *b,
453 enum vmballoon_stat_page stat,
454 enum vmballoon_page_size_type size,
455 unsigned int val)
456{
457 if (is_vmballoon_stats_on())
458 atomic64_add(val, &b->stats->page_stat[stat][size]);
459}
460
461static inline unsigned long
462__vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
463 unsigned long arg2, unsigned long *result)
464{
465 unsigned long status, dummy1, dummy2, dummy3, local_result;
466
467 vmballoon_stats_op_inc(b, cmd, VMW_BALLOON_OP_STAT);
468
469 asm volatile ("inl %%dx" :
470 "=a"(status),
471 "=c"(dummy1),
472 "=d"(dummy2),
473 "=b"(local_result),
474 "=S"(dummy3) :
475 "0"(VMW_BALLOON_HV_MAGIC),
476 "1"(cmd),
477 "2"(VMW_BALLOON_HV_PORT),
478 "3"(arg1),
479 "4"(arg2) :
480 "memory");
481
482 /* update the result if needed */
483 if (result)
484 *result = (cmd == VMW_BALLOON_CMD_START) ? dummy1 :
485 local_result;
486
487 /* update target when applicable */
488 if (status == VMW_BALLOON_SUCCESS &&
489 ((1ul << cmd) & VMW_BALLOON_CMD_WITH_TARGET_MASK))
490 WRITE_ONCE(b->target, local_result);
491
492 if (status != VMW_BALLOON_SUCCESS &&
493 status != VMW_BALLOON_SUCCESS_WITH_CAPABILITIES) {
494 vmballoon_stats_op_inc(b, cmd, VMW_BALLOON_OP_FAIL_STAT);
495 pr_debug("%s: %s [0x%lx,0x%lx) failed, returned %ld\n",
496 __func__, vmballoon_cmd_names[cmd], arg1, arg2,
497 status);
498 }
499
500 /* mark reset required accordingly */
501 if (status == VMW_BALLOON_ERROR_RESET)
502 b->reset_required = true;
503
504 return status;
505}
506
507static __always_inline unsigned long
508vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
509 unsigned long arg2)
510{
511 unsigned long dummy;
512
513 return __vmballoon_cmd(b, cmd, arg1, arg2, &dummy);
514}
515
516/*
517 * Send "start" command to the host, communicating supported version
518 * of the protocol.
519 */
520static int vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
521{
522 unsigned long status, capabilities;
523
524 status = __vmballoon_cmd(b, VMW_BALLOON_CMD_START, req_caps, 0,
525 &capabilities);
526
527 switch (status) {
528 case VMW_BALLOON_SUCCESS_WITH_CAPABILITIES:
529 b->capabilities = capabilities;
530 break;
531 case VMW_BALLOON_SUCCESS:
532 b->capabilities = VMW_BALLOON_BASIC_CMDS;
533 break;
534 default:
535 return -EIO;
536 }
537
538 /*
539 * 2MB pages are only supported with batching. If batching is for some
540 * reason disabled, do not use 2MB pages, since otherwise the legacy
541 * mechanism is used with 2MB pages, causing a failure.
542 */
543 b->max_page_size = VMW_BALLOON_4K_PAGE;
544 if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) &&
545 (b->capabilities & VMW_BALLOON_BATCHED_CMDS))
546 b->max_page_size = VMW_BALLOON_2M_PAGE;
547
548
549 return 0;
550}
551
552/**
553 * vmballoon_send_guest_id - communicate guest type to the host.
554 *
555 * @b: pointer to the balloon.
556 *
557 * Communicate guest type to the host so that it can adjust ballooning
558 * algorithm to the one most appropriate for the guest. This command
559 * is normally issued after sending "start" command and is part of
560 * standard reset sequence.
561 *
562 * Return: zero on success or appropriate error code.
563 */
564static int vmballoon_send_guest_id(struct vmballoon *b)
565{
566 unsigned long status;
567
568 status = vmballoon_cmd(b, VMW_BALLOON_CMD_GUEST_ID,
569 VMW_BALLOON_GUEST_ID, 0);
570
571 return status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
572}
573
574/**
575 * vmballoon_page_order() - return the order of the page
576 * @page_size: the size of the page.
577 *
578 * Return: the allocation order.
579 */
580static inline
581unsigned int vmballoon_page_order(enum vmballoon_page_size_type page_size)
582{
583 return page_size == VMW_BALLOON_2M_PAGE ? VMW_BALLOON_2M_ORDER : 0;
584}
585
586/**
587 * vmballoon_page_in_frames() - returns the number of frames in a page.
588 * @page_size: the size of the page.
589 *
590 * Return: the number of 4k frames.
591 */
592static inline unsigned int
593vmballoon_page_in_frames(enum vmballoon_page_size_type page_size)
594{
595 return 1 << vmballoon_page_order(page_size);
596}
597
598/**
599 * vmballoon_mark_page_offline() - mark a page as offline
600 * @page: pointer for the page.
601 * @page_size: the size of the page.
602 */
603static void
604vmballoon_mark_page_offline(struct page *page,
605 enum vmballoon_page_size_type page_size)
606{
607 int i;
608
609 for (i = 0; i < vmballoon_page_in_frames(page_size); i++)
610 __SetPageOffline(page + i);
611}
612
613/**
614 * vmballoon_mark_page_online() - mark a page as online
615 * @page: pointer for the page.
616 * @page_size: the size of the page.
617 */
618static void
619vmballoon_mark_page_online(struct page *page,
620 enum vmballoon_page_size_type page_size)
621{
622 int i;
623
624 for (i = 0; i < vmballoon_page_in_frames(page_size); i++)
625 __ClearPageOffline(page + i);
626}
627
628/**
629 * vmballoon_send_get_target() - Retrieve desired balloon size from the host.
630 *
631 * @b: pointer to the balloon.
632 *
633 * Return: zero on success, EINVAL if limit does not fit in 32-bit, as required
634 * by the host-guest protocol and EIO if an error occurred in communicating with
635 * the host.
636 */
637static int vmballoon_send_get_target(struct vmballoon *b)
638{
639 unsigned long status;
640 unsigned long limit;
641
642 limit = totalram_pages();
643
644 /* Ensure limit fits in 32-bits if 64-bit targets are not supported */
645 if (!(b->capabilities & VMW_BALLOON_64_BIT_TARGET) &&
646 limit != (u32)limit)
647 return -EINVAL;
648
649 status = vmballoon_cmd(b, VMW_BALLOON_CMD_GET_TARGET, limit, 0);
650
651 return status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
652}
653
654/**
655 * vmballoon_alloc_page_list - allocates a list of pages.
656 *
657 * @b: pointer to the balloon.
658 * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
659 * @req_n_pages: the number of requested pages.
660 *
661 * Tries to allocate @req_n_pages. Add them to the list of balloon pages in
662 * @ctl.pages and updates @ctl.n_pages to reflect the number of pages.
663 *
664 * Return: zero on success or error code otherwise.
665 */
666static int vmballoon_alloc_page_list(struct vmballoon *b,
667 struct vmballoon_ctl *ctl,
668 unsigned int req_n_pages)
669{
670 struct page *page;
671 unsigned int i;
672
673 for (i = 0; i < req_n_pages; i++) {
674 /*
675 * First check if we happen to have pages that were allocated
676 * before. This happens when 2MB page rejected during inflation
677 * by the hypervisor, and then split into 4KB pages.
678 */
679 if (!list_empty(&ctl->prealloc_pages)) {
680 page = list_first_entry(&ctl->prealloc_pages,
681 struct page, lru);
682 list_del(&page->lru);
683 } else {
684 if (ctl->page_size == VMW_BALLOON_2M_PAGE)
685 page = alloc_pages(__GFP_HIGHMEM|__GFP_NOWARN|
686 __GFP_NOMEMALLOC, VMW_BALLOON_2M_ORDER);
687 else
688 page = balloon_page_alloc();
689
690 vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC,
691 ctl->page_size);
692 }
693
694 if (page) {
695 /* Success. Add the page to the list and continue. */
696 list_add(&page->lru, &ctl->pages);
697 continue;
698 }
699
700 /* Allocation failed. Update statistics and stop. */
701 vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC_FAIL,
702 ctl->page_size);
703 break;
704 }
705
706 ctl->n_pages = i;
707
708 return req_n_pages == ctl->n_pages ? 0 : -ENOMEM;
709}
710
711/**
712 * vmballoon_handle_one_result - Handle lock/unlock result for a single page.
713 *
714 * @b: pointer for %struct vmballoon.
715 * @page: pointer for the page whose result should be handled.
716 * @page_size: size of the page.
717 * @status: status of the operation as provided by the hypervisor.
718 */
719static int vmballoon_handle_one_result(struct vmballoon *b, struct page *page,
720 enum vmballoon_page_size_type page_size,
721 unsigned long status)
722{
723 /* On success do nothing. The page is already on the balloon list. */
724 if (likely(status == VMW_BALLOON_SUCCESS))
725 return 0;
726
727 pr_debug("%s: failed comm pfn %lx status %lu page_size %s\n", __func__,
728 page_to_pfn(page), status,
729 vmballoon_page_size_names[page_size]);
730
731 /* Error occurred */
732 vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC,
733 page_size);
734
735 return -EIO;
736}
737
738/**
739 * vmballoon_status_page - returns the status of (un)lock operation
740 *
741 * @b: pointer to the balloon.
742 * @idx: index for the page for which the operation is performed.
743 * @p: pointer to where the page struct is returned.
744 *
745 * Following a lock or unlock operation, returns the status of the operation for
746 * an individual page. Provides the page that the operation was performed on on
747 * the @page argument.
748 *
749 * Returns: The status of a lock or unlock operation for an individual page.
750 */
751static unsigned long vmballoon_status_page(struct vmballoon *b, int idx,
752 struct page **p)
753{
754 if (static_branch_likely(&vmw_balloon_batching)) {
755 /* batching mode */
756 *p = pfn_to_page(b->batch_page[idx].pfn);
757 return b->batch_page[idx].status;
758 }
759
760 /* non-batching mode */
761 *p = b->page;
762
763 /*
764 * If a failure occurs, the indication will be provided in the status
765 * of the entire operation, which is considered before the individual
766 * page status. So for non-batching mode, the indication is always of
767 * success.
768 */
769 return VMW_BALLOON_SUCCESS;
770}
771
772/**
773 * vmballoon_lock_op - notifies the host about inflated/deflated pages.
774 * @b: pointer to the balloon.
775 * @num_pages: number of inflated/deflated pages.
776 * @page_size: size of the page.
777 * @op: the type of operation (lock or unlock).
778 *
779 * Notify the host about page(s) that were ballooned (or removed from the
780 * balloon) so that host can use it without fear that guest will need it (or
781 * stop using them since the VM does). Host may reject some pages, we need to
782 * check the return value and maybe submit a different page. The pages that are
783 * inflated/deflated are pointed by @b->page.
784 *
785 * Return: result as provided by the hypervisor.
786 */
787static unsigned long vmballoon_lock_op(struct vmballoon *b,
788 unsigned int num_pages,
789 enum vmballoon_page_size_type page_size,
790 enum vmballoon_op op)
791{
792 unsigned long cmd, pfn;
793
794 lockdep_assert_held(&b->comm_lock);
795
796 if (static_branch_likely(&vmw_balloon_batching)) {
797 if (op == VMW_BALLOON_INFLATE)
798 cmd = page_size == VMW_BALLOON_2M_PAGE ?
799 VMW_BALLOON_CMD_BATCHED_2M_LOCK :
800 VMW_BALLOON_CMD_BATCHED_LOCK;
801 else
802 cmd = page_size == VMW_BALLOON_2M_PAGE ?
803 VMW_BALLOON_CMD_BATCHED_2M_UNLOCK :
804 VMW_BALLOON_CMD_BATCHED_UNLOCK;
805
806 pfn = PHYS_PFN(virt_to_phys(b->batch_page));
807 } else {
808 cmd = op == VMW_BALLOON_INFLATE ? VMW_BALLOON_CMD_LOCK :
809 VMW_BALLOON_CMD_UNLOCK;
810 pfn = page_to_pfn(b->page);
811
812 /* In non-batching mode, PFNs must fit in 32-bit */
813 if (unlikely(pfn != (u32)pfn))
814 return VMW_BALLOON_ERROR_PPN_INVALID;
815 }
816
817 return vmballoon_cmd(b, cmd, pfn, num_pages);
818}
819
820/**
821 * vmballoon_add_page - adds a page towards lock/unlock operation.
822 *
823 * @b: pointer to the balloon.
824 * @idx: index of the page to be ballooned in this batch.
825 * @p: pointer to the page that is about to be ballooned.
826 *
827 * Adds the page to be ballooned. Must be called while holding @comm_lock.
828 */
829static void vmballoon_add_page(struct vmballoon *b, unsigned int idx,
830 struct page *p)
831{
832 lockdep_assert_held(&b->comm_lock);
833
834 if (static_branch_likely(&vmw_balloon_batching))
835 b->batch_page[idx] = (struct vmballoon_batch_entry)
836 { .pfn = page_to_pfn(p) };
837 else
838 b->page = p;
839}
840
841/**
842 * vmballoon_lock - lock or unlock a batch of pages.
843 *
844 * @b: pointer to the balloon.
845 * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
846 *
847 * Notifies the host of about ballooned pages (after inflation or deflation,
848 * according to @ctl). If the host rejects the page put it on the
849 * @ctl refuse list. These refused page are then released when moving to the
850 * next size of pages.
851 *
852 * Note that we neither free any @page here nor put them back on the ballooned
853 * pages list. Instead we queue it for later processing. We do that for several
854 * reasons. First, we do not want to free the page under the lock. Second, it
855 * allows us to unify the handling of lock and unlock. In the inflate case, the
856 * caller will check if there are too many refused pages and release them.
857 * Although it is not identical to the past behavior, it should not affect
858 * performance.
859 */
860static int vmballoon_lock(struct vmballoon *b, struct vmballoon_ctl *ctl)
861{
862 unsigned long batch_status;
863 struct page *page;
864 unsigned int i, num_pages;
865
866 num_pages = ctl->n_pages;
867 if (num_pages == 0)
868 return 0;
869
870 /* communication with the host is done under the communication lock */
871 spin_lock(&b->comm_lock);
872
873 i = 0;
874 list_for_each_entry(page, &ctl->pages, lru)
875 vmballoon_add_page(b, i++, page);
876
877 batch_status = vmballoon_lock_op(b, ctl->n_pages, ctl->page_size,
878 ctl->op);
879
880 /*
881 * Iterate over the pages in the provided list. Since we are changing
882 * @ctl->n_pages we are saving the original value in @num_pages and
883 * use this value to bound the loop.
884 */
885 for (i = 0; i < num_pages; i++) {
886 unsigned long status;
887
888 status = vmballoon_status_page(b, i, &page);
889
890 /*
891 * Failure of the whole batch overrides a single operation
892 * results.
893 */
894 if (batch_status != VMW_BALLOON_SUCCESS)
895 status = batch_status;
896
897 /* Continue if no error happened */
898 if (!vmballoon_handle_one_result(b, page, ctl->page_size,
899 status))
900 continue;
901
902 /*
903 * Error happened. Move the pages to the refused list and update
904 * the pages number.
905 */
906 list_move(&page->lru, &ctl->refused_pages);
907 ctl->n_pages--;
908 ctl->n_refused_pages++;
909 }
910
911 spin_unlock(&b->comm_lock);
912
913 return batch_status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
914}
915
916/**
917 * vmballoon_release_page_list() - Releases a page list
918 *
919 * @page_list: list of pages to release.
920 * @n_pages: pointer to the number of pages.
921 * @page_size: whether the pages in the list are 2MB (or else 4KB).
922 *
923 * Releases the list of pages and zeros the number of pages.
924 */
925static void vmballoon_release_page_list(struct list_head *page_list,
926 int *n_pages,
927 enum vmballoon_page_size_type page_size)
928{
929 struct page *page, *tmp;
930
931 list_for_each_entry_safe(page, tmp, page_list, lru) {
932 list_del(&page->lru);
933 __free_pages(page, vmballoon_page_order(page_size));
934 }
935
936 if (n_pages)
937 *n_pages = 0;
938}
939
940
941/*
942 * Release pages that were allocated while attempting to inflate the
943 * balloon but were refused by the host for one reason or another.
944 */
945static void vmballoon_release_refused_pages(struct vmballoon *b,
946 struct vmballoon_ctl *ctl)
947{
948 vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_REFUSED_FREE,
949 ctl->page_size);
950
951 vmballoon_release_page_list(&ctl->refused_pages, &ctl->n_refused_pages,
952 ctl->page_size);
953}
954
955/**
956 * vmballoon_change - retrieve the required balloon change
957 *
958 * @b: pointer for the balloon.
959 *
960 * Return: the required change for the balloon size. A positive number
961 * indicates inflation, a negative number indicates a deflation.
962 */
963static int64_t vmballoon_change(struct vmballoon *b)
964{
965 int64_t size, target;
966
967 size = atomic64_read(&b->size);
968 target = READ_ONCE(b->target);
969
970 /*
971 * We must cast first because of int sizes
972 * Otherwise we might get huge positives instead of negatives
973 */
974
975 if (b->reset_required)
976 return 0;
977
978 /* consider a 2MB slack on deflate, unless the balloon is emptied */
979 if (target < size && target != 0 &&
980 size - target < vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE))
981 return 0;
982
983 /* If an out-of-memory recently occurred, inflation is disallowed. */
984 if (target > size && time_before(jiffies, READ_ONCE(b->shrink_timeout)))
985 return 0;
986
987 return target - size;
988}
989
990/**
991 * vmballoon_enqueue_page_list() - Enqueues list of pages after inflation.
992 *
993 * @b: pointer to balloon.
994 * @pages: list of pages to enqueue.
995 * @n_pages: pointer to number of pages in list. The value is zeroed.
996 * @page_size: whether the pages are 2MB or 4KB pages.
997 *
998 * Enqueues the provides list of pages in the ballooned page list, clears the
999 * list and zeroes the number of pages that was provided.
1000 */
1001static void vmballoon_enqueue_page_list(struct vmballoon *b,
1002 struct list_head *pages,
1003 unsigned int *n_pages,
1004 enum vmballoon_page_size_type page_size)
1005{
1006 unsigned long flags;
1007 struct page *page;
1008
1009 if (page_size == VMW_BALLOON_4K_PAGE) {
1010 balloon_page_list_enqueue(&b->b_dev_info, pages);
1011 } else {
1012 /*
1013 * Keep the huge pages in a local list which is not available
1014 * for the balloon compaction mechanism.
1015 */
1016 spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
1017
1018 list_for_each_entry(page, pages, lru) {
1019 vmballoon_mark_page_offline(page, VMW_BALLOON_2M_PAGE);
1020 }
1021
1022 list_splice_init(pages, &b->huge_pages);
1023 __count_vm_events(BALLOON_INFLATE, *n_pages *
1024 vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE));
1025 spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
1026 }
1027
1028 *n_pages = 0;
1029}
1030
1031/**
1032 * vmballoon_dequeue_page_list() - Dequeues page lists for deflation.
1033 *
1034 * @b: pointer to balloon.
1035 * @pages: list of pages to enqueue.
1036 * @n_pages: pointer to number of pages in list. The value is zeroed.
1037 * @page_size: whether the pages are 2MB or 4KB pages.
1038 * @n_req_pages: the number of requested pages.
1039 *
1040 * Dequeues the number of requested pages from the balloon for deflation. The
1041 * number of dequeued pages may be lower, if not enough pages in the requested
1042 * size are available.
1043 */
1044static void vmballoon_dequeue_page_list(struct vmballoon *b,
1045 struct list_head *pages,
1046 unsigned int *n_pages,
1047 enum vmballoon_page_size_type page_size,
1048 unsigned int n_req_pages)
1049{
1050 struct page *page, *tmp;
1051 unsigned int i = 0;
1052 unsigned long flags;
1053
1054 /* In the case of 4k pages, use the compaction infrastructure */
1055 if (page_size == VMW_BALLOON_4K_PAGE) {
1056 *n_pages = balloon_page_list_dequeue(&b->b_dev_info, pages,
1057 n_req_pages);
1058 return;
1059 }
1060
1061 /* 2MB pages */
1062 spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
1063 list_for_each_entry_safe(page, tmp, &b->huge_pages, lru) {
1064 vmballoon_mark_page_online(page, VMW_BALLOON_2M_PAGE);
1065
1066 list_move(&page->lru, pages);
1067 if (++i == n_req_pages)
1068 break;
1069 }
1070
1071 __count_vm_events(BALLOON_DEFLATE,
1072 i * vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE));
1073 spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
1074 *n_pages = i;
1075}
1076
1077/**
1078 * vmballoon_split_refused_pages() - Split the 2MB refused pages to 4k.
1079 *
1080 * If inflation of 2MB pages was denied by the hypervisor, it is likely to be
1081 * due to one or few 4KB pages. These 2MB pages may keep being allocated and
1082 * then being refused. To prevent this case, this function splits the refused
1083 * pages into 4KB pages and adds them into @prealloc_pages list.
1084 *
1085 * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
1086 */
1087static void vmballoon_split_refused_pages(struct vmballoon_ctl *ctl)
1088{
1089 struct page *page, *tmp;
1090 unsigned int i, order;
1091
1092 order = vmballoon_page_order(ctl->page_size);
1093
1094 list_for_each_entry_safe(page, tmp, &ctl->refused_pages, lru) {
1095 list_del(&page->lru);
1096 split_page(page, order);
1097 for (i = 0; i < (1 << order); i++)
1098 list_add(&page[i].lru, &ctl->prealloc_pages);
1099 }
1100 ctl->n_refused_pages = 0;
1101}
1102
1103/**
1104 * vmballoon_inflate() - Inflate the balloon towards its target size.
1105 *
1106 * @b: pointer to the balloon.
1107 */
1108static void vmballoon_inflate(struct vmballoon *b)
1109{
1110 int64_t to_inflate_frames;
1111 struct vmballoon_ctl ctl = {
1112 .pages = LIST_HEAD_INIT(ctl.pages),
1113 .refused_pages = LIST_HEAD_INIT(ctl.refused_pages),
1114 .prealloc_pages = LIST_HEAD_INIT(ctl.prealloc_pages),
1115 .page_size = b->max_page_size,
1116 .op = VMW_BALLOON_INFLATE
1117 };
1118
1119 while ((to_inflate_frames = vmballoon_change(b)) > 0) {
1120 unsigned int to_inflate_pages, page_in_frames;
1121 int alloc_error, lock_error = 0;
1122
1123 VM_BUG_ON(!list_empty(&ctl.pages));
1124 VM_BUG_ON(ctl.n_pages != 0);
1125
1126 page_in_frames = vmballoon_page_in_frames(ctl.page_size);
1127
1128 to_inflate_pages = min_t(unsigned long, b->batch_max_pages,
1129 DIV_ROUND_UP_ULL(to_inflate_frames,
1130 page_in_frames));
1131
1132 /* Start by allocating */
1133 alloc_error = vmballoon_alloc_page_list(b, &ctl,
1134 to_inflate_pages);
1135
1136 /* Actually lock the pages by telling the hypervisor */
1137 lock_error = vmballoon_lock(b, &ctl);
1138
1139 /*
1140 * If an error indicates that something serious went wrong,
1141 * stop the inflation.
1142 */
1143 if (lock_error)
1144 break;
1145
1146 /* Update the balloon size */
1147 atomic64_add(ctl.n_pages * page_in_frames, &b->size);
1148
1149 vmballoon_enqueue_page_list(b, &ctl.pages, &ctl.n_pages,
1150 ctl.page_size);
1151
1152 /*
1153 * If allocation failed or the number of refused pages exceeds
1154 * the maximum allowed, move to the next page size.
1155 */
1156 if (alloc_error ||
1157 ctl.n_refused_pages >= VMW_BALLOON_MAX_REFUSED) {
1158 if (ctl.page_size == VMW_BALLOON_4K_PAGE)
1159 break;
1160
1161 /*
1162 * Split the refused pages to 4k. This will also empty
1163 * the refused pages list.
1164 */
1165 vmballoon_split_refused_pages(&ctl);
1166 ctl.page_size--;
1167 }
1168
1169 cond_resched();
1170 }
1171
1172 /*
1173 * Release pages that were allocated while attempting to inflate the
1174 * balloon but were refused by the host for one reason or another,
1175 * and update the statistics.
1176 */
1177 if (ctl.n_refused_pages != 0)
1178 vmballoon_release_refused_pages(b, &ctl);
1179
1180 vmballoon_release_page_list(&ctl.prealloc_pages, NULL, ctl.page_size);
1181}
1182
1183/**
1184 * vmballoon_deflate() - Decrease the size of the balloon.
1185 *
1186 * @b: pointer to the balloon
1187 * @n_frames: the number of frames to deflate. If zero, automatically
1188 * calculated according to the target size.
1189 * @coordinated: whether to coordinate with the host
1190 *
1191 * Decrease the size of the balloon allowing guest to use more memory.
1192 *
1193 * Return: The number of deflated frames (i.e., basic page size units)
1194 */
1195static unsigned long vmballoon_deflate(struct vmballoon *b, uint64_t n_frames,
1196 bool coordinated)
1197{
1198 unsigned long deflated_frames = 0;
1199 unsigned long tried_frames = 0;
1200 struct vmballoon_ctl ctl = {
1201 .pages = LIST_HEAD_INIT(ctl.pages),
1202 .refused_pages = LIST_HEAD_INIT(ctl.refused_pages),
1203 .page_size = VMW_BALLOON_4K_PAGE,
1204 .op = VMW_BALLOON_DEFLATE
1205 };
1206
1207 /* free pages to reach target */
1208 while (true) {
1209 unsigned int to_deflate_pages, n_unlocked_frames;
1210 unsigned int page_in_frames;
1211 int64_t to_deflate_frames;
1212 bool deflated_all;
1213
1214 page_in_frames = vmballoon_page_in_frames(ctl.page_size);
1215
1216 VM_BUG_ON(!list_empty(&ctl.pages));
1217 VM_BUG_ON(ctl.n_pages);
1218 VM_BUG_ON(!list_empty(&ctl.refused_pages));
1219 VM_BUG_ON(ctl.n_refused_pages);
1220
1221 /*
1222 * If we were requested a specific number of frames, we try to
1223 * deflate this number of frames. Otherwise, deflation is
1224 * performed according to the target and balloon size.
1225 */
1226 to_deflate_frames = n_frames ? n_frames - tried_frames :
1227 -vmballoon_change(b);
1228
1229 /* break if no work to do */
1230 if (to_deflate_frames <= 0)
1231 break;
1232
1233 /*
1234 * Calculate the number of frames based on current page size,
1235 * but limit the deflated frames to a single chunk
1236 */
1237 to_deflate_pages = min_t(unsigned long, b->batch_max_pages,
1238 DIV_ROUND_UP_ULL(to_deflate_frames,
1239 page_in_frames));
1240
1241 /* First take the pages from the balloon pages. */
1242 vmballoon_dequeue_page_list(b, &ctl.pages, &ctl.n_pages,
1243 ctl.page_size, to_deflate_pages);
1244
1245 /*
1246 * Before pages are moving to the refused list, count their
1247 * frames as frames that we tried to deflate.
1248 */
1249 tried_frames += ctl.n_pages * page_in_frames;
1250
1251 /*
1252 * Unlock the pages by communicating with the hypervisor if the
1253 * communication is coordinated (i.e., not pop). We ignore the
1254 * return code. Instead we check if all the pages we manage to
1255 * unlock all the pages. If we failed, we will move to the next
1256 * page size, and would eventually try again later.
1257 */
1258 if (coordinated)
1259 vmballoon_lock(b, &ctl);
1260
1261 /*
1262 * Check if we deflated enough. We will move to the next page
1263 * size if we did not manage to do so. This calculation takes
1264 * place now, as once the pages are released, the number of
1265 * pages is zeroed.
1266 */
1267 deflated_all = (ctl.n_pages == to_deflate_pages);
1268
1269 /* Update local and global counters */
1270 n_unlocked_frames = ctl.n_pages * page_in_frames;
1271 atomic64_sub(n_unlocked_frames, &b->size);
1272 deflated_frames += n_unlocked_frames;
1273
1274 vmballoon_stats_page_add(b, VMW_BALLOON_PAGE_STAT_FREE,
1275 ctl.page_size, ctl.n_pages);
1276
1277 /* free the ballooned pages */
1278 vmballoon_release_page_list(&ctl.pages, &ctl.n_pages,
1279 ctl.page_size);
1280
1281 /* Return the refused pages to the ballooned list. */
1282 vmballoon_enqueue_page_list(b, &ctl.refused_pages,
1283 &ctl.n_refused_pages,
1284 ctl.page_size);
1285
1286 /* If we failed to unlock all the pages, move to next size. */
1287 if (!deflated_all) {
1288 if (ctl.page_size == b->max_page_size)
1289 break;
1290 ctl.page_size++;
1291 }
1292
1293 cond_resched();
1294 }
1295
1296 return deflated_frames;
1297}
1298
1299/**
1300 * vmballoon_deinit_batching - disables batching mode.
1301 *
1302 * @b: pointer to &struct vmballoon.
1303 *
1304 * Disables batching, by deallocating the page for communication with the
1305 * hypervisor and disabling the static key to indicate that batching is off.
1306 */
1307static void vmballoon_deinit_batching(struct vmballoon *b)
1308{
1309 free_page((unsigned long)b->batch_page);
1310 b->batch_page = NULL;
1311 static_branch_disable(&vmw_balloon_batching);
1312 b->batch_max_pages = 1;
1313}
1314
1315/**
1316 * vmballoon_init_batching - enable batching mode.
1317 *
1318 * @b: pointer to &struct vmballoon.
1319 *
1320 * Enables batching, by allocating a page for communication with the hypervisor
1321 * and enabling the static_key to use batching.
1322 *
1323 * Return: zero on success or an appropriate error-code.
1324 */
1325static int vmballoon_init_batching(struct vmballoon *b)
1326{
1327 struct page *page;
1328
1329 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
1330 if (!page)
1331 return -ENOMEM;
1332
1333 b->batch_page = page_address(page);
1334 b->batch_max_pages = PAGE_SIZE / sizeof(struct vmballoon_batch_entry);
1335
1336 static_branch_enable(&vmw_balloon_batching);
1337
1338 return 0;
1339}
1340
1341/*
1342 * Receive notification and resize balloon
1343 */
1344static void vmballoon_doorbell(void *client_data)
1345{
1346 struct vmballoon *b = client_data;
1347
1348 vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_DOORBELL);
1349
1350 mod_delayed_work(system_freezable_wq, &b->dwork, 0);
1351}
1352
1353/*
1354 * Clean up vmci doorbell
1355 */
1356static void vmballoon_vmci_cleanup(struct vmballoon *b)
1357{
1358 vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
1359 VMCI_INVALID_ID, VMCI_INVALID_ID);
1360
1361 if (!vmci_handle_is_invalid(b->vmci_doorbell)) {
1362 vmci_doorbell_destroy(b->vmci_doorbell);
1363 b->vmci_doorbell = VMCI_INVALID_HANDLE;
1364 }
1365}
1366
1367/**
1368 * vmballoon_vmci_init - Initialize vmci doorbell.
1369 *
1370 * @b: pointer to the balloon.
1371 *
1372 * Return: zero on success or when wakeup command not supported. Error-code
1373 * otherwise.
1374 *
1375 * Initialize vmci doorbell, to get notified as soon as balloon changes.
1376 */
1377static int vmballoon_vmci_init(struct vmballoon *b)
1378{
1379 unsigned long error;
1380
1381 if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0)
1382 return 0;
1383
1384 error = vmci_doorbell_create(&b->vmci_doorbell, VMCI_FLAG_DELAYED_CB,
1385 VMCI_PRIVILEGE_FLAG_RESTRICTED,
1386 vmballoon_doorbell, b);
1387
1388 if (error != VMCI_SUCCESS)
1389 goto fail;
1390
1391 error = __vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
1392 b->vmci_doorbell.context,
1393 b->vmci_doorbell.resource, NULL);
1394
1395 if (error != VMW_BALLOON_SUCCESS)
1396 goto fail;
1397
1398 return 0;
1399fail:
1400 vmballoon_vmci_cleanup(b);
1401 return -EIO;
1402}
1403
1404/**
1405 * vmballoon_pop - Quickly release all pages allocate for the balloon.
1406 *
1407 * @b: pointer to the balloon.
1408 *
1409 * This function is called when host decides to "reset" balloon for one reason
1410 * or another. Unlike normal "deflate" we do not (shall not) notify host of the
1411 * pages being released.
1412 */
1413static void vmballoon_pop(struct vmballoon *b)
1414{
1415 unsigned long size;
1416
1417 while ((size = atomic64_read(&b->size)))
1418 vmballoon_deflate(b, size, false);
1419}
1420
1421/*
1422 * Perform standard reset sequence by popping the balloon (in case it
1423 * is not empty) and then restarting protocol. This operation normally
1424 * happens when host responds with VMW_BALLOON_ERROR_RESET to a command.
1425 */
1426static void vmballoon_reset(struct vmballoon *b)
1427{
1428 int error;
1429
1430 down_write(&b->conf_sem);
1431
1432 vmballoon_vmci_cleanup(b);
1433
1434 /* free all pages, skipping monitor unlock */
1435 vmballoon_pop(b);
1436
1437 if (vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
1438 goto unlock;
1439
1440 if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
1441 if (vmballoon_init_batching(b)) {
1442 /*
1443 * We failed to initialize batching, inform the monitor
1444 * about it by sending a null capability.
1445 *
1446 * The guest will retry in one second.
1447 */
1448 vmballoon_send_start(b, 0);
1449 goto unlock;
1450 }
1451 } else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) {
1452 vmballoon_deinit_batching(b);
1453 }
1454
1455 vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_RESET);
1456 b->reset_required = false;
1457
1458 error = vmballoon_vmci_init(b);
1459 if (error)
1460 pr_err("failed to initialize vmci doorbell\n");
1461
1462 if (vmballoon_send_guest_id(b))
1463 pr_err("failed to send guest ID to the host\n");
1464
1465unlock:
1466 up_write(&b->conf_sem);
1467}
1468
1469/**
1470 * vmballoon_work - periodic balloon worker for reset, inflation and deflation.
1471 *
1472 * @work: pointer to the &work_struct which is provided by the workqueue.
1473 *
1474 * Resets the protocol if needed, gets the new size and adjusts balloon as
1475 * needed. Repeat in 1 sec.
1476 */
1477static void vmballoon_work(struct work_struct *work)
1478{
1479 struct delayed_work *dwork = to_delayed_work(work);
1480 struct vmballoon *b = container_of(dwork, struct vmballoon, dwork);
1481 int64_t change = 0;
1482
1483 if (b->reset_required)
1484 vmballoon_reset(b);
1485
1486 down_read(&b->conf_sem);
1487
1488 /*
1489 * Update the stats while holding the semaphore to ensure that
1490 * @stats_enabled is consistent with whether the stats are actually
1491 * enabled
1492 */
1493 vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_TIMER);
1494
1495 if (!vmballoon_send_get_target(b))
1496 change = vmballoon_change(b);
1497
1498 if (change != 0) {
1499 pr_debug("%s - size: %llu, target %lu\n", __func__,
1500 atomic64_read(&b->size), READ_ONCE(b->target));
1501
1502 if (change > 0)
1503 vmballoon_inflate(b);
1504 else /* (change < 0) */
1505 vmballoon_deflate(b, 0, true);
1506 }
1507
1508 up_read(&b->conf_sem);
1509
1510 /*
1511 * We are using a freezable workqueue so that balloon operations are
1512 * stopped while the system transitions to/from sleep/hibernation.
1513 */
1514 queue_delayed_work(system_freezable_wq,
1515 dwork, round_jiffies_relative(HZ));
1516
1517}
1518
1519/**
1520 * vmballoon_shrinker_scan() - deflate the balloon due to memory pressure.
1521 * @shrinker: pointer to the balloon shrinker.
1522 * @sc: page reclaim information.
1523 *
1524 * Returns: number of pages that were freed during deflation.
1525 */
1526static unsigned long vmballoon_shrinker_scan(struct shrinker *shrinker,
1527 struct shrink_control *sc)
1528{
1529 struct vmballoon *b = &balloon;
1530 unsigned long deflated_frames;
1531
1532 pr_debug("%s - size: %llu", __func__, atomic64_read(&b->size));
1533
1534 vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_SHRINK);
1535
1536 /*
1537 * If the lock is also contended for read, we cannot easily reclaim and
1538 * we bail out.
1539 */
1540 if (!down_read_trylock(&b->conf_sem))
1541 return 0;
1542
1543 deflated_frames = vmballoon_deflate(b, sc->nr_to_scan, true);
1544
1545 vmballoon_stats_gen_add(b, VMW_BALLOON_STAT_SHRINK_FREE,
1546 deflated_frames);
1547
1548 /*
1549 * Delay future inflation for some time to mitigate the situations in
1550 * which balloon continuously grows and shrinks. Use WRITE_ONCE() since
1551 * the access is asynchronous.
1552 */
1553 WRITE_ONCE(b->shrink_timeout, jiffies + HZ * VMBALLOON_SHRINK_DELAY);
1554
1555 up_read(&b->conf_sem);
1556
1557 return deflated_frames;
1558}
1559
1560/**
1561 * vmballoon_shrinker_count() - return the number of ballooned pages.
1562 * @shrinker: pointer to the balloon shrinker.
1563 * @sc: page reclaim information.
1564 *
1565 * Returns: number of 4k pages that are allocated for the balloon and can
1566 * therefore be reclaimed under pressure.
1567 */
1568static unsigned long vmballoon_shrinker_count(struct shrinker *shrinker,
1569 struct shrink_control *sc)
1570{
1571 struct vmballoon *b = &balloon;
1572
1573 return atomic64_read(&b->size);
1574}
1575
1576static void vmballoon_unregister_shrinker(struct vmballoon *b)
1577{
1578 if (b->shrinker_registered)
1579 unregister_shrinker(&b->shrinker);
1580 b->shrinker_registered = false;
1581}
1582
1583static int vmballoon_register_shrinker(struct vmballoon *b)
1584{
1585 int r;
1586
1587 /* Do nothing if the shrinker is not enabled */
1588 if (!vmwballoon_shrinker_enable)
1589 return 0;
1590
1591 b->shrinker.scan_objects = vmballoon_shrinker_scan;
1592 b->shrinker.count_objects = vmballoon_shrinker_count;
1593 b->shrinker.seeks = DEFAULT_SEEKS;
1594
1595 r = register_shrinker(&b->shrinker);
1596
1597 if (r == 0)
1598 b->shrinker_registered = true;
1599
1600 return r;
1601}
1602
1603/*
1604 * DEBUGFS Interface
1605 */
1606#ifdef CONFIG_DEBUG_FS
1607
1608static const char * const vmballoon_stat_page_names[] = {
1609 [VMW_BALLOON_PAGE_STAT_ALLOC] = "alloc",
1610 [VMW_BALLOON_PAGE_STAT_ALLOC_FAIL] = "allocFail",
1611 [VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC] = "errAlloc",
1612 [VMW_BALLOON_PAGE_STAT_REFUSED_FREE] = "errFree",
1613 [VMW_BALLOON_PAGE_STAT_FREE] = "free"
1614};
1615
1616static const char * const vmballoon_stat_names[] = {
1617 [VMW_BALLOON_STAT_TIMER] = "timer",
1618 [VMW_BALLOON_STAT_DOORBELL] = "doorbell",
1619 [VMW_BALLOON_STAT_RESET] = "reset",
1620 [VMW_BALLOON_STAT_SHRINK] = "shrink",
1621 [VMW_BALLOON_STAT_SHRINK_FREE] = "shrinkFree"
1622};
1623
1624static int vmballoon_enable_stats(struct vmballoon *b)
1625{
1626 int r = 0;
1627
1628 down_write(&b->conf_sem);
1629
1630 /* did we somehow race with another reader which enabled stats? */
1631 if (b->stats)
1632 goto out;
1633
1634 b->stats = kzalloc(sizeof(*b->stats), GFP_KERNEL);
1635
1636 if (!b->stats) {
1637 /* allocation failed */
1638 r = -ENOMEM;
1639 goto out;
1640 }
1641 static_key_enable(&balloon_stat_enabled.key);
1642out:
1643 up_write(&b->conf_sem);
1644 return r;
1645}
1646
1647/**
1648 * vmballoon_debug_show - shows statistics of balloon operations.
1649 * @f: pointer to the &struct seq_file.
1650 * @offset: ignored.
1651 *
1652 * Provides the statistics that can be accessed in vmmemctl in the debugfs.
1653 * To avoid the overhead - mainly that of memory - of collecting the statistics,
1654 * we only collect statistics after the first time the counters are read.
1655 *
1656 * Return: zero on success or an error code.
1657 */
1658static int vmballoon_debug_show(struct seq_file *f, void *offset)
1659{
1660 struct vmballoon *b = f->private;
1661 int i, j;
1662
1663 /* enables stats if they are disabled */
1664 if (!b->stats) {
1665 int r = vmballoon_enable_stats(b);
1666
1667 if (r)
1668 return r;
1669 }
1670
1671 /* format capabilities info */
1672 seq_printf(f, "%-22s: %#16x\n", "balloon capabilities",
1673 VMW_BALLOON_CAPABILITIES);
1674 seq_printf(f, "%-22s: %#16lx\n", "used capabilities", b->capabilities);
1675 seq_printf(f, "%-22s: %16s\n", "is resetting",
1676 b->reset_required ? "y" : "n");
1677
1678 /* format size info */
1679 seq_printf(f, "%-22s: %16lu\n", "target", READ_ONCE(b->target));
1680 seq_printf(f, "%-22s: %16llu\n", "current", atomic64_read(&b->size));
1681
1682 for (i = 0; i < VMW_BALLOON_CMD_NUM; i++) {
1683 if (vmballoon_cmd_names[i] == NULL)
1684 continue;
1685
1686 seq_printf(f, "%-22s: %16llu (%llu failed)\n",
1687 vmballoon_cmd_names[i],
1688 atomic64_read(&b->stats->ops[i][VMW_BALLOON_OP_STAT]),
1689 atomic64_read(&b->stats->ops[i][VMW_BALLOON_OP_FAIL_STAT]));
1690 }
1691
1692 for (i = 0; i < VMW_BALLOON_STAT_NUM; i++)
1693 seq_printf(f, "%-22s: %16llu\n",
1694 vmballoon_stat_names[i],
1695 atomic64_read(&b->stats->general_stat[i]));
1696
1697 for (i = 0; i < VMW_BALLOON_PAGE_STAT_NUM; i++) {
1698 for (j = 0; j < VMW_BALLOON_NUM_PAGE_SIZES; j++)
1699 seq_printf(f, "%-18s(%s): %16llu\n",
1700 vmballoon_stat_page_names[i],
1701 vmballoon_page_size_names[j],
1702 atomic64_read(&b->stats->page_stat[i][j]));
1703 }
1704
1705 return 0;
1706}
1707
1708DEFINE_SHOW_ATTRIBUTE(vmballoon_debug);
1709
1710static void __init vmballoon_debugfs_init(struct vmballoon *b)
1711{
1712 b->dbg_entry = debugfs_create_file("vmmemctl", S_IRUGO, NULL, b,
1713 &vmballoon_debug_fops);
1714}
1715
1716static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
1717{
1718 static_key_disable(&balloon_stat_enabled.key);
1719 debugfs_remove(b->dbg_entry);
1720 kfree(b->stats);
1721 b->stats = NULL;
1722}
1723
1724#else
1725
1726static inline void vmballoon_debugfs_init(struct vmballoon *b)
1727{
1728}
1729
1730static inline void vmballoon_debugfs_exit(struct vmballoon *b)
1731{
1732}
1733
1734#endif /* CONFIG_DEBUG_FS */
1735
1736
1737#ifdef CONFIG_BALLOON_COMPACTION
1738
1739static int vmballoon_init_fs_context(struct fs_context *fc)
1740{
1741 return init_pseudo(fc, BALLOON_VMW_MAGIC) ? 0 : -ENOMEM;
1742}
1743
1744static struct file_system_type vmballoon_fs = {
1745 .name = "balloon-vmware",
1746 .init_fs_context = vmballoon_init_fs_context,
1747 .kill_sb = kill_anon_super,
1748};
1749
1750static struct vfsmount *vmballoon_mnt;
1751
/**
 * vmballoon_migratepage() - migrates a balloon page.
 * @b_dev_info: balloon device information descriptor.
 * @newpage: the page to which @page should be migrated.
 * @page: a ballooned page that should be migrated.
 * @mode: migration mode, ignored.
 *
 * This function is largely open-coded, but that is what the interface of
 * balloon_compaction requires.
 *
 * Return: zero on success, -EAGAIN when migration cannot be performed
 *	   momentarily, and -EBUSY if migration failed and should not be
 *	   retried with that specific page.
 */
static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
				 struct page *newpage, struct page *page,
				 enum migrate_mode mode)
{
	unsigned long status, flags;
	struct vmballoon *b;
	int ret;

	b = container_of(b_dev_info, struct vmballoon, b_dev_info);

	/*
	 * If the semaphore is taken, there is an ongoing configuration change
	 * (i.e., balloon reset), so try again.
	 */
	if (!down_read_trylock(&b->conf_sem))
		return -EAGAIN;

	spin_lock(&b->comm_lock);
	/*
	 * We must start by deflating and not inflating, as otherwise the
	 * hypervisor may tell us that it has enough memory and the new page is
	 * not needed. Since the old page is isolated, we cannot use the list
	 * interface to unlock it, as the LRU field is used for isolation.
	 * Instead, we use the native interface directly.
	 */
	vmballoon_add_page(b, 0, page);
	status = vmballoon_lock_op(b, 1, VMW_BALLOON_4K_PAGE,
				   VMW_BALLOON_DEFLATE);

	if (status == VMW_BALLOON_SUCCESS)
		status = vmballoon_status_page(b, 0, &page);

	/*
	 * If a failure happened, let the migration mechanism know that it
	 * should not retry.
	 */
	if (status != VMW_BALLOON_SUCCESS) {
		spin_unlock(&b->comm_lock);
		ret = -EBUSY;
		goto out_unlock;
	}

	/*
	 * The page is isolated, so it is safe to delete it without holding
	 * @pages_lock. We keep holding @comm_lock since we will need it in a
	 * second.
	 */
	balloon_page_delete(page);

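	/*
	 * Drop the reference the balloon held on the old page; the migration
	 * core still holds its own reference, taken during isolation.
	 */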
	put_page(page);

	/* Inflate */
	vmballoon_add_page(b, 0, newpage);
	status = vmballoon_lock_op(b, 1, VMW_BALLOON_4K_PAGE,
				   VMW_BALLOON_INFLATE);

	if (status == VMW_BALLOON_SUCCESS)
		status = vmballoon_status_page(b, 0, &newpage);

	spin_unlock(&b->comm_lock);

	if (status != VMW_BALLOON_SUCCESS) {
		/*
		 * A failure happened. While we can deflate the page we just
		 * inflated, this deflation can also encounter an error.
		 * Instead we will decrease the size of the balloon to reflect
		 * the change and report failure.
		 */
		atomic64_dec(&b->size);
		ret = -EBUSY;
	} else {
		/*
		 * Success. Take a reference for the page, and we will add it
		 * to the list after acquiring the lock.
		 */
		get_page(newpage);
		ret = MIGRATEPAGE_SUCCESS;
	}

	/* Update the balloon list under the @pages_lock */
	spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);

	/*
	 * On inflation success, we already took a reference for the @newpage.
	 * If we succeeded, just insert it into the list and update the
	 * statistics under the lock.
	 */
	if (ret == MIGRATEPAGE_SUCCESS) {
		balloon_page_insert(&b->b_dev_info, newpage);
		__count_vm_event(BALLOON_MIGRATE);
	}

	/*
	 * We deflated successfully, so regardless of the inflation success, we
	 * need to decrement the isolated_pages counter.
	 */
	b->b_dev_info.isolated_pages--;
	spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);

out_unlock:
	up_read(&b->conf_sem);
	return ret;
}

/**
 * vmballoon_compaction_deinit() - removes compaction-related data.
 *
 * @b: pointer to the balloon.
 */
static void vmballoon_compaction_deinit(struct vmballoon *b)
{
	if (!IS_ERR(b->b_dev_info.inode))
		iput(b->b_dev_info.inode);

	b->b_dev_info.inode = NULL;
	kern_unmount(vmballoon_mnt);
	vmballoon_mnt = NULL;
}

/**
 * vmballoon_compaction_init() - initializes compaction for the balloon.
 *
 * @b: pointer to the balloon.
 *
 * If a failure occurs during initialization, this function does not perform
 * cleanup. The caller must call vmballoon_compaction_deinit() in this case.
 *
 * Return: zero on success or error code on failure.
 */
static __init int vmballoon_compaction_init(struct vmballoon *b)
{
	vmballoon_mnt = kern_mount(&vmballoon_fs);
	if (IS_ERR(vmballoon_mnt))
		return PTR_ERR(vmballoon_mnt);

	b->b_dev_info.migratepage = vmballoon_migratepage;
	b->b_dev_info.inode = alloc_anon_inode(vmballoon_mnt->mnt_sb);

	if (IS_ERR(b->b_dev_info.inode))
		return PTR_ERR(b->b_dev_info.inode);

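	/*
	 * Pages that are inserted into the balloon inherit this mapping,
	 * which is how the compaction code identifies them as movable.
	 */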
	b->b_dev_info.inode->i_mapping->a_ops = &balloon_aops;
	return 0;
}

#else /* CONFIG_BALLOON_COMPACTION */

static void vmballoon_compaction_deinit(struct vmballoon *b)
{
}

static int vmballoon_compaction_init(struct vmballoon *b)
{
	return 0;
}

#endif /* CONFIG_BALLOON_COMPACTION */

static int __init vmballoon_init(void)
{
	int error;

	/*
	 * Check if we are running on VMware's hypervisor and bail out
	 * if we are not.
	 */
	if (x86_hyper_type != X86_HYPER_VMWARE)
		return -ENODEV;

	INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);

	error = vmballoon_register_shrinker(&balloon);
	if (error)
		goto fail;

	/*
	 * Initialization of compaction must be done after the call to
	 * balloon_devinfo_init().
	 */
	balloon_devinfo_init(&balloon.b_dev_info);
	error = vmballoon_compaction_init(&balloon);
	if (error)
		goto fail;

	INIT_LIST_HEAD(&balloon.huge_pages);
	spin_lock_init(&balloon.comm_lock);
	init_rwsem(&balloon.conf_sem);
	balloon.vmci_doorbell = VMCI_INVALID_HANDLE;
	balloon.batch_page = NULL;
	balloon.page = NULL;
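	/*
	 * Force a reset so that the first invocation of the worker
	 * negotiates capabilities and connects to the hypervisor.
	 */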
	balloon.reset_required = true;

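	/*
	 * Kick the worker immediately (zero delay); vmballoon_work()
	 * re-queues itself to adjust the balloon periodically.
	 */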
	queue_delayed_work(system_freezable_wq, &balloon.dwork, 0);

	vmballoon_debugfs_init(&balloon);

	return 0;
fail:
	vmballoon_unregister_shrinker(&balloon);
	vmballoon_compaction_deinit(&balloon);
	return error;
}

/*
 * Using late_initcall() instead of module_init() allows the balloon to use the
 * VMCI doorbell even when the balloon is built into the kernel. Otherwise,
 * VMCI would be probed only after the balloon is initialized. If the balloon
 * is used as a module, late_initcall() is equivalent to module_init().
 */
late_initcall(vmballoon_init);

static void __exit vmballoon_exit(void)
{
	vmballoon_unregister_shrinker(&balloon);
	vmballoon_vmci_cleanup(&balloon);
	cancel_delayed_work_sync(&balloon.dwork);

	vmballoon_debugfs_exit(&balloon);

	/*
	 * Deallocate all reserved memory, and reset connection with monitor.
	 * Reset connection before deallocating memory to avoid potential for
	 * additional spurious resets from guest touching deallocated pages.
	 */
	vmballoon_send_start(&balloon, 0);
	vmballoon_pop(&balloon);

	/* Only once the balloon is popped can compaction be deinitialized. */
	vmballoon_compaction_deinit(&balloon);
}
module_exit(vmballoon_exit);