   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * VMware Balloon driver.
   4 *
   5 * Copyright (C) 2000-2018, VMware, Inc. All Rights Reserved.
   6 *
   7 * This is the VMware physical memory management driver for Linux. The driver
   8 * acts like a "balloon" that can be inflated to reclaim physical pages by
   9 * reserving them in the guest and invalidating them in the monitor,
  10 * freeing up the underlying machine pages so they can be allocated to
  11 * other guests.  The balloon can also be deflated to allow the guest to
  12 * use more physical memory. Higher level policies can control the sizes
  13 * of balloons in VMs in order to manage physical memory resources.
  14 */
  15
  16//#define DEBUG
  17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  18
  19#include <linux/types.h>
  20#include <linux/io.h>
  21#include <linux/kernel.h>
  22#include <linux/mm.h>
  23#include <linux/vmalloc.h>
  24#include <linux/sched.h>
  25#include <linux/module.h>
  26#include <linux/workqueue.h>
  27#include <linux/debugfs.h>
  28#include <linux/seq_file.h>
  29#include <linux/rwsem.h>
  30#include <linux/slab.h>
  31#include <linux/spinlock.h>
  32#include <linux/balloon_compaction.h>
  33#include <linux/vmw_vmci_defs.h>
  34#include <linux/vmw_vmci_api.h>
  35#include <asm/hypervisor.h>
  36
  37MODULE_AUTHOR("VMware, Inc.");
  38MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
  39MODULE_ALIAS("dmi:*:svnVMware*:*");
  40MODULE_ALIAS("vmware_vmmemctl");
  41MODULE_LICENSE("GPL");
  42
  43static bool __read_mostly vmwballoon_shrinker_enable;
  44module_param(vmwballoon_shrinker_enable, bool, 0444);
  45MODULE_PARM_DESC(vmwballoon_shrinker_enable,
  46	"Enable non-cooperative out-of-memory protection. Disabled by default as it may degrade performance.");
  47
  48/* Delay in seconds after shrink before inflation. */
  49#define VMBALLOON_SHRINK_DELAY		(5)
  50
  51/* Maximum number of refused pages we accumulate during inflation cycle */
  52#define VMW_BALLOON_MAX_REFUSED		16
  53
  54/* Magic number for the balloon mount-point */
  55#define BALLOON_VMW_MAGIC		0x0ba11007
  56
  57/*
  58 * Hypervisor communication port definitions.
  59 */
  60#define VMW_BALLOON_HV_PORT		0x5670
  61#define VMW_BALLOON_HV_MAGIC		0x456c6d6f
  62#define VMW_BALLOON_GUEST_ID		1	/* Linux */
  63
  64enum vmwballoon_capabilities {
  65	/*
  66	 * Bit 0 is reserved and not associated to any capability.
  67	 */
  68	VMW_BALLOON_BASIC_CMDS			= (1 << 1),
  69	VMW_BALLOON_BATCHED_CMDS		= (1 << 2),
  70	VMW_BALLOON_BATCHED_2M_CMDS		= (1 << 3),
  71	VMW_BALLOON_SIGNALLED_WAKEUP_CMD	= (1 << 4),
  72	VMW_BALLOON_64_BIT_TARGET		= (1 << 5)
  73};
  74
  75#define VMW_BALLOON_CAPABILITIES_COMMON	(VMW_BALLOON_BASIC_CMDS \
  76					| VMW_BALLOON_BATCHED_CMDS \
  77					| VMW_BALLOON_BATCHED_2M_CMDS \
  78					| VMW_BALLOON_SIGNALLED_WAKEUP_CMD)
  79
  80#define VMW_BALLOON_2M_ORDER		(PMD_SHIFT - PAGE_SHIFT)
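/* On x86-64, PMD_SHIFT - PAGE_SHIFT = 21 - 12 = 9, so a 2MB page spans 512 4KB frames. */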
  81
  82/*
   83 * 64-bit targets are only supported on 64-bit kernels
  84 */
  85#ifdef CONFIG_64BIT
  86#define VMW_BALLOON_CAPABILITIES	(VMW_BALLOON_CAPABILITIES_COMMON \
  87					| VMW_BALLOON_64_BIT_TARGET)
  88#else
  89#define VMW_BALLOON_CAPABILITIES	VMW_BALLOON_CAPABILITIES_COMMON
  90#endif
  91
  92enum vmballoon_page_size_type {
  93	VMW_BALLOON_4K_PAGE,
  94	VMW_BALLOON_2M_PAGE,
  95	VMW_BALLOON_LAST_SIZE = VMW_BALLOON_2M_PAGE
  96};
  97
  98#define VMW_BALLOON_NUM_PAGE_SIZES	(VMW_BALLOON_LAST_SIZE + 1)
  99
 100static const char * const vmballoon_page_size_names[] = {
 101	[VMW_BALLOON_4K_PAGE]			= "4k",
 102	[VMW_BALLOON_2M_PAGE]			= "2M"
 103};
 104
 105enum vmballoon_op {
 106	VMW_BALLOON_INFLATE,
 107	VMW_BALLOON_DEFLATE
 108};
 109
 110enum vmballoon_op_stat_type {
 111	VMW_BALLOON_OP_STAT,
 112	VMW_BALLOON_OP_FAIL_STAT
 113};
 114
 115#define VMW_BALLOON_OP_STAT_TYPES	(VMW_BALLOON_OP_FAIL_STAT + 1)
 116
 117/**
 118 * enum vmballoon_cmd_type - backdoor commands.
 119 *
  120 * Availability of the commands is as follows:
 121 *
 122 * %VMW_BALLOON_CMD_START, %VMW_BALLOON_CMD_GET_TARGET and
 123 * %VMW_BALLOON_CMD_GUEST_ID are always available.
 124 *
 125 * If the host reports %VMW_BALLOON_BASIC_CMDS are supported then
 126 * %VMW_BALLOON_CMD_LOCK and %VMW_BALLOON_CMD_UNLOCK commands are available.
 127 *
 128 * If the host reports %VMW_BALLOON_BATCHED_CMDS are supported then
  129 * %VMW_BALLOON_CMD_BATCHED_LOCK and %VMW_BALLOON_CMD_BATCHED_UNLOCK commands
 130 * are available.
 131 *
 132 * If the host reports %VMW_BALLOON_BATCHED_2M_CMDS are supported then
 133 * %VMW_BALLOON_CMD_BATCHED_2M_LOCK and %VMW_BALLOON_CMD_BATCHED_2M_UNLOCK
 134 * are supported.
 135 *
  136 * If the host reports %VMW_BALLOON_SIGNALLED_WAKEUP_CMD is supported then
  137 * the %VMW_BALLOON_CMD_VMCI_DOORBELL_SET command is available.
 138 *
 139 * @VMW_BALLOON_CMD_START: Communicating supported version with the hypervisor.
 140 * @VMW_BALLOON_CMD_GET_TARGET: Gets the balloon target size.
 141 * @VMW_BALLOON_CMD_LOCK: Informs the hypervisor about a ballooned page.
 142 * @VMW_BALLOON_CMD_UNLOCK: Informs the hypervisor about a page that is about
 143 *			    to be deflated from the balloon.
 144 * @VMW_BALLOON_CMD_GUEST_ID: Informs the hypervisor about the type of OS that
 145 *			      runs in the VM.
 146 * @VMW_BALLOON_CMD_BATCHED_LOCK: Inform the hypervisor about a batch of
 147 *				  ballooned pages (up to 512).
 148 * @VMW_BALLOON_CMD_BATCHED_UNLOCK: Inform the hypervisor about a batch of
 149 *				  pages that are about to be deflated from the
 150 *				  balloon (up to 512).
 151 * @VMW_BALLOON_CMD_BATCHED_2M_LOCK: Similar to @VMW_BALLOON_CMD_BATCHED_LOCK
 152 *				     for 2MB pages.
 153 * @VMW_BALLOON_CMD_BATCHED_2M_UNLOCK: Similar to
 154 *				       @VMW_BALLOON_CMD_BATCHED_UNLOCK for 2MB
 155 *				       pages.
 156 * @VMW_BALLOON_CMD_VMCI_DOORBELL_SET: A command to set doorbell notification
 157 *				       that would be invoked when the balloon
 158 *				       size changes.
 159 * @VMW_BALLOON_CMD_LAST: Value of the last command.
 160 */
 161enum vmballoon_cmd_type {
 162	VMW_BALLOON_CMD_START,
 163	VMW_BALLOON_CMD_GET_TARGET,
 164	VMW_BALLOON_CMD_LOCK,
 165	VMW_BALLOON_CMD_UNLOCK,
 166	VMW_BALLOON_CMD_GUEST_ID,
 167	/* No command 5 */
 168	VMW_BALLOON_CMD_BATCHED_LOCK = 6,
 169	VMW_BALLOON_CMD_BATCHED_UNLOCK,
 170	VMW_BALLOON_CMD_BATCHED_2M_LOCK,
 171	VMW_BALLOON_CMD_BATCHED_2M_UNLOCK,
 172	VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
 173	VMW_BALLOON_CMD_LAST = VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
 174};
 175
 176#define VMW_BALLOON_CMD_NUM	(VMW_BALLOON_CMD_LAST + 1)
 177
 178enum vmballoon_error_codes {
 179	VMW_BALLOON_SUCCESS,
 180	VMW_BALLOON_ERROR_CMD_INVALID,
 181	VMW_BALLOON_ERROR_PPN_INVALID,
 182	VMW_BALLOON_ERROR_PPN_LOCKED,
 183	VMW_BALLOON_ERROR_PPN_UNLOCKED,
 184	VMW_BALLOON_ERROR_PPN_PINNED,
 185	VMW_BALLOON_ERROR_PPN_NOTNEEDED,
 186	VMW_BALLOON_ERROR_RESET,
 187	VMW_BALLOON_ERROR_BUSY
 188};
 189
 190#define VMW_BALLOON_SUCCESS_WITH_CAPABILITIES	(0x03000000)
 191
 192#define VMW_BALLOON_CMD_WITH_TARGET_MASK			\
 193	((1UL << VMW_BALLOON_CMD_GET_TARGET)		|	\
 194	 (1UL << VMW_BALLOON_CMD_LOCK)			|	\
 195	 (1UL << VMW_BALLOON_CMD_UNLOCK)		|	\
 196	 (1UL << VMW_BALLOON_CMD_BATCHED_LOCK)		|	\
 197	 (1UL << VMW_BALLOON_CMD_BATCHED_UNLOCK)	|	\
 198	 (1UL << VMW_BALLOON_CMD_BATCHED_2M_LOCK)	|	\
 199	 (1UL << VMW_BALLOON_CMD_BATCHED_2M_UNLOCK))
 200
 201static const char * const vmballoon_cmd_names[] = {
 202	[VMW_BALLOON_CMD_START]			= "start",
 203	[VMW_BALLOON_CMD_GET_TARGET]		= "target",
 204	[VMW_BALLOON_CMD_LOCK]			= "lock",
 205	[VMW_BALLOON_CMD_UNLOCK]		= "unlock",
 206	[VMW_BALLOON_CMD_GUEST_ID]		= "guestType",
 207	[VMW_BALLOON_CMD_BATCHED_LOCK]		= "batchLock",
 208	[VMW_BALLOON_CMD_BATCHED_UNLOCK]	= "batchUnlock",
 209	[VMW_BALLOON_CMD_BATCHED_2M_LOCK]	= "2m-lock",
 210	[VMW_BALLOON_CMD_BATCHED_2M_UNLOCK]	= "2m-unlock",
 211	[VMW_BALLOON_CMD_VMCI_DOORBELL_SET]	= "doorbellSet"
 212};
 213
 214enum vmballoon_stat_page {
 215	VMW_BALLOON_PAGE_STAT_ALLOC,
 216	VMW_BALLOON_PAGE_STAT_ALLOC_FAIL,
 217	VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC,
 218	VMW_BALLOON_PAGE_STAT_REFUSED_FREE,
 219	VMW_BALLOON_PAGE_STAT_FREE,
 220	VMW_BALLOON_PAGE_STAT_LAST = VMW_BALLOON_PAGE_STAT_FREE
 221};
 222
 223#define VMW_BALLOON_PAGE_STAT_NUM	(VMW_BALLOON_PAGE_STAT_LAST + 1)
 224
 225enum vmballoon_stat_general {
 226	VMW_BALLOON_STAT_TIMER,
 227	VMW_BALLOON_STAT_DOORBELL,
 228	VMW_BALLOON_STAT_RESET,
 229	VMW_BALLOON_STAT_SHRINK,
 230	VMW_BALLOON_STAT_SHRINK_FREE,
 231	VMW_BALLOON_STAT_LAST = VMW_BALLOON_STAT_SHRINK_FREE
 232};
 233
 234#define VMW_BALLOON_STAT_NUM		(VMW_BALLOON_STAT_LAST + 1)
 235
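/*
 * vmw_balloon_batching selects between batched and single-page hypervisor
 * commands; balloon_stat_enabled gates statistics collection, which is only
 * enabled the first time the debugfs counters are read.
 */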
 236static DEFINE_STATIC_KEY_TRUE(vmw_balloon_batching);
 237static DEFINE_STATIC_KEY_FALSE(balloon_stat_enabled);
 238
 239struct vmballoon_ctl {
 240	struct list_head pages;
 241	struct list_head refused_pages;
 242	struct list_head prealloc_pages;
 243	unsigned int n_refused_pages;
 244	unsigned int n_pages;
 245	enum vmballoon_page_size_type page_size;
 246	enum vmballoon_op op;
 247};
 248
 249/**
 250 * struct vmballoon_batch_entry - a batch entry for lock or unlock.
 251 *
 252 * @status: the status of the operation, which is written by the hypervisor.
 253 * @reserved: reserved for future use. Must be set to zero.
 254 * @pfn: the physical frame number of the page to be locked or unlocked.
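 *
 * With 4KB base pages (PAGE_SHIFT == 12), the three fields add up to exactly
 * 64 bits: 5 status bits, 7 reserved bits and 52 PFN bits per entry.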
 255 */
 256struct vmballoon_batch_entry {
 257	u64 status : 5;
 258	u64 reserved : PAGE_SHIFT - 5;
 259	u64 pfn : 52;
 260} __packed;
 261
 262struct vmballoon {
 263	/**
 264	 * @max_page_size: maximum supported page size for ballooning.
 265	 *
 266	 * Protected by @conf_sem
 267	 */
 268	enum vmballoon_page_size_type max_page_size;
 269
 270	/**
 271	 * @size: balloon actual size in basic page size (frames).
 272	 *
  273	 * While we currently do not support sizes bigger than 32 bits, use
  274	 * 64 bits in preparation for future support.
 275	 */
 276	atomic64_t size;
 277
 278	/**
 279	 * @target: balloon target size in basic page size (frames).
 280	 *
 281	 * We do not protect the target under the assumption that setting the
 282	 * value is always done through a single write. If this assumption ever
 283	 * breaks, we would have to use X_ONCE for accesses, and suffer the less
 284	 * optimized code. Although we may read stale target value if multiple
 285	 * accesses happen at once, the performance impact should be minor.
 286	 */
 287	unsigned long target;
 288
 289	/**
 290	 * @reset_required: reset flag
 291	 *
 292	 * Setting this flag may introduce races, but the code is expected to
 293	 * handle them gracefully. In the worst case, another operation will
 294	 * fail as reset did not take place. Clearing the flag is done while
 295	 * holding @conf_sem for write.
 296	 */
 297	bool reset_required;
 298
 299	/**
 300	 * @capabilities: hypervisor balloon capabilities.
 301	 *
 302	 * Protected by @conf_sem.
 303	 */
 304	unsigned long capabilities;
 305
 306	/**
 307	 * @batch_page: pointer to communication batch page.
 308	 *
 309	 * When batching is used, batch_page points to a page, which holds up to
 310	 * %VMW_BALLOON_BATCH_MAX_PAGES entries for locking or unlocking.
 311	 */
 312	struct vmballoon_batch_entry *batch_page;
 313
 314	/**
 315	 * @batch_max_pages: maximum pages that can be locked/unlocked.
 316	 *
 317	 * Indicates the number of pages that the hypervisor can lock or unlock
 318	 * at once, according to whether batching is enabled. If batching is
  319	 * disabled, only a single page can be locked/unlocked on each operation.
 320	 *
 321	 * Protected by @conf_sem.
 322	 */
 323	unsigned int batch_max_pages;
 324
 325	/**
 326	 * @page: page to be locked/unlocked by the hypervisor
 327	 *
 328	 * @page is only used when batching is disabled and a single page is
 329	 * reclaimed on each iteration.
 330	 *
 331	 * Protected by @comm_lock.
 332	 */
 333	struct page *page;
 334
 335	/**
 336	 * @shrink_timeout: timeout until the next inflation.
 337	 *
  338	 * After a shrink event, indicates the time in jiffies after which
 339	 * inflation is allowed again. Can be written concurrently with reads,
 340	 * so must use READ_ONCE/WRITE_ONCE when accessing.
 341	 */
 342	unsigned long shrink_timeout;
 343
 344	/* statistics */
 345	struct vmballoon_stats *stats;
 346
 347	/**
 348	 * @b_dev_info: balloon device information descriptor.
 349	 */
 350	struct balloon_dev_info b_dev_info;
 351
 352	struct delayed_work dwork;
 353
 354	/**
  355	 * @huge_pages: list of the inflated 2MB pages.
 356	 *
 357	 * Protected by @b_dev_info.pages_lock .
 358	 */
 359	struct list_head huge_pages;
 360
 361	/**
  362	 * @vmci_doorbell: handle of the VMCI doorbell used for resize notifications.
 363	 *
 364	 * Protected by @conf_sem.
 365	 */
 366	struct vmci_handle vmci_doorbell;
 367
 368	/**
 369	 * @conf_sem: semaphore to protect the configuration and the statistics.
 370	 */
 371	struct rw_semaphore conf_sem;
 372
 373	/**
 374	 * @comm_lock: lock to protect the communication with the host.
 375	 *
 376	 * Lock ordering: @conf_sem -> @comm_lock .
 377	 */
 378	spinlock_t comm_lock;
 379
 380	/**
 381	 * @shrinker: shrinker interface that is used to avoid over-inflation.
 382	 */
 383	struct shrinker *shrinker;
 384};
 385
 386static struct vmballoon balloon;
 387
 388struct vmballoon_stats {
 389	/* timer / doorbell operations */
 390	atomic64_t general_stat[VMW_BALLOON_STAT_NUM];
 391
 392	/* allocation statistics for huge and small pages */
 393	atomic64_t
 394	       page_stat[VMW_BALLOON_PAGE_STAT_NUM][VMW_BALLOON_NUM_PAGE_SIZES];
 395
 396	/* Monitor operations: total operations, and failures */
 397	atomic64_t ops[VMW_BALLOON_CMD_NUM][VMW_BALLOON_OP_STAT_TYPES];
 398};
 399
 400static inline bool is_vmballoon_stats_on(void)
 401{
 402	return IS_ENABLED(CONFIG_DEBUG_FS) &&
 403		static_branch_unlikely(&balloon_stat_enabled);
 404}
 405
 406static inline void vmballoon_stats_op_inc(struct vmballoon *b, unsigned int op,
 407					  enum vmballoon_op_stat_type type)
 408{
 409	if (is_vmballoon_stats_on())
 410		atomic64_inc(&b->stats->ops[op][type]);
 411}
 412
 413static inline void vmballoon_stats_gen_inc(struct vmballoon *b,
 414					   enum vmballoon_stat_general stat)
 415{
 416	if (is_vmballoon_stats_on())
 417		atomic64_inc(&b->stats->general_stat[stat]);
 418}
 419
 420static inline void vmballoon_stats_gen_add(struct vmballoon *b,
 421					   enum vmballoon_stat_general stat,
 422					   unsigned int val)
 423{
 424	if (is_vmballoon_stats_on())
 425		atomic64_add(val, &b->stats->general_stat[stat]);
 426}
 427
 428static inline void vmballoon_stats_page_inc(struct vmballoon *b,
 429					    enum vmballoon_stat_page stat,
 430					    enum vmballoon_page_size_type size)
 431{
 432	if (is_vmballoon_stats_on())
 433		atomic64_inc(&b->stats->page_stat[stat][size]);
 434}
 435
 436static inline void vmballoon_stats_page_add(struct vmballoon *b,
 437					    enum vmballoon_stat_page stat,
 438					    enum vmballoon_page_size_type size,
 439					    unsigned int val)
 440{
 441	if (is_vmballoon_stats_on())
 442		atomic64_add(val, &b->stats->page_stat[stat][size]);
 443}
 444
 445static inline unsigned long
 446__vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
 447		unsigned long arg2, unsigned long *result)
 448{
 449	unsigned long status, dummy1, dummy2, dummy3, local_result;
 450
 451	vmballoon_stats_op_inc(b, cmd, VMW_BALLOON_OP_STAT);
 452
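	/*
	 * Backdoor call to the hypervisor: the magic value is passed in %eax,
	 * the command in %ecx, the I/O port in %edx, and the two arguments in
	 * %ebx and %esi. The status is returned in %eax and the secondary
	 * result in %ebx (or in %ecx for the START command).
	 */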
 453	asm volatile ("inl %%dx" :
 454		"=a"(status),
 455		"=c"(dummy1),
 456		"=d"(dummy2),
 457		"=b"(local_result),
 458		"=S"(dummy3) :
 459		"0"(VMW_BALLOON_HV_MAGIC),
 460		"1"(cmd),
 461		"2"(VMW_BALLOON_HV_PORT),
 462		"3"(arg1),
 463		"4"(arg2) :
 464		"memory");
 465
 466	/* update the result if needed */
 467	if (result)
 468		*result = (cmd == VMW_BALLOON_CMD_START) ? dummy1 :
 469							   local_result;
 470
 471	/* update target when applicable */
 472	if (status == VMW_BALLOON_SUCCESS &&
 473	    ((1ul << cmd) & VMW_BALLOON_CMD_WITH_TARGET_MASK))
 474		WRITE_ONCE(b->target, local_result);
 475
 476	if (status != VMW_BALLOON_SUCCESS &&
 477	    status != VMW_BALLOON_SUCCESS_WITH_CAPABILITIES) {
 478		vmballoon_stats_op_inc(b, cmd, VMW_BALLOON_OP_FAIL_STAT);
 479		pr_debug("%s: %s [0x%lx,0x%lx) failed, returned %ld\n",
 480			 __func__, vmballoon_cmd_names[cmd], arg1, arg2,
 481			 status);
 482	}
 483
 484	/* mark reset required accordingly */
 485	if (status == VMW_BALLOON_ERROR_RESET)
 486		b->reset_required = true;
 487
 488	return status;
 489}
 490
 491static __always_inline unsigned long
 492vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
 493	      unsigned long arg2)
 494{
 495	unsigned long dummy;
 496
 497	return __vmballoon_cmd(b, cmd, arg1, arg2, &dummy);
 498}
 499
 500/*
 501 * Send "start" command to the host, communicating supported version
 502 * of the protocol.
 503 */
 504static int vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
 505{
 506	unsigned long status, capabilities;
 507
 508	status = __vmballoon_cmd(b, VMW_BALLOON_CMD_START, req_caps, 0,
 509				 &capabilities);
 510
 511	switch (status) {
 512	case VMW_BALLOON_SUCCESS_WITH_CAPABILITIES:
 513		b->capabilities = capabilities;
 514		break;
 515	case VMW_BALLOON_SUCCESS:
 516		b->capabilities = VMW_BALLOON_BASIC_CMDS;
 517		break;
 518	default:
 519		return -EIO;
 520	}
 521
 522	/*
 523	 * 2MB pages are only supported with batching. If batching is for some
 524	 * reason disabled, do not use 2MB pages, since otherwise the legacy
 525	 * mechanism is used with 2MB pages, causing a failure.
 526	 */
 527	b->max_page_size = VMW_BALLOON_4K_PAGE;
 528	if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) &&
 529	    (b->capabilities & VMW_BALLOON_BATCHED_CMDS))
 530		b->max_page_size = VMW_BALLOON_2M_PAGE;
 531
 532
 533	return 0;
 534}
 535
 536/**
 537 * vmballoon_send_guest_id - communicate guest type to the host.
 538 *
 539 * @b: pointer to the balloon.
 540 *
 541 * Communicate guest type to the host so that it can adjust ballooning
 542 * algorithm to the one most appropriate for the guest. This command
 543 * is normally issued after sending "start" command and is part of
 544 * standard reset sequence.
 545 *
 546 * Return: zero on success or appropriate error code.
 547 */
 548static int vmballoon_send_guest_id(struct vmballoon *b)
 549{
 550	unsigned long status;
 551
 552	status = vmballoon_cmd(b, VMW_BALLOON_CMD_GUEST_ID,
 553			       VMW_BALLOON_GUEST_ID, 0);
 554
 555	return status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
 556}
 557
 558/**
 559 * vmballoon_page_order() - return the order of the page
 560 * @page_size: the size of the page.
 561 *
 562 * Return: the allocation order.
 563 */
 564static inline
 565unsigned int vmballoon_page_order(enum vmballoon_page_size_type page_size)
 566{
 567	return page_size == VMW_BALLOON_2M_PAGE ? VMW_BALLOON_2M_ORDER : 0;
 568}
 569
 570/**
 571 * vmballoon_page_in_frames() - returns the number of frames in a page.
 572 * @page_size: the size of the page.
 573 *
 574 * Return: the number of 4k frames.
 575 */
 576static inline unsigned int
 577vmballoon_page_in_frames(enum vmballoon_page_size_type page_size)
 578{
 579	return 1 << vmballoon_page_order(page_size);
 580}
 581
 582/**
 583 * vmballoon_mark_page_offline() - mark a page as offline
 584 * @page: pointer for the page.
 585 * @page_size: the size of the page.
 586 */
 587static void
 588vmballoon_mark_page_offline(struct page *page,
 589			    enum vmballoon_page_size_type page_size)
 590{
 591	int i;
 592
 593	for (i = 0; i < vmballoon_page_in_frames(page_size); i++)
 594		__SetPageOffline(page + i);
 595}
 596
 597/**
 598 * vmballoon_mark_page_online() - mark a page as online
 599 * @page: pointer for the page.
 600 * @page_size: the size of the page.
 601 */
 602static void
 603vmballoon_mark_page_online(struct page *page,
 604			   enum vmballoon_page_size_type page_size)
 605{
 606	int i;
 607
 608	for (i = 0; i < vmballoon_page_in_frames(page_size); i++)
 609		__ClearPageOffline(page + i);
 610}
 611
 612/**
 613 * vmballoon_send_get_target() - Retrieve desired balloon size from the host.
 614 *
 615 * @b: pointer to the balloon.
 616 *
  617 * Return: zero on success, -EINVAL if the limit does not fit in 32 bits as
  618 * required by the host-guest protocol, or -EIO if an error occurred while
  619 * communicating with the host.
 620 */
 621static int vmballoon_send_get_target(struct vmballoon *b)
 622{
 623	unsigned long status;
 624	unsigned long limit;
 625
 626	limit = totalram_pages();
 627
 628	/* Ensure limit fits in 32-bits if 64-bit targets are not supported */
 629	if (!(b->capabilities & VMW_BALLOON_64_BIT_TARGET) &&
 630	    limit != (u32)limit)
 631		return -EINVAL;
 632
 633	status = vmballoon_cmd(b, VMW_BALLOON_CMD_GET_TARGET, limit, 0);
 634
 635	return status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
 636}
 637
 638/**
 639 * vmballoon_alloc_page_list - allocates a list of pages.
 640 *
 641 * @b: pointer to the balloon.
 642 * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
 643 * @req_n_pages: the number of requested pages.
 644 *
  645 * Tries to allocate @req_n_pages pages. Adds them to the list of balloon pages
  646 * in @ctl.pages and updates @ctl.n_pages to reflect the number actually allocated.
 647 *
 648 * Return: zero on success or error code otherwise.
 649 */
 650static int vmballoon_alloc_page_list(struct vmballoon *b,
 651				     struct vmballoon_ctl *ctl,
 652				     unsigned int req_n_pages)
 653{
 654	struct page *page;
 655	unsigned int i;
 656
 657	for (i = 0; i < req_n_pages; i++) {
 658		/*
  659		 * First check if we happen to have pages that were allocated
  660		 * before. This happens when a 2MB page was rejected during
  661		 * inflation by the hypervisor and then split into 4KB pages.
 662		 */
 663		if (!list_empty(&ctl->prealloc_pages)) {
 664			page = list_first_entry(&ctl->prealloc_pages,
 665						struct page, lru);
 666			list_del(&page->lru);
 667		} else {
 668			if (ctl->page_size == VMW_BALLOON_2M_PAGE)
 669				page = alloc_pages(__GFP_HIGHMEM|__GFP_NOWARN|
 670					__GFP_NOMEMALLOC, VMW_BALLOON_2M_ORDER);
 671			else
 672				page = balloon_page_alloc();
 673
 674			vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC,
 675						 ctl->page_size);
 676		}
 677
 678		if (page) {
 679			/* Success. Add the page to the list and continue. */
 680			list_add(&page->lru, &ctl->pages);
 681			continue;
 682		}
 683
 684		/* Allocation failed. Update statistics and stop. */
 685		vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC_FAIL,
 686					 ctl->page_size);
 687		break;
 688	}
 689
 690	ctl->n_pages = i;
 691
 692	return req_n_pages == ctl->n_pages ? 0 : -ENOMEM;
 693}
 694
 695/**
 696 * vmballoon_handle_one_result - Handle lock/unlock result for a single page.
 697 *
 698 * @b: pointer for %struct vmballoon.
 699 * @page: pointer for the page whose result should be handled.
 700 * @page_size: size of the page.
 701 * @status: status of the operation as provided by the hypervisor.
 702 */
 703static int vmballoon_handle_one_result(struct vmballoon *b, struct page *page,
 704				       enum vmballoon_page_size_type page_size,
 705				       unsigned long status)
 706{
 707	/* On success do nothing. The page is already on the balloon list. */
 708	if (likely(status == VMW_BALLOON_SUCCESS))
 709		return 0;
 710
 711	pr_debug("%s: failed comm pfn %lx status %lu page_size %s\n", __func__,
 712		 page_to_pfn(page), status,
 713		 vmballoon_page_size_names[page_size]);
 714
 715	/* Error occurred */
 716	vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC,
 717				 page_size);
 718
 719	return -EIO;
 720}
 721
 722/**
 723 * vmballoon_status_page - returns the status of (un)lock operation
 724 *
 725 * @b: pointer to the balloon.
 726 * @idx: index for the page for which the operation is performed.
 727 * @p: pointer to where the page struct is returned.
 728 *
 729 * Following a lock or unlock operation, returns the status of the operation for
  730 * an individual page. Provides the page that the operation was performed on in
  731 * the @p argument.
 732 *
 733 * Returns: The status of a lock or unlock operation for an individual page.
 734 */
 735static unsigned long vmballoon_status_page(struct vmballoon *b, int idx,
 736					   struct page **p)
 737{
 738	if (static_branch_likely(&vmw_balloon_batching)) {
 739		/* batching mode */
 740		*p = pfn_to_page(b->batch_page[idx].pfn);
 741		return b->batch_page[idx].status;
 742	}
 743
 744	/* non-batching mode */
 745	*p = b->page;
 746
 747	/*
 748	 * If a failure occurs, the indication will be provided in the status
 749	 * of the entire operation, which is considered before the individual
 750	 * page status. So for non-batching mode, the indication is always of
 751	 * success.
 752	 */
 753	return VMW_BALLOON_SUCCESS;
 754}
 755
 756/**
 757 * vmballoon_lock_op - notifies the host about inflated/deflated pages.
 758 * @b: pointer to the balloon.
 759 * @num_pages: number of inflated/deflated pages.
 760 * @page_size: size of the page.
 761 * @op: the type of operation (lock or unlock).
 762 *
  763 * Notify the host about page(s) that were ballooned (or removed from the
  764 * balloon) so that the host can use them without fear that the guest will need
  765 * them (or can stop using them since the VM does). The host may reject some
  766 * pages; we need to check the return value and maybe submit a different page.
  767 * The pages that are inflated/deflated are pointed to by @b->page.
 768 *
 769 * Return: result as provided by the hypervisor.
 770 */
 771static unsigned long vmballoon_lock_op(struct vmballoon *b,
 772				       unsigned int num_pages,
 773				       enum vmballoon_page_size_type page_size,
 774				       enum vmballoon_op op)
 775{
 776	unsigned long cmd, pfn;
 777
 778	lockdep_assert_held(&b->comm_lock);
 779
 780	if (static_branch_likely(&vmw_balloon_batching)) {
 781		if (op == VMW_BALLOON_INFLATE)
 782			cmd = page_size == VMW_BALLOON_2M_PAGE ?
 783				VMW_BALLOON_CMD_BATCHED_2M_LOCK :
 784				VMW_BALLOON_CMD_BATCHED_LOCK;
 785		else
 786			cmd = page_size == VMW_BALLOON_2M_PAGE ?
 787				VMW_BALLOON_CMD_BATCHED_2M_UNLOCK :
 788				VMW_BALLOON_CMD_BATCHED_UNLOCK;
 789
 790		pfn = PHYS_PFN(virt_to_phys(b->batch_page));
 791	} else {
 792		cmd = op == VMW_BALLOON_INFLATE ? VMW_BALLOON_CMD_LOCK :
 793						  VMW_BALLOON_CMD_UNLOCK;
 794		pfn = page_to_pfn(b->page);
 795
 796		/* In non-batching mode, PFNs must fit in 32-bit */
 797		if (unlikely(pfn != (u32)pfn))
 798			return VMW_BALLOON_ERROR_PPN_INVALID;
 799	}
 800
 801	return vmballoon_cmd(b, cmd, pfn, num_pages);
 802}
 803
 804/**
 805 * vmballoon_add_page - adds a page towards lock/unlock operation.
 806 *
 807 * @b: pointer to the balloon.
 808 * @idx: index of the page to be ballooned in this batch.
 809 * @p: pointer to the page that is about to be ballooned.
 810 *
 811 * Adds the page to be ballooned. Must be called while holding @comm_lock.
 812 */
 813static void vmballoon_add_page(struct vmballoon *b, unsigned int idx,
 814			       struct page *p)
 815{
 816	lockdep_assert_held(&b->comm_lock);
 817
 818	if (static_branch_likely(&vmw_balloon_batching))
 819		b->batch_page[idx] = (struct vmballoon_batch_entry)
 820					{ .pfn = page_to_pfn(p) };
 821	else
 822		b->page = p;
 823}
 824
 825/**
 826 * vmballoon_lock - lock or unlock a batch of pages.
 827 *
 828 * @b: pointer to the balloon.
 829 * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
 830 *
  831 * Notifies the host about ballooned pages (after inflation or deflation,
  832 * according to @ctl). If the host rejects a page, it is put on the
  833 * @ctl refused_pages list. These refused pages are then released when moving
  834 * to the next page size.
 835 *
  836 * Note that we neither free any pages here nor put them back on the ballooned
  837 * pages list. Instead we queue them for later processing. We do that for several
 838 * reasons. First, we do not want to free the page under the lock. Second, it
 839 * allows us to unify the handling of lock and unlock. In the inflate case, the
 840 * caller will check if there are too many refused pages and release them.
 841 * Although it is not identical to the past behavior, it should not affect
 842 * performance.
 843 */
 844static int vmballoon_lock(struct vmballoon *b, struct vmballoon_ctl *ctl)
 845{
 846	unsigned long batch_status;
 847	struct page *page;
 848	unsigned int i, num_pages;
 849
 850	num_pages = ctl->n_pages;
 851	if (num_pages == 0)
 852		return 0;
 853
 854	/* communication with the host is done under the communication lock */
 855	spin_lock(&b->comm_lock);
 856
 857	i = 0;
 858	list_for_each_entry(page, &ctl->pages, lru)
 859		vmballoon_add_page(b, i++, page);
 860
 861	batch_status = vmballoon_lock_op(b, ctl->n_pages, ctl->page_size,
 862					 ctl->op);
 863
 864	/*
 865	 * Iterate over the pages in the provided list. Since we are changing
  866	 * @ctl->n_pages, we save the original value in @num_pages and
  867	 * use it to bound the loop.
 868	 */
 869	for (i = 0; i < num_pages; i++) {
 870		unsigned long status;
 871
 872		status = vmballoon_status_page(b, i, &page);
 873
 874		/*
  875		 * Failure of the whole batch overrides the results of a
  876		 * single operation.
 877		 */
 878		if (batch_status != VMW_BALLOON_SUCCESS)
 879			status = batch_status;
 880
 881		/* Continue if no error happened */
 882		if (!vmballoon_handle_one_result(b, page, ctl->page_size,
 883						 status))
 884			continue;
 885
 886		/*
  887		 * An error happened. Move the page to the refused list and
  888		 * update the page counters.
 889		 */
 890		list_move(&page->lru, &ctl->refused_pages);
 891		ctl->n_pages--;
 892		ctl->n_refused_pages++;
 893	}
 894
 895	spin_unlock(&b->comm_lock);
 896
 897	return batch_status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
 898}
 899
 900/**
 901 * vmballoon_release_page_list() - Releases a page list
 902 *
 903 * @page_list: list of pages to release.
 904 * @n_pages: pointer to the number of pages.
 905 * @page_size: whether the pages in the list are 2MB (or else 4KB).
 906 *
 907 * Releases the list of pages and zeros the number of pages.
 908 */
 909static void vmballoon_release_page_list(struct list_head *page_list,
 910				       int *n_pages,
 911				       enum vmballoon_page_size_type page_size)
 912{
 913	struct page *page, *tmp;
 914
 915	list_for_each_entry_safe(page, tmp, page_list, lru) {
 916		list_del(&page->lru);
 917		__free_pages(page, vmballoon_page_order(page_size));
 918	}
 919
 920	if (n_pages)
 921		*n_pages = 0;
 922}
 923
 924
 925/*
 926 * Release pages that were allocated while attempting to inflate the
 927 * balloon but were refused by the host for one reason or another.
 928 */
 929static void vmballoon_release_refused_pages(struct vmballoon *b,
 930					    struct vmballoon_ctl *ctl)
 931{
 932	vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_REFUSED_FREE,
 933				 ctl->page_size);
 934
 935	vmballoon_release_page_list(&ctl->refused_pages, &ctl->n_refused_pages,
 936				    ctl->page_size);
 937}
 938
 939/**
 940 * vmballoon_change - retrieve the required balloon change
 941 *
 942 * @b: pointer for the balloon.
 943 *
 944 * Return: the required change for the balloon size. A positive number
  945 * indicates inflation, a negative number indicates deflation.
 946 */
 947static int64_t vmballoon_change(struct vmballoon *b)
 948{
 949	int64_t size, target;
 950
 951	size = atomic64_read(&b->size);
 952	target = READ_ONCE(b->target);
 953
 954	/*
  955	 * We must cast to signed 64-bit first because of the integer sizes;
  956	 * otherwise we might get huge positives instead of negatives.
 957	 */
 958
 959	if (b->reset_required)
 960		return 0;
 961
 962	/* consider a 2MB slack on deflate, unless the balloon is emptied */
 963	if (target < size && target != 0 &&
 964	    size - target < vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE))
 965		return 0;
 966
  967	/* If an out-of-memory event recently occurred, inflation is disallowed. */
 968	if (target > size && time_before(jiffies, READ_ONCE(b->shrink_timeout)))
 969		return 0;
 970
 971	return target - size;
 972}
 973
 974/**
 975 * vmballoon_enqueue_page_list() - Enqueues list of pages after inflation.
 976 *
 977 * @b: pointer to balloon.
 978 * @pages: list of pages to enqueue.
 979 * @n_pages: pointer to number of pages in list. The value is zeroed.
 980 * @page_size: whether the pages are 2MB or 4KB pages.
 981 *
  982 * Enqueues the provided list of pages on the ballooned page list, clears the
  983 * list and zeroes the number of pages that was provided.
 984 */
 985static void vmballoon_enqueue_page_list(struct vmballoon *b,
 986					struct list_head *pages,
 987					unsigned int *n_pages,
 988					enum vmballoon_page_size_type page_size)
 989{
 990	unsigned long flags;
 991	struct page *page;
 992
 993	if (page_size == VMW_BALLOON_4K_PAGE) {
 994		balloon_page_list_enqueue(&b->b_dev_info, pages);
 995	} else {
 996		/*
 997		 * Keep the huge pages in a local list which is not available
 998		 * for the balloon compaction mechanism.
 999		 */
1000		spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
1001
1002		list_for_each_entry(page, pages, lru) {
1003			vmballoon_mark_page_offline(page, VMW_BALLOON_2M_PAGE);
1004		}
1005
1006		list_splice_init(pages, &b->huge_pages);
1007		__count_vm_events(BALLOON_INFLATE, *n_pages *
1008				  vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE));
1009		spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
1010	}
1011
1012	*n_pages = 0;
1013}
1014
1015/**
1016 * vmballoon_dequeue_page_list() - Dequeues page lists for deflation.
1017 *
1018 * @b: pointer to balloon.
 1019 * @pages: list onto which the dequeued pages are added.
1020 * @n_pages: pointer to number of pages in list. The value is zeroed.
1021 * @page_size: whether the pages are 2MB or 4KB pages.
1022 * @n_req_pages: the number of requested pages.
1023 *
1024 * Dequeues the number of requested pages from the balloon for deflation. The
1025 * number of dequeued pages may be lower, if not enough pages in the requested
1026 * size are available.
1027 */
1028static void vmballoon_dequeue_page_list(struct vmballoon *b,
1029					struct list_head *pages,
1030					unsigned int *n_pages,
1031					enum vmballoon_page_size_type page_size,
1032					unsigned int n_req_pages)
1033{
1034	struct page *page, *tmp;
1035	unsigned int i = 0;
1036	unsigned long flags;
1037
1038	/* In the case of 4k pages, use the compaction infrastructure */
1039	if (page_size == VMW_BALLOON_4K_PAGE) {
1040		*n_pages = balloon_page_list_dequeue(&b->b_dev_info, pages,
1041						     n_req_pages);
1042		return;
1043	}
1044
1045	/* 2MB pages */
1046	spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
1047	list_for_each_entry_safe(page, tmp, &b->huge_pages, lru) {
1048		vmballoon_mark_page_online(page, VMW_BALLOON_2M_PAGE);
1049
1050		list_move(&page->lru, pages);
1051		if (++i == n_req_pages)
1052			break;
1053	}
1054
1055	__count_vm_events(BALLOON_DEFLATE,
1056			  i * vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE));
1057	spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
1058	*n_pages = i;
1059}
1060
1061/**
1062 * vmballoon_split_refused_pages() - Split the 2MB refused pages to 4k.
1063 *
1064 * If inflation of 2MB pages was denied by the hypervisor, it is likely to be
 1065 * due to one or a few 4KB pages. These 2MB pages may keep being allocated
 1066 * and then refused. To prevent this case, this function splits the refused
 1067 * pages into 4KB pages and adds them to the @prealloc_pages list.
1068 *
1069 * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
1070 */
1071static void vmballoon_split_refused_pages(struct vmballoon_ctl *ctl)
1072{
1073	struct page *page, *tmp;
1074	unsigned int i, order;
1075
1076	order = vmballoon_page_order(ctl->page_size);
1077
1078	list_for_each_entry_safe(page, tmp, &ctl->refused_pages, lru) {
1079		list_del(&page->lru);
1080		split_page(page, order);
1081		for (i = 0; i < (1 << order); i++)
1082			list_add(&page[i].lru, &ctl->prealloc_pages);
1083	}
1084	ctl->n_refused_pages = 0;
1085}
1086
1087/**
1088 * vmballoon_inflate() - Inflate the balloon towards its target size.
1089 *
1090 * @b: pointer to the balloon.
1091 */
1092static void vmballoon_inflate(struct vmballoon *b)
1093{
1094	int64_t to_inflate_frames;
1095	struct vmballoon_ctl ctl = {
1096		.pages = LIST_HEAD_INIT(ctl.pages),
1097		.refused_pages = LIST_HEAD_INIT(ctl.refused_pages),
1098		.prealloc_pages = LIST_HEAD_INIT(ctl.prealloc_pages),
1099		.page_size = b->max_page_size,
1100		.op = VMW_BALLOON_INFLATE
1101	};
1102
1103	while ((to_inflate_frames = vmballoon_change(b)) > 0) {
1104		unsigned int to_inflate_pages, page_in_frames;
1105		int alloc_error, lock_error = 0;
1106
1107		VM_BUG_ON(!list_empty(&ctl.pages));
1108		VM_BUG_ON(ctl.n_pages != 0);
1109
1110		page_in_frames = vmballoon_page_in_frames(ctl.page_size);
1111
1112		to_inflate_pages = min_t(unsigned long, b->batch_max_pages,
1113					 DIV_ROUND_UP_ULL(to_inflate_frames,
1114							  page_in_frames));
1115
1116		/* Start by allocating */
1117		alloc_error = vmballoon_alloc_page_list(b, &ctl,
1118							to_inflate_pages);
1119
1120		/* Actually lock the pages by telling the hypervisor */
1121		lock_error = vmballoon_lock(b, &ctl);
1122
1123		/*
1124		 * If an error indicates that something serious went wrong,
1125		 * stop the inflation.
1126		 */
1127		if (lock_error)
1128			break;
1129
1130		/* Update the balloon size */
1131		atomic64_add(ctl.n_pages * page_in_frames, &b->size);
1132
1133		vmballoon_enqueue_page_list(b, &ctl.pages, &ctl.n_pages,
1134					    ctl.page_size);
1135
1136		/*
1137		 * If allocation failed or the number of refused pages exceeds
1138		 * the maximum allowed, move to the next page size.
1139		 */
1140		if (alloc_error ||
1141		    ctl.n_refused_pages >= VMW_BALLOON_MAX_REFUSED) {
1142			if (ctl.page_size == VMW_BALLOON_4K_PAGE)
1143				break;
1144
1145			/*
1146			 * Split the refused pages to 4k. This will also empty
1147			 * the refused pages list.
1148			 */
1149			vmballoon_split_refused_pages(&ctl);
1150			ctl.page_size--;
1151		}
1152
1153		cond_resched();
1154	}
1155
1156	/*
1157	 * Release pages that were allocated while attempting to inflate the
1158	 * balloon but were refused by the host for one reason or another,
1159	 * and update the statistics.
1160	 */
1161	if (ctl.n_refused_pages != 0)
1162		vmballoon_release_refused_pages(b, &ctl);
1163
1164	vmballoon_release_page_list(&ctl.prealloc_pages, NULL, ctl.page_size);
1165}
1166
1167/**
1168 * vmballoon_deflate() - Decrease the size of the balloon.
1169 *
1170 * @b: pointer to the balloon
1171 * @n_frames: the number of frames to deflate. If zero, automatically
1172 * calculated according to the target size.
1173 * @coordinated: whether to coordinate with the host
1174 *
1175 * Decrease the size of the balloon allowing guest to use more memory.
1176 *
1177 * Return: The number of deflated frames (i.e., basic page size units)
1178 */
1179static unsigned long vmballoon_deflate(struct vmballoon *b, uint64_t n_frames,
1180				       bool coordinated)
1181{
1182	unsigned long deflated_frames = 0;
1183	unsigned long tried_frames = 0;
1184	struct vmballoon_ctl ctl = {
1185		.pages = LIST_HEAD_INIT(ctl.pages),
1186		.refused_pages = LIST_HEAD_INIT(ctl.refused_pages),
1187		.page_size = VMW_BALLOON_4K_PAGE,
1188		.op = VMW_BALLOON_DEFLATE
1189	};
1190
1191	/* free pages to reach target */
1192	while (true) {
1193		unsigned int to_deflate_pages, n_unlocked_frames;
1194		unsigned int page_in_frames;
1195		int64_t to_deflate_frames;
1196		bool deflated_all;
1197
1198		page_in_frames = vmballoon_page_in_frames(ctl.page_size);
1199
1200		VM_BUG_ON(!list_empty(&ctl.pages));
1201		VM_BUG_ON(ctl.n_pages);
1202		VM_BUG_ON(!list_empty(&ctl.refused_pages));
1203		VM_BUG_ON(ctl.n_refused_pages);
1204
1205		/*
 1206		 * If a specific number of frames was requested, we try to
1207		 * deflate this number of frames. Otherwise, deflation is
1208		 * performed according to the target and balloon size.
1209		 */
1210		to_deflate_frames = n_frames ? n_frames - tried_frames :
1211					       -vmballoon_change(b);
1212
1213		/* break if no work to do */
1214		if (to_deflate_frames <= 0)
1215			break;
1216
1217		/*
1218		 * Calculate the number of frames based on current page size,
1219		 * but limit the deflated frames to a single chunk
1220		 */
1221		to_deflate_pages = min_t(unsigned long, b->batch_max_pages,
1222					 DIV_ROUND_UP_ULL(to_deflate_frames,
1223							  page_in_frames));
1224
1225		/* First take the pages from the balloon pages. */
1226		vmballoon_dequeue_page_list(b, &ctl.pages, &ctl.n_pages,
1227					    ctl.page_size, to_deflate_pages);
1228
1229		/*
 1230		 * Before pages are moved to the refused list, count their
1231		 * frames as frames that we tried to deflate.
1232		 */
1233		tried_frames += ctl.n_pages * page_in_frames;
1234
1235		/*
1236		 * Unlock the pages by communicating with the hypervisor if the
1237		 * communication is coordinated (i.e., not pop). We ignore the
 1238		 * return code. Instead we check whether we managed to unlock
 1239		 * all the pages. If we failed, we will move to the next page
 1240		 * size and eventually try again later.
1241		 */
1242		if (coordinated)
1243			vmballoon_lock(b, &ctl);
1244
1245		/*
1246		 * Check if we deflated enough. We will move to the next page
1247		 * size if we did not manage to do so. This calculation takes
1248		 * place now, as once the pages are released, the number of
1249		 * pages is zeroed.
1250		 */
1251		deflated_all = (ctl.n_pages == to_deflate_pages);
1252
1253		/* Update local and global counters */
1254		n_unlocked_frames = ctl.n_pages * page_in_frames;
1255		atomic64_sub(n_unlocked_frames, &b->size);
1256		deflated_frames += n_unlocked_frames;
1257
1258		vmballoon_stats_page_add(b, VMW_BALLOON_PAGE_STAT_FREE,
1259					 ctl.page_size, ctl.n_pages);
1260
1261		/* free the ballooned pages */
1262		vmballoon_release_page_list(&ctl.pages, &ctl.n_pages,
1263					    ctl.page_size);
1264
1265		/* Return the refused pages to the ballooned list. */
1266		vmballoon_enqueue_page_list(b, &ctl.refused_pages,
1267					    &ctl.n_refused_pages,
1268					    ctl.page_size);
1269
1270		/* If we failed to unlock all the pages, move to next size. */
1271		if (!deflated_all) {
1272			if (ctl.page_size == b->max_page_size)
1273				break;
1274			ctl.page_size++;
1275		}
1276
1277		cond_resched();
1278	}
1279
1280	return deflated_frames;
1281}
1282
1283/**
1284 * vmballoon_deinit_batching - disables batching mode.
1285 *
1286 * @b: pointer to &struct vmballoon.
1287 *
1288 * Disables batching, by deallocating the page for communication with the
1289 * hypervisor and disabling the static key to indicate that batching is off.
1290 */
1291static void vmballoon_deinit_batching(struct vmballoon *b)
1292{
1293	free_page((unsigned long)b->batch_page);
1294	b->batch_page = NULL;
1295	static_branch_disable(&vmw_balloon_batching);
1296	b->batch_max_pages = 1;
1297}
1298
1299/**
1300 * vmballoon_init_batching - enable batching mode.
1301 *
1302 * @b: pointer to &struct vmballoon.
1303 *
1304 * Enables batching, by allocating a page for communication with the hypervisor
1305 * and enabling the static_key to use batching.
1306 *
1307 * Return: zero on success or an appropriate error-code.
1308 */
1309static int vmballoon_init_batching(struct vmballoon *b)
1310{
1311	struct page *page;
1312
1313	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
1314	if (!page)
1315		return -ENOMEM;
1316
1317	b->batch_page = page_address(page);
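	/* Each entry is 8 bytes, so a 4KB batch page holds up to 512 entries. */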
1318	b->batch_max_pages = PAGE_SIZE / sizeof(struct vmballoon_batch_entry);
1319
1320	static_branch_enable(&vmw_balloon_batching);
1321
1322	return 0;
1323}
1324
1325/*
1326 * Receive notification and resize balloon
1327 */
1328static void vmballoon_doorbell(void *client_data)
1329{
1330	struct vmballoon *b = client_data;
1331
1332	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_DOORBELL);
1333
1334	mod_delayed_work(system_freezable_wq, &b->dwork, 0);
1335}
1336
1337/*
1338 * Clean up vmci doorbell
1339 */
1340static void vmballoon_vmci_cleanup(struct vmballoon *b)
1341{
1342	vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
1343		      VMCI_INVALID_ID, VMCI_INVALID_ID);
1344
1345	if (!vmci_handle_is_invalid(b->vmci_doorbell)) {
1346		vmci_doorbell_destroy(b->vmci_doorbell);
1347		b->vmci_doorbell = VMCI_INVALID_HANDLE;
1348	}
1349}
1350
1351/**
1352 * vmballoon_vmci_init - Initialize vmci doorbell.
1353 *
1354 * @b: pointer to the balloon.
1355 *
 1356 * Return: zero on success or when the wakeup command is not supported;
 1357 * an error code otherwise.
1358 *
1359 * Initialize vmci doorbell, to get notified as soon as balloon changes.
1360 */
1361static int vmballoon_vmci_init(struct vmballoon *b)
1362{
1363	unsigned long error;
1364
1365	if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0)
1366		return 0;
1367
1368	error = vmci_doorbell_create(&b->vmci_doorbell, VMCI_FLAG_DELAYED_CB,
1369				     VMCI_PRIVILEGE_FLAG_RESTRICTED,
1370				     vmballoon_doorbell, b);
1371
1372	if (error != VMCI_SUCCESS)
1373		goto fail;
1374
1375	error =	__vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
1376				b->vmci_doorbell.context,
1377				b->vmci_doorbell.resource, NULL);
1378
1379	if (error != VMW_BALLOON_SUCCESS)
1380		goto fail;
1381
1382	return 0;
1383fail:
1384	vmballoon_vmci_cleanup(b);
1385	return -EIO;
1386}
1387
1388/**
 1389 * vmballoon_pop - Quickly release all pages allocated for the balloon.
1390 *
1391 * @b: pointer to the balloon.
1392 *
 1393 * This function is called when the host decides to "reset" the balloon for one
 1394 * reason or another. Unlike a normal "deflate" we do not (shall not) notify the
 1395 * host of the pages being released.
1396 */
1397static void vmballoon_pop(struct vmballoon *b)
1398{
1399	unsigned long size;
1400
1401	while ((size = atomic64_read(&b->size)))
1402		vmballoon_deflate(b, size, false);
1403}
1404
1405/*
 1406 * Perform the standard reset sequence by popping the balloon (in case it
 1407 * is not empty) and then restarting the protocol. This operation normally
 1408 * happens when the host responds with VMW_BALLOON_ERROR_RESET to a command.
1409 */
1410static void vmballoon_reset(struct vmballoon *b)
1411{
1412	int error;
1413
1414	down_write(&b->conf_sem);
1415
1416	vmballoon_vmci_cleanup(b);
1417
1418	/* free all pages, skipping monitor unlock */
1419	vmballoon_pop(b);
1420
1421	if (vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
1422		goto unlock;
1423
1424	if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
1425		if (vmballoon_init_batching(b)) {
1426			/*
1427			 * We failed to initialize batching, inform the monitor
1428			 * about it by sending a null capability.
1429			 *
1430			 * The guest will retry in one second.
1431			 */
1432			vmballoon_send_start(b, 0);
1433			goto unlock;
1434		}
1435	} else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) {
1436		vmballoon_deinit_batching(b);
1437	}
1438
1439	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_RESET);
1440	b->reset_required = false;
1441
1442	error = vmballoon_vmci_init(b);
1443	if (error)
1444		pr_err_once("failed to initialize vmci doorbell\n");
1445
1446	if (vmballoon_send_guest_id(b))
1447		pr_err_once("failed to send guest ID to the host\n");
1448
1449unlock:
1450	up_write(&b->conf_sem);
1451}
1452
1453/**
1454 * vmballoon_work - periodic balloon worker for reset, inflation and deflation.
1455 *
1456 * @work: pointer to the &work_struct which is provided by the workqueue.
1457 *
 1458 * Resets the protocol if needed, gets the new size and adjusts the balloon as
 1459 * needed. Repeats in 1 second.
1460 */
1461static void vmballoon_work(struct work_struct *work)
1462{
1463	struct delayed_work *dwork = to_delayed_work(work);
1464	struct vmballoon *b = container_of(dwork, struct vmballoon, dwork);
1465	int64_t change = 0;
1466
1467	if (b->reset_required)
1468		vmballoon_reset(b);
1469
1470	down_read(&b->conf_sem);
1471
1472	/*
1473	 * Update the stats while holding the semaphore to ensure that
1474	 * @stats_enabled is consistent with whether the stats are actually
1475	 * enabled
1476	 */
1477	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_TIMER);
1478
1479	if (!vmballoon_send_get_target(b))
1480		change = vmballoon_change(b);
1481
1482	if (change != 0) {
1483		pr_debug("%s - size: %llu, target %lu\n", __func__,
1484			 atomic64_read(&b->size), READ_ONCE(b->target));
1485
1486		if (change > 0)
1487			vmballoon_inflate(b);
1488		else  /* (change < 0) */
1489			vmballoon_deflate(b, 0, true);
1490	}
1491
1492	up_read(&b->conf_sem);
1493
1494	/*
1495	 * We are using a freezable workqueue so that balloon operations are
1496	 * stopped while the system transitions to/from sleep/hibernation.
1497	 */
1498	queue_delayed_work(system_freezable_wq,
1499			   dwork, round_jiffies_relative(HZ));
1500
1501}
1502
1503/**
1504 * vmballoon_shrinker_scan() - deflate the balloon due to memory pressure.
1505 * @shrinker: pointer to the balloon shrinker.
1506 * @sc: page reclaim information.
1507 *
1508 * Returns: number of pages that were freed during deflation.
1509 */
1510static unsigned long vmballoon_shrinker_scan(struct shrinker *shrinker,
1511					     struct shrink_control *sc)
1512{
1513	struct vmballoon *b = &balloon;
1514	unsigned long deflated_frames;
1515
1516	pr_debug("%s - size: %llu", __func__, atomic64_read(&b->size));
1517
1518	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_SHRINK);
1519
1520	/*
1521	 * If the lock is also contended for read, we cannot easily reclaim and
1522	 * we bail out.
1523	 */
1524	if (!down_read_trylock(&b->conf_sem))
1525		return 0;
1526
1527	deflated_frames = vmballoon_deflate(b, sc->nr_to_scan, true);
1528
1529	vmballoon_stats_gen_add(b, VMW_BALLOON_STAT_SHRINK_FREE,
1530				deflated_frames);
1531
1532	/*
 1533	 * Delay future inflation for some time to mitigate the situation in
 1534	 * which the balloon continuously grows and shrinks. Use WRITE_ONCE() since
1535	 * the access is asynchronous.
1536	 */
1537	WRITE_ONCE(b->shrink_timeout, jiffies + HZ * VMBALLOON_SHRINK_DELAY);
1538
1539	up_read(&b->conf_sem);
1540
1541	return deflated_frames;
1542}
1543
1544/**
1545 * vmballoon_shrinker_count() - return the number of ballooned pages.
1546 * @shrinker: pointer to the balloon shrinker.
1547 * @sc: page reclaim information.
1548 *
1549 * Returns: number of 4k pages that are allocated for the balloon and can
1550 *	    therefore be reclaimed under pressure.
1551 */
1552static unsigned long vmballoon_shrinker_count(struct shrinker *shrinker,
1553					      struct shrink_control *sc)
1554{
1555	struct vmballoon *b = &balloon;
1556
1557	return atomic64_read(&b->size);
1558}
1559
1560static void vmballoon_unregister_shrinker(struct vmballoon *b)
1561{
1562	shrinker_free(b->shrinker);
1563	b->shrinker = NULL;
1564}
1565
1566static int vmballoon_register_shrinker(struct vmballoon *b)
1567{
1568	/* Do nothing if the shrinker is not enabled */
1569	if (!vmwballoon_shrinker_enable)
1570		return 0;
1571
1572	b->shrinker = shrinker_alloc(0, "vmw-balloon");
1573	if (!b->shrinker)
1574		return -ENOMEM;
1575
1576	b->shrinker->scan_objects = vmballoon_shrinker_scan;
1577	b->shrinker->count_objects = vmballoon_shrinker_count;
1578	b->shrinker->private_data = b;
1579
1580	shrinker_register(b->shrinker);
1581
1582	return 0;
1583}
1584
1585/*
1586 * DEBUGFS Interface
1587 */
1588#ifdef CONFIG_DEBUG_FS
1589
1590static const char * const vmballoon_stat_page_names[] = {
1591	[VMW_BALLOON_PAGE_STAT_ALLOC]		= "alloc",
1592	[VMW_BALLOON_PAGE_STAT_ALLOC_FAIL]	= "allocFail",
1593	[VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC]	= "errAlloc",
1594	[VMW_BALLOON_PAGE_STAT_REFUSED_FREE]	= "errFree",
1595	[VMW_BALLOON_PAGE_STAT_FREE]		= "free"
1596};
1597
1598static const char * const vmballoon_stat_names[] = {
1599	[VMW_BALLOON_STAT_TIMER]		= "timer",
1600	[VMW_BALLOON_STAT_DOORBELL]		= "doorbell",
1601	[VMW_BALLOON_STAT_RESET]		= "reset",
1602	[VMW_BALLOON_STAT_SHRINK]		= "shrink",
1603	[VMW_BALLOON_STAT_SHRINK_FREE]		= "shrinkFree"
1604};
1605
1606static int vmballoon_enable_stats(struct vmballoon *b)
1607{
1608	int r = 0;
1609
1610	down_write(&b->conf_sem);
1611
1612	/* did we somehow race with another reader which enabled stats? */
1613	if (b->stats)
1614		goto out;
1615
1616	b->stats = kzalloc(sizeof(*b->stats), GFP_KERNEL);
1617
1618	if (!b->stats) {
1619		/* allocation failed */
1620		r = -ENOMEM;
1621		goto out;
1622	}
1623	static_key_enable(&balloon_stat_enabled.key);
1624out:
1625	up_write(&b->conf_sem);
1626	return r;
1627}
1628
1629/**
1630 * vmballoon_debug_show - shows statistics of balloon operations.
1631 * @f: pointer to the &struct seq_file.
1632 * @offset: ignored.
1633 *
 1634 * Provides the statistics that can be accessed via the vmmemctl file in debugfs.
1635 * To avoid the overhead - mainly that of memory - of collecting the statistics,
1636 * we only collect statistics after the first time the counters are read.
1637 *
1638 * Return: zero on success or an error code.
1639 */
1640static int vmballoon_debug_show(struct seq_file *f, void *offset)
1641{
1642	struct vmballoon *b = f->private;
1643	int i, j;
1644
1645	/* enables stats if they are disabled */
1646	if (!b->stats) {
1647		int r = vmballoon_enable_stats(b);
1648
1649		if (r)
1650			return r;
1651	}
1652
1653	/* format capabilities info */
1654	seq_printf(f, "%-22s: %#16x\n", "balloon capabilities",
1655		   VMW_BALLOON_CAPABILITIES);
1656	seq_printf(f, "%-22s: %#16lx\n", "used capabilities", b->capabilities);
1657	seq_printf(f, "%-22s: %16s\n", "is resetting",
1658		   b->reset_required ? "y" : "n");
1659
1660	/* format size info */
1661	seq_printf(f, "%-22s: %16lu\n", "target", READ_ONCE(b->target));
1662	seq_printf(f, "%-22s: %16llu\n", "current", atomic64_read(&b->size));
1663
1664	for (i = 0; i < VMW_BALLOON_CMD_NUM; i++) {
1665		if (vmballoon_cmd_names[i] == NULL)
1666			continue;
1667
1668		seq_printf(f, "%-22s: %16llu (%llu failed)\n",
1669			   vmballoon_cmd_names[i],
1670			   atomic64_read(&b->stats->ops[i][VMW_BALLOON_OP_STAT]),
1671			   atomic64_read(&b->stats->ops[i][VMW_BALLOON_OP_FAIL_STAT]));
1672	}
1673
1674	for (i = 0; i < VMW_BALLOON_STAT_NUM; i++)
1675		seq_printf(f, "%-22s: %16llu\n",
1676			   vmballoon_stat_names[i],
1677			   atomic64_read(&b->stats->general_stat[i]));
1678
1679	for (i = 0; i < VMW_BALLOON_PAGE_STAT_NUM; i++) {
1680		for (j = 0; j < VMW_BALLOON_NUM_PAGE_SIZES; j++)
1681			seq_printf(f, "%-18s(%s): %16llu\n",
1682				   vmballoon_stat_page_names[i],
1683				   vmballoon_page_size_names[j],
1684				   atomic64_read(&b->stats->page_stat[i][j]));
1685	}
1686
1687	return 0;
1688}
1689
1690DEFINE_SHOW_ATTRIBUTE(vmballoon_debug);
1691
1692static void __init vmballoon_debugfs_init(struct vmballoon *b)
1693{
1694	debugfs_create_file("vmmemctl", S_IRUGO, NULL, b,
1695			    &vmballoon_debug_fops);
1696}
1697
1698static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
1699{
1700	static_key_disable(&balloon_stat_enabled.key);
1701	debugfs_lookup_and_remove("vmmemctl", NULL);
1702	kfree(b->stats);
1703	b->stats = NULL;
1704}
1705
1706#else
1707
1708static inline void vmballoon_debugfs_init(struct vmballoon *b)
1709{
1710}
1711
1712static inline void vmballoon_debugfs_exit(struct vmballoon *b)
1713{
1714}
1715
1716#endif	/* CONFIG_DEBUG_FS */
1717
1718
1719#ifdef CONFIG_BALLOON_COMPACTION
1720/**
1721 * vmballoon_migratepage() - migrates a balloon page.
1722 * @b_dev_info: balloon device information descriptor.
1723 * @newpage: the page to which @page should be migrated.
1724 * @page: a ballooned page that should be migrated.
1725 * @mode: migration mode, ignored.
1726 *
1727 * This function is really open-coded, but that is according to the interface
1728 * that balloon_compaction provides.
1729 *
1730 * Return: zero on success, -EAGAIN when migration cannot be performed
1731 *	   momentarily, and -EBUSY if migration failed and should be retried
1732 *	   with that specific page.
1733 */
1734static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
1735				 struct page *newpage, struct page *page,
1736				 enum migrate_mode mode)
1737{
1738	unsigned long status, flags;
1739	struct vmballoon *b;
1740	int ret;
1741
1742	b = container_of(b_dev_info, struct vmballoon, b_dev_info);
1743
1744	/*
1745	 * If the semaphore is taken, there is ongoing configuration change
1746	 * (i.e., balloon reset), so try again.
1747	 */
1748	if (!down_read_trylock(&b->conf_sem))
1749		return -EAGAIN;
1750
1751	spin_lock(&b->comm_lock);
1752	/*
1753	 * We must start by deflating and not inflating, as otherwise the
1754	 * hypervisor may tell us that it has enough memory and the new page is
1755	 * not needed. Since the old page is isolated, we cannot use the list
1756	 * interface to unlock it, as the LRU field is used for isolation.
1757	 * Instead, we use the native interface directly.
1758	 */
1759	vmballoon_add_page(b, 0, page);
1760	status = vmballoon_lock_op(b, 1, VMW_BALLOON_4K_PAGE,
1761				   VMW_BALLOON_DEFLATE);
1762
1763	if (status == VMW_BALLOON_SUCCESS)
1764		status = vmballoon_status_page(b, 0, &page);
1765
1766	/*
1767	 * If a failure happened, let the migration mechanism know that it
1768	 * should not retry.
1769	 */
1770	if (status != VMW_BALLOON_SUCCESS) {
1771		spin_unlock(&b->comm_lock);
1772		ret = -EBUSY;
1773		goto out_unlock;
1774	}
1775
1776	/*
1777	 * The page is isolated, so it is safe to delete it without holding
1778	 * @pages_lock. We keep holding @comm_lock since we will need it in a
1779	 * second.
1780	 */
1781	balloon_page_delete(page);
1782
1783	put_page(page);
1784
1785	/* Inflate */
1786	vmballoon_add_page(b, 0, newpage);
1787	status = vmballoon_lock_op(b, 1, VMW_BALLOON_4K_PAGE,
1788				   VMW_BALLOON_INFLATE);
1789
1790	if (status == VMW_BALLOON_SUCCESS)
1791		status = vmballoon_status_page(b, 0, &newpage);
1792
1793	spin_unlock(&b->comm_lock);
1794
1795	if (status != VMW_BALLOON_SUCCESS) {
1796		/*
1797		 * A failure happened. While we can deflate the page we just
1798		 * inflated, this deflation can also encounter an error. Instead
1799		 * we will decrease the size of the balloon to reflect the
1800		 * change and report failure.
1801		 */
1802		atomic64_dec(&b->size);
1803		ret = -EBUSY;
1804	} else {
1805		/*
1806		 * Success. Take a reference for the page, and we will add it to
1807		 * the list after acquiring the lock.
1808		 */
1809		get_page(newpage);
1810		ret = MIGRATEPAGE_SUCCESS;
1811	}
1812
1813	/* Update the balloon list under the @pages_lock */
1814	spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
1815
1816	/*
1817	 * On inflation success, we already took a reference for the @newpage.
1818	 * If we succeeded, just insert it into the list and update the statistics
1819	 * under the lock.
1820	 */
1821	if (ret == MIGRATEPAGE_SUCCESS) {
1822		balloon_page_insert(&b->b_dev_info, newpage);
1823		__count_vm_event(BALLOON_MIGRATE);
1824	}
1825
1826	/*
1827	 * We deflated successfully, so regardless of the inflation success, we
1828	 * need to reduce the number of isolated_pages.
1829	 */
1830	b->b_dev_info.isolated_pages--;
1831	spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
1832
1833out_unlock:
1834	up_read(&b->conf_sem);
1835	return ret;
1836}
1837
1838/**
1839 * vmballoon_compaction_init() - initialize compaction for the balloon.
1840 *
1841 * @b: pointer to the balloon.
1842 *
1843 * Registers the balloon's page-migration callback with the
1844 * balloon_compaction infrastructure. This must be called only after
1845 * balloon_devinfo_init() has initialized @b->b_dev_info, since the
1846 * callback is stored in that structure. The function cannot fail and
1847 * returns no value.
1848 */
1849static __init void vmballoon_compaction_init(struct vmballoon *b)
1850{
1851	b->b_dev_info.migratepage = vmballoon_migratepage;
1852}
1853
1854#else /* CONFIG_BALLOON_COMPACTION */
1855static inline void vmballoon_compaction_init(struct vmballoon *b)
1856{
1857}
1858#endif /* CONFIG_BALLOON_COMPACTION */
1859
1860static int __init vmballoon_init(void)
1861{
1862	int error;
1863
1864	/*
1865	 * Check if we are running on VMware's hypervisor and bail out
1866	 * if we are not.
1867	 */
1868	if (x86_hyper_type != X86_HYPER_VMWARE)
1869		return -ENODEV;
1870
1871	INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);
 
1872
1873	error = vmballoon_register_shrinker(&balloon);
1874	if (error)
1875		return error;
 
 
1876
1877	/*
1878	 * Initialization of compaction must be done after the call to
1879	 * balloon_devinfo_init().
1880	 */
1881	balloon_devinfo_init(&balloon.b_dev_info);
1882	vmballoon_compaction_init(&balloon);
 
 
1883
1884	INIT_LIST_HEAD(&balloon.huge_pages);
1885	spin_lock_init(&balloon.comm_lock);
1886	init_rwsem(&balloon.conf_sem);
1887	balloon.vmci_doorbell = VMCI_INVALID_HANDLE;
1888	balloon.batch_page = NULL;
1889	balloon.page = NULL;
1890	balloon.reset_required = true;
1891
1892	queue_delayed_work(system_freezable_wq, &balloon.dwork, 0);
 
 
1893
1894	vmballoon_debugfs_init(&balloon);
1895
1896	return 0;
1897}
1898
1899/*
1900 * Using late_initcall() instead of module_init() allows the balloon to use the
1901 * VMCI doorbell even when the balloon is built into the kernel. Otherwise the
1902 * VMCI is probed only after the balloon is initialized. If the balloon is used
1903 * as a module, late_initcall() is equivalent to module_init().
1904 */
1905late_initcall(vmballoon_init);
1906
1907static void __exit vmballoon_exit(void)
1908{
1909	vmballoon_unregister_shrinker(&balloon);
1910	vmballoon_vmci_cleanup(&balloon);
1911	cancel_delayed_work_sync(&balloon.dwork);
1912
1913	vmballoon_debugfs_exit(&balloon);
1914
1915	/*
1916	 * Deallocate all reserved memory, and reset connection with monitor.
1917	 * Reset connection before deallocating memory to avoid potential for
1918	 * additional spurious resets from guest touching deallocated pages.
1919	 */
1920	vmballoon_send_start(&balloon, 0);
1921	vmballoon_pop(&balloon);
1922}
1923module_exit(vmballoon_exit);
v3.1
 
  1/*
  2 * VMware Balloon driver.
  3 *
  4 * Copyright (C) 2000-2010, VMware, Inc. All Rights Reserved.
  5 *
  6 * This program is free software; you can redistribute it and/or modify it
  7 * under the terms of the GNU General Public License as published by the
  8 * Free Software Foundation; version 2 of the License and no later version.
  9 *
 10 * This program is distributed in the hope that it will be useful, but
 11 * WITHOUT ANY WARRANTY; without even the implied warranty of
 12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 13 * NON INFRINGEMENT.  See the GNU General Public License for more
 14 * details.
 15 *
 16 * You should have received a copy of the GNU General Public License
 17 * along with this program; if not, write to the Free Software
 18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 19 *
 20 * Maintained by: Dmitry Torokhov <dtor@vmware.com>
 21 */
 22
 23/*
 24 * This is VMware physical memory management driver for Linux. The driver
 25 * acts like a "balloon" that can be inflated to reclaim physical pages by
 26 * reserving them in the guest and invalidating them in the monitor,
 27 * freeing up the underlying machine pages so they can be allocated to
 28 * other guests.  The balloon can also be deflated to allow the guest to
 29 * use more physical memory. Higher level policies can control the sizes
 30 * of balloons in VMs in order to manage physical memory resources.
 31 */
 32
 33//#define DEBUG
 34#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 35
 36#include <linux/types.h>
 
 37#include <linux/kernel.h>
 38#include <linux/mm.h>
 
 39#include <linux/sched.h>
 40#include <linux/module.h>
 41#include <linux/workqueue.h>
 42#include <linux/debugfs.h>
 43#include <linux/seq_file.h>
 44#include <asm/hypervisor.h>
 45
 46MODULE_AUTHOR("VMware, Inc.");
 47MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
 48MODULE_VERSION("1.2.1.3-k");
 49MODULE_ALIAS("dmi:*:svnVMware*:*");
 50MODULE_ALIAS("vmware_vmmemctl");
 51MODULE_LICENSE("GPL");
 52
 53/*
  54 * Various constants controlling rate of inflating/deflating balloon,
 55 * measured in pages.
 56 */
 57
 58/*
 59 * Rate of allocating memory when there is no memory pressure
 60 * (driver performs non-sleeping allocations).
 61 */
 62#define VMW_BALLOON_NOSLEEP_ALLOC_MAX	16384U
 63
 64/*
  65 * Rates of memory allocation when guest experiences memory pressure
 66 * (driver performs sleeping allocations).
 67 */
 68#define VMW_BALLOON_RATE_ALLOC_MIN	512U
 69#define VMW_BALLOON_RATE_ALLOC_MAX	2048U
 70#define VMW_BALLOON_RATE_ALLOC_INC	16U
 71
 72/*
 73 * Rates for releasing pages while deflating balloon.
 74 */
 75#define VMW_BALLOON_RATE_FREE_MIN	512U
 76#define VMW_BALLOON_RATE_FREE_MAX	16384U
 77#define VMW_BALLOON_RATE_FREE_INC	16U
 78
 79/*
 80 * When guest is under memory pressure, use a reduced page allocation
 81 * rate for next several cycles.
 82 */
 83#define VMW_BALLOON_SLOW_CYCLES		4
 84
 85/*
 86 * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We don't
 87 * allow wait (__GFP_WAIT) for NOSLEEP page allocations. Use
  88 * __GFP_NOWARN to suppress page allocation failure warnings.
 89 */
 90#define VMW_PAGE_ALLOC_NOSLEEP		(__GFP_HIGHMEM|__GFP_NOWARN)
 91
 92/*
 93 * Use GFP_HIGHUSER when executing in a separate kernel thread
 94 * context and allocation can sleep.  This is less stressful to
 95 * the guest memory system, since it allows the thread to block
 96 * while memory is reclaimed, and won't take pages from emergency
 97 * low-memory pools.
 98 */
 99#define VMW_PAGE_ALLOC_CANSLEEP		(GFP_HIGHUSER)
100
101/* Maximum number of page allocations without yielding processor */
102#define VMW_BALLOON_YIELD_THRESHOLD	1024
103
104/* Maximum number of refused pages we accumulate during inflation cycle */
105#define VMW_BALLOON_MAX_REFUSED		16
106
107/*
108 * Hypervisor communication port definitions.
109 */
110#define VMW_BALLOON_HV_PORT		0x5670
111#define VMW_BALLOON_HV_MAGIC		0x456c6d6f
112#define VMW_BALLOON_PROTOCOL_VERSION	2
113#define VMW_BALLOON_GUEST_ID		1	/* Linux */
114
115#define VMW_BALLOON_CMD_START		0
116#define VMW_BALLOON_CMD_GET_TARGET	1
117#define VMW_BALLOON_CMD_LOCK		2
118#define VMW_BALLOON_CMD_UNLOCK		3
119#define VMW_BALLOON_CMD_GUEST_ID	4
120
121/* error codes */
122#define VMW_BALLOON_SUCCESS		0
123#define VMW_BALLOON_FAILURE		-1
124#define VMW_BALLOON_ERROR_CMD_INVALID	1
125#define VMW_BALLOON_ERROR_PPN_INVALID	2
126#define VMW_BALLOON_ERROR_PPN_LOCKED	3
127#define VMW_BALLOON_ERROR_PPN_UNLOCKED	4
128#define VMW_BALLOON_ERROR_PPN_PINNED	5
129#define VMW_BALLOON_ERROR_PPN_NOTNEEDED	6
130#define VMW_BALLOON_ERROR_RESET		7
131#define VMW_BALLOON_ERROR_BUSY		8
132
133#define VMWARE_BALLOON_CMD(cmd, data, result)		\
134({							\
135	unsigned long __stat, __dummy1, __dummy2;	\
136	__asm__ __volatile__ ("inl (%%dx)" :		\
137		"=a"(__stat),				\
138		"=c"(__dummy1),				\
139		"=d"(__dummy2),				\
140		"=b"(result) :				\
141		"0"(VMW_BALLOON_HV_MAGIC),		\
142		"1"(VMW_BALLOON_CMD_##cmd),		\
143		"2"(VMW_BALLOON_HV_PORT),		\
144		"3"(data) :				\
145		"memory");				\
146	result &= -1UL;					\
147	__stat & -1UL;					\
148})
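/*
 * For reference, a typical invocation of the macro above (the same pattern
 * is used by vmballoon_send_get_target() further down): the command name is
 * pasted onto VMW_BALLOON_CMD_, "data" is passed to the hypervisor in %ebx,
 * the status comes back in %eax (the value of the macro expression) and a
 * secondary result is written back through "result":
 *
 *	status = VMWARE_BALLOON_CMD(GET_TARGET, limit, target);
 *	if (vmballoon_check_status(b, status))
 *		...	/* "target" now holds the hypervisor's reply */
 */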
149
150#ifdef CONFIG_DEBUG_FS
151struct vmballoon_stats {
152	unsigned int timer;
153
 154	/* allocation statistics */
155	unsigned int alloc;
156	unsigned int alloc_fail;
157	unsigned int sleep_alloc;
158	unsigned int sleep_alloc_fail;
159	unsigned int refused_alloc;
160	unsigned int refused_free;
161	unsigned int free;
162
163	/* monitor operations */
164	unsigned int lock;
165	unsigned int lock_fail;
166	unsigned int unlock;
167	unsigned int unlock_fail;
168	unsigned int target;
169	unsigned int target_fail;
170	unsigned int start;
171	unsigned int start_fail;
172	unsigned int guest_type;
173	unsigned int guest_type_fail;
174};
175
176#define STATS_INC(stat) (stat)++
177#else
178#define STATS_INC(stat)
179#endif
180
181struct vmballoon {
 
182
183	/* list of reserved physical pages */
184	struct list_head pages;
185
186	/* transient list of non-balloonable pages */
187	struct list_head refused_pages;
 
188	unsigned int n_refused_pages;
189
190	/* balloon size in pages */
191	unsigned int size;
192	unsigned int target;
193
194	/* reset flag */
195	bool reset_required;
196
197	/* adjustment rates (pages per second) */
198	unsigned int rate_alloc;
199	unsigned int rate_free;
 
 
 
200
201	/* slowdown page allocations for next few cycles */
202	unsigned int slow_allocation_cycles;
203
204#ifdef CONFIG_DEBUG_FS
205	/* statistics */
206	struct vmballoon_stats stats;
207
208	/* debugfs file exporting statistics */
209	struct dentry *dbg_entry;
210#endif
211
212	struct sysinfo sysinfo;
 
 
 
213
214	struct delayed_work dwork;
215};
216
217static struct vmballoon balloon;
218
219/*
220 * Send "start" command to the host, communicating supported version
221 * of the protocol.
222 */
223static bool vmballoon_send_start(struct vmballoon *b)
224{
225	unsigned long status, dummy;
 
 
226
227	STATS_INC(b->stats.start);
228
229	status = VMWARE_BALLOON_CMD(START, VMW_BALLOON_PROTOCOL_VERSION, dummy);
230	if (status == VMW_BALLOON_SUCCESS)
231		return true;
232
233	pr_debug("%s - failed, hv returns %ld\n", __func__, status);
234	STATS_INC(b->stats.start_fail);
235	return false;
236}
237
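/*
 * Check a status value returned by the hypervisor. Only VMW_BALLOON_SUCCESS
 * counts as success; if the host requested a reset (VMW_BALLOON_ERROR_RESET),
 * remember it so that the next work cycle re-runs the reset sequence.
 */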
238static bool vmballoon_check_status(struct vmballoon *b, unsigned long status)
 
 
239{
240	switch (status) {
241	case VMW_BALLOON_SUCCESS:
242		return true;
243
244	case VMW_BALLOON_ERROR_RESET:
 
245		b->reset_required = true;
246		/* fall through */
247
248	default:
249		return false;
250	}
251}
252
253/*
254 * Communicate guest type to the host so that it can adjust ballooning
255 * algorithm to the one most appropriate for the guest. This command
256 * is normally issued after sending "start" command and is part of
257 * standard reset sequence.
 
 
258 */
259static bool vmballoon_send_guest_id(struct vmballoon *b)
260{
261	unsigned long status, dummy;
262
263	status = VMWARE_BALLOON_CMD(GUEST_ID, VMW_BALLOON_GUEST_ID, dummy);
 
264
265	STATS_INC(b->stats.guest_type);
 
266
267	if (vmballoon_check_status(b, status))
268		return true;
269
270	pr_debug("%s - failed, hv returns %ld\n", __func__, status);
271	STATS_INC(b->stats.guest_type_fail);
272	return false;
273}
274
275/*
276 * Retrieve desired balloon size from the host.
277 */
278static bool vmballoon_send_get_target(struct vmballoon *b, u32 *new_target)
279{
280	unsigned long status;
281	unsigned long target;
282	unsigned long limit;
283	u32 limit32;
284
285	/*
286	 * si_meminfo() is cheap. Moreover, we want to provide dynamic
287	 * max balloon size later. So let us call si_meminfo() every
288	 * iteration.
289	 */
290	si_meminfo(&b->sysinfo);
291	limit = b->sysinfo.totalram;
292
293	/* Ensure limit fits in 32-bits */
294	limit32 = (u32)limit;
295	if (limit != limit32)
296		return false;
297
298	/* update stats */
299	STATS_INC(b->stats.target);
 
 
 
300
301	status = VMWARE_BALLOON_CMD(GET_TARGET, limit, target);
302	if (vmballoon_check_status(b, status)) {
303		*new_target = target;
304		return true;
305	}
306
307	pr_debug("%s - failed, hv returns %ld\n", __func__, status);
308	STATS_INC(b->stats.target_fail);
309	return false;
310}
311
312/*
313 * Notify the host about allocated page so that host can use it without
 314 * fear that guest will need it. Host may reject some pages; we need to
315 * check the return value and maybe submit a different page.
 
 
 
316 */
317static bool vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
318				     unsigned int *hv_status)
 
319{
320	unsigned long status, dummy;
321	u32 pfn32;
 
322
323	pfn32 = (u32)pfn;
324	if (pfn32 != pfn)
325		return false;
326
327	STATS_INC(b->stats.lock);
328
329	*hv_status = status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy);
330	if (vmballoon_check_status(b, status))
331		return true;
332
333	pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
334	STATS_INC(b->stats.lock_fail);
335	return false;
336}
337
338/*
339 * Notify the host that guest intends to release given page back into
340 * the pool of available (to the guest) pages.
341 */
342static bool vmballoon_send_unlock_page(struct vmballoon *b, unsigned long pfn)
 
343{
344	unsigned long status, dummy;
345	u32 pfn32;
346
347	pfn32 = (u32)pfn;
348	if (pfn32 != pfn)
349		return false;
350
351	STATS_INC(b->stats.unlock);
 
 
352
353	status = VMWARE_BALLOON_CMD(UNLOCK, pfn, dummy);
354	if (vmballoon_check_status(b, status))
355		return true;
356
357	pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
358	STATS_INC(b->stats.unlock_fail);
359	return false;
360}
361
362/*
363 * Quickly release all pages allocated for the balloon. This function is
364 * called when host decides to "reset" balloon for one reason or another.
365 * Unlike normal "deflate" we do not (shall not) notify host of the pages
366 * being released.
 
 
 
367 */
368static void vmballoon_pop(struct vmballoon *b)
 
 
369{
370	struct page *page, *next;
371	unsigned int count = 0;
372
373	list_for_each_entry_safe(page, next, &b->pages, lru) {
374		list_del(&page->lru);
375		__free_page(page);
376		STATS_INC(b->stats.free);
377		b->size--;
378
379		if (++count >= b->rate_free) {
380			count = 0;
381			cond_resched();
382		}
383	}
 
 
 
384}
385
 
386/*
387 * Perform standard reset sequence by popping the balloon (in case it
 388 * is not empty) and then restarting the protocol. This operation normally
389 * happens when host responds with VMW_BALLOON_ERROR_RESET to a command.
390 */
391static void vmballoon_reset(struct vmballoon *b)
 
392{
393	/* free all pages, skipping monitor unlock */
394	vmballoon_pop(b);
395
396	if (vmballoon_send_start(b)) {
397		b->reset_required = false;
398		if (!vmballoon_send_guest_id(b))
399			pr_err("failed to send guest ID to the host\n");
400	}
401}
402
403/*
404 * Allocate (or reserve) a page for the balloon and notify the host.  If host
 405 * refuses the page, put it on the "refused" list and allocate another one until host
406 * is satisfied. "Refused" pages are released at the end of inflation cycle
407 * (when we allocate b->rate_alloc pages).
 
 
408 */
409static int vmballoon_reserve_page(struct vmballoon *b, bool can_sleep)
410{
411	struct page *page;
412	gfp_t flags;
413	unsigned int hv_status;
414	bool locked = false;
415
416	do {
417		if (!can_sleep)
418			STATS_INC(b->stats.alloc);
419		else
420			STATS_INC(b->stats.sleep_alloc);
421
422		flags = can_sleep ? VMW_PAGE_ALLOC_CANSLEEP : VMW_PAGE_ALLOC_NOSLEEP;
423		page = alloc_page(flags);
424		if (!page) {
425			if (!can_sleep)
426				STATS_INC(b->stats.alloc_fail);
427			else
428				STATS_INC(b->stats.sleep_alloc_fail);
429			return -ENOMEM;
430		}
431
432		/* inform monitor */
433		locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status);
434		if (!locked) {
435			STATS_INC(b->stats.refused_alloc);
436
437			if (hv_status == VMW_BALLOON_ERROR_RESET ||
438			    hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) {
439				__free_page(page);
440				return -EIO;
441			}
442
443			/*
444			 * Place page on the list of non-balloonable pages
445			 * and retry allocation, unless we already accumulated
446			 * too many of them, in which case take a breather.
447			 */
448			list_add(&page->lru, &b->refused_pages);
449			if (++b->n_refused_pages >= VMW_BALLOON_MAX_REFUSED)
450				return -EIO;
451		}
452	} while (!locked);
453
454	/* track allocated page */
455	list_add(&page->lru, &b->pages);
 
 
456
457	/* update balloon size */
458	b->size++;
 
459
460	return 0;
461}
462
463/*
464 * Release the page allocated for the balloon. Note that we first notify
465 * the host so it can make sure the page will be available for the guest
466 * to use, if needed.
467 */
468static int vmballoon_release_page(struct vmballoon *b, struct page *page)
 
 
 
469{
470	if (!vmballoon_send_unlock_page(b, page_to_pfn(page)))
471		return -EIO;
472
473	list_del(&page->lru);
474
475	/* deallocate page */
476	__free_page(page);
477	STATS_INC(b->stats.free);
 
478
479	/* update balloon size */
480	b->size--;
 
 
481
482	return 0;
 
 
 
483}
484
485/*
486 * Release pages that were allocated while attempting to inflate the
487 * balloon but were refused by the host for one reason or another.
488 */
489static void vmballoon_release_refused_pages(struct vmballoon *b)
490{
491	struct page *page, *next;
 
492
493	list_for_each_entry_safe(page, next, &b->refused_pages, lru) {
 
 
494		list_del(&page->lru);
495		__free_page(page);
496		STATS_INC(b->stats.refused_free);
 
497	}
498
499	b->n_refused_pages = 0;
500}
501
502/*
503 * Inflate the balloon towards its target size. Note that we try to limit
504 * the rate of allocation to make sure we are not choking the rest of the
505 * system.
506 */
507static void vmballoon_inflate(struct vmballoon *b)
508{
509	unsigned int goal;
510	unsigned int rate;
511	unsigned int i;
512	unsigned int allocations = 0;
513	int error = 0;
514	bool alloc_can_sleep = false;
515
516	pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);
 
517
518	/*
519	 * First try NOSLEEP page allocations to inflate balloon.
520	 *
521	 * If we do not throttle nosleep allocations, we can drain all
522	 * free pages in the guest quickly (if the balloon target is high).
523	 * As a side-effect, draining free pages helps to inform (force)
524	 * the guest to start swapping if balloon target is not met yet,
525	 * which is a desired behavior. However, balloon driver can consume
526	 * all available CPU cycles if too many pages are allocated in a
527	 * second. Therefore, we throttle nosleep allocations even when
528	 * the guest is not under memory pressure. OTOH, if we have already
529	 * predicted that the guest is under memory pressure, then we
530	 * slowdown page allocations considerably.
531	 */
532
533	goal = b->target - b->size;
534	/*
 535	 * Start with the no-sleep allocation rate, which may be higher
 536	 * than the sleeping allocation rate.
537	 */
538	rate = b->slow_allocation_cycles ?
539			b->rate_alloc : VMW_BALLOON_NOSLEEP_ALLOC_MAX;
540
541	pr_debug("%s - goal: %d, no-sleep rate: %d, sleep rate: %d\n",
542		 __func__, goal, rate, b->rate_alloc);
543
544	for (i = 0; i < goal; i++) {
 
 
545
546		error = vmballoon_reserve_page(b, alloc_can_sleep);
547		if (error) {
548			if (error != -ENOMEM) {
549				/*
550				 * Not a page allocation failure, stop this
551				 * cycle. Maybe we'll get new target from
552				 * the host soon.
553				 */
554				break;
555			}
556
557			if (alloc_can_sleep) {
558				/*
559				 * CANSLEEP page allocation failed, so guest
560				 * is under severe memory pressure. Quickly
561				 * decrease allocation rate.
562				 */
563				b->rate_alloc = max(b->rate_alloc / 2,
564						    VMW_BALLOON_RATE_ALLOC_MIN);
565				break;
566			}
567
568			/*
569			 * NOSLEEP page allocation failed, so the guest is
570			 * under memory pressure. Let us slow down page
571			 * allocations for next few cycles so that the guest
572			 * gets out of memory pressure. Also, if we already
573			 * allocated b->rate_alloc pages, let's pause,
574			 * otherwise switch to sleeping allocations.
575			 */
576			b->slow_allocation_cycles = VMW_BALLOON_SLOW_CYCLES;
577
578			if (i >= b->rate_alloc)
579				break;
580
581			alloc_can_sleep = true;
582			/* Lower rate for sleeping allocations. */
583			rate = b->rate_alloc;
584		}
585
586		if (++allocations > VMW_BALLOON_YIELD_THRESHOLD) {
587			cond_resched();
588			allocations = 0;
589		}
 
 
 
590
591		if (i >= rate) {
592			/* We allocated enough pages, let's take a break. */
593			break;
594		}
595	}
596
597	/*
598	 * We reached our goal without failures so try increasing
599	 * allocation rate.
600	 */
601	if (error == 0 && i >= b->rate_alloc) {
602		unsigned int mult = i / b->rate_alloc;
603
604		b->rate_alloc =
605			min(b->rate_alloc + mult * VMW_BALLOON_RATE_ALLOC_INC,
606			    VMW_BALLOON_RATE_ALLOC_MAX);
607	}
608
609	vmballoon_release_refused_pages(b);
 
610}
611
612/*
 613 * Decrease the size of the balloon, allowing the guest to use more memory.
 
 
614 */
615static void vmballoon_deflate(struct vmballoon *b)
616{
617	struct page *page, *next;
618	unsigned int i = 0;
619	unsigned int goal;
620	int error;
621
622	pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);
623
624	/* limit deallocation rate */
625	goal = min(b->size - b->target, b->rate_free);
626
627	pr_debug("%s - goal: %d, rate: %d\n", __func__, goal, b->rate_free);
628
629	/* free pages to reach target */
630	list_for_each_entry_safe(page, next, &b->pages, lru) {
631		error = vmballoon_release_page(b, page);
632		if (error) {
633			/* quickly decrease rate in case of error */
634			b->rate_free = max(b->rate_free / 2,
635					   VMW_BALLOON_RATE_FREE_MIN);
636			return;
 
 
637		}
638
639		if (++i >= goal)
640			break;
641	}
642
643	/* slowly increase rate if there were no errors */
644	b->rate_free = min(b->rate_free + VMW_BALLOON_RATE_FREE_INC,
645			   VMW_BALLOON_RATE_FREE_MAX);
646}
647
648/*
649 * Balloon work function: reset protocol, if needed, get the new size and
650 * adjust balloon as needed. Repeat in 1 sec.
651 */
652static void vmballoon_work(struct work_struct *work)
653{
654	struct delayed_work *dwork = to_delayed_work(work);
655	struct vmballoon *b = container_of(dwork, struct vmballoon, dwork);
656	unsigned int target;
657
658	STATS_INC(b->stats.timer);
659
660	if (b->reset_required)
661		vmballoon_reset(b);
662
663	if (b->slow_allocation_cycles > 0)
664		b->slow_allocation_cycles--;
665
666	if (vmballoon_send_get_target(b, &target)) {
667		/* update target, adjust size */
668		b->target = target;
 
 
 
669
670		if (b->size < target)
671			vmballoon_inflate(b);
672		else if (b->size > target)
673			vmballoon_deflate(b);
674	}
675
 
 
676	/*
677	 * We are using a freezable workqueue so that balloon operations are
678	 * stopped while the system transitions to/from sleep/hibernation.
679	 */
680	queue_delayed_work(system_freezable_wq,
681			   dwork, round_jiffies_relative(HZ));
682}
683
684/*
685 * DEBUGFS Interface
686 */
687#ifdef CONFIG_DEBUG_FS
688
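/*
 * Dump the balloon configuration, allocation/free rates and the collected
 * statistics through the debugfs file.
 */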
689static int vmballoon_debug_show(struct seq_file *f, void *offset)
690{
691	struct vmballoon *b = f->private;
692	struct vmballoon_stats *stats = &b->stats;
693
694	/* format size info */
695	seq_printf(f,
696		   "target:             %8d pages\n"
697		   "current:            %8d pages\n",
698		   b->target, b->size);
699
700	/* format rate info */
701	seq_printf(f,
702		   "rateNoSleepAlloc:   %8d pages/sec\n"
703		   "rateSleepAlloc:     %8d pages/sec\n"
704		   "rateFree:           %8d pages/sec\n",
705		   VMW_BALLOON_NOSLEEP_ALLOC_MAX,
706		   b->rate_alloc, b->rate_free);
707
708	seq_printf(f,
709		   "\n"
710		   "timer:              %8u\n"
711		   "start:              %8u (%4u failed)\n"
712		   "guestType:          %8u (%4u failed)\n"
713		   "lock:               %8u (%4u failed)\n"
714		   "unlock:             %8u (%4u failed)\n"
715		   "target:             %8u (%4u failed)\n"
716		   "primNoSleepAlloc:   %8u (%4u failed)\n"
717		   "primCanSleepAlloc:  %8u (%4u failed)\n"
718		   "primFree:           %8u\n"
719		   "errAlloc:           %8u\n"
720		   "errFree:            %8u\n",
721		   stats->timer,
722		   stats->start, stats->start_fail,
723		   stats->guest_type, stats->guest_type_fail,
724		   stats->lock,  stats->lock_fail,
725		   stats->unlock, stats->unlock_fail,
726		   stats->target, stats->target_fail,
727		   stats->alloc, stats->alloc_fail,
728		   stats->sleep_alloc, stats->sleep_alloc_fail,
729		   stats->free,
730		   stats->refused_alloc, stats->refused_free);
731
732	return 0;
733}
 
734
735static int vmballoon_debug_open(struct inode *inode, struct file *file)
736{
737	return single_open(file, vmballoon_debug_show, inode->i_private);
738}
 
 
739
740static const struct file_operations vmballoon_debug_fops = {
741	.owner		= THIS_MODULE,
742	.open		= vmballoon_debug_open,
743	.read		= seq_read,
744	.llseek		= seq_lseek,
745	.release	= single_release,
746};
747
748static int __init vmballoon_debugfs_init(struct vmballoon *b)
749{
750	int error;
751
752	b->dbg_entry = debugfs_create_file("vmmemctl", S_IRUGO, NULL, b,
753					   &vmballoon_debug_fops);
754	if (IS_ERR(b->dbg_entry)) {
755		error = PTR_ERR(b->dbg_entry);
756		pr_err("failed to create debugfs entry, error: %d\n", error);
757		return error;
758	}
759
760	return 0;
761}
762
763static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
764{
765	debugfs_remove(b->dbg_entry);
 
 
 
766}
767
768#else
769
770static inline int vmballoon_debugfs_init(struct vmballoon *b)
771{
772	return 0;
773}
774
775static inline void vmballoon_debugfs_exit(struct vmballoon *b)
776{
777}
778
779#endif	/* CONFIG_DEBUG_FS */
780
781static int __init vmballoon_init(void)
782{
783	int error;
784
785	/*
786	 * Check if we are running on VMware's hypervisor and bail out
787	 * if we are not.
788	 */
789	if (x86_hyper != &x86_hyper_vmware)
790		return -ENODEV;
791
792	INIT_LIST_HEAD(&balloon.pages);
793	INIT_LIST_HEAD(&balloon.refused_pages);
794
795	/* initialize rates */
796	balloon.rate_alloc = VMW_BALLOON_RATE_ALLOC_MAX;
797	balloon.rate_free = VMW_BALLOON_RATE_FREE_MAX;
798
799	INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);
800
801	/*
802	 * Start balloon.
 
803	 */
804	if (!vmballoon_send_start(&balloon)) {
805		pr_err("failed to send start command to the host\n");
806		return -EIO;
807	}
808
809	if (!vmballoon_send_guest_id(&balloon)) {
810		pr_err("failed to send guest ID to the host\n");
811		return -EIO;
812	}
 
 
 
813
814	error = vmballoon_debugfs_init(&balloon);
815	if (error)
816		return error;
817
818	queue_delayed_work(system_freezable_wq, &balloon.dwork, 0);
819
820	return 0;
821}
822module_init(vmballoon_init);
823
824static void __exit vmballoon_exit(void)
825{
 
 
826	cancel_delayed_work_sync(&balloon.dwork);
827
828	vmballoon_debugfs_exit(&balloon);
829
830	/*
831	 * Deallocate all reserved memory, and reset connection with monitor.
832	 * Reset connection before deallocating memory to avoid potential for
833	 * additional spurious resets from guest touching deallocated pages.
834	 */
835	vmballoon_send_start(&balloon);
836	vmballoon_pop(&balloon);
837}
838module_exit(vmballoon_exit);