v3.1 (drivers/misc/vmw_balloon.c)
 
  1/*
  2 * VMware Balloon driver.
  3 *
  4 * Copyright (C) 2000-2010, VMware, Inc. All Rights Reserved.
  5 *
  6 * This program is free software; you can redistribute it and/or modify it
  7 * under the terms of the GNU General Public License as published by the
  8 * Free Software Foundation; version 2 of the License and no later version.
  9 *
 10 * This program is distributed in the hope that it will be useful, but
 11 * WITHOUT ANY WARRANTY; without even the implied warranty of
 12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 13 * NON INFRINGEMENT.  See the GNU General Public License for more
 14 * details.
 15 *
 16 * You should have received a copy of the GNU General Public License
 17 * along with this program; if not, write to the Free Software
 18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 19 *
 20 * Maintained by: Dmitry Torokhov <dtor@vmware.com>
 21 */
 22
 23/*
 24 * This is the VMware physical memory management driver for Linux. The driver
 25 * acts like a "balloon" that can be inflated to reclaim physical pages by
 26 * reserving them in the guest and invalidating them in the monitor,
 27 * freeing up the underlying machine pages so they can be allocated to
 28 * other guests.  The balloon can also be deflated to allow the guest to
 29 * use more physical memory. Higher level policies can control the sizes
 30 * of balloons in VMs in order to manage physical memory resources.
 31 */
 32
 33//#define DEBUG
 34#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 35
 36#include <linux/types.h>
 37#include <linux/kernel.h>
 38#include <linux/mm.h>
 39#include <linux/sched.h>
 40#include <linux/module.h>
 41#include <linux/workqueue.h>
 42#include <linux/debugfs.h>
 43#include <linux/seq_file.h>
 44#include <asm/hypervisor.h>
 45
 46MODULE_AUTHOR("VMware, Inc.");
 47MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
 48MODULE_VERSION("1.2.1.3-k");
 49MODULE_ALIAS("dmi:*:svnVMware*:*");
 50MODULE_ALIAS("vmware_vmmemctl");
 51MODULE_LICENSE("GPL");
 52
 53/*
 54 * Various constants controlling the rate of inflating/deflating the balloon,
 55 * measured in pages.
 56 */
 57
 58/*
 59 * Rate of allocating memory when there is no memory pressure
 60 * (driver performs non-sleeping allocations).
 61 */
 62#define VMW_BALLOON_NOSLEEP_ALLOC_MAX	16384U
 63
 64/*
 65 * Rates of memory allocation when the guest experiences memory pressure
 66 * (driver performs sleeping allocations).
 67 */
 68#define VMW_BALLOON_RATE_ALLOC_MIN	512U
 69#define VMW_BALLOON_RATE_ALLOC_MAX	2048U
 70#define VMW_BALLOON_RATE_ALLOC_INC	16U
 71
 72/*
 73 * Rates for releasing pages while deflating balloon.
 74 */
 75#define VMW_BALLOON_RATE_FREE_MIN	512U
 76#define VMW_BALLOON_RATE_FREE_MAX	16384U
 77#define VMW_BALLOON_RATE_FREE_INC	16U
 78
 79/*
 80 * When guest is under memory pressure, use a reduced page allocation
 81 * rate for next several cycles.
 82 */
 83#define VMW_BALLOON_SLOW_CYCLES		4
 84
 85/*
 86 * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We don't
 87 * allow wait (__GFP_WAIT) for NOSLEEP page allocations. Use
 88 * __GFP_NOWARN, to suppress page allocation failure warnings.
 89 */
 90#define VMW_PAGE_ALLOC_NOSLEEP		(__GFP_HIGHMEM|__GFP_NOWARN)
 91
 92/*
 93 * Use GFP_HIGHUSER when executing in a separate kernel thread
 94 * context and allocation can sleep.  This is less stressful to
 95 * the guest memory system, since it allows the thread to block
 96 * while memory is reclaimed, and won't take pages from emergency
 97 * low-memory pools.
 98 */
 99#define VMW_PAGE_ALLOC_CANSLEEP		(GFP_HIGHUSER)
100
101/* Maximum number of page allocations without yielding processor */
102#define VMW_BALLOON_YIELD_THRESHOLD	1024
103
104/* Maximum number of refused pages we accumulate during inflation cycle */
105#define VMW_BALLOON_MAX_REFUSED		16
106
107/*
108 * Hypervisor communication port definitions.
109 */
110#define VMW_BALLOON_HV_PORT		0x5670
111#define VMW_BALLOON_HV_MAGIC		0x456c6d6f
112#define VMW_BALLOON_PROTOCOL_VERSION	2
113#define VMW_BALLOON_GUEST_ID		1	/* Linux */
114
115#define VMW_BALLOON_CMD_START		0
116#define VMW_BALLOON_CMD_GET_TARGET	1
117#define VMW_BALLOON_CMD_LOCK		2
118#define VMW_BALLOON_CMD_UNLOCK		3
119#define VMW_BALLOON_CMD_GUEST_ID	4
120
121/* error codes */
122#define VMW_BALLOON_SUCCESS		0
123#define VMW_BALLOON_FAILURE		-1
124#define VMW_BALLOON_ERROR_CMD_INVALID	1
125#define VMW_BALLOON_ERROR_PPN_INVALID	2
126#define VMW_BALLOON_ERROR_PPN_LOCKED	3
127#define VMW_BALLOON_ERROR_PPN_UNLOCKED	4
128#define VMW_BALLOON_ERROR_PPN_PINNED	5
129#define VMW_BALLOON_ERROR_PPN_NOTNEEDED	6
130#define VMW_BALLOON_ERROR_RESET		7
131#define VMW_BALLOON_ERROR_BUSY		8
132
133#define VMWARE_BALLOON_CMD(cmd, data, result)		\
134({							\
135	unsigned long __stat, __dummy1, __dummy2;	\
136	__asm__ __volatile__ ("inl (%%dx)" :		\
137		"=a"(__stat),				\
138		"=c"(__dummy1),				\
139		"=d"(__dummy2),				\
140		"=b"(result) :				\
141		"0"(VMW_BALLOON_HV_MAGIC),		\
142		"1"(VMW_BALLOON_CMD_##cmd),		\
143		"2"(VMW_BALLOON_HV_PORT),		\
144		"3"(data) :				\
145		"memory");				\
146	result &= -1UL;					\
147	__stat & -1UL;					\
148})
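/*
 * Editor's note -- an illustrative sketch only, not part of the original
 * source: the macro issues a single "inl" on the backdoor port and
 * evaluates to the status the hypervisor returns in %eax, while the
 * hypervisor's reply is written into the "result" lvalue. The callers
 * below use it like this:
 *
 *	unsigned long status, target;
 *
 *	status = VMWARE_BALLOON_CMD(GET_TARGET, limit, target);
 *	if (status == VMW_BALLOON_SUCCESS)
 *		new_target = target;
 */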
149
150#ifdef CONFIG_DEBUG_FS
151struct vmballoon_stats {
152	unsigned int timer;
153
154	/* allocation statistics */
155	unsigned int alloc;
156	unsigned int alloc_fail;
157	unsigned int sleep_alloc;
158	unsigned int sleep_alloc_fail;
159	unsigned int refused_alloc;
160	unsigned int refused_free;
161	unsigned int free;
162
163	/* monitor operations */
164	unsigned int lock;
165	unsigned int lock_fail;
166	unsigned int unlock;
167	unsigned int unlock_fail;
168	unsigned int target;
169	unsigned int target_fail;
170	unsigned int start;
171	unsigned int start_fail;
172	unsigned int guest_type;
173	unsigned int guest_type_fail;
174};
175
176#define STATS_INC(stat) (stat)++
177#else
178#define STATS_INC(stat)
179#endif
180
181struct vmballoon {
182
183	/* list of reserved physical pages */
184	struct list_head pages;
185
186	/* transient list of non-balloonable pages */
187	struct list_head refused_pages;
188	unsigned int n_refused_pages;
189
190	/* balloon size in pages */
191	unsigned int size;
192	unsigned int target;
193
194	/* reset flag */
195	bool reset_required;
196
197	/* adjustment rates (pages per second) */
198	unsigned int rate_alloc;
199	unsigned int rate_free;
200
201	/* slowdown page allocations for next few cycles */
202	unsigned int slow_allocation_cycles;
203
204#ifdef CONFIG_DEBUG_FS
205	/* statistics */
206	struct vmballoon_stats stats;
207
208	/* debugfs file exporting statistics */
209	struct dentry *dbg_entry;
210#endif
211
212	struct sysinfo sysinfo;
213
214	struct delayed_work dwork;
215};
216
217static struct vmballoon balloon;
218
219/*
220 * Send "start" command to the host, communicating supported version
221 * of the protocol.
222 */
223static bool vmballoon_send_start(struct vmballoon *b)
224{
225	unsigned long status, dummy;
226
227	STATS_INC(b->stats.start);
228
229	status = VMWARE_BALLOON_CMD(START, VMW_BALLOON_PROTOCOL_VERSION, dummy);
230	if (status == VMW_BALLOON_SUCCESS)
231		return true;
232
233	pr_debug("%s - failed, hv returns %ld\n", __func__, status);
234	STATS_INC(b->stats.start_fail);
235	return false;
236}
237
238static bool vmballoon_check_status(struct vmballoon *b, unsigned long status)
239{
240	switch (status) {
241	case VMW_BALLOON_SUCCESS:
242		return true;
243
244	case VMW_BALLOON_ERROR_RESET:
245		b->reset_required = true;
246		/* fall through */
247
248	default:
249		return false;
250	}
251}
252
253/*
254 * Communicate guest type to the host so that it can adjust ballooning
255 * algorithm to the one most appropriate for the guest. This command
256 * is normally issued after sending "start" command and is part of
257 * standard reset sequence.
258 */
259static bool vmballoon_send_guest_id(struct vmballoon *b)
260{
261	unsigned long status, dummy;
262
263	status = VMWARE_BALLOON_CMD(GUEST_ID, VMW_BALLOON_GUEST_ID, dummy);
 
264
265	STATS_INC(b->stats.guest_type);
 
266
267	if (vmballoon_check_status(b, status))
268		return true;
269
270	pr_debug("%s - failed, hv returns %ld\n", __func__, status);
271	STATS_INC(b->stats.guest_type_fail);
272	return false;
273}
274
275/*
276 * Retrieve desired balloon size from the host.
277 */
278static bool vmballoon_send_get_target(struct vmballoon *b, u32 *new_target)
279{
280	unsigned long status;
281	unsigned long target;
282	unsigned long limit;
283	u32 limit32;
284
285	/*
286	 * si_meminfo() is cheap. Moreover, we want to provide dynamic
287	 * max balloon size later. So let us call si_meminfo() every
288	 * iteration.
289	 */
290	si_meminfo(&b->sysinfo);
291	limit = b->sysinfo.totalram;
292
293	/* Ensure limit fits in 32-bits */
294	limit32 = (u32)limit;
295	if (limit != limit32)
296		return false;
297
298	/* update stats */
299	STATS_INC(b->stats.target);
300
301	status = VMWARE_BALLOON_CMD(GET_TARGET, limit, target);
302	if (vmballoon_check_status(b, status)) {
303		*new_target = target;
304		return true;
305	}
306
307	pr_debug("%s - failed, hv returns %ld\n", __func__, status);
308	STATS_INC(b->stats.target_fail);
309	return false;
310}
311
312/*
313 * Notify the host about allocated page so that host can use it without
314 * fear that guest will need it. Host may reject some pages, we need to
315 * check the return value and maybe submit a different page.
316 */
317static bool vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
318				     unsigned int *hv_status)
319{
320	unsigned long status, dummy;
321	u32 pfn32;
322
323	pfn32 = (u32)pfn;
324	if (pfn32 != pfn)
325		return false;
326
327	STATS_INC(b->stats.lock);
 
328
329	*hv_status = status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy);
330	if (vmballoon_check_status(b, status))
331		return true;
332
333	pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
334	STATS_INC(b->stats.lock_fail);
335	return false;
336}
337
338/*
339 * Notify the host that guest intends to release given page back into
340 * the pool of available (to the guest) pages.
341 */
342static bool vmballoon_send_unlock_page(struct vmballoon *b, unsigned long pfn)
343{
344	unsigned long status, dummy;
345	u32 pfn32;
346
347	pfn32 = (u32)pfn;
348	if (pfn32 != pfn)
349		return false;
350
351	STATS_INC(b->stats.unlock);
352
353	status = VMWARE_BALLOON_CMD(UNLOCK, pfn, dummy);
354	if (vmballoon_check_status(b, status))
355		return true;
356
357	pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
358	STATS_INC(b->stats.unlock_fail);
359	return false;
360}
361
362/*
363 * Quickly release all pages allocated for the balloon. This function is
364 * called when host decides to "reset" balloon for one reason or another.
365 * Unlike normal "deflate" we do not (shall not) notify host of the pages
366 * being released.
367 */
368static void vmballoon_pop(struct vmballoon *b)
369{
370	struct page *page, *next;
371	unsigned int count = 0;
372
373	list_for_each_entry_safe(page, next, &b->pages, lru) {
374		list_del(&page->lru);
375		__free_page(page);
376		STATS_INC(b->stats.free);
377		b->size--;
378
379		if (++count >= b->rate_free) {
380			count = 0;
381			cond_resched();
382		}
383	}
384}
385
 
386/*
387 * Perform standard reset sequence by popping the balloon (in case it
388 * is not empty) and then restarting the protocol. This operation normally
389 * happens when host responds with VMW_BALLOON_ERROR_RESET to a command.
390 */
391static void vmballoon_reset(struct vmballoon *b)
392{
393	/* free all pages, skipping monitor unlock */
394	vmballoon_pop(b);
395
396	if (vmballoon_send_start(b)) {
397		b->reset_required = false;
398		if (!vmballoon_send_guest_id(b))
399			pr_err("failed to send guest ID to the host\n");
400	}
401}
402
403/*
404 * Allocate (or reserve) a page for the balloon and notify the host.  If host
405 * refuses the page put it on "refuse" list and allocate another one until host
406 * is satisfied. "Refused" pages are released at the end of inflation cycle
407 * (when we allocate b->rate_alloc pages).
408 */
409static int vmballoon_reserve_page(struct vmballoon *b, bool can_sleep)
410{
411	struct page *page;
412	gfp_t flags;
413	unsigned int hv_status;
414	bool locked = false;
415
416	do {
417		if (!can_sleep)
418			STATS_INC(b->stats.alloc);
419		else
420			STATS_INC(b->stats.sleep_alloc);
421
422		flags = can_sleep ? VMW_PAGE_ALLOC_CANSLEEP : VMW_PAGE_ALLOC_NOSLEEP;
423		page = alloc_page(flags);
424		if (!page) {
425			if (!can_sleep)
426				STATS_INC(b->stats.alloc_fail);
427			else
428				STATS_INC(b->stats.sleep_alloc_fail);
429			return -ENOMEM;
430		}
431
432		/* inform monitor */
433		locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status);
434		if (!locked) {
435			STATS_INC(b->stats.refused_alloc);
436
437			if (hv_status == VMW_BALLOON_ERROR_RESET ||
438			    hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) {
439				__free_page(page);
440				return -EIO;
441			}
442
443			/*
444			 * Place page on the list of non-balloonable pages
445			 * and retry allocation, unless we already accumulated
446			 * too many of them, in which case take a breather.
447			 */
448			list_add(&page->lru, &b->refused_pages);
449			if (++b->n_refused_pages >= VMW_BALLOON_MAX_REFUSED)
450				return -EIO;
451		}
452	} while (!locked);
453
454	/* track allocated page */
455	list_add(&page->lru, &b->pages);
456
457	/* update balloon size */
458	b->size++;
 
459
460	return 0;
461}
462
463/*
464 * Release the page allocated for the balloon. Note that we first notify
465 * the host so it can make sure the page will be available for the guest
466 * to use, if needed.
467 */
468static int vmballoon_release_page(struct vmballoon *b, struct page *page)
469{
470	if (!vmballoon_send_unlock_page(b, page_to_pfn(page)))
471		return -EIO;
472
473	list_del(&page->lru);
474
475	/* deallocate page */
476	__free_page(page);
477	STATS_INC(b->stats.free);
478
479	/* update balloon size */
480	b->size--;
481
482	return 0;
483}
484
485/*
486 * Release pages that were allocated while attempting to inflate the
487 * balloon but were refused by the host for one reason or another.
488 */
489static void vmballoon_release_refused_pages(struct vmballoon *b)
490{
491	struct page *page, *next;
492
493	list_for_each_entry_safe(page, next, &b->refused_pages, lru) {
494		list_del(&page->lru);
495		__free_page(page);
496		STATS_INC(b->stats.refused_free);
497	}
498
499	b->n_refused_pages = 0;
500}
501
502/*
503 * Inflate the balloon towards its target size. Note that we try to limit
504 * the rate of allocation to make sure we are not choking the rest of the
505 * system.
506 */
507static void vmballoon_inflate(struct vmballoon *b)
508{
509	unsigned int goal;
510	unsigned int rate;
511	unsigned int i;
512	unsigned int allocations = 0;
513	int error = 0;
514	bool alloc_can_sleep = false;
515
516	pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);
517
518	/*
519	 * First try NOSLEEP page allocations to inflate balloon.
520	 *
521	 * If we do not throttle nosleep allocations, we can drain all
522	 * free pages in the guest quickly (if the balloon target is high).
523	 * As a side-effect, draining free pages helps to inform (force)
524	 * the guest to start swapping if balloon target is not met yet,
525	 * which is a desired behavior. However, balloon driver can consume
526	 * all available CPU cycles if too many pages are allocated in a
527	 * second. Therefore, we throttle nosleep allocations even when
528	 * the guest is not under memory pressure. OTOH, if we have already
529	 * predicted that the guest is under memory pressure, then we
530	 * slowdown page allocations considerably.
531	 */
532
533	goal = b->target - b->size;
534	/*
535	 * Start with no sleep allocation rate which may be higher
536	 * than sleeping allocation rate.
537	 */
538	rate = b->slow_allocation_cycles ?
539			b->rate_alloc : VMW_BALLOON_NOSLEEP_ALLOC_MAX;
540
541	pr_debug("%s - goal: %d, no-sleep rate: %d, sleep rate: %d\n",
542		 __func__, goal, rate, b->rate_alloc);
543
544	for (i = 0; i < goal; i++) {
545
546		error = vmballoon_reserve_page(b, alloc_can_sleep);
547		if (error) {
548			if (error != -ENOMEM) {
549				/*
550				 * Not a page allocation failure, stop this
551				 * cycle. Maybe we'll get new target from
552				 * the host soon.
553				 */
554				break;
555			}
556
557			if (alloc_can_sleep) {
558				/*
559				 * CANSLEEP page allocation failed, so guest
560				 * is under severe memory pressure. Quickly
561				 * decrease allocation rate.
562				 */
563				b->rate_alloc = max(b->rate_alloc / 2,
564						    VMW_BALLOON_RATE_ALLOC_MIN);
565				break;
566			}
567
568			/*
569			 * NOSLEEP page allocation failed, so the guest is
570			 * under memory pressure. Let us slow down page
571			 * allocations for next few cycles so that the guest
572			 * gets out of memory pressure. Also, if we already
573			 * allocated b->rate_alloc pages, let's pause,
574			 * otherwise switch to sleeping allocations.
575			 */
576			b->slow_allocation_cycles = VMW_BALLOON_SLOW_CYCLES;
577
578			if (i >= b->rate_alloc)
579				break;
580
581			alloc_can_sleep = true;
582			/* Lower rate for sleeping allocations. */
583			rate = b->rate_alloc;
584		}
585
586		if (++allocations > VMW_BALLOON_YIELD_THRESHOLD) {
587			cond_resched();
588			allocations = 0;
589		}
590
591		if (i >= rate) {
592			/* We allocated enough pages, let's take a break. */
593			break;
594		}
595	}
596
597	/*
598	 * We reached our goal without failures so try increasing
599	 * allocation rate.
600	 */
601	if (error == 0 && i >= b->rate_alloc) {
602		unsigned int mult = i / b->rate_alloc;
603
604		b->rate_alloc =
605			min(b->rate_alloc + mult * VMW_BALLOON_RATE_ALLOC_INC,
606			    VMW_BALLOON_RATE_ALLOC_MAX);
607	}
 
608
609	vmballoon_release_refused_pages(b);
610}
611
612/*
613 * Decrease the size of the balloon allowing guest to use more memory.
614 */
615static void vmballoon_deflate(struct vmballoon *b)
616{
617	struct page *page, *next;
618	unsigned int i = 0;
619	unsigned int goal;
620	int error;
621
622	pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);
623
624	/* limit deallocation rate */
625	goal = min(b->size - b->target, b->rate_free);
626
627	pr_debug("%s - goal: %d, rate: %d\n", __func__, goal, b->rate_free);
 
628
629	/* free pages to reach target */
630	list_for_each_entry_safe(page, next, &b->pages, lru) {
631		error = vmballoon_release_page(b, page);
632		if (error) {
633			/* quickly decrease rate in case of error */
634			b->rate_free = max(b->rate_free / 2,
635					   VMW_BALLOON_RATE_FREE_MIN);
636			return;
637		}
638
639		if (++i >= goal)
640			break;
641	}
642
643	/* slowly increase rate if there were no errors */
644	b->rate_free = min(b->rate_free + VMW_BALLOON_RATE_FREE_INC,
645			   VMW_BALLOON_RATE_FREE_MAX);
646}
647
648/*
649 * Balloon work function: reset protocol, if needed, get the new size and
650 * adjust balloon as needed. Repeat in 1 sec.
651 */
652static void vmballoon_work(struct work_struct *work)
653{
654	struct delayed_work *dwork = to_delayed_work(work);
655	struct vmballoon *b = container_of(dwork, struct vmballoon, dwork);
656	unsigned int target;
657
658	STATS_INC(b->stats.timer);
659
660	if (b->reset_required)
661		vmballoon_reset(b);
662
663	if (b->slow_allocation_cycles > 0)
664		b->slow_allocation_cycles--;
665
666	if (vmballoon_send_get_target(b, &target)) {
667		/* update target, adjust size */
668		b->target = target;
669
670		if (b->size < target)
671			vmballoon_inflate(b);
672		else if (b->size > target)
673			vmballoon_deflate(b);
674	}
675
676	/*
677	 * We are using a freezable workqueue so that balloon operations are
678	 * stopped while the system transitions to/from sleep/hibernation.
679	 */
680	queue_delayed_work(system_freezable_wq,
681			   dwork, round_jiffies_relative(HZ));
 
682}
683
684/*
685 * DEBUGFS Interface
686 */
687#ifdef CONFIG_DEBUG_FS
688
689static int vmballoon_debug_show(struct seq_file *f, void *offset)
690{
691	struct vmballoon *b = f->private;
692	struct vmballoon_stats *stats = &b->stats;
693
694	/* format size info */
695	seq_printf(f,
696		   "target:             %8d pages\n"
697		   "current:            %8d pages\n",
698		   b->target, b->size);
699
700	/* format rate info */
701	seq_printf(f,
702		   "rateNoSleepAlloc:   %8d pages/sec\n"
703		   "rateSleepAlloc:     %8d pages/sec\n"
704		   "rateFree:           %8d pages/sec\n",
705		   VMW_BALLOON_NOSLEEP_ALLOC_MAX,
706		   b->rate_alloc, b->rate_free);
707
708	seq_printf(f,
709		   "\n"
710		   "timer:              %8u\n"
711		   "start:              %8u (%4u failed)\n"
712		   "guestType:          %8u (%4u failed)\n"
713		   "lock:               %8u (%4u failed)\n"
714		   "unlock:             %8u (%4u failed)\n"
715		   "target:             %8u (%4u failed)\n"
716		   "primNoSleepAlloc:   %8u (%4u failed)\n"
717		   "primCanSleepAlloc:  %8u (%4u failed)\n"
718		   "primFree:           %8u\n"
719		   "errAlloc:           %8u\n"
720		   "errFree:            %8u\n",
721		   stats->timer,
722		   stats->start, stats->start_fail,
723		   stats->guest_type, stats->guest_type_fail,
724		   stats->lock,  stats->lock_fail,
725		   stats->unlock, stats->unlock_fail,
726		   stats->target, stats->target_fail,
727		   stats->alloc, stats->alloc_fail,
728		   stats->sleep_alloc, stats->sleep_alloc_fail,
729		   stats->free,
730		   stats->refused_alloc, stats->refused_free);
731
732	return 0;
733}
734
735static int vmballoon_debug_open(struct inode *inode, struct file *file)
736{
737	return single_open(file, vmballoon_debug_show, inode->i_private);
738}
739
740static const struct file_operations vmballoon_debug_fops = {
741	.owner		= THIS_MODULE,
742	.open		= vmballoon_debug_open,
743	.read		= seq_read,
744	.llseek		= seq_lseek,
745	.release	= single_release,
746};
747
748static int __init vmballoon_debugfs_init(struct vmballoon *b)
749{
750	int error;
751
752	b->dbg_entry = debugfs_create_file("vmmemctl", S_IRUGO, NULL, b,
753					   &vmballoon_debug_fops);
754	if (IS_ERR(b->dbg_entry)) {
755		error = PTR_ERR(b->dbg_entry);
756		pr_err("failed to create debugfs entry, error: %d\n", error);
757		return error;
758	}
759
760	return 0;
761}
762
763static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
764{
765	debugfs_remove(b->dbg_entry);
766}
767
768#else
769
770static inline int vmballoon_debugfs_init(struct vmballoon *b)
771{
772	return 0;
773}
774
775static inline void vmballoon_debugfs_exit(struct vmballoon *b)
776{
777}
778
779#endif	/* CONFIG_DEBUG_FS */
780
781static int __init vmballoon_init(void)
782{
783	int error;
784
785	/*
786	 * Check if we are running on VMware's hypervisor and bail out
787	 * if we are not.
788	 */
789	if (x86_hyper != &x86_hyper_vmware)
790		return -ENODEV;
791
792	INIT_LIST_HEAD(&balloon.pages);
793	INIT_LIST_HEAD(&balloon.refused_pages);
794
795	/* initialize rates */
796	balloon.rate_alloc = VMW_BALLOON_RATE_ALLOC_MAX;
797	balloon.rate_free = VMW_BALLOON_RATE_FREE_MAX;
798
799	INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);
800
801	/*
802	 * Start balloon.
803	 */
804	if (!vmballoon_send_start(&balloon)) {
805		pr_err("failed to send start command to the host\n");
806		return -EIO;
807	}
808
809	if (!vmballoon_send_guest_id(&balloon)) {
810		pr_err("failed to send guest ID to the host\n");
811		return -EIO;
812	}
813
814	error = vmballoon_debugfs_init(&balloon);
815	if (error)
816		return error;
817
818	queue_delayed_work(system_freezable_wq, &balloon.dwork, 0);
819
820	return 0;
821}
822module_init(vmballoon_init);
823
824static void __exit vmballoon_exit(void)
825{
826	cancel_delayed_work_sync(&balloon.dwork);
827
828	vmballoon_debugfs_exit(&balloon);
829
830	/*
831	 * Deallocate all reserved memory, and reset connection with monitor.
832	 * Reset connection before deallocating memory to avoid potential for
833	 * additional spurious resets from guest touching deallocated pages.
834	 */
835	vmballoon_send_start(&balloon);
836	vmballoon_pop(&balloon);
837}
838module_exit(vmballoon_exit);
v6.2 (drivers/misc/vmw_balloon.c)
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * VMware Balloon driver.
   4 *
   5 * Copyright (C) 2000-2018, VMware, Inc. All Rights Reserved.
   6 *
   7 * This is the VMware physical memory management driver for Linux. The driver
   8 * acts like a "balloon" that can be inflated to reclaim physical pages by
   9 * reserving them in the guest and invalidating them in the monitor,
  10 * freeing up the underlying machine pages so they can be allocated to
  11 * other guests.  The balloon can also be deflated to allow the guest to
  12 * use more physical memory. Higher level policies can control the sizes
  13 * of balloons in VMs in order to manage physical memory resources.
  14 */
  15
  16//#define DEBUG
  17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  18
  19#include <linux/types.h>
  20#include <linux/io.h>
  21#include <linux/kernel.h>
  22#include <linux/mm.h>
  23#include <linux/vmalloc.h>
  24#include <linux/sched.h>
  25#include <linux/module.h>
  26#include <linux/workqueue.h>
  27#include <linux/debugfs.h>
  28#include <linux/seq_file.h>
  29#include <linux/rwsem.h>
  30#include <linux/slab.h>
  31#include <linux/spinlock.h>
  32#include <linux/balloon_compaction.h>
  33#include <linux/vmw_vmci_defs.h>
  34#include <linux/vmw_vmci_api.h>
  35#include <asm/hypervisor.h>
  36
  37MODULE_AUTHOR("VMware, Inc.");
  38MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
  39MODULE_ALIAS("dmi:*:svnVMware*:*");
  40MODULE_ALIAS("vmware_vmmemctl");
  41MODULE_LICENSE("GPL");
  42
  43static bool __read_mostly vmwballoon_shrinker_enable;
  44module_param(vmwballoon_shrinker_enable, bool, 0444);
  45MODULE_PARM_DESC(vmwballoon_shrinker_enable,
  46	"Enable non-cooperative out-of-memory protection. Disabled by default as it may degrade performance.");
  47
  48/* Delay in seconds after shrink before inflation. */
  49#define VMBALLOON_SHRINK_DELAY		(5)
  50
  51/* Maximum number of refused pages we accumulate during inflation cycle */
  52#define VMW_BALLOON_MAX_REFUSED		16
  53
  54/* Magic number for the balloon mount-point */
  55#define BALLOON_VMW_MAGIC		0x0ba11007
  56
  57/*
  58 * Hypervisor communication port definitions.
  59 */
  60#define VMW_BALLOON_HV_PORT		0x5670
  61#define VMW_BALLOON_HV_MAGIC		0x456c6d6f
  62#define VMW_BALLOON_GUEST_ID		1	/* Linux */
  63
  64enum vmwballoon_capabilities {
  65	/*
  66	 * Bit 0 is reserved and not associated to any capability.
  67	 */
  68	VMW_BALLOON_BASIC_CMDS			= (1 << 1),
  69	VMW_BALLOON_BATCHED_CMDS		= (1 << 2),
  70	VMW_BALLOON_BATCHED_2M_CMDS		= (1 << 3),
  71	VMW_BALLOON_SIGNALLED_WAKEUP_CMD	= (1 << 4),
  72	VMW_BALLOON_64_BIT_TARGET		= (1 << 5)
  73};
  74
  75#define VMW_BALLOON_CAPABILITIES_COMMON	(VMW_BALLOON_BASIC_CMDS \
  76					| VMW_BALLOON_BATCHED_CMDS \
  77					| VMW_BALLOON_BATCHED_2M_CMDS \
  78					| VMW_BALLOON_SIGNALLED_WAKEUP_CMD)
  79
  80#define VMW_BALLOON_2M_ORDER		(PMD_SHIFT - PAGE_SHIFT)
  81
  82/*
  83 * 64-bit targets are only supported in 64-bit
  84 */
  85#ifdef CONFIG_64BIT
  86#define VMW_BALLOON_CAPABILITIES	(VMW_BALLOON_CAPABILITIES_COMMON \
  87					| VMW_BALLOON_64_BIT_TARGET)
  88#else
  89#define VMW_BALLOON_CAPABILITIES	VMW_BALLOON_CAPABILITIES_COMMON
  90#endif
  91
  92enum vmballoon_page_size_type {
  93	VMW_BALLOON_4K_PAGE,
  94	VMW_BALLOON_2M_PAGE,
  95	VMW_BALLOON_LAST_SIZE = VMW_BALLOON_2M_PAGE
  96};
  97
  98#define VMW_BALLOON_NUM_PAGE_SIZES	(VMW_BALLOON_LAST_SIZE + 1)
 
  99
 100static const char * const vmballoon_page_size_names[] = {
 101	[VMW_BALLOON_4K_PAGE]			= "4k",
 102	[VMW_BALLOON_2M_PAGE]			= "2M"
 103};
 104
 105enum vmballoon_op {
 106	VMW_BALLOON_INFLATE,
 107	VMW_BALLOON_DEFLATE
 108};
 109
 110enum vmballoon_op_stat_type {
 111	VMW_BALLOON_OP_STAT,
 112	VMW_BALLOON_OP_FAIL_STAT
 113};
 114
 115#define VMW_BALLOON_OP_STAT_TYPES	(VMW_BALLOON_OP_FAIL_STAT + 1)
 116
 117/**
 118 * enum vmballoon_cmd_type - backdoor commands.
 119 *
 120 * Availability of the commands is as followed:
 121 *
 122 * %VMW_BALLOON_CMD_START, %VMW_BALLOON_CMD_GET_TARGET and
 123 * %VMW_BALLOON_CMD_GUEST_ID are always available.
 124 *
 125 * If the host reports %VMW_BALLOON_BASIC_CMDS are supported then
 126 * %VMW_BALLOON_CMD_LOCK and %VMW_BALLOON_CMD_UNLOCK commands are available.
 127 *
 128 * If the host reports %VMW_BALLOON_BATCHED_CMDS are supported then
 129 * %VMW_BALLOON_CMD_BATCHED_LOCK and VMW_BALLOON_CMD_BATCHED_UNLOCK commands
 130 * are available.
 131 *
 132 * If the host reports %VMW_BALLOON_BATCHED_2M_CMDS are supported then
 133 * %VMW_BALLOON_CMD_BATCHED_2M_LOCK and %VMW_BALLOON_CMD_BATCHED_2M_UNLOCK
 134 * are supported.
 135 *
 136 * If the host reports  VMW_BALLOON_SIGNALLED_WAKEUP_CMD is supported then
 137 * VMW_BALLOON_CMD_VMCI_DOORBELL_SET command is supported.
 138 *
 139 * @VMW_BALLOON_CMD_START: Communicating supported version with the hypervisor.
 140 * @VMW_BALLOON_CMD_GET_TARGET: Gets the balloon target size.
 141 * @VMW_BALLOON_CMD_LOCK: Informs the hypervisor about a ballooned page.
 142 * @VMW_BALLOON_CMD_UNLOCK: Informs the hypervisor about a page that is about
 143 *			    to be deflated from the balloon.
 144 * @VMW_BALLOON_CMD_GUEST_ID: Informs the hypervisor about the type of OS that
 145 *			      runs in the VM.
 146 * @VMW_BALLOON_CMD_BATCHED_LOCK: Inform the hypervisor about a batch of
 147 *				  ballooned pages (up to 512).
 148 * @VMW_BALLOON_CMD_BATCHED_UNLOCK: Inform the hypervisor about a batch of
 149 *				  pages that are about to be deflated from the
 150 *				  balloon (up to 512).
 151 * @VMW_BALLOON_CMD_BATCHED_2M_LOCK: Similar to @VMW_BALLOON_CMD_BATCHED_LOCK
 152 *				     for 2MB pages.
 153 * @VMW_BALLOON_CMD_BATCHED_2M_UNLOCK: Similar to
 154 *				       @VMW_BALLOON_CMD_BATCHED_UNLOCK for 2MB
 155 *				       pages.
 156 * @VMW_BALLOON_CMD_VMCI_DOORBELL_SET: A command to set doorbell notification
 157 *				       that would be invoked when the balloon
 158 *				       size changes.
 159 * @VMW_BALLOON_CMD_LAST: Value of the last command.
 160 */
 161enum vmballoon_cmd_type {
 162	VMW_BALLOON_CMD_START,
 163	VMW_BALLOON_CMD_GET_TARGET,
 164	VMW_BALLOON_CMD_LOCK,
 165	VMW_BALLOON_CMD_UNLOCK,
 166	VMW_BALLOON_CMD_GUEST_ID,
 167	/* No command 5 */
 168	VMW_BALLOON_CMD_BATCHED_LOCK = 6,
 169	VMW_BALLOON_CMD_BATCHED_UNLOCK,
 170	VMW_BALLOON_CMD_BATCHED_2M_LOCK,
 171	VMW_BALLOON_CMD_BATCHED_2M_UNLOCK,
 172	VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
 173	VMW_BALLOON_CMD_LAST = VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
 174};
 175
 176#define VMW_BALLOON_CMD_NUM	(VMW_BALLOON_CMD_LAST + 1)
 177
 178enum vmballoon_error_codes {
 179	VMW_BALLOON_SUCCESS,
 180	VMW_BALLOON_ERROR_CMD_INVALID,
 181	VMW_BALLOON_ERROR_PPN_INVALID,
 182	VMW_BALLOON_ERROR_PPN_LOCKED,
 183	VMW_BALLOON_ERROR_PPN_UNLOCKED,
 184	VMW_BALLOON_ERROR_PPN_PINNED,
 185	VMW_BALLOON_ERROR_PPN_NOTNEEDED,
 186	VMW_BALLOON_ERROR_RESET,
 187	VMW_BALLOON_ERROR_BUSY
 188};
 189
 190#define VMW_BALLOON_SUCCESS_WITH_CAPABILITIES	(0x03000000)
 191
 192#define VMW_BALLOON_CMD_WITH_TARGET_MASK			\
 193	((1UL << VMW_BALLOON_CMD_GET_TARGET)		|	\
 194	 (1UL << VMW_BALLOON_CMD_LOCK)			|	\
 195	 (1UL << VMW_BALLOON_CMD_UNLOCK)		|	\
 196	 (1UL << VMW_BALLOON_CMD_BATCHED_LOCK)		|	\
 197	 (1UL << VMW_BALLOON_CMD_BATCHED_UNLOCK)	|	\
 198	 (1UL << VMW_BALLOON_CMD_BATCHED_2M_LOCK)	|	\
 199	 (1UL << VMW_BALLOON_CMD_BATCHED_2M_UNLOCK))
 200
 201static const char * const vmballoon_cmd_names[] = {
 202	[VMW_BALLOON_CMD_START]			= "start",
 203	[VMW_BALLOON_CMD_GET_TARGET]		= "target",
 204	[VMW_BALLOON_CMD_LOCK]			= "lock",
 205	[VMW_BALLOON_CMD_UNLOCK]		= "unlock",
 206	[VMW_BALLOON_CMD_GUEST_ID]		= "guestType",
 207	[VMW_BALLOON_CMD_BATCHED_LOCK]		= "batchLock",
 208	[VMW_BALLOON_CMD_BATCHED_UNLOCK]	= "batchUnlock",
 209	[VMW_BALLOON_CMD_BATCHED_2M_LOCK]	= "2m-lock",
 210	[VMW_BALLOON_CMD_BATCHED_2M_UNLOCK]	= "2m-unlock",
 211	[VMW_BALLOON_CMD_VMCI_DOORBELL_SET]	= "doorbellSet"
 212};
 213
 214enum vmballoon_stat_page {
 215	VMW_BALLOON_PAGE_STAT_ALLOC,
 216	VMW_BALLOON_PAGE_STAT_ALLOC_FAIL,
 217	VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC,
 218	VMW_BALLOON_PAGE_STAT_REFUSED_FREE,
 219	VMW_BALLOON_PAGE_STAT_FREE,
 220	VMW_BALLOON_PAGE_STAT_LAST = VMW_BALLOON_PAGE_STAT_FREE
 221};
 222
 223#define VMW_BALLOON_PAGE_STAT_NUM	(VMW_BALLOON_PAGE_STAT_LAST + 1)
 224
 225enum vmballoon_stat_general {
 226	VMW_BALLOON_STAT_TIMER,
 227	VMW_BALLOON_STAT_DOORBELL,
 228	VMW_BALLOON_STAT_RESET,
 229	VMW_BALLOON_STAT_SHRINK,
 230	VMW_BALLOON_STAT_SHRINK_FREE,
 231	VMW_BALLOON_STAT_LAST = VMW_BALLOON_STAT_SHRINK_FREE
 232};
 233
 234#define VMW_BALLOON_STAT_NUM		(VMW_BALLOON_STAT_LAST + 1)
 235
 236static DEFINE_STATIC_KEY_TRUE(vmw_balloon_batching);
 237static DEFINE_STATIC_KEY_FALSE(balloon_stat_enabled);
 238
 239struct vmballoon_ctl {
 240	struct list_head pages;
 241	struct list_head refused_pages;
 242	struct list_head prealloc_pages;
 243	unsigned int n_refused_pages;
 244	unsigned int n_pages;
 245	enum vmballoon_page_size_type page_size;
 246	enum vmballoon_op op;
 247};
 248
 249/**
 250 * struct vmballoon_batch_entry - a batch entry for lock or unlock.
 251 *
 252 * @status: the status of the operation, which is written by the hypervisor.
 253 * @reserved: reserved for future use. Must be set to zero.
 254 * @pfn: the physical frame number of the page to be locked or unlocked.
 255 */
 256struct vmballoon_batch_entry {
 257	u64 status : 5;
 258	u64 reserved : PAGE_SHIFT - 5;
 259	u64 pfn : 52;
 260} __packed;
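/*
 * Editor's note -- illustrative sketch only, taken from
 * vmballoon_add_page() and vmballoon_lock_op() below: in batching mode the
 * driver fills one entry per page and then passes the PFN of the batch
 * page itself to the hypervisor:
 *
 *	b->batch_page[idx] = (struct vmballoon_batch_entry)
 *				{ .pfn = page_to_pfn(p) };
 *	...
 *	pfn = PHYS_PFN(virt_to_phys(b->batch_page));
 *	vmballoon_cmd(b, VMW_BALLOON_CMD_BATCHED_LOCK, pfn, num_pages);
 *
 * The hypervisor then writes the per-page result back into each entry's
 * @status field.
 */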
 261
 262struct vmballoon {
 263	/**
 264	 * @max_page_size: maximum supported page size for ballooning.
 265	 *
 266	 * Protected by @conf_sem
 267	 */
 268	enum vmballoon_page_size_type max_page_size;
 269
 270	/**
 271	 * @size: balloon actual size in basic page size (frames).
 272	 *
 273	 * While we currently do not support size which is bigger than 32-bit,
 274	 * in preparation for future support, use 64-bits.
 275	 */
 276	atomic64_t size;
 277
 278	/**
 279	 * @target: balloon target size in basic page size (frames).
 280	 *
 281	 * We do not protect the target under the assumption that setting the
 282	 * value is always done through a single write. If this assumption ever
 283	 * breaks, we would have to use X_ONCE for accesses, and suffer the less
 284	 * optimized code. Although we may read stale target value if multiple
 285	 * accesses happen at once, the performance impact should be minor.
 286	 */
 287	unsigned long target;
 288
 289	/**
 290	 * @reset_required: reset flag
 291	 *
 292	 * Setting this flag may introduce races, but the code is expected to
 293	 * handle them gracefully. In the worst case, another operation will
 294	 * fail as reset did not take place. Clearing the flag is done while
 295	 * holding @conf_sem for write.
 296	 */
 297	bool reset_required;
 298
 299	/**
 300	 * @capabilities: hypervisor balloon capabilities.
 301	 *
 302	 * Protected by @conf_sem.
 303	 */
 304	unsigned long capabilities;
 305
 306	/**
 307	 * @batch_page: pointer to communication batch page.
 308	 *
 309	 * When batching is used, batch_page points to a page, which holds up to
 310	 * %VMW_BALLOON_BATCH_MAX_PAGES entries for locking or unlocking.
 311	 */
 312	struct vmballoon_batch_entry *batch_page;
 313
 314	/**
 315	 * @batch_max_pages: maximum pages that can be locked/unlocked.
 316	 *
 317	 * Indicates the number of pages that the hypervisor can lock or unlock
 318	 * at once, according to whether batching is enabled. If batching is
 319	 * disabled, only a single page can be locked/unlocked on each operation.
 320	 *
 321	 * Protected by @conf_sem.
 322	 */
 323	unsigned int batch_max_pages;
 324
 325	/**
 326	 * @page: page to be locked/unlocked by the hypervisor
 327	 *
 328	 * @page is only used when batching is disabled and a single page is
 329	 * reclaimed on each iteration.
 330	 *
 331	 * Protected by @comm_lock.
 332	 */
 333	struct page *page;
 334
 335	/**
 336	 * @shrink_timeout: timeout until the next inflation.
 337	 *
 338	 * After a shrink event, indicates the time in jiffies after which
 339	 * inflation is allowed again. Can be written concurrently with reads,
 340	 * so must use READ_ONCE/WRITE_ONCE when accessing.
 341	 */
 342	unsigned long shrink_timeout;
 343
 344	/* statistics */
 345	struct vmballoon_stats *stats;
 346
 347	/**
 348	 * @b_dev_info: balloon device information descriptor.
 349	 */
 350	struct balloon_dev_info b_dev_info;
 351
 352	struct delayed_work dwork;
 353
 354	/**
 355	 * @huge_pages - list of the inflated 2MB pages.
 356	 *
 357	 * Protected by @b_dev_info.pages_lock .
 358	 */
 359	struct list_head huge_pages;
 360
 361	/**
 362	 * @vmci_doorbell.
 363	 *
 364	 * Protected by @conf_sem.
 365	 */
 366	struct vmci_handle vmci_doorbell;
 367
 368	/**
 369	 * @conf_sem: semaphore to protect the configuration and the statistics.
 370	 */
 371	struct rw_semaphore conf_sem;
 372
 373	/**
 374	 * @comm_lock: lock to protect the communication with the host.
 375	 *
 376	 * Lock ordering: @conf_sem -> @comm_lock .
 377	 */
 378	spinlock_t comm_lock;
 379
 380	/**
 381	 * @shrinker: shrinker interface that is used to avoid over-inflation.
 382	 */
 383	struct shrinker shrinker;
 384
 385	/**
 386	 * @shrinker_registered: whether the shrinker was registered.
 387	 *
 388	 * The shrinker interface does not handle gracefully the removal of
 389	 * shrinker that was not registered before. This indication allows to
 390	 * simplify the unregistration process.
 391	 */
 392	bool shrinker_registered;
 393};
 394
 395static struct vmballoon balloon;
 396
 397struct vmballoon_stats {
 398	/* timer / doorbell operations */
 399	atomic64_t general_stat[VMW_BALLOON_STAT_NUM];
 400
 401	/* allocation statistics for huge and small pages */
 402	atomic64_t
 403	       page_stat[VMW_BALLOON_PAGE_STAT_NUM][VMW_BALLOON_NUM_PAGE_SIZES];
 404
 405	/* Monitor operations: total operations, and failures */
 406	atomic64_t ops[VMW_BALLOON_CMD_NUM][VMW_BALLOON_OP_STAT_TYPES];
 407};
 408
 409static inline bool is_vmballoon_stats_on(void)
 410{
 411	return IS_ENABLED(CONFIG_DEBUG_FS) &&
 412		static_branch_unlikely(&balloon_stat_enabled);
 413}
 414
 415static inline void vmballoon_stats_op_inc(struct vmballoon *b, unsigned int op,
 416					  enum vmballoon_op_stat_type type)
 417{
 418	if (is_vmballoon_stats_on())
 419		atomic64_inc(&b->stats->ops[op][type]);
 420}
 421
 422static inline void vmballoon_stats_gen_inc(struct vmballoon *b,
 423					   enum vmballoon_stat_general stat)
 424{
 425	if (is_vmballoon_stats_on())
 426		atomic64_inc(&b->stats->general_stat[stat]);
 427}
 428
 429static inline void vmballoon_stats_gen_add(struct vmballoon *b,
 430					   enum vmballoon_stat_general stat,
 431					   unsigned int val)
 432{
 433	if (is_vmballoon_stats_on())
 434		atomic64_add(val, &b->stats->general_stat[stat]);
 435}
 436
 437static inline void vmballoon_stats_page_inc(struct vmballoon *b,
 438					    enum vmballoon_stat_page stat,
 439					    enum vmballoon_page_size_type size)
 440{
 441	if (is_vmballoon_stats_on())
 442		atomic64_inc(&b->stats->page_stat[stat][size]);
 443}
 444
 445static inline void vmballoon_stats_page_add(struct vmballoon *b,
 446					    enum vmballoon_stat_page stat,
 447					    enum vmballoon_page_size_type size,
 448					    unsigned int val)
 449{
 450	if (is_vmballoon_stats_on())
 451		atomic64_add(val, &b->stats->page_stat[stat][size]);
 452}
 453
 454static inline unsigned long
 455__vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
 456		unsigned long arg2, unsigned long *result)
 457{
 458	unsigned long status, dummy1, dummy2, dummy3, local_result;
 459
 460	vmballoon_stats_op_inc(b, cmd, VMW_BALLOON_OP_STAT);
 461
 462	asm volatile ("inl %%dx" :
 463		"=a"(status),
 464		"=c"(dummy1),
 465		"=d"(dummy2),
 466		"=b"(local_result),
 467		"=S"(dummy3) :
 468		"0"(VMW_BALLOON_HV_MAGIC),
 469		"1"(cmd),
 470		"2"(VMW_BALLOON_HV_PORT),
 471		"3"(arg1),
 472		"4"(arg2) :
 473		"memory");
 474
 475	/* update the result if needed */
 476	if (result)
 477		*result = (cmd == VMW_BALLOON_CMD_START) ? dummy1 :
 478							   local_result;
 479
 480	/* update target when applicable */
 481	if (status == VMW_BALLOON_SUCCESS &&
 482	    ((1ul << cmd) & VMW_BALLOON_CMD_WITH_TARGET_MASK))
 483		WRITE_ONCE(b->target, local_result);
 484
 485	if (status != VMW_BALLOON_SUCCESS &&
 486	    status != VMW_BALLOON_SUCCESS_WITH_CAPABILITIES) {
 487		vmballoon_stats_op_inc(b, cmd, VMW_BALLOON_OP_FAIL_STAT);
 488		pr_debug("%s: %s [0x%lx,0x%lx) failed, returned %ld\n",
 489			 __func__, vmballoon_cmd_names[cmd], arg1, arg2,
 490			 status);
 491	}
 492
 493	/* mark reset required accordingly */
 494	if (status == VMW_BALLOON_ERROR_RESET)
 495		b->reset_required = true;
 
 496
 497	return status;
 498}
 499
 500static __always_inline unsigned long
 501vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
 502	      unsigned long arg2)
 503{
 504	unsigned long dummy;
 505
 506	return __vmballoon_cmd(b, cmd, arg1, arg2, &dummy);
 507}
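/*
 * Editor's note -- illustrative sketch mirroring the callers below, not
 * part of the original source: most commands go through vmballoon_cmd()
 * and only check the returned status, e.g.
 *
 *	status = vmballoon_cmd(b, VMW_BALLOON_CMD_GUEST_ID,
 *			       VMW_BALLOON_GUEST_ID, 0);
 *	return status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
 *
 * A caller that needs the hypervisor's reply (e.g. START, which returns
 * the capabilities) uses __vmballoon_cmd() with a result pointer instead.
 */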
 508
 509/*
 510 * Send "start" command to the host, communicating supported version
 511 * of the protocol.
 512 */
 513static int vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
 514{
 515	unsigned long status, capabilities;
 516
 517	status = __vmballoon_cmd(b, VMW_BALLOON_CMD_START, req_caps, 0,
 518				 &capabilities);
 519
 520	switch (status) {
 521	case VMW_BALLOON_SUCCESS_WITH_CAPABILITIES:
 522		b->capabilities = capabilities;
 523		break;
 524	case VMW_BALLOON_SUCCESS:
 525		b->capabilities = VMW_BALLOON_BASIC_CMDS;
 526		break;
 527	default:
 528		return -EIO;
 529	}
 530
 531	/*
 532	 * 2MB pages are only supported with batching. If batching is for some
 533	 * reason disabled, do not use 2MB pages, since otherwise the legacy
 534	 * mechanism is used with 2MB pages, causing a failure.
 535	 */
 536	b->max_page_size = VMW_BALLOON_4K_PAGE;
 537	if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) &&
 538	    (b->capabilities & VMW_BALLOON_BATCHED_CMDS))
 539		b->max_page_size = VMW_BALLOON_2M_PAGE;
 540
 541
 542	return 0;
 543}
 544
 545/**
 546 * vmballoon_send_guest_id - communicate guest type to the host.
 547 *
 548 * @b: pointer to the balloon.
 549 *
 550 * Communicate guest type to the host so that it can adjust ballooning
 551 * algorithm to the one most appropriate for the guest. This command
 552 * is normally issued after sending "start" command and is part of
 553 * standard reset sequence.
 554 *
 555 * Return: zero on success or appropriate error code.
 556 */
 557static int vmballoon_send_guest_id(struct vmballoon *b)
 558{
 559	unsigned long status;
 560
 561	status = vmballoon_cmd(b, VMW_BALLOON_CMD_GUEST_ID,
 562			       VMW_BALLOON_GUEST_ID, 0);
 563
 564	return status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
 565}
 566
 567/**
 568 * vmballoon_page_order() - return the order of the page
 569 * @page_size: the size of the page.
 570 *
 571 * Return: the allocation order.
 572 */
 573static inline
 574unsigned int vmballoon_page_order(enum vmballoon_page_size_type page_size)
 575{
 576	return page_size == VMW_BALLOON_2M_PAGE ? VMW_BALLOON_2M_ORDER : 0;
 577}
 578
 579/**
 580 * vmballoon_page_in_frames() - returns the number of frames in a page.
 581 * @page_size: the size of the page.
 582 *
 583 * Return: the number of 4k frames.
 584 */
 585static inline unsigned int
 586vmballoon_page_in_frames(enum vmballoon_page_size_type page_size)
 587{
 588	return 1 << vmballoon_page_order(page_size);
 589}
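/*
 * Editor's note: on x86-64 with 4KB base pages, PMD_SHIFT - PAGE_SHIFT is
 * 21 - 12 = 9, so a single VMW_BALLOON_2M_PAGE accounts for 1 << 9 = 512
 * frames in the balloon size and in the statistics.
 */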
 590
 591/**
 592 * vmballoon_mark_page_offline() - mark a page as offline
 593 * @page: pointer for the page.
 594 * @page_size: the size of the page.
 595 */
 596static void
 597vmballoon_mark_page_offline(struct page *page,
 598			    enum vmballoon_page_size_type page_size)
 599{
 600	int i;
 601
 602	for (i = 0; i < vmballoon_page_in_frames(page_size); i++)
 603		__SetPageOffline(page + i);
 604}
 605
 606/**
 607 * vmballoon_mark_page_online() - mark a page as online
 608 * @page: pointer for the page.
 609 * @page_size: the size of the page.
 610 */
 611static void
 612vmballoon_mark_page_online(struct page *page,
 613			   enum vmballoon_page_size_type page_size)
 614{
 615	int i;
 616
 617	for (i = 0; i < vmballoon_page_in_frames(page_size); i++)
 618		__ClearPageOffline(page + i);
 619}
 620
 621/**
 622 * vmballoon_send_get_target() - Retrieve desired balloon size from the host.
 623 *
 624 * @b: pointer to the balloon.
 625 *
 626 * Return: zero on success, EINVAL if limit does not fit in 32-bit, as required
 627 * by the host-guest protocol and EIO if an error occurred in communicating with
 628 * the host.
 629 */
 630static int vmballoon_send_get_target(struct vmballoon *b)
 631{
 632	unsigned long status;
 633	unsigned long limit;
 634
 635	limit = totalram_pages();
 636
 637	/* Ensure limit fits in 32-bits if 64-bit targets are not supported */
 638	if (!(b->capabilities & VMW_BALLOON_64_BIT_TARGET) &&
 639	    limit != (u32)limit)
 640		return -EINVAL;
 641
 642	status = vmballoon_cmd(b, VMW_BALLOON_CMD_GET_TARGET, limit, 0);
 643
 644	return status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
 645}
 646
 647/**
 648 * vmballoon_alloc_page_list - allocates a list of pages.
 649 *
 650 * @b: pointer to the balloon.
 651 * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
 652 * @req_n_pages: the number of requested pages.
 653 *
 654 * Tries to allocate @req_n_pages. Adds them to the list of balloon pages in
 655 * @ctl.pages and updates @ctl.n_pages to reflect the number of pages.
 656 *
 657 * Return: zero on success or error code otherwise.
 658 */
 659static int vmballoon_alloc_page_list(struct vmballoon *b,
 660				     struct vmballoon_ctl *ctl,
 661				     unsigned int req_n_pages)
 662{
 663	struct page *page;
 664	unsigned int i;
 665
 666	for (i = 0; i < req_n_pages; i++) {
 667		/*
 668		 * First check if we happen to have pages that were allocated
 669		 * before. This happens when a 2MB page is rejected during inflation
 670		 * by the hypervisor, and then split into 4KB pages.
 671		 */
 672		if (!list_empty(&ctl->prealloc_pages)) {
 673			page = list_first_entry(&ctl->prealloc_pages,
 674						struct page, lru);
 675			list_del(&page->lru);
 676		} else {
 677			if (ctl->page_size == VMW_BALLOON_2M_PAGE)
 678				page = alloc_pages(__GFP_HIGHMEM|__GFP_NOWARN|
 679					__GFP_NOMEMALLOC, VMW_BALLOON_2M_ORDER);
 680			else
 681				page = balloon_page_alloc();
 682
 683			vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC,
 684						 ctl->page_size);
 685		}
 686
 687		if (page) {
 688			/* Success. Add the page to the list and continue. */
 689			list_add(&page->lru, &ctl->pages);
 690			continue;
 691		}
 692
 693		/* Allocation failed. Update statistics and stop. */
 694		vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC_FAIL,
 695					 ctl->page_size);
 696		break;
 697	}
 698
 699	ctl->n_pages = i;
 700
 701	return req_n_pages == ctl->n_pages ? 0 : -ENOMEM;
 702}
 703
 704/**
 705 * vmballoon_handle_one_result - Handle lock/unlock result for a single page.
 706 *
 707 * @b: pointer for %struct vmballoon.
 708 * @page: pointer for the page whose result should be handled.
 709 * @page_size: size of the page.
 710 * @status: status of the operation as provided by the hypervisor.
 711 */
 712static int vmballoon_handle_one_result(struct vmballoon *b, struct page *page,
 713				       enum vmballoon_page_size_type page_size,
 714				       unsigned long status)
 715{
 716	/* On success do nothing. The page is already on the balloon list. */
 717	if (likely(status == VMW_BALLOON_SUCCESS))
 718		return 0;
 719
 720	pr_debug("%s: failed comm pfn %lx status %lu page_size %s\n", __func__,
 721		 page_to_pfn(page), status,
 722		 vmballoon_page_size_names[page_size]);
 723
 724	/* Error occurred */
 725	vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC,
 726				 page_size);
 727
 728	return -EIO;
 729}
 730
 731/**
 732 * vmballoon_status_page - returns the status of (un)lock operation
 733 *
 734 * @b: pointer to the balloon.
 735 * @idx: index for the page for which the operation is performed.
 736 * @p: pointer to where the page struct is returned.
 737 *
 738 * Following a lock or unlock operation, returns the status of the operation for
 739 * an individual page. Provides the page that the operation was performed on
 740 * in the @p argument.
 741 *
 742 * Returns: The status of a lock or unlock operation for an individual page.
 743 */
 744static unsigned long vmballoon_status_page(struct vmballoon *b, int idx,
 745					   struct page **p)
 746{
 747	if (static_branch_likely(&vmw_balloon_batching)) {
 748		/* batching mode */
 749		*p = pfn_to_page(b->batch_page[idx].pfn);
 750		return b->batch_page[idx].status;
 751	}
 752
 753	/* non-batching mode */
 754	*p = b->page;
 755
 756	/*
 757	 * If a failure occurs, the indication will be provided in the status
 758	 * of the entire operation, which is considered before the individual
 759	 * page status. So for non-batching mode, the indication is always of
 760	 * success.
 761	 */
 762	return VMW_BALLOON_SUCCESS;
 763}
 764
 765/**
 766 * vmballoon_lock_op - notifies the host about inflated/deflated pages.
 767 * @b: pointer to the balloon.
 768 * @num_pages: number of inflated/deflated pages.
 769 * @page_size: size of the page.
 770 * @op: the type of operation (lock or unlock).
 771 *
 772 * Notify the host about page(s) that were ballooned (or removed from the
 773 * balloon) so that host can use it without fear that guest will need it (or
 774 * stop using them since the VM does). Host may reject some pages, we need to
 775 * check the return value and maybe submit a different page. The pages that are
 776 * inflated/deflated are pointed by @b->page.
 777 *
 778 * Return: result as provided by the hypervisor.
 779 */
 780static unsigned long vmballoon_lock_op(struct vmballoon *b,
 781				       unsigned int num_pages,
 782				       enum vmballoon_page_size_type page_size,
 783				       enum vmballoon_op op)
 784{
 785	unsigned long cmd, pfn;
 786
 787	lockdep_assert_held(&b->comm_lock);
 788
 789	if (static_branch_likely(&vmw_balloon_batching)) {
 790		if (op == VMW_BALLOON_INFLATE)
 791			cmd = page_size == VMW_BALLOON_2M_PAGE ?
 792				VMW_BALLOON_CMD_BATCHED_2M_LOCK :
 793				VMW_BALLOON_CMD_BATCHED_LOCK;
 794		else
 795			cmd = page_size == VMW_BALLOON_2M_PAGE ?
 796				VMW_BALLOON_CMD_BATCHED_2M_UNLOCK :
 797				VMW_BALLOON_CMD_BATCHED_UNLOCK;
 798
 799		pfn = PHYS_PFN(virt_to_phys(b->batch_page));
 800	} else {
 801		cmd = op == VMW_BALLOON_INFLATE ? VMW_BALLOON_CMD_LOCK :
 802						  VMW_BALLOON_CMD_UNLOCK;
 803		pfn = page_to_pfn(b->page);
 804
 805		/* In non-batching mode, PFNs must fit in 32-bit */
 806		if (unlikely(pfn != (u32)pfn))
 807			return VMW_BALLOON_ERROR_PPN_INVALID;
 808	}
 809
 810	return vmballoon_cmd(b, cmd, pfn, num_pages);
 811}
 812
 813/**
 814 * vmballoon_add_page - adds a page towards lock/unlock operation.
 815 *
 816 * @b: pointer to the balloon.
 817 * @idx: index of the page to be ballooned in this batch.
 818 * @p: pointer to the page that is about to be ballooned.
 819 *
 820 * Adds the page to be ballooned. Must be called while holding @comm_lock.
 821 */
 822static void vmballoon_add_page(struct vmballoon *b, unsigned int idx,
 823			       struct page *p)
 824{
 825	lockdep_assert_held(&b->comm_lock);
 826
 827	if (static_branch_likely(&vmw_balloon_batching))
 828		b->batch_page[idx] = (struct vmballoon_batch_entry)
 829					{ .pfn = page_to_pfn(p) };
 830	else
 831		b->page = p;
 832}
 833
 834/**
 835 * vmballoon_lock - lock or unlock a batch of pages.
 836 *
 837 * @b: pointer to the balloon.
 838 * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
 839 *
 840 * Notifies the host about ballooned pages (after inflation or deflation,
 841 * according to @ctl). If the host rejects the page put it on the
 842 * @ctl refuse list. These refused page are then released when moving to the
 843 * next size of pages.
 844 *
 845 * Note that we neither free any @page here nor put them back on the ballooned
 846 * pages list. Instead we queue it for later processing. We do that for several
 847 * reasons. First, we do not want to free the page under the lock. Second, it
 848 * allows us to unify the handling of lock and unlock. In the inflate case, the
 849 * caller will check if there are too many refused pages and release them.
 850 * Although it is not identical to the past behavior, it should not affect
 851 * performance.
 852 */
 853static int vmballoon_lock(struct vmballoon *b, struct vmballoon_ctl *ctl)
 854{
 855	unsigned long batch_status;
 856	struct page *page;
 857	unsigned int i, num_pages;
 858
 859	num_pages = ctl->n_pages;
 860	if (num_pages == 0)
 861		return 0;
 862
 863	/* communication with the host is done under the communication lock */
 864	spin_lock(&b->comm_lock);
 865
 866	i = 0;
 867	list_for_each_entry(page, &ctl->pages, lru)
 868		vmballoon_add_page(b, i++, page);
 869
 870	batch_status = vmballoon_lock_op(b, ctl->n_pages, ctl->page_size,
 871					 ctl->op);
 872
 873	/*
 874	 * Iterate over the pages in the provided list. Since we are changing
 875	 * @ctl->n_pages we are saving the original value in @num_pages and
 876	 * use this value to bound the loop.
 877	 */
 878	for (i = 0; i < num_pages; i++) {
 879		unsigned long status;
 880
 881		status = vmballoon_status_page(b, i, &page);
 882
 883		/*
 884		 * Failure of the whole batch overrides a single operation
 885		 * results.
 886		 */
 887		if (batch_status != VMW_BALLOON_SUCCESS)
 888			status = batch_status;
 889
 890		/* Continue if no error happened */
 891		if (!vmballoon_handle_one_result(b, page, ctl->page_size,
 892						 status))
 893			continue;
 894
 895		/*
 896		 * Error happened. Move the pages to the refused list and update
 897		 * the pages number.
 898		 */
 899		list_move(&page->lru, &ctl->refused_pages);
 900		ctl->n_pages--;
 901		ctl->n_refused_pages++;
 902	}
 903
 904	spin_unlock(&b->comm_lock);
 905
 906	return batch_status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
 907}
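
/*
 * Rough usage sketch (illustrative, mirroring vmballoon_inflate() below): a
 * zero return from vmballoon_lock() only means the batch command itself
 * succeeded; pages the host refused are reported through @ctl rather than
 * through the return value.
 *
 *	if (vmballoon_lock(b, &ctl))
 *		break;			// host-level failure, stop inflating
 *	if (ctl.n_refused_pages >= VMW_BALLOON_MAX_REFUSED)
 *		...			// too many refusals, try smaller pages
 */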
 908
 909/**
 910 * vmballoon_release_page_list() - Releases a page list
 911 *
 912 * @page_list: list of pages to release.
 913 * @n_pages: pointer to the number of pages.
 914 * @page_size: whether the pages in the list are 2MB (or else 4KB).
 915 *
 916 * Releases the list of pages and zeros the number of pages.
 917 */
 918static void vmballoon_release_page_list(struct list_head *page_list,
 919				       int *n_pages,
 920				       enum vmballoon_page_size_type page_size)
 921{
 922	struct page *page, *tmp;
 923
 924	list_for_each_entry_safe(page, tmp, page_list, lru) {
 925		list_del(&page->lru);
 926		__free_pages(page, vmballoon_page_order(page_size));
 927	}
 928
 929	if (n_pages)
 930		*n_pages = 0;
 931}
 932
 933
 934/*
 935 * Release pages that were allocated while attempting to inflate the
 936 * balloon but were refused by the host for one reason or another.
 937 */
 938static void vmballoon_release_refused_pages(struct vmballoon *b,
 939					    struct vmballoon_ctl *ctl)
 940{
 941	vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_REFUSED_FREE,
 942				 ctl->page_size);
 943
 944	vmballoon_release_page_list(&ctl->refused_pages, &ctl->n_refused_pages,
 945				    ctl->page_size);
 946}
 947
 948/**
 949 * vmballoon_change - retrieve the required balloon change
 950 *
 951 * @b: pointer for the balloon.
 952 *
 953 * Return: the required change for the balloon size. A positive number
 954 * indicates inflation, a negative number indicates a deflation.
 955 */
 956static int64_t vmballoon_change(struct vmballoon *b)
 957{
 958	int64_t size, target;
 959
 960	size = atomic64_read(&b->size);
 961	target = READ_ONCE(b->target);
 962
 963	/*
 964	 * We must cast first because of int sizes;
 965	 * otherwise we might get huge positives instead of negatives.
 966	 */
 967
 968	if (b->reset_required)
 969		return 0;
 970
 971	/* consider a 2MB slack on deflate, unless the balloon is emptied */
 972	if (target < size && target != 0 &&
 973	    size - target < vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE))
 974		return 0;
 975
 976	/* If an out-of-memory event recently occurred, inflation is disallowed. */
 977	if (target > size && time_before(jiffies, READ_ONCE(b->shrink_timeout)))
 978		return 0;
 979
 980	return target - size;
 981}
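
/*
 * Worked example (illustrative, assuming 4KB basic frames): a 2MB page spans
 * vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE) == 512 frames. With
 * size == 10240 and target == 10000, the difference of 240 frames is within
 * that slack, so vmballoon_change() returns 0 and no deflation is attempted.
 * A target of 0 bypasses the slack so the balloon can always be emptied.
 */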
 982
 983/**
 984 * vmballoon_enqueue_page_list() - Enqueues list of pages after inflation.
 985 *
 986 * @b: pointer to balloon.
 987 * @pages: list of pages to enqueue.
 988 * @n_pages: pointer to number of pages in list. The value is zeroed.
 989 * @page_size: whether the pages are 2MB or 4KB pages.
 990 *
 991 * Enqueues the provided list of pages on the ballooned page list, clears the
 992 * list and zeroes the provided page count.
 993 */
 994static void vmballoon_enqueue_page_list(struct vmballoon *b,
 995					struct list_head *pages,
 996					unsigned int *n_pages,
 997					enum vmballoon_page_size_type page_size)
 998{
 999	unsigned long flags;
1000	struct page *page;
1001
1002	if (page_size == VMW_BALLOON_4K_PAGE) {
1003		balloon_page_list_enqueue(&b->b_dev_info, pages);
1004	} else {
1005		/*
1006		 * Keep the huge pages in a local list which is not available
1007		 * for the balloon compaction mechanism.
1008		 */
1009		spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
1010
1011		list_for_each_entry(page, pages, lru) {
1012			vmballoon_mark_page_offline(page, VMW_BALLOON_2M_PAGE);
1013		}
1014
1015		list_splice_init(pages, &b->huge_pages);
1016		__count_vm_events(BALLOON_INFLATE, *n_pages *
1017				  vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE));
1018		spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
1019	}
1020
1021	*n_pages = 0;
1022}
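
/*
 * Accounting note (illustrative): balloon_page_list_enqueue() counts one
 * BALLOON_INFLATE event per 4KB page, so the 2MB path above scales its count
 * by vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE); enqueueing, say, 4 huge
 * pages is recorded as 4 * 512 == 2048 inflated frames.
 */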
1023
1024/**
1025 * vmballoon_dequeue_page_list() - Dequeues page lists for deflation.
1026 *
1027 * @b: pointer to balloon.
1028 * @pages: list to which the dequeued pages are added.
1029 * @n_pages: pointer to the number of dequeued pages; set by this function.
1030 * @page_size: whether the pages are 2MB or 4KB pages.
1031 * @n_req_pages: the number of requested pages.
1032 *
1033 * Dequeues the number of requested pages from the balloon for deflation. The
1034 * number of dequeued pages may be lower, if not enough pages of the
1035 * requested size are available.
1036 */
1037static void vmballoon_dequeue_page_list(struct vmballoon *b,
1038					struct list_head *pages,
1039					unsigned int *n_pages,
1040					enum vmballoon_page_size_type page_size,
1041					unsigned int n_req_pages)
1042{
1043	struct page *page, *tmp;
1044	unsigned int i = 0;
1045	unsigned long flags;
1046
1047	/* In the case of 4k pages, use the compaction infrastructure */
1048	if (page_size == VMW_BALLOON_4K_PAGE) {
1049		*n_pages = balloon_page_list_dequeue(&b->b_dev_info, pages,
1050						     n_req_pages);
1051		return;
1052	}
1053
1054	/* 2MB pages */
1055	spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
1056	list_for_each_entry_safe(page, tmp, &b->huge_pages, lru) {
1057		vmballoon_mark_page_online(page, VMW_BALLOON_2M_PAGE);
1058
1059		list_move(&page->lru, pages);
1060		if (++i == n_req_pages)
1061			break;
1062	}
1063
1064	__count_vm_events(BALLOON_DEFLATE,
1065			  i * vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE));
1066	spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
1067	*n_pages = i;
1068}
1069
1070/**
1071 * vmballoon_split_refused_pages() - Split the 2MB refused pages to 4k.
1072 *
1073 * If inflation of 2MB pages was denied by the hypervisor, it is likely to be
1074 * due to one or a few 4KB pages. These 2MB pages may keep being allocated
1075 * and then refused. To prevent this case, this function splits the refused
1076 * pages into 4KB pages and adds them to the @prealloc_pages list.
1077 *
1078 * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
1079 */
1080static void vmballoon_split_refused_pages(struct vmballoon_ctl *ctl)
1081{
1082	struct page *page, *tmp;
1083	unsigned int i, order;
1084
1085	order = vmballoon_page_order(ctl->page_size);
1086
1087	list_for_each_entry_safe(page, tmp, &ctl->refused_pages, lru) {
1088		list_del(&page->lru);
1089		split_page(page, order);
1090		for (i = 0; i < (1 << order); i++)
1091			list_add(&page[i].lru, &ctl->prealloc_pages);
1092	}
1093	ctl->n_refused_pages = 0;
1094}
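
/*
 * Worked example (illustrative): a refused 2MB page has
 * order == vmballoon_page_order(VMW_BALLOON_2M_PAGE) == 9, so split_page()
 * turns it into 1 << 9 == 512 independent order-0 (4KB) pages, each of which
 * lands on @ctl->prealloc_pages and can be offered to the host individually
 * during the following 4KB inflation rounds.
 */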
1095
1096/**
1097 * vmballoon_inflate() - Inflate the balloon towards its target size.
1098 *
1099 * @b: pointer to the balloon.
1100 */
1101static void vmballoon_inflate(struct vmballoon *b)
1102{
1103	int64_t to_inflate_frames;
1104	struct vmballoon_ctl ctl = {
1105		.pages = LIST_HEAD_INIT(ctl.pages),
1106		.refused_pages = LIST_HEAD_INIT(ctl.refused_pages),
1107		.prealloc_pages = LIST_HEAD_INIT(ctl.prealloc_pages),
1108		.page_size = b->max_page_size,
1109		.op = VMW_BALLOON_INFLATE
1110	};
1111
1112	while ((to_inflate_frames = vmballoon_change(b)) > 0) {
1113		unsigned int to_inflate_pages, page_in_frames;
1114		int alloc_error, lock_error = 0;
1115
1116		VM_BUG_ON(!list_empty(&ctl.pages));
1117		VM_BUG_ON(ctl.n_pages != 0);
1118
1119		page_in_frames = vmballoon_page_in_frames(ctl.page_size);
1120
1121		to_inflate_pages = min_t(unsigned long, b->batch_max_pages,
1122					 DIV_ROUND_UP_ULL(to_inflate_frames,
1123							  page_in_frames));
1124
1125		/* Start by allocating */
1126		alloc_error = vmballoon_alloc_page_list(b, &ctl,
1127							to_inflate_pages);
1128
1129		/* Actually lock the pages by telling the hypervisor */
1130		lock_error = vmballoon_lock(b, &ctl);
1131
1132		/*
1133		 * If an error indicates that something serious went wrong,
1134		 * stop the inflation.
1135		 */
1136		if (lock_error)
1137			break;
1138
1139		/* Update the balloon size */
1140		atomic64_add(ctl.n_pages * page_in_frames, &b->size);
1141
1142		vmballoon_enqueue_page_list(b, &ctl.pages, &ctl.n_pages,
1143					    ctl.page_size);
1144
1145		/*
1146		 * If allocation failed or the number of refused pages exceeds
1147		 * the maximum allowed, move to the next page size.
1148		 */
1149		if (alloc_error ||
1150		    ctl.n_refused_pages >= VMW_BALLOON_MAX_REFUSED) {
1151			if (ctl.page_size == VMW_BALLOON_4K_PAGE)
1152				break;
1153
1154			/*
1155			 * Split the refused pages to 4k. This will also empty
1156			 * the refused pages list.
1157			 */
1158			vmballoon_split_refused_pages(&ctl);
1159			ctl.page_size--;
1160		}
1161
1162		cond_resched();
1163	}
1164
1165	/*
1166	 * Release pages that were allocated while attempting to inflate the
1167	 * balloon but were refused by the host for one reason or another,
1168	 * and update the statistics.
1169	 */
1170	if (ctl.n_refused_pages != 0)
1171		vmballoon_release_refused_pages(b, &ctl);
1172
1173	vmballoon_release_page_list(&ctl.prealloc_pages, NULL, ctl.page_size);
1174}
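
/*
 * Accounting note (illustrative): the balloon size is tracked in basic
 * frames, so each successfully locked batch grows it by
 * ctl.n_pages * page_in_frames; for example, locking 64 pages of 2MB adds
 * 64 * 512 == 32768 frames (128MB) towards the target.
 */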
1175
1176/**
1177 * vmballoon_deflate() - Decrease the size of the balloon.
1178 *
1179 * @b: pointer to the balloon
1180 * @n_frames: the number of frames to deflate. If zero, automatically
1181 * calculated according to the target size.
1182 * @coordinated: whether to coordinate with the host
1183 *
1184 * Decrease the size of the balloon allowing guest to use more memory.
1185 *
1186 * Return: The number of deflated frames (i.e., basic page size units)
1187 */
1188static unsigned long vmballoon_deflate(struct vmballoon *b, uint64_t n_frames,
1189				       bool coordinated)
1190{
1191	unsigned long deflated_frames = 0;
1192	unsigned long tried_frames = 0;
1193	struct vmballoon_ctl ctl = {
1194		.pages = LIST_HEAD_INIT(ctl.pages),
1195		.refused_pages = LIST_HEAD_INIT(ctl.refused_pages),
1196		.page_size = VMW_BALLOON_4K_PAGE,
1197		.op = VMW_BALLOON_DEFLATE
1198	};
1199
1200	/* free pages to reach target */
1201	while (true) {
1202		unsigned int to_deflate_pages, n_unlocked_frames;
1203		unsigned int page_in_frames;
1204		int64_t to_deflate_frames;
1205		bool deflated_all;
1206
1207		page_in_frames = vmballoon_page_in_frames(ctl.page_size);
1208
1209		VM_BUG_ON(!list_empty(&ctl.pages));
1210		VM_BUG_ON(ctl.n_pages);
1211		VM_BUG_ON(!list_empty(&ctl.refused_pages));
1212		VM_BUG_ON(ctl.n_refused_pages);
1213
1214		/*
1215		 * If a specific number of frames was requested, we try to
1216		 * deflate that many frames. Otherwise, deflation is
1217		 * performed according to the target and balloon size.
1218		 */
1219		to_deflate_frames = n_frames ? n_frames - tried_frames :
1220					       -vmballoon_change(b);
1221
1222		/* break if no work to do */
1223		if (to_deflate_frames <= 0)
1224			break;
1225
1226		/*
1227		 * Calculate the number of frames based on current page size,
1228		 * but limit the deflated frames to a single chunk
1229		 */
1230		to_deflate_pages = min_t(unsigned long, b->batch_max_pages,
1231					 DIV_ROUND_UP_ULL(to_deflate_frames,
1232							  page_in_frames));
1233
1234		/* First take the pages from the balloon pages. */
1235		vmballoon_dequeue_page_list(b, &ctl.pages, &ctl.n_pages,
1236					    ctl.page_size, to_deflate_pages);
1237
1238		/*
1239		 * Before pages are moved to the refused list, count their
1240		 * frames as frames that we tried to deflate.
1241		 */
1242		tried_frames += ctl.n_pages * page_in_frames;
1243
1244		/*
1245		 * Unlock the pages by communicating with the hypervisor if the
1246		 * communication is coordinated (i.e., not a pop). We ignore the
1247		 * return code. Instead we check whether we managed to unlock
1248		 * all the pages. If we failed, we will move to the next
1249		 * page size and eventually try again later.
1250		 */
1251		if (coordinated)
1252			vmballoon_lock(b, &ctl);
1253
1254		/*
1255		 * Check if we deflated enough. We will move to the next page
1256		 * size if we did not manage to do so. This calculation takes
1257		 * place now, as once the pages are released, the number of
1258		 * pages is zeroed.
1259		 */
1260		deflated_all = (ctl.n_pages == to_deflate_pages);
1261
1262		/* Update local and global counters */
1263		n_unlocked_frames = ctl.n_pages * page_in_frames;
1264		atomic64_sub(n_unlocked_frames, &b->size);
1265		deflated_frames += n_unlocked_frames;
1266
1267		vmballoon_stats_page_add(b, VMW_BALLOON_PAGE_STAT_FREE,
1268					 ctl.page_size, ctl.n_pages);
1269
1270		/* free the ballooned pages */
1271		vmballoon_release_page_list(&ctl.pages, &ctl.n_pages,
1272					    ctl.page_size);
1273
1274		/* Return the refused pages to the ballooned list. */
1275		vmballoon_enqueue_page_list(b, &ctl.refused_pages,
1276					    &ctl.n_refused_pages,
1277					    ctl.page_size);
1278
1279		/* If we failed to unlock all the pages, move to next size. */
1280		if (!deflated_all) {
1281			if (ctl.page_size == b->max_page_size)
1282				break;
1283			ctl.page_size++;
1284		}
1285
1286		cond_resched();
1287	}
1288
1289	return deflated_frames;
1290}
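
/*
 * Bookkeeping note (illustrative): @tried_frames counts every frame that was
 * dequeued and offered for unlocking, while @deflated_frames counts only the
 * frames the host actually released. When a specific @n_frames was requested
 * (e.g., by the shrinker), bounding the loop by @tried_frames prevents
 * retrying refused pages forever.
 */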
1291
1292/**
1293 * vmballoon_deinit_batching - disables batching mode.
1294 *
1295 * @b: pointer to &struct vmballoon.
1296 *
1297 * Disables batching by deallocating the page used for communication with the
1298 * hypervisor and disabling the static key to indicate that batching is off.
1299 */
1300static void vmballoon_deinit_batching(struct vmballoon *b)
1301{
1302	free_page((unsigned long)b->batch_page);
1303	b->batch_page = NULL;
1304	static_branch_disable(&vmw_balloon_batching);
1305	b->batch_max_pages = 1;
1306}
1307
1308/**
1309 * vmballoon_init_batching - enable batching mode.
1310 *
1311 * @b: pointer to &struct vmballoon.
1312 *
1313 * Enables batching by allocating a page for communication with the hypervisor
1314 * and enabling the static_key to use batching.
1315 *
1316 * Return: zero on success or an appropriate error-code.
1317 */
1318static int vmballoon_init_batching(struct vmballoon *b)
1319{
1320	struct page *page;
1321
1322	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
1323	if (!page)
1324		return -ENOMEM;
1325
1326	b->batch_page = page_address(page);
1327	b->batch_max_pages = PAGE_SIZE / sizeof(struct vmballoon_batch_entry);
1328
1329	static_branch_enable(&vmw_balloon_batching);
1330
1331	return 0;
1332}
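
/*
 * Worked example (illustrative, assuming a 4KB PAGE_SIZE and an 8-byte
 * struct vmballoon_batch_entry): the communication page holds
 * 4096 / 8 == 512 entries, so a single batched LOCK/UNLOCK command can
 * describe up to 512 pages, i.e., 2MB worth of 4KB pages or 1GB worth of
 * 2MB pages.
 */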
1333
1334/*
1335 * Receive notification and resize balloon
1336 */
1337static void vmballoon_doorbell(void *client_data)
1338{
1339	struct vmballoon *b = client_data;
1340
1341	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_DOORBELL);
1342
1343	mod_delayed_work(system_freezable_wq, &b->dwork, 0);
1344}
1345
1346/*
1347 * Clean up vmci doorbell
1348 */
1349static void vmballoon_vmci_cleanup(struct vmballoon *b)
1350{
1351	vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
1352		      VMCI_INVALID_ID, VMCI_INVALID_ID);
1353
1354	if (!vmci_handle_is_invalid(b->vmci_doorbell)) {
1355		vmci_doorbell_destroy(b->vmci_doorbell);
1356		b->vmci_doorbell = VMCI_INVALID_HANDLE;
1357	}
1358}
1359
1360/**
1361 * vmballoon_vmci_init - Initialize vmci doorbell.
1362 *
1363 * @b: pointer to the balloon.
1364 *
1365 * Return: zero on success or when the wakeup command is not supported;
1366 * an error code otherwise.
1367 *
1368 * Initializes the vmci doorbell, to get notified as soon as the balloon target changes.
1369 */
1370static int vmballoon_vmci_init(struct vmballoon *b)
1371{
1372	unsigned long error;
1373
1374	if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0)
1375		return 0;
1376
1377	error = vmci_doorbell_create(&b->vmci_doorbell, VMCI_FLAG_DELAYED_CB,
1378				     VMCI_PRIVILEGE_FLAG_RESTRICTED,
1379				     vmballoon_doorbell, b);
1380
1381	if (error != VMCI_SUCCESS)
1382		goto fail;
1383
1384	error =	__vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
1385				b->vmci_doorbell.context,
1386				b->vmci_doorbell.resource, NULL);
1387
1388	if (error != VMW_BALLOON_SUCCESS)
1389		goto fail;
1390
1391	return 0;
1392fail:
1393	vmballoon_vmci_cleanup(b);
1394	return -EIO;
1395}
1396
1397/**
1398 * vmballoon_pop - Quickly release all pages allocated for the balloon.
1399 *
1400 * @b: pointer to the balloon.
1401 *
1402 * This function is called when the host decides to "reset" the balloon for
1403 * one reason or another. Unlike a normal "deflate" we do not (shall not)
1404 * notify the host of the pages being released.
1405 */
1406static void vmballoon_pop(struct vmballoon *b)
1407{
1408	unsigned long size;
1409
1410	while ((size = atomic64_read(&b->size)))
1411		vmballoon_deflate(b, size, false);
1412}
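
/*
 * Usage note (illustrative): vmballoon_pop() reuses vmballoon_deflate() with
 * @coordinated set to false, so the pages are freed back to the guest without
 * any UNLOCK traffic to the host; the loop simply repeats until
 * atomic64_read(&b->size) drops to zero.
 */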
1413
1414/*
1415 * Perform the standard reset sequence by popping the balloon (in case it
1416 * is not empty) and then restarting the protocol. This operation normally
1417 * happens when the host responds with VMW_BALLOON_ERROR_RESET to a command.
1418 */
1419static void vmballoon_reset(struct vmballoon *b)
1420{
1421	int error;
1422
1423	down_write(&b->conf_sem);
1424
1425	vmballoon_vmci_cleanup(b);
1426
1427	/* free all pages, skipping monitor unlock */
1428	vmballoon_pop(b);
1429
1430	if (vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
1431		goto unlock;
1432
1433	if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
1434		if (vmballoon_init_batching(b)) {
1435			/*
1436			 * We failed to initialize batching, inform the monitor
1437			 * about it by sending a null capability.
1438			 *
1439			 * The guest will retry in one second.
1440			 */
1441			vmballoon_send_start(b, 0);
1442			goto unlock;
1443		}
1444	} else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) {
1445		vmballoon_deinit_batching(b);
1446	}
1447
1448	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_RESET);
1449	b->reset_required = false;
1450
1451	error = vmballoon_vmci_init(b);
1452	if (error)
1453		pr_err_once("failed to initialize vmci doorbell\n");
1454
1455	if (vmballoon_send_guest_id(b))
1456		pr_err_once("failed to send guest ID to the host\n");
1457
1458unlock:
1459	up_write(&b->conf_sem);
1460}
1461
1462/**
1463 * vmballoon_work - periodic balloon worker for reset, inflation and deflation.
1464 *
1465 * @work: pointer to the &work_struct which is provided by the workqueue.
1466 *
1467 * Resets the protocol if needed, gets the new size and adjusts balloon as
1468 * needed. Repeat in 1 sec.
1469 */
1470static void vmballoon_work(struct work_struct *work)
1471{
1472	struct delayed_work *dwork = to_delayed_work(work);
1473	struct vmballoon *b = container_of(dwork, struct vmballoon, dwork);
1474	int64_t change = 0;
1475
1476	if (b->reset_required)
1477		vmballoon_reset(b);
1478
1479	down_read(&b->conf_sem);
1480
1481	/*
1482	 * Update the stats while holding the semaphore to ensure that
1483	 * @stats_enabled is consistent with whether the stats are actually
1484	 * enabled
1485	 */
1486	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_TIMER);
1487
1488	if (!vmballoon_send_get_target(b))
1489		change = vmballoon_change(b);
1490
1491	if (change != 0) {
1492		pr_debug("%s - size: %llu, target %lu\n", __func__,
1493			 atomic64_read(&b->size), READ_ONCE(b->target));
1494
1495		if (change > 0)
1496			vmballoon_inflate(b);
1497		else  /* (change < 0) */
1498			vmballoon_deflate(b, 0, true);
1499	}
1500
1501	up_read(&b->conf_sem);
1502
1503	/*
1504	 * We are using a freezable workqueue so that balloon operations are
1505	 * stopped while the system transitions to/from sleep/hibernation.
1506	 */
1507	queue_delayed_work(system_freezable_wq,
1508			   dwork, round_jiffies_relative(HZ));
1509
1510}
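
/*
 * Timing note (illustrative): round_jiffies_relative(HZ) re-arms the worker
 * roughly once per second, rounded to a whole second so that timer wakeups
 * from different subsystems can coalesce and the (virtual) CPU can stay idle
 * longer between them.
 */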
1511
1512/**
1513 * vmballoon_shrinker_scan() - deflate the balloon due to memory pressure.
1514 * @shrinker: pointer to the balloon shrinker.
1515 * @sc: page reclaim information.
1516 *
1517 * Returns: number of pages that were freed during deflation.
1518 */
1519static unsigned long vmballoon_shrinker_scan(struct shrinker *shrinker,
1520					     struct shrink_control *sc)
1521{
1522	struct vmballoon *b = &balloon;
1523	unsigned long deflated_frames;
1524
1525	pr_debug("%s - size: %llu", __func__, atomic64_read(&b->size));
1526
1527	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_SHRINK);
1528
1529	/*
1530	 * If the lock is also contended for read, we cannot easily reclaim and
1531	 * we bail out.
1532	 */
1533	if (!down_read_trylock(&b->conf_sem))
1534		return 0;
1535
1536	deflated_frames = vmballoon_deflate(b, sc->nr_to_scan, true);
1537
1538	vmballoon_stats_gen_add(b, VMW_BALLOON_STAT_SHRINK_FREE,
1539				deflated_frames);
1540
1541	/*
1542	 * Delay future inflation for some time to mitigate the situations in
1543	 * which balloon continuously grows and shrinks. Use WRITE_ONCE() since
1544	 * the access is asynchronous.
1545	 */
1546	WRITE_ONCE(b->shrink_timeout, jiffies + HZ * VMBALLOON_SHRINK_DELAY);
1547
1548	up_read(&b->conf_sem);
1549
1550	return deflated_frames;
1551}
1552
1553/**
1554 * vmballoon_shrinker_count() - return the number of ballooned pages.
1555 * @shrinker: pointer to the balloon shrinker.
1556 * @sc: page reclaim information.
1557 *
1558 * Returns: number of 4k pages that are allocated for the balloon and can
1559 *	    therefore be reclaimed under pressure.
1560 */
1561static unsigned long vmballoon_shrinker_count(struct shrinker *shrinker,
1562					      struct shrink_control *sc)
1563{
1564	struct vmballoon *b = &balloon;
1565
1566	return atomic64_read(&b->size);
1567}
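
/*
 * Unit note (illustrative): @b->size is maintained in basic frame units
 * (4KB pages), so the count returned here is directly comparable with
 * @sc->nr_to_scan and with the number of frames that
 * vmballoon_shrinker_scan() reports as freed.
 */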
1568
1569static void vmballoon_unregister_shrinker(struct vmballoon *b)
1570{
1571	if (b->shrinker_registered)
1572		unregister_shrinker(&b->shrinker);
1573	b->shrinker_registered = false;
1574}
1575
1576static int vmballoon_register_shrinker(struct vmballoon *b)
1577{
1578	int r;
1579
1580	/* Do nothing if the shrinker is not enabled */
1581	if (!vmwballoon_shrinker_enable)
1582		return 0;
1583
1584	b->shrinker.scan_objects = vmballoon_shrinker_scan;
1585	b->shrinker.count_objects = vmballoon_shrinker_count;
1586	b->shrinker.seeks = DEFAULT_SEEKS;
1587
1588	r = register_shrinker(&b->shrinker, "vmw-balloon");
1589
1590	if (r == 0)
1591		b->shrinker_registered = true;
1592
1593	return r;
1594}
1595
1596/*
1597 * DEBUGFS Interface
1598 */
1599#ifdef CONFIG_DEBUG_FS
1600
1601static const char * const vmballoon_stat_page_names[] = {
1602	[VMW_BALLOON_PAGE_STAT_ALLOC]		= "alloc",
1603	[VMW_BALLOON_PAGE_STAT_ALLOC_FAIL]	= "allocFail",
1604	[VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC]	= "errAlloc",
1605	[VMW_BALLOON_PAGE_STAT_REFUSED_FREE]	= "errFree",
1606	[VMW_BALLOON_PAGE_STAT_FREE]		= "free"
1607};
1608
1609static const char * const vmballoon_stat_names[] = {
1610	[VMW_BALLOON_STAT_TIMER]		= "timer",
1611	[VMW_BALLOON_STAT_DOORBELL]		= "doorbell",
1612	[VMW_BALLOON_STAT_RESET]		= "reset",
1613	[VMW_BALLOON_STAT_SHRINK]		= "shrink",
1614	[VMW_BALLOON_STAT_SHRINK_FREE]		= "shrinkFree"
1615};
1616
1617static int vmballoon_enable_stats(struct vmballoon *b)
1618{
1619	int r = 0;
1620
1621	down_write(&b->conf_sem);
1622
1623	/* did we somehow race with another reader which enabled stats? */
1624	if (b->stats)
1625		goto out;
1626
1627	b->stats = kzalloc(sizeof(*b->stats), GFP_KERNEL);
1628
1629	if (!b->stats) {
1630		/* allocation failed */
1631		r = -ENOMEM;
1632		goto out;
1633	}
1634	static_key_enable(&balloon_stat_enabled.key);
1635out:
1636	up_write(&b->conf_sem);
1637	return r;
1638}
1639
1640/**
1641 * vmballoon_debug_show - shows statistics of balloon operations.
1642 * @f: pointer to the &struct seq_file.
1643 * @offset: ignored.
1644 *
1645 * Provides the statistics that can be accessed via the vmmemctl file in debugfs.
1646 * To avoid the overhead - mainly that of memory - of collecting the statistics,
1647 * we only collect statistics after the first time the counters are read.
1648 *
1649 * Return: zero on success or an error code.
1650 */
1651static int vmballoon_debug_show(struct seq_file *f, void *offset)
1652{
1653	struct vmballoon *b = f->private;
1654	int i, j;
1655
1656	/* enables stats if they are disabled */
1657	if (!b->stats) {
1658		int r = vmballoon_enable_stats(b);
1659
1660		if (r)
1661			return r;
1662	}
1663
1664	/* format capabilities info */
1665	seq_printf(f, "%-22s: %#16x\n", "balloon capabilities",
1666		   VMW_BALLOON_CAPABILITIES);
1667	seq_printf(f, "%-22s: %#16lx\n", "used capabilities", b->capabilities);
1668	seq_printf(f, "%-22s: %16s\n", "is resetting",
1669		   b->reset_required ? "y" : "n");
1670
1671	/* format size info */
1672	seq_printf(f, "%-22s: %16lu\n", "target", READ_ONCE(b->target));
1673	seq_printf(f, "%-22s: %16llu\n", "current", atomic64_read(&b->size));
1674
1675	for (i = 0; i < VMW_BALLOON_CMD_NUM; i++) {
1676		if (vmballoon_cmd_names[i] == NULL)
1677			continue;
1678
1679		seq_printf(f, "%-22s: %16llu (%llu failed)\n",
1680			   vmballoon_cmd_names[i],
1681			   atomic64_read(&b->stats->ops[i][VMW_BALLOON_OP_STAT]),
1682			   atomic64_read(&b->stats->ops[i][VMW_BALLOON_OP_FAIL_STAT]));
1683	}
1684
1685	for (i = 0; i < VMW_BALLOON_STAT_NUM; i++)
1686		seq_printf(f, "%-22s: %16llu\n",
1687			   vmballoon_stat_names[i],
1688			   atomic64_read(&b->stats->general_stat[i]));
1689
1690	for (i = 0; i < VMW_BALLOON_PAGE_STAT_NUM; i++) {
1691		for (j = 0; j < VMW_BALLOON_NUM_PAGE_SIZES; j++)
1692			seq_printf(f, "%-18s(%s): %16llu\n",
1693				   vmballoon_stat_page_names[i],
1694				   vmballoon_page_size_names[j],
1695				   atomic64_read(&b->stats->page_stat[i][j]));
1696	}
1697
1698	return 0;
1699}
1700
1701DEFINE_SHOW_ATTRIBUTE(vmballoon_debug);
1702
1703static void __init vmballoon_debugfs_init(struct vmballoon *b)
1704{
1705	debugfs_create_file("vmmemctl", S_IRUGO, NULL, b,
1706			    &vmballoon_debug_fops);
1707}
1708
1709static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
1710{
1711	static_key_disable(&balloon_stat_enabled.key);
1712	debugfs_remove(debugfs_lookup("vmmemctl", NULL));
1713	kfree(b->stats);
1714	b->stats = NULL;
1715}
1716
1717#else
1718
1719static inline void vmballoon_debugfs_init(struct vmballoon *b)
1720{
1721}
1722
1723static inline void vmballoon_debugfs_exit(struct vmballoon *b)
1724{
1725}
1726
1727#endif	/* CONFIG_DEBUG_FS */
1728
1729
1730#ifdef CONFIG_BALLOON_COMPACTION
1731/**
1732 * vmballoon_migratepage() - migrates a balloon page.
1733 * @b_dev_info: balloon device information descriptor.
1734 * @newpage: the page to which @page should be migrated.
1735 * @page: a ballooned page that should be migrated.
1736 * @mode: migration mode, ignored.
1737 *
1738 * This function is really open-coded, but that is according to the interface
1739 * that balloon_compaction provides.
1740 *
1741 * Return: zero on success, -EAGAIN when migration cannot be performed
1742 *	   momentarily, and -EBUSY if migration failed and should be retried
1743 *	   with that specific page.
1744 */
1745static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
1746				 struct page *newpage, struct page *page,
1747				 enum migrate_mode mode)
1748{
1749	unsigned long status, flags;
1750	struct vmballoon *b;
1751	int ret;
1752
1753	b = container_of(b_dev_info, struct vmballoon, b_dev_info);
1754
1755	/*
1756	 * If the semaphore is taken, there is an ongoing configuration change
1757	 * (i.e., a balloon reset), so try again.
1758	 */
1759	if (!down_read_trylock(&b->conf_sem))
1760		return -EAGAIN;
1761
1762	spin_lock(&b->comm_lock);
1763	/*
1764	 * We must start by deflating and not inflating, as otherwise the
1765	 * hypervisor may tell us that it has enough memory and the new page is
1766	 * not needed. Since the old page is isolated, we cannot use the list
1767	 * interface to unlock it, as the LRU field is used for isolation.
1768	 * Instead, we use the native interface directly.
1769	 */
1770	vmballoon_add_page(b, 0, page);
1771	status = vmballoon_lock_op(b, 1, VMW_BALLOON_4K_PAGE,
1772				   VMW_BALLOON_DEFLATE);
1773
1774	if (status == VMW_BALLOON_SUCCESS)
1775		status = vmballoon_status_page(b, 0, &page);
1776
1777	/*
1778	 * If a failure happened, let the migration mechanism know that it
1779	 * should not retry.
1780	 */
1781	if (status != VMW_BALLOON_SUCCESS) {
1782		spin_unlock(&b->comm_lock);
1783		ret = -EBUSY;
1784		goto out_unlock;
1785	}
1786
1787	/*
1788	 * The page is isolated, so it is safe to delete it without holding
1789	 * @pages_lock. We keep holding @comm_lock since we will need it in a
1790	 * second.
1791	 */
1792	balloon_page_delete(page);
1793
1794	put_page(page);
1795
1796	/* Inflate */
1797	vmballoon_add_page(b, 0, newpage);
1798	status = vmballoon_lock_op(b, 1, VMW_BALLOON_4K_PAGE,
1799				   VMW_BALLOON_INFLATE);
1800
1801	if (status == VMW_BALLOON_SUCCESS)
1802		status = vmballoon_status_page(b, 0, &newpage);
1803
1804	spin_unlock(&b->comm_lock);
1805
1806	if (status != VMW_BALLOON_SUCCESS) {
1807		/*
1808		 * A failure happened. While we can deflate the page we just
1809		 * inflated, this deflation can also encounter an error. Instead
1810		 * we will decrease the size of the balloon to reflect the
1811		 * change and report failure.
1812		 */
1813		atomic64_dec(&b->size);
1814		ret = -EBUSY;
1815	} else {
1816		/*
1817		 * Success. Take a reference for the page, and we will add it to
1818		 * the list after acquiring the lock.
1819		 */
1820		get_page(newpage);
1821		ret = MIGRATEPAGE_SUCCESS;
1822	}
1823
1824	/* Update the balloon list under the @pages_lock */
1825	spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
1826
1827	/*
1828	 * On inflation success, we already took a reference for the @newpage.
1829	 * If we succeed just insert it to the list and update the statistics
1830	 * under the lock.
1831	 */
1832	if (ret == MIGRATEPAGE_SUCCESS) {
1833		balloon_page_insert(&b->b_dev_info, newpage);
1834		__count_vm_event(BALLOON_MIGRATE);
1835	}
1836
1837	/*
1838	 * We deflated successfully, so regardless of the inflation success, we
1839	 * need to reduce the number of isolated_pages.
1840	 */
1841	b->b_dev_info.isolated_pages--;
1842	spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
1843
1844out_unlock:
1845	up_read(&b->conf_sem);
1846	return ret;
1847}
1848
1849/**
1850 * vmballoon_compaction_init() - initializes compaction for the balloon.
1851 *
1852 * @b: pointer to the balloon.
1853 *
1854 * Registers the balloon page-migration callback with the balloon
1855 * compaction framework.
1856 *
1857 * The function cannot fail, so it performs no cleanup and has no return
1858 * value.
1859 */
1860static __init void vmballoon_compaction_init(struct vmballoon *b)
1861{
1862	b->b_dev_info.migratepage = vmballoon_migratepage;
1863}
1864
1865#else /* CONFIG_BALLOON_COMPACTION */
1866static inline void vmballoon_compaction_init(struct vmballoon *b)
1867{
1868}
1869#endif /* CONFIG_BALLOON_COMPACTION */
1870
1871static int __init vmballoon_init(void)
1872{
1873	int error;
1874
1875	/*
1876	 * Check if we are running on VMware's hypervisor and bail out
1877	 * if we are not.
1878	 */
1879	if (x86_hyper_type != X86_HYPER_VMWARE)
1880		return -ENODEV;
1881
1882	INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);
1883
1884	error = vmballoon_register_shrinker(&balloon);
1885	if (error)
1886		goto fail;
1887
1888	/*
1889	 * Initialization of compaction must be done after the call to
1890	 * balloon_devinfo_init().
1891	 */
1892	balloon_devinfo_init(&balloon.b_dev_info);
1893	vmballoon_compaction_init(&balloon);
1894
1895	INIT_LIST_HEAD(&balloon.huge_pages);
1896	spin_lock_init(&balloon.comm_lock);
1897	init_rwsem(&balloon.conf_sem);
1898	balloon.vmci_doorbell = VMCI_INVALID_HANDLE;
1899	balloon.batch_page = NULL;
1900	balloon.page = NULL;
1901	balloon.reset_required = true;
1902
1903	queue_delayed_work(system_freezable_wq, &balloon.dwork, 0);
1904
1905	vmballoon_debugfs_init(&balloon);
1906
1907	return 0;
1908fail:
1909	vmballoon_unregister_shrinker(&balloon);
1910	return error;
1911}
1912
1913/*
1914 * Using late_initcall() instead of module_init() allows the balloon to use the
1915 * VMCI doorbell even when the balloon is built into the kernel. Otherwise the
1916 * VMCI is probed only after the balloon is initialized. If the balloon is used
1917 * as a module, late_initcall() is equivalent to module_init().
1918 */
1919late_initcall(vmballoon_init);
1920
1921static void __exit vmballoon_exit(void)
1922{
1923	vmballoon_unregister_shrinker(&balloon);
1924	vmballoon_vmci_cleanup(&balloon);
1925	cancel_delayed_work_sync(&balloon.dwork);
1926
1927	vmballoon_debugfs_exit(&balloon);
1928
1929	/*
1930	 * Deallocate all reserved memory, and reset connection with monitor.
1931	 * Reset connection before deallocating memory to avoid potential for
1932	 * additional spurious resets from guest touching deallocated pages.
1933	 */
1934	vmballoon_send_start(&balloon, 0);
1935	vmballoon_pop(&balloon);
1936}
1937module_exit(vmballoon_exit);