v4.6
   1/*
   2 * Copyright (c) 2012, Microsoft Corporation.
   3 *
   4 * Author:
   5 *   K. Y. Srinivasan <kys@microsoft.com>
   6 *
   7 * This program is free software; you can redistribute it and/or modify it
   8 * under the terms of the GNU General Public License version 2 as published
   9 * by the Free Software Foundation.
  10 *
  11 * This program is distributed in the hope that it will be useful, but
  12 * WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
  14 * NON INFRINGEMENT.  See the GNU General Public License for more
  15 * details.
  16 *
  17 */
  18
  19#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  20
  21#include <linux/kernel.h>
  22#include <linux/jiffies.h>
  23#include <linux/mman.h>
  24#include <linux/delay.h>
  25#include <linux/init.h>
  26#include <linux/module.h>
  27#include <linux/slab.h>
  28#include <linux/kthread.h>
  29#include <linux/completion.h>
  30#include <linux/memory_hotplug.h>
  31#include <linux/memory.h>
  32#include <linux/notifier.h>
  33#include <linux/percpu_counter.h>
  34
  35#include <linux/hyperv.h>
  36
  37/*
  38 * We begin with definitions supporting the Dynamic Memory protocol
  39 * with the host.
  40 *
  41 * Begin protocol definitions.
  42 */
  43
  44
  45
  46/*
  47 * Protocol versions. The low word is the minor version, the high word the major
  48 * version.
  49 *
  50 * History:
  51 * Initial version 1.0
  52 * Changed to 0.1 on 2009/03/25
  53 * Changed to 0.2 on 2009/05/14
  54 * Changed to 0.3 on 2009/12/03
  55 * Changed to 1.0 on 2011/04/05
  56 */
  57
  58#define DYNMEM_MAKE_VERSION(Major, Minor) ((__u32)(((Major) << 16) | (Minor)))
  59#define DYNMEM_MAJOR_VERSION(Version) ((__u32)(Version) >> 16)
  60#define DYNMEM_MINOR_VERSION(Version) ((__u32)(Version) & 0xff)
  61
  62enum {
  63	DYNMEM_PROTOCOL_VERSION_1 = DYNMEM_MAKE_VERSION(0, 3),
  64	DYNMEM_PROTOCOL_VERSION_2 = DYNMEM_MAKE_VERSION(1, 0),
  65	DYNMEM_PROTOCOL_VERSION_3 = DYNMEM_MAKE_VERSION(2, 0),
  66
  67	DYNMEM_PROTOCOL_VERSION_WIN7 = DYNMEM_PROTOCOL_VERSION_1,
  68	DYNMEM_PROTOCOL_VERSION_WIN8 = DYNMEM_PROTOCOL_VERSION_2,
  69	DYNMEM_PROTOCOL_VERSION_WIN10 = DYNMEM_PROTOCOL_VERSION_3,
  70
  71	DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN10
  72};
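
To make the encoding concrete, here is a minimal sketch (not part of the driver) of how the macros above pack and unpack a protocol version; the values follow directly from the bit layout:

	__u32 ver = DYNMEM_MAKE_VERSION(2, 0);   /* 0x00020000 == DYNMEM_PROTOCOL_VERSION_3 */
	__u32 major = DYNMEM_MAJOR_VERSION(ver); /* high word: 2 */
	__u32 minor = DYNMEM_MINOR_VERSION(ver); /* low byte: 0; note the 0xff mask reads
						    only the low byte of the minor word */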
  73
  74
  75
  76/*
  77 * Message Types
  78 */
  79
  80enum dm_message_type {
  81	/*
  82	 * Version 0.3
  83	 */
  84	DM_ERROR			= 0,
  85	DM_VERSION_REQUEST		= 1,
  86	DM_VERSION_RESPONSE		= 2,
  87	DM_CAPABILITIES_REPORT		= 3,
  88	DM_CAPABILITIES_RESPONSE	= 4,
  89	DM_STATUS_REPORT		= 5,
  90	DM_BALLOON_REQUEST		= 6,
  91	DM_BALLOON_RESPONSE		= 7,
  92	DM_UNBALLOON_REQUEST		= 8,
  93	DM_UNBALLOON_RESPONSE		= 9,
  94	DM_MEM_HOT_ADD_REQUEST		= 10,
  95	DM_MEM_HOT_ADD_RESPONSE		= 11,
  96	DM_VERSION_03_MAX		= 11,
  97	/*
  98	 * Version 1.0.
  99	 */
 100	DM_INFO_MESSAGE			= 12,
 101	DM_VERSION_1_MAX		= 12
 102};
 103
 104
 105/*
 106 * Structures defining the dynamic memory management
 107 * protocol.
 108 */
 109
 110union dm_version {
 111	struct {
 112		__u16 minor_version;
 113		__u16 major_version;
 114	};
 115	__u32 version;
 116} __packed;
 117
 118
 119union dm_caps {
 120	struct {
 121		__u64 balloon:1;
 122		__u64 hot_add:1;
 123		/*
 124		 * To support guests that may have alignment
 125		 * limitations on hot-add, the guest can specify
 126		 * its alignment requirements; a value of n
 127		 * represents an alignment of 2^n megabytes.
 128		 */
 129		__u64 hot_add_alignment:4;
 130		__u64 reservedz:58;
 131	} cap_bits;
 132	__u64 caps;
 133} __packed;
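
As an illustration of the hot_add_alignment encoding just described (a value of n advertises an alignment of 2^n megabytes), this sketch shows how a guest requiring 128 MB alignment would fill in the capability bits; the probe code later in this file does exactly this with the value 7:

	union dm_caps caps = { .caps = 0 };
	caps.cap_bits.balloon = 1;
	caps.cap_bits.hot_add = 1;
	caps.cap_bits.hot_add_alignment = 7;	/* 2^7 MB == 128 MB alignment */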
 134
 135union dm_mem_page_range {
 136	struct  {
 137		/*
 138		 * The PFN number of the first page in the range.
 139		 * 40 bits is the architectural limit of a PFN
 140		 * number for AMD64.
 141		 */
 142		__u64 start_page:40;
 143		/*
 144		 * The number of pages in the range.
 145		 */
 146		__u64 page_cnt:24;
 147	} finfo;
 148	__u64  page_range;
 149} __packed;
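
A short sketch (illustrative values only) of packing one range into the 40-bit PFN / 24-bit count layout above; both balloon responses and hot-add requests describe memory this way:

	union dm_mem_page_range range = { .page_range = 0 };
	range.finfo.start_page = 0x100000;	/* PFN of the first page; must fit in 40 bits */
	range.finfo.page_cnt = 512;		/* 512 x 4K pages == 2 MB */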
 150
 151
 152
 153/*
 154 * The header for all dynamic memory messages:
 155 *
 156 * type: Type of the message.
 157 * size: Size of the message in bytes, including the header.
 158 * trans_id: The guest is responsible for manufacturing this ID.
 159 */
 160
 161struct dm_header {
 162	__u16 type;
 163	__u16 size;
 164	__u32 trans_id;
 165} __packed;
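
Every concrete message below embeds this header first. A minimal sketch of filling one in, mirroring what the driver does throughout this file (the global trans_id counter is defined further down):

	struct dm_status status;
	status.hdr.type = DM_STATUS_REPORT;
	status.hdr.size = sizeof(struct dm_status);	/* total size, header included */
	status.hdr.trans_id = atomic_inc_return(&trans_id);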
 166
 167/*
 168 * A generic message format for dynamic memory.
 169 * Specific message formats are defined later in the file.
 170 */
 171
 172struct dm_message {
 173	struct dm_header hdr;
 174	__u8 data[]; /* enclosed message */
 175} __packed;
 176
 177
 178/*
 179 * Specific message types supporting the dynamic memory protocol.
 180 */
 181
 182/*
 183 * Version negotiation message. Sent from the guest to the host.
 184 * The guest is free to try different versions until the host
 185 * accepts the version.
 186 *
 187 * dm_version: The protocol version requested.
 188 * is_last_attempt: If TRUE, this is the last version guest will request.
 189 * reservedz: Reserved field, set to zero.
 190 */
 191
 192struct dm_version_request {
 193	struct dm_header hdr;
 194	union dm_version version;
 195	__u32 is_last_attempt:1;
 196	__u32 reservedz:31;
 197} __packed;
 198
 199/*
 200 * Version response message; sent from the host to the guest and
 201 * indicates whether the host has accepted the version sent by the guest.
 202 *
 203 * is_accepted: If TRUE, host has accepted the version and the guest
 204 * should proceed to the next stage of the protocol. FALSE indicates that
 205 * guest should re-try with a different version.
 206 *
 207 * reservedz: Reserved field, set to zero.
 208 */
 209
 210struct dm_version_response {
 211	struct dm_header hdr;
 212	__u64 is_accepted:1;
 213	__u64 reservedz:63;
 214} __packed;
 215
 216/*
 217 * Message reporting capabilities. This is sent from the guest to the
 218 * host.
 219 */
 220
 221struct dm_capabilities {
 222	struct dm_header hdr;
 223	union dm_caps caps;
 224	__u64 min_page_cnt;
 225	__u64 max_page_number;
 226} __packed;
 227
 228/*
 229 * Response to the capabilities message. This is sent from the host to the
 230 * guest. This message notifies if the host has accepted the guest's
 231 * capabilities. If the host has not accepted, the guest must shut down
 232 * the service.
 233 *
 234 * is_accepted: Indicates if the host has accepted guest's capabilities.
 235 * reservedz: Must be 0.
 236 */
 237
 238struct dm_capabilities_resp_msg {
 239	struct dm_header hdr;
 240	__u64 is_accepted:1;
 241	__u64 reservedz:63;
 242} __packed;
 243
 244/*
 245 * This message is used to report memory pressure from the guest.
 246 * This message is not part of any transaction and there is no
 247 * response to this message.
 248 *
 249 * num_avail: Available memory in pages.
 250 * num_committed: Committed memory in pages.
 251 * page_file_size: The accumulated size of all page files
 252 *		   in the system in pages.
 253 * zero_free: The number of zero and free pages.
 254 * page_file_writes: The writes to the page file in pages.
 255 * io_diff: An indicator of file cache efficiency or page file activity,
 256 *	    calculated as File Cache Page Fault Count - Page Read Count.
 257 *	    This value is in pages.
 258 *
 259 * Some of these metrics are Windows specific and fortunately
 260 * the algorithm on the host side that computes the guest memory
 261 * pressure only uses the num_committed value.
 262 */
 263
 264struct dm_status {
 265	struct dm_header hdr;
 266	__u64 num_avail;
 267	__u64 num_committed;
 268	__u64 page_file_size;
 269	__u64 zero_free;
 270	__u32 page_file_writes;
 271	__u32 io_diff;
 272} __packed;
 273
 274
 275/*
 276 * Message to ask the guest to allocate memory - balloon up message.
 277 * This message is sent from the host to the guest. The guest may not be
 278 * able to allocate as much memory as requested.
 279 *
 280 * num_pages: number of pages to allocate.
 281 */
 282
 283struct dm_balloon {
 284	struct dm_header hdr;
 285	__u32 num_pages;
 286	__u32 reservedz;
 287} __packed;
 288
 289
 290/*
 291 * Balloon response message; this message is sent from the guest
 292 * to the host in response to the balloon message.
 293 *
 294 * reservedz: Reserved; must be set to zero.
 295 * more_pages: If FALSE, this is the last message of the transaction.
 296 * if TRUE, there will be at least one more message from the guest.
 297 *
 298 * range_count: The number of ranges in the range array.
 299 *
 300 * range_array: An array of page ranges returned to the host.
 301 *
 302 */
 303
 304struct dm_balloon_response {
 305	struct dm_header hdr;
 306	__u32 reservedz;
 307	__u32 more_pages:1;
 308	__u32 range_count:31;
 309	union dm_mem_page_range range_array[];
 310} __packed;
 311
 312/*
 313 * Un-balloon message; this message is sent from the host
 314 * to the guest to give the guest more memory.
 315 *
 316 * more_pages: If FALSE, this is the last message of the transaction.
 317 * if TRUE, there will be at least one more message from the host.
 318 *
 319 * reservedz: Reserved; must be set to zero.
 320 *
 321 * range_count: The number of ranges in the range array.
 322 *
 323 * range_array: An array of page ranges returned to the host.
 324 *
 325 */
 326
 327struct dm_unballoon_request {
 328	struct dm_header hdr;
 329	__u32 more_pages:1;
 330	__u32 reservedz:31;
 331	__u32 range_count;
 332	union dm_mem_page_range range_array[];
 333} __packed;
 334
 335/*
 336 * Un-balloon response message; this message is sent from the guest
 337 * to the host in response to an unballoon request.
 338 *
 339 */
 340
 341struct dm_unballoon_response {
 342	struct dm_header hdr;
 343} __packed;
 344
 345
 346/*
 347 * Hot add request message. Message sent from the host to the guest.
 348 *
 349 * mem_range: Memory range to hot add.
 350 *
 351 * On Linux we currently don't support this since we cannot hot add
 352 * arbitrary granularity of memory.
 353 */
 354
 355struct dm_hot_add {
 356	struct dm_header hdr;
 357	union dm_mem_page_range range;
 358} __packed;
 359
 360/*
 361 * Hot add response message.
 362 * This message is sent by the guest to report the status of a hot add request.
 363 * If page_count is less than the requested page count, then the host should
 364 * assume all further hot add requests will fail, since this indicates that
 365 * the guest has hit an upper physical memory barrier.
 366 *
 367 * Hot adds may also fail due to low resources; in this case, the guest must
 368 * not complete this message until the hot add can succeed, and the host must
 369 * not send a new hot add request until the response is sent.
 370 * If the VSC fails to hot add memory DYNMEM_NUMBER_OF_UNSUCCESSFUL_HOTADD_ATTEMPTS
 371 * times, it fails the request.
 372 *
 373 *
 374 * page_count: number of pages that were successfully hot added.
 375 *
 376 * result: result of the operation 1: success, 0: failure.
 377 *
 378 */
 379
 380struct dm_hot_add_response {
 381	struct dm_header hdr;
 382	__u32 page_count;
 383	__u32 result;
 384} __packed;
 385
 386/*
 387 * Types of information sent from host to the guest.
 388 */
 389
 390enum dm_info_type {
 391	INFO_TYPE_MAX_PAGE_CNT = 0,
 392	MAX_INFO_TYPE
 393};
 394
 395
 396/*
 397 * Header for the information message.
 398 */
 399
 400struct dm_info_header {
 401	enum dm_info_type type;
 402	__u32 data_size;
 403} __packed;
 404
 405/*
 406 * This message is sent from the host to the guest to pass
 407 * some relevant information (win8 addition).
 408 *
 409 * reserved: not used.
 410 * info_size: size of the information blob.
 411 * info: information blob.
 412 */
 413
 414struct dm_info_msg {
 415	struct dm_header hdr;
 416	__u32 reserved;
 417	__u32 info_size;
 418	__u8  info[];
 419};
 420
 421/*
 422 * End protocol definitions.
 423 */
 424
 425/*
 426 * State to manage hot adding memory into the guest.
 427 * The range start_pfn : end_pfn specifies the range
 428 * that the host has asked us to hot add. The range
 429 * start_pfn : ha_end_pfn specifies the range that we have
 430 * currently hot added. We hot add in multiples of 128M
 431 * chunks; it is possible that we may not be able to bring
 432 * online all the pages in the region. The range
 433 * up to covered_end_pfn defines the pages that can
 434 * be brought online.
 435 */
 436
 437struct hv_hotadd_state {
 438	struct list_head list;
 439	unsigned long start_pfn;
 440	unsigned long covered_end_pfn;
 441	unsigned long ha_end_pfn;
 442	unsigned long end_pfn;
 443};
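
As a purely hypothetical snapshot of these fields: suppose the host asked us to hot add PFNs 0x100000 through 0x140000 and we have so far hot added one 128M chunk, only part of which is backed. With HA_CHUNK == 32768 PFNs (defined below), the state would look like:

	has->start_pfn       = 0x100000;	/* start of the host-specified range */
	has->end_pfn         = 0x140000;	/* end of the host-specified range */
	has->ha_end_pfn      = 0x108000;	/* hot added so far: one HA_CHUNK */
	has->covered_end_pfn = 0x104000;	/* pages below this can be brought online */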
 444
 445struct balloon_state {
 446	__u32 num_pages;
 447	struct work_struct wrk;
 448};
 449
 450struct hot_add_wrk {
 451	union dm_mem_page_range ha_page_range;
 452	union dm_mem_page_range ha_region_range;
 453	struct work_struct wrk;
 454};
 455
 456static bool hot_add = true;
 457static bool do_hot_add;
 458/*
 459 * Delay reporting memory pressure by
 460 * the specified number of seconds.
 461 */
 462static uint pressure_report_delay = 45;
 463
 464/*
 465 * The last time we posted a pressure report to host.
 466 */
 467static unsigned long last_post_time;
 468
 469module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
 470MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
 471
 472module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
 473MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
 474static atomic_t trans_id = ATOMIC_INIT(0);
 475
 476static int dm_ring_size = (5 * PAGE_SIZE);
 477
 478/*
 479 * Driver specific state.
 480 */
 481
 482enum hv_dm_state {
 483	DM_INITIALIZING = 0,
 484	DM_INITIALIZED,
 485	DM_BALLOON_UP,
 486	DM_BALLOON_DOWN,
 487	DM_HOT_ADD,
 488	DM_INIT_ERROR
 489};
 490
 491
 492static __u8 recv_buffer[PAGE_SIZE];
 493static __u8 *send_buffer;
 494#define PAGES_IN_2M	512
 495#define HA_CHUNK (32 * 1024)
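
(With 4K pages, HA_CHUNK of 32 * 1024 PFNs corresponds to the 128M hot-add granularity mentioned earlier, and PAGES_IN_2M to the 2M allocation unit used when ballooning up.)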
 496
 497struct hv_dynmem_device {
 498	struct hv_device *dev;
 499	enum hv_dm_state state;
 500	struct completion host_event;
 501	struct completion config_event;
 502
 503	/*
 504	 * Number of pages we have currently ballooned out.
 505	 */
 506	unsigned int num_pages_ballooned;
 507	unsigned int num_pages_onlined;
 508	unsigned int num_pages_added;
 509
 510	/*
 511	 * State to manage the ballooning (up) operation.
 512	 */
 513	struct balloon_state balloon_wrk;
 514
 515	/*
 516	 * State to execute the "hot-add" operation.
 517	 */
 518	struct hot_add_wrk ha_wrk;
 519
 520	/*
 521	 * This state tracks if the host has specified a hot-add
 522	 * region.
 523	 */
 524	bool host_specified_ha_region;
 525
 526	/*
 527	 * State to synchronize hot-add.
 528	 */
 529	struct completion  ol_waitevent;
 530	bool ha_waiting;
 531	/*
 532	 * This thread handles hot-add
 533	 * requests from the host as well as notifying
 534	 * the host with regards to memory pressure in
 535	 * the host with regard to memory pressure in
 536	 */
 537	struct task_struct *thread;
 538
 539	struct mutex ha_region_mutex;
 540
 541	/*
 542	 * A list of hot-add regions.
 543	 */
 544	struct list_head ha_region_list;
 545
 546	/*
 547	 * We start with the highest version we can support
 548	 * and downgrade based on the host; we save here the
 549	 * next version to try.
 550	 */
 551	__u32 next_version;
 552};
 553
 554static struct hv_dynmem_device dm_device;
 555
 556static void post_status(struct hv_dynmem_device *dm);
 557
 558#ifdef CONFIG_MEMORY_HOTPLUG
 559static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
 560			      void *v)
 561{
 562	struct memory_notify *mem = (struct memory_notify *)v;
 563
 564	switch (val) {
 565	case MEM_GOING_ONLINE:
 566		mutex_lock(&dm_device.ha_region_mutex);
 567		break;
 568
 569	case MEM_ONLINE:
 570		dm_device.num_pages_onlined += mem->nr_pages;
 571	case MEM_CANCEL_ONLINE:
 572		if (val == MEM_ONLINE ||
 573		    mutex_is_locked(&dm_device.ha_region_mutex))
 574			mutex_unlock(&dm_device.ha_region_mutex);
 575		if (dm_device.ha_waiting) {
 576			dm_device.ha_waiting = false;
 577			complete(&dm_device.ol_waitevent);
 578		}
 579		break;
 580
 581	case MEM_OFFLINE:
 582		mutex_lock(&dm_device.ha_region_mutex);
 583		dm_device.num_pages_onlined -= mem->nr_pages;
 584		mutex_unlock(&dm_device.ha_region_mutex);
 585		break;
 586	case MEM_GOING_OFFLINE:
 587	case MEM_CANCEL_OFFLINE:
 588		break;
 589	}
 590	return NOTIFY_OK;
 591}
 592
 593static struct notifier_block hv_memory_nb = {
 594	.notifier_call = hv_memory_notifier,
 595	.priority = 0
 596};
 597
 598
 599static void hv_bring_pgs_online(unsigned long start_pfn, unsigned long size)
 600{
 601	int i;
 602
 603	for (i = 0; i < size; i++) {
 604		struct page *pg;
 605		pg = pfn_to_page(start_pfn + i);
 606		__online_page_set_limits(pg);
 607		__online_page_increment_counters(pg);
 608		__online_page_free(pg);
 609	}
 610}
 611
 612static void hv_mem_hot_add(unsigned long start, unsigned long size,
 613				unsigned long pfn_count,
 614				struct hv_hotadd_state *has)
 615{
 616	int ret = 0;
 617	int i, nid;
 618	unsigned long start_pfn;
 619	unsigned long processed_pfn;
 620	unsigned long total_pfn = pfn_count;
 621
 622	for (i = 0; i < (size/HA_CHUNK); i++) {
 623		start_pfn = start + (i * HA_CHUNK);
 624		has->ha_end_pfn +=  HA_CHUNK;
 625
 626		if (total_pfn > HA_CHUNK) {
 627			processed_pfn = HA_CHUNK;
 628			total_pfn -= HA_CHUNK;
 629		} else {
 630			processed_pfn = total_pfn;
 631			total_pfn = 0;
 632		}
 633
 634		has->covered_end_pfn +=  processed_pfn;
 635
 636		init_completion(&dm_device.ol_waitevent);
 637		dm_device.ha_waiting = true;
 638
 639		mutex_unlock(&dm_device.ha_region_mutex);
 640		nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
 641		ret = add_memory(nid, PFN_PHYS((start_pfn)),
 642				(HA_CHUNK << PAGE_SHIFT));
 643
 644		if (ret) {
 645			pr_info("hot_add memory failed error is %d\n", ret);
 646			if (ret == -EEXIST) {
 647				/*
 648				 * This error indicates that the failure
 649				 * is not transient. This is the
 650				 * case where the guest's physical address map
 651				 * precludes hot adding memory. Stop all further
 652				 * memory hot-add.
 653				 */
 654				do_hot_add = false;
 655			}
 656			has->ha_end_pfn -= HA_CHUNK;
 657			has->covered_end_pfn -=  processed_pfn;
 658			mutex_lock(&dm_device.ha_region_mutex);
 659			break;
 660		}
 661
 662		/*
 663		 * Wait for the memory block to be onlined.
 664		 * Since the hot add has succeeded, it is ok to
 665		 * proceed even if the pages in the hot added region
 666		 * have not been "onlined" within the allowed time.
 667		 */
 668		wait_for_completion_timeout(&dm_device.ol_waitevent, 5*HZ);
 669		mutex_lock(&dm_device.ha_region_mutex);
 670		post_status(&dm_device);
 671	}
 672
 673	return;
 674}
 675
 676static void hv_online_page(struct page *pg)
 677{
 678	struct list_head *cur;
 679	struct hv_hotadd_state *has;
 680	unsigned long cur_start_pgp;
 681	unsigned long cur_end_pgp;
 682
 683	list_for_each(cur, &dm_device.ha_region_list) {
 684		has = list_entry(cur, struct hv_hotadd_state, list);
 685		cur_start_pgp = (unsigned long)pfn_to_page(has->start_pfn);
 686		cur_end_pgp = (unsigned long)pfn_to_page(has->covered_end_pfn);
 687
 688		if (((unsigned long)pg >= cur_start_pgp) &&
 689			((unsigned long)pg < cur_end_pgp)) {
 690			/*
 691			 * This frame is currently backed; online the
 692			 * page.
 693			 */
 694			__online_page_set_limits(pg);
 695			__online_page_increment_counters(pg);
 696			__online_page_free(pg);
 697		}
 698	}
 699}
 700
 701static bool pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
 702{
 703	struct list_head *cur;
 704	struct hv_hotadd_state *has;
 705	unsigned long residual, new_inc;
 706
 707	if (list_empty(&dm_device.ha_region_list))
 708		return false;
 709
 710	list_for_each(cur, &dm_device.ha_region_list) {
 711		has = list_entry(cur, struct hv_hotadd_state, list);
 712
 713		/*
 714		 * If the pfn range we are dealing with is not in the current
 715		 * "hot add block", move on.
 716		 */
 717		if ((start_pfn >= has->end_pfn))
 718			continue;
 719		/*
 720		 * If the current hot-add request extends beyond
 721		 * our current limit, extend it.
 722		 */
 723		if ((start_pfn + pfn_cnt) > has->end_pfn) {
 724			residual = (start_pfn + pfn_cnt - has->end_pfn);
 725			/*
 726			 * Extend the region by multiples of HA_CHUNK.
 727			 */
 728			new_inc = (residual / HA_CHUNK) * HA_CHUNK;
 729			if (residual % HA_CHUNK)
 730				new_inc += HA_CHUNK;
 731
 732			has->end_pfn += new_inc;
 733		}
 734
 735		/*
 736		 * If the current start pfn is not where the covered_end
 737		 * is, update it.
 738		 */
 739
 740		if (has->covered_end_pfn != start_pfn)
 741			has->covered_end_pfn = start_pfn;
 742
 743		return true;
 744
 745	}
 746
 747	return false;
 748}
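
The HA_CHUNK rounding above is plain integer arithmetic: with HA_CHUNK == 32768 PFNs, a residual of, say, 5000 PFNs gives new_inc = (5000 / 32768) * 32768 == 0, and the nonzero remainder then adds one more HA_CHUNK, so the region is extended by a full 32768 PFNs (128M).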
 749
 750static unsigned long handle_pg_range(unsigned long pg_start,
 751					unsigned long pg_count)
 752{
 753	unsigned long start_pfn = pg_start;
 754	unsigned long pfn_cnt = pg_count;
 755	unsigned long size;
 756	struct list_head *cur;
 757	struct hv_hotadd_state *has;
 758	unsigned long pgs_ol = 0;
 759	unsigned long old_covered_state;
 760
 761	if (list_empty(&dm_device.ha_region_list))
 762		return 0;
 763
 764	list_for_each(cur, &dm_device.ha_region_list) {
 765		has = list_entry(cur, struct hv_hotadd_state, list);
 766
 767		/*
 768		 * If the pfn range we are dealing with is not in the current
 769		 * "hot add block", move on.
 770		 */
 771		if ((start_pfn >= has->end_pfn))
 772			continue;
 773
 774		old_covered_state = has->covered_end_pfn;
 775
 776		if (start_pfn < has->ha_end_pfn) {
 777			/*
 778			 * This is the case where we are backing pages
 779			 * in an already hot added region. Bring
 780			 * these pages online first.
 781			 */
 782			pgs_ol = has->ha_end_pfn - start_pfn;
 783			if (pgs_ol > pfn_cnt)
 784				pgs_ol = pfn_cnt;
 785
 786			/*
 787			 * Check if the corresponding memory block is already
 788			 * online by checking its last previously backed page.
 789			 * In case it is, we need to bring the rest (which
 790			 * was not backed previously) online too.
 791			 */
 792			if (start_pfn > has->start_pfn &&
 793			    !PageReserved(pfn_to_page(start_pfn - 1)))
 794				hv_bring_pgs_online(start_pfn, pgs_ol);
 795
 796			has->covered_end_pfn +=  pgs_ol;
 797			pfn_cnt -= pgs_ol;
 798		}
 799
 800		if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) {
 801			/*
 802			 * We have some residual hot add range
 803			 * that needs to be hot added; hot add
 804			 * it now. Hot add a multiple
 805			 * of HA_CHUNK that fully covers the pages
 806			 * we have.
 807			 */
 808			size = (has->end_pfn - has->ha_end_pfn);
 809			if (pfn_cnt <= size) {
 810				size = ((pfn_cnt / HA_CHUNK) * HA_CHUNK);
 811				if (pfn_cnt % HA_CHUNK)
 812					size += HA_CHUNK;
 813			} else {
 814				pfn_cnt = size;
 815			}
 816			hv_mem_hot_add(has->ha_end_pfn, size, pfn_cnt, has);
 817		}
 818		/*
 819		 * If we managed to online any pages that were given to us,
 820		 * we declare success.
 821		 */
 822		return has->covered_end_pfn - old_covered_state;
 823
 824	}
 825
 826	return 0;
 827}
 828
 829static unsigned long process_hot_add(unsigned long pg_start,
 830					unsigned long pfn_cnt,
 831					unsigned long rg_start,
 832					unsigned long rg_size)
 833{
 834	struct hv_hotadd_state *ha_region = NULL;
 835
 836	if (pfn_cnt == 0)
 837		return 0;
 838
 839	if (!dm_device.host_specified_ha_region)
 840		if (pfn_covered(pg_start, pfn_cnt))
 841			goto do_pg_range;
 842
 843	/*
 844	 * If the host has specified a hot-add range, deal with it first.
 845	 */
 846
 847	if (rg_size != 0) {
 848		ha_region = kzalloc(sizeof(struct hv_hotadd_state), GFP_KERNEL);
 849		if (!ha_region)
 850			return 0;
 851
 852		INIT_LIST_HEAD(&ha_region->list);
 853
 854		list_add_tail(&ha_region->list, &dm_device.ha_region_list);
 855		ha_region->start_pfn = rg_start;
 856		ha_region->ha_end_pfn = rg_start;
 857		ha_region->covered_end_pfn = pg_start;
 858		ha_region->end_pfn = rg_start + rg_size;
 859	}
 860
 861do_pg_range:
 862	/*
 863	 * Process the page range specified; bringing them
 864	 * online if possible.
 865	 */
 866	return handle_pg_range(pg_start, pfn_cnt);
 867}
 868
 869#endif
 870
 871static void hot_add_req(struct work_struct *dummy)
 872{
 873	struct dm_hot_add_response resp;
 874#ifdef CONFIG_MEMORY_HOTPLUG
 875	unsigned long pg_start, pfn_cnt;
 876	unsigned long rg_start, rg_sz;
 877#endif
 878	struct hv_dynmem_device *dm = &dm_device;
 879
 880	memset(&resp, 0, sizeof(struct dm_hot_add_response));
 881	resp.hdr.type = DM_MEM_HOT_ADD_RESPONSE;
 882	resp.hdr.size = sizeof(struct dm_hot_add_response);
 883
 884#ifdef CONFIG_MEMORY_HOTPLUG
 885	mutex_lock(&dm_device.ha_region_mutex);
 886	pg_start = dm->ha_wrk.ha_page_range.finfo.start_page;
 887	pfn_cnt = dm->ha_wrk.ha_page_range.finfo.page_cnt;
 888
 889	rg_start = dm->ha_wrk.ha_region_range.finfo.start_page;
 890	rg_sz = dm->ha_wrk.ha_region_range.finfo.page_cnt;
 891
 892	if ((rg_start == 0) && (!dm->host_specified_ha_region)) {
 893		unsigned long region_size;
 894		unsigned long region_start;
 895
 896		/*
 897		 * The host has not specified the hot-add region.
 898		 * Based on the hot-add page range being specified,
 899		 * compute a hot-add region that can cover the pages
 900		 * that need to be hot-added while ensuring the alignment
 901		 * and size requirements of Linux as they relate to hot-add.
 902		 */
 903		region_start = pg_start;
 904		region_size = (pfn_cnt / HA_CHUNK) * HA_CHUNK;
 905		if (pfn_cnt % HA_CHUNK)
 906			region_size += HA_CHUNK;
 907
 908		region_start = (pg_start / HA_CHUNK) * HA_CHUNK;
 909
 910		rg_start = region_start;
 911		rg_sz = region_size;
 912	}
 913
 914	if (do_hot_add)
 915		resp.page_count = process_hot_add(pg_start, pfn_cnt,
 916						rg_start, rg_sz);
 917
 918	dm->num_pages_added += resp.page_count;
 919	mutex_unlock(&dm_device.ha_region_mutex);
 920#endif
 921	/*
 922	 * The result field of the response structure has the
 923	 * following semantics:
 924	 *
 925	 * 1. If all or some pages hot-added: Guest should return success.
 926	 *
 927	 * 2. If no pages could be hot-added:
 928	 *
 929	 * If the guest returns success, then the host
 930	 * will not attempt any further hot-add operations. This
 931	 * signifies a permanent failure.
 932	 *
 933	 * If the guest returns failure, then this failure will be
 934	 * treated as a transient failure and the host may retry the
 935	 * hot-add operation after some delay.
 936	 */
 937	if (resp.page_count > 0)
 938		resp.result = 1;
 939	else if (!do_hot_add)
 940		resp.result = 1;
 941	else
 942		resp.result = 0;
 943
 944	if (!do_hot_add || (resp.page_count == 0))
 945		pr_info("Memory hot add failed\n");
 946
 947	dm->state = DM_INITIALIZED;
 948	resp.hdr.trans_id = atomic_inc_return(&trans_id);
 949	vmbus_sendpacket(dm->dev->channel, &resp,
 950			sizeof(struct dm_hot_add_response),
 951			(unsigned long)NULL,
 952			VM_PKT_DATA_INBAND, 0);
 953}
 954
 955static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
 956{
 957	struct dm_info_header *info_hdr;
 958
 959	info_hdr = (struct dm_info_header *)msg->info;
 960
 961	switch (info_hdr->type) {
 962	case INFO_TYPE_MAX_PAGE_CNT:
 963		pr_info("Received INFO_TYPE_MAX_PAGE_CNT\n");
 964		pr_info("Data Size is %d\n", info_hdr->data_size);
 965		break;
 966	default:
 967		pr_info("Received Unknown type: %d\n", info_hdr->type);
 968	}
 969}
 970
 971static unsigned long compute_balloon_floor(void)
 972{
 973	unsigned long min_pages;
 974#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
 975	/* Simple continuous piecewise linear function:
 976	 *  max MiB -> min MiB  gradient
 977	 *       0         0
 978	 *      16        16
 979	 *      32        24
 980	 *     128        72    (1/2)
 981	 *     512       168    (1/4)
 982	 *    2048       360    (1/8)
 983	 *    8192       744    (1/16)
 984	 *   32768      1512	(1/32)
 985	 */
 986	if (totalram_pages < MB2PAGES(128))
 987		min_pages = MB2PAGES(8) + (totalram_pages >> 1);
 988	else if (totalram_pages < MB2PAGES(512))
 989		min_pages = MB2PAGES(40) + (totalram_pages >> 2);
 990	else if (totalram_pages < MB2PAGES(2048))
 991		min_pages = MB2PAGES(104) + (totalram_pages >> 3);
 992	else if (totalram_pages < MB2PAGES(8192))
 993		min_pages = MB2PAGES(232) + (totalram_pages >> 4);
 994	else
 995		min_pages = MB2PAGES(488) + (totalram_pages >> 5);
 996#undef MB2PAGES
 997	return min_pages;
 998}
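
Checking one row of the table above (a worked example, assuming PAGE_SHIFT == 12 so that MB2PAGES(mb) == mb << 8): a guest with exactly 2048 MiB of RAM has totalram_pages == MB2PAGES(2048), which falls through to the fourth branch:

	min_pages = MB2PAGES(232) + MB2PAGES(2048) / 16
	          = MB2PAGES(232 + 128)
	          = MB2PAGES(360)	/* the 360 MiB floor listed for 2048 MiB */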
 999
1000/*
1001 * Post our status, as it relates to memory pressure, to the
1002 * host. The host expects the guests to post this status
1003 * periodically at 1 second intervals.
1004 *
1005 * The metrics specified in this protocol are very Windows
1006 * specific and so we cook up numbers here to convey our memory
1007 * pressure.
1008 */
1009
1010static void post_status(struct hv_dynmem_device *dm)
1011{
1012	struct dm_status status;
1013	struct sysinfo val;
1014	unsigned long now = jiffies;
1015	unsigned long last_post = last_post_time;
1016
1017	if (pressure_report_delay > 0) {
1018		--pressure_report_delay;
1019		return;
1020	}
1021
1022	if (!time_after(now, (last_post_time + HZ)))
1023		return;
1024
1025	si_meminfo(&val);
1026	memset(&status, 0, sizeof(struct dm_status));
1027	status.hdr.type = DM_STATUS_REPORT;
1028	status.hdr.size = sizeof(struct dm_status);
1029	status.hdr.trans_id = atomic_inc_return(&trans_id);
1030
1031	/*
1032	 * The host expects the guest to report free and committed memory.
1033	 * Furthermore, the host expects the pressure information to include
1034	 * the ballooned out pages. For a given amount of memory that we are
1035	 * managing we need to compute a floor below which we should not
1036	 * balloon. Compute this and add it to the pressure report.
1037	 * We also need to report all offline pages (num_pages_added -
1038	 * num_pages_onlined) as committed to the host, otherwise it can try
1039	 * asking us to balloon them out.
1040	 */
1041	status.num_avail = val.freeram;
1042	status.num_committed = vm_memory_committed() +
1043		dm->num_pages_ballooned +
1044		(dm->num_pages_added > dm->num_pages_onlined ?
1045		 dm->num_pages_added - dm->num_pages_onlined : 0) +
1046		compute_balloon_floor();
1047
1048	/*
1049	 * If our transaction ID is no longer current, just don't
1050	 * send the status. This can happen if we were interrupted
1051	 * after we picked our transaction ID.
1052	 */
1053	if (status.hdr.trans_id != atomic_read(&trans_id))
1054		return;
1055
1056	/*
1057	 * If the last post time that we sampled has changed,
1058	 * we have raced; don't post the status.
1059	 */
1060	if (last_post != last_post_time)
1061		return;
1062
1063	last_post_time = jiffies;
1064	vmbus_sendpacket(dm->dev->channel, &status,
1065				sizeof(struct dm_status),
1066				(unsigned long)NULL,
1067				VM_PKT_DATA_INBAND, 0);
1068
1069}
1070
1071static void free_balloon_pages(struct hv_dynmem_device *dm,
1072			 union dm_mem_page_range *range_array)
1073{
1074	int num_pages = range_array->finfo.page_cnt;
1075	__u64 start_frame = range_array->finfo.start_page;
1076	struct page *pg;
1077	int i;
1078
1079	for (i = 0; i < num_pages; i++) {
1080		pg = pfn_to_page(i + start_frame);
1081		__free_page(pg);
1082		dm->num_pages_ballooned--;
1083	}
1084}
1085
1086
1087
1088static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
1089					unsigned int num_pages,
1090					struct dm_balloon_response *bl_resp,
1091					int alloc_unit)
1092{
1093	unsigned int i = 0;
1094	struct page *pg;
1095
1096	if (num_pages < alloc_unit)
1097		return 0;
1098
1099	for (i = 0; (i * alloc_unit) < num_pages; i++) {
1100		if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) >
1101			PAGE_SIZE)
1102			return i * alloc_unit;
1103
1104		/*
1105		 * We execute this code in a thread context. Furthermore,
1106		 * we don't want the kernel to try too hard.
1107		 */
1108		pg = alloc_pages(GFP_HIGHUSER | __GFP_NORETRY |
1109				__GFP_NOMEMALLOC | __GFP_NOWARN,
1110				get_order(alloc_unit << PAGE_SHIFT));
1111
1112		if (!pg)
1113			return i * alloc_unit;
1114
1115		dm->num_pages_ballooned += alloc_unit;
1116
1117		/*
1118		 * If we allocated 2M pages, split them so we
1119		 * can free them in any order.
1120		 */
1121
1122		if (alloc_unit != 1)
1123			split_page(pg, get_order(alloc_unit << PAGE_SHIFT));
1124
1125		bl_resp->range_count++;
1126		bl_resp->range_array[i].finfo.start_page =
1127			page_to_pfn(pg);
1128		bl_resp->range_array[i].finfo.page_cnt = alloc_unit;
1129		bl_resp->hdr.size += sizeof(union dm_mem_page_range);
1130
1131	}
1132
1133	return num_pages;
1134}
1135
1136
1137
1138static void balloon_up(struct work_struct *dummy)
1139{
1140	unsigned int num_pages = dm_device.balloon_wrk.num_pages;
1141	unsigned int num_ballooned = 0;
1142	struct dm_balloon_response *bl_resp;
1143	int alloc_unit;
1144	int ret;
1145	bool done = false;
1146	int i;
1147	struct sysinfo val;
1148	unsigned long floor;
1149
1150	/* The host balloons pages in 2M granularity. */
1151	WARN_ON_ONCE(num_pages % PAGES_IN_2M != 0);
1152
1153	/*
1154	 * We will attempt 2M allocations. However, if we fail to
1155	 * allocate 2M chunks, we will go back to 4k allocations.
1156	 */
1157	alloc_unit = 512;
1158
1159	si_meminfo(&val);
1160	floor = compute_balloon_floor();
1161
1162	/* Refuse to balloon below the floor, keep the 2M granularity. */
1163	if (val.freeram < num_pages || val.freeram - num_pages < floor) {
1164		num_pages = val.freeram > floor ? (val.freeram - floor) : 0;
1165		num_pages -= num_pages % PAGES_IN_2M;
1166	}
1167
1168	while (!done) {
1169		bl_resp = (struct dm_balloon_response *)send_buffer;
1170		memset(send_buffer, 0, PAGE_SIZE);
1171		bl_resp->hdr.type = DM_BALLOON_RESPONSE;
1172		bl_resp->hdr.size = sizeof(struct dm_balloon_response);
1173		bl_resp->more_pages = 1;
1174
1175
1176		num_pages -= num_ballooned;
1177		num_ballooned = alloc_balloon_pages(&dm_device, num_pages,
1178						    bl_resp, alloc_unit);
1179
1180		if (alloc_unit != 1 && num_ballooned == 0) {
1181			alloc_unit = 1;
1182			continue;
1183		}
1184
1185		if (num_ballooned == 0 || num_ballooned == num_pages) {
1186			bl_resp->more_pages = 0;
1187			done = true;
1188			dm_device.state = DM_INITIALIZED;
1189		}
1190
1191		/*
1192		 * We are pushing a lot of data through the channel;
1193		 * deal with transient failures caused by the
1194		 * lack of space in the ring buffer.
1195		 */
1196
1197		do {
1198			bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
1199			ret = vmbus_sendpacket(dm_device.dev->channel,
1200						bl_resp,
1201						bl_resp->hdr.size,
1202						(unsigned long)NULL,
1203						VM_PKT_DATA_INBAND, 0);
1204
1205			if (ret == -EAGAIN)
1206				msleep(20);
1207			post_status(&dm_device);
1208		} while (ret == -EAGAIN);
1209
1210		if (ret) {
1211			/*
1212			 * Free up the memory we allocated.
1213			 */
1214			pr_info("Balloon response failed\n");
1215
1216			for (i = 0; i < bl_resp->range_count; i++)
1217				free_balloon_pages(&dm_device,
1218						 &bl_resp->range_array[i]);
1219
1220			done = true;
1221		}
1222	}
1223
1224}
1225
1226static void balloon_down(struct hv_dynmem_device *dm,
1227			struct dm_unballoon_request *req)
1228{
1229	union dm_mem_page_range *range_array = req->range_array;
1230	int range_count = req->range_count;
1231	struct dm_unballoon_response resp;
1232	int i;
1233
1234	for (i = 0; i < range_count; i++) {
1235		free_balloon_pages(dm, &range_array[i]);
1236		complete(&dm_device.config_event);
1237	}
1238
1239	if (req->more_pages == 1)
1240		return;
1241
1242	memset(&resp, 0, sizeof(struct dm_unballoon_response));
1243	resp.hdr.type = DM_UNBALLOON_RESPONSE;
1244	resp.hdr.trans_id = atomic_inc_return(&trans_id);
1245	resp.hdr.size = sizeof(struct dm_unballoon_response);
1246
1247	vmbus_sendpacket(dm_device.dev->channel, &resp,
1248				sizeof(struct dm_unballoon_response),
1249				(unsigned long)NULL,
1250				VM_PKT_DATA_INBAND, 0);
1251
1252	dm->state = DM_INITIALIZED;
1253}
1254
1255static void balloon_onchannelcallback(void *context);
1256
1257static int dm_thread_func(void *dm_dev)
1258{
1259	struct hv_dynmem_device *dm = dm_dev;
1260
1261	while (!kthread_should_stop()) {
1262		wait_for_completion_interruptible_timeout(
1263						&dm_device.config_event, 1*HZ);
1264		/*
1265		 * The host expects us to post information on the memory
1266		 * pressure every second.
1267		 */
1268		reinit_completion(&dm_device.config_event);
1269		post_status(dm);
1270	}
1271
1272	return 0;
1273}
1274
1275
1276static void version_resp(struct hv_dynmem_device *dm,
1277			struct dm_version_response *vresp)
1278{
1279	struct dm_version_request version_req;
1280	int ret;
1281
1282	if (vresp->is_accepted) {
1283		/*
1284		 * We are done; wake up the
1285		 * context waiting for version
1286		 * negotiation.
1287		 */
1288		complete(&dm->host_event);
1289		return;
1290	}
1291	/*
1292	 * If there are more versions to try, continue
1293	 * with negotiations; if not,
1294	 * shut down the service, since we are not able
1295	 * to negotiate a suitable version number
1296	 * with the host.
1297	 */
1298	if (dm->next_version == 0)
1299		goto version_error;
1300
1301	memset(&version_req, 0, sizeof(struct dm_version_request));
1302	version_req.hdr.type = DM_VERSION_REQUEST;
1303	version_req.hdr.size = sizeof(struct dm_version_request);
1304	version_req.hdr.trans_id = atomic_inc_return(&trans_id);
1305	version_req.version.version = dm->next_version;
1306
1307	/*
1308	 * Set the next version to try in case current version fails.
1309	 * Win7 protocol ought to be the last one to try.
1310	 */
1311	switch (version_req.version.version) {
1312	case DYNMEM_PROTOCOL_VERSION_WIN8:
1313		dm->next_version = DYNMEM_PROTOCOL_VERSION_WIN7;
1314		version_req.is_last_attempt = 0;
1315		break;
1316	default:
1317		dm->next_version = 0;
1318		version_req.is_last_attempt = 1;
1319	}
1320
1321	ret = vmbus_sendpacket(dm->dev->channel, &version_req,
1322				sizeof(struct dm_version_request),
1323				(unsigned long)NULL,
1324				VM_PKT_DATA_INBAND, 0);
1325
1326	if (ret)
1327		goto version_error;
1328
1329	return;
1330
1331version_error:
1332	dm->state = DM_INIT_ERROR;
1333	complete(&dm->host_event);
1334}
1335
1336static void cap_resp(struct hv_dynmem_device *dm,
1337			struct dm_capabilities_resp_msg *cap_resp)
1338{
1339	if (!cap_resp->is_accepted) {
1340		pr_info("Capabilities not accepted by host\n");
1341		dm->state = DM_INIT_ERROR;
1342	}
1343	complete(&dm->host_event);
1344}
1345
1346static void balloon_onchannelcallback(void *context)
1347{
1348	struct hv_device *dev = context;
1349	u32 recvlen;
1350	u64 requestid;
1351	struct dm_message *dm_msg;
1352	struct dm_header *dm_hdr;
1353	struct hv_dynmem_device *dm = hv_get_drvdata(dev);
1354	struct dm_balloon *bal_msg;
1355	struct dm_hot_add *ha_msg;
1356	union dm_mem_page_range *ha_pg_range;
1357	union dm_mem_page_range *ha_region;
1358
1359	memset(recv_buffer, 0, sizeof(recv_buffer));
1360	vmbus_recvpacket(dev->channel, recv_buffer,
1361			 PAGE_SIZE, &recvlen, &requestid);
1362
1363	if (recvlen > 0) {
1364		dm_msg = (struct dm_message *)recv_buffer;
1365		dm_hdr = &dm_msg->hdr;
1366
1367		switch (dm_hdr->type) {
1368		case DM_VERSION_RESPONSE:
1369			version_resp(dm,
1370				 (struct dm_version_response *)dm_msg);
1371			break;
1372
1373		case DM_CAPABILITIES_RESPONSE:
1374			cap_resp(dm,
1375				 (struct dm_capabilities_resp_msg *)dm_msg);
1376			break;
1377
1378		case DM_BALLOON_REQUEST:
1379			if (dm->state == DM_BALLOON_UP)
1380				pr_warn("Currently ballooning\n");
1381			bal_msg = (struct dm_balloon *)recv_buffer;
1382			dm->state = DM_BALLOON_UP;
1383			dm_device.balloon_wrk.num_pages = bal_msg->num_pages;
1384			schedule_work(&dm_device.balloon_wrk.wrk);
1385			break;
1386
1387		case DM_UNBALLOON_REQUEST:
1388			dm->state = DM_BALLOON_DOWN;
1389			balloon_down(dm,
1390				 (struct dm_unballoon_request *)recv_buffer);
1391			break;
1392
1393		case DM_MEM_HOT_ADD_REQUEST:
1394			if (dm->state == DM_HOT_ADD)
1395				pr_warn("Currently hot-adding\n");
1396			dm->state = DM_HOT_ADD;
1397			ha_msg = (struct dm_hot_add *)recv_buffer;
1398			if (ha_msg->hdr.size == sizeof(struct dm_hot_add)) {
1399				/*
1400				 * This is a normal hot-add request specifying
1401				 * hot-add memory.
1402				 */
1403				ha_pg_range = &ha_msg->range;
1404				dm->ha_wrk.ha_page_range = *ha_pg_range;
1405				dm->ha_wrk.ha_region_range.page_range = 0;
1406			} else {
1407				/*
1408				 * Host is specifying that we first hot-add
1409				 * a region and then partially populate this
1410				 * region.
1411				 */
1412				dm->host_specified_ha_region = true;
1413				ha_pg_range = &ha_msg->range;
1414				ha_region = &ha_pg_range[1];
1415				dm->ha_wrk.ha_page_range = *ha_pg_range;
1416				dm->ha_wrk.ha_region_range = *ha_region;
1417			}
1418			schedule_work(&dm_device.ha_wrk.wrk);
1419			break;
1420
1421		case DM_INFO_MESSAGE:
1422			process_info(dm, (struct dm_info_msg *)dm_msg);
1423			break;
1424
1425		default:
1426			pr_err("Unhandled message: type: %d\n", dm_hdr->type);
1427
1428		}
1429	}
1430
1431}
1432
1433static int balloon_probe(struct hv_device *dev,
1434			const struct hv_vmbus_device_id *dev_id)
1435{
1436	int ret;
1437	unsigned long t;
1438	struct dm_version_request version_req;
1439	struct dm_capabilities cap_msg;
1440
1441	do_hot_add = hot_add;
1442
1443	/*
1444	 * First allocate a send buffer.
1445	 */
1446
1447	send_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
1448	if (!send_buffer)
1449		return -ENOMEM;
1450
1451	ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0,
1452			balloon_onchannelcallback, dev);
1453
1454	if (ret)
1455		goto probe_error0;
1456
1457	dm_device.dev = dev;
1458	dm_device.state = DM_INITIALIZING;
1459	dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN8;
1460	init_completion(&dm_device.host_event);
1461	init_completion(&dm_device.config_event);
1462	INIT_LIST_HEAD(&dm_device.ha_region_list);
1463	mutex_init(&dm_device.ha_region_mutex);
1464	INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);
1465	INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req);
1466	dm_device.host_specified_ha_region = false;
1467
1468	dm_device.thread =
1469		 kthread_run(dm_thread_func, &dm_device, "hv_balloon");
1470	if (IS_ERR(dm_device.thread)) {
1471		ret = PTR_ERR(dm_device.thread);
1472		goto probe_error1;
1473	}
1474
1475#ifdef CONFIG_MEMORY_HOTPLUG
1476	set_online_page_callback(&hv_online_page);
1477	register_memory_notifier(&hv_memory_nb);
1478#endif
1479
1480	hv_set_drvdata(dev, &dm_device);
1481	/*
1482	 * Initiate the handshake with the host and negotiate
1483	 * a version that the host can support. We start with the
1484	 * highest version number and go down if the host cannot
1485	 * support it.
1486	 */
1487	memset(&version_req, 0, sizeof(struct dm_version_request));
1488	version_req.hdr.type = DM_VERSION_REQUEST;
1489	version_req.hdr.size = sizeof(struct dm_version_request);
1490	version_req.hdr.trans_id = atomic_inc_return(&trans_id);
1491	version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN10;
1492	version_req.is_last_attempt = 0;
1493
1494	ret = vmbus_sendpacket(dev->channel, &version_req,
1495				sizeof(struct dm_version_request),
1496				(unsigned long)NULL,
1497				VM_PKT_DATA_INBAND, 0);
1498	if (ret)
1499		goto probe_error2;
1500
1501	t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
1502	if (t == 0) {
1503		ret = -ETIMEDOUT;
1504		goto probe_error2;
1505	}
1506
1507	/*
1508	 * If we could not negotiate a compatible version with the host,
1509	 * fail the probe function.
1510	 */
1511	if (dm_device.state == DM_INIT_ERROR) {
1512		ret = -ETIMEDOUT;
1513		goto probe_error2;
1514	}
1515	/*
1516	 * Now submit our capabilities to the host.
1517	 */
1518	memset(&cap_msg, 0, sizeof(struct dm_capabilities));
1519	cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
1520	cap_msg.hdr.size = sizeof(struct dm_capabilities);
1521	cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
1522
1523	cap_msg.caps.cap_bits.balloon = 1;
1524	cap_msg.caps.cap_bits.hot_add = 1;
1525
1526	/*
1527	 * Specify our alignment requirements, as they relate to
1528	 * memory hot-add. Specify 128MB alignment.
1529	 */
1530	cap_msg.caps.cap_bits.hot_add_alignment = 7;
1531
1532	/*
1533	 * Currently the host does not use these
1534	 * values and we set them to what is done in the
1535	 * Windows driver.
1536	 */
1537	cap_msg.min_page_cnt = 0;
1538	cap_msg.max_page_number = -1;
1539
1540	ret = vmbus_sendpacket(dev->channel, &cap_msg,
1541				sizeof(struct dm_capabilities),
1542				(unsigned long)NULL,
1543				VM_PKT_DATA_INBAND, 0);
1544	if (ret)
1545		goto probe_error2;
1546
1547	t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
1548	if (t == 0) {
1549		ret = -ETIMEDOUT;
1550		goto probe_error2;
1551	}
1552
1553	/*
1554	 * If the host does not like our capabilities,
1555	 * fail the probe function.
1556	 */
1557	if (dm_device.state == DM_INIT_ERROR) {
1558		ret = -ETIMEDOUT;
1559		goto probe_error2;
1560	}
1561
1562	dm_device.state = DM_INITIALIZED;
1563
1564	return 0;
1565
1566probe_error2:
1567#ifdef CONFIG_MEMORY_HOTPLUG
1568	restore_online_page_callback(&hv_online_page);
1569#endif
1570	kthread_stop(dm_device.thread);
1571
1572probe_error1:
1573	vmbus_close(dev->channel);
1574probe_error0:
1575	kfree(send_buffer);
1576	return ret;
1577}
1578
1579static int balloon_remove(struct hv_device *dev)
1580{
1581	struct hv_dynmem_device *dm = hv_get_drvdata(dev);
1582	struct list_head *cur, *tmp;
1583	struct hv_hotadd_state *has;
1584
1585	if (dm->num_pages_ballooned != 0)
1586		pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);
1587
1588	cancel_work_sync(&dm->balloon_wrk.wrk);
1589	cancel_work_sync(&dm->ha_wrk.wrk);
1590
1591	vmbus_close(dev->channel);
1592	kthread_stop(dm->thread);
1593	kfree(send_buffer);
1594#ifdef CONFIG_MEMORY_HOTPLUG
1595	restore_online_page_callback(&hv_online_page);
1596	unregister_memory_notifier(&hv_memory_nb);
1597#endif
1598	list_for_each_safe(cur, tmp, &dm->ha_region_list) {
1599		has = list_entry(cur, struct hv_hotadd_state, list);
1600		list_del(&has->list);
1601		kfree(has);
1602	}
1603
1604	return 0;
1605}
1606
1607static const struct hv_vmbus_device_id id_table[] = {
1608	/* Dynamic Memory Class ID */
1609	/* 525074DC-8985-46e2-8057-A307DC18A502 */
1610	{ HV_DM_GUID, },
1611	{ },
1612};
1613
1614MODULE_DEVICE_TABLE(vmbus, id_table);
1615
1616static struct hv_driver balloon_drv = {
1617	.name = "hv_balloon",
1618	.id_table = id_table,
1619	.probe =  balloon_probe,
1620	.remove =  balloon_remove,
1621};
1622
1623static int __init init_balloon_drv(void)
1624{
1625
1626	return vmbus_driver_register(&balloon_drv);
1627}
1628
1629module_init(init_balloon_drv);
1630
1631MODULE_DESCRIPTION("Hyper-V Balloon");
1632MODULE_LICENSE("GPL");
v3.15
   1/*
   2 * Copyright (c) 2012, Microsoft Corporation.
   3 *
   4 * Author:
   5 *   K. Y. Srinivasan <kys@microsoft.com>
   6 *
   7 * This program is free software; you can redistribute it and/or modify it
   8 * under the terms of the GNU General Public License version 2 as published
   9 * by the Free Software Foundation.
  10 *
  11 * This program is distributed in the hope that it will be useful, but
  12 * WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
  14 * NON INFRINGEMENT.  See the GNU General Public License for more
  15 * details.
  16 *
  17 */
  18
  19#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  20
  21#include <linux/kernel.h>
 
  22#include <linux/mman.h>
  23#include <linux/delay.h>
  24#include <linux/init.h>
  25#include <linux/module.h>
  26#include <linux/slab.h>
  27#include <linux/kthread.h>
  28#include <linux/completion.h>
  29#include <linux/memory_hotplug.h>
  30#include <linux/memory.h>
  31#include <linux/notifier.h>
  32#include <linux/percpu_counter.h>
  33
  34#include <linux/hyperv.h>
  35
  36/*
  37 * We begin with definitions supporting the Dynamic Memory protocol
  38 * with the host.
  39 *
  40 * Begin protocol definitions.
  41 */
  42
  43
  44
  45/*
  46 * Protocol versions. The low word is the minor version, the high word the major
  47 * version.
  48 *
  49 * History:
  50 * Initial version 1.0
  51 * Changed to 0.1 on 2009/03/25
  52 * Changes to 0.2 on 2009/05/14
  53 * Changes to 0.3 on 2009/12/03
  54 * Changed to 1.0 on 2011/04/05
  55 */
  56
  57#define DYNMEM_MAKE_VERSION(Major, Minor) ((__u32)(((Major) << 16) | (Minor)))
  58#define DYNMEM_MAJOR_VERSION(Version) ((__u32)(Version) >> 16)
  59#define DYNMEM_MINOR_VERSION(Version) ((__u32)(Version) & 0xff)
  60
  61enum {
  62	DYNMEM_PROTOCOL_VERSION_1 = DYNMEM_MAKE_VERSION(0, 3),
  63	DYNMEM_PROTOCOL_VERSION_2 = DYNMEM_MAKE_VERSION(1, 0),
 
  64
  65	DYNMEM_PROTOCOL_VERSION_WIN7 = DYNMEM_PROTOCOL_VERSION_1,
  66	DYNMEM_PROTOCOL_VERSION_WIN8 = DYNMEM_PROTOCOL_VERSION_2,
 
  67
  68	DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN8
  69};
  70
  71
  72
  73/*
  74 * Message Types
  75 */
  76
  77enum dm_message_type {
  78	/*
  79	 * Version 0.3
  80	 */
  81	DM_ERROR			= 0,
  82	DM_VERSION_REQUEST		= 1,
  83	DM_VERSION_RESPONSE		= 2,
  84	DM_CAPABILITIES_REPORT		= 3,
  85	DM_CAPABILITIES_RESPONSE	= 4,
  86	DM_STATUS_REPORT		= 5,
  87	DM_BALLOON_REQUEST		= 6,
  88	DM_BALLOON_RESPONSE		= 7,
  89	DM_UNBALLOON_REQUEST		= 8,
  90	DM_UNBALLOON_RESPONSE		= 9,
  91	DM_MEM_HOT_ADD_REQUEST		= 10,
  92	DM_MEM_HOT_ADD_RESPONSE		= 11,
  93	DM_VERSION_03_MAX		= 11,
  94	/*
  95	 * Version 1.0.
  96	 */
  97	DM_INFO_MESSAGE			= 12,
  98	DM_VERSION_1_MAX		= 12
  99};
 100
 101
 102/*
 103 * Structures defining the dynamic memory management
 104 * protocol.
 105 */
 106
 107union dm_version {
 108	struct {
 109		__u16 minor_version;
 110		__u16 major_version;
 111	};
 112	__u32 version;
 113} __packed;
 114
 115
 116union dm_caps {
 117	struct {
 118		__u64 balloon:1;
 119		__u64 hot_add:1;
 120		/*
 121		 * To support guests that may have alignment
 122		 * limitations on hot-add, the guest can specify
 123		 * its alignment requirements; a value of n
 124		 * represents an alignment of 2^n in mega bytes.
 125		 */
 126		__u64 hot_add_alignment:4;
 127		__u64 reservedz:58;
 128	} cap_bits;
 129	__u64 caps;
 130} __packed;
 131
 132union dm_mem_page_range {
 133	struct  {
 134		/*
 135		 * The PFN number of the first page in the range.
 136		 * 40 bits is the architectural limit of a PFN
 137		 * number for AMD64.
 138		 */
 139		__u64 start_page:40;
 140		/*
 141		 * The number of pages in the range.
 142		 */
 143		__u64 page_cnt:24;
 144	} finfo;
 145	__u64  page_range;
 146} __packed;
 147
 148
 149
 150/*
 151 * The header for all dynamic memory messages:
 152 *
 153 * type: Type of the message.
 154 * size: Size of the message in bytes; including the header.
 155 * trans_id: The guest is responsible for manufacturing this ID.
 156 */
 157
 158struct dm_header {
 159	__u16 type;
 160	__u16 size;
 161	__u32 trans_id;
 162} __packed;
 163
 164/*
 165 * A generic message format for dynamic memory.
 166 * Specific message formats are defined later in the file.
 167 */
 168
 169struct dm_message {
 170	struct dm_header hdr;
 171	__u8 data[]; /* enclosed message */
 172} __packed;
 173
 174
 175/*
 176 * Specific message types supporting the dynamic memory protocol.
 177 */
 178
 179/*
 180 * Version negotiation message. Sent from the guest to the host.
 181 * The guest is free to try different versions until the host
 182 * accepts the version.
 183 *
 184 * dm_version: The protocol version requested.
 185 * is_last_attempt: If TRUE, this is the last version guest will request.
 186 * reservedz: Reserved field, set to zero.
 187 */
 188
 189struct dm_version_request {
 190	struct dm_header hdr;
 191	union dm_version version;
 192	__u32 is_last_attempt:1;
 193	__u32 reservedz:31;
 194} __packed;
 195
 196/*
 197 * Version response message; Host to Guest and indicates
 198 * if the host has accepted the version sent by the guest.
 199 *
 200 * is_accepted: If TRUE, host has accepted the version and the guest
 201 * should proceed to the next stage of the protocol. FALSE indicates that
 202 * guest should re-try with a different version.
 203 *
 204 * reservedz: Reserved field, set to zero.
 205 */
 206
 207struct dm_version_response {
 208	struct dm_header hdr;
 209	__u64 is_accepted:1;
 210	__u64 reservedz:63;
 211} __packed;
 212
 213/*
 214 * Message reporting capabilities. This is sent from the guest to the
 215 * host.
 216 */
 217
 218struct dm_capabilities {
 219	struct dm_header hdr;
 220	union dm_caps caps;
 221	__u64 min_page_cnt;
 222	__u64 max_page_number;
 223} __packed;
 224
 225/*
 226 * Response to the capabilities message. This is sent from the host to the
 227 * guest. This message notifies if the host has accepted the guest's
 228 * capabilities. If the host has not accepted, the guest must shutdown
 229 * the service.
 230 *
 231 * is_accepted: Indicates if the host has accepted guest's capabilities.
 232 * reservedz: Must be 0.
 233 */
 234
 235struct dm_capabilities_resp_msg {
 236	struct dm_header hdr;
 237	__u64 is_accepted:1;
 238	__u64 reservedz:63;
 239} __packed;
 240
 241/*
 242 * This message is used to report memory pressure from the guest.
 243 * This message is not part of any transaction and there is no
 244 * response to this message.
 245 *
 246 * num_avail: Available memory in pages.
 247 * num_committed: Committed memory in pages.
 248 * page_file_size: The accumulated size of all page files
 249 *		   in the system in pages.
 250 * zero_free: The nunber of zero and free pages.
 251 * page_file_writes: The writes to the page file in pages.
 252 * io_diff: An indicator of file cache efficiency or page file activity,
 253 *	    calculated as File Cache Page Fault Count - Page Read Count.
 254 *	    This value is in pages.
 255 *
 256 * Some of these metrics are Windows specific and fortunately
 257 * the algorithm on the host side that computes the guest memory
 258 * pressure only uses num_committed value.
 259 */
 260
 261struct dm_status {
 262	struct dm_header hdr;
 263	__u64 num_avail;
 264	__u64 num_committed;
 265	__u64 page_file_size;
 266	__u64 zero_free;
 267	__u32 page_file_writes;
 268	__u32 io_diff;
 269} __packed;
 270
 271
 272/*
 273 * Message to ask the guest to allocate memory - balloon up message.
 274 * This message is sent from the host to the guest. The guest may not be
 275 * able to allocate as much memory as requested.
 276 *
 277 * num_pages: number of pages to allocate.
 278 */
 279
 280struct dm_balloon {
 281	struct dm_header hdr;
 282	__u32 num_pages;
 283	__u32 reservedz;
 284} __packed;
 285
 286
 287/*
 288 * Balloon response message; this message is sent from the guest
 289 * to the host in response to the balloon message.
 290 *
 291 * reservedz: Reserved; must be set to zero.
 292 * more_pages: If FALSE, this is the last message of the transaction.
 293 * if TRUE there will atleast one more message from the guest.
 294 *
 295 * range_count: The number of ranges in the range array.
 296 *
 297 * range_array: An array of page ranges returned to the host.
 298 *
 299 */
 300
 301struct dm_balloon_response {
 302	struct dm_header hdr;
 303	__u32 reservedz;
 304	__u32 more_pages:1;
 305	__u32 range_count:31;
 306	union dm_mem_page_range range_array[];
 307} __packed;
 308
 309/*
 310 * Un-balloon message; this message is sent from the host
 311 * to the guest to give guest more memory.
 312 *
 313 * more_pages: If FALSE, this is the last message of the transaction.
 314 * if TRUE there will atleast one more message from the guest.
 315 *
 316 * reservedz: Reserved; must be set to zero.
 317 *
 318 * range_count: The number of ranges in the range array.
 319 *
 320 * range_array: An array of page ranges returned to the host.
 321 *
 322 */
 323
 324struct dm_unballoon_request {
 325	struct dm_header hdr;
 326	__u32 more_pages:1;
 327	__u32 reservedz:31;
 328	__u32 range_count;
 329	union dm_mem_page_range range_array[];
 330} __packed;
 331
 332/*
 333 * Un-balloon response message; this message is sent from the guest
 334 * to the host in response to an unballoon request.
 335 *
 336 */
 337
 338struct dm_unballoon_response {
 339	struct dm_header hdr;
 340} __packed;
 341
 342
 343/*
 344 * Hot add request message. Message sent from the host to the guest.
 345 *
 346 * mem_range: Memory range to hot add.
 347 *
 348 * On Linux we currently don't support this since we cannot hot add
 349 * arbitrary granularity of memory.
 350 */
 351
 352struct dm_hot_add {
 353	struct dm_header hdr;
 354	union dm_mem_page_range range;
 355} __packed;
 356
 357/*
 358 * Hot add response message.
 359 * This message is sent by the guest to report the status of a hot add request.
 360 * If page_count is less than the requested page count, then the host should
 361 * assume all further hot add requests will fail, since this indicates that
 362 * the guest has hit an upper physical memory barrier.
 363 *
 364 * Hot adds may also fail due to low resources; in this case, the guest must
 365 * not complete this message until the hot add can succeed, and the host must
 366 * not send a new hot add request until the response is sent.
 367 * If the VSC fails to hot add memory DYNMEM_NUMBER_OF_UNSUCCESSFUL_HOTADD_ATTEMPTS
 368 * times, it fails the request.
 369 *
 370 *
 371 * page_count: number of pages that were successfully hot added.
 372 *
 373 * result: result of the operation; 1: success, 0: failure.
 374 *
 375 */
 376
 377struct dm_hot_add_response {
 378	struct dm_header hdr;
 379	__u32 page_count;
 380	__u32 result;
 381} __packed;
 382
 383/*
 384 * Types of information sent from host to the guest.
 385 */
 386
 387enum dm_info_type {
 388	INFO_TYPE_MAX_PAGE_CNT = 0,
 389	MAX_INFO_TYPE
 390};
 391
 392
 393/*
 394 * Header for the information message.
 395 */
 396
 397struct dm_info_header {
 398	enum dm_info_type type;
 399	__u32 data_size;
 400} __packed;
 401
 402/*
 403 * This message is sent from the host to the guest to pass
 404 * some relevant information (win8 addition).
 405 *
 406 * reserved: not used.
 407 * info_size: size of the information blob.
 408 * info: information blob.
 409 */
 410
 411struct dm_info_msg {
 412	struct dm_header hdr;
 413	__u32 reserved;
 414	__u32 info_size;
 415	__u8  info[];
 416};
 417
 418/*
 419 * End protocol definitions.
 420 */
 421
 422/*
 423 * State to manage hot adding memory into the guest.
 424 * The range start_pfn : end_pfn specifies the range
 425 * that the host has asked us to hot add. The range
 426 * start_pfn : ha_end_pfn specifies the range that we have
 427 * currently hot added. We hot add in multiples of 128M
 428 * chunks; it is possible that we may not be able to bring
 429 * online all the pages in the region. The range
 430 * covered_start_pfn : covered_end_pfn defines the pages that can
 431 * be brought online.
 432 */
 433
 434struct hv_hotadd_state {
 435	struct list_head list;
 436	unsigned long start_pfn;
 437	unsigned long covered_start_pfn;
 438	unsigned long covered_end_pfn;
 439	unsigned long ha_end_pfn;
 440	unsigned long end_pfn;
 441};
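/*
 * A worked example with hypothetical numbers: if the host asks us to
 * hot add 1000 pages at pfn 0x100000 into a region of one 128M chunk
 * starting there, then after one successful add_memory() call:
 *
 *   start_pfn         = 0x100000
 *   end_pfn           = 0x100000 + HA_CHUNK
 *   ha_end_pfn        = 0x100000 + HA_CHUNK  (hot added so far)
 *   covered_start_pfn = 0x100000
 *   covered_end_pfn   = 0x100000 + 1000      (backed; can be onlined)
 */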
 442
 443struct balloon_state {
 444	__u32 num_pages;
 445	struct work_struct wrk;
 446};
 447
 448struct hot_add_wrk {
 449	union dm_mem_page_range ha_page_range;
 450	union dm_mem_page_range ha_region_range;
 451	struct work_struct wrk;
 452};
 453
 454static bool hot_add = true;
 455static bool do_hot_add;
 456/*
 457 * Delay reporting memory pressure by
 458 * the specified number of seconds.
 459 */
 460static uint pressure_report_delay = 45;
 461
 462module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
 463MODULE_PARM_DESC(hot_add, "If set, attempt memory hot_add");
 464
 465module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
 466MODULE_PARM_DESC(pressure_report_delay, "Delay in seconds before reporting memory pressure");
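/*
 * Usage sketch (hypothetical values): both parameters can be given at
 * load time, and since they are created with S_IWUSR they can also be
 * changed later through sysfs:
 *
 *   modprobe hv_balloon hot_add=0 pressure_report_delay=0
 *   echo 30 > /sys/module/hv_balloon/parameters/pressure_report_delay
 */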
 467static atomic_t trans_id = ATOMIC_INIT(0);
 468
 469static int dm_ring_size = (5 * PAGE_SIZE);
 470
 471/*
 472 * Driver specific state.
 473 */
 474
 475enum hv_dm_state {
 476	DM_INITIALIZING = 0,
 477	DM_INITIALIZED,
 478	DM_BALLOON_UP,
 479	DM_BALLOON_DOWN,
 480	DM_HOT_ADD,
 481	DM_INIT_ERROR
 482};
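/*
 * Typical state flow, as driven by balloon_onchannelcallback() and the
 * work functions below:
 *
 *   DM_INITIALIZING -> DM_INITIALIZED   (version/capability negotiation)
 *   DM_INITIALIZED  -> DM_BALLOON_UP    (DM_BALLOON_REQUEST)
 *   DM_INITIALIZED  -> DM_BALLOON_DOWN  (DM_UNBALLOON_REQUEST)
 *   DM_INITIALIZED  -> DM_HOT_ADD       (DM_MEM_HOT_ADD_REQUEST)
 *
 * Each operation returns to DM_INITIALIZED on completion; failures
 * during initialization end in DM_INIT_ERROR.
 */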
 483
 484
 485static __u8 recv_buffer[PAGE_SIZE];
 486static __u8 *send_buffer;
 487#define PAGES_IN_2M	512
 488#define HA_CHUNK (32 * 1024)
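/*
 * With 4K pages, PAGES_IN_2M is 512 pages * 4KB = 2MB (the balloon
 * allocation unit used below) and HA_CHUNK is 32768 pages * 4KB = 128MB,
 * the hot-add granularity described above.
 */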
 489
 490struct hv_dynmem_device {
 491	struct hv_device *dev;
 492	enum hv_dm_state state;
 493	struct completion host_event;
 494	struct completion config_event;
 495
 496	/*
 497	 * Number of pages we have currently ballooned out.
 498	 */
 499	unsigned int num_pages_ballooned;
 500
 501	/*
 502	 * State to manage the ballooning (up) operation.
 503	 */
 504	struct balloon_state balloon_wrk;
 505
 506	/*
 507	 * State to execute the "hot-add" operation.
 508	 */
 509	struct hot_add_wrk ha_wrk;
 510
 511	/*
 512	 * This state tracks if the host has specified a hot-add
 513	 * region.
 514	 */
 515	bool host_specified_ha_region;
 516
 517	/*
 518	 * State to synchronize hot-add.
 519	 */
 520	struct completion  ol_waitevent;
 521	bool ha_waiting;
 522	/*
 523	 * This thread handles hot-add
 524	 * requests from the host as well as notifying
 525	 * the host with regard to memory pressure in
 526	 * the guest.
 527	 */
 528	struct task_struct *thread;
 529
 530	/*
 531	 * A list of hot-add regions.
 532	 */
 533	struct list_head ha_region_list;
 534
 535	/*
 536	 * We start with the highest version we can support
 537	 * and downgrade based on the host; we save here the
 538	 * next version to try.
 539	 */
 540	__u32 next_version;
 541};
 542
 543static struct hv_dynmem_device dm_device;
 544
 545#ifdef CONFIG_MEMORY_HOTPLUG
 546
 547static void hv_bring_pgs_online(unsigned long start_pfn, unsigned long size)
 548{
 549	int i;
 550
 551	for (i = 0; i < size; i++) {
 552		struct page *pg;
 553		pg = pfn_to_page(start_pfn + i);
 554		__online_page_set_limits(pg);
 555		__online_page_increment_counters(pg);
 556		__online_page_free(pg);
 557	}
 558}
 559
 560static void hv_mem_hot_add(unsigned long start, unsigned long size,
 561				unsigned long pfn_count,
 562				struct hv_hotadd_state *has)
 563{
 564	int ret = 0;
 565	int i, nid;
 566	unsigned long start_pfn;
 567	unsigned long processed_pfn;
 568	unsigned long total_pfn = pfn_count;
 569
 570	for (i = 0; i < (size/HA_CHUNK); i++) {
 571		start_pfn = start + (i * HA_CHUNK);
 572		has->ha_end_pfn +=  HA_CHUNK;
 573
 574		if (total_pfn > HA_CHUNK) {
 575			processed_pfn = HA_CHUNK;
 576			total_pfn -= HA_CHUNK;
 577		} else {
 578			processed_pfn = total_pfn;
 579			total_pfn = 0;
 580		}
 581
 582		has->covered_end_pfn +=  processed_pfn;
 583
 584		init_completion(&dm_device.ol_waitevent);
 585		dm_device.ha_waiting = true;
 586
 587		nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
 588		ret = add_memory(nid, PFN_PHYS((start_pfn)),
 589				(HA_CHUNK << PAGE_SHIFT));
 590
 591		if (ret) {
 592			pr_info("hot_add memory failed, error is %d\n", ret);
 593			if (ret == -EEXIST) {
 594				/*
 595				 * This error indicates that the failure
 596				 * is not transient. This is the
 597				 * case where the guest's physical address map
 598				 * precludes hot adding memory. Stop all further
 599				 * memory hot-add.
 600				 */
 601				do_hot_add = false;
 602			}
 603			has->ha_end_pfn -= HA_CHUNK;
 604			has->covered_end_pfn -=  processed_pfn;
 605			break;
 606		}
 607
 608		/*
 609		 * Wait for the memory block to be onlined.
 610		 * Since the hot add has succeeded, it is ok to
 611		 * proceed even if the pages in the hot added region
 612		 * have not been "onlined" within the allowed time.
 613		 */
 614		wait_for_completion_timeout(&dm_device.ol_waitevent, 5*HZ);
 615
 616	}
 617
 618	return;
 619}
 620
 621static void hv_online_page(struct page *pg)
 622{
 623	struct list_head *cur;
 624	struct hv_hotadd_state *has;
 625	unsigned long cur_start_pgp;
 626	unsigned long cur_end_pgp;
 627
 628	if (dm_device.ha_waiting) {
 629		dm_device.ha_waiting = false;
 630		complete(&dm_device.ol_waitevent);
 631	}
 632
 633	list_for_each(cur, &dm_device.ha_region_list) {
 634		has = list_entry(cur, struct hv_hotadd_state, list);
 635		cur_start_pgp = (unsigned long)
 636				pfn_to_page(has->covered_start_pfn);
 637		cur_end_pgp = (unsigned long)pfn_to_page(has->covered_end_pfn);
 638
 639		if (((unsigned long)pg >= cur_start_pgp) &&
 640			((unsigned long)pg < cur_end_pgp)) {
 641			/*
 642			 * This frame is currently backed; online the
 643			 * page.
 644			 */
 645			__online_page_set_limits(pg);
 646			__online_page_increment_counters(pg);
 647			__online_page_free(pg);
 648			has->covered_start_pfn++;
 649		}
 650	}
 651}
 652
 653static bool pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
 654{
 655	struct list_head *cur;
 656	struct hv_hotadd_state *has;
 657	unsigned long residual, new_inc;
 658
 659	if (list_empty(&dm_device.ha_region_list))
 660		return false;
 661
 662	list_for_each(cur, &dm_device.ha_region_list) {
 663		has = list_entry(cur, struct hv_hotadd_state, list);
 664
 665		/*
 666		 * If the pfn range we are dealing with is not in the current
 667		 * "hot add block", move on.
 668		 */
 669		if ((start_pfn >= has->end_pfn))
 670			continue;
 671		/*
 672		 * If the current hot-add request extends beyond
 673		 * our current limit, extend it.
 674		 */
 675		if ((start_pfn + pfn_cnt) > has->end_pfn) {
 676			residual = (start_pfn + pfn_cnt - has->end_pfn);
 677			/*
 678			 * Extend the region by multiples of HA_CHUNK.
 679			 */
 680			new_inc = (residual / HA_CHUNK) * HA_CHUNK;
 681			if (residual % HA_CHUNK)
 682				new_inc += HA_CHUNK;
 683
 684			has->end_pfn += new_inc;
 685		}
 686
 687		/*
 688		 * If the current start pfn is not where the covered_end
 689		 * is, update it.
 690		 */
 691
 692		if (has->covered_end_pfn != start_pfn) {
 693			has->covered_end_pfn = start_pfn;
 694			has->covered_start_pfn = start_pfn;
 695		}
 696		return true;
 697
 698	}
 699
 700	return false;
 701}
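/*
 * The round-up above in one example: with HA_CHUNK = 32768, a residual
 * of 1000 pfns gives new_inc = (1000 / 32768) * 32768 = 0, plus one
 * HA_CHUNK for the remainder, so the region grows by a full 128M chunk.
 */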
 702
 703static unsigned long handle_pg_range(unsigned long pg_start,
 704					unsigned long pg_count)
 705{
 706	unsigned long start_pfn = pg_start;
 707	unsigned long pfn_cnt = pg_count;
 708	unsigned long size;
 709	struct list_head *cur;
 710	struct hv_hotadd_state *has;
 711	unsigned long pgs_ol = 0;
 712	unsigned long old_covered_state;
 713
 714	if (list_empty(&dm_device.ha_region_list))
 715		return 0;
 716
 717	list_for_each(cur, &dm_device.ha_region_list) {
 718		has = list_entry(cur, struct hv_hotadd_state, list);
 719
 720		/*
 721		 * If the pfn range we are dealing with is not in the current
 722		 * "hot add block", move on.
 723		 */
 724		if ((start_pfn >= has->end_pfn))
 725			continue;
 726
 727		old_covered_state = has->covered_end_pfn;
 728
 729		if (start_pfn < has->ha_end_pfn) {
 730			/*
 731			 * This is the case where we are backing pages
 732			 * in an already hot added region. Bring
 733			 * these pages online first.
 734			 */
 735			pgs_ol = has->ha_end_pfn - start_pfn;
 736			if (pgs_ol > pfn_cnt)
 737				pgs_ol = pfn_cnt;
 738			hv_bring_pgs_online(start_pfn, pgs_ol);
 739			has->covered_end_pfn +=  pgs_ol;
 740			has->covered_start_pfn +=  pgs_ol;
 741			pfn_cnt -= pgs_ol;
 742		}
 743
 744		if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) {
 745			/*
 746			 * We have some residual hot add range
 747			 * that needs to be hot added; hot add
 748			 * it now. Hot add a multiple of
 749			 * HA_CHUNK that fully covers the pages
 750			 * we have.
 751			 */
 752			size = (has->end_pfn - has->ha_end_pfn);
 753			if (pfn_cnt <= size) {
 754				size = ((pfn_cnt / HA_CHUNK) * HA_CHUNK);
 755				if (pfn_cnt % HA_CHUNK)
 756					size += HA_CHUNK;
 757			} else {
 758				pfn_cnt = size;
 759			}
 760			hv_mem_hot_add(has->ha_end_pfn, size, pfn_cnt, has);
 761		}
 762		/*
 763		 * If we managed to online any pages that were given to us,
 764		 * we declare success.
 765		 */
 766		return has->covered_end_pfn - old_covered_state;
 767
 768	}
 769
 770	return 0;
 771}
 772
 773static unsigned long process_hot_add(unsigned long pg_start,
 774					unsigned long pfn_cnt,
 775					unsigned long rg_start,
 776					unsigned long rg_size)
 777{
 778	struct hv_hotadd_state *ha_region = NULL;
 779
 780	if (pfn_cnt == 0)
 781		return 0;
 782
 783	if (!dm_device.host_specified_ha_region)
 784		if (pfn_covered(pg_start, pfn_cnt))
 785			goto do_pg_range;
 786
 787	/*
 788	 * If the host has specified a hot-add range; deal with it first.
 789	 */
 790
 791	if (rg_size != 0) {
 792		ha_region = kzalloc(sizeof(struct hv_hotadd_state), GFP_KERNEL);
 793		if (!ha_region)
 794			return 0;
 795
 796		INIT_LIST_HEAD(&ha_region->list);
 797
 798		list_add_tail(&ha_region->list, &dm_device.ha_region_list);
 799		ha_region->start_pfn = rg_start;
 800		ha_region->ha_end_pfn = rg_start;
 801		ha_region->covered_start_pfn = pg_start;
 802		ha_region->covered_end_pfn = pg_start;
 803		ha_region->end_pfn = rg_start + rg_size;
 804	}
 805
 806do_pg_range:
 807	/*
 808	 * Process the specified page range, bringing the pages
 809	 * online if possible.
 810	 */
 811	return handle_pg_range(pg_start, pfn_cnt);
 812}
 813
 814#endif
 815
 816static void hot_add_req(struct work_struct *dummy)
 817{
 818	struct dm_hot_add_response resp;
 819#ifdef CONFIG_MEMORY_HOTPLUG
 820	unsigned long pg_start, pfn_cnt;
 821	unsigned long rg_start, rg_sz;
 822#endif
 823	struct hv_dynmem_device *dm = &dm_device;
 824
 825	memset(&resp, 0, sizeof(struct dm_hot_add_response));
 826	resp.hdr.type = DM_MEM_HOT_ADD_RESPONSE;
 827	resp.hdr.size = sizeof(struct dm_hot_add_response);
 828
 829#ifdef CONFIG_MEMORY_HOTPLUG
 830	pg_start = dm->ha_wrk.ha_page_range.finfo.start_page;
 831	pfn_cnt = dm->ha_wrk.ha_page_range.finfo.page_cnt;
 832
 833	rg_start = dm->ha_wrk.ha_region_range.finfo.start_page;
 834	rg_sz = dm->ha_wrk.ha_region_range.finfo.page_cnt;
 835
 836	if ((rg_start == 0) && (!dm->host_specified_ha_region)) {
 837		unsigned long region_size;
 838		unsigned long region_start;
 839
 840		/*
 841		 * The host has not specified the hot-add region.
 842		 * Based on the hot-add page range being specified,
 843		 * compute a hot-add region that can cover the pages
 844		 * that need to be hot-added while ensuring the alignment
 845		 * and size requirements of Linux as it relates to hot-add.
 846		 */
 847		region_start = pg_start;
 848		region_size = (pfn_cnt / HA_CHUNK) * HA_CHUNK;
 849		if (pfn_cnt % HA_CHUNK)
 850			region_size += HA_CHUNK;
 851
 852		region_start = (pg_start / HA_CHUNK) * HA_CHUNK;
 853
 854		rg_start = region_start;
 855		rg_sz = region_size;
 856	}
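	/*
	 * With hypothetical numbers: pg_start = 0x10100 and pfn_cnt = 1000
	 * round region_size up to one HA_CHUNK (32768 pfns) and
	 * region_start down to 0x10000, yielding a chunk-aligned 128M
	 * region that contains the requested pages.
	 */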
 857
 858	if (do_hot_add)
 859		resp.page_count = process_hot_add(pg_start, pfn_cnt,
 860						rg_start, rg_sz);
 861#endif
 862	/*
 863	 * The result field of the response structure has the
 864	 * following semantics:
 865	 *
 866	 * 1. If all or some pages hot-added: Guest should return success.
 867	 *
 868	 * 2. If no pages could be hot-added:
 869	 *
 870	 * If the guest returns success, then the host
 871	 * will not attempt any further hot-add operations. This
 872	 * signifies a permanent failure.
 873	 *
 874	 * If the guest returns failure, then this failure will be
 875	 * treated as a transient failure and the host may retry the
 876	 * hot-add operation after some delay.
 877	 */
 878	if (resp.page_count > 0)
 879		resp.result = 1;
 880	else if (!do_hot_add)
 881		resp.result = 1;
 882	else
 883		resp.result = 0;
 884
 885	if (!do_hot_add || (resp.page_count == 0))
 886		pr_info("Memory hot add failed\n");
 887
 888	dm->state = DM_INITIALIZED;
 889	resp.hdr.trans_id = atomic_inc_return(&trans_id);
 890	vmbus_sendpacket(dm->dev->channel, &resp,
 891			sizeof(struct dm_hot_add_response),
 892			(unsigned long)NULL,
 893			VM_PKT_DATA_INBAND, 0);
 894}
 895
 896static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
 897{
 898	struct dm_info_header *info_hdr;
 899
 900	info_hdr = (struct dm_info_header *)msg->info;
 901
 902	switch (info_hdr->type) {
 903	case INFO_TYPE_MAX_PAGE_CNT:
 904		pr_info("Received INFO_TYPE_MAX_PAGE_CNT\n");
 905		pr_info("Data Size is %d\n", info_hdr->data_size);
 906		break;
 907	default:
 908		pr_info("Received Unknown type: %d\n", info_hdr->type);
 909	}
 910}
 911
 912static unsigned long compute_balloon_floor(void)
 913{
 914	unsigned long min_pages;
 915#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
 916	/* Simple continuous piecewise linear function:
 917	 *  max MiB -> min MiB  gradient
 918	 *       0         0
 919	 *      16        16
 920	 *      32        24
 921	 *     128        72    (1/2)
 922	 *     512       168    (1/4)
 923	 *    2048       360    (1/8)
 924	 *    8192       552    (1/32)
 925	 *   32768      1320
 926	 *  131072      4392
 927	 */
 928	if (totalram_pages < MB2PAGES(128))
 929		min_pages = MB2PAGES(8) + (totalram_pages >> 1);
 930	else if (totalram_pages < MB2PAGES(512))
 931		min_pages = MB2PAGES(40) + (totalram_pages >> 2);
 932	else if (totalram_pages < MB2PAGES(2048))
 933		min_pages = MB2PAGES(104) + (totalram_pages >> 3);
 934	else
 935		min_pages = MB2PAGES(296) + (totalram_pages >> 5);
 936#undef MB2PAGES
 937	return min_pages;
 938}
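/*
 * Checking one row of the table above: for a 2048 MiB guest,
 * totalram_pages == MB2PAGES(2048) falls through to the else branch,
 * so min_pages = MB2PAGES(296) + MB2PAGES(2048)/32
 *              = MB2PAGES(296 + 64) = MB2PAGES(360),
 * matching the documented 2048 -> 360 mapping.
 */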
 939
 940/*
 941 * Post our status as it relates to memory pressure to the
 942 * host. The host expects the guest to post this status
 943 * periodically at 1 second intervals.
 944 *
 945 * The metrics specified in this protocol are very Windows
 946 * specific and so we cook up numbers here to convey our memory
 947 * pressure.
 948 */
 949
 950static void post_status(struct hv_dynmem_device *dm)
 951{
 952	struct dm_status status;
 953	struct sysinfo val;
 954
 955	if (pressure_report_delay > 0) {
 956		--pressure_report_delay;
 957		return;
 958	}
 959	si_meminfo(&val);
 960	memset(&status, 0, sizeof(struct dm_status));
 961	status.hdr.type = DM_STATUS_REPORT;
 962	status.hdr.size = sizeof(struct dm_status);
 963	status.hdr.trans_id = atomic_inc_return(&trans_id);
 964
 965	/*
 966	 * The host expects the guest to report free memory.
 967	 * Further, the host expects the pressure information to
 968	 * include the ballooned out pages.
 969	 * For a given amount of memory that we are managing, we
 970	 * need to compute a floor below which we should not balloon.
 971	 * Compute this and add it to the pressure report.
 972	 */
 973	status.num_avail = val.freeram;
 974	status.num_committed = vm_memory_committed() +
 975				dm->num_pages_ballooned +
 976				compute_balloon_floor();
 977
 978	/*
 979	 * If our transaction ID is no longer current, just don't
 980	 * send the status. This can happen if we were interrupted
 981	 * after we picked our transaction ID.
 982	 */
 983	if (status.hdr.trans_id != atomic_read(&trans_id))
 984		return;
 985
 986	vmbus_sendpacket(dm->dev->channel, &status,
 987				sizeof(struct dm_status),
 988				(unsigned long)NULL,
 989				VM_PKT_DATA_INBAND, 0);
 990
 991}
 992
 993static void free_balloon_pages(struct hv_dynmem_device *dm,
 994			 union dm_mem_page_range *range_array)
 995{
 996	int num_pages = range_array->finfo.page_cnt;
 997	__u64 start_frame = range_array->finfo.start_page;
 998	struct page *pg;
 999	int i;
1000
1001	for (i = 0; i < num_pages; i++) {
1002		pg = pfn_to_page(i + start_frame);
1003		__free_page(pg);
1004		dm->num_pages_ballooned--;
1005	}
1006}
1007
1008
1009
1010static int  alloc_balloon_pages(struct hv_dynmem_device *dm, int num_pages,
1011			 struct dm_balloon_response *bl_resp, int alloc_unit,
1012			 bool *alloc_error)
1013{
1014	int i = 0;
1015	struct page *pg;
1016
1017	if (num_pages < alloc_unit)
1018		return 0;
1019
1020	for (i = 0; (i * alloc_unit) < num_pages; i++) {
1021		if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) >
1022			PAGE_SIZE)
1023			return i * alloc_unit;
1024
1025		/*
1026		 * We execute this code in a thread context. Furthermore,
1027		 * we don't want the kernel to try too hard.
1028		 */
1029		pg = alloc_pages(GFP_HIGHUSER | __GFP_NORETRY |
1030				__GFP_NOMEMALLOC | __GFP_NOWARN,
1031				get_order(alloc_unit << PAGE_SHIFT));
1032
1033		if (!pg) {
1034			*alloc_error = true;
1035			return i * alloc_unit;
1036		}
1037
1038
1039		dm->num_pages_ballooned += alloc_unit;
1040
1041		/*
1042		 * If we allocated 2M pages, split them so we
1043		 * can free them in any order.
1044		 */
1045
1046		if (alloc_unit != 1)
1047			split_page(pg, get_order(alloc_unit << PAGE_SHIFT));
1048
1049		bl_resp->range_count++;
1050		bl_resp->range_array[i].finfo.start_page =
1051			page_to_pfn(pg);
1052		bl_resp->range_array[i].finfo.page_cnt = alloc_unit;
1053		bl_resp->hdr.size += sizeof(union dm_mem_page_range);
1054
1055	}
1056
1057	return num_pages;
1058}
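/*
 * Allocation-unit arithmetic: with 4K pages, the initial alloc_unit of
 * 512 makes get_order(512 << PAGE_SHIFT) == get_order(2MB) == 9, so each
 * successful allocation is one 2M compound page, which split_page()
 * turns into 512 order-0 pages that free_balloon_pages() can release
 * individually.
 */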
1059
1060
1061
1062static void balloon_up(struct work_struct *dummy)
1063{
1064	int num_pages = dm_device.balloon_wrk.num_pages;
1065	int num_ballooned = 0;
1066	struct dm_balloon_response *bl_resp;
1067	int alloc_unit;
1068	int ret;
1069	bool alloc_error = false;
1070	bool done = false;
1071	int i;
1072
1073
1074	/*
1075	 * We will attempt 2M allocations. However, if we fail to
1076	 * allocate 2M chunks, we will go back to 4k allocations.
1077	 */
1078	alloc_unit = 512;
1079
1080	while (!done) {
1081		bl_resp = (struct dm_balloon_response *)send_buffer;
1082		memset(send_buffer, 0, PAGE_SIZE);
1083		bl_resp->hdr.type = DM_BALLOON_RESPONSE;
1084		bl_resp->hdr.size = sizeof(struct dm_balloon_response);
1085		bl_resp->more_pages = 1;
1086
1087
1088		num_pages -= num_ballooned;
1089		num_ballooned = alloc_balloon_pages(&dm_device, num_pages,
1090						bl_resp, alloc_unit,
1091						 &alloc_error);
1092
1093		if ((alloc_error) && (alloc_unit != 1)) {
1094			alloc_unit = 1;
1095			continue;
1096		}
1097
1098		if ((alloc_error) || (num_ballooned == num_pages)) {
1099			bl_resp->more_pages = 0;
1100			done = true;
1101			dm_device.state = DM_INITIALIZED;
1102		}
1103
1104		/*
1105		 * We are pushing a lot of data through the channel;
1106		 * deal with transient failures caused by the
1107		 * lack of space in the ring buffer.
1108		 */
1109
1110		do {
1111			bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
1112			ret = vmbus_sendpacket(dm_device.dev->channel,
1113						bl_resp,
1114						bl_resp->hdr.size,
1115						(unsigned long)NULL,
1116						VM_PKT_DATA_INBAND, 0);
1117
1118			if (ret == -EAGAIN)
1119				msleep(20);
1120
1121		} while (ret == -EAGAIN);
1122
1123		if (ret) {
1124			/*
1125			 * Free up the memory we allocated.
1126			 */
1127			pr_info("Balloon response failed\n");
1128
1129			for (i = 0; i < bl_resp->range_count; i++)
1130				free_balloon_pages(&dm_device,
1131						 &bl_resp->range_array[i]);
1132
1133			done = true;
1134		}
1135	}
1136
1137}
1138
1139static void balloon_down(struct hv_dynmem_device *dm,
1140			struct dm_unballoon_request *req)
1141{
1142	union dm_mem_page_range *range_array = req->range_array;
1143	int range_count = req->range_count;
1144	struct dm_unballoon_response resp;
1145	int i;
1146
1147	for (i = 0; i < range_count; i++)
1148		free_balloon_pages(dm, &range_array[i]);
1149
1150	if (req->more_pages == 1)
1151		return;
1152
1153	memset(&resp, 0, sizeof(struct dm_unballoon_response));
1154	resp.hdr.type = DM_UNBALLOON_RESPONSE;
1155	resp.hdr.trans_id = atomic_inc_return(&trans_id);
1156	resp.hdr.size = sizeof(struct dm_unballoon_response);
1157
1158	vmbus_sendpacket(dm_device.dev->channel, &resp,
1159				sizeof(struct dm_unballoon_response),
1160				(unsigned long)NULL,
1161				VM_PKT_DATA_INBAND, 0);
1162
1163	dm->state = DM_INITIALIZED;
1164}
1165
1166static void balloon_onchannelcallback(void *context);
1167
1168static int dm_thread_func(void *dm_dev)
1169{
1170	struct hv_dynmem_device *dm = dm_dev;
1171	int t;
1172
1173	while (!kthread_should_stop()) {
1174		t = wait_for_completion_interruptible_timeout(
1175						&dm_device.config_event, 1*HZ);
1176		/*
1177		 * The host expects us to post information on the memory
1178		 * pressure every second.
1179		 */
1180
1181		if (t == 0)
1182			post_status(dm);
1183
1184	}
1185
1186	return 0;
1187}
1188
1189
1190static void version_resp(struct hv_dynmem_device *dm,
1191			struct dm_version_response *vresp)
1192{
1193	struct dm_version_request version_req;
1194	int ret;
1195
1196	if (vresp->is_accepted) {
1197		/*
1198		 * We are done; wake up the
1199		 * context waiting for version
1200		 * negotiation.
1201		 */
1202		complete(&dm->host_event);
1203		return;
1204	}
1205	/*
1206	 * If there are more versions to try, continue
1207	 * with negotiations; if not,
1208	 * shut down the service since we are not able
1209	 * to negotiate a suitable version number
1210	 * with the host.
1211	 */
1212	if (dm->next_version == 0)
1213		goto version_error;
1214
1215	dm->next_version = 0;
1216	memset(&version_req, 0, sizeof(struct dm_version_request));
1217	version_req.hdr.type = DM_VERSION_REQUEST;
1218	version_req.hdr.size = sizeof(struct dm_version_request);
1219	version_req.hdr.trans_id = atomic_inc_return(&trans_id);
1220	version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
1221	version_req.is_last_attempt = 1;
1222
1223	ret = vmbus_sendpacket(dm->dev->channel, &version_req,
1224				sizeof(struct dm_version_request),
1225				(unsigned long)NULL,
1226				VM_PKT_DATA_INBAND, 0);
1227
1228	if (ret)
1229		goto version_error;
1230
1231	return;
1232
1233version_error:
1234	dm->state = DM_INIT_ERROR;
1235	complete(&dm->host_event);
1236}
1237
1238static void cap_resp(struct hv_dynmem_device *dm,
1239			struct dm_capabilities_resp_msg *cap_resp)
1240{
1241	if (!cap_resp->is_accepted) {
1242		pr_info("Capabilities not accepted by host\n");
1243		dm->state = DM_INIT_ERROR;
1244	}
1245	complete(&dm->host_event);
1246}
1247
1248static void balloon_onchannelcallback(void *context)
1249{
1250	struct hv_device *dev = context;
1251	u32 recvlen;
1252	u64 requestid;
1253	struct dm_message *dm_msg;
1254	struct dm_header *dm_hdr;
1255	struct hv_dynmem_device *dm = hv_get_drvdata(dev);
1256	struct dm_balloon *bal_msg;
1257	struct dm_hot_add *ha_msg;
1258	union dm_mem_page_range *ha_pg_range;
1259	union dm_mem_page_range *ha_region;
1260
1261	memset(recv_buffer, 0, sizeof(recv_buffer));
1262	vmbus_recvpacket(dev->channel, recv_buffer,
1263			 PAGE_SIZE, &recvlen, &requestid);
1264
1265	if (recvlen > 0) {
1266		dm_msg = (struct dm_message *)recv_buffer;
1267		dm_hdr = &dm_msg->hdr;
1268
1269		switch (dm_hdr->type) {
1270		case DM_VERSION_RESPONSE:
1271			version_resp(dm,
1272				 (struct dm_version_response *)dm_msg);
1273			break;
1274
1275		case DM_CAPABILITIES_RESPONSE:
1276			cap_resp(dm,
1277				 (struct dm_capabilities_resp_msg *)dm_msg);
1278			break;
1279
1280		case DM_BALLOON_REQUEST:
1281			if (dm->state == DM_BALLOON_UP)
1282				pr_warn("Currently ballooning\n");
1283			bal_msg = (struct dm_balloon *)recv_buffer;
1284			dm->state = DM_BALLOON_UP;
1285			dm_device.balloon_wrk.num_pages = bal_msg->num_pages;
1286			schedule_work(&dm_device.balloon_wrk.wrk);
1287			break;
1288
1289		case DM_UNBALLOON_REQUEST:
1290			dm->state = DM_BALLOON_DOWN;
1291			balloon_down(dm,
1292				 (struct dm_unballoon_request *)recv_buffer);
1293			break;
1294
1295		case DM_MEM_HOT_ADD_REQUEST:
1296			if (dm->state == DM_HOT_ADD)
1297				pr_warn("Currently hot-adding\n");
1298			dm->state = DM_HOT_ADD;
1299			ha_msg = (struct dm_hot_add *)recv_buffer;
1300			if (ha_msg->hdr.size == sizeof(struct dm_hot_add)) {
1301				/*
1302				 * This is a normal hot-add request specifying
1303				 * hot-add memory.
1304				 */
1305				ha_pg_range = &ha_msg->range;
1306				dm->ha_wrk.ha_page_range = *ha_pg_range;
1307				dm->ha_wrk.ha_region_range.page_range = 0;
1308			} else {
1309				/*
1310				 * Host is specifying that we first hot-add
1311				 * a region and then partially populate this
1312				 * region.
1313				 */
1314				dm->host_specified_ha_region = true;
1315				ha_pg_range = &ha_msg->range;
1316				ha_region = &ha_pg_range[1];
1317				dm->ha_wrk.ha_page_range = *ha_pg_range;
1318				dm->ha_wrk.ha_region_range = *ha_region;
1319			}
1320			schedule_work(&dm_device.ha_wrk.wrk);
1321			break;
1322
1323		case DM_INFO_MESSAGE:
1324			process_info(dm, (struct dm_info_msg *)dm_msg);
1325			break;
1326
1327		default:
1328			pr_err("Unhandled message: type: %d\n", dm_hdr->type);
1329
1330		}
1331	}
1332
1333}
1334
1335static int balloon_probe(struct hv_device *dev,
1336			const struct hv_vmbus_device_id *dev_id)
1337{
1338	int ret, t;
1339	struct dm_version_request version_req;
1340	struct dm_capabilities cap_msg;
1341
1342	do_hot_add = hot_add;
1343
1344	/*
1345	 * First allocate a send buffer.
1346	 */
1347
1348	send_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
1349	if (!send_buffer)
1350		return -ENOMEM;
1351
1352	ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0,
1353			balloon_onchannelcallback, dev);
1354
1355	if (ret)
1356		goto probe_error0;
1357
1358	dm_device.dev = dev;
1359	dm_device.state = DM_INITIALIZING;
1360	dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN7;
1361	init_completion(&dm_device.host_event);
1362	init_completion(&dm_device.config_event);
1363	INIT_LIST_HEAD(&dm_device.ha_region_list);
1364	INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);
1365	INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req);
1366	dm_device.host_specified_ha_region = false;
1367
1368	dm_device.thread =
1369		 kthread_run(dm_thread_func, &dm_device, "hv_balloon");
1370	if (IS_ERR(dm_device.thread)) {
1371		ret = PTR_ERR(dm_device.thread);
1372		goto probe_error1;
1373	}
1374
1375#ifdef CONFIG_MEMORY_HOTPLUG
1376	set_online_page_callback(&hv_online_page);
1377#endif
1378
1379	hv_set_drvdata(dev, &dm_device);
1380	/*
1381	 * Initiate the handshake with the host and negotiate
1382	 * a version that the host can support. We start with the
1383	 * highest version number and go down if the host cannot
1384	 * support it.
1385	 */
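	/*
	 * Concretely, per version_resp() above: we first offer
	 * DYNMEM_PROTOCOL_VERSION_WIN8; if the host rejects it, one more
	 * request is sent with DYNMEM_PROTOCOL_VERSION_WIN7 and
	 * is_last_attempt set, and a second rejection puts us in
	 * DM_INIT_ERROR.
	 */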
1386	memset(&version_req, 0, sizeof(struct dm_version_request));
1387	version_req.hdr.type = DM_VERSION_REQUEST;
1388	version_req.hdr.size = sizeof(struct dm_version_request);
1389	version_req.hdr.trans_id = atomic_inc_return(&trans_id);
1390	version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
1391	version_req.is_last_attempt = 0;
1392
1393	ret = vmbus_sendpacket(dev->channel, &version_req,
1394				sizeof(struct dm_version_request),
1395				(unsigned long)NULL,
1396				VM_PKT_DATA_INBAND, 0);
1397	if (ret)
1398		goto probe_error2;
1399
1400	t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
1401	if (t == 0) {
1402		ret = -ETIMEDOUT;
1403		goto probe_error2;
1404	}
1405
1406	/*
1407	 * If we could not negotiate a compatible version with the host,
1408	 * fail the probe function.
1409	 */
1410	if (dm_device.state == DM_INIT_ERROR) {
1411		ret = -ETIMEDOUT;
1412		goto probe_error2;
1413	}
1414	/*
1415	 * Now submit our capabilities to the host.
1416	 */
1417	memset(&cap_msg, 0, sizeof(struct dm_capabilities));
1418	cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
1419	cap_msg.hdr.size = sizeof(struct dm_capabilities);
1420	cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
1421
1422	cap_msg.caps.cap_bits.balloon = 1;
1423	cap_msg.caps.cap_bits.hot_add = 1;
1424
1425	/*
1426	 * Specify our alignment requirements as they relate
1427	 * to memory hot-add. Specify 128MB alignment.
1428	 */
1429	cap_msg.caps.cap_bits.hot_add_alignment = 7;
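	/* 7 encodes an alignment of 2^7 MB = 128MB, i.e. one HA_CHUNK. */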
1430
1431	/*
1432	 * Currently the host does not use these
1433	 * values and we set them to what is done in the
1434	 * Windows driver.
1435	 */
1436	cap_msg.min_page_cnt = 0;
1437	cap_msg.max_page_number = -1;
1438
1439	ret = vmbus_sendpacket(dev->channel, &cap_msg,
1440				sizeof(struct dm_capabilities),
1441				(unsigned long)NULL,
1442				VM_PKT_DATA_INBAND, 0);
1443	if (ret)
1444		goto probe_error2;
1445
1446	t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
1447	if (t == 0) {
1448		ret = -ETIMEDOUT;
1449		goto probe_error2;
1450	}
1451
1452	/*
1453	 * If the host does not like our capabilities,
1454	 * fail the probe function.
1455	 */
1456	if (dm_device.state == DM_INIT_ERROR) {
1457		ret = -ETIMEDOUT;
1458		goto probe_error2;
1459	}
1460
1461	dm_device.state = DM_INITIALIZED;
1462
1463	return 0;
1464
1465probe_error2:
1466#ifdef CONFIG_MEMORY_HOTPLUG
1467	restore_online_page_callback(&hv_online_page);
1468#endif
1469	kthread_stop(dm_device.thread);
1470
1471probe_error1:
1472	vmbus_close(dev->channel);
1473probe_error0:
1474	kfree(send_buffer);
1475	return ret;
1476}
1477
1478static int balloon_remove(struct hv_device *dev)
1479{
1480	struct hv_dynmem_device *dm = hv_get_drvdata(dev);
1481	struct list_head *cur, *tmp;
1482	struct hv_hotadd_state *has;
1483
1484	if (dm->num_pages_ballooned != 0)
1485		pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);
1486
1487	cancel_work_sync(&dm->balloon_wrk.wrk);
1488	cancel_work_sync(&dm->ha_wrk.wrk);
1489
1490	vmbus_close(dev->channel);
1491	kthread_stop(dm->thread);
1492	kfree(send_buffer);
1493#ifdef CONFIG_MEMORY_HOTPLUG
1494	restore_online_page_callback(&hv_online_page);
1495#endif
1496	list_for_each_safe(cur, tmp, &dm->ha_region_list) {
1497		has = list_entry(cur, struct hv_hotadd_state, list);
1498		list_del(&has->list);
1499		kfree(has);
1500	}
1501
1502	return 0;
1503}
1504
1505static const struct hv_vmbus_device_id id_table[] = {
1506	/* Dynamic Memory Class ID */
1507	/* 525074DC-8985-46e2-8057-A307DC18A502 */
1508	{ HV_DM_GUID, },
1509	{ },
1510};
1511
1512MODULE_DEVICE_TABLE(vmbus, id_table);
1513
1514static  struct hv_driver balloon_drv = {
1515	.name = "hv_balloon",
1516	.id_table = id_table,
1517	.probe =  balloon_probe,
1518	.remove =  balloon_remove,
1519};
1520
1521static int __init init_balloon_drv(void)
1522{
1523
1524	return vmbus_driver_register(&balloon_drv);
1525}
1526
1527module_init(init_balloon_drv);
1528
1529MODULE_DESCRIPTION("Hyper-V Balloon");
1530MODULE_LICENSE("GPL");