// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012, Microsoft Corporation.
 *
 * Author:
 *   K. Y. Srinivasan <kys@microsoft.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/mman.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/memory_hotplug.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/percpu_counter.h>
#include <linux/page_reporting.h>

#include <linux/hyperv.h>
#include <asm/hyperv-tlfs.h>

#include <asm/mshyperv.h>

#define CREATE_TRACE_POINTS
#include "hv_trace_balloon.h"

/*
 * We begin with definitions supporting the Dynamic Memory protocol
 * with the host.
 *
 * Begin protocol definitions.
 */



/*
 * Protocol versions. The low word is the minor version, the high word the major
 * version.
 *
 * History:
 * Initial version 1.0
 * Changed to 0.1 on 2009/03/25
 * Changes to 0.2 on 2009/05/14
 * Changes to 0.3 on 2009/12/03
 * Changed to 1.0 on 2011/04/05
 */

#define DYNMEM_MAKE_VERSION(Major, Minor) ((__u32)(((Major) << 16) | (Minor)))
#define DYNMEM_MAJOR_VERSION(Version) ((__u32)(Version) >> 16)
#define DYNMEM_MINOR_VERSION(Version) ((__u32)(Version) & 0xff)

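/*
 * Worked example (illustrative, not part of the protocol headers): the
 * major version lives in the high word and the minor in the low word,
 * so:
 *
 *	DYNMEM_MAKE_VERSION(2, 0)		== 0x00020000
 *	DYNMEM_MAJOR_VERSION(0x00020000)	== 2
 *	DYNMEM_MINOR_VERSION(0x00020003)	== 3
 *
 * (note that the 0xff mask keeps only the low byte of the minor version).
 */
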
enum {
	DYNMEM_PROTOCOL_VERSION_1 = DYNMEM_MAKE_VERSION(0, 3),
	DYNMEM_PROTOCOL_VERSION_2 = DYNMEM_MAKE_VERSION(1, 0),
	DYNMEM_PROTOCOL_VERSION_3 = DYNMEM_MAKE_VERSION(2, 0),

	DYNMEM_PROTOCOL_VERSION_WIN7 = DYNMEM_PROTOCOL_VERSION_1,
	DYNMEM_PROTOCOL_VERSION_WIN8 = DYNMEM_PROTOCOL_VERSION_2,
	DYNMEM_PROTOCOL_VERSION_WIN10 = DYNMEM_PROTOCOL_VERSION_3,

	DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN10
};



/*
 * Message Types
 */

enum dm_message_type {
	/*
	 * Version 0.3
	 */
	DM_ERROR = 0,
	DM_VERSION_REQUEST = 1,
	DM_VERSION_RESPONSE = 2,
	DM_CAPABILITIES_REPORT = 3,
	DM_CAPABILITIES_RESPONSE = 4,
	DM_STATUS_REPORT = 5,
	DM_BALLOON_REQUEST = 6,
	DM_BALLOON_RESPONSE = 7,
	DM_UNBALLOON_REQUEST = 8,
	DM_UNBALLOON_RESPONSE = 9,
	DM_MEM_HOT_ADD_REQUEST = 10,
	DM_MEM_HOT_ADD_RESPONSE = 11,
	DM_VERSION_03_MAX = 11,
	/*
	 * Version 1.0.
	 */
	DM_INFO_MESSAGE = 12,
	DM_VERSION_1_MAX = 12
};


/*
 * Structures defining the dynamic memory management
 * protocol.
 */

union dm_version {
	struct {
		__u16 minor_version;
		__u16 major_version;
	};
	__u32 version;
} __packed;


union dm_caps {
	struct {
		__u64 balloon:1;
		__u64 hot_add:1;
		/*
		 * To support guests that may have alignment
		 * limitations on hot-add, the guest can specify
		 * its alignment requirements; a value of n
		 * represents an alignment of 2^n megabytes.
		 */
		__u64 hot_add_alignment:4;
		__u64 reservedz:58;
	} cap_bits;
	__u64 caps;
} __packed;

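/*
 * Illustrative example: a guest that can only hot-add on 128 MB
 * boundaries reports hot_add_alignment = 7, since 2^7 MB == 128 MB.
 * This driver does exactly that when it reports its capabilities to
 * the host.
 */
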
union dm_mem_page_range {
	struct {
		/*
		 * The PFN number of the first page in the range.
		 * 40 bits is the architectural limit of a PFN
		 * number for AMD64.
		 */
		__u64 start_page:40;
		/*
		 * The number of pages in the range.
		 */
		__u64 page_cnt:24;
	} finfo;
	__u64 page_range;
} __packed;

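/*
 * Worked example (illustrative, assuming the x86 bitfield layout where
 * the first-declared field occupies the low bits): the union packs the
 * start PFN into the low 40 bits and the page count into the high 24
 * bits, so a range of 512 pages starting at PFN 0x100000 is encoded as
 *
 *	page_range = ((__u64)512 << 40) | 0x100000
 *	           = 0x0002000000100000
 *
 * and decodes back via finfo.start_page / finfo.page_cnt.
 */
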


/*
 * The header for all dynamic memory messages:
 *
 * type: Type of the message.
 * size: Size of the message in bytes, including the header.
 * trans_id: The guest is responsible for manufacturing this ID.
 */

struct dm_header {
	__u16 type;
	__u16 size;
	__u32 trans_id;
} __packed;

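/*
 * A minimal sketch (illustrative, mirroring the send paths later in
 * this file) of how an outgoing message's header is filled, here for a
 * status report:
 *
 *	struct dm_status status;
 *
 *	status.hdr.type = DM_STATUS_REPORT;
 *	status.hdr.size = sizeof(struct dm_status);
 *	status.hdr.trans_id = atomic_inc_return(&trans_id);
 */
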
/*
 * A generic message format for dynamic memory.
 * Specific message formats are defined later in the file.
 */

struct dm_message {
	struct dm_header hdr;
	__u8 data[]; /* enclosed message */
} __packed;


/*
 * Specific message types supporting the dynamic memory protocol.
 */

/*
 * Version negotiation message. Sent from the guest to the host.
 * The guest is free to try different versions until the host
 * accepts the version.
 *
 * dm_version: The protocol version requested.
 * is_last_attempt: If TRUE, this is the last version the guest will request.
 * reservedz: Reserved field, set to zero.
 */

struct dm_version_request {
	struct dm_header hdr;
	union dm_version version;
	__u32 is_last_attempt:1;
	__u32 reservedz:31;
} __packed;

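/*
 * Illustrative negotiation sketch (this mirrors what balloon_probe()
 * and version_resp() do later in the file): the guest first requests
 * the newest version it supports and walks downward on rejection:
 *
 *	version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN10;
 *	version_req.is_last_attempt = 0;
 *	// if rejected, retry with DYNMEM_PROTOCOL_VERSION_WIN8, then
 *	// DYNMEM_PROTOCOL_VERSION_WIN7 with is_last_attempt = 1.
 */
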
/*
 * Version response message; sent from the host to the guest and
 * indicates whether the host has accepted the version sent by the guest.
 *
 * is_accepted: If TRUE, the host has accepted the version and the guest
 * should proceed to the next stage of the protocol. FALSE indicates that
 * the guest should retry with a different version.
 *
 * reservedz: Reserved field, set to zero.
 */

struct dm_version_response {
	struct dm_header hdr;
	__u64 is_accepted:1;
	__u64 reservedz:63;
} __packed;

/*
 * Message reporting capabilities. This is sent from the guest to the
 * host.
 */

struct dm_capabilities {
	struct dm_header hdr;
	union dm_caps caps;
	__u64 min_page_cnt;
	__u64 max_page_number;
} __packed;

/*
 * Response to the capabilities message. This is sent from the host to the
 * guest. This message notifies if the host has accepted the guest's
 * capabilities. If the host has not accepted, the guest must shut down
 * the service.
 *
 * is_accepted: Indicates if the host has accepted the guest's capabilities.
 * reservedz: Must be 0.
 */

struct dm_capabilities_resp_msg {
	struct dm_header hdr;
	__u64 is_accepted:1;
	__u64 reservedz:63;
} __packed;

/*
 * This message is used to report memory pressure from the guest.
 * This message is not part of any transaction and there is no
 * response to this message.
 *
 * num_avail: Available memory in pages.
 * num_committed: Committed memory in pages.
 * page_file_size: The accumulated size of all page files
 * in the system in pages.
 * zero_free: The number of zero and free pages.
 * page_file_writes: The writes to the page file in pages.
 * io_diff: An indicator of file cache efficiency or page file activity,
 * calculated as File Cache Page Fault Count - Page Read Count.
 * This value is in pages.
 *
 * Some of these metrics are Windows specific and fortunately
 * the algorithm on the host side that computes the guest memory
 * pressure only uses the num_committed value.
 */

struct dm_status {
	struct dm_header hdr;
	__u64 num_avail;
	__u64 num_committed;
	__u64 page_file_size;
	__u64 zero_free;
	__u32 page_file_writes;
	__u32 io_diff;
} __packed;

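/*
 * A simplified sketch of how this driver cooks up a status report (see
 * post_status() later in the file; the real code also guards against
 * underflow). Only num_committed really matters to the host:
 *
 *	status.num_avail = val.freeram;
 *	status.num_committed = vm_memory_committed() +
 *		dm->num_pages_ballooned +
 *		(dm->num_pages_added - dm->num_pages_onlined) +
 *		compute_balloon_floor();
 */
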

/*
 * Message to ask the guest to allocate memory - balloon up message.
 * This message is sent from the host to the guest. The guest may not be
 * able to allocate as much memory as requested.
 *
 * num_pages: number of pages to allocate.
 */

struct dm_balloon {
	struct dm_header hdr;
	__u32 num_pages;
	__u32 reservedz;
} __packed;


/*
 * Balloon response message; this message is sent from the guest
 * to the host in response to the balloon message.
 *
 * reservedz: Reserved; must be set to zero.
 * more_pages: If FALSE, this is the last message of the transaction.
 * If TRUE, there will be at least one more message from the guest.
 *
 * range_count: The number of ranges in the range array.
 *
 * range_array: An array of page ranges returned to the host.
 *
 */

struct dm_balloon_response {
	struct dm_header hdr;
	__u32 reservedz;
	__u32 more_pages:1;
	__u32 range_count:31;
	union dm_mem_page_range range_array[];
} __packed;

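/*
 * Sizing note (illustrative): a response is built in a single
 * page-sized send buffer, so with a 4 KB page at most
 *
 *	(4096 - sizeof(struct dm_balloon_response)) /
 *		sizeof(union dm_mem_page_range)
 *	= (4096 - 16) / 8 = 510
 *
 * ranges fit in one message; more_pages = 1 signals that further
 * responses follow in the same transaction.
 */
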
/*
 * Un-balloon message; this message is sent from the host
 * to the guest to give the guest more memory.
 *
 * more_pages: If FALSE, this is the last message of the transaction.
 * If TRUE, there will be at least one more message from the host.
 *
 * reservedz: Reserved; must be set to zero.
 *
 * range_count: The number of ranges in the range array.
 *
 * range_array: An array of page ranges returned to the host.
 *
 */

struct dm_unballoon_request {
	struct dm_header hdr;
	__u32 more_pages:1;
	__u32 reservedz:31;
	__u32 range_count;
	union dm_mem_page_range range_array[];
} __packed;

/*
 * Un-balloon response message; this message is sent from the guest
 * to the host in response to an unballoon request.
 *
 */

struct dm_unballoon_response {
	struct dm_header hdr;
} __packed;


/*
 * Hot add request message. Message sent from the host to the guest.
 *
 * mem_range: Memory range to hot add.
 *
 */

struct dm_hot_add {
	struct dm_header hdr;
	union dm_mem_page_range range;
} __packed;

/*
 * Hot add response message.
 * This message is sent by the guest to report the status of a hot add request.
 * If page_count is less than the requested page count, then the host should
 * assume all further hot add requests will fail, since this indicates that
 * the guest has hit an upper physical memory barrier.
 *
 * Hot adds may also fail due to low resources; in this case, the guest must
 * not complete this message until the hot add can succeed, and the host must
 * not send a new hot add request until the response is sent.
 * If the VSC fails to hot add memory DYNMEM_NUMBER_OF_UNSUCCESSFUL_HOTADD_ATTEMPTS
 * times, it fails the request.
 *
 * page_count: number of pages that were successfully hot added.
 *
 * result: result of the operation 1: success, 0: failure.
 *
 */

struct dm_hot_add_response {
	struct dm_header hdr;
	__u32 page_count;
	__u32 result;
} __packed;

/*
 * Types of information sent from host to the guest.
 */

enum dm_info_type {
	INFO_TYPE_MAX_PAGE_CNT = 0,
	MAX_INFO_TYPE
};


/*
 * Header for the information message.
 */

struct dm_info_header {
	enum dm_info_type type;
	__u32 data_size;
} __packed;

/*
 * This message is sent from the host to the guest to pass
 * some relevant information (win8 addition).
 *
 * reserved: not used.
 * info_size: size of the information blob.
 * info: information blob.
 */

struct dm_info_msg {
	struct dm_header hdr;
	__u32 reserved;
	__u32 info_size;
	__u8 info[];
};

/*
 * End protocol definitions.
 */

/*
 * State to manage hot adding memory into the guest.
 * The range start_pfn : end_pfn specifies the range
 * that the host has asked us to hot add. The range
 * start_pfn : ha_end_pfn specifies the range that we have
 * currently hot added. We hot add in multiples of 128M
 * chunks; it is possible that we may not be able to bring
 * online all the pages in the region. The range
 * covered_start_pfn:covered_end_pfn defines the pages that can
 * be brought online.
 */

struct hv_hotadd_state {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long covered_start_pfn;
	unsigned long covered_end_pfn;
	unsigned long ha_end_pfn;
	unsigned long end_pfn;
	/*
	 * A list of gaps.
	 */
	struct list_head gap_list;
};

struct hv_hotadd_gap {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long end_pfn;
};

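/*
 * Illustrative example of how these ranges nest (made-up values, not a
 * real trace): for a 128 MB hot-add region starting at PFN 0x100000,
 *
 *	start_pfn = covered_start_pfn = 0x100000
 *	end_pfn = 0x100000 + HA_CHUNK
 *
 * If the host later populates only part of the region, covered_end_pfn
 * stops short of end_pfn, and any unbacked holes inside the covered
 * range are recorded as hv_hotadd_gap entries on gap_list (see
 * has_pfn_is_backed() below).
 */
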
451struct balloon_state {
452 __u32 num_pages;
453 struct work_struct wrk;
454};
455
456struct hot_add_wrk {
457 union dm_mem_page_range ha_page_range;
458 union dm_mem_page_range ha_region_range;
459 struct work_struct wrk;
460};
461
462static bool allow_hibernation;
463static bool hot_add = true;
464static bool do_hot_add;
465/*
466 * Delay reporting memory pressure by
467 * the specified number of seconds.
468 */
469static uint pressure_report_delay = 45;
470
471/*
472 * The last time we posted a pressure report to host.
473 */
474static unsigned long last_post_time;
475
476module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
477MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
478
479module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(pressure_report_delay, "Delay in secs before reporting pressure");
481static atomic_t trans_id = ATOMIC_INIT(0);
482
483static int dm_ring_size = 20 * 1024;
484
485/*
486 * Driver specific state.
487 */
488
489enum hv_dm_state {
490 DM_INITIALIZING = 0,
491 DM_INITIALIZED,
492 DM_BALLOON_UP,
493 DM_BALLOON_DOWN,
494 DM_HOT_ADD,
495 DM_INIT_ERROR
496};
497
498
499static __u8 recv_buffer[HV_HYP_PAGE_SIZE];
500static __u8 balloon_up_send_buffer[HV_HYP_PAGE_SIZE];
501#define PAGES_IN_2M (2 * 1024 * 1024 / PAGE_SIZE)
502#define HA_CHUNK (128 * 1024 * 1024 / PAGE_SIZE)
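/*
 * With 4 KiB pages, PAGES_IN_2M is 512 PFNs and HA_CHUNK is 32768 PFNs;
 * that is, memory is hot added in 128 MiB chunks.
 */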
503
504struct hv_dynmem_device {
505 struct hv_device *dev;
506 enum hv_dm_state state;
507 struct completion host_event;
508 struct completion config_event;
509
510 /*
511 * Number of pages we have currently ballooned out.
512 */
513 unsigned int num_pages_ballooned;
514 unsigned int num_pages_onlined;
515 unsigned int num_pages_added;
516
517 /*
518 * State to manage the ballooning (up) operation.
519 */
520 struct balloon_state balloon_wrk;
521
522 /*
523 * State to execute the "hot-add" operation.
524 */
525 struct hot_add_wrk ha_wrk;
526
527 /*
528 * This state tracks if the host has specified a hot-add
529 * region.
530 */
531 bool host_specified_ha_region;
532
533 /*
534 * State to synchronize hot-add.
535 */
536 struct completion ol_waitevent;
537 /*
538 * This thread handles hot-add
539 * requests from the host as well as notifying
540 * the host with regards to memory pressure in
541 * the guest.
542 */
543 struct task_struct *thread;
544
 /*
 * Protects ha_region_list, the num_pages_onlined counter, and the
 * individual regions on ha_region_list.
 */
549 spinlock_t ha_lock;
550
551 /*
552 * A list of hot-add regions.
553 */
554 struct list_head ha_region_list;
555
556 /*
557 * We start with the highest version we can support
558 * and downgrade based on the host; we save here the
559 * next version to try.
560 */
561 __u32 next_version;
562
563 /*
564 * The negotiated version agreed by host.
565 */
566 __u32 version;
567
568 struct page_reporting_dev_info pr_dev_info;
569};
570
571static struct hv_dynmem_device dm_device;
572
573static void post_status(struct hv_dynmem_device *dm);
574
575#ifdef CONFIG_MEMORY_HOTPLUG
576static inline bool has_pfn_is_backed(struct hv_hotadd_state *has,
577 unsigned long pfn)
578{
579 struct hv_hotadd_gap *gap;
580
581 /* The page is not backed. */
582 if ((pfn < has->covered_start_pfn) || (pfn >= has->covered_end_pfn))
583 return false;
584
585 /* Check for gaps. */
586 list_for_each_entry(gap, &has->gap_list, list) {
587 if ((pfn >= gap->start_pfn) && (pfn < gap->end_pfn))
588 return false;
589 }
590
591 return true;
592}
593
594static unsigned long hv_page_offline_check(unsigned long start_pfn,
595 unsigned long nr_pages)
596{
597 unsigned long pfn = start_pfn, count = 0;
598 struct hv_hotadd_state *has;
599 bool found;
600
601 while (pfn < start_pfn + nr_pages) {
 /*
 * Search for the HAS which covers the pfn and, when we find one,
 * count how many consecutive PFNs are covered.
 */
606 found = false;
607 list_for_each_entry(has, &dm_device.ha_region_list, list) {
608 while ((pfn >= has->start_pfn) &&
609 (pfn < has->end_pfn) &&
610 (pfn < start_pfn + nr_pages)) {
611 found = true;
612 if (has_pfn_is_backed(has, pfn))
613 count++;
614 pfn++;
615 }
616 }
617
618 /*
619 * This PFN is not in any HAS (e.g. we're offlining a region
620 * which was present at boot), no need to account for it. Go
621 * to the next one.
622 */
623 if (!found)
624 pfn++;
625 }
626
627 return count;
628}
629
630static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
631 void *v)
632{
633 struct memory_notify *mem = (struct memory_notify *)v;
634 unsigned long flags, pfn_count;
635
636 switch (val) {
637 case MEM_ONLINE:
638 case MEM_CANCEL_ONLINE:
639 complete(&dm_device.ol_waitevent);
640 break;
641
642 case MEM_OFFLINE:
643 spin_lock_irqsave(&dm_device.ha_lock, flags);
644 pfn_count = hv_page_offline_check(mem->start_pfn,
645 mem->nr_pages);
646 if (pfn_count <= dm_device.num_pages_onlined) {
647 dm_device.num_pages_onlined -= pfn_count;
648 } else {
649 /*
650 * We're offlining more pages than we managed to online.
651 * This is unexpected. In any case don't let
652 * num_pages_onlined wrap around zero.
653 */
654 WARN_ON_ONCE(1);
655 dm_device.num_pages_onlined = 0;
656 }
657 spin_unlock_irqrestore(&dm_device.ha_lock, flags);
658 break;
659 case MEM_GOING_ONLINE:
660 case MEM_GOING_OFFLINE:
661 case MEM_CANCEL_OFFLINE:
662 break;
663 }
664 return NOTIFY_OK;
665}
666
667static struct notifier_block hv_memory_nb = {
668 .notifier_call = hv_memory_notifier,
669 .priority = 0
670};
671
/* Check if the particular page is backed and can be onlined; if so, online it. */
673static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
674{
675 if (!has_pfn_is_backed(has, page_to_pfn(pg))) {
676 if (!PageOffline(pg))
677 __SetPageOffline(pg);
678 return;
679 }
680 if (PageOffline(pg))
681 __ClearPageOffline(pg);
682
683 /* This frame is currently backed; online the page. */
684 generic_online_page(pg, 0);
685
686 lockdep_assert_held(&dm_device.ha_lock);
687 dm_device.num_pages_onlined++;
688}
689
690static void hv_bring_pgs_online(struct hv_hotadd_state *has,
691 unsigned long start_pfn, unsigned long size)
692{
693 int i;
694
695 pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn);
696 for (i = 0; i < size; i++)
697 hv_page_online_one(has, pfn_to_page(start_pfn + i));
698}
699
700static void hv_mem_hot_add(unsigned long start, unsigned long size,
701 unsigned long pfn_count,
702 struct hv_hotadd_state *has)
703{
704 int ret = 0;
705 int i, nid;
706 unsigned long start_pfn;
707 unsigned long processed_pfn;
708 unsigned long total_pfn = pfn_count;
709 unsigned long flags;
710
711 for (i = 0; i < (size/HA_CHUNK); i++) {
712 start_pfn = start + (i * HA_CHUNK);
713
714 spin_lock_irqsave(&dm_device.ha_lock, flags);
715 has->ha_end_pfn += HA_CHUNK;
716
717 if (total_pfn > HA_CHUNK) {
718 processed_pfn = HA_CHUNK;
719 total_pfn -= HA_CHUNK;
720 } else {
721 processed_pfn = total_pfn;
722 total_pfn = 0;
723 }
724
725 has->covered_end_pfn += processed_pfn;
726 spin_unlock_irqrestore(&dm_device.ha_lock, flags);
727
728 reinit_completion(&dm_device.ol_waitevent);
729
730 nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
731 ret = add_memory(nid, PFN_PHYS((start_pfn)),
732 (HA_CHUNK << PAGE_SHIFT), MHP_MERGE_RESOURCE);
733
734 if (ret) {
 pr_err("hot_add memory failed, error is %d\n", ret);
736 if (ret == -EEXIST) {
 /*
 * This error indicates that the failure
 * is not transient: the guest's physical
 * address map precludes hot adding memory.
 * Stop all further memory hot-add.
 */
744 do_hot_add = false;
745 }
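 /* Roll back the bookkeeping done above for the failed chunk. */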
746 spin_lock_irqsave(&dm_device.ha_lock, flags);
747 has->ha_end_pfn -= HA_CHUNK;
748 has->covered_end_pfn -= processed_pfn;
749 spin_unlock_irqrestore(&dm_device.ha_lock, flags);
750 break;
751 }
752
753 /*
754 * Wait for memory to get onlined. If the kernel onlined the
755 * memory when adding it, this will return directly. Otherwise,
756 * it will wait for user space to online the memory. This helps
757 * to avoid adding memory faster than it is getting onlined. As
758 * adding succeeded, it is ok to proceed even if the memory was
759 * not onlined in time.
760 */
761 wait_for_completion_timeout(&dm_device.ol_waitevent, 5 * HZ);
762 post_status(&dm_device);
763 }
764}
765
766static void hv_online_page(struct page *pg, unsigned int order)
767{
768 struct hv_hotadd_state *has;
769 unsigned long flags;
770 unsigned long pfn = page_to_pfn(pg);
771
772 spin_lock_irqsave(&dm_device.ha_lock, flags);
773 list_for_each_entry(has, &dm_device.ha_region_list, list) {
774 /* The page belongs to a different HAS. */
775 if ((pfn < has->start_pfn) ||
776 (pfn + (1UL << order) > has->end_pfn))
777 continue;
778
779 hv_bring_pgs_online(has, pfn, 1UL << order);
780 break;
781 }
782 spin_unlock_irqrestore(&dm_device.ha_lock, flags);
783}
784
785static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
786{
787 struct hv_hotadd_state *has;
788 struct hv_hotadd_gap *gap;
789 unsigned long residual, new_inc;
790 int ret = 0;
791 unsigned long flags;
792
793 spin_lock_irqsave(&dm_device.ha_lock, flags);
794 list_for_each_entry(has, &dm_device.ha_region_list, list) {
795 /*
796 * If the pfn range we are dealing with is not in the current
797 * "hot add block", move on.
798 */
799 if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
800 continue;
801
802 /*
803 * If the current start pfn is not where the covered_end
804 * is, create a gap and update covered_end_pfn.
805 */
806 if (has->covered_end_pfn != start_pfn) {
807 gap = kzalloc(sizeof(struct hv_hotadd_gap), GFP_ATOMIC);
808 if (!gap) {
809 ret = -ENOMEM;
810 break;
811 }
812
813 INIT_LIST_HEAD(&gap->list);
814 gap->start_pfn = has->covered_end_pfn;
815 gap->end_pfn = start_pfn;
816 list_add_tail(&gap->list, &has->gap_list);
817
818 has->covered_end_pfn = start_pfn;
819 }
820
 /*
 * If the current hot-add request extends beyond
 * our current limit, extend it.
 */
825 if ((start_pfn + pfn_cnt) > has->end_pfn) {
826 residual = (start_pfn + pfn_cnt - has->end_pfn);
827 /*
828 * Extend the region by multiples of HA_CHUNK.
829 */
830 new_inc = (residual / HA_CHUNK) * HA_CHUNK;
831 if (residual % HA_CHUNK)
832 new_inc += HA_CHUNK;
833
834 has->end_pfn += new_inc;
835 }
836
837 ret = 1;
838 break;
839 }
840 spin_unlock_irqrestore(&dm_device.ha_lock, flags);
841
842 return ret;
843}
844
845static unsigned long handle_pg_range(unsigned long pg_start,
846 unsigned long pg_count)
847{
848 unsigned long start_pfn = pg_start;
849 unsigned long pfn_cnt = pg_count;
850 unsigned long size;
851 struct hv_hotadd_state *has;
852 unsigned long pgs_ol = 0;
853 unsigned long old_covered_state;
854 unsigned long res = 0, flags;
855
856 pr_debug("Hot adding %lu pages starting at pfn 0x%lx.\n", pg_count,
857 pg_start);
858
859 spin_lock_irqsave(&dm_device.ha_lock, flags);
860 list_for_each_entry(has, &dm_device.ha_region_list, list) {
861 /*
862 * If the pfn range we are dealing with is not in the current
863 * "hot add block", move on.
864 */
865 if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
866 continue;
867
868 old_covered_state = has->covered_end_pfn;
869
870 if (start_pfn < has->ha_end_pfn) {
871 /*
872 * This is the case where we are backing pages
873 * in an already hot added region. Bring
874 * these pages online first.
875 */
876 pgs_ol = has->ha_end_pfn - start_pfn;
877 if (pgs_ol > pfn_cnt)
878 pgs_ol = pfn_cnt;
879
880 has->covered_end_pfn += pgs_ol;
881 pfn_cnt -= pgs_ol;
882 /*
 * Check if the corresponding memory block is already
 * online. It is possible to observe struct pages still
 * being uninitialized here, so check the section instead.
 * In case the section is online, we need to bring the
 * rest of the pfns (which were not backed previously)
 * online too.
889 */
890 if (start_pfn > has->start_pfn &&
891 online_section_nr(pfn_to_section_nr(start_pfn)))
892 hv_bring_pgs_online(has, start_pfn, pgs_ol);
893
894 }
895
896 if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) {
897 /*
898 * We have some residual hot add range
899 * that needs to be hot added; hot add
900 * it now. Hot add a multiple of
 * HA_CHUNK that fully covers the pages
902 * we have.
903 */
904 size = (has->end_pfn - has->ha_end_pfn);
905 if (pfn_cnt <= size) {
906 size = ((pfn_cnt / HA_CHUNK) * HA_CHUNK);
907 if (pfn_cnt % HA_CHUNK)
908 size += HA_CHUNK;
909 } else {
910 pfn_cnt = size;
911 }
912 spin_unlock_irqrestore(&dm_device.ha_lock, flags);
913 hv_mem_hot_add(has->ha_end_pfn, size, pfn_cnt, has);
914 spin_lock_irqsave(&dm_device.ha_lock, flags);
915 }
916 /*
917 * If we managed to online any pages that were given to us,
918 * we declare success.
919 */
920 res = has->covered_end_pfn - old_covered_state;
921 break;
922 }
923 spin_unlock_irqrestore(&dm_device.ha_lock, flags);
924
925 return res;
926}
927
928static unsigned long process_hot_add(unsigned long pg_start,
929 unsigned long pfn_cnt,
930 unsigned long rg_start,
931 unsigned long rg_size)
932{
933 struct hv_hotadd_state *ha_region = NULL;
934 int covered;
935 unsigned long flags;
936
937 if (pfn_cnt == 0)
938 return 0;
939
940 if (!dm_device.host_specified_ha_region) {
941 covered = pfn_covered(pg_start, pfn_cnt);
942 if (covered < 0)
943 return 0;
944
945 if (covered)
946 goto do_pg_range;
947 }
948
949 /*
 * If the host has specified a hot-add range, deal with it first.
951 */
952
953 if (rg_size != 0) {
954 ha_region = kzalloc(sizeof(struct hv_hotadd_state), GFP_KERNEL);
955 if (!ha_region)
956 return 0;
957
958 INIT_LIST_HEAD(&ha_region->list);
959 INIT_LIST_HEAD(&ha_region->gap_list);
960
961 ha_region->start_pfn = rg_start;
962 ha_region->ha_end_pfn = rg_start;
963 ha_region->covered_start_pfn = pg_start;
964 ha_region->covered_end_pfn = pg_start;
965 ha_region->end_pfn = rg_start + rg_size;
966
967 spin_lock_irqsave(&dm_device.ha_lock, flags);
968 list_add_tail(&ha_region->list, &dm_device.ha_region_list);
969 spin_unlock_irqrestore(&dm_device.ha_lock, flags);
970 }
971
972do_pg_range:
 /*
 * Process the specified page range, bringing the
 * pages online if possible.
 */
977 return handle_pg_range(pg_start, pfn_cnt);
978}
979
980#endif
981
982static void hot_add_req(struct work_struct *dummy)
983{
984 struct dm_hot_add_response resp;
985#ifdef CONFIG_MEMORY_HOTPLUG
986 unsigned long pg_start, pfn_cnt;
987 unsigned long rg_start, rg_sz;
988#endif
989 struct hv_dynmem_device *dm = &dm_device;
990
991 memset(&resp, 0, sizeof(struct dm_hot_add_response));
992 resp.hdr.type = DM_MEM_HOT_ADD_RESPONSE;
993 resp.hdr.size = sizeof(struct dm_hot_add_response);
994
995#ifdef CONFIG_MEMORY_HOTPLUG
996 pg_start = dm->ha_wrk.ha_page_range.finfo.start_page;
997 pfn_cnt = dm->ha_wrk.ha_page_range.finfo.page_cnt;
998
999 rg_start = dm->ha_wrk.ha_region_range.finfo.start_page;
1000 rg_sz = dm->ha_wrk.ha_region_range.finfo.page_cnt;
1001
1002 if ((rg_start == 0) && (!dm->host_specified_ha_region)) {
1003 unsigned long region_size;
1004 unsigned long region_start;
1005
1006 /*
1007 * The host has not specified the hot-add region.
1008 * Based on the hot-add page range being specified,
1009 * compute a hot-add region that can cover the pages
 * that need to be hot-added while meeting the alignment
 * and size requirements of Linux for hot-add.
1012 */
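 /*
 * Worked example (hypothetical numbers): with 4 KiB pages HA_CHUNK
 * is 32768 PFNs. For pg_start = 0x48100 and pfn_cnt = 1000,
 * region_size rounds up to one chunk (32768 PFNs) and region_start
 * aligns pg_start down to 0x48000.
 */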
1013 region_size = (pfn_cnt / HA_CHUNK) * HA_CHUNK;
1014 if (pfn_cnt % HA_CHUNK)
1015 region_size += HA_CHUNK;
1016
1017 region_start = (pg_start / HA_CHUNK) * HA_CHUNK;
1018
1019 rg_start = region_start;
1020 rg_sz = region_size;
1021 }
1022
1023 if (do_hot_add)
1024 resp.page_count = process_hot_add(pg_start, pfn_cnt,
1025 rg_start, rg_sz);
1026
1027 dm->num_pages_added += resp.page_count;
1028#endif
1029 /*
1030 * The result field of the response structure has the
1031 * following semantics:
1032 *
1033 * 1. If all or some pages hot-added: Guest should return success.
1034 *
1035 * 2. If no pages could be hot-added:
1036 *
1037 * If the guest returns success, then the host
1038 * will not attempt any further hot-add operations. This
1039 * signifies a permanent failure.
1040 *
1041 * If the guest returns failure, then this failure will be
1042 * treated as a transient failure and the host may retry the
1043 * hot-add operation after some delay.
1044 */
1045 if (resp.page_count > 0)
1046 resp.result = 1;
1047 else if (!do_hot_add)
1048 resp.result = 1;
1049 else
1050 resp.result = 0;
1051
1052 if (!do_hot_add || resp.page_count == 0) {
1053 if (!allow_hibernation)
1054 pr_err("Memory hot add failed\n");
1055 else
1056 pr_info("Ignore hot-add request!\n");
1057 }
1058
1059 dm->state = DM_INITIALIZED;
1060 resp.hdr.trans_id = atomic_inc_return(&trans_id);
1061 vmbus_sendpacket(dm->dev->channel, &resp,
1062 sizeof(struct dm_hot_add_response),
1063 (unsigned long)NULL,
1064 VM_PKT_DATA_INBAND, 0);
1065}
1066
1067static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
1068{
1069 struct dm_info_header *info_hdr;
1070
1071 info_hdr = (struct dm_info_header *)msg->info;
1072
1073 switch (info_hdr->type) {
1074 case INFO_TYPE_MAX_PAGE_CNT:
1075 if (info_hdr->data_size == sizeof(__u64)) {
1076 __u64 *max_page_count = (__u64 *)&info_hdr[1];
1077
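 /* Shift by (20 - HV_HYP_PAGE_SHIFT) to convert a page count to MB. */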
1078 pr_info("Max. dynamic memory size: %llu MB\n",
1079 (*max_page_count) >> (20 - HV_HYP_PAGE_SHIFT));
1080 }
1081
1082 break;
1083 default:
 pr_warn("Received unknown type: %d\n", info_hdr->type);
1085 }
1086}
1087
1088static unsigned long compute_balloon_floor(void)
1089{
1090 unsigned long min_pages;
1091 unsigned long nr_pages = totalram_pages();
1092#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
 /* Simple continuous piecewise linear function:
1094 * max MiB -> min MiB gradient
1095 * 0 0
1096 * 16 16
1097 * 32 24
1098 * 128 72 (1/2)
1099 * 512 168 (1/4)
1100 * 2048 360 (1/8)
1101 * 8192 744 (1/16)
1102 * 32768 1512 (1/32)
1103 */
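 /*
 * Worked example: for a 2048 MiB guest, nr_pages = MB2PAGES(2048),
 * so min_pages = MB2PAGES(232) + (nr_pages >> 4), i.e.
 * 232 MiB + 128 MiB = 360 MiB, matching the table above.
 */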
1104 if (nr_pages < MB2PAGES(128))
1105 min_pages = MB2PAGES(8) + (nr_pages >> 1);
1106 else if (nr_pages < MB2PAGES(512))
1107 min_pages = MB2PAGES(40) + (nr_pages >> 2);
1108 else if (nr_pages < MB2PAGES(2048))
1109 min_pages = MB2PAGES(104) + (nr_pages >> 3);
1110 else if (nr_pages < MB2PAGES(8192))
1111 min_pages = MB2PAGES(232) + (nr_pages >> 4);
1112 else
1113 min_pages = MB2PAGES(488) + (nr_pages >> 5);
1114#undef MB2PAGES
1115 return min_pages;
1116}
1117
/*
 * Post our status, as it relates to memory pressure, to the
 * host. The host expects guests to post this status
 * periodically at 1-second intervals.
 *
 * The metrics specified in this protocol are very Windows
 * specific, so we cook up numbers here to convey our memory
 * pressure.
 */
1127
1128static void post_status(struct hv_dynmem_device *dm)
1129{
1130 struct dm_status status;
1131 unsigned long now = jiffies;
1132 unsigned long last_post = last_post_time;
1133
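 /*
 * post_status() runs roughly once per second (dm_thread_func() posts
 * on a 1*HZ cadence), so decrementing once per call approximates the
 * configured delay in seconds.
 */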
1134 if (pressure_report_delay > 0) {
1135 --pressure_report_delay;
1136 return;
1137 }
1138
1139 if (!time_after(now, (last_post_time + HZ)))
1140 return;
1141
1142 memset(&status, 0, sizeof(struct dm_status));
1143 status.hdr.type = DM_STATUS_REPORT;
1144 status.hdr.size = sizeof(struct dm_status);
1145 status.hdr.trans_id = atomic_inc_return(&trans_id);
1146
1147 /*
1148 * The host expects the guest to report free and committed memory.
1149 * Furthermore, the host expects the pressure information to include
1150 * the ballooned out pages. For a given amount of memory that we are
1151 * managing we need to compute a floor below which we should not
1152 * balloon. Compute this and add it to the pressure report.
1153 * We also need to report all offline pages (num_pages_added -
1154 * num_pages_onlined) as committed to the host, otherwise it can try
1155 * asking us to balloon them out.
1156 */
1157 status.num_avail = si_mem_available();
1158 status.num_committed = vm_memory_committed() +
1159 dm->num_pages_ballooned +
1160 (dm->num_pages_added > dm->num_pages_onlined ?
1161 dm->num_pages_added - dm->num_pages_onlined : 0) +
1162 compute_balloon_floor();
1163
1164 trace_balloon_status(status.num_avail, status.num_committed,
1165 vm_memory_committed(), dm->num_pages_ballooned,
1166 dm->num_pages_added, dm->num_pages_onlined);
1167 /*
1168 * If our transaction ID is no longer current, just don't
1169 * send the status. This can happen if we were interrupted
1170 * after we picked our transaction ID.
1171 */
1172 if (status.hdr.trans_id != atomic_read(&trans_id))
1173 return;
1174
1175 /*
1176 * If the last post time that we sampled has changed,
 * we have raced; don't post the status.
1178 */
1179 if (last_post != last_post_time)
1180 return;
1181
1182 last_post_time = jiffies;
1183 vmbus_sendpacket(dm->dev->channel, &status,
1184 sizeof(struct dm_status),
1185 (unsigned long)NULL,
1186 VM_PKT_DATA_INBAND, 0);
1188}
1189
1190static void free_balloon_pages(struct hv_dynmem_device *dm,
1191 union dm_mem_page_range *range_array)
1192{
1193 int num_pages = range_array->finfo.page_cnt;
1194 __u64 start_frame = range_array->finfo.start_page;
1195 struct page *pg;
1196 int i;
1197
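 /* Return each ballooned page to the allocator and re-account it as managed. */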
1198 for (i = 0; i < num_pages; i++) {
1199 pg = pfn_to_page(i + start_frame);
1200 __ClearPageOffline(pg);
1201 __free_page(pg);
1202 dm->num_pages_ballooned--;
1203 adjust_managed_page_count(pg, 1);
1204 }
1205}
1206
1209static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
1210 unsigned int num_pages,
1211 struct dm_balloon_response *bl_resp,
1212 int alloc_unit)
1213{
1214 unsigned int i, j;
1215 struct page *pg;
1216
1217 for (i = 0; i < num_pages / alloc_unit; i++) {
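 /*
 * Each reported range adds one dm_mem_page_range to the response;
 * stop once the response would no longer fit in the single-page
 * send buffer.
 */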
1218 if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) >
1219 HV_HYP_PAGE_SIZE)
1220 return i * alloc_unit;
1221
1222 /*
1223 * We execute this code in a thread context. Furthermore,
1224 * we don't want the kernel to try too hard.
1225 */
1226 pg = alloc_pages(GFP_HIGHUSER | __GFP_NORETRY |
1227 __GFP_NOMEMALLOC | __GFP_NOWARN,
1228 get_order(alloc_unit << PAGE_SHIFT));
1229
1230 if (!pg)
1231 return i * alloc_unit;
1232
1233 dm->num_pages_ballooned += alloc_unit;
1234
 /*
 * If we allocated 2M pages, split them so we
 * can free them in any order we get them.
 */
1239
1240 if (alloc_unit != 1)
1241 split_page(pg, get_order(alloc_unit << PAGE_SHIFT));
1242
1243 /* mark all pages offline */
1244 for (j = 0; j < alloc_unit; j++) {
1245 __SetPageOffline(pg + j);
1246 adjust_managed_page_count(pg + j, -1);
1247 }
1248
1249 bl_resp->range_count++;
1250 bl_resp->range_array[i].finfo.start_page =
1251 page_to_pfn(pg);
1252 bl_resp->range_array[i].finfo.page_cnt = alloc_unit;
1253 bl_resp->hdr.size += sizeof(union dm_mem_page_range);
1254
1255 }
1256
1257 return i * alloc_unit;
1258}
1259
1260static void balloon_up(struct work_struct *dummy)
1261{
1262 unsigned int num_pages = dm_device.balloon_wrk.num_pages;
1263 unsigned int num_ballooned = 0;
1264 struct dm_balloon_response *bl_resp;
1265 int alloc_unit;
1266 int ret;
1267 bool done = false;
1268 int i;
1269 long avail_pages;
1270 unsigned long floor;
1271
1272 /*
1273 * We will attempt 2M allocations. However, if we fail to
1274 * allocate 2M chunks, we will go back to PAGE_SIZE allocations.
1275 */
1276 alloc_unit = PAGES_IN_2M;
1277
1278 avail_pages = si_mem_available();
1279 floor = compute_balloon_floor();
1280
1281 /* Refuse to balloon below the floor. */
1282 if (avail_pages < num_pages || avail_pages - num_pages < floor) {
1283 pr_info("Balloon request will be partially fulfilled. %s\n",
1284 avail_pages < num_pages ? "Not enough memory." :
1285 "Balloon floor reached.");
1286
1287 num_pages = avail_pages > floor ? (avail_pages - floor) : 0;
1288 }
1289
1290 while (!done) {
1291 memset(balloon_up_send_buffer, 0, HV_HYP_PAGE_SIZE);
1292 bl_resp = (struct dm_balloon_response *)balloon_up_send_buffer;
1293 bl_resp->hdr.type = DM_BALLOON_RESPONSE;
1294 bl_resp->hdr.size = sizeof(struct dm_balloon_response);
1295 bl_resp->more_pages = 1;
1296
1297 num_pages -= num_ballooned;
1298 num_ballooned = alloc_balloon_pages(&dm_device, num_pages,
1299 bl_resp, alloc_unit);
1300
1301 if (alloc_unit != 1 && num_ballooned == 0) {
1302 alloc_unit = 1;
1303 continue;
1304 }
1305
1306 if (num_ballooned == 0 || num_ballooned == num_pages) {
1307 pr_debug("Ballooned %u out of %u requested pages.\n",
1308 num_pages, dm_device.balloon_wrk.num_pages);
1309
1310 bl_resp->more_pages = 0;
1311 done = true;
1312 dm_device.state = DM_INITIALIZED;
1313 }
1314
1315 /*
1316 * We are pushing a lot of data through the channel;
 * deal with transient failures caused by the
1318 * lack of space in the ring buffer.
1319 */
1320
1321 do {
1322 bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
1323 ret = vmbus_sendpacket(dm_device.dev->channel,
1324 bl_resp,
1325 bl_resp->hdr.size,
1326 (unsigned long)NULL,
1327 VM_PKT_DATA_INBAND, 0);
1328
1329 if (ret == -EAGAIN)
1330 msleep(20);
1331 post_status(&dm_device);
1332 } while (ret == -EAGAIN);
1333
1334 if (ret) {
 /*
 * Free up the memory we allocated.
 */
1338 pr_err("Balloon response failed\n");
1339
1340 for (i = 0; i < bl_resp->range_count; i++)
1341 free_balloon_pages(&dm_device,
1342 &bl_resp->range_array[i]);
1343
1344 done = true;
1345 }
1346 }
1348}
1349
1350static void balloon_down(struct hv_dynmem_device *dm,
1351 struct dm_unballoon_request *req)
1352{
1353 union dm_mem_page_range *range_array = req->range_array;
1354 int range_count = req->range_count;
1355 struct dm_unballoon_response resp;
1356 int i;
1357 unsigned int prev_pages_ballooned = dm->num_pages_ballooned;
1358
1359 for (i = 0; i < range_count; i++) {
1360 free_balloon_pages(dm, &range_array[i]);
1361 complete(&dm_device.config_event);
1362 }
1363
1364 pr_debug("Freed %u ballooned pages.\n",
1365 prev_pages_ballooned - dm->num_pages_ballooned);
1366
1367 if (req->more_pages == 1)
1368 return;
1369
1370 memset(&resp, 0, sizeof(struct dm_unballoon_response));
1371 resp.hdr.type = DM_UNBALLOON_RESPONSE;
1372 resp.hdr.trans_id = atomic_inc_return(&trans_id);
1373 resp.hdr.size = sizeof(struct dm_unballoon_response);
1374
1375 vmbus_sendpacket(dm_device.dev->channel, &resp,
1376 sizeof(struct dm_unballoon_response),
1377 (unsigned long)NULL,
1378 VM_PKT_DATA_INBAND, 0);
1379
1380 dm->state = DM_INITIALIZED;
1381}
1382
1383static void balloon_onchannelcallback(void *context);
1384
1385static int dm_thread_func(void *dm_dev)
1386{
1387 struct hv_dynmem_device *dm = dm_dev;
1388
1389 while (!kthread_should_stop()) {
1390 wait_for_completion_interruptible_timeout(
1391 &dm_device.config_event, 1*HZ);
1392 /*
1393 * The host expects us to post information on the memory
1394 * pressure every second.
1395 */
1396 reinit_completion(&dm_device.config_event);
1397 post_status(dm);
1398 }
1399
1400 return 0;
1401}
1402
1404static void version_resp(struct hv_dynmem_device *dm,
1405 struct dm_version_response *vresp)
1406{
1407 struct dm_version_request version_req;
1408 int ret;
1409
1410 if (vresp->is_accepted) {
1411 /*
 * We are done; wake up the
1413 * context waiting for version
1414 * negotiation.
1415 */
1416 complete(&dm->host_event);
1417 return;
1418 }
1419 /*
 * If there are more versions to try, continue
 * with negotiations; if not, shut down the
 * service, since we are not able to negotiate
 * a suitable version number with the host.
1425 */
1426 if (dm->next_version == 0)
1427 goto version_error;
1428
1429 memset(&version_req, 0, sizeof(struct dm_version_request));
1430 version_req.hdr.type = DM_VERSION_REQUEST;
1431 version_req.hdr.size = sizeof(struct dm_version_request);
1432 version_req.hdr.trans_id = atomic_inc_return(&trans_id);
1433 version_req.version.version = dm->next_version;
1434 dm->version = version_req.version.version;
1435
1436 /*
 * Set the next version to try in case the current version fails.
1438 * Win7 protocol ought to be the last one to try.
1439 */
1440 switch (version_req.version.version) {
1441 case DYNMEM_PROTOCOL_VERSION_WIN8:
1442 dm->next_version = DYNMEM_PROTOCOL_VERSION_WIN7;
1443 version_req.is_last_attempt = 0;
1444 break;
1445 default:
1446 dm->next_version = 0;
1447 version_req.is_last_attempt = 1;
1448 }
1449
1450 ret = vmbus_sendpacket(dm->dev->channel, &version_req,
1451 sizeof(struct dm_version_request),
1452 (unsigned long)NULL,
1453 VM_PKT_DATA_INBAND, 0);
1454
1455 if (ret)
1456 goto version_error;
1457
1458 return;
1459
1460version_error:
1461 dm->state = DM_INIT_ERROR;
1462 complete(&dm->host_event);
1463}
1464
1465static void cap_resp(struct hv_dynmem_device *dm,
1466 struct dm_capabilities_resp_msg *cap_resp)
1467{
1468 if (!cap_resp->is_accepted) {
1469 pr_err("Capabilities not accepted by host\n");
1470 dm->state = DM_INIT_ERROR;
1471 }
1472 complete(&dm->host_event);
1473}
1474
1475static void balloon_onchannelcallback(void *context)
1476{
1477 struct hv_device *dev = context;
1478 u32 recvlen;
1479 u64 requestid;
1480 struct dm_message *dm_msg;
1481 struct dm_header *dm_hdr;
1482 struct hv_dynmem_device *dm = hv_get_drvdata(dev);
1483 struct dm_balloon *bal_msg;
1484 struct dm_hot_add *ha_msg;
1485 union dm_mem_page_range *ha_pg_range;
1486 union dm_mem_page_range *ha_region;
1487
1488 memset(recv_buffer, 0, sizeof(recv_buffer));
1489 vmbus_recvpacket(dev->channel, recv_buffer,
1490 HV_HYP_PAGE_SIZE, &recvlen, &requestid);
1491
1492 if (recvlen > 0) {
1493 dm_msg = (struct dm_message *)recv_buffer;
1494 dm_hdr = &dm_msg->hdr;
1495
1496 switch (dm_hdr->type) {
1497 case DM_VERSION_RESPONSE:
1498 version_resp(dm,
1499 (struct dm_version_response *)dm_msg);
1500 break;
1501
1502 case DM_CAPABILITIES_RESPONSE:
1503 cap_resp(dm,
1504 (struct dm_capabilities_resp_msg *)dm_msg);
1505 break;
1506
1507 case DM_BALLOON_REQUEST:
1508 if (allow_hibernation) {
1509 pr_info("Ignore balloon-up request!\n");
1510 break;
1511 }
1512
1513 if (dm->state == DM_BALLOON_UP)
1514 pr_warn("Currently ballooning\n");
1515 bal_msg = (struct dm_balloon *)recv_buffer;
1516 dm->state = DM_BALLOON_UP;
1517 dm_device.balloon_wrk.num_pages = bal_msg->num_pages;
1518 schedule_work(&dm_device.balloon_wrk.wrk);
1519 break;
1520
1521 case DM_UNBALLOON_REQUEST:
1522 if (allow_hibernation) {
1523 pr_info("Ignore balloon-down request!\n");
1524 break;
1525 }
1526
1527 dm->state = DM_BALLOON_DOWN;
1528 balloon_down(dm,
1529 (struct dm_unballoon_request *)recv_buffer);
1530 break;
1531
1532 case DM_MEM_HOT_ADD_REQUEST:
1533 if (dm->state == DM_HOT_ADD)
1534 pr_warn("Currently hot-adding\n");
1535 dm->state = DM_HOT_ADD;
1536 ha_msg = (struct dm_hot_add *)recv_buffer;
1537 if (ha_msg->hdr.size == sizeof(struct dm_hot_add)) {
1538 /*
1539 * This is a normal hot-add request specifying
1540 * hot-add memory.
1541 */
1542 dm->host_specified_ha_region = false;
1543 ha_pg_range = &ha_msg->range;
1544 dm->ha_wrk.ha_page_range = *ha_pg_range;
1545 dm->ha_wrk.ha_region_range.page_range = 0;
1546 } else {
1547 /*
1548 * Host is specifying that we first hot-add
1549 * a region and then partially populate this
1550 * region.
1551 */
1552 dm->host_specified_ha_region = true;
1553 ha_pg_range = &ha_msg->range;
1554 ha_region = &ha_pg_range[1];
1555 dm->ha_wrk.ha_page_range = *ha_pg_range;
1556 dm->ha_wrk.ha_region_range = *ha_region;
1557 }
1558 schedule_work(&dm_device.ha_wrk.wrk);
1559 break;
1560
1561 case DM_INFO_MESSAGE:
1562 process_info(dm, (struct dm_info_msg *)dm_msg);
1563 break;
1564
1565 default:
1566 pr_warn("Unhandled message: type: %d\n", dm_hdr->type);
1567
1568 }
1569 }
1570
1571}
1572
1573/* Hyper-V only supports reporting 2MB pages or higher */
1574#define HV_MIN_PAGE_REPORTING_ORDER 9
1575#define HV_MIN_PAGE_REPORTING_LEN (HV_HYP_PAGE_SIZE << HV_MIN_PAGE_REPORTING_ORDER)
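/*
 * HV_MIN_PAGE_REPORTING_ORDER of 9 with 4 KiB hypervisor pages yields a
 * 2 MB minimum reporting length (4096 << 9).
 */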
1576static int hv_free_page_report(struct page_reporting_dev_info *pr_dev_info,
1577 struct scatterlist *sgl, unsigned int nents)
1578{
1579 unsigned long flags;
1580 struct hv_memory_hint *hint;
1581 int i;
1582 u64 status;
1583 struct scatterlist *sg;
1584
1585 WARN_ON_ONCE(nents > HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES);
1586 WARN_ON_ONCE(sgl->length < HV_MIN_PAGE_REPORTING_LEN);
1587 local_irq_save(flags);
1588 hint = *(struct hv_memory_hint **)this_cpu_ptr(hyperv_pcpu_input_arg);
1589 if (!hint) {
1590 local_irq_restore(flags);
1591 return -ENOSPC;
1592 }
1593
1594 hint->type = HV_EXT_MEMORY_HEAT_HINT_TYPE_COLD_DISCARD;
1595 hint->reserved = 0;
1596 for_each_sg(sgl, sg, nents, i) {
1597 union hv_gpa_page_range *range;
1598
1599 range = &hint->ranges[i];
1600 range->address_space = 0;
1601 /* page reporting only reports 2MB pages or higher */
1602 range->page.largepage = 1;
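 /*
 * additional_pages counts the extra 2MB units in this range beyond
 * the first one.
 */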
1603 range->page.additional_pages =
1604 (sg->length / HV_MIN_PAGE_REPORTING_LEN) - 1;
1605 range->page_size = HV_GPA_PAGE_RANGE_PAGE_SIZE_2MB;
1606 range->base_large_pfn =
1607 page_to_hvpfn(sg_page(sg)) >> HV_MIN_PAGE_REPORTING_ORDER;
1608 }
1609
1610 status = hv_do_rep_hypercall(HV_EXT_CALL_MEMORY_HEAT_HINT, nents, 0,
1611 hint, NULL);
1612 local_irq_restore(flags);
1613 if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS) {
1614 pr_err("Cold memory discard hypercall failed with status %llx\n",
1615 status);
1616 return -EINVAL;
1617 }
1618
1619 return 0;
1620}
1621
1622static void enable_page_reporting(void)
1623{
1624 int ret;
1625
 /* Essentially, validate that 'PAGE_REPORTING_MIN_ORDER' (pageblock_order) is big enough. */
1627 if (pageblock_order < HV_MIN_PAGE_REPORTING_ORDER) {
1628 pr_debug("Cold memory discard is only supported on 2MB pages and above\n");
1629 return;
1630 }
1631
1632 if (!hv_query_ext_cap(HV_EXT_CAPABILITY_MEMORY_COLD_DISCARD_HINT)) {
1633 pr_debug("Cold memory discard hint not supported by Hyper-V\n");
1634 return;
1635 }
1636
1637 BUILD_BUG_ON(PAGE_REPORTING_CAPACITY > HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES);
1638 dm_device.pr_dev_info.report = hv_free_page_report;
1639 ret = page_reporting_register(&dm_device.pr_dev_info);
1640 if (ret < 0) {
1641 dm_device.pr_dev_info.report = NULL;
1642 pr_err("Failed to enable cold memory discard: %d\n", ret);
1643 } else {
1644 pr_info("Cold memory discard hint enabled\n");
1645 }
1646}
1647
1648static void disable_page_reporting(void)
1649{
1650 if (dm_device.pr_dev_info.report) {
1651 page_reporting_unregister(&dm_device.pr_dev_info);
1652 dm_device.pr_dev_info.report = NULL;
1653 }
1654}
1655
1656static int balloon_connect_vsp(struct hv_device *dev)
1657{
1658 struct dm_version_request version_req;
1659 struct dm_capabilities cap_msg;
1660 unsigned long t;
1661 int ret;
1662
1663 ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0,
1664 balloon_onchannelcallback, dev);
1665 if (ret)
1666 return ret;
1667
1668 /*
 * Initiate the handshake with the host and negotiate
1670 * a version that the host can support. We start with the
1671 * highest version number and go down if the host cannot
1672 * support it.
1673 */
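 /*
 * The downgrade path, driven by version_resp(), is
 * WIN10 (2.0) -> WIN8 (1.0) -> WIN7 (0.3).
 */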
1674 memset(&version_req, 0, sizeof(struct dm_version_request));
1675 version_req.hdr.type = DM_VERSION_REQUEST;
1676 version_req.hdr.size = sizeof(struct dm_version_request);
1677 version_req.hdr.trans_id = atomic_inc_return(&trans_id);
1678 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN10;
1679 version_req.is_last_attempt = 0;
1680 dm_device.version = version_req.version.version;
1681
1682 ret = vmbus_sendpacket(dev->channel, &version_req,
1683 sizeof(struct dm_version_request),
1684 (unsigned long)NULL, VM_PKT_DATA_INBAND, 0);
1685 if (ret)
1686 goto out;
1687
1688 t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
1689 if (t == 0) {
1690 ret = -ETIMEDOUT;
1691 goto out;
1692 }
1693
1694 /*
 * If we could not negotiate a compatible version with the host,
 * fail the probe function.
1697 */
1698 if (dm_device.state == DM_INIT_ERROR) {
1699 ret = -EPROTO;
1700 goto out;
1701 }
1702
1703 pr_info("Using Dynamic Memory protocol version %u.%u\n",
1704 DYNMEM_MAJOR_VERSION(dm_device.version),
1705 DYNMEM_MINOR_VERSION(dm_device.version));
1706
1707 /*
1708 * Now submit our capabilities to the host.
1709 */
1710 memset(&cap_msg, 0, sizeof(struct dm_capabilities));
1711 cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
1712 cap_msg.hdr.size = sizeof(struct dm_capabilities);
1713 cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
1714
1715 /*
1716 * When hibernation (i.e. virtual ACPI S4 state) is enabled, the host
1717 * currently still requires the bits to be set, so we have to add code
1718 * to fail the host's hot-add and balloon up/down requests, if any.
1719 */
1720 cap_msg.caps.cap_bits.balloon = 1;
1721 cap_msg.caps.cap_bits.hot_add = 1;
1722
 /*
 * Specify our alignment requirements, as they relate to
 * memory hot-add: 128MB alignment.
 */
1727 cap_msg.caps.cap_bits.hot_add_alignment = 7;
1728
1729 /*
1730 * Currently the host does not use these
1731 * values and we set them to what is done in the
1732 * Windows driver.
1733 */
1734 cap_msg.min_page_cnt = 0;
1735 cap_msg.max_page_number = -1;
1736
1737 ret = vmbus_sendpacket(dev->channel, &cap_msg,
1738 sizeof(struct dm_capabilities),
1739 (unsigned long)NULL, VM_PKT_DATA_INBAND, 0);
1740 if (ret)
1741 goto out;
1742
1743 t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
1744 if (t == 0) {
1745 ret = -ETIMEDOUT;
1746 goto out;
1747 }
1748
1749 /*
1750 * If the host does not like our capabilities,
1751 * fail the probe function.
1752 */
1753 if (dm_device.state == DM_INIT_ERROR) {
1754 ret = -EPROTO;
1755 goto out;
1756 }
1757
1758 return 0;
1759out:
1760 vmbus_close(dev->channel);
1761 return ret;
1762}
1763
1764static int balloon_probe(struct hv_device *dev,
1765 const struct hv_vmbus_device_id *dev_id)
1766{
1767 int ret;
1768
1769 allow_hibernation = hv_is_hibernation_supported();
1770 if (allow_hibernation)
1771 hot_add = false;
1772
1773#ifdef CONFIG_MEMORY_HOTPLUG
1774 do_hot_add = hot_add;
1775#else
1776 do_hot_add = false;
1777#endif
1778 dm_device.dev = dev;
1779 dm_device.state = DM_INITIALIZING;
1780 dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN8;
1781 init_completion(&dm_device.host_event);
1782 init_completion(&dm_device.config_event);
1783 INIT_LIST_HEAD(&dm_device.ha_region_list);
1784 spin_lock_init(&dm_device.ha_lock);
1785 INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);
1786 INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req);
1787 dm_device.host_specified_ha_region = false;
1788
1789#ifdef CONFIG_MEMORY_HOTPLUG
1790 set_online_page_callback(&hv_online_page);
1791 init_completion(&dm_device.ol_waitevent);
1792 register_memory_notifier(&hv_memory_nb);
1793#endif
1794
1795 hv_set_drvdata(dev, &dm_device);
1796
1797 ret = balloon_connect_vsp(dev);
1798 if (ret != 0)
1799 return ret;
1800
1801 enable_page_reporting();
1802 dm_device.state = DM_INITIALIZED;
1803
1804 dm_device.thread =
1805 kthread_run(dm_thread_func, &dm_device, "hv_balloon");
1806 if (IS_ERR(dm_device.thread)) {
1807 ret = PTR_ERR(dm_device.thread);
1808 goto probe_error;
1809 }
1810
1811 return 0;
1812
1813probe_error:
1814 dm_device.state = DM_INIT_ERROR;
1815 dm_device.thread = NULL;
1816 disable_page_reporting();
1817 vmbus_close(dev->channel);
1818#ifdef CONFIG_MEMORY_HOTPLUG
1819 unregister_memory_notifier(&hv_memory_nb);
1820 restore_online_page_callback(&hv_online_page);
1821#endif
1822 return ret;
1823}
1824
1825static int balloon_remove(struct hv_device *dev)
1826{
1827 struct hv_dynmem_device *dm = hv_get_drvdata(dev);
1828 struct hv_hotadd_state *has, *tmp;
1829 struct hv_hotadd_gap *gap, *tmp_gap;
1830 unsigned long flags;
1831
1832 if (dm->num_pages_ballooned != 0)
1833 pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);
1834
1835 cancel_work_sync(&dm->balloon_wrk.wrk);
1836 cancel_work_sync(&dm->ha_wrk.wrk);
1837
1838 kthread_stop(dm->thread);
1839 disable_page_reporting();
1840 vmbus_close(dev->channel);
1841#ifdef CONFIG_MEMORY_HOTPLUG
1842 unregister_memory_notifier(&hv_memory_nb);
1843 restore_online_page_callback(&hv_online_page);
1844#endif
1845 spin_lock_irqsave(&dm_device.ha_lock, flags);
1846 list_for_each_entry_safe(has, tmp, &dm->ha_region_list, list) {
1847 list_for_each_entry_safe(gap, tmp_gap, &has->gap_list, list) {
1848 list_del(&gap->list);
1849 kfree(gap);
1850 }
1851 list_del(&has->list);
1852 kfree(has);
1853 }
1854 spin_unlock_irqrestore(&dm_device.ha_lock, flags);
1855
1856 return 0;
1857}
1858
1859static int balloon_suspend(struct hv_device *hv_dev)
1860{
1861 struct hv_dynmem_device *dm = hv_get_drvdata(hv_dev);
1862
1863 tasklet_disable(&hv_dev->channel->callback_event);
1864
1865 cancel_work_sync(&dm->balloon_wrk.wrk);
1866 cancel_work_sync(&dm->ha_wrk.wrk);
1867
1868 if (dm->thread) {
1869 kthread_stop(dm->thread);
1870 dm->thread = NULL;
1871 vmbus_close(hv_dev->channel);
1872 }
1873
1874 tasklet_enable(&hv_dev->channel->callback_event);
1875
1876 return 0;
1878}
1879
1880static int balloon_resume(struct hv_device *dev)
1881{
1882 int ret;
1883
1884 dm_device.state = DM_INITIALIZING;
1885
1886 ret = balloon_connect_vsp(dev);
1887
1888 if (ret != 0)
1889 goto out;
1890
1891 dm_device.thread =
1892 kthread_run(dm_thread_func, &dm_device, "hv_balloon");
1893 if (IS_ERR(dm_device.thread)) {
1894 ret = PTR_ERR(dm_device.thread);
1895 dm_device.thread = NULL;
1896 goto close_channel;
1897 }
1898
1899 dm_device.state = DM_INITIALIZED;
1900 return 0;
1901close_channel:
1902 vmbus_close(dev->channel);
1903out:
1904 dm_device.state = DM_INIT_ERROR;
1905#ifdef CONFIG_MEMORY_HOTPLUG
1906 unregister_memory_notifier(&hv_memory_nb);
1907 restore_online_page_callback(&hv_online_page);
1908#endif
1909 return ret;
1910}
1911
1912static const struct hv_vmbus_device_id id_table[] = {
1913 /* Dynamic Memory Class ID */
1914 /* 525074DC-8985-46e2-8057-A307DC18A502 */
1915 { HV_DM_GUID, },
1916 { },
1917};
1918
1919MODULE_DEVICE_TABLE(vmbus, id_table);
1920
1921static struct hv_driver balloon_drv = {
1922 .name = "hv_balloon",
1923 .id_table = id_table,
1924 .probe = balloon_probe,
1925 .remove = balloon_remove,
1926 .suspend = balloon_suspend,
1927 .resume = balloon_resume,
1928 .driver = {
1929 .probe_type = PROBE_PREFER_ASYNCHRONOUS,
1930 },
1931};
1932
1933static int __init init_balloon_drv(void)
{
1936 return vmbus_driver_register(&balloon_drv);
1937}
1938
1939module_init(init_balloon_drv);
1940
1941MODULE_DESCRIPTION("Hyper-V Balloon");
1942MODULE_LICENSE("GPL");