// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio balloon implementation, inspired by Dor Laor and Marcelo
 * Tosatti's implementations.
 *
 * Copyright 2008 Rusty Russell IBM Corporation
 */

#include <linux/virtio.h>
#include <linux/virtio_balloon.h>
#include <linux/swap.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/balloon_compaction.h>
#include <linux/oom.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/page_reporting.h>

/*
 * Balloon device works in 4K page units. So each page is pointed to by
 * multiple balloon pages. All memory counters in this driver are in balloon
 * page units.
 */
#define VIRTIO_BALLOON_PAGES_PER_PAGE (unsigned int)(PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT)
#define VIRTIO_BALLOON_ARRAY_PFNS_MAX 256
/* Maximum number of (4k) pages to deflate on OOM notifications. */
#define VIRTIO_BALLOON_OOM_NR_PAGES 256
#define VIRTIO_BALLOON_OOM_NOTIFY_PRIORITY 80

#define VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG (__GFP_NORETRY | __GFP_NOWARN | \
					     __GFP_NOMEMALLOC)
/* The order of free page blocks to report to host */
#define VIRTIO_BALLOON_HINT_BLOCK_ORDER MAX_PAGE_ORDER
/* The size of a free page block in bytes */
#define VIRTIO_BALLOON_HINT_BLOCK_BYTES \
	(1 << (VIRTIO_BALLOON_HINT_BLOCK_ORDER + PAGE_SHIFT))
#define VIRTIO_BALLOON_HINT_BLOCK_PAGES (1 << VIRTIO_BALLOON_HINT_BLOCK_ORDER)

enum virtio_balloon_vq {
	VIRTIO_BALLOON_VQ_INFLATE,
	VIRTIO_BALLOON_VQ_DEFLATE,
	VIRTIO_BALLOON_VQ_STATS,
	VIRTIO_BALLOON_VQ_FREE_PAGE,
	VIRTIO_BALLOON_VQ_REPORTING,
	VIRTIO_BALLOON_VQ_MAX
};

enum virtio_balloon_config_read {
	VIRTIO_BALLOON_CONFIG_READ_CMD_ID = 0,
};

struct virtio_balloon {
	struct virtio_device *vdev;
	struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *free_page_vq;

	/* Balloon's own wq for cpu-intensive work items */
	struct workqueue_struct *balloon_wq;
	/* The free page reporting work item submitted to the balloon wq */
	struct work_struct report_free_page_work;

	/* The balloon servicing is delegated to a freezable workqueue. */
	struct work_struct update_balloon_stats_work;
	struct work_struct update_balloon_size_work;

	/* Prevent updating balloon when it is being canceled. */
	spinlock_t stop_update_lock;
	bool stop_update;
	/* Bitmap to indicate if reading the related config fields is needed */
	unsigned long config_read_bitmap;

	/* The list of allocated free pages, waiting to be given back to mm */
	struct list_head free_page_list;
	spinlock_t free_page_list_lock;
	/* The number of free page blocks on the above list */
	unsigned long num_free_page_blocks;
	/*
	 * The cmd id received from host.
	 * Read it via virtio_balloon_cmd_id_received to get the latest value
	 * sent from host.
	 */
	u32 cmd_id_received_cache;
	/* The cmd id that is actively in use */
	__virtio32 cmd_id_active;
	/* Buffer to store the stop sign */
	__virtio32 cmd_id_stop;

	/* Waiting for host to ack the pages we released. */
	wait_queue_head_t acked;

	/* Number of balloon pages we've told the Host we're not using. */
	unsigned int num_pages;
	/*
	 * The pages we've told the Host we're not using are enqueued
	 * at vb_dev_info->pages list.
	 * Each page on this list adds VIRTIO_BALLOON_PAGES_PER_PAGE
	 * to num_pages above.
	 */
	struct balloon_dev_info vb_dev_info;

	/* Synchronize access/update to this struct virtio_balloon elements */
	struct mutex balloon_lock;

	/* The array of pfns we tell the Host about. */
	unsigned int num_pfns;
	__virtio32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX];

	/* Memory statistics */
	struct virtio_balloon_stat stats[VIRTIO_BALLOON_S_NR];

	/* Shrinker to return free pages - VIRTIO_BALLOON_F_FREE_PAGE_HINT */
	struct shrinker *shrinker;

	/* OOM notifier to deflate on OOM - VIRTIO_BALLOON_F_DEFLATE_ON_OOM */
	struct notifier_block oom_nb;

	/* Free page reporting device */
	struct virtqueue *reporting_vq;
	struct page_reporting_dev_info pr_dev_info;

	/* State for keeping the wakeup_source active while adjusting the balloon */
	spinlock_t wakeup_lock;
	bool processing_wakeup_event;
	u32 wakeup_signal_mask;
};

#define VIRTIO_BALLOON_WAKEUP_SIGNAL_ADJUST (1 << 0)
#define VIRTIO_BALLOON_WAKEUP_SIGNAL_STATS (1 << 1)

static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_BALLOON, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static u32 page_to_balloon_pfn(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);

	BUILD_BUG_ON(PAGE_SHIFT < VIRTIO_BALLOON_PFN_SHIFT);
	/* Convert pfn from Linux page size to balloon page size. */
	return pfn * VIRTIO_BALLOON_PAGES_PER_PAGE;
}

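/*
 * Record a pending wakeup signal and keep the device's wakeup source active
 * until the corresponding work handler calls finish_wakeup_event().
 */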
static void start_wakeup_event(struct virtio_balloon *vb, u32 mask)
{
	unsigned long flags;

	spin_lock_irqsave(&vb->wakeup_lock, flags);
	vb->wakeup_signal_mask |= mask;
	if (!vb->processing_wakeup_event) {
		vb->processing_wakeup_event = true;
		pm_stay_awake(&vb->vdev->dev);
	}
	spin_unlock_irqrestore(&vb->wakeup_lock, flags);
}

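/* Clear the wakeup signal that is now being handled. */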
static void process_wakeup_event(struct virtio_balloon *vb, u32 mask)
{
	spin_lock_irq(&vb->wakeup_lock);
	vb->wakeup_signal_mask &= ~mask;
	spin_unlock_irq(&vb->wakeup_lock);
}

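/* Drop the wakeup source once all pending wakeup signals have been handled. */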
static void finish_wakeup_event(struct virtio_balloon *vb)
{
	spin_lock_irq(&vb->wakeup_lock);
	if (!vb->wakeup_signal_mask && vb->processing_wakeup_event) {
		vb->processing_wakeup_event = false;
		pm_relax(&vb->vdev->dev);
	}
	spin_unlock_irq(&vb->wakeup_lock);
}

static void balloon_ack(struct virtqueue *vq)
{
	struct virtio_balloon *vb = vq->vdev->priv;

	wake_up(&vb->acked);
}

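/* Post the current pfns[] array to @vq and wait for the host to ack it. */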
static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
{
	struct scatterlist sg;
	unsigned int len;

	sg_init_one(&sg, vb->pfns, sizeof(vb->pfns[0]) * vb->num_pfns);

	/* We should always be able to add one buffer to an empty queue. */
	virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
	virtqueue_kick(vq);

	/* When host has read buffer, this completes via balloon_ack */
	wait_event(vb->acked, virtqueue_get_buf(vq, &len));

}

static int virtballoon_free_page_report(struct page_reporting_dev_info *pr_dev_info,
					struct scatterlist *sg, unsigned int nents)
{
	struct virtio_balloon *vb =
		container_of(pr_dev_info, struct virtio_balloon, pr_dev_info);
	struct virtqueue *vq = vb->reporting_vq;
	unsigned int unused, err;

	/* We should always be able to add these buffers to an empty queue. */
	err = virtqueue_add_inbuf(vq, sg, nents, vb, GFP_NOWAIT | __GFP_NOWARN);

	/*
	 * In the extremely unlikely case that something has occurred and we
	 * are able to trigger an error we will simply display a warning
	 * and exit without actually processing the pages.
	 */
	if (WARN_ON_ONCE(err))
		return err;

	virtqueue_kick(vq);

	/* When host has read buffer, this completes via balloon_ack */
	wait_event(vb->acked, virtqueue_get_buf(vq, &unused));

	return 0;
}

static void set_page_pfns(struct virtio_balloon *vb,
			  __virtio32 pfns[], struct page *page)
{
	unsigned int i;

	BUILD_BUG_ON(VIRTIO_BALLOON_PAGES_PER_PAGE > VIRTIO_BALLOON_ARRAY_PFNS_MAX);

	/*
	 * Set balloon pfns pointing at this page.
	 * Note that the first pfn points at start of the page.
	 */
	for (i = 0; i < VIRTIO_BALLOON_PAGES_PER_PAGE; i++)
		pfns[i] = cpu_to_virtio32(vb->vdev,
					  page_to_balloon_pfn(page) + i);
}

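/*
 * Inflate the balloon by up to @num balloon pages (at most one pfns[] array
 * worth) and tell the host. Returns the number of balloon pages added.
 */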
static unsigned int fill_balloon(struct virtio_balloon *vb, size_t num)
{
	unsigned int num_allocated_pages;
	unsigned int num_pfns;
	struct page *page;
	LIST_HEAD(pages);

	/* We can only do one array worth at a time. */
	num = min(num, ARRAY_SIZE(vb->pfns));

	for (num_pfns = 0; num_pfns < num;
	     num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
		struct page *page = balloon_page_alloc();

		if (!page) {
			dev_info_ratelimited(&vb->vdev->dev,
					     "Out of puff! Can't get %u pages\n",
					     VIRTIO_BALLOON_PAGES_PER_PAGE);
			/* Sleep for at least 1/5 of a second before retry. */
			msleep(200);
			break;
		}

		balloon_page_push(&pages, page);
	}

	mutex_lock(&vb->balloon_lock);

	vb->num_pfns = 0;

	while ((page = balloon_page_pop(&pages))) {
		balloon_page_enqueue(&vb->vb_dev_info, page);

		set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
		vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;
		if (!virtio_has_feature(vb->vdev,
					VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
			adjust_managed_page_count(page, -1);
		vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE;
	}

	num_allocated_pages = vb->num_pfns;
	/* Did we get any? */
	if (vb->num_pfns != 0)
		tell_host(vb, vb->inflate_vq);
	mutex_unlock(&vb->balloon_lock);

	return num_allocated_pages;
}

static void release_pages_balloon(struct virtio_balloon *vb,
				  struct list_head *pages)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, pages, lru) {
		if (!virtio_has_feature(vb->vdev,
					VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
			adjust_managed_page_count(page, 1);
		list_del(&page->lru);
		put_page(page); /* balloon reference */
	}
}

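/*
 * Deflate the balloon by up to @num balloon pages, tell the host, then release
 * the pages back to the system. Returns the number of balloon pages freed.
 */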
static unsigned int leak_balloon(struct virtio_balloon *vb, size_t num)
{
	unsigned int num_freed_pages;
	struct page *page;
	struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info;
	LIST_HEAD(pages);

	/* We can only do one array worth at a time. */
	num = min(num, ARRAY_SIZE(vb->pfns));

	mutex_lock(&vb->balloon_lock);
	/* We can't release more pages than taken */
	num = min(num, (size_t)vb->num_pages);
	for (vb->num_pfns = 0; vb->num_pfns < num;
	     vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
		page = balloon_page_dequeue(vb_dev_info);
		if (!page)
			break;
		set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
		list_add(&page->lru, &pages);
		vb->num_pages -= VIRTIO_BALLOON_PAGES_PER_PAGE;
	}

	num_freed_pages = vb->num_pfns;
	/*
	 * Note that if
	 * virtio_has_feature(vdev, VIRTIO_BALLOON_F_MUST_TELL_HOST);
	 * is true, we *have* to do it in this order
	 */
	if (vb->num_pfns != 0)
		tell_host(vb, vb->deflate_vq);
	release_pages_balloon(vb, &pages);
	mutex_unlock(&vb->balloon_lock);
	return num_freed_pages;
}

static inline void update_stat(struct virtio_balloon *vb, int idx,
			       u16 tag, u64 val)
{
	BUG_ON(idx >= VIRTIO_BALLOON_S_NR);
	vb->stats[idx].tag = cpu_to_virtio16(vb->vdev, tag);
	vb->stats[idx].val = cpu_to_virtio64(vb->vdev, val);
}

#define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT)

#ifdef CONFIG_VM_EVENT_COUNTERS
/* Return the number of entries filled by vm events */
static inline unsigned int update_balloon_vm_stats(struct virtio_balloon *vb)
{
	unsigned long events[NR_VM_EVENT_ITEMS];
	unsigned int idx = 0;
	unsigned int zid;
	unsigned long stall = 0;

	all_vm_events(events);
	update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN,
		    pages_to_bytes(events[PSWPIN]));
	update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT,
		    pages_to_bytes(events[PSWPOUT]));
	update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]);
	update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]);
	update_stat(vb, idx++, VIRTIO_BALLOON_S_OOM_KILL, events[OOM_KILL]);

	/* sum all the stall events */
	for (zid = 0; zid < MAX_NR_ZONES; zid++)
		stall += events[ALLOCSTALL_NORMAL - ZONE_NORMAL + zid];

	update_stat(vb, idx++, VIRTIO_BALLOON_S_ALLOC_STALL, stall);

	update_stat(vb, idx++, VIRTIO_BALLOON_S_ASYNC_SCAN,
		    pages_to_bytes(events[PGSCAN_KSWAPD]));
	update_stat(vb, idx++, VIRTIO_BALLOON_S_DIRECT_SCAN,
		    pages_to_bytes(events[PGSCAN_DIRECT]));
	update_stat(vb, idx++, VIRTIO_BALLOON_S_ASYNC_RECLAIM,
		    pages_to_bytes(events[PGSTEAL_KSWAPD]));
	update_stat(vb, idx++, VIRTIO_BALLOON_S_DIRECT_RECLAIM,
		    pages_to_bytes(events[PGSTEAL_DIRECT]));

#ifdef CONFIG_HUGETLB_PAGE
	update_stat(vb, idx++, VIRTIO_BALLOON_S_HTLB_PGALLOC,
		    events[HTLB_BUDDY_PGALLOC]);
	update_stat(vb, idx++, VIRTIO_BALLOON_S_HTLB_PGFAIL,
		    events[HTLB_BUDDY_PGALLOC_FAIL]);
#endif /* CONFIG_HUGETLB_PAGE */

	return idx;
}
#else /* CONFIG_VM_EVENT_COUNTERS */
static inline unsigned int update_balloon_vm_stats(struct virtio_balloon *vb)
{
	return 0;
}
#endif /* CONFIG_VM_EVENT_COUNTERS */

static unsigned int update_balloon_stats(struct virtio_balloon *vb)
{
	struct sysinfo i;
	unsigned int idx;
	long available;
	unsigned long caches;

	idx = update_balloon_vm_stats(vb);

	si_meminfo(&i);
	available = si_mem_available();
	caches = global_node_page_state(NR_FILE_PAGES);
	update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE,
		    pages_to_bytes(i.freeram));
	update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT,
		    pages_to_bytes(i.totalram));
	update_stat(vb, idx++, VIRTIO_BALLOON_S_AVAIL,
		    pages_to_bytes(available));
	update_stat(vb, idx++, VIRTIO_BALLOON_S_CACHES,
		    pages_to_bytes(caches));

	return idx;
}

/*
 * While most virtqueues communicate guest-initiated requests to the hypervisor,
 * the stats queue operates in reverse. The driver initializes the virtqueue
 * with a single buffer. From that point forward, all conversations consist of
 * a hypervisor request (a call to this function) which directs us to refill
 * the virtqueue with a fresh stats buffer. Since stats collection can sleep,
 * we delegate the job to a freezable workqueue that will do the actual work via
 * stats_handle_request().
 */
static void stats_request(struct virtqueue *vq)
{
	struct virtio_balloon *vb = vq->vdev->priv;

	spin_lock(&vb->stop_update_lock);
	if (!vb->stop_update) {
		start_wakeup_event(vb, VIRTIO_BALLOON_WAKEUP_SIGNAL_STATS);
		queue_work(system_freezable_wq, &vb->update_balloon_stats_work);
	}
	spin_unlock(&vb->stop_update_lock);
}

static void stats_handle_request(struct virtio_balloon *vb)
{
	struct virtqueue *vq;
	struct scatterlist sg;
	unsigned int len, num_stats;

	num_stats = update_balloon_stats(vb);

	vq = vb->stats_vq;
	if (!virtqueue_get_buf(vq, &len))
		return;
	sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats);
	virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
	virtqueue_kick(vq);
}

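/*
 * Return the signed number of balloon pages needed to reach the target the
 * host asked for: positive means inflate, negative means deflate.
 */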
static inline s64 towards_target(struct virtio_balloon *vb)
{
	s64 target;
	u32 num_pages;

	/* Legacy balloon config space is LE, unlike all other devices. */
	virtio_cread_le(vb->vdev, struct virtio_balloon_config, num_pages,
			&num_pages);

	/*
	 * Align up to the guest page size to avoid inflating and deflating
	 * the balloon endlessly.
	 */
	target = ALIGN(num_pages, VIRTIO_BALLOON_PAGES_PER_PAGE);
	return target - vb->num_pages;
}

/* Gives back @num_to_return blocks of free pages to mm. */
static unsigned long return_free_pages_to_mm(struct virtio_balloon *vb,
					     unsigned long num_to_return)
{
	struct page *page;
	unsigned long num_returned;

	spin_lock_irq(&vb->free_page_list_lock);
	for (num_returned = 0; num_returned < num_to_return; num_returned++) {
		page = balloon_page_pop(&vb->free_page_list);
		if (!page)
			break;
		free_pages((unsigned long)page_address(page),
			   VIRTIO_BALLOON_HINT_BLOCK_ORDER);
	}
	vb->num_free_page_blocks -= num_returned;
	spin_unlock_irq(&vb->free_page_list_lock);

	return num_returned;
}

static void virtio_balloon_queue_free_page_work(struct virtio_balloon *vb)
{
	if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
		return;

	/* No need to queue the work if the bit was already set. */
	if (test_and_set_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
			     &vb->config_read_bitmap))
		return;

	queue_work(vb->balloon_wq, &vb->report_free_page_work);
}

static void start_update_balloon_size(struct virtio_balloon *vb)
{
	start_wakeup_event(vb, VIRTIO_BALLOON_WAKEUP_SIGNAL_ADJUST);
	queue_work(system_freezable_wq, &vb->update_balloon_size_work);
}

static void virtballoon_changed(struct virtio_device *vdev)
{
	struct virtio_balloon *vb = vdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&vb->stop_update_lock, flags);
	if (!vb->stop_update) {
		start_update_balloon_size(vb);
		virtio_balloon_queue_free_page_work(vb);
	}
	spin_unlock_irqrestore(&vb->stop_update_lock, flags);
}

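/* Report the balloon's current size ("actual") back to the host via config space. */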
static void update_balloon_size(struct virtio_balloon *vb)
{
	u32 actual = vb->num_pages;

	/* Legacy balloon config space is LE, unlike all other devices. */
	virtio_cwrite_le(vb->vdev, struct virtio_balloon_config, actual,
			 &actual);
}

static void update_balloon_stats_func(struct work_struct *work)
{
	struct virtio_balloon *vb;

	vb = container_of(work, struct virtio_balloon,
			  update_balloon_stats_work);

	process_wakeup_event(vb, VIRTIO_BALLOON_WAKEUP_SIGNAL_STATS);
	stats_handle_request(vb);
	finish_wakeup_event(vb);
}

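/*
 * Work handler that inflates or deflates towards the host's target,
 * re-queueing itself until the target has been reached.
 */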
static void update_balloon_size_func(struct work_struct *work)
{
	struct virtio_balloon *vb;
	s64 diff;

	vb = container_of(work, struct virtio_balloon,
			  update_balloon_size_work);

	process_wakeup_event(vb, VIRTIO_BALLOON_WAKEUP_SIGNAL_ADJUST);

	diff = towards_target(vb);

	if (diff) {
		if (diff > 0)
			diff -= fill_balloon(vb, diff);
		else
			diff += leak_balloon(vb, -diff);
		update_balloon_size(vb);
	}

	if (diff)
		queue_work(system_freezable_wq, work);
	else
		finish_wakeup_event(vb);
}

static int init_vqs(struct virtio_balloon *vb)
{
	struct virtqueue_info vqs_info[VIRTIO_BALLOON_VQ_MAX] = {};
	struct virtqueue *vqs[VIRTIO_BALLOON_VQ_MAX];
	int err;

	/*
	 * Inflateq and deflateq are used unconditionally. The names[]
	 * will be NULL if the related feature is not enabled, which will
	 * cause no allocation for the corresponding virtqueue in find_vqs.
	 */
	vqs_info[VIRTIO_BALLOON_VQ_INFLATE].callback = balloon_ack;
	vqs_info[VIRTIO_BALLOON_VQ_INFLATE].name = "inflate";
	vqs_info[VIRTIO_BALLOON_VQ_DEFLATE].callback = balloon_ack;
	vqs_info[VIRTIO_BALLOON_VQ_DEFLATE].name = "deflate";

	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
		vqs_info[VIRTIO_BALLOON_VQ_STATS].name = "stats";
		vqs_info[VIRTIO_BALLOON_VQ_STATS].callback = stats_request;
	}

	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
		vqs_info[VIRTIO_BALLOON_VQ_FREE_PAGE].name = "free_page_vq";

	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_REPORTING)) {
		vqs_info[VIRTIO_BALLOON_VQ_REPORTING].name = "reporting_vq";
		vqs_info[VIRTIO_BALLOON_VQ_REPORTING].callback = balloon_ack;
	}

	err = virtio_find_vqs(vb->vdev, VIRTIO_BALLOON_VQ_MAX, vqs,
			      vqs_info, NULL);
	if (err)
		return err;

	vb->inflate_vq = vqs[VIRTIO_BALLOON_VQ_INFLATE];
	vb->deflate_vq = vqs[VIRTIO_BALLOON_VQ_DEFLATE];
	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
		struct scatterlist sg;
		unsigned int num_stats;
		vb->stats_vq = vqs[VIRTIO_BALLOON_VQ_STATS];

		/*
		 * Prime this virtqueue with one buffer so the hypervisor can
		 * use it to signal us later (it can't be broken yet!).
		 */
		num_stats = update_balloon_stats(vb);

		sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats);
		err = virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb,
					   GFP_KERNEL);
		if (err) {
			dev_warn(&vb->vdev->dev, "%s: add stat_vq failed\n",
				 __func__);
			return err;
		}
		virtqueue_kick(vb->stats_vq);
	}

	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
		vb->free_page_vq = vqs[VIRTIO_BALLOON_VQ_FREE_PAGE];

	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_REPORTING))
		vb->reporting_vq = vqs[VIRTIO_BALLOON_VQ_REPORTING];

	return 0;
}

static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb)
{
	if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
			       &vb->config_read_bitmap)) {
		/* Legacy balloon config space is LE, unlike all other devices. */
		virtio_cread_le(vb->vdev, struct virtio_balloon_config,
				free_page_hint_cmd_id,
				&vb->cmd_id_received_cache);
	}

	return vb->cmd_id_received_cache;
}

static int send_cmd_id_start(struct virtio_balloon *vb)
{
	struct scatterlist sg;
	struct virtqueue *vq = vb->free_page_vq;
	int err, unused;

	/* Detach all the used buffers from the vq */
	while (virtqueue_get_buf(vq, &unused))
		;

	vb->cmd_id_active = cpu_to_virtio32(vb->vdev,
					virtio_balloon_cmd_id_received(vb));
	sg_init_one(&sg, &vb->cmd_id_active, sizeof(vb->cmd_id_active));
	err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_active, GFP_KERNEL);
	if (!err)
		virtqueue_kick(vq);
	return err;
}

static int send_cmd_id_stop(struct virtio_balloon *vb)
{
	struct scatterlist sg;
	struct virtqueue *vq = vb->free_page_vq;
	int err, unused;

	/* Detach all the used buffers from the vq */
	while (virtqueue_get_buf(vq, &unused))
		;

	sg_init_one(&sg, &vb->cmd_id_stop, sizeof(vb->cmd_id_stop));
	err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_stop, GFP_KERNEL);
	if (!err)
		virtqueue_kick(vq);
	return err;
}

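/*
 * Allocate one free page block and hint it to the host on the free page vq.
 * The block is kept on free_page_list until it is returned to the mm.
 * Returns -EINTR once no more blocks can be allocated.
 */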
static int get_free_page_and_send(struct virtio_balloon *vb)
{
	struct virtqueue *vq = vb->free_page_vq;
	struct page *page;
	struct scatterlist sg;
	int err, unused;
	void *p;

	/* Detach all the used buffers from the vq */
	while (virtqueue_get_buf(vq, &unused))
		;

	page = alloc_pages(VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG,
			   VIRTIO_BALLOON_HINT_BLOCK_ORDER);
	/*
	 * When the allocation returns NULL, it indicates that we have got all
	 * the possible free pages, so return -EINTR to stop.
	 */
	if (!page)
		return -EINTR;

	p = page_address(page);
	sg_init_one(&sg, p, VIRTIO_BALLOON_HINT_BLOCK_BYTES);
	/* There is always 1 entry reserved for the cmd id to use. */
	if (vq->num_free > 1) {
		err = virtqueue_add_inbuf(vq, &sg, 1, p, GFP_KERNEL);
		if (unlikely(err)) {
			free_pages((unsigned long)p,
				   VIRTIO_BALLOON_HINT_BLOCK_ORDER);
			return err;
		}
		virtqueue_kick(vq);
		spin_lock_irq(&vb->free_page_list_lock);
		balloon_page_push(&vb->free_page_list, page);
		vb->num_free_page_blocks++;
		spin_unlock_irq(&vb->free_page_list_lock);
	} else {
		/*
		 * The vq has no available entry to add this page block, so
		 * just free it.
		 */
		free_pages((unsigned long)p, VIRTIO_BALLOON_HINT_BLOCK_ORDER);
	}

	return 0;
}

static int send_free_pages(struct virtio_balloon *vb)
{
	int err;
	u32 cmd_id_active;

	while (1) {
		/*
		 * If a stop id or a new cmd id was just received from host,
		 * stop the reporting.
		 */
		cmd_id_active = virtio32_to_cpu(vb->vdev, vb->cmd_id_active);
		if (unlikely(cmd_id_active !=
			     virtio_balloon_cmd_id_received(vb)))
			break;

		/*
		 * The free page blocks are allocated and sent to host one by
		 * one.
		 */
		err = get_free_page_and_send(vb);
		if (err == -EINTR)
			break;
		else if (unlikely(err))
			return err;
	}

	return 0;
}

static void virtio_balloon_report_free_page(struct virtio_balloon *vb)
{
	int err;
	struct device *dev = &vb->vdev->dev;

	/* Start by sending the received cmd id to host with an outbuf. */
	err = send_cmd_id_start(vb);
	if (unlikely(err))
		dev_err(dev, "Failed to send a start id, err = %d\n", err);

	err = send_free_pages(vb);
	if (unlikely(err))
		dev_err(dev, "Failed to send a free page, err = %d\n", err);

	/* End by sending a stop id to host with an outbuf. */
	err = send_cmd_id_stop(vb);
	if (unlikely(err))
		dev_err(dev, "Failed to send a stop id, err = %d\n", err);
}

static void report_free_page_func(struct work_struct *work)
{
	struct virtio_balloon *vb = container_of(work, struct virtio_balloon,
						 report_free_page_work);
	u32 cmd_id_received;

	cmd_id_received = virtio_balloon_cmd_id_received(vb);
	if (cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) {
		/* Pass ULONG_MAX to give back all the free pages */
		return_free_pages_to_mm(vb, ULONG_MAX);
	} else if (cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP &&
		   cmd_id_received !=
		   virtio32_to_cpu(vb->vdev, vb->cmd_id_active)) {
		virtio_balloon_report_free_page(vb);
	}
}

#ifdef CONFIG_BALLOON_COMPACTION
/*
 * virtballoon_migratepage - perform the balloon page migration on behalf of
 *			     a compaction thread. (called under page lock)
 * @vb_dev_info: the balloon device
 * @newpage: page that will replace the isolated page after migration finishes.
 * @page : the isolated (old) page that is about to be migrated to newpage.
 * @mode : compaction mode -- not used for balloon page migration.
 *
 * After a ballooned page gets isolated by compaction procedures, this is the
 * function that performs the page migration on behalf of a compaction thread
 * The page migration for virtio balloon is done in a simple swap fashion which
 * follows these two macro steps:
 *  1) insert newpage into vb->pages list and update the host about it;
 *  2) update the host about the old page removed from vb->pages list;
 *
 * This function performs the balloon page migration task.
 * Called through movable_operations->migrate_page
 */
static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	struct virtio_balloon *vb = container_of(vb_dev_info,
			struct virtio_balloon, vb_dev_info);
	unsigned long flags;

	/*
	 * In order to avoid lock contention while migrating pages concurrently
	 * to leak_balloon() or fill_balloon() we just give up the balloon_lock
	 * this turn, as it is easier to retry the page migration later.
	 * This also prevents fill_balloon() getting stuck into a mutex
	 * recursion in the case it ends up triggering memory compaction
	 * while it is attempting to inflate the balloon.
	 */
	if (!mutex_trylock(&vb->balloon_lock))
		return -EAGAIN;

	get_page(newpage); /* balloon reference */

	/*
	 * When we migrate a page to a different zone and adjusted the
	 * managed page count when inflating, we have to fixup the count of
	 * both involved zones.
	 */
	if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM) &&
	    page_zone(page) != page_zone(newpage)) {
		adjust_managed_page_count(page, 1);
		adjust_managed_page_count(newpage, -1);
	}

	/* balloon's page migration 1st step -- inflate "newpage" */
	spin_lock_irqsave(&vb_dev_info->pages_lock, flags);
	balloon_page_insert(vb_dev_info, newpage);
	vb_dev_info->isolated_pages--;
	__count_vm_event(BALLOON_MIGRATE);
	spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags);
	vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
	set_page_pfns(vb, vb->pfns, newpage);
	tell_host(vb, vb->inflate_vq);

	/* balloon's page migration 2nd step -- deflate "page" */
	spin_lock_irqsave(&vb_dev_info->pages_lock, flags);
	balloon_page_delete(page);
	spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags);
	vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
	set_page_pfns(vb, vb->pfns, page);
	tell_host(vb, vb->deflate_vq);

	mutex_unlock(&vb->balloon_lock);

	put_page(page); /* balloon reference */

	return MIGRATEPAGE_SUCCESS;
}
#endif /* CONFIG_BALLOON_COMPACTION */

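/*
 * Return up to @pages_to_free hinted pages (rounded up to whole blocks) to the
 * page allocator; returns the number of pages actually freed.
 */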
static unsigned long shrink_free_pages(struct virtio_balloon *vb,
				       unsigned long pages_to_free)
{
	unsigned long blocks_to_free, blocks_freed;

	pages_to_free = round_up(pages_to_free,
				 VIRTIO_BALLOON_HINT_BLOCK_PAGES);
	blocks_to_free = pages_to_free / VIRTIO_BALLOON_HINT_BLOCK_PAGES;
	blocks_freed = return_free_pages_to_mm(vb, blocks_to_free);

	return blocks_freed * VIRTIO_BALLOON_HINT_BLOCK_PAGES;
}

static unsigned long virtio_balloon_shrinker_scan(struct shrinker *shrinker,
						  struct shrink_control *sc)
{
	struct virtio_balloon *vb = shrinker->private_data;

	return shrink_free_pages(vb, sc->nr_to_scan);
}

static unsigned long virtio_balloon_shrinker_count(struct shrinker *shrinker,
						   struct shrink_control *sc)
{
	struct virtio_balloon *vb = shrinker->private_data;

	return vb->num_free_page_blocks * VIRTIO_BALLOON_HINT_BLOCK_PAGES;
}

static int virtio_balloon_oom_notify(struct notifier_block *nb,
				     unsigned long dummy, void *parm)
{
	struct virtio_balloon *vb = container_of(nb,
						 struct virtio_balloon, oom_nb);
	unsigned long *freed = parm;

	*freed += leak_balloon(vb, VIRTIO_BALLOON_OOM_NR_PAGES) /
		  VIRTIO_BALLOON_PAGES_PER_PAGE;
	update_balloon_size(vb);

	return NOTIFY_OK;
}

static void virtio_balloon_unregister_shrinker(struct virtio_balloon *vb)
{
	shrinker_free(vb->shrinker);
}

static int virtio_balloon_register_shrinker(struct virtio_balloon *vb)
{
	vb->shrinker = shrinker_alloc(0, "virtio-balloon");
	if (!vb->shrinker)
		return -ENOMEM;

	vb->shrinker->scan_objects = virtio_balloon_shrinker_scan;
	vb->shrinker->count_objects = virtio_balloon_shrinker_count;
	vb->shrinker->private_data = vb;

	shrinker_register(vb->shrinker);

	return 0;
}

static int virtballoon_probe(struct virtio_device *vdev)
{
	struct virtio_balloon *vb;
	int err;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	vdev->priv = vb = kzalloc(sizeof(*vb), GFP_KERNEL);
	if (!vb) {
		err = -ENOMEM;
		goto out;
	}

	INIT_WORK(&vb->update_balloon_stats_work, update_balloon_stats_func);
	INIT_WORK(&vb->update_balloon_size_work, update_balloon_size_func);
	spin_lock_init(&vb->stop_update_lock);
	mutex_init(&vb->balloon_lock);
	init_waitqueue_head(&vb->acked);
	vb->vdev = vdev;

	balloon_devinfo_init(&vb->vb_dev_info);

	err = init_vqs(vb);
	if (err)
		goto out_free_vb;

#ifdef CONFIG_BALLOON_COMPACTION
	vb->vb_dev_info.migratepage = virtballoon_migratepage;
#endif
	if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) {
		/*
		 * There is always one entry reserved for cmd id, so the ring
		 * size needs to be at least two to report free page hints.
		 */
		if (virtqueue_get_vring_size(vb->free_page_vq) < 2) {
			err = -ENOSPC;
			goto out_del_vqs;
		}
		vb->balloon_wq = alloc_workqueue("balloon-wq",
					WQ_FREEZABLE | WQ_CPU_INTENSIVE, 0);
		if (!vb->balloon_wq) {
			err = -ENOMEM;
			goto out_del_vqs;
		}
		INIT_WORK(&vb->report_free_page_work, report_free_page_func);
		vb->cmd_id_received_cache = VIRTIO_BALLOON_CMD_ID_STOP;
		vb->cmd_id_active = cpu_to_virtio32(vb->vdev,
						  VIRTIO_BALLOON_CMD_ID_STOP);
		vb->cmd_id_stop = cpu_to_virtio32(vb->vdev,
						  VIRTIO_BALLOON_CMD_ID_STOP);
		spin_lock_init(&vb->free_page_list_lock);
		INIT_LIST_HEAD(&vb->free_page_list);
		/*
		 * We're allowed to reuse any free pages, even if they are
		 * still to be processed by the host.
		 */
		err = virtio_balloon_register_shrinker(vb);
		if (err)
			goto out_del_balloon_wq;
	}

	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM)) {
		vb->oom_nb.notifier_call = virtio_balloon_oom_notify;
		vb->oom_nb.priority = VIRTIO_BALLOON_OOM_NOTIFY_PRIORITY;
		err = register_oom_notifier(&vb->oom_nb);
		if (err < 0)
			goto out_unregister_shrinker;
	}

	if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_PAGE_POISON)) {
		/* Start with poison val of 0 representing general init */
		__u32 poison_val = 0;

		/*
		 * Let the hypervisor know that we are expecting a
		 * specific value to be written back in balloon pages.
		 *
		 * If the PAGE_POISON value was larger than a byte we would
		 * need to byte swap poison_val here to guarantee it is
		 * little-endian. However for now it is a single byte so we
		 * can pass it as-is.
		 */
		if (!want_init_on_free())
			memset(&poison_val, PAGE_POISON, sizeof(poison_val));

		virtio_cwrite_le(vb->vdev, struct virtio_balloon_config,
				 poison_val, &poison_val);
	}

	vb->pr_dev_info.report = virtballoon_free_page_report;
	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_REPORTING)) {
		unsigned int capacity;

		capacity = virtqueue_get_vring_size(vb->reporting_vq);
		if (capacity < PAGE_REPORTING_CAPACITY) {
			err = -ENOSPC;
			goto out_unregister_oom;
		}

		/*
		 * The default page reporting order is @pageblock_order, which
		 * corresponds to 512MB in size on ARM64 when 64KB base page
		 * size is used. The page reporting won't be triggered if the
		 * freeing page can't come up with a free area like that huge.
		 * So we specify the page reporting order to 5, corresponding
		 * to 2MB. It helps to avoid THP splitting if 4KB base page
		 * size is used by host.
		 *
		 * Ideally, the page reporting order is selected based on the
		 * host's base page size. However, it needs more work to report
		 * that value. The hard-coded order would be fine currently.
		 */
#if defined(CONFIG_ARM64) && defined(CONFIG_ARM64_64K_PAGES)
		vb->pr_dev_info.order = 5;
#endif

		err = page_reporting_register(&vb->pr_dev_info);
		if (err)
			goto out_unregister_oom;
	}

	spin_lock_init(&vb->wakeup_lock);

	/*
	 * The virtio balloon itself can't wake up the device, but it is
	 * responsible for processing wakeup events passed up from the transport
	 * layer. Wakeup sources don't support nesting/chaining calls, so we use
	 * our own wakeup source to ensure wakeup events are properly handled
	 * without trampling on the transport layer's wakeup source.
	 */
	device_set_wakeup_capable(&vb->vdev->dev, true);

	virtio_device_ready(vdev);

	if (towards_target(vb))
		virtballoon_changed(vdev);
	return 0;

out_unregister_oom:
	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
		unregister_oom_notifier(&vb->oom_nb);
out_unregister_shrinker:
	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
		virtio_balloon_unregister_shrinker(vb);
out_del_balloon_wq:
	if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
		destroy_workqueue(vb->balloon_wq);
out_del_vqs:
	vdev->config->del_vqs(vdev);
out_free_vb:
	kfree(vb);
out:
	return err;
}

static void remove_common(struct virtio_balloon *vb)
{
	/* There might be pages left in the balloon: free them. */
	while (vb->num_pages)
		leak_balloon(vb, vb->num_pages);
	update_balloon_size(vb);

	/* There might be free pages that are being reported: release them. */
	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
		return_free_pages_to_mm(vb, ULONG_MAX);

	/* Now we reset the device so we can clean up the queues. */
	virtio_reset_device(vb->vdev);

	vb->vdev->config->del_vqs(vb->vdev);
}

static void virtballoon_remove(struct virtio_device *vdev)
{
	struct virtio_balloon *vb = vdev->priv;

	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_REPORTING))
		page_reporting_unregister(&vb->pr_dev_info);
	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
		unregister_oom_notifier(&vb->oom_nb);
	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
		virtio_balloon_unregister_shrinker(vb);
	spin_lock_irq(&vb->stop_update_lock);
	vb->stop_update = true;
	spin_unlock_irq(&vb->stop_update_lock);
	cancel_work_sync(&vb->update_balloon_size_work);
	cancel_work_sync(&vb->update_balloon_stats_work);

	if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) {
		cancel_work_sync(&vb->report_free_page_work);
		destroy_workqueue(vb->balloon_wq);
	}

	remove_common(vb);
	kfree(vb);
}

#ifdef CONFIG_PM_SLEEP
static int virtballoon_freeze(struct virtio_device *vdev)
{
	struct virtio_balloon *vb = vdev->priv;

	/*
	 * The workqueue is already frozen by the PM core before this
	 * function is called.
	 */
	remove_common(vb);
	return 0;
}

static int virtballoon_restore(struct virtio_device *vdev)
{
	struct virtio_balloon *vb = vdev->priv;
	int ret;

	ret = init_vqs(vdev->priv);
	if (ret)
		return ret;

	virtio_device_ready(vdev);

	if (towards_target(vb))
		virtballoon_changed(vdev);
	update_balloon_size(vb);
	return 0;
}
#endif

static int virtballoon_validate(struct virtio_device *vdev)
{
	/*
	 * Inform the hypervisor that our pages are poisoned or
	 * initialized. If we cannot do that then we should disable
	 * page reporting as it could potentially change the contents
	 * of our free pages.
	 */
	if (!want_init_on_free() && !page_poisoning_enabled_static())
		__virtio_clear_bit(vdev, VIRTIO_BALLOON_F_PAGE_POISON);
	else if (!virtio_has_feature(vdev, VIRTIO_BALLOON_F_PAGE_POISON))
		__virtio_clear_bit(vdev, VIRTIO_BALLOON_F_REPORTING);

	__virtio_clear_bit(vdev, VIRTIO_F_ACCESS_PLATFORM);
	return 0;
}

static unsigned int features[] = {
	VIRTIO_BALLOON_F_MUST_TELL_HOST,
	VIRTIO_BALLOON_F_STATS_VQ,
	VIRTIO_BALLOON_F_DEFLATE_ON_OOM,
	VIRTIO_BALLOON_F_FREE_PAGE_HINT,
	VIRTIO_BALLOON_F_PAGE_POISON,
	VIRTIO_BALLOON_F_REPORTING,
};

static struct virtio_driver virtio_balloon_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.id_table = id_table,
	.validate = virtballoon_validate,
	.probe = virtballoon_probe,
	.remove = virtballoon_remove,
	.config_changed = virtballoon_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtballoon_freeze,
	.restore = virtballoon_restore,
#endif
};

module_virtio_driver(virtio_balloon_driver);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio balloon driver");
MODULE_LICENSE("GPL");
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Virtio balloon implementation, inspired by Dor Laor and Marcelo
4 * Tosatti's implementations.
5 *
6 * Copyright 2008 Rusty Russell IBM Corporation
7 */
8
9#include <linux/virtio.h>
10#include <linux/virtio_balloon.h>
11#include <linux/swap.h>
12#include <linux/workqueue.h>
13#include <linux/delay.h>
14#include <linux/slab.h>
15#include <linux/module.h>
16#include <linux/balloon_compaction.h>
17#include <linux/oom.h>
18#include <linux/wait.h>
19#include <linux/mm.h>
20#include <linux/page_reporting.h>
21
22/*
23 * Balloon device works in 4K page units. So each page is pointed to by
24 * multiple balloon pages. All memory counters in this driver are in balloon
25 * page units.
26 */
27#define VIRTIO_BALLOON_PAGES_PER_PAGE (unsigned int)(PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT)
28#define VIRTIO_BALLOON_ARRAY_PFNS_MAX 256
29/* Maximum number of (4k) pages to deflate on OOM notifications. */
30#define VIRTIO_BALLOON_OOM_NR_PAGES 256
31#define VIRTIO_BALLOON_OOM_NOTIFY_PRIORITY 80
32
33#define VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG (__GFP_NORETRY | __GFP_NOWARN | \
34 __GFP_NOMEMALLOC)
35/* The order of free page blocks to report to host */
36#define VIRTIO_BALLOON_HINT_BLOCK_ORDER (MAX_ORDER - 1)
37/* The size of a free page block in bytes */
38#define VIRTIO_BALLOON_HINT_BLOCK_BYTES \
39 (1 << (VIRTIO_BALLOON_HINT_BLOCK_ORDER + PAGE_SHIFT))
40#define VIRTIO_BALLOON_HINT_BLOCK_PAGES (1 << VIRTIO_BALLOON_HINT_BLOCK_ORDER)
41
42enum virtio_balloon_vq {
43 VIRTIO_BALLOON_VQ_INFLATE,
44 VIRTIO_BALLOON_VQ_DEFLATE,
45 VIRTIO_BALLOON_VQ_STATS,
46 VIRTIO_BALLOON_VQ_FREE_PAGE,
47 VIRTIO_BALLOON_VQ_REPORTING,
48 VIRTIO_BALLOON_VQ_MAX
49};
50
51enum virtio_balloon_config_read {
52 VIRTIO_BALLOON_CONFIG_READ_CMD_ID = 0,
53};
54
55struct virtio_balloon {
56 struct virtio_device *vdev;
57 struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *free_page_vq;
58
59 /* Balloon's own wq for cpu-intensive work items */
60 struct workqueue_struct *balloon_wq;
61 /* The free page reporting work item submitted to the balloon wq */
62 struct work_struct report_free_page_work;
63
64 /* The balloon servicing is delegated to a freezable workqueue. */
65 struct work_struct update_balloon_stats_work;
66 struct work_struct update_balloon_size_work;
67
68 /* Prevent updating balloon when it is being canceled. */
69 spinlock_t stop_update_lock;
70 bool stop_update;
71 /* Bitmap to indicate if reading the related config fields are needed */
72 unsigned long config_read_bitmap;
73
74 /* The list of allocated free pages, waiting to be given back to mm */
75 struct list_head free_page_list;
76 spinlock_t free_page_list_lock;
77 /* The number of free page blocks on the above list */
78 unsigned long num_free_page_blocks;
79 /*
80 * The cmd id received from host.
81 * Read it via virtio_balloon_cmd_id_received to get the latest value
82 * sent from host.
83 */
84 u32 cmd_id_received_cache;
85 /* The cmd id that is actively in use */
86 __virtio32 cmd_id_active;
87 /* Buffer to store the stop sign */
88 __virtio32 cmd_id_stop;
89
90 /* Waiting for host to ack the pages we released. */
91 wait_queue_head_t acked;
92
93 /* Number of balloon pages we've told the Host we're not using. */
94 unsigned int num_pages;
95 /*
96 * The pages we've told the Host we're not using are enqueued
97 * at vb_dev_info->pages list.
98 * Each page on this list adds VIRTIO_BALLOON_PAGES_PER_PAGE
99 * to num_pages above.
100 */
101 struct balloon_dev_info vb_dev_info;
102
103 /* Synchronize access/update to this struct virtio_balloon elements */
104 struct mutex balloon_lock;
105
106 /* The array of pfns we tell the Host about. */
107 unsigned int num_pfns;
108 __virtio32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX];
109
110 /* Memory statistics */
111 struct virtio_balloon_stat stats[VIRTIO_BALLOON_S_NR];
112
113 /* Shrinker to return free pages - VIRTIO_BALLOON_F_FREE_PAGE_HINT */
114 struct shrinker shrinker;
115
116 /* OOM notifier to deflate on OOM - VIRTIO_BALLOON_F_DEFLATE_ON_OOM */
117 struct notifier_block oom_nb;
118
119 /* Free page reporting device */
120 struct virtqueue *reporting_vq;
121 struct page_reporting_dev_info pr_dev_info;
122};
123
124static const struct virtio_device_id id_table[] = {
125 { VIRTIO_ID_BALLOON, VIRTIO_DEV_ANY_ID },
126 { 0 },
127};
128
129static u32 page_to_balloon_pfn(struct page *page)
130{
131 unsigned long pfn = page_to_pfn(page);
132
133 BUILD_BUG_ON(PAGE_SHIFT < VIRTIO_BALLOON_PFN_SHIFT);
134 /* Convert pfn from Linux page size to balloon page size. */
135 return pfn * VIRTIO_BALLOON_PAGES_PER_PAGE;
136}
137
138static void balloon_ack(struct virtqueue *vq)
139{
140 struct virtio_balloon *vb = vq->vdev->priv;
141
142 wake_up(&vb->acked);
143}
144
145static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
146{
147 struct scatterlist sg;
148 unsigned int len;
149
150 sg_init_one(&sg, vb->pfns, sizeof(vb->pfns[0]) * vb->num_pfns);
151
152 /* We should always be able to add one buffer to an empty queue. */
153 virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
154 virtqueue_kick(vq);
155
156 /* When host has read buffer, this completes via balloon_ack */
157 wait_event(vb->acked, virtqueue_get_buf(vq, &len));
158
159}
160
161static int virtballoon_free_page_report(struct page_reporting_dev_info *pr_dev_info,
162 struct scatterlist *sg, unsigned int nents)
163{
164 struct virtio_balloon *vb =
165 container_of(pr_dev_info, struct virtio_balloon, pr_dev_info);
166 struct virtqueue *vq = vb->reporting_vq;
167 unsigned int unused, err;
168
169 /* We should always be able to add these buffers to an empty queue. */
170 err = virtqueue_add_inbuf(vq, sg, nents, vb, GFP_NOWAIT | __GFP_NOWARN);
171
172 /*
173 * In the extremely unlikely case that something has occurred and we
174 * are able to trigger an error we will simply display a warning
175 * and exit without actually processing the pages.
176 */
177 if (WARN_ON_ONCE(err))
178 return err;
179
180 virtqueue_kick(vq);
181
182 /* When host has read buffer, this completes via balloon_ack */
183 wait_event(vb->acked, virtqueue_get_buf(vq, &unused));
184
185 return 0;
186}
187
188static void set_page_pfns(struct virtio_balloon *vb,
189 __virtio32 pfns[], struct page *page)
190{
191 unsigned int i;
192
193 BUILD_BUG_ON(VIRTIO_BALLOON_PAGES_PER_PAGE > VIRTIO_BALLOON_ARRAY_PFNS_MAX);
194
195 /*
196 * Set balloon pfns pointing at this page.
197 * Note that the first pfn points at start of the page.
198 */
199 for (i = 0; i < VIRTIO_BALLOON_PAGES_PER_PAGE; i++)
200 pfns[i] = cpu_to_virtio32(vb->vdev,
201 page_to_balloon_pfn(page) + i);
202}
203
204static unsigned int fill_balloon(struct virtio_balloon *vb, size_t num)
205{
206 unsigned int num_allocated_pages;
207 unsigned int num_pfns;
208 struct page *page;
209 LIST_HEAD(pages);
210
211 /* We can only do one array worth at a time. */
212 num = min(num, ARRAY_SIZE(vb->pfns));
213
214 for (num_pfns = 0; num_pfns < num;
215 num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
216 struct page *page = balloon_page_alloc();
217
218 if (!page) {
219 dev_info_ratelimited(&vb->vdev->dev,
220 "Out of puff! Can't get %u pages\n",
221 VIRTIO_BALLOON_PAGES_PER_PAGE);
222 /* Sleep for at least 1/5 of a second before retry. */
223 msleep(200);
224 break;
225 }
226
227 balloon_page_push(&pages, page);
228 }
229
230 mutex_lock(&vb->balloon_lock);
231
232 vb->num_pfns = 0;
233
234 while ((page = balloon_page_pop(&pages))) {
235 balloon_page_enqueue(&vb->vb_dev_info, page);
236
237 set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
238 vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;
239 if (!virtio_has_feature(vb->vdev,
240 VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
241 adjust_managed_page_count(page, -1);
242 vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE;
243 }
244
245 num_allocated_pages = vb->num_pfns;
246 /* Did we get any? */
247 if (vb->num_pfns != 0)
248 tell_host(vb, vb->inflate_vq);
249 mutex_unlock(&vb->balloon_lock);
250
251 return num_allocated_pages;
252}
253
254static void release_pages_balloon(struct virtio_balloon *vb,
255 struct list_head *pages)
256{
257 struct page *page, *next;
258
259 list_for_each_entry_safe(page, next, pages, lru) {
260 if (!virtio_has_feature(vb->vdev,
261 VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
262 adjust_managed_page_count(page, 1);
263 list_del(&page->lru);
264 put_page(page); /* balloon reference */
265 }
266}
267
268static unsigned int leak_balloon(struct virtio_balloon *vb, size_t num)
269{
270 unsigned int num_freed_pages;
271 struct page *page;
272 struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info;
273 LIST_HEAD(pages);
274
275 /* We can only do one array worth at a time. */
276 num = min(num, ARRAY_SIZE(vb->pfns));
277
278 mutex_lock(&vb->balloon_lock);
279 /* We can't release more pages than taken */
280 num = min(num, (size_t)vb->num_pages);
281 for (vb->num_pfns = 0; vb->num_pfns < num;
282 vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
283 page = balloon_page_dequeue(vb_dev_info);
284 if (!page)
285 break;
286 set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
287 list_add(&page->lru, &pages);
288 vb->num_pages -= VIRTIO_BALLOON_PAGES_PER_PAGE;
289 }
290
291 num_freed_pages = vb->num_pfns;
292 /*
293 * Note that if
294 * virtio_has_feature(vdev, VIRTIO_BALLOON_F_MUST_TELL_HOST);
295 * is true, we *have* to do it in this order
296 */
297 if (vb->num_pfns != 0)
298 tell_host(vb, vb->deflate_vq);
299 release_pages_balloon(vb, &pages);
300 mutex_unlock(&vb->balloon_lock);
301 return num_freed_pages;
302}
303
304static inline void update_stat(struct virtio_balloon *vb, int idx,
305 u16 tag, u64 val)
306{
307 BUG_ON(idx >= VIRTIO_BALLOON_S_NR);
308 vb->stats[idx].tag = cpu_to_virtio16(vb->vdev, tag);
309 vb->stats[idx].val = cpu_to_virtio64(vb->vdev, val);
310}
311
312#define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT)
313
314static unsigned int update_balloon_stats(struct virtio_balloon *vb)
315{
316 unsigned long events[NR_VM_EVENT_ITEMS];
317 struct sysinfo i;
318 unsigned int idx = 0;
319 long available;
320 unsigned long caches;
321
322 all_vm_events(events);
323 si_meminfo(&i);
324
325 available = si_mem_available();
326 caches = global_node_page_state(NR_FILE_PAGES);
327
328#ifdef CONFIG_VM_EVENT_COUNTERS
329 update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN,
330 pages_to_bytes(events[PSWPIN]));
331 update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT,
332 pages_to_bytes(events[PSWPOUT]));
333 update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]);
334 update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]);
335#ifdef CONFIG_HUGETLB_PAGE
336 update_stat(vb, idx++, VIRTIO_BALLOON_S_HTLB_PGALLOC,
337 events[HTLB_BUDDY_PGALLOC]);
338 update_stat(vb, idx++, VIRTIO_BALLOON_S_HTLB_PGFAIL,
339 events[HTLB_BUDDY_PGALLOC_FAIL]);
340#endif
341#endif
342 update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE,
343 pages_to_bytes(i.freeram));
344 update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT,
345 pages_to_bytes(i.totalram));
346 update_stat(vb, idx++, VIRTIO_BALLOON_S_AVAIL,
347 pages_to_bytes(available));
348 update_stat(vb, idx++, VIRTIO_BALLOON_S_CACHES,
349 pages_to_bytes(caches));
350
351 return idx;
352}
353
354/*
355 * While most virtqueues communicate guest-initiated requests to the hypervisor,
356 * the stats queue operates in reverse. The driver initializes the virtqueue
357 * with a single buffer. From that point forward, all conversations consist of
358 * a hypervisor request (a call to this function) which directs us to refill
359 * the virtqueue with a fresh stats buffer. Since stats collection can sleep,
360 * we delegate the job to a freezable workqueue that will do the actual work via
361 * stats_handle_request().
362 */
363static void stats_request(struct virtqueue *vq)
364{
365 struct virtio_balloon *vb = vq->vdev->priv;
366
367 spin_lock(&vb->stop_update_lock);
368 if (!vb->stop_update)
369 queue_work(system_freezable_wq, &vb->update_balloon_stats_work);
370 spin_unlock(&vb->stop_update_lock);
371}
372
373static void stats_handle_request(struct virtio_balloon *vb)
374{
375 struct virtqueue *vq;
376 struct scatterlist sg;
377 unsigned int len, num_stats;
378
379 num_stats = update_balloon_stats(vb);
380
381 vq = vb->stats_vq;
382 if (!virtqueue_get_buf(vq, &len))
383 return;
384 sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats);
385 virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
386 virtqueue_kick(vq);
387}
388
389static inline s64 towards_target(struct virtio_balloon *vb)
390{
391 s64 target;
392 u32 num_pages;
393
394 /* Legacy balloon config space is LE, unlike all other devices. */
395 virtio_cread_le(vb->vdev, struct virtio_balloon_config, num_pages,
396 &num_pages);
397
398 target = num_pages;
399 return target - vb->num_pages;
400}
401
402/* Gives back @num_to_return blocks of free pages to mm. */
403static unsigned long return_free_pages_to_mm(struct virtio_balloon *vb,
404 unsigned long num_to_return)
405{
406 struct page *page;
407 unsigned long num_returned;
408
409 spin_lock_irq(&vb->free_page_list_lock);
410 for (num_returned = 0; num_returned < num_to_return; num_returned++) {
411 page = balloon_page_pop(&vb->free_page_list);
412 if (!page)
413 break;
414 free_pages((unsigned long)page_address(page),
415 VIRTIO_BALLOON_HINT_BLOCK_ORDER);
416 }
417 vb->num_free_page_blocks -= num_returned;
418 spin_unlock_irq(&vb->free_page_list_lock);
419
420 return num_returned;
421}
422
423static void virtio_balloon_queue_free_page_work(struct virtio_balloon *vb)
424{
425 if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
426 return;
427
428 /* No need to queue the work if the bit was already set. */
429 if (test_and_set_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
430 &vb->config_read_bitmap))
431 return;
432
433 queue_work(vb->balloon_wq, &vb->report_free_page_work);
434}
435
436static void virtballoon_changed(struct virtio_device *vdev)
437{
438 struct virtio_balloon *vb = vdev->priv;
439 unsigned long flags;
440
441 spin_lock_irqsave(&vb->stop_update_lock, flags);
442 if (!vb->stop_update) {
443 queue_work(system_freezable_wq,
444 &vb->update_balloon_size_work);
445 virtio_balloon_queue_free_page_work(vb);
446 }
447 spin_unlock_irqrestore(&vb->stop_update_lock, flags);
448}
449
450static void update_balloon_size(struct virtio_balloon *vb)
451{
452 u32 actual = vb->num_pages;
453
454 /* Legacy balloon config space is LE, unlike all other devices. */
455 virtio_cwrite_le(vb->vdev, struct virtio_balloon_config, actual,
456 &actual);
457}
458
459static void update_balloon_stats_func(struct work_struct *work)
460{
461 struct virtio_balloon *vb;
462
463 vb = container_of(work, struct virtio_balloon,
464 update_balloon_stats_work);
465 stats_handle_request(vb);
466}
467
468static void update_balloon_size_func(struct work_struct *work)
469{
470 struct virtio_balloon *vb;
471 s64 diff;
472
473 vb = container_of(work, struct virtio_balloon,
474 update_balloon_size_work);
475 diff = towards_target(vb);
476
477 if (!diff)
478 return;
479
480 if (diff > 0)
481 diff -= fill_balloon(vb, diff);
482 else
483 diff += leak_balloon(vb, -diff);
484 update_balloon_size(vb);
485
486 if (diff)
487 queue_work(system_freezable_wq, work);
488}
489
static int init_vqs(struct virtio_balloon *vb)
{
	struct virtqueue *vqs[VIRTIO_BALLOON_VQ_MAX];
	vq_callback_t *callbacks[VIRTIO_BALLOON_VQ_MAX];
	const char *names[VIRTIO_BALLOON_VQ_MAX];
	int err;

	/*
	 * Inflateq and deflateq are used unconditionally. The names[]
	 * will be NULL if the related feature is not enabled, which will
	 * cause no allocation for the corresponding virtqueue in find_vqs.
	 */
	callbacks[VIRTIO_BALLOON_VQ_INFLATE] = balloon_ack;
	names[VIRTIO_BALLOON_VQ_INFLATE] = "inflate";
	callbacks[VIRTIO_BALLOON_VQ_DEFLATE] = balloon_ack;
	names[VIRTIO_BALLOON_VQ_DEFLATE] = "deflate";
	callbacks[VIRTIO_BALLOON_VQ_STATS] = NULL;
	names[VIRTIO_BALLOON_VQ_STATS] = NULL;
	callbacks[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL;
	names[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL;
	names[VIRTIO_BALLOON_VQ_REPORTING] = NULL;

	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
		names[VIRTIO_BALLOON_VQ_STATS] = "stats";
		callbacks[VIRTIO_BALLOON_VQ_STATS] = stats_request;
	}

	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) {
		names[VIRTIO_BALLOON_VQ_FREE_PAGE] = "free_page_vq";
		callbacks[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL;
	}

	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_REPORTING)) {
		names[VIRTIO_BALLOON_VQ_REPORTING] = "reporting_vq";
		callbacks[VIRTIO_BALLOON_VQ_REPORTING] = balloon_ack;
	}

	err = virtio_find_vqs(vb->vdev, VIRTIO_BALLOON_VQ_MAX, vqs,
			      callbacks, names, NULL);
	if (err)
		return err;

	vb->inflate_vq = vqs[VIRTIO_BALLOON_VQ_INFLATE];
	vb->deflate_vq = vqs[VIRTIO_BALLOON_VQ_DEFLATE];
	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
		struct scatterlist sg;
		unsigned int num_stats;
		vb->stats_vq = vqs[VIRTIO_BALLOON_VQ_STATS];

		/*
		 * Prime this virtqueue with one buffer so the hypervisor can
		 * use it to signal us later (it can't be broken yet!).
		 */
		num_stats = update_balloon_stats(vb);

		sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats);
		err = virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb,
					   GFP_KERNEL);
		if (err) {
			dev_warn(&vb->vdev->dev, "%s: add stat_vq failed\n",
				 __func__);
			return err;
		}
		virtqueue_kick(vb->stats_vq);
	}

	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
		vb->free_page_vq = vqs[VIRTIO_BALLOON_VQ_FREE_PAGE];

	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_REPORTING))
		vb->reporting_vq = vqs[VIRTIO_BALLOON_VQ_REPORTING];

	return 0;
}

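/*
 * Return the latest free page hinting command id sent by the host: re-read
 * it from config space only if the config change handler flagged it as
 * pending, otherwise use the cached value.
 */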
static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb)
{
	if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
			       &vb->config_read_bitmap)) {
		/* Legacy balloon config space is LE, unlike all other devices. */
		virtio_cread_le(vb->vdev, struct virtio_balloon_config,
				free_page_hint_cmd_id,
				&vb->cmd_id_received_cache);
	}

	return vb->cmd_id_received_cache;
}

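/*
 * Queue the active command id on the free page vq so the host knows which
 * hinting round the following free page hints belong to.
 */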
static int send_cmd_id_start(struct virtio_balloon *vb)
{
	struct scatterlist sg;
	struct virtqueue *vq = vb->free_page_vq;
	int err, unused;

	/* Detach all the used buffers from the vq */
	while (virtqueue_get_buf(vq, &unused))
		;

	vb->cmd_id_active = cpu_to_virtio32(vb->vdev,
					virtio_balloon_cmd_id_received(vb));
	sg_init_one(&sg, &vb->cmd_id_active, sizeof(vb->cmd_id_active));
	err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_active, GFP_KERNEL);
	if (!err)
		virtqueue_kick(vq);
	return err;
}

static int send_cmd_id_stop(struct virtio_balloon *vb)
{
	struct scatterlist sg;
	struct virtqueue *vq = vb->free_page_vq;
	int err, unused;

	/* Detach all the used buffers from the vq */
	while (virtqueue_get_buf(vq, &unused))
		;

	sg_init_one(&sg, &vb->cmd_id_stop, sizeof(vb->cmd_id_stop));
	err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_stop, GFP_KERNEL);
	if (!err)
		virtqueue_kick(vq);
	return err;
}

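/*
 * Allocate one free page block of VIRTIO_BALLOON_HINT_BLOCK_ORDER and queue
 * it on the free page vq as a hint. The block is kept on free_page_list so
 * it can be returned to the MM later; -EINTR means no more free blocks of
 * that size are available and the reporting round should stop.
 */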
static int get_free_page_and_send(struct virtio_balloon *vb)
{
	struct virtqueue *vq = vb->free_page_vq;
	struct page *page;
	struct scatterlist sg;
	int err, unused;
	void *p;

	/* Detach all the used buffers from the vq */
	while (virtqueue_get_buf(vq, &unused))
		;

	page = alloc_pages(VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG,
			   VIRTIO_BALLOON_HINT_BLOCK_ORDER);
	/*
	 * When the allocation returns NULL, it indicates that we have got all
	 * the possible free pages, so return -EINTR to stop.
	 */
	if (!page)
		return -EINTR;

	p = page_address(page);
	sg_init_one(&sg, p, VIRTIO_BALLOON_HINT_BLOCK_BYTES);
	/* There is always 1 entry reserved for the cmd id to use. */
	if (vq->num_free > 1) {
		err = virtqueue_add_inbuf(vq, &sg, 1, p, GFP_KERNEL);
		if (unlikely(err)) {
			free_pages((unsigned long)p,
				   VIRTIO_BALLOON_HINT_BLOCK_ORDER);
			return err;
		}
		virtqueue_kick(vq);
		spin_lock_irq(&vb->free_page_list_lock);
		balloon_page_push(&vb->free_page_list, page);
		vb->num_free_page_blocks++;
		spin_unlock_irq(&vb->free_page_list_lock);
	} else {
		/*
		 * The vq has no available entry to add this page block, so
		 * just free it.
		 */
		free_pages((unsigned long)p, VIRTIO_BALLOON_HINT_BLOCK_ORDER);
	}

	return 0;
}

static int send_free_pages(struct virtio_balloon *vb)
{
	int err;
	u32 cmd_id_active;

	while (1) {
		/*
		 * If a stop id or a new cmd id was just received from host,
		 * stop the reporting.
		 */
		cmd_id_active = virtio32_to_cpu(vb->vdev, vb->cmd_id_active);
		if (unlikely(cmd_id_active !=
			     virtio_balloon_cmd_id_received(vb)))
			break;

		/*
		 * The free page blocks are allocated and sent to host one by
		 * one.
		 */
		err = get_free_page_and_send(vb);
		if (err == -EINTR)
			break;
		else if (unlikely(err))
			return err;
	}

	return 0;
}

static void virtio_balloon_report_free_page(struct virtio_balloon *vb)
{
	int err;
	struct device *dev = &vb->vdev->dev;

	/* Start by sending the received cmd id to host with an outbuf. */
	err = send_cmd_id_start(vb);
	if (unlikely(err))
		dev_err(dev, "Failed to send a start id, err = %d\n", err);

	err = send_free_pages(vb);
	if (unlikely(err))
		dev_err(dev, "Failed to send a free page, err = %d\n", err);

	/* End by sending a stop id to host with an outbuf. */
	err = send_cmd_id_stop(vb);
	if (unlikely(err))
		dev_err(dev, "Failed to send a stop id, err = %d\n", err);
}

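/*
 * Work item driven by free page hinting config changes: CMD_ID_DONE means
 * the host is finished and all hinted pages can go back to the MM, a new
 * command id starts another reporting round, and CMD_ID_STOP is ignored.
 */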
static void report_free_page_func(struct work_struct *work)
{
	struct virtio_balloon *vb = container_of(work, struct virtio_balloon,
						 report_free_page_work);
	u32 cmd_id_received;

	cmd_id_received = virtio_balloon_cmd_id_received(vb);
	if (cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) {
		/* Pass ULONG_MAX to give back all the free pages */
		return_free_pages_to_mm(vb, ULONG_MAX);
	} else if (cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP &&
		   cmd_id_received !=
		   virtio32_to_cpu(vb->vdev, vb->cmd_id_active)) {
		virtio_balloon_report_free_page(vb);
	}
}

#ifdef CONFIG_BALLOON_COMPACTION
/*
 * virtballoon_migratepage - perform the balloon page migration on behalf of
 *			     a compaction thread. (called under page lock)
 * @vb_dev_info: the balloon device
 * @newpage: page that will replace the isolated page after migration finishes.
 * @page   : the isolated (old) page that is about to be migrated to newpage.
 * @mode   : compaction mode -- not used for balloon page migration.
 *
 * After a ballooned page gets isolated by compaction procedures, this is the
 * function that performs the page migration on behalf of a compaction thread.
 * The page migration for virtio balloon is done in a simple swap fashion which
 * follows these two macro steps:
 *  1) insert newpage into vb->pages list and update the host about it;
 *  2) update the host about the old page removed from vb->pages list;
 *
 * This function performs the balloon page migration task.
 * Called through balloon_mapping->a_ops->migratepage
 */
static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	struct virtio_balloon *vb = container_of(vb_dev_info,
			struct virtio_balloon, vb_dev_info);
	unsigned long flags;

	/*
	 * In order to avoid lock contention while migrating pages concurrently
	 * to leak_balloon() or fill_balloon() we just give up the balloon_lock
	 * this turn, as it is easier to retry the page migration later.
	 * This also prevents fill_balloon() getting stuck into a mutex
	 * recursion in the case it ends up triggering memory compaction
	 * while it is attempting to inflate the balloon.
	 */
	if (!mutex_trylock(&vb->balloon_lock))
		return -EAGAIN;

	get_page(newpage); /* balloon reference */

	/*
	 * When we migrate a page to a different zone and adjusted the
	 * managed page count when inflating, we have to fixup the count of
	 * both involved zones.
	 */
	if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM) &&
	    page_zone(page) != page_zone(newpage)) {
		adjust_managed_page_count(page, 1);
		adjust_managed_page_count(newpage, -1);
	}

	/* balloon's page migration 1st step -- inflate "newpage" */
	spin_lock_irqsave(&vb_dev_info->pages_lock, flags);
	balloon_page_insert(vb_dev_info, newpage);
	vb_dev_info->isolated_pages--;
	__count_vm_event(BALLOON_MIGRATE);
	spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags);
	vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
	set_page_pfns(vb, vb->pfns, newpage);
	tell_host(vb, vb->inflate_vq);

	/* balloon's page migration 2nd step -- deflate "page" */
	spin_lock_irqsave(&vb_dev_info->pages_lock, flags);
	balloon_page_delete(page);
	spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags);
	vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
	set_page_pfns(vb, vb->pfns, page);
	tell_host(vb, vb->deflate_vq);

	mutex_unlock(&vb->balloon_lock);

	put_page(page); /* balloon reference */

	return MIGRATEPAGE_SUCCESS;
}
#endif /* CONFIG_BALLOON_COMPACTION */

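/*
 * Shrinker backend: returns free page hint blocks to the MM when the system
 * is under memory pressure, converting between pages and whole hint blocks.
 */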
static unsigned long shrink_free_pages(struct virtio_balloon *vb,
				       unsigned long pages_to_free)
{
	unsigned long blocks_to_free, blocks_freed;

	pages_to_free = round_up(pages_to_free,
				 VIRTIO_BALLOON_HINT_BLOCK_PAGES);
	blocks_to_free = pages_to_free / VIRTIO_BALLOON_HINT_BLOCK_PAGES;
	blocks_freed = return_free_pages_to_mm(vb, blocks_to_free);

	return blocks_freed * VIRTIO_BALLOON_HINT_BLOCK_PAGES;
}

static unsigned long virtio_balloon_shrinker_scan(struct shrinker *shrinker,
						  struct shrink_control *sc)
{
	struct virtio_balloon *vb = container_of(shrinker,
					struct virtio_balloon, shrinker);

	return shrink_free_pages(vb, sc->nr_to_scan);
}

static unsigned long virtio_balloon_shrinker_count(struct shrinker *shrinker,
						   struct shrink_control *sc)
{
	struct virtio_balloon *vb = container_of(shrinker,
					struct virtio_balloon, shrinker);

	return vb->num_free_page_blocks * VIRTIO_BALLOON_HINT_BLOCK_PAGES;
}

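/*
 * OOM notifier: deflate the balloon by up to VIRTIO_BALLOON_OOM_NR_PAGES
 * balloon pages and report the number of system pages freed back to the
 * OOM killer via *parm.
 */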
static int virtio_balloon_oom_notify(struct notifier_block *nb,
				     unsigned long dummy, void *parm)
{
	struct virtio_balloon *vb = container_of(nb,
						 struct virtio_balloon, oom_nb);
	unsigned long *freed = parm;

	*freed += leak_balloon(vb, VIRTIO_BALLOON_OOM_NR_PAGES) /
		  VIRTIO_BALLOON_PAGES_PER_PAGE;
	update_balloon_size(vb);

	return NOTIFY_OK;
}

static void virtio_balloon_unregister_shrinker(struct virtio_balloon *vb)
{
	unregister_shrinker(&vb->shrinker);
}

static int virtio_balloon_register_shrinker(struct virtio_balloon *vb)
{
	vb->shrinker.scan_objects = virtio_balloon_shrinker_scan;
	vb->shrinker.count_objects = virtio_balloon_shrinker_count;
	vb->shrinker.seeks = DEFAULT_SEEKS;

	return register_shrinker(&vb->shrinker, "virtio-balloon");
}

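/*
 * Device probe: allocate and initialize the balloon state, set up the
 * virtqueues, and register the optional free page hinting shrinker, OOM
 * notifier, page poison value and page reporting infrastructure according
 * to the negotiated features, then kick the first resize towards the target.
 */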
static int virtballoon_probe(struct virtio_device *vdev)
{
	struct virtio_balloon *vb;
	int err;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	vdev->priv = vb = kzalloc(sizeof(*vb), GFP_KERNEL);
	if (!vb) {
		err = -ENOMEM;
		goto out;
	}

	INIT_WORK(&vb->update_balloon_stats_work, update_balloon_stats_func);
	INIT_WORK(&vb->update_balloon_size_work, update_balloon_size_func);
	spin_lock_init(&vb->stop_update_lock);
	mutex_init(&vb->balloon_lock);
	init_waitqueue_head(&vb->acked);
	vb->vdev = vdev;

	balloon_devinfo_init(&vb->vb_dev_info);

	err = init_vqs(vb);
	if (err)
		goto out_free_vb;

#ifdef CONFIG_BALLOON_COMPACTION
	vb->vb_dev_info.migratepage = virtballoon_migratepage;
#endif
	if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) {
		/*
		 * There is always one entry reserved for cmd id, so the ring
		 * size needs to be at least two to report free page hints.
		 */
		if (virtqueue_get_vring_size(vb->free_page_vq) < 2) {
			err = -ENOSPC;
			goto out_del_vqs;
		}
		vb->balloon_wq = alloc_workqueue("balloon-wq",
					WQ_FREEZABLE | WQ_CPU_INTENSIVE, 0);
		if (!vb->balloon_wq) {
			err = -ENOMEM;
			goto out_del_vqs;
		}
		INIT_WORK(&vb->report_free_page_work, report_free_page_func);
		vb->cmd_id_received_cache = VIRTIO_BALLOON_CMD_ID_STOP;
		vb->cmd_id_active = cpu_to_virtio32(vb->vdev,
						  VIRTIO_BALLOON_CMD_ID_STOP);
		vb->cmd_id_stop = cpu_to_virtio32(vb->vdev,
						  VIRTIO_BALLOON_CMD_ID_STOP);
		spin_lock_init(&vb->free_page_list_lock);
		INIT_LIST_HEAD(&vb->free_page_list);
		/*
		 * We're allowed to reuse any free pages, even if they are
		 * still to be processed by the host.
		 */
		err = virtio_balloon_register_shrinker(vb);
		if (err)
			goto out_del_balloon_wq;
	}

	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM)) {
		vb->oom_nb.notifier_call = virtio_balloon_oom_notify;
		vb->oom_nb.priority = VIRTIO_BALLOON_OOM_NOTIFY_PRIORITY;
		err = register_oom_notifier(&vb->oom_nb);
		if (err < 0)
			goto out_unregister_shrinker;
	}

	if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_PAGE_POISON)) {
		/* Start with poison val of 0 representing general init */
		__u32 poison_val = 0;

		/*
		 * Let the hypervisor know that we are expecting a
		 * specific value to be written back in balloon pages.
		 *
		 * If the PAGE_POISON value was larger than a byte we would
		 * need to byte swap poison_val here to guarantee it is
		 * little-endian. However for now it is a single byte so we
		 * can pass it as-is.
		 */
		if (!want_init_on_free())
			memset(&poison_val, PAGE_POISON, sizeof(poison_val));

		virtio_cwrite_le(vb->vdev, struct virtio_balloon_config,
				 poison_val, &poison_val);
	}

	vb->pr_dev_info.report = virtballoon_free_page_report;
	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_REPORTING)) {
		unsigned int capacity;

		capacity = virtqueue_get_vring_size(vb->reporting_vq);
		if (capacity < PAGE_REPORTING_CAPACITY) {
			err = -ENOSPC;
			goto out_unregister_oom;
		}

		/*
		 * The default page reporting order is @pageblock_order, which
		 * corresponds to 512MB on ARM64 with a 64KB base page size.
		 * Page reporting is never triggered if no free area that
		 * large exists, so lower the reporting order to 5 (2MB).
		 * This also helps to avoid splitting THPs when the host uses
		 * a 4KB base page size.
		 *
		 * Ideally, the page reporting order would be selected based
		 * on the host's base page size, but reporting that value
		 * needs more work, so the hard-coded order is fine for now.
		 */
#if defined(CONFIG_ARM64) && defined(CONFIG_ARM64_64K_PAGES)
		vb->pr_dev_info.order = 5;
#endif

		err = page_reporting_register(&vb->pr_dev_info);
		if (err)
			goto out_unregister_oom;
	}

	virtio_device_ready(vdev);

	if (towards_target(vb))
		virtballoon_changed(vdev);
	return 0;

out_unregister_oom:
	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
		unregister_oom_notifier(&vb->oom_nb);
out_unregister_shrinker:
	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
		virtio_balloon_unregister_shrinker(vb);
out_del_balloon_wq:
	if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
		destroy_workqueue(vb->balloon_wq);
out_del_vqs:
	vdev->config->del_vqs(vdev);
out_free_vb:
	kfree(vb);
out:
	return err;
}

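/*
 * Teardown shared by remove and freeze: deflate the balloon completely,
 * return any hinted free pages to the MM, reset the device and delete the
 * virtqueues.
 */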
static void remove_common(struct virtio_balloon *vb)
{
	/* There might be pages left in the balloon: free them. */
	while (vb->num_pages)
		leak_balloon(vb, vb->num_pages);
	update_balloon_size(vb);

	/* There might be free pages that are being reported: release them. */
	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
		return_free_pages_to_mm(vb, ULONG_MAX);

	/* Now we reset the device so we can clean up the queues. */
	virtio_reset_device(vb->vdev);

	vb->vdev->config->del_vqs(vb->vdev);
}

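/*
 * Device removal: unregister the optional page reporting, OOM and shrinker
 * hooks, stop all pending work, then tear down the balloon state.
 */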
static void virtballoon_remove(struct virtio_device *vdev)
{
	struct virtio_balloon *vb = vdev->priv;

	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_REPORTING))
		page_reporting_unregister(&vb->pr_dev_info);
	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
		unregister_oom_notifier(&vb->oom_nb);
	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
		virtio_balloon_unregister_shrinker(vb);
	spin_lock_irq(&vb->stop_update_lock);
	vb->stop_update = true;
	spin_unlock_irq(&vb->stop_update_lock);
	cancel_work_sync(&vb->update_balloon_size_work);
	cancel_work_sync(&vb->update_balloon_stats_work);

	if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) {
		cancel_work_sync(&vb->report_free_page_work);
		destroy_workqueue(vb->balloon_wq);
	}

	remove_common(vb);
	kfree(vb);
}

#ifdef CONFIG_PM_SLEEP
static int virtballoon_freeze(struct virtio_device *vdev)
{
	struct virtio_balloon *vb = vdev->priv;

	/*
	 * The workqueue is already frozen by the PM core before this
	 * function is called.
	 */
	remove_common(vb);
	return 0;
}

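/*
 * Re-create the virtqueues after resume and bring the balloon back in sync
 * with the (possibly changed) target size.
 */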
static int virtballoon_restore(struct virtio_device *vdev)
{
	struct virtio_balloon *vb = vdev->priv;
	int ret;

	ret = init_vqs(vdev->priv);
	if (ret)
		return ret;

	virtio_device_ready(vdev);

	if (towards_target(vb))
		virtballoon_changed(vdev);
	update_balloon_size(vb);
	return 0;
}
#endif

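/*
 * Feature validation runs before the features are finalized: drop
 * PAGE_POISON when the guest neither poisons nor init-on-frees its pages,
 * and drop REPORTING when the guest poisons pages but the device did not
 * offer PAGE_POISON.
 */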
static int virtballoon_validate(struct virtio_device *vdev)
{
	/*
	 * Inform the hypervisor that our pages are poisoned or
	 * initialized. If we cannot do that then we should disable
	 * page reporting as it could potentially change the contents
	 * of our free pages.
	 */
	if (!want_init_on_free() && !page_poisoning_enabled_static())
		__virtio_clear_bit(vdev, VIRTIO_BALLOON_F_PAGE_POISON);
	else if (!virtio_has_feature(vdev, VIRTIO_BALLOON_F_PAGE_POISON))
		__virtio_clear_bit(vdev, VIRTIO_BALLOON_F_REPORTING);

	__virtio_clear_bit(vdev, VIRTIO_F_ACCESS_PLATFORM);
	return 0;
}

static unsigned int features[] = {
	VIRTIO_BALLOON_F_MUST_TELL_HOST,
	VIRTIO_BALLOON_F_STATS_VQ,
	VIRTIO_BALLOON_F_DEFLATE_ON_OOM,
	VIRTIO_BALLOON_F_FREE_PAGE_HINT,
	VIRTIO_BALLOON_F_PAGE_POISON,
	VIRTIO_BALLOON_F_REPORTING,
};

static struct virtio_driver virtio_balloon_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.validate = virtballoon_validate,
	.probe = virtballoon_probe,
	.remove = virtballoon_remove,
	.config_changed = virtballoon_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtballoon_freeze,
	.restore = virtballoon_restore,
#endif
};

module_virtio_driver(virtio_balloon_driver);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio balloon driver");
MODULE_LICENSE("GPL");