// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio-mem device driver.
 *
 * Copyright Red Hat, Inc. 2020
 *
 * Author(s): David Hildenbrand <david@redhat.com>
 */

#include <linux/virtio.h>
#include <linux/virtio_mem.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/memory_hotplug.h>
#include <linux/memory.h>
#include <linux/hrtimer.h>
#include <linux/crash_dump.h>
#include <linux/mutex.h>
#include <linux/bitmap.h>
#include <linux/lockdep.h>

#include <acpi/acpi_numa.h>

static bool unplug_online = true;
module_param(unplug_online, bool, 0644);
MODULE_PARM_DESC(unplug_online, "Try to unplug online memory");

enum virtio_mem_mb_state {
	/* Unplugged, not added to Linux. Can be reused later. */
	VIRTIO_MEM_MB_STATE_UNUSED = 0,
	/* (Partially) plugged, not added to Linux. Error on add_memory(). */
	VIRTIO_MEM_MB_STATE_PLUGGED,
	/* Fully plugged, fully added to Linux, offline. */
	VIRTIO_MEM_MB_STATE_OFFLINE,
	/* Partially plugged, fully added to Linux, offline. */
	VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL,
	/* Fully plugged, fully added to Linux, online (!ZONE_MOVABLE). */
	VIRTIO_MEM_MB_STATE_ONLINE,
	/* Partially plugged, fully added to Linux, online (!ZONE_MOVABLE). */
	VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL,
	/*
	 * Fully plugged, fully added to Linux, online (ZONE_MOVABLE).
	 * We are not allowed to allocate (unplug) parts of this block that
	 * are not movable (similar to gigantic pages). We will never allow
	 * onlining OFFLINE_PARTIAL blocks to ZONE_MOVABLE (as they would
	 * contain unmovable parts).
	 */
	VIRTIO_MEM_MB_STATE_ONLINE_MOVABLE,
	VIRTIO_MEM_MB_STATE_COUNT
};

struct virtio_mem {
	struct virtio_device *vdev;

	/* We might first have to unplug all memory when starting up. */
	bool unplug_all_required;

	/* Workqueue that processes the plug/unplug requests. */
	struct work_struct wq;
	atomic_t config_changed;

	/* Virtqueue for guest->host requests. */
	struct virtqueue *vq;

	/* Wait for a host response to a guest request. */
	wait_queue_head_t host_resp;

	/* Space for one guest request and the host response. */
	struct virtio_mem_req req;
	struct virtio_mem_resp resp;

	/* The current size of the device. */
	uint64_t plugged_size;
	/* The requested size of the device. */
	uint64_t requested_size;

	/* The device block size (for communicating with the device). */
	uint64_t device_block_size;
	/* The translated node id. NUMA_NO_NODE in case not specified. */
	int nid;
	/* Physical start address of the memory region. */
	uint64_t addr;
	/* Maximum region size in bytes. */
	uint64_t region_size;

	/* The subblock size. */
	uint64_t subblock_size;
	/* The number of subblocks per memory block. */
	uint32_t nb_sb_per_mb;

	/* Id of the first memory block of this device. */
	unsigned long first_mb_id;
	/* Id of the last memory block of this device. */
	unsigned long last_mb_id;
	/* Id of the last usable memory block of this device. */
	unsigned long last_usable_mb_id;
	/* Id of the next memory block to prepare when needed. */
	unsigned long next_mb_id;

	/* The parent resource for all memory added via this device. */
	struct resource *parent_resource;
	/*
	 * Copy of "System RAM (virtio_mem)" to be used for
	 * add_memory_driver_managed().
	 */
	const char *resource_name;

	/* Summary of all memory block states. */
	unsigned long nb_mb_state[VIRTIO_MEM_MB_STATE_COUNT];
#define VIRTIO_MEM_NB_OFFLINE_THRESHOLD		10

	/*
	 * One byte state per memory block.
	 *
	 * Allocated via vmalloc(). When preparing new blocks, resized
	 * (alloc+copy+free) when needed (when crossing pages with the
	 * next mb).
	 *
	 * With 128MB memory blocks, we have states for 512GB of memory in one
	 * page.
	 */
	uint8_t *mb_state;

	/*
	 * $nb_sb_per_mb bit per memory block. Handled similar to mb_state.
	 *
	 * With 4MB subblocks, we manage 128GB of memory in one page.
	 */
	unsigned long *sb_bitmap;

	/*
	 * Mutex that protects the nb_mb_state, mb_state, and sb_bitmap.
	 *
	 * When this lock is held the pointers can't change, ONLINE and
	 * OFFLINE blocks can't change the state and no subblocks will get
	 * plugged/unplugged.
	 */
	struct mutex hotplug_mutex;
	bool hotplug_active;

	/* An error occurred we cannot handle - stop processing requests. */
	bool broken;

	/* The driver is being removed. */
	spinlock_t removal_lock;
	bool removing;

	/* Timer for retrying to plug/unplug memory. */
	struct hrtimer retry_timer;
	unsigned int retry_timer_ms;
#define VIRTIO_MEM_RETRY_TIMER_MIN_MS		50000
#define VIRTIO_MEM_RETRY_TIMER_MAX_MS		300000

	/* Memory notifier (online/offline events). */
	struct notifier_block memory_notifier;

	/* Next device in the list of virtio-mem devices. */
	struct list_head next;
};

/*
 * We have to share a single online_page callback among all virtio-mem
 * devices. We use RCU to iterate the list in the callback.
 */
static DEFINE_MUTEX(virtio_mem_mutex);
static LIST_HEAD(virtio_mem_devices);

static void virtio_mem_online_page_cb(struct page *page, unsigned int order);

/*
 * Register a virtio-mem device so it will be considered for the online_page
 * callback.
 */
static int register_virtio_mem_device(struct virtio_mem *vm)
{
	int rc = 0;

	/* First device registers the callback. */
	mutex_lock(&virtio_mem_mutex);
	if (list_empty(&virtio_mem_devices))
		rc = set_online_page_callback(&virtio_mem_online_page_cb);
	if (!rc)
		list_add_rcu(&vm->next, &virtio_mem_devices);
	mutex_unlock(&virtio_mem_mutex);

	return rc;
}

/*
 * Unregister a virtio-mem device so it will no longer be considered for the
 * online_page callback.
 */
static void unregister_virtio_mem_device(struct virtio_mem *vm)
{
	/* Last device unregisters the callback. */
	mutex_lock(&virtio_mem_mutex);
	list_del_rcu(&vm->next);
	if (list_empty(&virtio_mem_devices))
		restore_online_page_callback(&virtio_mem_online_page_cb);
	mutex_unlock(&virtio_mem_mutex);

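	/* Wait for concurrent online_page callbacks (RCU readers) to finish. */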
	synchronize_rcu();
}

/*
 * Calculate the memory block id of a given address.
 */
static unsigned long virtio_mem_phys_to_mb_id(unsigned long addr)
{
	return addr / memory_block_size_bytes();
}

/*
 * Calculate the physical start address of a given memory block id.
 */
static unsigned long virtio_mem_mb_id_to_phys(unsigned long mb_id)
{
	return mb_id * memory_block_size_bytes();
}

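/*
 * Example: with 128 MiB memory blocks, physical address 0x100000000 (4 GiB)
 * maps to mb_id 32, and mb_id 32 maps back to 0x100000000 (the memory block
 * size is architecture- and configuration-dependent).
 */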
/*
 * Calculate the subblock id of a given address.
 */
static unsigned long virtio_mem_phys_to_sb_id(struct virtio_mem *vm,
					      unsigned long addr)
{
	const unsigned long mb_id = virtio_mem_phys_to_mb_id(addr);
	const unsigned long mb_addr = virtio_mem_mb_id_to_phys(mb_id);

	return (addr - mb_addr) / vm->subblock_size;
}

/*
 * Set the state of a memory block, taking care of the state counter.
 */
static void virtio_mem_mb_set_state(struct virtio_mem *vm, unsigned long mb_id,
				    enum virtio_mem_mb_state state)
{
	const unsigned long idx = mb_id - vm->first_mb_id;
	enum virtio_mem_mb_state old_state;

	old_state = vm->mb_state[idx];
	vm->mb_state[idx] = state;

	BUG_ON(vm->nb_mb_state[old_state] == 0);
	vm->nb_mb_state[old_state]--;
	vm->nb_mb_state[state]++;
}

/*
 * Get the state of a memory block.
 */
static enum virtio_mem_mb_state virtio_mem_mb_get_state(struct virtio_mem *vm,
							unsigned long mb_id)
{
	const unsigned long idx = mb_id - vm->first_mb_id;

	return vm->mb_state[idx];
}

/*
 * Prepare the state array for the next memory block.
 */
static int virtio_mem_mb_state_prepare_next_mb(struct virtio_mem *vm)
{
	unsigned long old_bytes = vm->next_mb_id - vm->first_mb_id + 1;
	unsigned long new_bytes = vm->next_mb_id - vm->first_mb_id + 2;
	int old_pages = PFN_UP(old_bytes);
	int new_pages = PFN_UP(new_bytes);
	uint8_t *new_mb_state;

	if (vm->mb_state && old_pages == new_pages)
		return 0;

	new_mb_state = vzalloc(new_pages * PAGE_SIZE);
	if (!new_mb_state)
		return -ENOMEM;

	mutex_lock(&vm->hotplug_mutex);
	if (vm->mb_state)
		memcpy(new_mb_state, vm->mb_state, old_pages * PAGE_SIZE);
	vfree(vm->mb_state);
	vm->mb_state = new_mb_state;
	mutex_unlock(&vm->hotplug_mutex);

	return 0;
}

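/*
 * Iterate all memory blocks of the device that are in the given state. The
 * nb_mb_state[] summary counter lets the walk terminate early once no more
 * blocks in that state remain.
 */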
#define virtio_mem_for_each_mb_state(_vm, _mb_id, _state) \
	for (_mb_id = _vm->first_mb_id; \
	     _mb_id < _vm->next_mb_id && _vm->nb_mb_state[_state]; \
	     _mb_id++) \
		if (virtio_mem_mb_get_state(_vm, _mb_id) == _state)

#define virtio_mem_for_each_mb_state_rev(_vm, _mb_id, _state) \
	for (_mb_id = _vm->next_mb_id - 1; \
	     _mb_id >= _vm->first_mb_id && _vm->nb_mb_state[_state]; \
	     _mb_id--) \
		if (virtio_mem_mb_get_state(_vm, _mb_id) == _state)

/*
 * Mark all selected subblocks plugged.
 *
 * Will not modify the state of the memory block.
 */
static void virtio_mem_mb_set_sb_plugged(struct virtio_mem *vm,
					 unsigned long mb_id, int sb_id,
					 int count)
{
	const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id;

	__bitmap_set(vm->sb_bitmap, bit, count);
}

/*
 * Mark all selected subblocks unplugged.
 *
 * Will not modify the state of the memory block.
 */
static void virtio_mem_mb_set_sb_unplugged(struct virtio_mem *vm,
					   unsigned long mb_id, int sb_id,
					   int count)
{
	const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id;

	__bitmap_clear(vm->sb_bitmap, bit, count);
}

/*
 * Test if all selected subblocks are plugged.
 */
static bool virtio_mem_mb_test_sb_plugged(struct virtio_mem *vm,
					  unsigned long mb_id, int sb_id,
					  int count)
{
	const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id;

	if (count == 1)
		return test_bit(bit, vm->sb_bitmap);

	/* TODO: Helper similar to bitmap_set() */
	return find_next_zero_bit(vm->sb_bitmap, bit + count, bit) >=
	       bit + count;
}

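/*
 * Note: find_next_bit()/find_next_zero_bit() return the size argument (here
 * bit + count) when no matching bit is found in [bit, bit + count), so the
 * comparison checks that all bits in the range have the expected value.
 */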
/*
 * Test if all selected subblocks are unplugged.
 */
static bool virtio_mem_mb_test_sb_unplugged(struct virtio_mem *vm,
					    unsigned long mb_id, int sb_id,
					    int count)
{
	const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id;

	/* TODO: Helper similar to bitmap_set() */
	return find_next_bit(vm->sb_bitmap, bit + count, bit) >= bit + count;
}

/*
 * Find the first unplugged subblock. Returns vm->nb_sb_per_mb in case there is
 * none.
 */
static int virtio_mem_mb_first_unplugged_sb(struct virtio_mem *vm,
					    unsigned long mb_id)
{
	const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb;

	return find_next_zero_bit(vm->sb_bitmap, bit + vm->nb_sb_per_mb, bit) -
	       bit;
}

/*
 * Prepare the subblock bitmap for the next memory block.
 */
static int virtio_mem_sb_bitmap_prepare_next_mb(struct virtio_mem *vm)
{
	const unsigned long old_nb_mb = vm->next_mb_id - vm->first_mb_id;
	const unsigned long old_nb_bits = old_nb_mb * vm->nb_sb_per_mb;
	const unsigned long new_nb_bits = (old_nb_mb + 1) * vm->nb_sb_per_mb;
	int old_pages = PFN_UP(BITS_TO_LONGS(old_nb_bits) * sizeof(long));
	int new_pages = PFN_UP(BITS_TO_LONGS(new_nb_bits) * sizeof(long));
	unsigned long *new_sb_bitmap, *old_sb_bitmap;

	if (vm->sb_bitmap && old_pages == new_pages)
		return 0;

	new_sb_bitmap = vzalloc(new_pages * PAGE_SIZE);
	if (!new_sb_bitmap)
		return -ENOMEM;

	mutex_lock(&vm->hotplug_mutex);
	if (vm->sb_bitmap)
		memcpy(new_sb_bitmap, vm->sb_bitmap, old_pages * PAGE_SIZE);

	old_sb_bitmap = vm->sb_bitmap;
	vm->sb_bitmap = new_sb_bitmap;
	mutex_unlock(&vm->hotplug_mutex);

	vfree(old_sb_bitmap);
	return 0;
}

/*
 * Try to add a memory block to Linux. This will usually only fail
 * if out of memory.
 *
 * Must not be called with the vm->hotplug_mutex held (possible deadlock with
 * onlining code).
 *
 * Will not modify the state of the memory block.
 */
static int virtio_mem_mb_add(struct virtio_mem *vm, unsigned long mb_id)
{
	const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
	int nid = vm->nid;

	if (nid == NUMA_NO_NODE)
		nid = memory_add_physaddr_to_nid(addr);

	/*
	 * When force-unloading the driver and we still have memory added to
	 * Linux, the resource name has to stay.
	 */
	if (!vm->resource_name) {
		vm->resource_name = kstrdup_const("System RAM (virtio_mem)",
						  GFP_KERNEL);
		if (!vm->resource_name)
			return -ENOMEM;
	}

	dev_dbg(&vm->vdev->dev, "adding memory block: %lu\n", mb_id);
	return add_memory_driver_managed(nid, addr, memory_block_size_bytes(),
					 vm->resource_name);
}

/*
 * Try to remove a memory block from Linux. Will only fail if the memory block
 * is not offline.
 *
 * Must not be called with the vm->hotplug_mutex held (possible deadlock with
 * onlining code).
 *
 * Will not modify the state of the memory block.
 */
static int virtio_mem_mb_remove(struct virtio_mem *vm, unsigned long mb_id)
{
	const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
	int nid = vm->nid;

	if (nid == NUMA_NO_NODE)
		nid = memory_add_physaddr_to_nid(addr);

	dev_dbg(&vm->vdev->dev, "removing memory block: %lu\n", mb_id);
	return remove_memory(nid, addr, memory_block_size_bytes());
}

/*
 * Try to offline and remove a memory block from Linux.
 *
 * Must not be called with the vm->hotplug_mutex held (possible deadlock with
 * onlining code).
 *
 * Will not modify the state of the memory block.
 */
static int virtio_mem_mb_offline_and_remove(struct virtio_mem *vm,
					    unsigned long mb_id)
{
	const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
	int nid = vm->nid;

	if (nid == NUMA_NO_NODE)
		nid = memory_add_physaddr_to_nid(addr);

	dev_dbg(&vm->vdev->dev, "offlining and removing memory block: %lu\n",
		mb_id);
	return offline_and_remove_memory(nid, addr, memory_block_size_bytes());
}

/*
 * Trigger the workqueue so the device can perform its magic.
 */
static void virtio_mem_retry(struct virtio_mem *vm)
{
	unsigned long flags;

	spin_lock_irqsave(&vm->removal_lock, flags);
	if (!vm->removing)
		queue_work(system_freezable_wq, &vm->wq);
	spin_unlock_irqrestore(&vm->removal_lock, flags);
}

static int virtio_mem_translate_node_id(struct virtio_mem *vm, uint16_t node_id)
{
	int node = NUMA_NO_NODE;

#if defined(CONFIG_ACPI_NUMA)
	if (virtio_has_feature(vm->vdev, VIRTIO_MEM_F_ACPI_PXM))
		node = pxm_to_node(node_id);
#endif
	return node;
}

/*
 * Test if a virtio-mem device overlaps with the given range. Can be called
 * locklessly from (notifier) callbacks.
 */
static bool virtio_mem_overlaps_range(struct virtio_mem *vm,
				      unsigned long start, unsigned long size)
{
	unsigned long dev_start = virtio_mem_mb_id_to_phys(vm->first_mb_id);
	unsigned long dev_end = virtio_mem_mb_id_to_phys(vm->last_mb_id) +
				memory_block_size_bytes();

	return start < dev_end && dev_start < start + size;
}

/*
 * Test if a virtio-mem device owns a memory block. Can be called
 * locklessly from (notifier) callbacks.
 */
static bool virtio_mem_owned_mb(struct virtio_mem *vm, unsigned long mb_id)
{
	return mb_id >= vm->first_mb_id && mb_id <= vm->last_mb_id;
}

static int virtio_mem_notify_going_online(struct virtio_mem *vm,
					  unsigned long mb_id,
					  enum zone_type zone)
{
	switch (virtio_mem_mb_get_state(vm, mb_id)) {
	case VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL:
		/*
		 * We won't allow onlining a partially plugged memory block
		 * to the MOVABLE zone - it would contain unmovable parts.
		 */
		if (zone == ZONE_MOVABLE) {
			dev_warn_ratelimited(&vm->vdev->dev,
					     "memory block has holes, MOVABLE not supported\n");
			return NOTIFY_BAD;
		}
		return NOTIFY_OK;
	case VIRTIO_MEM_MB_STATE_OFFLINE:
		return NOTIFY_OK;
	default:
		break;
	}
	dev_warn_ratelimited(&vm->vdev->dev,
			     "memory block onlining denied\n");
	return NOTIFY_BAD;
}

static void virtio_mem_notify_offline(struct virtio_mem *vm,
				      unsigned long mb_id)
{
	switch (virtio_mem_mb_get_state(vm, mb_id)) {
	case VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL:
		virtio_mem_mb_set_state(vm, mb_id,
					VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL);
		break;
	case VIRTIO_MEM_MB_STATE_ONLINE:
	case VIRTIO_MEM_MB_STATE_ONLINE_MOVABLE:
		virtio_mem_mb_set_state(vm, mb_id,
					VIRTIO_MEM_MB_STATE_OFFLINE);
		break;
	default:
		BUG();
		break;
	}

	/*
	 * Trigger the workqueue, maybe we can now unplug memory. Also,
	 * when we offline and remove a memory block, this will re-trigger
	 * us immediately - which is often nice because the removal of
	 * the memory block (e.g., memmap) might have freed up memory
	 * on other memory blocks we manage.
	 */
	virtio_mem_retry(vm);
}

static void virtio_mem_notify_online(struct virtio_mem *vm, unsigned long mb_id,
				     enum zone_type zone)
{
	unsigned long nb_offline;

	switch (virtio_mem_mb_get_state(vm, mb_id)) {
	case VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL:
		BUG_ON(zone == ZONE_MOVABLE);
		virtio_mem_mb_set_state(vm, mb_id,
					VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL);
		break;
	case VIRTIO_MEM_MB_STATE_OFFLINE:
		if (zone == ZONE_MOVABLE)
			virtio_mem_mb_set_state(vm, mb_id,
					    VIRTIO_MEM_MB_STATE_ONLINE_MOVABLE);
		else
			virtio_mem_mb_set_state(vm, mb_id,
						VIRTIO_MEM_MB_STATE_ONLINE);
		break;
	default:
		BUG();
		break;
	}
	nb_offline = vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE] +
		     vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL];

	/* see if we can add new blocks now that we onlined one block */
	if (nb_offline == VIRTIO_MEM_NB_OFFLINE_THRESHOLD - 1)
		virtio_mem_retry(vm);
}

static void virtio_mem_notify_going_offline(struct virtio_mem *vm,
					    unsigned long mb_id)
{
	const unsigned long nr_pages = PFN_DOWN(vm->subblock_size);
	struct page *page;
	unsigned long pfn;
	int sb_id, i;

	for (sb_id = 0; sb_id < vm->nb_sb_per_mb; sb_id++) {
		if (virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1))
			continue;
		/*
		 * Drop our reference to the pages so the memory can get
		 * offlined and add the unplugged pages to the managed
		 * page counters (so offlining code can correctly subtract
		 * them again).
		 */
		pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
			       sb_id * vm->subblock_size);
		adjust_managed_page_count(pfn_to_page(pfn), nr_pages);
		for (i = 0; i < nr_pages; i++) {
			page = pfn_to_page(pfn + i);
			if (WARN_ON(!page_ref_dec_and_test(page)))
				dump_page(page, "unplugged page referenced");
		}
	}
}

static void virtio_mem_notify_cancel_offline(struct virtio_mem *vm,
					     unsigned long mb_id)
{
	const unsigned long nr_pages = PFN_DOWN(vm->subblock_size);
	unsigned long pfn;
	int sb_id, i;

	for (sb_id = 0; sb_id < vm->nb_sb_per_mb; sb_id++) {
		if (virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1))
			continue;
		/*
		 * Get the reference we dropped when going offline and
		 * subtract the unplugged pages from the managed page
		 * counters.
		 */
		pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
			       sb_id * vm->subblock_size);
		adjust_managed_page_count(pfn_to_page(pfn), -nr_pages);
		for (i = 0; i < nr_pages; i++)
			page_ref_inc(pfn_to_page(pfn + i));
	}
}

/*
 * This callback will either be called synchronously from add_memory() or
 * asynchronously (e.g., triggered via user space). We have to be careful
 * with locking when calling add_memory().
 */
static int virtio_mem_memory_notifier_cb(struct notifier_block *nb,
					 unsigned long action, void *arg)
{
	struct virtio_mem *vm = container_of(nb, struct virtio_mem,
					     memory_notifier);
	struct memory_notify *mhp = arg;
	const unsigned long start = PFN_PHYS(mhp->start_pfn);
	const unsigned long size = PFN_PHYS(mhp->nr_pages);
	const unsigned long mb_id = virtio_mem_phys_to_mb_id(start);
	enum zone_type zone;
	int rc = NOTIFY_OK;

	if (!virtio_mem_overlaps_range(vm, start, size))
		return NOTIFY_DONE;

	/*
	 * Memory is onlined/offlined in memory block granularity. We cannot
	 * cross virtio-mem device boundaries and memory block boundaries. Bail
	 * out if this ever changes.
	 */
	if (WARN_ON_ONCE(size != memory_block_size_bytes() ||
			 !IS_ALIGNED(start, memory_block_size_bytes())))
		return NOTIFY_BAD;

	/*
	 * Avoid circular locking lockdep warnings. We lock the mutex
	 * e.g., in MEM_GOING_ONLINE and unlock it in MEM_ONLINE. The
	 * blocking_notifier_call_chain() has its own lock, which gets unlocked
	 * between both notifier calls and will bail out. False positive.
	 */
	lockdep_off();

	switch (action) {
	case MEM_GOING_OFFLINE:
		mutex_lock(&vm->hotplug_mutex);
		if (vm->removing) {
			rc = notifier_from_errno(-EBUSY);
			mutex_unlock(&vm->hotplug_mutex);
			break;
		}
		vm->hotplug_active = true;
		virtio_mem_notify_going_offline(vm, mb_id);
		break;
	case MEM_GOING_ONLINE:
		mutex_lock(&vm->hotplug_mutex);
		if (vm->removing) {
			rc = notifier_from_errno(-EBUSY);
			mutex_unlock(&vm->hotplug_mutex);
			break;
		}
		vm->hotplug_active = true;
		zone = page_zonenum(pfn_to_page(mhp->start_pfn));
		rc = virtio_mem_notify_going_online(vm, mb_id, zone);
		break;
	case MEM_OFFLINE:
		virtio_mem_notify_offline(vm, mb_id);
		vm->hotplug_active = false;
		mutex_unlock(&vm->hotplug_mutex);
		break;
	case MEM_ONLINE:
		zone = page_zonenum(pfn_to_page(mhp->start_pfn));
		virtio_mem_notify_online(vm, mb_id, zone);
		vm->hotplug_active = false;
		mutex_unlock(&vm->hotplug_mutex);
		break;
	case MEM_CANCEL_OFFLINE:
		if (!vm->hotplug_active)
			break;
		virtio_mem_notify_cancel_offline(vm, mb_id);
		vm->hotplug_active = false;
		mutex_unlock(&vm->hotplug_mutex);
		break;
	case MEM_CANCEL_ONLINE:
		if (!vm->hotplug_active)
			break;
		vm->hotplug_active = false;
		mutex_unlock(&vm->hotplug_mutex);
		break;
	default:
		break;
	}

	lockdep_on();

	return rc;
}

/*
 * Set a range of pages PG_offline. Remember pages that were never onlined
 * (via generic_online_page()) using PageDirty().
 */
static void virtio_mem_set_fake_offline(unsigned long pfn,
					unsigned int nr_pages, bool onlined)
{
	for (; nr_pages--; pfn++) {
		struct page *page = pfn_to_page(pfn);

		__SetPageOffline(page);
		if (!onlined) {
			SetPageDirty(page);
			/* FIXME: remove after cleanups */
			ClearPageReserved(page);
		}
	}
}

/*
 * Clear PG_offline from a range of pages. If the pages were never onlined
 * (via generic_online_page()), clear PageDirty().
 */
static void virtio_mem_clear_fake_offline(unsigned long pfn,
					  unsigned int nr_pages, bool onlined)
{
	for (; nr_pages--; pfn++) {
		struct page *page = pfn_to_page(pfn);

		__ClearPageOffline(page);
		if (!onlined)
			ClearPageDirty(page);
	}
}

/*
 * Release a range of fake-offline pages to the buddy, effectively
 * fake-onlining them.
 */
static void virtio_mem_fake_online(unsigned long pfn, unsigned int nr_pages)
{
	const int order = MAX_ORDER - 1;
	int i;

	/*
	 * We are always called with subblock granularity, which is at least
	 * aligned to MAX_ORDER - 1.
	 */
	for (i = 0; i < nr_pages; i += 1 << order) {
		struct page *page = pfn_to_page(pfn + i);

		/*
		 * If the page is PageDirty(), it was kept fake-offline when
		 * onlining the memory block. Otherwise, it was allocated
		 * using alloc_contig_range(). All pages in a subblock are
		 * alike.
		 */
		if (PageDirty(page)) {
			virtio_mem_clear_fake_offline(pfn + i, 1 << order,
						      false);
			generic_online_page(page, order);
		} else {
			virtio_mem_clear_fake_offline(pfn + i, 1 << order,
						      true);
			free_contig_range(pfn + i, 1 << order);
			adjust_managed_page_count(page, 1 << order);
		}
	}
}

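/*
 * Shared online_page callback: called while memory blocks are being onlined,
 * with an order matching the subblock size/alignment assumptions documented
 * below. Plugged subblocks go to the buddy; unplugged subblocks are kept
 * fake-offline (PageOffline).
 */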
static void virtio_mem_online_page_cb(struct page *page, unsigned int order)
{
	const unsigned long addr = page_to_phys(page);
	const unsigned long mb_id = virtio_mem_phys_to_mb_id(addr);
	struct virtio_mem *vm;
	int sb_id;

	/*
	 * We exploit here that subblocks have at least MAX_ORDER - 1
	 * size/alignment and that this callback is called with such a
	 * size/alignment. So we cannot cross subblocks and therefore
	 * also not memory blocks.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(vm, &virtio_mem_devices, next) {
		if (!virtio_mem_owned_mb(vm, mb_id))
			continue;

		sb_id = virtio_mem_phys_to_sb_id(vm, addr);
		/*
		 * If plugged, online the pages, otherwise, set them fake
		 * offline (PageOffline).
		 */
		if (virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1))
			generic_online_page(page, order);
		else
			virtio_mem_set_fake_offline(PFN_DOWN(addr), 1 << order,
						    false);
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	/* not virtio-mem memory, but e.g., a DIMM. online it */
	generic_online_page(page, order);
}

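/*
 * Send a guest request to the device and wait for the host response. There
 * is a single request/response buffer pair (vm->req/vm->resp), so requests
 * must be serialized by the caller (in practice, the workqueue).
 */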
static uint64_t virtio_mem_send_request(struct virtio_mem *vm,
					const struct virtio_mem_req *req)
{
	struct scatterlist *sgs[2], sg_req, sg_resp;
	unsigned int len;
	int rc;

	/* don't use the request residing on the stack (vaddr) */
	vm->req = *req;

	/* out: buffer for request */
	sg_init_one(&sg_req, &vm->req, sizeof(vm->req));
	sgs[0] = &sg_req;

	/* in: buffer for response */
	sg_init_one(&sg_resp, &vm->resp, sizeof(vm->resp));
	sgs[1] = &sg_resp;

	rc = virtqueue_add_sgs(vm->vq, sgs, 1, 1, vm, GFP_KERNEL);
	if (rc < 0)
		return rc;

	virtqueue_kick(vm->vq);

	/* wait for a response */
	wait_event(vm->host_resp, virtqueue_get_buf(vm->vq, &len));

	return virtio16_to_cpu(vm->vdev, vm->resp.type);
}

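/*
 * Device response codes are mapped to errno values below: ACK -> 0,
 * NACK -> -EAGAIN, BUSY -> -ETXTBSY, ERROR -> -EINVAL, and anything
 * unexpected -> -ENOMEM.
 */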
static int virtio_mem_send_plug_request(struct virtio_mem *vm, uint64_t addr,
					uint64_t size)
{
	const uint64_t nb_vm_blocks = size / vm->device_block_size;
	const struct virtio_mem_req req = {
		.type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_PLUG),
		.u.plug.addr = cpu_to_virtio64(vm->vdev, addr),
		.u.plug.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks),
	};

	if (atomic_read(&vm->config_changed))
		return -EAGAIN;

	switch (virtio_mem_send_request(vm, &req)) {
	case VIRTIO_MEM_RESP_ACK:
		vm->plugged_size += size;
		return 0;
	case VIRTIO_MEM_RESP_NACK:
		return -EAGAIN;
	case VIRTIO_MEM_RESP_BUSY:
		return -ETXTBSY;
	case VIRTIO_MEM_RESP_ERROR:
		return -EINVAL;
	default:
		return -ENOMEM;
	}
}

static int virtio_mem_send_unplug_request(struct virtio_mem *vm, uint64_t addr,
					  uint64_t size)
{
	const uint64_t nb_vm_blocks = size / vm->device_block_size;
	const struct virtio_mem_req req = {
		.type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_UNPLUG),
		.u.unplug.addr = cpu_to_virtio64(vm->vdev, addr),
		.u.unplug.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks),
	};

	if (atomic_read(&vm->config_changed))
		return -EAGAIN;

	switch (virtio_mem_send_request(vm, &req)) {
	case VIRTIO_MEM_RESP_ACK:
		vm->plugged_size -= size;
		return 0;
	case VIRTIO_MEM_RESP_BUSY:
		return -ETXTBSY;
	case VIRTIO_MEM_RESP_ERROR:
		return -EINVAL;
	default:
		return -ENOMEM;
	}
}

static int virtio_mem_send_unplug_all_request(struct virtio_mem *vm)
{
	const struct virtio_mem_req req = {
		.type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_UNPLUG_ALL),
	};

	switch (virtio_mem_send_request(vm, &req)) {
	case VIRTIO_MEM_RESP_ACK:
		vm->unplug_all_required = false;
		vm->plugged_size = 0;
		/* usable region might have shrunk */
		atomic_set(&vm->config_changed, 1);
		return 0;
	case VIRTIO_MEM_RESP_BUSY:
		return -ETXTBSY;
	default:
		return -ENOMEM;
	}
}

/*
 * Plug selected subblocks. Updates the plugged state, but not the state
 * of the memory block.
 */
static int virtio_mem_mb_plug_sb(struct virtio_mem *vm, unsigned long mb_id,
				 int sb_id, int count)
{
	const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id) +
			      sb_id * vm->subblock_size;
	const uint64_t size = count * vm->subblock_size;
	int rc;

	dev_dbg(&vm->vdev->dev, "plugging memory block: %lu : %i - %i\n", mb_id,
		sb_id, sb_id + count - 1);

	rc = virtio_mem_send_plug_request(vm, addr, size);
	if (!rc)
		virtio_mem_mb_set_sb_plugged(vm, mb_id, sb_id, count);
	return rc;
}

/*
 * Unplug selected subblocks. Updates the plugged state, but not the state
 * of the memory block.
 */
static int virtio_mem_mb_unplug_sb(struct virtio_mem *vm, unsigned long mb_id,
				   int sb_id, int count)
{
	const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id) +
			      sb_id * vm->subblock_size;
	const uint64_t size = count * vm->subblock_size;
	int rc;

	dev_dbg(&vm->vdev->dev, "unplugging memory block: %lu : %i - %i\n",
		mb_id, sb_id, sb_id + count - 1);

	rc = virtio_mem_send_unplug_request(vm, addr, size);
	if (!rc)
		virtio_mem_mb_set_sb_unplugged(vm, mb_id, sb_id, count);
	return rc;
}

/*
 * Unplug the desired number of plugged subblocks of an offline or not-added
 * memory block. Will fail if any subblock cannot get unplugged (instead of
 * skipping it).
 *
 * Will not modify the state of the memory block.
 *
 * Note: can fail after some subblocks were unplugged.
 */
static int virtio_mem_mb_unplug_any_sb(struct virtio_mem *vm,
				       unsigned long mb_id, uint64_t *nb_sb)
{
	int sb_id, count;
	int rc;

	sb_id = vm->nb_sb_per_mb - 1;
	while (*nb_sb) {
		/* Find the next candidate subblock */
		while (sb_id >= 0 &&
		       virtio_mem_mb_test_sb_unplugged(vm, mb_id, sb_id, 1))
			sb_id--;
		if (sb_id < 0)
			break;
		/* Try to unplug multiple subblocks at a time */
		count = 1;
		while (count < *nb_sb && sb_id > 0 &&
		       virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id - 1, 1)) {
			count++;
			sb_id--;
		}

		rc = virtio_mem_mb_unplug_sb(vm, mb_id, sb_id, count);
		if (rc)
			return rc;
		*nb_sb -= count;
		sb_id--;
	}

	return 0;
}

/*
 * Unplug all plugged subblocks of an offline or not-added memory block.
 *
 * Will not modify the state of the memory block.
 *
 * Note: can fail after some subblocks were unplugged.
 */
static int virtio_mem_mb_unplug(struct virtio_mem *vm, unsigned long mb_id)
{
	uint64_t nb_sb = vm->nb_sb_per_mb;

	return virtio_mem_mb_unplug_any_sb(vm, mb_id, &nb_sb);
}

1065 * Prepare tracking data for the next memory block.
1066 */
1067static int virtio_mem_prepare_next_mb(struct virtio_mem *vm,
1068				      unsigned long *mb_id)
1069{
1070	int rc;
1071
1072	if (vm->next_mb_id > vm->last_usable_mb_id)
1073		return -ENOSPC;
1074
1075	/* Resize the state array if required. */
1076	rc = virtio_mem_mb_state_prepare_next_mb(vm);
1077	if (rc)
1078		return rc;
1079
1080	/* Resize the subblock bitmap if required. */
1081	rc = virtio_mem_sb_bitmap_prepare_next_mb(vm);
1082	if (rc)
1083		return rc;
1084
1085	vm->nb_mb_state[VIRTIO_MEM_MB_STATE_UNUSED]++;
1086	*mb_id = vm->next_mb_id++;
1087	return 0;
1088}
1089
/*
 * Don't add too many blocks that are not onlined yet to avoid running OOM.
 */
static bool virtio_mem_too_many_mb_offline(struct virtio_mem *vm)
{
	unsigned long nb_offline;

	nb_offline = vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE] +
		     vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL];
	return nb_offline >= VIRTIO_MEM_NB_OFFLINE_THRESHOLD;
}

/*
 * Try to plug the desired number of subblocks and add the memory block
 * to Linux.
 *
 * Will modify the state of the memory block.
 */
static int virtio_mem_mb_plug_and_add(struct virtio_mem *vm,
				      unsigned long mb_id,
				      uint64_t *nb_sb)
{
	const int count = min_t(int, *nb_sb, vm->nb_sb_per_mb);
	int rc, rc2;

	if (WARN_ON_ONCE(!count))
		return -EINVAL;

	/*
	 * Plug the requested number of subblocks before adding it to linux,
	 * so that onlining will directly online all plugged subblocks.
	 */
	rc = virtio_mem_mb_plug_sb(vm, mb_id, 0, count);
	if (rc)
		return rc;

	/*
	 * Mark the block properly offline before adding it to Linux,
	 * so the memory notifiers will find the block in the right state.
	 */
	if (count == vm->nb_sb_per_mb)
		virtio_mem_mb_set_state(vm, mb_id,
					VIRTIO_MEM_MB_STATE_OFFLINE);
	else
		virtio_mem_mb_set_state(vm, mb_id,
					VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL);

	/* Add the memory block to linux - if that fails, try to unplug. */
	rc = virtio_mem_mb_add(vm, mb_id);
	if (rc) {
		enum virtio_mem_mb_state new_state = VIRTIO_MEM_MB_STATE_UNUSED;

		dev_err(&vm->vdev->dev,
			"adding memory block %lu failed with %d\n", mb_id, rc);
		rc2 = virtio_mem_mb_unplug_sb(vm, mb_id, 0, count);

		/*
		 * TODO: Linux MM does not properly clean up yet in all cases
		 * where adding of memory failed - especially on -ENOMEM.
		 */
		if (rc2)
			new_state = VIRTIO_MEM_MB_STATE_PLUGGED;
		virtio_mem_mb_set_state(vm, mb_id, new_state);
		return rc;
	}

	*nb_sb -= count;
	return 0;
}

/*
 * Try to plug the desired number of subblocks of a memory block that
 * is already added to Linux.
 *
 * Will modify the state of the memory block.
 *
 * Note: Can fail after some subblocks were successfully plugged.
 */
static int virtio_mem_mb_plug_any_sb(struct virtio_mem *vm, unsigned long mb_id,
				     uint64_t *nb_sb, bool online)
{
	unsigned long pfn, nr_pages;
	int sb_id, count;
	int rc;

	if (WARN_ON_ONCE(!*nb_sb))
		return -EINVAL;

	while (*nb_sb) {
		sb_id = virtio_mem_mb_first_unplugged_sb(vm, mb_id);
		if (sb_id >= vm->nb_sb_per_mb)
			break;
		count = 1;
		while (count < *nb_sb &&
		       sb_id + count < vm->nb_sb_per_mb &&
		       !virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id + count,
						      1))
			count++;

		rc = virtio_mem_mb_plug_sb(vm, mb_id, sb_id, count);
		if (rc)
			return rc;
		*nb_sb -= count;
		if (!online)
			continue;

		/* fake-online the pages if the memory block is online */
		pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
			       sb_id * vm->subblock_size);
		nr_pages = PFN_DOWN(count * vm->subblock_size);
		virtio_mem_fake_online(pfn, nr_pages);
	}

	if (virtio_mem_mb_test_sb_plugged(vm, mb_id, 0, vm->nb_sb_per_mb)) {
		if (online)
			virtio_mem_mb_set_state(vm, mb_id,
						VIRTIO_MEM_MB_STATE_ONLINE);
		else
			virtio_mem_mb_set_state(vm, mb_id,
						VIRTIO_MEM_MB_STATE_OFFLINE);
	}

	return 0;
}

/*
 * Try to plug the requested amount of memory.
 */
static int virtio_mem_plug_request(struct virtio_mem *vm, uint64_t diff)
{
	uint64_t nb_sb = diff / vm->subblock_size;
	unsigned long mb_id;
	int rc;

	if (!nb_sb)
		return 0;

	/* Don't race with onlining/offlining */
	mutex_lock(&vm->hotplug_mutex);

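	/*
	 * Plug in order of increasing cost: fill partially plugged online
	 * blocks first, then partially plugged offline blocks, then plug and
	 * add unused blocks, and finally prepare brand-new blocks.
	 */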
	/* Try to plug subblocks of partially plugged online blocks. */
	virtio_mem_for_each_mb_state(vm, mb_id,
				     VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL) {
		rc = virtio_mem_mb_plug_any_sb(vm, mb_id, &nb_sb, true);
		if (rc || !nb_sb)
			goto out_unlock;
		cond_resched();
	}

	/* Try to plug subblocks of partially plugged offline blocks. */
	virtio_mem_for_each_mb_state(vm, mb_id,
				     VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL) {
		rc = virtio_mem_mb_plug_any_sb(vm, mb_id, &nb_sb, false);
		if (rc || !nb_sb)
			goto out_unlock;
		cond_resched();
	}

	/*
	 * We won't be working on online/offline memory blocks from this point,
	 * so we can't race with memory onlining/offlining. Drop the mutex.
	 */
	mutex_unlock(&vm->hotplug_mutex);

	/* Try to plug and add unused blocks */
	virtio_mem_for_each_mb_state(vm, mb_id, VIRTIO_MEM_MB_STATE_UNUSED) {
		if (virtio_mem_too_many_mb_offline(vm))
			return -ENOSPC;

		rc = virtio_mem_mb_plug_and_add(vm, mb_id, &nb_sb);
		if (rc || !nb_sb)
			return rc;
		cond_resched();
	}

	/* Try to prepare, plug and add new blocks */
	while (nb_sb) {
		if (virtio_mem_too_many_mb_offline(vm))
			return -ENOSPC;

		rc = virtio_mem_prepare_next_mb(vm, &mb_id);
		if (rc)
			return rc;
		rc = virtio_mem_mb_plug_and_add(vm, mb_id, &nb_sb);
		if (rc)
			return rc;
		cond_resched();
	}

	return 0;
out_unlock:
	mutex_unlock(&vm->hotplug_mutex);
	return rc;
}

/*
 * Unplug the desired number of plugged subblocks of an offline memory block.
 * Will fail if any subblock cannot get unplugged (instead of skipping it).
 *
 * Will modify the state of the memory block. Might temporarily drop the
 * hotplug_mutex.
 *
 * Note: Can fail after some subblocks were successfully unplugged.
 */
static int virtio_mem_mb_unplug_any_sb_offline(struct virtio_mem *vm,
					       unsigned long mb_id,
					       uint64_t *nb_sb)
{
	int rc;

	rc = virtio_mem_mb_unplug_any_sb(vm, mb_id, nb_sb);

	/* some subblocks might have been unplugged even on failure */
	if (!virtio_mem_mb_test_sb_plugged(vm, mb_id, 0, vm->nb_sb_per_mb))
		virtio_mem_mb_set_state(vm, mb_id,
					VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL);
	if (rc)
		return rc;

	if (virtio_mem_mb_test_sb_unplugged(vm, mb_id, 0, vm->nb_sb_per_mb)) {
		/*
		 * Remove the block from Linux - this should never fail.
		 * Hinder the block from getting onlined by marking it
		 * unplugged. Temporarily drop the mutex, so
		 * any pending GOING_ONLINE requests can be serviced/rejected.
		 */
		virtio_mem_mb_set_state(vm, mb_id,
					VIRTIO_MEM_MB_STATE_UNUSED);

		mutex_unlock(&vm->hotplug_mutex);
		rc = virtio_mem_mb_remove(vm, mb_id);
		BUG_ON(rc);
		mutex_lock(&vm->hotplug_mutex);
	}
	return 0;
}

/*
 * Unplug the given plugged subblocks of an online memory block.
 *
 * Will modify the state of the memory block.
 */
static int virtio_mem_mb_unplug_sb_online(struct virtio_mem *vm,
					  unsigned long mb_id, int sb_id,
					  int count)
{
	const unsigned long nr_pages = PFN_DOWN(vm->subblock_size) * count;
	unsigned long start_pfn;
	int rc;

	start_pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
			     sb_id * vm->subblock_size);
	rc = alloc_contig_range(start_pfn, start_pfn + nr_pages,
				MIGRATE_MOVABLE, GFP_KERNEL);
	if (rc == -ENOMEM)
		/* whoops, out of memory */
		return rc;
	if (rc)
		return -EBUSY;

	/* Mark it as fake-offline before unplugging it */
	virtio_mem_set_fake_offline(start_pfn, nr_pages, true);
	adjust_managed_page_count(pfn_to_page(start_pfn), -nr_pages);

	/* Try to unplug the allocated memory */
	rc = virtio_mem_mb_unplug_sb(vm, mb_id, sb_id, count);
	if (rc) {
		/* Return the memory to the buddy. */
		virtio_mem_fake_online(start_pfn, nr_pages);
		return rc;
	}

	virtio_mem_mb_set_state(vm, mb_id,
				VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL);
	return 0;
}

/*
 * Unplug the desired number of plugged subblocks of an online memory block.
 * Will skip subblocks that are busy.
 *
 * Will modify the state of the memory block. Might temporarily drop the
 * hotplug_mutex.
 *
 * Note: Can fail after some subblocks were successfully unplugged. Can
 *       return 0 even if subblocks were busy and could not get unplugged.
 */
static int virtio_mem_mb_unplug_any_sb_online(struct virtio_mem *vm,
					      unsigned long mb_id,
					      uint64_t *nb_sb)
{
	int rc, sb_id;

	/* If possible, try to unplug the complete block in one shot. */
	if (*nb_sb >= vm->nb_sb_per_mb &&
	    virtio_mem_mb_test_sb_plugged(vm, mb_id, 0, vm->nb_sb_per_mb)) {
		rc = virtio_mem_mb_unplug_sb_online(vm, mb_id, 0,
						    vm->nb_sb_per_mb);
		if (!rc) {
			*nb_sb -= vm->nb_sb_per_mb;
			goto unplugged;
		} else if (rc != -EBUSY)
			return rc;
	}

	/* Fallback to single subblocks. */
	for (sb_id = vm->nb_sb_per_mb - 1; sb_id >= 0 && *nb_sb; sb_id--) {
		/* Find the next candidate subblock */
		while (sb_id >= 0 &&
		       !virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1))
			sb_id--;
		if (sb_id < 0)
			break;

		rc = virtio_mem_mb_unplug_sb_online(vm, mb_id, sb_id, 1);
		if (rc == -EBUSY)
			continue;
		else if (rc)
			return rc;
		*nb_sb -= 1;
	}

unplugged:
	/*
	 * Once all subblocks of a memory block were unplugged, offline and
	 * remove it. This will usually not fail, as no memory is in use
	 * anymore - however some other notifiers might NACK the request.
	 */
	if (virtio_mem_mb_test_sb_unplugged(vm, mb_id, 0, vm->nb_sb_per_mb)) {
		mutex_unlock(&vm->hotplug_mutex);
		rc = virtio_mem_mb_offline_and_remove(vm, mb_id);
		mutex_lock(&vm->hotplug_mutex);
		if (!rc)
			virtio_mem_mb_set_state(vm, mb_id,
						VIRTIO_MEM_MB_STATE_UNUSED);
	}

	return 0;
}

/*
 * Try to unplug the requested amount of memory.
 */
static int virtio_mem_unplug_request(struct virtio_mem *vm, uint64_t diff)
{
	uint64_t nb_sb = diff / vm->subblock_size;
	unsigned long mb_id;
	int rc;

	if (!nb_sb)
		return 0;

	/*
	 * We'll drop the mutex a couple of times when it is safe to do so.
	 * This might result in some blocks switching the state (online/offline)
	 * and we could miss them in this run - we will retry again later.
	 */
	mutex_lock(&vm->hotplug_mutex);

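	/*
	 * Unplug in order of increasing cost: partially plugged offline
	 * blocks first, then fully plugged offline blocks, and only then
	 * (if unplug_online is set) online blocks.
	 */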
	/* Try to unplug subblocks of partially plugged offline blocks. */
	virtio_mem_for_each_mb_state_rev(vm, mb_id,
					 VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL) {
		rc = virtio_mem_mb_unplug_any_sb_offline(vm, mb_id,
							 &nb_sb);
		if (rc || !nb_sb)
			goto out_unlock;
		cond_resched();
	}

	/* Try to unplug subblocks of plugged offline blocks. */
	virtio_mem_for_each_mb_state_rev(vm, mb_id,
					 VIRTIO_MEM_MB_STATE_OFFLINE) {
		rc = virtio_mem_mb_unplug_any_sb_offline(vm, mb_id,
							 &nb_sb);
		if (rc || !nb_sb)
			goto out_unlock;
		cond_resched();
	}

	if (!unplug_online) {
		mutex_unlock(&vm->hotplug_mutex);
		return 0;
	}

	/* Try to unplug subblocks of partially plugged online blocks. */
	virtio_mem_for_each_mb_state_rev(vm, mb_id,
					 VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL) {
		rc = virtio_mem_mb_unplug_any_sb_online(vm, mb_id,
							&nb_sb);
		if (rc || !nb_sb)
			goto out_unlock;
		mutex_unlock(&vm->hotplug_mutex);
		cond_resched();
		mutex_lock(&vm->hotplug_mutex);
	}

	/* Try to unplug subblocks of plugged online blocks. */
	virtio_mem_for_each_mb_state_rev(vm, mb_id,
					 VIRTIO_MEM_MB_STATE_ONLINE) {
		rc = virtio_mem_mb_unplug_any_sb_online(vm, mb_id,
							&nb_sb);
		if (rc || !nb_sb)
			goto out_unlock;
		mutex_unlock(&vm->hotplug_mutex);
		cond_resched();
		mutex_lock(&vm->hotplug_mutex);
	}

	mutex_unlock(&vm->hotplug_mutex);
	return nb_sb ? -EBUSY : 0;
out_unlock:
	mutex_unlock(&vm->hotplug_mutex);
	return rc;
}

/*
 * Try to unplug all blocks that couldn't be unplugged before, for example,
 * because the hypervisor was busy.
 */
static int virtio_mem_unplug_pending_mb(struct virtio_mem *vm)
{
	unsigned long mb_id;
	int rc;

	virtio_mem_for_each_mb_state(vm, mb_id, VIRTIO_MEM_MB_STATE_PLUGGED) {
		rc = virtio_mem_mb_unplug(vm, mb_id);
		if (rc)
			return rc;
		virtio_mem_mb_set_state(vm, mb_id, VIRTIO_MEM_MB_STATE_UNUSED);
	}

	return 0;
}

/*
 * Update all parts of the config that could have changed.
 */
static void virtio_mem_refresh_config(struct virtio_mem *vm)
{
	const uint64_t phys_limit = 1UL << MAX_PHYSMEM_BITS;
	uint64_t new_plugged_size, usable_region_size, end_addr;

	/* the plugged_size is just a reflection of what _we_ did previously */
	virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
			&new_plugged_size);
	if (WARN_ON_ONCE(new_plugged_size != vm->plugged_size))
		vm->plugged_size = new_plugged_size;

	/* calculate the last usable memory block id */
	virtio_cread_le(vm->vdev, struct virtio_mem_config,
			usable_region_size, &usable_region_size);
	end_addr = vm->addr + usable_region_size;
	end_addr = min(end_addr, phys_limit);
	vm->last_usable_mb_id = virtio_mem_phys_to_mb_id(end_addr) - 1;

	/* see if there is a request to change the size */
	virtio_cread_le(vm->vdev, struct virtio_mem_config, requested_size,
			&vm->requested_size);

	dev_info(&vm->vdev->dev, "plugged size: 0x%llx", vm->plugged_size);
	dev_info(&vm->vdev->dev, "requested size: 0x%llx", vm->requested_size);
}

/*
 * Workqueue function for handling plug/unplug requests and config updates.
 */
static void virtio_mem_run_wq(struct work_struct *work)
{
	struct virtio_mem *vm = container_of(work, struct virtio_mem, wq);
	uint64_t diff;
	int rc;

	hrtimer_cancel(&vm->retry_timer);

	if (vm->broken)
		return;

retry:
	rc = 0;

	/* Make sure we start with a clean state if there are leftovers. */
	if (unlikely(vm->unplug_all_required))
		rc = virtio_mem_send_unplug_all_request(vm);

	if (atomic_read(&vm->config_changed)) {
		atomic_set(&vm->config_changed, 0);
		virtio_mem_refresh_config(vm);
	}

	/* Unplug any leftovers from previous runs */
	if (!rc)
		rc = virtio_mem_unplug_pending_mb(vm);

	if (!rc && vm->requested_size != vm->plugged_size) {
		if (vm->requested_size > vm->plugged_size) {
			diff = vm->requested_size - vm->plugged_size;
			rc = virtio_mem_plug_request(vm, diff);
		} else {
			diff = vm->plugged_size - vm->requested_size;
			rc = virtio_mem_unplug_request(vm, diff);
		}
	}

	switch (rc) {
	case 0:
		vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS;
		break;
	case -ENOSPC:
		/*
		 * We cannot add any more memory (alignment, physical limit)
		 * or we have too many offline memory blocks.
		 */
		break;
	case -ETXTBSY:
		/*
		 * The hypervisor cannot process our request right now
		 * (e.g., out of memory, migrating).
		 */
	case -EBUSY:
		/*
		 * We cannot free up any memory to unplug it (all plugged memory
		 * is busy).
		 */
	case -ENOMEM:
		/* Out of memory, try again later. */
		hrtimer_start(&vm->retry_timer, ms_to_ktime(vm->retry_timer_ms),
			      HRTIMER_MODE_REL);
		break;
	case -EAGAIN:
		/* Retry immediately (e.g., the config changed). */
		goto retry;
	default:
		/* Unknown error, mark as broken */
		dev_err(&vm->vdev->dev,
			"unknown error, marking device broken: %d\n", rc);
		vm->broken = true;
	}
}

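/*
 * Retry-timer expiry: kick the workqueue again and double the timeout,
 * capped at VIRTIO_MEM_RETRY_TIMER_MAX_MS (i.e., 50s, 100s, 200s, 300s).
 */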
static enum hrtimer_restart virtio_mem_timer_expired(struct hrtimer *timer)
{
	struct virtio_mem *vm = container_of(timer, struct virtio_mem,
					     retry_timer);

	virtio_mem_retry(vm);
	vm->retry_timer_ms = min_t(unsigned int, vm->retry_timer_ms * 2,
				   VIRTIO_MEM_RETRY_TIMER_MAX_MS);
	return HRTIMER_NORESTART;
}

static void virtio_mem_handle_response(struct virtqueue *vq)
{
	struct virtio_mem *vm = vq->vdev->priv;

	wake_up(&vm->host_resp);
}

static int virtio_mem_init_vq(struct virtio_mem *vm)
{
	struct virtqueue *vq;

	vq = virtio_find_single_vq(vm->vdev, virtio_mem_handle_response,
				   "guest-request");
	if (IS_ERR(vq))
		return PTR_ERR(vq);
	vm->vq = vq;

	return 0;
}

static int virtio_mem_init(struct virtio_mem *vm)
{
	const uint64_t phys_limit = 1UL << MAX_PHYSMEM_BITS;
	uint16_t node_id;

	if (!vm->vdev->config->get) {
		dev_err(&vm->vdev->dev, "config access disabled\n");
		return -EINVAL;
	}

	/*
	 * We don't want to (un)plug or reuse any memory when in kdump. The
	 * memory is still accessible (but not mapped).
	 */
	if (is_kdump_kernel()) {
		dev_warn(&vm->vdev->dev, "disabled in kdump kernel\n");
		return -EBUSY;
	}

	/* Fetch all properties that can't change. */
	virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
			&vm->plugged_size);
	virtio_cread_le(vm->vdev, struct virtio_mem_config, block_size,
			&vm->device_block_size);
	virtio_cread_le(vm->vdev, struct virtio_mem_config, node_id,
			&node_id);
	vm->nid = virtio_mem_translate_node_id(vm, node_id);
	virtio_cread_le(vm->vdev, struct virtio_mem_config, addr, &vm->addr);
	virtio_cread_le(vm->vdev, struct virtio_mem_config, region_size,
			&vm->region_size);

	/*
	 * We always hotplug memory in memory block granularity. This way,
	 * we have to wait for exactly one memory block to online.
	 */
	if (vm->device_block_size > memory_block_size_bytes()) {
		dev_err(&vm->vdev->dev,
			"The block size is not supported (too big).\n");
		return -EINVAL;
	}

	/* bad device setup - warn only */
	if (!IS_ALIGNED(vm->addr, memory_block_size_bytes()))
		dev_warn(&vm->vdev->dev,
			 "The alignment of the physical start address can make some memory unusable.\n");
	if (!IS_ALIGNED(vm->addr + vm->region_size, memory_block_size_bytes()))
		dev_warn(&vm->vdev->dev,
			 "The alignment of the physical end address can make some memory unusable.\n");
	if (vm->addr + vm->region_size > phys_limit)
		dev_warn(&vm->vdev->dev,
			 "Some memory is not addressable. This can make some memory unusable.\n");

	/*
	 * Calculate the subblock size:
	 * - At least MAX_ORDER - 1 / pageblock_order.
	 * - At least the device block size.
	 * In the worst case, a single subblock per memory block.
	 */
	vm->subblock_size = PAGE_SIZE * 1ul << max_t(uint32_t, MAX_ORDER - 1,
						     pageblock_order);
	vm->subblock_size = max_t(uint64_t, vm->device_block_size,
				  vm->subblock_size);
	vm->nb_sb_per_mb = memory_block_size_bytes() / vm->subblock_size;
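	/*
	 * Example (x86-64 defaults): 4 KiB pages with MAX_ORDER 11 give a
	 * minimum subblock size of 4 MiB; with 128 MiB memory blocks that
	 * yields nb_sb_per_mb = 32.
	 */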
1723
1724	/* Round up to the next full memory block */
1725	vm->first_mb_id = virtio_mem_phys_to_mb_id(vm->addr - 1 +
1726						   memory_block_size_bytes());
1727	vm->next_mb_id = vm->first_mb_id;
1728	vm->last_mb_id = virtio_mem_phys_to_mb_id(vm->addr +
1729			 vm->region_size) - 1;
1730
1731	dev_info(&vm->vdev->dev, "start address: 0x%llx", vm->addr);
1732	dev_info(&vm->vdev->dev, "region size: 0x%llx", vm->region_size);
1733	dev_info(&vm->vdev->dev, "device block size: 0x%llx",
1734		 (unsigned long long)vm->device_block_size);
1735	dev_info(&vm->vdev->dev, "memory block size: 0x%lx",
1736		 memory_block_size_bytes());
1737	dev_info(&vm->vdev->dev, "subblock size: 0x%llx",
1738		 (unsigned long long)vm->subblock_size);
1739	if (vm->nid != NUMA_NO_NODE)
1740		dev_info(&vm->vdev->dev, "nid: %d", vm->nid);
1741
1742	return 0;
1743}
1744
1745static int virtio_mem_create_resource(struct virtio_mem *vm)
1746{
1747	/*
1748	 * When force-unloading the driver and removing the device, we
1749	 * could have a garbage pointer. Duplicate the string.
1750	 */
1751	const char *name = kstrdup(dev_name(&vm->vdev->dev), GFP_KERNEL);
1752
1753	if (!name)
1754		return -ENOMEM;
1755
1756	vm->parent_resource = __request_mem_region(vm->addr, vm->region_size,
1757						   name, IORESOURCE_SYSTEM_RAM);
1758	if (!vm->parent_resource) {
1759		kfree(name);
1760		dev_warn(&vm->vdev->dev, "could not reserve device region\n");
1761		dev_info(&vm->vdev->dev,
1762			 "reloading the driver is not supported\n");
1763		return -EBUSY;
1764	}
1765
1766	/* The memory is not actually busy - make add_memory() work. */
1767	vm->parent_resource->flags &= ~IORESOURCE_BUSY;
1768	return 0;
1769}
1770
1771static void virtio_mem_delete_resource(struct virtio_mem *vm)
1772{
1773	const char *name;
1774
1775	if (!vm->parent_resource)
1776		return;
1777
1778	name = vm->parent_resource->name;
1779	release_resource(vm->parent_resource);
1780	kfree(vm->parent_resource);
1781	kfree(name);
1782	vm->parent_resource = NULL;
1783}
1784
1785static int virtio_mem_probe(struct virtio_device *vdev)
1786{
1787	struct virtio_mem *vm;
1788	int rc;
1789
1790	BUILD_BUG_ON(sizeof(struct virtio_mem_req) != 24);
1791	BUILD_BUG_ON(sizeof(struct virtio_mem_resp) != 10);
1792
1793	vdev->priv = vm = kzalloc(sizeof(*vm), GFP_KERNEL);
1794	if (!vm)
1795		return -ENOMEM;
1796
1797	init_waitqueue_head(&vm->host_resp);
1798	vm->vdev = vdev;
1799	INIT_WORK(&vm->wq, virtio_mem_run_wq);
1800	mutex_init(&vm->hotplug_mutex);
1801	INIT_LIST_HEAD(&vm->next);
1802	spin_lock_init(&vm->removal_lock);
1803	hrtimer_init(&vm->retry_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1804	vm->retry_timer.function = virtio_mem_timer_expired;
1805	vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS;
1806
1807	/* register the virtqueue */
1808	rc = virtio_mem_init_vq(vm);
1809	if (rc)
1810		goto out_free_vm;
1811
1812	/* initialize the device by querying the config */
1813	rc = virtio_mem_init(vm);
1814	if (rc)
1815		goto out_del_vq;
1816
1817	/* create the parent resource for all memory */
1818	rc = virtio_mem_create_resource(vm);
1819	if (rc)
1820		goto out_del_vq;
1821
1822	/*
1823	 * If we still have memory plugged, we have to unplug all memory first.
1824	 * Registering our parent resource makes sure that this memory isn't
1825	 * actually in use (e.g., trying to reload the driver).
1826	 */
1827	if (vm->plugged_size) {
1828		vm->unplug_all_required = 1;
1829		dev_info(&vm->vdev->dev, "unplugging all memory is required\n");
1830	}
1831
1832	/* register callbacks */
1833	vm->memory_notifier.notifier_call = virtio_mem_memory_notifier_cb;
1834	rc = register_memory_notifier(&vm->memory_notifier);
1835	if (rc)
1836		goto out_del_resource;
1837	rc = register_virtio_mem_device(vm);
1838	if (rc)
1839		goto out_unreg_mem;
1840
1841	virtio_device_ready(vdev);
1842
1843	/* trigger a config update to start processing the requested_size */
1844	atomic_set(&vm->config_changed, 1);
1845	queue_work(system_freezable_wq, &vm->wq);
1846
1847	return 0;
1848out_unreg_mem:
1849	unregister_memory_notifier(&vm->memory_notifier);
1850out_del_resource:
1851	virtio_mem_delete_resource(vm);
1852out_del_vq:
1853	vdev->config->del_vqs(vdev);
1854out_free_vm:
1855	kfree(vm);
1856	vdev->priv = NULL;
1857
1858	return rc;
1859}
1860
1861static void virtio_mem_remove(struct virtio_device *vdev)
1862{
1863	struct virtio_mem *vm = vdev->priv;
1864	unsigned long mb_id;
1865	int rc;
1866
1867	/*
1868	 * Make sure the workqueue won't be triggered anymore and no memory
1869	 * blocks can be onlined/offlined until we're finished here.
1870	 */
1871	mutex_lock(&vm->hotplug_mutex);
1872	spin_lock_irq(&vm->removal_lock);
1873	vm->removing = true;
1874	spin_unlock_irq(&vm->removal_lock);
1875	mutex_unlock(&vm->hotplug_mutex);
1876
1877	/* wait until the workqueue stopped */
1878	cancel_work_sync(&vm->wq);
1879	hrtimer_cancel(&vm->retry_timer);
1880
1881	/*
1882	 * After we unregistered our callbacks, user space can online partially
1883	 * plugged offline blocks. Make sure to remove them.
1884	 */
1885	virtio_mem_for_each_mb_state(vm, mb_id,
1886				     VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL) {
1887		rc = virtio_mem_mb_remove(vm, mb_id);
1888		BUG_ON(rc);
1889		virtio_mem_mb_set_state(vm, mb_id, VIRTIO_MEM_MB_STATE_UNUSED);
1890	}
1891	/*
1892	 * After we unregistered our callbacks, user space can no longer
1893	 * offline partially plugged online memory blocks. No need to worry
1894	 * about them.
1895	 */
1896
1897	/* unregister callbacks */
1898	unregister_virtio_mem_device(vm);
1899	unregister_memory_notifier(&vm->memory_notifier);
1900
1901	/*
1902	 * There is no way we could reliably remove all memory we have added to
1903	 * the system. And there is no way to stop the driver/device from going
1904	 * away. Warn at least.
1905	 */
1906	if (vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE] ||
1907	    vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL] ||
1908	    vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE] ||
1909	    vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL] ||
1910	    vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE_MOVABLE]) {
1911		dev_warn(&vdev->dev, "device still has system memory added\n");
1912	} else {
1913		virtio_mem_delete_resource(vm);
1914		kfree_const(vm->resource_name);
1915	}
1916
1917	/* remove all tracking data - no locking needed */
1918	vfree(vm->mb_state);
1919	vfree(vm->sb_bitmap);
1920
1921	/* reset the device and cleanup the queues */
1922	vdev->config->reset(vdev);
1923	vdev->config->del_vqs(vdev);
1924
1925	kfree(vm);
1926	vdev->priv = NULL;
1927}
1928
1929static void virtio_mem_config_changed(struct virtio_device *vdev)
1930{
1931	struct virtio_mem *vm = vdev->priv;
1932
1933	atomic_set(&vm->config_changed, 1);
1934	virtio_mem_retry(vm);
1935}
1936
1937#ifdef CONFIG_PM_SLEEP
1938static int virtio_mem_freeze(struct virtio_device *vdev)
1939{
1940	/*
1941	 * When restarting the VM, all memory is usually unplugged. Don't
1942	 * allow suspending/hibernating.
1943	 */
1944	dev_err(&vdev->dev, "save/restore not supported.\n");
1945	return -EPERM;
1946}
1947
1948static int virtio_mem_restore(struct virtio_device *vdev)
1949{
1950	return -EPERM;
1951}
1952#endif
1953
1954static unsigned int virtio_mem_features[] = {
1955#if defined(CONFIG_NUMA) && defined(CONFIG_ACPI_NUMA)
1956	VIRTIO_MEM_F_ACPI_PXM,
1957#endif
1958};
1959
1960static struct virtio_device_id virtio_mem_id_table[] = {
1961	{ VIRTIO_ID_MEM, VIRTIO_DEV_ANY_ID },
1962	{ 0 },
1963};
1964
1965static struct virtio_driver virtio_mem_driver = {
1966	.feature_table = virtio_mem_features,
1967	.feature_table_size = ARRAY_SIZE(virtio_mem_features),
1968	.driver.name = KBUILD_MODNAME,
1969	.driver.owner = THIS_MODULE,
1970	.id_table = virtio_mem_id_table,
1971	.probe = virtio_mem_probe,
1972	.remove = virtio_mem_remove,
1973	.config_changed = virtio_mem_config_changed,
1974#ifdef CONFIG_PM_SLEEP
1975	.freeze	=	virtio_mem_freeze,
1976	.restore =	virtio_mem_restore,
1977#endif
1978};
1979
1980module_virtio_driver(virtio_mem_driver);
1981MODULE_DEVICE_TABLE(virtio, virtio_mem_id_table);
1982MODULE_AUTHOR("David Hildenbrand <david@redhat.com>");
1983MODULE_DESCRIPTION("Virtio-mem driver");
1984MODULE_LICENSE("GPL");
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Virtio-mem device driver.
   4 *
   5 * Copyright Red Hat, Inc. 2020
   6 *
   7 * Author(s): David Hildenbrand <david@redhat.com>
   8 */
   9
  10#include <linux/virtio.h>
  11#include <linux/virtio_mem.h>
  12#include <linux/workqueue.h>
  13#include <linux/slab.h>
  14#include <linux/module.h>
  15#include <linux/mm.h>
  16#include <linux/memory_hotplug.h>
  17#include <linux/memory.h>
  18#include <linux/hrtimer.h>
  19#include <linux/crash_dump.h>
  20#include <linux/mutex.h>
  21#include <linux/bitmap.h>
  22#include <linux/lockdep.h>
  23#include <linux/log2.h>
  24
  25#include <acpi/acpi_numa.h>
  26
  27static bool unplug_online = true;
  28module_param(unplug_online, bool, 0644);
  29MODULE_PARM_DESC(unplug_online, "Try to unplug online memory");
  30
  31static bool force_bbm;
  32module_param(force_bbm, bool, 0444);
  33MODULE_PARM_DESC(force_bbm,
  34		"Force Big Block Mode. Default is 0 (auto-selection)");
  35
  36static unsigned long bbm_block_size;
  37module_param(bbm_block_size, ulong, 0444);
  38MODULE_PARM_DESC(bbm_block_size,
  39		 "Big Block size in bytes. Default is 0 (auto-detection).");
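/*
 * Both parameters are read-only at runtime (0444) and would typically be
 * given at module load time, e.g. (hypothetical invocation):
 *	modprobe virtio_mem force_bbm=1 bbm_block_size=0x40000000
 */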
  40
  41/*
  42 * virtio-mem currently supports the following modes of operation:
  43 *
  44 * * Sub Block Mode (SBM): A Linux memory block spans 2..X subblocks (SB). The
  45 *   size of a Sub Block (SB) is determined based on the device block size, the
  46 *   pageblock size, and the maximum allocation granularity of the buddy.
  47 *   Subblocks within a Linux memory block might either be plugged or unplugged.
   48 *   Memory is added to or removed from Linux MM in Linux memory block granularity.
  49 *
  50 * * Big Block Mode (BBM): A Big Block (BB) spans 1..X Linux memory blocks.
   51 *   Memory is added to or removed from Linux MM in Big Block granularity.
  52 *
  53 * The mode is determined automatically based on the Linux memory block size
  54 * and the device block size.
  55 *
  56 * User space / core MM (auto onlining) is responsible for onlining added
  57 * Linux memory blocks - and for selecting a zone. Linux Memory Blocks are
  58 * always onlined separately, and all memory within a Linux memory block is
  59 * onlined to the same zone - virtio-mem relies on this behavior.
  60 */
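/*
 * Illustration: with 128 MiB Linux memory blocks, a 2 MiB device block
 * size permits SBM (subblocks within each memory block); a 1 GiB device
 * block size - larger than a Linux memory block - requires BBM with a
 * big block size of at least 1 GiB.
 */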
  61
  62/*
  63 * State of a Linux memory block in SBM.
  64 */
  65enum virtio_mem_sbm_mb_state {
  66	/* Unplugged, not added to Linux. Can be reused later. */
  67	VIRTIO_MEM_SBM_MB_UNUSED = 0,
  68	/* (Partially) plugged, not added to Linux. Error on add_memory(). */
  69	VIRTIO_MEM_SBM_MB_PLUGGED,
  70	/* Fully plugged, fully added to Linux, offline. */
  71	VIRTIO_MEM_SBM_MB_OFFLINE,
  72	/* Partially plugged, fully added to Linux, offline. */
  73	VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL,
  74	/* Fully plugged, fully added to Linux, onlined to a kernel zone. */
  75	VIRTIO_MEM_SBM_MB_KERNEL,
   76	/* Partially plugged, fully added to Linux, onlined to a kernel zone. */
  77	VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL,
  78	/* Fully plugged, fully added to Linux, onlined to ZONE_MOVABLE. */
  79	VIRTIO_MEM_SBM_MB_MOVABLE,
  80	/* Partially plugged, fully added to Linux, onlined to ZONE_MOVABLE. */
  81	VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL,
  82	VIRTIO_MEM_SBM_MB_COUNT
  83};
  84
  85/*
  86 * State of a Big Block (BB) in BBM, covering 1..X Linux memory blocks.
  87 */
  88enum virtio_mem_bbm_bb_state {
  89	/* Unplugged, not added to Linux. Can be reused later. */
  90	VIRTIO_MEM_BBM_BB_UNUSED = 0,
  91	/* Plugged, not added to Linux. Error on add_memory(). */
  92	VIRTIO_MEM_BBM_BB_PLUGGED,
  93	/* Plugged and added to Linux. */
  94	VIRTIO_MEM_BBM_BB_ADDED,
  95	/* All online parts are fake-offline, ready to remove. */
  96	VIRTIO_MEM_BBM_BB_FAKE_OFFLINE,
  97	VIRTIO_MEM_BBM_BB_COUNT
  98};
  99
 100struct virtio_mem {
 101	struct virtio_device *vdev;
 102
 103	/* We might first have to unplug all memory when starting up. */
 104	bool unplug_all_required;
 105
 106	/* Workqueue that processes the plug/unplug requests. */
 107	struct work_struct wq;
 108	atomic_t wq_active;
 109	atomic_t config_changed;
 110
 111	/* Virtqueue for guest->host requests. */
 112	struct virtqueue *vq;
 113
 114	/* Wait for a host response to a guest request. */
 115	wait_queue_head_t host_resp;
 116
 117	/* Space for one guest request and the host response. */
 118	struct virtio_mem_req req;
 119	struct virtio_mem_resp resp;
 120
 121	/* The current size of the device. */
 122	uint64_t plugged_size;
 123	/* The requested size of the device. */
 124	uint64_t requested_size;
 125
 126	/* The device block size (for communicating with the device). */
 127	uint64_t device_block_size;
 128	/* The determined node id for all memory of the device. */
 129	int nid;
 130	/* Physical start address of the memory region. */
 131	uint64_t addr;
 132	/* Maximum region size in bytes. */
 133	uint64_t region_size;
 134
 135	/* The parent resource for all memory added via this device. */
 136	struct resource *parent_resource;
 137	/*
 138	 * Copy of "System RAM (virtio_mem)" to be used for
 139	 * add_memory_driver_managed().
 140	 */
 141	const char *resource_name;
 142	/* Memory group identification. */
 143	int mgid;
 144
 145	/*
 146	 * We don't want to add too much memory if it's not getting onlined,
 147	 * to avoid running OOM. Besides this threshold, we allow to have at
 148	 * least two offline blocks at a time (whatever is bigger).
 149	 */
 150#define VIRTIO_MEM_DEFAULT_OFFLINE_THRESHOLD		(1024 * 1024 * 1024)
 151	atomic64_t offline_size;
 152	uint64_t offline_threshold;
 153
 154	/* If set, the driver is in SBM, otherwise in BBM. */
 155	bool in_sbm;
 156
 157	union {
 158		struct {
 159			/* Id of the first memory block of this device. */
 160			unsigned long first_mb_id;
 161			/* Id of the last usable memory block of this device. */
 162			unsigned long last_usable_mb_id;
  163			/* Id of the next memory block to prepare when needed. */
 164			unsigned long next_mb_id;
 165
 166			/* The subblock size. */
 167			uint64_t sb_size;
 168			/* The number of subblocks per Linux memory block. */
 169			uint32_t sbs_per_mb;
 170
 171			/*
 172			 * Some of the Linux memory blocks tracked as "partially
 173			 * plugged" are completely unplugged and can be offlined
 174			 * and removed -- which previously failed.
 175			 */
 176			bool have_unplugged_mb;
 177
 178			/* Summary of all memory block states. */
 179			unsigned long mb_count[VIRTIO_MEM_SBM_MB_COUNT];
 180
 181			/*
 182			 * One byte state per memory block. Allocated via
 183			 * vmalloc(). Resized (alloc+copy+free) on demand.
 184			 *
 185			 * With 128 MiB memory blocks, we have states for 512
 186			 * GiB of memory in one 4 KiB page.
 187			 */
 188			uint8_t *mb_states;
 189
 190			/*
 191			 * Bitmap: one bit per subblock. Allocated similar to
 192			 * sbm.mb_states.
 193			 *
 194			 * A set bit means the corresponding subblock is
  195			 * plugged, otherwise it's unplugged.
 196			 *
 197			 * With 4 MiB subblocks, we manage 128 GiB of memory
 198			 * in one 4 KiB page.
 199			 */
 200			unsigned long *sb_states;
 201		} sbm;
 202
 203		struct {
 204			/* Id of the first big block of this device. */
 205			unsigned long first_bb_id;
 206			/* Id of the last usable big block of this device. */
 207			unsigned long last_usable_bb_id;
  208			/* Id of the next big block to prepare when needed. */
 209			unsigned long next_bb_id;
 210
 211			/* Summary of all big block states. */
 212			unsigned long bb_count[VIRTIO_MEM_BBM_BB_COUNT];
 213
 214			/* One byte state per big block. See sbm.mb_states. */
 215			uint8_t *bb_states;
 216
 217			/* The block size used for plugging/adding/removing. */
 218			uint64_t bb_size;
 219		} bbm;
 220	};
 221
 222	/*
 223	 * Mutex that protects the sbm.mb_count, sbm.mb_states,
 224	 * sbm.sb_states, bbm.bb_count, and bbm.bb_states
 225	 *
 226	 * When this lock is held the pointers can't change, ONLINE and
 227	 * OFFLINE blocks can't change the state and no subblocks will get
 228	 * plugged/unplugged.
 229	 *
 230	 * In kdump mode, used to serialize requests, last_block_addr and
 231	 * last_block_plugged.
 232	 */
 233	struct mutex hotplug_mutex;
 234	bool hotplug_active;
 235
 236	/* An error occurred we cannot handle - stop processing requests. */
 237	bool broken;
 238
  239	/* Cached value of is_kdump_kernel() when the device was probed. */
 240	bool in_kdump;
 241
 242	/* The driver is being removed. */
 243	spinlock_t removal_lock;
 244	bool removing;
 245
 246	/* Timer for retrying to plug/unplug memory. */
 247	struct hrtimer retry_timer;
 248	unsigned int retry_timer_ms;
 249#define VIRTIO_MEM_RETRY_TIMER_MIN_MS		50000
 250#define VIRTIO_MEM_RETRY_TIMER_MAX_MS		300000
 251
 252	/* Memory notifier (online/offline events). */
 253	struct notifier_block memory_notifier;
 254
 255#ifdef CONFIG_PROC_VMCORE
 256	/* vmcore callback for /proc/vmcore handling in kdump mode */
 257	struct vmcore_cb vmcore_cb;
 258	uint64_t last_block_addr;
 259	bool last_block_plugged;
 260#endif /* CONFIG_PROC_VMCORE */
 261
 262	/* Next device in the list of virtio-mem devices. */
 263	struct list_head next;
 264};
 265
 266/*
 267 * We have to share a single online_page callback among all virtio-mem
 268 * devices. We use RCU to iterate the list in the callback.
 269 */
 270static DEFINE_MUTEX(virtio_mem_mutex);
 271static LIST_HEAD(virtio_mem_devices);
 272
 273static void virtio_mem_online_page_cb(struct page *page, unsigned int order);
 274static void virtio_mem_fake_offline_going_offline(unsigned long pfn,
 275						  unsigned long nr_pages);
 276static void virtio_mem_fake_offline_cancel_offline(unsigned long pfn,
 277						   unsigned long nr_pages);
 278static void virtio_mem_retry(struct virtio_mem *vm);
 279static int virtio_mem_create_resource(struct virtio_mem *vm);
 280static void virtio_mem_delete_resource(struct virtio_mem *vm);
 281
 282/*
 283 * Register a virtio-mem device so it will be considered for the online_page
 284 * callback.
 285 */
 286static int register_virtio_mem_device(struct virtio_mem *vm)
 287{
 288	int rc = 0;
 289
 290	/* First device registers the callback. */
 291	mutex_lock(&virtio_mem_mutex);
 292	if (list_empty(&virtio_mem_devices))
 293		rc = set_online_page_callback(&virtio_mem_online_page_cb);
 294	if (!rc)
 295		list_add_rcu(&vm->next, &virtio_mem_devices);
 296	mutex_unlock(&virtio_mem_mutex);
 297
 298	return rc;
 299}
 300
 301/*
 302 * Unregister a virtio-mem device so it will no longer be considered for the
 303 * online_page callback.
 304 */
 305static void unregister_virtio_mem_device(struct virtio_mem *vm)
 306{
 307	/* Last device unregisters the callback. */
 308	mutex_lock(&virtio_mem_mutex);
 309	list_del_rcu(&vm->next);
 310	if (list_empty(&virtio_mem_devices))
 311		restore_online_page_callback(&virtio_mem_online_page_cb);
 312	mutex_unlock(&virtio_mem_mutex);
 313
 314	synchronize_rcu();
 315}
 316
 317/*
 318 * Calculate the memory block id of a given address.
 319 */
 320static unsigned long virtio_mem_phys_to_mb_id(unsigned long addr)
 321{
 322	return addr / memory_block_size_bytes();
 323}
 324
 325/*
 326 * Calculate the physical start address of a given memory block id.
 327 */
 328static unsigned long virtio_mem_mb_id_to_phys(unsigned long mb_id)
 329{
 330	return mb_id * memory_block_size_bytes();
 331}
 332
 333/*
 334 * Calculate the big block id of a given address.
 335 */
 336static unsigned long virtio_mem_phys_to_bb_id(struct virtio_mem *vm,
 337					      uint64_t addr)
 338{
 339	return addr / vm->bbm.bb_size;
 340}
 341
 342/*
 343 * Calculate the physical start address of a given big block id.
 344 */
 345static uint64_t virtio_mem_bb_id_to_phys(struct virtio_mem *vm,
 346					 unsigned long bb_id)
 347{
 348	return bb_id * vm->bbm.bb_size;
 349}
 350
 351/*
 352 * Calculate the subblock id of a given address.
 353 */
 354static unsigned long virtio_mem_phys_to_sb_id(struct virtio_mem *vm,
 355					      unsigned long addr)
 356{
 357	const unsigned long mb_id = virtio_mem_phys_to_mb_id(addr);
 358	const unsigned long mb_addr = virtio_mem_mb_id_to_phys(mb_id);
 359
 360	return (addr - mb_addr) / vm->sbm.sb_size;
 361}
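/*
 * E.g., with 128 MiB memory blocks and a 4 MiB subblock size, the
 * hypothetical address 0x805400000 (32 GiB + 84 MiB) lies in mb_id 256
 * at sb_id 21.
 */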
 362
 363/*
 364 * Set the state of a big block, taking care of the state counter.
 365 */
 366static void virtio_mem_bbm_set_bb_state(struct virtio_mem *vm,
 367					unsigned long bb_id,
 368					enum virtio_mem_bbm_bb_state state)
 369{
 370	const unsigned long idx = bb_id - vm->bbm.first_bb_id;
 371	enum virtio_mem_bbm_bb_state old_state;
 372
 373	old_state = vm->bbm.bb_states[idx];
 374	vm->bbm.bb_states[idx] = state;
 375
 376	BUG_ON(vm->bbm.bb_count[old_state] == 0);
 377	vm->bbm.bb_count[old_state]--;
 378	vm->bbm.bb_count[state]++;
 379}
 380
 381/*
 382 * Get the state of a big block.
 383 */
 384static enum virtio_mem_bbm_bb_state virtio_mem_bbm_get_bb_state(struct virtio_mem *vm,
 385								unsigned long bb_id)
 386{
 387	return vm->bbm.bb_states[bb_id - vm->bbm.first_bb_id];
 388}
 389
 390/*
 391 * Prepare the big block state array for the next big block.
 392 */
 393static int virtio_mem_bbm_bb_states_prepare_next_bb(struct virtio_mem *vm)
 394{
 395	unsigned long old_bytes = vm->bbm.next_bb_id - vm->bbm.first_bb_id;
 396	unsigned long new_bytes = old_bytes + 1;
 397	int old_pages = PFN_UP(old_bytes);
 398	int new_pages = PFN_UP(new_bytes);
 399	uint8_t *new_array;
 400
 401	if (vm->bbm.bb_states && old_pages == new_pages)
 402		return 0;
 403
 404	new_array = vzalloc(new_pages * PAGE_SIZE);
 405	if (!new_array)
 406		return -ENOMEM;
 407
 408	mutex_lock(&vm->hotplug_mutex);
 409	if (vm->bbm.bb_states)
 410		memcpy(new_array, vm->bbm.bb_states, old_pages * PAGE_SIZE);
 411	vfree(vm->bbm.bb_states);
 412	vm->bbm.bb_states = new_array;
 413	mutex_unlock(&vm->hotplug_mutex);
 414
 415	return 0;
 416}
 417
 418#define virtio_mem_bbm_for_each_bb(_vm, _bb_id, _state) \
 419	for (_bb_id = vm->bbm.first_bb_id; \
 420	     _bb_id < vm->bbm.next_bb_id && _vm->bbm.bb_count[_state]; \
 421	     _bb_id++) \
 422		if (virtio_mem_bbm_get_bb_state(_vm, _bb_id) == _state)
 423
 424#define virtio_mem_bbm_for_each_bb_rev(_vm, _bb_id, _state) \
 425	for (_bb_id = vm->bbm.next_bb_id - 1; \
 426	     _bb_id >= vm->bbm.first_bb_id && _vm->bbm.bb_count[_state]; \
 427	     _bb_id--) \
 428		if (virtio_mem_bbm_get_bb_state(_vm, _bb_id) == _state)
 429
 430/*
 431 * Set the state of a memory block, taking care of the state counter.
 432 */
 433static void virtio_mem_sbm_set_mb_state(struct virtio_mem *vm,
 434					unsigned long mb_id, uint8_t state)
 435{
 436	const unsigned long idx = mb_id - vm->sbm.first_mb_id;
 437	uint8_t old_state;
 438
 439	old_state = vm->sbm.mb_states[idx];
 440	vm->sbm.mb_states[idx] = state;
 441
 442	BUG_ON(vm->sbm.mb_count[old_state] == 0);
 443	vm->sbm.mb_count[old_state]--;
 444	vm->sbm.mb_count[state]++;
 445}
 446
 447/*
 448 * Get the state of a memory block.
 449 */
 450static uint8_t virtio_mem_sbm_get_mb_state(struct virtio_mem *vm,
 451					   unsigned long mb_id)
 452{
 453	const unsigned long idx = mb_id - vm->sbm.first_mb_id;
 454
 455	return vm->sbm.mb_states[idx];
 456}
 457
 458/*
 459 * Prepare the state array for the next memory block.
 460 */
 461static int virtio_mem_sbm_mb_states_prepare_next_mb(struct virtio_mem *vm)
 462{
 463	int old_pages = PFN_UP(vm->sbm.next_mb_id - vm->sbm.first_mb_id);
 464	int new_pages = PFN_UP(vm->sbm.next_mb_id - vm->sbm.first_mb_id + 1);
 465	uint8_t *new_array;
 466
 467	if (vm->sbm.mb_states && old_pages == new_pages)
 468		return 0;
 469
 470	new_array = vzalloc(new_pages * PAGE_SIZE);
 471	if (!new_array)
 472		return -ENOMEM;
 473
 474	mutex_lock(&vm->hotplug_mutex);
 475	if (vm->sbm.mb_states)
 476		memcpy(new_array, vm->sbm.mb_states, old_pages * PAGE_SIZE);
 477	vfree(vm->sbm.mb_states);
 478	vm->sbm.mb_states = new_array;
 479	mutex_unlock(&vm->hotplug_mutex);
 480
 481	return 0;
 482}
 483
 484#define virtio_mem_sbm_for_each_mb(_vm, _mb_id, _state) \
 485	for (_mb_id = _vm->sbm.first_mb_id; \
 486	     _mb_id < _vm->sbm.next_mb_id && _vm->sbm.mb_count[_state]; \
 487	     _mb_id++) \
 488		if (virtio_mem_sbm_get_mb_state(_vm, _mb_id) == _state)
 489
 490#define virtio_mem_sbm_for_each_mb_rev(_vm, _mb_id, _state) \
 491	for (_mb_id = _vm->sbm.next_mb_id - 1; \
 492	     _mb_id >= _vm->sbm.first_mb_id && _vm->sbm.mb_count[_state]; \
 493	     _mb_id--) \
 494		if (virtio_mem_sbm_get_mb_state(_vm, _mb_id) == _state)
 495
 496/*
 497 * Calculate the bit number in the subblock bitmap for the given subblock
 498 * inside the given memory block.
 499 */
 500static int virtio_mem_sbm_sb_state_bit_nr(struct virtio_mem *vm,
 501					  unsigned long mb_id, int sb_id)
 502{
 503	return (mb_id - vm->sbm.first_mb_id) * vm->sbm.sbs_per_mb + sb_id;
 504}
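/*
 * E.g., with sbs_per_mb == 32, the subblock (mb_id == first_mb_id + 2,
 * sb_id == 5) maps to bit number 2 * 32 + 5 == 69 in sbm.sb_states.
 */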
 505
 506/*
 507 * Mark all selected subblocks plugged.
 508 *
 509 * Will not modify the state of the memory block.
 510 */
 511static void virtio_mem_sbm_set_sb_plugged(struct virtio_mem *vm,
 512					  unsigned long mb_id, int sb_id,
 513					  int count)
 514{
 515	const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);
 516
 517	__bitmap_set(vm->sbm.sb_states, bit, count);
 518}
 519
 520/*
 521 * Mark all selected subblocks unplugged.
 522 *
 523 * Will not modify the state of the memory block.
 524 */
 525static void virtio_mem_sbm_set_sb_unplugged(struct virtio_mem *vm,
 526					    unsigned long mb_id, int sb_id,
 527					    int count)
 528{
 529	const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);
 530
 531	__bitmap_clear(vm->sbm.sb_states, bit, count);
 532}
 533
 534/*
 535 * Test if all selected subblocks are plugged.
 536 */
 537static bool virtio_mem_sbm_test_sb_plugged(struct virtio_mem *vm,
 538					   unsigned long mb_id, int sb_id,
 539					   int count)
 540{
 541	const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);
 542
 543	if (count == 1)
 544		return test_bit(bit, vm->sbm.sb_states);
 545
 546	/* TODO: Helper similar to bitmap_set() */
 547	return find_next_zero_bit(vm->sbm.sb_states, bit + count, bit) >=
 548	       bit + count;
 549}
 550
 551/*
 552 * Test if all selected subblocks are unplugged.
 553 */
 554static bool virtio_mem_sbm_test_sb_unplugged(struct virtio_mem *vm,
 555					     unsigned long mb_id, int sb_id,
 556					     int count)
 557{
 558	const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);
 559
 560	/* TODO: Helper similar to bitmap_set() */
 561	return find_next_bit(vm->sbm.sb_states, bit + count, bit) >=
 562	       bit + count;
 563}
 564
 565/*
 566 * Find the first unplugged subblock. Returns vm->sbm.sbs_per_mb in case there is
 567 * none.
 568 */
 569static int virtio_mem_sbm_first_unplugged_sb(struct virtio_mem *vm,
 570					    unsigned long mb_id)
 571{
 572	const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, 0);
 573
 574	return find_next_zero_bit(vm->sbm.sb_states,
 575				  bit + vm->sbm.sbs_per_mb, bit) - bit;
 576}
 577
 578/*
 579 * Prepare the subblock bitmap for the next memory block.
 580 */
 581static int virtio_mem_sbm_sb_states_prepare_next_mb(struct virtio_mem *vm)
 582{
 583	const unsigned long old_nb_mb = vm->sbm.next_mb_id - vm->sbm.first_mb_id;
 584	const unsigned long old_nb_bits = old_nb_mb * vm->sbm.sbs_per_mb;
 585	const unsigned long new_nb_bits = (old_nb_mb + 1) * vm->sbm.sbs_per_mb;
 586	int old_pages = PFN_UP(BITS_TO_LONGS(old_nb_bits) * sizeof(long));
 587	int new_pages = PFN_UP(BITS_TO_LONGS(new_nb_bits) * sizeof(long));
 588	unsigned long *new_bitmap, *old_bitmap;
 589
 590	if (vm->sbm.sb_states && old_pages == new_pages)
 591		return 0;
 592
 593	new_bitmap = vzalloc(new_pages * PAGE_SIZE);
 594	if (!new_bitmap)
 595		return -ENOMEM;
 596
 597	mutex_lock(&vm->hotplug_mutex);
 598	if (vm->sbm.sb_states)
 599		memcpy(new_bitmap, vm->sbm.sb_states, old_pages * PAGE_SIZE);
 600
 601	old_bitmap = vm->sbm.sb_states;
 602	vm->sbm.sb_states = new_bitmap;
 603	mutex_unlock(&vm->hotplug_mutex);
 604
 605	vfree(old_bitmap);
 606	return 0;
 607}
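/*
 * Sizing check: with sbs_per_mb == 32, one 4 KiB bitmap page provides
 * 4096 * 8 == 32768 bits, covering 1024 memory blocks - i.e., 128 GiB
 * with 128 MiB memory blocks, matching the sbm.sb_states comment above.
 */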
 608
 609/*
 610 * Test if we could add memory without creating too much offline memory -
 611 * to avoid running OOM if memory is getting onlined deferred.
 612 */
 613static bool virtio_mem_could_add_memory(struct virtio_mem *vm, uint64_t size)
 614{
 615	if (WARN_ON_ONCE(size > vm->offline_threshold))
 616		return false;
 617
 618	return atomic64_read(&vm->offline_size) + size <= vm->offline_threshold;
 619}
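/*
 * E.g., with the default 1 GiB offline_threshold and 768 MiB currently
 * offline, adding a 128 MiB memory block is fine (896 MiB <= 1 GiB),
 * while adding a 512 MiB big block is not (1280 MiB > 1 GiB).
 */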
 620
 621/*
 622 * Try adding memory to Linux. Will usually only fail if out of memory.
 623 *
 624 * Must not be called with the vm->hotplug_mutex held (possible deadlock with
 625 * onlining code).
 626 *
 627 * Will not modify the state of memory blocks in virtio-mem.
 628 */
 629static int virtio_mem_add_memory(struct virtio_mem *vm, uint64_t addr,
 630				 uint64_t size)
 631{
 632	int rc;
 633
 634	/*
 635	 * When force-unloading the driver and we still have memory added to
 636	 * Linux, the resource name has to stay.
 637	 */
 638	if (!vm->resource_name) {
 639		vm->resource_name = kstrdup_const("System RAM (virtio_mem)",
 640						  GFP_KERNEL);
 641		if (!vm->resource_name)
 642			return -ENOMEM;
 643	}
 644
 645	dev_dbg(&vm->vdev->dev, "adding memory: 0x%llx - 0x%llx\n", addr,
 646		addr + size - 1);
 647	/* Memory might get onlined immediately. */
 648	atomic64_add(size, &vm->offline_size);
 649	rc = add_memory_driver_managed(vm->mgid, addr, size, vm->resource_name,
 650				       MHP_MERGE_RESOURCE | MHP_NID_IS_MGID);
 651	if (rc) {
 652		atomic64_sub(size, &vm->offline_size);
 653		dev_warn(&vm->vdev->dev, "adding memory failed: %d\n", rc);
 654		/*
 655		 * TODO: Linux MM does not properly clean up yet in all cases
 656		 * where adding of memory failed - especially on -ENOMEM.
 657		 */
 658	}
 659	return rc;
 660}
 661
 662/*
 663 * See virtio_mem_add_memory(): Try adding a single Linux memory block.
 664 */
 665static int virtio_mem_sbm_add_mb(struct virtio_mem *vm, unsigned long mb_id)
 666{
 667	const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
 668	const uint64_t size = memory_block_size_bytes();
 669
 670	return virtio_mem_add_memory(vm, addr, size);
 671}
 672
 673/*
 674 * See virtio_mem_add_memory(): Try adding a big block.
 675 */
 676static int virtio_mem_bbm_add_bb(struct virtio_mem *vm, unsigned long bb_id)
 677{
 678	const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
 679	const uint64_t size = vm->bbm.bb_size;
 680
 681	return virtio_mem_add_memory(vm, addr, size);
 682}
 683
 684/*
 685 * Try removing memory from Linux. Will only fail if memory blocks aren't
 686 * offline.
 687 *
 688 * Must not be called with the vm->hotplug_mutex held (possible deadlock with
 689 * onlining code).
 690 *
 691 * Will not modify the state of memory blocks in virtio-mem.
 692 */
 693static int virtio_mem_remove_memory(struct virtio_mem *vm, uint64_t addr,
 694				    uint64_t size)
 695{
 696	int rc;
 697
 698	dev_dbg(&vm->vdev->dev, "removing memory: 0x%llx - 0x%llx\n", addr,
 699		addr + size - 1);
 700	rc = remove_memory(addr, size);
 701	if (!rc) {
 702		atomic64_sub(size, &vm->offline_size);
 703		/*
 704		 * We might have freed up memory we can now unplug, retry
 705		 * immediately instead of waiting.
 706		 */
 707		virtio_mem_retry(vm);
 708	} else {
 709		dev_dbg(&vm->vdev->dev, "removing memory failed: %d\n", rc);
 710	}
 711	return rc;
 712}
 713
 714/*
 715 * See virtio_mem_remove_memory(): Try removing a single Linux memory block.
 716 */
 717static int virtio_mem_sbm_remove_mb(struct virtio_mem *vm, unsigned long mb_id)
 718{
 719	const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
 720	const uint64_t size = memory_block_size_bytes();
 721
 722	return virtio_mem_remove_memory(vm, addr, size);
 723}
 724
 725/*
 726 * Try offlining and removing memory from Linux.
 727 *
 728 * Must not be called with the vm->hotplug_mutex held (possible deadlock with
 729 * onlining code).
 730 *
 731 * Will not modify the state of memory blocks in virtio-mem.
 732 */
 733static int virtio_mem_offline_and_remove_memory(struct virtio_mem *vm,
 734						uint64_t addr,
 735						uint64_t size)
 736{
 737	int rc;
 738
 739	dev_dbg(&vm->vdev->dev,
 740		"offlining and removing memory: 0x%llx - 0x%llx\n", addr,
 741		addr + size - 1);
 742
 743	rc = offline_and_remove_memory(addr, size);
 744	if (!rc) {
 745		atomic64_sub(size, &vm->offline_size);
 746		/*
 747		 * We might have freed up memory we can now unplug, retry
 748		 * immediately instead of waiting.
 749		 */
 750		virtio_mem_retry(vm);
 751		return 0;
 752	}
 753	dev_dbg(&vm->vdev->dev, "offlining and removing memory failed: %d\n", rc);
 754	/*
 755	 * We don't really expect this to fail, because we fake-offlined all
 756	 * memory already. But it could fail in corner cases.
 757	 */
 758	WARN_ON_ONCE(rc != -ENOMEM && rc != -EBUSY);
 759	return rc == -ENOMEM ? -ENOMEM : -EBUSY;
 760}
 761
 762/*
 763 * See virtio_mem_offline_and_remove_memory(): Try offlining and removing
 764 * a single Linux memory block.
 765 */
 766static int virtio_mem_sbm_offline_and_remove_mb(struct virtio_mem *vm,
 767						unsigned long mb_id)
 768{
 769	const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
 770	const uint64_t size = memory_block_size_bytes();
 771
 772	return virtio_mem_offline_and_remove_memory(vm, addr, size);
 773}
 774
 775/*
 776 * Try (offlining and) removing memory from Linux in case all subblocks are
 777 * unplugged. Can be called on online and offline memory blocks.
 778 *
 779 * May modify the state of memory blocks in virtio-mem.
 780 */
 781static int virtio_mem_sbm_try_remove_unplugged_mb(struct virtio_mem *vm,
 782						  unsigned long mb_id)
 783{
 784	int rc;
 785
 786	/*
 787	 * Once all subblocks of a memory block were unplugged, offline and
 788	 * remove it.
 789	 */
 790	if (!virtio_mem_sbm_test_sb_unplugged(vm, mb_id, 0, vm->sbm.sbs_per_mb))
 791		return 0;
 792
 793	/* offline_and_remove_memory() works for online and offline memory. */
 794	mutex_unlock(&vm->hotplug_mutex);
 795	rc = virtio_mem_sbm_offline_and_remove_mb(vm, mb_id);
 796	mutex_lock(&vm->hotplug_mutex);
 797	if (!rc)
 798		virtio_mem_sbm_set_mb_state(vm, mb_id,
 799					    VIRTIO_MEM_SBM_MB_UNUSED);
 800	return rc;
 801}
 802
 803/*
  804 * See virtio_mem_offline_and_remove_memory(): Try to offline and remove
 805 * all Linux memory blocks covered by the big block.
 806 */
 807static int virtio_mem_bbm_offline_and_remove_bb(struct virtio_mem *vm,
 808						unsigned long bb_id)
 809{
 810	const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
 811	const uint64_t size = vm->bbm.bb_size;
 812
 813	return virtio_mem_offline_and_remove_memory(vm, addr, size);
 814}
 815
 816/*
 817 * Trigger the workqueue so the device can perform its magic.
 818 */
 819static void virtio_mem_retry(struct virtio_mem *vm)
 820{
 821	unsigned long flags;
 822
 823	spin_lock_irqsave(&vm->removal_lock, flags);
 824	if (!vm->removing)
 825		queue_work(system_freezable_wq, &vm->wq);
 826	spin_unlock_irqrestore(&vm->removal_lock, flags);
 827}
 828
 829static int virtio_mem_translate_node_id(struct virtio_mem *vm, uint16_t node_id)
 830{
 831	int node = NUMA_NO_NODE;
 832
 833#if defined(CONFIG_ACPI_NUMA)
 834	if (virtio_has_feature(vm->vdev, VIRTIO_MEM_F_ACPI_PXM))
 835		node = pxm_to_node(node_id);
 836#endif
 837	return node;
 838}
 839
 840/*
 841 * Test if a virtio-mem device overlaps with the given range. Can be called
 842 * from (notifier) callbacks lockless.
 843 */
 844static bool virtio_mem_overlaps_range(struct virtio_mem *vm, uint64_t start,
 845				      uint64_t size)
 846{
 847	return start < vm->addr + vm->region_size && vm->addr < start + size;
 848}
 849
 850/*
 851 * Test if a virtio-mem device contains a given range. Can be called from
 852 * (notifier) callbacks lockless.
 853 */
 854static bool virtio_mem_contains_range(struct virtio_mem *vm, uint64_t start,
 855				      uint64_t size)
 856{
 857	return start >= vm->addr && start + size <= vm->addr + vm->region_size;
 858}
 859
 860static int virtio_mem_sbm_notify_going_online(struct virtio_mem *vm,
 861					      unsigned long mb_id)
 862{
 863	switch (virtio_mem_sbm_get_mb_state(vm, mb_id)) {
 864	case VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL:
 865	case VIRTIO_MEM_SBM_MB_OFFLINE:
 866		return NOTIFY_OK;
 867	default:
 868		break;
 869	}
 870	dev_warn_ratelimited(&vm->vdev->dev,
 871			     "memory block onlining denied\n");
 872	return NOTIFY_BAD;
 873}
 874
 875static void virtio_mem_sbm_notify_offline(struct virtio_mem *vm,
 876					  unsigned long mb_id)
 877{
 878	switch (virtio_mem_sbm_get_mb_state(vm, mb_id)) {
 879	case VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL:
 880	case VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL:
 881		virtio_mem_sbm_set_mb_state(vm, mb_id,
 882					    VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL);
 883		break;
 884	case VIRTIO_MEM_SBM_MB_KERNEL:
 885	case VIRTIO_MEM_SBM_MB_MOVABLE:
 886		virtio_mem_sbm_set_mb_state(vm, mb_id,
 887					    VIRTIO_MEM_SBM_MB_OFFLINE);
 888		break;
 889	default:
 890		BUG();
 891		break;
 892	}
 893}
 894
 895static void virtio_mem_sbm_notify_online(struct virtio_mem *vm,
 896					 unsigned long mb_id,
 897					 unsigned long start_pfn)
 898{
 899	const bool is_movable = is_zone_movable_page(pfn_to_page(start_pfn));
 900	int new_state;
 901
 902	switch (virtio_mem_sbm_get_mb_state(vm, mb_id)) {
 903	case VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL:
 904		new_state = VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL;
 905		if (is_movable)
 906			new_state = VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL;
 907		break;
 908	case VIRTIO_MEM_SBM_MB_OFFLINE:
 909		new_state = VIRTIO_MEM_SBM_MB_KERNEL;
 910		if (is_movable)
 911			new_state = VIRTIO_MEM_SBM_MB_MOVABLE;
 912		break;
 913	default:
 914		BUG();
 915		break;
 916	}
 917	virtio_mem_sbm_set_mb_state(vm, mb_id, new_state);
 918}
 919
 920static void virtio_mem_sbm_notify_going_offline(struct virtio_mem *vm,
 921						unsigned long mb_id)
 922{
 923	const unsigned long nr_pages = PFN_DOWN(vm->sbm.sb_size);
 924	unsigned long pfn;
 925	int sb_id;
 926
 927	for (sb_id = 0; sb_id < vm->sbm.sbs_per_mb; sb_id++) {
 928		if (virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id, 1))
 929			continue;
 930		pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
 931			       sb_id * vm->sbm.sb_size);
 932		virtio_mem_fake_offline_going_offline(pfn, nr_pages);
 933	}
 934}
 935
 936static void virtio_mem_sbm_notify_cancel_offline(struct virtio_mem *vm,
 937						 unsigned long mb_id)
 938{
 939	const unsigned long nr_pages = PFN_DOWN(vm->sbm.sb_size);
 940	unsigned long pfn;
 941	int sb_id;
 942
 943	for (sb_id = 0; sb_id < vm->sbm.sbs_per_mb; sb_id++) {
 944		if (virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id, 1))
 945			continue;
 946		pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
 947			       sb_id * vm->sbm.sb_size);
 948		virtio_mem_fake_offline_cancel_offline(pfn, nr_pages);
 949	}
 950}
 951
 952static void virtio_mem_bbm_notify_going_offline(struct virtio_mem *vm,
 953						unsigned long bb_id,
 954						unsigned long pfn,
 955						unsigned long nr_pages)
 956{
 957	/*
 958	 * When marked as "fake-offline", all online memory of this device block
 959	 * is allocated by us. Otherwise, we don't have any memory allocated.
 960	 */
 961	if (virtio_mem_bbm_get_bb_state(vm, bb_id) !=
 962	    VIRTIO_MEM_BBM_BB_FAKE_OFFLINE)
 963		return;
 964	virtio_mem_fake_offline_going_offline(pfn, nr_pages);
 965}
 966
 967static void virtio_mem_bbm_notify_cancel_offline(struct virtio_mem *vm,
 968						 unsigned long bb_id,
 969						 unsigned long pfn,
 970						 unsigned long nr_pages)
 971{
 972	if (virtio_mem_bbm_get_bb_state(vm, bb_id) !=
 973	    VIRTIO_MEM_BBM_BB_FAKE_OFFLINE)
 974		return;
 975	virtio_mem_fake_offline_cancel_offline(pfn, nr_pages);
 976}
 977
 978/*
 979 * This callback will either be called synchronously from add_memory() or
 980 * asynchronously (e.g., triggered via user space). We have to be careful
 981 * with locking when calling add_memory().
 982 */
 983static int virtio_mem_memory_notifier_cb(struct notifier_block *nb,
 984					 unsigned long action, void *arg)
 985{
 986	struct virtio_mem *vm = container_of(nb, struct virtio_mem,
 987					     memory_notifier);
 988	struct memory_notify *mhp = arg;
 989	const unsigned long start = PFN_PHYS(mhp->start_pfn);
 990	const unsigned long size = PFN_PHYS(mhp->nr_pages);
 991	int rc = NOTIFY_OK;
 992	unsigned long id;
 993
 994	if (!virtio_mem_overlaps_range(vm, start, size))
 995		return NOTIFY_DONE;
 996
 997	if (vm->in_sbm) {
 998		id = virtio_mem_phys_to_mb_id(start);
 999		/*
1000		 * In SBM, we add memory in separate memory blocks - we expect
1001		 * it to be onlined/offlined in the same granularity. Bail out
1002		 * if this ever changes.
1003		 */
1004		if (WARN_ON_ONCE(size != memory_block_size_bytes() ||
1005				 !IS_ALIGNED(start, memory_block_size_bytes())))
1006			return NOTIFY_BAD;
1007	} else {
1008		id = virtio_mem_phys_to_bb_id(vm, start);
1009		/*
1010		 * In BBM, we only care about onlining/offlining happening
1011		 * within a single big block, we don't care about the
1012		 * actual granularity as we don't track individual Linux
1013		 * memory blocks.
1014		 */
1015		if (WARN_ON_ONCE(id != virtio_mem_phys_to_bb_id(vm, start + size - 1)))
1016			return NOTIFY_BAD;
1017	}
1018
1019	/*
1020	 * Avoid circular locking lockdep warnings. We lock the mutex
1021	 * e.g., in MEM_GOING_ONLINE and unlock it in MEM_ONLINE. The
1022	 * blocking_notifier_call_chain() has its own lock, which gets unlocked
1023	 * between both notifier calls and will bail out. False positive.
1024	 */
1025	lockdep_off();
1026
1027	switch (action) {
1028	case MEM_GOING_OFFLINE:
1029		mutex_lock(&vm->hotplug_mutex);
1030		if (vm->removing) {
1031			rc = notifier_from_errno(-EBUSY);
1032			mutex_unlock(&vm->hotplug_mutex);
1033			break;
1034		}
1035		vm->hotplug_active = true;
1036		if (vm->in_sbm)
1037			virtio_mem_sbm_notify_going_offline(vm, id);
1038		else
1039			virtio_mem_bbm_notify_going_offline(vm, id,
1040							    mhp->start_pfn,
1041							    mhp->nr_pages);
1042		break;
1043	case MEM_GOING_ONLINE:
1044		mutex_lock(&vm->hotplug_mutex);
1045		if (vm->removing) {
1046			rc = notifier_from_errno(-EBUSY);
1047			mutex_unlock(&vm->hotplug_mutex);
1048			break;
1049		}
1050		vm->hotplug_active = true;
1051		if (vm->in_sbm)
1052			rc = virtio_mem_sbm_notify_going_online(vm, id);
1053		break;
1054	case MEM_OFFLINE:
1055		if (vm->in_sbm)
1056			virtio_mem_sbm_notify_offline(vm, id);
1057
1058		atomic64_add(size, &vm->offline_size);
1059		/*
1060		 * Trigger the workqueue. Now that we have some offline memory,
1061		 * maybe we can handle pending unplug requests.
1062		 */
1063		if (!unplug_online)
1064			virtio_mem_retry(vm);
1065
1066		vm->hotplug_active = false;
1067		mutex_unlock(&vm->hotplug_mutex);
1068		break;
1069	case MEM_ONLINE:
1070		if (vm->in_sbm)
1071			virtio_mem_sbm_notify_online(vm, id, mhp->start_pfn);
1072
1073		atomic64_sub(size, &vm->offline_size);
1074		/*
1075		 * Start adding more memory once we onlined half of our
1076	 * threshold. Don't trigger if it's possibly due to our action
1077		 * (e.g., us adding memory which gets onlined immediately from
1078		 * the core).
1079		 */
1080		if (!atomic_read(&vm->wq_active) &&
1081		    virtio_mem_could_add_memory(vm, vm->offline_threshold / 2))
1082			virtio_mem_retry(vm);
1083
1084		vm->hotplug_active = false;
1085		mutex_unlock(&vm->hotplug_mutex);
1086		break;
1087	case MEM_CANCEL_OFFLINE:
1088		if (!vm->hotplug_active)
1089			break;
1090		if (vm->in_sbm)
1091			virtio_mem_sbm_notify_cancel_offline(vm, id);
1092		else
1093			virtio_mem_bbm_notify_cancel_offline(vm, id,
1094							     mhp->start_pfn,
1095							     mhp->nr_pages);
1096		vm->hotplug_active = false;
1097		mutex_unlock(&vm->hotplug_mutex);
1098		break;
1099	case MEM_CANCEL_ONLINE:
1100		if (!vm->hotplug_active)
1101			break;
1102		vm->hotplug_active = false;
1103		mutex_unlock(&vm->hotplug_mutex);
1104		break;
1105	default:
1106		break;
1107	}
1108
1109	lockdep_on();
1110
1111	return rc;
1112}
1113
1114/*
1115 * Set a range of pages PG_offline. Remember pages that were never onlined
1116 * (via generic_online_page()) using PageDirty().
1117 */
1118static void virtio_mem_set_fake_offline(unsigned long pfn,
1119					unsigned long nr_pages, bool onlined)
1120{
1121	page_offline_begin();
1122	for (; nr_pages--; pfn++) {
1123		struct page *page = pfn_to_page(pfn);
1124
1125		__SetPageOffline(page);
1126		if (!onlined) {
1127			SetPageDirty(page);
1128			/* FIXME: remove after cleanups */
1129			ClearPageReserved(page);
1130		}
1131	}
1132	page_offline_end();
1133}
1134
1135/*
1136 * Clear PG_offline from a range of pages. If the pages were never onlined,
1137 * (via generic_online_page()), clear PageDirty().
1138 */
1139static void virtio_mem_clear_fake_offline(unsigned long pfn,
1140					  unsigned long nr_pages, bool onlined)
1141{
1142	for (; nr_pages--; pfn++) {
1143		struct page *page = pfn_to_page(pfn);
1144
1145		__ClearPageOffline(page);
1146		if (!onlined)
1147			ClearPageDirty(page);
1148	}
1149}
1150
1151/*
1152 * Release a range of fake-offline pages to the buddy, effectively
1153 * fake-onlining them.
1154 */
1155static void virtio_mem_fake_online(unsigned long pfn, unsigned long nr_pages)
1156{
1157	unsigned long order = MAX_PAGE_ORDER;
1158	unsigned long i;
1159
1160	/*
1161	 * We might get called for ranges that don't cover properly aligned
1162	 * MAX_PAGE_ORDER pages; however, we can only online properly aligned
1163	 * pages with an order of MAX_PAGE_ORDER at maximum.
1164	 */
1165	while (!IS_ALIGNED(pfn | nr_pages, 1 << order))
1166		order--;
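	/*
	 * E.g., with MAX_PAGE_ORDER == 10 and 4 KiB pages, pfn == 0x1200 and
	 * nr_pages == 0x200 leave order == 9: pfn | nr_pages is only aligned
	 * to 1 << 9 pages (2 MiB).
	 */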
1167
1168	for (i = 0; i < nr_pages; i += 1 << order) {
1169		struct page *page = pfn_to_page(pfn + i);
1170
1171		/*
1172		 * If the page is PageDirty(), it was kept fake-offline when
1173		 * onlining the memory block. Otherwise, it was allocated
1174		 * using alloc_contig_range(). All pages in a subblock are
1175		 * alike.
1176		 */
1177		if (PageDirty(page)) {
1178			virtio_mem_clear_fake_offline(pfn + i, 1 << order, false);
1179			generic_online_page(page, order);
1180		} else {
1181			virtio_mem_clear_fake_offline(pfn + i, 1 << order, true);
1182			free_contig_range(pfn + i, 1 << order);
1183			adjust_managed_page_count(page, 1 << order);
1184		}
1185	}
1186}
1187
1188/*
1189 * Try to allocate a range, marking pages fake-offline, effectively
1190 * fake-offlining them.
1191 */
1192static int virtio_mem_fake_offline(struct virtio_mem *vm, unsigned long pfn,
1193				   unsigned long nr_pages)
1194{
1195	const bool is_movable = is_zone_movable_page(pfn_to_page(pfn));
1196	int rc, retry_count;
1197
1198	/*
1199	 * TODO: We want an alloc_contig_range() mode that tries to allocate
1200	 * harder (e.g., dealing with temporarily pinned pages, PCP), especially
1201	 * with ZONE_MOVABLE. So for now, retry a couple of times with
1202	 * ZONE_MOVABLE before giving up - because that zone is supposed to give
1203	 * some guarantees.
1204	 */
1205	for (retry_count = 0; retry_count < 5; retry_count++) {
1206		/*
1207		 * If the config changed, stop immediately and go back to the
1208		 * main loop: avoid trying to keep unplugging if the device
1209		 * might have decided to not remove any more memory.
1210		 */
1211		if (atomic_read(&vm->config_changed))
1212			return -EAGAIN;
1213
1214		rc = alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_MOVABLE,
1215					GFP_KERNEL);
1216		if (rc == -ENOMEM)
1217			/* whoops, out of memory */
1218			return rc;
1219		else if (rc && !is_movable)
1220			break;
1221		else if (rc)
1222			continue;
1223
1224		virtio_mem_set_fake_offline(pfn, nr_pages, true);
1225		adjust_managed_page_count(pfn_to_page(pfn), -nr_pages);
1226		return 0;
1227	}
1228
1229	return -EBUSY;
1230}
1231
1232/*
1233 * Handle fake-offline pages when memory is going offline - such that the
1234 * pages can be skipped by mm-core when offlining.
1235 */
1236static void virtio_mem_fake_offline_going_offline(unsigned long pfn,
1237						  unsigned long nr_pages)
1238{
1239	struct page *page;
1240	unsigned long i;
1241
1242	/*
1243	 * Drop our reference to the pages so the memory can get offlined
1244	 * and add the unplugged pages to the managed page counters (so
1245	 * offlining code can correctly subtract them again).
1246	 */
1247	adjust_managed_page_count(pfn_to_page(pfn), nr_pages);
1248	/* Drop our reference to the pages so the memory can get offlined. */
1249	for (i = 0; i < nr_pages; i++) {
1250		page = pfn_to_page(pfn + i);
1251		if (WARN_ON(!page_ref_dec_and_test(page)))
1252			dump_page(page, "fake-offline page referenced");
1253	}
1254}
1255
1256/*
1257 * Handle fake-offline pages when memory offlining is canceled - to undo
1258 * what we did in virtio_mem_fake_offline_going_offline().
1259 */
1260static void virtio_mem_fake_offline_cancel_offline(unsigned long pfn,
1261						   unsigned long nr_pages)
1262{
1263	unsigned long i;
1264
1265	/*
1266	 * Get the reference we dropped when going offline and subtract the
1267	 * unplugged pages from the managed page counters.
1268	 */
1269	adjust_managed_page_count(pfn_to_page(pfn), -nr_pages);
1270	for (i = 0; i < nr_pages; i++)
1271		page_ref_inc(pfn_to_page(pfn + i));
1272}
1273
1274static void virtio_mem_online_page(struct virtio_mem *vm,
1275				   struct page *page, unsigned int order)
1276{
1277	const unsigned long start = page_to_phys(page);
1278	const unsigned long end = start + PFN_PHYS(1 << order);
1279	unsigned long addr, next, id, sb_id, count;
1280	bool do_online;
1281
1282	/*
1283	 * We can get called with any order up to MAX_PAGE_ORDER. If our subblock
1284	 * size is smaller than that and we have a mixture of plugged and
1285	 * unplugged subblocks within such a page, we have to process in
1286	 * smaller granularity. In that case we'll adjust the order exactly once
1287	 * within the loop.
1288	 */
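	/*
	 * E.g., order == MAX_PAGE_ORDER == 10 (4 MiB with 4 KiB pages) and
	 * sb_size == 2 MiB: a mixed page drops the order exactly once to
	 * ilog2(2 MiB) - PAGE_SHIFT == 9 and reprocesses subblock-wise.
	 */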
1289	for (addr = start; addr < end; ) {
1290		next = addr + PFN_PHYS(1 << order);
1291
1292		if (vm->in_sbm) {
1293			id = virtio_mem_phys_to_mb_id(addr);
1294			sb_id = virtio_mem_phys_to_sb_id(vm, addr);
1295			count = virtio_mem_phys_to_sb_id(vm, next - 1) - sb_id + 1;
1296
1297			if (virtio_mem_sbm_test_sb_plugged(vm, id, sb_id, count)) {
1298				/* Fully plugged. */
1299				do_online = true;
1300			} else if (count == 1 ||
1301				   virtio_mem_sbm_test_sb_unplugged(vm, id, sb_id, count)) {
1302				/* Fully unplugged. */
1303				do_online = false;
1304			} else {
1305				/*
1306				 * Mixture, process sub-blocks instead. This
1307				 * will be at least the size of a pageblock.
1308				 * We'll run into this case exactly once.
1309				 */
1310				order = ilog2(vm->sbm.sb_size) - PAGE_SHIFT;
1311				do_online = virtio_mem_sbm_test_sb_plugged(vm, id, sb_id, 1);
1312				continue;
1313			}
1314		} else {
1315			/*
1316			 * If the whole block is marked fake offline, keep
1317			 * everything that way.
1318			 */
1319			id = virtio_mem_phys_to_bb_id(vm, addr);
1320			do_online = virtio_mem_bbm_get_bb_state(vm, id) !=
1321				    VIRTIO_MEM_BBM_BB_FAKE_OFFLINE;
1322		}
1323
1324		if (do_online)
1325			generic_online_page(pfn_to_page(PFN_DOWN(addr)), order);
1326		else
1327			virtio_mem_set_fake_offline(PFN_DOWN(addr), 1 << order,
1328						    false);
1329		addr = next;
1330	}
1331}
1332
1333static void virtio_mem_online_page_cb(struct page *page, unsigned int order)
1334{
1335	const unsigned long addr = page_to_phys(page);
1336	struct virtio_mem *vm;
1337
1338	rcu_read_lock();
1339	list_for_each_entry_rcu(vm, &virtio_mem_devices, next) {
1340		/*
1341		 * Pages we're onlining will never cross memory blocks and,
1342		 * therefore, not virtio-mem devices.
1343		 */
1344		if (!virtio_mem_contains_range(vm, addr, PFN_PHYS(1 << order)))
1345			continue;
1346
1347		/*
1348		 * virtio_mem_set_fake_offline() might sleep. We can safely
1349		 * drop the RCU lock at this point because the device
1350		 * cannot go away. See virtio_mem_remove() how races
1351		 * between memory onlining and device removal are handled.
1352		 */
1353		rcu_read_unlock();
1354
1355		virtio_mem_online_page(vm, page, order);
1356		return;
1357	}
1358	rcu_read_unlock();
1359
1360	/* Not virtio-mem memory, but e.g., a DIMM. Online it. */
1361	generic_online_page(page, order);
1362}
1363
1364static uint64_t virtio_mem_send_request(struct virtio_mem *vm,
1365					const struct virtio_mem_req *req)
1366{
1367	struct scatterlist *sgs[2], sg_req, sg_resp;
1368	unsigned int len;
1369	int rc;
1370
1371	/* don't use the request residing on the stack (vaddr) */
1372	vm->req = *req;
1373
1374	/* out: buffer for request */
1375	sg_init_one(&sg_req, &vm->req, sizeof(vm->req));
1376	sgs[0] = &sg_req;
1377
1378	/* in: buffer for response */
1379	sg_init_one(&sg_resp, &vm->resp, sizeof(vm->resp));
1380	sgs[1] = &sg_resp;
1381
1382	rc = virtqueue_add_sgs(vm->vq, sgs, 1, 1, vm, GFP_KERNEL);
1383	if (rc < 0)
1384		return rc;
1385
1386	virtqueue_kick(vm->vq);
1387
1388	/* wait for a response */
1389	wait_event(vm->host_resp, virtqueue_get_buf(vm->vq, &len));
1390
1391	return virtio16_to_cpu(vm->vdev, vm->resp.type);
1392}
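/*
 * Protocol recap for the function above: the request goes out in sgs[0],
 * the response buffer is posted in sgs[1], the virtqueue is kicked, and
 * the caller sleeps until virtio_mem_handle_response() wakes vm->host_resp
 * once the host has filled vm->resp.
 */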
1393
1394static int virtio_mem_send_plug_request(struct virtio_mem *vm, uint64_t addr,
1395					uint64_t size)
1396{
1397	const uint64_t nb_vm_blocks = size / vm->device_block_size;
1398	const struct virtio_mem_req req = {
1399		.type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_PLUG),
1400		.u.plug.addr = cpu_to_virtio64(vm->vdev, addr),
1401		.u.plug.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks),
1402	};
1403	int rc = -ENOMEM;
1404
1405	if (atomic_read(&vm->config_changed))
1406		return -EAGAIN;
1407
1408	dev_dbg(&vm->vdev->dev, "plugging memory: 0x%llx - 0x%llx\n", addr,
1409		addr + size - 1);
1410
1411	switch (virtio_mem_send_request(vm, &req)) {
1412	case VIRTIO_MEM_RESP_ACK:
1413		vm->plugged_size += size;
1414		return 0;
1415	case VIRTIO_MEM_RESP_NACK:
1416		rc = -EAGAIN;
1417		break;
1418	case VIRTIO_MEM_RESP_BUSY:
1419		rc = -ETXTBSY;
1420		break;
1421	case VIRTIO_MEM_RESP_ERROR:
1422		rc = -EINVAL;
1423		break;
1424	default:
1425		break;
1426	}
1427
1428	dev_dbg(&vm->vdev->dev, "plugging memory failed: %d\n", rc);
1429	return rc;
1430}
1431
1432static int virtio_mem_send_unplug_request(struct virtio_mem *vm, uint64_t addr,
1433					  uint64_t size)
1434{
1435	const uint64_t nb_vm_blocks = size / vm->device_block_size;
1436	const struct virtio_mem_req req = {
1437		.type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_UNPLUG),
1438		.u.unplug.addr = cpu_to_virtio64(vm->vdev, addr),
1439		.u.unplug.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks),
1440	};
1441	int rc = -ENOMEM;
1442
1443	if (atomic_read(&vm->config_changed))
1444		return -EAGAIN;
1445
1446	dev_dbg(&vm->vdev->dev, "unplugging memory: 0x%llx - 0x%llx\n", addr,
1447		addr + size - 1);
1448
1449	switch (virtio_mem_send_request(vm, &req)) {
1450	case VIRTIO_MEM_RESP_ACK:
1451		vm->plugged_size -= size;
1452		return 0;
1453	case VIRTIO_MEM_RESP_BUSY:
1454		rc = -ETXTBSY;
1455		break;
1456	case VIRTIO_MEM_RESP_ERROR:
1457		rc = -EINVAL;
1458		break;
1459	default:
1460		break;
1461	}
1462
1463	dev_dbg(&vm->vdev->dev, "unplugging memory failed: %d\n", rc);
1464	return rc;
1465}
1466
1467static int virtio_mem_send_unplug_all_request(struct virtio_mem *vm)
1468{
1469	const struct virtio_mem_req req = {
1470		.type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_UNPLUG_ALL),
1471	};
1472	int rc = -ENOMEM;
1473
1474	dev_dbg(&vm->vdev->dev, "unplugging all memory");
1475
1476	switch (virtio_mem_send_request(vm, &req)) {
1477	case VIRTIO_MEM_RESP_ACK:
1478		vm->unplug_all_required = false;
1479		vm->plugged_size = 0;
1480		/* usable region might have shrunk */
1481		atomic_set(&vm->config_changed, 1);
1482		return 0;
1483	case VIRTIO_MEM_RESP_BUSY:
1484		rc = -ETXTBSY;
1485		break;
1486	default:
1487		break;
1488	}
1489
1490	dev_dbg(&vm->vdev->dev, "unplugging all memory failed: %d\n", rc);
1491	return rc;
1492}
1493
1494/*
1495 * Plug selected subblocks. Updates the plugged state, but not the state
1496 * of the memory block.
1497 */
1498static int virtio_mem_sbm_plug_sb(struct virtio_mem *vm, unsigned long mb_id,
1499				  int sb_id, int count)
1500{
1501	const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id) +
1502			      sb_id * vm->sbm.sb_size;
1503	const uint64_t size = count * vm->sbm.sb_size;
1504	int rc;
1505
1506	rc = virtio_mem_send_plug_request(vm, addr, size);
1507	if (!rc)
1508		virtio_mem_sbm_set_sb_plugged(vm, mb_id, sb_id, count);
1509	return rc;
1510}
1511
1512/*
1513 * Unplug selected subblocks. Updates the plugged state, but not the state
1514 * of the memory block.
1515 */
1516static int virtio_mem_sbm_unplug_sb(struct virtio_mem *vm, unsigned long mb_id,
1517				    int sb_id, int count)
1518{
1519	const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id) +
1520			      sb_id * vm->sbm.sb_size;
1521	const uint64_t size = count * vm->sbm.sb_size;
1522	int rc;
1523
1524	rc = virtio_mem_send_unplug_request(vm, addr, size);
1525	if (!rc)
1526		virtio_mem_sbm_set_sb_unplugged(vm, mb_id, sb_id, count);
1527	return rc;
1528}
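/*
 * Worked example for the address math in the two helpers above
 * (illustrative sizes): with 128 MiB Linux memory blocks and
 * sb_size = 4 MiB, (un)plugging sb_id = 2, count = 3 issues a single
 * request for addr = mb_start + 8 MiB and size = 12 MiB.
 */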
1529
1530/*
1531 * Request to unplug a big block.
1532 *
1533 * Will not modify the state of the big block.
1534 */
1535static int virtio_mem_bbm_unplug_bb(struct virtio_mem *vm, unsigned long bb_id)
1536{
1537	const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
1538	const uint64_t size = vm->bbm.bb_size;
1539
1540	return virtio_mem_send_unplug_request(vm, addr, size);
1541}
1542
1543/*
1544 * Request to plug a big block.
1545 *
1546 * Will not modify the state of the big block.
1547 */
1548static int virtio_mem_bbm_plug_bb(struct virtio_mem *vm, unsigned long bb_id)
1549{
1550	const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
1551	const uint64_t size = vm->bbm.bb_size;
1552
1553	return virtio_mem_send_plug_request(vm, addr, size);
1554}
1555
1556/*
1557 * Unplug the desired number of plugged subblocks of an offline or not-added
1558 * memory block. Will fail if any subblock cannot get unplugged (instead of
1559 * skipping it).
1560 *
1561 * Will not modify the state of the memory block.
1562 *
1563 * Note: can fail after some subblocks were unplugged.
1564 */
1565static int virtio_mem_sbm_unplug_any_sb_raw(struct virtio_mem *vm,
1566					    unsigned long mb_id, uint64_t *nb_sb)
1567{
1568	int sb_id, count;
1569	int rc;
1570
1571	sb_id = vm->sbm.sbs_per_mb - 1;
1572	while (*nb_sb) {
1573		/* Find the next candidate subblock */
1574		while (sb_id >= 0 &&
1575		       virtio_mem_sbm_test_sb_unplugged(vm, mb_id, sb_id, 1))
1576			sb_id--;
1577		if (sb_id < 0)
1578			break;
1579		/* Try to unplug multiple subblocks at a time */
1580		count = 1;
1581		while (count < *nb_sb && sb_id > 0 &&
1582		       virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id - 1, 1)) {
1583			count++;
1584			sb_id--;
1585		}
1586
1587		rc = virtio_mem_sbm_unplug_sb(vm, mb_id, sb_id, count);
1588		if (rc)
1589			return rc;
1590		*nb_sb -= count;
1591		sb_id--;
1592	}
1593
1594	return 0;
1595}
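/*
 * Illustrative trace of the loop above: with sbs_per_mb = 8, subblocks
 * 2-3 and 5-7 plugged and *nb_sb = 4, the first pass unplugs the run
 * of subblocks 5-7 (count = 3), the second pass unplugs subblock 3
 * (count = 1, limited by the remaining *nb_sb), leaving subblock 2
 * plugged and *nb_sb = 0.
 */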
1596
1597/*
1598 * Unplug all plugged subblocks of an offline or not-added memory block.
1599 *
1600 * Will not modify the state of the memory block.
1601 *
1602 * Note: can fail after some subblocks were unplugged.
1603 */
1604static int virtio_mem_sbm_unplug_mb(struct virtio_mem *vm, unsigned long mb_id)
1605{
1606	uint64_t nb_sb = vm->sbm.sbs_per_mb;
1607
1608	return virtio_mem_sbm_unplug_any_sb_raw(vm, mb_id, &nb_sb);
1609}
1610
1611/*
1612 * Prepare tracking data for the next memory block.
1613 */
1614static int virtio_mem_sbm_prepare_next_mb(struct virtio_mem *vm,
1615					  unsigned long *mb_id)
1616{
1617	int rc;
1618
1619	if (vm->sbm.next_mb_id > vm->sbm.last_usable_mb_id)
1620		return -ENOSPC;
1621
1622	/* Resize the state array if required. */
1623	rc = virtio_mem_sbm_mb_states_prepare_next_mb(vm);
1624	if (rc)
1625		return rc;
1626
1627	/* Resize the subblock bitmap if required. */
1628	rc = virtio_mem_sbm_sb_states_prepare_next_mb(vm);
1629	if (rc)
1630		return rc;
1631
1632	vm->sbm.mb_count[VIRTIO_MEM_SBM_MB_UNUSED]++;
1633	*mb_id = vm->sbm.next_mb_id++;
1634	return 0;
1635}
1636
1637/*
1638 * Try to plug the desired number of subblocks and add the memory block
1639 * to Linux.
1640 *
1641 * Will modify the state of the memory block.
1642 */
1643static int virtio_mem_sbm_plug_and_add_mb(struct virtio_mem *vm,
1644					  unsigned long mb_id, uint64_t *nb_sb)
1645{
1646	const int count = min_t(int, *nb_sb, vm->sbm.sbs_per_mb);
1647	int rc;
1648
1649	if (WARN_ON_ONCE(!count))
1650		return -EINVAL;
1651
1652	/*
1653	 * Plug the requested number of subblocks before adding it to linux,
1654	 * so that onlining will directly online all plugged subblocks.
1655	 */
1656	rc = virtio_mem_sbm_plug_sb(vm, mb_id, 0, count);
1657	if (rc)
1658		return rc;
1659
1660	/*
1661	 * Mark the block properly offline before adding it to Linux,
1662	 * so the memory notifiers will find the block in the right state.
1663	 */
1664	if (count == vm->sbm.sbs_per_mb)
1665		virtio_mem_sbm_set_mb_state(vm, mb_id,
1666					    VIRTIO_MEM_SBM_MB_OFFLINE);
1667	else
1668		virtio_mem_sbm_set_mb_state(vm, mb_id,
1669					    VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL);
1670
1671	/* Add the memory block to linux - if that fails, try to unplug. */
1672	rc = virtio_mem_sbm_add_mb(vm, mb_id);
1673	if (rc) {
1674		int new_state = VIRTIO_MEM_SBM_MB_UNUSED;
1675
1676		if (virtio_mem_sbm_unplug_sb(vm, mb_id, 0, count))
1677			new_state = VIRTIO_MEM_SBM_MB_PLUGGED;
1678		virtio_mem_sbm_set_mb_state(vm, mb_id, new_state);
1679		return rc;
1680	}
1681
1682	*nb_sb -= count;
1683	return 0;
1684}
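/*
 * State transitions performed above: UNUSED -> OFFLINE (fully plugged)
 * or OFFLINE_PARTIAL, and on a failing add_memory() back to UNUSED if
 * the subblocks could be unplugged again, else PLUGGED so the main
 * loop retries the unplug later (see virtio_mem_cleanup_pending_mb()).
 */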
1685
1686/*
1687 * Try to plug the desired number of subblocks of a memory block that
1688 * is already added to Linux.
1689 *
1690 * Will modify the state of the memory block.
1691 *
1692 * Note: Can fail after some subblocks were successfully plugged.
1693 */
1694static int virtio_mem_sbm_plug_any_sb(struct virtio_mem *vm,
1695				      unsigned long mb_id, uint64_t *nb_sb)
1696{
1697	const int old_state = virtio_mem_sbm_get_mb_state(vm, mb_id);
1698	unsigned long pfn, nr_pages;
1699	int sb_id, count;
1700	int rc;
1701
1702	if (WARN_ON_ONCE(!*nb_sb))
1703		return -EINVAL;
1704
1705	while (*nb_sb) {
1706		sb_id = virtio_mem_sbm_first_unplugged_sb(vm, mb_id);
1707		if (sb_id >= vm->sbm.sbs_per_mb)
1708			break;
1709		count = 1;
1710		while (count < *nb_sb &&
1711		       sb_id + count < vm->sbm.sbs_per_mb &&
1712		       !virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id + count, 1))
1713			count++;
1714
1715		rc = virtio_mem_sbm_plug_sb(vm, mb_id, sb_id, count);
1716		if (rc)
1717			return rc;
1718		*nb_sb -= count;
1719		if (old_state == VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL)
1720			continue;
1721
1722		/* fake-online the pages if the memory block is online */
1723		pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
1724			       sb_id * vm->sbm.sb_size);
1725		nr_pages = PFN_DOWN(count * vm->sbm.sb_size);
1726		virtio_mem_fake_online(pfn, nr_pages);
1727	}
1728
1729	if (virtio_mem_sbm_test_sb_plugged(vm, mb_id, 0, vm->sbm.sbs_per_mb))
1730		virtio_mem_sbm_set_mb_state(vm, mb_id, old_state - 1);
1731
1732	return 0;
1733}
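/*
 * Note on "old_state - 1" above: it relies on the state enum placing
 * each fully plugged state (e.g., VIRTIO_MEM_SBM_MB_KERNEL) directly
 * before its _PARTIAL variant, so a now fully plugged block simply
 * drops the _PARTIAL suffix.
 */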
1734
1735static int virtio_mem_sbm_plug_request(struct virtio_mem *vm, uint64_t diff)
1736{
1737	const int mb_states[] = {
1738		VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL,
1739		VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL,
1740		VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL,
1741	};
1742	uint64_t nb_sb = diff / vm->sbm.sb_size;
1743	unsigned long mb_id;
1744	int rc, i;
1745
1746	if (!nb_sb)
1747		return 0;
1748
1749	/* Don't race with onlining/offlining */
1750	mutex_lock(&vm->hotplug_mutex);
1751
1752	for (i = 0; i < ARRAY_SIZE(mb_states); i++) {
1753		virtio_mem_sbm_for_each_mb(vm, mb_id, mb_states[i]) {
1754			rc = virtio_mem_sbm_plug_any_sb(vm, mb_id, &nb_sb);
1755			if (rc || !nb_sb)
1756				goto out_unlock;
1757			cond_resched();
1758		}
1759	}
1760
1761	/*
1762	 * We won't be working on online/offline memory blocks from this point,
1763	 * so we can't race with memory onlining/offlining. Drop the mutex.
1764	 */
1765	mutex_unlock(&vm->hotplug_mutex);
1766
1767	/* Try to plug and add unused blocks */
1768	virtio_mem_sbm_for_each_mb(vm, mb_id, VIRTIO_MEM_SBM_MB_UNUSED) {
1769		if (!virtio_mem_could_add_memory(vm, memory_block_size_bytes()))
1770			return -ENOSPC;
1771
1772		rc = virtio_mem_sbm_plug_and_add_mb(vm, mb_id, &nb_sb);
1773		if (rc || !nb_sb)
1774			return rc;
1775		cond_resched();
1776	}
1777
1778	/* Try to prepare, plug and add new blocks */
1779	while (nb_sb) {
1780		if (!virtio_mem_could_add_memory(vm, memory_block_size_bytes()))
1781			return -ENOSPC;
1782
1783		rc = virtio_mem_sbm_prepare_next_mb(vm, &mb_id);
1784		if (rc)
1785			return rc;
1786		rc = virtio_mem_sbm_plug_and_add_mb(vm, mb_id, &nb_sb);
1787		if (rc)
1788			return rc;
1789		cond_resched();
1790	}
1791
1792	return 0;
1793out_unlock:
1794	mutex_unlock(&vm->hotplug_mutex);
1795	return rc;
1796}
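/*
 * Plugging thus proceeds in three stages: fill partially plugged
 * blocks first (online before offline ones), then plug and add blocks
 * already tracked as UNUSED, and finally grow the tracking data to
 * prepare brand-new memory blocks until the request is satisfied or
 * we run out of usable blocks (-ENOSPC).
 */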
1797
1798/*
1799 * Plug a big block and add it to Linux.
1800 *
1801 * Will modify the state of the big block.
1802 */
1803static int virtio_mem_bbm_plug_and_add_bb(struct virtio_mem *vm,
1804					  unsigned long bb_id)
1805{
1806	int rc;
1807
1808	if (WARN_ON_ONCE(virtio_mem_bbm_get_bb_state(vm, bb_id) !=
1809			 VIRTIO_MEM_BBM_BB_UNUSED))
1810		return -EINVAL;
1811
1812	rc = virtio_mem_bbm_plug_bb(vm, bb_id);
1813	if (rc)
1814		return rc;
1815	virtio_mem_bbm_set_bb_state(vm, bb_id, VIRTIO_MEM_BBM_BB_ADDED);
1816
1817	rc = virtio_mem_bbm_add_bb(vm, bb_id);
1818	if (rc) {
1819		if (!virtio_mem_bbm_unplug_bb(vm, bb_id))
1820			virtio_mem_bbm_set_bb_state(vm, bb_id,
1821						    VIRTIO_MEM_BBM_BB_UNUSED);
1822		else
1823			/* Retry from the main loop. */
1824			virtio_mem_bbm_set_bb_state(vm, bb_id,
1825						    VIRTIO_MEM_BBM_BB_PLUGGED);
1826		return rc;
1827	}
1828	return 0;
1829}
1830
1831/*
1832 * Prepare tracking data for the next big block.
1833 */
1834static int virtio_mem_bbm_prepare_next_bb(struct virtio_mem *vm,
1835					  unsigned long *bb_id)
1836{
1837	int rc;
1838
1839	if (vm->bbm.next_bb_id > vm->bbm.last_usable_bb_id)
1840		return -ENOSPC;
1841
1842	/* Resize the big block state array if required. */
1843	rc = virtio_mem_bbm_bb_states_prepare_next_bb(vm);
1844	if (rc)
1845		return rc;
1846
1847	vm->bbm.bb_count[VIRTIO_MEM_BBM_BB_UNUSED]++;
1848	*bb_id = vm->bbm.next_bb_id;
1849	vm->bbm.next_bb_id++;
1850	return 0;
1851}
1852
1853static int virtio_mem_bbm_plug_request(struct virtio_mem *vm, uint64_t diff)
1854{
1855	uint64_t nb_bb = diff / vm->bbm.bb_size;
1856	unsigned long bb_id;
1857	int rc;
1858
1859	if (!nb_bb)
1860		return 0;
1861
1862	/* Try to plug and add unused big blocks */
1863	virtio_mem_bbm_for_each_bb(vm, bb_id, VIRTIO_MEM_BBM_BB_UNUSED) {
1864		if (!virtio_mem_could_add_memory(vm, vm->bbm.bb_size))
1865			return -ENOSPC;
1866
1867		rc = virtio_mem_bbm_plug_and_add_bb(vm, bb_id);
1868		if (!rc)
1869			nb_bb--;
1870		if (rc || !nb_bb)
1871			return rc;
1872		cond_resched();
1873	}
1874
1875	/* Try to prepare, plug and add new big blocks */
1876	while (nb_bb) {
1877		if (!virtio_mem_could_add_memory(vm, vm->bbm.bb_size))
1878			return -ENOSPC;
1879
1880		rc = virtio_mem_bbm_prepare_next_bb(vm, &bb_id);
1881		if (rc)
1882			return rc;
1883		rc = virtio_mem_bbm_plug_and_add_bb(vm, bb_id);
1884		if (!rc)
1885			nb_bb--;
1886		if (rc)
1887			return rc;
1888		cond_resched();
1889	}
1890
1891	return 0;
1892}
1893
1894/*
1895 * Try to plug the requested amount of memory.
1896 */
1897static int virtio_mem_plug_request(struct virtio_mem *vm, uint64_t diff)
1898{
1899	if (vm->in_sbm)
1900		return virtio_mem_sbm_plug_request(vm, diff);
1901	return virtio_mem_bbm_plug_request(vm, diff);
1902}
1903
1904/*
1905 * Unplug the desired number of plugged subblocks of an offline memory block.
1906 * Will fail if any subblock cannot get unplugged (instead of skipping it).
1907 *
1908 * Will modify the state of the memory block. Might temporarily drop the
1909 * hotplug_mutex.
1910 *
1911 * Note: Can fail after some subblocks were successfully unplugged.
1912 */
1913static int virtio_mem_sbm_unplug_any_sb_offline(struct virtio_mem *vm,
1914						unsigned long mb_id,
1915						uint64_t *nb_sb)
1916{
1917	int rc;
1918
1919	rc = virtio_mem_sbm_unplug_any_sb_raw(vm, mb_id, nb_sb);
1920
1921	/* some subblocks might have been unplugged even on failure */
1922	if (!virtio_mem_sbm_test_sb_plugged(vm, mb_id, 0, vm->sbm.sbs_per_mb))
1923		virtio_mem_sbm_set_mb_state(vm, mb_id,
1924					    VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL);
1925	if (rc)
1926		return rc;
1927
1928	if (virtio_mem_sbm_test_sb_unplugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) {
1929		/*
1930		 * Remove the block from Linux - this should never fail.
1931		 * Hinder the block from getting onlined by marking it
1932		 * unplugged. Temporarily drop the mutex, so
1933		 * any pending GOING_ONLINE requests can be serviced/rejected.
1934		 */
1935		virtio_mem_sbm_set_mb_state(vm, mb_id,
1936					    VIRTIO_MEM_SBM_MB_UNUSED);
1937
1938		mutex_unlock(&vm->hotplug_mutex);
1939		rc = virtio_mem_sbm_remove_mb(vm, mb_id);
1940		BUG_ON(rc);
1941		mutex_lock(&vm->hotplug_mutex);
1942	}
1943	return 0;
1944}
1945
1946/*
1947 * Unplug the given plugged subblocks of an online memory block.
1948 *
1949 * Will modify the state of the memory block.
1950 */
1951static int virtio_mem_sbm_unplug_sb_online(struct virtio_mem *vm,
1952					   unsigned long mb_id, int sb_id,
1953					   int count)
1954{
1955	const unsigned long nr_pages = PFN_DOWN(vm->sbm.sb_size) * count;
1956	const int old_state = virtio_mem_sbm_get_mb_state(vm, mb_id);
1957	unsigned long start_pfn;
1958	int rc;
1959
1960	start_pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
1961			     sb_id * vm->sbm.sb_size);
1962
1963	rc = virtio_mem_fake_offline(vm, start_pfn, nr_pages);
1964	if (rc)
1965		return rc;
1966
1967	/* Try to unplug the allocated memory */
1968	rc = virtio_mem_sbm_unplug_sb(vm, mb_id, sb_id, count);
1969	if (rc) {
1970		/* Return the memory to the buddy. */
1971		virtio_mem_fake_online(start_pfn, nr_pages);
1972		return rc;
1973	}
1974
1975	switch (old_state) {
1976	case VIRTIO_MEM_SBM_MB_KERNEL:
1977		virtio_mem_sbm_set_mb_state(vm, mb_id,
1978					    VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL);
1979		break;
1980	case VIRTIO_MEM_SBM_MB_MOVABLE:
1981		virtio_mem_sbm_set_mb_state(vm, mb_id,
1982					    VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL);
1983		break;
1984	}
1985
1986	return 0;
1987}
1988
1989/*
1990 * Unplug the desired number of plugged subblocks of an online memory block.
1991 * Will skip subblocks that are busy.
1992 *
1993 * Will modify the state of the memory block. Might temporarily drop the
1994 * hotplug_mutex.
1995 *
1996 * Note: Can fail after some subblocks were successfully unplugged. Can
1997 *       return 0 even if subblocks were busy and could not get unplugged.
1998 */
1999static int virtio_mem_sbm_unplug_any_sb_online(struct virtio_mem *vm,
2000					       unsigned long mb_id,
2001					       uint64_t *nb_sb)
2002{
2003	int rc, sb_id;
2004
2005	/* If possible, try to unplug the complete block in one shot. */
2006	if (*nb_sb >= vm->sbm.sbs_per_mb &&
2007	    virtio_mem_sbm_test_sb_plugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) {
2008		rc = virtio_mem_sbm_unplug_sb_online(vm, mb_id, 0,
2009						     vm->sbm.sbs_per_mb);
2010		if (!rc) {
2011			*nb_sb -= vm->sbm.sbs_per_mb;
2012			goto unplugged;
2013		} else if (rc != -EBUSY)
2014			return rc;
2015	}
2016
2017	/* Fall back to single subblocks. */
2018	for (sb_id = vm->sbm.sbs_per_mb - 1; sb_id >= 0 && *nb_sb; sb_id--) {
2019		/* Find the next candidate subblock */
2020		while (sb_id >= 0 &&
2021		       !virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id, 1))
2022			sb_id--;
2023		if (sb_id < 0)
2024			break;
2025
2026		rc = virtio_mem_sbm_unplug_sb_online(vm, mb_id, sb_id, 1);
2027		if (rc == -EBUSY)
2028			continue;
2029		else if (rc)
2030			return rc;
2031		*nb_sb -= 1;
2032	}
2033
2034unplugged:
2035	rc = virtio_mem_sbm_try_remove_unplugged_mb(vm, mb_id);
2036	if (rc)
2037		vm->sbm.have_unplugged_mb = 1;
2038	/* Ignore errors, this is not critical. We'll retry later. */
2039	return 0;
2040}
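/*
 * -EBUSY from virtio_mem_sbm_unplug_sb_online() indicates the pages
 * could not be fake-offlined right now (still in use); such subblocks
 * are simply skipped, so a single busy page cannot stall the whole
 * unplug request.
 */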
2041
2042/*
2043 * Unplug the desired number of plugged subblocks of a memory block that is
2044 * already added to Linux. Will skip subblocks of online memory blocks that are
2045 * busy (by the OS). Will fail if any subblock that's not busy cannot get
2046 * unplugged.
2047 *
2048 * Will modify the state of the memory block. Might temporarily drop the
2049 * hotplug_mutex.
2050 *
2051 * Note: Can fail after some subblocks were successfully unplugged. Can
2052 *       return 0 even if subblocks were busy and could not get unplugged.
2053 */
2054static int virtio_mem_sbm_unplug_any_sb(struct virtio_mem *vm,
2055					unsigned long mb_id,
2056					uint64_t *nb_sb)
2057{
2058	const int old_state = virtio_mem_sbm_get_mb_state(vm, mb_id);
2059
2060	switch (old_state) {
2061	case VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL:
2062	case VIRTIO_MEM_SBM_MB_KERNEL:
2063	case VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL:
2064	case VIRTIO_MEM_SBM_MB_MOVABLE:
2065		return virtio_mem_sbm_unplug_any_sb_online(vm, mb_id, nb_sb);
2066	case VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL:
2067	case VIRTIO_MEM_SBM_MB_OFFLINE:
2068		return virtio_mem_sbm_unplug_any_sb_offline(vm, mb_id, nb_sb);
2069	}
2070	return -EINVAL;
2071}
2072
2073static int virtio_mem_sbm_unplug_request(struct virtio_mem *vm, uint64_t diff)
2074{
2075	const int mb_states[] = {
2076		VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL,
2077		VIRTIO_MEM_SBM_MB_OFFLINE,
2078		VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL,
2079		VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL,
2080		VIRTIO_MEM_SBM_MB_MOVABLE,
2081		VIRTIO_MEM_SBM_MB_KERNEL,
2082	};
2083	uint64_t nb_sb = diff / vm->sbm.sb_size;
2084	unsigned long mb_id;
2085	int rc, i;
2086
2087	if (!nb_sb)
2088		return 0;
2089
2090	/*
2091	 * We'll drop the mutex a couple of times when it is safe to do so.
2092	 * This might result in some blocks switching the state (online/offline)
2093	 * and we could miss them in this run - we will retry again later.
2094	 */
2095	mutex_lock(&vm->hotplug_mutex);
2096
2097	/*
2098	 * We try to unplug from partially plugged blocks first, to try removing
2099	 * whole memory blocks along with metadata. We prioritize ZONE_MOVABLE
2100	 * as it's more reliable to unplug memory and remove whole memory
2101	 * blocks, and we don't want to trigger zone imbalances by
2102	 * accidentally removing too much kernel memory.
2103	 */
2104	for (i = 0; i < ARRAY_SIZE(mb_states); i++) {
2105		virtio_mem_sbm_for_each_mb_rev(vm, mb_id, mb_states[i]) {
2106			rc = virtio_mem_sbm_unplug_any_sb(vm, mb_id, &nb_sb);
2107			if (rc || !nb_sb)
2108				goto out_unlock;
2109			mutex_unlock(&vm->hotplug_mutex);
2110			cond_resched();
2111			mutex_lock(&vm->hotplug_mutex);
2112		}
2113		if (!unplug_online && i == 1) {
2114			mutex_unlock(&vm->hotplug_mutex);
2115			return 0;
2116		}
2117	}
2118
2119	mutex_unlock(&vm->hotplug_mutex);
2120	return nb_sb ? -EBUSY : 0;
2121out_unlock:
2122	mutex_unlock(&vm->hotplug_mutex);
2123	return rc;
2124}
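/*
 * The first two mb_states[] entries cover offline blocks, so the
 * "i == 1" check above makes unplug_online=0 stop after the offline
 * passes without ever touching online memory.
 */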
2125
2126/*
2127 * Try to offline and remove a big block from Linux and unplug it. Will fail
2128 * with -EBUSY if some memory is busy and cannot get unplugged.
2129 *
2130 * Will modify the state of the memory block. Might temporarily drop the
2131 * hotplug_mutex.
2132 */
2133static int virtio_mem_bbm_offline_remove_and_unplug_bb(struct virtio_mem *vm,
2134						       unsigned long bb_id)
2135{
2136	const unsigned long start_pfn = PFN_DOWN(virtio_mem_bb_id_to_phys(vm, bb_id));
2137	const unsigned long nr_pages = PFN_DOWN(vm->bbm.bb_size);
2138	unsigned long end_pfn = start_pfn + nr_pages;
2139	unsigned long pfn;
2140	struct page *page;
2141	int rc;
2142
2143	if (WARN_ON_ONCE(virtio_mem_bbm_get_bb_state(vm, bb_id) !=
2144			 VIRTIO_MEM_BBM_BB_ADDED))
2145		return -EINVAL;
2146
2147	/*
2148	 * Start by fake-offlining all memory. Once we've marked the device
2149	 * block as fake-offline, all newly onlined memory will
2150	 * automatically be kept fake-offline. Protect from concurrent
2151	 * onlining/offlining until we have a consistent state.
2152	 */
2153	mutex_lock(&vm->hotplug_mutex);
2154	virtio_mem_bbm_set_bb_state(vm, bb_id, VIRTIO_MEM_BBM_BB_FAKE_OFFLINE);
2155
2156	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
2157		page = pfn_to_online_page(pfn);
2158		if (!page)
2159			continue;
2160
2161		rc = virtio_mem_fake_offline(vm, pfn, PAGES_PER_SECTION);
2162		if (rc) {
2163			end_pfn = pfn;
2164			goto rollback;
2165		}
2166	}
2167	mutex_unlock(&vm->hotplug_mutex);
2168
2169	rc = virtio_mem_bbm_offline_and_remove_bb(vm, bb_id);
2170	if (rc) {
2171		mutex_lock(&vm->hotplug_mutex);
2172		goto rollback;
2173	}
2174
2175	rc = virtio_mem_bbm_unplug_bb(vm, bb_id);
2176	if (rc)
2177		virtio_mem_bbm_set_bb_state(vm, bb_id,
2178					    VIRTIO_MEM_BBM_BB_PLUGGED);
2179	else
2180		virtio_mem_bbm_set_bb_state(vm, bb_id,
2181					    VIRTIO_MEM_BBM_BB_UNUSED);
2182	return rc;
2183
2184rollback:
2185	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
2186		page = pfn_to_online_page(pfn);
2187		if (!page)
2188			continue;
2189		virtio_mem_fake_online(pfn, PAGES_PER_SECTION);
2190	}
2191	virtio_mem_bbm_set_bb_state(vm, bb_id, VIRTIO_MEM_BBM_BB_ADDED);
2192	mutex_unlock(&vm->hotplug_mutex);
2193	return rc;
2194}
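/*
 * Note on the rollback path above: when fake-offlining fails mid-way,
 * end_pfn is clamped to the failing section, so only sections that
 * were actually fake-offlined get fake-onlined again; sections without
 * online pages are skipped in both directions.
 */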
2195
2196/*
2197 * Test if a big block is completely offline.
2198 */
2199static bool virtio_mem_bbm_bb_is_offline(struct virtio_mem *vm,
2200					 unsigned long bb_id)
2201{
2202	const unsigned long start_pfn = PFN_DOWN(virtio_mem_bb_id_to_phys(vm, bb_id));
2203	const unsigned long nr_pages = PFN_DOWN(vm->bbm.bb_size);
2204	unsigned long pfn;
2205
2206	for (pfn = start_pfn; pfn < start_pfn + nr_pages;
2207	     pfn += PAGES_PER_SECTION) {
2208		if (pfn_to_online_page(pfn))
2209			return false;
2210	}
2211
2212	return true;
2213}
2214
2215/*
2216 * Test if a big block is completely onlined to ZONE_MOVABLE (or offline).
2217 */
2218static bool virtio_mem_bbm_bb_is_movable(struct virtio_mem *vm,
2219					 unsigned long bb_id)
2220{
2221	const unsigned long start_pfn = PFN_DOWN(virtio_mem_bb_id_to_phys(vm, bb_id));
2222	const unsigned long nr_pages = PFN_DOWN(vm->bbm.bb_size);
2223	struct page *page;
2224	unsigned long pfn;
2225
2226	for (pfn = start_pfn; pfn < start_pfn + nr_pages;
2227	     pfn += PAGES_PER_SECTION) {
2228		page = pfn_to_online_page(pfn);
2229		if (!page)
2230			continue;
2231		if (page_zonenum(page) != ZONE_MOVABLE)
2232			return false;
2233	}
2234
2235	return true;
2236}
2237
2238static int virtio_mem_bbm_unplug_request(struct virtio_mem *vm, uint64_t diff)
2239{
2240	uint64_t nb_bb = diff / vm->bbm.bb_size;
2241	uint64_t bb_id;
2242	int rc, i;
2243
2244	if (!nb_bb)
2245		return 0;
2246
2247	/*
2248	 * Try to unplug big blocks. Similar to SBM, start with offline
2249	 * big blocks.
2250	 */
2251	for (i = 0; i < 3; i++) {
2252		virtio_mem_bbm_for_each_bb_rev(vm, bb_id, VIRTIO_MEM_BBM_BB_ADDED) {
2253			cond_resched();
2254
2255			/*
2256			 * As we're holding no locks, these checks are racy,
2257			 * but we don't care.
2258			 */
2259			if (i == 0 && !virtio_mem_bbm_bb_is_offline(vm, bb_id))
2260				continue;
2261			if (i == 1 && !virtio_mem_bbm_bb_is_movable(vm, bb_id))
2262				continue;
2263			rc = virtio_mem_bbm_offline_remove_and_unplug_bb(vm, bb_id);
2264			if (rc == -EBUSY)
2265				continue;
2266			if (!rc)
2267				nb_bb--;
2268			if (rc || !nb_bb)
2269				return rc;
2270		}
2271		if (i == 0 && !unplug_online)
2272			return 0;
2273	}
2274
2275	return nb_bb ? -EBUSY : 0;
2276}
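/*
 * The three passes above mirror the SBM priorities: pass 0 handles
 * fully offline big blocks, pass 1 blocks onlined to ZONE_MOVABLE,
 * and pass 2 everything else; with unplug_online=0 we stop after
 * pass 0.
 */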
2277
2278/*
2279 * Try to unplug the requested amount of memory.
2280 */
2281static int virtio_mem_unplug_request(struct virtio_mem *vm, uint64_t diff)
2282{
2283	if (vm->in_sbm)
2284		return virtio_mem_sbm_unplug_request(vm, diff);
2285	return virtio_mem_bbm_unplug_request(vm, diff);
2286}
2287
2288/*
2289 * Try to unplug all blocks that couldn't be unplugged before, for example,
2290 * because the hypervisor was busy. Further, offline and remove any memory
2291 * blocks where we previously failed.
2292 */
2293static int virtio_mem_cleanup_pending_mb(struct virtio_mem *vm)
2294{
2295	unsigned long id;
2296	int rc = 0;
2297
2298	if (!vm->in_sbm) {
2299		virtio_mem_bbm_for_each_bb(vm, id,
2300					   VIRTIO_MEM_BBM_BB_PLUGGED) {
2301			rc = virtio_mem_bbm_unplug_bb(vm, id);
2302			if (rc)
2303				return rc;
2304			virtio_mem_bbm_set_bb_state(vm, id,
2305						    VIRTIO_MEM_BBM_BB_UNUSED);
2306		}
2307		return 0;
2308	}
2309
2310	virtio_mem_sbm_for_each_mb(vm, id, VIRTIO_MEM_SBM_MB_PLUGGED) {
2311		rc = virtio_mem_sbm_unplug_mb(vm, id);
2312		if (rc)
2313			return rc;
2314		virtio_mem_sbm_set_mb_state(vm, id,
2315					    VIRTIO_MEM_SBM_MB_UNUSED);
2316	}
2317
2318	if (!vm->sbm.have_unplugged_mb)
2319		return 0;
2320
2321	/*
2322	 * Let's retry (offlining and) removing completely unplugged Linux
2323	 * memory blocks.
2324	 */
2325	vm->sbm.have_unplugged_mb = false;
2326
2327	mutex_lock(&vm->hotplug_mutex);
2328	virtio_mem_sbm_for_each_mb(vm, id, VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL)
2329		rc |= virtio_mem_sbm_try_remove_unplugged_mb(vm, id);
2330	virtio_mem_sbm_for_each_mb(vm, id, VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL)
2331		rc |= virtio_mem_sbm_try_remove_unplugged_mb(vm, id);
2332	virtio_mem_sbm_for_each_mb(vm, id, VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL)
2333		rc |= virtio_mem_sbm_try_remove_unplugged_mb(vm, id);
2334	mutex_unlock(&vm->hotplug_mutex);
2335
2336	if (rc)
2337		vm->sbm.have_unplugged_mb = true;
2338	/* Ignore errors, this is not critical. We'll retry later. */
2339	return 0;
2340}
2341
2342/*
2343 * Update all parts of the config that could have changed.
2344 */
2345static void virtio_mem_refresh_config(struct virtio_mem *vm)
2346{
2347	const struct range pluggable_range = mhp_get_pluggable_range(true);
2348	uint64_t new_plugged_size, usable_region_size, end_addr;
2349
2350	/* the plugged_size is just a reflection of what _we_ did previously */
2351	virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
2352			&new_plugged_size);
2353	if (WARN_ON_ONCE(new_plugged_size != vm->plugged_size))
2354		vm->plugged_size = new_plugged_size;
2355
2356	/* calculate the last usable memory block id */
2357	virtio_cread_le(vm->vdev, struct virtio_mem_config,
2358			usable_region_size, &usable_region_size);
2359	end_addr = min(vm->addr + usable_region_size - 1,
2360		       pluggable_range.end);
2361
2362	if (vm->in_sbm) {
2363		vm->sbm.last_usable_mb_id = virtio_mem_phys_to_mb_id(end_addr);
2364		if (!IS_ALIGNED(end_addr + 1, memory_block_size_bytes()))
2365			vm->sbm.last_usable_mb_id--;
2366	} else {
2367		vm->bbm.last_usable_bb_id = virtio_mem_phys_to_bb_id(vm,
2368								     end_addr);
2369		if (!IS_ALIGNED(end_addr + 1, vm->bbm.bb_size))
2370			vm->bbm.last_usable_bb_id--;
2371	}
2372	/*
2373	 * If we cannot plug any of our device memory (e.g., nothing in the
2374	 * usable region is addressable), the last usable memory block id will
2375	 * be smaller than the first usable memory block id. We'll stop
2376	 * attempting to add memory with -ENOSPC from our main loop.
2377	 */
2378
2379	/* see if there is a request to change the size */
2380	virtio_cread_le(vm->vdev, struct virtio_mem_config, requested_size,
2381			&vm->requested_size);
2382
2383	dev_info(&vm->vdev->dev, "plugged size: 0x%llx", vm->plugged_size);
2384	dev_info(&vm->vdev->dev, "requested size: 0x%llx", vm->requested_size);
2385}
2386
2387/*
2388 * Workqueue function for handling plug/unplug requests and config updates.
2389 */
2390static void virtio_mem_run_wq(struct work_struct *work)
2391{
2392	struct virtio_mem *vm = container_of(work, struct virtio_mem, wq);
2393	uint64_t diff;
2394	int rc;
2395
2396	if (unlikely(vm->in_kdump)) {
2397		dev_warn_once(&vm->vdev->dev,
2398			     "unexpected workqueue run in kdump kernel\n");
2399		return;
2400	}
2401
2402	hrtimer_cancel(&vm->retry_timer);
2403
2404	if (vm->broken)
2405		return;
2406
2407	atomic_set(&vm->wq_active, 1);
2408retry:
2409	rc = 0;
2410
2411	/* Make sure we start with a clean state if there are leftovers. */
2412	if (unlikely(vm->unplug_all_required))
2413		rc = virtio_mem_send_unplug_all_request(vm);
2414
2415	if (atomic_read(&vm->config_changed)) {
2416		atomic_set(&vm->config_changed, 0);
2417		virtio_mem_refresh_config(vm);
2418	}
2419
2420	/* Cleanup any leftovers from previous runs */
2421	if (!rc)
2422		rc = virtio_mem_cleanup_pending_mb(vm);
2423
2424	if (!rc && vm->requested_size != vm->plugged_size) {
2425		if (vm->requested_size > vm->plugged_size) {
2426			diff = vm->requested_size - vm->plugged_size;
2427			rc = virtio_mem_plug_request(vm, diff);
2428		} else {
2429			diff = vm->plugged_size - vm->requested_size;
2430			rc = virtio_mem_unplug_request(vm, diff);
2431		}
2432	}
2433
2434	/*
2435	 * Keep retrying to offline and remove completely unplugged Linux
2436	 * memory blocks.
2437	 */
2438	if (!rc && vm->in_sbm && vm->sbm.have_unplugged_mb)
2439		rc = -EBUSY;
2440
2441	switch (rc) {
2442	case 0:
2443		vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS;
2444		break;
2445	case -ENOSPC:
2446		/*
2447		 * We cannot add any more memory (alignment, physical limit)
2448		 * or we have too many offline memory blocks.
2449		 */
2450		break;
2451	case -ETXTBSY:
2452		/*
2453		 * The hypervisor cannot process our request right now
2454		 * (e.g., out of memory, migrating);
2455		 */
2456	case -EBUSY:
2457		/*
2458		 * We cannot free up any memory to unplug it (all plugged memory
2459		 * is busy).
2460		 */
2461	case -ENOMEM:
2462		/* Out of memory, try again later. */
2463		hrtimer_start(&vm->retry_timer, ms_to_ktime(vm->retry_timer_ms),
2464			      HRTIMER_MODE_REL);
2465		break;
2466	case -EAGAIN:
2467		/* Retry immediately (e.g., the config changed). */
2468		goto retry;
2469	default:
2470		/* Unknown error, mark as broken */
2471		dev_err(&vm->vdev->dev,
2472			"unknown error, marking device broken: %d\n", rc);
2473		vm->broken = true;
2474	}
2475
2476	atomic_set(&vm->wq_active, 0);
2477}
2478
2479static enum hrtimer_restart virtio_mem_timer_expired(struct hrtimer *timer)
2480{
2481	struct virtio_mem *vm = container_of(timer, struct virtio_mem,
2482					     retry_timer);
2483
2484	virtio_mem_retry(vm);
2485	vm->retry_timer_ms = min_t(unsigned int, vm->retry_timer_ms * 2,
2486				   VIRTIO_MEM_RETRY_TIMER_MAX_MS);
2487	return HRTIMER_NORESTART;
2488}
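/*
 * Retry pacing (illustrative): each expiry doubles retry_timer_ms, so
 * failed runs are retried after MIN, 2*MIN, 4*MIN, ... ms, capped at
 * VIRTIO_MEM_RETRY_TIMER_MAX_MS; a successful workqueue run resets the
 * interval to VIRTIO_MEM_RETRY_TIMER_MIN_MS.
 */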
2489
2490static void virtio_mem_handle_response(struct virtqueue *vq)
2491{
2492	struct virtio_mem *vm = vq->vdev->priv;
2493
2494	wake_up(&vm->host_resp);
2495}
2496
2497static int virtio_mem_init_vq(struct virtio_mem *vm)
2498{
2499	struct virtqueue *vq;
2500
2501	vq = virtio_find_single_vq(vm->vdev, virtio_mem_handle_response,
2502				   "guest-request");
2503	if (IS_ERR(vq))
2504		return PTR_ERR(vq);
2505	vm->vq = vq;
2506
2507	return 0;
2508}
2509
2510static int virtio_mem_init_hotplug(struct virtio_mem *vm)
2511{
2512	const struct range pluggable_range = mhp_get_pluggable_range(true);
2513	uint64_t unit_pages, sb_size, addr;
2514	int rc;
2515
2516	/* bad device setup - warn only */
2517	if (!IS_ALIGNED(vm->addr, memory_block_size_bytes()))
2518		dev_warn(&vm->vdev->dev,
2519			 "The alignment of the physical start address can make some memory unusable.\n");
2520	if (!IS_ALIGNED(vm->addr + vm->region_size, memory_block_size_bytes()))
2521		dev_warn(&vm->vdev->dev,
2522			 "The alignment of the physical end address can make some memory unusable.\n");
2523	if (vm->addr < pluggable_range.start ||
2524	    vm->addr + vm->region_size - 1 > pluggable_range.end)
2525		dev_warn(&vm->vdev->dev,
2526			 "Some device memory is not addressable/pluggable. This can make some memory unusable.\n");
2527
2528	/* Prepare the offline threshold - make sure we can add two blocks. */
2529	vm->offline_threshold = max_t(uint64_t, 2 * memory_block_size_bytes(),
2530				      VIRTIO_MEM_DEFAULT_OFFLINE_THRESHOLD);
2531
2532	/*
2533	 * alloc_contig_range() works reliably with pageblock
2534	 * granularity on ZONE_NORMAL, use pageblock_nr_pages.
2535	 */
2536	sb_size = PAGE_SIZE * pageblock_nr_pages;
2537	sb_size = max_t(uint64_t, vm->device_block_size, sb_size);
2538
2539	if (sb_size < memory_block_size_bytes() && !force_bbm) {
2540		/* SBM: At least two subblocks per Linux memory block. */
2541		vm->in_sbm = true;
2542		vm->sbm.sb_size = sb_size;
2543		vm->sbm.sbs_per_mb = memory_block_size_bytes() /
2544				     vm->sbm.sb_size;
2545
2546		/* Round up to the next full memory block */
2547		addr = max_t(uint64_t, vm->addr, pluggable_range.start) +
2548		       memory_block_size_bytes() - 1;
2549		vm->sbm.first_mb_id = virtio_mem_phys_to_mb_id(addr);
2550		vm->sbm.next_mb_id = vm->sbm.first_mb_id;
2551	} else {
2552		/* BBM: At least one Linux memory block. */
2553		vm->bbm.bb_size = max_t(uint64_t, vm->device_block_size,
2554					memory_block_size_bytes());
2555
2556		if (bbm_block_size) {
2557			if (!is_power_of_2(bbm_block_size)) {
2558				dev_warn(&vm->vdev->dev,
2559					 "bbm_block_size is not a power of 2");
2560			} else if (bbm_block_size < vm->bbm.bb_size) {
2561				dev_warn(&vm->vdev->dev,
2562					 "bbm_block_size is too small");
2563			} else {
2564				vm->bbm.bb_size = bbm_block_size;
2565			}
2566		}
2567
2568		/* Round up to the next aligned big block */
2569		addr = max_t(uint64_t, vm->addr, pluggable_range.start) +
2570		       vm->bbm.bb_size - 1;
2571		vm->bbm.first_bb_id = virtio_mem_phys_to_bb_id(vm, addr);
2572		vm->bbm.next_bb_id = vm->bbm.first_bb_id;
2573
2574		/* Make sure we can add two big blocks. */
2575		vm->offline_threshold = max_t(uint64_t, 2 * vm->bbm.bb_size,
2576					      vm->offline_threshold);
2577	}
2578
2579	dev_info(&vm->vdev->dev, "memory block size: 0x%lx",
2580		 memory_block_size_bytes());
2581	if (vm->in_sbm)
2582		dev_info(&vm->vdev->dev, "subblock size: 0x%llx",
2583			 (unsigned long long)vm->sbm.sb_size);
2584	else
2585		dev_info(&vm->vdev->dev, "big block size: 0x%llx",
2586			 (unsigned long long)vm->bbm.bb_size);
2587
2588	/* create the parent resource for all memory */
2589	rc = virtio_mem_create_resource(vm);
2590	if (rc)
2591		return rc;
2592
2593	/* use a single dynamic memory group to cover the whole memory device */
2594	if (vm->in_sbm)
2595		unit_pages = PHYS_PFN(memory_block_size_bytes());
2596	else
2597		unit_pages = PHYS_PFN(vm->bbm.bb_size);
2598	rc = memory_group_register_dynamic(vm->nid, unit_pages);
2599	if (rc < 0)
2600		goto out_del_resource;
2601	vm->mgid = rc;
2602
2603	/*
2604	 * If we still have memory plugged, we have to unplug all memory first.
2605	 * Registering our parent resource makes sure that this memory isn't
2606	 * actually in use (e.g., trying to reload the driver).
2607	 */
2608	if (vm->plugged_size) {
2609		vm->unplug_all_required = true;
2610		dev_info(&vm->vdev->dev, "unplugging all memory is required\n");
2611	}
2612
2613	/* register callbacks */
2614	vm->memory_notifier.notifier_call = virtio_mem_memory_notifier_cb;
2615	rc = register_memory_notifier(&vm->memory_notifier);
2616	if (rc)
2617		goto out_unreg_group;
2618	rc = register_virtio_mem_device(vm);
2619	if (rc)
2620		goto out_unreg_mem;
2621
2622	return 0;
2623out_unreg_mem:
2624	unregister_memory_notifier(&vm->memory_notifier);
2625out_unreg_group:
2626	memory_group_unregister(vm->mgid);
2627out_del_resource:
2628	virtio_mem_delete_resource(vm);
2629	return rc;
2630}
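/*
 * Worked example (x86-64-style numbers, purely illustrative): with
 * 128 MiB Linux memory blocks, 2 MiB pageblocks and a 2 MiB
 * device_block_size, sb_size = 2 MiB < 128 MiB, so SBM is used with
 * sbs_per_mb = 64. A 1 GiB device_block_size instead forces BBM with
 * bb_size = 1 GiB.
 */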
2631
2632#ifdef CONFIG_PROC_VMCORE
2633static int virtio_mem_send_state_request(struct virtio_mem *vm, uint64_t addr,
2634					 uint64_t size)
2635{
2636	const uint64_t nb_vm_blocks = size / vm->device_block_size;
2637	const struct virtio_mem_req req = {
2638		.type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_STATE),
2639		.u.state.addr = cpu_to_virtio64(vm->vdev, addr),
2640		.u.state.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks),
2641	};
2642	int rc = -ENOMEM;
2643
2644	dev_dbg(&vm->vdev->dev, "requesting state: 0x%llx - 0x%llx\n", addr,
2645		addr + size - 1);
2646
2647	switch (virtio_mem_send_request(vm, &req)) {
2648	case VIRTIO_MEM_RESP_ACK:
2649		return virtio16_to_cpu(vm->vdev, vm->resp.u.state.state);
2650	case VIRTIO_MEM_RESP_ERROR:
2651		rc = -EINVAL;
2652		break;
2653	default:
2654		break;
2655	}
2656
2657	dev_dbg(&vm->vdev->dev, "requesting state failed: %d\n", rc);
2658	return rc;
2659}
2660
2661static bool virtio_mem_vmcore_pfn_is_ram(struct vmcore_cb *cb,
2662					 unsigned long pfn)
2663{
2664	struct virtio_mem *vm = container_of(cb, struct virtio_mem,
2665					     vmcore_cb);
2666	uint64_t addr = PFN_PHYS(pfn);
2667	bool is_ram;
2668	int rc;
2669
2670	if (!virtio_mem_contains_range(vm, addr, PAGE_SIZE))
2671		return true;
2672	if (!vm->plugged_size)
2673		return false;
2674
2675	/*
2676	 * We have to serialize device requests and access to the information
2677	 * about the block queried last.
2678	 */
2679	mutex_lock(&vm->hotplug_mutex);
2680
2681	addr = ALIGN_DOWN(addr, vm->device_block_size);
2682	if (addr != vm->last_block_addr) {
2683		rc = virtio_mem_send_state_request(vm, addr,
2684						   vm->device_block_size);
2685		/* On any kind of error, we're going to signal !ram. */
2686		if (rc == VIRTIO_MEM_STATE_PLUGGED)
2687			vm->last_block_plugged = true;
2688		else
2689			vm->last_block_plugged = false;
2690		vm->last_block_addr = addr;
2691	}
2692
2693	is_ram = vm->last_block_plugged;
2694	mutex_unlock(&vm->hotplug_mutex);
2695	return is_ram;
2696}
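/*
 * last_block_addr/last_block_plugged form a one-entry cache: vmcore
 * access is mostly sequential, so consecutive PFNs typically fall into
 * the same device block and only the first one needs a STATE request.
 */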
2697#endif /* CONFIG_PROC_VMCORE */
2698
2699static int virtio_mem_init_kdump(struct virtio_mem *vm)
2700{
2701#ifdef CONFIG_PROC_VMCORE
2702	dev_info(&vm->vdev->dev, "memory hot(un)plug disabled in kdump kernel\n");
2703	vm->vmcore_cb.pfn_is_ram = virtio_mem_vmcore_pfn_is_ram;
2704	register_vmcore_cb(&vm->vmcore_cb);
2705	return 0;
2706#else /* CONFIG_PROC_VMCORE */
2707	dev_warn(&vm->vdev->dev, "disabled in kdump kernel without vmcore\n");
2708	return -EBUSY;
2709#endif /* CONFIG_PROC_VMCORE */
2710}
2711
2712static int virtio_mem_init(struct virtio_mem *vm)
2713{
2714	uint16_t node_id;
2715
2716	if (!vm->vdev->config->get) {
2717		dev_err(&vm->vdev->dev, "config access disabled\n");
2718		return -EINVAL;
2719	}
2720
2721	/* Fetch all properties that can't change. */
2722	virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
2723			&vm->plugged_size);
2724	virtio_cread_le(vm->vdev, struct virtio_mem_config, block_size,
2725			&vm->device_block_size);
2726	virtio_cread_le(vm->vdev, struct virtio_mem_config, node_id,
2727			&node_id);
2728	vm->nid = virtio_mem_translate_node_id(vm, node_id);
2729	virtio_cread_le(vm->vdev, struct virtio_mem_config, addr, &vm->addr);
2730	virtio_cread_le(vm->vdev, struct virtio_mem_config, region_size,
2731			&vm->region_size);
2732
2733	/* Determine the nid for the device based on the lowest address. */
2734	if (vm->nid == NUMA_NO_NODE)
2735		vm->nid = memory_add_physaddr_to_nid(vm->addr);
2736
2737	dev_info(&vm->vdev->dev, "start address: 0x%llx", vm->addr);
2738	dev_info(&vm->vdev->dev, "region size: 0x%llx", vm->region_size);
2739	dev_info(&vm->vdev->dev, "device block size: 0x%llx",
2740		 (unsigned long long)vm->device_block_size);
2741	if (vm->nid != NUMA_NO_NODE && IS_ENABLED(CONFIG_NUMA))
2742		dev_info(&vm->vdev->dev, "nid: %d", vm->nid);
2743
2744	/*
2745	 * We don't want to (un)plug or reuse any memory when in kdump. The
2746	 * memory is still accessible (but not exposed to Linux).
2747	 */
2748	if (vm->in_kdump)
2749		return virtio_mem_init_kdump(vm);
2750	return virtio_mem_init_hotplug(vm);
2751}
2752
2753static int virtio_mem_create_resource(struct virtio_mem *vm)
2754{
2755	/*
2756	 * When force-unloading the driver and removing the device, we
2757	 * could have a garbage pointer. Duplicate the string.
2758	 */
2759	const char *name = kstrdup(dev_name(&vm->vdev->dev), GFP_KERNEL);
2760
2761	if (!name)
2762		return -ENOMEM;
2763
2764	/* Disallow mapping device memory via /dev/mem completely. */
2765	vm->parent_resource = __request_mem_region(vm->addr, vm->region_size,
2766						   name, IORESOURCE_SYSTEM_RAM |
2767						   IORESOURCE_EXCLUSIVE);
2768	if (!vm->parent_resource) {
2769		kfree(name);
2770		dev_warn(&vm->vdev->dev, "could not reserve device region\n");
2771		dev_info(&vm->vdev->dev,
2772			 "reloading the driver is not supported\n");
2773		return -EBUSY;
2774	}
2775
2776	/* The memory is not actually busy - make add_memory() work. */
2777	vm->parent_resource->flags &= ~IORESOURCE_BUSY;
2778	return 0;
2779}
2780
2781static void virtio_mem_delete_resource(struct virtio_mem *vm)
2782{
2783	const char *name;
2784
2785	if (!vm->parent_resource)
2786		return;
2787
2788	name = vm->parent_resource->name;
2789	release_resource(vm->parent_resource);
2790	kfree(vm->parent_resource);
2791	kfree(name);
2792	vm->parent_resource = NULL;
2793}
2794
2795static int virtio_mem_range_has_system_ram(struct resource *res, void *arg)
2796{
2797	return 1;
2798}
2799
2800static bool virtio_mem_has_memory_added(struct virtio_mem *vm)
2801{
2802	const unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
2803
2804	return walk_iomem_res_desc(IORES_DESC_NONE, flags, vm->addr,
2805				   vm->addr + vm->region_size, NULL,
2806				   virtio_mem_range_has_system_ram) == 1;
2807}
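/*
 * walk_iomem_res_desc() stops and returns the callback's value (1) as
 * soon as any busy System RAM resource intersects the device region,
 * so this returns true iff some of our memory is still added to Linux.
 */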
2808
2809static int virtio_mem_probe(struct virtio_device *vdev)
2810{
2811	struct virtio_mem *vm;
2812	int rc;
2813
2814	BUILD_BUG_ON(sizeof(struct virtio_mem_req) != 24);
2815	BUILD_BUG_ON(sizeof(struct virtio_mem_resp) != 10);
2816
2817	vdev->priv = vm = kzalloc(sizeof(*vm), GFP_KERNEL);
2818	if (!vm)
2819		return -ENOMEM;
2820
2821	init_waitqueue_head(&vm->host_resp);
2822	vm->vdev = vdev;
2823	INIT_WORK(&vm->wq, virtio_mem_run_wq);
2824	mutex_init(&vm->hotplug_mutex);
2825	INIT_LIST_HEAD(&vm->next);
2826	spin_lock_init(&vm->removal_lock);
2827	hrtimer_init(&vm->retry_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2828	vm->retry_timer.function = virtio_mem_timer_expired;
2829	vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS;
2830	vm->in_kdump = is_kdump_kernel();
2831
2832	/* register the virtqueue */
2833	rc = virtio_mem_init_vq(vm);
2834	if (rc)
2835		goto out_free_vm;
2836
2837	/* initialize the device by querying the config */
2838	rc = virtio_mem_init(vm);
2839	if (rc)
2840		goto out_del_vq;
2841
2842	virtio_device_ready(vdev);
2843
2844	/* trigger a config update to start processing the requested_size */
2845	if (!vm->in_kdump) {
2846		atomic_set(&vm->config_changed, 1);
2847		queue_work(system_freezable_wq, &vm->wq);
2848	}
2849
2850	return 0;
2851out_del_vq:
2852	vdev->config->del_vqs(vdev);
2853out_free_vm:
2854	kfree(vm);
2855	vdev->priv = NULL;
2856
2857	return rc;
2858}
2859
2860static void virtio_mem_deinit_hotplug(struct virtio_mem *vm)
2861{
2862	unsigned long mb_id;
2863	int rc;
2864
2865	/*
2866	 * Make sure the workqueue won't be triggered anymore and no memory
2867	 * blocks can be onlined/offlined until we're finished here.
2868	 */
2869	mutex_lock(&vm->hotplug_mutex);
2870	spin_lock_irq(&vm->removal_lock);
2871	vm->removing = true;
2872	spin_unlock_irq(&vm->removal_lock);
2873	mutex_unlock(&vm->hotplug_mutex);
2874
2875	/* wait until the workqueue stopped */
2876	cancel_work_sync(&vm->wq);
2877	hrtimer_cancel(&vm->retry_timer);
2878
2879	if (vm->in_sbm) {
2880		/*
2881		 * After we unregistered our callbacks, user space can online
2882		 * partially plugged offline blocks. Make sure to remove them.
2883		 */
2884		virtio_mem_sbm_for_each_mb(vm, mb_id,
2885					   VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL) {
2886			rc = virtio_mem_sbm_remove_mb(vm, mb_id);
2887			BUG_ON(rc);
2888			virtio_mem_sbm_set_mb_state(vm, mb_id,
2889						    VIRTIO_MEM_SBM_MB_UNUSED);
2890		}
2891		/*
2892		 * After we unregistered our callbacks, user space can no longer
2893		 * offline partially plugged online memory blocks. No need to
2894		 * worry about them.
2895		 */
2896	}
2897
2898	/* unregister callbacks */
2899	unregister_virtio_mem_device(vm);
2900	unregister_memory_notifier(&vm->memory_notifier);
2901
2902	/*
2903	 * There is no way we could reliably remove all memory we have added to
2904	 * the system. And there is no way to stop the driver/device from going
2905	 * away. Warn at least.
2906	 */
2907	if (virtio_mem_has_memory_added(vm)) {
2908		dev_warn(&vm->vdev->dev,
2909			 "device still has system memory added\n");
2910	} else {
2911		virtio_mem_delete_resource(vm);
2912		kfree_const(vm->resource_name);
2913		memory_group_unregister(vm->mgid);
2914	}
2915
2916	/* remove all tracking data - no locking needed */
2917	if (vm->in_sbm) {
2918		vfree(vm->sbm.mb_states);
2919		vfree(vm->sbm.sb_states);
2920	} else {
2921		vfree(vm->bbm.bb_states);
2922	}
2923}
2924
2925static void virtio_mem_deinit_kdump(struct virtio_mem *vm)
2926{
2927#ifdef CONFIG_PROC_VMCORE
2928	unregister_vmcore_cb(&vm->vmcore_cb);
2929#endif /* CONFIG_PROC_VMCORE */
2930}
2931
2932static void virtio_mem_remove(struct virtio_device *vdev)
2933{
2934	struct virtio_mem *vm = vdev->priv;
2935
2936	if (vm->in_kdump)
2937		virtio_mem_deinit_kdump(vm);
2938	else
2939		virtio_mem_deinit_hotplug(vm);
2940
2941	/* reset the device and cleanup the queues */
2942	virtio_reset_device(vdev);
2943	vdev->config->del_vqs(vdev);
2944
2945	kfree(vm);
2946	vdev->priv = NULL;
2947}
2948
2949static void virtio_mem_config_changed(struct virtio_device *vdev)
2950{
2951	struct virtio_mem *vm = vdev->priv;
2952
2953	if (unlikely(vm->in_kdump))
2954		return;
2955
2956	atomic_set(&vm->config_changed, 1);
2957	virtio_mem_retry(vm);
2958}
2959
2960#ifdef CONFIG_PM_SLEEP
2961static int virtio_mem_freeze(struct virtio_device *vdev)
2962{
2963	/*
2964	 * When restarting the VM, all memory is usually unplugged. Don't
2965	 * allow to suspend/hibernate.
2966	 */
2967	dev_err(&vdev->dev, "save/restore not supported.\n");
2968	return -EPERM;
2969}
2970
2971static int virtio_mem_restore(struct virtio_device *vdev)
2972{
2973	return -EPERM;
2974}
2975#endif
2976
2977static unsigned int virtio_mem_features[] = {
2978#if defined(CONFIG_NUMA) && defined(CONFIG_ACPI_NUMA)
2979	VIRTIO_MEM_F_ACPI_PXM,
2980#endif
2981	VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE,
2982};
2983
2984static const struct virtio_device_id virtio_mem_id_table[] = {
2985	{ VIRTIO_ID_MEM, VIRTIO_DEV_ANY_ID },
2986	{ 0 },
2987};
2988
2989static struct virtio_driver virtio_mem_driver = {
2990	.feature_table = virtio_mem_features,
2991	.feature_table_size = ARRAY_SIZE(virtio_mem_features),
2992	.driver.name = KBUILD_MODNAME,
2993	.driver.owner = THIS_MODULE,
2994	.id_table = virtio_mem_id_table,
2995	.probe = virtio_mem_probe,
2996	.remove = virtio_mem_remove,
2997	.config_changed = virtio_mem_config_changed,
2998#ifdef CONFIG_PM_SLEEP
2999	.freeze	=	virtio_mem_freeze,
3000	.restore =	virtio_mem_restore,
3001#endif
3002};
3003
3004module_virtio_driver(virtio_mem_driver);
3005MODULE_DEVICE_TABLE(virtio, virtio_mem_id_table);
3006MODULE_AUTHOR("David Hildenbrand <david@redhat.com>");
3007MODULE_DESCRIPTION("Virtio-mem driver");
3008MODULE_LICENSE("GPL");