// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio-mem device driver.
 *
 * Copyright Red Hat, Inc. 2020
 *
 * Author(s): David Hildenbrand <david@redhat.com>
 */

#include <linux/virtio.h>
#include <linux/virtio_mem.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/memory_hotplug.h>
#include <linux/memory.h>
#include <linux/hrtimer.h>
#include <linux/crash_dump.h>
#include <linux/mutex.h>
#include <linux/bitmap.h>
#include <linux/lockdep.h>

#include <acpi/acpi_numa.h>

static bool unplug_online = true;
module_param(unplug_online, bool, 0644);
MODULE_PARM_DESC(unplug_online, "Try to unplug online memory");

static bool force_bbm;
module_param(force_bbm, bool, 0444);
MODULE_PARM_DESC(force_bbm,
		 "Force Big Block Mode. Default is 0 (auto-selection)");

static unsigned long bbm_block_size;
module_param(bbm_block_size, ulong, 0444);
MODULE_PARM_DESC(bbm_block_size,
		 "Big Block size in bytes. Default is 0 (auto-detection).");

static bool bbm_safe_unplug = true;
module_param(bbm_safe_unplug, bool, 0444);
MODULE_PARM_DESC(bbm_safe_unplug,
		 "Use a safe unplug mechanism in BBM, avoiding long/endless loops");

/*
 * virtio-mem currently supports the following modes of operation:
 *
 * * Sub Block Mode (SBM): A Linux memory block spans 2..X subblocks (SB). The
 *   size of a Sub Block (SB) is determined based on the device block size, the
 *   pageblock size, and the maximum allocation granularity of the buddy.
 *   Subblocks within a Linux memory block might either be plugged or unplugged.
 *   Memory is added/removed to Linux MM in Linux memory block granularity.
 *
 * * Big Block Mode (BBM): A Big Block (BB) spans 1..X Linux memory blocks.
 *   Memory is added/removed to Linux MM in Big Block granularity.
 *
 * The mode is determined automatically based on the Linux memory block size
 * and the device block size.
 *
 * User space / core MM (auto onlining) is responsible for onlining added
 * Linux memory blocks - and for selecting a zone. Linux Memory Blocks are
 * always onlined separately, and all memory within a Linux memory block is
 * onlined to the same zone - virtio-mem relies on this behavior.
 */

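/*
 * Example (illustrative): on x86-64 with 128 MiB Linux memory blocks, a
 * device block size of 2 MiB typically results in SBM with 4 MiB subblocks
 * (32 subblocks per memory block), while a device block size of 1 GiB
 * results in BBM with big blocks spanning 8 Linux memory blocks each.
 */
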
/*
 * State of a Linux memory block in SBM.
 */
enum virtio_mem_sbm_mb_state {
	/* Unplugged, not added to Linux. Can be reused later. */
	VIRTIO_MEM_SBM_MB_UNUSED = 0,
	/* (Partially) plugged, not added to Linux. Error on add_memory(). */
	VIRTIO_MEM_SBM_MB_PLUGGED,
	/* Fully plugged, fully added to Linux, offline. */
	VIRTIO_MEM_SBM_MB_OFFLINE,
	/* Partially plugged, fully added to Linux, offline. */
	VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL,
	/* Fully plugged, fully added to Linux, online. */
	VIRTIO_MEM_SBM_MB_ONLINE,
	/* Partially plugged, fully added to Linux, online. */
	VIRTIO_MEM_SBM_MB_ONLINE_PARTIAL,
	VIRTIO_MEM_SBM_MB_COUNT
};

/*
 * State of a Big Block (BB) in BBM, covering 1..X Linux memory blocks.
 */
enum virtio_mem_bbm_bb_state {
	/* Unplugged, not added to Linux. Can be reused later. */
	VIRTIO_MEM_BBM_BB_UNUSED = 0,
	/* Plugged, not added to Linux. Error on add_memory(). */
	VIRTIO_MEM_BBM_BB_PLUGGED,
	/* Plugged and added to Linux. */
	VIRTIO_MEM_BBM_BB_ADDED,
	/* All online parts are fake-offline, ready to remove. */
	VIRTIO_MEM_BBM_BB_FAKE_OFFLINE,
	VIRTIO_MEM_BBM_BB_COUNT
};

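/*
 * Per-device state. One instance is allocated per probed virtio-mem device
 * and linked into virtio_mem_devices (see below).
 */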
struct virtio_mem {
	struct virtio_device *vdev;

	/* We might first have to unplug all memory when starting up. */
	bool unplug_all_required;

	/* Workqueue that processes the plug/unplug requests. */
	struct work_struct wq;
	atomic_t wq_active;
	atomic_t config_changed;

	/* Virtqueue for guest->host requests. */
	struct virtqueue *vq;

	/* Wait for a host response to a guest request. */
	wait_queue_head_t host_resp;

	/* Space for one guest request and the host response. */
	struct virtio_mem_req req;
	struct virtio_mem_resp resp;

	/* The current size of the device. */
	uint64_t plugged_size;
	/* The requested size of the device. */
	uint64_t requested_size;

	/* The device block size (for communicating with the device). */
	uint64_t device_block_size;
	/* The determined node id for all memory of the device. */
	int nid;
	/* Physical start address of the memory region. */
	uint64_t addr;
	/* Maximum region size in bytes. */
	uint64_t region_size;

	/* The parent resource for all memory added via this device. */
	struct resource *parent_resource;
	/*
	 * Copy of "System RAM (virtio_mem)" to be used for
	 * add_memory_driver_managed().
	 */
	const char *resource_name;

	/*
	 * We don't want to add too much memory if it's not getting onlined,
	 * to avoid running OOM. Besides this threshold, we allow having at
	 * least two offline blocks at a time (whatever is bigger).
	 */
#define VIRTIO_MEM_DEFAULT_OFFLINE_THRESHOLD	(1024 * 1024 * 1024)
	atomic64_t offline_size;
	uint64_t offline_threshold;

	/* If set, the driver is in SBM, otherwise in BBM. */
	bool in_sbm;

	union {
		struct {
			/* Id of the first memory block of this device. */
			unsigned long first_mb_id;
			/* Id of the last usable memory block of this device. */
			unsigned long last_usable_mb_id;
			/* Id of the next memory block to prepare when needed. */
			unsigned long next_mb_id;

			/* The subblock size. */
			uint64_t sb_size;
			/* The number of subblocks per Linux memory block. */
			uint32_t sbs_per_mb;

			/* Summary of all memory block states. */
			unsigned long mb_count[VIRTIO_MEM_SBM_MB_COUNT];

			/*
			 * One byte state per memory block. Allocated via
			 * vmalloc(). Resized (alloc+copy+free) on demand.
			 *
			 * With 128 MiB memory blocks, we have states for 512
			 * GiB of memory in one 4 KiB page.
			 */
			uint8_t *mb_states;

			/*
			 * Bitmap: one bit per subblock. Allocated similar to
			 * sbm.mb_states.
			 *
			 * A set bit means the corresponding subblock is
			 * plugged, otherwise it's unplugged.
			 *
			 * With 4 MiB subblocks, we manage 128 GiB of memory
			 * in one 4 KiB page.
			 */
			unsigned long *sb_states;
		} sbm;

		struct {
			/* Id of the first big block of this device. */
			unsigned long first_bb_id;
			/* Id of the last usable big block of this device. */
			unsigned long last_usable_bb_id;
			/* Id of the next big block to prepare when needed. */
			unsigned long next_bb_id;

			/* Summary of all big block states. */
			unsigned long bb_count[VIRTIO_MEM_BBM_BB_COUNT];

			/* One byte state per big block. See sbm.mb_states. */
			uint8_t *bb_states;

			/* The block size used for plugging/adding/removing. */
			uint64_t bb_size;
		} bbm;
	};

	/*
	 * Mutex that protects the sbm.mb_count, sbm.mb_states,
	 * sbm.sb_states, bbm.bb_count, and bbm.bb_states.
	 *
	 * When this lock is held the pointers can't change, ONLINE and
	 * OFFLINE blocks can't change the state and no subblocks will get
	 * plugged/unplugged.
	 */
	struct mutex hotplug_mutex;
	bool hotplug_active;

	/* An error occurred we cannot handle - stop processing requests. */
	bool broken;

	/* The driver is being removed. */
	spinlock_t removal_lock;
	bool removing;

	/* Timer for retrying to plug/unplug memory. */
	struct hrtimer retry_timer;
	unsigned int retry_timer_ms;
#define VIRTIO_MEM_RETRY_TIMER_MIN_MS	50000
#define VIRTIO_MEM_RETRY_TIMER_MAX_MS	300000

	/* Memory notifier (online/offline events). */
	struct notifier_block memory_notifier;

	/* Next device in the list of virtio-mem devices. */
	struct list_head next;
};

/*
 * We have to share a single online_page callback among all virtio-mem
 * devices. We use RCU to iterate the list in the callback.
 */
static DEFINE_MUTEX(virtio_mem_mutex);
static LIST_HEAD(virtio_mem_devices);

static void virtio_mem_online_page_cb(struct page *page, unsigned int order);
static void virtio_mem_fake_offline_going_offline(unsigned long pfn,
						  unsigned long nr_pages);
static void virtio_mem_fake_offline_cancel_offline(unsigned long pfn,
						   unsigned long nr_pages);
static void virtio_mem_retry(struct virtio_mem *vm);

/*
 * Register a virtio-mem device so it will be considered for the online_page
 * callback.
 */
static int register_virtio_mem_device(struct virtio_mem *vm)
{
	int rc = 0;

	/* First device registers the callback. */
	mutex_lock(&virtio_mem_mutex);
	if (list_empty(&virtio_mem_devices))
		rc = set_online_page_callback(&virtio_mem_online_page_cb);
	if (!rc)
		list_add_rcu(&vm->next, &virtio_mem_devices);
	mutex_unlock(&virtio_mem_mutex);

	return rc;
}

/*
 * Unregister a virtio-mem device so it will no longer be considered for the
 * online_page callback.
 */
static void unregister_virtio_mem_device(struct virtio_mem *vm)
{
	/* Last device unregisters the callback. */
	mutex_lock(&virtio_mem_mutex);
	list_del_rcu(&vm->next);
	if (list_empty(&virtio_mem_devices))
		restore_online_page_callback(&virtio_mem_online_page_cb);
	mutex_unlock(&virtio_mem_mutex);

	synchronize_rcu();
}

/*
 * Calculate the memory block id of a given address.
 */
static unsigned long virtio_mem_phys_to_mb_id(unsigned long addr)
{
	return addr / memory_block_size_bytes();
}

/*
 * Calculate the physical start address of a given memory block id.
 */
static unsigned long virtio_mem_mb_id_to_phys(unsigned long mb_id)
{
	return mb_id * memory_block_size_bytes();
}

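/*
 * Example: with memory_block_size_bytes() == 128 MiB, physical address
 * 0x20000000 (512 MiB) maps to memory block id 4, and mb_id 4 maps back
 * to physical address 0x20000000.
 */
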
/*
 * Calculate the big block id of a given address.
 */
static unsigned long virtio_mem_phys_to_bb_id(struct virtio_mem *vm,
					      uint64_t addr)
{
	return addr / vm->bbm.bb_size;
}

/*
 * Calculate the physical start address of a given big block id.
 */
static uint64_t virtio_mem_bb_id_to_phys(struct virtio_mem *vm,
					 unsigned long bb_id)
{
	return bb_id * vm->bbm.bb_size;
}

/*
 * Calculate the subblock id of a given address.
 */
static unsigned long virtio_mem_phys_to_sb_id(struct virtio_mem *vm,
					      unsigned long addr)
{
	const unsigned long mb_id = virtio_mem_phys_to_mb_id(addr);
	const unsigned long mb_addr = virtio_mem_mb_id_to_phys(mb_id);

	return (addr - mb_addr) / vm->sbm.sb_size;
}

/*
 * Set the state of a big block, taking care of the state counter.
 */
static void virtio_mem_bbm_set_bb_state(struct virtio_mem *vm,
					unsigned long bb_id,
					enum virtio_mem_bbm_bb_state state)
{
	const unsigned long idx = bb_id - vm->bbm.first_bb_id;
	enum virtio_mem_bbm_bb_state old_state;

	old_state = vm->bbm.bb_states[idx];
	vm->bbm.bb_states[idx] = state;

	BUG_ON(vm->bbm.bb_count[old_state] == 0);
	vm->bbm.bb_count[old_state]--;
	vm->bbm.bb_count[state]++;
}

/*
 * Get the state of a big block.
 */
static enum virtio_mem_bbm_bb_state virtio_mem_bbm_get_bb_state(struct virtio_mem *vm,
								unsigned long bb_id)
{
	return vm->bbm.bb_states[bb_id - vm->bbm.first_bb_id];
}

/*
 * Prepare the big block state array for the next big block.
 */
static int virtio_mem_bbm_bb_states_prepare_next_bb(struct virtio_mem *vm)
{
	unsigned long old_bytes = vm->bbm.next_bb_id - vm->bbm.first_bb_id;
	unsigned long new_bytes = old_bytes + 1;
	int old_pages = PFN_UP(old_bytes);
	int new_pages = PFN_UP(new_bytes);
	uint8_t *new_array;

	if (vm->bbm.bb_states && old_pages == new_pages)
		return 0;

	new_array = vzalloc(new_pages * PAGE_SIZE);
	if (!new_array)
		return -ENOMEM;

	mutex_lock(&vm->hotplug_mutex);
	if (vm->bbm.bb_states)
		memcpy(new_array, vm->bbm.bb_states, old_pages * PAGE_SIZE);
	vfree(vm->bbm.bb_states);
	vm->bbm.bb_states = new_array;
	mutex_unlock(&vm->hotplug_mutex);

	return 0;
}

#define virtio_mem_bbm_for_each_bb(_vm, _bb_id, _state)			\
	for (_bb_id = _vm->bbm.first_bb_id;				\
	     _bb_id < _vm->bbm.next_bb_id && _vm->bbm.bb_count[_state];	\
	     _bb_id++)							\
		if (virtio_mem_bbm_get_bb_state(_vm, _bb_id) == _state)

#define virtio_mem_bbm_for_each_bb_rev(_vm, _bb_id, _state)		\
	for (_bb_id = _vm->bbm.next_bb_id - 1;				\
	     _bb_id >= _vm->bbm.first_bb_id && _vm->bbm.bb_count[_state]; \
	     _bb_id--)							\
		if (virtio_mem_bbm_get_bb_state(_vm, _bb_id) == _state)

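/*
 * Example usage (illustrative, mirroring virtio_mem_unplug_pending_mb()):
 *
 *	virtio_mem_bbm_for_each_bb(vm, bb_id, VIRTIO_MEM_BBM_BB_PLUGGED)
 *		virtio_mem_bbm_unplug_bb(vm, bb_id);
 */
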
/*
 * Set the state of a memory block, taking care of the state counter.
 */
static void virtio_mem_sbm_set_mb_state(struct virtio_mem *vm,
					unsigned long mb_id, uint8_t state)
{
	const unsigned long idx = mb_id - vm->sbm.first_mb_id;
	uint8_t old_state;

	old_state = vm->sbm.mb_states[idx];
	vm->sbm.mb_states[idx] = state;

	BUG_ON(vm->sbm.mb_count[old_state] == 0);
	vm->sbm.mb_count[old_state]--;
	vm->sbm.mb_count[state]++;
}

/*
 * Get the state of a memory block.
 */
static uint8_t virtio_mem_sbm_get_mb_state(struct virtio_mem *vm,
					   unsigned long mb_id)
{
	const unsigned long idx = mb_id - vm->sbm.first_mb_id;

	return vm->sbm.mb_states[idx];
}

/*
 * Prepare the state array for the next memory block.
 */
static int virtio_mem_sbm_mb_states_prepare_next_mb(struct virtio_mem *vm)
{
	int old_pages = PFN_UP(vm->sbm.next_mb_id - vm->sbm.first_mb_id);
	int new_pages = PFN_UP(vm->sbm.next_mb_id - vm->sbm.first_mb_id + 1);
	uint8_t *new_array;

	if (vm->sbm.mb_states && old_pages == new_pages)
		return 0;

	new_array = vzalloc(new_pages * PAGE_SIZE);
	if (!new_array)
		return -ENOMEM;

	mutex_lock(&vm->hotplug_mutex);
	if (vm->sbm.mb_states)
		memcpy(new_array, vm->sbm.mb_states, old_pages * PAGE_SIZE);
	vfree(vm->sbm.mb_states);
	vm->sbm.mb_states = new_array;
	mutex_unlock(&vm->hotplug_mutex);

	return 0;
}

#define virtio_mem_sbm_for_each_mb(_vm, _mb_id, _state)			\
	for (_mb_id = _vm->sbm.first_mb_id;				\
	     _mb_id < _vm->sbm.next_mb_id && _vm->sbm.mb_count[_state];	\
	     _mb_id++)							\
		if (virtio_mem_sbm_get_mb_state(_vm, _mb_id) == _state)

#define virtio_mem_sbm_for_each_mb_rev(_vm, _mb_id, _state)		\
	for (_mb_id = _vm->sbm.next_mb_id - 1;				\
	     _mb_id >= _vm->sbm.first_mb_id && _vm->sbm.mb_count[_state]; \
	     _mb_id--)							\
		if (virtio_mem_sbm_get_mb_state(_vm, _mb_id) == _state)

/*
 * Calculate the bit number in the subblock bitmap for the given subblock
 * inside the given memory block.
 */
static int virtio_mem_sbm_sb_state_bit_nr(struct virtio_mem *vm,
					  unsigned long mb_id, int sb_id)
{
	return (mb_id - vm->sbm.first_mb_id) * vm->sbm.sbs_per_mb + sb_id;
}

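/*
 * Example: with vm->sbm.sbs_per_mb == 32, subblock 5 of the third memory
 * block of the device (mb_id == first_mb_id + 2) maps to bit
 * 2 * 32 + 5 == 69 in the subblock bitmap.
 */
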
/*
 * Mark all selected subblocks plugged.
 *
 * Will not modify the state of the memory block.
 */
static void virtio_mem_sbm_set_sb_plugged(struct virtio_mem *vm,
					  unsigned long mb_id, int sb_id,
					  int count)
{
	const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);

	__bitmap_set(vm->sbm.sb_states, bit, count);
}

/*
 * Mark all selected subblocks unplugged.
 *
 * Will not modify the state of the memory block.
 */
static void virtio_mem_sbm_set_sb_unplugged(struct virtio_mem *vm,
					    unsigned long mb_id, int sb_id,
					    int count)
{
	const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);

	__bitmap_clear(vm->sbm.sb_states, bit, count);
}

/*
 * Test if all selected subblocks are plugged.
 */
static bool virtio_mem_sbm_test_sb_plugged(struct virtio_mem *vm,
					   unsigned long mb_id, int sb_id,
					   int count)
{
	const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);

	if (count == 1)
		return test_bit(bit, vm->sbm.sb_states);

	/* TODO: Helper similar to bitmap_set() */
	return find_next_zero_bit(vm->sbm.sb_states, bit + count, bit) >=
	       bit + count;
}

/*
 * Test if all selected subblocks are unplugged.
 */
static bool virtio_mem_sbm_test_sb_unplugged(struct virtio_mem *vm,
					     unsigned long mb_id, int sb_id,
					     int count)
{
	const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);

	/* TODO: Helper similar to bitmap_set() */
	return find_next_bit(vm->sbm.sb_states, bit + count, bit) >=
	       bit + count;
}

/*
 * Find the first unplugged subblock. Returns vm->sbm.sbs_per_mb in case there
 * is none.
 */
static int virtio_mem_sbm_first_unplugged_sb(struct virtio_mem *vm,
					     unsigned long mb_id)
{
	const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, 0);

	return find_next_zero_bit(vm->sbm.sb_states,
				  bit + vm->sbm.sbs_per_mb, bit) - bit;
}

/*
 * Prepare the subblock bitmap for the next memory block.
 */
static int virtio_mem_sbm_sb_states_prepare_next_mb(struct virtio_mem *vm)
{
	const unsigned long old_nb_mb = vm->sbm.next_mb_id - vm->sbm.first_mb_id;
	const unsigned long old_nb_bits = old_nb_mb * vm->sbm.sbs_per_mb;
	const unsigned long new_nb_bits = (old_nb_mb + 1) * vm->sbm.sbs_per_mb;
	int old_pages = PFN_UP(BITS_TO_LONGS(old_nb_bits) * sizeof(long));
	int new_pages = PFN_UP(BITS_TO_LONGS(new_nb_bits) * sizeof(long));
	unsigned long *new_bitmap, *old_bitmap;

	if (vm->sbm.sb_states && old_pages == new_pages)
		return 0;

	new_bitmap = vzalloc(new_pages * PAGE_SIZE);
	if (!new_bitmap)
		return -ENOMEM;

	mutex_lock(&vm->hotplug_mutex);
	if (vm->sbm.sb_states)
		memcpy(new_bitmap, vm->sbm.sb_states, old_pages * PAGE_SIZE);

	old_bitmap = vm->sbm.sb_states;
	vm->sbm.sb_states = new_bitmap;
	mutex_unlock(&vm->hotplug_mutex);

	vfree(old_bitmap);
	return 0;
}

/*
 * Test if we could add memory without creating too much offline memory -
 * to avoid running OOM if memory is getting onlined deferred.
 */
static bool virtio_mem_could_add_memory(struct virtio_mem *vm, uint64_t size)
{
	if (WARN_ON_ONCE(size > vm->offline_threshold))
		return false;

	return atomic64_read(&vm->offline_size) + size <= vm->offline_threshold;
}

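/*
 * Example: with the default 1 GiB offline threshold and 128 MiB memory
 * blocks, up to eight not-yet-onlined memory blocks can be added before
 * we wait for onlining to catch up.
 */
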
/*
 * Try adding memory to Linux. Will usually only fail if out of memory.
 *
 * Must not be called with the vm->hotplug_mutex held (possible deadlock with
 * onlining code).
 *
 * Will not modify the state of memory blocks in virtio-mem.
 */
static int virtio_mem_add_memory(struct virtio_mem *vm, uint64_t addr,
				 uint64_t size)
{
	int rc;

	/*
	 * When force-unloading the driver and we still have memory added to
	 * Linux, the resource name has to stay.
	 */
	if (!vm->resource_name) {
		vm->resource_name = kstrdup_const("System RAM (virtio_mem)",
						  GFP_KERNEL);
		if (!vm->resource_name)
			return -ENOMEM;
	}

	dev_dbg(&vm->vdev->dev, "adding memory: 0x%llx - 0x%llx\n", addr,
		addr + size - 1);
	/* Memory might get onlined immediately. */
	atomic64_add(size, &vm->offline_size);
	rc = add_memory_driver_managed(vm->nid, addr, size, vm->resource_name,
				       MHP_MERGE_RESOURCE);
	if (rc) {
		atomic64_sub(size, &vm->offline_size);
		dev_warn(&vm->vdev->dev, "adding memory failed: %d\n", rc);
		/*
		 * TODO: Linux MM does not properly clean up yet in all cases
		 * where adding of memory failed - especially on -ENOMEM.
		 */
	}
	return rc;
}

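/*
 * Note: memory added this way is visible as "System RAM (virtio_mem)" in
 * /proc/iomem; MHP_MERGE_RESOURCE allows adjacent driver-managed ranges to
 * be merged into a single resource.
 */
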
/*
 * See virtio_mem_add_memory(): Try adding a single Linux memory block.
 */
static int virtio_mem_sbm_add_mb(struct virtio_mem *vm, unsigned long mb_id)
{
	const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
	const uint64_t size = memory_block_size_bytes();

	return virtio_mem_add_memory(vm, addr, size);
}

/*
 * See virtio_mem_add_memory(): Try adding a big block.
 */
static int virtio_mem_bbm_add_bb(struct virtio_mem *vm, unsigned long bb_id)
{
	const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
	const uint64_t size = vm->bbm.bb_size;

	return virtio_mem_add_memory(vm, addr, size);
}

/*
 * Try removing memory from Linux. Will only fail if memory blocks aren't
 * offline.
 *
 * Must not be called with the vm->hotplug_mutex held (possible deadlock with
 * onlining code).
 *
 * Will not modify the state of memory blocks in virtio-mem.
 */
static int virtio_mem_remove_memory(struct virtio_mem *vm, uint64_t addr,
				    uint64_t size)
{
	int rc;

	dev_dbg(&vm->vdev->dev, "removing memory: 0x%llx - 0x%llx\n", addr,
		addr + size - 1);
	rc = remove_memory(vm->nid, addr, size);
	if (!rc) {
		atomic64_sub(size, &vm->offline_size);
		/*
		 * We might have freed up memory we can now unplug, retry
		 * immediately instead of waiting.
		 */
		virtio_mem_retry(vm);
	} else {
		dev_dbg(&vm->vdev->dev, "removing memory failed: %d\n", rc);
	}
	return rc;
}

/*
 * See virtio_mem_remove_memory(): Try removing a single Linux memory block.
 */
static int virtio_mem_sbm_remove_mb(struct virtio_mem *vm, unsigned long mb_id)
{
	const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
	const uint64_t size = memory_block_size_bytes();

	return virtio_mem_remove_memory(vm, addr, size);
}

/*
 * See virtio_mem_remove_memory(): Try to remove all Linux memory blocks
 * covered by the big block.
 */
static int virtio_mem_bbm_remove_bb(struct virtio_mem *vm, unsigned long bb_id)
{
	const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
	const uint64_t size = vm->bbm.bb_size;

	return virtio_mem_remove_memory(vm, addr, size);
}

/*
 * Try offlining and removing memory from Linux.
 *
 * Must not be called with the vm->hotplug_mutex held (possible deadlock with
 * onlining code).
 *
 * Will not modify the state of memory blocks in virtio-mem.
 */
static int virtio_mem_offline_and_remove_memory(struct virtio_mem *vm,
						uint64_t addr,
						uint64_t size)
{
	int rc;

	dev_dbg(&vm->vdev->dev,
		"offlining and removing memory: 0x%llx - 0x%llx\n", addr,
		addr + size - 1);

	rc = offline_and_remove_memory(vm->nid, addr, size);
	if (!rc) {
		atomic64_sub(size, &vm->offline_size);
		/*
		 * We might have freed up memory we can now unplug, retry
		 * immediately instead of waiting.
		 */
		virtio_mem_retry(vm);
	} else {
		dev_dbg(&vm->vdev->dev,
			"offlining and removing memory failed: %d\n", rc);
	}
	return rc;
}

/*
 * See virtio_mem_offline_and_remove_memory(): Try offlining and removing
 * a single Linux memory block.
 */
static int virtio_mem_sbm_offline_and_remove_mb(struct virtio_mem *vm,
						unsigned long mb_id)
{
	const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
	const uint64_t size = memory_block_size_bytes();

	return virtio_mem_offline_and_remove_memory(vm, addr, size);
}

/*
 * See virtio_mem_offline_and_remove_memory(): Try to offline and remove all
 * Linux memory blocks covered by the big block.
 */
static int virtio_mem_bbm_offline_and_remove_bb(struct virtio_mem *vm,
						unsigned long bb_id)
{
	const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
	const uint64_t size = vm->bbm.bb_size;

	return virtio_mem_offline_and_remove_memory(vm, addr, size);
}

/*
 * Trigger the workqueue so the device can perform its magic.
 */
static void virtio_mem_retry(struct virtio_mem *vm)
{
	unsigned long flags;

	spin_lock_irqsave(&vm->removal_lock, flags);
	if (!vm->removing)
		queue_work(system_freezable_wq, &vm->wq);
	spin_unlock_irqrestore(&vm->removal_lock, flags);
}

static int virtio_mem_translate_node_id(struct virtio_mem *vm, uint16_t node_id)
{
	int node = NUMA_NO_NODE;

#if defined(CONFIG_ACPI_NUMA)
	if (virtio_has_feature(vm->vdev, VIRTIO_MEM_F_ACPI_PXM))
		node = pxm_to_node(node_id);
#endif
	return node;
}

/*
 * Test if a virtio-mem device overlaps with the given range. Can be called
 * from (notifier) callbacks lockless.
 */
static bool virtio_mem_overlaps_range(struct virtio_mem *vm, uint64_t start,
				      uint64_t size)
{
	return start < vm->addr + vm->region_size && vm->addr < start + size;
}

/*
 * Test if a virtio-mem device contains a given range. Can be called from
 * (notifier) callbacks lockless.
 */
static bool virtio_mem_contains_range(struct virtio_mem *vm, uint64_t start,
				      uint64_t size)
{
	return start >= vm->addr && start + size <= vm->addr + vm->region_size;
}

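/*
 * Memory notifier helpers: GOING_ONLINE lets us veto onlining of blocks we
 * did not fully prepare, while GOING_OFFLINE lets us release our references
 * to fake-offline pages before the core offlines the range.
 */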
static int virtio_mem_sbm_notify_going_online(struct virtio_mem *vm,
					      unsigned long mb_id)
{
	switch (virtio_mem_sbm_get_mb_state(vm, mb_id)) {
	case VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL:
	case VIRTIO_MEM_SBM_MB_OFFLINE:
		return NOTIFY_OK;
	default:
		break;
	}
	dev_warn_ratelimited(&vm->vdev->dev,
			     "memory block onlining denied\n");
	return NOTIFY_BAD;
}

static void virtio_mem_sbm_notify_offline(struct virtio_mem *vm,
					  unsigned long mb_id)
{
	switch (virtio_mem_sbm_get_mb_state(vm, mb_id)) {
	case VIRTIO_MEM_SBM_MB_ONLINE_PARTIAL:
		virtio_mem_sbm_set_mb_state(vm, mb_id,
					    VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL);
		break;
	case VIRTIO_MEM_SBM_MB_ONLINE:
		virtio_mem_sbm_set_mb_state(vm, mb_id,
					    VIRTIO_MEM_SBM_MB_OFFLINE);
		break;
	default:
		BUG();
		break;
	}
}

static void virtio_mem_sbm_notify_online(struct virtio_mem *vm,
					 unsigned long mb_id)
{
	switch (virtio_mem_sbm_get_mb_state(vm, mb_id)) {
	case VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL:
		virtio_mem_sbm_set_mb_state(vm, mb_id,
					    VIRTIO_MEM_SBM_MB_ONLINE_PARTIAL);
		break;
	case VIRTIO_MEM_SBM_MB_OFFLINE:
		virtio_mem_sbm_set_mb_state(vm, mb_id,
					    VIRTIO_MEM_SBM_MB_ONLINE);
		break;
	default:
		BUG();
		break;
	}
}

static void virtio_mem_sbm_notify_going_offline(struct virtio_mem *vm,
						unsigned long mb_id)
{
	const unsigned long nr_pages = PFN_DOWN(vm->sbm.sb_size);
	unsigned long pfn;
	int sb_id;

	for (sb_id = 0; sb_id < vm->sbm.sbs_per_mb; sb_id++) {
		if (virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id, 1))
			continue;
		pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
			       sb_id * vm->sbm.sb_size);
		virtio_mem_fake_offline_going_offline(pfn, nr_pages);
	}
}

static void virtio_mem_sbm_notify_cancel_offline(struct virtio_mem *vm,
						 unsigned long mb_id)
{
	const unsigned long nr_pages = PFN_DOWN(vm->sbm.sb_size);
	unsigned long pfn;
	int sb_id;

	for (sb_id = 0; sb_id < vm->sbm.sbs_per_mb; sb_id++) {
		if (virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id, 1))
			continue;
		pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
			       sb_id * vm->sbm.sb_size);
		virtio_mem_fake_offline_cancel_offline(pfn, nr_pages);
	}
}

static void virtio_mem_bbm_notify_going_offline(struct virtio_mem *vm,
						unsigned long bb_id,
						unsigned long pfn,
						unsigned long nr_pages)
{
	/*
	 * When marked as "fake-offline", all online memory of this device block
	 * is allocated by us. Otherwise, we don't have any memory allocated.
	 */
	if (virtio_mem_bbm_get_bb_state(vm, bb_id) !=
	    VIRTIO_MEM_BBM_BB_FAKE_OFFLINE)
		return;
	virtio_mem_fake_offline_going_offline(pfn, nr_pages);
}

static void virtio_mem_bbm_notify_cancel_offline(struct virtio_mem *vm,
						 unsigned long bb_id,
						 unsigned long pfn,
						 unsigned long nr_pages)
{
	if (virtio_mem_bbm_get_bb_state(vm, bb_id) !=
	    VIRTIO_MEM_BBM_BB_FAKE_OFFLINE)
		return;
	virtio_mem_fake_offline_cancel_offline(pfn, nr_pages);
}

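/*
 * SBM state transitions triggered via the notifier below (for reference):
 *
 *	MEM_ONLINE:  OFFLINE	     -> ONLINE
 *		     OFFLINE_PARTIAL -> ONLINE_PARTIAL
 *	MEM_OFFLINE: ONLINE	     -> OFFLINE
 *		     ONLINE_PARTIAL  -> OFFLINE_PARTIAL
 */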
/*
 * This callback will either be called synchronously from add_memory() or
 * asynchronously (e.g., triggered via user space). We have to be careful
 * with locking when calling add_memory().
 */
static int virtio_mem_memory_notifier_cb(struct notifier_block *nb,
					 unsigned long action, void *arg)
{
	struct virtio_mem *vm = container_of(nb, struct virtio_mem,
					     memory_notifier);
	struct memory_notify *mhp = arg;
	const unsigned long start = PFN_PHYS(mhp->start_pfn);
	const unsigned long size = PFN_PHYS(mhp->nr_pages);
	int rc = NOTIFY_OK;
	unsigned long id;

	if (!virtio_mem_overlaps_range(vm, start, size))
		return NOTIFY_DONE;

	if (vm->in_sbm) {
		id = virtio_mem_phys_to_mb_id(start);
		/*
		 * In SBM, we add memory in separate memory blocks - we expect
		 * it to be onlined/offlined in the same granularity. Bail out
		 * if this ever changes.
		 */
		if (WARN_ON_ONCE(size != memory_block_size_bytes() ||
				 !IS_ALIGNED(start, memory_block_size_bytes())))
			return NOTIFY_BAD;
	} else {
		id = virtio_mem_phys_to_bb_id(vm, start);
		/*
		 * In BBM, we only care about onlining/offlining happening
		 * within a single big block, we don't care about the
		 * actual granularity as we don't track individual Linux
		 * memory blocks.
		 */
		if (WARN_ON_ONCE(id != virtio_mem_phys_to_bb_id(vm, start + size - 1)))
			return NOTIFY_BAD;
	}

	/*
	 * Avoid circular locking lockdep warnings. We lock the mutex
	 * e.g., in MEM_GOING_ONLINE and unlock it in MEM_ONLINE. The
	 * blocking_notifier_call_chain() has its own lock, which gets unlocked
	 * between both notifier calls and will bail out. False positive.
	 */
	lockdep_off();

	switch (action) {
	case MEM_GOING_OFFLINE:
		mutex_lock(&vm->hotplug_mutex);
		if (vm->removing) {
			rc = notifier_from_errno(-EBUSY);
			mutex_unlock(&vm->hotplug_mutex);
			break;
		}
		vm->hotplug_active = true;
		if (vm->in_sbm)
			virtio_mem_sbm_notify_going_offline(vm, id);
		else
			virtio_mem_bbm_notify_going_offline(vm, id,
							    mhp->start_pfn,
							    mhp->nr_pages);
		break;
	case MEM_GOING_ONLINE:
		mutex_lock(&vm->hotplug_mutex);
		if (vm->removing) {
			rc = notifier_from_errno(-EBUSY);
			mutex_unlock(&vm->hotplug_mutex);
			break;
		}
		vm->hotplug_active = true;
		if (vm->in_sbm)
			rc = virtio_mem_sbm_notify_going_online(vm, id);
		break;
	case MEM_OFFLINE:
		if (vm->in_sbm)
			virtio_mem_sbm_notify_offline(vm, id);

		atomic64_add(size, &vm->offline_size);
		/*
		 * Trigger the workqueue. Now that we have some offline memory,
		 * maybe we can handle pending unplug requests.
		 */
		if (!unplug_online)
			virtio_mem_retry(vm);

		vm->hotplug_active = false;
		mutex_unlock(&vm->hotplug_mutex);
		break;
	case MEM_ONLINE:
		if (vm->in_sbm)
			virtio_mem_sbm_notify_online(vm, id);

		atomic64_sub(size, &vm->offline_size);
		/*
		 * Start adding more memory once we onlined half of our
		 * threshold. Don't trigger if it's possibly due to our action
		 * (e.g., us adding memory which gets onlined immediately from
		 * the core).
		 */
		if (!atomic_read(&vm->wq_active) &&
		    virtio_mem_could_add_memory(vm, vm->offline_threshold / 2))
			virtio_mem_retry(vm);

		vm->hotplug_active = false;
		mutex_unlock(&vm->hotplug_mutex);
		break;
	case MEM_CANCEL_OFFLINE:
		if (!vm->hotplug_active)
			break;
		if (vm->in_sbm)
			virtio_mem_sbm_notify_cancel_offline(vm, id);
		else
			virtio_mem_bbm_notify_cancel_offline(vm, id,
							     mhp->start_pfn,
							     mhp->nr_pages);
		vm->hotplug_active = false;
		mutex_unlock(&vm->hotplug_mutex);
		break;
	case MEM_CANCEL_ONLINE:
		if (!vm->hotplug_active)
			break;
		vm->hotplug_active = false;
		mutex_unlock(&vm->hotplug_mutex);
		break;
	default:
		break;
	}

	lockdep_on();

	return rc;
}

/*
 * Set a range of pages PG_offline. Remember pages that were never onlined
 * (via generic_online_page()) using PageDirty().
 */
static void virtio_mem_set_fake_offline(unsigned long pfn,
					unsigned long nr_pages, bool onlined)
{
	for (; nr_pages--; pfn++) {
		struct page *page = pfn_to_page(pfn);

		__SetPageOffline(page);
		if (!onlined) {
			SetPageDirty(page);
			/* FIXME: remove after cleanups */
			ClearPageReserved(page);
		}
	}
}

/*
 * Clear PG_offline from a range of pages. If the pages were never onlined,
 * (via generic_online_page()), clear PageDirty().
 */
static void virtio_mem_clear_fake_offline(unsigned long pfn,
					  unsigned long nr_pages, bool onlined)
{
	for (; nr_pages--; pfn++) {
		struct page *page = pfn_to_page(pfn);

		__ClearPageOffline(page);
		if (!onlined)
			ClearPageDirty(page);
	}
}

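/*
 * Note: PG_dirty is used purely as bookkeeping here. The pages are
 * PG_offline and not exposed to the buddy, so the flag is otherwise unused
 * and survives until the range is fake-onlined again.
 */
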
/*
 * Release a range of fake-offline pages to the buddy, effectively
 * fake-onlining them.
 */
static void virtio_mem_fake_online(unsigned long pfn, unsigned long nr_pages)
{
	const unsigned long max_nr_pages = MAX_ORDER_NR_PAGES;
	unsigned long i;

	/*
	 * We are always called at least with MAX_ORDER_NR_PAGES
	 * granularity/alignment (e.g., the way subblocks work). All pages
	 * inside such a block are alike.
	 */
	for (i = 0; i < nr_pages; i += max_nr_pages) {
		struct page *page = pfn_to_page(pfn + i);

		/*
		 * If the page is PageDirty(), it was kept fake-offline when
		 * onlining the memory block. Otherwise, it was allocated
		 * using alloc_contig_range(). All pages in a subblock are
		 * alike.
		 */
		if (PageDirty(page)) {
			virtio_mem_clear_fake_offline(pfn + i, max_nr_pages,
						      false);
			generic_online_page(page, MAX_ORDER - 1);
		} else {
			virtio_mem_clear_fake_offline(pfn + i, max_nr_pages,
						      true);
			free_contig_range(pfn + i, max_nr_pages);
			adjust_managed_page_count(page, max_nr_pages);
		}
	}
}

/*
 * Try to allocate a range, marking pages fake-offline, effectively
 * fake-offlining them.
 */
static int virtio_mem_fake_offline(unsigned long pfn, unsigned long nr_pages)
{
	const bool is_movable = zone_idx(page_zone(pfn_to_page(pfn))) ==
				ZONE_MOVABLE;
	int rc, retry_count;

	/*
	 * TODO: We want an alloc_contig_range() mode that tries to allocate
	 * harder (e.g., dealing with temporarily pinned pages, PCP), especially
	 * with ZONE_MOVABLE. So for now, retry a couple of times with
	 * ZONE_MOVABLE before giving up - because that zone is supposed to give
	 * some guarantees.
	 */
	for (retry_count = 0; retry_count < 5; retry_count++) {
		rc = alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_MOVABLE,
					GFP_KERNEL);
		if (rc == -ENOMEM)
			/* whoops, out of memory */
			return rc;
		else if (rc && !is_movable)
			break;
		else if (rc)
			continue;

		virtio_mem_set_fake_offline(pfn, nr_pages, true);
		adjust_managed_page_count(pfn_to_page(pfn), -nr_pages);
		return 0;
	}

	return -EBUSY;
}

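/*
 * Note: even in ZONE_MOVABLE, transient page references (e.g., short-term
 * pinning or per-cpu lists) can make alloc_contig_range() fail temporarily;
 * the bounded retry loop above papers over most such cases.
 */
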
/*
 * Handle fake-offline pages when memory is going offline - such that the
 * pages can be skipped by mm-core when offlining.
 */
static void virtio_mem_fake_offline_going_offline(unsigned long pfn,
						  unsigned long nr_pages)
{
	struct page *page;
	unsigned long i;

	/*
	 * Drop our reference to the pages so the memory can get offlined
	 * and add the unplugged pages to the managed page counters (so
	 * offlining code can correctly subtract them again).
	 */
	adjust_managed_page_count(pfn_to_page(pfn), nr_pages);
	for (i = 0; i < nr_pages; i++) {
		page = pfn_to_page(pfn + i);
		if (WARN_ON(!page_ref_dec_and_test(page)))
			dump_page(page, "fake-offline page referenced");
	}
}

/*
 * Handle fake-offline pages when memory offlining is canceled - to undo
 * what we did in virtio_mem_fake_offline_going_offline().
 */
static void virtio_mem_fake_offline_cancel_offline(unsigned long pfn,
						   unsigned long nr_pages)
{
	unsigned long i;

	/*
	 * Get the reference we dropped when going offline and subtract the
	 * unplugged pages from the managed page counters.
	 */
	adjust_managed_page_count(pfn_to_page(pfn), -nr_pages);
	for (i = 0; i < nr_pages; i++)
		page_ref_inc(pfn_to_page(pfn + i));
}

static void virtio_mem_online_page_cb(struct page *page, unsigned int order)
{
	const unsigned long addr = page_to_phys(page);
	unsigned long id, sb_id;
	struct virtio_mem *vm;
	bool do_online;

	rcu_read_lock();
	list_for_each_entry_rcu(vm, &virtio_mem_devices, next) {
		if (!virtio_mem_contains_range(vm, addr, PFN_PHYS(1 << order)))
			continue;

		if (vm->in_sbm) {
			/*
			 * We exploit here that subblocks have at least
			 * MAX_ORDER_NR_PAGES size/alignment - so we cannot
			 * cross subblocks within one call.
			 */
			id = virtio_mem_phys_to_mb_id(addr);
			sb_id = virtio_mem_phys_to_sb_id(vm, addr);
			do_online = virtio_mem_sbm_test_sb_plugged(vm, id,
								   sb_id, 1);
		} else {
			/*
			 * If the whole block is marked fake offline, keep
			 * everything that way.
			 */
			id = virtio_mem_phys_to_bb_id(vm, addr);
			do_online = virtio_mem_bbm_get_bb_state(vm, id) !=
				    VIRTIO_MEM_BBM_BB_FAKE_OFFLINE;
		}
		if (do_online)
			generic_online_page(page, order);
		else
			virtio_mem_set_fake_offline(PFN_DOWN(addr), 1 << order,
						    false);
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	/* not virtio-mem memory, but e.g., a DIMM. online it */
	generic_online_page(page, order);
}

static uint64_t virtio_mem_send_request(struct virtio_mem *vm,
					const struct virtio_mem_req *req)
{
	struct scatterlist *sgs[2], sg_req, sg_resp;
	unsigned int len;
	int rc;

	/* don't use the request residing on the stack (vaddr) */
	vm->req = *req;

	/* out: buffer for request */
	sg_init_one(&sg_req, &vm->req, sizeof(vm->req));
	sgs[0] = &sg_req;

	/* in: buffer for response */
	sg_init_one(&sg_resp, &vm->resp, sizeof(vm->resp));
	sgs[1] = &sg_resp;

	rc = virtqueue_add_sgs(vm->vq, sgs, 1, 1, vm, GFP_KERNEL);
	if (rc < 0)
		return rc;

	virtqueue_kick(vm->vq);

	/* wait for a response */
	wait_event(vm->host_resp, virtqueue_get_buf(vm->vq, &len));

	return virtio16_to_cpu(vm->vdev, vm->resp.type);
}

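/*
 * Request flow (for reference): each request is a single out/in descriptor
 * pair - the request in vm->req, the response in vm->resp - processed
 * synchronously; as there is only a single request/response buffer, at most
 * one request is ever in flight.
 */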
static int virtio_mem_send_plug_request(struct virtio_mem *vm, uint64_t addr,
					uint64_t size)
{
	const uint64_t nb_vm_blocks = size / vm->device_block_size;
	const struct virtio_mem_req req = {
		.type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_PLUG),
		.u.plug.addr = cpu_to_virtio64(vm->vdev, addr),
		.u.plug.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks),
	};
	int rc = -ENOMEM;

	if (atomic_read(&vm->config_changed))
		return -EAGAIN;

	dev_dbg(&vm->vdev->dev, "plugging memory: 0x%llx - 0x%llx\n", addr,
		addr + size - 1);

	switch (virtio_mem_send_request(vm, &req)) {
	case VIRTIO_MEM_RESP_ACK:
		vm->plugged_size += size;
		return 0;
	case VIRTIO_MEM_RESP_NACK:
		rc = -EAGAIN;
		break;
	case VIRTIO_MEM_RESP_BUSY:
		rc = -ETXTBSY;
		break;
	case VIRTIO_MEM_RESP_ERROR:
		rc = -EINVAL;
		break;
	default:
		break;
	}

	dev_dbg(&vm->vdev->dev, "plugging memory failed: %d\n", rc);
	return rc;
}

static int virtio_mem_send_unplug_request(struct virtio_mem *vm, uint64_t addr,
					  uint64_t size)
{
	const uint64_t nb_vm_blocks = size / vm->device_block_size;
	const struct virtio_mem_req req = {
		.type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_UNPLUG),
		.u.unplug.addr = cpu_to_virtio64(vm->vdev, addr),
		.u.unplug.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks),
	};
	int rc = -ENOMEM;

	if (atomic_read(&vm->config_changed))
		return -EAGAIN;

	dev_dbg(&vm->vdev->dev, "unplugging memory: 0x%llx - 0x%llx\n", addr,
		addr + size - 1);

	switch (virtio_mem_send_request(vm, &req)) {
	case VIRTIO_MEM_RESP_ACK:
		vm->plugged_size -= size;
		return 0;
	case VIRTIO_MEM_RESP_BUSY:
		rc = -ETXTBSY;
		break;
	case VIRTIO_MEM_RESP_ERROR:
		rc = -EINVAL;
		break;
	default:
		break;
	}

	dev_dbg(&vm->vdev->dev, "unplugging memory failed: %d\n", rc);
	return rc;
}

static int virtio_mem_send_unplug_all_request(struct virtio_mem *vm)
{
	const struct virtio_mem_req req = {
		.type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_UNPLUG_ALL),
	};
	int rc = -ENOMEM;

	dev_dbg(&vm->vdev->dev, "unplugging all memory\n");

	switch (virtio_mem_send_request(vm, &req)) {
	case VIRTIO_MEM_RESP_ACK:
		vm->unplug_all_required = false;
		vm->plugged_size = 0;
		/* usable region might have shrunk */
		atomic_set(&vm->config_changed, 1);
		return 0;
	case VIRTIO_MEM_RESP_BUSY:
		rc = -ETXTBSY;
		break;
	default:
		break;
	}

	dev_dbg(&vm->vdev->dev, "unplugging all memory failed: %d\n", rc);
	return rc;
}

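/*
 * Device response -> errno mapping used above (for reference):
 *	ACK -> 0, NACK -> -EAGAIN, BUSY -> -ETXTBSY, ERROR -> -EINVAL,
 *	anything else -> -ENOMEM (the initial value of rc).
 */
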
/*
 * Plug selected subblocks. Updates the plugged state, but not the state
 * of the memory block.
 */
static int virtio_mem_sbm_plug_sb(struct virtio_mem *vm, unsigned long mb_id,
				  int sb_id, int count)
{
	const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id) +
			      sb_id * vm->sbm.sb_size;
	const uint64_t size = count * vm->sbm.sb_size;
	int rc;

	rc = virtio_mem_send_plug_request(vm, addr, size);
	if (!rc)
		virtio_mem_sbm_set_sb_plugged(vm, mb_id, sb_id, count);
	return rc;
}

/*
 * Unplug selected subblocks. Updates the plugged state, but not the state
 * of the memory block.
 */
static int virtio_mem_sbm_unplug_sb(struct virtio_mem *vm, unsigned long mb_id,
				    int sb_id, int count)
{
	const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id) +
			      sb_id * vm->sbm.sb_size;
	const uint64_t size = count * vm->sbm.sb_size;
	int rc;

	rc = virtio_mem_send_unplug_request(vm, addr, size);
	if (!rc)
		virtio_mem_sbm_set_sb_unplugged(vm, mb_id, sb_id, count);
	return rc;
}

/*
 * Request to unplug a big block.
 *
 * Will not modify the state of the big block.
 */
static int virtio_mem_bbm_unplug_bb(struct virtio_mem *vm, unsigned long bb_id)
{
	const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
	const uint64_t size = vm->bbm.bb_size;

	return virtio_mem_send_unplug_request(vm, addr, size);
}

/*
 * Request to plug a big block.
 *
 * Will not modify the state of the big block.
 */
static int virtio_mem_bbm_plug_bb(struct virtio_mem *vm, unsigned long bb_id)
{
	const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
	const uint64_t size = vm->bbm.bb_size;

	return virtio_mem_send_plug_request(vm, addr, size);
}

/*
 * Unplug the desired number of plugged subblocks of an offline or not-added
 * memory block. Will fail if any subblock cannot get unplugged (instead of
 * skipping it).
 *
 * Will not modify the state of the memory block.
 *
 * Note: can fail after some subblocks were unplugged.
 */
static int virtio_mem_sbm_unplug_any_sb(struct virtio_mem *vm,
					unsigned long mb_id, uint64_t *nb_sb)
{
	int sb_id, count;
	int rc;

	sb_id = vm->sbm.sbs_per_mb - 1;
	while (*nb_sb) {
		/* Find the next candidate subblock */
		while (sb_id >= 0 &&
		       virtio_mem_sbm_test_sb_unplugged(vm, mb_id, sb_id, 1))
			sb_id--;
		if (sb_id < 0)
			break;
		/* Try to unplug multiple subblocks at a time */
		count = 1;
		while (count < *nb_sb && sb_id > 0 &&
		       virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id - 1, 1)) {
			count++;
			sb_id--;
		}

		rc = virtio_mem_sbm_unplug_sb(vm, mb_id, sb_id, count);
		if (rc)
			return rc;
		*nb_sb -= count;
		sb_id--;
	}

	return 0;
}

/*
 * Unplug all plugged subblocks of an offline or not-added memory block.
 *
 * Will not modify the state of the memory block.
 *
 * Note: can fail after some subblocks were unplugged.
 */
static int virtio_mem_sbm_unplug_mb(struct virtio_mem *vm, unsigned long mb_id)
{
	uint64_t nb_sb = vm->sbm.sbs_per_mb;

	return virtio_mem_sbm_unplug_any_sb(vm, mb_id, &nb_sb);
}

/*
 * Prepare tracking data for the next memory block.
 */
static int virtio_mem_sbm_prepare_next_mb(struct virtio_mem *vm,
					  unsigned long *mb_id)
{
	int rc;

	if (vm->sbm.next_mb_id > vm->sbm.last_usable_mb_id)
		return -ENOSPC;

	/* Resize the state array if required. */
	rc = virtio_mem_sbm_mb_states_prepare_next_mb(vm);
	if (rc)
		return rc;

	/* Resize the subblock bitmap if required. */
	rc = virtio_mem_sbm_sb_states_prepare_next_mb(vm);
	if (rc)
		return rc;

	vm->sbm.mb_count[VIRTIO_MEM_SBM_MB_UNUSED]++;
	*mb_id = vm->sbm.next_mb_id++;
	return 0;
}

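/*
 * Example: preparing the first memory block allocates a single 4 KiB page
 * of states - enough for 4096 memory blocks (512 GiB with 128 MiB blocks);
 * later calls only reallocate once another page is actually needed.
 */
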
/*
 * Try to plug the desired number of subblocks and add the memory block
 * to Linux.
 *
 * Will modify the state of the memory block.
 */
static int virtio_mem_sbm_plug_and_add_mb(struct virtio_mem *vm,
					  unsigned long mb_id, uint64_t *nb_sb)
{
	const int count = min_t(int, *nb_sb, vm->sbm.sbs_per_mb);
	int rc;

	if (WARN_ON_ONCE(!count))
		return -EINVAL;

	/*
	 * Plug the requested number of subblocks before adding it to Linux,
	 * so that onlining will directly online all plugged subblocks.
	 */
	rc = virtio_mem_sbm_plug_sb(vm, mb_id, 0, count);
	if (rc)
		return rc;

	/*
	 * Mark the block properly offline before adding it to Linux,
	 * so the memory notifiers will find the block in the right state.
	 */
	if (count == vm->sbm.sbs_per_mb)
		virtio_mem_sbm_set_mb_state(vm, mb_id,
					    VIRTIO_MEM_SBM_MB_OFFLINE);
	else
		virtio_mem_sbm_set_mb_state(vm, mb_id,
					    VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL);

	/* Add the memory block to Linux - if that fails, try to unplug. */
	rc = virtio_mem_sbm_add_mb(vm, mb_id);
	if (rc) {
		int new_state = VIRTIO_MEM_SBM_MB_UNUSED;

		if (virtio_mem_sbm_unplug_sb(vm, mb_id, 0, count))
			new_state = VIRTIO_MEM_SBM_MB_PLUGGED;
		virtio_mem_sbm_set_mb_state(vm, mb_id, new_state);
		return rc;
	}

	*nb_sb -= count;
	return 0;
}

/*
 * Try to plug the desired number of subblocks of a memory block that
 * is already added to Linux.
 *
 * Will modify the state of the memory block.
 *
 * Note: Can fail after some subblocks were successfully plugged.
 */
static int virtio_mem_sbm_plug_any_sb(struct virtio_mem *vm,
				      unsigned long mb_id, uint64_t *nb_sb,
				      bool online)
{
	unsigned long pfn, nr_pages;
	int sb_id, count;
	int rc;

	if (WARN_ON_ONCE(!*nb_sb))
		return -EINVAL;

	while (*nb_sb) {
		sb_id = virtio_mem_sbm_first_unplugged_sb(vm, mb_id);
		if (sb_id >= vm->sbm.sbs_per_mb)
			break;
		count = 1;
		while (count < *nb_sb &&
		       sb_id + count < vm->sbm.sbs_per_mb &&
		       !virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id + count, 1))
			count++;

		rc = virtio_mem_sbm_plug_sb(vm, mb_id, sb_id, count);
		if (rc)
			return rc;
		*nb_sb -= count;
		if (!online)
			continue;

		/* fake-online the pages if the memory block is online */
		pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
			       sb_id * vm->sbm.sb_size);
		nr_pages = PFN_DOWN(count * vm->sbm.sb_size);
		virtio_mem_fake_online(pfn, nr_pages);
	}

	if (virtio_mem_sbm_test_sb_plugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) {
		if (online)
			virtio_mem_sbm_set_mb_state(vm, mb_id,
						    VIRTIO_MEM_SBM_MB_ONLINE);
		else
			virtio_mem_sbm_set_mb_state(vm, mb_id,
						    VIRTIO_MEM_SBM_MB_OFFLINE);
	}

	return 0;
}

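/*
 * Plug order used below: (1) partially plugged online blocks, (2) partially
 * plugged offline blocks, (3) unused blocks that are already prepared, (4)
 * freshly prepared blocks - roughly cheapest to most expensive.
 */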
static int virtio_mem_sbm_plug_request(struct virtio_mem *vm, uint64_t diff)
{
	uint64_t nb_sb = diff / vm->sbm.sb_size;
	unsigned long mb_id;
	int rc;

	if (!nb_sb)
		return 0;

	/* Don't race with onlining/offlining */
	mutex_lock(&vm->hotplug_mutex);

	/* Try to plug subblocks of partially plugged online blocks. */
	virtio_mem_sbm_for_each_mb(vm, mb_id,
				   VIRTIO_MEM_SBM_MB_ONLINE_PARTIAL) {
		rc = virtio_mem_sbm_plug_any_sb(vm, mb_id, &nb_sb, true);
		if (rc || !nb_sb)
			goto out_unlock;
		cond_resched();
	}

	/* Try to plug subblocks of partially plugged offline blocks. */
	virtio_mem_sbm_for_each_mb(vm, mb_id,
				   VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL) {
		rc = virtio_mem_sbm_plug_any_sb(vm, mb_id, &nb_sb, false);
		if (rc || !nb_sb)
			goto out_unlock;
		cond_resched();
	}

	/*
	 * We won't be working on online/offline memory blocks from this point,
	 * so we can't race with memory onlining/offlining. Drop the mutex.
	 */
	mutex_unlock(&vm->hotplug_mutex);

	/* Try to plug and add unused blocks */
	virtio_mem_sbm_for_each_mb(vm, mb_id, VIRTIO_MEM_SBM_MB_UNUSED) {
		if (!virtio_mem_could_add_memory(vm, memory_block_size_bytes()))
			return -ENOSPC;

		rc = virtio_mem_sbm_plug_and_add_mb(vm, mb_id, &nb_sb);
		if (rc || !nb_sb)
			return rc;
		cond_resched();
	}

	/* Try to prepare, plug and add new blocks */
	while (nb_sb) {
		if (!virtio_mem_could_add_memory(vm, memory_block_size_bytes()))
			return -ENOSPC;

		rc = virtio_mem_sbm_prepare_next_mb(vm, &mb_id);
		if (rc)
			return rc;
		rc = virtio_mem_sbm_plug_and_add_mb(vm, mb_id, &nb_sb);
		if (rc)
			return rc;
		cond_resched();
	}

	return 0;
out_unlock:
	mutex_unlock(&vm->hotplug_mutex);
	return rc;
}

/*
 * Plug a big block and add it to Linux.
 *
 * Will modify the state of the big block.
 */
static int virtio_mem_bbm_plug_and_add_bb(struct virtio_mem *vm,
					  unsigned long bb_id)
{
	int rc;

	if (WARN_ON_ONCE(virtio_mem_bbm_get_bb_state(vm, bb_id) !=
			 VIRTIO_MEM_BBM_BB_UNUSED))
		return -EINVAL;

	rc = virtio_mem_bbm_plug_bb(vm, bb_id);
	if (rc)
		return rc;
	virtio_mem_bbm_set_bb_state(vm, bb_id, VIRTIO_MEM_BBM_BB_ADDED);

	rc = virtio_mem_bbm_add_bb(vm, bb_id);
	if (rc) {
		if (!virtio_mem_bbm_unplug_bb(vm, bb_id))
			virtio_mem_bbm_set_bb_state(vm, bb_id,
						    VIRTIO_MEM_BBM_BB_UNUSED);
		else
			/* Retry from the main loop. */
			virtio_mem_bbm_set_bb_state(vm, bb_id,
						    VIRTIO_MEM_BBM_BB_PLUGGED);
		return rc;
	}
	return 0;
}

/*
 * Prepare tracking data for the next big block.
 */
static int virtio_mem_bbm_prepare_next_bb(struct virtio_mem *vm,
					  unsigned long *bb_id)
{
	int rc;

	if (vm->bbm.next_bb_id > vm->bbm.last_usable_bb_id)
		return -ENOSPC;

	/* Resize the big block state array if required. */
	rc = virtio_mem_bbm_bb_states_prepare_next_bb(vm);
	if (rc)
		return rc;

	vm->bbm.bb_count[VIRTIO_MEM_BBM_BB_UNUSED]++;
	*bb_id = vm->bbm.next_bb_id;
	vm->bbm.next_bb_id++;
	return 0;
}

static int virtio_mem_bbm_plug_request(struct virtio_mem *vm, uint64_t diff)
{
	uint64_t nb_bb = diff / vm->bbm.bb_size;
	unsigned long bb_id;
	int rc;

	if (!nb_bb)
		return 0;

	/* Try to plug and add unused big blocks */
	virtio_mem_bbm_for_each_bb(vm, bb_id, VIRTIO_MEM_BBM_BB_UNUSED) {
		if (!virtio_mem_could_add_memory(vm, vm->bbm.bb_size))
			return -ENOSPC;

		rc = virtio_mem_bbm_plug_and_add_bb(vm, bb_id);
		if (!rc)
			nb_bb--;
		if (rc || !nb_bb)
			return rc;
		cond_resched();
	}

	/* Try to prepare, plug and add new big blocks */
	while (nb_bb) {
		if (!virtio_mem_could_add_memory(vm, vm->bbm.bb_size))
			return -ENOSPC;

		rc = virtio_mem_bbm_prepare_next_bb(vm, &bb_id);
		if (rc)
			return rc;
		rc = virtio_mem_bbm_plug_and_add_bb(vm, bb_id);
		if (!rc)
			nb_bb--;
		if (rc)
			return rc;
		cond_resched();
	}

	return 0;
}

/*
 * Try to plug the requested amount of memory.
 */
static int virtio_mem_plug_request(struct virtio_mem *vm, uint64_t diff)
{
	if (vm->in_sbm)
		return virtio_mem_sbm_plug_request(vm, diff);
	return virtio_mem_bbm_plug_request(vm, diff);
}

/*
 * Unplug the desired number of plugged subblocks of an offline memory block.
 * Will fail if any subblock cannot get unplugged (instead of skipping it).
 *
 * Will modify the state of the memory block. Might temporarily drop the
 * hotplug_mutex.
 *
 * Note: Can fail after some subblocks were successfully unplugged.
 */
static int virtio_mem_sbm_unplug_any_sb_offline(struct virtio_mem *vm,
						unsigned long mb_id,
						uint64_t *nb_sb)
{
	int rc;

	rc = virtio_mem_sbm_unplug_any_sb(vm, mb_id, nb_sb);

	/* some subblocks might have been unplugged even on failure */
	if (!virtio_mem_sbm_test_sb_plugged(vm, mb_id, 0, vm->sbm.sbs_per_mb))
		virtio_mem_sbm_set_mb_state(vm, mb_id,
					    VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL);
	if (rc)
		return rc;

	if (virtio_mem_sbm_test_sb_unplugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) {
		/*
		 * Remove the block from Linux - this should never fail.
		 * Hinder the block from getting onlined by marking it
		 * unplugged. Temporarily drop the mutex, so
		 * any pending GOING_ONLINE requests can be serviced/rejected.
		 */
		virtio_mem_sbm_set_mb_state(vm, mb_id,
					    VIRTIO_MEM_SBM_MB_UNUSED);

		mutex_unlock(&vm->hotplug_mutex);
		rc = virtio_mem_sbm_remove_mb(vm, mb_id);
		BUG_ON(rc);
		mutex_lock(&vm->hotplug_mutex);
	}
	return 0;
}

/*
 * Unplug the given plugged subblocks of an online memory block.
 *
 * Will modify the state of the memory block.
 */
static int virtio_mem_sbm_unplug_sb_online(struct virtio_mem *vm,
					   unsigned long mb_id, int sb_id,
					   int count)
{
	const unsigned long nr_pages = PFN_DOWN(vm->sbm.sb_size) * count;
	unsigned long start_pfn;
	int rc;

	start_pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
			     sb_id * vm->sbm.sb_size);

	rc = virtio_mem_fake_offline(start_pfn, nr_pages);
	if (rc)
		return rc;

	/* Try to unplug the allocated memory */
	rc = virtio_mem_sbm_unplug_sb(vm, mb_id, sb_id, count);
	if (rc) {
		/* Return the memory to the buddy. */
		virtio_mem_fake_online(start_pfn, nr_pages);
		return rc;
	}

	virtio_mem_sbm_set_mb_state(vm, mb_id,
				    VIRTIO_MEM_SBM_MB_ONLINE_PARTIAL);
	return 0;
}

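/*
 * Note the ordering above: memory is first taken out of the buddy by
 * fake-offlining it; only once the device acknowledges the unplug do we
 * keep it, otherwise the range is handed straight back to the buddy.
 */
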
/*
 * Unplug the desired number of plugged subblocks of an online memory block.
 * Will skip subblocks that are busy.
 *
 * Will modify the state of the memory block. Might temporarily drop the
 * hotplug_mutex.
 *
 * Note: Can fail after some subblocks were successfully unplugged. Can
 *	 return 0 even if subblocks were busy and could not get unplugged.
 */
static int virtio_mem_sbm_unplug_any_sb_online(struct virtio_mem *vm,
					       unsigned long mb_id,
					       uint64_t *nb_sb)
{
	int rc, sb_id;

	/* If possible, try to unplug the complete block in one shot. */
	if (*nb_sb >= vm->sbm.sbs_per_mb &&
	    virtio_mem_sbm_test_sb_plugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) {
		rc = virtio_mem_sbm_unplug_sb_online(vm, mb_id, 0,
						     vm->sbm.sbs_per_mb);
		if (!rc) {
			*nb_sb -= vm->sbm.sbs_per_mb;
			goto unplugged;
		} else if (rc != -EBUSY)
			return rc;
	}

	/* Fallback to single subblocks. */
	for (sb_id = vm->sbm.sbs_per_mb - 1; sb_id >= 0 && *nb_sb; sb_id--) {
		/* Find the next candidate subblock */
		while (sb_id >= 0 &&
		       !virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id, 1))
			sb_id--;
		if (sb_id < 0)
			break;

		rc = virtio_mem_sbm_unplug_sb_online(vm, mb_id, sb_id, 1);
		if (rc == -EBUSY)
			continue;
		else if (rc)
			return rc;
		*nb_sb -= 1;
	}

unplugged:
	/*
	 * Once all subblocks of a memory block were unplugged, offline and
	 * remove it. This will usually not fail, as no memory is in use
	 * anymore - however some other notifiers might NACK the request.
	 */
	if (virtio_mem_sbm_test_sb_unplugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) {
		mutex_unlock(&vm->hotplug_mutex);
		rc = virtio_mem_sbm_offline_and_remove_mb(vm, mb_id);
		mutex_lock(&vm->hotplug_mutex);
		if (!rc)
			virtio_mem_sbm_set_mb_state(vm, mb_id,
						    VIRTIO_MEM_SBM_MB_UNUSED);
	}

	return 0;
}

static int virtio_mem_sbm_unplug_request(struct virtio_mem *vm, uint64_t diff)
{
	uint64_t nb_sb = diff / vm->sbm.sb_size;
	unsigned long mb_id;
	int rc;

	if (!nb_sb)
		return 0;

	/*
	 * We'll drop the mutex a couple of times when it is safe to do so.
	 * This might result in some blocks switching state (online/offline)
	 * and we could miss them in this run - we will retry again later.
	 */
	mutex_lock(&vm->hotplug_mutex);

	/* Try to unplug subblocks of partially plugged offline blocks. */
	virtio_mem_sbm_for_each_mb_rev(vm, mb_id,
				       VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL) {
		rc = virtio_mem_sbm_unplug_any_sb_offline(vm, mb_id, &nb_sb);
		if (rc || !nb_sb)
			goto out_unlock;
		cond_resched();
	}

	/* Try to unplug subblocks of plugged offline blocks. */
	virtio_mem_sbm_for_each_mb_rev(vm, mb_id, VIRTIO_MEM_SBM_MB_OFFLINE) {
		rc = virtio_mem_sbm_unplug_any_sb_offline(vm, mb_id, &nb_sb);
		if (rc || !nb_sb)
			goto out_unlock;
		cond_resched();
	}

	if (!unplug_online) {
		mutex_unlock(&vm->hotplug_mutex);
		return 0;
	}

	/* Try to unplug subblocks of partially plugged online blocks. */
	virtio_mem_sbm_for_each_mb_rev(vm, mb_id,
				       VIRTIO_MEM_SBM_MB_ONLINE_PARTIAL) {
		rc = virtio_mem_sbm_unplug_any_sb_online(vm, mb_id, &nb_sb);
		if (rc || !nb_sb)
			goto out_unlock;
		mutex_unlock(&vm->hotplug_mutex);
		cond_resched();
		mutex_lock(&vm->hotplug_mutex);
	}

	/* Try to unplug subblocks of plugged online blocks. */
	virtio_mem_sbm_for_each_mb_rev(vm, mb_id, VIRTIO_MEM_SBM_MB_ONLINE) {
		rc = virtio_mem_sbm_unplug_any_sb_online(vm, mb_id, &nb_sb);
		if (rc || !nb_sb)
			goto out_unlock;
		mutex_unlock(&vm->hotplug_mutex);
		cond_resched();
		mutex_lock(&vm->hotplug_mutex);
	}

	mutex_unlock(&vm->hotplug_mutex);
	return nb_sb ? -EBUSY : 0;
out_unlock:
	mutex_unlock(&vm->hotplug_mutex);
	return rc;
}

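/*
 * Unplug order used above: partially plugged offline blocks first, then
 * fully plugged offline blocks, then (only with unplug_online set) online
 * blocks - touching online memory only as a last resort.
 */
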

/*
 * Try to offline and remove a big block from Linux and unplug it. Will fail
 * with -EBUSY if some memory is busy and cannot get unplugged.
 *
 * Will modify the state of the big block. Might temporarily drop the
 * hotplug_mutex.
 */
static int virtio_mem_bbm_offline_remove_and_unplug_bb(struct virtio_mem *vm,
						       unsigned long bb_id)
{
	const unsigned long start_pfn = PFN_DOWN(virtio_mem_bb_id_to_phys(vm, bb_id));
	const unsigned long nr_pages = PFN_DOWN(vm->bbm.bb_size);
	unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long pfn;
	struct page *page;
	int rc;

	if (WARN_ON_ONCE(virtio_mem_bbm_get_bb_state(vm, bb_id) !=
			 VIRTIO_MEM_BBM_BB_ADDED))
		return -EINVAL;

	if (bbm_safe_unplug) {
		/*
		 * Start by fake-offlining all memory. Once we've marked the
		 * device block as fake-offline, all newly onlined memory will
		 * automatically be kept fake-offline. Protect from concurrent
		 * onlining/offlining until we have a consistent state.
		 */
		mutex_lock(&vm->hotplug_mutex);
		virtio_mem_bbm_set_bb_state(vm, bb_id,
					    VIRTIO_MEM_BBM_BB_FAKE_OFFLINE);

		for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
			page = pfn_to_online_page(pfn);
			if (!page)
				continue;

			rc = virtio_mem_fake_offline(pfn, PAGES_PER_SECTION);
			if (rc) {
				end_pfn = pfn;
				goto rollback_safe_unplug;
			}
		}
		mutex_unlock(&vm->hotplug_mutex);
	}

	rc = virtio_mem_bbm_offline_and_remove_bb(vm, bb_id);
	if (rc) {
		if (bbm_safe_unplug) {
			mutex_lock(&vm->hotplug_mutex);
			goto rollback_safe_unplug;
		}
		return rc;
	}

	rc = virtio_mem_bbm_unplug_bb(vm, bb_id);
	if (rc)
		virtio_mem_bbm_set_bb_state(vm, bb_id,
					    VIRTIO_MEM_BBM_BB_PLUGGED);
	else
		virtio_mem_bbm_set_bb_state(vm, bb_id,
					    VIRTIO_MEM_BBM_BB_UNUSED);
	return rc;

rollback_safe_unplug:
	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		page = pfn_to_online_page(pfn);
		if (!page)
			continue;
		virtio_mem_fake_online(pfn, PAGES_PER_SECTION);
	}
	virtio_mem_bbm_set_bb_state(vm, bb_id, VIRTIO_MEM_BBM_BB_ADDED);
	mutex_unlock(&vm->hotplug_mutex);
	return rc;
}

/*
 * Try to remove a big block from Linux and unplug it. Will fail with
 * -EBUSY if some memory is online.
 *
 * Will modify the state of the big block.
 */
static int virtio_mem_bbm_remove_and_unplug_bb(struct virtio_mem *vm,
					       unsigned long bb_id)
{
	int rc;

	if (WARN_ON_ONCE(virtio_mem_bbm_get_bb_state(vm, bb_id) !=
			 VIRTIO_MEM_BBM_BB_ADDED))
		return -EINVAL;

	rc = virtio_mem_bbm_remove_bb(vm, bb_id);
	if (rc)
		return -EBUSY;

	rc = virtio_mem_bbm_unplug_bb(vm, bb_id);
	if (rc)
		virtio_mem_bbm_set_bb_state(vm, bb_id,
					    VIRTIO_MEM_BBM_BB_PLUGGED);
	else
		virtio_mem_bbm_set_bb_state(vm, bb_id,
					    VIRTIO_MEM_BBM_BB_UNUSED);
	return rc;
}
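
/*
 * Summary of the big block state transitions implemented above (derived
 * from the code, for illustration only):
 *
 *	ADDED --fake-offline all sections--> FAKE_OFFLINE
 *	FAKE_OFFLINE --offline_and_remove + unplug ok--> UNUSED
 *	FAKE_OFFLINE --unplug failed--> PLUGGED
 *	FAKE_OFFLINE --offline/remove failed--> ADDED (rollback, fake-online)
 */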

/*
 * Test if a big block is completely offline.
 */
static bool virtio_mem_bbm_bb_is_offline(struct virtio_mem *vm,
					 unsigned long bb_id)
{
	const unsigned long start_pfn = PFN_DOWN(virtio_mem_bb_id_to_phys(vm, bb_id));
	const unsigned long nr_pages = PFN_DOWN(vm->bbm.bb_size);
	unsigned long pfn;

	for (pfn = start_pfn; pfn < start_pfn + nr_pages;
	     pfn += PAGES_PER_SECTION) {
		if (pfn_to_online_page(pfn))
			return false;
	}

	return true;
}

static int virtio_mem_bbm_unplug_request(struct virtio_mem *vm, uint64_t diff)
{
	uint64_t nb_bb = diff / vm->bbm.bb_size;
	uint64_t bb_id;
	int rc;

	if (!nb_bb)
		return 0;

	/* Try to unplug completely offline big blocks first. */
	virtio_mem_bbm_for_each_bb_rev(vm, bb_id, VIRTIO_MEM_BBM_BB_ADDED) {
		cond_resched();
		/*
		 * As we're holding no locks, this check is racy as memory
		 * can get onlined in the meantime - but we'll fail gracefully.
		 */
		if (!virtio_mem_bbm_bb_is_offline(vm, bb_id))
			continue;
		rc = virtio_mem_bbm_remove_and_unplug_bb(vm, bb_id);
		if (rc == -EBUSY)
			continue;
		if (!rc)
			nb_bb--;
		if (rc || !nb_bb)
			return rc;
	}

	if (!unplug_online)
		return 0;

	/* Try to unplug any big blocks. */
	virtio_mem_bbm_for_each_bb_rev(vm, bb_id, VIRTIO_MEM_BBM_BB_ADDED) {
		cond_resched();
		rc = virtio_mem_bbm_offline_remove_and_unplug_bb(vm, bb_id);
		if (rc == -EBUSY)
			continue;
		if (!rc)
			nb_bb--;
		if (rc || !nb_bb)
			return rc;
	}

	return nb_bb ? -EBUSY : 0;
}

/*
 * Try to unplug the requested amount of memory.
 */
static int virtio_mem_unplug_request(struct virtio_mem *vm, uint64_t diff)
{
	if (vm->in_sbm)
		return virtio_mem_sbm_unplug_request(vm, diff);
	return virtio_mem_bbm_unplug_request(vm, diff);
}

/*
 * Try to unplug all blocks that couldn't be unplugged before, for example,
 * because the hypervisor was busy.
 */
static int virtio_mem_unplug_pending_mb(struct virtio_mem *vm)
{
	unsigned long id;
	int rc;

	if (!vm->in_sbm) {
		virtio_mem_bbm_for_each_bb(vm, id,
					   VIRTIO_MEM_BBM_BB_PLUGGED) {
			rc = virtio_mem_bbm_unplug_bb(vm, id);
			if (rc)
				return rc;
			virtio_mem_bbm_set_bb_state(vm, id,
						    VIRTIO_MEM_BBM_BB_UNUSED);
		}
		return 0;
	}

	virtio_mem_sbm_for_each_mb(vm, id, VIRTIO_MEM_SBM_MB_PLUGGED) {
		rc = virtio_mem_sbm_unplug_mb(vm, id);
		if (rc)
			return rc;
		virtio_mem_sbm_set_mb_state(vm, id,
					    VIRTIO_MEM_SBM_MB_UNUSED);
	}

	return 0;
}
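
/*
 * Worked example (illustrative only): with a 2 GiB big block size, an
 * unplug request of diff = 4 GiB yields nb_bb = 2. The first pass above
 * only takes big blocks that are already completely offline; busy blocks
 * are skipped (-EBUSY) and, if unplug_online is set, retried via
 * fake-offlining in the second pass.
 */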

/*
 * Update all parts of the config that could have changed.
 */
static void virtio_mem_refresh_config(struct virtio_mem *vm)
{
	const struct range pluggable_range = mhp_get_pluggable_range(true);
	uint64_t new_plugged_size, usable_region_size, end_addr;

	/* the plugged_size is just a reflection of what _we_ did previously */
	virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
			&new_plugged_size);
	if (WARN_ON_ONCE(new_plugged_size != vm->plugged_size))
		vm->plugged_size = new_plugged_size;

	/* calculate the last usable memory block id */
	virtio_cread_le(vm->vdev, struct virtio_mem_config,
			usable_region_size, &usable_region_size);
	end_addr = min(vm->addr + usable_region_size - 1,
		       pluggable_range.end);

	if (vm->in_sbm) {
		vm->sbm.last_usable_mb_id = virtio_mem_phys_to_mb_id(end_addr);
		if (!IS_ALIGNED(end_addr + 1, memory_block_size_bytes()))
			vm->sbm.last_usable_mb_id--;
	} else {
		vm->bbm.last_usable_bb_id = virtio_mem_phys_to_bb_id(vm,
								     end_addr);
		if (!IS_ALIGNED(end_addr + 1, vm->bbm.bb_size))
			vm->bbm.last_usable_bb_id--;
	}
	/*
	 * If we cannot plug any of our device memory (e.g., nothing in the
	 * usable region is addressable), the last usable memory block id will
	 * be smaller than the first usable memory block id. We'll stop
	 * attempting to add memory with -ENOSPC from our main loop.
	 */

	/* see if there is a request to change the size */
	virtio_cread_le(vm->vdev, struct virtio_mem_config, requested_size,
			&vm->requested_size);

	dev_info(&vm->vdev->dev, "plugged size: 0x%llx", vm->plugged_size);
	dev_info(&vm->vdev->dev, "requested size: 0x%llx", vm->requested_size);
}
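
/*
 * Worked example (illustrative only, assuming 128 MiB Linux memory blocks
 * and mb_id = addr / memory_block_size_bytes()): with vm->addr =
 * 0x100000000 and usable_region_size = 0x30000000 (768 MiB),
 *	end_addr = 0x12fffffff
 * end_addr + 1 is 128 MiB aligned, so last_usable_mb_id stays at
 * 0x12fffffff / 0x8000000 = 37. A usable_region_size of, say, 800 MiB
 * would leave a trailing partial block and decrement the id instead.
 */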

/*
 * Workqueue function for handling plug/unplug requests and config updates.
 */
static void virtio_mem_run_wq(struct work_struct *work)
{
	struct virtio_mem *vm = container_of(work, struct virtio_mem, wq);
	uint64_t diff;
	int rc;

	hrtimer_cancel(&vm->retry_timer);

	if (vm->broken)
		return;

	atomic_set(&vm->wq_active, 1);
retry:
	rc = 0;

	/* Make sure we start with a clean state if there are leftovers. */
	if (unlikely(vm->unplug_all_required))
		rc = virtio_mem_send_unplug_all_request(vm);

	if (atomic_read(&vm->config_changed)) {
		atomic_set(&vm->config_changed, 0);
		virtio_mem_refresh_config(vm);
	}

	/* Unplug any leftovers from previous runs */
	if (!rc)
		rc = virtio_mem_unplug_pending_mb(vm);

	if (!rc && vm->requested_size != vm->plugged_size) {
		if (vm->requested_size > vm->plugged_size) {
			diff = vm->requested_size - vm->plugged_size;
			rc = virtio_mem_plug_request(vm, diff);
		} else {
			diff = vm->plugged_size - vm->requested_size;
			rc = virtio_mem_unplug_request(vm, diff);
		}
	}

	switch (rc) {
	case 0:
		vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS;
		break;
	case -ENOSPC:
		/*
		 * We cannot add any more memory (alignment, physical limit)
		 * or we have too many offline memory blocks.
		 */
		break;
	case -ETXTBSY:
		/*
		 * The hypervisor cannot process our request right now
		 * (e.g., out of memory, migrating).
		 */
	case -EBUSY:
		/*
		 * We cannot free up any memory to unplug it (all plugged
		 * memory is busy).
		 */
	case -ENOMEM:
		/* Out of memory, try again later. */
		hrtimer_start(&vm->retry_timer, ms_to_ktime(vm->retry_timer_ms),
			      HRTIMER_MODE_REL);
		break;
	case -EAGAIN:
		/* Retry immediately (e.g., the config changed). */
		goto retry;
	default:
		/* Unknown error, mark as broken */
		dev_err(&vm->vdev->dev,
			"unknown error, marking device broken: %d\n", rc);
		vm->broken = true;
	}

	atomic_set(&vm->wq_active, 0);
}

static enum hrtimer_restart virtio_mem_timer_expired(struct hrtimer *timer)
{
	struct virtio_mem *vm = container_of(timer, struct virtio_mem,
					     retry_timer);

	virtio_mem_retry(vm);
	vm->retry_timer_ms = min_t(unsigned int, vm->retry_timer_ms * 2,
				   VIRTIO_MEM_RETRY_TIMER_MAX_MS);
	return HRTIMER_NORESTART;
}

static void virtio_mem_handle_response(struct virtqueue *vq)
{
	struct virtio_mem *vm = vq->vdev->priv;

	wake_up(&vm->host_resp);
}

static int virtio_mem_init_vq(struct virtio_mem *vm)
{
	struct virtqueue *vq;

	vq = virtio_find_single_vq(vm->vdev, virtio_mem_handle_response,
				   "guest-request");
	if (IS_ERR(vq))
		return PTR_ERR(vq);
	vm->vq = vq;

	return 0;
}

static int virtio_mem_init(struct virtio_mem *vm)
{
	const struct range pluggable_range = mhp_get_pluggable_range(true);
	uint64_t sb_size, addr;
	uint16_t node_id;

	if (!vm->vdev->config->get) {
		dev_err(&vm->vdev->dev, "config access disabled\n");
		return -EINVAL;
	}

	/*
	 * We don't want to (un)plug or reuse any memory when in kdump. The
	 * memory is still accessible (but not mapped).
	 */
	if (is_kdump_kernel()) {
		dev_warn(&vm->vdev->dev, "disabled in kdump kernel\n");
		return -EBUSY;
	}

	/* Fetch all properties that can't change. */
	virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
			&vm->plugged_size);
	virtio_cread_le(vm->vdev, struct virtio_mem_config, block_size,
			&vm->device_block_size);
	virtio_cread_le(vm->vdev, struct virtio_mem_config, node_id,
			&node_id);
	vm->nid = virtio_mem_translate_node_id(vm, node_id);
	virtio_cread_le(vm->vdev, struct virtio_mem_config, addr, &vm->addr);
	virtio_cread_le(vm->vdev, struct virtio_mem_config, region_size,
			&vm->region_size);

	/* Determine the nid for the device based on the lowest address. */
	if (vm->nid == NUMA_NO_NODE)
		vm->nid = memory_add_physaddr_to_nid(vm->addr);

	/* bad device setup - warn only */
	if (!IS_ALIGNED(vm->addr, memory_block_size_bytes()))
		dev_warn(&vm->vdev->dev,
			 "The alignment of the physical start address can make some memory unusable.\n");
	if (!IS_ALIGNED(vm->addr + vm->region_size, memory_block_size_bytes()))
		dev_warn(&vm->vdev->dev,
			 "The alignment of the physical end address can make some memory unusable.\n");
	if (vm->addr < pluggable_range.start ||
	    vm->addr + vm->region_size - 1 > pluggable_range.end)
		dev_warn(&vm->vdev->dev,
			 "Some device memory is not addressable/pluggable. This can make some memory unusable.\n");

	/*
	 * We want subblocks to span at least MAX_ORDER_NR_PAGES and
	 * pageblock_nr_pages pages. This:
	 * - Simplifies our page onlining code (virtio_mem_online_page_cb)
	 *   and fake page onlining code (virtio_mem_fake_online).
	 * - Is required for now for alloc_contig_range() to work reliably -
	 *   it doesn't properly handle smaller granularity on ZONE_NORMAL.
	 */
	sb_size = max_t(uint64_t, MAX_ORDER_NR_PAGES,
			pageblock_nr_pages) * PAGE_SIZE;
	sb_size = max_t(uint64_t, vm->device_block_size, sb_size);
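
	/*
	 * Worked example (illustrative only): on a config with 4 KiB pages
	 * where MAX_ORDER_NR_PAGES = 1024 and pageblock_nr_pages = 512,
	 * sb_size = max(1024, 512) * 4096 = 4 MiB; with a 2 MiB device
	 * block size this stays 4 MiB, and with 128 MiB Linux memory blocks
	 * the driver picks SBM below with sbs_per_mb = 32.
	 */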

	if (sb_size < memory_block_size_bytes() && !force_bbm) {
		/* SBM: At least two subblocks per Linux memory block. */
		vm->in_sbm = true;
		vm->sbm.sb_size = sb_size;
		vm->sbm.sbs_per_mb = memory_block_size_bytes() /
				     vm->sbm.sb_size;

		/* Round up to the next full memory block */
		addr = max_t(uint64_t, vm->addr, pluggable_range.start) +
		       memory_block_size_bytes() - 1;
		vm->sbm.first_mb_id = virtio_mem_phys_to_mb_id(addr);
		vm->sbm.next_mb_id = vm->sbm.first_mb_id;
	} else {
		/* BBM: At least one Linux memory block. */
		vm->bbm.bb_size = max_t(uint64_t, vm->device_block_size,
					memory_block_size_bytes());

		if (bbm_block_size) {
			if (!is_power_of_2(bbm_block_size)) {
				dev_warn(&vm->vdev->dev,
					 "bbm_block_size is not a power of 2");
			} else if (bbm_block_size < vm->bbm.bb_size) {
				dev_warn(&vm->vdev->dev,
					 "bbm_block_size is too small");
			} else {
				vm->bbm.bb_size = bbm_block_size;
			}
		}

		/* Round up to the next aligned big block */
		addr = max_t(uint64_t, vm->addr, pluggable_range.start) +
		       vm->bbm.bb_size - 1;
		vm->bbm.first_bb_id = virtio_mem_phys_to_bb_id(vm, addr);
		vm->bbm.next_bb_id = vm->bbm.first_bb_id;
	}

	/* Prepare the offline threshold - make sure we can add two blocks. */
	vm->offline_threshold = max_t(uint64_t, 2 * memory_block_size_bytes(),
				      VIRTIO_MEM_DEFAULT_OFFLINE_THRESHOLD);
	/* In BBM, we also want at least two big blocks. */
	vm->offline_threshold = max_t(uint64_t, 2 * vm->bbm.bb_size,
				      vm->offline_threshold);

	dev_info(&vm->vdev->dev, "start address: 0x%llx", vm->addr);
	dev_info(&vm->vdev->dev, "region size: 0x%llx", vm->region_size);
	dev_info(&vm->vdev->dev, "device block size: 0x%llx",
		 (unsigned long long)vm->device_block_size);
	dev_info(&vm->vdev->dev, "memory block size: 0x%lx",
		 memory_block_size_bytes());
	if (vm->in_sbm)
		dev_info(&vm->vdev->dev, "subblock size: 0x%llx",
			 (unsigned long long)vm->sbm.sb_size);
	else
		dev_info(&vm->vdev->dev, "big block size: 0x%llx",
			 (unsigned long long)vm->bbm.bb_size);
	if (vm->nid != NUMA_NO_NODE && IS_ENABLED(CONFIG_NUMA))
		dev_info(&vm->vdev->dev, "nid: %d", vm->nid);

	return 0;
}
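
/*
 * Worked example (illustrative only): with 128 MiB memory blocks the
 * offline threshold is max(2 * 128 MiB, 1 GiB) = 1 GiB; in BBM with a
 * 2 GiB big block size it grows to max(2 * 2 GiB, 1 GiB) = 4 GiB, so at
 * least two big blocks may sit offline before we stop adding memory.
 */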

static int virtio_mem_create_resource(struct virtio_mem *vm)
{
	/*
	 * When force-unloading the driver and removing the device, we
	 * could have a garbage pointer. Duplicate the string.
	 */
	const char *name = kstrdup(dev_name(&vm->vdev->dev), GFP_KERNEL);

	if (!name)
		return -ENOMEM;

	vm->parent_resource = __request_mem_region(vm->addr, vm->region_size,
						   name, IORESOURCE_SYSTEM_RAM);
	if (!vm->parent_resource) {
		kfree(name);
		dev_warn(&vm->vdev->dev, "could not reserve device region\n");
		dev_info(&vm->vdev->dev,
			 "reloading the driver is not supported\n");
		return -EBUSY;
	}

	/* The memory is not actually busy - make add_memory() work. */
	vm->parent_resource->flags &= ~IORESOURCE_BUSY;
	return 0;
}

static void virtio_mem_delete_resource(struct virtio_mem *vm)
{
	const char *name;

	if (!vm->parent_resource)
		return;

	name = vm->parent_resource->name;
	release_resource(vm->parent_resource);
	kfree(vm->parent_resource);
	kfree(name);
	vm->parent_resource = NULL;
}

static int virtio_mem_range_has_system_ram(struct resource *res, void *arg)
{
	return 1;
}

static bool virtio_mem_has_memory_added(struct virtio_mem *vm)
{
	const unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	return walk_iomem_res_desc(IORES_DESC_NONE, flags, vm->addr,
				   vm->addr + vm->region_size, NULL,
				   virtio_mem_range_has_system_ram) == 1;
}

static int virtio_mem_probe(struct virtio_device *vdev)
{
	struct virtio_mem *vm;
	int rc;

	BUILD_BUG_ON(sizeof(struct virtio_mem_req) != 24);
	BUILD_BUG_ON(sizeof(struct virtio_mem_resp) != 10);

	vdev->priv = vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return -ENOMEM;

	init_waitqueue_head(&vm->host_resp);
	vm->vdev = vdev;
	INIT_WORK(&vm->wq, virtio_mem_run_wq);
	mutex_init(&vm->hotplug_mutex);
	INIT_LIST_HEAD(&vm->next);
	spin_lock_init(&vm->removal_lock);
	hrtimer_init(&vm->retry_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vm->retry_timer.function = virtio_mem_timer_expired;
	vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS;

	/* register the virtqueue */
	rc = virtio_mem_init_vq(vm);
	if (rc)
		goto out_free_vm;

	/* initialize the device by querying the config */
	rc = virtio_mem_init(vm);
	if (rc)
		goto out_del_vq;

	/* create the parent resource for all memory */
	rc = virtio_mem_create_resource(vm);
	if (rc)
		goto out_del_vq;

	/*
	 * If we still have memory plugged, we have to unplug all memory first.
	 * Registering our parent resource makes sure that this memory isn't
	 * actually in use (e.g., trying to reload the driver).
	 */
	if (vm->plugged_size) {
		vm->unplug_all_required = true;
		dev_info(&vm->vdev->dev, "unplugging all memory is required\n");
	}

	/* register callbacks */
	vm->memory_notifier.notifier_call = virtio_mem_memory_notifier_cb;
	rc = register_memory_notifier(&vm->memory_notifier);
	if (rc)
		goto out_del_resource;
	rc = register_virtio_mem_device(vm);
	if (rc)
		goto out_unreg_mem;

	virtio_device_ready(vdev);

	/* trigger a config update to start processing the requested_size */
	atomic_set(&vm->config_changed, 1);
	queue_work(system_freezable_wq, &vm->wq);

	return 0;
out_unreg_mem:
	unregister_memory_notifier(&vm->memory_notifier);
out_del_resource:
	virtio_mem_delete_resource(vm);
out_del_vq:
	vdev->config->del_vqs(vdev);
out_free_vm:
	kfree(vm);
	vdev->priv = NULL;

	return rc;
}
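
/*
 * Usage sketch (illustrative only; exact QEMU option names may differ by
 * version): a hypervisor typically exposes this device like
 *	-object memory-backend-ram,id=mem0,size=8G
 *	-device virtio-mem-pci,id=vmem0,memdev=mem0,requested-size=1G
 * and later resizes it at runtime, e.g. via "qom-set vmem0
 * requested-size 4G", which the driver observes as a config update and
 * processes in virtio_mem_run_wq().
 */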

static void virtio_mem_remove(struct virtio_device *vdev)
{
	struct virtio_mem *vm = vdev->priv;
	unsigned long mb_id;
	int rc;

	/*
	 * Make sure the workqueue won't be triggered anymore and no memory
	 * blocks can be onlined/offlined until we're finished here.
	 */
	mutex_lock(&vm->hotplug_mutex);
	spin_lock_irq(&vm->removal_lock);
	vm->removing = true;
	spin_unlock_irq(&vm->removal_lock);
	mutex_unlock(&vm->hotplug_mutex);

	/* wait until the workqueue has stopped */
	cancel_work_sync(&vm->wq);
	hrtimer_cancel(&vm->retry_timer);

	if (vm->in_sbm) {
		/*
		 * After we unregistered our callbacks, user space can online
		 * partially plugged offline blocks. Make sure to remove them.
		 */
		virtio_mem_sbm_for_each_mb(vm, mb_id,
					   VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL) {
			rc = virtio_mem_sbm_remove_mb(vm, mb_id);
			BUG_ON(rc);
			virtio_mem_sbm_set_mb_state(vm, mb_id,
						    VIRTIO_MEM_SBM_MB_UNUSED);
		}
		/*
		 * After we unregistered our callbacks, user space can no
		 * longer offline partially plugged online memory blocks. No
		 * need to worry about them.
		 */
	}

	/* unregister callbacks */
	unregister_virtio_mem_device(vm);
	unregister_memory_notifier(&vm->memory_notifier);

	/*
	 * There is no way we could reliably remove all memory we have added to
	 * the system. And there is no way to stop the driver/device from going
	 * away. Warn at least.
	 */
	if (virtio_mem_has_memory_added(vm)) {
		dev_warn(&vdev->dev, "device still has system memory added\n");
	} else {
		virtio_mem_delete_resource(vm);
		kfree_const(vm->resource_name);
	}

	/* remove all tracking data - no locking needed */
	if (vm->in_sbm) {
		vfree(vm->sbm.mb_states);
		vfree(vm->sbm.sb_states);
	} else {
		vfree(vm->bbm.bb_states);
	}

	/* reset the device and cleanup the queues */
	vdev->config->reset(vdev);
	vdev->config->del_vqs(vdev);

	kfree(vm);
	vdev->priv = NULL;
}

static void virtio_mem_config_changed(struct virtio_device *vdev)
{
	struct virtio_mem *vm = vdev->priv;

	atomic_set(&vm->config_changed, 1);
	virtio_mem_retry(vm);
}

#ifdef CONFIG_PM_SLEEP
static int virtio_mem_freeze(struct virtio_device *vdev)
{
	/*
	 * When restarting the VM, all memory is usually unplugged. Don't
	 * allow suspend/hibernate.
	 */
	dev_err(&vdev->dev, "save/restore not supported.\n");
	return -EPERM;
}

static int virtio_mem_restore(struct virtio_device *vdev)
{
	return -EPERM;
}
#endif

static unsigned int virtio_mem_features[] = {
#if defined(CONFIG_NUMA) && defined(CONFIG_ACPI_NUMA)
	VIRTIO_MEM_F_ACPI_PXM,
#endif
};

static const struct virtio_device_id virtio_mem_id_table[] = {
	{ VIRTIO_ID_MEM, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static struct virtio_driver virtio_mem_driver = {
	.feature_table = virtio_mem_features,
	.feature_table_size = ARRAY_SIZE(virtio_mem_features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = virtio_mem_id_table,
	.probe = virtio_mem_probe,
	.remove = virtio_mem_remove,
	.config_changed = virtio_mem_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtio_mem_freeze,
	.restore = virtio_mem_restore,
#endif
};

module_virtio_driver(virtio_mem_driver);
MODULE_DEVICE_TABLE(virtio, virtio_mem_id_table);
MODULE_AUTHOR("David Hildenbrand <david@redhat.com>");
MODULE_DESCRIPTION("Virtio-mem driver");
MODULE_LICENSE("GPL");