// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio-mem device driver.
 *
 * Copyright Red Hat, Inc. 2020
 *
 * Author(s): David Hildenbrand <david@redhat.com>
 */

#include <linux/virtio.h>
#include <linux/virtio_mem.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/memory_hotplug.h>
#include <linux/memory.h>
#include <linux/hrtimer.h>
#include <linux/crash_dump.h>
#include <linux/mutex.h>
#include <linux/bitmap.h>
#include <linux/lockdep.h>

#include <acpi/acpi_numa.h>

static bool unplug_online = true;
module_param(unplug_online, bool, 0644);
MODULE_PARM_DESC(unplug_online, "Try to unplug online memory");

static bool force_bbm;
module_param(force_bbm, bool, 0444);
MODULE_PARM_DESC(force_bbm,
		 "Force Big Block Mode. Default is 0 (auto-selection)");

static unsigned long bbm_block_size;
module_param(bbm_block_size, ulong, 0444);
MODULE_PARM_DESC(bbm_block_size,
		 "Big Block size in bytes. Default is 0 (auto-detection).");

static bool bbm_safe_unplug = true;
module_param(bbm_safe_unplug, bool, 0444);
MODULE_PARM_DESC(bbm_safe_unplug,
	     "Use a safe unplug mechanism in BBM, avoiding long/endless loops");

/*
 * virtio-mem currently supports the following modes of operation:
 *
 * * Sub Block Mode (SBM): A Linux memory block spans 2..X subblocks (SB). The
 *   size of a Sub Block (SB) is determined based on the device block size, the
 *   pageblock size, and the maximum allocation granularity of the buddy.
 *   Subblocks within a Linux memory block might either be plugged or unplugged.
 *   Memory is added/removed to Linux MM in Linux memory block granularity.
 *
 * * Big Block Mode (BBM): A Big Block (BB) spans 1..X Linux memory blocks.
 *   Memory is added/removed to Linux MM in Big Block granularity.
 *
 * The mode is determined automatically based on the Linux memory block size
 * and the device block size.
 *
 * User space / core MM (auto onlining) is responsible for onlining added
 * Linux memory blocks - and for selecting a zone. Linux memory blocks are
 * always onlined separately, and all memory within a Linux memory block is
 * onlined to the same zone - virtio-mem relies on this behavior.
 */
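/*
 * A worked example with illustrative values (the real values are derived at
 * probe time from the device and the architecture): with 128 MiB Linux
 * memory blocks, a 2 MiB pageblock size, a 4 MiB maximum buddy allocation
 * granularity and a 2 MiB device block size, SBM would use 4 MiB subblocks,
 * resulting in 32 subblocks per Linux memory block. If the device block size
 * were 2 GiB instead, the driver would have to use BBM, with each Big Block
 * spanning 16 Linux memory blocks.
 */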
/*
 * State of a Linux memory block in SBM.
 */
enum virtio_mem_sbm_mb_state {
	/* Unplugged, not added to Linux. Can be reused later. */
	VIRTIO_MEM_SBM_MB_UNUSED = 0,
	/* (Partially) plugged, not added to Linux. Error on add_memory(). */
	VIRTIO_MEM_SBM_MB_PLUGGED,
	/* Fully plugged, fully added to Linux, offline. */
	VIRTIO_MEM_SBM_MB_OFFLINE,
	/* Partially plugged, fully added to Linux, offline. */
	VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL,
	/* Fully plugged, fully added to Linux, online. */
	VIRTIO_MEM_SBM_MB_ONLINE,
	/* Partially plugged, fully added to Linux, online. */
	VIRTIO_MEM_SBM_MB_ONLINE_PARTIAL,
	VIRTIO_MEM_SBM_MB_COUNT
};

/*
 * State of a Big Block (BB) in BBM, covering 1..X Linux memory blocks.
 */
enum virtio_mem_bbm_bb_state {
	/* Unplugged, not added to Linux. Can be reused later. */
	VIRTIO_MEM_BBM_BB_UNUSED = 0,
	/* Plugged, not added to Linux. Error on add_memory(). */
	VIRTIO_MEM_BBM_BB_PLUGGED,
	/* Plugged and added to Linux. */
	VIRTIO_MEM_BBM_BB_ADDED,
	/* All online parts are fake-offline, ready to remove. */
	VIRTIO_MEM_BBM_BB_FAKE_OFFLINE,
	VIRTIO_MEM_BBM_BB_COUNT
};

struct virtio_mem {
	struct virtio_device *vdev;

	/* We might first have to unplug all memory when starting up. */
	bool unplug_all_required;

	/* Workqueue that processes the plug/unplug requests. */
	struct work_struct wq;
	atomic_t wq_active;
	atomic_t config_changed;

	/* Virtqueue for guest->host requests. */
	struct virtqueue *vq;

	/* Wait for a host response to a guest request. */
	wait_queue_head_t host_resp;

	/* Space for one guest request and the host response. */
	struct virtio_mem_req req;
	struct virtio_mem_resp resp;

	/* The current size of the device. */
	uint64_t plugged_size;
	/* The requested size of the device. */
	uint64_t requested_size;

	/* The device block size (for communicating with the device). */
	uint64_t device_block_size;
	/* The determined node id for all memory of the device. */
	int nid;
	/* Physical start address of the memory region. */
	uint64_t addr;
	/* Maximum region size in bytes. */
	uint64_t region_size;

	/* The parent resource for all memory added via this device. */
	struct resource *parent_resource;
	/*
	 * Copy of "System RAM (virtio_mem)" to be used for
	 * add_memory_driver_managed().
	 */
	const char *resource_name;

	/*
	 * We don't want to add too much memory if it's not getting onlined,
	 * to avoid running OOM. Besides this threshold, we allow to have at
	 * least two offline blocks at a time (whatever is bigger).
	 */
#define VIRTIO_MEM_DEFAULT_OFFLINE_THRESHOLD		(1024 * 1024 * 1024)
	atomic64_t offline_size;
	uint64_t offline_threshold;

	/* If set, the driver is in SBM, otherwise in BBM. */
	bool in_sbm;

	union {
		struct {
			/* Id of the first memory block of this device. */
			unsigned long first_mb_id;
			/* Id of the last usable memory block of this device. */
			unsigned long last_usable_mb_id;
			/* Id of the next memory block to prepare when needed. */
			unsigned long next_mb_id;

			/* The subblock size. */
			uint64_t sb_size;
			/* The number of subblocks per Linux memory block. */
			uint32_t sbs_per_mb;

			/* Summary of all memory block states. */
			unsigned long mb_count[VIRTIO_MEM_SBM_MB_COUNT];

			/*
			 * One byte state per memory block. Allocated via
			 * vmalloc(). Resized (alloc+copy+free) on demand.
			 *
			 * With 128 MiB memory blocks, we have states for 512
			 * GiB of memory in one 4 KiB page.
			 */
			uint8_t *mb_states;

			/*
			 * Bitmap: one bit per subblock. Allocated similar to
			 * sbm.mb_states.
			 *
			 * A set bit means the corresponding subblock is
			 * plugged, otherwise it's unplugged.
			 *
			 * With 4 MiB subblocks, we manage 128 GiB of memory
			 * in one 4 KiB page.
			 */
			unsigned long *sb_states;
		} sbm;

		struct {
			/* Id of the first big block of this device. */
			unsigned long first_bb_id;
			/* Id of the last usable big block of this device. */
			unsigned long last_usable_bb_id;
			/* Id of the next big block to prepare when needed. */
			unsigned long next_bb_id;

			/* Summary of all big block states. */
			unsigned long bb_count[VIRTIO_MEM_BBM_BB_COUNT];

			/* One byte state per big block. See sbm.mb_states. */
			uint8_t *bb_states;

			/* The block size used for plugging/adding/removing. */
			uint64_t bb_size;
		} bbm;
	};

	/*
	 * Mutex that protects the sbm.mb_count, sbm.mb_states,
	 * sbm.sb_states, bbm.bb_count, and bbm.bb_states
	 *
	 * When this lock is held the pointers can't change, ONLINE and
	 * OFFLINE blocks can't change the state and no subblocks will get
	 * plugged/unplugged.
	 */
	struct mutex hotplug_mutex;
	bool hotplug_active;

	/* An error occurred we cannot handle - stop processing requests. */
	bool broken;

	/* The driver is being removed. */
	spinlock_t removal_lock;
	bool removing;

	/* Timer for retrying to plug/unplug memory. */
	struct hrtimer retry_timer;
	unsigned int retry_timer_ms;
#define VIRTIO_MEM_RETRY_TIMER_MIN_MS		50000
#define VIRTIO_MEM_RETRY_TIMER_MAX_MS		300000

	/* Memory notifier (online/offline events). */
	struct notifier_block memory_notifier;

	/* Next device in the list of virtio-mem devices. */
	struct list_head next;
};
/*
 * We have to share a single online_page callback among all virtio-mem
 * devices. We use RCU to iterate the list in the callback.
 */
static DEFINE_MUTEX(virtio_mem_mutex);
static LIST_HEAD(virtio_mem_devices);

static void virtio_mem_online_page_cb(struct page *page, unsigned int order);
static void virtio_mem_fake_offline_going_offline(unsigned long pfn,
						  unsigned long nr_pages);
static void virtio_mem_fake_offline_cancel_offline(unsigned long pfn,
						   unsigned long nr_pages);
static void virtio_mem_retry(struct virtio_mem *vm);

/*
 * Register a virtio-mem device so it will be considered for the online_page
 * callback.
 */
static int register_virtio_mem_device(struct virtio_mem *vm)
{
	int rc = 0;

	/* First device registers the callback. */
	mutex_lock(&virtio_mem_mutex);
	if (list_empty(&virtio_mem_devices))
		rc = set_online_page_callback(&virtio_mem_online_page_cb);
	if (!rc)
		list_add_rcu(&vm->next, &virtio_mem_devices);
	mutex_unlock(&virtio_mem_mutex);

	return rc;
}

/*
 * Unregister a virtio-mem device so it will no longer be considered for the
 * online_page callback.
 */
static void unregister_virtio_mem_device(struct virtio_mem *vm)
{
	/* Last device unregisters the callback. */
	mutex_lock(&virtio_mem_mutex);
	list_del_rcu(&vm->next);
	if (list_empty(&virtio_mem_devices))
		restore_online_page_callback(&virtio_mem_online_page_cb);
	mutex_unlock(&virtio_mem_mutex);

	synchronize_rcu();
}

/*
 * Calculate the memory block id of a given address.
 */
static unsigned long virtio_mem_phys_to_mb_id(unsigned long addr)
{
	return addr / memory_block_size_bytes();
}

/*
 * Calculate the physical start address of a given memory block id.
 */
static unsigned long virtio_mem_mb_id_to_phys(unsigned long mb_id)
{
	return mb_id * memory_block_size_bytes();
}

/*
 * Calculate the big block id of a given address.
 */
static unsigned long virtio_mem_phys_to_bb_id(struct virtio_mem *vm,
					      uint64_t addr)
{
	return addr / vm->bbm.bb_size;
}

/*
 * Calculate the physical start address of a given big block id.
 */
static uint64_t virtio_mem_bb_id_to_phys(struct virtio_mem *vm,
					 unsigned long bb_id)
{
	return bb_id * vm->bbm.bb_size;
}
/*
 * Calculate the subblock id of a given address.
 */
static unsigned long virtio_mem_phys_to_sb_id(struct virtio_mem *vm,
					      unsigned long addr)
{
	const unsigned long mb_id = virtio_mem_phys_to_mb_id(addr);
	const unsigned long mb_addr = virtio_mem_mb_id_to_phys(mb_id);

	return (addr - mb_addr) / vm->sbm.sb_size;
}

/*
 * Set the state of a big block, taking care of the state counter.
 */
static void virtio_mem_bbm_set_bb_state(struct virtio_mem *vm,
					unsigned long bb_id,
					enum virtio_mem_bbm_bb_state state)
{
	const unsigned long idx = bb_id - vm->bbm.first_bb_id;
	enum virtio_mem_bbm_bb_state old_state;

	old_state = vm->bbm.bb_states[idx];
	vm->bbm.bb_states[idx] = state;

	BUG_ON(vm->bbm.bb_count[old_state] == 0);
	vm->bbm.bb_count[old_state]--;
	vm->bbm.bb_count[state]++;
}

/*
 * Get the state of a big block.
 */
static enum virtio_mem_bbm_bb_state virtio_mem_bbm_get_bb_state(struct virtio_mem *vm,
								unsigned long bb_id)
{
	return vm->bbm.bb_states[bb_id - vm->bbm.first_bb_id];
}

/*
 * Prepare the big block state array for the next big block.
 */
static int virtio_mem_bbm_bb_states_prepare_next_bb(struct virtio_mem *vm)
{
	unsigned long old_bytes = vm->bbm.next_bb_id - vm->bbm.first_bb_id;
	unsigned long new_bytes = old_bytes + 1;
	int old_pages = PFN_UP(old_bytes);
	int new_pages = PFN_UP(new_bytes);
	uint8_t *new_array;

	if (vm->bbm.bb_states && old_pages == new_pages)
		return 0;

	new_array = vzalloc(new_pages * PAGE_SIZE);
	if (!new_array)
		return -ENOMEM;

	mutex_lock(&vm->hotplug_mutex);
	if (vm->bbm.bb_states)
		memcpy(new_array, vm->bbm.bb_states, old_pages * PAGE_SIZE);
	vfree(vm->bbm.bb_states);
	vm->bbm.bb_states = new_array;
	mutex_unlock(&vm->hotplug_mutex);

	return 0;
}

#define virtio_mem_bbm_for_each_bb(_vm, _bb_id, _state) \
	for (_bb_id = _vm->bbm.first_bb_id; \
	     _bb_id < _vm->bbm.next_bb_id && _vm->bbm.bb_count[_state]; \
	     _bb_id++) \
		if (virtio_mem_bbm_get_bb_state(_vm, _bb_id) == _state)

#define virtio_mem_bbm_for_each_bb_rev(_vm, _bb_id, _state) \
	for (_bb_id = _vm->bbm.next_bb_id - 1; \
	     _bb_id >= _vm->bbm.first_bb_id && _vm->bbm.bb_count[_state]; \
	     _bb_id--) \
		if (virtio_mem_bbm_get_bb_state(_vm, _bb_id) == _state)
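/*
 * Illustrative usage of the iterators above (and of their SBM counterparts
 * below): the trailing "if" restricts the loop body to blocks in the
 * requested state, while the count check terminates the scan early once no
 * such block remains.
 *
 *	virtio_mem_bbm_for_each_bb(vm, bb_id, VIRTIO_MEM_BBM_BB_PLUGGED) {
 *		...
 *	}
 */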
/*
 * Set the state of a memory block, taking care of the state counter.
 */
static void virtio_mem_sbm_set_mb_state(struct virtio_mem *vm,
					unsigned long mb_id, uint8_t state)
{
	const unsigned long idx = mb_id - vm->sbm.first_mb_id;
	uint8_t old_state;

	old_state = vm->sbm.mb_states[idx];
	vm->sbm.mb_states[idx] = state;

	BUG_ON(vm->sbm.mb_count[old_state] == 0);
	vm->sbm.mb_count[old_state]--;
	vm->sbm.mb_count[state]++;
}

/*
 * Get the state of a memory block.
 */
static uint8_t virtio_mem_sbm_get_mb_state(struct virtio_mem *vm,
					   unsigned long mb_id)
{
	const unsigned long idx = mb_id - vm->sbm.first_mb_id;

	return vm->sbm.mb_states[idx];
}

/*
 * Prepare the state array for the next memory block.
 */
static int virtio_mem_sbm_mb_states_prepare_next_mb(struct virtio_mem *vm)
{
	int old_pages = PFN_UP(vm->sbm.next_mb_id - vm->sbm.first_mb_id);
	int new_pages = PFN_UP(vm->sbm.next_mb_id - vm->sbm.first_mb_id + 1);
	uint8_t *new_array;

	if (vm->sbm.mb_states && old_pages == new_pages)
		return 0;

	new_array = vzalloc(new_pages * PAGE_SIZE);
	if (!new_array)
		return -ENOMEM;

	mutex_lock(&vm->hotplug_mutex);
	if (vm->sbm.mb_states)
		memcpy(new_array, vm->sbm.mb_states, old_pages * PAGE_SIZE);
	vfree(vm->sbm.mb_states);
	vm->sbm.mb_states = new_array;
	mutex_unlock(&vm->hotplug_mutex);

	return 0;
}

#define virtio_mem_sbm_for_each_mb(_vm, _mb_id, _state) \
	for (_mb_id = _vm->sbm.first_mb_id; \
	     _mb_id < _vm->sbm.next_mb_id && _vm->sbm.mb_count[_state]; \
	     _mb_id++) \
		if (virtio_mem_sbm_get_mb_state(_vm, _mb_id) == _state)

#define virtio_mem_sbm_for_each_mb_rev(_vm, _mb_id, _state) \
	for (_mb_id = _vm->sbm.next_mb_id - 1; \
	     _mb_id >= _vm->sbm.first_mb_id && _vm->sbm.mb_count[_state]; \
	     _mb_id--) \
		if (virtio_mem_sbm_get_mb_state(_vm, _mb_id) == _state)

/*
 * Calculate the bit number in the subblock bitmap for the given subblock
 * inside the given memory block.
 */
static int virtio_mem_sbm_sb_state_bit_nr(struct virtio_mem *vm,
					  unsigned long mb_id, int sb_id)
{
	return (mb_id - vm->sbm.first_mb_id) * vm->sbm.sbs_per_mb + sb_id;
}
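/*
 * Example with illustrative values: with vm->sbm.sbs_per_mb == 32, the
 * subblocks of the third memory block (mb_id - vm->sbm.first_mb_id == 2)
 * occupy bits 64..95 of the bitmap; its sb_id 5 maps to bit
 * 2 * 32 + 5 == 69.
 */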
/*
 * Mark all selected subblocks plugged.
 *
 * Will not modify the state of the memory block.
 */
static void virtio_mem_sbm_set_sb_plugged(struct virtio_mem *vm,
					  unsigned long mb_id, int sb_id,
					  int count)
{
	const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);

	__bitmap_set(vm->sbm.sb_states, bit, count);
}

/*
 * Mark all selected subblocks unplugged.
 *
 * Will not modify the state of the memory block.
 */
static void virtio_mem_sbm_set_sb_unplugged(struct virtio_mem *vm,
					    unsigned long mb_id, int sb_id,
					    int count)
{
	const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);

	__bitmap_clear(vm->sbm.sb_states, bit, count);
}

/*
 * Test if all selected subblocks are plugged.
 */
static bool virtio_mem_sbm_test_sb_plugged(struct virtio_mem *vm,
					   unsigned long mb_id, int sb_id,
					   int count)
{
	const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);

	if (count == 1)
		return test_bit(bit, vm->sbm.sb_states);

	/* TODO: Helper similar to bitmap_set() */
	return find_next_zero_bit(vm->sbm.sb_states, bit + count, bit) >=
	       bit + count;
}

/*
 * Test if all selected subblocks are unplugged.
 */
static bool virtio_mem_sbm_test_sb_unplugged(struct virtio_mem *vm,
					     unsigned long mb_id, int sb_id,
					     int count)
{
	const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);

	/* TODO: Helper similar to bitmap_set() */
	return find_next_bit(vm->sbm.sb_states, bit + count, bit) >=
	       bit + count;
}

/*
 * Find the first unplugged subblock. Returns vm->sbm.sbs_per_mb in case there
 * is none.
 */
static int virtio_mem_sbm_first_unplugged_sb(struct virtio_mem *vm,
					     unsigned long mb_id)
{
	const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, 0);

	return find_next_zero_bit(vm->sbm.sb_states,
				  bit + vm->sbm.sbs_per_mb, bit) - bit;
}

/*
 * Prepare the subblock bitmap for the next memory block.
 */
static int virtio_mem_sbm_sb_states_prepare_next_mb(struct virtio_mem *vm)
{
	const unsigned long old_nb_mb = vm->sbm.next_mb_id - vm->sbm.first_mb_id;
	const unsigned long old_nb_bits = old_nb_mb * vm->sbm.sbs_per_mb;
	const unsigned long new_nb_bits = (old_nb_mb + 1) * vm->sbm.sbs_per_mb;
	int old_pages = PFN_UP(BITS_TO_LONGS(old_nb_bits) * sizeof(long));
	int new_pages = PFN_UP(BITS_TO_LONGS(new_nb_bits) * sizeof(long));
	unsigned long *new_bitmap, *old_bitmap;

	if (vm->sbm.sb_states && old_pages == new_pages)
		return 0;

	new_bitmap = vzalloc(new_pages * PAGE_SIZE);
	if (!new_bitmap)
		return -ENOMEM;

	mutex_lock(&vm->hotplug_mutex);
	if (vm->sbm.sb_states)
		memcpy(new_bitmap, vm->sbm.sb_states, old_pages * PAGE_SIZE);

	old_bitmap = vm->sbm.sb_states;
	vm->sbm.sb_states = new_bitmap;
	mutex_unlock(&vm->hotplug_mutex);

	vfree(old_bitmap);
	return 0;
}

/*
 * Test if we could add memory without creating too much offline memory -
 * to avoid running OOM if memory is getting onlined deferred.
 */
static bool virtio_mem_could_add_memory(struct virtio_mem *vm, uint64_t size)
{
	if (WARN_ON_ONCE(size > vm->offline_threshold))
		return false;

	return atomic64_read(&vm->offline_size) + size <= vm->offline_threshold;
}
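/*
 * Example with the illustrative default threshold of 1 GiB: with 896 MiB
 * of offline memory, adding another 128 MiB memory block is still fine
 * (896 MiB + 128 MiB <= 1 GiB), while adding one more would be deferred
 * until some of that memory got onlined.
 */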
/*
 * Try adding memory to Linux. Will usually only fail if out of memory.
 *
 * Must not be called with the vm->hotplug_mutex held (possible deadlock with
 * onlining code).
 *
 * Will not modify the state of memory blocks in virtio-mem.
 */
static int virtio_mem_add_memory(struct virtio_mem *vm, uint64_t addr,
				 uint64_t size)
{
	int rc;

	/*
	 * When force-unloading the driver and we still have memory added to
	 * Linux, the resource name has to stay.
	 */
	if (!vm->resource_name) {
		vm->resource_name = kstrdup_const("System RAM (virtio_mem)",
						  GFP_KERNEL);
		if (!vm->resource_name)
			return -ENOMEM;
	}

	dev_dbg(&vm->vdev->dev, "adding memory: 0x%llx - 0x%llx\n", addr,
		addr + size - 1);
	/* Memory might get onlined immediately. */
	atomic64_add(size, &vm->offline_size);
	rc = add_memory_driver_managed(vm->nid, addr, size, vm->resource_name,
				       MEMHP_MERGE_RESOURCE);
	if (rc) {
		atomic64_sub(size, &vm->offline_size);
		dev_warn(&vm->vdev->dev, "adding memory failed: %d\n", rc);
		/*
		 * TODO: Linux MM does not properly clean up yet in all cases
		 * where adding of memory failed - especially on -ENOMEM.
		 */
	}
	return rc;
}

/*
 * See virtio_mem_add_memory(): Try adding a single Linux memory block.
 */
static int virtio_mem_sbm_add_mb(struct virtio_mem *vm, unsigned long mb_id)
{
	const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
	const uint64_t size = memory_block_size_bytes();

	return virtio_mem_add_memory(vm, addr, size);
}

/*
 * See virtio_mem_add_memory(): Try adding a big block.
 */
static int virtio_mem_bbm_add_bb(struct virtio_mem *vm, unsigned long bb_id)
{
	const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
	const uint64_t size = vm->bbm.bb_size;

	return virtio_mem_add_memory(vm, addr, size);
}
/*
 * Try removing memory from Linux. Will only fail if memory blocks aren't
 * offline.
 *
 * Must not be called with the vm->hotplug_mutex held (possible deadlock with
 * onlining code).
 *
 * Will not modify the state of memory blocks in virtio-mem.
 */
static int virtio_mem_remove_memory(struct virtio_mem *vm, uint64_t addr,
				    uint64_t size)
{
	int rc;

	dev_dbg(&vm->vdev->dev, "removing memory: 0x%llx - 0x%llx\n", addr,
		addr + size - 1);
	rc = remove_memory(vm->nid, addr, size);
	if (!rc) {
		atomic64_sub(size, &vm->offline_size);
		/*
		 * We might have freed up memory we can now unplug, retry
		 * immediately instead of waiting.
		 */
		virtio_mem_retry(vm);
	} else {
		dev_dbg(&vm->vdev->dev, "removing memory failed: %d\n", rc);
	}
	return rc;
}

/*
 * See virtio_mem_remove_memory(): Try removing a single Linux memory block.
 */
static int virtio_mem_sbm_remove_mb(struct virtio_mem *vm, unsigned long mb_id)
{
	const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
	const uint64_t size = memory_block_size_bytes();

	return virtio_mem_remove_memory(vm, addr, size);
}

/*
 * See virtio_mem_remove_memory(): Try to remove all Linux memory blocks
 * covered by the big block.
 */
static int virtio_mem_bbm_remove_bb(struct virtio_mem *vm, unsigned long bb_id)
{
	const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
	const uint64_t size = vm->bbm.bb_size;

	return virtio_mem_remove_memory(vm, addr, size);
}

/*
 * Try offlining and removing memory from Linux.
 *
 * Must not be called with the vm->hotplug_mutex held (possible deadlock with
 * onlining code).
 *
 * Will not modify the state of memory blocks in virtio-mem.
 */
static int virtio_mem_offline_and_remove_memory(struct virtio_mem *vm,
						uint64_t addr,
						uint64_t size)
{
	int rc;

	dev_dbg(&vm->vdev->dev,
		"offlining and removing memory: 0x%llx - 0x%llx\n", addr,
		addr + size - 1);

	rc = offline_and_remove_memory(vm->nid, addr, size);
	if (!rc) {
		atomic64_sub(size, &vm->offline_size);
		/*
		 * We might have freed up memory we can now unplug, retry
		 * immediately instead of waiting.
		 */
		virtio_mem_retry(vm);
	} else {
		dev_dbg(&vm->vdev->dev,
			"offlining and removing memory failed: %d\n", rc);
	}
	return rc;
}

/*
 * See virtio_mem_offline_and_remove_memory(): Try offlining and removing
 * a single Linux memory block.
 */
static int virtio_mem_sbm_offline_and_remove_mb(struct virtio_mem *vm,
						unsigned long mb_id)
{
	const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
	const uint64_t size = memory_block_size_bytes();

	return virtio_mem_offline_and_remove_memory(vm, addr, size);
}

/*
 * See virtio_mem_offline_and_remove_memory(): Try to offline and remove all
 * Linux memory blocks covered by the big block.
 */
static int virtio_mem_bbm_offline_and_remove_bb(struct virtio_mem *vm,
						unsigned long bb_id)
{
	const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
	const uint64_t size = vm->bbm.bb_size;

	return virtio_mem_offline_and_remove_memory(vm, addr, size);
}

/*
 * Trigger the workqueue so the device can perform its magic.
 */
static void virtio_mem_retry(struct virtio_mem *vm)
{
	unsigned long flags;

	spin_lock_irqsave(&vm->removal_lock, flags);
	if (!vm->removing)
		queue_work(system_freezable_wq, &vm->wq);
	spin_unlock_irqrestore(&vm->removal_lock, flags);
}

static int virtio_mem_translate_node_id(struct virtio_mem *vm, uint16_t node_id)
{
	int node = NUMA_NO_NODE;

#if defined(CONFIG_ACPI_NUMA)
	if (virtio_has_feature(vm->vdev, VIRTIO_MEM_F_ACPI_PXM))
		node = pxm_to_node(node_id);
#endif
	return node;
}

/*
 * Test if a virtio-mem device overlaps with the given range. Can be called
 * from (notifier) callbacks lockless.
 */
static bool virtio_mem_overlaps_range(struct virtio_mem *vm, uint64_t start,
				      uint64_t size)
{
	return start < vm->addr + vm->region_size && vm->addr < start + size;
}

/*
 * Test if a virtio-mem device contains a given range. Can be called from
 * (notifier) callbacks lockless.
 */
static bool virtio_mem_contains_range(struct virtio_mem *vm, uint64_t start,
				      uint64_t size)
{
	return start >= vm->addr && start + size <= vm->addr + vm->region_size;
}
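/*
 * Example with illustrative values: both tests treat ranges as half-open
 * intervals. A device with vm->addr == 4 GiB and vm->region_size == 16 GiB
 * overlaps a 2 GiB range starting at 19 GiB (19 GiB < 20 GiB), but does not
 * contain it, as the range's end (21 GiB) exceeds the device's end (20 GiB).
 */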
static int virtio_mem_sbm_notify_going_online(struct virtio_mem *vm,
					      unsigned long mb_id)
{
	switch (virtio_mem_sbm_get_mb_state(vm, mb_id)) {
	case VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL:
	case VIRTIO_MEM_SBM_MB_OFFLINE:
		return NOTIFY_OK;
	default:
		break;
	}
	dev_warn_ratelimited(&vm->vdev->dev,
			     "memory block onlining denied\n");
	return NOTIFY_BAD;
}

static void virtio_mem_sbm_notify_offline(struct virtio_mem *vm,
					  unsigned long mb_id)
{
	switch (virtio_mem_sbm_get_mb_state(vm, mb_id)) {
	case VIRTIO_MEM_SBM_MB_ONLINE_PARTIAL:
		virtio_mem_sbm_set_mb_state(vm, mb_id,
					    VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL);
		break;
	case VIRTIO_MEM_SBM_MB_ONLINE:
		virtio_mem_sbm_set_mb_state(vm, mb_id,
					    VIRTIO_MEM_SBM_MB_OFFLINE);
		break;
	default:
		BUG();
		break;
	}
}

static void virtio_mem_sbm_notify_online(struct virtio_mem *vm,
					 unsigned long mb_id)
{
	switch (virtio_mem_sbm_get_mb_state(vm, mb_id)) {
	case VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL:
		virtio_mem_sbm_set_mb_state(vm, mb_id,
					    VIRTIO_MEM_SBM_MB_ONLINE_PARTIAL);
		break;
	case VIRTIO_MEM_SBM_MB_OFFLINE:
		virtio_mem_sbm_set_mb_state(vm, mb_id,
					    VIRTIO_MEM_SBM_MB_ONLINE);
		break;
	default:
		BUG();
		break;
	}
}

static void virtio_mem_sbm_notify_going_offline(struct virtio_mem *vm,
						unsigned long mb_id)
{
	const unsigned long nr_pages = PFN_DOWN(vm->sbm.sb_size);
	unsigned long pfn;
	int sb_id;

	for (sb_id = 0; sb_id < vm->sbm.sbs_per_mb; sb_id++) {
		if (virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id, 1))
			continue;
		pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
			       sb_id * vm->sbm.sb_size);
		virtio_mem_fake_offline_going_offline(pfn, nr_pages);
	}
}

static void virtio_mem_sbm_notify_cancel_offline(struct virtio_mem *vm,
						 unsigned long mb_id)
{
	const unsigned long nr_pages = PFN_DOWN(vm->sbm.sb_size);
	unsigned long pfn;
	int sb_id;

	for (sb_id = 0; sb_id < vm->sbm.sbs_per_mb; sb_id++) {
		if (virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id, 1))
			continue;
		pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
			       sb_id * vm->sbm.sb_size);
		virtio_mem_fake_offline_cancel_offline(pfn, nr_pages);
	}
}

static void virtio_mem_bbm_notify_going_offline(struct virtio_mem *vm,
						unsigned long bb_id,
						unsigned long pfn,
						unsigned long nr_pages)
{
	/*
	 * When marked as "fake-offline", all online memory of this device
	 * block is allocated by us. Otherwise, we don't have any memory
	 * allocated.
	 */
	if (virtio_mem_bbm_get_bb_state(vm, bb_id) !=
	    VIRTIO_MEM_BBM_BB_FAKE_OFFLINE)
		return;
	virtio_mem_fake_offline_going_offline(pfn, nr_pages);
}

static void virtio_mem_bbm_notify_cancel_offline(struct virtio_mem *vm,
						 unsigned long bb_id,
						 unsigned long pfn,
						 unsigned long nr_pages)
{
	if (virtio_mem_bbm_get_bb_state(vm, bb_id) !=
	    VIRTIO_MEM_BBM_BB_FAKE_OFFLINE)
		return;
	virtio_mem_fake_offline_cancel_offline(pfn, nr_pages);
}

/*
 * This callback will either be called synchronously from add_memory() or
 * asynchronously (e.g., triggered via user space). We have to be careful
 * with locking when calling add_memory().
 */
static int virtio_mem_memory_notifier_cb(struct notifier_block *nb,
					 unsigned long action, void *arg)
{
	struct virtio_mem *vm = container_of(nb, struct virtio_mem,
					     memory_notifier);
	struct memory_notify *mhp = arg;
	const unsigned long start = PFN_PHYS(mhp->start_pfn);
	const unsigned long size = PFN_PHYS(mhp->nr_pages);
	int rc = NOTIFY_OK;
	unsigned long id;

	if (!virtio_mem_overlaps_range(vm, start, size))
		return NOTIFY_DONE;

	if (vm->in_sbm) {
		id = virtio_mem_phys_to_mb_id(start);
		/*
		 * In SBM, we add memory in separate memory blocks - we expect
		 * it to be onlined/offlined in the same granularity. Bail out
		 * if this ever changes.
		 */
		if (WARN_ON_ONCE(size != memory_block_size_bytes() ||
				 !IS_ALIGNED(start, memory_block_size_bytes())))
			return NOTIFY_BAD;
	} else {
		id = virtio_mem_phys_to_bb_id(vm, start);
		/*
		 * In BBM, we only care about onlining/offlining happening
		 * within a single big block, we don't care about the
		 * actual granularity as we don't track individual Linux
		 * memory blocks.
		 */
		if (WARN_ON_ONCE(id != virtio_mem_phys_to_bb_id(vm, start + size - 1)))
			return NOTIFY_BAD;
	}

	/*
	 * Avoid circular locking lockdep warnings. We lock the mutex
	 * e.g., in MEM_GOING_OFFLINE and unlock it in MEM_ONLINE. The
	 * blocking_notifier_call_chain() has its own lock, which gets unlocked
	 * between both notifier calls and will bail out. False positive.
	 */
	lockdep_off();

	switch (action) {
	case MEM_GOING_OFFLINE:
		mutex_lock(&vm->hotplug_mutex);
		if (vm->removing) {
			rc = notifier_from_errno(-EBUSY);
			mutex_unlock(&vm->hotplug_mutex);
			break;
		}
		vm->hotplug_active = true;
		if (vm->in_sbm)
			virtio_mem_sbm_notify_going_offline(vm, id);
		else
			virtio_mem_bbm_notify_going_offline(vm, id,
							    mhp->start_pfn,
							    mhp->nr_pages);
		break;
	case MEM_GOING_ONLINE:
		mutex_lock(&vm->hotplug_mutex);
		if (vm->removing) {
			rc = notifier_from_errno(-EBUSY);
			mutex_unlock(&vm->hotplug_mutex);
			break;
		}
		vm->hotplug_active = true;
		if (vm->in_sbm)
			rc = virtio_mem_sbm_notify_going_online(vm, id);
		break;
	case MEM_OFFLINE:
		if (vm->in_sbm)
			virtio_mem_sbm_notify_offline(vm, id);

		atomic64_add(size, &vm->offline_size);
		/*
		 * Trigger the workqueue. Now that we have some offline memory,
		 * maybe we can handle pending unplug requests.
		 */
		if (!unplug_online)
			virtio_mem_retry(vm);

		vm->hotplug_active = false;
		mutex_unlock(&vm->hotplug_mutex);
		break;
	case MEM_ONLINE:
		if (vm->in_sbm)
			virtio_mem_sbm_notify_online(vm, id);

		atomic64_sub(size, &vm->offline_size);
		/*
		 * Start adding more memory once we onlined half of our
		 * threshold. Don't trigger if it's possibly due to our action
		 * (e.g., us adding memory which gets onlined immediately from
		 * the core).
		 */
		if (!atomic_read(&vm->wq_active) &&
		    virtio_mem_could_add_memory(vm, vm->offline_threshold / 2))
			virtio_mem_retry(vm);

		vm->hotplug_active = false;
		mutex_unlock(&vm->hotplug_mutex);
		break;
	case MEM_CANCEL_OFFLINE:
		if (!vm->hotplug_active)
			break;
		if (vm->in_sbm)
			virtio_mem_sbm_notify_cancel_offline(vm, id);
		else
			virtio_mem_bbm_notify_cancel_offline(vm, id,
							     mhp->start_pfn,
							     mhp->nr_pages);
		vm->hotplug_active = false;
		mutex_unlock(&vm->hotplug_mutex);
		break;
	case MEM_CANCEL_ONLINE:
		if (!vm->hotplug_active)
			break;
		vm->hotplug_active = false;
		mutex_unlock(&vm->hotplug_mutex);
		break;
	default:
		break;
	}

	lockdep_on();

	return rc;
}

/*
 * Set a range of pages PG_offline. Remember pages that were never onlined
 * (via generic_online_page()) using PageDirty().
 */
static void virtio_mem_set_fake_offline(unsigned long pfn,
					unsigned long nr_pages, bool onlined)
{
	for (; nr_pages--; pfn++) {
		struct page *page = pfn_to_page(pfn);

		__SetPageOffline(page);
		if (!onlined) {
			SetPageDirty(page);
			/* FIXME: remove after cleanups */
			ClearPageReserved(page);
		}
	}
}

/*
 * Clear PG_offline from a range of pages. If the pages were never onlined,
 * (via generic_online_page()), clear PageDirty().
 */
static void virtio_mem_clear_fake_offline(unsigned long pfn,
					  unsigned long nr_pages, bool onlined)
{
	for (; nr_pages--; pfn++) {
		struct page *page = pfn_to_page(pfn);

		__ClearPageOffline(page);
		if (!onlined)
			ClearPageDirty(page);
	}
}
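/*
 * Note with illustrative values: on x86-64 with 4 KiB base pages and
 * MAX_ORDER of 11, MAX_ORDER_NR_PAGES covers 1024 pages == 4 MiB. Subblocks
 * have at least that size and alignment, so each MAX_ORDER_NR_PAGES chunk
 * processed below is uniformly either "kept fake-offline while onlining" or
 * "allocated via alloc_contig_range()".
 */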
/*
 * Release a range of fake-offline pages to the buddy, effectively
 * fake-onlining them.
 */
static void virtio_mem_fake_online(unsigned long pfn, unsigned long nr_pages)
{
	const unsigned long max_nr_pages = MAX_ORDER_NR_PAGES;
	unsigned long i;

	/*
	 * We are always called at least with MAX_ORDER_NR_PAGES
	 * granularity/alignment (e.g., the way subblocks work). All pages
	 * inside such a block are alike.
	 */
	for (i = 0; i < nr_pages; i += max_nr_pages) {
		struct page *page = pfn_to_page(pfn + i);

		/*
		 * If the page is PageDirty(), it was kept fake-offline when
		 * onlining the memory block. Otherwise, it was allocated
		 * using alloc_contig_range(). All pages in a subblock are
		 * alike.
		 */
		if (PageDirty(page)) {
			virtio_mem_clear_fake_offline(pfn + i, max_nr_pages,
						      false);
			generic_online_page(page, MAX_ORDER - 1);
		} else {
			virtio_mem_clear_fake_offline(pfn + i, max_nr_pages,
						      true);
			free_contig_range(pfn + i, max_nr_pages);
			adjust_managed_page_count(page, max_nr_pages);
		}
	}
}

/*
 * Try to allocate a range, marking pages fake-offline, effectively
 * fake-offlining them.
 */
static int virtio_mem_fake_offline(unsigned long pfn, unsigned long nr_pages)
{
	const bool is_movable = zone_idx(page_zone(pfn_to_page(pfn))) ==
				ZONE_MOVABLE;
	int rc, retry_count;

	/*
	 * TODO: We want an alloc_contig_range() mode that tries to allocate
	 * harder (e.g., dealing with temporarily pinned pages, PCP), especially
	 * with ZONE_MOVABLE. So for now, retry a couple of times with
	 * ZONE_MOVABLE before giving up - because that zone is supposed to give
	 * some guarantees.
	 */
	for (retry_count = 0; retry_count < 5; retry_count++) {
		rc = alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_MOVABLE,
					GFP_KERNEL);
		if (rc == -ENOMEM)
			/* whoops, out of memory */
			return rc;
		else if (rc && !is_movable)
			break;
		else if (rc)
			continue;

		virtio_mem_set_fake_offline(pfn, nr_pages, true);
		adjust_managed_page_count(pfn_to_page(pfn), -nr_pages);
		return 0;
	}

	return -EBUSY;
}

/*
 * Handle fake-offline pages when memory is going offline - such that the
 * pages can be skipped by mm-core when offlining.
 */
static void virtio_mem_fake_offline_going_offline(unsigned long pfn,
						  unsigned long nr_pages)
{
	struct page *page;
	unsigned long i;

	/*
	 * Drop our reference to the pages so the memory can get offlined
	 * and add the unplugged pages to the managed page counters (so
	 * offlining code can correctly subtract them again).
	 */
	adjust_managed_page_count(pfn_to_page(pfn), nr_pages);
	for (i = 0; i < nr_pages; i++) {
		page = pfn_to_page(pfn + i);
		if (WARN_ON(!page_ref_dec_and_test(page)))
			dump_page(page, "fake-offline page referenced");
	}
}

/*
 * Handle fake-offline pages when memory offlining is canceled - to undo
 * what we did in virtio_mem_fake_offline_going_offline().
 */
static void virtio_mem_fake_offline_cancel_offline(unsigned long pfn,
						   unsigned long nr_pages)
{
	unsigned long i;

	/*
	 * Get the reference we dropped when going offline and subtract the
	 * unplugged pages from the managed page counters.
	 */
	adjust_managed_page_count(pfn_to_page(pfn), -nr_pages);
	for (i = 0; i < nr_pages; i++)
		page_ref_inc(pfn_to_page(pfn + i));
}

static void virtio_mem_online_page_cb(struct page *page, unsigned int order)
{
	const unsigned long addr = page_to_phys(page);
	unsigned long id, sb_id;
	struct virtio_mem *vm;
	bool do_online;

	rcu_read_lock();
	list_for_each_entry_rcu(vm, &virtio_mem_devices, next) {
		if (!virtio_mem_contains_range(vm, addr, PFN_PHYS(1 << order)))
			continue;

		if (vm->in_sbm) {
			/*
			 * We exploit here that subblocks have at least
			 * MAX_ORDER_NR_PAGES size/alignment - so we cannot
			 * cross subblocks within one call.
			 */
			id = virtio_mem_phys_to_mb_id(addr);
			sb_id = virtio_mem_phys_to_sb_id(vm, addr);
			do_online = virtio_mem_sbm_test_sb_plugged(vm, id,
								   sb_id, 1);
		} else {
			/*
			 * If the whole block is marked fake offline, keep
			 * everything that way.
			 */
			id = virtio_mem_phys_to_bb_id(vm, addr);
			do_online = virtio_mem_bbm_get_bb_state(vm, id) !=
				    VIRTIO_MEM_BBM_BB_FAKE_OFFLINE;
		}
		if (do_online)
			generic_online_page(page, order);
		else
			virtio_mem_set_fake_offline(PFN_DOWN(addr), 1 << order,
						    false);
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	/* not virtio-mem memory, but e.g., a DIMM. online it */
	generic_online_page(page, order);
}

static int virtio_mem_send_request(struct virtio_mem *vm,
				   const struct virtio_mem_req *req)
{
	struct scatterlist *sgs[2], sg_req, sg_resp;
	unsigned int len;
	int rc;

	/* don't use the request residing on the stack (vaddr) */
	vm->req = *req;

	/* out: buffer for request */
	sg_init_one(&sg_req, &vm->req, sizeof(vm->req));
	sgs[0] = &sg_req;

	/* in: buffer for response */
	sg_init_one(&sg_resp, &vm->resp, sizeof(vm->resp));
	sgs[1] = &sg_resp;

	rc = virtqueue_add_sgs(vm->vq, sgs, 1, 1, vm, GFP_KERNEL);
	if (rc < 0)
		return rc;

	virtqueue_kick(vm->vq);

	/* wait for a response */
	wait_event(vm->host_resp, virtqueue_get_buf(vm->vq, &len));

	return virtio16_to_cpu(vm->vdev, vm->resp.type);
}

static int virtio_mem_send_plug_request(struct virtio_mem *vm, uint64_t addr,
					uint64_t size)
{
	const uint64_t nb_vm_blocks = size / vm->device_block_size;
	const struct virtio_mem_req req = {
		.type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_PLUG),
		.u.plug.addr = cpu_to_virtio64(vm->vdev, addr),
		.u.plug.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks),
	};
	int rc = -ENOMEM;

	if (atomic_read(&vm->config_changed))
		return -EAGAIN;

	dev_dbg(&vm->vdev->dev, "plugging memory: 0x%llx - 0x%llx\n", addr,
		addr + size - 1);

	switch (virtio_mem_send_request(vm, &req)) {
	case VIRTIO_MEM_RESP_ACK:
		vm->plugged_size += size;
		return 0;
	case VIRTIO_MEM_RESP_NACK:
		rc = -EAGAIN;
		break;
	case VIRTIO_MEM_RESP_BUSY:
		rc = -ETXTBSY;
		break;
	case VIRTIO_MEM_RESP_ERROR:
		rc = -EINVAL;
		break;
	default:
		break;
	}

	dev_dbg(&vm->vdev->dev, "plugging memory failed: %d\n", rc);
	return rc;
}
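/*
 * Example with illustrative values: with a 2 MiB device_block_size,
 * plugging a single 4 MiB subblock results in a VIRTIO_MEM_REQ_PLUG
 * request with nb_blocks == 2; the unplug path below performs the same
 * conversion for VIRTIO_MEM_REQ_UNPLUG.
 */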
static int virtio_mem_send_unplug_request(struct virtio_mem *vm, uint64_t addr,
					  uint64_t size)
{
	const uint64_t nb_vm_blocks = size / vm->device_block_size;
	const struct virtio_mem_req req = {
		.type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_UNPLUG),
		.u.unplug.addr = cpu_to_virtio64(vm->vdev, addr),
		.u.unplug.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks),
	};
	int rc = -ENOMEM;

	if (atomic_read(&vm->config_changed))
		return -EAGAIN;

	dev_dbg(&vm->vdev->dev, "unplugging memory: 0x%llx - 0x%llx\n", addr,
		addr + size - 1);

	switch (virtio_mem_send_request(vm, &req)) {
	case VIRTIO_MEM_RESP_ACK:
		vm->plugged_size -= size;
		return 0;
	case VIRTIO_MEM_RESP_BUSY:
		rc = -ETXTBSY;
		break;
	case VIRTIO_MEM_RESP_ERROR:
		rc = -EINVAL;
		break;
	default:
		break;
	}

	dev_dbg(&vm->vdev->dev, "unplugging memory failed: %d\n", rc);
	return rc;
}

static int virtio_mem_send_unplug_all_request(struct virtio_mem *vm)
{
	const struct virtio_mem_req req = {
		.type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_UNPLUG_ALL),
	};
	int rc = -ENOMEM;

	dev_dbg(&vm->vdev->dev, "unplugging all memory");

	switch (virtio_mem_send_request(vm, &req)) {
	case VIRTIO_MEM_RESP_ACK:
		vm->unplug_all_required = false;
		vm->plugged_size = 0;
		/* usable region might have shrunk */
		atomic_set(&vm->config_changed, 1);
		return 0;
	case VIRTIO_MEM_RESP_BUSY:
		rc = -ETXTBSY;
		break;
	default:
		break;
	}

	dev_dbg(&vm->vdev->dev, "unplugging all memory failed: %d\n", rc);
	return rc;
}

/*
 * Plug selected subblocks. Updates the plugged state, but not the state
 * of the memory block.
 */
static int virtio_mem_sbm_plug_sb(struct virtio_mem *vm, unsigned long mb_id,
				  int sb_id, int count)
{
	const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id) +
			      sb_id * vm->sbm.sb_size;
	const uint64_t size = count * vm->sbm.sb_size;
	int rc;

	rc = virtio_mem_send_plug_request(vm, addr, size);
	if (!rc)
		virtio_mem_sbm_set_sb_plugged(vm, mb_id, sb_id, count);
	return rc;
}

/*
 * Unplug selected subblocks. Updates the plugged state, but not the state
 * of the memory block.
 */
static int virtio_mem_sbm_unplug_sb(struct virtio_mem *vm, unsigned long mb_id,
				    int sb_id, int count)
{
	const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id) +
			      sb_id * vm->sbm.sb_size;
	const uint64_t size = count * vm->sbm.sb_size;
	int rc;

	rc = virtio_mem_send_unplug_request(vm, addr, size);
	if (!rc)
		virtio_mem_sbm_set_sb_unplugged(vm, mb_id, sb_id, count);
	return rc;
}

/*
 * Request to unplug a big block.
 *
 * Will not modify the state of the big block.
 */
static int virtio_mem_bbm_unplug_bb(struct virtio_mem *vm, unsigned long bb_id)
{
	const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
	const uint64_t size = vm->bbm.bb_size;

	return virtio_mem_send_unplug_request(vm, addr, size);
}

/*
 * Request to plug a big block.
 *
 * Will not modify the state of the big block.
 */
static int virtio_mem_bbm_plug_bb(struct virtio_mem *vm, unsigned long bb_id)
{
	const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
	const uint64_t size = vm->bbm.bb_size;

	return virtio_mem_send_plug_request(vm, addr, size);
}

/*
 * Unplug the desired number of plugged subblocks of an offline or not-added
 * memory block. Will fail if any subblock cannot get unplugged (instead of
 * skipping it).
 *
 * Will not modify the state of the memory block.
 *
 * Note: can fail after some subblocks were unplugged.
 */
static int virtio_mem_sbm_unplug_any_sb(struct virtio_mem *vm,
					unsigned long mb_id, uint64_t *nb_sb)
{
	int sb_id, count;
	int rc;

	sb_id = vm->sbm.sbs_per_mb - 1;
	while (*nb_sb) {
		/* Find the next candidate subblock */
		while (sb_id >= 0 &&
		       virtio_mem_sbm_test_sb_unplugged(vm, mb_id, sb_id, 1))
			sb_id--;
		if (sb_id < 0)
			break;
		/* Try to unplug multiple subblocks at a time */
		count = 1;
		while (count < *nb_sb && sb_id > 0 &&
		       virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id - 1, 1)) {
			count++;
			sb_id--;
		}

		rc = virtio_mem_sbm_unplug_sb(vm, mb_id, sb_id, count);
		if (rc)
			return rc;
		*nb_sb -= count;
		sb_id--;
	}

	return 0;
}
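/*
 * Example with illustrative values: with vm->sbm.sbs_per_mb == 8, a plugged
 * bitmap of 11100111 (sb_id 7..0) and *nb_sb == 4, the scan above first
 * unplugs the batch sb_id 5..7 (count == 3), then unplugs sb_id 2 (count
 * limited to the remaining *nb_sb == 1), leaving 00000011.
 */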
/*
 * Unplug all plugged subblocks of an offline or not-added memory block.
 *
 * Will not modify the state of the memory block.
 *
 * Note: can fail after some subblocks were unplugged.
 */
static int virtio_mem_sbm_unplug_mb(struct virtio_mem *vm, unsigned long mb_id)
{
	uint64_t nb_sb = vm->sbm.sbs_per_mb;

	return virtio_mem_sbm_unplug_any_sb(vm, mb_id, &nb_sb);
}

/*
 * Prepare tracking data for the next memory block.
 */
static int virtio_mem_sbm_prepare_next_mb(struct virtio_mem *vm,
					  unsigned long *mb_id)
{
	int rc;

	if (vm->sbm.next_mb_id > vm->sbm.last_usable_mb_id)
		return -ENOSPC;

	/* Resize the state array if required. */
	rc = virtio_mem_sbm_mb_states_prepare_next_mb(vm);
	if (rc)
		return rc;

	/* Resize the subblock bitmap if required. */
	rc = virtio_mem_sbm_sb_states_prepare_next_mb(vm);
	if (rc)
		return rc;

	vm->sbm.mb_count[VIRTIO_MEM_SBM_MB_UNUSED]++;
	*mb_id = vm->sbm.next_mb_id++;
	return 0;
}

/*
 * Try to plug the desired number of subblocks and add the memory block
 * to Linux.
 *
 * Will modify the state of the memory block.
 */
static int virtio_mem_sbm_plug_and_add_mb(struct virtio_mem *vm,
					  unsigned long mb_id, uint64_t *nb_sb)
{
	const int count = min_t(int, *nb_sb, vm->sbm.sbs_per_mb);
	int rc;

	if (WARN_ON_ONCE(!count))
		return -EINVAL;

	/*
	 * Plug the requested number of subblocks before adding it to linux,
	 * so that onlining will directly online all plugged subblocks.
	 */
	rc = virtio_mem_sbm_plug_sb(vm, mb_id, 0, count);
	if (rc)
		return rc;

	/*
	 * Mark the block properly offline before adding it to Linux,
	 * so the memory notifiers will find the block in the right state.
	 */
	if (count == vm->sbm.sbs_per_mb)
		virtio_mem_sbm_set_mb_state(vm, mb_id,
					    VIRTIO_MEM_SBM_MB_OFFLINE);
	else
		virtio_mem_sbm_set_mb_state(vm, mb_id,
					    VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL);

	/* Add the memory block to linux - if that fails, try to unplug. */
	rc = virtio_mem_sbm_add_mb(vm, mb_id);
	if (rc) {
		int new_state = VIRTIO_MEM_SBM_MB_UNUSED;

		if (virtio_mem_sbm_unplug_sb(vm, mb_id, 0, count))
			new_state = VIRTIO_MEM_SBM_MB_PLUGGED;
		virtio_mem_sbm_set_mb_state(vm, mb_id, new_state);
		return rc;
	}

	*nb_sb -= count;
	return 0;
}

/*
 * Try to plug the desired number of subblocks of a memory block that
 * is already added to Linux.
 *
 * Will modify the state of the memory block.
 *
 * Note: Can fail after some subblocks were successfully plugged.
 */
static int virtio_mem_sbm_plug_any_sb(struct virtio_mem *vm,
				      unsigned long mb_id, uint64_t *nb_sb,
				      bool online)
{
	unsigned long pfn, nr_pages;
	int sb_id, count;
	int rc;

	if (WARN_ON_ONCE(!*nb_sb))
		return -EINVAL;

	while (*nb_sb) {
		sb_id = virtio_mem_sbm_first_unplugged_sb(vm, mb_id);
		if (sb_id >= vm->sbm.sbs_per_mb)
			break;
		count = 1;
		while (count < *nb_sb &&
		       sb_id + count < vm->sbm.sbs_per_mb &&
		       !virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id + count, 1))
			count++;

		rc = virtio_mem_sbm_plug_sb(vm, mb_id, sb_id, count);
		if (rc)
			return rc;
		*nb_sb -= count;
		if (!online)
			continue;

		/* fake-online the pages if the memory block is online */
		pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
			       sb_id * vm->sbm.sb_size);
		nr_pages = PFN_DOWN(count * vm->sbm.sb_size);
		virtio_mem_fake_online(pfn, nr_pages);
	}

	if (virtio_mem_sbm_test_sb_plugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) {
		if (online)
			virtio_mem_sbm_set_mb_state(vm, mb_id,
						    VIRTIO_MEM_SBM_MB_ONLINE);
		else
			virtio_mem_sbm_set_mb_state(vm, mb_id,
						    VIRTIO_MEM_SBM_MB_OFFLINE);
	}

	return 0;
}

static int virtio_mem_sbm_plug_request(struct virtio_mem *vm, uint64_t diff)
{
	uint64_t nb_sb = diff / vm->sbm.sb_size;
	unsigned long mb_id;
	int rc;

	if (!nb_sb)
		return 0;

	/* Don't race with onlining/offlining */
	mutex_lock(&vm->hotplug_mutex);

	/* Try to plug subblocks of partially plugged online blocks. */
	virtio_mem_sbm_for_each_mb(vm, mb_id,
				   VIRTIO_MEM_SBM_MB_ONLINE_PARTIAL) {
		rc = virtio_mem_sbm_plug_any_sb(vm, mb_id, &nb_sb, true);
		if (rc || !nb_sb)
			goto out_unlock;
		cond_resched();
	}

	/* Try to plug subblocks of partially plugged offline blocks. */
	virtio_mem_sbm_for_each_mb(vm, mb_id,
				   VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL) {
		rc = virtio_mem_sbm_plug_any_sb(vm, mb_id, &nb_sb, false);
		if (rc || !nb_sb)
			goto out_unlock;
		cond_resched();
	}

	/*
	 * We won't be working on online/offline memory blocks from this point,
	 * so we can't race with memory onlining/offlining. Drop the mutex.
	 */
	mutex_unlock(&vm->hotplug_mutex);

	/* Try to plug and add unused blocks */
	virtio_mem_sbm_for_each_mb(vm, mb_id, VIRTIO_MEM_SBM_MB_UNUSED) {
		if (!virtio_mem_could_add_memory(vm, memory_block_size_bytes()))
			return -ENOSPC;

		rc = virtio_mem_sbm_plug_and_add_mb(vm, mb_id, &nb_sb);
		if (rc || !nb_sb)
			return rc;
		cond_resched();
	}

	/* Try to prepare, plug and add new blocks */
	while (nb_sb) {
		if (!virtio_mem_could_add_memory(vm, memory_block_size_bytes()))
			return -ENOSPC;

		rc = virtio_mem_sbm_prepare_next_mb(vm, &mb_id);
		if (rc)
			return rc;
		rc = virtio_mem_sbm_plug_and_add_mb(vm, mb_id, &nb_sb);
		if (rc)
			return rc;
		cond_resched();
	}

	return 0;
out_unlock:
	mutex_unlock(&vm->hotplug_mutex);
	return rc;
}

/*
 * Plug a big block and add it to Linux.
 *
 * Will modify the state of the big block.
 */
static int virtio_mem_bbm_plug_and_add_bb(struct virtio_mem *vm,
					  unsigned long bb_id)
{
	int rc;

	if (WARN_ON_ONCE(virtio_mem_bbm_get_bb_state(vm, bb_id) !=
			 VIRTIO_MEM_BBM_BB_UNUSED))
		return -EINVAL;

	rc = virtio_mem_bbm_plug_bb(vm, bb_id);
	if (rc)
		return rc;
	virtio_mem_bbm_set_bb_state(vm, bb_id, VIRTIO_MEM_BBM_BB_ADDED);

	rc = virtio_mem_bbm_add_bb(vm, bb_id);
	if (rc) {
		if (!virtio_mem_bbm_unplug_bb(vm, bb_id))
			virtio_mem_bbm_set_bb_state(vm, bb_id,
						    VIRTIO_MEM_BBM_BB_UNUSED);
		else
			/* Retry from the main loop. */
			virtio_mem_bbm_set_bb_state(vm, bb_id,
						    VIRTIO_MEM_BBM_BB_PLUGGED);
		return rc;
	}
	return 0;
}

/*
 * Prepare tracking data for the next big block.
 */
static int virtio_mem_bbm_prepare_next_bb(struct virtio_mem *vm,
					  unsigned long *bb_id)
{
	int rc;

	if (vm->bbm.next_bb_id > vm->bbm.last_usable_bb_id)
		return -ENOSPC;

	/* Resize the big block state array if required. */
	rc = virtio_mem_bbm_bb_states_prepare_next_bb(vm);
	if (rc)
		return rc;

	vm->bbm.bb_count[VIRTIO_MEM_BBM_BB_UNUSED]++;
	*bb_id = vm->bbm.next_bb_id;
	vm->bbm.next_bb_id++;
	return 0;
}

static int virtio_mem_bbm_plug_request(struct virtio_mem *vm, uint64_t diff)
{
	uint64_t nb_bb = diff / vm->bbm.bb_size;
	unsigned long bb_id;
	int rc;

	if (!nb_bb)
		return 0;

	/* Try to plug and add unused big blocks */
	virtio_mem_bbm_for_each_bb(vm, bb_id, VIRTIO_MEM_BBM_BB_UNUSED) {
		if (!virtio_mem_could_add_memory(vm, vm->bbm.bb_size))
			return -ENOSPC;

		rc = virtio_mem_bbm_plug_and_add_bb(vm, bb_id);
		if (!rc)
			nb_bb--;
		if (rc || !nb_bb)
			return rc;
		cond_resched();
	}

	/* Try to prepare, plug and add new big blocks */
	while (nb_bb) {
		if (!virtio_mem_could_add_memory(vm, vm->bbm.bb_size))
			return -ENOSPC;

		rc = virtio_mem_bbm_prepare_next_bb(vm, &bb_id);
		if (rc)
			return rc;
		rc = virtio_mem_bbm_plug_and_add_bb(vm, bb_id);
		if (!rc)
			nb_bb--;
		if (rc)
			return rc;
		cond_resched();
	}

	return 0;
}

/*
 * Try to plug the requested amount of memory.
 */
static int virtio_mem_plug_request(struct virtio_mem *vm, uint64_t diff)
{
	if (vm->in_sbm)
		return virtio_mem_sbm_plug_request(vm, diff);
	return virtio_mem_bbm_plug_request(vm, diff);
}

/*
 * Unplug the desired number of plugged subblocks of an offline memory block.
 * Will fail if any subblock cannot get unplugged (instead of skipping it).
 *
 * Will modify the state of the memory block. Might temporarily drop the
 * hotplug_mutex.
 *
 * Note: Can fail after some subblocks were successfully unplugged.
 */
static int virtio_mem_sbm_unplug_any_sb_offline(struct virtio_mem *vm,
						unsigned long mb_id,
						uint64_t *nb_sb)
{
	int rc;

	rc = virtio_mem_sbm_unplug_any_sb(vm, mb_id, nb_sb);

	/* some subblocks might have been unplugged even on failure */
	if (!virtio_mem_sbm_test_sb_plugged(vm, mb_id, 0, vm->sbm.sbs_per_mb))
		virtio_mem_sbm_set_mb_state(vm, mb_id,
					    VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL);
	if (rc)
		return rc;

	if (virtio_mem_sbm_test_sb_unplugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) {
		/*
		 * Remove the block from Linux - this should never fail.
		 * Hinder the block from getting onlined by marking it
		 * unplugged. Temporarily drop the mutex, so
		 * any pending GOING_ONLINE requests can be serviced/rejected.
		 */
		virtio_mem_sbm_set_mb_state(vm, mb_id,
					    VIRTIO_MEM_SBM_MB_UNUSED);

		mutex_unlock(&vm->hotplug_mutex);
		rc = virtio_mem_sbm_remove_mb(vm, mb_id);
		BUG_ON(rc);
		mutex_lock(&vm->hotplug_mutex);
	}
	return 0;
}

/*
 * Unplug the given plugged subblocks of an online memory block.
 *
 * Will modify the state of the memory block.
 */
static int virtio_mem_sbm_unplug_sb_online(struct virtio_mem *vm,
					   unsigned long mb_id, int sb_id,
					   int count)
{
	const unsigned long nr_pages = PFN_DOWN(vm->sbm.sb_size) * count;
	unsigned long start_pfn;
	int rc;

	start_pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
			     sb_id * vm->sbm.sb_size);

	rc = virtio_mem_fake_offline(start_pfn, nr_pages);
	if (rc)
		return rc;

	/* Try to unplug the allocated memory */
	rc = virtio_mem_sbm_unplug_sb(vm, mb_id, sb_id, count);
	if (rc) {
		/* Return the memory to the buddy. */
		virtio_mem_fake_online(start_pfn, nr_pages);
		return rc;
	}

	virtio_mem_sbm_set_mb_state(vm, mb_id,
				    VIRTIO_MEM_SBM_MB_ONLINE_PARTIAL);
	return 0;
}

/*
 * Unplug the desired number of plugged subblocks of an online memory block.
 * Will skip subblocks that are busy.
 *
 * Will modify the state of the memory block. Might temporarily drop the
 * hotplug_mutex.
 *
 * Note: Can fail after some subblocks were successfully unplugged. Can
 * return 0 even if subblocks were busy and could not get unplugged.
 */
static int virtio_mem_sbm_unplug_any_sb_online(struct virtio_mem *vm,
					       unsigned long mb_id,
					       uint64_t *nb_sb)
{
	int rc, sb_id;

	/* If possible, try to unplug the complete block in one shot. */
	if (*nb_sb >= vm->sbm.sbs_per_mb &&
	    virtio_mem_sbm_test_sb_plugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) {
		rc = virtio_mem_sbm_unplug_sb_online(vm, mb_id, 0,
						     vm->sbm.sbs_per_mb);
		if (!rc) {
			*nb_sb -= vm->sbm.sbs_per_mb;
			goto unplugged;
		} else if (rc != -EBUSY)
			return rc;
	}

	/* Fallback to single subblocks. */
	for (sb_id = vm->sbm.sbs_per_mb - 1; sb_id >= 0 && *nb_sb; sb_id--) {
		/* Find the next candidate subblock */
		while (sb_id >= 0 &&
		       !virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id, 1))
			sb_id--;
		if (sb_id < 0)
			break;

		rc = virtio_mem_sbm_unplug_sb_online(vm, mb_id, sb_id, 1);
		if (rc == -EBUSY)
			continue;
		else if (rc)
			return rc;
		*nb_sb -= 1;
	}

unplugged:
	/*
	 * Once all subblocks of a memory block were unplugged, offline and
	 * remove it. This will usually not fail, as no memory is in use
	 * anymore - however some other notifiers might NACK the request.
	 */
	if (virtio_mem_sbm_test_sb_unplugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) {
		mutex_unlock(&vm->hotplug_mutex);
		rc = virtio_mem_sbm_offline_and_remove_mb(vm, mb_id);
		mutex_lock(&vm->hotplug_mutex);
		if (!rc)
			virtio_mem_sbm_set_mb_state(vm, mb_id,
						    VIRTIO_MEM_SBM_MB_UNUSED);
	}

	return 0;
}

static int virtio_mem_sbm_unplug_request(struct virtio_mem *vm, uint64_t diff)
{
	uint64_t nb_sb = diff / vm->sbm.sb_size;
	unsigned long mb_id;
	int rc;

	if (!nb_sb)
		return 0;

	/*
	 * We'll drop the mutex a couple of times when it is safe to do so.
	 * This might result in some blocks switching the state (online/offline)
	 * and we could miss them in this run - we will retry again later.
	 */
	mutex_lock(&vm->hotplug_mutex);

	/* Try to unplug subblocks of partially plugged offline blocks. */
	virtio_mem_sbm_for_each_mb_rev(vm, mb_id,
				       VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL) {
		rc = virtio_mem_sbm_unplug_any_sb_offline(vm, mb_id, &nb_sb);
		if (rc || !nb_sb)
			goto out_unlock;
		cond_resched();
	}

	/* Try to unplug subblocks of plugged offline blocks. */
	virtio_mem_sbm_for_each_mb_rev(vm, mb_id, VIRTIO_MEM_SBM_MB_OFFLINE) {
		rc = virtio_mem_sbm_unplug_any_sb_offline(vm, mb_id, &nb_sb);
		if (rc || !nb_sb)
			goto out_unlock;
		cond_resched();
	}

	if (!unplug_online) {
		mutex_unlock(&vm->hotplug_mutex);
		return 0;
	}

	/* Try to unplug subblocks of partially plugged online blocks. */
	virtio_mem_sbm_for_each_mb_rev(vm, mb_id,
				       VIRTIO_MEM_SBM_MB_ONLINE_PARTIAL) {
		rc = virtio_mem_sbm_unplug_any_sb_online(vm, mb_id, &nb_sb);
		if (rc || !nb_sb)
			goto out_unlock;
		mutex_unlock(&vm->hotplug_mutex);
		cond_resched();
		mutex_lock(&vm->hotplug_mutex);
	}

	/* Try to unplug subblocks of plugged online blocks. */
	virtio_mem_sbm_for_each_mb_rev(vm, mb_id, VIRTIO_MEM_SBM_MB_ONLINE) {
		rc = virtio_mem_sbm_unplug_any_sb_online(vm, mb_id, &nb_sb);
		if (rc || !nb_sb)
			goto out_unlock;
		mutex_unlock(&vm->hotplug_mutex);
		cond_resched();
		mutex_lock(&vm->hotplug_mutex);
	}

	mutex_unlock(&vm->hotplug_mutex);
	return nb_sb ? -EBUSY : 0;
out_unlock:
	mutex_unlock(&vm->hotplug_mutex);
	return rc;
}
static int virtio_mem_sbm_unplug_request(struct virtio_mem *vm, uint64_t diff)
{
	uint64_t nb_sb = diff / vm->sbm.sb_size;
	unsigned long mb_id;
	int rc;

	if (!nb_sb)
		return 0;

	/*
	 * We'll drop the mutex a couple of times when it is safe to do so.
	 * This might result in some blocks switching state (online/offline)
	 * and we could miss them in this run - we'll retry later.
	 */
	mutex_lock(&vm->hotplug_mutex);

	/* Try to unplug subblocks of partially plugged offline blocks. */
	virtio_mem_sbm_for_each_mb_rev(vm, mb_id,
				       VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL) {
		rc = virtio_mem_sbm_unplug_any_sb_offline(vm, mb_id, &nb_sb);
		if (rc || !nb_sb)
			goto out_unlock;
		cond_resched();
	}

	/* Try to unplug subblocks of plugged offline blocks. */
	virtio_mem_sbm_for_each_mb_rev(vm, mb_id, VIRTIO_MEM_SBM_MB_OFFLINE) {
		rc = virtio_mem_sbm_unplug_any_sb_offline(vm, mb_id, &nb_sb);
		if (rc || !nb_sb)
			goto out_unlock;
		cond_resched();
	}

	if (!unplug_online) {
		mutex_unlock(&vm->hotplug_mutex);
		return 0;
	}

	/* Try to unplug subblocks of partially plugged online blocks. */
	virtio_mem_sbm_for_each_mb_rev(vm, mb_id,
				       VIRTIO_MEM_SBM_MB_ONLINE_PARTIAL) {
		rc = virtio_mem_sbm_unplug_any_sb_online(vm, mb_id, &nb_sb);
		if (rc || !nb_sb)
			goto out_unlock;
		mutex_unlock(&vm->hotplug_mutex);
		cond_resched();
		mutex_lock(&vm->hotplug_mutex);
	}

	/* Try to unplug subblocks of plugged online blocks. */
	virtio_mem_sbm_for_each_mb_rev(vm, mb_id, VIRTIO_MEM_SBM_MB_ONLINE) {
		rc = virtio_mem_sbm_unplug_any_sb_online(vm, mb_id, &nb_sb);
		if (rc || !nb_sb)
			goto out_unlock;
		mutex_unlock(&vm->hotplug_mutex);
		cond_resched();
		mutex_lock(&vm->hotplug_mutex);
	}

	mutex_unlock(&vm->hotplug_mutex);
	return nb_sb ? -EBUSY : 0;
out_unlock:
	mutex_unlock(&vm->hotplug_mutex);
	return rc;
}

/*
 * Try to offline and remove a big block from Linux and unplug it. Will fail
 * with -EBUSY if some memory is busy and cannot get unplugged.
 *
 * Will modify the state of the memory block. Might temporarily drop the
 * hotplug_mutex.
 */
static int virtio_mem_bbm_offline_remove_and_unplug_bb(struct virtio_mem *vm,
						       unsigned long bb_id)
{
	const unsigned long start_pfn = PFN_DOWN(virtio_mem_bb_id_to_phys(vm, bb_id));
	const unsigned long nr_pages = PFN_DOWN(vm->bbm.bb_size);
	unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long pfn;
	struct page *page;
	int rc;

	if (WARN_ON_ONCE(virtio_mem_bbm_get_bb_state(vm, bb_id) !=
			 VIRTIO_MEM_BBM_BB_ADDED))
		return -EINVAL;

	if (bbm_safe_unplug) {
		/*
		 * Start by fake-offlining all memory. Once we marked the
		 * device block as fake-offline, all newly onlined memory will
		 * automatically be kept fake-offline. Protect from concurrent
		 * onlining/offlining until we have a consistent state.
		 */
		mutex_lock(&vm->hotplug_mutex);
		virtio_mem_bbm_set_bb_state(vm, bb_id,
					    VIRTIO_MEM_BBM_BB_FAKE_OFFLINE);

		for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
			page = pfn_to_online_page(pfn);
			if (!page)
				continue;

			rc = virtio_mem_fake_offline(pfn, PAGES_PER_SECTION);
			if (rc) {
				end_pfn = pfn;
				goto rollback_safe_unplug;
			}
		}
		mutex_unlock(&vm->hotplug_mutex);
	}

	rc = virtio_mem_bbm_offline_and_remove_bb(vm, bb_id);
	if (rc) {
		if (bbm_safe_unplug) {
			mutex_lock(&vm->hotplug_mutex);
			goto rollback_safe_unplug;
		}
		return rc;
	}

	rc = virtio_mem_bbm_unplug_bb(vm, bb_id);
	if (rc)
		virtio_mem_bbm_set_bb_state(vm, bb_id,
					    VIRTIO_MEM_BBM_BB_PLUGGED);
	else
		virtio_mem_bbm_set_bb_state(vm, bb_id,
					    VIRTIO_MEM_BBM_BB_UNUSED);
	return rc;

rollback_safe_unplug:
	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		page = pfn_to_online_page(pfn);
		if (!page)
			continue;
		virtio_mem_fake_online(pfn, PAGES_PER_SECTION);
	}
	virtio_mem_bbm_set_bb_state(vm, bb_id, VIRTIO_MEM_BBM_BB_ADDED);
	mutex_unlock(&vm->hotplug_mutex);
	return rc;
}

/*
 * Try to remove a big block from Linux and unplug it. Will fail with
 * -EBUSY if some memory is online.
 *
 * Will modify the state of the memory block.
 */
static int virtio_mem_bbm_remove_and_unplug_bb(struct virtio_mem *vm,
					       unsigned long bb_id)
{
	int rc;

	if (WARN_ON_ONCE(virtio_mem_bbm_get_bb_state(vm, bb_id) !=
			 VIRTIO_MEM_BBM_BB_ADDED))
		return -EINVAL;

	rc = virtio_mem_bbm_remove_bb(vm, bb_id);
	if (rc)
		return -EBUSY;

	rc = virtio_mem_bbm_unplug_bb(vm, bb_id);
	if (rc)
		virtio_mem_bbm_set_bb_state(vm, bb_id,
					    VIRTIO_MEM_BBM_BB_PLUGGED);
	else
		virtio_mem_bbm_set_bb_state(vm, bb_id,
					    VIRTIO_MEM_BBM_BB_UNUSED);
	return rc;
}

/*
 * Test if a big block is completely offline.
 */
static bool virtio_mem_bbm_bb_is_offline(struct virtio_mem *vm,
					 unsigned long bb_id)
{
	const unsigned long start_pfn = PFN_DOWN(virtio_mem_bb_id_to_phys(vm, bb_id));
	const unsigned long nr_pages = PFN_DOWN(vm->bbm.bb_size);
	unsigned long pfn;

	for (pfn = start_pfn; pfn < start_pfn + nr_pages;
	     pfn += PAGES_PER_SECTION) {
		if (pfn_to_online_page(pfn))
			return false;
	}

	return true;
}

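/*
 * Try to unplug the requested amount of memory in BBM, in units of big
 * blocks. Completely offline big blocks are preferred; online memory is
 * only touched when the "unplug_online" module parameter is set.
 */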
static int virtio_mem_bbm_unplug_request(struct virtio_mem *vm, uint64_t diff)
{
	uint64_t nb_bb = diff / vm->bbm.bb_size;
	uint64_t bb_id;
	int rc;

	if (!nb_bb)
		return 0;

	/* Try to unplug completely offline big blocks first. */
	virtio_mem_bbm_for_each_bb_rev(vm, bb_id, VIRTIO_MEM_BBM_BB_ADDED) {
		cond_resched();
		/*
		 * As we're holding no locks, this check is racy as memory
		 * can get onlined in the meantime - but we'll fail gracefully.
		 */
		if (!virtio_mem_bbm_bb_is_offline(vm, bb_id))
			continue;
		rc = virtio_mem_bbm_remove_and_unplug_bb(vm, bb_id);
		if (rc == -EBUSY)
			continue;
		if (!rc)
			nb_bb--;
		if (rc || !nb_bb)
			return rc;
	}

	if (!unplug_online)
		return 0;

	/* Try to unplug any big blocks. */
	virtio_mem_bbm_for_each_bb_rev(vm, bb_id, VIRTIO_MEM_BBM_BB_ADDED) {
		cond_resched();
		rc = virtio_mem_bbm_offline_remove_and_unplug_bb(vm, bb_id);
		if (rc == -EBUSY)
			continue;
		if (!rc)
			nb_bb--;
		if (rc || !nb_bb)
			return rc;
	}

	return nb_bb ? -EBUSY : 0;
}

/*
 * Try to unplug the requested amount of memory.
 */
static int virtio_mem_unplug_request(struct virtio_mem *vm, uint64_t diff)
{
	if (vm->in_sbm)
		return virtio_mem_sbm_unplug_request(vm, diff);
	return virtio_mem_bbm_unplug_request(vm, diff);
}

/*
 * Try to unplug all blocks that couldn't be unplugged before, for example,
 * because the hypervisor was busy.
 */
static int virtio_mem_unplug_pending_mb(struct virtio_mem *vm)
{
	unsigned long id;
	int rc;

	if (!vm->in_sbm) {
		virtio_mem_bbm_for_each_bb(vm, id,
					   VIRTIO_MEM_BBM_BB_PLUGGED) {
			rc = virtio_mem_bbm_unplug_bb(vm, id);
			if (rc)
				return rc;
			virtio_mem_bbm_set_bb_state(vm, id,
						    VIRTIO_MEM_BBM_BB_UNUSED);
		}
		return 0;
	}

	virtio_mem_sbm_for_each_mb(vm, id, VIRTIO_MEM_SBM_MB_PLUGGED) {
		rc = virtio_mem_sbm_unplug_mb(vm, id);
		if (rc)
			return rc;
		virtio_mem_sbm_set_mb_state(vm, id,
					    VIRTIO_MEM_SBM_MB_UNUSED);
	}

	return 0;
}

/*
 * Update all parts of the config that could have changed.
 */
static void virtio_mem_refresh_config(struct virtio_mem *vm)
{
	const uint64_t phys_limit = 1UL << MAX_PHYSMEM_BITS;
	uint64_t new_plugged_size, usable_region_size, end_addr;

	/* the plugged_size is just a reflection of what _we_ did previously */
	virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
			&new_plugged_size);
	if (WARN_ON_ONCE(new_plugged_size != vm->plugged_size))
		vm->plugged_size = new_plugged_size;

	/* calculate the last usable memory block id */
	virtio_cread_le(vm->vdev, struct virtio_mem_config,
			usable_region_size, &usable_region_size);
	end_addr = vm->addr + usable_region_size;
	end_addr = min(end_addr, phys_limit);

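	/*
	 * end_addr is the first address after the usable region (capped at
	 * the maximum addressable physical address). The block containing
	 * end_addr is therefore not fully usable, which is why the last
	 * usable id below is the id of that block minus one.
	 */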
	if (vm->in_sbm)
		vm->sbm.last_usable_mb_id =
				virtio_mem_phys_to_mb_id(end_addr) - 1;
	else
		vm->bbm.last_usable_bb_id =
				virtio_mem_phys_to_bb_id(vm, end_addr) - 1;

	/* see if there is a request to change the size */
	virtio_cread_le(vm->vdev, struct virtio_mem_config, requested_size,
			&vm->requested_size);

	dev_info(&vm->vdev->dev, "plugged size: 0x%llx", vm->plugged_size);
	dev_info(&vm->vdev->dev, "requested size: 0x%llx", vm->requested_size);
}

/*
 * Workqueue function for handling plug/unplug requests and config updates.
 */
static void virtio_mem_run_wq(struct work_struct *work)
{
	struct virtio_mem *vm = container_of(work, struct virtio_mem, wq);
	uint64_t diff;
	int rc;

	hrtimer_cancel(&vm->retry_timer);

	if (vm->broken)
		return;

	atomic_set(&vm->wq_active, 1);
retry:
	rc = 0;

	/* Make sure we start with a clean state if there are leftovers. */
	if (unlikely(vm->unplug_all_required))
		rc = virtio_mem_send_unplug_all_request(vm);

	if (atomic_read(&vm->config_changed)) {
		atomic_set(&vm->config_changed, 0);
		virtio_mem_refresh_config(vm);
	}

	/* Unplug any leftovers from previous runs */
	if (!rc)
		rc = virtio_mem_unplug_pending_mb(vm);

	if (!rc && vm->requested_size != vm->plugged_size) {
		if (vm->requested_size > vm->plugged_size) {
			diff = vm->requested_size - vm->plugged_size;
			rc = virtio_mem_plug_request(vm, diff);
		} else {
			diff = vm->plugged_size - vm->requested_size;
			rc = virtio_mem_unplug_request(vm, diff);
		}
	}

	switch (rc) {
	case 0:
		vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS;
		break;
	case -ENOSPC:
		/*
		 * We cannot add any more memory (alignment, physical limit)
		 * or we have too many offline memory blocks.
		 */
		break;
	case -ETXTBSY:
		/*
		 * The hypervisor cannot process our request right now
		 * (e.g., out of memory, migrating).
		 */
	case -EBUSY:
		/*
		 * We cannot free up any memory to unplug it (all plugged
		 * memory is busy).
		 */
	case -ENOMEM:
		/* Out of memory, try again later. */
		hrtimer_start(&vm->retry_timer, ms_to_ktime(vm->retry_timer_ms),
			      HRTIMER_MODE_REL);
		break;
	case -EAGAIN:
		/* Retry immediately (e.g., the config changed). */
		goto retry;
	default:
		/* Unknown error, mark as broken */
		dev_err(&vm->vdev->dev,
			"unknown error, marking device broken: %d\n", rc);
		vm->broken = true;
	}

	atomic_set(&vm->wq_active, 0);
}

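/*
 * Timer callback: retrigger the workqueue and apply exponential backoff by
 * doubling the retry interval, capped at VIRTIO_MEM_RETRY_TIMER_MAX_MS.
 */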
static enum hrtimer_restart virtio_mem_timer_expired(struct hrtimer *timer)
{
	struct virtio_mem *vm = container_of(timer, struct virtio_mem,
					     retry_timer);

	virtio_mem_retry(vm);
	vm->retry_timer_ms = min_t(unsigned int, vm->retry_timer_ms * 2,
				   VIRTIO_MEM_RETRY_TIMER_MAX_MS);
	return HRTIMER_NORESTART;
}

static void virtio_mem_handle_response(struct virtqueue *vq)
{
	struct virtio_mem *vm = vq->vdev->priv;

	wake_up(&vm->host_resp);
}

static int virtio_mem_init_vq(struct virtio_mem *vm)
{
	struct virtqueue *vq;

	vq = virtio_find_single_vq(vm->vdev, virtio_mem_handle_response,
				   "guest-request");
	if (IS_ERR(vq))
		return PTR_ERR(vq);
	vm->vq = vq;

	return 0;
}

static int virtio_mem_init(struct virtio_mem *vm)
{
	const uint64_t phys_limit = 1UL << MAX_PHYSMEM_BITS;
	uint64_t sb_size, addr;
	uint16_t node_id;

	if (!vm->vdev->config->get) {
		dev_err(&vm->vdev->dev, "config access disabled\n");
		return -EINVAL;
	}

	/*
	 * We don't want to (un)plug or reuse any memory when in kdump. The
	 * memory is still accessible (but not mapped).
	 */
	if (is_kdump_kernel()) {
		dev_warn(&vm->vdev->dev, "disabled in kdump kernel\n");
		return -EBUSY;
	}

	/* Fetch all properties that can't change. */
	virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
			&vm->plugged_size);
	virtio_cread_le(vm->vdev, struct virtio_mem_config, block_size,
			&vm->device_block_size);
	virtio_cread_le(vm->vdev, struct virtio_mem_config, node_id,
			&node_id);
	vm->nid = virtio_mem_translate_node_id(vm, node_id);
	virtio_cread_le(vm->vdev, struct virtio_mem_config, addr, &vm->addr);
	virtio_cread_le(vm->vdev, struct virtio_mem_config, region_size,
			&vm->region_size);

	/* Determine the nid for the device based on the lowest address. */
	if (vm->nid == NUMA_NO_NODE)
		vm->nid = memory_add_physaddr_to_nid(vm->addr);

	/* bad device setup - warn only */
	if (!IS_ALIGNED(vm->addr, memory_block_size_bytes()))
		dev_warn(&vm->vdev->dev,
			 "The alignment of the physical start address can make some memory unusable.\n");
	if (!IS_ALIGNED(vm->addr + vm->region_size, memory_block_size_bytes()))
		dev_warn(&vm->vdev->dev,
			 "The alignment of the physical end address can make some memory unusable.\n");
	if (vm->addr + vm->region_size > phys_limit)
		dev_warn(&vm->vdev->dev,
			 "Some memory is not addressable. This can make some memory unusable.\n");

	/*
	 * We want subblocks to span at least MAX_ORDER_NR_PAGES and
	 * pageblock_nr_pages pages. This:
	 * - Simplifies our page onlining code (virtio_mem_online_page_cb)
	 *   and fake page onlining code (virtio_mem_fake_online).
	 * - Is required for now for alloc_contig_range() to work reliably -
	 *   it doesn't properly handle smaller granularity on ZONE_NORMAL.
	 */
	sb_size = max_t(uint64_t, MAX_ORDER_NR_PAGES,
			pageblock_nr_pages) * PAGE_SIZE;
	sb_size = max_t(uint64_t, vm->device_block_size, sb_size);

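	/*
	 * Illustrative example (not asserted by the device): on x86-64 with
	 * 4 KiB pages, MAX_ORDER_NR_PAGES typically corresponds to 4 MiB and
	 * pageblock_nr_pages to 2 MiB. With a device block size of 2 MiB,
	 * sb_size ends up as 4 MiB; with 128 MiB Linux memory blocks, that
	 * selects SBM below, with 32 subblocks per memory block.
	 */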
	if (sb_size < memory_block_size_bytes() && !force_bbm) {
		/* SBM: At least two subblocks per Linux memory block. */
		vm->in_sbm = true;
		vm->sbm.sb_size = sb_size;
		vm->sbm.sbs_per_mb = memory_block_size_bytes() /
				     vm->sbm.sb_size;

		/* Round up to the next full memory block */
		addr = vm->addr + memory_block_size_bytes() - 1;
		vm->sbm.first_mb_id = virtio_mem_phys_to_mb_id(addr);
		vm->sbm.next_mb_id = vm->sbm.first_mb_id;
	} else {
		/* BBM: At least one Linux memory block. */
		vm->bbm.bb_size = max_t(uint64_t, vm->device_block_size,
					memory_block_size_bytes());

		if (bbm_block_size) {
			if (!is_power_of_2(bbm_block_size)) {
				dev_warn(&vm->vdev->dev,
					 "bbm_block_size is not a power of 2");
			} else if (bbm_block_size < vm->bbm.bb_size) {
				dev_warn(&vm->vdev->dev,
					 "bbm_block_size is too small");
			} else {
				vm->bbm.bb_size = bbm_block_size;
			}
		}

		/* Round up to the next aligned big block */
		addr = vm->addr + vm->bbm.bb_size - 1;
		vm->bbm.first_bb_id = virtio_mem_phys_to_bb_id(vm, addr);
		vm->bbm.next_bb_id = vm->bbm.first_bb_id;
	}

	/* Prepare the offline threshold - make sure we can add two blocks. */
	vm->offline_threshold = max_t(uint64_t, 2 * memory_block_size_bytes(),
				      VIRTIO_MEM_DEFAULT_OFFLINE_THRESHOLD);
	/* In BBM, we also want at least two big blocks. */
	vm->offline_threshold = max_t(uint64_t, 2 * vm->bbm.bb_size,
				      vm->offline_threshold);

	dev_info(&vm->vdev->dev, "start address: 0x%llx", vm->addr);
	dev_info(&vm->vdev->dev, "region size: 0x%llx", vm->region_size);
	dev_info(&vm->vdev->dev, "device block size: 0x%llx",
		 (unsigned long long)vm->device_block_size);
	dev_info(&vm->vdev->dev, "memory block size: 0x%lx",
		 memory_block_size_bytes());
	if (vm->in_sbm)
		dev_info(&vm->vdev->dev, "subblock size: 0x%llx",
			 (unsigned long long)vm->sbm.sb_size);
	else
		dev_info(&vm->vdev->dev, "big block size: 0x%llx",
			 (unsigned long long)vm->bbm.bb_size);
	if (vm->nid != NUMA_NO_NODE && IS_ENABLED(CONFIG_NUMA))
		dev_info(&vm->vdev->dev, "nid: %d", vm->nid);

	return 0;
}

static int virtio_mem_create_resource(struct virtio_mem *vm)
{
	/*
	 * When force-unloading the driver and removing the device, we
	 * could have a garbage pointer. Duplicate the string.
	 */
	const char *name = kstrdup(dev_name(&vm->vdev->dev), GFP_KERNEL);

	if (!name)
		return -ENOMEM;

	vm->parent_resource = __request_mem_region(vm->addr, vm->region_size,
						   name, IORESOURCE_SYSTEM_RAM);
	if (!vm->parent_resource) {
		kfree(name);
		dev_warn(&vm->vdev->dev, "could not reserve device region\n");
		dev_info(&vm->vdev->dev,
			 "reloading the driver is not supported\n");
		return -EBUSY;
	}

	/* The memory is not actually busy - make add_memory() work. */
	vm->parent_resource->flags &= ~IORESOURCE_BUSY;
	return 0;
}

static void virtio_mem_delete_resource(struct virtio_mem *vm)
{
	const char *name;

	if (!vm->parent_resource)
		return;

	name = vm->parent_resource->name;
	release_resource(vm->parent_resource);
	kfree(vm->parent_resource);
	kfree(name);
	vm->parent_resource = NULL;
}

static int virtio_mem_range_has_system_ram(struct resource *res, void *arg)
{
	return 1;
}

static bool virtio_mem_has_memory_added(struct virtio_mem *vm)
{
	const unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	return walk_iomem_res_desc(IORES_DESC_NONE, flags, vm->addr,
				   vm->addr + vm->region_size, NULL,
				   virtio_mem_range_has_system_ram) == 1;
}

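/*
 * Initialize the device: set up the virtqueue, query the config, reserve the
 * parent resource, and register the memory notifier, before finally
 * triggering a first config update to process the requested_size.
 */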
static int virtio_mem_probe(struct virtio_device *vdev)
{
	struct virtio_mem *vm;
	int rc;

	BUILD_BUG_ON(sizeof(struct virtio_mem_req) != 24);
	BUILD_BUG_ON(sizeof(struct virtio_mem_resp) != 10);

	vdev->priv = vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return -ENOMEM;

	init_waitqueue_head(&vm->host_resp);
	vm->vdev = vdev;
	INIT_WORK(&vm->wq, virtio_mem_run_wq);
	mutex_init(&vm->hotplug_mutex);
	INIT_LIST_HEAD(&vm->next);
	spin_lock_init(&vm->removal_lock);
	hrtimer_init(&vm->retry_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vm->retry_timer.function = virtio_mem_timer_expired;
	vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS;

	/* register the virtqueue */
	rc = virtio_mem_init_vq(vm);
	if (rc)
		goto out_free_vm;

	/* initialize the device by querying the config */
	rc = virtio_mem_init(vm);
	if (rc)
		goto out_del_vq;

	/* create the parent resource for all memory */
	rc = virtio_mem_create_resource(vm);
	if (rc)
		goto out_del_vq;

	/*
	 * If we still have memory plugged, we have to unplug all memory first.
	 * Registering our parent resource makes sure that this memory isn't
	 * actually in use (e.g., trying to reload the driver).
	 */
	if (vm->plugged_size) {
		vm->unplug_all_required = 1;
		dev_info(&vm->vdev->dev, "unplugging all memory is required\n");
	}

	/* register callbacks */
	vm->memory_notifier.notifier_call = virtio_mem_memory_notifier_cb;
	rc = register_memory_notifier(&vm->memory_notifier);
	if (rc)
		goto out_del_resource;
	rc = register_virtio_mem_device(vm);
	if (rc)
		goto out_unreg_mem;

	virtio_device_ready(vdev);

	/* trigger a config update to start processing the requested_size */
	atomic_set(&vm->config_changed, 1);
	queue_work(system_freezable_wq, &vm->wq);

	return 0;
out_unreg_mem:
	unregister_memory_notifier(&vm->memory_notifier);
out_del_resource:
	virtio_mem_delete_resource(vm);
out_del_vq:
	vdev->config->del_vqs(vdev);
out_free_vm:
	kfree(vm);
	vdev->priv = NULL;

	return rc;
}

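/*
 * Tear down the device: stop the workqueue and timer, remove any leftover
 * offline memory blocks, unregister the callbacks, and free all tracking
 * data. Added system memory might outlive the driver; warn in that case.
 */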
static void virtio_mem_remove(struct virtio_device *vdev)
{
	struct virtio_mem *vm = vdev->priv;
	unsigned long mb_id;
	int rc;

	/*
	 * Make sure the workqueue won't be triggered anymore and no memory
	 * blocks can be onlined/offlined until we're finished here.
	 */
	mutex_lock(&vm->hotplug_mutex);
	spin_lock_irq(&vm->removal_lock);
	vm->removing = true;
	spin_unlock_irq(&vm->removal_lock);
	mutex_unlock(&vm->hotplug_mutex);

	/* wait until the workqueue has stopped */
	cancel_work_sync(&vm->wq);
	hrtimer_cancel(&vm->retry_timer);

	if (vm->in_sbm) {
		/*
		 * After we unregistered our callbacks, user space can online
		 * partially plugged offline blocks. Make sure to remove them.
		 */
		virtio_mem_sbm_for_each_mb(vm, mb_id,
					   VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL) {
			rc = virtio_mem_sbm_remove_mb(vm, mb_id);
			BUG_ON(rc);
			virtio_mem_sbm_set_mb_state(vm, mb_id,
						    VIRTIO_MEM_SBM_MB_UNUSED);
		}
		/*
		 * After we unregistered our callbacks, user space can no
		 * longer offline partially plugged online memory blocks. No
		 * need to worry about them.
		 */
	}

	/* unregister callbacks */
	unregister_virtio_mem_device(vm);
	unregister_memory_notifier(&vm->memory_notifier);

	/*
	 * There is no way we could reliably remove all memory we have added to
	 * the system. And there is no way to stop the driver/device from going
	 * away. Warn at least.
	 */
	if (virtio_mem_has_memory_added(vm)) {
		dev_warn(&vdev->dev, "device still has system memory added\n");
	} else {
		virtio_mem_delete_resource(vm);
		kfree_const(vm->resource_name);
	}

	/* remove all tracking data - no locking needed */
	if (vm->in_sbm) {
		vfree(vm->sbm.mb_states);
		vfree(vm->sbm.sb_states);
	} else {
		vfree(vm->bbm.bb_states);
	}

	/* reset the device and cleanup the queues */
	vdev->config->reset(vdev);
	vdev->config->del_vqs(vdev);

	kfree(vm);
	vdev->priv = NULL;
}

static void virtio_mem_config_changed(struct virtio_device *vdev)
{
	struct virtio_mem *vm = vdev->priv;

	atomic_set(&vm->config_changed, 1);
	virtio_mem_retry(vm);
}

#ifdef CONFIG_PM_SLEEP
static int virtio_mem_freeze(struct virtio_device *vdev)
{
	/*
	 * When restarting the VM, all memory is usually unplugged. Don't
	 * allow suspending/hibernating.
	 */
	dev_err(&vdev->dev, "save/restore not supported.\n");
	return -EPERM;
}

static int virtio_mem_restore(struct virtio_device *vdev)
{
	return -EPERM;
}
#endif

static unsigned int virtio_mem_features[] = {
#if defined(CONFIG_NUMA) && defined(CONFIG_ACPI_NUMA)
	VIRTIO_MEM_F_ACPI_PXM,
#endif
};

static const struct virtio_device_id virtio_mem_id_table[] = {
	{ VIRTIO_ID_MEM, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static struct virtio_driver virtio_mem_driver = {
	.feature_table = virtio_mem_features,
	.feature_table_size = ARRAY_SIZE(virtio_mem_features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = virtio_mem_id_table,
	.probe = virtio_mem_probe,
	.remove = virtio_mem_remove,
	.config_changed = virtio_mem_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze	= virtio_mem_freeze,
	.restore = virtio_mem_restore,
#endif
};

module_virtio_driver(virtio_mem_driver);
MODULE_DEVICE_TABLE(virtio, virtio_mem_id_table);
MODULE_AUTHOR("David Hildenbrand <david@redhat.com>");
MODULE_DESCRIPTION("Virtio-mem driver");
MODULE_LICENSE("GPL");