// SPDX-License-Identifier: GPL-2.0
/*
 * VMware Balloon driver.
 *
 * Copyright (C) 2000-2018, VMware, Inc. All Rights Reserved.
 *
 * This is VMware physical memory management driver for Linux. The driver
 * acts like a "balloon" that can be inflated to reclaim physical pages by
 * reserving them in the guest and invalidating them in the monitor,
 * freeing up the underlying machine pages so they can be allocated to
 * other guests. The balloon can also be deflated to allow the guest to
 * use more physical memory. Higher level policies can control the sizes
 * of balloons in VMs in order to manage physical memory resources.
 */

//#define DEBUG
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/mount.h>
#include <linux/balloon_compaction.h>
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <asm/hypervisor.h>

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
MODULE_ALIAS("dmi:*:svnVMware*:*");
MODULE_ALIAS("vmware_vmmemctl");
MODULE_LICENSE("GPL");

static bool __read_mostly vmwballoon_shrinker_enable;
module_param(vmwballoon_shrinker_enable, bool, 0444);
MODULE_PARM_DESC(vmwballoon_shrinker_enable,
	"Enable non-cooperative out-of-memory protection. Disabled by default as it may degrade performance.");

/* Delay in seconds after shrink before inflation. */
#define VMBALLOON_SHRINK_DELAY		(5)

/* Maximum number of refused pages we accumulate during inflation cycle */
#define VMW_BALLOON_MAX_REFUSED		16

/* Magic number for the balloon mount-point */
#define BALLOON_VMW_MAGIC		0x0ba11007

/*
 * Hypervisor communication port definitions.
 */
#define VMW_BALLOON_HV_PORT		0x5670
#define VMW_BALLOON_HV_MAGIC		0x456c6d6f
#define VMW_BALLOON_GUEST_ID		1	/* Linux */

enum vmwballoon_capabilities {
	/*
	 * Bit 0 is reserved and not associated to any capability.
67 */ 68 VMW_BALLOON_BASIC_CMDS = (1 << 1), 69 VMW_BALLOON_BATCHED_CMDS = (1 << 2), 70 VMW_BALLOON_BATCHED_2M_CMDS = (1 << 3), 71 VMW_BALLOON_SIGNALLED_WAKEUP_CMD = (1 << 4), 72 VMW_BALLOON_64_BIT_TARGET = (1 << 5) 73 }; 74 75 #define VMW_BALLOON_CAPABILITIES_COMMON (VMW_BALLOON_BASIC_CMDS \ 76 | VMW_BALLOON_BATCHED_CMDS \ 77 | VMW_BALLOON_BATCHED_2M_CMDS \ 78 | VMW_BALLOON_SIGNALLED_WAKEUP_CMD) 79 80 #define VMW_BALLOON_2M_ORDER (PMD_SHIFT - PAGE_SHIFT) 81 82 /* 83 * 64-bit targets are only supported in 64-bit 84 */ 85 #ifdef CONFIG_64BIT 86 #define VMW_BALLOON_CAPABILITIES (VMW_BALLOON_CAPABILITIES_COMMON \ 87 | VMW_BALLOON_64_BIT_TARGET) 88 #else 89 #define VMW_BALLOON_CAPABILITIES VMW_BALLOON_CAPABILITIES_COMMON 90 #endif 91 92 enum vmballoon_page_size_type { 93 VMW_BALLOON_4K_PAGE, 94 VMW_BALLOON_2M_PAGE, 95 VMW_BALLOON_LAST_SIZE = VMW_BALLOON_2M_PAGE 96 }; 97 98 #define VMW_BALLOON_NUM_PAGE_SIZES (VMW_BALLOON_LAST_SIZE + 1) 99 100 static const char * const vmballoon_page_size_names[] = { 101 [VMW_BALLOON_4K_PAGE] = "4k", 102 [VMW_BALLOON_2M_PAGE] = "2M" 103 }; 104 105 enum vmballoon_op { 106 VMW_BALLOON_INFLATE, 107 VMW_BALLOON_DEFLATE 108 }; 109 110 enum vmballoon_op_stat_type { 111 VMW_BALLOON_OP_STAT, 112 VMW_BALLOON_OP_FAIL_STAT 113 }; 114 115 #define VMW_BALLOON_OP_STAT_TYPES (VMW_BALLOON_OP_FAIL_STAT + 1) 116 117 /** 118 * enum vmballoon_cmd_type - backdoor commands. 119 * 120 * Availability of the commands is as followed: 121 * 122 * %VMW_BALLOON_CMD_START, %VMW_BALLOON_CMD_GET_TARGET and 123 * %VMW_BALLOON_CMD_GUEST_ID are always available. 124 * 125 * If the host reports %VMW_BALLOON_BASIC_CMDS are supported then 126 * %VMW_BALLOON_CMD_LOCK and %VMW_BALLOON_CMD_UNLOCK commands are available. 127 * 128 * If the host reports %VMW_BALLOON_BATCHED_CMDS are supported then 129 * %VMW_BALLOON_CMD_BATCHED_LOCK and VMW_BALLOON_CMD_BATCHED_UNLOCK commands 130 * are available. 131 * 132 * If the host reports %VMW_BALLOON_BATCHED_2M_CMDS are supported then 133 * %VMW_BALLOON_CMD_BATCHED_2M_LOCK and %VMW_BALLOON_CMD_BATCHED_2M_UNLOCK 134 * are supported. 135 * 136 * If the host reports VMW_BALLOON_SIGNALLED_WAKEUP_CMD is supported then 137 * VMW_BALLOON_CMD_VMCI_DOORBELL_SET command is supported. 138 * 139 * @VMW_BALLOON_CMD_START: Communicating supported version with the hypervisor. 140 * @VMW_BALLOON_CMD_GET_TARGET: Gets the balloon target size. 141 * @VMW_BALLOON_CMD_LOCK: Informs the hypervisor about a ballooned page. 142 * @VMW_BALLOON_CMD_UNLOCK: Informs the hypervisor about a page that is about 143 * to be deflated from the balloon. 144 * @VMW_BALLOON_CMD_GUEST_ID: Informs the hypervisor about the type of OS that 145 * runs in the VM. 146 * @VMW_BALLOON_CMD_BATCHED_LOCK: Inform the hypervisor about a batch of 147 * ballooned pages (up to 512). 148 * @VMW_BALLOON_CMD_BATCHED_UNLOCK: Inform the hypervisor about a batch of 149 * pages that are about to be deflated from the 150 * balloon (up to 512). 151 * @VMW_BALLOON_CMD_BATCHED_2M_LOCK: Similar to @VMW_BALLOON_CMD_BATCHED_LOCK 152 * for 2MB pages. 153 * @VMW_BALLOON_CMD_BATCHED_2M_UNLOCK: Similar to 154 * @VMW_BALLOON_CMD_BATCHED_UNLOCK for 2MB 155 * pages. 156 * @VMW_BALLOON_CMD_VMCI_DOORBELL_SET: A command to set doorbell notification 157 * that would be invoked when the balloon 158 * size changes. 159 * @VMW_BALLOON_CMD_LAST: Value of the last command. 
160 */ 161 enum vmballoon_cmd_type { 162 VMW_BALLOON_CMD_START, 163 VMW_BALLOON_CMD_GET_TARGET, 164 VMW_BALLOON_CMD_LOCK, 165 VMW_BALLOON_CMD_UNLOCK, 166 VMW_BALLOON_CMD_GUEST_ID, 167 /* No command 5 */ 168 VMW_BALLOON_CMD_BATCHED_LOCK = 6, 169 VMW_BALLOON_CMD_BATCHED_UNLOCK, 170 VMW_BALLOON_CMD_BATCHED_2M_LOCK, 171 VMW_BALLOON_CMD_BATCHED_2M_UNLOCK, 172 VMW_BALLOON_CMD_VMCI_DOORBELL_SET, 173 VMW_BALLOON_CMD_LAST = VMW_BALLOON_CMD_VMCI_DOORBELL_SET, 174 }; 175 176 #define VMW_BALLOON_CMD_NUM (VMW_BALLOON_CMD_LAST + 1) 177 178 enum vmballoon_error_codes { 179 VMW_BALLOON_SUCCESS, 180 VMW_BALLOON_ERROR_CMD_INVALID, 181 VMW_BALLOON_ERROR_PPN_INVALID, 182 VMW_BALLOON_ERROR_PPN_LOCKED, 183 VMW_BALLOON_ERROR_PPN_UNLOCKED, 184 VMW_BALLOON_ERROR_PPN_PINNED, 185 VMW_BALLOON_ERROR_PPN_NOTNEEDED, 186 VMW_BALLOON_ERROR_RESET, 187 VMW_BALLOON_ERROR_BUSY 188 }; 189 190 #define VMW_BALLOON_SUCCESS_WITH_CAPABILITIES (0x03000000) 191 192 #define VMW_BALLOON_CMD_WITH_TARGET_MASK \ 193 ((1UL << VMW_BALLOON_CMD_GET_TARGET) | \ 194 (1UL << VMW_BALLOON_CMD_LOCK) | \ 195 (1UL << VMW_BALLOON_CMD_UNLOCK) | \ 196 (1UL << VMW_BALLOON_CMD_BATCHED_LOCK) | \ 197 (1UL << VMW_BALLOON_CMD_BATCHED_UNLOCK) | \ 198 (1UL << VMW_BALLOON_CMD_BATCHED_2M_LOCK) | \ 199 (1UL << VMW_BALLOON_CMD_BATCHED_2M_UNLOCK)) 200 201 static const char * const vmballoon_cmd_names[] = { 202 [VMW_BALLOON_CMD_START] = "start", 203 [VMW_BALLOON_CMD_GET_TARGET] = "target", 204 [VMW_BALLOON_CMD_LOCK] = "lock", 205 [VMW_BALLOON_CMD_UNLOCK] = "unlock", 206 [VMW_BALLOON_CMD_GUEST_ID] = "guestType", 207 [VMW_BALLOON_CMD_BATCHED_LOCK] = "batchLock", 208 [VMW_BALLOON_CMD_BATCHED_UNLOCK] = "batchUnlock", 209 [VMW_BALLOON_CMD_BATCHED_2M_LOCK] = "2m-lock", 210 [VMW_BALLOON_CMD_BATCHED_2M_UNLOCK] = "2m-unlock", 211 [VMW_BALLOON_CMD_VMCI_DOORBELL_SET] = "doorbellSet" 212 }; 213 214 enum vmballoon_stat_page { 215 VMW_BALLOON_PAGE_STAT_ALLOC, 216 VMW_BALLOON_PAGE_STAT_ALLOC_FAIL, 217 VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC, 218 VMW_BALLOON_PAGE_STAT_REFUSED_FREE, 219 VMW_BALLOON_PAGE_STAT_FREE, 220 VMW_BALLOON_PAGE_STAT_LAST = VMW_BALLOON_PAGE_STAT_FREE 221 }; 222 223 #define VMW_BALLOON_PAGE_STAT_NUM (VMW_BALLOON_PAGE_STAT_LAST + 1) 224 225 enum vmballoon_stat_general { 226 VMW_BALLOON_STAT_TIMER, 227 VMW_BALLOON_STAT_DOORBELL, 228 VMW_BALLOON_STAT_RESET, 229 VMW_BALLOON_STAT_SHRINK, 230 VMW_BALLOON_STAT_SHRINK_FREE, 231 VMW_BALLOON_STAT_LAST = VMW_BALLOON_STAT_SHRINK_FREE 232 }; 233 234 #define VMW_BALLOON_STAT_NUM (VMW_BALLOON_STAT_LAST + 1) 235 236 static DEFINE_STATIC_KEY_TRUE(vmw_balloon_batching); 237 static DEFINE_STATIC_KEY_FALSE(balloon_stat_enabled); 238 239 struct vmballoon_ctl { 240 struct list_head pages; 241 struct list_head refused_pages; 242 struct list_head prealloc_pages; 243 unsigned int n_refused_pages; 244 unsigned int n_pages; 245 enum vmballoon_page_size_type page_size; 246 enum vmballoon_op op; 247 }; 248 249 /** 250 * struct vmballoon_batch_entry - a batch entry for lock or unlock. 251 * 252 * @status: the status of the operation, which is written by the hypervisor. 253 * @reserved: reserved for future use. Must be set to zero. 254 * @pfn: the physical frame number of the page to be locked or unlocked. 255 */ 256 struct vmballoon_batch_entry { 257 u64 status : 5; 258 u64 reserved : PAGE_SHIFT - 5; 259 u64 pfn : 52; 260 } __packed; 261 262 struct vmballoon { 263 /** 264 * @max_page_size: maximum supported page size for ballooning. 
265 * 266 * Protected by @conf_sem 267 */ 268 enum vmballoon_page_size_type max_page_size; 269 270 /** 271 * @size: balloon actual size in basic page size (frames). 272 * 273 * While we currently do not support size which is bigger than 32-bit, 274 * in preparation for future support, use 64-bits. 275 */ 276 atomic64_t size; 277 278 /** 279 * @target: balloon target size in basic page size (frames). 280 * 281 * We do not protect the target under the assumption that setting the 282 * value is always done through a single write. If this assumption ever 283 * breaks, we would have to use X_ONCE for accesses, and suffer the less 284 * optimized code. Although we may read stale target value if multiple 285 * accesses happen at once, the performance impact should be minor. 286 */ 287 unsigned long target; 288 289 /** 290 * @reset_required: reset flag 291 * 292 * Setting this flag may introduce races, but the code is expected to 293 * handle them gracefully. In the worst case, another operation will 294 * fail as reset did not take place. Clearing the flag is done while 295 * holding @conf_sem for write. 296 */ 297 bool reset_required; 298 299 /** 300 * @capabilities: hypervisor balloon capabilities. 301 * 302 * Protected by @conf_sem. 303 */ 304 unsigned long capabilities; 305 306 /** 307 * @batch_page: pointer to communication batch page. 308 * 309 * When batching is used, batch_page points to a page, which holds up to 310 * %VMW_BALLOON_BATCH_MAX_PAGES entries for locking or unlocking. 311 */ 312 struct vmballoon_batch_entry *batch_page; 313 314 /** 315 * @batch_max_pages: maximum pages that can be locked/unlocked. 316 * 317 * Indicates the number of pages that the hypervisor can lock or unlock 318 * at once, according to whether batching is enabled. If batching is 319 * disabled, only a single page can be locked/unlock on each operation. 320 * 321 * Protected by @conf_sem. 322 */ 323 unsigned int batch_max_pages; 324 325 /** 326 * @page: page to be locked/unlocked by the hypervisor 327 * 328 * @page is only used when batching is disabled and a single page is 329 * reclaimed on each iteration. 330 * 331 * Protected by @comm_lock. 332 */ 333 struct page *page; 334 335 /** 336 * @shrink_timeout: timeout until the next inflation. 337 * 338 * After an shrink event, indicates the time in jiffies after which 339 * inflation is allowed again. Can be written concurrently with reads, 340 * so must use READ_ONCE/WRITE_ONCE when accessing. 341 */ 342 unsigned long shrink_timeout; 343 344 /* statistics */ 345 struct vmballoon_stats *stats; 346 347 #ifdef CONFIG_DEBUG_FS 348 /* debugfs file exporting statistics */ 349 struct dentry *dbg_entry; 350 #endif 351 352 /** 353 * @b_dev_info: balloon device information descriptor. 354 */ 355 struct balloon_dev_info b_dev_info; 356 357 struct delayed_work dwork; 358 359 /** 360 * @huge_pages - list of the inflated 2MB pages. 361 * 362 * Protected by @b_dev_info.pages_lock . 363 */ 364 struct list_head huge_pages; 365 366 /** 367 * @vmci_doorbell. 368 * 369 * Protected by @conf_sem. 370 */ 371 struct vmci_handle vmci_doorbell; 372 373 /** 374 * @conf_sem: semaphore to protect the configuration and the statistics. 375 */ 376 struct rw_semaphore conf_sem; 377 378 /** 379 * @comm_lock: lock to protect the communication with the host. 380 * 381 * Lock ordering: @conf_sem -> @comm_lock . 382 */ 383 spinlock_t comm_lock; 384 385 /** 386 * @shrinker: shrinker interface that is used to avoid over-inflation. 
387 */ 388 struct shrinker shrinker; 389 390 /** 391 * @shrinker_registered: whether the shrinker was registered. 392 * 393 * The shrinker interface does not handle gracefully the removal of 394 * shrinker that was not registered before. This indication allows to 395 * simplify the unregistration process. 396 */ 397 bool shrinker_registered; 398 }; 399 400 static struct vmballoon balloon; 401 402 struct vmballoon_stats { 403 /* timer / doorbell operations */ 404 atomic64_t general_stat[VMW_BALLOON_STAT_NUM]; 405 406 /* allocation statistics for huge and small pages */ 407 atomic64_t 408 page_stat[VMW_BALLOON_PAGE_STAT_NUM][VMW_BALLOON_NUM_PAGE_SIZES]; 409 410 /* Monitor operations: total operations, and failures */ 411 atomic64_t ops[VMW_BALLOON_CMD_NUM][VMW_BALLOON_OP_STAT_TYPES]; 412 }; 413 414 static inline bool is_vmballoon_stats_on(void) 415 { 416 return IS_ENABLED(CONFIG_DEBUG_FS) && 417 static_branch_unlikely(&balloon_stat_enabled); 418 } 419 420 static inline void vmballoon_stats_op_inc(struct vmballoon *b, unsigned int op, 421 enum vmballoon_op_stat_type type) 422 { 423 if (is_vmballoon_stats_on()) 424 atomic64_inc(&b->stats->ops[op][type]); 425 } 426 427 static inline void vmballoon_stats_gen_inc(struct vmballoon *b, 428 enum vmballoon_stat_general stat) 429 { 430 if (is_vmballoon_stats_on()) 431 atomic64_inc(&b->stats->general_stat[stat]); 432 } 433 434 static inline void vmballoon_stats_gen_add(struct vmballoon *b, 435 enum vmballoon_stat_general stat, 436 unsigned int val) 437 { 438 if (is_vmballoon_stats_on()) 439 atomic64_add(val, &b->stats->general_stat[stat]); 440 } 441 442 static inline void vmballoon_stats_page_inc(struct vmballoon *b, 443 enum vmballoon_stat_page stat, 444 enum vmballoon_page_size_type size) 445 { 446 if (is_vmballoon_stats_on()) 447 atomic64_inc(&b->stats->page_stat[stat][size]); 448 } 449 450 static inline void vmballoon_stats_page_add(struct vmballoon *b, 451 enum vmballoon_stat_page stat, 452 enum vmballoon_page_size_type size, 453 unsigned int val) 454 { 455 if (is_vmballoon_stats_on()) 456 atomic64_add(val, &b->stats->page_stat[stat][size]); 457 } 458 459 static inline unsigned long 460 __vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1, 461 unsigned long arg2, unsigned long *result) 462 { 463 unsigned long status, dummy1, dummy2, dummy3, local_result; 464 465 vmballoon_stats_op_inc(b, cmd, VMW_BALLOON_OP_STAT); 466 467 asm volatile ("inl %%dx" : 468 "=a"(status), 469 "=c"(dummy1), 470 "=d"(dummy2), 471 "=b"(local_result), 472 "=S"(dummy3) : 473 "0"(VMW_BALLOON_HV_MAGIC), 474 "1"(cmd), 475 "2"(VMW_BALLOON_HV_PORT), 476 "3"(arg1), 477 "4"(arg2) : 478 "memory"); 479 480 /* update the result if needed */ 481 if (result) 482 *result = (cmd == VMW_BALLOON_CMD_START) ? 
							dummy1 : local_result;

	/* update target when applicable */
	if (status == VMW_BALLOON_SUCCESS &&
	    ((1ul << cmd) & VMW_BALLOON_CMD_WITH_TARGET_MASK))
		WRITE_ONCE(b->target, local_result);

	if (status != VMW_BALLOON_SUCCESS &&
	    status != VMW_BALLOON_SUCCESS_WITH_CAPABILITIES) {
		vmballoon_stats_op_inc(b, cmd, VMW_BALLOON_OP_FAIL_STAT);
		pr_debug("%s: %s [0x%lx,0x%lx) failed, returned %ld\n",
			 __func__, vmballoon_cmd_names[cmd], arg1, arg2,
			 status);
	}

	/* mark reset required accordingly */
	if (status == VMW_BALLOON_ERROR_RESET)
		b->reset_required = true;

	return status;
}

static __always_inline unsigned long
vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
	      unsigned long arg2)
{
	unsigned long dummy;

	return __vmballoon_cmd(b, cmd, arg1, arg2, &dummy);
}

/*
 * Send "start" command to the host, communicating supported version
 * of the protocol.
 */
static int vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
{
	unsigned long status, capabilities;

	status = __vmballoon_cmd(b, VMW_BALLOON_CMD_START, req_caps, 0,
				 &capabilities);

	switch (status) {
	case VMW_BALLOON_SUCCESS_WITH_CAPABILITIES:
		b->capabilities = capabilities;
		break;
	case VMW_BALLOON_SUCCESS:
		b->capabilities = VMW_BALLOON_BASIC_CMDS;
		break;
	default:
		return -EIO;
	}

	/*
	 * 2MB pages are only supported with batching. If batching is for some
	 * reason disabled, do not use 2MB pages, since otherwise the legacy
	 * mechanism is used with 2MB pages, causing a failure.
	 */
	b->max_page_size = VMW_BALLOON_4K_PAGE;
	if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) &&
	    (b->capabilities & VMW_BALLOON_BATCHED_CMDS))
		b->max_page_size = VMW_BALLOON_2M_PAGE;

	return 0;
}

/**
 * vmballoon_send_guest_id - communicate guest type to the host.
 *
 * @b: pointer to the balloon.
 *
 * Communicate the guest type to the host so that it can adjust the ballooning
 * algorithm to the one most appropriate for the guest. This command is
 * normally issued after sending the "start" command and is part of the
 * standard reset sequence.
 *
 * Return: zero on success or appropriate error code.
 */
static int vmballoon_send_guest_id(struct vmballoon *b)
{
	unsigned long status;

	status = vmballoon_cmd(b, VMW_BALLOON_CMD_GUEST_ID,
			       VMW_BALLOON_GUEST_ID, 0);

	return status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
}

/**
 * vmballoon_page_order() - return the order of the page
 * @page_size: the size of the page.
 *
 * Return: the allocation order.
 */
static inline
unsigned int vmballoon_page_order(enum vmballoon_page_size_type page_size)
{
	return page_size == VMW_BALLOON_2M_PAGE ? VMW_BALLOON_2M_ORDER : 0;
}

/**
 * vmballoon_page_in_frames() - returns the number of frames in a page.
 * @page_size: the size of the page.
 *
 * Return: the number of 4k frames.
 */
static inline unsigned int
vmballoon_page_in_frames(enum vmballoon_page_size_type page_size)
{
	return 1 << vmballoon_page_order(page_size);
}
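/*
 * Illustrative usage sketch, not part of the original driver: it shows how a
 * single backdoor command is issued through the vmballoon_cmd() wrapper
 * defined above. The helper name below is hypothetical; the real driver
 * performs the equivalent call (plus a 32-bit limit check) in
 * vmballoon_send_get_target() further down in this file.
 */
static int __maybe_unused vmballoon_example_get_target(struct vmballoon *b)
{
	unsigned long status;

	/*
	 * The guest memory limit is passed as the argument. On success,
	 * __vmballoon_cmd() has already stored the returned target size in
	 * b->target as a side effect, since GET_TARGET is part of
	 * VMW_BALLOON_CMD_WITH_TARGET_MASK.
	 */
	status = vmballoon_cmd(b, VMW_BALLOON_CMD_GET_TARGET,
			       totalram_pages(), 0);

	return status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
}

/**
 * vmballoon_mark_page_offline() - mark a page as offline
 * @page: pointer for the page.
 * @page_size: the size of the page.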
600 */ 601 static void 602 vmballoon_mark_page_offline(struct page *page, 603 enum vmballoon_page_size_type page_size) 604 { 605 int i; 606 607 for (i = 0; i < vmballoon_page_in_frames(page_size); i++) 608 __SetPageOffline(page + i); 609 } 610 611 /** 612 * vmballoon_mark_page_online() - mark a page as online 613 * @page: pointer for the page. 614 * @page_size: the size of the page. 615 */ 616 static void 617 vmballoon_mark_page_online(struct page *page, 618 enum vmballoon_page_size_type page_size) 619 { 620 int i; 621 622 for (i = 0; i < vmballoon_page_in_frames(page_size); i++) 623 __ClearPageOffline(page + i); 624 } 625 626 /** 627 * vmballoon_send_get_target() - Retrieve desired balloon size from the host. 628 * 629 * @b: pointer to the balloon. 630 * 631 * Return: zero on success, EINVAL if limit does not fit in 32-bit, as required 632 * by the host-guest protocol and EIO if an error occurred in communicating with 633 * the host. 634 */ 635 static int vmballoon_send_get_target(struct vmballoon *b) 636 { 637 unsigned long status; 638 unsigned long limit; 639 640 limit = totalram_pages(); 641 642 /* Ensure limit fits in 32-bits if 64-bit targets are not supported */ 643 if (!(b->capabilities & VMW_BALLOON_64_BIT_TARGET) && 644 limit != (u32)limit) 645 return -EINVAL; 646 647 status = vmballoon_cmd(b, VMW_BALLOON_CMD_GET_TARGET, limit, 0); 648 649 return status == VMW_BALLOON_SUCCESS ? 0 : -EIO; 650 } 651 652 /** 653 * vmballoon_alloc_page_list - allocates a list of pages. 654 * 655 * @b: pointer to the balloon. 656 * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation. 657 * @req_n_pages: the number of requested pages. 658 * 659 * Tries to allocate @req_n_pages. Add them to the list of balloon pages in 660 * @ctl.pages and updates @ctl.n_pages to reflect the number of pages. 661 * 662 * Return: zero on success or error code otherwise. 663 */ 664 static int vmballoon_alloc_page_list(struct vmballoon *b, 665 struct vmballoon_ctl *ctl, 666 unsigned int req_n_pages) 667 { 668 struct page *page; 669 unsigned int i; 670 671 for (i = 0; i < req_n_pages; i++) { 672 /* 673 * First check if we happen to have pages that were allocated 674 * before. This happens when 2MB page rejected during inflation 675 * by the hypervisor, and then split into 4KB pages. 676 */ 677 if (!list_empty(&ctl->prealloc_pages)) { 678 page = list_first_entry(&ctl->prealloc_pages, 679 struct page, lru); 680 list_del(&page->lru); 681 } else { 682 if (ctl->page_size == VMW_BALLOON_2M_PAGE) 683 page = alloc_pages(__GFP_HIGHMEM|__GFP_NOWARN| 684 __GFP_NOMEMALLOC, VMW_BALLOON_2M_ORDER); 685 else 686 page = balloon_page_alloc(); 687 688 vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC, 689 ctl->page_size); 690 } 691 692 if (page) { 693 vmballoon_mark_page_offline(page, ctl->page_size); 694 /* Success. Add the page to the list and continue. */ 695 list_add(&page->lru, &ctl->pages); 696 continue; 697 } 698 699 /* Allocation failed. Update statistics and stop. */ 700 vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC_FAIL, 701 ctl->page_size); 702 break; 703 } 704 705 ctl->n_pages = i; 706 707 return req_n_pages == ctl->n_pages ? 0 : -ENOMEM; 708 } 709 710 /** 711 * vmballoon_handle_one_result - Handle lock/unlock result for a single page. 712 * 713 * @b: pointer for %struct vmballoon. 714 * @page: pointer for the page whose result should be handled. 715 * @page_size: size of the page. 716 * @status: status of the operation as provided by the hypervisor. 
717 */ 718 static int vmballoon_handle_one_result(struct vmballoon *b, struct page *page, 719 enum vmballoon_page_size_type page_size, 720 unsigned long status) 721 { 722 /* On success do nothing. The page is already on the balloon list. */ 723 if (likely(status == VMW_BALLOON_SUCCESS)) 724 return 0; 725 726 pr_debug("%s: failed comm pfn %lx status %lu page_size %s\n", __func__, 727 page_to_pfn(page), status, 728 vmballoon_page_size_names[page_size]); 729 730 /* Error occurred */ 731 vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC, 732 page_size); 733 734 return -EIO; 735 } 736 737 /** 738 * vmballoon_status_page - returns the status of (un)lock operation 739 * 740 * @b: pointer to the balloon. 741 * @idx: index for the page for which the operation is performed. 742 * @p: pointer to where the page struct is returned. 743 * 744 * Following a lock or unlock operation, returns the status of the operation for 745 * an individual page. Provides the page that the operation was performed on on 746 * the @page argument. 747 * 748 * Returns: The status of a lock or unlock operation for an individual page. 749 */ 750 static unsigned long vmballoon_status_page(struct vmballoon *b, int idx, 751 struct page **p) 752 { 753 if (static_branch_likely(&vmw_balloon_batching)) { 754 /* batching mode */ 755 *p = pfn_to_page(b->batch_page[idx].pfn); 756 return b->batch_page[idx].status; 757 } 758 759 /* non-batching mode */ 760 *p = b->page; 761 762 /* 763 * If a failure occurs, the indication will be provided in the status 764 * of the entire operation, which is considered before the individual 765 * page status. So for non-batching mode, the indication is always of 766 * success. 767 */ 768 return VMW_BALLOON_SUCCESS; 769 } 770 771 /** 772 * vmballoon_lock_op - notifies the host about inflated/deflated pages. 773 * @b: pointer to the balloon. 774 * @num_pages: number of inflated/deflated pages. 775 * @page_size: size of the page. 776 * @op: the type of operation (lock or unlock). 777 * 778 * Notify the host about page(s) that were ballooned (or removed from the 779 * balloon) so that host can use it without fear that guest will need it (or 780 * stop using them since the VM does). Host may reject some pages, we need to 781 * check the return value and maybe submit a different page. The pages that are 782 * inflated/deflated are pointed by @b->page. 783 * 784 * Return: result as provided by the hypervisor. 785 */ 786 static unsigned long vmballoon_lock_op(struct vmballoon *b, 787 unsigned int num_pages, 788 enum vmballoon_page_size_type page_size, 789 enum vmballoon_op op) 790 { 791 unsigned long cmd, pfn; 792 793 lockdep_assert_held(&b->comm_lock); 794 795 if (static_branch_likely(&vmw_balloon_batching)) { 796 if (op == VMW_BALLOON_INFLATE) 797 cmd = page_size == VMW_BALLOON_2M_PAGE ? 798 VMW_BALLOON_CMD_BATCHED_2M_LOCK : 799 VMW_BALLOON_CMD_BATCHED_LOCK; 800 else 801 cmd = page_size == VMW_BALLOON_2M_PAGE ? 802 VMW_BALLOON_CMD_BATCHED_2M_UNLOCK : 803 VMW_BALLOON_CMD_BATCHED_UNLOCK; 804 805 pfn = PHYS_PFN(virt_to_phys(b->batch_page)); 806 } else { 807 cmd = op == VMW_BALLOON_INFLATE ? VMW_BALLOON_CMD_LOCK : 808 VMW_BALLOON_CMD_UNLOCK; 809 pfn = page_to_pfn(b->page); 810 811 /* In non-batching mode, PFNs must fit in 32-bit */ 812 if (unlikely(pfn != (u32)pfn)) 813 return VMW_BALLOON_ERROR_PPN_INVALID; 814 } 815 816 return vmballoon_cmd(b, cmd, pfn, num_pages); 817 } 818 819 /** 820 * vmballoon_add_page - adds a page towards lock/unlock operation. 821 * 822 * @b: pointer to the balloon. 
823 * @idx: index of the page to be ballooned in this batch. 824 * @p: pointer to the page that is about to be ballooned. 825 * 826 * Adds the page to be ballooned. Must be called while holding @comm_lock. 827 */ 828 static void vmballoon_add_page(struct vmballoon *b, unsigned int idx, 829 struct page *p) 830 { 831 lockdep_assert_held(&b->comm_lock); 832 833 if (static_branch_likely(&vmw_balloon_batching)) 834 b->batch_page[idx] = (struct vmballoon_batch_entry) 835 { .pfn = page_to_pfn(p) }; 836 else 837 b->page = p; 838 } 839 840 /** 841 * vmballoon_lock - lock or unlock a batch of pages. 842 * 843 * @b: pointer to the balloon. 844 * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation. 845 * 846 * Notifies the host of about ballooned pages (after inflation or deflation, 847 * according to @ctl). If the host rejects the page put it on the 848 * @ctl refuse list. These refused page are then released when moving to the 849 * next size of pages. 850 * 851 * Note that we neither free any @page here nor put them back on the ballooned 852 * pages list. Instead we queue it for later processing. We do that for several 853 * reasons. First, we do not want to free the page under the lock. Second, it 854 * allows us to unify the handling of lock and unlock. In the inflate case, the 855 * caller will check if there are too many refused pages and release them. 856 * Although it is not identical to the past behavior, it should not affect 857 * performance. 858 */ 859 static int vmballoon_lock(struct vmballoon *b, struct vmballoon_ctl *ctl) 860 { 861 unsigned long batch_status; 862 struct page *page; 863 unsigned int i, num_pages; 864 865 num_pages = ctl->n_pages; 866 if (num_pages == 0) 867 return 0; 868 869 /* communication with the host is done under the communication lock */ 870 spin_lock(&b->comm_lock); 871 872 i = 0; 873 list_for_each_entry(page, &ctl->pages, lru) 874 vmballoon_add_page(b, i++, page); 875 876 batch_status = vmballoon_lock_op(b, ctl->n_pages, ctl->page_size, 877 ctl->op); 878 879 /* 880 * Iterate over the pages in the provided list. Since we are changing 881 * @ctl->n_pages we are saving the original value in @num_pages and 882 * use this value to bound the loop. 883 */ 884 for (i = 0; i < num_pages; i++) { 885 unsigned long status; 886 887 status = vmballoon_status_page(b, i, &page); 888 889 /* 890 * Failure of the whole batch overrides a single operation 891 * results. 892 */ 893 if (batch_status != VMW_BALLOON_SUCCESS) 894 status = batch_status; 895 896 /* Continue if no error happened */ 897 if (!vmballoon_handle_one_result(b, page, ctl->page_size, 898 status)) 899 continue; 900 901 /* 902 * Error happened. Move the pages to the refused list and update 903 * the pages number. 904 */ 905 list_move(&page->lru, &ctl->refused_pages); 906 ctl->n_pages--; 907 ctl->n_refused_pages++; 908 } 909 910 spin_unlock(&b->comm_lock); 911 912 return batch_status == VMW_BALLOON_SUCCESS ? 0 : -EIO; 913 } 914 915 /** 916 * vmballoon_release_page_list() - Releases a page list 917 * 918 * @page_list: list of pages to release. 919 * @n_pages: pointer to the number of pages. 920 * @page_size: whether the pages in the list are 2MB (or else 4KB). 921 * 922 * Releases the list of pages and zeros the number of pages. 
923 */ 924 static void vmballoon_release_page_list(struct list_head *page_list, 925 int *n_pages, 926 enum vmballoon_page_size_type page_size) 927 { 928 struct page *page, *tmp; 929 930 list_for_each_entry_safe(page, tmp, page_list, lru) { 931 list_del(&page->lru); 932 vmballoon_mark_page_online(page, page_size); 933 __free_pages(page, vmballoon_page_order(page_size)); 934 } 935 936 if (n_pages) 937 *n_pages = 0; 938 } 939 940 941 /* 942 * Release pages that were allocated while attempting to inflate the 943 * balloon but were refused by the host for one reason or another. 944 */ 945 static void vmballoon_release_refused_pages(struct vmballoon *b, 946 struct vmballoon_ctl *ctl) 947 { 948 vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_REFUSED_FREE, 949 ctl->page_size); 950 951 vmballoon_release_page_list(&ctl->refused_pages, &ctl->n_refused_pages, 952 ctl->page_size); 953 } 954 955 /** 956 * vmballoon_change - retrieve the required balloon change 957 * 958 * @b: pointer for the balloon. 959 * 960 * Return: the required change for the balloon size. A positive number 961 * indicates inflation, a negative number indicates a deflation. 962 */ 963 static int64_t vmballoon_change(struct vmballoon *b) 964 { 965 int64_t size, target; 966 967 size = atomic64_read(&b->size); 968 target = READ_ONCE(b->target); 969 970 /* 971 * We must cast first because of int sizes 972 * Otherwise we might get huge positives instead of negatives 973 */ 974 975 if (b->reset_required) 976 return 0; 977 978 /* consider a 2MB slack on deflate, unless the balloon is emptied */ 979 if (target < size && target != 0 && 980 size - target < vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE)) 981 return 0; 982 983 /* If an out-of-memory recently occurred, inflation is disallowed. */ 984 if (target > size && time_before(jiffies, READ_ONCE(b->shrink_timeout))) 985 return 0; 986 987 return target - size; 988 } 989 990 /** 991 * vmballoon_enqueue_page_list() - Enqueues list of pages after inflation. 992 * 993 * @b: pointer to balloon. 994 * @pages: list of pages to enqueue. 995 * @n_pages: pointer to number of pages in list. The value is zeroed. 996 * @page_size: whether the pages are 2MB or 4KB pages. 997 * 998 * Enqueues the provides list of pages in the ballooned page list, clears the 999 * list and zeroes the number of pages that was provided. 1000 */ 1001 static void vmballoon_enqueue_page_list(struct vmballoon *b, 1002 struct list_head *pages, 1003 unsigned int *n_pages, 1004 enum vmballoon_page_size_type page_size) 1005 { 1006 unsigned long flags; 1007 1008 if (page_size == VMW_BALLOON_4K_PAGE) { 1009 balloon_page_list_enqueue(&b->b_dev_info, pages); 1010 } else { 1011 /* 1012 * Keep the huge pages in a local list which is not available 1013 * for the balloon compaction mechanism. 1014 */ 1015 spin_lock_irqsave(&b->b_dev_info.pages_lock, flags); 1016 list_splice_init(pages, &b->huge_pages); 1017 __count_vm_events(BALLOON_INFLATE, *n_pages * 1018 vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE)); 1019 spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags); 1020 } 1021 1022 *n_pages = 0; 1023 } 1024 1025 /** 1026 * vmballoon_dequeue_page_list() - Dequeues page lists for deflation. 1027 * 1028 * @b: pointer to balloon. 1029 * @pages: list of pages to enqueue. 1030 * @n_pages: pointer to number of pages in list. The value is zeroed. 1031 * @page_size: whether the pages are 2MB or 4KB pages. 1032 * @n_req_pages: the number of requested pages. 1033 * 1034 * Dequeues the number of requested pages from the balloon for deflation. 
The 1035 * number of dequeued pages may be lower, if not enough pages in the requested 1036 * size are available. 1037 */ 1038 static void vmballoon_dequeue_page_list(struct vmballoon *b, 1039 struct list_head *pages, 1040 unsigned int *n_pages, 1041 enum vmballoon_page_size_type page_size, 1042 unsigned int n_req_pages) 1043 { 1044 struct page *page, *tmp; 1045 unsigned int i = 0; 1046 unsigned long flags; 1047 1048 /* In the case of 4k pages, use the compaction infrastructure */ 1049 if (page_size == VMW_BALLOON_4K_PAGE) { 1050 *n_pages = balloon_page_list_dequeue(&b->b_dev_info, pages, 1051 n_req_pages); 1052 return; 1053 } 1054 1055 /* 2MB pages */ 1056 spin_lock_irqsave(&b->b_dev_info.pages_lock, flags); 1057 list_for_each_entry_safe(page, tmp, &b->huge_pages, lru) { 1058 list_move(&page->lru, pages); 1059 if (++i == n_req_pages) 1060 break; 1061 } 1062 1063 __count_vm_events(BALLOON_DEFLATE, 1064 i * vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE)); 1065 spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags); 1066 *n_pages = i; 1067 } 1068 1069 /** 1070 * vmballoon_split_refused_pages() - Split the 2MB refused pages to 4k. 1071 * 1072 * If inflation of 2MB pages was denied by the hypervisor, it is likely to be 1073 * due to one or few 4KB pages. These 2MB pages may keep being allocated and 1074 * then being refused. To prevent this case, this function splits the refused 1075 * pages into 4KB pages and adds them into @prealloc_pages list. 1076 * 1077 * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation. 1078 */ 1079 static void vmballoon_split_refused_pages(struct vmballoon_ctl *ctl) 1080 { 1081 struct page *page, *tmp; 1082 unsigned int i, order; 1083 1084 order = vmballoon_page_order(ctl->page_size); 1085 1086 list_for_each_entry_safe(page, tmp, &ctl->refused_pages, lru) { 1087 list_del(&page->lru); 1088 split_page(page, order); 1089 for (i = 0; i < (1 << order); i++) 1090 list_add(&page[i].lru, &ctl->prealloc_pages); 1091 } 1092 ctl->n_refused_pages = 0; 1093 } 1094 1095 /** 1096 * vmballoon_inflate() - Inflate the balloon towards its target size. 1097 * 1098 * @b: pointer to the balloon. 1099 */ 1100 static void vmballoon_inflate(struct vmballoon *b) 1101 { 1102 int64_t to_inflate_frames; 1103 struct vmballoon_ctl ctl = { 1104 .pages = LIST_HEAD_INIT(ctl.pages), 1105 .refused_pages = LIST_HEAD_INIT(ctl.refused_pages), 1106 .prealloc_pages = LIST_HEAD_INIT(ctl.prealloc_pages), 1107 .page_size = b->max_page_size, 1108 .op = VMW_BALLOON_INFLATE 1109 }; 1110 1111 while ((to_inflate_frames = vmballoon_change(b)) > 0) { 1112 unsigned int to_inflate_pages, page_in_frames; 1113 int alloc_error, lock_error = 0; 1114 1115 VM_BUG_ON(!list_empty(&ctl.pages)); 1116 VM_BUG_ON(ctl.n_pages != 0); 1117 1118 page_in_frames = vmballoon_page_in_frames(ctl.page_size); 1119 1120 to_inflate_pages = min_t(unsigned long, b->batch_max_pages, 1121 DIV_ROUND_UP_ULL(to_inflate_frames, 1122 page_in_frames)); 1123 1124 /* Start by allocating */ 1125 alloc_error = vmballoon_alloc_page_list(b, &ctl, 1126 to_inflate_pages); 1127 1128 /* Actually lock the pages by telling the hypervisor */ 1129 lock_error = vmballoon_lock(b, &ctl); 1130 1131 /* 1132 * If an error indicates that something serious went wrong, 1133 * stop the inflation. 
1134 */ 1135 if (lock_error) 1136 break; 1137 1138 /* Update the balloon size */ 1139 atomic64_add(ctl.n_pages * page_in_frames, &b->size); 1140 1141 vmballoon_enqueue_page_list(b, &ctl.pages, &ctl.n_pages, 1142 ctl.page_size); 1143 1144 /* 1145 * If allocation failed or the number of refused pages exceeds 1146 * the maximum allowed, move to the next page size. 1147 */ 1148 if (alloc_error || 1149 ctl.n_refused_pages >= VMW_BALLOON_MAX_REFUSED) { 1150 if (ctl.page_size == VMW_BALLOON_4K_PAGE) 1151 break; 1152 1153 /* 1154 * Split the refused pages to 4k. This will also empty 1155 * the refused pages list. 1156 */ 1157 vmballoon_split_refused_pages(&ctl); 1158 ctl.page_size--; 1159 } 1160 1161 cond_resched(); 1162 } 1163 1164 /* 1165 * Release pages that were allocated while attempting to inflate the 1166 * balloon but were refused by the host for one reason or another, 1167 * and update the statistics. 1168 */ 1169 if (ctl.n_refused_pages != 0) 1170 vmballoon_release_refused_pages(b, &ctl); 1171 1172 vmballoon_release_page_list(&ctl.prealloc_pages, NULL, ctl.page_size); 1173 } 1174 1175 /** 1176 * vmballoon_deflate() - Decrease the size of the balloon. 1177 * 1178 * @b: pointer to the balloon 1179 * @n_frames: the number of frames to deflate. If zero, automatically 1180 * calculated according to the target size. 1181 * @coordinated: whether to coordinate with the host 1182 * 1183 * Decrease the size of the balloon allowing guest to use more memory. 1184 * 1185 * Return: The number of deflated frames (i.e., basic page size units) 1186 */ 1187 static unsigned long vmballoon_deflate(struct vmballoon *b, uint64_t n_frames, 1188 bool coordinated) 1189 { 1190 unsigned long deflated_frames = 0; 1191 unsigned long tried_frames = 0; 1192 struct vmballoon_ctl ctl = { 1193 .pages = LIST_HEAD_INIT(ctl.pages), 1194 .refused_pages = LIST_HEAD_INIT(ctl.refused_pages), 1195 .page_size = VMW_BALLOON_4K_PAGE, 1196 .op = VMW_BALLOON_DEFLATE 1197 }; 1198 1199 /* free pages to reach target */ 1200 while (true) { 1201 unsigned int to_deflate_pages, n_unlocked_frames; 1202 unsigned int page_in_frames; 1203 int64_t to_deflate_frames; 1204 bool deflated_all; 1205 1206 page_in_frames = vmballoon_page_in_frames(ctl.page_size); 1207 1208 VM_BUG_ON(!list_empty(&ctl.pages)); 1209 VM_BUG_ON(ctl.n_pages); 1210 VM_BUG_ON(!list_empty(&ctl.refused_pages)); 1211 VM_BUG_ON(ctl.n_refused_pages); 1212 1213 /* 1214 * If we were requested a specific number of frames, we try to 1215 * deflate this number of frames. Otherwise, deflation is 1216 * performed according to the target and balloon size. 1217 */ 1218 to_deflate_frames = n_frames ? n_frames - tried_frames : 1219 -vmballoon_change(b); 1220 1221 /* break if no work to do */ 1222 if (to_deflate_frames <= 0) 1223 break; 1224 1225 /* 1226 * Calculate the number of frames based on current page size, 1227 * but limit the deflated frames to a single chunk 1228 */ 1229 to_deflate_pages = min_t(unsigned long, b->batch_max_pages, 1230 DIV_ROUND_UP_ULL(to_deflate_frames, 1231 page_in_frames)); 1232 1233 /* First take the pages from the balloon pages. */ 1234 vmballoon_dequeue_page_list(b, &ctl.pages, &ctl.n_pages, 1235 ctl.page_size, to_deflate_pages); 1236 1237 /* 1238 * Before pages are moving to the refused list, count their 1239 * frames as frames that we tried to deflate. 1240 */ 1241 tried_frames += ctl.n_pages * page_in_frames; 1242 1243 /* 1244 * Unlock the pages by communicating with the hypervisor if the 1245 * communication is coordinated (i.e., not pop). 
		 * We ignore the return code. Instead we check whether we
		 * managed to unlock all the pages. If we failed, we move to
		 * the next page size and will eventually try again later.
		 */
		if (coordinated)
			vmballoon_lock(b, &ctl);

		/*
		 * Check if we deflated enough. We will move to the next page
		 * size if we did not manage to do so. This calculation takes
		 * place now, as once the pages are released, the number of
		 * pages is zeroed.
		 */
		deflated_all = (ctl.n_pages == to_deflate_pages);

		/* Update local and global counters */
		n_unlocked_frames = ctl.n_pages * page_in_frames;
		atomic64_sub(n_unlocked_frames, &b->size);
		deflated_frames += n_unlocked_frames;

		vmballoon_stats_page_add(b, VMW_BALLOON_PAGE_STAT_FREE,
					 ctl.page_size, ctl.n_pages);

		/* free the ballooned pages */
		vmballoon_release_page_list(&ctl.pages, &ctl.n_pages,
					    ctl.page_size);

		/* Return the refused pages to the ballooned list. */
		vmballoon_enqueue_page_list(b, &ctl.refused_pages,
					    &ctl.n_refused_pages,
					    ctl.page_size);

		/* If we failed to unlock all the pages, move to next size. */
		if (!deflated_all) {
			if (ctl.page_size == b->max_page_size)
				break;
			ctl.page_size++;
		}

		cond_resched();
	}

	return deflated_frames;
}

/**
 * vmballoon_deinit_batching - disables batching mode.
 *
 * @b: pointer to &struct vmballoon.
 *
 * Disables batching by deallocating the page used for communication with the
 * hypervisor and disabling the static key to indicate that batching is off.
 */
static void vmballoon_deinit_batching(struct vmballoon *b)
{
	free_page((unsigned long)b->batch_page);
	b->batch_page = NULL;
	static_branch_disable(&vmw_balloon_batching);
	b->batch_max_pages = 1;
}

/**
 * vmballoon_init_batching - enable batching mode.
 *
 * @b: pointer to &struct vmballoon.
 *
 * Enables batching by allocating a page for communication with the hypervisor
 * and enabling the static_key to use batching.
 *
 * Return: zero on success or an appropriate error-code.
 */
static int vmballoon_init_batching(struct vmballoon *b)
{
	struct page *page;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return -ENOMEM;

	b->batch_page = page_address(page);
	b->batch_max_pages = PAGE_SIZE / sizeof(struct vmballoon_batch_entry);

	static_branch_enable(&vmw_balloon_batching);

	return 0;
}

/*
 * Receive notification and resize balloon
 */
static void vmballoon_doorbell(void *client_data)
{
	struct vmballoon *b = client_data;

	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_DOORBELL);

	mod_delayed_work(system_freezable_wq, &b->dwork, 0);
}

/*
 * Clean up vmci doorbell
 */
static void vmballoon_vmci_cleanup(struct vmballoon *b)
{
	vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
		      VMCI_INVALID_ID, VMCI_INVALID_ID);

	if (!vmci_handle_is_invalid(b->vmci_doorbell)) {
		vmci_doorbell_destroy(b->vmci_doorbell);
		b->vmci_doorbell = VMCI_INVALID_HANDLE;
	}
}

/**
 * vmballoon_vmci_init - Initialize vmci doorbell.
 *
 * @b: pointer to the balloon.
 *
 * Return: zero on success or when wakeup command not supported. Error-code
 * otherwise.
1366 * 1367 * Initialize vmci doorbell, to get notified as soon as balloon changes. 1368 */ 1369 static int vmballoon_vmci_init(struct vmballoon *b) 1370 { 1371 unsigned long error; 1372 1373 if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0) 1374 return 0; 1375 1376 error = vmci_doorbell_create(&b->vmci_doorbell, VMCI_FLAG_DELAYED_CB, 1377 VMCI_PRIVILEGE_FLAG_RESTRICTED, 1378 vmballoon_doorbell, b); 1379 1380 if (error != VMCI_SUCCESS) 1381 goto fail; 1382 1383 error = __vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET, 1384 b->vmci_doorbell.context, 1385 b->vmci_doorbell.resource, NULL); 1386 1387 if (error != VMW_BALLOON_SUCCESS) 1388 goto fail; 1389 1390 return 0; 1391 fail: 1392 vmballoon_vmci_cleanup(b); 1393 return -EIO; 1394 } 1395 1396 /** 1397 * vmballoon_pop - Quickly release all pages allocate for the balloon. 1398 * 1399 * @b: pointer to the balloon. 1400 * 1401 * This function is called when host decides to "reset" balloon for one reason 1402 * or another. Unlike normal "deflate" we do not (shall not) notify host of the 1403 * pages being released. 1404 */ 1405 static void vmballoon_pop(struct vmballoon *b) 1406 { 1407 unsigned long size; 1408 1409 while ((size = atomic64_read(&b->size))) 1410 vmballoon_deflate(b, size, false); 1411 } 1412 1413 /* 1414 * Perform standard reset sequence by popping the balloon (in case it 1415 * is not empty) and then restarting protocol. This operation normally 1416 * happens when host responds with VMW_BALLOON_ERROR_RESET to a command. 1417 */ 1418 static void vmballoon_reset(struct vmballoon *b) 1419 { 1420 int error; 1421 1422 down_write(&b->conf_sem); 1423 1424 vmballoon_vmci_cleanup(b); 1425 1426 /* free all pages, skipping monitor unlock */ 1427 vmballoon_pop(b); 1428 1429 if (vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES)) 1430 goto unlock; 1431 1432 if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) { 1433 if (vmballoon_init_batching(b)) { 1434 /* 1435 * We failed to initialize batching, inform the monitor 1436 * about it by sending a null capability. 1437 * 1438 * The guest will retry in one second. 1439 */ 1440 vmballoon_send_start(b, 0); 1441 goto unlock; 1442 } 1443 } else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) { 1444 vmballoon_deinit_batching(b); 1445 } 1446 1447 vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_RESET); 1448 b->reset_required = false; 1449 1450 error = vmballoon_vmci_init(b); 1451 if (error) 1452 pr_err("failed to initialize vmci doorbell\n"); 1453 1454 if (vmballoon_send_guest_id(b)) 1455 pr_err("failed to send guest ID to the host\n"); 1456 1457 unlock: 1458 up_write(&b->conf_sem); 1459 } 1460 1461 /** 1462 * vmballoon_work - periodic balloon worker for reset, inflation and deflation. 1463 * 1464 * @work: pointer to the &work_struct which is provided by the workqueue. 1465 * 1466 * Resets the protocol if needed, gets the new size and adjusts balloon as 1467 * needed. Repeat in 1 sec. 
1468 */ 1469 static void vmballoon_work(struct work_struct *work) 1470 { 1471 struct delayed_work *dwork = to_delayed_work(work); 1472 struct vmballoon *b = container_of(dwork, struct vmballoon, dwork); 1473 int64_t change = 0; 1474 1475 if (b->reset_required) 1476 vmballoon_reset(b); 1477 1478 down_read(&b->conf_sem); 1479 1480 /* 1481 * Update the stats while holding the semaphore to ensure that 1482 * @stats_enabled is consistent with whether the stats are actually 1483 * enabled 1484 */ 1485 vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_TIMER); 1486 1487 if (!vmballoon_send_get_target(b)) 1488 change = vmballoon_change(b); 1489 1490 if (change != 0) { 1491 pr_debug("%s - size: %llu, target %lu\n", __func__, 1492 atomic64_read(&b->size), READ_ONCE(b->target)); 1493 1494 if (change > 0) 1495 vmballoon_inflate(b); 1496 else /* (change < 0) */ 1497 vmballoon_deflate(b, 0, true); 1498 } 1499 1500 up_read(&b->conf_sem); 1501 1502 /* 1503 * We are using a freezable workqueue so that balloon operations are 1504 * stopped while the system transitions to/from sleep/hibernation. 1505 */ 1506 queue_delayed_work(system_freezable_wq, 1507 dwork, round_jiffies_relative(HZ)); 1508 1509 } 1510 1511 /** 1512 * vmballoon_shrinker_scan() - deflate the balloon due to memory pressure. 1513 * @shrinker: pointer to the balloon shrinker. 1514 * @sc: page reclaim information. 1515 * 1516 * Returns: number of pages that were freed during deflation. 1517 */ 1518 static unsigned long vmballoon_shrinker_scan(struct shrinker *shrinker, 1519 struct shrink_control *sc) 1520 { 1521 struct vmballoon *b = &balloon; 1522 unsigned long deflated_frames; 1523 1524 pr_debug("%s - size: %llu", __func__, atomic64_read(&b->size)); 1525 1526 vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_SHRINK); 1527 1528 /* 1529 * If the lock is also contended for read, we cannot easily reclaim and 1530 * we bail out. 1531 */ 1532 if (!down_read_trylock(&b->conf_sem)) 1533 return 0; 1534 1535 deflated_frames = vmballoon_deflate(b, sc->nr_to_scan, true); 1536 1537 vmballoon_stats_gen_add(b, VMW_BALLOON_STAT_SHRINK_FREE, 1538 deflated_frames); 1539 1540 /* 1541 * Delay future inflation for some time to mitigate the situations in 1542 * which balloon continuously grows and shrinks. Use WRITE_ONCE() since 1543 * the access is asynchronous. 1544 */ 1545 WRITE_ONCE(b->shrink_timeout, jiffies + HZ * VMBALLOON_SHRINK_DELAY); 1546 1547 up_read(&b->conf_sem); 1548 1549 return deflated_frames; 1550 } 1551 1552 /** 1553 * vmballoon_shrinker_count() - return the number of ballooned pages. 1554 * @shrinker: pointer to the balloon shrinker. 1555 * @sc: page reclaim information. 1556 * 1557 * Returns: number of 4k pages that are allocated for the balloon and can 1558 * therefore be reclaimed under pressure. 
1559 */ 1560 static unsigned long vmballoon_shrinker_count(struct shrinker *shrinker, 1561 struct shrink_control *sc) 1562 { 1563 struct vmballoon *b = &balloon; 1564 1565 return atomic64_read(&b->size); 1566 } 1567 1568 static void vmballoon_unregister_shrinker(struct vmballoon *b) 1569 { 1570 if (b->shrinker_registered) 1571 unregister_shrinker(&b->shrinker); 1572 b->shrinker_registered = false; 1573 } 1574 1575 static int vmballoon_register_shrinker(struct vmballoon *b) 1576 { 1577 int r; 1578 1579 /* Do nothing if the shrinker is not enabled */ 1580 if (!vmwballoon_shrinker_enable) 1581 return 0; 1582 1583 b->shrinker.scan_objects = vmballoon_shrinker_scan; 1584 b->shrinker.count_objects = vmballoon_shrinker_count; 1585 b->shrinker.seeks = DEFAULT_SEEKS; 1586 1587 r = register_shrinker(&b->shrinker); 1588 1589 if (r == 0) 1590 b->shrinker_registered = true; 1591 1592 return r; 1593 } 1594 1595 /* 1596 * DEBUGFS Interface 1597 */ 1598 #ifdef CONFIG_DEBUG_FS 1599 1600 static const char * const vmballoon_stat_page_names[] = { 1601 [VMW_BALLOON_PAGE_STAT_ALLOC] = "alloc", 1602 [VMW_BALLOON_PAGE_STAT_ALLOC_FAIL] = "allocFail", 1603 [VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC] = "errAlloc", 1604 [VMW_BALLOON_PAGE_STAT_REFUSED_FREE] = "errFree", 1605 [VMW_BALLOON_PAGE_STAT_FREE] = "free" 1606 }; 1607 1608 static const char * const vmballoon_stat_names[] = { 1609 [VMW_BALLOON_STAT_TIMER] = "timer", 1610 [VMW_BALLOON_STAT_DOORBELL] = "doorbell", 1611 [VMW_BALLOON_STAT_RESET] = "reset", 1612 [VMW_BALLOON_STAT_SHRINK] = "shrink", 1613 [VMW_BALLOON_STAT_SHRINK_FREE] = "shrinkFree" 1614 }; 1615 1616 static int vmballoon_enable_stats(struct vmballoon *b) 1617 { 1618 int r = 0; 1619 1620 down_write(&b->conf_sem); 1621 1622 /* did we somehow race with another reader which enabled stats? */ 1623 if (b->stats) 1624 goto out; 1625 1626 b->stats = kzalloc(sizeof(*b->stats), GFP_KERNEL); 1627 1628 if (!b->stats) { 1629 /* allocation failed */ 1630 r = -ENOMEM; 1631 goto out; 1632 } 1633 static_key_enable(&balloon_stat_enabled.key); 1634 out: 1635 up_write(&b->conf_sem); 1636 return r; 1637 } 1638 1639 /** 1640 * vmballoon_debug_show - shows statistics of balloon operations. 1641 * @f: pointer to the &struct seq_file. 1642 * @offset: ignored. 1643 * 1644 * Provides the statistics that can be accessed in vmmemctl in the debugfs. 1645 * To avoid the overhead - mainly that of memory - of collecting the statistics, 1646 * we only collect statistics after the first time the counters are read. 1647 * 1648 * Return: zero on success or an error code. 1649 */ 1650 static int vmballoon_debug_show(struct seq_file *f, void *offset) 1651 { 1652 struct vmballoon *b = f->private; 1653 int i, j; 1654 1655 /* enables stats if they are disabled */ 1656 if (!b->stats) { 1657 int r = vmballoon_enable_stats(b); 1658 1659 if (r) 1660 return r; 1661 } 1662 1663 /* format capabilities info */ 1664 seq_printf(f, "%-22s: %#16x\n", "balloon capabilities", 1665 VMW_BALLOON_CAPABILITIES); 1666 seq_printf(f, "%-22s: %#16lx\n", "used capabilities", b->capabilities); 1667 seq_printf(f, "%-22s: %16s\n", "is resetting", 1668 b->reset_required ? 
"y" : "n"); 1669 1670 /* format size info */ 1671 seq_printf(f, "%-22s: %16lu\n", "target", READ_ONCE(b->target)); 1672 seq_printf(f, "%-22s: %16llu\n", "current", atomic64_read(&b->size)); 1673 1674 for (i = 0; i < VMW_BALLOON_CMD_NUM; i++) { 1675 if (vmballoon_cmd_names[i] == NULL) 1676 continue; 1677 1678 seq_printf(f, "%-22s: %16llu (%llu failed)\n", 1679 vmballoon_cmd_names[i], 1680 atomic64_read(&b->stats->ops[i][VMW_BALLOON_OP_STAT]), 1681 atomic64_read(&b->stats->ops[i][VMW_BALLOON_OP_FAIL_STAT])); 1682 } 1683 1684 for (i = 0; i < VMW_BALLOON_STAT_NUM; i++) 1685 seq_printf(f, "%-22s: %16llu\n", 1686 vmballoon_stat_names[i], 1687 atomic64_read(&b->stats->general_stat[i])); 1688 1689 for (i = 0; i < VMW_BALLOON_PAGE_STAT_NUM; i++) { 1690 for (j = 0; j < VMW_BALLOON_NUM_PAGE_SIZES; j++) 1691 seq_printf(f, "%-18s(%s): %16llu\n", 1692 vmballoon_stat_page_names[i], 1693 vmballoon_page_size_names[j], 1694 atomic64_read(&b->stats->page_stat[i][j])); 1695 } 1696 1697 return 0; 1698 } 1699 1700 DEFINE_SHOW_ATTRIBUTE(vmballoon_debug); 1701 1702 static int __init vmballoon_debugfs_init(struct vmballoon *b) 1703 { 1704 int error; 1705 1706 b->dbg_entry = debugfs_create_file("vmmemctl", S_IRUGO, NULL, b, 1707 &vmballoon_debug_fops); 1708 if (IS_ERR(b->dbg_entry)) { 1709 error = PTR_ERR(b->dbg_entry); 1710 pr_err("failed to create debugfs entry, error: %d\n", error); 1711 return error; 1712 } 1713 1714 return 0; 1715 } 1716 1717 static void __exit vmballoon_debugfs_exit(struct vmballoon *b) 1718 { 1719 static_key_disable(&balloon_stat_enabled.key); 1720 debugfs_remove(b->dbg_entry); 1721 kfree(b->stats); 1722 b->stats = NULL; 1723 } 1724 1725 #else 1726 1727 static inline int vmballoon_debugfs_init(struct vmballoon *b) 1728 { 1729 return 0; 1730 } 1731 1732 static inline void vmballoon_debugfs_exit(struct vmballoon *b) 1733 { 1734 } 1735 1736 #endif /* CONFIG_DEBUG_FS */ 1737 1738 1739 #ifdef CONFIG_BALLOON_COMPACTION 1740 1741 static struct dentry *vmballoon_mount(struct file_system_type *fs_type, 1742 int flags, const char *dev_name, 1743 void *data) 1744 { 1745 static const struct dentry_operations ops = { 1746 .d_dname = simple_dname, 1747 }; 1748 1749 return mount_pseudo(fs_type, "balloon-vmware:", NULL, &ops, 1750 BALLOON_VMW_MAGIC); 1751 } 1752 1753 static struct file_system_type vmballoon_fs = { 1754 .name = "balloon-vmware", 1755 .mount = vmballoon_mount, 1756 .kill_sb = kill_anon_super, 1757 }; 1758 1759 static struct vfsmount *vmballoon_mnt; 1760 1761 /** 1762 * vmballoon_migratepage() - migrates a balloon page. 1763 * @b_dev_info: balloon device information descriptor. 1764 * @newpage: the page to which @page should be migrated. 1765 * @page: a ballooned page that should be migrated. 1766 * @mode: migration mode, ignored. 1767 * 1768 * This function is really open-coded, but that is according to the interface 1769 * that balloon_compaction provides. 1770 * 1771 * Return: zero on success, -EAGAIN when migration cannot be performed 1772 * momentarily, and -EBUSY if migration failed and should be retried 1773 * with that specific page. 1774 */ 1775 static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info, 1776 struct page *newpage, struct page *page, 1777 enum migrate_mode mode) 1778 { 1779 unsigned long status, flags; 1780 struct vmballoon *b; 1781 int ret; 1782 1783 b = container_of(b_dev_info, struct vmballoon, b_dev_info); 1784 1785 /* 1786 * If the semaphore is taken, there is ongoing configuration change 1787 * (i.e., balloon reset), so try again. 
1788 */ 1789 if (!down_read_trylock(&b->conf_sem)) 1790 return -EAGAIN; 1791 1792 spin_lock(&b->comm_lock); 1793 /* 1794 * We must start by deflating and not inflating, as otherwise the 1795 * hypervisor may tell us that it has enough memory and the new page is 1796 * not needed. Since the old page is isolated, we cannot use the list 1797 * interface to unlock it, as the LRU field is used for isolation. 1798 * Instead, we use the native interface directly. 1799 */ 1800 vmballoon_add_page(b, 0, page); 1801 status = vmballoon_lock_op(b, 1, VMW_BALLOON_4K_PAGE, 1802 VMW_BALLOON_DEFLATE); 1803 1804 if (status == VMW_BALLOON_SUCCESS) 1805 status = vmballoon_status_page(b, 0, &page); 1806 1807 /* 1808 * If a failure happened, let the migration mechanism know that it 1809 * should not retry. 1810 */ 1811 if (status != VMW_BALLOON_SUCCESS) { 1812 spin_unlock(&b->comm_lock); 1813 ret = -EBUSY; 1814 goto out_unlock; 1815 } 1816 1817 /* 1818 * The page is isolated, so it is safe to delete it without holding 1819 * @pages_lock . We keep holding @comm_lock since we will need it in a 1820 * second. 1821 */ 1822 balloon_page_delete(page); 1823 1824 put_page(page); 1825 1826 /* Inflate */ 1827 vmballoon_add_page(b, 0, newpage); 1828 status = vmballoon_lock_op(b, 1, VMW_BALLOON_4K_PAGE, 1829 VMW_BALLOON_INFLATE); 1830 1831 if (status == VMW_BALLOON_SUCCESS) 1832 status = vmballoon_status_page(b, 0, &newpage); 1833 1834 spin_unlock(&b->comm_lock); 1835 1836 if (status != VMW_BALLOON_SUCCESS) { 1837 /* 1838 * A failure happened. While we can deflate the page we just 1839 * inflated, this deflation can also encounter an error. Instead 1840 * we will decrease the size of the balloon to reflect the 1841 * change and report failure. 1842 */ 1843 atomic64_dec(&b->size); 1844 ret = -EBUSY; 1845 } else { 1846 /* 1847 * Success. Take a reference for the page, and we will add it to 1848 * the list after acquiring the lock. 1849 */ 1850 get_page(newpage); 1851 ret = MIGRATEPAGE_SUCCESS; 1852 } 1853 1854 /* Update the balloon list under the @pages_lock */ 1855 spin_lock_irqsave(&b->b_dev_info.pages_lock, flags); 1856 1857 /* 1858 * On inflation success, we already took a reference for the @newpage. 1859 * If we succeed just insert it to the list and update the statistics 1860 * under the lock. 1861 */ 1862 if (ret == MIGRATEPAGE_SUCCESS) { 1863 balloon_page_insert(&b->b_dev_info, newpage); 1864 __count_vm_event(BALLOON_MIGRATE); 1865 } 1866 1867 /* 1868 * We deflated successfully, so regardless to the inflation success, we 1869 * need to reduce the number of isolated_pages. 1870 */ 1871 b->b_dev_info.isolated_pages--; 1872 spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags); 1873 1874 out_unlock: 1875 up_read(&b->conf_sem); 1876 return ret; 1877 } 1878 1879 /** 1880 * vmballoon_compaction_deinit() - removes compaction related data. 1881 * 1882 * @b: pointer to the balloon. 1883 */ 1884 static void vmballoon_compaction_deinit(struct vmballoon *b) 1885 { 1886 if (!IS_ERR(b->b_dev_info.inode)) 1887 iput(b->b_dev_info.inode); 1888 1889 b->b_dev_info.inode = NULL; 1890 kern_unmount(vmballoon_mnt); 1891 vmballoon_mnt = NULL; 1892 } 1893 1894 /** 1895 * vmballoon_compaction_init() - initialized compaction for the balloon. 1896 * 1897 * @b: pointer to the balloon. 1898 * 1899 * If during the initialization a failure occurred, this function does not 1900 * perform cleanup. The caller must call vmballoon_compaction_deinit() in this 1901 * case. 1902 * 1903 * Return: zero on success or error code on failure. 
1904 */ 1905 static __init int vmballoon_compaction_init(struct vmballoon *b) 1906 { 1907 vmballoon_mnt = kern_mount(&vmballoon_fs); 1908 if (IS_ERR(vmballoon_mnt)) 1909 return PTR_ERR(vmballoon_mnt); 1910 1911 b->b_dev_info.migratepage = vmballoon_migratepage; 1912 b->b_dev_info.inode = alloc_anon_inode(vmballoon_mnt->mnt_sb); 1913 1914 if (IS_ERR(b->b_dev_info.inode)) 1915 return PTR_ERR(b->b_dev_info.inode); 1916 1917 b->b_dev_info.inode->i_mapping->a_ops = &balloon_aops; 1918 return 0; 1919 } 1920 1921 #else /* CONFIG_BALLOON_COMPACTION */ 1922 1923 static void vmballoon_compaction_deinit(struct vmballoon *b) 1924 { 1925 } 1926 1927 static int vmballoon_compaction_init(struct vmballoon *b) 1928 { 1929 return 0; 1930 } 1931 1932 #endif /* CONFIG_BALLOON_COMPACTION */ 1933 1934 static int __init vmballoon_init(void) 1935 { 1936 int error; 1937 1938 /* 1939 * Check if we are running on VMware's hypervisor and bail out 1940 * if we are not. 1941 */ 1942 if (x86_hyper_type != X86_HYPER_VMWARE) 1943 return -ENODEV; 1944 1945 INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work); 1946 1947 error = vmballoon_register_shrinker(&balloon); 1948 if (error) 1949 goto fail; 1950 1951 error = vmballoon_debugfs_init(&balloon); 1952 if (error) 1953 goto fail; 1954 1955 /* 1956 * Initialization of compaction must be done after the call to 1957 * balloon_devinfo_init() . 1958 */ 1959 balloon_devinfo_init(&balloon.b_dev_info); 1960 error = vmballoon_compaction_init(&balloon); 1961 if (error) 1962 goto fail; 1963 1964 INIT_LIST_HEAD(&balloon.huge_pages); 1965 spin_lock_init(&balloon.comm_lock); 1966 init_rwsem(&balloon.conf_sem); 1967 balloon.vmci_doorbell = VMCI_INVALID_HANDLE; 1968 balloon.batch_page = NULL; 1969 balloon.page = NULL; 1970 balloon.reset_required = true; 1971 1972 queue_delayed_work(system_freezable_wq, &balloon.dwork, 0); 1973 1974 return 0; 1975 fail: 1976 vmballoon_unregister_shrinker(&balloon); 1977 vmballoon_compaction_deinit(&balloon); 1978 return error; 1979 } 1980 1981 /* 1982 * Using late_initcall() instead of module_init() allows the balloon to use the 1983 * VMCI doorbell even when the balloon is built into the kernel. Otherwise the 1984 * VMCI is probed only after the balloon is initialized. If the balloon is used 1985 * as a module, late_initcall() is equivalent to module_init(). 1986 */ 1987 late_initcall(vmballoon_init); 1988 1989 static void __exit vmballoon_exit(void) 1990 { 1991 vmballoon_unregister_shrinker(&balloon); 1992 vmballoon_vmci_cleanup(&balloon); 1993 cancel_delayed_work_sync(&balloon.dwork); 1994 1995 vmballoon_debugfs_exit(&balloon); 1996 1997 /* 1998 * Deallocate all reserved memory, and reset connection with monitor. 1999 * Reset connection before deallocating memory to avoid potential for 2000 * additional spurious resets from guest touching deallocated pages. 2001 */ 2002 vmballoon_send_start(&balloon, 0); 2003 vmballoon_pop(&balloon); 2004 2005 /* Only once we popped the balloon, compaction can be deinit */ 2006 vmballoon_compaction_deinit(&balloon); 2007 } 2008 module_exit(vmballoon_exit); 2009
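/*
 * Illustrative sketch, not part of the original driver: the communication
 * page used by the VMW_BALLOON_CMD_BATCHED_* commands is an array of 64-bit
 * entries, so one base page holds PAGE_SIZE / sizeof(struct
 * vmballoon_batch_entry) entries - 512 with 4KB pages, as on x86, matching
 * the "up to 512" pages per batch mentioned in the command documentation
 * above. The helper below is hypothetical and only documents that layout.
 */
static void __maybe_unused vmballoon_example_batch_layout(void)
{
	/* Each entry packs status (5 bits), reserved bits and a 52-bit PFN. */
	BUILD_BUG_ON(sizeof(struct vmballoon_batch_entry) != sizeof(u64));

	/* A single communication page therefore describes up to 512 pages. */
	BUILD_BUG_ON(PAGE_SIZE / sizeof(struct vmballoon_batch_entry) != 512);
}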