/*
 * Copyright (c) 2012, Microsoft Corporation.
 *
 * Author:
 *	K. Y. Srinivasan <kys@microsoft.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/mman.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/memory_hotplug.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/percpu_counter.h>

#include <linux/hyperv.h>

/*
 * We begin with definitions supporting the Dynamic Memory protocol
 * with the host.
 *
 * Begin protocol definitions.
 */

/*
 * Protocol versions. The low word is the minor version, the high word the
 * major version.
 *
 * History:
 * Initial version 1.0
 * Changed to 0.1 on 2009/03/25
 * Changed to 0.2 on 2009/05/14
 * Changed to 0.3 on 2009/12/03
 * Changed to 1.0 on 2011/04/05
 */

#define DYNMEM_MAKE_VERSION(Major, Minor) ((__u32)(((Major) << 16) | (Minor)))
#define DYNMEM_MAJOR_VERSION(Version) ((__u32)(Version) >> 16)
#define DYNMEM_MINOR_VERSION(Version) ((__u32)(Version) & 0xff)

enum {
	DYNMEM_PROTOCOL_VERSION_1 = DYNMEM_MAKE_VERSION(0, 3),
	DYNMEM_PROTOCOL_VERSION_2 = DYNMEM_MAKE_VERSION(1, 0),
	DYNMEM_PROTOCOL_VERSION_3 = DYNMEM_MAKE_VERSION(2, 0),

	DYNMEM_PROTOCOL_VERSION_WIN7 = DYNMEM_PROTOCOL_VERSION_1,
	DYNMEM_PROTOCOL_VERSION_WIN8 = DYNMEM_PROTOCOL_VERSION_2,
	DYNMEM_PROTOCOL_VERSION_WIN10 = DYNMEM_PROTOCOL_VERSION_3,

	DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN10
};

/*
 * Message Types
 */

enum dm_message_type {
	/*
	 * Version 0.3
	 */
	DM_ERROR = 0,
	DM_VERSION_REQUEST = 1,
	DM_VERSION_RESPONSE = 2,
	DM_CAPABILITIES_REPORT = 3,
	DM_CAPABILITIES_RESPONSE = 4,
	DM_STATUS_REPORT = 5,
	DM_BALLOON_REQUEST = 6,
	DM_BALLOON_RESPONSE = 7,
	DM_UNBALLOON_REQUEST = 8,
	DM_UNBALLOON_RESPONSE = 9,
	DM_MEM_HOT_ADD_REQUEST = 10,
	DM_MEM_HOT_ADD_RESPONSE = 11,
	DM_VERSION_03_MAX = 11,
	/*
	 * Version 1.0.
	 */
	DM_INFO_MESSAGE = 12,
	DM_VERSION_1_MAX = 12
};

/*
 * Structures defining the dynamic memory management
 * protocol.
 */

union dm_version {
	struct {
		__u16 minor_version;
		__u16 major_version;
	};
	__u32 version;
} __packed;

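/*
 * Illustrative example (not part of the protocol headers): a guest that
 * supports both ballooning and hot-add, and that wants hot-added memory
 * aligned to 128 MB, reports cap_bits.balloon = 1, cap_bits.hot_add = 1 and
 * cap_bits.hot_add_alignment = 7 (2^7 MB); this is what balloon_probe() in
 * this driver advertises.
 */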
union dm_caps {
	struct {
		__u64 balloon:1;
		__u64 hot_add:1;
		/*
		 * To support guests that may have alignment
		 * limitations on hot-add, the guest can specify
		 * its alignment requirements; a value of n
		 * represents an alignment of 2^n in megabytes.
		 */
		__u64 hot_add_alignment:4;
		__u64 reservedz:58;
	} cap_bits;
	__u64 caps;
} __packed;

union dm_mem_page_range {
	struct {
		/*
		 * The PFN number of the first page in the range.
		 * 40 bits is the architectural limit of a PFN
		 * number for AMD64.
		 */
		__u64 start_page:40;
		/*
		 * The number of pages in the range.
		 */
		__u64 page_cnt:24;
	} finfo;
	__u64 page_range;
} __packed;

/*
 * The header for all dynamic memory messages:
 *
 * type: Type of the message.
 * size: Size of the message in bytes; including the header.
 * trans_id: The guest is responsible for manufacturing this ID.
 */

struct dm_header {
	__u16 type;
	__u16 size;
	__u32 trans_id;
} __packed;

/*
 * A generic message format for dynamic memory.
 * Specific message formats are defined later in the file.
 */

struct dm_message {
	struct dm_header hdr;
	__u8 data[]; /* enclosed message */
} __packed;

/*
 * Specific message types supporting the dynamic memory protocol.
 */

/*
 * Version negotiation message. Sent from the guest to the host.
 * The guest is free to try different versions until the host
 * accepts the version.
 *
 * dm_version: The protocol version requested.
 * is_last_attempt: If TRUE, this is the last version guest will request.
 * reservedz: Reserved field, set to zero.
 */

struct dm_version_request {
	struct dm_header hdr;
	union dm_version version;
	__u32 is_last_attempt:1;
	__u32 reservedz:31;
} __packed;

/*
 * Version response message; sent from the host to the guest and
 * indicates whether the host has accepted the version sent by the guest.
 *
 * is_accepted: If TRUE, host has accepted the version and the guest
 * should proceed to the next stage of the protocol. FALSE indicates that
 * guest should re-try with a different version.
 *
 * reservedz: Reserved field, set to zero.
 */

struct dm_version_response {
	struct dm_header hdr;
	__u64 is_accepted:1;
	__u64 reservedz:63;
} __packed;

/*
 * Message reporting capabilities. This is sent from the guest to the
 * host.
 */

struct dm_capabilities {
	struct dm_header hdr;
	union dm_caps caps;
	__u64 min_page_cnt;
	__u64 max_page_number;
} __packed;

/*
 * Response to the capabilities message. This is sent from the host to the
 * guest. This message notifies if the host has accepted the guest's
 * capabilities. If the host has not accepted, the guest must shutdown
 * the service.
 *
 * is_accepted: Indicates if the host has accepted guest's capabilities.
 * reservedz: Must be 0.
 */

struct dm_capabilities_resp_msg {
	struct dm_header hdr;
	__u64 is_accepted:1;
	__u64 reservedz:63;
} __packed;

/*
 * This message is used to report memory pressure from the guest.
 * This message is not part of any transaction and there is no
 * response to this message.
 *
 * num_avail: Available memory in pages.
 * num_committed: Committed memory in pages.
 * page_file_size: The accumulated size of all page files
 * in the system in pages.
 * zero_free: The number of zero and free pages.
 * page_file_writes: The writes to the page file in pages.
 * io_diff: An indicator of file cache efficiency or page file activity,
 * calculated as File Cache Page Fault Count - Page Read Count.
 * This value is in pages.
 *
 * Some of these metrics are Windows specific and fortunately
 * the algorithm on the host side that computes the guest memory
 * pressure only uses num_committed value.
 */

struct dm_status {
	struct dm_header hdr;
	__u64 num_avail;
	__u64 num_committed;
	__u64 page_file_size;
	__u64 zero_free;
	__u32 page_file_writes;
	__u32 io_diff;
} __packed;

/*
 * Message to ask the guest to allocate memory - balloon up message.
 * This message is sent from the host to the guest. The guest may not be
 * able to allocate as much memory as requested.
 *
 * num_pages: number of pages to allocate.
 */

struct dm_balloon {
	struct dm_header hdr;
	__u32 num_pages;
	__u32 reservedz;
} __packed;

/*
 * Balloon response message; this message is sent from the guest
 * to the host in response to the balloon message.
 *
 * reservedz: Reserved; must be set to zero.
 * more_pages: If FALSE, this is the last message of the transaction.
 * If TRUE, there will be at least one more message from the guest.
 *
 * range_count: The number of ranges in the range array.
 *
 * range_array: An array of page ranges returned to the host.
 *
 */

struct dm_balloon_response {
	struct dm_header hdr;
	__u32 reservedz;
	__u32 more_pages:1;
	__u32 range_count:31;
	union dm_mem_page_range range_array[];
} __packed;

/*
 * Un-balloon message; this message is sent from the host
 * to the guest to give guest more memory.
 *
 * more_pages: If FALSE, this is the last message of the transaction.
 * If TRUE, there will be at least one more message from the guest.
 *
 * reservedz: Reserved; must be set to zero.
 *
 * range_count: The number of ranges in the range array.
 *
 * range_array: An array of page ranges returned to the host.
 *
 */

struct dm_unballoon_request {
	struct dm_header hdr;
	__u32 more_pages:1;
	__u32 reservedz:31;
	__u32 range_count;
	union dm_mem_page_range range_array[];
} __packed;

/*
 * Un-balloon response message; this message is sent from the guest
 * to the host in response to an unballoon request.
 *
 */

struct dm_unballoon_response {
	struct dm_header hdr;
} __packed;

/*
 * Hot add request message. Message sent from the host to the guest.
 *
 * mem_range: Memory range to hot add.
 *
 * On Linux we currently don't support this since we cannot hot add
 * arbitrary granularity of memory.
 */

struct dm_hot_add {
	struct dm_header hdr;
	union dm_mem_page_range range;
} __packed;

/*
 * Hot add response message.
 * This message is sent by the guest to report the status of a hot add request.
 * If page_count is less than the requested page count, then the host should
 * assume all further hot add requests will fail, since this indicates that
 * the guest has hit an upper physical memory barrier.
 *
 * Hot adds may also fail due to low resources; in this case, the guest must
 * not complete this message until the hot add can succeed, and the host must
 * not send a new hot add request until the response is sent.
 * If the VSC fails to hot add memory DYNMEM_NUMBER_OF_UNSUCCESSFUL_HOTADD_ATTEMPTS
 * times, it fails the request.
 *
 *
 * page_count: number of pages that were successfully hot added.
 *
 * result: result of the operation 1: success, 0: failure.
 *
 */

struct dm_hot_add_response {
	struct dm_header hdr;
	__u32 page_count;
	__u32 result;
} __packed;

/*
 * Types of information sent from host to the guest.
 */

enum dm_info_type {
	INFO_TYPE_MAX_PAGE_CNT = 0,
	MAX_INFO_TYPE
};

/*
 * Header for the information message.
 */

struct dm_info_header {
	enum dm_info_type type;
	__u32 data_size;
} __packed;

/*
 * This message is sent from the host to the guest to pass
 * some relevant information (win8 addition).
 *
 * reserved: not used.
 * info_size: size of the information blob.
 * info: information blob.
 */

struct dm_info_msg {
	struct dm_header hdr;
	__u32 reserved;
	__u32 info_size;
	__u8 info[];
};

/*
 * End protocol definitions.
 */

/*
 * State to manage hot adding memory into the guest.
 * The range start_pfn : end_pfn specifies the range
 * that the host has asked us to hot add. The range
 * start_pfn : ha_end_pfn specifies the range that we have
 * currently hot added. We hot add in multiples of 128M
 * chunks; it is possible that we may not be able to bring
 * online all the pages in the region. The range
 * covered_start_pfn:covered_end_pfn defines the pages that can
 * be brought online.
 */

struct hv_hotadd_state {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long covered_start_pfn;
	unsigned long covered_end_pfn;
	unsigned long ha_end_pfn;
	unsigned long end_pfn;
	/*
	 * A list of gaps.
	 */
	struct list_head gap_list;
};

struct hv_hotadd_gap {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long end_pfn;
};

struct balloon_state {
	__u32 num_pages;
	struct work_struct wrk;
};

struct hot_add_wrk {
	union dm_mem_page_range ha_page_range;
	union dm_mem_page_range ha_region_range;
	struct work_struct wrk;
};

static bool hot_add = true;
static bool do_hot_add;
/*
 * Delay reporting memory pressure by
 * the specified number of seconds.
 */
static uint pressure_report_delay = 45;

/*
 * The last time we posted a pressure report to host.
 */
static unsigned long last_post_time;

module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");

module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
static atomic_t trans_id = ATOMIC_INIT(0);

static int dm_ring_size = (5 * PAGE_SIZE);

/*
 * Driver specific state.
 */

enum hv_dm_state {
	DM_INITIALIZING = 0,
	DM_INITIALIZED,
	DM_BALLOON_UP,
	DM_BALLOON_DOWN,
	DM_HOT_ADD,
	DM_INIT_ERROR
};

static __u8 recv_buffer[PAGE_SIZE];
static __u8 *send_buffer;

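/*
 * Note (informational, assuming the usual 4 KiB base page size): PAGES_IN_2M
 * (512) below is the number of PFNs in one 2 MiB balloon allocation unit, and
 * HA_CHUNK (32 * 1024 PFNs) corresponds to the 128 MiB granularity in which
 * this driver hot-adds memory, matching the 2^7 MB hot-add alignment it
 * reports to the host.
 */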
#define PAGES_IN_2M 512
#define HA_CHUNK (32 * 1024)

struct hv_dynmem_device {
	struct hv_device *dev;
	enum hv_dm_state state;
	struct completion host_event;
	struct completion config_event;

	/*
	 * Number of pages we have currently ballooned out.
	 */
	unsigned int num_pages_ballooned;
	unsigned int num_pages_onlined;
	unsigned int num_pages_added;

	/*
	 * State to manage the ballooning (up) operation.
	 */
	struct balloon_state balloon_wrk;

	/*
	 * State to execute the "hot-add" operation.
	 */
	struct hot_add_wrk ha_wrk;

	/*
	 * This state tracks if the host has specified a hot-add
	 * region.
	 */
	bool host_specified_ha_region;

	/*
	 * State to synchronize hot-add.
	 */
	struct completion ol_waitevent;
	bool ha_waiting;
	/*
	 * This thread handles hot-add
	 * requests from the host as well as notifying
	 * the host about memory pressure in
	 * the guest.
	 */
	struct task_struct *thread;

	/*
	 * Protects ha_region_list, num_pages_onlined counter and individual
	 * regions from ha_region_list.
	 */
	spinlock_t ha_lock;

	/*
	 * A list of hot-add regions.
	 */
	struct list_head ha_region_list;

	/*
	 * We start with the highest version we can support
	 * and downgrade based on the host; we save here the
	 * next version to try.
	 */
	__u32 next_version;

	/*
	 * The negotiated version agreed by host.
	 */
	__u32 version;
};

static struct hv_dynmem_device dm_device;

static void post_status(struct hv_dynmem_device *dm);

#ifdef CONFIG_MEMORY_HOTPLUG
static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
			      void *v)
{
	struct memory_notify *mem = (struct memory_notify *)v;
	unsigned long flags;

	switch (val) {
	case MEM_ONLINE:
		spin_lock_irqsave(&dm_device.ha_lock, flags);
		dm_device.num_pages_onlined += mem->nr_pages;
		spin_unlock_irqrestore(&dm_device.ha_lock, flags);
		/* Fall through */
	case MEM_CANCEL_ONLINE:
		if (dm_device.ha_waiting) {
			dm_device.ha_waiting = false;
			complete(&dm_device.ol_waitevent);
		}
		break;

	case MEM_OFFLINE:
		spin_lock_irqsave(&dm_device.ha_lock, flags);
		dm_device.num_pages_onlined -= mem->nr_pages;
		spin_unlock_irqrestore(&dm_device.ha_lock, flags);
		break;
	case MEM_GOING_ONLINE:
	case MEM_GOING_OFFLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block hv_memory_nb = {
	.notifier_call = hv_memory_notifier,
	.priority = 0
};

/* Check if the particular page is backed and can be onlined and online it. */
static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
{
	unsigned long cur_start_pgp;
	unsigned long cur_end_pgp;
	struct hv_hotadd_gap *gap;

	cur_start_pgp = (unsigned long)pfn_to_page(has->covered_start_pfn);
	cur_end_pgp = (unsigned long)pfn_to_page(has->covered_end_pfn);

	/* The page is not backed. */
	if (((unsigned long)pg < cur_start_pgp) ||
	    ((unsigned long)pg >= cur_end_pgp))
		return;

	/* Check for gaps. */
	list_for_each_entry(gap, &has->gap_list, list) {
		cur_start_pgp = (unsigned long)
			pfn_to_page(gap->start_pfn);
		cur_end_pgp = (unsigned long)
			pfn_to_page(gap->end_pfn);
		if (((unsigned long)pg >= cur_start_pgp) &&
		    ((unsigned long)pg < cur_end_pgp)) {
			return;
		}
	}

	/* This frame is currently backed; online the page. */
	__online_page_set_limits(pg);
	__online_page_increment_counters(pg);
	__online_page_free(pg);
}

static void hv_bring_pgs_online(struct hv_hotadd_state *has,
				unsigned long start_pfn, unsigned long size)
{
	int i;

	pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn);
	for (i = 0; i < size; i++)
		hv_page_online_one(has, pfn_to_page(start_pfn + i));
}

static void hv_mem_hot_add(unsigned long start, unsigned long size,
			   unsigned long pfn_count,
			   struct hv_hotadd_state *has)
{
	int ret = 0;
	int i, nid;
	unsigned long start_pfn;
	unsigned long processed_pfn;
	unsigned long total_pfn = pfn_count;
	unsigned long flags;

	for (i = 0; i < (size/HA_CHUNK); i++) {
		start_pfn = start + (i * HA_CHUNK);

		spin_lock_irqsave(&dm_device.ha_lock, flags);
		has->ha_end_pfn += HA_CHUNK;

		if (total_pfn > HA_CHUNK) {
			processed_pfn = HA_CHUNK;
			total_pfn -= HA_CHUNK;
		} else {
			processed_pfn = total_pfn;
			total_pfn = 0;
		}

		has->covered_end_pfn += processed_pfn;
		spin_unlock_irqrestore(&dm_device.ha_lock, flags);

		init_completion(&dm_device.ol_waitevent);
		dm_device.ha_waiting = !memhp_auto_online;

		nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
		ret = add_memory(nid, PFN_PHYS((start_pfn)),
				(HA_CHUNK << PAGE_SHIFT));

		if (ret) {
			pr_warn("hot_add memory failed error is %d\n", ret);
			if (ret == -EEXIST) {
				/*
				 * This error indicates that the failure
				 * is not transient. This is the
				 * case where the guest's physical address map
				 * precludes hot adding memory. Stop all further
				 * memory hot-add.
				 */
				do_hot_add = false;
			}
			spin_lock_irqsave(&dm_device.ha_lock, flags);
			has->ha_end_pfn -= HA_CHUNK;
			has->covered_end_pfn -= processed_pfn;
			spin_unlock_irqrestore(&dm_device.ha_lock, flags);
			break;
		}

		/*
		 * Wait for the memory block to be onlined when memory onlining
		 * is done outside of the kernel (memhp_auto_online). Since the
		 * hot add has succeeded, it is ok to proceed even if the pages
		 * in the hot added region have not been "onlined" within the
		 * allowed time.
		 */
		if (dm_device.ha_waiting)
			wait_for_completion_timeout(&dm_device.ol_waitevent,
						    5*HZ);
		post_status(&dm_device);
	}

	return;
}

static void hv_online_page(struct page *pg)
{
	struct hv_hotadd_state *has;
	unsigned long cur_start_pgp;
	unsigned long cur_end_pgp;
	unsigned long flags;

	spin_lock_irqsave(&dm_device.ha_lock, flags);
	list_for_each_entry(has, &dm_device.ha_region_list, list) {
		cur_start_pgp = (unsigned long)
			pfn_to_page(has->start_pfn);
		cur_end_pgp = (unsigned long)pfn_to_page(has->end_pfn);

		/* The page belongs to a different HAS. */
		if (((unsigned long)pg < cur_start_pgp) ||
		    ((unsigned long)pg >= cur_end_pgp))
			continue;

		hv_page_online_one(has, pg);
		break;
	}
	spin_unlock_irqrestore(&dm_device.ha_lock, flags);
}

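/*
 * Summary (descriptive, derived from the function below): pfn_covered()
 * checks whether the pfn range the host asked us to hot add falls within a
 * hot-add region we already track. Any hole between the region's current
 * covered_end_pfn and start_pfn is recorded as a gap so those unbacked pages
 * are never onlined, and the region is grown in HA_CHUNK multiples if the
 * request extends past its current end. Returns 1 if the range is covered,
 * 0 if no region matches, and -ENOMEM if a gap cannot be allocated.
 */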
static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
{
	struct hv_hotadd_state *has;
	struct hv_hotadd_gap *gap;
	unsigned long residual, new_inc;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&dm_device.ha_lock, flags);
	list_for_each_entry(has, &dm_device.ha_region_list, list) {
		/*
		 * If the pfn range we are dealing with is not in the current
		 * "hot add block", move on.
		 */
		if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
			continue;

		/*
		 * If the current start pfn is not where the covered_end
		 * is, create a gap and update covered_end_pfn.
		 */
		if (has->covered_end_pfn != start_pfn) {
			gap = kzalloc(sizeof(struct hv_hotadd_gap), GFP_ATOMIC);
			if (!gap) {
				ret = -ENOMEM;
				break;
			}

			INIT_LIST_HEAD(&gap->list);
			gap->start_pfn = has->covered_end_pfn;
			gap->end_pfn = start_pfn;
			list_add_tail(&gap->list, &has->gap_list);

			has->covered_end_pfn = start_pfn;
		}

		/*
		 * If the current hot-add request extends beyond
		 * our current limit, extend it.
		 */
		if ((start_pfn + pfn_cnt) > has->end_pfn) {
			residual = (start_pfn + pfn_cnt - has->end_pfn);
			/*
			 * Extend the region by multiples of HA_CHUNK.
			 */
			new_inc = (residual / HA_CHUNK) * HA_CHUNK;
			if (residual % HA_CHUNK)
				new_inc += HA_CHUNK;

			has->end_pfn += new_inc;
		}

		ret = 1;
		break;
	}
	spin_unlock_irqrestore(&dm_device.ha_lock, flags);

	return ret;
}

static unsigned long handle_pg_range(unsigned long pg_start,
				     unsigned long pg_count)
{
	unsigned long start_pfn = pg_start;
	unsigned long pfn_cnt = pg_count;
	unsigned long size;
	struct hv_hotadd_state *has;
	unsigned long pgs_ol = 0;
	unsigned long old_covered_state;
	unsigned long res = 0, flags;

	pr_debug("Hot adding %lu pages starting at pfn 0x%lx.\n", pg_count,
		 pg_start);

	spin_lock_irqsave(&dm_device.ha_lock, flags);
	list_for_each_entry(has, &dm_device.ha_region_list, list) {
		/*
		 * If the pfn range we are dealing with is not in the current
		 * "hot add block", move on.
		 */
		if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
			continue;

		old_covered_state = has->covered_end_pfn;

		if (start_pfn < has->ha_end_pfn) {
			/*
			 * This is the case where we are backing pages
			 * in an already hot added region. Bring
			 * these pages online first.
			 */
			pgs_ol = has->ha_end_pfn - start_pfn;
			if (pgs_ol > pfn_cnt)
				pgs_ol = pfn_cnt;

			has->covered_end_pfn += pgs_ol;
			pfn_cnt -= pgs_ol;
			/*
			 * Check if the corresponding memory block is already
			 * online by checking its last previously backed page.
			 * In case it is, we need to bring the rest (which was
			 * not backed previously) online too.
			 */
			if (start_pfn > has->start_pfn &&
			    !PageReserved(pfn_to_page(start_pfn - 1)))
				hv_bring_pgs_online(has, start_pfn, pgs_ol);

		}

		if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) {
			/*
			 * We have some residual hot add range
			 * that needs to be hot added; hot add
			 * it now. Hot add a multiple of
			 * HA_CHUNK that fully covers the pages
			 * we have.
			 */
			size = (has->end_pfn - has->ha_end_pfn);
			if (pfn_cnt <= size) {
				size = ((pfn_cnt / HA_CHUNK) * HA_CHUNK);
				if (pfn_cnt % HA_CHUNK)
					size += HA_CHUNK;
			} else {
				pfn_cnt = size;
			}
			spin_unlock_irqrestore(&dm_device.ha_lock, flags);
			hv_mem_hot_add(has->ha_end_pfn, size, pfn_cnt, has);
			spin_lock_irqsave(&dm_device.ha_lock, flags);
		}
		/*
		 * If we managed to online any pages that were given to us,
		 * we declare success.
		 */
		res = has->covered_end_pfn - old_covered_state;
		break;
	}
	spin_unlock_irqrestore(&dm_device.ha_lock, flags);

	return res;
}

static unsigned long process_hot_add(unsigned long pg_start,
				     unsigned long pfn_cnt,
				     unsigned long rg_start,
				     unsigned long rg_size)
{
	struct hv_hotadd_state *ha_region = NULL;
	int covered;
	unsigned long flags;

	if (pfn_cnt == 0)
		return 0;

	if (!dm_device.host_specified_ha_region) {
		covered = pfn_covered(pg_start, pfn_cnt);
		if (covered < 0)
			return 0;

		if (covered)
			goto do_pg_range;
	}

	/*
	 * If the host has specified a hot-add range, deal with it first.
	 */

	if (rg_size != 0) {
		ha_region = kzalloc(sizeof(struct hv_hotadd_state), GFP_KERNEL);
		if (!ha_region)
			return 0;

		INIT_LIST_HEAD(&ha_region->list);
		INIT_LIST_HEAD(&ha_region->gap_list);

		ha_region->start_pfn = rg_start;
		ha_region->ha_end_pfn = rg_start;
		ha_region->covered_start_pfn = pg_start;
		ha_region->covered_end_pfn = pg_start;
		ha_region->end_pfn = rg_start + rg_size;

		spin_lock_irqsave(&dm_device.ha_lock, flags);
		list_add_tail(&ha_region->list, &dm_device.ha_region_list);
		spin_unlock_irqrestore(&dm_device.ha_lock, flags);
	}

do_pg_range:
	/*
	 * Process the specified page range, bringing the pages
	 * online if possible.
	 */
	return handle_pg_range(pg_start, pfn_cnt);
}

#endif

static void hot_add_req(struct work_struct *dummy)
{
	struct dm_hot_add_response resp;
#ifdef CONFIG_MEMORY_HOTPLUG
	unsigned long pg_start, pfn_cnt;
	unsigned long rg_start, rg_sz;
#endif
	struct hv_dynmem_device *dm = &dm_device;

	memset(&resp, 0, sizeof(struct dm_hot_add_response));
	resp.hdr.type = DM_MEM_HOT_ADD_RESPONSE;
	resp.hdr.size = sizeof(struct dm_hot_add_response);

#ifdef CONFIG_MEMORY_HOTPLUG
	pg_start = dm->ha_wrk.ha_page_range.finfo.start_page;
	pfn_cnt = dm->ha_wrk.ha_page_range.finfo.page_cnt;

	rg_start = dm->ha_wrk.ha_region_range.finfo.start_page;
	rg_sz = dm->ha_wrk.ha_region_range.finfo.page_cnt;

	if ((rg_start == 0) && (!dm->host_specified_ha_region)) {
		unsigned long region_size;
		unsigned long region_start;

		/*
		 * The host has not specified the hot-add region.
		 * Based on the hot-add page range being specified,
		 * compute a hot-add region that can cover the pages
		 * that need to be hot-added while ensuring the alignment
		 * and size requirements of Linux as it relates to hot-add.
		 */
		region_start = pg_start;
		region_size = (pfn_cnt / HA_CHUNK) * HA_CHUNK;
		if (pfn_cnt % HA_CHUNK)
			region_size += HA_CHUNK;

		region_start = (pg_start / HA_CHUNK) * HA_CHUNK;

		rg_start = region_start;
		rg_sz = region_size;
	}

	if (do_hot_add)
		resp.page_count = process_hot_add(pg_start, pfn_cnt,
						  rg_start, rg_sz);

	dm->num_pages_added += resp.page_count;
#endif
	/*
	 * The result field of the response structure has the
	 * following semantics:
	 *
	 * 1. If all or some pages hot-added: Guest should return success.
	 *
	 * 2. If no pages could be hot-added:
	 *
	 * If the guest returns success, then the host
	 * will not attempt any further hot-add operations. This
	 * signifies a permanent failure.
	 *
	 * If the guest returns failure, then this failure will be
	 * treated as a transient failure and the host may retry the
	 * hot-add operation after some delay.
	 */
	if (resp.page_count > 0)
		resp.result = 1;
	else if (!do_hot_add)
		resp.result = 1;
	else
		resp.result = 0;

	if (!do_hot_add || (resp.page_count == 0))
		pr_info("Memory hot add failed\n");

	dm->state = DM_INITIALIZED;
	resp.hdr.trans_id = atomic_inc_return(&trans_id);
	vmbus_sendpacket(dm->dev->channel, &resp,
			 sizeof(struct dm_hot_add_response),
			 (unsigned long)NULL,
			 VM_PKT_DATA_INBAND, 0);
}

static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
{
	struct dm_info_header *info_hdr;

	info_hdr = (struct dm_info_header *)msg->info;

	switch (info_hdr->type) {
	case INFO_TYPE_MAX_PAGE_CNT:
		if (info_hdr->data_size == sizeof(__u64)) {
			__u64 *max_page_count = (__u64 *)&info_hdr[1];

			pr_info("INFO_TYPE_MAX_PAGE_CNT = %llu\n",
				*max_page_count);
		}

		break;
	default:
		pr_info("Received Unknown type: %d\n", info_hdr->type);
	}
}

static unsigned long compute_balloon_floor(void)
{
	unsigned long min_pages;
#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
	/* Simple continuous piecewise linear function:
	 *  max MiB -> min MiB  gradient
	 *       0         0
	 *      16        16
	 *      32        24
	 *     128        72    (1/2)
	 *     512       168    (1/4)
	 *    2048       360    (1/8)
	 *    8192       744    (1/16)
	 *   32768      1512    (1/32)
	 */
	if (totalram_pages < MB2PAGES(128))
		min_pages = MB2PAGES(8) + (totalram_pages >> 1);
	else if (totalram_pages < MB2PAGES(512))
		min_pages = MB2PAGES(40) + (totalram_pages >> 2);
	else if (totalram_pages < MB2PAGES(2048))
		min_pages = MB2PAGES(104) + (totalram_pages >> 3);
	else if (totalram_pages < MB2PAGES(8192))
		min_pages = MB2PAGES(232) + (totalram_pages >> 4);
	else
		min_pages = MB2PAGES(488) + (totalram_pages >> 5);
#undef MB2PAGES
	return min_pages;
}

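/*
 * Worked example (informational): on a guest with 2048 MiB of RAM,
 * totalram_pages falls in the MB2PAGES(2048)..MB2PAGES(8192) bucket above,
 * so the floor is MB2PAGES(232) + totalram_pages / 16, i.e.
 * 232 MiB + 128 MiB = 360 MiB, matching the table in compute_balloon_floor().
 * balloon_up() refuses to balloon the guest below this amount.
 */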
/*
 * Post our status as it relates to memory pressure to the
 * host. Host expects the guests to post this status
 * periodically at 1 second intervals.
 *
 * The metrics specified in this protocol are very Windows
 * specific and so we cook up numbers here to convey our memory
 * pressure.
 */

static void post_status(struct hv_dynmem_device *dm)
{
	struct dm_status status;
	unsigned long now = jiffies;
	unsigned long last_post = last_post_time;

	if (pressure_report_delay > 0) {
		--pressure_report_delay;
		return;
	}

	if (!time_after(now, (last_post_time + HZ)))
		return;

	memset(&status, 0, sizeof(struct dm_status));
	status.hdr.type = DM_STATUS_REPORT;
	status.hdr.size = sizeof(struct dm_status);
	status.hdr.trans_id = atomic_inc_return(&trans_id);

	/*
	 * The host expects the guest to report free and committed memory.
	 * Furthermore, the host expects the pressure information to include
	 * the ballooned out pages. For a given amount of memory that we are
	 * managing we need to compute a floor below which we should not
	 * balloon. Compute this and add it to the pressure report.
	 * We also need to report all offline pages (num_pages_added -
	 * num_pages_onlined) as committed to the host, otherwise it can try
	 * asking us to balloon them out.
	 */
	status.num_avail = si_mem_available();
	status.num_committed = vm_memory_committed() +
		dm->num_pages_ballooned +
		(dm->num_pages_added > dm->num_pages_onlined ?
		 dm->num_pages_added - dm->num_pages_onlined : 0) +
		compute_balloon_floor();

	/*
	 * If our transaction ID is no longer current, just don't
	 * send the status. This can happen if we were interrupted
	 * after we picked our transaction ID.
	 */
	if (status.hdr.trans_id != atomic_read(&trans_id))
		return;

	/*
	 * If the last post time that we sampled has changed,
	 * we have raced, don't post the status.
	 */
	if (last_post != last_post_time)
		return;

	last_post_time = jiffies;
	vmbus_sendpacket(dm->dev->channel, &status,
			 sizeof(struct dm_status),
			 (unsigned long)NULL,
			 VM_PKT_DATA_INBAND, 0);

}

static void free_balloon_pages(struct hv_dynmem_device *dm,
			       union dm_mem_page_range *range_array)
{
	int num_pages = range_array->finfo.page_cnt;
	__u64 start_frame = range_array->finfo.start_page;
	struct page *pg;
	int i;

	for (i = 0; i < num_pages; i++) {
		pg = pfn_to_page(i + start_frame);
		__free_page(pg);
		dm->num_pages_ballooned--;
	}
}

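/*
 * Allocation strategy (descriptive, derived from the code below and from
 * balloon_up()): the host requests balloon pages in 2 MiB multiples, so we
 * first try alloc_unit = 512 pages (one 2 MiB, order-9 allocation) and use
 * split_page() so the individual 4 KiB pages can later be returned to the
 * host, and freed by free_balloon_pages(), in any order. balloon_up() drops
 * alloc_unit to 1 page once 2 MiB allocations start failing.
 */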
static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
					unsigned int num_pages,
					struct dm_balloon_response *bl_resp,
					int alloc_unit)
{
	unsigned int i = 0;
	struct page *pg;

	if (num_pages < alloc_unit)
		return 0;

	for (i = 0; (i * alloc_unit) < num_pages; i++) {
		if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) >
			PAGE_SIZE)
			return i * alloc_unit;

		/*
		 * We execute this code in a thread context. Furthermore,
		 * we don't want the kernel to try too hard.
		 */
		pg = alloc_pages(GFP_HIGHUSER | __GFP_NORETRY |
				__GFP_NOMEMALLOC | __GFP_NOWARN,
				get_order(alloc_unit << PAGE_SHIFT));

		if (!pg)
			return i * alloc_unit;

		dm->num_pages_ballooned += alloc_unit;

		/*
		 * If we allocated 2M pages, split them so we
		 * can free them in any order we get.
		 */

		if (alloc_unit != 1)
			split_page(pg, get_order(alloc_unit << PAGE_SHIFT));

		bl_resp->range_count++;
		bl_resp->range_array[i].finfo.start_page =
			page_to_pfn(pg);
		bl_resp->range_array[i].finfo.page_cnt = alloc_unit;
		bl_resp->hdr.size += sizeof(union dm_mem_page_range);

	}

	return num_pages;
}

static void balloon_up(struct work_struct *dummy)
{
	unsigned int num_pages = dm_device.balloon_wrk.num_pages;
	unsigned int num_ballooned = 0;
	struct dm_balloon_response *bl_resp;
	int alloc_unit;
	int ret;
	bool done = false;
	int i;
	long avail_pages;
	unsigned long floor;

	/* The host balloons pages in 2M granularity. */
	WARN_ON_ONCE(num_pages % PAGES_IN_2M != 0);

	/*
	 * We will attempt 2M allocations. However, if we fail to
	 * allocate 2M chunks, we will go back to 4k allocations.
	 */
	alloc_unit = 512;

	avail_pages = si_mem_available();
	floor = compute_balloon_floor();

	/* Refuse to balloon below the floor, keep the 2M granularity. */
	if (avail_pages < num_pages || avail_pages - num_pages < floor) {
		pr_warn("Balloon request will be partially fulfilled. %s\n",
			avail_pages < num_pages ? "Not enough memory." :
			"Balloon floor reached.");

		num_pages = avail_pages > floor ? (avail_pages - floor) : 0;
		num_pages -= num_pages % PAGES_IN_2M;
	}

	while (!done) {
		bl_resp = (struct dm_balloon_response *)send_buffer;
		memset(send_buffer, 0, PAGE_SIZE);
		bl_resp->hdr.type = DM_BALLOON_RESPONSE;
		bl_resp->hdr.size = sizeof(struct dm_balloon_response);
		bl_resp->more_pages = 1;

		num_pages -= num_ballooned;
		num_ballooned = alloc_balloon_pages(&dm_device, num_pages,
						    bl_resp, alloc_unit);

		if (alloc_unit != 1 && num_ballooned == 0) {
			alloc_unit = 1;
			continue;
		}

		if (num_ballooned == 0 || num_ballooned == num_pages) {
			pr_debug("Ballooned %u out of %u requested pages.\n",
				num_pages, dm_device.balloon_wrk.num_pages);

			bl_resp->more_pages = 0;
			done = true;
			dm_device.state = DM_INITIALIZED;
		}

		/*
		 * We are pushing a lot of data through the channel;
		 * deal with transient failures caused by the lack of
		 * space in the ring buffer.
		 */

		do {
			bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
			ret = vmbus_sendpacket(dm_device.dev->channel,
						bl_resp,
						bl_resp->hdr.size,
						(unsigned long)NULL,
						VM_PKT_DATA_INBAND, 0);

			if (ret == -EAGAIN)
				msleep(20);
			post_status(&dm_device);
		} while (ret == -EAGAIN);

		if (ret) {
			/*
			 * Free up the memory we allocated.
			 */
			pr_info("Balloon response failed\n");

			for (i = 0; i < bl_resp->range_count; i++)
				free_balloon_pages(&dm_device,
						   &bl_resp->range_array[i]);

			done = true;
		}
	}

}

static void balloon_down(struct hv_dynmem_device *dm,
			 struct dm_unballoon_request *req)
{
	union dm_mem_page_range *range_array = req->range_array;
	int range_count = req->range_count;
	struct dm_unballoon_response resp;
	int i;
	unsigned int prev_pages_ballooned = dm->num_pages_ballooned;

	for (i = 0; i < range_count; i++) {
		free_balloon_pages(dm, &range_array[i]);
		complete(&dm_device.config_event);
	}

	pr_debug("Freed %u ballooned pages.\n",
		 prev_pages_ballooned - dm->num_pages_ballooned);

	if (req->more_pages == 1)
		return;

	memset(&resp, 0, sizeof(struct dm_unballoon_response));
	resp.hdr.type = DM_UNBALLOON_RESPONSE;
	resp.hdr.trans_id = atomic_inc_return(&trans_id);
	resp.hdr.size = sizeof(struct dm_unballoon_response);

	vmbus_sendpacket(dm_device.dev->channel, &resp,
			 sizeof(struct dm_unballoon_response),
			 (unsigned long)NULL,
			 VM_PKT_DATA_INBAND, 0);

	dm->state = DM_INITIALIZED;
}

static void balloon_onchannelcallback(void *context);

static int dm_thread_func(void *dm_dev)
{
	struct hv_dynmem_device *dm = dm_dev;

	while (!kthread_should_stop()) {
		wait_for_completion_interruptible_timeout(
						&dm_device.config_event, 1*HZ);
		/*
		 * The host expects us to post information on the memory
		 * pressure every second.
		 */
		reinit_completion(&dm_device.config_event);
		post_status(dm);
	}

	return 0;
}

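/*
 * Version negotiation flow (descriptive, derived from this function and from
 * balloon_probe()): the guest first requests DYNMEM_PROTOCOL_VERSION_WIN10;
 * if the host declines, it retries with DYNMEM_PROTOCOL_VERSION_WIN8 and
 * finally DYNMEM_PROTOCOL_VERSION_WIN7 as the last attempt before the
 * service is shut down.
 */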
static void version_resp(struct hv_dynmem_device *dm,
			 struct dm_version_response *vresp)
{
	struct dm_version_request version_req;
	int ret;

	if (vresp->is_accepted) {
		/*
		 * We are done; wakeup the
		 * context waiting for version
		 * negotiation.
		 */
		complete(&dm->host_event);
		return;
	}
	/*
	 * If there are more versions to try, continue
	 * with negotiations; if not, shut down the service
	 * since we are not able to negotiate a suitable version
	 * number with the host.
	 */
	if (dm->next_version == 0)
		goto version_error;

	memset(&version_req, 0, sizeof(struct dm_version_request));
	version_req.hdr.type = DM_VERSION_REQUEST;
	version_req.hdr.size = sizeof(struct dm_version_request);
	version_req.hdr.trans_id = atomic_inc_return(&trans_id);
	version_req.version.version = dm->next_version;
	dm->version = version_req.version.version;

	/*
	 * Set the next version to try in case current version fails.
	 * Win7 protocol ought to be the last one to try.
	 */
	switch (version_req.version.version) {
	case DYNMEM_PROTOCOL_VERSION_WIN8:
		dm->next_version = DYNMEM_PROTOCOL_VERSION_WIN7;
		version_req.is_last_attempt = 0;
		break;
	default:
		dm->next_version = 0;
		version_req.is_last_attempt = 1;
	}

	ret = vmbus_sendpacket(dm->dev->channel, &version_req,
			       sizeof(struct dm_version_request),
			       (unsigned long)NULL,
			       VM_PKT_DATA_INBAND, 0);

	if (ret)
		goto version_error;

	return;

version_error:
	dm->state = DM_INIT_ERROR;
	complete(&dm->host_event);
}

static void cap_resp(struct hv_dynmem_device *dm,
		     struct dm_capabilities_resp_msg *cap_resp)
{
	if (!cap_resp->is_accepted) {
		pr_info("Capabilities not accepted by host\n");
		dm->state = DM_INIT_ERROR;
	}
	complete(&dm->host_event);
}

static void balloon_onchannelcallback(void *context)
{
	struct hv_device *dev = context;
	u32 recvlen;
	u64 requestid;
	struct dm_message *dm_msg;
	struct dm_header *dm_hdr;
	struct hv_dynmem_device *dm = hv_get_drvdata(dev);
	struct dm_balloon *bal_msg;
	struct dm_hot_add *ha_msg;
	union dm_mem_page_range *ha_pg_range;
	union dm_mem_page_range *ha_region;

	memset(recv_buffer, 0, sizeof(recv_buffer));
	vmbus_recvpacket(dev->channel, recv_buffer,
			 PAGE_SIZE, &recvlen, &requestid);

	if (recvlen > 0) {
		dm_msg = (struct dm_message *)recv_buffer;
		dm_hdr = &dm_msg->hdr;

		switch (dm_hdr->type) {
		case DM_VERSION_RESPONSE:
			version_resp(dm,
				 (struct dm_version_response *)dm_msg);
			break;

		case DM_CAPABILITIES_RESPONSE:
			cap_resp(dm,
				 (struct dm_capabilities_resp_msg *)dm_msg);
			break;

		case DM_BALLOON_REQUEST:
			if (dm->state == DM_BALLOON_UP)
				pr_warn("Currently ballooning\n");
			bal_msg = (struct dm_balloon *)recv_buffer;
			dm->state = DM_BALLOON_UP;
			dm_device.balloon_wrk.num_pages = bal_msg->num_pages;
			schedule_work(&dm_device.balloon_wrk.wrk);
			break;

		case DM_UNBALLOON_REQUEST:
			dm->state = DM_BALLOON_DOWN;
			balloon_down(dm,
				 (struct dm_unballoon_request *)recv_buffer);
			break;

		case DM_MEM_HOT_ADD_REQUEST:
			if (dm->state == DM_HOT_ADD)
				pr_warn("Currently hot-adding\n");
			dm->state = DM_HOT_ADD;
			ha_msg = (struct dm_hot_add *)recv_buffer;
			if (ha_msg->hdr.size == sizeof(struct dm_hot_add)) {
				/*
				 * This is a normal hot-add request specifying
				 * hot-add memory.
				 */
				dm->host_specified_ha_region = false;
				ha_pg_range = &ha_msg->range;
				dm->ha_wrk.ha_page_range = *ha_pg_range;
				dm->ha_wrk.ha_region_range.page_range = 0;
			} else {
				/*
				 * Host is specifying that we first hot-add
				 * a region and then partially populate this
				 * region.
				 */
				dm->host_specified_ha_region = true;
				ha_pg_range = &ha_msg->range;
				ha_region = &ha_pg_range[1];
				dm->ha_wrk.ha_page_range = *ha_pg_range;
				dm->ha_wrk.ha_region_range = *ha_region;
			}
			schedule_work(&dm_device.ha_wrk.wrk);
			break;

		case DM_INFO_MESSAGE:
			process_info(dm, (struct dm_info_msg *)dm_msg);
			break;

		default:
			pr_err("Unhandled message: type: %d\n", dm_hdr->type);

		}
	}

}

static int balloon_probe(struct hv_device *dev,
			 const struct hv_vmbus_device_id *dev_id)
{
	int ret;
	unsigned long t;
	struct dm_version_request version_req;
	struct dm_capabilities cap_msg;

#ifdef CONFIG_MEMORY_HOTPLUG
	do_hot_add = hot_add;
#else
	do_hot_add = false;
#endif

	/*
	 * First allocate a send buffer.
	 */

	send_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!send_buffer)
		return -ENOMEM;

	ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0,
			 balloon_onchannelcallback, dev);

	if (ret)
		goto probe_error0;

	dm_device.dev = dev;
	dm_device.state = DM_INITIALIZING;
	dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN8;
	init_completion(&dm_device.host_event);
	init_completion(&dm_device.config_event);
	INIT_LIST_HEAD(&dm_device.ha_region_list);
	spin_lock_init(&dm_device.ha_lock);
	INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);
	INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req);
	dm_device.host_specified_ha_region = false;

	dm_device.thread =
		 kthread_run(dm_thread_func, &dm_device, "hv_balloon");
	if (IS_ERR(dm_device.thread)) {
		ret = PTR_ERR(dm_device.thread);
		goto probe_error1;
	}

#ifdef CONFIG_MEMORY_HOTPLUG
	set_online_page_callback(&hv_online_page);
	register_memory_notifier(&hv_memory_nb);
#endif

	hv_set_drvdata(dev, &dm_device);
	/*
	 * Initiate the handshake with the host and negotiate
	 * a version that the host can support. We start with the
	 * highest version number and go down if the host cannot
	 * support it.
	 */
	memset(&version_req, 0, sizeof(struct dm_version_request));
	version_req.hdr.type = DM_VERSION_REQUEST;
	version_req.hdr.size = sizeof(struct dm_version_request);
	version_req.hdr.trans_id = atomic_inc_return(&trans_id);
	version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN10;
	version_req.is_last_attempt = 0;
	dm_device.version = version_req.version.version;

	ret = vmbus_sendpacket(dev->channel, &version_req,
			       sizeof(struct dm_version_request),
			       (unsigned long)NULL,
			       VM_PKT_DATA_INBAND, 0);
	if (ret)
		goto probe_error2;

	t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto probe_error2;
	}

	/*
	 * If we could not negotiate a compatible version with the host,
	 * fail the probe function.
	 */
	if (dm_device.state == DM_INIT_ERROR) {
		ret = -ETIMEDOUT;
		goto probe_error2;
	}

	pr_info("Using Dynamic Memory protocol version %u.%u\n",
		DYNMEM_MAJOR_VERSION(dm_device.version),
		DYNMEM_MINOR_VERSION(dm_device.version));

	/*
	 * Now submit our capabilities to the host.
	 */
	memset(&cap_msg, 0, sizeof(struct dm_capabilities));
	cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
	cap_msg.hdr.size = sizeof(struct dm_capabilities);
	cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);

	cap_msg.caps.cap_bits.balloon = 1;
	cap_msg.caps.cap_bits.hot_add = 1;

	/*
	 * Specify our alignment requirements as they relate to
	 * memory hot-add. Specify 128MB alignment.
	 */
	cap_msg.caps.cap_bits.hot_add_alignment = 7;

	/*
	 * Currently the host does not use these
	 * values and we set them to what is done in the
	 * Windows driver.
	 */
	cap_msg.min_page_cnt = 0;
	cap_msg.max_page_number = -1;

	ret = vmbus_sendpacket(dev->channel, &cap_msg,
			       sizeof(struct dm_capabilities),
			       (unsigned long)NULL,
			       VM_PKT_DATA_INBAND, 0);
	if (ret)
		goto probe_error2;

	t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto probe_error2;
	}

	/*
	 * If the host does not like our capabilities,
	 * fail the probe function.
	 */
	if (dm_device.state == DM_INIT_ERROR) {
		ret = -ETIMEDOUT;
		goto probe_error2;
	}

	dm_device.state = DM_INITIALIZED;

	return 0;

probe_error2:
#ifdef CONFIG_MEMORY_HOTPLUG
	restore_online_page_callback(&hv_online_page);
#endif
	kthread_stop(dm_device.thread);

probe_error1:
	vmbus_close(dev->channel);
probe_error0:
	kfree(send_buffer);
	return ret;
}

static int balloon_remove(struct hv_device *dev)
{
	struct hv_dynmem_device *dm = hv_get_drvdata(dev);
	struct hv_hotadd_state *has, *tmp;
	struct hv_hotadd_gap *gap, *tmp_gap;
	unsigned long flags;

	if (dm->num_pages_ballooned != 0)
		pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);

	cancel_work_sync(&dm->balloon_wrk.wrk);
	cancel_work_sync(&dm->ha_wrk.wrk);

	vmbus_close(dev->channel);
	kthread_stop(dm->thread);
	kfree(send_buffer);
#ifdef CONFIG_MEMORY_HOTPLUG
	restore_online_page_callback(&hv_online_page);
	unregister_memory_notifier(&hv_memory_nb);
#endif
	spin_lock_irqsave(&dm_device.ha_lock, flags);
	list_for_each_entry_safe(has, tmp, &dm->ha_region_list, list) {
		list_for_each_entry_safe(gap, tmp_gap, &has->gap_list, list) {
			list_del(&gap->list);
			kfree(gap);
		}
		list_del(&has->list);
		kfree(has);
	}
	spin_unlock_irqrestore(&dm_device.ha_lock, flags);

	return 0;
}

static const struct hv_vmbus_device_id id_table[] = {
	/* Dynamic Memory Class ID */
	/* 525074DC-8985-46e2-8057-A307DC18A502 */
	{ HV_DM_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

static struct hv_driver balloon_drv = {
	.name = "hv_balloon",
	.id_table = id_table,
	.probe =  balloon_probe,
	.remove =  balloon_remove,
};

static int __init init_balloon_drv(void)
{

	return vmbus_driver_register(&balloon_drv);
}

module_init(init_balloon_drv);

MODULE_DESCRIPTION("Hyper-V Balloon");
MODULE_LICENSE("GPL");