/*
 * Copyright (c) 2012, Microsoft Corporation.
 *
 * Author:
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mman.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/memory_hotplug.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/percpu_counter.h>

#include <linux/hyperv.h>

/*
 * We begin with definitions supporting the Dynamic Memory protocol
 * with the host.
 *
 * Begin protocol definitions.
 */

/*
 * Protocol versions. The low word is the minor version, the high word the
 * major version.
 *
 * History:
 * Initial version 1.0
 * Changed to 0.1 on 2009/03/25
 * Changed to 0.2 on 2009/05/14
 * Changed to 0.3 on 2009/12/03
 * Changed to 1.0 on 2011/04/05
 */

#define DYNMEM_MAKE_VERSION(Major, Minor) ((__u32)(((Major) << 16) | (Minor)))
#define DYNMEM_MAJOR_VERSION(Version) ((__u32)(Version) >> 16)
#define DYNMEM_MINOR_VERSION(Version) ((__u32)(Version) & 0xff)

enum {
	DYNMEM_PROTOCOL_VERSION_1 = DYNMEM_MAKE_VERSION(0, 3),
	DYNMEM_PROTOCOL_VERSION_2 = DYNMEM_MAKE_VERSION(1, 0),

	DYNMEM_PROTOCOL_VERSION_WIN7 = DYNMEM_PROTOCOL_VERSION_1,
	DYNMEM_PROTOCOL_VERSION_WIN8 = DYNMEM_PROTOCOL_VERSION_2,

	DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN8
};

/*
 * Message Types
 */

enum dm_message_type {
	/*
	 * Version 0.3
	 */
	DM_ERROR = 0,
	DM_VERSION_REQUEST = 1,
	DM_VERSION_RESPONSE = 2,
	DM_CAPABILITIES_REPORT = 3,
	DM_CAPABILITIES_RESPONSE = 4,
	DM_STATUS_REPORT = 5,
	DM_BALLOON_REQUEST = 6,
	DM_BALLOON_RESPONSE = 7,
	DM_UNBALLOON_REQUEST = 8,
	DM_UNBALLOON_RESPONSE = 9,
	DM_MEM_HOT_ADD_REQUEST = 10,
	DM_MEM_HOT_ADD_RESPONSE = 11,
	DM_VERSION_03_MAX = 11,
	/*
	 * Version 1.0.
	 */
	DM_INFO_MESSAGE = 12,
	DM_VERSION_1_MAX = 12
};

/*
 * Structures defining the dynamic memory management
 * protocol.
 */

union dm_version {
	struct {
		__u16 minor_version;
		__u16 major_version;
	};
	__u32 version;
} __packed;

union dm_caps {
	struct {
		__u64 balloon:1;
		__u64 hot_add:1;
		__u64 reservedz:62;
	} cap_bits;
	__u64 caps;
} __packed;

union dm_mem_page_range {
	struct {
		/*
		 * The PFN number of the first page in the range.
		 * 40 bits is the architectural limit of a PFN
		 * number for AMD64.
		 */
		__u64 start_page:40;
		/*
		 * The number of pages in the range.
		 */
		__u64 page_cnt:24;
	} finfo;
	__u64 page_range;
} __packed;
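/*
 * Illustration (not part of the protocol headers): with the bitfield
 * layout above, on a little-endian guest a range starting at PFN 0x1000
 * that is 512 pages long is carried on the wire as
 *
 *	page_range = ((__u64)512 << 40) | 0x1000;
 *
 * i.e. the start PFN occupies the low 40 bits and the page count the
 * high 24 bits of the 64-bit value.
 */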

/*
 * The header for all dynamic memory messages:
 *
 * type: Type of the message.
 * size: Size of the message in bytes, including the header.
 * trans_id: The guest is responsible for manufacturing this ID.
 */

struct dm_header {
	__u16 type;
	__u16 size;
	__u32 trans_id;
} __packed;

/*
 * A generic message format for dynamic memory.
 * Specific message formats are defined later in the file.
 */

struct dm_message {
	struct dm_header hdr;
	__u8 data[]; /* enclosed message */
} __packed;

/*
 * Specific message types supporting the dynamic memory protocol.
 */

/*
 * Version negotiation message. Sent from the guest to the host.
 * The guest is free to try different versions until the host
 * accepts the version.
 *
 * dm_version: The protocol version requested.
 * is_last_attempt: If TRUE, this is the last version the guest will request.
 * reservedz: Reserved field, set to zero.
 */

struct dm_version_request {
	struct dm_header hdr;
	union dm_version version;
	__u32 is_last_attempt:1;
	__u32 reservedz:31;
} __packed;

/*
 * Version response message; sent from the host to the guest and
 * indicates if the host has accepted the version sent by the guest.
 *
 * is_accepted: If TRUE, the host has accepted the version and the guest
 * should proceed to the next stage of the protocol. FALSE indicates that
 * the guest should retry with a different version.
 *
 * reservedz: Reserved field, set to zero.
 */

struct dm_version_response {
	struct dm_header hdr;
	__u64 is_accepted:1;
	__u64 reservedz:63;
} __packed;

/*
 * Message reporting capabilities. This is sent from the guest to the
 * host.
 */

struct dm_capabilities {
	struct dm_header hdr;
	union dm_caps caps;
	__u64 min_page_cnt;
	__u64 max_page_number;
} __packed;

/*
 * Response to the capabilities message. This is sent from the host to the
 * guest. This message notifies if the host has accepted the guest's
 * capabilities. If the host has not accepted, the guest must shut down
 * the service.
 *
 * is_accepted: Indicates if the host has accepted the guest's capabilities.
 * reservedz: Must be 0.
 */

struct dm_capabilities_resp_msg {
	struct dm_header hdr;
	__u64 is_accepted:1;
	__u64 reservedz:63;
} __packed;

/*
 * This message is used to report memory pressure from the guest.
 * This message is not part of any transaction and there is no
 * response to this message.
 *
 * num_avail: Available memory in pages.
 * num_committed: Committed memory in pages.
 * page_file_size: The accumulated size of all page files
 * in the system in pages.
 * zero_free: The number of zero and free pages.
 * page_file_writes: The writes to the page file in pages.
 * io_diff: An indicator of file cache efficiency or page file activity,
 * calculated as File Cache Page Fault Count - Page Read Count.
 * This value is in pages.
 *
 * Some of these metrics are Windows specific and fortunately
 * the algorithm on the host side that computes the guest memory
 * pressure only uses the num_committed value.
 */

struct dm_status {
	struct dm_header hdr;
	__u64 num_avail;
	__u64 num_committed;
	__u64 page_file_size;
	__u64 zero_free;
	__u32 page_file_writes;
	__u32 io_diff;
} __packed;

/*
 * Message to ask the guest to allocate memory - balloon up message.
 * This message is sent from the host to the guest. The guest may not be
 * able to allocate as much memory as requested.
 *
 * num_pages: number of pages to allocate.
 */

struct dm_balloon {
	struct dm_header hdr;
	__u32 num_pages;
	__u32 reservedz;
} __packed;

/*
 * Balloon response message; this message is sent from the guest
 * to the host in response to the balloon message.
 *
 * reservedz: Reserved; must be set to zero.
 * more_pages: If FALSE, this is the last message of the transaction.
 * If TRUE, there will be at least one more message from the guest.
 *
 * range_count: The number of ranges in the range array.
 *
 * range_array: An array of page ranges returned to the host.
 *
 */

struct dm_balloon_response {
	struct dm_header hdr;
	__u32 reservedz;
	__u32 more_pages:1;
	__u32 range_count:31;
	union dm_mem_page_range range_array[];
} __packed;

/*
 * Un-balloon message; this message is sent from the host
 * to the guest to give the guest more memory.
 *
 * more_pages: If FALSE, this is the last message of the transaction.
 * If TRUE, there will be at least one more message from the host.
 *
 * reservedz: Reserved; must be set to zero.
 *
 * range_count: The number of ranges in the range array.
 *
 * range_array: An array of page ranges returned to the host.
 *
 */

struct dm_unballoon_request {
	struct dm_header hdr;
	__u32 more_pages:1;
	__u32 reservedz:31;
	__u32 range_count;
	union dm_mem_page_range range_array[];
} __packed;

/*
 * Un-balloon response message; this message is sent from the guest
 * to the host in response to an unballoon request.
 *
 */

struct dm_unballoon_response {
	struct dm_header hdr;
} __packed;

/*
 * Hot add request message. Message sent from the host to the guest.
 *
 * mem_range: Memory range to hot add.
 *
 * On Linux we currently don't support this since we cannot hot add
 * arbitrary granularity of memory.
 */

struct dm_hot_add {
	struct dm_header hdr;
	union dm_mem_page_range range;
} __packed;

/*
 * Hot add response message.
 * This message is sent by the guest to report the status of a hot add request.
 * If page_count is less than the requested page count, then the host should
 * assume all further hot add requests will fail, since this indicates that
 * the guest has hit an upper physical memory barrier.
 *
 * Hot adds may also fail due to low resources; in this case, the guest must
 * not complete this message until the hot add can succeed, and the host must
 * not send a new hot add request until the response is sent.
 * If the VSC fails to hot add memory DYNMEM_NUMBER_OF_UNSUCCESSFUL_HOTADD_ATTEMPTS
 * times, it fails the request.
 *
 *
 * page_count: number of pages that were successfully hot added.
 *
 * result: result of the operation; 1: success, 0: failure.
 *
 */

struct dm_hot_add_response {
	struct dm_header hdr;
	__u32 page_count;
	__u32 result;
} __packed;

/*
 * Types of information sent from host to the guest.
 */

enum dm_info_type {
	INFO_TYPE_MAX_PAGE_CNT = 0,
	MAX_INFO_TYPE
};

/*
 * Header for the information message.
 */

struct dm_info_header {
	enum dm_info_type type;
	__u32 data_size;
} __packed;

/*
 * This message is sent from the host to the guest to pass
 * some relevant information (win8 addition).
 *
 * reserved: not used.
 * info_size: size of the information blob.
 * info: information blob.
 */

struct dm_info_msg {
	struct dm_header hdr;
	__u32 reserved;
	__u32 info_size;
	__u8 info[];
};

/*
 * End protocol definitions.
 */

static bool hot_add;
static bool do_hot_add;
/*
 * Delay reporting memory pressure by
 * the specified number of seconds.
 */
static uint pressure_report_delay = 30;

module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(hot_add, "If set, attempt memory hot_add");

module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
static atomic_t trans_id = ATOMIC_INIT(0);

static int dm_ring_size = (5 * PAGE_SIZE);

/*
 * Driver specific state.
 */

enum hv_dm_state {
	DM_INITIALIZING = 0,
	DM_INITIALIZED,
	DM_BALLOON_UP,
	DM_BALLOON_DOWN,
	DM_HOT_ADD,
	DM_INIT_ERROR
};

static __u8 recv_buffer[PAGE_SIZE];
static __u8 *send_buffer;
#define PAGES_IN_2M 512

struct hv_dynmem_device {
	struct hv_device *dev;
	enum hv_dm_state state;
	struct completion host_event;
	struct completion config_event;

	/*
	 * Number of pages we have currently ballooned out.
	 */
	unsigned int num_pages_ballooned;

	/*
	 * This thread handles both balloon/hot-add
	 * requests from the host as well as notifying
	 * the host with regards to memory pressure in
	 * the guest.
	 */
	struct task_struct *thread;

	/*
	 * We start with the highest version we can support
	 * and downgrade based on the host; we save here the
	 * next version to try.
	 */
	__u32 next_version;
};

static struct hv_dynmem_device dm_device;
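/*
 * State flow (descriptive only): the device starts in DM_INITIALIZING,
 * moves to DM_INITIALIZED once version and capability negotiation
 * succeed, and temporarily enters DM_BALLOON_UP, DM_BALLOON_DOWN or
 * DM_HOT_ADD while a host request is being serviced; DM_INIT_ERROR is
 * terminal and fails the probe.
 */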

static void hot_add_req(struct hv_dynmem_device *dm, struct dm_hot_add *msg)
{

	struct dm_hot_add_response resp;

	if (do_hot_add) {

		pr_info("Memory hot add not supported\n");

		/*
		 * Currently we do not support hot add.
		 * Just fail the request.
		 */
	}

	memset(&resp, 0, sizeof(struct dm_hot_add_response));
	resp.hdr.type = DM_MEM_HOT_ADD_RESPONSE;
	resp.hdr.size = sizeof(struct dm_hot_add_response);
	resp.hdr.trans_id = atomic_inc_return(&trans_id);

	resp.page_count = 0;
	resp.result = 0;

	dm->state = DM_INITIALIZED;
	vmbus_sendpacket(dm->dev->channel, &resp,
			sizeof(struct dm_hot_add_response),
			(unsigned long)NULL,
			VM_PKT_DATA_INBAND, 0);

}

static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
{
	struct dm_info_header *info_hdr;

	info_hdr = (struct dm_info_header *)msg->info;

	switch (info_hdr->type) {
	case INFO_TYPE_MAX_PAGE_CNT:
		pr_info("Received INFO_TYPE_MAX_PAGE_CNT\n");
		pr_info("Data Size is %d\n", info_hdr->data_size);
		break;
	default:
		pr_info("Received Unknown type: %d\n", info_hdr->type);
	}
}

static unsigned long compute_balloon_floor(void)
{
	unsigned long min_pages;
#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
	/* Simple continuous piecewise linear function:
	 *	max MiB -> min MiB	gradient
	 *	     0		 0
	 *	    16		16
	 *	    32		24
	 *	   128		72	(1/2)
	 *	   512	       168	(1/4)
	 *	  2048	       360	(1/8)
	 *	  8192	       552	(1/32)
	 *	 32768	      1320
	 *	131072	      4392
	 */
	if (totalram_pages < MB2PAGES(128))
		min_pages = MB2PAGES(8) + (totalram_pages >> 1);
	else if (totalram_pages < MB2PAGES(512))
		min_pages = MB2PAGES(40) + (totalram_pages >> 2);
	else if (totalram_pages < MB2PAGES(2048))
		min_pages = MB2PAGES(104) + (totalram_pages >> 3);
	else
		min_pages = MB2PAGES(296) + (totalram_pages >> 5);
#undef MB2PAGES
	return min_pages;
}
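/*
 * Worked example (illustrative): for a guest with 1024 MiB of RAM,
 * MB2PAGES(512) <= totalram_pages < MB2PAGES(2048), so the floor is
 * MB2PAGES(104) + totalram_pages / 8 = 104 MiB + 128 MiB = 232 MiB,
 * matching the 1/8 gradient segment of the table above.
 */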

/*
 * Post our status as it relates to memory pressure to the
 * host. The host expects the guests to post this status
 * periodically at 1 second intervals.
 *
 * The metrics specified in this protocol are very Windows
 * specific and so we cook up numbers here to convey our memory
 * pressure.
 */

static void post_status(struct hv_dynmem_device *dm)
{
	struct dm_status status;
	struct sysinfo val;

	if (pressure_report_delay > 0) {
		--pressure_report_delay;
		return;
	}
	si_meminfo(&val);
	memset(&status, 0, sizeof(struct dm_status));
	status.hdr.type = DM_STATUS_REPORT;
	status.hdr.size = sizeof(struct dm_status);
	status.hdr.trans_id = atomic_inc_return(&trans_id);

	/*
	 * The host expects the guest to report free memory.
	 * Further, the host expects the pressure information to
	 * include the ballooned out pages.
	 * For a given amount of memory that we are managing, we
	 * need to compute a floor below which we should not balloon.
	 * Compute this and add it to the pressure report.
	 */
	status.num_avail = val.freeram;
	status.num_committed = vm_memory_committed() +
				dm->num_pages_ballooned +
				compute_balloon_floor();

	vmbus_sendpacket(dm->dev->channel, &status,
				sizeof(struct dm_status),
				(unsigned long)NULL,
				VM_PKT_DATA_INBAND, 0);

}

static void free_balloon_pages(struct hv_dynmem_device *dm,
			union dm_mem_page_range *range_array)
{
	int num_pages = range_array->finfo.page_cnt;
	__u64 start_frame = range_array->finfo.start_page;
	struct page *pg;
	int i;

	for (i = 0; i < num_pages; i++) {
		pg = pfn_to_page(i + start_frame);
		__free_page(pg);
		dm->num_pages_ballooned--;
	}
}

static int alloc_balloon_pages(struct hv_dynmem_device *dm, int num_pages,
			struct dm_balloon_response *bl_resp, int alloc_unit,
			bool *alloc_error)
{
	int i = 0;
	struct page *pg;

	if (num_pages < alloc_unit)
		return 0;

	for (i = 0; (i * alloc_unit) < num_pages; i++) {
		if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) >
			PAGE_SIZE)
			return i * alloc_unit;

		/*
		 * We execute this code in a thread context. Furthermore,
		 * we don't want the kernel to try too hard.
		 */
		pg = alloc_pages(GFP_HIGHUSER | __GFP_NORETRY |
				__GFP_NOMEMALLOC | __GFP_NOWARN,
				get_order(alloc_unit << PAGE_SHIFT));

		if (!pg) {
			*alloc_error = true;
			return i * alloc_unit;
		}

		dm->num_pages_ballooned += alloc_unit;

		bl_resp->range_count++;
		bl_resp->range_array[i].finfo.start_page =
			page_to_pfn(pg);
		bl_resp->range_array[i].finfo.page_cnt = alloc_unit;
		bl_resp->hdr.size += sizeof(union dm_mem_page_range);

	}

	return num_pages;
}
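/*
 * Capacity note (illustrative arithmetic, assuming 4K PAGE_SIZE): each
 * dm_mem_page_range is 8 bytes, so a single PAGE_SIZE response can carry
 * about (4096 - sizeof(struct dm_balloon_response)) / 8, i.e. roughly
 * 510 ranges, before alloc_balloon_pages() stops and more_pages is used
 * to continue the transaction in another message.
 */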

static void balloon_up(struct hv_dynmem_device *dm, struct dm_balloon *req)
{
	int num_pages = req->num_pages;
	int num_ballooned = 0;
	struct dm_balloon_response *bl_resp;
	int alloc_unit;
	int ret;
	bool alloc_error = false;
	bool done = false;
	int i;

	/*
	 * Currently, we only support 4k allocations.
	 */
	alloc_unit = 1;

	while (!done) {
		bl_resp = (struct dm_balloon_response *)send_buffer;
		memset(send_buffer, 0, PAGE_SIZE);
		bl_resp->hdr.type = DM_BALLOON_RESPONSE;
		bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
		bl_resp->hdr.size = sizeof(struct dm_balloon_response);
		bl_resp->more_pages = 1;

		num_pages -= num_ballooned;
		num_ballooned = alloc_balloon_pages(dm, num_pages,
						bl_resp, alloc_unit,
						&alloc_error);

		if ((alloc_error) || (num_ballooned == num_pages)) {
			bl_resp->more_pages = 0;
			done = true;
			dm->state = DM_INITIALIZED;
		}

		/*
		 * We are pushing a lot of data through the channel;
		 * deal with transient failures caused by the lack of
		 * space in the ring buffer.
		 */

		do {
			ret = vmbus_sendpacket(dm_device.dev->channel,
						bl_resp,
						bl_resp->hdr.size,
						(unsigned long)NULL,
						VM_PKT_DATA_INBAND, 0);

			if (ret == -EAGAIN)
				msleep(20);

		} while (ret == -EAGAIN);

		if (ret) {
			/*
			 * Free up the memory we allocated.
			 */
			pr_info("Balloon response failed\n");

			for (i = 0; i < bl_resp->range_count; i++)
				free_balloon_pages(dm,
						&bl_resp->range_array[i]);

			done = true;
		}
	}

}

static void balloon_down(struct hv_dynmem_device *dm,
			struct dm_unballoon_request *req)
{
	union dm_mem_page_range *range_array = req->range_array;
	int range_count = req->range_count;
	struct dm_unballoon_response resp;
	int i;

	for (i = 0; i < range_count; i++)
		free_balloon_pages(dm, &range_array[i]);

	if (req->more_pages == 1)
		return;

	memset(&resp, 0, sizeof(struct dm_unballoon_response));
	resp.hdr.type = DM_UNBALLOON_RESPONSE;
	resp.hdr.trans_id = atomic_inc_return(&trans_id);
	resp.hdr.size = sizeof(struct dm_unballoon_response);

	vmbus_sendpacket(dm_device.dev->channel, &resp,
				sizeof(struct dm_unballoon_response),
				(unsigned long)NULL,
				VM_PKT_DATA_INBAND, 0);

	dm->state = DM_INITIALIZED;
}

static void balloon_onchannelcallback(void *context);

static int dm_thread_func(void *dm_dev)
{
	struct hv_dynmem_device *dm = dm_dev;
	int t;
	unsigned long scan_start;

	while (!kthread_should_stop()) {
		t = wait_for_completion_timeout(&dm_device.config_event, 1*HZ);
		/*
		 * The host expects us to post information on the memory
		 * pressure every second.
		 */

		if (t == 0)
			post_status(dm);

		scan_start = jiffies;
		switch (dm->state) {
		case DM_BALLOON_UP:
			balloon_up(dm, (struct dm_balloon *)recv_buffer);
			break;

		case DM_HOT_ADD:
			hot_add_req(dm, (struct dm_hot_add *)recv_buffer);
			break;
		default:
			break;
		}

		if (!time_in_range(jiffies, scan_start, scan_start + HZ))
			post_status(dm);

	}

	return 0;
}
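/*
 * Timing note (descriptive only): since dm_thread_func() wakes at most
 * about once per second, pressure_report_delay is decremented once per
 * wakeup in post_status(), so the default of 30 delays the first
 * pressure report by roughly 30 seconds after the driver loads.
 */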

static void version_resp(struct hv_dynmem_device *dm,
			struct dm_version_response *vresp)
{
	struct dm_version_request version_req;
	int ret;

	if (vresp->is_accepted) {
		/*
		 * We are done; wakeup the
		 * context waiting for version
		 * negotiation.
		 */
		complete(&dm->host_event);
		return;
	}
	/*
	 * If there are more versions to try, continue
	 * with negotiations; if not, shut down the
	 * service since we are not able to negotiate
	 * a suitable version number with the host.
	 */
	if (dm->next_version == 0)
		goto version_error;

	dm->next_version = 0;
	memset(&version_req, 0, sizeof(struct dm_version_request));
	version_req.hdr.type = DM_VERSION_REQUEST;
	version_req.hdr.size = sizeof(struct dm_version_request);
	version_req.hdr.trans_id = atomic_inc_return(&trans_id);
	version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
	version_req.is_last_attempt = 1;

	ret = vmbus_sendpacket(dm->dev->channel, &version_req,
				sizeof(struct dm_version_request),
				(unsigned long)NULL,
				VM_PKT_DATA_INBAND, 0);

	if (ret)
		goto version_error;

	return;

version_error:
	dm->state = DM_INIT_ERROR;
	complete(&dm->host_event);
}

static void cap_resp(struct hv_dynmem_device *dm,
			struct dm_capabilities_resp_msg *cap_resp)
{
	if (!cap_resp->is_accepted) {
		pr_info("Capabilities not accepted by host\n");
		dm->state = DM_INIT_ERROR;
	}
	complete(&dm->host_event);
}

static void balloon_onchannelcallback(void *context)
{
	struct hv_device *dev = context;
	u32 recvlen;
	u64 requestid;
	struct dm_message *dm_msg;
	struct dm_header *dm_hdr;
	struct hv_dynmem_device *dm = hv_get_drvdata(dev);

	memset(recv_buffer, 0, sizeof(recv_buffer));
	vmbus_recvpacket(dev->channel, recv_buffer,
			PAGE_SIZE, &recvlen, &requestid);

	if (recvlen > 0) {
		dm_msg = (struct dm_message *)recv_buffer;
		dm_hdr = &dm_msg->hdr;

		switch (dm_hdr->type) {
		case DM_VERSION_RESPONSE:
			version_resp(dm,
				(struct dm_version_response *)dm_msg);
			break;

		case DM_CAPABILITIES_RESPONSE:
			cap_resp(dm,
				(struct dm_capabilities_resp_msg *)dm_msg);
			break;

		case DM_BALLOON_REQUEST:
			dm->state = DM_BALLOON_UP;
			complete(&dm->config_event);
			break;

		case DM_UNBALLOON_REQUEST:
			dm->state = DM_BALLOON_DOWN;
			balloon_down(dm,
				(struct dm_unballoon_request *)recv_buffer);
			break;

		case DM_MEM_HOT_ADD_REQUEST:
			dm->state = DM_HOT_ADD;
			complete(&dm->config_event);
			break;

		case DM_INFO_MESSAGE:
			process_info(dm, (struct dm_info_msg *)dm_msg);
			break;

		default:
			pr_err("Unhandled message: type: %d\n", dm_hdr->type);

		}
	}

}
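/*
 * Protocol flow summary (descriptive only):
 *
 *	guest					host
 *	DM_VERSION_REQUEST	->
 *				<-	DM_VERSION_RESPONSE
 *	DM_CAPABILITIES_REPORT	->
 *				<-	DM_CAPABILITIES_RESPONSE
 *	DM_STATUS_REPORT	->	(repeated every second)
 *				<-	DM_BALLOON_REQUEST or
 *					DM_UNBALLOON_REQUEST
 *	DM_BALLOON_RESPONSE or	->
 *	DM_UNBALLOON_RESPONSE
 *
 * balloon_probe() below drives the first two exchanges; the channel
 * callback above dispatches everything that follows.
 */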

static int balloon_probe(struct hv_device *dev,
			const struct hv_vmbus_device_id *dev_id)
{
	int ret, t;
	struct dm_version_request version_req;
	struct dm_capabilities cap_msg;

	do_hot_add = hot_add;

	/*
	 * First allocate a send buffer.
	 */

	send_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!send_buffer)
		return -ENOMEM;

	ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0,
			balloon_onchannelcallback, dev);

	if (ret)
		goto probe_error0;

	dm_device.dev = dev;
	dm_device.state = DM_INITIALIZING;
	dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN7;
	init_completion(&dm_device.host_event);
	init_completion(&dm_device.config_event);

	dm_device.thread =
		kthread_run(dm_thread_func, &dm_device, "hv_balloon");
	if (IS_ERR(dm_device.thread)) {
		ret = PTR_ERR(dm_device.thread);
		goto probe_error1;
	}

	hv_set_drvdata(dev, &dm_device);
	/*
	 * Initiate the handshake with the host and negotiate
	 * a version that the host can support. We start with the
	 * highest version number and go down if the host cannot
	 * support it.
	 */
	memset(&version_req, 0, sizeof(struct dm_version_request));
	version_req.hdr.type = DM_VERSION_REQUEST;
	version_req.hdr.size = sizeof(struct dm_version_request);
	version_req.hdr.trans_id = atomic_inc_return(&trans_id);
	version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
	version_req.is_last_attempt = 0;

	ret = vmbus_sendpacket(dev->channel, &version_req,
				sizeof(struct dm_version_request),
				(unsigned long)NULL,
				VM_PKT_DATA_INBAND,
				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret)
		goto probe_error2;

	t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto probe_error2;
	}

	/*
	 * If we could not negotiate a compatible version with the host,
	 * fail the probe function.
	 */
	if (dm_device.state == DM_INIT_ERROR) {
		ret = -ETIMEDOUT;
		goto probe_error2;
	}
	/*
	 * Now submit our capabilities to the host.
	 */
	memset(&cap_msg, 0, sizeof(struct dm_capabilities));
	cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
	cap_msg.hdr.size = sizeof(struct dm_capabilities);
	cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);

	cap_msg.caps.cap_bits.balloon = 1;
	/*
	 * While we currently don't support hot-add,
	 * we still advertise this capability since the
	 * host requires that guests participating in the
	 * dynamic memory protocol support hot add.
	 */
	cap_msg.caps.cap_bits.hot_add = 1;

	/*
	 * Currently the host does not use these
	 * values and we set them to what is done in the
	 * Windows driver.
	 */
	cap_msg.min_page_cnt = 0;
	cap_msg.max_page_number = -1;

	ret = vmbus_sendpacket(dev->channel, &cap_msg,
				sizeof(struct dm_capabilities),
				(unsigned long)NULL,
				VM_PKT_DATA_INBAND,
				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret)
		goto probe_error2;

	t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto probe_error2;
	}

	/*
	 * If the host does not like our capabilities,
	 * fail the probe function.
	 */
	if (dm_device.state == DM_INIT_ERROR) {
		ret = -ETIMEDOUT;
		goto probe_error2;
	}

	dm_device.state = DM_INITIALIZED;

	return 0;

probe_error2:
	kthread_stop(dm_device.thread);

probe_error1:
	vmbus_close(dev->channel);
probe_error0:
	kfree(send_buffer);
	return ret;
}
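/*
 * Note on the error labels above (descriptive only): they unwind in
 * reverse order of setup; probe_error2 stops the worker thread,
 * probe_error1 closes the VMBus channel, and probe_error0 frees the
 * send buffer, so each failure point releases exactly what has been
 * acquired so far.
 */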

static int balloon_remove(struct hv_device *dev)
{
	struct hv_dynmem_device *dm = hv_get_drvdata(dev);

	if (dm->num_pages_ballooned != 0)
		pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);

	vmbus_close(dev->channel);
	kthread_stop(dm->thread);
	kfree(send_buffer);

	return 0;
}

static const struct hv_vmbus_device_id id_table[] = {
	/* Dynamic Memory Class ID */
	/* 525074DC-8985-46e2-8057-A307DC18A502 */
	{ HV_DM_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

static struct hv_driver balloon_drv = {
	.name = "hv_balloon",
	.id_table = id_table,
	.probe = balloon_probe,
	.remove = balloon_remove,
};

static int __init init_balloon_drv(void)
{

	return vmbus_driver_register(&balloon_drv);
}

static void __exit exit_balloon_drv(void)
{

	vmbus_driver_unregister(&balloon_drv);
}

module_init(init_balloon_drv);
module_exit(exit_balloon_drv);

MODULE_DESCRIPTION("Hyper-V Balloon");
MODULE_VERSION(HV_DRV_VERSION);
MODULE_LICENSE("GPL");