/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright (c) 2011, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */

#ifndef _HYPERV_H
#define _HYPERV_H

#include <uapi/linux/hyperv.h>

#include <linux/mm.h>
#include <linux/types.h>
#include <linux/scatterlist.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
#include <linux/reciprocal_div.h>
#include <asm/hyperv-tlfs.h>

#define MAX_PAGE_BUFFER_COUNT				32
#define MAX_MULTIPAGE_BUFFER_COUNT			32 /* 128K */

#pragma pack(push, 1)

/*
 * Types for GPADL, which decide how the GPADL header is created.
 *
 * There is little difference between BUFFER and RING if PAGE_SIZE is the
 * same as HV_HYP_PAGE_SIZE.
 *
 * If PAGE_SIZE is bigger than HV_HYP_PAGE_SIZE, the headers of ring buffers
 * will be of PAGE_SIZE; however, only the first HV_HYP_PAGE will be put
 * into the gpadl. Therefore the number of HV_HYP_PAGEs and the indexes of
 * each HV_HYP_PAGE will differ between the two types of GPADL, for example
 * if PAGE_SIZE is 64K:
 *
 * BUFFER:
 *
 * gva:    |--       64k      --|--       64k      --| ... |
 * gpa:    | 4k | 4k | ... | 4k | 4k | 4k | ... | 4k |
 * index:  0    1    2     15   16   17   18 .. 31   32 ...
 *         |    |    ...   |    |    |    ...   |   ...
 *         v    V    V     V    V    V          V
 * gpadl:  | 4k | 4k | ... | 4k | 4k | 4k | ... | 4k | ... |
 * index:  0    1    2 ... 15   16   17   18 .. 31   32 ...
 *
 * RING:
 *
 *         | header  |           data           | header  |     data      |
 * gva:    |-- 64k --|--       64k      --| ... |-- 64k --|-- 64k --| ... |
 * gpa:    | 4k | .. | 4k | 4k | ... | 4k | ... | 4k | .. | 4k | .. | ... |
 * index:  0    1    16   17   18    31   ...   n   n+1  n+16  ...  2n
 *         |         /    /          /          |         /         /
 *         |        /    /          /           |        /         /
 *         |       /    /   ...    /    ...     |       /    ...  /
 *         |      /    /          /             |      /         /
 *         |     /    /          /              |     /         /
 *         V    V    V          V               V    V         v
 * gpadl:  | 4k | 4k |   ...    |    ...        | 4k | 4k | ... |
 * index:  0    1    2   ...    16   ...       n-15 n-14 n-13 ... 2n-30
 */
enum hv_gpadl_type {
	HV_GPADL_BUFFER,
	HV_GPADL_RING
};

/* Single-page buffer */
struct hv_page_buffer {
	u32 len;
	u32 offset;
	u64 pfn;
};

/* Multiple-page buffer */
struct hv_multipage_buffer {
	/* Length and Offset determine the # of pfns in the array */
	u32 len;
	u32 offset;
	u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT];
};

/*
 * Multiple-page buffer array; the pfn array is variable size:
 * the number of entries in the PFN array is determined by
 * "len" and "offset".
 */
struct hv_mpb_array {
	/* Length and Offset determine the # of pfns in the array */
	u32 len;
	u32 offset;
	u64 pfn_array[];
};

/* 0x18 includes the proprietary packet header */
#define MAX_PAGE_BUFFER_PACKET		(0x18 + \
					(sizeof(struct hv_page_buffer) * \
					 MAX_PAGE_BUFFER_COUNT))
#define MAX_MULTIPAGE_BUFFER_PACKET	(0x18 + \
					 sizeof(struct hv_multipage_buffer))


#pragma pack(pop)
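
/*
 * Illustrative sketch (hypothetical helper, not part of the VMBus API):
 * filling in a single hv_page_buffer for a kernel buffer that fits in
 * one 4K Hyper-V page. Note that pfn and offset are in units of
 * HV_HYP_PAGE_SIZE, not the guest's PAGE_SIZE.
 */
static inline void example_fill_page_buffer(struct hv_page_buffer *pb,
					    void *buf, u32 len)
{
	pb->pfn = __pa(buf) >> HV_HYP_PAGE_SHIFT;
	pb->offset = (unsigned long)buf & (HV_HYP_PAGE_SIZE - 1);
	pb->len = len;
}
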
struct hv_ring_buffer {
	/* Offset in bytes from the start of ring data below */
	u32 write_index;

	/* Offset in bytes from the start of ring data below */
	u32 read_index;

	u32 interrupt_mask;

	/*
	 * WS2012/Win8 and later versions of Hyper-V implement interrupt
	 * driven flow management. The feature bit feat_pending_send_sz
	 * is set by the host on the host->guest ring buffer, and by the
	 * guest on the guest->host ring buffer.
	 *
	 * The meaning of the feature bit is a bit complex in that it has
	 * semantics that apply to both ring buffers.  If the guest sets
	 * the feature bit in the guest->host ring buffer, the guest is
	 * telling the host that:
	 * 1) It will set the pending_send_sz field in the guest->host ring
	 *    buffer when it is waiting for space to become available, and
	 * 2) It will read the pending_send_sz field in the host->guest
	 *    ring buffer and interrupt the host when it frees enough space
	 *
	 * Similarly, if the host sets the feature bit in the host->guest
	 * ring buffer, the host is telling the guest that:
	 * 1) It will set the pending_send_sz field in the host->guest ring
	 *    buffer when it is waiting for space to become available, and
	 * 2) It will read the pending_send_sz field in the guest->host
	 *    ring buffer and interrupt the guest when it frees enough space
	 *
	 * If either the guest or host does not set the feature bit that it
	 * owns, that guest or host must do polling if it encounters a full
	 * ring buffer, and not signal the other end with an interrupt.
	 */
	u32 pending_send_sz;
	u32 reserved1[12];
	union {
		struct {
			u32 feat_pending_send_sz:1;
		};
		u32 value;
	} feature_bits;

	/* Pad it to PAGE_SIZE so that data starts on a page boundary */
	u8 reserved2[PAGE_SIZE - 68];

	/*
	 * Ring data starts here + RingDataStartOffset
	 * !!! DO NOT place any fields below this !!!
	 */
	u8 buffer[];
} __packed;

/* Calculate the proper size of a ringbuffer; it must be page-aligned */
#define VMBUS_RING_SIZE(payload_sz) PAGE_ALIGN(sizeof(struct hv_ring_buffer) + \
					       (payload_sz))

struct hv_ring_buffer_info {
	struct hv_ring_buffer *ring_buffer;
	u32 ring_size;			/* Includes the shared header */
	struct reciprocal_value ring_size_div10_reciprocal;
	spinlock_t ring_lock;

	u32 ring_datasize;		/* < ring_size */
	u32 priv_read_index;
	/*
	 * The ring buffer mutex lock. This lock prevents the ring buffer from
	 * being freed while the ring buffer is being accessed.
	 */
	struct mutex ring_buffer_mutex;

	/* Buffer that holds a copy of an incoming host packet */
	void *pkt_buffer;
	u32 pkt_buffer_size;
};


static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi)
{
	u32 read_loc, write_loc, dsize, read;

	dsize = rbi->ring_datasize;
	read_loc = rbi->ring_buffer->read_index;
	write_loc = READ_ONCE(rbi->ring_buffer->write_index);

	read = write_loc >= read_loc ? (write_loc - read_loc) :
		(dsize - read_loc) + write_loc;

	return read;
}

static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
{
	u32 read_loc, write_loc, dsize, write;

	dsize = rbi->ring_datasize;
	read_loc = READ_ONCE(rbi->ring_buffer->read_index);
	write_loc = rbi->ring_buffer->write_index;

	write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
		read_loc - write_loc;
	return write;
}

static inline u32 hv_get_avail_to_write_percent(
		const struct hv_ring_buffer_info *rbi)
{
	u32 avail_write = hv_get_bytes_to_write(rbi);

	/* (x << 3) + (x << 1) == x * 10; dividing by ring_size/10 yields a percentage */
	return reciprocal_divide(
			(avail_write  << 3) + (avail_write << 1),
			rbi->ring_size_div10_reciprocal);
}
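
/*
 * Illustrative sketch (hypothetical helper, not part of the VMBus API):
 * a sender could use the helper above as a back-off watermark, e.g.
 * pause queueing work while less than 10% of the outbound ring is free.
 */
static inline bool example_ring_nearly_full(const struct hv_ring_buffer_info *rbi)
{
	return hv_get_avail_to_write_percent(rbi) < 10;
}
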
/*
 * The VMBUS version is a 32 bit entity broken up into
 * two 16 bit quantities: major_number.minor_number.
 *
 * 0 . 13 (Windows Server 2008)
 * 1 . 1  (Windows 7)
 * 2 . 4  (Windows 8)
 * 3 . 0  (Windows 8.1)
 * 4 . 0  (Windows 10)
 * 4 . 1  (Windows 10 RS3)
 * 5 . 0  (Newer Windows 10)
 * 5 . 1  (Windows 10 RS4)
 * 5 . 2  (Windows Server 2019, RS5)
 * 5 . 3  (Windows Server 2022)
 */

#define VERSION_WS2008		((0 << 16) | (13))
#define VERSION_WIN7		((1 << 16) | (1))
#define VERSION_WIN8		((2 << 16) | (4))
#define VERSION_WIN8_1		((3 << 16) | (0))
#define VERSION_WIN10		((4 << 16) | (0))
#define VERSION_WIN10_V4_1	((4 << 16) | (1))
#define VERSION_WIN10_V5	((5 << 16) | (0))
#define VERSION_WIN10_V5_1	((5 << 16) | (1))
#define VERSION_WIN10_V5_2	((5 << 16) | (2))
#define VERSION_WIN10_V5_3	((5 << 16) | (3))
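
/*
 * Illustrative sketch: recovering the two 16 bit halves of a negotiated
 * version such as vmbus_proto_version (declared later in this header).
 * The helper names are hypothetical.
 */
static inline u16 example_vmbus_major(u32 version)
{
	return version >> 16;
}

static inline u16 example_vmbus_minor(u32 version)
{
	return version & 0xffff;
}
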
/* Make the maximum size of a pipe payload 16K */
#define MAX_PIPE_DATA_PAYLOAD		(sizeof(u8) * 16384)

/* Define PipeMode values. */
#define VMBUS_PIPE_TYPE_BYTE		0x00000000
#define VMBUS_PIPE_TYPE_MESSAGE		0x00000004

/* The size of the user defined data buffer for non-pipe offers. */
#define MAX_USER_DEFINED_BYTES		120

/* The size of the user defined data buffer for pipe offers. */
#define MAX_PIPE_USER_DEFINED_BYTES	116

/*
 * At the center of the Channel Management library is the Channel Offer. This
 * struct contains the fundamental information about an offer.
 */
struct vmbus_channel_offer {
	guid_t if_type;
	guid_t if_instance;

	/*
	 * These two fields are not currently used.
	 */
	u64 reserved1;
	u64 reserved2;

	u16 chn_flags;
	u16 mmio_megabytes;	/* in megabytes (value * 1024 * 1024 bytes) */

	union {
		/* Non-pipes: The user has MAX_USER_DEFINED_BYTES bytes. */
		struct {
			unsigned char user_def[MAX_USER_DEFINED_BYTES];
		} std;

		/*
		 * Pipes:
		 * The following structure is an integrated pipe protocol, which
		 * is implemented on top of standard user-defined data. Pipe
		 * clients have MAX_PIPE_USER_DEFINED_BYTES left for their own
		 * use.
		 */
		struct {
			u32  pipe_mode;
			unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES];
		} pipe;
	} u;
	/*
	 * The sub_channel_index is defined in Win8: a value of zero means a
	 * primary channel and a value of non-zero means a sub-channel.
	 *
	 * Before Win8, the field is reserved, meaning it's always zero.
	 */
	u16 sub_channel_index;
	u16 reserved3;
} __packed;

/* Server Flags */
#define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE	1
#define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES	2
#define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS		4
#define VMBUS_CHANNEL_NAMED_PIPE_MODE			0x10
#define VMBUS_CHANNEL_LOOPBACK_OFFER			0x100
#define VMBUS_CHANNEL_PARENT_OFFER			0x200
#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION	0x400
#define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER		0x2000
struct vmpacket_descriptor {
	u16 type;	/* enum vmbus_packet_type */
	u16 offset8;	/* offset to the payload, in 8-byte units */
	u16 len8;	/* total packet length, in 8-byte units */
	u16 flags;
	u64 trans_id;
} __packed;

struct vmpacket_header {
	u32 prev_pkt_start_offset;
	struct vmpacket_descriptor descriptor;
} __packed;

struct vmtransfer_page_range {
	u32 byte_count;
	u32 byte_offset;
} __packed;

struct vmtransfer_page_packet_header {
	struct vmpacket_descriptor d;
	u16 xfer_pageset_id;
	u8  sender_owns_set;
	u8 reserved;
	u32 range_cnt;
	struct vmtransfer_page_range ranges[1];
} __packed;

struct vmgpadl_packet_header {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 reserved;
} __packed;

struct vmadd_remove_transfer_page_set {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u16 xfer_pageset_id;
	u16 reserved;
} __packed;

/*
 * This structure defines a range in guest physical space that can be made to
 * look virtually contiguous.
 */
struct gpa_range {
	u32 byte_count;
	u32 byte_offset;
	u64 pfn_array[];
};

/*
 * This is the format for an Establish Gpadl packet, which contains a handle by
 * which this GPADL will be known and a set of GPA ranges associated with it.
 * This can be converted to an MDL by the guest OS. If there are multiple GPA
 * ranges, then the resulting MDL will be "chained", representing multiple VA
 * ranges.
 */
struct vmestablish_gpadl {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 range_cnt;
	struct gpa_range range[1];
} __packed;

/*
 * This is the format for a Teardown Gpadl packet, which indicates that the
 * GPADL handle in the Establish Gpadl packet will never be referenced again.
 */
struct vmteardown_gpadl {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 reserved;	/* for alignment to an 8-byte boundary */
} __packed;

/*
 * This is the format for a GPA-Direct packet, which contains a set of GPA
 * ranges, in addition to commands and/or data.
 */
struct vmdata_gpa_direct {
	struct vmpacket_descriptor d;
	u32 reserved;
	u32 range_cnt;
	struct gpa_range range[1];
} __packed;

/* This is the format for an Additional Data Packet. */
struct vmadditional_data {
	struct vmpacket_descriptor d;
	u64 total_bytes;
	u32 offset;
	u32 byte_cnt;
	unsigned char data[1];
} __packed;

union vmpacket_largest_possible_header {
	struct vmpacket_descriptor simple_hdr;
	struct vmtransfer_page_packet_header xfer_page_hdr;
	struct vmgpadl_packet_header gpadl_hdr;
	struct vmadd_remove_transfer_page_set add_rm_xfer_page_hdr;
	struct vmestablish_gpadl establish_gpadl_hdr;
	struct vmteardown_gpadl teardown_gpadl_hdr;
	struct vmdata_gpa_direct data_gpa_direct_hdr;
};

#define VMPACKET_DATA_START_ADDRESS(__packet)	\
	(void *)(((unsigned char *)__packet) +	\
	 ((struct vmpacket_descriptor *)__packet)->offset8 * 8)

#define VMPACKET_DATA_LENGTH(__packet)		\
	((((struct vmpacket_descriptor *)__packet)->len8 -	\
	  ((struct vmpacket_descriptor *)__packet)->offset8) * 8)

#define VMPACKET_TRANSFER_MODE(__packet)	\
	(((struct vmpacket_descriptor *)__packet)->type)
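
/*
 * Illustrative sketch: offset8 and len8 in the descriptor are in units
 * of 8 bytes, so the total on-the-wire size of a packet follows directly
 * (hypothetical helper; see also hv_pkt_datalen() later in this header).
 */
static inline u32 example_pkt_total_bytes(const struct vmpacket_descriptor *desc)
{
	return desc->len8 << 3;	/* len8 * 8 */
}
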
enum vmbus_packet_type {
	VM_PKT_INVALID				= 0x0,
	VM_PKT_SYNCH				= 0x1,
	VM_PKT_ADD_XFER_PAGESET			= 0x2,
	VM_PKT_RM_XFER_PAGESET			= 0x3,
	VM_PKT_ESTABLISH_GPADL			= 0x4,
	VM_PKT_TEARDOWN_GPADL			= 0x5,
	VM_PKT_DATA_INBAND			= 0x6,
	VM_PKT_DATA_USING_XFER_PAGES		= 0x7,
	VM_PKT_DATA_USING_GPADL			= 0x8,
	VM_PKT_DATA_USING_GPA_DIRECT		= 0x9,
	VM_PKT_CANCEL_REQUEST			= 0xa,
	VM_PKT_COMP				= 0xb,
	VM_PKT_DATA_USING_ADDITIONAL_PKT	= 0xc,
	VM_PKT_ADDITIONAL_DATA			= 0xd
};

#define VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED	1


/* Version 1 messages */
enum vmbus_channel_message_type {
	CHANNELMSG_INVALID			=  0,
	CHANNELMSG_OFFERCHANNEL			=  1,
	CHANNELMSG_RESCIND_CHANNELOFFER		=  2,
	CHANNELMSG_REQUESTOFFERS		=  3,
	CHANNELMSG_ALLOFFERS_DELIVERED		=  4,
	CHANNELMSG_OPENCHANNEL			=  5,
	CHANNELMSG_OPENCHANNEL_RESULT		=  6,
	CHANNELMSG_CLOSECHANNEL			=  7,
	CHANNELMSG_GPADL_HEADER			=  8,
	CHANNELMSG_GPADL_BODY			=  9,
	CHANNELMSG_GPADL_CREATED		= 10,
	CHANNELMSG_GPADL_TEARDOWN		= 11,
	CHANNELMSG_GPADL_TORNDOWN		= 12,
	CHANNELMSG_RELID_RELEASED		= 13,
	CHANNELMSG_INITIATE_CONTACT		= 14,
	CHANNELMSG_VERSION_RESPONSE		= 15,
	CHANNELMSG_UNLOAD			= 16,
	CHANNELMSG_UNLOAD_RESPONSE		= 17,
	CHANNELMSG_18				= 18,
	CHANNELMSG_19				= 19,
	CHANNELMSG_20				= 20,
	CHANNELMSG_TL_CONNECT_REQUEST		= 21,
	CHANNELMSG_MODIFYCHANNEL		= 22,
	CHANNELMSG_TL_CONNECT_RESULT		= 23,
	CHANNELMSG_MODIFYCHANNEL_RESPONSE	= 24,
	CHANNELMSG_COUNT
};

/* Hyper-V supports about 2048 channels, and the RELIDs start with 1. */
#define INVALID_RELID	U32_MAX

struct vmbus_channel_message_header {
	enum vmbus_channel_message_type msgtype;
	u32 padding;
} __packed;

/* Query VMBus Version parameters */
struct vmbus_channel_query_vmbus_version {
	struct vmbus_channel_message_header header;
	u32 version;
} __packed;

/* VMBus Version Supported parameters */
struct vmbus_channel_version_supported {
	struct vmbus_channel_message_header header;
	u8 version_supported;
} __packed;

/* Offer Channel parameters */
struct vmbus_channel_offer_channel {
	struct vmbus_channel_message_header header;
	struct vmbus_channel_offer offer;
	u32 child_relid;
	u8 monitorid;
	/*
	 * win7 and beyond split this field into a bit field.
	 */
	u8 monitor_allocated:1;
	u8 reserved:7;
	/*
	 * These are new fields added in win7 and later.
	 * Do not access these fields without checking the
	 * negotiated protocol.
	 *
	 * If "is_dedicated_interrupt" is set, we must not set the
	 * associated bit in the channel bitmap while sending the
	 * interrupt to the host.
	 *
	 * connection_id is to be used in signaling the host.
	 */
	u16 is_dedicated_interrupt:1;
	u16 reserved1:15;
	u32 connection_id;
} __packed;

/* Rescind Offer parameters */
struct vmbus_channel_rescind_offer {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;

static inline u32
hv_ringbuffer_pending_size(const struct hv_ring_buffer_info *rbi)
{
	return rbi->ring_buffer->pending_send_sz;
}

/*
 * Request Offer -- no parameters, SynIC message contains the partition ID
 * Set Snoop -- no parameters, SynIC message contains the partition ID
 * Clear Snoop -- no parameters, SynIC message contains the partition ID
 * All Offers Delivered -- no parameters, SynIC message contains the partition
 *			   ID
 * Flush Client -- no parameters, SynIC message contains the partition ID
 */

/* Open Channel parameters */
struct vmbus_channel_open_channel {
	struct vmbus_channel_message_header header;

	/* Identifies the specific VMBus channel that is being opened. */
	u32 child_relid;

	/* ID making a particular open request at a channel offer unique. */
	u32 openid;

	/* GPADL for the channel's ring buffer. */
	u32 ringbuffer_gpadlhandle;

	/*
	 * Starting with win8, this field will be used to specify
	 * the target virtual processor on which to deliver the interrupt for
	 * the host to guest communication.
	 * Prior to win8, incoming channel interrupts would only
	 * be delivered on cpu 0. Setting this value to 0 would
	 * preserve the earlier behavior.
	 */
	u32 target_vp;

	/*
	 * The upstream ring buffer begins at offset zero in the memory
	 * described by RingBufferGpadlHandle. The downstream ring buffer
	 * follows it at this offset (in pages).
	 */
	u32 downstream_ringbuffer_pageoffset;

	/* User-specific data to be passed along to the server endpoint. */
	unsigned char userdata[MAX_USER_DEFINED_BYTES];
} __packed;

/* Open Channel Result parameters */
struct vmbus_channel_open_result {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 openid;
	u32 status;
} __packed;

/* Modify Channel Result parameters */
struct vmbus_channel_modifychannel_response {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 status;
} __packed;

/* Close channel parameters */
struct vmbus_channel_close_channel {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;

/* Channel Message GPADL */
#define GPADL_TYPE_RING_BUFFER		1
#define GPADL_TYPE_SERVER_SAVE_AREA	2
#define GPADL_TYPE_TRANSACTION		8

/*
 * The number of PFNs in a GPADL message is defined by the number of
 * pages that would be spanned by ByteCount and ByteOffset.  If the
 * implied number of PFNs won't fit in this packet, there will be a
 * follow-up packet that contains more.
 */
struct vmbus_channel_gpadl_header {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
	u16 range_buflen;
	u16 rangecount;
	struct gpa_range range[];
} __packed;
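
/*
 * Illustrative sketch: the PFN count implied by a gpa_range, i.e. the
 * number of 4K Hyper-V pages spanned by byte_offset plus byte_count
 * (hypothetical helper).
 */
static inline u32 example_gpa_range_pfn_count(const struct gpa_range *range)
{
	return (range->byte_offset + range->byte_count +
		HV_HYP_PAGE_SIZE - 1) >> HV_HYP_PAGE_SHIFT;
}
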
/* This is the follow-up packet that contains more PFNs. */
struct vmbus_channel_gpadl_body {
	struct vmbus_channel_message_header header;
	u32 msgnumber;
	u32 gpadl;
	u64 pfn[];
} __packed;

struct vmbus_channel_gpadl_created {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
	u32 creation_status;
} __packed;

struct vmbus_channel_gpadl_teardown {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
} __packed;

struct vmbus_channel_gpadl_torndown {
	struct vmbus_channel_message_header header;
	u32 gpadl;
} __packed;

struct vmbus_channel_relid_released {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;

struct vmbus_channel_initiate_contact {
	struct vmbus_channel_message_header header;
	u32 vmbus_version_requested;
	u32 target_vcpu;	/* The VCPU the host should respond to */
	union {
		u64 interrupt_page;
		struct {
			u8	msg_sint;
			u8	padding1[3];
			u32	padding2;
		};
	};
	u64 monitor_page1;
	u64 monitor_page2;
} __packed;

/* Hyper-V socket: the guest's connect()-ing to the host */
struct vmbus_channel_tl_connect_request {
	struct vmbus_channel_message_header header;
	guid_t guest_endpoint_id;
	guid_t host_service_id;
} __packed;

/* Modify Channel parameters, cf. vmbus_send_modifychannel() */
struct vmbus_channel_modifychannel {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 target_vp;
} __packed;

struct vmbus_channel_version_response {
	struct vmbus_channel_message_header header;
	u8 version_supported;

	u8 connection_state;
	u16 padding;

	/*
	 * On new hosts that support VMBus protocol 5.0, we must use
	 * VMBUS_MESSAGE_CONNECTION_ID_4 for the Initiate Contact Message,
	 * and for subsequent messages, we must use the Message Connection ID
	 * field in the host-returned Version Response Message.
	 *
	 * On old hosts, we should always use VMBUS_MESSAGE_CONNECTION_ID (1).
	 */
	u32 msg_conn_id;
} __packed;

enum vmbus_channel_state {
	CHANNEL_OFFER_STATE,
	CHANNEL_OPENING_STATE,
	CHANNEL_OPEN_STATE,
	CHANNEL_OPENED_STATE,
};

/*
 * Represents each channel msg on the vmbus connection. This is a
 * variable-size data structure depending on the msg type itself.
 */
struct vmbus_channel_msginfo {
	/* Bookkeeping stuff */
	struct list_head msglistentry;

	/* So far, this is only used to handle gpadl body message */
	struct list_head submsglist;

	/* Synchronize the request/response if needed */
	struct completion  waitevent;
	struct vmbus_channel *waiting_channel;
	union {
		struct vmbus_channel_version_supported version_supported;
		struct vmbus_channel_open_result open_result;
		struct vmbus_channel_gpadl_torndown gpadl_torndown;
		struct vmbus_channel_gpadl_created gpadl_created;
		struct vmbus_channel_version_response version_response;
		struct vmbus_channel_modifychannel_response modify_response;
	} response;

	u32 msgsize;
	/*
	 * The channel message that goes out on the "wire".
	 * It will contain at minimum the VMBUS_CHANNEL_MESSAGE_HEADER header.
	 */
	unsigned char msg[];
};

struct vmbus_close_msg {
	struct vmbus_channel_msginfo info;
	struct vmbus_channel_close_channel msg;
};

/* Define connection identifier type. */
union hv_connection_id {
	u32 asu32;
	struct {
		u32 id:24;
		u32 reserved:8;
	} u;
};
enum vmbus_device_type {
	HV_IDE = 0,
	HV_SCSI,
	HV_FC,
	HV_NIC,
	HV_ND,
	HV_PCIE,
	HV_FB,
	HV_KBD,
	HV_MOUSE,
	HV_KVP,
	HV_TS,
	HV_HB,
	HV_SHUTDOWN,
	HV_FCOPY,
	HV_BACKUP,
	HV_DM,
	HV_UNKNOWN,
};

/*
 * Provides request ids for VMBus. Encapsulates guest memory
 * addresses and stores the next available slot in req_arr
 * to generate new ids in constant time.
 */
struct vmbus_requestor {
	u64 *req_arr;
	unsigned long *req_bitmap; /* is a given slot available? */
	u32 size;
	u64 next_request_id;
	spinlock_t req_lock; /* provides atomicity */
};

#define VMBUS_NO_RQSTOR U64_MAX
#define VMBUS_RQST_ERROR (U64_MAX - 1)
/* NetVSC-specific */
#define VMBUS_RQST_ID_NO_RESPONSE (U64_MAX - 2)
/* StorVSC-specific */
#define VMBUS_RQST_INIT (U64_MAX - 2)
#define VMBUS_RQST_RESET (U64_MAX - 3)

struct vmbus_device {
	u16  dev_type;
	guid_t guid;
	bool perf_device;
	bool allowed_in_isolated;
};

#define VMBUS_DEFAULT_MAX_PKT_SIZE 4096
struct vmbus_channel {
	struct list_head listentry;

	struct hv_device *device_obj;

	enum vmbus_channel_state state;

	struct vmbus_channel_offer_channel offermsg;
	/*
	 * These are based on the OfferMsg.MonitorId.
	 * Save it here for easy access.
	 */
	u8 monitor_grp;
	u8 monitor_bit;

	bool rescind; /* got rescind msg */
	bool rescind_ref; /* got rescind msg, got channel reference */
	struct completion rescind_event;

	u32 ringbuffer_gpadlhandle;

	/* Allocated memory for ring buffer */
	struct page *ringbuffer_page;
	u32 ringbuffer_pagecount;
	u32 ringbuffer_send_offset;
	struct hv_ring_buffer_info outbound;	/* send to parent */
	struct hv_ring_buffer_info inbound;	/* receive from parent */

	struct vmbus_close_msg close_msg;

	/* Statistics */
	u64	interrupts;	/* Host to Guest interrupts */
	u64	sig_events;	/* Guest to Host events */

	/*
	 * Guest to host interrupts caused by the outbound ring buffer changing
	 * from empty to not empty.
	 */
	u64 intr_out_empty;

	/*
	 * Indicates that a full outbound ring buffer was encountered. The flag
	 * is set to true when a full outbound ring buffer is encountered and
	 * set to false when a write to the outbound ring buffer is completed.
	 */
	bool out_full_flag;

	/* Channel callback is invoked in softirq context */
	struct tasklet_struct callback_event;
	void (*onchannel_callback)(void *context);
	void *channel_callback_context;

	void (*change_target_cpu_callback)(struct vmbus_channel *channel,
			u32 old, u32 new);

	/*
	 * Synchronize channel scheduling and channel removal; see the inline
	 * comments in vmbus_chan_sched() and vmbus_reset_channel_cb().
	 */
	spinlock_t sched_lock;

	/*
	 * A channel can be marked for one of three modes of reading:
	 *   BATCHED - callback called from tasklet and should read
	 *             channel until empty. Interrupts from the host
	 *             are masked while read is in process (default).
	 *   DIRECT - callback called from tasklet (softirq).
	 *   ISR - callback called in interrupt context and must
	 *         invoke its own deferred processing.
	 *         Host interrupts are disabled and must be re-enabled
	 *         when ring is empty.
	 */
	enum hv_callback_mode {
		HV_CALL_BATCHED,
		HV_CALL_DIRECT,
		HV_CALL_ISR
	} callback_mode;

	bool is_dedicated_interrupt;
	u64 sig_event;

	/*
	 * Starting with win8, this field will be used to specify the
	 * target CPU on which to deliver the interrupt for the host
	 * to guest communication.
	 *
	 * Prior to win8, incoming channel interrupts would only be
	 * delivered on CPU 0. Setting this value to 0 would preserve
	 * the earlier behavior.
	 */
	u32 target_cpu;
	/*
	 * Support for sub-channels. For high performance devices,
	 * it will be useful to have multiple sub-channels to support
	 * a scalable communication infrastructure with the host.
	 * The support for sub-channels is implemented as an extension
	 * to the current infrastructure.
	 * The initial offer is considered the primary channel and this
	 * offer message will indicate if the host supports sub-channels.
	 * The guest is free to ask for sub-channels to be offered and can
	 * open these sub-channels as a normal "primary" channel. However,
	 * all sub-channels will have the same type and instance guids as the
	 * primary channel. Requests sent on a given channel will result in a
	 * response on the same channel.
	 */

	/*
	 * Sub-channel creation callback. This callback will be called in
	 * process context when a sub-channel offer is received from the host.
	 * The guest can open the sub-channel in the context of this callback.
	 */
	void (*sc_creation_callback)(struct vmbus_channel *new_sc);

	/*
	 * Channel rescind callback. Some channels (the hvsock ones) need to
	 * register a callback which is invoked in vmbus_onoffer_rescind().
	 */
	void (*chn_rescind_callback)(struct vmbus_channel *channel);

	/*
	 * All Sub-channels of a primary channel are linked here.
	 */
	struct list_head sc_list;
	/*
	 * The primary channel this sub-channel belongs to.
	 * This will be NULL for the primary channel.
	 */
	struct vmbus_channel *primary_channel;
	/*
	 * Support per-channel state for use by vmbus drivers.
	 */
	void *per_channel_state;

	/*
	 * Defer freeing channel until after all cpu's have
	 * gone through grace period.
	 */
	struct rcu_head rcu;

	/*
	 * For sysfs per-channel properties.
	 */
	struct kobject			kobj;

	/*
	 * For performance critical channels (storage, networking
	 * etc.), Hyper-V has a mechanism to enhance the throughput
	 * at the expense of latency:
	 * When the host is to be signaled, we just set a bit in a shared page
	 * and this bit will be inspected by the hypervisor within a certain
	 * window and if the bit is set, the host will be signaled. The window
	 * of time is the monitor latency - currently around 100 usecs. This
	 * mechanism improves throughput by:
	 *
	 * A) Making the host more efficient - each time it wakes up,
	 *    potentially it will process more packets. The
	 *    monitor latency allows a batch to build up.
	 * B) By deferring the hypercall to signal, we will also minimize
	 *    the interrupts.
	 *
	 * Clearly, these optimizations improve throughput at the expense of
	 * latency. Furthermore, since the channel is shared for both
	 * control and data messages, control messages currently suffer
	 * unnecessary latency adversely impacting performance and boot
	 * time. To fix this issue, permit tagging the channel as being
	 * in "low latency" mode. In this mode, we will bypass the monitor
	 * mechanism.
	 */
	bool low_latency;

	bool probe_done;

	/*
	 * Cache the device ID here for easy access; this is useful, in
	 * particular, in situations where the channel's device_obj has
	 * not been allocated/initialized yet.
	 */
	u16 device_id;

	/*
	 * We must offload the handling of the primary/sub channels
	 * from the single-threaded vmbus_connection.work_queue to
	 * two different workqueues, otherwise we can block
	 * vmbus_connection.work_queue and hang: see vmbus_process_offer().
	 */
	struct work_struct add_channel_work;

	/*
	 * Guest to host interrupts caused by the inbound ring buffer changing
	 * from full to not full while a packet is waiting.
	 */
	u64 intr_in_full;

	/*
	 * The total number of write operations that encountered a full
	 * outbound ring buffer.
	 */
	u64 out_full_total;

	/*
	 * The number of write operations that were the first to encounter a
	 * full outbound ring buffer.
	 */
	u64 out_full_first;

	/* enabling/disabling fuzz testing on the channel (default is false)*/
	bool fuzz_testing_state;

	/*
	 * Interrupt delay will delay the guest from emptying the ring buffer
	 * for a specific amount of time. The delay is in microseconds and will
	 * be between 1 and a maximum of 1000; its default is 0 (no delay).
	 * The message delay will delay guest reading on a per message basis
	 * in microseconds, between 1 and 1000 with the default being 0
	 * (no delay).
	 */
	u32 fuzz_testing_interrupt_delay;
	u32 fuzz_testing_message_delay;

	/* callback to generate a request ID from a request address */
	u64 (*next_request_id_callback)(struct vmbus_channel *channel, u64 rqst_addr);
	/* callback to retrieve a request address from a request ID */
	u64 (*request_addr_callback)(struct vmbus_channel *channel, u64 rqst_id);

	/* request/transaction ids for VMBus */
	struct vmbus_requestor requestor;
	u32 rqstor_size;

	/* The max size of a packet on this channel */
	u32 max_pkt_size;
};

u64 vmbus_next_request_id(struct vmbus_channel *channel, u64 rqst_addr);
u64 vmbus_request_addr(struct vmbus_channel *channel, u64 trans_id);
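
/*
 * Illustrative sketch (hypothetical helper): wiring a channel to the
 * default requestor implementation above before the channel is opened,
 * so that guest request addresses are translated to opaque transaction
 * ids and back. The slot count is an assumption of this sketch.
 */
static inline void example_enable_requestor(struct vmbus_channel *chan,
					    u32 slots)
{
	chan->rqstor_size = slots;	/* sizes the request-id table */
	chan->next_request_id_callback = vmbus_next_request_id;
	chan->request_addr_callback = vmbus_request_addr;
}
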
static inline bool is_hvsock_channel(const struct vmbus_channel *c)
{
	return !!(c->offermsg.offer.chn_flags &
		  VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
}

static inline bool is_sub_channel(const struct vmbus_channel *c)
{
	return c->offermsg.offer.sub_channel_index != 0;
}

static inline void set_channel_read_mode(struct vmbus_channel *c,
					enum hv_callback_mode mode)
{
	c->callback_mode = mode;
}

static inline void set_per_channel_state(struct vmbus_channel *c, void *s)
{
	c->per_channel_state = s;
}

static inline void *get_per_channel_state(struct vmbus_channel *c)
{
	return c->per_channel_state;
}

static inline void set_channel_pending_send_size(struct vmbus_channel *c,
						u32 size)
{
	unsigned long flags;

	if (size) {
		spin_lock_irqsave(&c->outbound.ring_lock, flags);
		++c->out_full_total;

		if (!c->out_full_flag) {
			++c->out_full_first;
			c->out_full_flag = true;
		}
		spin_unlock_irqrestore(&c->outbound.ring_lock, flags);
	} else {
		c->out_full_flag = false;
	}

	c->outbound.ring_buffer->pending_send_sz = size;
}

static inline void set_low_latency_mode(struct vmbus_channel *c)
{
	c->low_latency = true;
}

static inline void clear_low_latency_mode(struct vmbus_channel *c)
{
	c->low_latency = false;
}
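
/*
 * Illustrative sketch (hypothetical helper): when a write finds the
 * outbound ring full, a driver can publish how much space it needs and
 * expect an interrupt once the host has read enough - provided the host
 * advertised feat_pending_send_sz on this ring. Re-checking afterwards
 * closes the race where the host freed the space before it could see
 * pending_send_sz.
 */
static inline bool example_request_space_signal(struct vmbus_channel *c,
						u32 needed)
{
	if (!c->outbound.ring_buffer->feature_bits.feat_pending_send_sz)
		return false;	/* the host will not signal; poll instead */

	set_channel_pending_send_size(c, needed);

	/* true: still full, wait for the host's interrupt */
	return hv_get_bytes_to_write(&c->outbound) < needed;
}
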
void vmbus_onmessage(struct vmbus_channel_message_header *hdr);

int vmbus_request_offers(void);

/*
 * APIs for managing sub-channels.
 */

void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
			void (*sc_cr_cb)(struct vmbus_channel *new_sc));

void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
		void (*chn_rescind_cb)(struct vmbus_channel *));

/*
 * Check if sub-channels have already been offered. This API will be useful
 * when the driver is unloaded after establishing sub-channels. In this case,
 * when the driver is re-loaded, the driver would have to check if the
 * subchannels have already been established before attempting to request
 * the creation of sub-channels.
 * This function returns TRUE to indicate that subchannels have already been
 * created.
 * This function should be invoked after setting the callback function for
 * sub-channel creation.
 */
bool vmbus_are_subchannels_present(struct vmbus_channel *primary);

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_page_buffer {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;
	struct hv_page_buffer range[MAX_PAGE_BUFFER_COUNT];
} __packed;

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_multipage_buffer {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;		/* Always 1 in this case */
	struct hv_multipage_buffer range;
} __packed;

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_packet_mpb_array {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;		/* Always 1 in this case */
	struct hv_mpb_array range;
} __packed;

int vmbus_alloc_ring(struct vmbus_channel *channel,
		     u32 send_size, u32 recv_size);
void vmbus_free_ring(struct vmbus_channel *channel);

int vmbus_connect_ring(struct vmbus_channel *channel,
		       void (*onchannel_callback)(void *context),
		       void *context);
int vmbus_disconnect_ring(struct vmbus_channel *channel);

extern int vmbus_open(struct vmbus_channel *channel,
		      u32 send_ringbuffersize,
		      u32 recv_ringbuffersize,
		      void *userdata,
		      u32 userdatalen,
		      void (*onchannel_callback)(void *context),
		      void *context);

extern void vmbus_close(struct vmbus_channel *channel);

extern int vmbus_sendpacket(struct vmbus_channel *channel,
			    void *buffer,
			    u32 bufferLen,
			    u64 requestid,
			    enum vmbus_packet_type type,
			    u32 flags);
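
/*
 * Illustrative sketch: sending a small inband request and asking the
 * opposite endpoint for a completion packet. The request layout and
 * request id value are left to the caller; the helper name is
 * hypothetical.
 */
static inline int example_send_inband(struct vmbus_channel *chan,
				      void *req, u32 len, u64 reqid)
{
	return vmbus_sendpacket(chan, req, len, reqid, VM_PKT_DATA_INBAND,
				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
}
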
extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
				       struct hv_page_buffer pagebuffers[],
				       u32 pagecount,
				       void *buffer,
				       u32 bufferlen,
				       u64 requestid);

extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
				     struct vmbus_packet_mpb_array *mpb,
				     u32 desc_size,
				     void *buffer,
				     u32 bufferlen,
				     u64 requestid);

extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
				 void *kbuffer,
				 u32 size,
				 u32 *gpadl_handle);

extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
				u32 gpadl_handle);

void vmbus_reset_channel_cb(struct vmbus_channel *channel);

extern int vmbus_recvpacket(struct vmbus_channel *channel,
			    void *buffer,
			    u32 bufferlen,
			    u32 *buffer_actual_len,
			    u64 *requestid);

extern int vmbus_recvpacket_raw(struct vmbus_channel *channel,
				void *buffer,
				u32 bufferlen,
				u32 *buffer_actual_len,
				u64 *requestid);


extern void vmbus_ontimer(unsigned long data);

/* Base driver object */
struct hv_driver {
	const char *name;

	/*
	 * A hvsock offer, which has a VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER
	 * channel flag, actually doesn't mean a synthetic device because the
	 * offer's if_type/if_instance can change for every new hvsock
	 * connection.
	 *
	 * However, to facilitate the notification of new-offer/rescind-offer
	 * from vmbus driver to hvsock driver, we can handle hvsock offer as
	 * a special vmbus device, and hence we need the below flag to
	 * indicate if the driver is the hvsock driver or not: we need to
	 * specially treat the hvsock offer & driver in vmbus_match().
	 */
	bool hvsock;

	/* the device type supported by this driver */
	guid_t dev_type;
	const struct hv_vmbus_device_id *id_table;

	struct device_driver driver;

	/* dynamic device GUID's */
	struct  {
		spinlock_t lock;
		struct list_head list;
	} dynids;

	int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *);
	int (*remove)(struct hv_device *);
	void (*shutdown)(struct hv_device *);

	int (*suspend)(struct hv_device *);
	int (*resume)(struct hv_device *);

};

/* Base device object */
struct hv_device {
	/* the device type id of this device */
	guid_t dev_type;

	/* the device instance id of this device */
	guid_t dev_instance;
	u16 vendor_id;
	u16 device_id;

	struct device device;
	char *driver_override; /* Driver name to force a match */

	struct vmbus_channel *channel;
	struct kset	     *channels_kset;

	/* place holder to keep track of the dir for hv device in debugfs */
	struct dentry *debug_dir;

};


static inline struct hv_device *device_to_hv_device(struct device *d)
{
	return container_of(d, struct hv_device, device);
}

static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d)
{
	return container_of(d, struct hv_driver, driver);
}

static inline void hv_set_drvdata(struct hv_device *dev, void *data)
{
	dev_set_drvdata(&dev->device, data);
}

static inline void *hv_get_drvdata(struct hv_device *dev)
{
	return dev_get_drvdata(&dev->device);
}

struct hv_ring_buffer_debug_info {
	u32 current_interrupt_mask;
	u32 current_read_index;
	u32 current_write_index;
	u32 bytes_avail_toread;
	u32 bytes_avail_towrite;
};


int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
				struct hv_ring_buffer_debug_info *debug_info);

/* Vmbus interface */
#define vmbus_driver_register(driver)	\
	__vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
					 struct module *owner,
					 const char *mod_name);
void vmbus_driver_unregister(struct hv_driver *hv_driver);
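
/*
 * Illustrative sketch of a minimal VMBus driver built on the interfaces
 * above. All names, the ring sizes, and the id table are hypothetical;
 * a real driver would also provide its hv_vmbus_device_id table via
 * MODULE_DEVICE_TABLE(vmbus, ...).
 */
#if 0	/* example only, not compiled */
static void example_cb(void *context)
{
	/* drain the inbound ring here; see foreach_vmbus_pkt() below */
}

static int example_probe(struct hv_device *dev,
			 const struct hv_vmbus_device_id *dev_id)
{
	/* 4-page rings in each direction, no user data */
	return vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE,
			  NULL, 0, example_cb, dev->channel);
}

static int example_remove(struct hv_device *dev)
{
	vmbus_close(dev->channel);
	return 0;
}

static struct hv_driver example_drv = {
	.name		= "example",
	.id_table	= example_id_table,	/* hypothetical */
	.probe		= example_probe,
	.remove		= example_remove,
};

module_driver(example_drv, vmbus_driver_register, vmbus_driver_unregister);
#endif
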
void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);

int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
			resource_size_t min, resource_size_t max,
			resource_size_t size, resource_size_t align,
			bool fb_overlap_ok);
void vmbus_free_mmio(resource_size_t start, resource_size_t size);

/*
 * GUID definitions of various offer types - services offered to the guest.
 */

/*
 * Network GUID
 * {f8615163-df3e-46c5-913f-f2d2f965ed0e}
 */
#define HV_NIC_GUID \
	.guid = GUID_INIT(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \
			  0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e)

/*
 * IDE GUID
 * {32412632-86cb-44a2-9b5c-50d1417354f5}
 */
#define HV_IDE_GUID \
	.guid = GUID_INIT(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \
			  0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)

/*
 * SCSI GUID
 * {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}
 */
#define HV_SCSI_GUID \
	.guid = GUID_INIT(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \
			  0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)

/*
 * Shutdown GUID
 * {0e0b6031-5213-4934-818b-38d90ced39db}
 */
#define HV_SHUTDOWN_GUID \
	.guid = GUID_INIT(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \
			  0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb)

/*
 * Time Synch GUID
 * {9527E630-D0AE-497b-ADCE-E80AB0175CAF}
 */
#define HV_TS_GUID \
	.guid = GUID_INIT(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \
			  0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)

/*
 * Heartbeat GUID
 * {57164f39-9115-4e78-ab55-382f3bd5422d}
 */
#define HV_HEART_BEAT_GUID \
	.guid = GUID_INIT(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \
			  0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)

/*
 * KVP GUID
 * {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}
 */
#define HV_KVP_GUID \
	.guid = GUID_INIT(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \
			  0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6)

/*
 * Dynamic memory GUID
 * {525074dc-8985-46e2-8057-a307dc18a502}
 */
#define HV_DM_GUID \
	.guid = GUID_INIT(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \
			  0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)

/*
 * Mouse GUID
 * {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}
 */
#define HV_MOUSE_GUID \
	.guid = GUID_INIT(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \
			  0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a)

/*
 * Keyboard GUID
 * {f912ad6d-2b17-48ea-bd65-f927a61c7684}
 */
#define HV_KBD_GUID \
	.guid = GUID_INIT(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \
			  0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84)

/*
 * VSS (Backup/Restore) GUID
 */
#define HV_VSS_GUID \
	.guid = GUID_INIT(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \
			  0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40)
/*
 * Synthetic Video GUID
 * {DA0A7802-E377-4aac-8E77-0558EB1073F8}
 */
#define HV_SYNTHVID_GUID \
	.guid = GUID_INIT(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \
			  0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8)

/*
 * Synthetic FC GUID
 * {2f9bcc4a-0069-4af3-b76b-6fd0be528cda}
 */
#define HV_SYNTHFC_GUID \
	.guid = GUID_INIT(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \
			  0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda)

/*
 * Guest File Copy Service
 * {34D14BE3-DEE4-41c8-9AE7-6B174977C192}
 */

#define HV_FCOPY_GUID \
	.guid = GUID_INIT(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \
			  0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92)

/*
 * NetworkDirect. This is the guest RDMA service.
 * {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}
 */
#define HV_ND_GUID \
	.guid = GUID_INIT(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \
			  0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01)

/*
 * PCI Express Pass Through
 * {44C4F61D-4444-4400-9D52-802E27EDE19F}
 */

#define HV_PCIE_GUID \
	.guid = GUID_INIT(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \
			  0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)

/*
 * Linux doesn't support these 3 devices: the first two are for
 * Automatic Virtual Machine Activation, and the third is for
 * Remote Desktop Virtualization.
 * {f8e65716-3cb3-4a06-9a60-1889c5cccab5}
 * {3375baf4-9e15-4b30-b765-67acb10d607b}
 * {276aacf4-ac15-426c-98dd-7521ad3f01fe}
 */

#define HV_AVMA1_GUID \
	.guid = GUID_INIT(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \
			  0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5)

#define HV_AVMA2_GUID \
	.guid = GUID_INIT(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \
			  0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b)

#define HV_RDV_GUID \
	.guid = GUID_INIT(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \
			  0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe)
/*
 * Common header for Hyper-V ICs
 */

#define ICMSGTYPE_NEGOTIATE		0
#define ICMSGTYPE_HEARTBEAT		1
#define ICMSGTYPE_KVPEXCHANGE		2
#define ICMSGTYPE_SHUTDOWN		3
#define ICMSGTYPE_TIMESYNC		4
#define ICMSGTYPE_VSS			5
#define ICMSGTYPE_FCOPY			7

#define ICMSGHDRFLAG_TRANSACTION	1
#define ICMSGHDRFLAG_REQUEST		2
#define ICMSGHDRFLAG_RESPONSE		4


/*
 * While we want to handle util services as regular devices,
 * there is only one instance of each of these services; so
 * we statically allocate the service specific state.
 */

struct hv_util_service {
	u8 *recv_buffer;
	void *channel;
	void (*util_cb)(void *);
	int (*util_init)(struct hv_util_service *);
	void (*util_deinit)(void);
	int (*util_pre_suspend)(void);
	int (*util_pre_resume)(void);
};

struct vmbuspipe_hdr {
	u32 flags;
	u32 msgsize;
} __packed;

struct ic_version {
	u16 major;
	u16 minor;
} __packed;

struct icmsg_hdr {
	struct ic_version icverframe;
	u16 icmsgtype;
	struct ic_version icvermsg;
	u16 icmsgsize;
	u32 status;
	u8 ictransaction_id;
	u8 icflags;
	u8 reserved[2];
} __packed;

#define IC_VERSION_NEGOTIATION_MAX_VER_COUNT 100
#define ICMSG_HDR (sizeof(struct vmbuspipe_hdr) + sizeof(struct icmsg_hdr))
#define ICMSG_NEGOTIATE_PKT_SIZE(icframe_vercnt, icmsg_vercnt) \
	(ICMSG_HDR + sizeof(struct icmsg_negotiate) + \
	 (((icframe_vercnt) + (icmsg_vercnt)) * sizeof(struct ic_version)))

struct icmsg_negotiate {
	u16 icframe_vercnt;
	u16 icmsg_vercnt;
	u32 reserved;
	struct ic_version icversion_data[]; /* any size array */
} __packed;

struct shutdown_msg_data {
	u32 reason_code;
	u32 timeout_seconds;
	u32 flags;
	u8  display_message[2048];
} __packed;

struct heartbeat_msg_data {
	u64 seq_num;
	u32 reserved[8];
} __packed;

/* Time Sync IC defs */
#define ICTIMESYNCFLAG_PROBE	0
#define ICTIMESYNCFLAG_SYNC	1
#define ICTIMESYNCFLAG_SAMPLE	2

/*
 * Difference between the Windows epoch (1601-01-01) and the
 * Unix epoch (1970-01-01), in 100ns units.
 */
#ifdef __x86_64__
#define WLTIMEDELTA	116444736000000000L	/* in 100ns unit */
#else
#define WLTIMEDELTA	116444736000000000LL
#endif

struct ictimesync_data {
	u64 parenttime;
	u64 childtime;
	u64 roundtriptime;
	u8 flags;
} __packed;

struct ictimesync_ref_data {
	u64 parenttime;
	u64 vmreferencetime;
	u8 flags;
	char leapflags;
	char stratum;
	u8 reserved[3];
} __packed;

struct hyperv_service_callback {
	u8 msg_type;
	char *log_msg;
	guid_t data;
	struct vmbus_channel *channel;
	void (*callback)(void *context);
};
#define MAX_SRV_VER	0x7ffffff
extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf, u32 buflen,
				const int *fw_version, int fw_vercnt,
				const int *srv_version, int srv_vercnt,
				int *nego_fw_version, int *nego_srv_version);

void hv_process_channel_removal(struct vmbus_channel *channel);

void vmbus_setevent(struct vmbus_channel *channel);
/*
 * Negotiated version with the Host.
 */

extern __u32 vmbus_proto_version;

int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
				  const guid_t *shv_host_servie_id);
int vmbus_send_modifychannel(struct vmbus_channel *channel, u32 target_vp);
void vmbus_set_event(struct vmbus_channel *channel);

/* Get the start of the ring buffer. */
static inline void *
hv_get_ring_buffer(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_buffer->buffer;
}

/*
 * Mask off host interrupt callback notifications
 */
static inline void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 1;

	/* make sure mask update is not reordered */
	virt_mb();
}

/*
 * Re-enable host callback and return number of outstanding bytes
 */
static inline u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{

	rbi->ring_buffer->interrupt_mask = 0;

	/* make sure mask update is not reordered */
	virt_mb();

	/*
	 * Now check to see if the ring buffer is still empty.
	 * If it is not, we raced and we need to process new
	 * incoming messages.
	 */
	return hv_get_bytes_to_read(rbi);
}

/*
 * An API to support in-place processing of incoming VMBUS packets.
 */

/* Get data payload associated with descriptor */
static inline void *hv_pkt_data(const struct vmpacket_descriptor *desc)
{
	return (void *)((unsigned long)desc + (desc->offset8 << 3));
}

/* Get data size associated with descriptor */
static inline u32 hv_pkt_datalen(const struct vmpacket_descriptor *desc)
{
	return (desc->len8 << 3) - (desc->offset8 << 3);
}


struct vmpacket_descriptor *
hv_pkt_iter_first_raw(struct vmbus_channel *channel);

struct vmpacket_descriptor *
hv_pkt_iter_first(struct vmbus_channel *channel);

struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
		   const struct vmpacket_descriptor *pkt,
		   bool copy);

void hv_pkt_iter_close(struct vmbus_channel *channel);

static inline struct vmpacket_descriptor *
hv_pkt_iter_next_pkt(struct vmbus_channel *channel,
		     const struct vmpacket_descriptor *pkt,
		     bool copy)
{
	struct vmpacket_descriptor *nxt;

	nxt = __hv_pkt_iter_next(channel, pkt, copy);
	if (!nxt)
		hv_pkt_iter_close(channel);

	return nxt;
}

/*
 * Get next packet descriptor without copying it out of the ring buffer
 * If at end of list, return NULL and update host.
 */
static inline struct vmpacket_descriptor *
hv_pkt_iter_next_raw(struct vmbus_channel *channel,
		     const struct vmpacket_descriptor *pkt)
{
	return hv_pkt_iter_next_pkt(channel, pkt, false);
}

/*
 * Get next packet descriptor from iterator
 * If at end of list, return NULL and update host.
 */
static inline struct vmpacket_descriptor *
hv_pkt_iter_next(struct vmbus_channel *channel,
		 const struct vmpacket_descriptor *pkt)
{
	return hv_pkt_iter_next_pkt(channel, pkt, true);
}

#define foreach_vmbus_pkt(pkt, channel) \
	for (pkt = hv_pkt_iter_first(channel); pkt; \
	    pkt = hv_pkt_iter_next(channel, pkt))
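
/*
 * Illustrative sketch: a channel callback that drains the inbound ring
 * with the iterator above. When the iterator is exhausted,
 * hv_pkt_iter_close() commits the read index back to the shared ring
 * and signals the host if it is waiting on pending_send_sz, so the loop
 * body only has to consume each packet's payload. The callback name is
 * hypothetical.
 */
static inline void example_drain_channel(struct vmbus_channel *chan)
{
	const struct vmpacket_descriptor *pkt;

	foreach_vmbus_pkt(pkt, chan) {
		void *data = hv_pkt_data(pkt);
		u32 len = hv_pkt_datalen(pkt);

		/* consume "len" bytes at "data" according to pkt->type */
		(void)data;
		(void)len;
	}
}
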
/*
 * Interface for passing data between SR-IOV PF and VF drivers. The VF driver
 * sends requests to read and write blocks. Each block must be 128 bytes or
 * smaller. Optionally, the VF driver can register a callback function which
 * will be invoked when the host says that one or more of the first 64 block
 * IDs is "invalid" which means that the VF driver should reread them.
 */
#define HV_CONFIG_BLOCK_SIZE_MAX 128

int hyperv_read_cfg_blk(struct pci_dev *dev, void *buf, unsigned int buf_len,
			unsigned int block_id, unsigned int *bytes_returned);
int hyperv_write_cfg_blk(struct pci_dev *dev, void *buf, unsigned int len,
			 unsigned int block_id);
int hyperv_reg_block_invalidate(struct pci_dev *dev, void *context,
				void (*block_invalidate)(void *context,
							 u64 block_mask));

struct hyperv_pci_block_ops {
	int (*read_block)(struct pci_dev *dev, void *buf, unsigned int buf_len,
			  unsigned int block_id, unsigned int *bytes_returned);
	int (*write_block)(struct pci_dev *dev, void *buf, unsigned int len,
			   unsigned int block_id);
	int (*reg_blk_invalidate)(struct pci_dev *dev, void *context,
				  void (*block_invalidate)(void *context,
							   u64 block_mask));
};

extern struct hyperv_pci_block_ops hvpci_block_ops;

static inline unsigned long virt_to_hvpfn(void *addr)
{
	phys_addr_t paddr;

	if (is_vmalloc_addr(addr))
		paddr = page_to_phys(vmalloc_to_page(addr)) +
			offset_in_page(addr);
	else
		paddr = __pa(addr);

	return  paddr >> HV_HYP_PAGE_SHIFT;
}

#define NR_HV_HYP_PAGES_IN_PAGE	(PAGE_SIZE / HV_HYP_PAGE_SIZE)
#define offset_in_hvpage(ptr)	((unsigned long)(ptr) & ~HV_HYP_PAGE_MASK)
#define HVPFN_UP(x)	(((x) + HV_HYP_PAGE_SIZE-1) >> HV_HYP_PAGE_SHIFT)
#define HVPFN_DOWN(x)	((x) >> HV_HYP_PAGE_SHIFT)
#define page_to_hvpfn(page)	(page_to_pfn(page) * NR_HV_HYP_PAGES_IN_PAGE)
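
/*
 * Illustrative sketch: how many 4K Hyper-V pages a byte range starting
 * at "addr" spans - the usual way to size a pfn_array before describing
 * the range to the host (hypothetical helper).
 */
static inline u32 example_hvpfn_count(void *addr, u32 len)
{
	return HVPFN_UP(offset_in_hvpage(addr) + len);
}
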
#endif /* _HYPERV_H */