/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright (c) 2011, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */

#ifndef _HYPERV_H
#define _HYPERV_H

#include <uapi/linux/hyperv.h>

#include <linux/mm.h>
#include <linux/types.h>
#include <linux/scatterlist.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
#include <linux/reciprocal_div.h>
#include <asm/hyperv-tlfs.h>

#define MAX_PAGE_BUFFER_COUNT		32
#define MAX_MULTIPAGE_BUFFER_COUNT	32 /* 128K */

#pragma pack(push, 1)

/*
 * Types of GPADL, which decide how the GPADL header is created.
 *
 * It doesn't make much difference between BUFFER and RING if PAGE_SIZE is the
 * same as HV_HYP_PAGE_SIZE.
 *
 * If PAGE_SIZE is bigger than HV_HYP_PAGE_SIZE, the headers of ring buffers
 * will be of PAGE_SIZE, however, only the first HV_HYP_PAGE will be put
 * into the gpadl, therefore the number of HV_HYP_PAGEs and the indexes of
 * each HV_HYP_PAGE will differ between the types of GPADL, for example if
 * PAGE_SIZE is 64K:
 *
 * BUFFER:
 *
 * gva:    |--       64k      --|--       64k      --| ... |
 * gpa:    | 4k | 4k | ... | 4k | 4k | 4k | ... | 4k |
 * index:  0    1    2     15   16   17   18 .. 31   32 ...
 *         |    |    ...   |    |    |   ...    |   ...
 *         v    V          V    V    V          V
 * gpadl:  | 4k | 4k | ... | 4k | 4k | 4k | ... | 4k | ... |
 * index:  0    1    2 ... 15   16   17   18 .. 31   32 ...
 *
 * RING:
 *
 *         | header  |           data           | header  |     data      |
 * gva:    |-- 64k --|--       64k      --| ... |-- 64k --|-- 64k --| ... |
 * gpa:    | 4k | .. | 4k | 4k | ... | 4k | ... | 4k | .. | 4k | .. | ... |
 * index:  0    1    16   17   18    31   ...   n   n+1  n+16 ...   2n
 *         |         /    /          /          |         /         /
 *         |        /    /          /           |        /         /
 *         |       /    /   ...    /    ...     |       /    ...  /
 *         |      /    /          /             |      /         /
 *         |     /    /          /              |     /         /
 *         V    V    V          V               V    V         v
 * gpadl:  | 4k | 4k |   ...    |    ...        | 4k | 4k |  ...   |
 * index:  0    1    2   ...    16   ...       n-15 n-14 n-13  ... 2n-30
 */
enum hv_gpadl_type {
	HV_GPADL_BUFFER,
	HV_GPADL_RING
};

/* Single-page buffer */
struct hv_page_buffer {
	u32 len;
	u32 offset;
	u64 pfn;
};

/* Multiple-page buffer */
struct hv_multipage_buffer {
	/* Length and Offset determines the # of pfns in the array */
	u32 len;
	u32 offset;
	u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT];
};

/*
 * Multiple-page buffer array; the pfn array is variable size:
 * The number of entries in the PFN array is determined by
 * "len" and "offset".
 */
struct hv_mpb_array {
	/* Length and Offset determines the # of pfns in the array */
	u32 len;
	u32 offset;
	u64 pfn_array[];
};

/* 0x18 includes the proprietary packet header */
#define MAX_PAGE_BUFFER_PACKET		(0x18 +			\
					(sizeof(struct hv_page_buffer) * \
					 MAX_PAGE_BUFFER_COUNT))
#define MAX_MULTIPAGE_BUFFER_PACKET	(0x18 +			\
					 sizeof(struct hv_multipage_buffer))


#pragma pack(pop)

struct hv_ring_buffer {
	/* Offset in bytes from the start of ring data below */
	u32 write_index;

	/* Offset in bytes from the start of ring data below */
	u32 read_index;

	u32 interrupt_mask;

	/*
	 * WS2012/Win8 and later versions of Hyper-V implement interrupt
	 * driven flow management. The feature bit feat_pending_send_sz
	 * is set by the host on the host->guest ring buffer, and by the
	 * guest on the guest->host ring buffer.
	 *
	 * The meaning of the feature bit is a bit complex in that it has
	 * semantics that apply to both ring buffers.  If the guest sets
	 * the feature bit in the guest->host ring buffer, the guest is
	 * telling the host that:
	 * 1) It will set the pending_send_sz field in the guest->host ring
	 *    buffer when it is waiting for space to become available, and
	 * 2) It will read the pending_send_sz field in the host->guest
	 *    ring buffer and interrupt the host when it frees enough space
	 *
	 * Similarly, if the host sets the feature bit in the host->guest
	 * ring buffer, the host is telling the guest that:
	 * 1) It will set the pending_send_sz field in the host->guest ring
	 *    buffer when it is waiting for space to become available, and
	 * 2) It will read the pending_send_sz field in the guest->host
	 *    ring buffer and interrupt the guest when it frees enough space
	 *
	 * If either the guest or host does not set the feature bit that it
	 * owns, that guest or host must do polling if it encounters a full
	 * ring buffer, and not signal the other end with an interrupt.
	 */
	u32 pending_send_sz;
	u32 reserved1[12];
	union {
		struct {
			u32 feat_pending_send_sz:1;
		};
		u32 value;
	} feature_bits;

	/* Pad it to PAGE_SIZE so that data starts on page boundary */
	u8	reserved2[PAGE_SIZE - 68];

	/*
	 * Ring data starts here + RingDataStartOffset
	 * !!! DO NOT place any fields below this !!!
	 */
	u8 buffer[];
} __packed;
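/*
 * Illustrative note (a sketch, not part of this header): because the shared
 * header above is padded so that buffer[] starts on a page boundary, the
 * usable data size of a ring buffer allocation can be computed as:
 *
 *	datasize = ring_size - offsetof(struct hv_ring_buffer, buffer);
 */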
/*
 * If the requested ring buffer size is at least 8 times the size of the
 * header, steal space from the ring buffer for the header. Otherwise, add
 * space for the header so that it doesn't take too much of the ring buffer
 * space.
 *
 * The factor of 8 is somewhat arbitrary. The goal is to prevent adding a
 * relatively small header (4 Kbytes on x86) to a large-ish power-of-2 ring
 * buffer size (such as 128 Kbytes) and so end up making a nearly twice as
 * large allocation that will be almost half wasted. As a contrasting example,
 * on ARM64 with 64 Kbyte page size, we don't want to take 64 Kbytes for the
 * header from a 128 Kbyte allocation, leaving only 64 Kbytes for the ring.
 * In this latter case, we must add 64 Kbytes for the header and not worry
 * about what's wasted.
 */
#define VMBUS_HEADER_ADJ(payload_sz) \
	((payload_sz) >= 8 * sizeof(struct hv_ring_buffer) ? \
	0 : sizeof(struct hv_ring_buffer))

/* Calculate the proper size of a ringbuffer, it must be page-aligned */
#define VMBUS_RING_SIZE(payload_sz) PAGE_ALIGN(VMBUS_HEADER_ADJ(payload_sz) + \
					       (payload_sz))
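/*
 * Worked example (illustrative): with 4 Kbyte pages, sizeof(struct
 * hv_ring_buffer) is 4 Kbytes. A 128 Kbyte payload is >= 8 * 4 Kbytes, so
 * VMBUS_HEADER_ADJ() is 0 and VMBUS_RING_SIZE() is 128 Kbytes, with the
 * header stolen from the payload space. A 16 Kbyte payload is below the
 * threshold, so the header is added on top and VMBUS_RING_SIZE() is
 * 20 Kbytes.
 */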
struct hv_ring_buffer_info {
	struct hv_ring_buffer *ring_buffer;
	u32 ring_size;			/* Include the shared header */
	struct reciprocal_value ring_size_div10_reciprocal;
	spinlock_t ring_lock;

	u32 ring_datasize;		/* < ring_size */
	u32 priv_read_index;
	/*
	 * The ring buffer mutex lock. This lock prevents the ring buffer from
	 * being freed while the ring buffer is being accessed.
	 */
	struct mutex ring_buffer_mutex;

	/* Buffer that holds a copy of an incoming host packet */
	void *pkt_buffer;
	u32 pkt_buffer_size;
};


static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi)
{
	u32 read_loc, write_loc, dsize, read;

	dsize = rbi->ring_datasize;
	read_loc = rbi->ring_buffer->read_index;
	write_loc = READ_ONCE(rbi->ring_buffer->write_index);

	read = write_loc >= read_loc ? (write_loc - read_loc) :
		(dsize - read_loc) + write_loc;

	return read;
}

static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
{
	u32 read_loc, write_loc, dsize, write;

	dsize = rbi->ring_datasize;
	read_loc = READ_ONCE(rbi->ring_buffer->read_index);
	write_loc = rbi->ring_buffer->write_index;

	write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
		read_loc - write_loc;
	return write;
}

static inline u32 hv_get_avail_to_write_percent(
		const struct hv_ring_buffer_info *rbi)
{
	u32 avail_write = hv_get_bytes_to_write(rbi);

	return reciprocal_divide(
			(avail_write  << 3) + (avail_write << 1),
			rbi->ring_size_div10_reciprocal);
}
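/*
 * Worked example (illustrative): with ring_datasize = 16384, read_index =
 * 12288 and write_index = 4096, the writer has wrapped around, so
 * hv_get_bytes_to_read() returns (16384 - 12288) + 4096 = 8192 and
 * hv_get_bytes_to_write() returns 12288 - 4096 = 8192. In
 * hv_get_avail_to_write_percent(), (x << 3) + (x << 1) is x * 10, so the
 * reciprocal divide by ring_size / 10 yields an integer percentage.
 */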
/*
 * The VMBus version is a 32 bit entity broken up into
 * two 16 bit quantities: major_number.minor_number.
 *
 * 0 . 13 (Windows Server 2008)
 * 1 . 1  (Windows 7, WS2008 R2)
 * 2 . 4  (Windows 8, WS2012)
 * 3 . 0  (Windows 8.1, WS2012 R2)
 * 4 . 0  (Windows 10)
 * 4 . 1  (Windows 10 RS3)
 * 5 . 0  (Newer Windows 10)
 * 5 . 1  (Windows 10 RS4)
 * 5 . 2  (Windows Server 2019, RS5)
 * 5 . 3  (Windows Server 2022)
 *
 * The WS2008 and WIN7 versions are listed here for
 * completeness but are no longer supported in the
 * Linux kernel.
 */

#define VERSION_WS2008	((0 << 16) | (13))
#define VERSION_WIN7	((1 << 16) | (1))
#define VERSION_WIN8	((2 << 16) | (4))
#define VERSION_WIN8_1	((3 << 16) | (0))
#define VERSION_WIN10	((4 << 16) | (0))
#define VERSION_WIN10_V4_1	((4 << 16) | (1))
#define VERSION_WIN10_V5	((5 << 16) | (0))
#define VERSION_WIN10_V5_1	((5 << 16) | (1))
#define VERSION_WIN10_V5_2	((5 << 16) | (2))
#define VERSION_WIN10_V5_3	((5 << 16) | (3))

/* Make the maximum size of a pipe payload 16K */
#define MAX_PIPE_DATA_PAYLOAD	(sizeof(u8) * 16384)

/* Define PipeMode values. */
#define VMBUS_PIPE_TYPE_BYTE		0x00000000
#define VMBUS_PIPE_TYPE_MESSAGE		0x00000004

/* The size of the user defined data buffer for non-pipe offers. */
#define MAX_USER_DEFINED_BYTES		120

/* The size of the user defined data buffer for pipe offers. */
#define MAX_PIPE_USER_DEFINED_BYTES	116

/*
 * At the center of the Channel Management library is the Channel Offer. This
 * struct contains the fundamental information about an offer.
 */
struct vmbus_channel_offer {
	guid_t if_type;
	guid_t if_instance;

	/*
	 * These two fields are not currently used.
	 */
	u64 reserved1;
	u64 reserved2;

	u16 chn_flags;
	u16 mmio_megabytes;		/* in bytes * 1024 * 1024 */

	union {
		/* Non-pipes: The user has MAX_USER_DEFINED_BYTES bytes. */
		struct {
			unsigned char user_def[MAX_USER_DEFINED_BYTES];
		} std;

		/*
		 * Pipes:
		 * The following structure is an integrated pipe protocol, which
		 * is implemented on top of standard user-defined data. Pipe
		 * clients have MAX_PIPE_USER_DEFINED_BYTES left for their own
		 * use.
		 */
		struct {
			u32  pipe_mode;
			unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES];
		} pipe;
	} u;
	/*
	 * The sub_channel_index is defined in Win8: a value of zero means a
	 * primary channel and a value of non-zero means a sub-channel.
	 *
	 * Before Win8, the field is reserved, meaning it's always zero.
	 */
	u16 sub_channel_index;
	u16 reserved3;
} __packed;

/* Server Flags */
#define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE	1
#define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES	2
#define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS		4
#define VMBUS_CHANNEL_NAMED_PIPE_MODE			0x10
#define VMBUS_CHANNEL_LOOPBACK_OFFER			0x100
#define VMBUS_CHANNEL_PARENT_OFFER			0x200
#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION	0x400
#define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER		0x2000

struct vmpacket_descriptor {
	u16 type;
	u16 offset8;
	u16 len8;
	u16 flags;
	u64 trans_id;
} __packed;

struct vmpacket_header {
	u32 prev_pkt_start_offset;
	struct vmpacket_descriptor descriptor;
} __packed;

struct vmtransfer_page_range {
	u32 byte_count;
	u32 byte_offset;
} __packed;

struct vmtransfer_page_packet_header {
	struct vmpacket_descriptor d;
	u16 xfer_pageset_id;
	u8  sender_owns_set;
	u8 reserved;
	u32 range_cnt;
	struct vmtransfer_page_range ranges[];
} __packed;

struct vmgpadl_packet_header {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 reserved;
} __packed;

struct vmadd_remove_transfer_page_set {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u16 xfer_pageset_id;
	u16 reserved;
} __packed;

/*
 * This structure defines a range in guest physical space that can be made to
 * look virtually contiguous.
 */
struct gpa_range {
	u32 byte_count;
	u32 byte_offset;
	u64 pfn_array[];
};

/*
 * This is the format for an Establish Gpadl packet, which contains a handle by
 * which this GPADL will be known and a set of GPA ranges associated with it.
 * This can be converted to an MDL by the guest OS. If there are multiple GPA
 * ranges, then the resulting MDL will be "chained," representing multiple VA
 * ranges.
 */
struct vmestablish_gpadl {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 range_cnt;
	struct gpa_range range[1];
} __packed;

/*
 * This is the format for a Teardown Gpadl packet, which indicates that the
 * GPADL handle in the Establish Gpadl packet will never be referenced again.
 */
struct vmteardown_gpadl {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 reserved;	/* for alignment to a 8-byte boundary */
} __packed;

/*
 * This is the format for a GPA-Direct packet, which contains a set of GPA
 * ranges, in addition to commands and/or data.
 */
struct vmdata_gpa_direct {
	struct vmpacket_descriptor d;
	u32 reserved;
	u32 range_cnt;
	struct gpa_range range[1];
} __packed;
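/*
 * Illustrative helper (hypothetical, not part of this header's API): per the
 * GPADL message comment further below, the number of PFNs implied by a
 * gpa_range is the number of hypervisor pages spanned by byte_offset plus
 * byte_count.
 */
static inline u32 hv_gpa_range_pfn_count(const struct gpa_range *range)
{
	return (range->byte_offset + range->byte_count +
		HV_HYP_PAGE_SIZE - 1) >> HV_HYP_PAGE_SHIFT;
}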
/* This is the format for an Additional Data Packet. */
struct vmadditional_data {
	struct vmpacket_descriptor d;
	u64 total_bytes;
	u32 offset;
	u32 byte_cnt;
	unsigned char data[1];
} __packed;

union vmpacket_largest_possible_header {
	struct vmpacket_descriptor simple_hdr;
	struct vmtransfer_page_packet_header xfer_page_hdr;
	struct vmgpadl_packet_header gpadl_hdr;
	struct vmadd_remove_transfer_page_set add_rm_xfer_page_hdr;
	struct vmestablish_gpadl establish_gpadl_hdr;
	struct vmteardown_gpadl teardown_gpadl_hdr;
	struct vmdata_gpa_direct data_gpa_direct_hdr;
};

#define VMPACKET_DATA_START_ADDRESS(__packet)	\
	(void *)(((unsigned char *)__packet) +	\
	 ((struct vmpacket_descriptor *)__packet)->offset8 * 8)

#define VMPACKET_DATA_LENGTH(__packet)		\
	((((struct vmpacket_descriptor *)__packet)->len8 -	\
	  ((struct vmpacket_descriptor *)__packet)->offset8) * 8)

#define VMPACKET_TRANSFER_MODE(__packet)	\
	(((struct vmpacket_descriptor *)__packet)->type)
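/*
 * Worked example (illustrative): offset8 and len8 are in units of 8 bytes.
 * A descriptor with offset8 = 3 and len8 = 16 describes a 128-byte packet
 * whose payload starts 24 bytes in, so VMPACKET_DATA_LENGTH() is
 * (16 - 3) * 8 = 104 bytes.
 */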
enum vmbus_packet_type {
	VM_PKT_INVALID				= 0x0,
	VM_PKT_SYNCH				= 0x1,
	VM_PKT_ADD_XFER_PAGESET			= 0x2,
	VM_PKT_RM_XFER_PAGESET			= 0x3,
	VM_PKT_ESTABLISH_GPADL			= 0x4,
	VM_PKT_TEARDOWN_GPADL			= 0x5,
	VM_PKT_DATA_INBAND			= 0x6,
	VM_PKT_DATA_USING_XFER_PAGES		= 0x7,
	VM_PKT_DATA_USING_GPADL			= 0x8,
	VM_PKT_DATA_USING_GPA_DIRECT		= 0x9,
	VM_PKT_CANCEL_REQUEST			= 0xa,
	VM_PKT_COMP				= 0xb,
	VM_PKT_DATA_USING_ADDITIONAL_PKT	= 0xc,
	VM_PKT_ADDITIONAL_DATA			= 0xd
};

#define VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED	1


/* Version 1 messages */
enum vmbus_channel_message_type {
	CHANNELMSG_INVALID			=  0,
	CHANNELMSG_OFFERCHANNEL		=  1,
	CHANNELMSG_RESCIND_CHANNELOFFER	=  2,
	CHANNELMSG_REQUESTOFFERS		=  3,
	CHANNELMSG_ALLOFFERS_DELIVERED	=  4,
	CHANNELMSG_OPENCHANNEL		=  5,
	CHANNELMSG_OPENCHANNEL_RESULT		=  6,
	CHANNELMSG_CLOSECHANNEL		=  7,
	CHANNELMSG_GPADL_HEADER		=  8,
	CHANNELMSG_GPADL_BODY			=  9,
	CHANNELMSG_GPADL_CREATED		= 10,
	CHANNELMSG_GPADL_TEARDOWN		= 11,
	CHANNELMSG_GPADL_TORNDOWN		= 12,
	CHANNELMSG_RELID_RELEASED		= 13,
	CHANNELMSG_INITIATE_CONTACT		= 14,
	CHANNELMSG_VERSION_RESPONSE		= 15,
	CHANNELMSG_UNLOAD			= 16,
	CHANNELMSG_UNLOAD_RESPONSE		= 17,
	CHANNELMSG_18				= 18,
	CHANNELMSG_19				= 19,
	CHANNELMSG_20				= 20,
	CHANNELMSG_TL_CONNECT_REQUEST		= 21,
	CHANNELMSG_MODIFYCHANNEL		= 22,
	CHANNELMSG_TL_CONNECT_RESULT		= 23,
	CHANNELMSG_MODIFYCHANNEL_RESPONSE	= 24,
	CHANNELMSG_COUNT
};

/* Hyper-V supports about 2048 channels, and the RELIDs start with 1. */
#define INVALID_RELID	U32_MAX

struct vmbus_channel_message_header {
	enum vmbus_channel_message_type msgtype;
	u32 padding;
} __packed;

/* Query VMBus Version parameters */
struct vmbus_channel_query_vmbus_version {
	struct vmbus_channel_message_header header;
	u32 version;
} __packed;

/* VMBus Version Supported parameters */
struct vmbus_channel_version_supported {
	struct vmbus_channel_message_header header;
	u8 version_supported;
} __packed;

/* Offer Channel parameters */
struct vmbus_channel_offer_channel {
	struct vmbus_channel_message_header header;
	struct vmbus_channel_offer offer;
	u32 child_relid;
	u8 monitorid;
	/*
	 * win7 and beyond splits this field into a bit field.
	 */
	u8 monitor_allocated:1;
	u8 reserved:7;
	/*
	 * These are new fields added in win7 and later.
	 * Do not access these fields without checking the
	 * negotiated protocol.
	 *
	 * If "is_dedicated_interrupt" is set, we must not set the
	 * associated bit in the channel bitmap while sending the
	 * interrupt to the host.
	 *
	 * connection_id is to be used in signaling the host.
	 */
	u16 is_dedicated_interrupt:1;
	u16 reserved1:15;
	u32 connection_id;
} __packed;

/* Rescind Offer parameters */
struct vmbus_channel_rescind_offer {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;

/*
 * Request Offer -- no parameters, SynIC message contains the partition ID
 * Set Snoop -- no parameters, SynIC message contains the partition ID
 * Clear Snoop -- no parameters, SynIC message contains the partition ID
 * All Offers Delivered -- no parameters, SynIC message contains the partition
 *		           ID
 * Flush Client -- no parameters, SynIC message contains the partition ID
 */

/* Open Channel parameters */
struct vmbus_channel_open_channel {
	struct vmbus_channel_message_header header;

	/* Identifies the specific VMBus channel that is being opened. */
	u32 child_relid;

	/* ID making a particular open request at a channel offer unique. */
	u32 openid;

	/* GPADL for the channel's ring buffer. */
	u32 ringbuffer_gpadlhandle;

	/*
	 * Starting with win8, this field will be used to specify
	 * the target virtual processor on which to deliver the interrupt for
	 * the host to guest communication.
	 * Prior to win8, incoming channel interrupts would only
	 * be delivered on cpu 0. Setting this value to 0 would
	 * preserve the earlier behavior.
	 */
	u32 target_vp;

	/*
	 * The upstream ring buffer begins at offset zero in the memory
	 * described by RingBufferGpadlHandle. The downstream ring buffer
	 * follows it at this offset (in pages).
	 */
	u32 downstream_ringbuffer_pageoffset;

	/* User-specific data to be passed along to the server endpoint. */
	unsigned char userdata[MAX_USER_DEFINED_BYTES];
} __packed;

/* Open Channel Result parameters */
struct vmbus_channel_open_result {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 openid;
	u32 status;
} __packed;

/* Modify Channel Result parameters */
struct vmbus_channel_modifychannel_response {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 status;
} __packed;

/* Close channel parameters. */
struct vmbus_channel_close_channel {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;

/* Channel Message GPADL */
#define GPADL_TYPE_RING_BUFFER		1
#define GPADL_TYPE_SERVER_SAVE_AREA	2
#define GPADL_TYPE_TRANSACTION		8

/*
 * The number of PFNs in a GPADL message is defined by the number of
 * pages that would be spanned by ByteCount and ByteOffset.  If the
 * implied number of PFNs won't fit in this packet, there will be a
 * follow-up packet that contains more.
 */
struct vmbus_channel_gpadl_header {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
	u16 range_buflen;
	u16 rangecount;
	struct gpa_range range[];
} __packed;
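/*
 * Worked example (illustrative): with 4 Kbyte hypervisor pages, a 128 Kbyte
 * ring buffer (byte_offset = 0, byte_count = 0x20000) implies 32 PFNs.
 * PFNs that do not fit in the header message arrive in follow-up
 * CHANNELMSG_GPADL_BODY messages.
 */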
/* This is the followup packet that contains more PFNs. */
struct vmbus_channel_gpadl_body {
	struct vmbus_channel_message_header header;
	u32 msgnumber;
	u32 gpadl;
	u64 pfn[];
} __packed;

struct vmbus_channel_gpadl_created {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
	u32 creation_status;
} __packed;

struct vmbus_channel_gpadl_teardown {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
} __packed;

struct vmbus_channel_gpadl_torndown {
	struct vmbus_channel_message_header header;
	u32 gpadl;
} __packed;

struct vmbus_channel_relid_released {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;

struct vmbus_channel_initiate_contact {
	struct vmbus_channel_message_header header;
	u32 vmbus_version_requested;
	u32 target_vcpu; /* The VCPU the host should respond to */
	union {
		u64 interrupt_page;
		struct {
			u8	msg_sint;
			u8	msg_vtl;
			u8	reserved[6];
		};
	};
	u64 monitor_page1;
	u64 monitor_page2;
} __packed;

/* Hyper-V socket: guest's connect()-ing to host */
struct vmbus_channel_tl_connect_request {
	struct vmbus_channel_message_header header;
	guid_t guest_endpoint_id;
	guid_t host_service_id;
} __packed;

/* Modify Channel parameters, cf. vmbus_send_modifychannel() */
struct vmbus_channel_modifychannel {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 target_vp;
} __packed;

struct vmbus_channel_version_response {
	struct vmbus_channel_message_header header;
	u8 version_supported;

	u8 connection_state;
	u16 padding;

	/*
	 * On new hosts that support VMBus protocol 5.0, we must use
	 * VMBUS_MESSAGE_CONNECTION_ID_4 for the Initiate Contact Message,
	 * and for subsequent messages, we must use the Message Connection ID
	 * field in the host-returned Version Response Message.
	 *
	 * On old hosts, we should always use VMBUS_MESSAGE_CONNECTION_ID (1).
	 */
	u32 msg_conn_id;
} __packed;

enum vmbus_channel_state {
	CHANNEL_OFFER_STATE,
	CHANNEL_OPENING_STATE,
	CHANNEL_OPEN_STATE,
	CHANNEL_OPENED_STATE,
};

/*
 * Represents each channel msg on the vmbus connection. This is a
 * variable-size data structure depending on the msg type itself.
 */
struct vmbus_channel_msginfo {
	/* Bookkeeping stuff */
	struct list_head msglistentry;

	/* So far, this is only used to handle gpadl body message */
	struct list_head submsglist;

	/* Synchronize the request/response if needed */
	struct completion  waitevent;
	struct vmbus_channel *waiting_channel;
	union {
		struct vmbus_channel_version_supported version_supported;
		struct vmbus_channel_open_result open_result;
		struct vmbus_channel_gpadl_torndown gpadl_torndown;
		struct vmbus_channel_gpadl_created gpadl_created;
		struct vmbus_channel_version_response version_response;
		struct vmbus_channel_modifychannel_response modify_response;
	} response;

	u32 msgsize;
	/*
	 * The channel message that goes out on the "wire".
	 * It will contain at minimum the VMBUS_CHANNEL_MESSAGE_HEADER header
	 */
	unsigned char msg[];
};

struct vmbus_close_msg {
	struct vmbus_channel_msginfo info;
	struct vmbus_channel_close_channel msg;
};
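/*
 * Example (illustrative sketch, names hypothetical): senders allocate a
 * msginfo with the wire message appended after the bookkeeping fields:
 *
 *	msginfo = kzalloc(sizeof(*msginfo) + payload_size, GFP_KERNEL);
 *	msginfo->msgsize = payload_size;
 *	... build the message in msginfo->msg ...
 */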
/* Define connection identifier type. */
union hv_connection_id {
	u32 asu32;
	struct {
		u32 id:24;
		u32 reserved:8;
	} u;
};

enum vmbus_device_type {
	HV_IDE = 0,
	HV_SCSI,
	HV_FC,
	HV_NIC,
	HV_ND,
	HV_PCIE,
	HV_FB,
	HV_KBD,
	HV_MOUSE,
	HV_KVP,
	HV_TS,
	HV_HB,
	HV_SHUTDOWN,
	HV_FCOPY,
	HV_BACKUP,
	HV_DM,
	HV_UNKNOWN,
};

/*
 * Provides request ids for VMBus. Encapsulates guest memory
 * addresses and stores the next available slot in req_arr
 * to generate new ids in constant time.
 */
struct vmbus_requestor {
	u64 *req_arr;
	unsigned long *req_bitmap; /* is a given slot available? */
	u32 size;
	u64 next_request_id;
	spinlock_t req_lock; /* provides atomicity */
};

#define VMBUS_NO_RQSTOR U64_MAX
#define VMBUS_RQST_ERROR (U64_MAX - 1)
#define VMBUS_RQST_ADDR_ANY U64_MAX
/* NetVSC-specific */
#define VMBUS_RQST_ID_NO_RESPONSE (U64_MAX - 2)
/* StorVSC-specific */
#define VMBUS_RQST_INIT (U64_MAX - 2)
#define VMBUS_RQST_RESET (U64_MAX - 3)

struct vmbus_device {
	u16  dev_type;
	guid_t guid;
	bool perf_device;
	bool allowed_in_isolated;
};

#define VMBUS_DEFAULT_MAX_PKT_SIZE 4096

struct vmbus_gpadl {
	u32 gpadl_handle;
	u32 size;
	void *buffer;
	bool decrypted;
};

struct vmbus_channel {
	struct list_head listentry;

	struct hv_device *device_obj;

	enum vmbus_channel_state state;

	struct vmbus_channel_offer_channel offermsg;
	/*
	 * These are based on the OfferMsg.MonitorId.
	 * Save it here for easy access.
	 */
	u8 monitor_grp;
	u8 monitor_bit;

	bool rescind; /* got rescind msg */
	bool rescind_ref; /* got rescind msg, got channel reference */
	struct completion rescind_event;

	struct vmbus_gpadl ringbuffer_gpadlhandle;

	/* Allocated memory for ring buffer */
	struct page *ringbuffer_page;
	u32 ringbuffer_pagecount;
	u32 ringbuffer_send_offset;
	struct hv_ring_buffer_info outbound;	/* send to parent */
	struct hv_ring_buffer_info inbound;	/* receive from parent */

	struct vmbus_close_msg close_msg;

	/* Statistics */
	u64	interrupts;	/* Host to Guest interrupts */
	u64	sig_events;	/* Guest to Host events */

	/*
	 * Guest to host interrupts caused by the outbound ring buffer changing
	 * from empty to not empty.
	 */
	u64 intr_out_empty;

	/*
	 * Indicates that a full outbound ring buffer was encountered. The flag
	 * is set to true when a full outbound ring buffer is encountered and
	 * set to false when a write to the outbound ring buffer is completed.
	 */
	bool out_full_flag;

	/* Channel callbacks are invoked in softirq context */
	struct tasklet_struct callback_event;
	void (*onchannel_callback)(void *context);
	void *channel_callback_context;

	void (*change_target_cpu_callback)(struct vmbus_channel *channel,
			u32 old, u32 new);

	/*
	 * Synchronize channel scheduling and channel removal; see the inline
	 * comments in vmbus_chan_sched() and vmbus_reset_channel_cb().
	 */
	spinlock_t sched_lock;

	/*
	 * A channel can be marked for one of three modes of reading:
	 *   BATCHED - callback called from tasklet and should read
	 *            channel until empty. Interrupts from the host
	 *            are masked while read is in process (default).
	 *   DIRECT - callback called from tasklet (softirq).
	 *   ISR - callback called in interrupt context and must
	 *         invoke its own deferred processing.
	 *         Host interrupts are disabled and must be re-enabled
	 *         when ring is empty.
	 */
	enum hv_callback_mode {
		HV_CALL_BATCHED,
		HV_CALL_DIRECT,
		HV_CALL_ISR
	} callback_mode;

	bool is_dedicated_interrupt;
	u64 sig_event;

	/*
	 * Starting with win8, this field will be used to specify the
	 * target CPU on which to deliver the interrupt for the host
	 * to guest communication.
	 *
	 * Prior to win8, incoming channel interrupts would only be
	 * delivered on CPU 0. Setting this value to 0 would preserve
	 * the earlier behavior.
	 */
	u32 target_cpu;
	/*
	 * Support for sub-channels. For high performance devices,
	 * it will be useful to have multiple sub-channels to support
	 * a scalable communication infrastructure with the host.
	 * The support for sub-channels is implemented as an extension
	 * to the current infrastructure.
	 * The initial offer is considered the primary channel and this
	 * offer message will indicate if the host supports sub-channels.
	 * The guest is free to ask for sub-channels to be offered and can
	 * open these sub-channels as a normal "primary" channel. However,
	 * all sub-channels will have the same type and instance guids as the
	 * primary channel. Requests sent on a given channel will result in a
	 * response on the same channel.
	 */

	/*
	 * Sub-channel creation callback. This callback will be called in
	 * process context when a sub-channel offer is received from the host.
	 * The guest can open the sub-channel in the context of this callback.
	 */
	void (*sc_creation_callback)(struct vmbus_channel *new_sc);

	/*
	 * Channel rescind callback. Some channels (the hvsock ones), need to
	 * register a callback which is invoked in vmbus_onoffer_rescind().
	 */
	void (*chn_rescind_callback)(struct vmbus_channel *channel);

	/*
	 * All Sub-channels of a primary channel are linked here.
	 */
	struct list_head sc_list;
	/*
	 * The primary channel this sub-channel belongs to.
	 * This will be NULL for the primary channel.
	 */
	struct vmbus_channel *primary_channel;
	/*
	 * Support per-channel state for use by vmbus drivers.
	 */
	void *per_channel_state;

	/*
	 * Defer freeing channel until after all cpu's have
	 * gone through grace period.
	 */
	struct rcu_head rcu;

	/*
	 * For sysfs per-channel properties.
	 */
	struct kobject			kobj;

	/*
	 * For performance critical channels (storage, networking
	 * etc.), Hyper-V has a mechanism to enhance the throughput
	 * at the expense of latency:
	 * When the host is to be signaled, we just set a bit in a shared page
	 * and this bit will be inspected by the hypervisor within a certain
	 * window and if the bit is set, the host will be signaled. The window
	 * of time is the monitor latency - currently around 100 usecs. This
	 * mechanism improves throughput by:
	 *
	 * A) Making the host more efficient - each time it wakes up,
	 *    it can potentially process a larger number of packets. The
	 *    monitor latency allows a batch to build up.
	 * B) By deferring the hypercall to signal, we will also minimize
	 *    the interrupts.
	 *
	 * Clearly, these optimizations improve throughput at the expense of
	 * latency. Furthermore, since the channel is shared for both
	 * control and data messages, control messages currently suffer
	 * unnecessary latency adversely impacting performance and boot
	 * time. To fix this issue, permit tagging the channel as being
	 * in "low latency" mode. In this mode, we will bypass the monitor
	 * mechanism.
	 */
	bool low_latency;

	bool probe_done;

	/*
	 * Cache the device ID here for easy access; this is useful, in
	 * particular, in situations where the channel's device_obj has
	 * not been allocated/initialized yet.
	 */
	u16 device_id;

	/*
	 * We must offload the handling of the primary/sub channels
	 * from the single-threaded vmbus_connection.work_queue to
	 * two different workqueues, otherwise we can block
	 * vmbus_connection.work_queue and hang: see vmbus_process_offer().
	 */
	struct work_struct add_channel_work;

	/*
	 * Guest to host interrupts caused by the inbound ring buffer changing
	 * from full to not full while a packet is waiting.
	 */
	u64 intr_in_full;

	/*
	 * The total number of write operations that encountered a full
	 * outbound ring buffer.
	 */
	u64 out_full_total;

	/*
	 * The number of write operations that were the first to encounter a
	 * full outbound ring buffer.
	 */
	u64 out_full_first;

	/* enabling/disabling fuzz testing on the channel (default is false)*/
	bool fuzz_testing_state;

	/*
	 * Interrupt delay will delay the guest from emptying the ring buffer
	 * for a specific amount of time. The delay is in microseconds and will
	 * be between 1 and a maximum of 1000; the default is 0 (no delay).
	 * The message delay will delay guest reading on a per message basis
	 * in microseconds, between 1 and 1000, with the default being 0
	 * (no delay).
	 */
	u32 fuzz_testing_interrupt_delay;
	u32 fuzz_testing_message_delay;

	/* callback to generate a request ID from a request address */
	u64 (*next_request_id_callback)(struct vmbus_channel *channel, u64 rqst_addr);
	/* callback to retrieve a request address from a request ID */
	u64 (*request_addr_callback)(struct vmbus_channel *channel, u64 rqst_id);

	/* request/transaction ids for VMBus */
	struct vmbus_requestor requestor;
	u32 rqstor_size;

	/* The max size of a packet on this channel */
	u32 max_pkt_size;
};
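/*
 * Illustrative request-ID round trip (a sketch of how the fields above fit
 * together): a sender passes the guest address of its request as the
 * request ID; next_request_id_callback() maps it to a slot-based ID from
 * the requestor before it goes on the wire, and request_addr_callback()
 * maps the trans_id in the host's completion back to the original address.
 */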
#define lock_requestor(channel, flags)					\
do {									\
	struct vmbus_requestor *rqstor = &(channel)->requestor;		\
									\
	spin_lock_irqsave(&rqstor->req_lock, flags);			\
} while (0)

static __always_inline void unlock_requestor(struct vmbus_channel *channel,
					     unsigned long flags)
{
	struct vmbus_requestor *rqstor = &channel->requestor;

	spin_unlock_irqrestore(&rqstor->req_lock, flags);
}

u64 vmbus_next_request_id(struct vmbus_channel *channel, u64 rqst_addr);
u64 __vmbus_request_addr_match(struct vmbus_channel *channel, u64 trans_id,
			       u64 rqst_addr);
u64 vmbus_request_addr_match(struct vmbus_channel *channel, u64 trans_id,
			     u64 rqst_addr);
u64 vmbus_request_addr(struct vmbus_channel *channel, u64 trans_id);

static inline bool is_hvsock_offer(const struct vmbus_channel_offer_channel *o)
{
	return !!(o->offer.chn_flags & VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
}

static inline bool is_hvsock_channel(const struct vmbus_channel *c)
{
	return is_hvsock_offer(&c->offermsg);
}

static inline bool is_sub_channel(const struct vmbus_channel *c)
{
	return c->offermsg.offer.sub_channel_index != 0;
}

static inline void set_channel_read_mode(struct vmbus_channel *c,
					enum hv_callback_mode mode)
{
	c->callback_mode = mode;
}

static inline void set_per_channel_state(struct vmbus_channel *c, void *s)
{
	c->per_channel_state = s;
}

static inline void *get_per_channel_state(struct vmbus_channel *c)
{
	return c->per_channel_state;
}

static inline void set_channel_pending_send_size(struct vmbus_channel *c,
						 u32 size)
{
	unsigned long flags;

	if (size) {
		spin_lock_irqsave(&c->outbound.ring_lock, flags);
		++c->out_full_total;

		if (!c->out_full_flag) {
			++c->out_full_first;
			c->out_full_flag = true;
		}
		spin_unlock_irqrestore(&c->outbound.ring_lock, flags);
	} else {
		c->out_full_flag = false;
	}

	c->outbound.ring_buffer->pending_send_sz = size;
}
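/*
 * Illustrative flow (sketch): a sender that finds the outbound ring buffer
 * full can ask the host for an interrupt once @needed bytes are free,
 * relying on the feat_pending_send_sz mechanism described earlier:
 *
 *	if (hv_get_bytes_to_write(&channel->outbound) < needed)
 *		set_channel_pending_send_size(channel, needed);
 */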
void vmbus_onmessage(struct vmbus_channel_message_header *hdr);

int vmbus_request_offers(void);

/*
 * APIs for managing sub-channels.
 */

void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
			void (*sc_cr_cb)(struct vmbus_channel *new_sc));

void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
		void (*chn_rescind_cb)(struct vmbus_channel *));

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_page_buffer {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;
	struct hv_page_buffer range[MAX_PAGE_BUFFER_COUNT];
} __packed;

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_multipage_buffer {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;		/* Always 1 in this case */
	struct hv_multipage_buffer range;
} __packed;

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_packet_mpb_array {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;         /* Always 1 in this case */
	struct hv_mpb_array range;
} __packed;

int vmbus_alloc_ring(struct vmbus_channel *channel,
		     u32 send_size, u32 recv_size);
void vmbus_free_ring(struct vmbus_channel *channel);

int vmbus_connect_ring(struct vmbus_channel *channel,
		       void (*onchannel_callback)(void *context),
		       void *context);
int vmbus_disconnect_ring(struct vmbus_channel *channel);

extern int vmbus_open(struct vmbus_channel *channel,
			    u32 send_ringbuffersize,
			    u32 recv_ringbuffersize,
			    void *userdata,
			    u32 userdatalen,
			    void (*onchannel_callback)(void *context),
			    void *context);

extern void vmbus_close(struct vmbus_channel *channel);
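/*
 * Typical open sequence (illustrative sketch; error handling omitted and
 * names hypothetical):
 *
 *	ret = vmbus_open(channel, VMBUS_RING_SIZE(SZ_32K),
 *			 VMBUS_RING_SIZE(SZ_32K), NULL, 0,
 *			 my_callback, my_context);
 *	...
 *	ret = vmbus_sendpacket(channel, &pkt, sizeof(pkt), request_id,
 *			       VM_PKT_DATA_INBAND,
 *			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 */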
extern int vmbus_sendpacket_getid(struct vmbus_channel *channel,
				  void *buffer,
				  u32 bufferLen,
				  u64 requestid,
				  u64 *trans_id,
				  enum vmbus_packet_type type,
				  u32 flags);
extern int vmbus_sendpacket(struct vmbus_channel *channel,
			    void *buffer,
			    u32 bufferLen,
			    u64 requestid,
			    enum vmbus_packet_type type,
			    u32 flags);

extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
					    struct hv_page_buffer pagebuffers[],
					    u32 pagecount,
					    void *buffer,
					    u32 bufferlen,
					    u64 requestid);

extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
				     struct vmbus_packet_mpb_array *mpb,
				     u32 desc_size,
				     void *buffer,
				     u32 bufferlen,
				     u64 requestid);

extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
				      void *kbuffer,
				      u32 size,
				      struct vmbus_gpadl *gpadl);

extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
				     struct vmbus_gpadl *gpadl);

void vmbus_reset_channel_cb(struct vmbus_channel *channel);

extern int vmbus_recvpacket(struct vmbus_channel *channel,
				  void *buffer,
				  u32 bufferlen,
				  u32 *buffer_actual_len,
				  u64 *requestid);

extern int vmbus_recvpacket_raw(struct vmbus_channel *channel,
				     void *buffer,
				     u32 bufferlen,
				     u32 *buffer_actual_len,
				     u64 *requestid);
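/*
 * Illustrative GPADL lifecycle (sketch): a driver shares a buffer with the
 * host by establishing a GPADL for it, hands the resulting handle to the
 * host in a device-specific message, and tears it down when done:
 *
 *	struct vmbus_gpadl gpadl;
 *
 *	ret = vmbus_establish_gpadl(channel, buf, buf_size, &gpadl);
 *	... pass gpadl.gpadl_handle to the host ...
 *	vmbus_teardown_gpadl(channel, &gpadl);
 */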
/* Base driver object */
struct hv_driver {
	const char *name;

	/*
	 * An hvsock offer, which has a VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER
	 * channel flag, actually doesn't mean a synthetic device because the
	 * offer's if_type/if_instance can change for every new hvsock
	 * connection.
	 *
	 * However, to facilitate the notification of new-offer/rescind-offer
	 * from vmbus driver to hvsock driver, we can handle hvsock offer as
	 * a special vmbus device, and hence we need the below flag to
	 * indicate if the driver is the hvsock driver or not: we need to
	 * specially treat the hvsock offer & driver in vmbus_match().
	 */
	bool hvsock;

	/* the device type supported by this driver */
	guid_t dev_type;
	const struct hv_vmbus_device_id *id_table;

	struct device_driver driver;

	/* dynamic device GUID's */
	struct  {
		spinlock_t lock;
		struct list_head list;
	} dynids;

	int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *);
	void (*remove)(struct hv_device *dev);
	void (*shutdown)(struct hv_device *);

	int (*suspend)(struct hv_device *);
	int (*resume)(struct hv_device *);

};

/* Base device object */
struct hv_device {
	/* the device type id of this device */
	guid_t dev_type;

	/* the device instance id of this device */
	guid_t dev_instance;
	u16 vendor_id;
	u16 device_id;

	struct device device;
	/*
	 * Driver name to force a match.  Do not set directly, because core
	 * frees it.  Use driver_set_override() to set or clear it.
	 */
	const char *driver_override;

	struct vmbus_channel *channel;
	struct kset	     *channels_kset;
	struct device_dma_parameters dma_parms;
	u64 dma_mask;

	/* place holder to keep track of the dir for hv device in debugfs */
	struct dentry *debug_dir;

};


#define device_to_hv_device(d)	container_of_const(d, struct hv_device, device)

static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d)
{
	return container_of(d, struct hv_driver, driver);
}

static inline void hv_set_drvdata(struct hv_device *dev, void *data)
{
	dev_set_drvdata(&dev->device, data);
}

static inline void *hv_get_drvdata(struct hv_device *dev)
{
	return dev_get_drvdata(&dev->device);
}

struct hv_ring_buffer_debug_info {
	u32 current_interrupt_mask;
	u32 current_read_index;
	u32 current_write_index;
	u32 bytes_avail_toread;
	u32 bytes_avail_towrite;
};


int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
				struct hv_ring_buffer_debug_info *debug_info);

bool hv_ringbuffer_spinlock_busy(struct vmbus_channel *channel);

/* Vmbus interface */
#define vmbus_driver_register(driver)	\
	__vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
					 struct module *owner,
					 const char *mod_name);
void vmbus_driver_unregister(struct hv_driver *hv_driver);
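/*
 * Minimal driver skeleton (illustrative, all names hypothetical):
 *
 *	static struct hv_driver my_drv = {
 *		.name     = "my_vsc",
 *		.id_table = my_id_table,
 *		.probe    = my_probe,
 *		.remove   = my_remove,
 *	};
 *
 *	ret = vmbus_driver_register(&my_drv);
 */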
void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);

int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
			resource_size_t min, resource_size_t max,
			resource_size_t size, resource_size_t align,
			bool fb_overlap_ok);
void vmbus_free_mmio(resource_size_t start, resource_size_t size);

/*
 * GUID definitions of various offer types - services offered to the guest.
 */

/*
 * Network GUID
 * {f8615163-df3e-46c5-913f-f2d2f965ed0e}
 */
#define HV_NIC_GUID \
	.guid = GUID_INIT(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \
			  0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e)

/*
 * IDE GUID
 * {32412632-86cb-44a2-9b5c-50d1417354f5}
 */
#define HV_IDE_GUID \
	.guid = GUID_INIT(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \
			  0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)

/*
 * SCSI GUID
 * {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}
 */
#define HV_SCSI_GUID \
	.guid = GUID_INIT(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \
			  0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)

/*
 * Shutdown GUID
 * {0e0b6031-5213-4934-818b-38d90ced39db}
 */
#define HV_SHUTDOWN_GUID \
	.guid = GUID_INIT(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \
			  0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb)
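/*
 * Example (illustrative): these GUID macros are designated initializers,
 * so an id_table entry is written as:
 *
 *	static const struct hv_vmbus_device_id id_table[] = {
 *		{ HV_NIC_GUID, },
 *		{ },
 *	};
 */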
/*
 * Time Synch GUID
 * {9527E630-D0AE-497b-ADCE-E80AB0175CAF}
 */
#define HV_TS_GUID \
	.guid = GUID_INIT(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \
			  0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)

/*
 * Heartbeat GUID
 * {57164f39-9115-4e78-ab55-382f3bd5422d}
 */
#define HV_HEART_BEAT_GUID \
	.guid = GUID_INIT(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \
			  0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)

/*
 * KVP GUID
 * {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}
 */
#define HV_KVP_GUID \
	.guid = GUID_INIT(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \
			  0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6)

/*
 * Dynamic memory GUID
 * {525074dc-8985-46e2-8057-a307dc18a502}
 */
#define HV_DM_GUID \
	.guid = GUID_INIT(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \
			  0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)

/*
 * Mouse GUID
 * {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}
 */
#define HV_MOUSE_GUID \
	.guid = GUID_INIT(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \
			  0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a)

/*
 * Keyboard GUID
 * {f912ad6d-2b17-48ea-bd65-f927a61c7684}
 */
#define HV_KBD_GUID \
	.guid = GUID_INIT(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \
			  0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84)

/*
 * VSS (Backup/Restore) GUID
 */
#define HV_VSS_GUID \
	.guid = GUID_INIT(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \
			  0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40)
/*
 * Synthetic Video GUID
 * {DA0A7802-E377-4aac-8E77-0558EB1073F8}
 */
#define HV_SYNTHVID_GUID \
	.guid = GUID_INIT(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \
			  0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8)

/*
 * Synthetic FC GUID
 * {2f9bcc4a-0069-4af3-b76b-6fd0be528cda}
 */
#define HV_SYNTHFC_GUID \
	.guid = GUID_INIT(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \
			  0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda)

/*
 * Guest File Copy Service
 * {34D14BE3-DEE4-41c8-9AE7-6B174977C192}
 */

#define HV_FCOPY_GUID \
	.guid = GUID_INIT(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \
			  0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92)

/*
 * NetworkDirect. This is the guest RDMA service.
 * {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}
 */
#define HV_ND_GUID \
	.guid = GUID_INIT(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \
			  0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01)
/*
 * PCI Express Pass Through
 * {44C4F61D-4444-4400-9D52-802E27EDE19F}
 */

#define HV_PCIE_GUID \
	.guid = GUID_INIT(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \
			  0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)

/*
 * Linux doesn't support these 4 devices: the first two are for
 * Automatic Virtual Machine Activation, the third is for
 * Remote Desktop Virtualization, and the fourth is Initial
 * Machine Configuration (IMC) used only by Windows guests.
 * {f8e65716-3cb3-4a06-9a60-1889c5cccab5}
 * {3375baf4-9e15-4b30-b765-67acb10d607b}
 * {276aacf4-ac15-426c-98dd-7521ad3f01fe}
 * {c376c1c3-d276-48d2-90a9-c04748072c60}
 */

#define HV_AVMA1_GUID \
	.guid = GUID_INIT(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \
			  0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5)

#define HV_AVMA2_GUID \
	.guid = GUID_INIT(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \
			  0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b)

#define HV_RDV_GUID \
	.guid = GUID_INIT(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \
			  0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe)

#define HV_IMC_GUID \
	.guid = GUID_INIT(0xc376c1c3, 0xd276, 0x48d2, 0x90, 0xa9, \
			  0xc0, 0x47, 0x48, 0x07, 0x2c, 0x60)

/*
 * Common header for Hyper-V ICs
 */

#define ICMSGTYPE_NEGOTIATE		0
#define ICMSGTYPE_HEARTBEAT		1
#define ICMSGTYPE_KVPEXCHANGE		2
#define ICMSGTYPE_SHUTDOWN		3
#define ICMSGTYPE_TIMESYNC		4
#define ICMSGTYPE_VSS			5
#define ICMSGTYPE_FCOPY			7

#define ICMSGHDRFLAG_TRANSACTION	1
#define ICMSGHDRFLAG_REQUEST		2
#define ICMSGHDRFLAG_RESPONSE		4


/*
 * While we want to handle util services as regular devices,
 * there is only one instance of each of these services; so
 * we statically allocate the service specific state.
 */
struct hv_util_service {
	u8 *recv_buffer;
	void *channel;
	void (*util_cb)(void *);
	int (*util_init)(struct hv_util_service *);
	int (*util_init_transport)(void);
	void (*util_deinit)(void);
	int (*util_pre_suspend)(void);
	int (*util_pre_resume)(void);
};

struct vmbuspipe_hdr {
	u32 flags;
	u32 msgsize;
} __packed;

struct ic_version {
	u16 major;
	u16 minor;
} __packed;

struct icmsg_hdr {
	struct ic_version icverframe;
	u16 icmsgtype;
	struct ic_version icvermsg;
	u16 icmsgsize;
	u32 status;
	u8 ictransaction_id;
	u8 icflags;
	u8 reserved[2];
} __packed;

#define IC_VERSION_NEGOTIATION_MAX_VER_COUNT 100
#define ICMSG_HDR (sizeof(struct vmbuspipe_hdr) + sizeof(struct icmsg_hdr))
#define ICMSG_NEGOTIATE_PKT_SIZE(icframe_vercnt, icmsg_vercnt) \
	(ICMSG_HDR + sizeof(struct icmsg_negotiate) + \
	 (((icframe_vercnt) + (icmsg_vercnt)) * sizeof(struct ic_version)))

struct icmsg_negotiate {
	u16 icframe_vercnt;
	u16 icmsg_vercnt;
	u32 reserved;
	struct ic_version icversion_data[]; /* any size array */
} __packed;
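/*
 * Worked example (illustrative): with the packed layouts above, ICMSG_HDR is
 * 28 bytes (8-byte vmbuspipe_hdr + 20-byte icmsg_hdr), so a negotiate packet
 * offering 2 framework and 2 message versions is
 * ICMSG_NEGOTIATE_PKT_SIZE(2, 2) = 28 + 8 + 4 * 4 = 52 bytes.
 */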
struct shutdown_msg_data {
	u32 reason_code;
	u32 timeout_seconds;
	u32 flags;
	u8  display_message[2048];
} __packed;

struct heartbeat_msg_data {
	u64 seq_num;
	u32 reserved[8];
} __packed;

/* Time Sync IC defs */
#define ICTIMESYNCFLAG_PROBE	0
#define ICTIMESYNCFLAG_SYNC	1
#define ICTIMESYNCFLAG_SAMPLE	2

/*
 * WLTIMEDELTA is the time difference between the Windows epoch (1601-01-01)
 * and the Unix epoch (1970-01-01), in 100ns units.
 */
#ifdef __x86_64__
#define WLTIMEDELTA	116444736000000000L	/* in 100ns unit */
#else
#define WLTIMEDELTA	116444736000000000LL
#endif

struct ictimesync_data {
	u64 parenttime;
	u64 childtime;
	u64 roundtriptime;
	u8 flags;
} __packed;

struct ictimesync_ref_data {
	u64 parenttime;
	u64 vmreferencetime;
	u8 flags;
	char leapflags;
	char stratum;
	u8 reserved[3];
} __packed;

struct hyperv_service_callback {
	u8 msg_type;
	char *log_msg;
	guid_t data;
	struct vmbus_channel *channel;
	void (*callback)(void *context);
};

struct hv_dma_range {
	dma_addr_t dma;
	u32 mapping_size;
};

#define MAX_SRV_VER	0x7ffffff
extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf, u32 buflen,
				const int *fw_version, int fw_vercnt,
				const int *srv_version, int srv_vercnt,
				int *nego_fw_version, int *nego_srv_version);

void hv_process_channel_removal(struct vmbus_channel *channel);

void vmbus_setevent(struct vmbus_channel *channel);
/*
 * Negotiated version with the Host.
 */

extern __u32 vmbus_proto_version;

int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
				  const guid_t *shv_host_servie_id);
int vmbus_send_modifychannel(struct vmbus_channel *channel, u32 target_vp);
void vmbus_set_event(struct vmbus_channel *channel);

/* Get the start of the ring buffer. */
static inline void *
hv_get_ring_buffer(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_buffer->buffer;
}

/*
 * Mask off host interrupt callback notifications
 */
static inline void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 1;

	/* make sure mask update is not reordered */
	virt_mb();
}

/*
 * Re-enable host callback and return number of outstanding bytes
 */
static inline u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 0;

	/* make sure mask update is not reordered */
	virt_mb();

	/*
	 * Now check to see if the ring buffer is still empty.
	 * If it is not, we raced and we need to process new
	 * incoming messages.
	 */
	return hv_get_bytes_to_read(rbi);
}

/*
 * An API to support in-place processing of incoming VMBUS packets.
 */

/* Get data payload associated with descriptor */
static inline void *hv_pkt_data(const struct vmpacket_descriptor *desc)
{
	return (void *)((unsigned long)desc + (desc->offset8 << 3));
}

/* Get data size associated with descriptor */
static inline u32 hv_pkt_datalen(const struct vmpacket_descriptor *desc)
{
	return (desc->len8 << 3) - (desc->offset8 << 3);
}

/* Get packet length associated with descriptor */
static inline u32 hv_pkt_len(const struct vmpacket_descriptor *desc)
{
	return desc->len8 << 3;
}

struct vmpacket_descriptor *
hv_pkt_iter_first(struct vmbus_channel *channel);

struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
		   const struct vmpacket_descriptor *pkt);

void hv_pkt_iter_close(struct vmbus_channel *channel);

static inline struct vmpacket_descriptor *
hv_pkt_iter_next(struct vmbus_channel *channel,
		 const struct vmpacket_descriptor *pkt)
{
	struct vmpacket_descriptor *nxt;

	nxt = __hv_pkt_iter_next(channel, pkt);
	if (!nxt)
		hv_pkt_iter_close(channel);

	return nxt;
}

#define foreach_vmbus_pkt(pkt, channel) \
	for (pkt = hv_pkt_iter_first(channel); pkt; \
	    pkt = hv_pkt_iter_next(channel, pkt))
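/*
 * Example (illustrative sketch): a channel callback typically walks all
 * pending packets in place:
 *
 *	struct vmpacket_descriptor *desc;
 *
 *	foreach_vmbus_pkt(desc, channel) {
 *		void *data = hv_pkt_data(desc);
 *		u32 len = hv_pkt_datalen(desc);
 *		...
 *	}
 */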
/*
 * Interface for passing data between SR-IOV PF and VF drivers. The VF driver
 * sends requests to read and write blocks. Each block must be 128 bytes or
 * smaller. Optionally, the VF driver can register a callback function which
 * will be invoked when the host says that one or more of the first 64 block
 * IDs is "invalid" which means that the VF driver should reread them.
 */
#define HV_CONFIG_BLOCK_SIZE_MAX 128

int hyperv_read_cfg_blk(struct pci_dev *dev, void *buf, unsigned int buf_len,
			unsigned int block_id, unsigned int *bytes_returned);
int hyperv_write_cfg_blk(struct pci_dev *dev, void *buf, unsigned int len,
			 unsigned int block_id);
int hyperv_reg_block_invalidate(struct pci_dev *dev, void *context,
				void (*block_invalidate)(void *context,
							 u64 block_mask));

struct hyperv_pci_block_ops {
	int (*read_block)(struct pci_dev *dev, void *buf, unsigned int buf_len,
			  unsigned int block_id, unsigned int *bytes_returned);
	int (*write_block)(struct pci_dev *dev, void *buf, unsigned int len,
			   unsigned int block_id);
	int (*reg_blk_invalidate)(struct pci_dev *dev, void *context,
				  void (*block_invalidate)(void *context,
							   u64 block_mask));
};

extern struct hyperv_pci_block_ops hvpci_block_ops;

static inline unsigned long virt_to_hvpfn(void *addr)
{
	phys_addr_t paddr;

	if (is_vmalloc_addr(addr))
		paddr = page_to_phys(vmalloc_to_page(addr)) +
				     offset_in_page(addr);
	else
		paddr = __pa(addr);

	return  paddr >> HV_HYP_PAGE_SHIFT;
}

#define NR_HV_HYP_PAGES_IN_PAGE	(PAGE_SIZE / HV_HYP_PAGE_SIZE)
#define offset_in_hvpage(ptr)	((unsigned long)(ptr) & ~HV_HYP_PAGE_MASK)
#define HVPFN_UP(x)	(((x) + HV_HYP_PAGE_SIZE-1) >> HV_HYP_PAGE_SHIFT)
#define HVPFN_DOWN(x)	((x) >> HV_HYP_PAGE_SHIFT)
#define page_to_hvpfn(page)	(page_to_pfn(page) * NR_HV_HYP_PAGES_IN_PAGE)

#endif /* _HYPERV_H */