/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/interrupt.h>

#include "hyperv_vmbus.h"

#define NUM_PAGES_SPANNED(addr, len) \
((PAGE_ALIGN(addr + len) >> PAGE_SHIFT) - (addr >> PAGE_SHIFT))
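
/*
 * Worked example (illustrative only, assuming 4 KiB pages): a buffer at
 * addr = 0x1ff0 with len = 0x20 ends at 0x2010, so PAGE_ALIGN(addr + len)
 * is 0x3000 and the macro yields (0x3000 >> 12) - (0x1ff0 >> 12) = 3 - 1 = 2
 * pages spanned, even though len is far smaller than a page.
 */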

/*
 * vmbus_setevent - Trigger an event notification on the specified
 * channel.
 */
void vmbus_setevent(struct vmbus_channel *channel)
{
        struct hv_monitor_page *monitorpage;

        /*
         * For channels marked as in "low latency" mode,
         * bypass the monitor page mechanism.
         */
        if (channel->offermsg.monitor_allocated && !channel->low_latency) {
                vmbus_send_interrupt(channel->offermsg.child_relid);

                /* Get the child to parent monitor page */
                monitorpage = vmbus_connection.monitor_pages[1];

                sync_set_bit(channel->monitor_bit,
                        (unsigned long *)&monitorpage->trigger_group
                                        [channel->monitor_grp].pending);

        } else {
                vmbus_set_event(channel);
        }
}
EXPORT_SYMBOL_GPL(vmbus_setevent);

/*
 * vmbus_open - Open the specified channel.
 */
int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
               u32 recv_ringbuffer_size, void *userdata, u32 userdatalen,
               void (*onchannelcallback)(void *context), void *context)
{
        struct vmbus_channel_open_channel *open_msg;
        struct vmbus_channel_msginfo *open_info = NULL;
        unsigned long flags;
        int ret, err = 0;
        struct page *page;

        if (send_ringbuffer_size % PAGE_SIZE ||
            recv_ringbuffer_size % PAGE_SIZE)
                return -EINVAL;

        spin_lock_irqsave(&newchannel->lock, flags);
        if (newchannel->state == CHANNEL_OPEN_STATE) {
                newchannel->state = CHANNEL_OPENING_STATE;
        } else {
                spin_unlock_irqrestore(&newchannel->lock, flags);
                return -EINVAL;
        }
        spin_unlock_irqrestore(&newchannel->lock, flags);

        newchannel->onchannel_callback = onchannelcallback;
        newchannel->channel_callback_context = context;

        /* Allocate the ring buffer */
        page = alloc_pages_node(cpu_to_node(newchannel->target_cpu),
                                GFP_KERNEL|__GFP_ZERO,
                                get_order(send_ringbuffer_size +
                                          recv_ringbuffer_size));

        if (!page)
                page = alloc_pages(GFP_KERNEL|__GFP_ZERO,
                                   get_order(send_ringbuffer_size +
                                             recv_ringbuffer_size));

        if (!page) {
                err = -ENOMEM;
                goto error_set_chnstate;
        }

        newchannel->ringbuffer_pages = page_address(page);
        newchannel->ringbuffer_pagecount = (send_ringbuffer_size +
                                            recv_ringbuffer_size) >> PAGE_SHIFT;

        ret = hv_ringbuffer_init(&newchannel->outbound, page,
                                 send_ringbuffer_size >> PAGE_SHIFT);

        if (ret != 0) {
                err = ret;
                goto error_free_pages;
        }

        ret = hv_ringbuffer_init(&newchannel->inbound,
                                 &page[send_ringbuffer_size >> PAGE_SHIFT],
                                 recv_ringbuffer_size >> PAGE_SHIFT);
        if (ret != 0) {
                err = ret;
                goto error_free_pages;
        }

        /* Establish the gpadl for the ring buffer */
        newchannel->ringbuffer_gpadlhandle = 0;

        ret = vmbus_establish_gpadl(newchannel,
                                    page_address(page),
                                    send_ringbuffer_size +
                                    recv_ringbuffer_size,
                                    &newchannel->ringbuffer_gpadlhandle);

        if (ret != 0) {
                err = ret;
                goto error_free_pages;
        }

        /* Create and init the channel open message */
        open_info = kmalloc(sizeof(*open_info) +
                            sizeof(struct vmbus_channel_open_channel),
                            GFP_KERNEL);
        if (!open_info) {
                err = -ENOMEM;
                goto error_free_gpadl;
        }

        init_completion(&open_info->waitevent);
        open_info->waiting_channel = newchannel;

        open_msg = (struct vmbus_channel_open_channel *)open_info->msg;
        open_msg->header.msgtype = CHANNELMSG_OPENCHANNEL;
        open_msg->openid = newchannel->offermsg.child_relid;
        open_msg->child_relid = newchannel->offermsg.child_relid;
        open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
        open_msg->downstream_ringbuffer_pageoffset = send_ringbuffer_size >>
                                                     PAGE_SHIFT;
        open_msg->target_vp = newchannel->target_vp;

        if (userdatalen > MAX_USER_DEFINED_BYTES) {
                err = -EINVAL;
                goto error_free_gpadl;
        }

        if (userdatalen)
                memcpy(open_msg->userdata, userdata, userdatalen);

        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_add_tail(&open_info->msglistentry,
                      &vmbus_connection.chn_msg_list);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

        if (newchannel->rescind) {
                err = -ENODEV;
                goto error_free_gpadl;
        }

        ret = vmbus_post_msg(open_msg,
                             sizeof(struct vmbus_channel_open_channel), true);

        if (ret != 0) {
                err = ret;
                goto error_clean_msglist;
        }
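
        /*
         * Wait for the host's CHANNELMSG_OPENCHANNEL_RESULT.  The reply is
         * matched to this request through vmbus_connection.chn_msg_list,
         * and the channel message handler completes open_info->waitevent
         * when the result arrives.
         */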
        wait_for_completion(&open_info->waitevent);

        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_del(&open_info->msglistentry);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

        if (newchannel->rescind) {
                err = -ENODEV;
                goto error_free_gpadl;
        }

        if (open_info->response.open_result.status) {
                err = -EAGAIN;
                goto error_free_gpadl;
        }

        newchannel->state = CHANNEL_OPENED_STATE;
        kfree(open_info);
        return 0;

error_clean_msglist:
        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_del(&open_info->msglistentry);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

error_free_gpadl:
        vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);
        kfree(open_info);
error_free_pages:
        hv_ringbuffer_cleanup(&newchannel->outbound);
        hv_ringbuffer_cleanup(&newchannel->inbound);
        __free_pages(page,
                     get_order(send_ringbuffer_size + recv_ringbuffer_size));
error_set_chnstate:
        newchannel->state = CHANNEL_OPEN_STATE;
        return err;
}
EXPORT_SYMBOL_GPL(vmbus_open);
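
/*
 * Usage sketch (illustrative only): a typical probe() path opens the
 * channel with a page-aligned ring buffer on each side and a private
 * context for the callback.  The sizes and the hv_mydev_on_channel_cb
 * callback below are hypothetical, not part of this file.
 *
 *      static void hv_mydev_on_channel_cb(void *context)
 *      {
 *              struct hv_device *dev = context;
 *              ... read packets with vmbus_recvpacket(dev->channel, ...) ...
 *      }
 *
 *      ret = vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE,
 *                       NULL, 0, hv_mydev_on_channel_cb, dev);
 *      if (ret)
 *              return ret;
 */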

/* Used for Hyper-V sockets: a guest client's connect() to the host */
int vmbus_send_tl_connect_request(const uuid_le *shv_guest_service_id,
                                  const uuid_le *shv_host_service_id)
{
        struct vmbus_channel_tl_connect_request conn_msg;

        memset(&conn_msg, 0, sizeof(conn_msg));
        conn_msg.header.msgtype = CHANNELMSG_TL_CONNECT_REQUEST;
        conn_msg.guest_endpoint_id = *shv_guest_service_id;
        conn_msg.host_service_id = *shv_host_service_id;

        return vmbus_post_msg(&conn_msg, sizeof(conn_msg), true);
}
EXPORT_SYMBOL_GPL(vmbus_send_tl_connect_request);

/*
 * create_gpadl_header - Creates a gpadl for the specified buffer
 */
static int create_gpadl_header(void *kbuffer, u32 size,
                               struct vmbus_channel_msginfo **msginfo)
{
        int i;
        int pagecount;
        struct vmbus_channel_gpadl_header *gpadl_header;
        struct vmbus_channel_gpadl_body *gpadl_body;
        struct vmbus_channel_msginfo *msgheader;
        struct vmbus_channel_msginfo *msgbody = NULL;
        u32 msgsize;

        int pfnsum, pfncount, pfnleft, pfncurr, pfnsize;

        pagecount = size >> PAGE_SHIFT;

        /* do we need a gpadl body message? */
        pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
                  sizeof(struct vmbus_channel_gpadl_header) -
                  sizeof(struct gpa_range);
        pfncount = pfnsize / sizeof(u64);

        if (pagecount > pfncount) {
                /* we need a gpadl body */
                /* fill in the header */
                msgsize = sizeof(struct vmbus_channel_msginfo) +
                          sizeof(struct vmbus_channel_gpadl_header) +
                          sizeof(struct gpa_range) + pfncount * sizeof(u64);
                msgheader = kzalloc(msgsize, GFP_KERNEL);
                if (!msgheader)
                        goto nomem;

                INIT_LIST_HEAD(&msgheader->submsglist);
                msgheader->msgsize = msgsize;

                gpadl_header = (struct vmbus_channel_gpadl_header *)
                        msgheader->msg;
                gpadl_header->rangecount = 1;
                gpadl_header->range_buflen = sizeof(struct gpa_range) +
                                         pagecount * sizeof(u64);
                gpadl_header->range[0].byte_offset = 0;
                gpadl_header->range[0].byte_count = size;
                for (i = 0; i < pfncount; i++)
                        gpadl_header->range[0].pfn_array[i] = slow_virt_to_phys(
                                kbuffer + PAGE_SIZE * i) >> PAGE_SHIFT;
                *msginfo = msgheader;

                pfnsum = pfncount;
                pfnleft = pagecount - pfncount;

                /* how many pfns can we fit in a body message? */
                pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
                          sizeof(struct vmbus_channel_gpadl_body);
                pfncount = pfnsize / sizeof(u64);

                /* fill in the body */
                while (pfnleft) {
                        if (pfnleft > pfncount)
                                pfncurr = pfncount;
                        else
                                pfncurr = pfnleft;

                        msgsize = sizeof(struct vmbus_channel_msginfo) +
                                  sizeof(struct vmbus_channel_gpadl_body) +
                                  pfncurr * sizeof(u64);
                        msgbody = kzalloc(msgsize, GFP_KERNEL);

                        if (!msgbody) {
                                struct vmbus_channel_msginfo *pos = NULL;
                                struct vmbus_channel_msginfo *tmp = NULL;
                                /*
                                 * Free up all the allocated messages.
                                 */
                                list_for_each_entry_safe(pos, tmp,
                                        &msgheader->submsglist,
                                        msglistentry) {

                                        list_del(&pos->msglistentry);
                                        kfree(pos);
                                }

                                goto nomem;
                        }

                        msgbody->msgsize = msgsize;
                        gpadl_body =
                                (struct vmbus_channel_gpadl_body *)msgbody->msg;

                        /*
                         * Gpadl is u32, and we are using a pointer which
                         * could be 64-bit.  This is governed by the
                         * guest/host protocol, so the hypervisor guarantees
                         * that this is ok.
                         */
                        for (i = 0; i < pfncurr; i++)
                                gpadl_body->pfn[i] = slow_virt_to_phys(
                                        kbuffer + PAGE_SIZE * (pfnsum + i)) >>
                                        PAGE_SHIFT;

                        /* add to msg header */
                        list_add_tail(&msgbody->msglistentry,
                                      &msgheader->submsglist);
                        pfnsum += pfncurr;
                        pfnleft -= pfncurr;
                }
        } else {
                /* everything fits in a header */
                msgsize = sizeof(struct vmbus_channel_msginfo) +
                          sizeof(struct vmbus_channel_gpadl_header) +
                          sizeof(struct gpa_range) + pagecount * sizeof(u64);
                msgheader = kzalloc(msgsize, GFP_KERNEL);
                if (msgheader == NULL)
                        goto nomem;

                INIT_LIST_HEAD(&msgheader->submsglist);
                msgheader->msgsize = msgsize;

                gpadl_header = (struct vmbus_channel_gpadl_header *)
                        msgheader->msg;
                gpadl_header->rangecount = 1;
                gpadl_header->range_buflen = sizeof(struct gpa_range) +
                                         pagecount * sizeof(u64);
                gpadl_header->range[0].byte_offset = 0;
                gpadl_header->range[0].byte_count = size;
                for (i = 0; i < pagecount; i++)
                        gpadl_header->range[0].pfn_array[i] = slow_virt_to_phys(
                                kbuffer + PAGE_SIZE * i) >> PAGE_SHIFT;

                *msginfo = msgheader;
        }

        return 0;
nomem:
        kfree(msgheader);
        kfree(msgbody);
        return -ENOMEM;
}
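
/*
 * Sizing sketch (illustrative, not normative): with 4 KiB pages, a GPADL
 * header message can carry only a few dozen PFNs before it hits
 * MAX_SIZE_CHANNEL_MESSAGE, so describing, say, a 1 MiB buffer (256 PFNs)
 * produces one CHANNELMSG_GPADL_HEADER followed by several
 * CHANNELMSG_GPADL_BODY messages, chained through msgheader->submsglist.
 */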

/*
 * vmbus_establish_gpadl - Establish a GPADL for the specified buffer
 *
 * @channel: a channel
 * @kbuffer: from kmalloc or vmalloc
 * @size: page-size multiple
 * @gpadl_handle: on success, filled in with the handle of the new GPADL
 */
int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
                          u32 size, u32 *gpadl_handle)
{
        struct vmbus_channel_gpadl_header *gpadlmsg;
        struct vmbus_channel_gpadl_body *gpadl_body;
        struct vmbus_channel_msginfo *msginfo = NULL;
        struct vmbus_channel_msginfo *submsginfo, *tmp;
        struct list_head *curr;
        u32 next_gpadl_handle;
        unsigned long flags;
        int ret = 0;

        next_gpadl_handle =
                (atomic_inc_return(&vmbus_connection.next_gpadl_handle) - 1);

        ret = create_gpadl_header(kbuffer, size, &msginfo);
        if (ret)
                return ret;

        init_completion(&msginfo->waitevent);
        msginfo->waiting_channel = channel;

        gpadlmsg = (struct vmbus_channel_gpadl_header *)msginfo->msg;
        gpadlmsg->header.msgtype = CHANNELMSG_GPADL_HEADER;
        gpadlmsg->child_relid = channel->offermsg.child_relid;
        gpadlmsg->gpadl = next_gpadl_handle;

        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_add_tail(&msginfo->msglistentry,
                      &vmbus_connection.chn_msg_list);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

        if (channel->rescind) {
                ret = -ENODEV;
                goto cleanup;
        }

        ret = vmbus_post_msg(gpadlmsg, msginfo->msgsize -
                             sizeof(*msginfo), true);
        if (ret != 0)
                goto cleanup;

        list_for_each(curr, &msginfo->submsglist) {
                submsginfo = (struct vmbus_channel_msginfo *)curr;
                gpadl_body =
                        (struct vmbus_channel_gpadl_body *)submsginfo->msg;

                gpadl_body->header.msgtype =
                        CHANNELMSG_GPADL_BODY;
                gpadl_body->gpadl = next_gpadl_handle;

                ret = vmbus_post_msg(gpadl_body,
                                     submsginfo->msgsize - sizeof(*submsginfo),
                                     true);
                if (ret != 0)
                        goto cleanup;

        }
        wait_for_completion(&msginfo->waitevent);

        if (channel->rescind) {
                ret = -ENODEV;
                goto cleanup;
        }

        /* At this point, we have received the gpadl-created message */
        *gpadl_handle = gpadlmsg->gpadl;

cleanup:
        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_del(&msginfo->msglistentry);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
        list_for_each_entry_safe(submsginfo, tmp, &msginfo->submsglist,
                                 msglistentry) {
                kfree(submsginfo);
        }

        kfree(msginfo);
        return ret;
}
EXPORT_SYMBOL_GPL(vmbus_establish_gpadl);
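
/*
 * Usage sketch (illustrative only): a driver that wants the host to see a
 * guest buffer first establishes a GPADL for it, passes the returned handle
 * to the host (e.g. in a vmbus packet), and tears it down when done.  The
 * buf/handle names are hypothetical.
 *
 *      u32 handle;
 *      void *buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
 *
 *      ret = vmbus_establish_gpadl(channel, buf, 4 * PAGE_SIZE, &handle);
 *      if (ret) {
 *              free_pages((unsigned long)buf, 2);
 *              return ret;
 *      }
 *      ...
 *      vmbus_teardown_gpadl(channel, handle);
 */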

/*
 * vmbus_teardown_gpadl - Teardown the specified GPADL handle
 */
int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
{
        struct vmbus_channel_gpadl_teardown *msg;
        struct vmbus_channel_msginfo *info;
        unsigned long flags;
        int ret;

        info = kmalloc(sizeof(*info) +
                       sizeof(struct vmbus_channel_gpadl_teardown), GFP_KERNEL);
        if (!info)
                return -ENOMEM;

        init_completion(&info->waitevent);
        info->waiting_channel = channel;

        msg = (struct vmbus_channel_gpadl_teardown *)info->msg;

        msg->header.msgtype = CHANNELMSG_GPADL_TEARDOWN;
        msg->child_relid = channel->offermsg.child_relid;
        msg->gpadl = gpadl_handle;

        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_add_tail(&info->msglistentry,
                      &vmbus_connection.chn_msg_list);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

        if (channel->rescind)
                goto post_msg_err;

        ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_gpadl_teardown),
                             true);

        if (ret)
                goto post_msg_err;

        wait_for_completion(&info->waitevent);

post_msg_err:
        /*
         * If the channel has been rescinded, we will be awakened by the
         * rescind handler; set the error code to zero so we don't leak memory.
         */
        if (channel->rescind)
                ret = 0;

        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_del(&info->msglistentry);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

        kfree(info);
        return ret;
}
EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl);

static void reset_channel_cb(void *arg)
{
        struct vmbus_channel *channel = arg;

        channel->onchannel_callback = NULL;
}

static int vmbus_close_internal(struct vmbus_channel *channel)
{
        struct vmbus_channel_close_channel *msg;
        int ret;

        /*
         * vmbus_on_event(), running in the per-channel tasklet, can race
         * with vmbus_close_internal() in the case of an SMP guest, e.g.,
         * when the former is accessing channel->inbound.ring_buffer, the
         * latter could be freeing the ring_buffer pages, so here we must
         * stop it first.
         */
        tasklet_disable(&channel->callback_event);

        /*
         * In case a device driver's probe() fails (e.g.,
         * util_probe() -> vmbus_open() returns -ENOMEM) and the device is
         * rescinded later (e.g., we dynamically disable an Integrated Service
         * in Hyper-V Manager), the driver's remove() invokes vmbus_close():
         * here we should skip most of the below cleanup work.
         */
        if (channel->state != CHANNEL_OPENED_STATE) {
                ret = -EINVAL;
                goto out;
        }

        channel->state = CHANNEL_OPEN_STATE;
        channel->sc_creation_callback = NULL;
        /* Stop the callback asap */
        if (channel->target_cpu != get_cpu()) {
                put_cpu();
                smp_call_function_single(channel->target_cpu, reset_channel_cb,
                                         channel, true);
        } else {
                reset_channel_cb(channel);
                put_cpu();
        }

        /* Send a closing message */

        msg = &channel->close_msg.msg;

        msg->header.msgtype = CHANNELMSG_CLOSECHANNEL;
        msg->child_relid = channel->offermsg.child_relid;

        ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel),
                             true);

        if (ret) {
                pr_err("Close failed: close post msg return is %d\n", ret);
                /*
                 * If we failed to post the close msg,
                 * it is perhaps better to leak memory.
                 */
                goto out;
        }

        /* Tear down the gpadl for the channel's ring buffer */
        if (channel->ringbuffer_gpadlhandle) {
                ret = vmbus_teardown_gpadl(channel,
                                           channel->ringbuffer_gpadlhandle);
                if (ret) {
                        pr_err("Close failed: teardown gpadl return %d\n", ret);
                        /*
                         * If we failed to teardown gpadl,
                         * it is perhaps better to leak memory.
                         */
                        goto out;
                }
        }

        /* Cleanup the ring buffers for this channel */
        hv_ringbuffer_cleanup(&channel->outbound);
        hv_ringbuffer_cleanup(&channel->inbound);

        free_pages((unsigned long)channel->ringbuffer_pages,
                   get_order(channel->ringbuffer_pagecount * PAGE_SIZE));

out:
        /* re-enable tasklet for use on re-open */
        tasklet_enable(&channel->callback_event);
        return ret;
}

/*
 * vmbus_close - Close the specified channel
 */
void vmbus_close(struct vmbus_channel *channel)
{
        struct list_head *cur, *tmp;
        struct vmbus_channel *cur_channel;

        if (channel->primary_channel != NULL) {
                /*
                 * We will only close sub-channels when
                 * the primary is closed.
                 */
                return;
        }
        /*
         * Close all the sub-channels first and then close the
         * primary channel.
         */
        list_for_each_safe(cur, tmp, &channel->sc_list) {
                cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
                vmbus_close_internal(cur_channel);
                if (cur_channel->rescind) {
                        mutex_lock(&vmbus_connection.channel_mutex);
                        hv_process_channel_removal(cur_channel,
                                           cur_channel->offermsg.child_relid);
                        mutex_unlock(&vmbus_connection.channel_mutex);
                }
        }
        /*
         * Now close the primary.
         */
        vmbus_close_internal(channel);
}
EXPORT_SYMBOL_GPL(vmbus_close);

/**
 * vmbus_sendpacket() - Send the specified buffer on the given channel
 * @channel: Pointer to vmbus_channel structure.
 * @buffer: Pointer to the buffer containing the data to send.
 * @bufferlen: Length of the data in @buffer.
 * @requestid: Identifier of the request
 * @type: Type of packet that is being sent, e.g. negotiate, time
 *        packet etc.
 * @flags: 0 or VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED
 *
 * Sends data in @buffer directly to Hyper-V via the vmbus.
 * This will send the data unparsed to Hyper-V.
 *
 * Mainly used by Hyper-V drivers.
 */
int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
                     u32 bufferlen, u64 requestid,
                     enum vmbus_packet_type type, u32 flags)
{
        struct vmpacket_descriptor desc;
        u32 packetlen = sizeof(struct vmpacket_descriptor) + bufferlen;
        u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
        struct kvec bufferlist[3];
        u64 aligned_data = 0;
        int num_vecs = ((bufferlen != 0) ? 3 : 1);

        /* Setup the descriptor */
        desc.type = type; /* VmbusPacketTypeDataInBand; */
        desc.flags = flags; /* VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED; */
        /* in 8-byte granularity */
        desc.offset8 = sizeof(struct vmpacket_descriptor) >> 3;
        desc.len8 = (u16)(packetlen_aligned >> 3);
        desc.trans_id = requestid;

        bufferlist[0].iov_base = &desc;
        bufferlist[0].iov_len = sizeof(struct vmpacket_descriptor);
        bufferlist[1].iov_base = buffer;
        bufferlist[1].iov_len = bufferlen;
        bufferlist[2].iov_base = &aligned_data;
        bufferlist[2].iov_len = (packetlen_aligned - packetlen);

        return hv_ringbuffer_write(channel, bufferlist, num_vecs);
}
EXPORT_SYMBOL(vmbus_sendpacket);
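
/*
 * Usage sketch (illustrative only): sending an in-band request and asking
 * the host for a completion.  The mydev_request/req names are hypothetical.
 *
 *      struct mydev_request req = { ... };
 *
 *      ret = vmbus_sendpacket(channel, &req, sizeof(req),
 *                             (u64)(unsigned long)&req,
 *                             VM_PKT_DATA_INBAND,
 *                             VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 *
 * The requestid comes back in the trans_id of the host's VM_PKT_COMP
 * packet, which is how drivers match completions to requests.
 */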

/*
 * vmbus_sendpacket_pagebuffer - Send a range of single-page buffers
 * using a GPADL Direct packet type; a completion is always requested
 * from the host.  This is useful for sending batched data.
 */
int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
                                struct hv_page_buffer pagebuffers[],
                                u32 pagecount, void *buffer, u32 bufferlen,
                                u64 requestid)
{
        int i;
        struct vmbus_channel_packet_page_buffer desc;
        u32 descsize;
        u32 packetlen;
        u32 packetlen_aligned;
        struct kvec bufferlist[3];
        u64 aligned_data = 0;

        if (pagecount > MAX_PAGE_BUFFER_COUNT)
                return -EINVAL;

        /*
         * Adjust the size down since vmbus_channel_packet_page_buffer is the
         * largest size we support
         */
        descsize = sizeof(struct vmbus_channel_packet_page_buffer) -
                   ((MAX_PAGE_BUFFER_COUNT - pagecount) *
                   sizeof(struct hv_page_buffer));
        packetlen = descsize + bufferlen;
        packetlen_aligned = ALIGN(packetlen, sizeof(u64));

        /* Setup the descriptor */
        desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
        desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
        desc.dataoffset8 = descsize >> 3; /* in 8-byte granularity */
        desc.length8 = (u16)(packetlen_aligned >> 3);
        desc.transactionid = requestid;
        desc.rangecount = pagecount;

        for (i = 0; i < pagecount; i++) {
                desc.range[i].len = pagebuffers[i].len;
                desc.range[i].offset = pagebuffers[i].offset;
                desc.range[i].pfn = pagebuffers[i].pfn;
        }

        bufferlist[0].iov_base = &desc;
        bufferlist[0].iov_len = descsize;
        bufferlist[1].iov_base = buffer;
        bufferlist[1].iov_len = bufferlen;
        bufferlist[2].iov_base = &aligned_data;
        bufferlist[2].iov_len = (packetlen_aligned - packetlen);

        return hv_ringbuffer_write(channel, bufferlist, 3);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);

/*
 * vmbus_sendpacket_mpb_desc - Send a multi-page buffer packet
 * using a GPADL Direct packet type.
 * The buffer includes the vmbus descriptor.
 */
int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
                              struct vmbus_packet_mpb_array *desc,
                              u32 desc_size,
                              void *buffer, u32 bufferlen, u64 requestid)
{
        u32 packetlen;
        u32 packetlen_aligned;
        struct kvec bufferlist[3];
        u64 aligned_data = 0;

        packetlen = desc_size + bufferlen;
        packetlen_aligned = ALIGN(packetlen, sizeof(u64));

        /* Setup the descriptor */
        desc->type = VM_PKT_DATA_USING_GPA_DIRECT;
        desc->flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
        desc->dataoffset8 = desc_size >> 3; /* in 8-byte granularity */
        desc->length8 = (u16)(packetlen_aligned >> 3);
        desc->transactionid = requestid;
        desc->rangecount = 1;

        bufferlist[0].iov_base = desc;
        bufferlist[0].iov_len = desc_size;
        bufferlist[1].iov_base = buffer;
        bufferlist[1].iov_len = bufferlen;
        bufferlist[2].iov_base = &aligned_data;
        bufferlist[2].iov_len = (packetlen_aligned - packetlen);

        return hv_ringbuffer_write(channel, bufferlist, 3);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);
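
/*
 * Usage sketch (illustrative only): describing two guest pages to the host
 * as page buffers.  The pb/data/datalen/hdr names are hypothetical.
 *
 *      struct hv_page_buffer pb[2];
 *
 *      pb[0].pfn = virt_to_phys(data) >> PAGE_SHIFT;
 *      pb[0].offset = offset_in_page(data);
 *      pb[0].len = PAGE_SIZE - pb[0].offset;
 *      pb[1].pfn = pb[0].pfn + 1;
 *      pb[1].offset = 0;
 *      pb[1].len = datalen - pb[0].len;
 *
 *      ret = vmbus_sendpacket_pagebuffer(channel, pb, 2, &hdr, sizeof(hdr),
 *                                        (u64)(unsigned long)&hdr);
 */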

/**
 * vmbus_recvpacket() - Retrieve the user packet on the specified channel
 * @channel: Pointer to vmbus_channel structure.
 * @buffer: Pointer to the buffer you want to receive the data into.
 * @bufferlen: Maximum size of what the buffer can hold.
 * @buffer_actual_len: The actual size of the data after it was received.
 * @requestid: Identifier of the request
 *
 * Receives directly from the Hyper-V vmbus and puts the data it received
 * into @buffer.  This will receive the data unparsed from Hyper-V.
 *
 * Mainly used by Hyper-V drivers.
 */
static inline int
__vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
                   u32 bufferlen, u32 *buffer_actual_len, u64 *requestid,
                   bool raw)
{
        return hv_ringbuffer_read(channel, buffer, bufferlen,
                                  buffer_actual_len, requestid, raw);
}

int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
                     u32 bufferlen, u32 *buffer_actual_len,
                     u64 *requestid)
{
        return __vmbus_recvpacket(channel, buffer, bufferlen,
                                  buffer_actual_len, requestid, false);
}
EXPORT_SYMBOL(vmbus_recvpacket);

/*
 * vmbus_recvpacket_raw - Retrieve the raw packet on the specified channel
 */
int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
                         u32 bufferlen, u32 *buffer_actual_len,
                         u64 *requestid)
{
        return __vmbus_recvpacket(channel, buffer, bufferlen,
                                  buffer_actual_len, requestid, true);
}
EXPORT_SYMBOL_GPL(vmbus_recvpacket_raw);
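
/*
 * Usage sketch (illustrative only): draining the channel from the
 * onchannel_callback.  The buffer size and mydev names are hypothetical.
 *
 *      static void mydev_on_channel_cb(void *context)
 *      {
 *              struct vmbus_channel *channel = context;
 *              u8 buf[256];
 *              u32 len;
 *              u64 req_id;
 *
 *              while (vmbus_recvpacket(channel, buf, sizeof(buf),
 *                                      &len, &req_id) == 0 && len > 0)
 *                      mydev_handle_packet(buf, len, req_id);
 *      }
 */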