/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/interrupt.h>

#include "hyperv_vmbus.h"

#define NUM_PAGES_SPANNED(addr, len) \
        ((PAGE_ALIGN(addr + len) >> PAGE_SHIFT) - (addr >> PAGE_SHIFT))

/*
 * vmbus_setevent - Trigger an event notification on the specified
 * channel.
 */
void vmbus_setevent(struct vmbus_channel *channel)
{
        struct hv_monitor_page *monitorpage;

        /*
         * For channels marked as in "low latency" mode
         * bypass the monitor page mechanism.
         */
        if (channel->offermsg.monitor_allocated && !channel->low_latency) {
                vmbus_send_interrupt(channel->offermsg.child_relid);

                /* Get the child to parent monitor page */
                monitorpage = vmbus_connection.monitor_pages[1];

                sync_set_bit(channel->monitor_bit,
                        (unsigned long *)&monitorpage->trigger_group
                                        [channel->monitor_grp].pending);

        } else {
                vmbus_set_event(channel);
        }
}
EXPORT_SYMBOL_GPL(vmbus_setevent);

/*
 * vmbus_open - Open the specified channel.
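 * Returns 0 on success; on failure the channel is returned to
 * CHANNEL_OPEN_STATE and a negative errno is returned.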
 */
int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
               u32 recv_ringbuffer_size, void *userdata, u32 userdatalen,
               void (*onchannelcallback)(void *context), void *context)
{
        struct vmbus_channel_open_channel *open_msg;
        struct vmbus_channel_msginfo *open_info = NULL;
        unsigned long flags;
        int ret, err = 0;
        struct page *page;

        if (send_ringbuffer_size % PAGE_SIZE ||
            recv_ringbuffer_size % PAGE_SIZE)
                return -EINVAL;

        spin_lock_irqsave(&newchannel->lock, flags);
        if (newchannel->state == CHANNEL_OPEN_STATE) {
                newchannel->state = CHANNEL_OPENING_STATE;
        } else {
                spin_unlock_irqrestore(&newchannel->lock, flags);
                return -EINVAL;
        }
        spin_unlock_irqrestore(&newchannel->lock, flags);

        newchannel->onchannel_callback = onchannelcallback;
        newchannel->channel_callback_context = context;

        /* Allocate the ring buffer */
        page = alloc_pages_node(cpu_to_node(newchannel->target_cpu),
                                GFP_KERNEL|__GFP_ZERO,
                                get_order(send_ringbuffer_size +
                                          recv_ringbuffer_size));

        if (!page)
                page = alloc_pages(GFP_KERNEL|__GFP_ZERO,
                                   get_order(send_ringbuffer_size +
                                             recv_ringbuffer_size));

        if (!page) {
                err = -ENOMEM;
                goto error_set_chnstate;
        }

        newchannel->ringbuffer_pages = page_address(page);
        newchannel->ringbuffer_pagecount = (send_ringbuffer_size +
                                            recv_ringbuffer_size) >> PAGE_SHIFT;

        ret = hv_ringbuffer_init(&newchannel->outbound, page,
                                 send_ringbuffer_size >> PAGE_SHIFT);

        if (ret != 0) {
                err = ret;
                goto error_free_pages;
        }

        ret = hv_ringbuffer_init(&newchannel->inbound,
                                 &page[send_ringbuffer_size >> PAGE_SHIFT],
                                 recv_ringbuffer_size >> PAGE_SHIFT);
        if (ret != 0) {
                err = ret;
                goto error_free_pages;
        }


        /* Establish the gpadl for the ring buffer */
        newchannel->ringbuffer_gpadlhandle = 0;

        ret = vmbus_establish_gpadl(newchannel,
                                    page_address(page),
                                    send_ringbuffer_size +
                                    recv_ringbuffer_size,
                                    &newchannel->ringbuffer_gpadlhandle);

        if (ret != 0) {
                err = ret;
                goto error_free_pages;
        }

        /* Create and init the channel open message */
        open_info = kmalloc(sizeof(*open_info) +
                            sizeof(struct vmbus_channel_open_channel),
                            GFP_KERNEL);
        if (!open_info) {
                err = -ENOMEM;
                goto error_free_gpadl;
        }

        init_completion(&open_info->waitevent);
        open_info->waiting_channel = newchannel;

        open_msg = (struct vmbus_channel_open_channel *)open_info->msg;
        open_msg->header.msgtype = CHANNELMSG_OPENCHANNEL;
        open_msg->openid = newchannel->offermsg.child_relid;
        open_msg->child_relid = newchannel->offermsg.child_relid;
        open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
        open_msg->downstream_ringbuffer_pageoffset = send_ringbuffer_size >>
                                                     PAGE_SHIFT;
        open_msg->target_vp = newchannel->target_vp;

        if (userdatalen > MAX_USER_DEFINED_BYTES) {
                err = -EINVAL;
                goto error_free_gpadl;
        }

        if (userdatalen)
                memcpy(open_msg->userdata, userdata, userdatalen);

        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_add_tail(&open_info->msglistentry,
                      &vmbus_connection.chn_msg_list);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

        ret = vmbus_post_msg(open_msg,
                             sizeof(struct vmbus_channel_open_channel), true);

        if (ret != 0) {
                err = ret;
                goto error_clean_msglist;
        }

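        /*
         * Wait for the host's open-result reply; the channel message handler
         * copies it into open_info->response and signals waitevent.
         */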
        wait_for_completion(&open_info->waitevent);

        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_del(&open_info->msglistentry);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

        if (newchannel->rescind) {
                err = -ENODEV;
                goto error_free_gpadl;
        }

        if (open_info->response.open_result.status) {
                err = -EAGAIN;
                goto error_free_gpadl;
        }

        newchannel->state = CHANNEL_OPENED_STATE;
        kfree(open_info);
        return 0;

error_clean_msglist:
        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_del(&open_info->msglistentry);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

error_free_gpadl:
        vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);
        kfree(open_info);
error_free_pages:
        hv_ringbuffer_cleanup(&newchannel->outbound);
        hv_ringbuffer_cleanup(&newchannel->inbound);
        __free_pages(page,
                     get_order(send_ringbuffer_size + recv_ringbuffer_size));
error_set_chnstate:
        newchannel->state = CHANNEL_OPEN_STATE;
        return err;
}
EXPORT_SYMBOL_GPL(vmbus_open);

/* Used for Hyper-V Socket: a guest client's connect() to the host */
int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id,
                                  const uuid_le *shv_host_servie_id)
{
        struct vmbus_channel_tl_connect_request conn_msg;

        memset(&conn_msg, 0, sizeof(conn_msg));
        conn_msg.header.msgtype = CHANNELMSG_TL_CONNECT_REQUEST;
        conn_msg.guest_endpoint_id = *shv_guest_servie_id;
        conn_msg.host_service_id = *shv_host_servie_id;

        return vmbus_post_msg(&conn_msg, sizeof(conn_msg), true);
}
EXPORT_SYMBOL_GPL(vmbus_send_tl_connect_request);

/*
 * create_gpadl_header - Creates a gpadl for the specified buffer
 */
static int create_gpadl_header(void *kbuffer, u32 size,
                               struct vmbus_channel_msginfo **msginfo)
{
        int i;
        int pagecount;
        struct vmbus_channel_gpadl_header *gpadl_header;
        struct vmbus_channel_gpadl_body *gpadl_body;
        struct vmbus_channel_msginfo *msgheader;
        struct vmbus_channel_msginfo *msgbody = NULL;
        u32 msgsize;

        int pfnsum, pfncount, pfnleft, pfncurr, pfnsize;

        pagecount = size >> PAGE_SHIFT;

        /* do we need a gpadl body msg */
        pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
                  sizeof(struct vmbus_channel_gpadl_header) -
                  sizeof(struct gpa_range);
        pfncount = pfnsize / sizeof(u64);

        if (pagecount > pfncount) {
                /* we need a gpadl body */
                /* fill in the header */
                msgsize = sizeof(struct vmbus_channel_msginfo) +
                          sizeof(struct vmbus_channel_gpadl_header) +
                          sizeof(struct gpa_range) + pfncount * sizeof(u64);
                msgheader = kzalloc(msgsize, GFP_KERNEL);
                if (!msgheader)
                        goto nomem;

                INIT_LIST_HEAD(&msgheader->submsglist);
                msgheader->msgsize = msgsize;

                gpadl_header = (struct vmbus_channel_gpadl_header *)
                        msgheader->msg;
                gpadl_header->rangecount = 1;
                gpadl_header->range_buflen = sizeof(struct gpa_range) +
                                             pagecount * sizeof(u64);
                gpadl_header->range[0].byte_offset = 0;
                gpadl_header->range[0].byte_count = size;
                for (i = 0; i < pfncount; i++)
                        gpadl_header->range[0].pfn_array[i] = slow_virt_to_phys(
                                kbuffer + PAGE_SIZE * i) >> PAGE_SHIFT;
                *msginfo = msgheader;

                pfnsum = pfncount;
                pfnleft = pagecount - pfncount;

                /* how many pfns can we fit */
                pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
                          sizeof(struct vmbus_channel_gpadl_body);
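                /*
                 * Any pages that remain are split across follow-on gpadl
                 * body messages, each carrying at most this many PFNs.
                 */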
                pfncount = pfnsize / sizeof(u64);

                /* fill in the body */
                while (pfnleft) {
                        if (pfnleft > pfncount)
                                pfncurr = pfncount;
                        else
                                pfncurr = pfnleft;

                        msgsize = sizeof(struct vmbus_channel_msginfo) +
                                  sizeof(struct vmbus_channel_gpadl_body) +
                                  pfncurr * sizeof(u64);
                        msgbody = kzalloc(msgsize, GFP_KERNEL);

                        if (!msgbody) {
                                struct vmbus_channel_msginfo *pos = NULL;
                                struct vmbus_channel_msginfo *tmp = NULL;
                                /*
                                 * Free up all the allocated messages.
                                 */
                                list_for_each_entry_safe(pos, tmp,
                                        &msgheader->submsglist,
                                        msglistentry) {

                                        list_del(&pos->msglistentry);
                                        kfree(pos);
                                }

                                goto nomem;
                        }

                        msgbody->msgsize = msgsize;
                        gpadl_body =
                                (struct vmbus_channel_gpadl_body *)msgbody->msg;

                        /*
                         * Gpadl is u32 and we are using a pointer which could
                         * be 64-bit
                         * This is governed by the guest/host protocol and
                         * so the hypervisor guarantees that this is ok.
                         */
                        for (i = 0; i < pfncurr; i++)
                                gpadl_body->pfn[i] = slow_virt_to_phys(
                                        kbuffer + PAGE_SIZE * (pfnsum + i)) >>
                                        PAGE_SHIFT;

                        /* add to msg header */
                        list_add_tail(&msgbody->msglistentry,
                                      &msgheader->submsglist);
                        pfnsum += pfncurr;
                        pfnleft -= pfncurr;
                }
        } else {
                /* everything fits in a header */
                msgsize = sizeof(struct vmbus_channel_msginfo) +
                          sizeof(struct vmbus_channel_gpadl_header) +
                          sizeof(struct gpa_range) + pagecount * sizeof(u64);
                msgheader = kzalloc(msgsize, GFP_KERNEL);
                if (msgheader == NULL)
                        goto nomem;

                INIT_LIST_HEAD(&msgheader->submsglist);
                msgheader->msgsize = msgsize;

                gpadl_header = (struct vmbus_channel_gpadl_header *)
                        msgheader->msg;
                gpadl_header->rangecount = 1;
                gpadl_header->range_buflen = sizeof(struct gpa_range) +
                                             pagecount * sizeof(u64);
                gpadl_header->range[0].byte_offset = 0;
                gpadl_header->range[0].byte_count = size;
                for (i = 0; i < pagecount; i++)
                        gpadl_header->range[0].pfn_array[i] = slow_virt_to_phys(
                                kbuffer + PAGE_SIZE * i) >> PAGE_SHIFT;

                *msginfo = msgheader;
        }

        return 0;
nomem:
        kfree(msgheader);
        kfree(msgbody);
        return -ENOMEM;
}

/*
 * vmbus_establish_gpadl - Establish a GPADL for the specified buffer
 *
 * @channel: a channel
 * @kbuffer: from kmalloc or vmalloc
 * @size: page-size multiple
 * @gpadl_handle: set to the handle of the newly-established GPADL
 */
int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
                          u32 size, u32 *gpadl_handle)
{
        struct vmbus_channel_gpadl_header *gpadlmsg;
        struct vmbus_channel_gpadl_body *gpadl_body;
        struct vmbus_channel_msginfo *msginfo = NULL;
        struct vmbus_channel_msginfo *submsginfo, *tmp;
        struct list_head *curr;
        u32 next_gpadl_handle;
        unsigned long flags;
        int ret = 0;

        next_gpadl_handle =
                (atomic_inc_return(&vmbus_connection.next_gpadl_handle) - 1);

        ret = create_gpadl_header(kbuffer, size, &msginfo);
        if (ret)
                return ret;

        init_completion(&msginfo->waitevent);
        msginfo->waiting_channel = channel;

        gpadlmsg = (struct vmbus_channel_gpadl_header *)msginfo->msg;
        gpadlmsg->header.msgtype = CHANNELMSG_GPADL_HEADER;
        gpadlmsg->child_relid = channel->offermsg.child_relid;
        gpadlmsg->gpadl = next_gpadl_handle;


        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_add_tail(&msginfo->msglistentry,
                      &vmbus_connection.chn_msg_list);

        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

        ret = vmbus_post_msg(gpadlmsg, msginfo->msgsize -
                             sizeof(*msginfo), true);
        if (ret != 0)
                goto cleanup;

        list_for_each(curr, &msginfo->submsglist) {
                submsginfo = (struct vmbus_channel_msginfo *)curr;
                gpadl_body =
                        (struct vmbus_channel_gpadl_body *)submsginfo->msg;

                gpadl_body->header.msgtype =
                        CHANNELMSG_GPADL_BODY;
                gpadl_body->gpadl = next_gpadl_handle;

                ret = vmbus_post_msg(gpadl_body,
                                     submsginfo->msgsize - sizeof(*submsginfo),
                                     true);
                if (ret != 0)
                        goto cleanup;

        }
        wait_for_completion(&msginfo->waitevent);

        if (channel->rescind) {
                ret = -ENODEV;
                goto cleanup;
        }

        /* At this point, we received the gpadl created msg */
        *gpadl_handle = gpadlmsg->gpadl;

cleanup:
        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_del(&msginfo->msglistentry);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
        list_for_each_entry_safe(submsginfo, tmp, &msginfo->submsglist,
                                 msglistentry) {
                kfree(submsginfo);
        }

        kfree(msginfo);
        return ret;
}
EXPORT_SYMBOL_GPL(vmbus_establish_gpadl);

/*
 * vmbus_teardown_gpadl - Teardown the specified GPADL handle
 */
int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
{
        struct vmbus_channel_gpadl_teardown *msg;
        struct vmbus_channel_msginfo *info;
        unsigned long flags;
        int ret;

        info = kmalloc(sizeof(*info) +
                       sizeof(struct vmbus_channel_gpadl_teardown), GFP_KERNEL);
        if (!info)
                return -ENOMEM;

        init_completion(&info->waitevent);
        info->waiting_channel = channel;

        msg = (struct vmbus_channel_gpadl_teardown *)info->msg;

        msg->header.msgtype = CHANNELMSG_GPADL_TEARDOWN;
        msg->child_relid = channel->offermsg.child_relid;
        msg->gpadl = gpadl_handle;

        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_add_tail(&info->msglistentry,
                      &vmbus_connection.chn_msg_list);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
        ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_gpadl_teardown),
                             true);

        if (ret)
                goto post_msg_err;

        wait_for_completion(&info->waitevent);

post_msg_err:
        /*
         * If the channel has been rescinded, we will be awakened by the
         * rescind handler; set the error code to zero so we don't leak memory.
         */
        if (channel->rescind)
                ret = 0;

        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_del(&info->msglistentry);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

        kfree(info);
        return ret;
}
EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl);

static void reset_channel_cb(void *arg)
{
        struct vmbus_channel *channel = arg;

        channel->onchannel_callback = NULL;
}

static int vmbus_close_internal(struct vmbus_channel *channel)
{
        struct vmbus_channel_close_channel *msg;
        int ret;

        /*
         * vmbus_on_event(), running in the per-channel tasklet, can race
         * with vmbus_close_internal() in the case of SMP guest, e.g., when
         * the former is accessing channel->inbound.ring_buffer, the latter
         * could be freeing the ring_buffer pages, so here we must stop it
         * first.
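         * tasklet_disable() below also waits for a callback that is already
         * running on another CPU to complete before returning.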
         */
        tasklet_disable(&channel->callback_event);

        /*
         * In case a device driver's probe() fails (e.g.,
         * util_probe() -> vmbus_open() returns -ENOMEM) and the device is
         * rescinded later (e.g., we dynamically disable an Integrated Service
         * in Hyper-V Manager), the driver's remove() invokes vmbus_close():
         * here we should skip most of the below cleanup work.
         */
        if (channel->state != CHANNEL_OPENED_STATE) {
                ret = -EINVAL;
                goto out;
        }

        channel->state = CHANNEL_OPEN_STATE;
        channel->sc_creation_callback = NULL;
        /* Stop callback and cancel the timer asap */
        if (channel->target_cpu != get_cpu()) {
                put_cpu();
                smp_call_function_single(channel->target_cpu, reset_channel_cb,
                                         channel, true);
        } else {
                reset_channel_cb(channel);
                put_cpu();
        }

        /* Send a closing message */

        msg = &channel->close_msg.msg;

        msg->header.msgtype = CHANNELMSG_CLOSECHANNEL;
        msg->child_relid = channel->offermsg.child_relid;

        ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel),
                             true);

        if (ret) {
                pr_err("Close failed: close post msg return is %d\n", ret);
                /*
                 * If we failed to post the close msg,
                 * it is perhaps better to leak memory.
                 */
                goto out;
        }

        /* Tear down the gpadl for the channel's ring buffer */
        if (channel->ringbuffer_gpadlhandle) {
                ret = vmbus_teardown_gpadl(channel,
                                           channel->ringbuffer_gpadlhandle);
                if (ret) {
                        pr_err("Close failed: teardown gpadl return %d\n", ret);
                        /*
                         * If we failed to teardown gpadl,
                         * it is perhaps better to leak memory.
                         */
                        goto out;
                }
        }

        /* Cleanup the ring buffers for this channel */
        hv_ringbuffer_cleanup(&channel->outbound);
        hv_ringbuffer_cleanup(&channel->inbound);

        free_pages((unsigned long)channel->ringbuffer_pages,
                   get_order(channel->ringbuffer_pagecount * PAGE_SIZE));

out:
        return ret;
}

/*
 * vmbus_close - Close the specified channel
 */
void vmbus_close(struct vmbus_channel *channel)
{
        struct list_head *cur, *tmp;
        struct vmbus_channel *cur_channel;

        if (channel->primary_channel != NULL) {
                /*
                 * We will only close sub-channels when
                 * the primary is closed.
                 */
                return;
        }
        /*
         * Close all the sub-channels first and then close the
         * primary channel.
         */
        list_for_each_safe(cur, tmp, &channel->sc_list) {
                cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
                vmbus_close_internal(cur_channel);
                if (cur_channel->rescind) {
                        mutex_lock(&vmbus_connection.channel_mutex);
                        hv_process_channel_removal(cur_channel,
                                           cur_channel->offermsg.child_relid);
                        mutex_unlock(&vmbus_connection.channel_mutex);
                }
        }
        /*
         * Now close the primary.
         */
        vmbus_close_internal(channel);
}
EXPORT_SYMBOL_GPL(vmbus_close);

int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
                         u32 bufferlen, u64 requestid,
                         enum vmbus_packet_type type, u32 flags)
{
        struct vmpacket_descriptor desc;
        u32 packetlen = sizeof(struct vmpacket_descriptor) + bufferlen;
        u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
        struct kvec bufferlist[3];
        u64 aligned_data = 0;
        int num_vecs = ((bufferlen != 0) ?
                        3 : 1);


        /* Setup the descriptor */
        desc.type = type; /* VmbusPacketTypeDataInBand; */
        desc.flags = flags; /* VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED; */
        /* in 8-bytes granularity */
        desc.offset8 = sizeof(struct vmpacket_descriptor) >> 3;
        desc.len8 = (u16)(packetlen_aligned >> 3);
        desc.trans_id = requestid;

        bufferlist[0].iov_base = &desc;
        bufferlist[0].iov_len = sizeof(struct vmpacket_descriptor);
        bufferlist[1].iov_base = buffer;
        bufferlist[1].iov_len = bufferlen;
        bufferlist[2].iov_base = &aligned_data;
        bufferlist[2].iov_len = (packetlen_aligned - packetlen);

        return hv_ringbuffer_write(channel, bufferlist, num_vecs);
}
EXPORT_SYMBOL(vmbus_sendpacket_ctl);

/**
 * vmbus_sendpacket() - Send the specified buffer on the given channel
 * @channel: Pointer to vmbus_channel structure.
 * @buffer: Pointer to the buffer containing the data to send.
 * @bufferlen: Length of the data in the buffer.
 * @requestid: Identifier of the request
 * @type: Type of packet that is being sent e.g. negotiate, time
 * packet etc.
 *
 * Sends data in @buffer directly to hyper-v via the vmbus.
 * This will send the data unparsed to hyper-v.
 *
 * Mainly used by Hyper-V drivers.
 */
int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
                     u32 bufferlen, u64 requestid,
                     enum vmbus_packet_type type, u32 flags)
{
        return vmbus_sendpacket_ctl(channel, buffer, bufferlen, requestid,
                                    type, flags);
}
EXPORT_SYMBOL(vmbus_sendpacket);

/*
 * vmbus_sendpacket_pagebuffer_ctl - Send a range of single-page buffer
 * packets using a GPADL Direct packet type. This interface allows you
 * to control notifying the host. This will be useful for sending
 * batched data. Also the sender can control the send flags
 * explicitly.
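 * Returns 0 on success, -EINVAL if @pagecount exceeds
 * MAX_PAGE_BUFFER_COUNT, or the error returned by hv_ringbuffer_write().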
 */
int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
                                    struct hv_page_buffer pagebuffers[],
                                    u32 pagecount, void *buffer, u32 bufferlen,
                                    u64 requestid, u32 flags)
{
        int i;
        struct vmbus_channel_packet_page_buffer desc;
        u32 descsize;
        u32 packetlen;
        u32 packetlen_aligned;
        struct kvec bufferlist[3];
        u64 aligned_data = 0;

        if (pagecount > MAX_PAGE_BUFFER_COUNT)
                return -EINVAL;

        /*
         * Adjust the size down since vmbus_channel_packet_page_buffer is the
         * largest size we support
         */
        descsize = sizeof(struct vmbus_channel_packet_page_buffer) -
                   ((MAX_PAGE_BUFFER_COUNT - pagecount) *
                    sizeof(struct hv_page_buffer));
        packetlen = descsize + bufferlen;
        packetlen_aligned = ALIGN(packetlen, sizeof(u64));

        /* Setup the descriptor */
        desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
        desc.flags = flags;
        desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
        desc.length8 = (u16)(packetlen_aligned >> 3);
        desc.transactionid = requestid;
        desc.rangecount = pagecount;

        for (i = 0; i < pagecount; i++) {
                desc.range[i].len = pagebuffers[i].len;
                desc.range[i].offset = pagebuffers[i].offset;
                desc.range[i].pfn = pagebuffers[i].pfn;
        }

        bufferlist[0].iov_base = &desc;
        bufferlist[0].iov_len = descsize;
        bufferlist[1].iov_base = buffer;
        bufferlist[1].iov_len = bufferlen;
        bufferlist[2].iov_base = &aligned_data;
        bufferlist[2].iov_len = (packetlen_aligned - packetlen);

        return hv_ringbuffer_write(channel, bufferlist, 3);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer_ctl);

/*
 * vmbus_sendpacket_pagebuffer - Send a range of single-page buffer
 * packets using a GPADL Direct packet type.
 */
int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
                                struct hv_page_buffer pagebuffers[],
                                u32 pagecount, void *buffer, u32 bufferlen,
                                u64 requestid)
{
        u32 flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;

        return vmbus_sendpacket_pagebuffer_ctl(channel, pagebuffers, pagecount,
                                               buffer, bufferlen,
                                               requestid, flags);

}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);

/*
 * vmbus_sendpacket_mpb_desc - Send a multi-page buffer packet
 * using a GPADL Direct packet type.
 * The buffer includes the vmbus descriptor.
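 * The caller builds @desc (including its PFN ranges) and passes its total
 * size in @desc_size; only the descriptor header fields are filled in here.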
 */
int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
                              struct vmbus_packet_mpb_array *desc,
                              u32 desc_size,
                              void *buffer, u32 bufferlen, u64 requestid)
{
        u32 packetlen;
        u32 packetlen_aligned;
        struct kvec bufferlist[3];
        u64 aligned_data = 0;

        packetlen = desc_size + bufferlen;
        packetlen_aligned = ALIGN(packetlen, sizeof(u64));

        /* Setup the descriptor */
        desc->type = VM_PKT_DATA_USING_GPA_DIRECT;
        desc->flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
        desc->dataoffset8 = desc_size >> 3; /* in 8-bytes granularity */
        desc->length8 = (u16)(packetlen_aligned >> 3);
        desc->transactionid = requestid;
        desc->rangecount = 1;

        bufferlist[0].iov_base = desc;
        bufferlist[0].iov_len = desc_size;
        bufferlist[1].iov_base = buffer;
        bufferlist[1].iov_len = bufferlen;
        bufferlist[2].iov_base = &aligned_data;
        bufferlist[2].iov_len = (packetlen_aligned - packetlen);

        return hv_ringbuffer_write(channel, bufferlist, 3);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);

/*
 * vmbus_sendpacket_multipagebuffer - Send a multi-page buffer packet
 * using a GPADL Direct packet type.
 */
int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
                                     struct hv_multipage_buffer *multi_pagebuffer,
                                     void *buffer, u32 bufferlen, u64 requestid)
{
        struct vmbus_channel_packet_multipage_buffer desc;
        u32 descsize;
        u32 packetlen;
        u32 packetlen_aligned;
        struct kvec bufferlist[3];
        u64 aligned_data = 0;
        u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
                                         multi_pagebuffer->len);

        if (pfncount > MAX_MULTIPAGE_BUFFER_COUNT)
                return -EINVAL;

        /*
         * Adjust the size down since vmbus_channel_packet_multipage_buffer is
         * the largest size we support
         */
        descsize = sizeof(struct vmbus_channel_packet_multipage_buffer) -
                   ((MAX_MULTIPAGE_BUFFER_COUNT - pfncount) *
                    sizeof(u64));
        packetlen = descsize + bufferlen;
        packetlen_aligned = ALIGN(packetlen, sizeof(u64));


        /* Setup the descriptor */
        desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
        desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
        desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
        desc.length8 = (u16)(packetlen_aligned >> 3);
        desc.transactionid = requestid;
        desc.rangecount = 1;

        desc.range.len = multi_pagebuffer->len;
        desc.range.offset = multi_pagebuffer->offset;

        memcpy(desc.range.pfn_array, multi_pagebuffer->pfn_array,
               pfncount * sizeof(u64));

        bufferlist[0].iov_base = &desc;
        bufferlist[0].iov_len = descsize;
        bufferlist[1].iov_base = buffer;
        bufferlist[1].iov_len = bufferlen;
        bufferlist[2].iov_base = &aligned_data;
        bufferlist[2].iov_len = (packetlen_aligned - packetlen);

        return hv_ringbuffer_write(channel, bufferlist, 3);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_multipagebuffer);

/**
 * vmbus_recvpacket() - Retrieve the user packet on the specified channel
 * @channel: Pointer to vmbus_channel structure.
 * @buffer: Pointer to the buffer you want to receive the data into.
 * @bufferlen: Maximum size of what the buffer will hold
 * @buffer_actual_len: The actual size of the data after it was received
 * @requestid: Identifier of the request
 *
 * Receives directly from the hyper-v vmbus and puts the data it received
 * into Buffer. This will receive the data unparsed from hyper-v.
 *
 * Mainly used by Hyper-V drivers.
 */
static inline int
__vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
                   u32 bufferlen, u32 *buffer_actual_len, u64 *requestid,
                   bool raw)
{
        return hv_ringbuffer_read(channel, buffer, bufferlen,
                                  buffer_actual_len, requestid, raw);

}

int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
                     u32 bufferlen, u32 *buffer_actual_len,
                     u64 *requestid)
{
        return __vmbus_recvpacket(channel, buffer, bufferlen,
                                  buffer_actual_len, requestid, false);
}
EXPORT_SYMBOL(vmbus_recvpacket);

/*
 * vmbus_recvpacket_raw - Retrieve the raw packet on the specified channel
 */
int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
                         u32 bufferlen, u32 *buffer_actual_len,
                         u64 *requestid)
{
        return __vmbus_recvpacket(channel, buffer, bufferlen,
                                  buffer_actual_len, requestid, true);
}
EXPORT_SYMBOL_GPL(vmbus_recvpacket_raw);