/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/prefetch.h>

#include "hyperv_vmbus.h"

#define VMBUS_PKT_TRAILER	8

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 *
 * KYS: Oct. 30, 2016:
 * It looks like Windows hosts have logic to deal with DOS attacks that
 * can be triggered if it receives interrupts when it is not expecting
 * the interrupt. The host expects interrupts only when the ring
 * transitions from empty to non-empty (or full to non-full on the guest
 * to host ring).
 * So, base the signaling decision solely on the ring state until the
 * host logic is fixed.
 */

static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;

	virt_mb();
	if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
		return;

	/* check interrupt_mask before read_index */
	virt_rmb();
	/*
	 * This is the only case we need to signal: when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == READ_ONCE(rbi->ring_buffer->read_index)) {
		++channel->intr_out_empty;
		vmbus_setevent(channel);
	}
}

/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}

/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
			   u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}
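/*
 * Illustrative sketch (editor-added, not part of the driver logic): how
 * hv_signal_on_write() above detects the empty -> non-empty transition.
 * The index values are made up for the example.
 *
 *	Before the write:  read_index == write_index == 0x100 (ring empty)
 *	The writer copies a packet starting at old_write == 0x100 and
 *	advances write_index.
 *	After the write:   old_write (0x100) == read_index (0x100)
 *	=> the ring was empty when the write began, so the host may have
 *	   gone idle; signal it with vmbus_setevent().
 *
 * If old_write != read_index, the host still had unread data when the
 * write began and is guaranteed (rule 2 in the protocol comment above)
 * to keep draining, so no signal is sent.
 */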
/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
			  u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
	ring_info->priv_read_index = next_read_location;
}

/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/* Get the read and write indices as u64 of the specified ring buffer. */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}

/*
 * Helper routine to copy from source to ring buffer.
 * Assume there is enough room. Handles wrap-around in dest case only!!
 */
static u32 hv_copyto_ringbuffer(struct hv_ring_buffer_info *ring_info,
				u32 start_write_offset,
				const void *src,
				u32 srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	memcpy(ring_buffer + start_write_offset, src, srclen);

	start_write_offset += srclen;
	if (start_write_offset >= ring_buffer_size)
		start_write_offset -= ring_buffer_size;

	return start_write_offset;
}

/*
 *
 * hv_get_ringbuffer_availbytes()
 *
 * Get number of bytes available to read and to write to
 * for the specified ring buffer
 */
static void
hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
			     u32 *read, u32 *write)
{
	u32 read_loc, write_loc, dsize;

	/* Capture the read/write indices before they change */
	read_loc = READ_ONCE(rbi->ring_buffer->read_index);
	write_loc = READ_ONCE(rbi->ring_buffer->write_index);
	dsize = rbi->ring_datasize;

	*write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
		read_loc - write_loc;
	*read = dsize - *write;
}

/* Get various debug metrics for the specified ring buffer. */
int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
				struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	if (!ring_info->ring_buffer)
		return -EINVAL;

	hv_get_ringbuffer_availbytes(ring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);
	debug_info->bytes_avail_toread = bytes_avail_toread;
	debug_info->bytes_avail_towrite = bytes_avail_towrite;
	debug_info->current_read_index = ring_info->ring_buffer->read_index;
	debug_info->current_write_index = ring_info->ring_buffer->write_index;
	debug_info->current_interrupt_mask
		= ring_info->ring_buffer->interrupt_mask;
	return 0;
}
EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);
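/*
 * Illustrative sketch (editor-added): the arithmetic in
 * hv_get_ringbuffer_availbytes() above, with made-up numbers.
 * Assume dsize = 4096:
 *
 *	read_loc = 100, write_loc = 300  (write ahead of read)
 *	  *write = 4096 - (300 - 100) = 3896
 *	  *read  = 4096 - 3896        = 200
 *
 *	read_loc = 300, write_loc = 100  (write has wrapped)
 *	  *write = 300 - 100          = 200
 *	  *read  = 4096 - 200         = 3896
 *
 * "Bytes available to write" counts all of dsize minus the unread
 * bytes; hv_ringbuffer_write() separately refuses to fill the ring
 * completely, so empty (read == write) stays distinguishable from full.
 */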
/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       struct page *pages, u32 page_cnt)
{
	int i;
	struct page **pages_wraparound;

	BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));

	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

	/*
	 * First page holds struct hv_ring_buffer, do wraparound mapping for
	 * the rest.
	 */
	pages_wraparound = kcalloc(page_cnt * 2 - 1, sizeof(struct page *),
				   GFP_KERNEL);
	if (!pages_wraparound)
		return -ENOMEM;

	pages_wraparound[0] = pages;
	for (i = 0; i < 2 * (page_cnt - 1); i++)
		pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];

	ring_info->ring_buffer = (struct hv_ring_buffer *)
		vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);

	kfree(pages_wraparound);

	if (!ring_info->ring_buffer)
		return -ENOMEM;

	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	/* Set the feature bit for enabling flow control. */
	ring_info->ring_buffer->feature_bits.value = 1;

	ring_info->ring_size = page_cnt << PAGE_SHIFT;
	ring_info->ring_size_div10_reciprocal =
		reciprocal_value(ring_info->ring_size / 10);
	ring_info->ring_datasize = ring_info->ring_size -
		sizeof(struct hv_ring_buffer);

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}

/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
	vunmap(ring_info->ring_buffer);
	ring_info->ring_buffer = NULL;
}
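/*
 * Illustrative sketch (editor-added): the wraparound mapping built by
 * hv_ringbuffer_init() above, for a hypothetical page_cnt = 4 (one
 * header page 'H' plus three data pages D1..D3). The page_cnt * 2 - 1
 * entries handed to vmap() are:
 *
 *	pages_wraparound[] = { H, D1, D2, D3, D1, D2, D3 }
 *
 * Because the data pages appear twice, back to back, in the virtual
 * mapping, a packet that wraps past the end of the ring data area is
 * still virtually contiguous. This is why hv_ringbuffer_read() can
 * memcpy() a packet in a single copy and hv_pkt_iter_first() can return
 * a plain pointer into the ring without a bounce buffer.
 */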
/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
			const struct kvec *kv_list, u32 kv_count)
{
	int i;
	u32 bytes_avail_towrite;
	u32 totalbytes_towrite = sizeof(u64);
	u32 next_write_location;
	u32 old_write;
	u64 prev_indices;
	unsigned long flags;
	struct hv_ring_buffer_info *outring_info = &channel->outbound;

	if (channel->rescind)
		return -ENODEV;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	bytes_avail_towrite = hv_get_bytes_to_write(outring_info);

	/*
	 * If there is only room for the packet, assume it is full.
	 * Otherwise, the next time around, we think the ring buffer
	 * is empty since the read index == write index.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		++channel->out_full_total;

		if (!channel->out_full_flag) {
			++channel->out_full_first;
			channel->out_full_flag = true;
		}

		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	channel->out_full_flag = false;

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
							   next_write_location,
							   kv_list[i].iov_base,
							   kv_list[i].iov_len);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
						   next_write_location,
						   &prev_indices,
						   sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	virt_mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	hv_signal_on_write(old_write, channel);

	if (channel->rescind)
		return -ENODEV;

	return 0;
}

int hv_ringbuffer_read(struct vmbus_channel *channel,
		       void *buffer, u32 buflen, u32 *buffer_actual_len,
		       u64 *requestid, bool raw)
{
	struct vmpacket_descriptor *desc;
	u32 packetlen, offset;

	if (unlikely(buflen == 0))
		return -EINVAL;

	*buffer_actual_len = 0;
	*requestid = 0;

	/* Make sure there is something to read */
	desc = hv_pkt_iter_first(channel);
	if (desc == NULL) {
		/*
		 * No error is set when there is not even a header; drivers
		 * are supposed to check buffer_actual_len.
		 */
		return 0;
	}

	offset = raw ? 0 : (desc->offset8 << 3);
	packetlen = (desc->len8 << 3) - offset;
	*buffer_actual_len = packetlen;
	*requestid = desc->trans_id;

	if (unlikely(packetlen > buflen))
		return -ENOBUFS;

	/* since ring is double mapped, only one copy is necessary */
	memcpy(buffer, (const char *)desc + offset, packetlen);

	/* Advance ring index to next packet descriptor */
	__hv_pkt_iter_next(channel, desc);

	/* Notify host of update */
	hv_pkt_iter_close(channel);

	return 0;
}
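/*
 * Illustrative sketch (editor-added) of a typical caller of
 * hv_ringbuffer_write(), in the style of the vmbus_sendpacket() path.
 * The descriptor setup and the two-element kvec are assumptions for the
 * example (payload assumed 8-byte aligned), not a fixed convention:
 *
 *	struct vmpacket_descriptor desc;
 *	struct kvec kv[2];
 *	int ret;
 *
 *	desc.type = VM_PKT_DATA_INBAND;
 *	desc.offset8 = sizeof(desc) >> 3;	// payload offset, 8-byte units
 *	desc.len8 = (sizeof(desc) + payload_len) >> 3; // total, 8-byte units
 *	desc.trans_id = requestid;		// echoed back in completions
 *
 *	kv[0].iov_base = &desc;
 *	kv[0].iov_len = sizeof(desc);
 *	kv[1].iov_base = payload;		// hypothetical buffer
 *	kv[1].iov_len = payload_len;
 *
 *	ret = hv_ringbuffer_write(channel, kv, 2);
 *
 * hv_ringbuffer_write() itself appends the extra sizeof(u64) trailer
 * (prev_indices), which is why totalbytes_towrite starts at sizeof(u64)
 * rather than 0.
 */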
/*
 * Determine number of bytes available in ring buffer after
 * the current iterator (priv_read_index) location.
 *
 * This is similar to hv_get_bytes_to_read but with private
 * read index instead.
 */
static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
{
	u32 priv_read_loc = rbi->priv_read_index;
	u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index);

	if (write_loc >= priv_read_loc)
		return write_loc - priv_read_loc;
	else
		return (rbi->ring_datasize - priv_read_loc) + write_loc;
}

/*
 * Get first vmbus packet from ring buffer after read_index
 *
 * If ring buffer is empty, returns NULL and no other action needed.
 */
struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	struct vmpacket_descriptor *desc;

	if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
		return NULL;

	desc = hv_get_ring_buffer(rbi) + rbi->priv_read_index;
	if (desc)
		prefetch((char *)desc + (desc->len8 << 3));

	return desc;
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_first);

/*
 * Get next vmbus packet from ring buffer.
 *
 * Advances the current location (priv_read_index) and checks for more
 * data. If no more data is available, returns NULL.
 */
struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
		   const struct vmpacket_descriptor *desc)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 packetlen = desc->len8 << 3;
	u32 dsize = rbi->ring_datasize;

	/* bump offset to next potential packet */
	rbi->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
	if (rbi->priv_read_index >= dsize)
		rbi->priv_read_index -= dsize;

	/* more data? */
	return hv_pkt_iter_first(channel);
}
EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);

/* How many bytes were read in this iterator cycle */
static u32 hv_pkt_iter_bytes_read(const struct hv_ring_buffer_info *rbi,
				  u32 start_read_index)
{
	if (rbi->priv_read_index >= start_read_index)
		return rbi->priv_read_index - start_read_index;
	else
		return rbi->ring_datasize - start_read_index +
			rbi->priv_read_index;
}
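/*
 * Illustrative sketch (editor-added): draining a channel with the
 * iterator API defined above. handle_pkt() is a hypothetical consumer;
 * the loop shape mirrors what hv_ringbuffer_read() does for a single
 * packet:
 *
 *	struct vmpacket_descriptor *pkt;
 *
 *	for (pkt = hv_pkt_iter_first(channel);
 *	     pkt != NULL;
 *	     pkt = __hv_pkt_iter_next(channel, pkt))
 *		handle_pkt(channel, pkt);
 *
 *	hv_pkt_iter_close(channel);	// commit read_index, maybe signal
 *
 * Only hv_pkt_iter_close() makes the consumed space visible to the
 * host; until then the progress is tracked privately in priv_read_index.
 */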
/*
 * Update host ring buffer after iterating over packets. If the host has
 * stopped queuing new entries because it found the ring buffer full, and
 * sufficient space is being freed up, signal the host. But be careful to
 * only signal the host when necessary, both for performance reasons and
 * because Hyper-V protects itself by throttling guests that signal
 * inappropriately.
 *
 * Determining when to signal is tricky. There are three key data inputs
 * that must be handled in this order to avoid race conditions:
 *
 * 1. Update the read_index
 * 2. Read the pending_send_sz
 * 3. Read the current write_index
 *
 * The interrupt_mask is not used to determine when to signal. The
 * interrupt_mask is used only on the guest->host ring buffer when
 * sending requests to the host. The host does not use it on the host->
 * guest ring buffer to indicate whether it should be signaled.
 */
void hv_pkt_iter_close(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 curr_write_sz, pending_sz, bytes_read, start_read_index;

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	virt_rmb();
	start_read_index = rbi->ring_buffer->read_index;
	rbi->ring_buffer->read_index = rbi->priv_read_index;

	/*
	 * Older versions of Hyper-V (before WS2012 and Win8) do not
	 * implement pending_send_sz and simply poll if the host->guest
	 * ring buffer is full. No signaling is needed or expected.
	 */
	if (!rbi->ring_buffer->feature_bits.feat_pending_send_sz)
		return;

	/*
	 * Issue a full memory barrier before making the signaling decision.
	 * If reading pending_send_sz were to be reordered and happen
	 * before we commit the new read_index, a race could occur. If the
	 * host were to set the pending_send_sz after we have sampled
	 * pending_send_sz, and the ring buffer blocks before we commit the
	 * read index, we could miss sending the interrupt. Issue a full
	 * memory barrier to address this.
	 */
	virt_mb();

	/*
	 * If the pending_send_sz is zero, then the ring buffer is not
	 * blocked and there is no need to signal. This is by far the
	 * most common case, so exit quickly for best performance.
	 */
	pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
	if (!pending_sz)
		return;

	/*
	 * Ensure the read of write_index in hv_get_bytes_to_write()
	 * happens after the read of pending_send_sz.
	 */
	virt_rmb();
	curr_write_sz = hv_get_bytes_to_write(rbi);
	bytes_read = hv_pkt_iter_bytes_read(rbi, start_read_index);

	/*
	 * We want to signal the host only if we're transitioning
	 * from a "not enough free space" state to an "enough free
	 * space" state. For example, it's possible that this function
	 * could run and free up enough space to signal the host, and then
	 * run again and free up additional space before the host has a
	 * chance to clear the pending_send_sz. The 2nd invocation would
	 * be a null transition from "enough free space" to "enough free
	 * space", which doesn't warrant a signal.
	 *
	 * Exactly filling the ring buffer is treated as "not enough
	 * space". The ring buffer always must have at least one byte
	 * empty so the empty and full conditions are distinguishable.
	 * hv_get_bytes_to_write() doesn't fully tell the truth in
	 * this regard.
	 *
	 * So first check if we were in the "enough free space" state
	 * before we began the iteration. If so, the host was not
	 * blocked, and there's no need to signal.
	 */
	if (curr_write_sz - bytes_read > pending_sz)
		return;

	/*
	 * Similarly, if the new state is "not enough space", then
	 * there's no need to signal.
	 */
	if (curr_write_sz <= pending_sz)
		return;

	++channel->intr_in_full;
	vmbus_setevent(channel);
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_close);
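/*
 * Illustrative sketch (editor-added): the two checks in
 * hv_pkt_iter_close() with made-up numbers. Assume pending_sz = 512
 * and, before this iteration, the writable space was 300 bytes (host
 * blocked). The iteration consumed bytes_read = 400 bytes, so
 * curr_write_sz = 700 now:
 *
 *	curr_write_sz - bytes_read = 700 - 400 = 300  -> not > 512,
 *	    so we really were in the "not enough space" state before;
 *	curr_write_sz = 700 > pending_sz = 512        -> now "enough".
 *
 * That is a genuine "not enough" -> "enough" transition, so the host
 * is signaled. Had curr_write_sz been, say, 450 (<= 512), or the prior
 * space already 600 (600 > 512), one of the early returns would fire
 * and no interrupt would be raised.
 */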