/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/prefetch.h>

#include "hyperv_vmbus.h"

#define VMBUS_PKT_TRAILER 8

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *      1. The host guarantees that while it is draining the
 *         ring buffer, it will set the interrupt_mask to
 *         indicate it does not need to be interrupted when
 *         new data is placed.
 *
 *      2. The host guarantees that it will completely drain
 *         the ring buffer before exiting the read loop. Further,
 *         once the ring buffer is empty, it will clear the
 *         interrupt_mask and re-check to see if new data has
 *         arrived.
 *
 * KYS: Oct. 30, 2016:
 * It looks like Windows hosts have logic to deal with DOS attacks that
 * can be triggered if they receive interrupts when they are not
 * expecting them. The host expects interrupts only when the ring
 * transitions from empty to non-empty (or full to non-full on the guest
 * to host ring).
 * So, base the signaling decision solely on the ring state until the
 * host logic is fixed.
 */

static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
{
        struct hv_ring_buffer_info *rbi = &channel->outbound;

        virt_mb();
        if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
                return;

        /* check interrupt_mask before read_index */
        virt_rmb();
        /*
         * This is the only case we need to signal: when the
         * ring transitions from being empty to non-empty.
         */
        if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
                vmbus_setevent(channel);
}

/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
        return ring_info->ring_buffer->write_index;
}

/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
                           u32 next_write_location)
{
        ring_info->ring_buffer->write_index = next_write_location;
}
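/*
 * Note on the two read cursors: read_index lives in the ring buffer
 * header shared with the host, while priv_read_index is a guest-only
 * cursor used by the packet iterator (see hv_pkt_iter_*() below). The
 * iterator advances priv_read_index as packets are consumed and only
 * commits it to read_index in hv_pkt_iter_close().
 */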
/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
                          u32 next_read_location)
{
        ring_info->ring_buffer->read_index = next_read_location;
        ring_info->priv_read_index = next_read_location;
}

/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(const struct hv_ring_buffer_info *ring_info)
{
        return ring_info->ring_datasize;
}

/*
 * Get the indices of the specified ring buffer as a u64; only the
 * write index is recorded, in the upper 32 bits.
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
        return (u64)ring_info->ring_buffer->write_index << 32;
}

/*
 * Helper routine to copy from source to ring buffer.
 * Assume there is enough room. Handles wrap-around in dest case only!!
 */
static u32 hv_copyto_ringbuffer(
        struct hv_ring_buffer_info *ring_info,
        u32 start_write_offset,
        const void *src,
        u32 srclen)
{
        void *ring_buffer = hv_get_ring_buffer(ring_info);
        u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

        memcpy(ring_buffer + start_write_offset, src, srclen);

        start_write_offset += srclen;
        if (start_write_offset >= ring_buffer_size)
                start_write_offset -= ring_buffer_size;

        return start_write_offset;
}

/*
 * hv_get_ringbuffer_availbytes()
 *
 * Get the number of bytes available to read from and to write to
 * the specified ring buffer.
 */
static void
hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
                             u32 *read, u32 *write)
{
        u32 read_loc, write_loc, dsize;

        /* Capture the read/write indices before they change */
        read_loc = READ_ONCE(rbi->ring_buffer->read_index);
        write_loc = READ_ONCE(rbi->ring_buffer->write_index);
        dsize = rbi->ring_datasize;

        *write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
                read_loc - write_loc;
        *read = dsize - *write;
}

/* Get various debug metrics for the specified ring buffer. */
int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
                                struct hv_ring_buffer_debug_info *debug_info)
{
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;

        if (!ring_info->ring_buffer)
                return -EINVAL;

        hv_get_ringbuffer_availbytes(ring_info,
                                     &bytes_avail_toread,
                                     &bytes_avail_towrite);
        debug_info->bytes_avail_toread = bytes_avail_toread;
        debug_info->bytes_avail_towrite = bytes_avail_towrite;
        debug_info->current_read_index = ring_info->ring_buffer->read_index;
        debug_info->current_write_index = ring_info->ring_buffer->write_index;
        debug_info->current_interrupt_mask
                = ring_info->ring_buffer->interrupt_mask;
        return 0;
}
EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);
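/*
 * For illustration, the wraparound mapping built by hv_ringbuffer_init()
 * below, with page_cnt = 4 (header page H plus data pages P1..P3):
 *
 *      pages_wraparound[] = { H, P1, P2, P3, P1, P2, P3 }
 *
 * Mapping the data pages twice, back to back, lets a packet that wraps
 * past the end of the ring be accessed with a single linear memcpy()
 * (see the "double mapped" comment in hv_ringbuffer_read()).
 */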
/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
                       struct page *pages, u32 page_cnt)
{
        int i;
        struct page **pages_wraparound;

        BUILD_BUG_ON(sizeof(struct hv_ring_buffer) != PAGE_SIZE);

        memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

        /*
         * First page holds struct hv_ring_buffer, do wraparound mapping for
         * the rest.
         */
        pages_wraparound = kcalloc(page_cnt * 2 - 1, sizeof(struct page *),
                                   GFP_KERNEL);
        if (!pages_wraparound)
                return -ENOMEM;

        pages_wraparound[0] = pages;
        for (i = 0; i < 2 * (page_cnt - 1); i++)
                pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];

        ring_info->ring_buffer = (struct hv_ring_buffer *)
                vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);

        kfree(pages_wraparound);

        if (!ring_info->ring_buffer)
                return -ENOMEM;

        ring_info->ring_buffer->read_index =
                ring_info->ring_buffer->write_index = 0;

        /* Set the feature bit for enabling flow control. */
        ring_info->ring_buffer->feature_bits.value = 1;

        ring_info->ring_size = page_cnt << PAGE_SHIFT;
        ring_info->ring_size_div10_reciprocal =
                reciprocal_value(ring_info->ring_size / 10);
        ring_info->ring_datasize = ring_info->ring_size -
                sizeof(struct hv_ring_buffer);

        spin_lock_init(&ring_info->ring_lock);

        return 0;
}

/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
        vunmap(ring_info->ring_buffer);
        ring_info->ring_buffer = NULL;
}
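/*
 * On-ring layout produced by hv_ringbuffer_write() for one packet
 * (a sketch; the kv_list contents, typically a vmpacket_descriptor
 * followed by the payload, are supplied by the caller):
 *
 *      | kv_list[0] | ... | kv_list[n-1] | u64 prev_indices |
 *
 * The trailing u64 records the write index at which the packet started
 * (see hv_get_ring_bufferindices()) and accounts for the
 * VMBUS_PKT_TRAILER (8) bytes that __hv_pkt_iter_next() skips.
 */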
/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
                        const struct kvec *kv_list, u32 kv_count)
{
        int i;
        u32 bytes_avail_towrite;
        u32 totalbytes_towrite = sizeof(u64);
        u32 next_write_location;
        u32 old_write;
        u64 prev_indices;
        unsigned long flags;
        struct hv_ring_buffer_info *outring_info = &channel->outbound;

        if (channel->rescind)
                return -ENODEV;

        for (i = 0; i < kv_count; i++)
                totalbytes_towrite += kv_list[i].iov_len;

        spin_lock_irqsave(&outring_info->ring_lock, flags);

        bytes_avail_towrite = hv_get_bytes_to_write(outring_info);

        /*
         * If there is only room for the packet, assume it is full.
         * Otherwise, the next time around, we think the ring buffer
         * is empty since the read index == write index.
         */
        if (bytes_avail_towrite <= totalbytes_towrite) {
                spin_unlock_irqrestore(&outring_info->ring_lock, flags);
                return -EAGAIN;
        }

        /* Write to the ring buffer */
        next_write_location = hv_get_next_write_location(outring_info);

        old_write = next_write_location;

        for (i = 0; i < kv_count; i++) {
                next_write_location = hv_copyto_ringbuffer(outring_info,
                                                           next_write_location,
                                                           kv_list[i].iov_base,
                                                           kv_list[i].iov_len);
        }

        /* Set previous packet start */
        prev_indices = hv_get_ring_bufferindices(outring_info);

        next_write_location = hv_copyto_ringbuffer(outring_info,
                                                   next_write_location,
                                                   &prev_indices,
                                                   sizeof(u64));

        /* Issue a full memory barrier before updating the write index */
        virt_mb();

        /* Now, update the write location */
        hv_set_next_write_location(outring_info, next_write_location);

        spin_unlock_irqrestore(&outring_info->ring_lock, flags);

        hv_signal_on_write(old_write, channel);

        if (channel->rescind)
                return -ENODEV;

        return 0;
}

int hv_ringbuffer_read(struct vmbus_channel *channel,
                       void *buffer, u32 buflen, u32 *buffer_actual_len,
                       u64 *requestid, bool raw)
{
        struct vmpacket_descriptor *desc;
        u32 packetlen, offset;

        if (unlikely(buflen == 0))
                return -EINVAL;

        *buffer_actual_len = 0;
        *requestid = 0;

        /* Make sure there is something to read */
        desc = hv_pkt_iter_first(channel);
        if (desc == NULL) {
                /*
                 * No error is returned even when there is no packet header;
                 * drivers are expected to check buffer_actual_len instead.
                 */
                return 0;
        }

        offset = raw ? 0 : (desc->offset8 << 3);
        packetlen = (desc->len8 << 3) - offset;
        *buffer_actual_len = packetlen;
        *requestid = desc->trans_id;

        if (unlikely(packetlen > buflen))
                return -ENOBUFS;

        /* since ring is double mapped, only one copy is necessary */
        memcpy(buffer, (const char *)desc + offset, packetlen);

        /* Advance ring index to next packet descriptor */
        __hv_pkt_iter_next(channel, desc);

        /* Notify host of update */
        hv_pkt_iter_close(channel);

        return 0;
}

/*
 * Determine the number of bytes available in the ring buffer after
 * the current iterator (priv_read_index) location.
 *
 * This is similar to hv_get_bytes_to_read but uses the private
 * read index instead.
 */
static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
{
        u32 priv_read_loc = rbi->priv_read_index;
        u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index);

        if (write_loc >= priv_read_loc)
                return write_loc - priv_read_loc;
        else
                return (rbi->ring_datasize - priv_read_loc) + write_loc;
}

/*
 * Get the first vmbus packet from the ring buffer after read_index.
 *
 * If the ring buffer is empty, returns NULL and no other action is needed.
 */
struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
{
        struct hv_ring_buffer_info *rbi = &channel->inbound;
        struct vmpacket_descriptor *desc;

        if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
                return NULL;

        desc = hv_get_ring_buffer(rbi) + rbi->priv_read_index;
        prefetch((char *)desc + (desc->len8 << 3));

        return desc;
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_first);
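/*
 * One possible calling pattern for the packet iterator; this is the
 * pattern hv_ringbuffer_read() above follows for a single packet,
 * generalized to a loop:
 *
 *      desc = hv_pkt_iter_first(channel);
 *      while (desc) {
 *              ...process the packet...
 *              desc = __hv_pkt_iter_next(channel, desc);
 *      }
 *      hv_pkt_iter_close(channel);
 */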
/*
 * Get next vmbus packet from ring buffer.
 *
 * Advances the current location (priv_read_index) and checks for more
 * data. If the end of the ring buffer is reached, then return NULL.
 */
struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
                   const struct vmpacket_descriptor *desc)
{
        struct hv_ring_buffer_info *rbi = &channel->inbound;
        u32 packetlen = desc->len8 << 3;
        u32 dsize = rbi->ring_datasize;

        /* bump offset to next potential packet */
        rbi->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
        if (rbi->priv_read_index >= dsize)
                rbi->priv_read_index -= dsize;

        /* more data? */
        return hv_pkt_iter_first(channel);
}
EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);

/* How many bytes were read in this iterator cycle */
static u32 hv_pkt_iter_bytes_read(const struct hv_ring_buffer_info *rbi,
                                  u32 start_read_index)
{
        if (rbi->priv_read_index >= start_read_index)
                return rbi->priv_read_index - start_read_index;
        else
                return rbi->ring_datasize - start_read_index +
                        rbi->priv_read_index;
}
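/*
 * Worked example for hv_pkt_iter_bytes_read() with ring_datasize = 4096:
 * if the cycle started at start_read_index = 4000 and priv_read_index
 * has since wrapped around to 100, the bytes read are
 * 4096 - 4000 + 100 = 196.
 */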
/*
 * Update host ring buffer after iterating over packets. If the host has
 * stopped queuing new entries because it found the ring buffer full, and
 * sufficient space is being freed up, signal the host. But be careful to
 * only signal the host when necessary, both for performance reasons and
 * because Hyper-V protects itself by throttling guests that signal
 * inappropriately.
 *
 * Determining when to signal is tricky. There are three key data inputs
 * that must be handled in this order to avoid race conditions:
 *
 * 1. Update the read_index
 * 2. Read the pending_send_sz
 * 3. Read the current write_index
 *
 * The interrupt_mask is not used to determine when to signal. The
 * interrupt_mask is used only on the guest->host ring buffer when
 * sending requests to the host. The host does not use it on the host->
 * guest ring buffer to indicate whether it should be signaled.
 */
void hv_pkt_iter_close(struct vmbus_channel *channel)
{
        struct hv_ring_buffer_info *rbi = &channel->inbound;
        u32 curr_write_sz, pending_sz, bytes_read, start_read_index;

        /*
         * Make sure all reads are done before we update the read index since
         * the writer may start writing to the read area once the read index
         * is updated.
         */
        virt_rmb();
        start_read_index = rbi->ring_buffer->read_index;
        rbi->ring_buffer->read_index = rbi->priv_read_index;

        /*
         * Older versions of Hyper-V (before WS2012 and Win8) do not
         * implement pending_send_sz and simply poll if the host->guest
         * ring buffer is full. No signaling is needed or expected.
         */
        if (!rbi->ring_buffer->feature_bits.feat_pending_send_sz)
                return;

        /*
         * Issue a full memory barrier before making the signaling decision.
         * If reading pending_send_sz were to be reordered and happen
         * before we commit the new read_index, a race could occur. If the
         * host were to set the pending_send_sz after we have sampled
         * pending_send_sz, and the ring buffer blocks before we commit the
         * read index, we could miss sending the interrupt. Issue a full
         * memory barrier to address this.
         */
        virt_mb();

        /*
         * If the pending_send_sz is zero, then the ring buffer is not
         * blocked and there is no need to signal. This is by far the
         * most common case, so exit quickly for best performance.
         */
        pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
        if (!pending_sz)
                return;

        /*
         * Ensure the read of write_index in hv_get_bytes_to_write()
         * happens after the read of pending_send_sz.
         */
        virt_rmb();
        curr_write_sz = hv_get_bytes_to_write(rbi);
        bytes_read = hv_pkt_iter_bytes_read(rbi, start_read_index);

        /*
         * We want to signal the host only if we're transitioning
         * from a "not enough free space" state to an "enough free
         * space" state. For example, it's possible that this function
         * could run and free up enough space to signal the host, and then
         * run again and free up additional space before the host has a
         * chance to clear the pending_send_sz. The 2nd invocation would
         * be a null transition from "enough free space" to "enough free
         * space", which doesn't warrant a signal.
         *
         * Exactly filling the ring buffer is treated as "not enough
         * space". The ring buffer always must have at least one byte
         * empty so the empty and full conditions are distinguishable.
         * hv_get_bytes_to_write() doesn't fully tell the truth in
         * this regard.
         *
         * So first check if we were in the "enough free space" state
         * before we began the iteration. If so, the host was not
         * blocked, and there's no need to signal.
         */
        if (curr_write_sz - bytes_read > pending_sz)
                return;

        /*
         * Similarly, if the new state is "not enough space", then
         * there's no need to signal.
         */
        if (curr_write_sz <= pending_sz)
                return;

        vmbus_setevent(channel);
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_close);
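/*
 * Worked example of the signaling decision in hv_pkt_iter_close() above
 * (numbers are for illustration only): with pending_sz = 1024,
 * curr_write_sz = 1200 and bytes_read = 400, the free space before this
 * cycle was 1200 - 400 = 800 <= 1024 (host blocked) and is now
 * 1200 > 1024, so this is a "not enough" -> "enough" transition and the
 * host is signaled.
 */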