/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/prefetch.h>

#include "hyperv_vmbus.h"

#define VMBUS_PKT_TRAILER	8

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 *
 * KYS: Oct. 30, 2016:
 * It looks like Windows hosts have logic to deal with DOS attacks that
 * can be triggered if it receives interrupts when it is not expecting
 * the interrupt. The host expects interrupts only when the ring
 * transitions from empty to non-empty (or full to non-full on the guest
 * to host ring).
 * So, base the signaling decision solely on the ring state until the
 * host logic is fixed.
 */

static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;

	virt_mb();
	if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
		return;

	/* check interrupt_mask before read_index */
	virt_rmb();

	/*
	 * This is the only case we need to signal: when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
		vmbus_setevent(channel);
}
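
/*
 * Illustrative sketch (not part of the upstream driver): a model of
 * the reader-side guarantees described in the protocol comment above.
 * A real host parses packets one by one; this model consumes
 * everything in a single step. The function name is hypothetical.
 */
static void __maybe_unused
example_reader_drain(struct hv_ring_buffer_info *rbi)
{
	do {
		/* Step 1: suppress interrupts while draining */
		WRITE_ONCE(rbi->ring_buffer->interrupt_mask, 1);
		virt_mb();

		/* Drain: a stand-in for the host's packet read loop */
		WRITE_ONCE(rbi->ring_buffer->read_index,
			   READ_ONCE(rbi->ring_buffer->write_index));

		/* Step 2: unmask, then re-check for data that raced in */
		WRITE_ONCE(rbi->ring_buffer->interrupt_mask, 0);
		virt_mb();
	} while (READ_ONCE(rbi->ring_buffer->read_index) !=
		 READ_ONCE(rbi->ring_buffer->write_index));
}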
/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}

/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
			   u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
			  u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
	ring_info->priv_read_index = next_read_location;
}

/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/* Get the read and write indices as u64 of the specified ring buffer. */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}

/*
 * Helper routine to copy from source to ring buffer.
 * Assume there is enough room. Handles wrap-around in dest case only!!
 */
static u32 hv_copyto_ringbuffer(struct hv_ring_buffer_info *ring_info,
				u32 start_write_offset,
				const void *src,
				u32 srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	memcpy(ring_buffer + start_write_offset, src, srclen);

	start_write_offset += srclen;
	if (start_write_offset >= ring_buffer_size)
		start_write_offset -= ring_buffer_size;

	return start_write_offset;
}

/*
 * hv_get_ringbuffer_availbytes()
 *
 * Get the number of bytes available to read from and to write to
 * the specified ring buffer.
 */
static void
hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
			     u32 *read, u32 *write)
{
	u32 read_loc, write_loc, dsize;

	/* Capture the read/write indices before they change */
	read_loc = READ_ONCE(rbi->ring_buffer->read_index);
	write_loc = READ_ONCE(rbi->ring_buffer->write_index);
	dsize = rbi->ring_datasize;

	*write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
		read_loc - write_loc;
	*read = dsize - *write;
}

/* Get various debug metrics for the specified ring buffer. */
void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
				 struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	if (ring_info->ring_buffer) {
		hv_get_ringbuffer_availbytes(ring_info,
					     &bytes_avail_toread,
					     &bytes_avail_towrite);

		debug_info->bytes_avail_toread = bytes_avail_toread;
		debug_info->bytes_avail_towrite = bytes_avail_towrite;
		debug_info->current_read_index =
			ring_info->ring_buffer->read_index;
		debug_info->current_write_index =
			ring_info->ring_buffer->write_index;
		debug_info->current_interrupt_mask =
			ring_info->ring_buffer->interrupt_mask;
	}
}
EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);
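
/*
 * Illustrative sketch (not part of the upstream driver): the offset
 * advance used by hv_copyto_ringbuffer(), with a worked example. With
 * ring_datasize = 4096, a 200-byte copy starting at offset 4000 ends
 * at (4000 + 200) - 4096 = 104. One conditional subtraction suffices
 * because a single copy is never longer than the ring itself. The
 * helper name is hypothetical.
 */
static inline u32 __maybe_unused
example_advance_write_offset(u32 offset, u32 len, u32 dsize)
{
	offset += len;
	if (offset >= dsize)
		offset -= dsize;
	return offset;
}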
/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       struct page *pages, u32 page_cnt)
{
	int i;
	struct page **pages_wraparound;

	BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));

	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

	/*
	 * The first page holds struct hv_ring_buffer; do the wraparound
	 * mapping for the rest.
	 */
	pages_wraparound = kcalloc(page_cnt * 2 - 1, sizeof(struct page *),
				   GFP_KERNEL);
	if (!pages_wraparound)
		return -ENOMEM;

	pages_wraparound[0] = pages;
	for (i = 0; i < 2 * (page_cnt - 1); i++)
		pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];

	ring_info->ring_buffer = (struct hv_ring_buffer *)
		vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);

	kfree(pages_wraparound);

	if (!ring_info->ring_buffer)
		return -ENOMEM;

	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	/* Set the feature bit for enabling flow control. */
	ring_info->ring_buffer->feature_bits.value = 1;

	ring_info->ring_size = page_cnt << PAGE_SHIFT;
	ring_info->ring_size_div10_reciprocal =
		reciprocal_value(ring_info->ring_size / 10);
	ring_info->ring_datasize = ring_info->ring_size -
		sizeof(struct hv_ring_buffer);

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}

/* Clean up the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
	vunmap(ring_info->ring_buffer);
	ring_info->ring_buffer = NULL;
}
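
/*
 * Illustrative sketch (not part of the upstream driver): the page
 * layout produced by hv_ringbuffer_init() for page_cnt = 4. The data
 * pages 1..3 appear twice, back to back:
 *
 *   pages_wraparound[] = { page0, page1, page2, page3,
 *                          page1, page2, page3 }
 *
 * A copy that runs past the end of the first data mapping lands in
 * the aliased second mapping, which is why hv_copyto_ringbuffer()
 * and hv_ringbuffer_read() never split a memcpy() across the wrap
 * point. The helper below mirrors the index computation of the init
 * loop; its name is hypothetical.
 */
static inline struct page * __maybe_unused
example_wraparound_page(struct page *pages, u32 page_cnt, u32 i)
{
	/* i ranges over 0 .. 2 * (page_cnt - 1) - 1 */
	return &pages[i % (page_cnt - 1) + 1];
}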
/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
			const struct kvec *kv_list, u32 kv_count)
{
	int i;
	u32 bytes_avail_towrite;
	u32 totalbytes_towrite = sizeof(u64);
	u32 next_write_location;
	u32 old_write;
	u64 prev_indices;
	unsigned long flags;
	struct hv_ring_buffer_info *outring_info = &channel->outbound;

	if (channel->rescind)
		return -ENODEV;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	bytes_avail_towrite = hv_get_bytes_to_write(outring_info);

	/*
	 * If there is only room for the packet, assume it is full.
	 * Otherwise, the next time around, we would think the ring buffer
	 * is empty since the read index == write index.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
							   next_write_location,
							   kv_list[i].iov_base,
							   kv_list[i].iov_len);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
						   next_write_location,
						   &prev_indices,
						   sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	virt_mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	hv_signal_on_write(old_write, channel);

	if (channel->rescind)
		return -ENODEV;

	return 0;
}

int hv_ringbuffer_read(struct vmbus_channel *channel,
		       void *buffer, u32 buflen, u32 *buffer_actual_len,
		       u64 *requestid, bool raw)
{
	struct vmpacket_descriptor *desc;
	u32 packetlen, offset;

	if (unlikely(buflen == 0))
		return -EINVAL;

	*buffer_actual_len = 0;
	*requestid = 0;

	/* Make sure there is something to read */
	desc = hv_pkt_iter_first(channel);
	if (desc == NULL) {
		/*
		 * No error is returned when there is not even a header;
		 * drivers are expected to check buffer_actual_len instead.
		 */
		return 0;
	}

	offset = raw ? 0 : (desc->offset8 << 3);
	packetlen = (desc->len8 << 3) - offset;
	*buffer_actual_len = packetlen;
	*requestid = desc->trans_id;

	if (unlikely(packetlen > buflen))
		return -ENOBUFS;

	/* Since the ring is double mapped, only one copy is necessary */
	memcpy(buffer, (const char *)desc + offset, packetlen);

	/* Advance ring index to next packet descriptor */
	__hv_pkt_iter_next(channel, desc);

	/* Notify host of update */
	hv_pkt_iter_close(channel);

	return 0;
}

/*
 * Determine the number of bytes available in the ring buffer after
 * the current iterator (priv_read_index) location.
 *
 * This is similar to hv_get_bytes_to_read but uses the private
 * read index instead.
 */
static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
{
	u32 priv_read_loc = rbi->priv_read_index;
	u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index);

	if (write_loc >= priv_read_loc)
		return write_loc - priv_read_loc;
	else
		return (rbi->ring_datasize - priv_read_loc) + write_loc;
}

/*
 * Get the first vmbus packet from the ring buffer after read_index.
 *
 * If the ring buffer is empty, returns NULL and no other action is needed.
 */
struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	struct vmpacket_descriptor *desc;

	if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
		return NULL;

	desc = hv_get_ring_buffer(rbi) + rbi->priv_read_index;
	if (desc)
		prefetch((char *)desc + (desc->len8 << 3));

	return desc;
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_first);
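
/*
 * Illustrative sketch (not part of the upstream driver): how a VMBus
 * driver typically consumes packets with the iterator API above. The
 * function name and the payload handling are hypothetical; the
 * iterator calls are the ones defined in this file.
 */
static void __maybe_unused
example_drain_channel(struct vmbus_channel *channel)
{
	struct vmpacket_descriptor *desc;

	for (desc = hv_pkt_iter_first(channel);
	     desc;
	     desc = __hv_pkt_iter_next(channel, desc)) {
		/* Payload follows the descriptor at offset8 * 8 bytes */
		u32 offset = desc->offset8 << 3;
		u32 len = (desc->len8 << 3) - offset;

		pr_debug("packet type %u, payload %u bytes\n",
			 desc->type, len);
	}

	/* Publish read_index and signal the host if it was blocked */
	hv_pkt_iter_close(channel);
}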
/*
 * Get the next vmbus packet from the ring buffer.
 *
 * Advances the current location (priv_read_index) and checks for more
 * data. If the end of the ring buffer is reached, returns NULL.
 */
struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
		   const struct vmpacket_descriptor *desc)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 packetlen = desc->len8 << 3;
	u32 dsize = rbi->ring_datasize;

	/* bump offset to next potential packet */
	rbi->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
	if (rbi->priv_read_index >= dsize)
		rbi->priv_read_index -= dsize;

	/* more data? */
	return hv_pkt_iter_first(channel);
}
EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);

/* How many bytes were read in this iterator cycle */
static u32 hv_pkt_iter_bytes_read(const struct hv_ring_buffer_info *rbi,
				  u32 start_read_index)
{
	if (rbi->priv_read_index >= start_read_index)
		return rbi->priv_read_index - start_read_index;
	else
		return rbi->ring_datasize - start_read_index +
			rbi->priv_read_index;
}
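
/*
 * Illustrative sketch (not part of the upstream driver): the wrap
 * handling in hv_pkt_iter_bytes_read() with concrete numbers. With
 * ring_datasize = 4096, an iteration that started at index 4000 and
 * left priv_read_index at 120 consumed (4096 - 4000) + 120 = 216
 * bytes. The helper name is hypothetical.
 */
static inline u32 __maybe_unused
example_bytes_consumed(u32 priv_read_index, u32 start_read_index, u32 dsize)
{
	if (priv_read_index >= start_read_index)
		return priv_read_index - start_read_index;
	return (dsize - start_read_index) + priv_read_index;
}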
/*
 * Update the host ring buffer after iterating over packets. If the host
 * has stopped queuing new entries because it found the ring buffer full,
 * and sufficient space is being freed up, signal the host. But be careful
 * to only signal the host when necessary, both for performance reasons and
 * because Hyper-V protects itself by throttling guests that signal
 * inappropriately.
 *
 * Determining when to signal is tricky. There are three key data inputs
 * that must be handled in this order to avoid race conditions:
 *
 * 1. Update the read_index
 * 2. Read the pending_send_sz
 * 3. Read the current write_index
 *
 * The interrupt_mask is not used to determine when to signal. The
 * interrupt_mask is used only on the guest->host ring buffer when
 * sending requests to the host. The host does not use it on the host->
 * guest ring buffer to indicate whether it should be signaled.
 */
void hv_pkt_iter_close(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 curr_write_sz, pending_sz, bytes_read, start_read_index;

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	virt_rmb();
	start_read_index = rbi->ring_buffer->read_index;
	rbi->ring_buffer->read_index = rbi->priv_read_index;

	/*
	 * Older versions of Hyper-V (before WS2012 and Win8) do not
	 * implement pending_send_sz and simply poll if the host->guest
	 * ring buffer is full. No signaling is needed or expected.
	 */
	if (!rbi->ring_buffer->feature_bits.feat_pending_send_sz)
		return;

	/*
	 * Issue a full memory barrier before making the signaling decision.
	 * If reading pending_send_sz were reordered to happen before we
	 * commit the new read_index, a race could occur: were the host to
	 * set pending_send_sz after we have sampled it, and were the ring
	 * buffer to block before we commit the read index, we could miss
	 * sending the interrupt. Issue a full memory barrier to address
	 * this.
	 */
	virt_mb();

	/*
	 * If pending_send_sz is zero, then the ring buffer is not
	 * blocked and there is no need to signal. This is by far the
	 * most common case, so exit quickly for best performance.
	 */
	pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
	if (!pending_sz)
		return;

	/*
	 * Ensure the read of write_index in hv_get_bytes_to_write()
	 * happens after the read of pending_send_sz.
	 */
	virt_rmb();
	curr_write_sz = hv_get_bytes_to_write(rbi);
	bytes_read = hv_pkt_iter_bytes_read(rbi, start_read_index);

	/*
	 * We want to signal the host only if we're transitioning
	 * from a "not enough free space" state to an "enough free
	 * space" state. For example, it's possible that this function
	 * could run and free up enough space to signal the host, and then
	 * run again and free up additional space before the host has a
	 * chance to clear the pending_send_sz. The 2nd invocation would
	 * be a null transition from "enough free space" to "enough free
	 * space", which doesn't warrant a signal.
	 *
	 * Exactly filling the ring buffer is treated as "not enough
	 * space". The ring buffer must always have at least one byte
	 * empty so that the empty and full conditions are distinguishable.
	 * hv_get_bytes_to_write() doesn't fully tell the truth in
	 * this regard.
	 *
	 * So first check if we were in the "enough free space" state
	 * before we began the iteration. If so, the host was not
	 * blocked, and there's no need to signal.
	 */
	if (curr_write_sz - bytes_read > pending_sz)
		return;

	/*
	 * Similarly, if the new state is "not enough space", then
	 * there's no need to signal.
	 */
	if (curr_write_sz <= pending_sz)
		return;

	vmbus_setevent(channel);
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_close);
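
/*
 * Illustrative sketch (not part of the upstream driver): how a caller
 * feeds hv_ringbuffer_write(). The packet is described as a kvec
 * array; the function appends the 8-byte prev_indices trailer itself,
 * which is why totalbytes_towrite starts at sizeof(u64). The function
 * name and descriptor handling here are hypothetical.
 */
static int __maybe_unused
example_send_packet(struct vmbus_channel *channel,
		    struct vmpacket_descriptor *desc, u32 desclen,
		    void *payload, u32 payloadlen)
{
	struct kvec kv[2] = {
		{ .iov_base = desc,    .iov_len = desclen    },
		{ .iov_base = payload, .iov_len = payloadlen },
	};

	/* Returns -EAGAIN when the ring is full, -ENODEV on rescind */
	return hv_ringbuffer_write(channel, kv, ARRAY_SIZE(kv));
}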