1 /* 2 * 3 * Copyright (c) 2009, Microsoft Corporation. 4 * 5 * This program is free software; you can redistribute it and/or modify it 6 * under the terms and conditions of the GNU General Public License, 7 * version 2, as published by the Free Software Foundation. 8 * 9 * This program is distributed in the hope it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * more details. 13 * 14 * You should have received a copy of the GNU General Public License along with 15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple 16 * Place - Suite 330, Boston, MA 02111-1307 USA. 17 * 18 * Authors: 19 * Haiyang Zhang <haiyangz@microsoft.com> 20 * Hank Janssen <hjanssen@microsoft.com> 21 * K. Y. Srinivasan <kys@microsoft.com> 22 * 23 */ 24 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 25 26 #include <linux/kernel.h> 27 #include <linux/mm.h> 28 #include <linux/hyperv.h> 29 #include <linux/uio.h> 30 31 #include "hyperv_vmbus.h" 32 33 void hv_begin_read(struct hv_ring_buffer_info *rbi) 34 { 35 rbi->ring_buffer->interrupt_mask = 1; 36 virt_mb(); 37 } 38 39 u32 hv_end_read(struct hv_ring_buffer_info *rbi) 40 { 41 42 rbi->ring_buffer->interrupt_mask = 0; 43 virt_mb(); 44 45 /* 46 * Now check to see if the ring buffer is still empty. 47 * If it is not, we raced and we need to process new 48 * incoming messages. 49 */ 50 return hv_get_bytes_to_read(rbi); 51 } 52 53 /* 54 * When we write to the ring buffer, check if the host needs to 55 * be signaled. Here is the details of this protocol: 56 * 57 * 1. The host guarantees that while it is draining the 58 * ring buffer, it will set the interrupt_mask to 59 * indicate it does not need to be interrupted when 60 * new data is placed. 61 * 62 * 2. The host guarantees that it will completely drain 63 * the ring buffer before exiting the read loop. Further, 64 * once the ring buffer is empty, it will clear the 65 * interrupt_mask and re-check to see if new data has 66 * arrived. 67 */ 68 69 static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi) 70 { 71 virt_mb(); 72 if (READ_ONCE(rbi->ring_buffer->interrupt_mask)) 73 return false; 74 75 /* check interrupt_mask before read_index */ 76 virt_rmb(); 77 /* 78 * This is the only case we need to signal when the 79 * ring transitions from being empty to non-empty. 80 */ 81 if (old_write == READ_ONCE(rbi->ring_buffer->read_index)) 82 return true; 83 84 return false; 85 } 86 87 /* Get the next write location for the specified ring buffer. */ 88 static inline u32 89 hv_get_next_write_location(struct hv_ring_buffer_info *ring_info) 90 { 91 u32 next = ring_info->ring_buffer->write_index; 92 93 return next; 94 } 95 96 /* Set the next write location for the specified ring buffer. */ 97 static inline void 98 hv_set_next_write_location(struct hv_ring_buffer_info *ring_info, 99 u32 next_write_location) 100 { 101 ring_info->ring_buffer->write_index = next_write_location; 102 } 103 104 /* Get the next read location for the specified ring buffer. */ 105 static inline u32 106 hv_get_next_read_location(struct hv_ring_buffer_info *ring_info) 107 { 108 u32 next = ring_info->ring_buffer->read_index; 109 110 return next; 111 } 112 113 /* 114 * Get the next read location + offset for the specified ring buffer. 115 * This allows the caller to skip. 116 */ 117 static inline u32 118 hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info, 119 u32 offset) 120 { 121 u32 next = ring_info->ring_buffer->read_index; 122 123 next += offset; 124 next %= ring_info->ring_datasize; 125 126 return next; 127 } 128 129 /* Set the next read location for the specified ring buffer. */ 130 static inline void 131 hv_set_next_read_location(struct hv_ring_buffer_info *ring_info, 132 u32 next_read_location) 133 { 134 ring_info->ring_buffer->read_index = next_read_location; 135 ring_info->priv_read_index = next_read_location; 136 } 137 138 /* Get the size of the ring buffer. */ 139 static inline u32 140 hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info) 141 { 142 return ring_info->ring_datasize; 143 } 144 145 /* Get the read and write indices as u64 of the specified ring buffer. */ 146 static inline u64 147 hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info) 148 { 149 return (u64)ring_info->ring_buffer->write_index << 32; 150 } 151 152 /* 153 * Helper routine to copy to source from ring buffer. 154 * Assume there is enough room. Handles wrap-around in src case only!! 155 */ 156 static u32 hv_copyfrom_ringbuffer( 157 struct hv_ring_buffer_info *ring_info, 158 void *dest, 159 u32 destlen, 160 u32 start_read_offset) 161 { 162 void *ring_buffer = hv_get_ring_buffer(ring_info); 163 u32 ring_buffer_size = hv_get_ring_buffersize(ring_info); 164 165 u32 frag_len; 166 167 /* wrap-around detected at the src */ 168 if (destlen > ring_buffer_size - start_read_offset) { 169 frag_len = ring_buffer_size - start_read_offset; 170 171 memcpy(dest, ring_buffer + start_read_offset, frag_len); 172 memcpy(dest + frag_len, ring_buffer, destlen - frag_len); 173 } else 174 175 memcpy(dest, ring_buffer + start_read_offset, destlen); 176 177 178 start_read_offset += destlen; 179 start_read_offset %= ring_buffer_size; 180 181 return start_read_offset; 182 } 183 184 185 /* 186 * Helper routine to copy from source to ring buffer. 187 * Assume there is enough room. Handles wrap-around in dest case only!! 188 */ 189 static u32 hv_copyto_ringbuffer( 190 struct hv_ring_buffer_info *ring_info, 191 u32 start_write_offset, 192 void *src, 193 u32 srclen) 194 { 195 void *ring_buffer = hv_get_ring_buffer(ring_info); 196 u32 ring_buffer_size = hv_get_ring_buffersize(ring_info); 197 u32 frag_len; 198 199 /* wrap-around detected! */ 200 if (srclen > ring_buffer_size - start_write_offset) { 201 frag_len = ring_buffer_size - start_write_offset; 202 memcpy(ring_buffer + start_write_offset, src, frag_len); 203 memcpy(ring_buffer, src + frag_len, srclen - frag_len); 204 } else 205 memcpy(ring_buffer + start_write_offset, src, srclen); 206 207 start_write_offset += srclen; 208 start_write_offset %= ring_buffer_size; 209 210 return start_write_offset; 211 } 212 213 /* Get various debug metrics for the specified ring buffer. */ 214 void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info, 215 struct hv_ring_buffer_debug_info *debug_info) 216 { 217 u32 bytes_avail_towrite; 218 u32 bytes_avail_toread; 219 220 if (ring_info->ring_buffer) { 221 hv_get_ringbuffer_availbytes(ring_info, 222 &bytes_avail_toread, 223 &bytes_avail_towrite); 224 225 debug_info->bytes_avail_toread = bytes_avail_toread; 226 debug_info->bytes_avail_towrite = bytes_avail_towrite; 227 debug_info->current_read_index = 228 ring_info->ring_buffer->read_index; 229 debug_info->current_write_index = 230 ring_info->ring_buffer->write_index; 231 debug_info->current_interrupt_mask = 232 ring_info->ring_buffer->interrupt_mask; 233 } 234 } 235 236 /* Initialize the ring buffer. */ 237 int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info, 238 void *buffer, u32 buflen) 239 { 240 if (sizeof(struct hv_ring_buffer) != PAGE_SIZE) 241 return -EINVAL; 242 243 memset(ring_info, 0, sizeof(struct hv_ring_buffer_info)); 244 245 ring_info->ring_buffer = (struct hv_ring_buffer *)buffer; 246 ring_info->ring_buffer->read_index = 247 ring_info->ring_buffer->write_index = 0; 248 249 /* Set the feature bit for enabling flow control. */ 250 ring_info->ring_buffer->feature_bits.value = 1; 251 252 ring_info->ring_size = buflen; 253 ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer); 254 255 spin_lock_init(&ring_info->ring_lock); 256 257 return 0; 258 } 259 260 /* Cleanup the ring buffer. */ 261 void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info) 262 { 263 } 264 265 /* Write to the ring buffer. */ 266 int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info, 267 struct kvec *kv_list, u32 kv_count, bool *signal, bool lock) 268 { 269 int i = 0; 270 u32 bytes_avail_towrite; 271 u32 totalbytes_towrite = 0; 272 273 u32 next_write_location; 274 u32 old_write; 275 u64 prev_indices = 0; 276 unsigned long flags = 0; 277 278 for (i = 0; i < kv_count; i++) 279 totalbytes_towrite += kv_list[i].iov_len; 280 281 totalbytes_towrite += sizeof(u64); 282 283 if (lock) 284 spin_lock_irqsave(&outring_info->ring_lock, flags); 285 286 bytes_avail_towrite = hv_get_bytes_to_write(outring_info); 287 288 /* 289 * If there is only room for the packet, assume it is full. 290 * Otherwise, the next time around, we think the ring buffer 291 * is empty since the read index == write index. 292 */ 293 if (bytes_avail_towrite <= totalbytes_towrite) { 294 if (lock) 295 spin_unlock_irqrestore(&outring_info->ring_lock, flags); 296 return -EAGAIN; 297 } 298 299 /* Write to the ring buffer */ 300 next_write_location = hv_get_next_write_location(outring_info); 301 302 old_write = next_write_location; 303 304 for (i = 0; i < kv_count; i++) { 305 next_write_location = hv_copyto_ringbuffer(outring_info, 306 next_write_location, 307 kv_list[i].iov_base, 308 kv_list[i].iov_len); 309 } 310 311 /* Set previous packet start */ 312 prev_indices = hv_get_ring_bufferindices(outring_info); 313 314 next_write_location = hv_copyto_ringbuffer(outring_info, 315 next_write_location, 316 &prev_indices, 317 sizeof(u64)); 318 319 /* Issue a full memory barrier before updating the write index */ 320 virt_mb(); 321 322 /* Now, update the write location */ 323 hv_set_next_write_location(outring_info, next_write_location); 324 325 326 if (lock) 327 spin_unlock_irqrestore(&outring_info->ring_lock, flags); 328 329 *signal = hv_need_to_signal(old_write, outring_info); 330 return 0; 331 } 332 333 int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, 334 void *buffer, u32 buflen, u32 *buffer_actual_len, 335 u64 *requestid, bool *signal, bool raw) 336 { 337 u32 bytes_avail_toread; 338 u32 next_read_location = 0; 339 u64 prev_indices = 0; 340 struct vmpacket_descriptor desc; 341 u32 offset; 342 u32 packetlen; 343 int ret = 0; 344 345 if (buflen <= 0) 346 return -EINVAL; 347 348 349 *buffer_actual_len = 0; 350 *requestid = 0; 351 352 bytes_avail_toread = hv_get_bytes_to_read(inring_info); 353 /* Make sure there is something to read */ 354 if (bytes_avail_toread < sizeof(desc)) { 355 /* 356 * No error is set when there is even no header, drivers are 357 * supposed to analyze buffer_actual_len. 358 */ 359 return ret; 360 } 361 362 next_read_location = hv_get_next_read_location(inring_info); 363 next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc, 364 sizeof(desc), 365 next_read_location); 366 367 offset = raw ? 0 : (desc.offset8 << 3); 368 packetlen = (desc.len8 << 3) - offset; 369 *buffer_actual_len = packetlen; 370 *requestid = desc.trans_id; 371 372 if (bytes_avail_toread < packetlen + offset) 373 return -EAGAIN; 374 375 if (packetlen > buflen) 376 return -ENOBUFS; 377 378 next_read_location = 379 hv_get_next_readlocation_withoffset(inring_info, offset); 380 381 next_read_location = hv_copyfrom_ringbuffer(inring_info, 382 buffer, 383 packetlen, 384 next_read_location); 385 386 next_read_location = hv_copyfrom_ringbuffer(inring_info, 387 &prev_indices, 388 sizeof(u64), 389 next_read_location); 390 391 /* 392 * Make sure all reads are done before we update the read index since 393 * the writer may start writing to the read area once the read index 394 * is updated. 395 */ 396 virt_mb(); 397 398 /* Update the read index */ 399 hv_set_next_read_location(inring_info, next_read_location); 400 401 *signal = hv_need_to_signal_on_read(inring_info); 402 403 return ret; 404 } 405