xref: /openbmc/linux/drivers/hv/ring_buffer.c (revision ff148d8a)
/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/prefetch.h>

#include "hyperv_vmbus.h"

#define VMBUS_PKT_TRAILER	8
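
/*
 * The 8-byte trailer is the u64 of prev_indices that
 * hv_ringbuffer_write() appends after each packet ("Set previous
 * packet start" below); the read-side iterator skips it in
 * __hv_pkt_iter_next().
 */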
37 
38 /*
39  * When we write to the ring buffer, check if the host needs to
40  * be signaled. Here is the details of this protocol:
41  *
42  *	1. The host guarantees that while it is draining the
43  *	   ring buffer, it will set the interrupt_mask to
44  *	   indicate it does not need to be interrupted when
45  *	   new data is placed.
46  *
47  *	2. The host guarantees that it will completely drain
48  *	   the ring buffer before exiting the read loop. Further,
49  *	   once the ring buffer is empty, it will clear the
50  *	   interrupt_mask and re-check to see if new data has
51  *	   arrived.
52  *
53  * KYS: Oct. 30, 2016:
54  * It looks like Windows hosts have logic to deal with DOS attacks that
55  * can be triggered if it receives interrupts when it is not expecting
56  * the interrupt. The host expects interrupts only when the ring
57  * transitions from empty to non-empty (or full to non full on the guest
58  * to host ring).
59  * So, base the signaling decision solely on the ring state until the
60  * host logic is fixed.
61  */

static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;

	virt_mb();
	if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
		return;

	/* check interrupt_mask before read_index */
	virt_rmb();
	/*
	 * This is the only case we need to signal: when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == READ_ONCE(rbi->ring_buffer->read_index)) {
		++channel->intr_out_empty;
		vmbus_setevent(channel);
	}
}
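
/*
 * Usage sketch (this mirrors hv_ringbuffer_write() below; it is not a
 * separate API): the writer samples the write index *before* copying
 * data in, so the empty -> non-empty transition can be detected
 * afterwards:
 *
 *	old_write = hv_get_next_write_location(outring_info);
 *	... copy packet and trailer into the ring ...
 *	virt_mb();
 *	hv_set_next_write_location(outring_info, next_write_location);
 *	hv_signal_on_write(old_write, channel);
 */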

/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}

/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
		     u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
		    u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
	ring_info->priv_read_index = next_read_location;
}

/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}
/*
 * Get the write index of the specified ring buffer as the high half
 * of a u64, for recording in the packet trailer; the low (read index)
 * half is left zero.
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}

/*
 * Helper routine to copy from source to ring buffer.
 * Assumes there is enough room. Handles wrap-around of the
 * destination offset only: the memcpy itself may safely run past the
 * nominal end of the ring because the data pages are double-mapped
 * (see hv_ringbuffer_init()).
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	u32				start_write_offset,
	const void			*src,
	u32				srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	memcpy(ring_buffer + start_write_offset, src, srclen);

	start_write_offset += srclen;
	if (start_write_offset >= ring_buffer_size)
		start_write_offset -= ring_buffer_size;

	return start_write_offset;
}
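
/*
 * Example of the offset arithmetic above (values are illustrative):
 * with ring_buffer_size = 12288, start_write_offset = 12280 and
 * srclen = 16, the memcpy runs 8 bytes past the nominal end of the
 * ring (safe, since the data pages are mapped twice back to back)
 * and the returned offset wraps to 12280 + 16 - 12288 = 8.
 */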

/*
 * hv_get_ringbuffer_availbytes()
 *
 * Get the number of bytes available to read from, and to write to,
 * the specified ring buffer.
 */
static void
hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
			     u32 *read, u32 *write)
{
	u32 read_loc, write_loc, dsize;

	/* Capture the read/write indices before they change */
	read_loc = READ_ONCE(rbi->ring_buffer->read_index);
	write_loc = READ_ONCE(rbi->ring_buffer->write_index);
	dsize = rbi->ring_datasize;

	*write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
		read_loc - write_loc;
	*read = dsize - *write;
}
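
/*
 * Worked example (illustrative values): with dsize = 12288,
 * read_loc = 100 and write_loc = 300, *write = 12288 - 200 = 12088
 * and *read = 200.  One byte is always left unused so that a full
 * ring (write just behind read) is distinguishable from an empty one
 * (write == read); see the comment in hv_pkt_iter_close().
 */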

/* Get various debug metrics for the specified ring buffer. */
int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
				struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	mutex_lock(&ring_info->ring_buffer_mutex);

	if (!ring_info->ring_buffer) {
		mutex_unlock(&ring_info->ring_buffer_mutex);
		return -EINVAL;
	}

	hv_get_ringbuffer_availbytes(ring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);
	debug_info->bytes_avail_toread = bytes_avail_toread;
	debug_info->bytes_avail_towrite = bytes_avail_towrite;
	debug_info->current_read_index = ring_info->ring_buffer->read_index;
	debug_info->current_write_index = ring_info->ring_buffer->write_index;
	debug_info->current_interrupt_mask
		= ring_info->ring_buffer->interrupt_mask;
	mutex_unlock(&ring_info->ring_buffer_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);

/* Initialize a channel's ring buffer info mutex locks */
void hv_ringbuffer_pre_init(struct vmbus_channel *channel)
{
	mutex_init(&channel->inbound.ring_buffer_mutex);
	mutex_init(&channel->outbound.ring_buffer_mutex);
}

/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       struct page *pages, u32 page_cnt)
{
	int i;
	struct page **pages_wraparound;

	BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));

	/*
	 * First page holds struct hv_ring_buffer, do wraparound mapping for
	 * the rest.
	 */
	pages_wraparound = kcalloc(page_cnt * 2 - 1, sizeof(struct page *),
				   GFP_KERNEL);
	if (!pages_wraparound)
		return -ENOMEM;

	pages_wraparound[0] = pages;
	for (i = 0; i < 2 * (page_cnt - 1); i++)
		pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];
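
	/*
	 * For example, with page_cnt = 3 (header page P0 plus data
	 * pages P1, P2), the array built above is {P0, P1, P2, P1, P2}:
	 * the data pages appear twice, so reads and writes that run
	 * past the end of the ring continue seamlessly at its start.
	 */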

	ring_info->ring_buffer = (struct hv_ring_buffer *)
		vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);

	kfree(pages_wraparound);

	if (!ring_info->ring_buffer)
		return -ENOMEM;

	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	/* Set the feature bit for enabling flow control. */
	ring_info->ring_buffer->feature_bits.value = 1;

	ring_info->ring_size = page_cnt << PAGE_SHIFT;
	ring_info->ring_size_div10_reciprocal =
		reciprocal_value(ring_info->ring_size / 10);
	ring_info->ring_datasize = ring_info->ring_size -
		sizeof(struct hv_ring_buffer);
	ring_info->priv_read_index = 0;

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}
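
/*
 * Sizing example (illustrative, assuming 4 KiB pages): for a 4-page
 * ring (page_cnt = 4), ring_size = 16384 bytes, of which one page is
 * the struct hv_ring_buffer header (per the BUILD_BUG_ON above),
 * leaving ring_datasize = 12288 bytes for packet data.
 */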

/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
	mutex_lock(&ring_info->ring_buffer_mutex);
	vunmap(ring_info->ring_buffer);
	ring_info->ring_buffer = NULL;
	mutex_unlock(&ring_info->ring_buffer_mutex);
}

/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
			const struct kvec *kv_list, u32 kv_count)
{
	int i;
	u32 bytes_avail_towrite;
	u32 totalbytes_towrite = sizeof(u64);
	u32 next_write_location;
	u32 old_write;
	u64 prev_indices;
	unsigned long flags;
	struct hv_ring_buffer_info *outring_info = &channel->outbound;

	if (channel->rescind)
		return -ENODEV;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	bytes_avail_towrite = hv_get_bytes_to_write(outring_info);

	/*
	 * If there is only just enough room for the packet, treat the
	 * ring as full.  Otherwise, the next time around, we would think
	 * the ring buffer is empty, since read index == write index.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		++channel->out_full_total;

		if (!channel->out_full_flag) {
			++channel->out_full_first;
			channel->out_full_flag = true;
		}

		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	channel->out_full_flag = false;

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
						     next_write_location,
						     kv_list[i].iov_base,
						     kv_list[i].iov_len);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
					     next_write_location,
					     &prev_indices,
					     sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	virt_mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	hv_signal_on_write(old_write, channel);

	if (channel->rescind)
		return -ENODEV;

	return 0;
}
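
/*
 * Hypothetical caller sketch (the real callers live in channel.c,
 * e.g. vmbus_sendpacket()): the packet descriptor and payload are
 * passed as a kvec list and written out as a single packet:
 *
 *	struct kvec bufferlist[2] = {
 *		{ .iov_base = &desc,   .iov_len = sizeof(desc) },
 *		{ .iov_base = payload, .iov_len = payload_len },
 *	};
 *
 *	ret = hv_ringbuffer_write(channel, bufferlist, 2);
 */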

int hv_ringbuffer_read(struct vmbus_channel *channel,
		       void *buffer, u32 buflen, u32 *buffer_actual_len,
		       u64 *requestid, bool raw)
{
	struct vmpacket_descriptor *desc;
	u32 packetlen, offset;

	if (unlikely(buflen == 0))
		return -EINVAL;

	*buffer_actual_len = 0;
	*requestid = 0;

	/* Make sure there is something to read */
	desc = hv_pkt_iter_first(channel);
	if (desc == NULL) {
		/*
		 * No error is set when there is not even a header; drivers
		 * are expected to check buffer_actual_len instead.
		 */
		return 0;
	}

	offset = raw ? 0 : (desc->offset8 << 3);
	packetlen = (desc->len8 << 3) - offset;
	*buffer_actual_len = packetlen;
	*requestid = desc->trans_id;

	if (unlikely(packetlen > buflen))
		return -ENOBUFS;

	/* since ring is double mapped, only one copy is necessary */
	memcpy(buffer, (const char *)desc + offset, packetlen);

	/* Advance ring index to next packet descriptor */
	__hv_pkt_iter_next(channel, desc);

	/* Notify host of update */
	hv_pkt_iter_close(channel);

	return 0;
}
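
/*
 * Example of the length/offset math above: a descriptor with
 * offset8 = 2 and len8 = 66 describes a 528-byte packet whose payload
 * starts 16 bytes in.  A "cooked" read (raw == false) returns the
 * 512 payload bytes; a raw read returns all 528 bytes.
 */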

/*
 * Determine number of bytes available in ring buffer after
 * the current iterator (priv_read_index) location.
 *
 * This is similar to hv_get_bytes_to_read but with private
 * read index instead.
 */
static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
{
	u32 priv_read_loc = rbi->priv_read_index;
	u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index);

	if (write_loc >= priv_read_loc)
		return write_loc - priv_read_loc;
	else
		return (rbi->ring_datasize - priv_read_loc) + write_loc;
}
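
/*
 * Example (illustrative values): with ring_datasize = 12288,
 * priv_read_index = 12000 and write_index = 100, the writer has
 * wrapped, so (12288 - 12000) + 100 = 388 bytes are available.
 */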

/*
 * Get first vmbus packet from ring buffer after read_index
 *
 * If ring buffer is empty, returns NULL and no other action needed.
 */
struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	struct vmpacket_descriptor *desc;

	if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
		return NULL;

	desc = hv_get_ring_buffer(rbi) + rbi->priv_read_index;
	if (desc)
		prefetch((char *)desc + (desc->len8 << 3));

	return desc;
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_first);

/*
 * Get next vmbus packet from ring buffer.
 *
 * Advances the current location (priv_read_index) and checks for more
 * data. If the end of the ring buffer is reached, then return NULL.
 */
struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
		   const struct vmpacket_descriptor *desc)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 packetlen = desc->len8 << 3;
	u32 dsize = rbi->ring_datasize;

	/* bump offset to next potential packet */
	rbi->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
	if (rbi->priv_read_index >= dsize)
		rbi->priv_read_index -= dsize;

	/* more data? */
	return hv_pkt_iter_first(channel);
}
EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);
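
/*
 * Example of the advance above: a packet with len8 = 8 occupies
 * 64 bytes, so priv_read_index moves forward by 64 + 8 (the
 * VMBUS_PKT_TRAILER) = 72 bytes, wrapping modulo ring_datasize.
 */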

/* How many bytes were read in this iterator cycle */
static u32 hv_pkt_iter_bytes_read(const struct hv_ring_buffer_info *rbi,
					u32 start_read_index)
{
	if (rbi->priv_read_index >= start_read_index)
		return rbi->priv_read_index - start_read_index;
	else
		return rbi->ring_datasize - start_read_index +
			rbi->priv_read_index;
}
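
/*
 * Example (illustrative values): with ring_datasize = 12288,
 * start_read_index = 12200 and priv_read_index = 100, the iterator
 * wrapped, so 12288 - 12200 + 100 = 188 bytes were read this cycle.
 */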

/*
 * Update host ring buffer after iterating over packets. If the host has
 * stopped queuing new entries because it found the ring buffer full, and
 * sufficient space is being freed up, signal the host. But be careful to
 * only signal the host when necessary, both for performance reasons and
 * because Hyper-V protects itself by throttling guests that signal
 * inappropriately.
 *
 * Determining when to signal is tricky. There are three key data inputs
 * that must be handled in this order to avoid race conditions:
 *
 * 1. Update the read_index
 * 2. Read the pending_send_sz
 * 3. Read the current write_index
 *
 * The interrupt_mask is not used to determine when to signal. The
 * interrupt_mask is used only on the guest->host ring buffer when
 * sending requests to the host. The host does not use it on the host->
 * guest ring buffer to indicate whether it should be signaled.
 */
void hv_pkt_iter_close(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 curr_write_sz, pending_sz, bytes_read, start_read_index;

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	virt_rmb();
	start_read_index = rbi->ring_buffer->read_index;
	rbi->ring_buffer->read_index = rbi->priv_read_index;

	/*
	 * Older versions of Hyper-V (before WS2012 and Win8) do not
	 * implement pending_send_sz and simply poll if the host->guest
	 * ring buffer is full.  No signaling is needed or expected.
	 */
	if (!rbi->ring_buffer->feature_bits.feat_pending_send_sz)
		return;

	/*
	 * Issue a full memory barrier before making the signaling decision.
	 * If reading pending_send_sz were to be reordered and happen
	 * before we commit the new read_index, a race could occur.  If the
	 * host were to set the pending_send_sz after we have sampled
	 * pending_send_sz, and the ring buffer blocks before we commit the
	 * read index, we could miss sending the interrupt. Issue a full
	 * memory barrier to address this.
	 */
	virt_mb();

	/*
	 * If the pending_send_sz is zero, then the ring buffer is not
	 * blocked and there is no need to signal.  This is by far the
	 * most common case, so exit quickly for best performance.
	 */
	pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
	if (!pending_sz)
		return;

	/*
	 * Ensure the read of write_index in hv_get_bytes_to_write()
	 * happens after the read of pending_send_sz.
	 */
	virt_rmb();
	curr_write_sz = hv_get_bytes_to_write(rbi);
	bytes_read = hv_pkt_iter_bytes_read(rbi, start_read_index);

	/*
	 * We want to signal the host only if we're transitioning
	 * from a "not enough free space" state to an "enough free
	 * space" state.  For example, it's possible that this function
	 * could run and free up enough space to signal the host, and then
	 * run again and free up additional space before the host has a
	 * chance to clear the pending_send_sz.  The 2nd invocation would
	 * be a null transition from "enough free space" to "enough free
	 * space", which doesn't warrant a signal.
	 *
	 * Exactly filling the ring buffer is treated as "not enough
	 * space". The ring buffer always must have at least one byte
	 * empty so the empty and full conditions are distinguishable.
	 * hv_get_bytes_to_write() doesn't fully tell the truth in
	 * this regard.
	 *
	 * So first check if we were in the "enough free space" state
	 * before we began the iteration. If so, the host was not
	 * blocked, and there's no need to signal.
	 */
	if (curr_write_sz - bytes_read > pending_sz)
		return;

	/*
	 * Similarly, if the new state is "not enough space", then
	 * there's no need to signal.
	 */
	if (curr_write_sz <= pending_sz)
		return;

	++channel->intr_in_full;
	vmbus_setevent(channel);
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_close);
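
/*
 * Typical read-side usage, a sketch of the iterator protocol above
 * (the foreach_vmbus_pkt() helper in include/linux/hyperv.h wraps the
 * same first/next calls; process() is a hypothetical handler):
 *
 *	struct vmpacket_descriptor *pkt;
 *
 *	for (pkt = hv_pkt_iter_first(channel); pkt;
 *	     pkt = hv_pkt_iter_next(channel, pkt))
 *		process(pkt);
 *
 *	hv_pkt_iter_close(channel);	// update read_index, maybe signal
 */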