xref: /openbmc/linux/drivers/hv/ring_buffer.c (revision 4ed91d48259d9ddd378424d008f2e6559f7e78f8)
/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include "hyperv_vmbus.h"

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 *
 * KYS: Oct. 30, 2016:
 * It looks like Windows hosts have logic to deal with DoS attacks that
 * can be triggered if they receive interrupts when they are not
 * expecting any. The host expects interrupts only when the ring
 * transitions from empty to non-empty (or from full to non-full on the
 * guest-to-host ring).
 * So, base the signaling decision solely on the ring state until the
 * host logic is fixed.
 */
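
/*
 * Worked example of the empty -> non-empty rule above (illustrative
 * numbers, not from the protocol spec): assume a 4096-byte data area
 * with read_index == write_index == 512, i.e. the ring is empty.  The
 * guest writes a 128-byte packet, so old_write == 512 and write_index
 * moves to 640.  Because old_write still equals read_index, the ring
 * just transitioned from empty to non-empty and the host is signaled.
 * Had read_index been, say, 256, the host would still be draining the
 * ring and no signal would be needed.
 */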

static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;

	virt_mb();
	if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
		return;

	/* check interrupt_mask before read_index */
	virt_rmb();
	/*
	 * This is the only case we need to signal: when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
		vmbus_setevent(channel);
}

/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}

/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
		     u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/* Get the next read location for the specified ring buffer. */
static inline u32
hv_get_next_read_location(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_buffer->read_index;
}

/*
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip over the packet header.
 */
static inline u32
hv_get_next_readlocation_withoffset(const struct hv_ring_buffer_info *ring_info,
				    u32 offset)
{
	u32 next = ring_info->ring_buffer->read_index;

	next += offset;
	if (next >= ring_info->ring_datasize)
		next -= ring_info->ring_datasize;

	return next;
}

/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
		    u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
	ring_info->priv_read_index = next_read_location;
}

/* Get the size of the data area of the ring buffer. */
static inline u32
hv_get_ring_buffersize(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/*
 * Get the ring buffer indices packed into a u64: the write index goes in
 * the upper 32 bits; the lower 32 bits (the read index slot) are left zero.
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}

/*
 * Helper routine to copy from the ring buffer into a destination buffer.
 * Assumes there is enough room.  Handles wrap-around of the source (ring)
 * offset only; the single memcpy is safe across the end of the ring
 * thanks to the wraparound mapping (see hv_ringbuffer_init()).
 */
static u32 hv_copyfrom_ringbuffer(
	const struct hv_ring_buffer_info *ring_info,
	void				*dest,
	u32				destlen,
	u32				start_read_offset)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	memcpy(dest, ring_buffer + start_read_offset, destlen);

	start_read_offset += destlen;
	if (start_read_offset >= ring_buffer_size)
		start_read_offset -= ring_buffer_size;

	return start_read_offset;
}

/*
 * Helper routine to copy from a source buffer into the ring buffer.
 * Assumes there is enough room.  Handles wrap-around of the destination
 * (ring) offset only, as above.
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	u32				start_write_offset,
	const void			*src,
	u32				srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	memcpy(ring_buffer + start_write_offset, src, srclen);

	start_write_offset += srclen;
	if (start_write_offset >= ring_buffer_size)
		start_write_offset -= ring_buffer_size;

	return start_write_offset;
}
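
/*
 * Note on the single memcpy in the two helpers above: copies may run
 * across the end of the ring without being split because
 * hv_ringbuffer_init() maps the data pages twice, back to back:
 *
 *	[ header page ][ data pages 0..N-1 ][ data pages 0..N-1 again ]
 *
 * A copy that starts near the end of the first data mapping simply runs
 * on into the second mapping, which aliases the start of the data area.
 * Only the returned offset needs the explicit wrap-around fixup.
 */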

/* Get various debug metrics for the specified ring buffer. */
void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
				 struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	if (ring_info->ring_buffer) {
		hv_get_ringbuffer_availbytes(ring_info,
					&bytes_avail_toread,
					&bytes_avail_towrite);

		debug_info->bytes_avail_toread = bytes_avail_toread;
		debug_info->bytes_avail_towrite = bytes_avail_towrite;
		debug_info->current_read_index =
			ring_info->ring_buffer->read_index;
		debug_info->current_write_index =
			ring_info->ring_buffer->write_index;
		debug_info->current_interrupt_mask =
			ring_info->ring_buffer->interrupt_mask;
	}
}

/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       struct page *pages, u32 page_cnt)
{
	int i;
	struct page **pages_wraparound;

	BUILD_BUG_ON(sizeof(struct hv_ring_buffer) != PAGE_SIZE);

	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

	/*
	 * The first page holds struct hv_ring_buffer; map the remaining
	 * data pages twice in a row (wraparound mapping) so that copies
	 * can run past the end of the ring without being split.
	 */
	pages_wraparound = kcalloc(page_cnt * 2 - 1, sizeof(struct page *),
				   GFP_KERNEL);
	if (!pages_wraparound)
		return -ENOMEM;

	pages_wraparound[0] = pages;
	for (i = 0; i < 2 * (page_cnt - 1); i++)
		pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];

	ring_info->ring_buffer = (struct hv_ring_buffer *)
		vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);

	kfree(pages_wraparound);

	if (!ring_info->ring_buffer)
		return -ENOMEM;

	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	/* Set the feature bit for enabling flow control. */
	ring_info->ring_buffer->feature_bits.value = 1;

	ring_info->ring_size = page_cnt << PAGE_SHIFT;
	ring_info->ring_datasize = ring_info->ring_size -
		sizeof(struct hv_ring_buffer);

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}

/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
	vunmap(ring_info->ring_buffer);
}
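
/*
 * Minimal usage sketch of init/cleanup (illustrative only; the real
 * callers in the VMBus channel code allocate the inbound and outbound
 * rings together and hold additional state):
 *
 *	unsigned int order = 2;	(hypothetical ring size)
 *	struct page *pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
 *	struct hv_ring_buffer_info rbi;
 *
 *	if (pages && !hv_ringbuffer_init(&rbi, pages, 1 << order)) {
 *		... use the ring ...
 *		hv_ringbuffer_cleanup(&rbi);
 *	}
 */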

/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
			const struct kvec *kv_list, u32 kv_count)
{
	int i = 0;
	u32 bytes_avail_towrite;
	u32 totalbytes_towrite = 0;

	u32 next_write_location;
	u32 old_write;
	u64 prev_indices = 0;
	unsigned long flags = 0;
	struct hv_ring_buffer_info *outring_info = &channel->outbound;

	if (channel->rescind)
		return -ENODEV;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;

	/* Reserve room for the trailing u64 that records the indices. */
	totalbytes_towrite += sizeof(u64);

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	bytes_avail_towrite = hv_get_bytes_to_write(outring_info);

	/*
	 * If there is only just enough room for the packet, treat the
	 * ring as full: otherwise, once the write completed, read index
	 * == write index and the ring would look empty the next time
	 * around.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}
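
	/*
	 * Worked example of the "<=" above (illustrative numbers): with a
	 * 4096-byte data area and read_index == write_index (ring empty),
	 * hv_get_bytes_to_write() reports 4096 bytes available.  A write
	 * of exactly 4096 bytes would wrap write_index back onto
	 * read_index, making a full ring indistinguishable from an empty
	 * one, so at most 4095 bytes may actually be written.
	 */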

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
						     next_write_location,
						     kv_list[i].iov_base,
						     kv_list[i].iov_len);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
					     next_write_location,
					     &prev_indices,
					     sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	virt_mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	hv_signal_on_write(old_write, channel);

	if (channel->rescind)
		return -ENODEV;

	return 0;
}
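
/*
 * Usage sketch (illustrative; real senders such as vmbus_sendpacket()
 * build the descriptor and any trailing padding themselves): the caller
 * packages a packet descriptor plus payload into a kvec array:
 *
 *	struct vmpacket_descriptor desc;
 *	struct kvec kv[2];
 *
 *	... fill in desc, with offset8/len8 in 8-byte units ...
 *	kv[0].iov_base = &desc;
 *	kv[0].iov_len  = sizeof(desc);
 *	kv[1].iov_base = payload;
 *	kv[1].iov_len  = payload_len;
 *
 *	ret = hv_ringbuffer_write(channel, kv, 2);
 */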

int hv_ringbuffer_read(struct vmbus_channel *channel,
		       void *buffer, u32 buflen, u32 *buffer_actual_len,
		       u64 *requestid, bool raw)
{
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	u64 prev_indices = 0;
	struct vmpacket_descriptor desc;
	u32 offset;
	u32 packetlen;
	int ret = 0;
	struct hv_ring_buffer_info *inring_info = &channel->inbound;

	if (buflen == 0)
		return -EINVAL;

	*buffer_actual_len = 0;
	*requestid = 0;

	bytes_avail_toread = hv_get_bytes_to_read(inring_info);
	/* Make sure there is something to read */
	if (bytes_avail_toread < sizeof(desc)) {
		/*
		 * No error is returned when there is not even a complete
		 * header; drivers are expected to check buffer_actual_len
		 * instead.
		 */
		return ret;
	}

	init_cached_read_index(channel);
	next_read_location = hv_get_next_read_location(inring_info);
	next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
						    sizeof(desc),
						    next_read_location);

	/* offset8 and len8 are expressed in units of 8 bytes */
	offset = raw ? 0 : (desc.offset8 << 3);
	packetlen = (desc.len8 << 3) - offset;
	*buffer_actual_len = packetlen;
	*requestid = desc.trans_id;

	if (bytes_avail_toread < packetlen + offset)
		return -EAGAIN;

	if (packetlen > buflen)
		return -ENOBUFS;

	next_read_location =
		hv_get_next_readlocation_withoffset(inring_info, offset);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						buffer,
						packetlen,
						next_read_location);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						&prev_indices,
						sizeof(u64),
						next_read_location);

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	virt_mb();

	/* Update the read index */
	hv_set_next_read_location(inring_info, next_read_location);

	hv_signal_on_read(channel);

	return ret;
}
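
/*
 * Usage sketch (illustrative; vmbus_recvpacket() is the usual wrapper).
 * An empty ring returns 0 with *buffer_actual_len == 0, so callers must
 * check the actual length as well as the return value:
 *
 *	u32 actual_len;
 *	u64 req_id;
 *	int ret;
 *
 *	ret = hv_ringbuffer_read(channel, buf, buflen, &actual_len,
 *				 &req_id, false);
 *	if (ret == 0 && actual_len > 0)
 *		consume(buf, actual_len);	(consume() is hypothetical)
 */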
413