/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VIRTIO_CONFIG_H
#define _LINUX_VIRTIO_CONFIG_H

#include <linux/err.h>
#include <linux/bug.h>
#include <linux/virtio.h>
#include <linux/virtio_byteorder.h>
#include <linux/compiler_types.h>
#include <uapi/linux/virtio_config.h>

struct irq_affinity;

struct virtio_shm_region {
	u64 addr;
	u64 len;
};

/**
 * virtio_config_ops - operations for configuring a virtio device
 * Note: Do not assume that a transport implements the operations that
 *       get/set a value as a simple read/write! Generally speaking,
 *       none of @get/@set, @get_status/@set_status, or @get_features/
 *       @finalize_features is safe to call from an atomic
 *       context.
 * @get: read the value of a configuration field
 *	vdev: the virtio_device
 *	offset: the offset of the configuration field
 *	buf: the buffer to write the field value into.
 *	len: the length of the buffer
 * @set: write the value of a configuration field
 *	vdev: the virtio_device
 *	offset: the offset of the configuration field
 *	buf: the buffer to read the field value from.
 *	len: the length of the buffer
 * @generation: config generation counter (optional)
 *	vdev: the virtio_device
 *	Returns the config generation counter
 * @get_status: read the status byte
 *	vdev: the virtio_device
 *	Returns the status byte
 * @set_status: write the status byte
 *	vdev: the virtio_device
 *	status: the new status byte
 * @reset: reset the device
 *	vdev: the virtio device
 *	After this, status and feature negotiation must be done again
 *	Device must not be reset from its vq/config callbacks, or in
 *	parallel with being added/removed.
 * @find_vqs: find virtqueues and instantiate them.
 *	vdev: the virtio_device
 *	nvqs: the number of virtqueues to find
 *	vqs: on success, includes new virtqueues
 *	callbacks: array of callbacks, for each virtqueue
 *		include a NULL entry for vqs that do not need a callback
 *	names: array of virtqueue names (mainly for debugging)
 *		include a NULL entry for vqs unused by driver
 *	sizes: array of virtqueue sizes
 *	Returns 0 on success or error status
 * @del_vqs: free virtqueues found by find_vqs().
 * @synchronize_cbs: synchronize with the virtqueue callbacks (optional)
 *      The function guarantees that all memory operations on the
 *      queue before it are visible to the vring_interrupt() that is
 *      called after it.
 *      vdev: the virtio_device
 * @get_features: get the array of feature bits for this device.
 *	vdev: the virtio_device
 *	Returns the first 64 feature bits (all we currently need).
 * @finalize_features: confirm what device features we'll be using.
 *	vdev: the virtio_device
 *	This sends the driver feature bits to the device: it can change
 *	the dev->features bits if it wants.
 * Note: despite the name this can be called any number of times.
 *	Returns 0 on success or error status
 * @bus_name: return the bus name associated with the device (optional)
 *	vdev: the virtio_device
 *      This returns a pointer to the bus name a la pci_name from which
 *      the caller can then copy.
 * @set_vq_affinity: set the affinity for a virtqueue (optional).
 * @get_vq_affinity: get the affinity for a virtqueue (optional).
 * @get_shm_region: get a shared memory region based on the index.
 * @disable_vq_and_reset: reset a queue individually (optional).
 *	vq: the virtqueue
 *	Returns 0 on success or error status
 *	disable_vq_and_reset guarantees that the callbacks are disabled and
 *	synchronized.
 *	Except for the callback, the caller must ensure that the vring is
 *	not accessed by any other virtqueue function while the queue is reset.
 * @enable_vq_after_reset: enable a reset queue
 *	vq: the virtqueue
 *	Returns 0 on success or error status
 *	If disable_vq_and_reset is set, then enable_vq_after_reset must also be
 *	set.
 */
typedef void vq_callback_t(struct virtqueue *);
struct virtio_config_ops {
	void (*get)(struct virtio_device *vdev, unsigned offset,
		    void *buf, unsigned len);
	void (*set)(struct virtio_device *vdev, unsigned offset,
		    const void *buf, unsigned len);
	u32 (*generation)(struct virtio_device *vdev);
	u8 (*get_status)(struct virtio_device *vdev);
	void (*set_status)(struct virtio_device *vdev, u8 status);
	void (*reset)(struct virtio_device *vdev);
	int (*find_vqs)(struct virtio_device *, unsigned nvqs,
			struct virtqueue *vqs[], vq_callback_t *callbacks[],
			const char * const names[],
			u32 sizes[],
			const bool *ctx,
			struct irq_affinity *desc);
	void (*del_vqs)(struct virtio_device *);
	void (*synchronize_cbs)(struct virtio_device *);
	u64 (*get_features)(struct virtio_device *vdev);
	int (*finalize_features)(struct virtio_device *vdev);
	const char *(*bus_name)(struct virtio_device *vdev);
	int (*set_vq_affinity)(struct virtqueue *vq,
			       const struct cpumask *cpu_mask);
	const struct cpumask *(*get_vq_affinity)(struct virtio_device *vdev,
			int index);
	bool (*get_shm_region)(struct virtio_device *vdev,
			       struct virtio_shm_region *region, u8 id);
	int (*disable_vq_and_reset)(struct virtqueue *vq);
	int (*enable_vq_after_reset)(struct virtqueue *vq);
};

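/*
 * Example (illustrative sketch only): a transport typically fills a static
 * ops table with its implementations and points the device's config pointer
 * at it before registering the device. All foo_* names below are
 * hypothetical.
 *
 *	static const struct virtio_config_ops foo_config_ops = {
 *		.get			= foo_get,
 *		.set			= foo_set,
 *		.generation		= foo_generation,
 *		.get_status		= foo_get_status,
 *		.set_status		= foo_set_status,
 *		.reset			= foo_reset,
 *		.find_vqs		= foo_find_vqs,
 *		.del_vqs		= foo_del_vqs,
 *		.get_features		= foo_get_features,
 *		.finalize_features	= foo_finalize_features,
 *	};
 *
 *	foo_dev->vdev.config = &foo_config_ops;
 *	register_virtio_device(&foo_dev->vdev);
 */
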
/* If driver didn't advertise the feature, it will never appear. */
void virtio_check_driver_offered_feature(const struct virtio_device *vdev,
					 unsigned int fbit);

/**
 * __virtio_test_bit - helper to test feature bits. For use by transports.
 *                     Devices should normally use virtio_has_feature,
 *                     which includes more checks.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline bool __virtio_test_bit(const struct virtio_device *vdev,
				     unsigned int fbit)
{
	/* Did you forget to fix assumptions on max features? */
	if (__builtin_constant_p(fbit))
		BUILD_BUG_ON(fbit >= 64);
	else
		BUG_ON(fbit >= 64);

	return vdev->features & BIT_ULL(fbit);
}

/**
 * __virtio_set_bit - helper to set feature bits. For use by transports.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline void __virtio_set_bit(struct virtio_device *vdev,
				    unsigned int fbit)
{
	/* Did you forget to fix assumptions on max features? */
	if (__builtin_constant_p(fbit))
		BUILD_BUG_ON(fbit >= 64);
	else
		BUG_ON(fbit >= 64);

	vdev->features |= BIT_ULL(fbit);
}

/**
 * __virtio_clear_bit - helper to clear feature bits. For use by transports.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline void __virtio_clear_bit(struct virtio_device *vdev,
				      unsigned int fbit)
{
	/* Did you forget to fix assumptions on max features? */
	if (__builtin_constant_p(fbit))
		BUILD_BUG_ON(fbit >= 64);
	else
		BUG_ON(fbit >= 64);

	vdev->features &= ~BIT_ULL(fbit);
}

/**
 * virtio_has_feature - helper to determine if this device has this feature.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline bool virtio_has_feature(const struct virtio_device *vdev,
				      unsigned int fbit)
{
	if (fbit < VIRTIO_TRANSPORT_F_START)
		virtio_check_driver_offered_feature(vdev, fbit);

	return __virtio_test_bit(vdev, fbit);
}

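/*
 * Example (illustrative): a driver checks a negotiated feature bit from its
 * probe routine. VIRTIO_NET_F_MQ and struct virtio_net_config are used here
 * only as familiar names; substitute whatever your device type defines.
 *
 *	if (virtio_has_feature(vdev, VIRTIO_NET_F_MQ))
 *		num_queue_pairs = virtio_cread16(vdev,
 *			offsetof(struct virtio_net_config,
 *				 max_virtqueue_pairs));
 */
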
/**
 * virtio_has_dma_quirk - determine whether this device has the DMA quirk
 * @vdev: the device
 */
static inline bool virtio_has_dma_quirk(const struct virtio_device *vdev)
{
	/*
	 * Note the reverse polarity of the quirk feature (compared to most
	 * other features); this is for compatibility with legacy systems.
	 */
	return !virtio_has_feature(vdev, VIRTIO_F_ACCESS_PLATFORM);
}

static inline
struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev,
					vq_callback_t *c, const char *n)
{
	vq_callback_t *callbacks[] = { c };
	const char *names[] = { n };
	struct virtqueue *vq;
	int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names, NULL,
					 NULL, NULL);
	if (err < 0)
		return ERR_PTR(err);
	return vq;
}

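/*
 * Example (illustrative): a single-queue driver requests its only virtqueue
 * from probe. foo_done is a hypothetical vq_callback_t supplied by the
 * driver; the queue name is mainly used for debugging.
 *
 *	vq = virtio_find_single_vq(vdev, foo_done, "requests");
 *	if (IS_ERR(vq))
 *		return PTR_ERR(vq);
 */
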
static inline
int virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs,
			struct virtqueue *vqs[], vq_callback_t *callbacks[],
			const char * const names[],
			struct irq_affinity *desc)
{
	return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, NULL,
				      NULL, desc);
}

static inline
int virtio_find_vqs_ctx(struct virtio_device *vdev, unsigned nvqs,
			struct virtqueue *vqs[], vq_callback_t *callbacks[],
			const char * const names[], const bool *ctx,
			struct irq_affinity *desc)
{
	return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, NULL,
				      ctx, desc);
}

static inline
int virtio_find_vqs_ctx_size(struct virtio_device *vdev, u32 nvqs,
			     struct virtqueue *vqs[],
			     vq_callback_t *callbacks[],
			     const char * const names[],
			     u32 sizes[],
			     const bool *ctx, struct irq_affinity *desc)
{
	return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, sizes,
				      ctx, desc);
}

/**
 * virtio_synchronize_cbs - synchronize with virtqueue callbacks
 * @dev: the device
 */
static inline
void virtio_synchronize_cbs(struct virtio_device *dev)
{
	if (dev->config->synchronize_cbs) {
		dev->config->synchronize_cbs(dev);
	} else {
		/*
		 * A best effort fallback to synchronize with
		 * interrupts, preemption and softirq disabled
		 * regions. See comment above synchronize_rcu().
		 */
		synchronize_rcu();
	}
}

/**
 * virtio_device_ready - enable vq use in probe function
 * @dev: the device
 *
 * Driver must call this to use vqs in the probe function.
 *
 * Note: vqs are enabled automatically after probe returns.
 */
static inline
void virtio_device_ready(struct virtio_device *dev)
{
	unsigned status = dev->config->get_status(dev);

	WARN_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);

#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
	/*
	 * The virtio_synchronize_cbs() makes sure vring_interrupt()
	 * will see the driver specific setup if it sees vq->broken
	 * as false (even if the notifications come before DRIVER_OK).
	 */
	virtio_synchronize_cbs(dev);
	__virtio_unbreak_device(dev);
#endif
	/*
	 * The transport should ensure the visibility of vq->broken
	 * before setting DRIVER_OK. See the comments for the transport
	 * specific set_status() method.
	 *
	 * A well behaved device will only notify a virtqueue after
	 * DRIVER_OK, which means that by the time the device sees
	 * DRIVER_OK it must also "see" the coherent memory write,
	 * done by the driver, that set vq->broken to false. The
	 * driver's subsequent vring_interrupt() will then observe
	 * vq->broken as false, so no notification is lost.
	 */
	dev->config->set_status(dev, status | VIRTIO_CONFIG_S_DRIVER_OK);
}

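/*
 * Example (illustrative probe-time ordering): a driver that wants to kick a
 * virtqueue from probe itself must mark the device ready first. The
 * callbacks, names, sg and buf variables are assumed to have been set up by
 * the driver beforehand.
 *
 *	err = virtio_find_vqs(vdev, 1, &vq, callbacks, names, NULL);
 *	if (err)
 *		return err;
 *	virtio_device_ready(vdev);
 *	virtqueue_add_inbuf(vq, sg, 1, buf, GFP_KERNEL);
 *	virtqueue_kick(vq);
 */
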
static inline
const char *virtio_bus_name(struct virtio_device *vdev)
{
	if (!vdev->config->bus_name)
		return "virtio";
	return vdev->config->bus_name(vdev);
}

/**
 * virtqueue_set_affinity - set the affinity for a virtqueue
 * @vq: the virtqueue
 * @cpu_mask: the cpu mask
 *
 * Note that this function is best-effort: the affinity hint may not be set
 * due to config support, irq type and sharing.
 */
static inline
int virtqueue_set_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
{
	struct virtio_device *vdev = vq->vdev;

	if (vdev->config->set_vq_affinity)
		return vdev->config->set_vq_affinity(vq, cpu_mask);
	return 0;
}

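/*
 * Example (illustrative): spread queue interrupts across online CPUs,
 * remembering that the hint is best-effort. i is a hypothetical queue index.
 *
 *	virtqueue_set_affinity(vqs[i], cpumask_of(i % num_online_cpus()));
 */
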
static inline
bool virtio_get_shm_region(struct virtio_device *vdev,
			   struct virtio_shm_region *region, u8 id)
{
	if (!vdev->config->get_shm_region)
		return false;
	return vdev->config->get_shm_region(vdev, region, id);
}

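/*
 * Example (illustrative): look up a device-defined shared memory region,
 * e.g. a DAX cache window. FOO_SHMCAP_ID_CACHE is a hypothetical region id.
 *
 *	struct virtio_shm_region shm;
 *
 *	if (!virtio_get_shm_region(vdev, &shm, FOO_SHMCAP_ID_CACHE))
 *		return -ENXIO;
 *	... shm.addr and shm.len now describe the region to map ...
 */
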
static inline bool virtio_is_little_endian(struct virtio_device *vdev)
{
	return virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
		virtio_legacy_is_little_endian();
}

/* Memory accessors */
static inline u16 virtio16_to_cpu(struct virtio_device *vdev, __virtio16 val)
{
	return __virtio16_to_cpu(virtio_is_little_endian(vdev), val);
}

static inline __virtio16 cpu_to_virtio16(struct virtio_device *vdev, u16 val)
{
	return __cpu_to_virtio16(virtio_is_little_endian(vdev), val);
}

static inline u32 virtio32_to_cpu(struct virtio_device *vdev, __virtio32 val)
{
	return __virtio32_to_cpu(virtio_is_little_endian(vdev), val);
}

static inline __virtio32 cpu_to_virtio32(struct virtio_device *vdev, u32 val)
{
	return __cpu_to_virtio32(virtio_is_little_endian(vdev), val);
}

static inline u64 virtio64_to_cpu(struct virtio_device *vdev, __virtio64 val)
{
	return __virtio64_to_cpu(virtio_is_little_endian(vdev), val);
}

static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
{
	return __cpu_to_virtio64(virtio_is_little_endian(vdev), val);
}

#define virtio_to_cpu(vdev, x) \
	_Generic((x), \
		__u8: (x), \
		__virtio16: virtio16_to_cpu((vdev), (x)), \
		__virtio32: virtio32_to_cpu((vdev), (x)), \
		__virtio64: virtio64_to_cpu((vdev), (x)) \
		)

#define cpu_to_virtio(vdev, x, m) \
	_Generic((m), \
		__u8: (x), \
		__virtio16: cpu_to_virtio16((vdev), (x)), \
		__virtio32: cpu_to_virtio32((vdev), (x)), \
		__virtio64: cpu_to_virtio64((vdev), (x)) \
		)

#define __virtio_native_type(structname, member) \
	typeof(virtio_to_cpu(NULL, ((structname*)0)->member))

/* Config space accessors. */
#define virtio_cread(vdev, structname, member, ptr)			\
	do {								\
		typeof(((structname*)0)->member) virtio_cread_v;	\
									\
		might_sleep();						\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_to_cpu((vdev), virtio_cread_v)), *(ptr)); \
									\
		switch (sizeof(virtio_cread_v)) {			\
		case 1:							\
		case 2:							\
		case 4:							\
			vdev->config->get((vdev),			\
					  offsetof(structname, member), \
					  &virtio_cread_v,		\
					  sizeof(virtio_cread_v));	\
			break;						\
		default:						\
			__virtio_cread_many((vdev),			\
					  offsetof(structname, member), \
					  &virtio_cread_v,		\
					  1,				\
					  sizeof(virtio_cread_v));	\
			break;						\
		}							\
		*(ptr) = virtio_to_cpu(vdev, virtio_cread_v);		\
	} while(0)

/* Config space accessors. */
#define virtio_cwrite(vdev, structname, member, ptr)			\
	do {								\
		typeof(((structname*)0)->member) virtio_cwrite_v =	\
			cpu_to_virtio(vdev, *(ptr), ((structname*)0)->member); \
									\
		might_sleep();						\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_to_cpu((vdev), virtio_cwrite_v)), *(ptr)); \
									\
		vdev->config->set((vdev), offsetof(structname, member),	\
				  &virtio_cwrite_v,			\
				  sizeof(virtio_cwrite_v));		\
	} while(0)

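/*
 * Example (illustrative): reading and writing config fields through the
 * typed accessors above. struct virtio_foo_config and its members are
 * assumptions made up for this sketch; real drivers use their device's
 * uapi layout.
 *
 *	struct virtio_foo_config { __virtio64 capacity; __virtio16 limit; };
 *	u64 cap;
 *	u16 limit = 8;
 *
 *	virtio_cread(vdev, struct virtio_foo_config, capacity, &cap);
 *	virtio_cwrite(vdev, struct virtio_foo_config, limit, &limit);
 */
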
/*
 * Nothing virtio-specific about these, but let's worry about generalizing
 * these later.
 */
#define virtio_le_to_cpu(x) \
	_Generic((x), \
		__u8: (u8)(x), \
		 __le16: (u16)le16_to_cpu(x), \
		 __le32: (u32)le32_to_cpu(x), \
		 __le64: (u64)le64_to_cpu(x) \
		)

#define virtio_cpu_to_le(x, m) \
	_Generic((m), \
		 __u8: (x), \
		 __le16: cpu_to_le16(x), \
		 __le32: cpu_to_le32(x), \
		 __le64: cpu_to_le64(x) \
		)

/* LE (e.g. modern) Config space accessors. */
#define virtio_cread_le(vdev, structname, member, ptr)			\
	do {								\
		typeof(((structname*)0)->member) virtio_cread_v;	\
									\
		might_sleep();						\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_le_to_cpu(virtio_cread_v)), *(ptr)); \
									\
		switch (sizeof(virtio_cread_v)) {			\
		case 1:							\
		case 2:							\
		case 4:							\
			vdev->config->get((vdev),			\
					  offsetof(structname, member), \
					  &virtio_cread_v,		\
					  sizeof(virtio_cread_v));	\
			break;						\
		default:						\
			__virtio_cread_many((vdev),			\
					  offsetof(structname, member), \
					  &virtio_cread_v,		\
					  1,				\
					  sizeof(virtio_cread_v));	\
			break;						\
		}							\
		*(ptr) = virtio_le_to_cpu(virtio_cread_v);		\
	} while(0)

#define virtio_cwrite_le(vdev, structname, member, ptr)			\
	do {								\
		typeof(((structname*)0)->member) virtio_cwrite_v =	\
			virtio_cpu_to_le(*(ptr), ((structname*)0)->member); \
									\
		might_sleep();						\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_le_to_cpu(virtio_cwrite_v)), *(ptr)); \
									\
		vdev->config->set((vdev), offsetof(structname, member),	\
				  &virtio_cwrite_v,			\
				  sizeof(virtio_cwrite_v));		\
	} while(0)

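/*
 * Example (illustrative): a modern, VIRTIO_F_VERSION_1-only config layout
 * uses __le types together with the _le accessors. struct virtio_bar_config
 * is a hypothetical layout made up for this sketch.
 *
 *	struct virtio_bar_config { __le32 queue_pairs; };
 *	u32 pairs;
 *
 *	virtio_cread_le(vdev, struct virtio_bar_config, queue_pairs, &pairs);
 */
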
/* Read @count fields, @bytes each. */
static inline void __virtio_cread_many(struct virtio_device *vdev,
				       unsigned int offset,
				       void *buf, size_t count, size_t bytes)
{
	u32 old, gen = vdev->config->generation ?
		vdev->config->generation(vdev) : 0;
	int i;

	might_sleep();
	do {
		old = gen;

		for (i = 0; i < count; i++)
			vdev->config->get(vdev, offset + bytes * i,
					  buf + i * bytes, bytes);

		gen = vdev->config->generation ?
			vdev->config->generation(vdev) : 0;
	} while (gen != old);
}

static inline void virtio_cread_bytes(struct virtio_device *vdev,
				      unsigned int offset,
				      void *buf, size_t len)
{
	__virtio_cread_many(vdev, offset, buf, len, 1);
}

static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset)
{
	u8 ret;

	might_sleep();
	vdev->config->get(vdev, offset, &ret, sizeof(ret));
	return ret;
}

static inline void virtio_cwrite8(struct virtio_device *vdev,
				  unsigned int offset, u8 val)
{
	might_sleep();
	vdev->config->set(vdev, offset, &val, sizeof(val));
}

static inline u16 virtio_cread16(struct virtio_device *vdev,
				 unsigned int offset)
{
	__virtio16 ret;

	might_sleep();
	vdev->config->get(vdev, offset, &ret, sizeof(ret));
	return virtio16_to_cpu(vdev, ret);
}

static inline void virtio_cwrite16(struct virtio_device *vdev,
				   unsigned int offset, u16 val)
{
	__virtio16 v;

	might_sleep();
	v = cpu_to_virtio16(vdev, val);
	vdev->config->set(vdev, offset, &v, sizeof(v));
}

static inline u32 virtio_cread32(struct virtio_device *vdev,
				 unsigned int offset)
{
	__virtio32 ret;

	might_sleep();
	vdev->config->get(vdev, offset, &ret, sizeof(ret));
	return virtio32_to_cpu(vdev, ret);
}

static inline void virtio_cwrite32(struct virtio_device *vdev,
				   unsigned int offset, u32 val)
{
	__virtio32 v;

	might_sleep();
	v = cpu_to_virtio32(vdev, val);
	vdev->config->set(vdev, offset, &v, sizeof(v));
}

static inline u64 virtio_cread64(struct virtio_device *vdev,
				 unsigned int offset)
{
	__virtio64 ret;

	__virtio_cread_many(vdev, offset, &ret, 1, sizeof(ret));
	return virtio64_to_cpu(vdev, ret);
}

static inline void virtio_cwrite64(struct virtio_device *vdev,
				   unsigned int offset, u64 val)
{
	__virtio64 v;

	might_sleep();
	v = cpu_to_virtio64(vdev, val);
	vdev->config->set(vdev, offset, &v, sizeof(v));
}

/* Conditional config space accessors. */
#define virtio_cread_feature(vdev, fbit, structname, member, ptr)	\
	({								\
		int _r = 0;						\
		if (!virtio_has_feature(vdev, fbit))			\
			_r = -ENOENT;					\
		else							\
			virtio_cread((vdev), structname, member, ptr);	\
		_r;							\
	})

/* Conditional config space accessors. */
#define virtio_cread_le_feature(vdev, fbit, structname, member, ptr)	\
	({								\
		int _r = 0;						\
		if (!virtio_has_feature(vdev, fbit))			\
			_r = -ENOENT;					\
		else							\
			virtio_cread_le((vdev), structname, member, ptr); \
		_r;							\
	})

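/*
 * Example (illustrative): read a field only if its feature bit was
 * negotiated, falling back to a default otherwise. VIRTIO_FOO_F_LIMIT,
 * struct virtio_foo_config and FOO_DEFAULT_LIMIT are hypothetical.
 *
 *	if (virtio_cread_feature(vdev, VIRTIO_FOO_F_LIMIT,
 *				 struct virtio_foo_config, limit, &limit))
 *		limit = FOO_DEFAULT_LIMIT;
 */
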
#endif /* _LINUX_VIRTIO_CONFIG_H */