xref: /openbmc/linux/include/linux/virtio_config.h (revision 465191d6)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_VIRTIO_CONFIG_H
3 #define _LINUX_VIRTIO_CONFIG_H
4 
5 #include <linux/err.h>
6 #include <linux/bug.h>
7 #include <linux/virtio.h>
8 #include <linux/virtio_byteorder.h>
9 #include <linux/compiler_types.h>
10 #include <uapi/linux/virtio_config.h>
11 
12 struct irq_affinity;
13 
14 struct virtio_shm_region {
15 	u64 addr;
16 	u64 len;
17 };
18 
19 /**
20  * virtio_config_ops - operations for configuring a virtio device
21  * Note: Do not assume that a transport implements all of the operations
22  *       getting/setting a value as a simple read/write! Generally speaking,
23  *       any of @get/@set, @get_status/@set_status, or @get_features/
24  *       @finalize_features are NOT safe to be called from an atomic
25  *       context.
26  * @get: read the value of a configuration field
27  *	vdev: the virtio_device
28  *	offset: the offset of the configuration field
29  *	buf: the buffer to write the field value into.
30  *	len: the length of the buffer
31  * @set: write the value of a configuration field
32  *	vdev: the virtio_device
33  *	offset: the offset of the configuration field
34  *	buf: the buffer to read the field value from.
35  *	len: the length of the buffer
36  * @generation: config generation counter (optional)
37  *	vdev: the virtio_device
38  *	Returns the config generation counter
39  * @get_status: read the status byte
40  *	vdev: the virtio_device
41  *	Returns the status byte
42  * @set_status: write the status byte
43  *	vdev: the virtio_device
44  *	status: the new status byte
45  * @reset: reset the device
46  *	vdev: the virtio device
47  *	After this, status and feature negotiation must be done again
48  *	Device must not be reset from its vq/config callbacks, or in
49  *	parallel with being added/removed.
50  * @find_vqs: find virtqueues and instantiate them.
51  *	vdev: the virtio_device
52  *	nvqs: the number of virtqueues to find
53  *	vqs: on success, includes new virtqueues
54  *	callbacks: array of callbacks, for each virtqueue
55  *		include a NULL entry for vqs that do not need a callback
56  *	names: array of virtqueue names (mainly for debugging)
57  *		include a NULL entry for vqs unused by driver
58  *	Returns 0 on success or error status
59  * @del_vqs: free virtqueues found by find_vqs().
60  * @synchronize_cbs: synchronize with the virtqueue callbacks (optional)
61  *      The function guarantees that all memory operations on the
62  *      queue before it are visible to the vring_interrupt() that is
63  *      called after it.
64  *      vdev: the virtio_device
65  * @get_features: get the array of feature bits for this device.
66  *	vdev: the virtio_device
67  *	Returns the first 64 feature bits (all we currently need).
68  * @finalize_features: confirm what device features we'll be using.
69  *	vdev: the virtio_device
70  *	This sends the driver feature bits to the device: it can change
71  *	the dev->feature bits if it wants.
72  * Note: despite the name this can be called any number of times.
73  *	Returns 0 on success or error status
74  * @bus_name: return the bus name associated with the device (optional)
75  *	vdev: the virtio_device
76  *      This returns a pointer to the bus name a la pci_name from which
77  *      the caller can then copy.
78  * @set_vq_affinity: set the affinity for a virtqueue (optional).
79  * @get_vq_affinity: get the affinity for a virtqueue (optional).
80  * @get_shm_region: get a shared memory region based on the index.
81  */
82 typedef void vq_callback_t(struct virtqueue *);
83 struct virtio_config_ops {
84 	void (*get)(struct virtio_device *vdev, unsigned offset,
85 		    void *buf, unsigned len);
86 	void (*set)(struct virtio_device *vdev, unsigned offset,
87 		    const void *buf, unsigned len);
88 	u32 (*generation)(struct virtio_device *vdev);
89 	u8 (*get_status)(struct virtio_device *vdev);
90 	void (*set_status)(struct virtio_device *vdev, u8 status);
91 	void (*reset)(struct virtio_device *vdev);
92 	int (*find_vqs)(struct virtio_device *, unsigned nvqs,
93 			struct virtqueue *vqs[], vq_callback_t *callbacks[],
94 			const char * const names[], const bool *ctx,
95 			struct irq_affinity *desc);
96 	void (*del_vqs)(struct virtio_device *);
97 	void (*synchronize_cbs)(struct virtio_device *);
98 	u64 (*get_features)(struct virtio_device *vdev);
99 	int (*finalize_features)(struct virtio_device *vdev);
100 	const char *(*bus_name)(struct virtio_device *vdev);
101 	int (*set_vq_affinity)(struct virtqueue *vq,
102 			       const struct cpumask *cpu_mask);
103 	const struct cpumask *(*get_vq_affinity)(struct virtio_device *vdev,
104 			int index);
105 	bool (*get_shm_region)(struct virtio_device *vdev,
106 			       struct virtio_shm_region *region, u8 id);
107 };
108 
109 /* If driver didn't advertise the feature, it will never appear. */
110 void virtio_check_driver_offered_feature(const struct virtio_device *vdev,
111 					 unsigned int fbit);
112 
113 /**
114  * __virtio_test_bit - helper to test feature bits. For use by transports.
115  *                     Devices should normally use virtio_has_feature,
116  *                     which includes more checks.
117  * @vdev: the device
118  * @fbit: the feature bit
119  */
120 static inline bool __virtio_test_bit(const struct virtio_device *vdev,
121 				     unsigned int fbit)
122 {
123 	/* Did you forget to fix assumptions on max features? */
124 	if (__builtin_constant_p(fbit))
125 		BUILD_BUG_ON(fbit >= 64);
126 	else
127 		BUG_ON(fbit >= 64);
128 
129 	return vdev->features & BIT_ULL(fbit);
130 }
131 
132 /**
133  * __virtio_set_bit - helper to set feature bits. For use by transports.
134  * @vdev: the device
135  * @fbit: the feature bit
136  */
137 static inline void __virtio_set_bit(struct virtio_device *vdev,
138 				    unsigned int fbit)
139 {
140 	/* Did you forget to fix assumptions on max features? */
141 	if (__builtin_constant_p(fbit))
142 		BUILD_BUG_ON(fbit >= 64);
143 	else
144 		BUG_ON(fbit >= 64);
145 
146 	vdev->features |= BIT_ULL(fbit);
147 }
148 
149 /**
150  * __virtio_clear_bit - helper to clear feature bits. For use by transports.
151  * @vdev: the device
152  * @fbit: the feature bit
153  */
154 static inline void __virtio_clear_bit(struct virtio_device *vdev,
155 				      unsigned int fbit)
156 {
157 	/* Did you forget to fix assumptions on max features? */
158 	if (__builtin_constant_p(fbit))
159 		BUILD_BUG_ON(fbit >= 64);
160 	else
161 		BUG_ON(fbit >= 64);
162 
163 	vdev->features &= ~BIT_ULL(fbit);
164 }
165 
166 /**
167  * virtio_has_feature - helper to determine if this device has this feature.
168  * @vdev: the device
169  * @fbit: the feature bit
170  */
171 static inline bool virtio_has_feature(const struct virtio_device *vdev,
172 				      unsigned int fbit)
173 {
174 	if (fbit < VIRTIO_TRANSPORT_F_START)
175 		virtio_check_driver_offered_feature(vdev, fbit);
176 
177 	return __virtio_test_bit(vdev, fbit);
178 }
179 
180 /**
181  * virtio_has_dma_quirk - determine whether this device has the DMA quirk
182  * @vdev: the device
183  */
184 static inline bool virtio_has_dma_quirk(const struct virtio_device *vdev)
185 {
186 	/*
187 	 * Note the reverse polarity of the quirk feature (compared to most
188 	 * other features), this is for compatibility with legacy systems.
189 	 */
190 	return !virtio_has_feature(vdev, VIRTIO_F_ACCESS_PLATFORM);
191 }
192 
193 static inline
194 struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev,
195 					vq_callback_t *c, const char *n)
196 {
197 	vq_callback_t *callbacks[] = { c };
198 	const char *names[] = { n };
199 	struct virtqueue *vq;
200 	int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names, NULL,
201 					 NULL);
202 	if (err < 0)
203 		return ERR_PTR(err);
204 	return vq;
205 }
206 
207 static inline
208 int virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs,
209 			struct virtqueue *vqs[], vq_callback_t *callbacks[],
210 			const char * const names[],
211 			struct irq_affinity *desc)
212 {
213 	return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, NULL, desc);
214 }
215 
216 static inline
217 int virtio_find_vqs_ctx(struct virtio_device *vdev, unsigned nvqs,
218 			struct virtqueue *vqs[], vq_callback_t *callbacks[],
219 			const char * const names[], const bool *ctx,
220 			struct irq_affinity *desc)
221 {
222 	return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, ctx,
223 				      desc);
224 }
225 
226 /**
227  * virtio_synchronize_cbs - synchronize with virtqueue callbacks
228  * @vdev: the device
229  */
230 static inline
231 void virtio_synchronize_cbs(struct virtio_device *dev)
232 {
233 	if (dev->config->synchronize_cbs) {
234 		dev->config->synchronize_cbs(dev);
235 	} else {
236 		/*
237 		 * A best effort fallback to synchronize with
238 		 * interrupts, preemption and softirq disabled
239 		 * regions. See comment above synchronize_rcu().
240 		 */
241 		synchronize_rcu();
242 	}
243 }
244 
245 /**
246  * virtio_device_ready - enable vq use in probe function
247  * @vdev: the device
248  *
249  * Driver must call this to use vqs in the probe function.
250  *
251  * Note: vqs are enabled automatically after probe returns.
252  */
253 static inline
254 void virtio_device_ready(struct virtio_device *dev)
255 {
256 	unsigned status = dev->config->get_status(dev);
257 
258 	WARN_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);
259 
260 	/*
261 	 * The virtio_synchronize_cbs() makes sure vring_interrupt()
262 	 * will see the driver specific setup if it sees vq->broken
263 	 * as false (even if the notifications come before DRIVER_OK).
264 	 */
265 	virtio_synchronize_cbs(dev);
266 	__virtio_unbreak_device(dev);
267 	/*
268 	 * The transport should ensure the visibility of vq->broken
269 	 * before setting DRIVER_OK. See the comments for the transport
270 	 * specific set_status() method.
271 	 *
272 	 * A well behaved device will only notify a virtqueue after
273 	 * DRIVER_OK, this means the device should "see" the coherenct
274 	 * memory write that set vq->broken as false which is done by
275 	 * the driver when it sees DRIVER_OK, then the following
276 	 * driver's vring_interrupt() will see vq->broken as false so
277 	 * we won't lose any notification.
278 	 */
279 	dev->config->set_status(dev, status | VIRTIO_CONFIG_S_DRIVER_OK);
280 }
281 
282 static inline
283 const char *virtio_bus_name(struct virtio_device *vdev)
284 {
285 	if (!vdev->config->bus_name)
286 		return "virtio";
287 	return vdev->config->bus_name(vdev);
288 }
289 
290 /**
291  * virtqueue_set_affinity - setting affinity for a virtqueue
292  * @vq: the virtqueue
293  * @cpu: the cpu no.
294  *
295  * Pay attention the function are best-effort: the affinity hint may not be set
296  * due to config support, irq type and sharing.
297  *
298  */
299 static inline
300 int virtqueue_set_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
301 {
302 	struct virtio_device *vdev = vq->vdev;
303 	if (vdev->config->set_vq_affinity)
304 		return vdev->config->set_vq_affinity(vq, cpu_mask);
305 	return 0;
306 }
307 
308 static inline
309 bool virtio_get_shm_region(struct virtio_device *vdev,
310 			   struct virtio_shm_region *region, u8 id)
311 {
312 	if (!vdev->config->get_shm_region)
313 		return false;
314 	return vdev->config->get_shm_region(vdev, region, id);
315 }
316 
317 static inline bool virtio_is_little_endian(struct virtio_device *vdev)
318 {
319 	return virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
320 		virtio_legacy_is_little_endian();
321 }
322 
323 /* Memory accessors */
324 static inline u16 virtio16_to_cpu(struct virtio_device *vdev, __virtio16 val)
325 {
326 	return __virtio16_to_cpu(virtio_is_little_endian(vdev), val);
327 }
328 
329 static inline __virtio16 cpu_to_virtio16(struct virtio_device *vdev, u16 val)
330 {
331 	return __cpu_to_virtio16(virtio_is_little_endian(vdev), val);
332 }
333 
334 static inline u32 virtio32_to_cpu(struct virtio_device *vdev, __virtio32 val)
335 {
336 	return __virtio32_to_cpu(virtio_is_little_endian(vdev), val);
337 }
338 
339 static inline __virtio32 cpu_to_virtio32(struct virtio_device *vdev, u32 val)
340 {
341 	return __cpu_to_virtio32(virtio_is_little_endian(vdev), val);
342 }
343 
344 static inline u64 virtio64_to_cpu(struct virtio_device *vdev, __virtio64 val)
345 {
346 	return __virtio64_to_cpu(virtio_is_little_endian(vdev), val);
347 }
348 
349 static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
350 {
351 	return __cpu_to_virtio64(virtio_is_little_endian(vdev), val);
352 }
353 
354 #define virtio_to_cpu(vdev, x) \
355 	_Generic((x), \
356 		__u8: (x), \
357 		__virtio16: virtio16_to_cpu((vdev), (x)), \
358 		__virtio32: virtio32_to_cpu((vdev), (x)), \
359 		__virtio64: virtio64_to_cpu((vdev), (x)) \
360 		)
361 
362 #define cpu_to_virtio(vdev, x, m) \
363 	_Generic((m), \
364 		__u8: (x), \
365 		__virtio16: cpu_to_virtio16((vdev), (x)), \
366 		__virtio32: cpu_to_virtio32((vdev), (x)), \
367 		__virtio64: cpu_to_virtio64((vdev), (x)) \
368 		)
369 
370 #define __virtio_native_type(structname, member) \
371 	typeof(virtio_to_cpu(NULL, ((structname*)0)->member))
372 
373 /* Config space accessors. */
374 #define virtio_cread(vdev, structname, member, ptr)			\
375 	do {								\
376 		typeof(((structname*)0)->member) virtio_cread_v;	\
377 									\
378 		might_sleep();						\
379 		/* Sanity check: must match the member's type */	\
380 		typecheck(typeof(virtio_to_cpu((vdev), virtio_cread_v)), *(ptr)); \
381 									\
382 		switch (sizeof(virtio_cread_v)) {			\
383 		case 1:							\
384 		case 2:							\
385 		case 4:							\
386 			vdev->config->get((vdev), 			\
387 					  offsetof(structname, member), \
388 					  &virtio_cread_v,		\
389 					  sizeof(virtio_cread_v));	\
390 			break;						\
391 		default:						\
392 			__virtio_cread_many((vdev), 			\
393 					  offsetof(structname, member), \
394 					  &virtio_cread_v,		\
395 					  1,				\
396 					  sizeof(virtio_cread_v));	\
397 			break;						\
398 		}							\
399 		*(ptr) = virtio_to_cpu(vdev, virtio_cread_v);		\
400 	} while(0)
401 
402 /* Config space accessors. */
403 #define virtio_cwrite(vdev, structname, member, ptr)			\
404 	do {								\
405 		typeof(((structname*)0)->member) virtio_cwrite_v =	\
406 			cpu_to_virtio(vdev, *(ptr), ((structname*)0)->member); \
407 									\
408 		might_sleep();						\
409 		/* Sanity check: must match the member's type */	\
410 		typecheck(typeof(virtio_to_cpu((vdev), virtio_cwrite_v)), *(ptr)); \
411 									\
412 		vdev->config->set((vdev), offsetof(structname, member),	\
413 				  &virtio_cwrite_v,			\
414 				  sizeof(virtio_cwrite_v));		\
415 	} while(0)
416 
417 /*
418  * Nothing virtio-specific about these, but let's worry about generalizing
419  * these later.
420  */
421 #define virtio_le_to_cpu(x) \
422 	_Generic((x), \
423 		__u8: (u8)(x), \
424 		 __le16: (u16)le16_to_cpu(x), \
425 		 __le32: (u32)le32_to_cpu(x), \
426 		 __le64: (u64)le64_to_cpu(x) \
427 		)
428 
429 #define virtio_cpu_to_le(x, m) \
430 	_Generic((m), \
431 		 __u8: (x), \
432 		 __le16: cpu_to_le16(x), \
433 		 __le32: cpu_to_le32(x), \
434 		 __le64: cpu_to_le64(x) \
435 		)
436 
437 /* LE (e.g. modern) Config space accessors. */
438 #define virtio_cread_le(vdev, structname, member, ptr)			\
439 	do {								\
440 		typeof(((structname*)0)->member) virtio_cread_v;	\
441 									\
442 		might_sleep();						\
443 		/* Sanity check: must match the member's type */	\
444 		typecheck(typeof(virtio_le_to_cpu(virtio_cread_v)), *(ptr)); \
445 									\
446 		switch (sizeof(virtio_cread_v)) {			\
447 		case 1:							\
448 		case 2:							\
449 		case 4:							\
450 			vdev->config->get((vdev), 			\
451 					  offsetof(structname, member), \
452 					  &virtio_cread_v,		\
453 					  sizeof(virtio_cread_v));	\
454 			break;						\
455 		default:						\
456 			__virtio_cread_many((vdev), 			\
457 					  offsetof(structname, member), \
458 					  &virtio_cread_v,		\
459 					  1,				\
460 					  sizeof(virtio_cread_v));	\
461 			break;						\
462 		}							\
463 		*(ptr) = virtio_le_to_cpu(virtio_cread_v);		\
464 	} while(0)
465 
466 #define virtio_cwrite_le(vdev, structname, member, ptr)			\
467 	do {								\
468 		typeof(((structname*)0)->member) virtio_cwrite_v =	\
469 			virtio_cpu_to_le(*(ptr), ((structname*)0)->member); \
470 									\
471 		might_sleep();						\
472 		/* Sanity check: must match the member's type */	\
473 		typecheck(typeof(virtio_le_to_cpu(virtio_cwrite_v)), *(ptr)); \
474 									\
475 		vdev->config->set((vdev), offsetof(structname, member),	\
476 				  &virtio_cwrite_v,			\
477 				  sizeof(virtio_cwrite_v));		\
478 	} while(0)
479 
480 
481 /* Read @count fields, @bytes each. */
482 static inline void __virtio_cread_many(struct virtio_device *vdev,
483 				       unsigned int offset,
484 				       void *buf, size_t count, size_t bytes)
485 {
486 	u32 old, gen = vdev->config->generation ?
487 		vdev->config->generation(vdev) : 0;
488 	int i;
489 
490 	might_sleep();
491 	do {
492 		old = gen;
493 
494 		for (i = 0; i < count; i++)
495 			vdev->config->get(vdev, offset + bytes * i,
496 					  buf + i * bytes, bytes);
497 
498 		gen = vdev->config->generation ?
499 			vdev->config->generation(vdev) : 0;
500 	} while (gen != old);
501 }
502 
503 static inline void virtio_cread_bytes(struct virtio_device *vdev,
504 				      unsigned int offset,
505 				      void *buf, size_t len)
506 {
507 	__virtio_cread_many(vdev, offset, buf, len, 1);
508 }
509 
510 static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset)
511 {
512 	u8 ret;
513 
514 	might_sleep();
515 	vdev->config->get(vdev, offset, &ret, sizeof(ret));
516 	return ret;
517 }
518 
519 static inline void virtio_cwrite8(struct virtio_device *vdev,
520 				  unsigned int offset, u8 val)
521 {
522 	might_sleep();
523 	vdev->config->set(vdev, offset, &val, sizeof(val));
524 }
525 
526 static inline u16 virtio_cread16(struct virtio_device *vdev,
527 				 unsigned int offset)
528 {
529 	__virtio16 ret;
530 
531 	might_sleep();
532 	vdev->config->get(vdev, offset, &ret, sizeof(ret));
533 	return virtio16_to_cpu(vdev, ret);
534 }
535 
536 static inline void virtio_cwrite16(struct virtio_device *vdev,
537 				   unsigned int offset, u16 val)
538 {
539 	__virtio16 v;
540 
541 	might_sleep();
542 	v = cpu_to_virtio16(vdev, val);
543 	vdev->config->set(vdev, offset, &v, sizeof(v));
544 }
545 
546 static inline u32 virtio_cread32(struct virtio_device *vdev,
547 				 unsigned int offset)
548 {
549 	__virtio32 ret;
550 
551 	might_sleep();
552 	vdev->config->get(vdev, offset, &ret, sizeof(ret));
553 	return virtio32_to_cpu(vdev, ret);
554 }
555 
556 static inline void virtio_cwrite32(struct virtio_device *vdev,
557 				   unsigned int offset, u32 val)
558 {
559 	__virtio32 v;
560 
561 	might_sleep();
562 	v = cpu_to_virtio32(vdev, val);
563 	vdev->config->set(vdev, offset, &v, sizeof(v));
564 }
565 
566 static inline u64 virtio_cread64(struct virtio_device *vdev,
567 				 unsigned int offset)
568 {
569 	__virtio64 ret;
570 
571 	__virtio_cread_many(vdev, offset, &ret, 1, sizeof(ret));
572 	return virtio64_to_cpu(vdev, ret);
573 }
574 
575 static inline void virtio_cwrite64(struct virtio_device *vdev,
576 				   unsigned int offset, u64 val)
577 {
578 	__virtio64 v;
579 
580 	might_sleep();
581 	v = cpu_to_virtio64(vdev, val);
582 	vdev->config->set(vdev, offset, &v, sizeof(v));
583 }
584 
585 /* Conditional config space accessors. */
586 #define virtio_cread_feature(vdev, fbit, structname, member, ptr)	\
587 	({								\
588 		int _r = 0;						\
589 		if (!virtio_has_feature(vdev, fbit))			\
590 			_r = -ENOENT;					\
591 		else							\
592 			virtio_cread((vdev), structname, member, ptr);	\
593 		_r;							\
594 	})
595 
596 /* Conditional config space accessors. */
597 #define virtio_cread_le_feature(vdev, fbit, structname, member, ptr)	\
598 	({								\
599 		int _r = 0;						\
600 		if (!virtio_has_feature(vdev, fbit))			\
601 			_r = -ENOENT;					\
602 		else							\
603 			virtio_cread_le((vdev), structname, member, ptr); \
604 		_r;							\
605 	})
606 
607 #ifdef CONFIG_ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS
608 int arch_has_restricted_virtio_memory_access(void);
609 #else
610 static inline int arch_has_restricted_virtio_memory_access(void)
611 {
612 	return 0;
613 }
614 #endif /* CONFIG_ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS */
615 
616 #endif /* _LINUX_VIRTIO_CONFIG_H */
617