/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2016 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Common macros and functions for ring benchmarking.
 */
#ifndef MAIN_H
#define MAIN_H

#include <assert.h>
#include <stdbool.h>
#include <stdlib.h>

/* Fallback so this header also builds without the kernel's compiler.h. */
#ifndef __always_inline
#define __always_inline inline __attribute__((always_inline))
#endif

extern int param;

extern bool do_exit;

#if defined(__x86_64__) || defined(__i386__)
#include <x86intrin.h>

/* Busy-wait until at least @cycles TSC cycles have elapsed. */
static inline void wait_cycles(unsigned long long cycles)
{
	unsigned long long t;

	t = __rdtsc();
	while (__rdtsc() - t < cycles) {}
}

/* Rough cost of one VM exit/entry, in cycles; tweak me. */
#define VMEXIT_CYCLES 500
#define VMENTRY_CYCLES 500

#elif defined(__s390x__)
static inline void wait_cycles(unsigned long long cycles)
{
	/* brctg: decrement %0 and branch back to 0: while it is non-zero. */
	asm volatile("0: brctg %0,0b" : "+d" (cycles));
}

/* tweak me */
#define VMEXIT_CYCLES 200
#define VMENTRY_CYCLES 200

#else
/* Unsupported architecture: fail loudly rather than mis-measure. */
static inline void wait_cycles(unsigned long long cycles)
{
	_Exit(5);
}
#define VMEXIT_CYCLES 0
#define VMENTRY_CYCLES 0
#endif

/* Simulate the cycle cost of a VM exit, when exits are enabled. */
static inline void vmexit(void)
{
	if (!do_exit)
		return;

	wait_cycles(VMEXIT_CYCLES);
}

/* Simulate the cycle cost of a VM entry, when exits are enabled. */
static inline void vmentry(void)
{
	if (!do_exit)
		return;

	wait_cycles(VMENTRY_CYCLES);
}
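
/*
 * Illustrative sketch (not from this file) of how a notification might
 * be charged: the notifier pays the exit, the resumed side pays the
 * entry. The actual call sites live in the ring implementations.
 *
 *	vmexit();	// cost of leaving guest context to notify
 *	kick();		// wake the other thread
 *	...
 *	vmentry();	// cost of getting back into the guest
 */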

/* implemented by ring; see the illustrative usage sketch below */
void alloc_ring(void);
/* guest side */
int add_inbuf(unsigned, void *, void *);
void *get_buf(unsigned *, void **);
void disable_call(void);
bool used_empty(void);
bool enable_call(void);
void kick_available(void);
/* host side */
void disable_kick(void);
bool avail_empty(void);
bool enable_kick(void);
bool use_buf(unsigned *, void **);
void call_used(void);
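
/*
 * Illustrative guest-side pattern (a sketch under assumed semantics,
 * not the actual driver loop in main.c): keep the ring full, flush
 * completions, and sleep only once enable_call() confirms the used
 * ring is still empty.
 *
 *	unsigned len;
 *	void *buf;
 *
 *	while (add_inbuf(0, "Buffer\n", "Hello, world!") == 0)
 *		;			// post until the ring fills up
 *	kick_available();		// tell the host work is pending
 *	while (!get_buf(&len, &buf)) {	// reap one completion
 *		if (used_empty() && enable_call())
 *			wait_for_call();	// sleep until the host calls
 *	}
 *	disable_call();			// back to busy polling
 */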

/* implemented by main */
extern bool do_sleep;
void kick(void);
void wait_for_kick(void);
void call(void);
void wait_for_call(void);

extern unsigned ring_size;

/* Compiler barrier - similar to what Linux uses */
#define barrier() asm volatile("" ::: "memory")

/* Is there a portable way to do this? */
#if defined(__x86_64__) || defined(__i386__)
/* "rep; nop" is the PAUSE instruction: a hint that this is a spin loop. */
#define cpu_relax() asm ("rep; nop" ::: "memory")
#elif defined(__s390x__)
#define cpu_relax() barrier()
#else
#define cpu_relax() assert(0)
#endif

extern bool do_relax;

static inline void busy_wait(void)
{
	if (do_relax)
		cpu_relax();
	else
		/* prevent compiler from removing busy loops */
		barrier();
}
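
/*
 * Typical use (illustrative): spin until the other side makes
 * progress, e.g.:
 *
 *	while (used_empty())
 *		busy_wait();
 */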

#if defined(__x86_64__)
/*
 * A locked RMW on the stack is a full barrier and cheaper than mfence;
 * the -132 offset stays below the 128-byte red zone under %rsp.
 */
#define smp_mb()     asm volatile("lock; addl $0,-132(%%rsp)" ::: "memory", "cc")
#elif defined(__i386__)
/* No red zone on i386; mirror the kernel's 32-bit mb(). */
#define smp_mb()     asm volatile("lock; addl $0,-4(%%esp)" ::: "memory", "cc")
#else
/*
 * Not using __ATOMIC_SEQ_CST since gcc docs say they are only synchronized
 * with other __ATOMIC_SEQ_CST calls.
 */
#define smp_mb() __sync_synchronize()
#endif
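
/*
 * Why a full barrier is needed (the classic store-buffering litmus
 * test; illustrative, not from this file):
 *
 *	// CPU 0			// CPU 1
 *	WRITE_ONCE(x, 1);		WRITE_ONCE(y, 1);
 *	smp_mb();			smp_mb();
 *	r0 = READ_ONCE(y);		r1 = READ_ONCE(x);
 *
 * With both barriers in place, r0 == 0 && r1 == 0 cannot happen.
 */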

/*
 * This abuses the atomic builtins for thread fences, and
 * adds a compiler barrier.
 */
#define smp_release() do { \
    barrier(); \
    __atomic_thread_fence(__ATOMIC_RELEASE); \
} while (0)

#define smp_acquire() do { \
    __atomic_thread_fence(__ATOMIC_ACQUIRE); \
    barrier(); \
} while (0)
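
/*
 * Illustrative message-passing pattern (a sketch, not from this file):
 * the writer orders its data store before the flag store, the reader
 * orders the flag load before its data load.
 *
 *	// writer			// reader
 *	data = 42;			while (!READ_ONCE(flag))
 *	smp_release();				busy_wait();
 *	WRITE_ONCE(flag, 1);		smp_acquire();
 *					assert(data == 42);
 */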

/*
 * x86 and s390 do not reorder stores against other stores, so a
 * compiler barrier is enough for smp_wmb().
 */
#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)
#define smp_wmb() barrier()
#else
#define smp_wmb() smp_release()
#endif

/* Read exactly once, untorn for the common sizes; memcpy otherwise. */
static __always_inline
void __read_once_size(const volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(unsigned char *)res = *(volatile unsigned char *)p; break;
	case 2: *(unsigned short *)res = *(volatile unsigned short *)p; break;
	case 4: *(unsigned int *)res = *(volatile unsigned int *)p; break;
	case 8: *(unsigned long long *)res = *(volatile unsigned long long *)p; break;
	default:
		barrier();
		__builtin_memcpy((void *)res, (const void *)p, size);
		barrier();
	}
}

/* Write exactly once, untorn for the common sizes; memcpy otherwise. */
static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile unsigned char *)p = *(unsigned char *)res; break;
	case 2: *(volatile unsigned short *)p = *(unsigned short *)res; break;
	case 4: *(volatile unsigned int *)p = *(unsigned int *)res; break;
	case 8: *(volatile unsigned long long *)p = *(unsigned long long *)res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

#ifdef __alpha__
/* Alpha does not respect address dependencies, so order reads with a barrier. */
#define READ_ONCE(x)							\
({									\
	union { typeof(x) __val; char __c[1]; } __u;			\
	__read_once_size(&(x), __u.__c, sizeof(x));			\
	smp_mb(); /* Enforce dependency ordering from x */		\
	__u.__val;							\
})
#else
#define READ_ONCE(x)							\
({									\
	union { typeof(x) __val; char __c[1]; } __u;			\
	__read_once_size(&(x), __u.__c, sizeof(x));			\
	__u.__val;							\
})
#endif

#define WRITE_ONCE(x, val)					\
({								\
	union { typeof(x) __val; char __c[1]; } __u =		\
		{ .__val = (typeof(x)) (val) };			\
	__write_once_size(&(x), __u.__c, sizeof(x));		\
	__u.__val;						\
})
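
/*
 * Illustrative use (hypothetical names): poll a shared field without
 * letting the compiler cache, tear, or fuse the accesses.
 *
 *	while (READ_ONCE(ring[head].flags) & DESC_HW)	// DESC_HW is made up
 *		busy_wait();
 *	WRITE_ONCE(ring[head].flags, 0);
 */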

#endif /* MAIN_H */