/*
 * Copyright (C) 2016 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * Common macros and functions for ring benchmarking.
 */
#ifndef MAIN_H
#define MAIN_H

#include <assert.h>	/* assert() used by the cpu_relax() fallback below */
#include <stdbool.h>
#include <stdlib.h>	/* _Exit() used by the wait_cycles() fallback below */

extern int param;

extern bool do_exit;
#if defined(__x86_64__) || defined(__i386__)
#include <x86intrin.h>

static inline void wait_cycles(unsigned long long cycles)
{
	unsigned long long t;

	t = __rdtsc();
	while (__rdtsc() - t < cycles) {}
}

#define VMEXIT_CYCLES 500
#define VMENTRY_CYCLES 500
#elif defined(__s390x__)
static inline void wait_cycles(unsigned long long cycles)
{
	/* brctg decrements the count register, so mark it read-write */
	asm volatile("0: brctg %0,0b" : "+d" (cycles));
}

/* tweak me */
#define VMEXIT_CYCLES 200
#define VMENTRY_CYCLES 200

#else
static inline void wait_cycles(unsigned long long cycles)
{
	/* Unsupported architecture: abort rather than mis-measure */
	_Exit(5);
}
#define VMEXIT_CYCLES 0
#define VMENTRY_CYCLES 0
#endif

static inline void vmexit(void)
{
	if (!do_exit)
		return;

	wait_cycles(VMEXIT_CYCLES);
}

static inline void vmentry(void)
{
	if (!do_exit)
		return;

	wait_cycles(VMENTRY_CYCLES);
}
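
/*
 * Hedged sketch (illustration only; example_notify() is hypothetical and
 * not part of this benchmark): a ring implementation that wants to charge
 * VM-transition cost to a host notification could bracket it like this:
 *
 *	static void example_notify(void)
 *	{
 *		vmexit();	// model the guest->host switch
 *		kick();		// the real notification (declared below)
 *		vmentry();	// model the host->guest switch
 *	}
 */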

/* implemented by ring */
void alloc_ring(void);
/* guest side */
int add_inbuf(unsigned len, void *buf, void *datap);
void *get_buf(unsigned *lenp, void **bufp);
void disable_call(void);
bool used_empty(void);
bool enable_call(void);
void kick_available(void);
/* host side */
void disable_kick(void);
bool avail_empty(void);
bool enable_kick(void);
bool use_buf(unsigned *lenp, void **bufp);
void call_used(void);
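
/*
 * Hedged sketch of the sleep/wakeup protocol behind these pairs (the real
 * loops live in main.c): each side re-checks emptiness after arming its
 * notification, which closes the race with a concurrent producer.
 *
 *	guest, consuming used buffers:		host, consuming avail buffers:
 *
 *	if (used_empty() && enable_call())	if (avail_empty() && enable_kick())
 *		wait_for_call();			wait_for_kick();
 *	disable_call();				disable_kick();
 */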

/* implemented by main */
extern bool do_sleep;
void kick(void);
void wait_for_kick(void);
void call(void);
void wait_for_call(void);

extern unsigned ring_size;

/* Compiler barrier - similar to what Linux uses */
#define barrier() asm volatile("" ::: "memory")

/* Is there a portable way to do this? */
#if defined(__x86_64__) || defined(__i386__)
/* "rep; nop" encodes the PAUSE instruction, a spin-wait hint to the CPU */
#define cpu_relax() asm ("rep; nop" ::: "memory")
#elif defined(__s390x__)
#define cpu_relax() barrier()
#else
#define cpu_relax() assert(0)
#endif

extern bool do_relax;

static inline void busy_wait(void)
{
	if (do_relax)
		cpu_relax();
	else
		/* prevent compiler from removing busy loops */
		barrier();
}
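
/*
 * Hedged usage note: a typical polling loop built on busy_wait() spins on
 * one of the emptiness predicates above, e.g. (illustration only):
 *
 *	while (used_empty())
 *		busy_wait();
 */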

#if defined(__x86_64__)
/*
 * A locked read-modify-write of a dead stack slot is a full barrier and is
 * cheaper than mfence on most CPUs; -132 stays below the 128-byte red zone
 * so live data is not clobbered.
 */
#define smp_mb()     asm volatile("lock; addl $0,-132(%%rsp)" ::: "memory", "cc")
#elif defined(__i386__)
/* i386 has no %rsp and no red zone */
#define smp_mb()     asm volatile("lock; addl $0,-4(%%esp)" ::: "memory", "cc")
#else
/*
 * Not using __ATOMIC_SEQ_CST since gcc docs say they are only synchronized
 * with other __ATOMIC_SEQ_CST calls.
 */
#define smp_mb() __sync_synchronize()
#endif

/*
 * This abuses the atomic builtins for thread fences, and
 * adds a compiler barrier.
 */
#define smp_release() do { \
	barrier(); \
	__atomic_thread_fence(__ATOMIC_RELEASE); \
} while (0)

#define smp_acquire() do { \
	__atomic_thread_fence(__ATOMIC_ACQUIRE); \
	barrier(); \
} while (0)
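
/*
 * Pairing note (hedged): smp_release() in the producer before publishing an
 * index or flag pairs with smp_acquire() in the consumer after reading it;
 * a compilable sketch of the pattern appears at the end of this header.
 */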

#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)
/*
 * These architectures do not reorder stores against other stores, so a
 * compiler barrier is enough for smp_wmb().
 */
#define smp_wmb() barrier()
#else
#define smp_wmb() smp_release()
#endif

#ifdef __alpha__
/* Alpha does not order dependent loads, so a real barrier is needed */
#define smp_read_barrier_depends() smp_acquire()
#else
#define smp_read_barrier_depends() do {} while (0)
#endif

#ifndef __always_inline
#define __always_inline inline __attribute__((always_inline))
#endif

static __always_inline
void __read_once_size(const volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(unsigned char *)res = *(volatile unsigned char *)p; break;
	case 2: *(unsigned short *)res = *(volatile unsigned short *)p; break;
	case 4: *(unsigned int *)res = *(volatile unsigned int *)p; break;
	case 8: *(unsigned long long *)res = *(volatile unsigned long long *)p; break;
	default:
		barrier();
		__builtin_memcpy((void *)res, (const void *)p, size);
		barrier();
	}
}

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile unsigned char *)p = *(unsigned char *)res; break;
	case 2: *(volatile unsigned short *)p = *(unsigned short *)res; break;
	case 4: *(volatile unsigned int *)p = *(unsigned int *)res; break;
	case 8: *(volatile unsigned long long *)p = *(unsigned long long *)res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

#define READ_ONCE(x) \
({									\
	union { typeof(x) __val; char __c[1]; } __u;			\
	__read_once_size(&(x), __u.__c, sizeof(x));			\
	smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \
	__u.__val;							\
})

#define WRITE_ONCE(x, val) \
({									\
	union { typeof(x) __val; char __c[1]; } __u =			\
		{ .__val = (typeof(x)) (val) };				\
	__write_once_size(&(x), __u.__c, sizeof(x));			\
	__u.__val;							\
})
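
/*
 * Hedged, self-contained illustration (hypothetical helpers, not used by the
 * benchmarks): the message-passing pattern that smp_release()/smp_acquire()
 * and READ_ONCE()/WRITE_ONCE() above are designed for.
 */
static inline void example_publish(int *payload, int *ready, int val)
{
	*payload = val;			/* plain store of the data */
	smp_release();			/* order the payload before the flag */
	WRITE_ONCE(*ready, 1);		/* publish the flag */
}

static inline int example_consume(int *payload, int *ready)
{
	while (!READ_ONCE(*ready))	/* poll until the flag is published */
		busy_wait();
	smp_acquire();			/* order the flag before the payload */
	return *payload;		/* guaranteed to observe the new value */
}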

#endif