xref: /openbmc/linux/tools/virtio/ringtest/main.h (revision f3a8b664)
1 /*
2  * Copyright (C) 2016 Red Hat, Inc.
3  * Author: Michael S. Tsirkin <mst@redhat.com>
4  * This work is licensed under the terms of the GNU GPL, version 2.
5  *
6  * Common macros and functions for ring benchmarking.
7  */
8 #ifndef MAIN_H
9 #define MAIN_H
10 
#include <assert.h>	/* assert() — used by the non-x86 cpu_relax() fallback */
#include <stdbool.h>
#include <stdlib.h>	/* _Exit() — used by the non-x86 wait_cycles() fallback */
12 
13 extern bool do_exit;
14 
15 #if defined(__x86_64__) || defined(__i386__)
16 #include "x86intrin.h"
17 
/* Busy-spin until at least @cycles TSC ticks have elapsed. */
static inline void wait_cycles(unsigned long long cycles)
{
	unsigned long long start = __rdtsc();

	while (__rdtsc() - start < cycles)
		;
}
25 
/*
 * Cycles charged per simulated VM exit/entry; presumably a rough
 * ballpark for recent x86 hardware — confirm against measurements.
 */
#define VMEXIT_CYCLES 500
#define VMENTRY_CYCLES 500

#else
/*
 * No cycle counter on other architectures, so the benchmark cannot
 * meaningfully run: fail loudly with a distinctive exit status if
 * this path is ever reached.
 */
static inline void wait_cycles(unsigned long long cycles)
{
	_Exit(5);
}
#define VMEXIT_CYCLES 0
#define VMENTRY_CYCLES 0
#endif
37 
38 static inline void vmexit(void)
39 {
40 	if (!do_exit)
41 		return;
42 
43 	wait_cycles(VMEXIT_CYCLES);
44 }
45 static inline void vmentry(void)
46 {
47 	if (!do_exit)
48 		return;
49 
50 	wait_cycles(VMENTRY_CYCLES);
51 }
52 
/*
 * Ring API, implemented by each ring variant (ring.c, virtio_ring_0_9.c, ...).
 * Note: all no-argument declarations use (void) — a bare () declares a
 * function with unspecified parameters in C and disables call-site type
 * checking.
 */
void alloc_ring(void);

/* guest side */
int add_inbuf(unsigned len, void *buf, void *datap);
void *get_buf(unsigned *lenp, void **bufp);
void disable_call(void);
bool used_empty(void);
bool enable_call(void);
void kick_available(void);

/* host side */
void disable_kick(void);
bool avail_empty(void);
bool enable_kick(void);
bool use_buf(unsigned *lenp, void **bufp);
void call_used(void);

/* implemented by main */
extern bool do_sleep;
void kick(void);
void wait_for_kick(void);
void call(void);
void wait_for_call(void);

/* number of ring entries; set by main from the command line */
extern unsigned ring_size;
77 
/* Compiler barrier - similar to what Linux uses */
#define barrier() asm volatile("" ::: "memory")

/*
 * Is there a portable way to do this?
 * "rep; nop" is the x86 PAUSE instruction: it hints that this is a
 * spin-wait loop, saving power and easing SMT-sibling contention.
 */
#if defined(__x86_64__) || defined(__i386__)
#define cpu_relax() asm ("rep; nop" ::: "memory")
#else
/* Unsupported architecture: abort if a spin loop is ever entered. */
#define cpu_relax() assert(0)
#endif

/* Set by main (command line): whether busy_wait() should use cpu_relax(). */
extern bool do_relax;
89 
90 static inline void busy_wait(void)
91 {
92 	if (do_relax)
93 		cpu_relax();
94 	else
95 		/* prevent compiler from removing busy loops */
96 		barrier();
97 }
98 
/*
 * Not using __ATOMIC_SEQ_CST since gcc docs say they are only synchronized
 * with other __ATOMIC_SEQ_CST calls.
 */
#define smp_mb() __sync_synchronize()

/*
 * This abuses the atomic builtins for thread fences, and
 * adds a compiler barrier.
 * smp_release(): compiler barrier first, so prior stores cannot be
 * reordered past the release fence by the compiler.
 */
#define smp_release() do { \
    barrier(); \
    __atomic_thread_fence(__ATOMIC_RELEASE); \
} while (0)

/* Mirror image: acquire fence first, then pin subsequent accesses. */
#define smp_acquire() do { \
    __atomic_thread_fence(__ATOMIC_ACQUIRE); \
    barrier(); \
} while (0)
118 
119 #endif
120