/*
 * Copyright (C) 2016 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * Common macros and functions for ring benchmarking.
 */
#ifndef MAIN_H
#define MAIN_H

#include <assert.h>	/* for the cpu_relax() fallback below */
#include <stdbool.h>
#include <stdlib.h>	/* for _Exit() in the wait_cycles() fallback */

extern int param;

extern bool do_exit;

#if defined(__x86_64__) || defined(__i386__)
#include <x86intrin.h>

static inline void wait_cycles(unsigned long long cycles)
{
	unsigned long long t;

	t = __rdtsc();
	while (__rdtsc() - t < cycles) {}
}

#define VMEXIT_CYCLES 500
#define VMENTRY_CYCLES 500

#elif defined(__s390x__)
static inline void wait_cycles(unsigned long long cycles)
{
	/* brctg decrements %0, so it must be an input/output operand */
	asm volatile("0: brctg %0,0b" : "+d" (cycles));
}

/* tweak me */
#define VMEXIT_CYCLES 200
#define VMENTRY_CYCLES 200

#else
static inline void wait_cycles(unsigned long long cycles)
{
	_Exit(5);
}
#define VMEXIT_CYCLES 0
#define VMENTRY_CYCLES 0
#endif

static inline void vmexit(void)
{
	if (!do_exit)
		return;

	wait_cycles(VMEXIT_CYCLES);
}

static inline void vmentry(void)
{
	if (!do_exit)
		return;

	wait_cycles(VMENTRY_CYCLES);
}

/* implemented by ring */
void alloc_ring(void);
/* guest side */
int add_inbuf(unsigned, void *, void *);	/* expose a buffer; 0 on success */
void *get_buf(unsigned *, void **);		/* reap one completed buffer */
void disable_call(void);			/* suppress completion notifications */
bool used_empty(void);				/* no completions pending? */
bool enable_call(void);				/* re-enable them; true if none pending */
void kick_available(void);			/* notify the host of new buffers */
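/*
 * A minimal sketch of how the guest side drives the API above when
 * busy-polling.  Illustrative only: the 0x10000 length and "data" payload
 * are made up, and the real loop (with batching and a sleeping variant
 * that uses enable_call()/wait_for_call()) lives in main.c.  Fill the
 * ring, kick the host once, then poll with notifications suppressed:
 *
 *	unsigned len;
 *	void *buf, *data;
 *
 *	alloc_ring();
 *	while (add_inbuf(0x10000, NULL, "data") == 0)
 *		;
 *	kick_available();
 *	disable_call();
 *	while (used_empty())
 *		busy_wait();
 *	data = get_buf(&len, &buf);
 */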
/* host side */
void disable_kick(void);			/* suppress new-buffer notifications */
bool avail_empty(void);				/* no new buffers pending? */
bool enable_kick(void);				/* re-enable them; true if none pending */
bool use_buf(unsigned *, void **);		/* consume one available buffer */
void call_used(void);				/* notify the guest of completions */

/* implemented by main */
extern bool do_sleep;
void kick(void);
void wait_for_kick(void);
void call(void);
void wait_for_call(void);

extern unsigned ring_size;

/* Compiler barrier - similar to what Linux uses */
#define barrier() asm volatile("" ::: "memory")

/* Is there a portable way to do this? */
#if defined(__x86_64__) || defined(__i386__)
#define cpu_relax() asm ("rep; nop" ::: "memory")
#elif defined(__s390x__)
#define cpu_relax() barrier()
#else
#define cpu_relax() assert(0)
#endif

extern bool do_relax;

static inline void busy_wait(void)
{
	if (do_relax)
		cpu_relax();
	else
		/* prevent compiler from removing busy loops */
		barrier();
}

/*
 * A LOCK-prefixed ADD of zero to a stack slot is a full barrier and is
 * cheaper than MFENCE on most x86 CPUs; adding zero leaves the slot's
 * contents unchanged.  i386 has no %rsp, so it needs %esp instead.
 */
#if defined(__x86_64__)
#define smp_mb() asm volatile("lock; addl $0,-128(%%rsp)" ::: "memory", "cc")
#elif defined(__i386__)
#define smp_mb() asm volatile("lock; addl $0,-128(%%esp)" ::: "memory", "cc")
#else
/*
 * Not using __ATOMIC_SEQ_CST since gcc docs say they are only synchronized
 * with other __ATOMIC_SEQ_CST calls.
 */
#define smp_mb() __sync_synchronize()
#endif

/*
 * This abuses the atomic builtins for thread fences, and
 * adds a compiler barrier.
 */
#define smp_release() do { \
	barrier(); \
	__atomic_thread_fence(__ATOMIC_RELEASE); \
} while (0)

#define smp_acquire() do { \
	__atomic_thread_fence(__ATOMIC_ACQUIRE); \
	barrier(); \
} while (0)

#endif /* MAIN_H */
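/*
 * A sketch of how smp_release()/smp_acquire() are meant to pair across
 * the two sides.  "desc", "avail_idx" and "host_idx" are hypothetical
 * names, not part of this header; the ring implementations build their
 * publish/consume steps on this pattern:
 *
 *	guest (producer):
 *		desc[i].addr = buf;	write the descriptor first
 *		smp_release();		order the write before the publish
 *		avail_idx = i + 1;	publish it
 *
 *	host (consumer):
 *		while (host_idx == avail_idx)
 *			busy_wait();	wait for a published entry
 *		smp_acquire();		order the index read before the
 *					descriptor read
 *		process(desc[host_idx++].addr);
 */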