/*
 * Copyright (C) 2016 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * Common macros and functions for ring benchmarking.
 */
#ifndef MAIN_H
#define MAIN_H

#include <assert.h>
#include <stdbool.h>
#include <stdlib.h>

extern bool do_exit;

#if defined(__x86_64__) || defined(__i386__)
#include <x86intrin.h>

/* Spin until the requested number of TSC cycles has elapsed. */
static inline void wait_cycles(unsigned long long cycles)
{
	unsigned long long t;

	t = __rdtsc();
	while (__rdtsc() - t < cycles) {}
}

/* Rough cost of a VM exit/entry, in cycles; tune for your machine. */
#define VMEXIT_CYCLES 500
#define VMENTRY_CYCLES 500

#elif defined(__s390x__)
static inline void wait_cycles(unsigned long long cycles)
{
	/* brctg decrements its operand, so it must be read-write ("+d") */
	asm volatile("0: brctg %0,0b" : "+d" (cycles));
}

/* tweak me */
#define VMEXIT_CYCLES 200
#define VMENTRY_CYCLES 200

#else
/* Unsupported architecture: fail loudly rather than time nothing. */
static inline void wait_cycles(unsigned long long cycles)
{
	_Exit(5);
}
#define VMEXIT_CYCLES 0
#define VMENTRY_CYCLES 0
#endif

/* Simulate the cost of a VM exit, if exits are enabled. */
static inline void vmexit(void)
{
	if (!do_exit)
		return;

	wait_cycles(VMEXIT_CYCLES);
}

/* Simulate the cost of a VM entry, if exits are enabled. */
static inline void vmentry(void)
{
	if (!do_exit)
		return;

	wait_cycles(VMENTRY_CYCLES);
}

/* implemented by ring */
void alloc_ring(void);
/* guest side */
int add_inbuf(unsigned, void *, void *);
void *get_buf(unsigned *, void **);
void disable_call(void);
bool used_empty(void);
bool enable_call(void);
void kick_available(void);
/* host side */
void disable_kick(void);
bool avail_empty(void);
bool enable_kick(void);
bool use_buf(unsigned *, void **);
void call_used(void);
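/*
 * Illustrative sketch, not part of the benchmark itself: the call order a
 * guest-side loop is expected to follow.  It assumes the convention that
 * add_inbuf() returns 0 on success and that get_buf() returns NULL when no
 * used buffer is pending; see main.c for the real, batched loop.
 * example_guest_step() and its parameters are hypothetical names.
 */
static inline void example_guest_step(void *buf, unsigned len)
{
	unsigned used_len;
	void *opaque;

	/* post a buffer to the ring, then notify the host side */
	if (add_inbuf(len, buf, buf) == 0)
		kick_available();

	/* reclaim a buffer the host has marked as used, if any */
	if (get_buf(&used_len, &opaque)) {
		/* the buffer could now be recycled via add_inbuf() */
	}
}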
/* implemented by main */
extern bool do_sleep;
void kick(void);
void wait_for_kick(void);
void call(void);
void wait_for_call(void);

extern unsigned ring_size;

/* Compiler barrier - similar to what Linux uses */
#define barrier() asm volatile("" ::: "memory")

/* Is there a portable way to do this? */
#if defined(__x86_64__) || defined(__i386__)
#define cpu_relax() asm ("rep; nop" ::: "memory")
#elif defined(__s390x__)
#define cpu_relax() barrier()
#else
#define cpu_relax() assert(0)
#endif

extern bool do_relax;

static inline void busy_wait(void)
{
	if (do_relax)
		cpu_relax();
	else
		/* prevent compiler from removing busy loops */
		barrier();
}

/*
 * Not using __ATOMIC_SEQ_CST since gcc docs say they are only synchronized
 * with other __ATOMIC_SEQ_CST calls.
 */
#define smp_mb() __sync_synchronize()

/*
 * This abuses the atomic builtins for thread fences, and
 * adds a compiler barrier.
 */
#define smp_release() do { \
	barrier(); \
	__atomic_thread_fence(__ATOMIC_RELEASE); \
} while (0)

#define smp_acquire() do { \
	__atomic_thread_fence(__ATOMIC_ACQUIRE); \
	barrier(); \
} while (0)

#endif /* MAIN_H */
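/*
 * How the two fences above pair up (an illustrative sketch; the names
 * slot, data and ready are hypothetical): the producer publishes its data
 * before setting the flag, and the consumer may only read the data after
 * observing the flag.
 *
 *	CPU 0 (producer)		CPU 1 (consumer)
 *	slot->data = x;			while (!slot->ready)
 *	smp_release();				busy_wait();
 *	slot->ready = true;		smp_acquire();
 *					x = slot->data;
 */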