/*
 * Copyright (C) 2016 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * Common macros and functions for ring benchmarking.
 */
#ifndef MAIN_H
#define MAIN_H

#include <assert.h>
#include <stdbool.h>
#include <stdlib.h>

extern bool do_exit;

#if defined(__x86_64__) || defined(__i386__)
#include <x86intrin.h>

static inline void wait_cycles(unsigned long long cycles)
{
	unsigned long long t;

	t = __rdtsc();
	while (__rdtsc() - t < cycles) {}
}

#define VMEXIT_CYCLES 500
#define VMENTRY_CYCLES 500

#else
static inline void wait_cycles(unsigned long long cycles)
{
	_Exit(5);
}
#define VMEXIT_CYCLES 0
#define VMENTRY_CYCLES 0
#endif

static inline void vmexit(void)
{
	if (!do_exit)
		return;

	wait_cycles(VMEXIT_CYCLES);
}

static inline void vmentry(void)
{
	if (!do_exit)
		return;

	wait_cycles(VMENTRY_CYCLES);
}

/* implemented by ring */
void alloc_ring(void);
/* guest side */
int add_inbuf(unsigned, void *, void *);
void *get_buf(unsigned *, void **);
void disable_call(void);
bool used_empty(void);
bool enable_call(void);
void kick_available(void);
/* host side */
void disable_kick(void);
bool avail_empty(void);
bool enable_kick(void);
bool use_buf(unsigned *, void **);
void call_used(void);

/* implemented by main */
extern bool do_sleep;
void kick(void);
void wait_for_kick(void);
void call(void);
void wait_for_call(void);

extern unsigned ring_size;

/* Compiler barrier - similar to what Linux uses */
#define barrier() asm volatile("" ::: "memory")

/* Is there a portable way to do this? */
#if defined(__x86_64__) || defined(__i386__)
#define cpu_relax() asm ("rep; nop" ::: "memory")
#else
#define cpu_relax() assert(0)
#endif

extern bool do_relax;

static inline void busy_wait(void)
{
	if (do_relax)
		cpu_relax();
	else
		/* prevent compiler from removing busy loops */
		barrier();
}

/*
 * Not using __ATOMIC_SEQ_CST since gcc docs say they are only synchronized
 * with other __ATOMIC_SEQ_CST calls.
 */
#define smp_mb() __sync_synchronize()

/*
 * This abuses the atomic builtins for thread fences, and
 * adds a compiler barrier.
 */
#define smp_release() do { \
	barrier(); \
	__atomic_thread_fence(__ATOMIC_RELEASE); \
} while (0)

#define smp_acquire() do { \
	__atomic_thread_fence(__ATOMIC_ACQUIRE); \
	barrier(); \
} while (0)

#endif
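
/*
 * Illustrative usage sketch, not part of the original header: one guest-side
 * round built from the declarations above - queue buffers with add_inbuf(),
 * notify the host with kick_available(), then reclaim a completed buffer
 * with get_buf(), sleeping via enable_call()/wait_for_call() only while the
 * used ring looks empty.  The RINGTEST_USAGE_SKETCH guard, the example token
 * values, and the return-value conventions assumed here (add_inbuf() == 0 on
 * success, get_buf() == NULL when nothing has completed, enable_call()
 * returning true when it is safe to sleep) are assumptions for illustration;
 * the authoritative benchmark loops live in main.c.
 */
#ifdef RINGTEST_USAGE_SKETCH
static inline void example_guest_round(void)
{
	unsigned len;
	void *buf;

	/* Queue descriptors until the ring refuses more, then kick the host. */
	while (add_inbuf(0x80, (void *)0x1, NULL) == 0)
		;
	kick_available();

	/* Reclaim one completed buffer, sleeping only when nothing is ready. */
	while (!(buf = get_buf(&len, NULL))) {
		if (do_sleep && used_empty() && enable_call())
			wait_for_call();
		else
			busy_wait();
	}
	disable_call();
}
#endif /* RINGTEST_USAGE_SKETCH */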