#define _GNU_SOURCE
#include "main.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <pthread.h>
#include <malloc.h>
#include <assert.h>
#include <errno.h>
#include <limits.h>

#define SMP_CACHE_BYTES 64
#define cache_line_size() SMP_CACHE_BYTES
#define ____cacheline_aligned_in_smp __attribute__ ((aligned (SMP_CACHE_BYTES)))
#define unlikely(x) (__builtin_expect(!!(x), 0))
#define likely(x) (__builtin_expect(!!(x), 1))
#define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))
typedef pthread_spinlock_t spinlock_t;

/* Userspace stand-ins for the kernel allocation API that ptr_ring.h expects. */
typedef int gfp_t;
static void *kmalloc(unsigned size, gfp_t gfp)
{
        return memalign(64, size);
}

static void *kzalloc(unsigned size, gfp_t gfp)
{
        void *p = memalign(64, size);
        if (!p)
                return p;
        memset(p, 0, size);

        return p;
}

static void kfree(void *p)
{
        if (p)
                free(p);
}

static void spin_lock_init(spinlock_t *lock)
{
        int r = pthread_spin_init(lock, 0);
        assert(!r);
}

static void spin_lock(spinlock_t *lock)
{
        int ret = pthread_spin_lock(lock);
        assert(!ret);
}

static void spin_unlock(spinlock_t *lock)
{
        int ret = pthread_spin_unlock(lock);
        assert(!ret);
}

/*
 * The _bh/_irq/_irqsave variants only differ inside the kernel; in this
 * userspace harness they all map to the plain pthread spinlock above.
 */
static void spin_lock_bh(spinlock_t *lock)
{
        spin_lock(lock);
}

static void spin_unlock_bh(spinlock_t *lock)
{
        spin_unlock(lock);
}

static void spin_lock_irq(spinlock_t *lock)
{
        spin_lock(lock);
}

static void spin_unlock_irq(spinlock_t *lock)
{
        spin_unlock(lock);
}

static void spin_lock_irqsave(spinlock_t *lock, unsigned long f)
{
        spin_lock(lock);
}

static void spin_unlock_irqrestore(spinlock_t *lock, unsigned long f)
{
        spin_unlock(lock);
}

#include "../../../include/linux/ptr_ring.h"

static unsigned long long headcnt, tailcnt;
static struct ptr_ring array ____cacheline_aligned_in_smp;

/* implemented by ring */
void alloc_ring(void)
{
        int ret = ptr_ring_init(&array, ring_size, 0);
        assert(!ret);
        /* Hacky way to poke at ring internals. Useful for testing though. */
        if (param)
                array.batch = param;
}

/* guest side */
int add_inbuf(unsigned len, void *buf, void *datap)
{
        int ret;

        ret = __ptr_ring_produce(&array, buf);
        if (ret >= 0) {
                ret = 0;
                headcnt++;
        }

        return ret;
}

/*
 * The ptr_ring API provides no way for the producer to find out whether a
 * given buffer was consumed. Our tests merely require that a successful
 * get_buf implies that add_inbuf succeeded in the past, and that add_inbuf
 * will succeed; fake it accordingly.
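 *
 * headcnt counts buffers queued with add_inbuf and tailcnt counts buffers
 * handed back by get_buf, so a buffer is reported as used only while
 * tailcnt < headcnt (an add_inbuf succeeded earlier) and the ring is not
 * full (the next add_inbuf is certain to succeed).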
 */
void *get_buf(unsigned *lenp, void **bufp)
{
        void *datap;

        if (tailcnt == headcnt || __ptr_ring_full(&array))
                datap = NULL;
        else {
                datap = "Buffer\n";
                ++tailcnt;
        }

        return datap;
}

bool used_empty()
{
        return (tailcnt == headcnt || __ptr_ring_full(&array));
}

/* This adapter does not implement kick/call notifications; these hooks must never be invoked. */
void disable_call()
{
        assert(0);
}

bool enable_call()
{
        assert(0);
}

void kick_available(void)
{
        assert(0);
}

/* host side */
void disable_kick()
{
        assert(0);
}

bool enable_kick()
{
        assert(0);
}

bool avail_empty()
{
        return !__ptr_ring_peek(&array);
}

bool use_buf(unsigned *lenp, void **bufp)
{
        void *ptr;

        ptr = __ptr_ring_consume(&array);

        return ptr;
}

void call_used(void)
{
        assert(0);
}