/* tools/virtio/ringtest/ptr_ring.c */
#define _GNU_SOURCE
#include "main.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <pthread.h>
#include <malloc.h>
#include <assert.h>
#include <errno.h>
#include <limits.h>

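/*
 * Minimal userspace stand-ins for the kernel facilities that
 * include/linux/ptr_ring.h depends on.  SMP_CACHE_BYTES is hardcoded to 64,
 * which matches common x86 parts but is an assumption about the host.
 */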
#define SMP_CACHE_BYTES 64
#define cache_line_size() SMP_CACHE_BYTES
#define ____cacheline_aligned_in_smp __attribute__ ((aligned (SMP_CACHE_BYTES)))
#define unlikely(x)    (__builtin_expect(!!(x), 0))
#define likely(x)    (__builtin_expect(!!(x), 1))
#define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))
typedef pthread_spinlock_t  spinlock_t;

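/*
 * Allocator shims: the gfp flags are accepted for API compatibility but
 * ignored, and every allocation is cache-line aligned so the ring array
 * starts on a cache line boundary.
 */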
typedef int gfp_t;
static void *kmalloc(unsigned size, gfp_t gfp)
{
	return memalign(SMP_CACHE_BYTES, size);
}

static void *kzalloc(unsigned size, gfp_t gfp)
{
	void *p = memalign(SMP_CACHE_BYTES, size);
	if (!p)
		return p;
	memset(p, 0, size);

	return p;
}

static void kfree(void *p)
{
	if (p)
		free(p);
}

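/*
 * Spinlock shims: pthread spinlocks stand in for the kernel's spinlock_t.
 * The _bh/_irq/_irqsave variants all collapse to a plain lock/unlock, since
 * there are no softirqs or interrupts to mask in a userspace test.
 */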
static void spin_lock_init(spinlock_t *lock)
{
	int r = pthread_spin_init(lock, 0);
	assert(!r);
}

static void spin_lock(spinlock_t *lock)
{
	int ret = pthread_spin_lock(lock);
	assert(!ret);
}

static void spin_unlock(spinlock_t *lock)
{
	int ret = pthread_spin_unlock(lock);
	assert(!ret);
}

static void spin_lock_bh(spinlock_t *lock)
{
	spin_lock(lock);
}

static void spin_unlock_bh(spinlock_t *lock)
{
	spin_unlock(lock);
}

static void spin_lock_irq(spinlock_t *lock)
{
	spin_lock(lock);
}

static void spin_unlock_irq(spinlock_t *lock)
{
	spin_unlock(lock);
}

static void spin_lock_irqsave(spinlock_t *lock, unsigned long f)
{
	spin_lock(lock);
}

static void spin_unlock_irqrestore(spinlock_t *lock, unsigned long f)
{
	spin_unlock(lock);
}

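/*
 * With the shims above in place, the kernel header can be compiled here
 * unmodified.
 */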
#include "../../../include/linux/ptr_ring.h"

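/*
 * headcnt counts buffers successfully produced via add_inbuf(); tailcnt
 * counts buffers get_buf() has pretended were completed (see the comment
 * above get_buf()).
 */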
static unsigned long long headcnt, tailcnt;
static struct ptr_ring array ____cacheline_aligned_in_smp;

/* implemented by ring */
void alloc_ring(void)
{
	int ret = ptr_ring_init(&array, ring_size, 0);
	assert(!ret);
}

/* guest side */
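/*
 * The unlocked __ptr_ring_produce() is used rather than ptr_ring_produce();
 * presumably safe because the harness runs a single producer thread.
 */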
int add_inbuf(unsigned len, void *buf, void *datap)
{
	int ret;

	ret = __ptr_ring_produce(&array, buf);
	if (ret >= 0) {
		ret = 0;
		headcnt++;
	}

	return ret;
}

/*
 * The ptr_ring API provides no way for the producer to find out whether a
 * given buffer has been consumed.  Our tests merely require that a successful
 * get_buf implies that add_inbuf succeeded at some point in the past and that
 * a subsequent add_inbuf will succeed, so fake it accordingly.
 */
void *get_buf(unsigned *lenp, void **bufp)
{
	void *datap;

	if (tailcnt == headcnt || __ptr_ring_full(&array))
		datap = NULL;
	else {
		datap = "Buffer\n";
		++tailcnt;
	}

	return datap;
}

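/*
 * Busy-wait until get_buf() would succeed.  barrier() forces the ring state
 * to be re-read on each iteration.
 */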
void poll_used(void)
{
	void *b;

	do {
		if (tailcnt == headcnt || __ptr_ring_full(&array)) {
			b = NULL;
			barrier();
		} else {
			b = "Buffer\n";
		}
	} while (!b);
}

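/*
 * The call/kick notification hooks (on both the guest and host side) are
 * stubbed out with assert(0): this backend presumably only supports the
 * harness's polling mode, where they are never invoked.
 */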
void disable_call(void)
{
	assert(0);
}

bool enable_call(void)
{
	assert(0);
	return false;	/* not reached; quiets -Wreturn-type */
}

void kick_available(void)
{
	assert(0);
}

/* host side */
void disable_kick(void)
{
	assert(0);
}

bool enable_kick(void)
{
	assert(0);
	return false;	/* not reached; quiets -Wreturn-type */
}

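/*
 * Busy-wait until the ring has an entry to consume; barrier() keeps the
 * compiler from caching the peek result across iterations.
 */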
void poll_avail(void)
{
	void *b;

	do {
		barrier();
		b = __ptr_ring_peek(&array);
	} while (!b);
}

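/* Pop one entry off the ring; returns false when the ring is empty. */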
bool use_buf(unsigned *lenp, void **bufp)
{
	void *ptr;

	ptr = __ptr_ring_consume(&array);

	return ptr != NULL;
}

void call_used(void)
{
	assert(0);
}
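
/*
 * For reference, a minimal single-threaded sketch of the ptr_ring calls
 * exercised above (hypothetical, not part of the test; the ring size of 16
 * is chosen arbitrarily):
 *
 *	struct ptr_ring r;
 *
 *	assert(!ptr_ring_init(&r, 16, 0));
 *	assert(!__ptr_ring_produce(&r, (void *)"hello"));  // enqueue a pointer
 *	assert(__ptr_ring_peek(&r));                       // visible to consumer
 *	assert(__ptr_ring_consume(&r));                    // dequeue it again
 *	ptr_ring_cleanup(&r, NULL);                        // no destructor
 */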