/******************************************************************************
 * ring.h
 *
 * Shared producer-consumer ring macros.
 *
 * Tim Deegan and Andrew Warfield November 2004.
 */

#ifndef __XEN_PUBLIC_IO_RING_H__
#define __XEN_PUBLIC_IO_RING_H__

#include <xen/interface/grant_table.h>

typedef unsigned int RING_IDX;

/* Round a 32-bit unsigned constant down to the nearest power of two. */
#define __RD2(_x)  (((_x) & 0x00000002) ? 0x2		       : ((_x) & 0x1))
#define __RD4(_x)  (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2    : __RD2(_x))
#define __RD8(_x)  (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4    : __RD4(_x))
#define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8    : __RD8(_x))
#define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x))

/*
 * Calculate size of a shared ring, given the total available space for the
 * ring and indexes (_sz), and the name tag of the request/response structure.
 * A ring contains as many entries as will fit, rounded down to the nearest
 * power of two (so we can mask with (size-1) to loop around).
 */
#define __CONST_RING_SIZE(_s, _sz)				\
	(__RD32(((_sz) - offsetof(struct _s##_sring, ring)) /	\
		sizeof(((struct _s##_sring *)0)->ring[0])))

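/*
 * Worked example (illustrative arithmetic only): with 64-byte ring entries
 * on a 4096-byte shared page, the indexes and padding occupy the first 64
 * bytes, leaving room for 63 entries; __CONST_RING_SIZE() rounds that down
 * to the nearest power of two, giving a 32-entry ring.
 */
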
/*
 * The same for passing in an actual pointer instead of a name tag.
 */
#define __RING_SIZE(_s, _sz)						\
	(__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))

/*
 * Macros to make the correct C datatypes for a new kind of ring.
 *
 * To make a new ring datatype, you need to have two message structures
 * already defined, let's say struct request and struct response.
 *
 * In a header where you want the ring datatype declared, you then do:
 *
 *     DEFINE_RING_TYPES(mytag, struct request, struct response);
 *
 * These expand out to give you a set of types, as you can see below.
 * The most important of these are:
 *
 *     struct mytag_sring      - The shared ring.
 *     struct mytag_front_ring - The 'front' half of the ring.
 *     struct mytag_back_ring  - The 'back' half of the ring.
 *
 * To initialize a ring in your code, you need to know the location and size
 * of the shared memory area (PAGE_SIZE, for instance). To initialize
 * the front half:
 *
 *     struct mytag_front_ring front_ring;
 *     SHARED_RING_INIT((struct mytag_sring *)shared_page);
 *     FRONT_RING_INIT(&front_ring, (struct mytag_sring *)shared_page,
 *		       PAGE_SIZE);
 *
 * Initializing the back follows similarly (note that only the front
 * initializes the shared ring):
 *
 *     struct mytag_back_ring back_ring;
 *     BACK_RING_INIT(&back_ring, (struct mytag_sring *)shared_page,
 *		      PAGE_SIZE);
 */

#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t)			\
									\
/* Shared ring entry */							\
union __name##_sring_entry {						\
    __req_t req;							\
    __rsp_t rsp;							\
};									\
									\
/* Shared ring page */							\
struct __name##_sring {							\
    RING_IDX req_prod, req_event;					\
    RING_IDX rsp_prod, rsp_event;					\
    uint8_t  pad[48];							\
    union __name##_sring_entry ring[1]; /* variable-length */		\
};									\
									\
/* "Front" end's private variables */					\
struct __name##_front_ring {						\
    RING_IDX req_prod_pvt;						\
    RING_IDX rsp_cons;							\
    unsigned int nr_ents;						\
    struct __name##_sring *sring;					\
};									\
									\
/* "Back" end's private variables */					\
struct __name##_back_ring {						\
    RING_IDX rsp_prod_pvt;						\
    RING_IDX req_cons;							\
    unsigned int nr_ents;						\
    struct __name##_sring *sring;					\
};

/*
 * Macros for manipulating rings.
 *
 * FRONT_RING_whatever works on the "front end" of a ring: here
 * requests are pushed on to the ring and responses taken off it.
 *
 * BACK_RING_whatever works on the "back end" of a ring: here
 * requests are taken off the ring and responses put on.
 *
 * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL.
 * This is OK in 1-for-1 request-response situations where the
 * requestor (front end) never has more than RING_SIZE()-1
 * outstanding requests.
 */

/* Initializing empty rings */
#define SHARED_RING_INIT(_s) do {					\
    (_s)->req_prod  = (_s)->rsp_prod  = 0;				\
    (_s)->req_event = (_s)->rsp_event = 1;				\
    memset((_s)->pad, 0, sizeof((_s)->pad));				\
} while (0)

#define FRONT_RING_INIT(_r, _s, __size) do {				\
    (_r)->req_prod_pvt = 0;						\
    (_r)->rsp_cons = 0;							\
    (_r)->nr_ents = __RING_SIZE(_s, __size);				\
    (_r)->sring = (_s);							\
} while (0)

#define BACK_RING_INIT(_r, _s, __size) do {				\
    (_r)->rsp_prod_pvt = 0;						\
    (_r)->req_cons = 0;							\
    (_r)->nr_ents = __RING_SIZE(_s, __size);				\
    (_r)->sring = (_s);							\
} while (0)

/* Initialize to existing shared indexes -- for recovery */
#define FRONT_RING_ATTACH(_r, _s, __size) do {				\
    (_r)->sring = (_s);							\
    (_r)->req_prod_pvt = (_s)->req_prod;				\
    (_r)->rsp_cons = (_s)->rsp_prod;					\
    (_r)->nr_ents = __RING_SIZE(_s, __size);				\
} while (0)

#define BACK_RING_ATTACH(_r, _s, __size) do {				\
    (_r)->sring = (_s);							\
    (_r)->rsp_prod_pvt = (_s)->rsp_prod;				\
    (_r)->req_cons = (_s)->req_prod;					\
    (_r)->nr_ents = __RING_SIZE(_s, __size);				\
} while (0)

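/*
 * For example (a sketch only), a front end re-connecting to a still-live
 * shared page, e.g. after being restarted, might do:
 *
 *     FRONT_RING_ATTACH(&front_ring, (struct mytag_sring *)shared_page,
 *                       PAGE_SIZE);
 *
 * rather than FRONT_RING_INIT(), so the private indexes pick up from the
 * indexes already published in the shared ring.
 */
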
/* How big is this ring? */
#define RING_SIZE(_r)							\
    ((_r)->nr_ents)

/* Number of free requests (for use on front side only). */
#define RING_FREE_REQUESTS(_r)						\
    (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons))

/*
 * Test if there is an empty slot available on the front ring.
 * (This is only meaningful from the front.)
 */
#define RING_FULL(_r)							\
    (RING_FREE_REQUESTS(_r) == 0)

/* Test if there are outstanding messages to be processed on a ring. */
#define RING_HAS_UNCONSUMED_RESPONSES(_r)				\
    ((_r)->sring->rsp_prod - (_r)->rsp_cons)

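/*
 * Note: the value below is clamped to the number of requests the back end
 * can still service (RING_SIZE() minus its consumed-but-unanswered
 * requests), so a buggy or malicious front end advertising an implausibly
 * large req_prod cannot make this report more work than the ring can hold.
 */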
#define RING_HAS_UNCONSUMED_REQUESTS(_r)				\
    ({									\
	unsigned int req = (_r)->sring->req_prod - (_r)->req_cons;	\
	unsigned int rsp = RING_SIZE(_r) -				\
			   ((_r)->req_cons - (_r)->rsp_prod_pvt);	\
	req < rsp ? req : rsp;						\
    })

/* Direct access to individual ring elements, by index. */
#define RING_GET_REQUEST(_r, _idx)					\
    (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))

/*
 * Get a local copy of a request.
 *
 * Use this in preference to RING_GET_REQUEST() so all processing is
 * done on a local copy that cannot be modified by the other end.
 *
 * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this
 * to be ineffective where _req is a struct which consists of only bitfields.
 */
#define RING_COPY_REQUEST(_r, _idx, _req) do {				\
	/* Use volatile to force the copy into _req. */			\
	*(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx);	\
} while (0)

#define RING_GET_RESPONSE(_r, _idx)					\
    (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))

/* Loop termination condition: Would the specified index overflow the ring? */
#define RING_REQUEST_CONS_OVERFLOW(_r, _cons)				\
    (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))

/* Ill-behaved frontend determination: Can there be this many requests? */
#define RING_REQUEST_PROD_OVERFLOW(_r, _prod)               \
    (((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r))


#define RING_PUSH_REQUESTS(_r) do {					\
    virt_wmb(); /* back sees requests /before/ updated producer index */	\
    (_r)->sring->req_prod = (_r)->req_prod_pvt;				\
} while (0)

#define RING_PUSH_RESPONSES(_r) do {					\
    virt_wmb(); /* front sees responses /before/ updated producer index */	\
    (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt;				\
} while (0)

/*
 * Notification hold-off (req_event and rsp_event):
 *
 * When queueing requests or responses on a shared ring, it may not always be
 * necessary to notify the remote end. For example, if requests are in flight
 * in a backend, the front may be able to queue further requests without
 * notifying the back (if the back checks for new requests when it queues
 * responses).
 *
 * When enqueuing requests or responses:
 *
 *  Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument
 *  is a boolean return value. True indicates that the receiver requires an
 *  asynchronous notification.
 *
 * After dequeuing requests or responses (before putting the connection to
 * sleep):
 *
 *  Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES().
 *  The second argument is a boolean return value. True indicates that there
 *  are pending messages on the ring (i.e., the connection should not be put
 *  to sleep).
 *
 *  These macros will set the req_event/rsp_event field to trigger a
 *  notification on the very next message that is enqueued. If you want to
 *  create batches of work (i.e., only receive a notification after several
 *  messages have been enqueued) then you will need to create a customised
 *  version of the FINAL_CHECK macro in your own code, which sets the event
 *  field appropriately.
 */

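/*
 * Putting the producer-side macros together, a minimal front-end sketch
 * (illustrative only; "notify the back end" stands for whatever event
 * channel or interrupt mechanism the caller uses) might look like:
 *
 *     int notify;
 *
 *     if (RING_FULL(&front_ring))
 *         return -EBUSY;                        (back off and retry later)
 *     req = RING_GET_REQUEST(&front_ring, front_ring.req_prod_pvt);
 *     *req = ...;                               (fill in the request)
 *     front_ring.req_prod_pvt++;
 *     RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&front_ring, notify);
 *     if (notify)
 *         ...;                                  (notify the back end)
 */
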
#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do {		\
    RING_IDX __old = (_r)->sring->req_prod;				\
    RING_IDX __new = (_r)->req_prod_pvt;				\
    virt_wmb(); /* back sees requests /before/ updated producer index */	\
    (_r)->sring->req_prod = __new;					\
    virt_mb(); /* back sees new requests /before/ we check req_event */	\
    (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) <		\
		 (RING_IDX)(__new - __old));				\
} while (0)

#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do {		\
    RING_IDX __old = (_r)->sring->rsp_prod;				\
    RING_IDX __new = (_r)->rsp_prod_pvt;				\
    virt_wmb(); /* front sees responses /before/ updated producer index */	\
    (_r)->sring->rsp_prod = __new;					\
    virt_mb(); /* front sees new responses /before/ we check rsp_event */	\
    (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) <		\
		 (RING_IDX)(__new - __old));				\
} while (0)

#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do {		\
    (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);			\
    if (_work_to_do) break;						\
    (_r)->sring->req_event = (_r)->req_cons + 1;			\
    virt_mb();								\
    (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);			\
} while (0)

#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do {		\
    (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);			\
    if (_work_to_do) break;						\
    (_r)->sring->rsp_event = (_r)->rsp_cons + 1;			\
    virt_mb();								\
    (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);			\
} while (0)

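/*
 * And a matching back-end sketch (again illustrative only; error handling,
 * request validation and the actual notification mechanism are omitted;
 * req and rsp are local request/response message variables):
 *
 *     int notify, work_to_do;
 *
 *     do {
 *         while (RING_HAS_UNCONSUMED_REQUESTS(&back_ring)) {
 *             RING_COPY_REQUEST(&back_ring, back_ring.req_cons, &req);
 *             back_ring.req_cons++;
 *             rsp = RING_GET_RESPONSE(&back_ring, back_ring.rsp_prod_pvt);
 *             *rsp = ...;                       (fill in the response)
 *             back_ring.rsp_prod_pvt++;
 *         }
 *         RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&back_ring, notify);
 *         if (notify)
 *             ...;                              (notify the front end)
 *         RING_FINAL_CHECK_FOR_REQUESTS(&back_ring, work_to_do);
 *     } while (work_to_do);
 */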

/*
 * DEFINE_XEN_FLEX_RING_AND_INTF defines two monodirectional rings, plus
 * functions to check whether there is data on a ring and to read from
 * and write to it.
 *
 * DEFINE_XEN_FLEX_RING is similar to DEFINE_XEN_FLEX_RING_AND_INTF, but
 * does not define the indexes page. As different protocols can have
 * extensions to the basic format, this macro allows them to define their
 * own struct.
 *
 * XEN_FLEX_RING_SIZE
 *   Convenience macro to calculate the size of one of the two rings
 *   from the overall order.
 *
 * $NAME_mask
 *   Function to apply the size mask to an index, to reduce the index
 *   into the range [0, size).
 *
 * $NAME_read_packet
 *   Function to read data from the ring. The amount of data to read is
 *   specified by the "size" argument.
 *
 * $NAME_write_packet
 *   Function to write data to the ring. The amount of data to write is
 *   specified by the "size" argument.
 *
 * $NAME_get_ring_ptr
 *   Convenience function that returns a pointer into the ring at the
 *   right location for reading or writing.
 *
 * $NAME_data_intf
 *   Indexes page, shared between frontend and backend. It also
 *   contains the array of grant refs.
 *
 * $NAME_queued
 *   Function to calculate how many bytes are currently on the ring,
 *   ready to be read. It can also be used to calculate how much free
 *   space is currently on the ring (XEN_FLEX_RING_SIZE() -
 *   $NAME_queued()).
 */

#ifndef XEN_PAGE_SHIFT
/* The PAGE_SIZE for ring protocols and hypercall interfaces is always
 * 4K, regardless of the architecture and of the page granularity chosen
 * by the operating system.
 */
#define XEN_PAGE_SHIFT 12
#endif
#define XEN_FLEX_RING_SIZE(order)                                             \
    (1UL << ((order) + XEN_PAGE_SHIFT - 1))
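
/*
 * The "order" is that of the whole shared buffer, which is split evenly
 * between the two rings; e.g. an order-1 buffer (two 4K pages, 8192 bytes
 * in total) gives XEN_FLEX_RING_SIZE(1) == 4096 bytes for each ring.
 */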

#define DEFINE_XEN_FLEX_RING(name)                                            \
static inline RING_IDX name##_mask(RING_IDX idx, RING_IDX ring_size)          \
{                                                                             \
    return idx & (ring_size - 1);                                             \
}                                                                             \
                                                                              \
static inline unsigned char *name##_get_ring_ptr(unsigned char *buf,          \
                                                 RING_IDX idx,                \
                                                 RING_IDX ring_size)          \
{                                                                             \
    return buf + name##_mask(idx, ring_size);                                 \
}                                                                             \
                                                                              \
static inline void name##_read_packet(void *opaque,                           \
                                      const unsigned char *buf,               \
                                      size_t size,                            \
                                      RING_IDX masked_prod,                   \
                                      RING_IDX *masked_cons,                  \
                                      RING_IDX ring_size)                     \
{                                                                             \
    if (*masked_cons < masked_prod ||                                         \
        size <= ring_size - *masked_cons) {                                   \
        memcpy(opaque, buf + *masked_cons, size);                             \
    } else {                                                                  \
        memcpy(opaque, buf + *masked_cons, ring_size - *masked_cons);         \
        memcpy((unsigned char *)opaque + ring_size - *masked_cons, buf,       \
               size - (ring_size - *masked_cons));                            \
    }                                                                         \
    *masked_cons = name##_mask(*masked_cons + size, ring_size);               \
}                                                                             \
                                                                              \
static inline void name##_write_packet(unsigned char *buf,                    \
                                       const void *opaque,                    \
                                       size_t size,                           \
                                       RING_IDX *masked_prod,                 \
                                       RING_IDX masked_cons,                  \
                                       RING_IDX ring_size)                    \
{                                                                             \
    if (*masked_prod < masked_cons ||                                         \
        size <= ring_size - *masked_prod) {                                   \
        memcpy(buf + *masked_prod, opaque, size);                             \
    } else {                                                                  \
        memcpy(buf + *masked_prod, opaque, ring_size - *masked_prod);         \
        memcpy(buf, (unsigned char *)opaque + (ring_size - *masked_prod),     \
               size - (ring_size - *masked_prod));                            \
    }                                                                         \
    *masked_prod = name##_mask(*masked_prod + size, ring_size);               \
}                                                                             \
                                                                              \
static inline RING_IDX name##_queued(RING_IDX prod,                           \
                                     RING_IDX cons,                           \
                                     RING_IDX ring_size)                      \
{                                                                             \
    RING_IDX size;                                                            \
                                                                              \
    if (prod == cons)                                                         \
        return 0;                                                             \
                                                                              \
    prod = name##_mask(prod, ring_size);                                      \
    cons = name##_mask(cons, ring_size);                                      \
                                                                              \
    if (prod == cons)                                                         \
        return ring_size;                                                     \
                                                                              \
    if (prod > cons)                                                          \
        size = prod - cons;                                                   \
    else                                                                      \
        size = ring_size - (cons - prod);                                     \
    return size;                                                              \
}                                                                             \
                                                                              \
struct name##_data {                                                          \
    unsigned char *in; /* half of the allocation */                           \
    unsigned char *out; /* half of the allocation */                          \
}

#define DEFINE_XEN_FLEX_RING_AND_INTF(name)                                   \
struct name##_data_intf {                                                     \
    RING_IDX in_cons, in_prod;                                                \
                                                                              \
    uint8_t pad1[56];                                                         \
                                                                              \
    RING_IDX out_cons, out_prod;                                              \
                                                                              \
    uint8_t pad2[56];                                                         \
                                                                              \
    RING_IDX ring_order;                                                      \
    grant_ref_t ref[];                                                        \
};                                                                            \
DEFINE_XEN_FLEX_RING(name)

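/*
 * A rough sketch of how the generated helpers fit together, for a
 * hypothetical protocol declared with DEFINE_XEN_FLEX_RING_AND_INTF(myproto)
 * (names and the chosen direction are illustrative only; real users such as
 * the 9pfs and pvcalls transports layer their own rules on top). Reading
 * "len" bytes from the "in" ring might look like:
 *
 *     RING_IDX cons = intf->in_cons, prod = intf->in_prod;
 *     RING_IDX ring_size = XEN_FLEX_RING_SIZE(intf->ring_order);
 *     RING_IDX masked_prod, masked_cons;
 *
 *     virt_rmb();                    (read indexes before ring contents)
 *     if (myproto_queued(prod, cons, ring_size) >= len) {
 *         masked_prod = myproto_mask(prod, ring_size);
 *         masked_cons = myproto_mask(cons, ring_size);
 *         myproto_read_packet(dst, data.in, len, masked_prod,
 *                             &masked_cons, ring_size);
 *         virt_mb();                 (finish reading before moving cons)
 *         intf->in_cons = cons + len;
 *     }
 *
 * Writing is symmetric: check free space with
 * ring_size - myproto_queued(prod, cons, ring_size), call
 * myproto_write_packet(), then publish the new producer index after a
 * write barrier.
 */
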
#endif /* __XEN_PUBLIC_IO_RING_H__ */