1 /* SPDX-License-Identifier: MIT */ 2 /****************************************************************************** 3 * ring.h 4 * 5 * Shared producer-consumer ring macros. 6 * 7 * Tim Deegan and Andrew Warfield November 2004. 8 */ 9 10 #ifndef __XEN_PUBLIC_IO_RING_H__ 11 #define __XEN_PUBLIC_IO_RING_H__ 12 13 /* 14 * When #include'ing this header, you need to provide the following 15 * declaration upfront: 16 * - standard integers types (uint8_t, uint16_t, etc) 17 * They are provided by stdint.h of the standard headers. 18 * 19 * In addition, if you intend to use the FLEX macros, you also need to 20 * provide the following, before invoking the FLEX macros: 21 * - size_t 22 * - memcpy 23 * - grant_ref_t 24 * These declarations are provided by string.h of the standard headers, 25 * and grant_table.h from the Xen public headers. 26 */ 27 28 #include "../xen-compat.h" 29 30 #if __XEN_INTERFACE_VERSION__ < 0x00030208 31 #define xen_mb() mb() 32 #define xen_rmb() rmb() 33 #define xen_wmb() wmb() 34 #endif 35 36 typedef unsigned int RING_IDX; 37 38 /* Round a 32-bit unsigned constant down to the nearest power of two. */ 39 #define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1)) 40 #define __RD4(_x) (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2 : __RD2(_x)) 41 #define __RD8(_x) (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4 : __RD4(_x)) 42 #define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x)) 43 #define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x)) 44 45 /* 46 * Calculate size of a shared ring, given the total available space for the 47 * ring and indexes (_sz), and the name tag of the request/response structure. 48 * A ring contains as many entries as will fit, rounded down to the nearest 49 * power of two (so we can mask with (size-1) to loop around). 50 */ 51 #define __CONST_RING_SIZE(_s, _sz) \ 52 (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \ 53 sizeof(((struct _s##_sring *)0)->ring[0]))) 54 /* 55 * The same for passing in an actual pointer instead of a name tag. 56 */ 57 #define __RING_SIZE(_s, _sz) \ 58 (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0]))) 59 60 /* 61 * Macros to make the correct C datatypes for a new kind of ring. 62 * 63 * To make a new ring datatype, you need to have two message structures, 64 * let's say request_t, and response_t already defined. 65 * 66 * In a header where you want the ring datatype declared, you then do: 67 * 68 * DEFINE_RING_TYPES(mytag, request_t, response_t); 69 * 70 * These expand out to give you a set of types, as you can see below. 71 * The most important of these are: 72 * 73 * mytag_sring_t - The shared ring. 74 * mytag_front_ring_t - The 'front' half of the ring. 75 * mytag_back_ring_t - The 'back' half of the ring. 76 * 77 * To initialize a ring in your code you need to know the location and size 78 * of the shared memory area (PAGE_SIZE, for instance). To initialise 79 * the front half: 80 * 81 * mytag_front_ring_t ring; 82 * XEN_FRONT_RING_INIT(&ring, (mytag_sring_t *)shared_page, PAGE_SIZE); 83 * 84 * Initializing the back follows similarly (note that only the front 85 * initializes the shared ring): 86 * 87 * mytag_back_ring_t back_ring; 88 * BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE); 89 */ 90 91 #define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \ 92 \ 93 /* Shared ring entry */ \ 94 union __name##_sring_entry { \ 95 __req_t req; \ 96 __rsp_t rsp; \ 97 }; \ 98 \ 99 /* Shared ring page */ \ 100 struct __name##_sring { \ 101 RING_IDX req_prod, req_event; \ 102 RING_IDX rsp_prod, rsp_event; \ 103 union { \ 104 struct { \ 105 uint8_t smartpoll_active; \ 106 } netif; \ 107 struct { \ 108 uint8_t msg; \ 109 } tapif_user; \ 110 uint8_t pvt_pad[4]; \ 111 } pvt; \ 112 uint8_t __pad[44]; \ 113 union __name##_sring_entry ring[1]; /* variable-length */ \ 114 }; \ 115 \ 116 /* "Front" end's private variables */ \ 117 struct __name##_front_ring { \ 118 RING_IDX req_prod_pvt; \ 119 RING_IDX rsp_cons; \ 120 unsigned int nr_ents; \ 121 struct __name##_sring *sring; \ 122 }; \ 123 \ 124 /* "Back" end's private variables */ \ 125 struct __name##_back_ring { \ 126 RING_IDX rsp_prod_pvt; \ 127 RING_IDX req_cons; \ 128 unsigned int nr_ents; \ 129 struct __name##_sring *sring; \ 130 }; \ 131 \ 132 /* Syntactic sugar */ \ 133 typedef struct __name##_sring __name##_sring_t; \ 134 typedef struct __name##_front_ring __name##_front_ring_t; \ 135 typedef struct __name##_back_ring __name##_back_ring_t 136 137 /* 138 * Macros for manipulating rings. 139 * 140 * FRONT_RING_whatever works on the "front end" of a ring: here 141 * requests are pushed on to the ring and responses taken off it. 142 * 143 * BACK_RING_whatever works on the "back end" of a ring: here 144 * requests are taken off the ring and responses put on. 145 * 146 * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL. 147 * This is OK in 1-for-1 request-response situations where the 148 * requestor (front end) never has more than RING_SIZE()-1 149 * outstanding requests. 150 */ 151 152 /* Initialising empty rings */ 153 #define SHARED_RING_INIT(_s) do { \ 154 (_s)->req_prod = (_s)->rsp_prod = 0; \ 155 (_s)->req_event = (_s)->rsp_event = 1; \ 156 (void)memset((_s)->pvt.pvt_pad, 0, sizeof((_s)->pvt.pvt_pad)); \ 157 (void)memset((_s)->__pad, 0, sizeof((_s)->__pad)); \ 158 } while(0) 159 160 #define FRONT_RING_ATTACH(_r, _s, _i, __size) do { \ 161 (_r)->req_prod_pvt = (_i); \ 162 (_r)->rsp_cons = (_i); \ 163 (_r)->nr_ents = __RING_SIZE(_s, __size); \ 164 (_r)->sring = (_s); \ 165 } while (0) 166 167 #define FRONT_RING_INIT(_r, _s, __size) FRONT_RING_ATTACH(_r, _s, 0, __size) 168 169 #define XEN_FRONT_RING_INIT(r, s, size) do { \ 170 SHARED_RING_INIT(s); \ 171 FRONT_RING_INIT(r, s, size); \ 172 } while (0) 173 174 #define BACK_RING_ATTACH(_r, _s, _i, __size) do { \ 175 (_r)->rsp_prod_pvt = (_i); \ 176 (_r)->req_cons = (_i); \ 177 (_r)->nr_ents = __RING_SIZE(_s, __size); \ 178 (_r)->sring = (_s); \ 179 } while (0) 180 181 #define BACK_RING_INIT(_r, _s, __size) BACK_RING_ATTACH(_r, _s, 0, __size) 182 183 /* How big is this ring? */ 184 #define RING_SIZE(_r) \ 185 ((_r)->nr_ents) 186 187 /* Number of free requests (for use on front side only). */ 188 #define RING_FREE_REQUESTS(_r) \ 189 (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons)) 190 191 /* Test if there is an empty slot available on the front ring. 192 * (This is only meaningful from the front. ) 193 */ 194 #define RING_FULL(_r) \ 195 (RING_FREE_REQUESTS(_r) == 0) 196 197 /* Test if there are outstanding messages to be processed on a ring. */ 198 #define XEN_RING_NR_UNCONSUMED_RESPONSES(_r) \ 199 ((_r)->sring->rsp_prod - (_r)->rsp_cons) 200 201 #ifdef __GNUC__ 202 #define XEN_RING_NR_UNCONSUMED_REQUESTS(_r) ({ \ 203 unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \ 204 unsigned int rsp = RING_SIZE(_r) - \ 205 ((_r)->req_cons - (_r)->rsp_prod_pvt); \ 206 req < rsp ? req : rsp; \ 207 }) 208 #else 209 /* Same as above, but without the nice GCC ({ ... }) syntax. */ 210 #define XEN_RING_NR_UNCONSUMED_REQUESTS(_r) \ 211 ((((_r)->sring->req_prod - (_r)->req_cons) < \ 212 (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) ? \ 213 ((_r)->sring->req_prod - (_r)->req_cons) : \ 214 (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) 215 #endif 216 217 #ifdef XEN_RING_HAS_UNCONSUMED_IS_BOOL 218 /* 219 * These variants should only be used in case no caller is abusing them for 220 * obtaining the number of unconsumed responses/requests. 221 */ 222 #define RING_HAS_UNCONSUMED_RESPONSES(_r) \ 223 (!!XEN_RING_NR_UNCONSUMED_RESPONSES(_r)) 224 #define RING_HAS_UNCONSUMED_REQUESTS(_r) \ 225 (!!XEN_RING_NR_UNCONSUMED_REQUESTS(_r)) 226 #else 227 #define RING_HAS_UNCONSUMED_RESPONSES(_r) XEN_RING_NR_UNCONSUMED_RESPONSES(_r) 228 #define RING_HAS_UNCONSUMED_REQUESTS(_r) XEN_RING_NR_UNCONSUMED_REQUESTS(_r) 229 #endif 230 231 /* Direct access to individual ring elements, by index. */ 232 #define RING_GET_REQUEST(_r, _idx) \ 233 (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req)) 234 235 #define RING_GET_RESPONSE(_r, _idx) \ 236 (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp)) 237 238 /* 239 * Get a local copy of a request/response. 240 * 241 * Use this in preference to RING_GET_{REQUEST,RESPONSE}() so all processing is 242 * done on a local copy that cannot be modified by the other end. 243 * 244 * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this 245 * to be ineffective where dest is a struct which consists of only bitfields. 246 */ 247 #define RING_COPY_(type, r, idx, dest) do { \ 248 /* Use volatile to force the copy into dest. */ \ 249 *(dest) = *(volatile __typeof__(dest))RING_GET_##type(r, idx); \ 250 } while (0) 251 252 #define RING_COPY_REQUEST(r, idx, req) RING_COPY_(REQUEST, r, idx, req) 253 #define RING_COPY_RESPONSE(r, idx, rsp) RING_COPY_(RESPONSE, r, idx, rsp) 254 255 /* Loop termination condition: Would the specified index overflow the ring? */ 256 #define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \ 257 (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r)) 258 259 /* Ill-behaved frontend determination: Can there be this many requests? */ 260 #define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \ 261 (((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r)) 262 263 /* Ill-behaved backend determination: Can there be this many responses? */ 264 #define RING_RESPONSE_PROD_OVERFLOW(_r, _prod) \ 265 (((_prod) - (_r)->rsp_cons) > RING_SIZE(_r)) 266 267 #define RING_PUSH_REQUESTS(_r) do { \ 268 xen_wmb(); /* back sees requests /before/ updated producer index */ \ 269 (_r)->sring->req_prod = (_r)->req_prod_pvt; \ 270 } while (0) 271 272 #define RING_PUSH_RESPONSES(_r) do { \ 273 xen_wmb(); /* front sees resps /before/ updated producer index */ \ 274 (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \ 275 } while (0) 276 277 /* 278 * Notification hold-off (req_event and rsp_event): 279 * 280 * When queueing requests or responses on a shared ring, it may not always be 281 * necessary to notify the remote end. For example, if requests are in flight 282 * in a backend, the front may be able to queue further requests without 283 * notifying the back (if the back checks for new requests when it queues 284 * responses). 285 * 286 * When enqueuing requests or responses: 287 * 288 * Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument 289 * is a boolean return value. True indicates that the receiver requires an 290 * asynchronous notification. 291 * 292 * After dequeuing requests or responses (before sleeping the connection): 293 * 294 * Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES(). 295 * The second argument is a boolean return value. True indicates that there 296 * are pending messages on the ring (i.e., the connection should not be put 297 * to sleep). 298 * 299 * These macros will set the req_event/rsp_event field to trigger a 300 * notification on the very next message that is enqueued. If you want to 301 * create batches of work (i.e., only receive a notification after several 302 * messages have been enqueued) then you will need to create a customised 303 * version of the FINAL_CHECK macro in your own code, which sets the event 304 * field appropriately. 305 */ 306 307 #define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \ 308 RING_IDX __old = (_r)->sring->req_prod; \ 309 RING_IDX __new = (_r)->req_prod_pvt; \ 310 xen_wmb(); /* back sees requests /before/ updated producer index */ \ 311 (_r)->sring->req_prod = __new; \ 312 xen_mb(); /* back sees new requests /before/ we check req_event */ \ 313 (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \ 314 (RING_IDX)(__new - __old)); \ 315 } while (0) 316 317 #define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \ 318 RING_IDX __old = (_r)->sring->rsp_prod; \ 319 RING_IDX __new = (_r)->rsp_prod_pvt; \ 320 xen_wmb(); /* front sees resps /before/ updated producer index */ \ 321 (_r)->sring->rsp_prod = __new; \ 322 xen_mb(); /* front sees new resps /before/ we check rsp_event */ \ 323 (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \ 324 (RING_IDX)(__new - __old)); \ 325 } while (0) 326 327 #define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \ 328 (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \ 329 if (_work_to_do) break; \ 330 (_r)->sring->req_event = (_r)->req_cons + 1; \ 331 xen_mb(); \ 332 (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \ 333 } while (0) 334 335 #define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \ 336 (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \ 337 if (_work_to_do) break; \ 338 (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \ 339 xen_mb(); \ 340 (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \ 341 } while (0) 342 343 344 /* 345 * DEFINE_XEN_FLEX_RING_AND_INTF defines two monodirectional rings and 346 * functions to check if there is data on the ring, and to read and 347 * write to them. 348 * 349 * DEFINE_XEN_FLEX_RING is similar to DEFINE_XEN_FLEX_RING_AND_INTF, but 350 * does not define the indexes page. As different protocols can have 351 * extensions to the basic format, this macro allow them to define their 352 * own struct. 353 * 354 * XEN_FLEX_RING_SIZE 355 * Convenience macro to calculate the size of one of the two rings 356 * from the overall order. 357 * 358 * $NAME_mask 359 * Function to apply the size mask to an index, to reduce the index 360 * within the range [0-size]. 361 * 362 * $NAME_read_packet 363 * Function to read data from the ring. The amount of data to read is 364 * specified by the "size" argument. 365 * 366 * $NAME_write_packet 367 * Function to write data to the ring. The amount of data to write is 368 * specified by the "size" argument. 369 * 370 * $NAME_get_ring_ptr 371 * Convenience function that returns a pointer to read/write to the 372 * ring at the right location. 373 * 374 * $NAME_data_intf 375 * Indexes page, shared between frontend and backend. It also 376 * contains the array of grant refs. 377 * 378 * $NAME_queued 379 * Function to calculate how many bytes are currently on the ring, 380 * ready to be read. It can also be used to calculate how much free 381 * space is currently on the ring (XEN_FLEX_RING_SIZE() - 382 * $NAME_queued()). 383 */ 384 385 #ifndef XEN_PAGE_SHIFT 386 /* The PAGE_SIZE for ring protocols and hypercall interfaces is always 387 * 4K, regardless of the architecture, and page granularity chosen by 388 * operating systems. 389 */ 390 #define XEN_PAGE_SHIFT 12 391 #endif 392 #define XEN_FLEX_RING_SIZE(order) \ 393 (1UL << ((order) + XEN_PAGE_SHIFT - 1)) 394 395 #define DEFINE_XEN_FLEX_RING(name) \ 396 static inline RING_IDX name##_mask(RING_IDX idx, RING_IDX ring_size) \ 397 { \ 398 return idx & (ring_size - 1); \ 399 } \ 400 \ 401 static inline unsigned char *name##_get_ring_ptr(unsigned char *buf, \ 402 RING_IDX idx, \ 403 RING_IDX ring_size) \ 404 { \ 405 return buf + name##_mask(idx, ring_size); \ 406 } \ 407 \ 408 static inline void name##_read_packet(void *opaque, \ 409 const unsigned char *buf, \ 410 size_t size, \ 411 RING_IDX masked_prod, \ 412 RING_IDX *masked_cons, \ 413 RING_IDX ring_size) \ 414 { \ 415 if (*masked_cons < masked_prod || \ 416 size <= ring_size - *masked_cons) { \ 417 memcpy(opaque, buf + *masked_cons, size); \ 418 } else { \ 419 memcpy(opaque, buf + *masked_cons, ring_size - *masked_cons); \ 420 memcpy((unsigned char *)opaque + ring_size - *masked_cons, buf, \ 421 size - (ring_size - *masked_cons)); \ 422 } \ 423 *masked_cons = name##_mask(*masked_cons + size, ring_size); \ 424 } \ 425 \ 426 static inline void name##_write_packet(unsigned char *buf, \ 427 const void *opaque, \ 428 size_t size, \ 429 RING_IDX *masked_prod, \ 430 RING_IDX masked_cons, \ 431 RING_IDX ring_size) \ 432 { \ 433 if (*masked_prod < masked_cons || \ 434 size <= ring_size - *masked_prod) { \ 435 memcpy(buf + *masked_prod, opaque, size); \ 436 } else { \ 437 memcpy(buf + *masked_prod, opaque, ring_size - *masked_prod); \ 438 memcpy(buf, (unsigned char *)opaque + (ring_size - *masked_prod), \ 439 size - (ring_size - *masked_prod)); \ 440 } \ 441 *masked_prod = name##_mask(*masked_prod + size, ring_size); \ 442 } \ 443 \ 444 static inline RING_IDX name##_queued(RING_IDX prod, \ 445 RING_IDX cons, \ 446 RING_IDX ring_size) \ 447 { \ 448 RING_IDX size; \ 449 \ 450 if (prod == cons) \ 451 return 0; \ 452 \ 453 prod = name##_mask(prod, ring_size); \ 454 cons = name##_mask(cons, ring_size); \ 455 \ 456 if (prod == cons) \ 457 return ring_size; \ 458 \ 459 if (prod > cons) \ 460 size = prod - cons; \ 461 else \ 462 size = ring_size - (cons - prod); \ 463 return size; \ 464 } \ 465 \ 466 struct name##_data { \ 467 unsigned char *in; /* half of the allocation */ \ 468 unsigned char *out; /* half of the allocation */ \ 469 } 470 471 #define DEFINE_XEN_FLEX_RING_AND_INTF(name) \ 472 struct name##_data_intf { \ 473 RING_IDX in_cons, in_prod; \ 474 \ 475 uint8_t pad1[56]; \ 476 \ 477 RING_IDX out_cons, out_prod; \ 478 \ 479 uint8_t pad2[56]; \ 480 \ 481 RING_IDX ring_order; \ 482 grant_ref_t ref[]; \ 483 }; \ 484 DEFINE_XEN_FLEX_RING(name) 485 486 #endif /* __XEN_PUBLIC_IO_RING_H__ */ 487 488 /* 489 * Local variables: 490 * mode: C 491 * c-file-style: "BSD" 492 * c-basic-offset: 4 493 * tab-width: 4 494 * indent-tabs-mode: nil 495 * End: 496 */ 497