/* SPDX-License-Identifier: GPL-2.0 */
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XSK_QUEUE_H
#define _LINUX_XSK_QUEUE_H

#include <linux/types.h>
#include <linux/if_xdp.h>
#include <net/xdp_sock.h>
#include <net/xsk_buff_pool.h>

#include "xsk.h"

struct xdp_ring {
        u32 producer ____cacheline_aligned_in_smp;
        /* Hinder the adjacent cache prefetcher from prefetching the consumer
         * pointer when the producer pointer is touched, and vice versa.
         */
        u32 pad1 ____cacheline_aligned_in_smp;
        u32 consumer ____cacheline_aligned_in_smp;
        u32 pad2 ____cacheline_aligned_in_smp;
        u32 flags;
        u32 pad3 ____cacheline_aligned_in_smp;
};

/* Used for the RX and TX queues for packets */
struct xdp_rxtx_ring {
        struct xdp_ring ptrs;
        struct xdp_desc desc[] ____cacheline_aligned_in_smp;
};

/* Used for the fill and completion queues for buffers */
struct xdp_umem_ring {
        struct xdp_ring ptrs;
        u64 desc[] ____cacheline_aligned_in_smp;
};

struct xsk_queue {
        u32 ring_mask;
        u32 nentries;
        u32 cached_prod;
        u32 cached_cons;
        struct xdp_ring *ring;
        u64 invalid_descs;
        u64 queue_empty_descs;
};

/* The structure of the shared state of the rings is the same as the
 * ring buffer in kernel/events/ring_buffer.c. For the Rx and completion
 * rings, the kernel is the producer and user space is the consumer. For
 * the Tx and fill rings, the kernel is the consumer and user space is
 * the producer.
 *
 * producer                         consumer
 *
 * if (LOAD ->consumer) {           LOAD ->producer
 *                    (A)           smp_rmb()       (C)
 *    STORE $data                   LOAD $data
 *    smp_wmb()       (B)           smp_mb()        (D)
 *    STORE ->producer              STORE ->consumer
 * }
 *
 * (A) pairs with (D), and (B) pairs with (C).
 *
 * Starting with (B), it protects the data from being written after
 * the producer pointer. If this barrier was missing, the consumer
 * could observe the producer pointer being set and thus load the data
 * before the producer has written the new data. The consumer would in
 * this case load the old data.
 *
 * (C) protects the consumer from speculatively loading the data before
 * the producer pointer actually has been read. If we did not have this
 * barrier, some architectures could load old data, as speculative loads
 * are not discarded since the CPU does not know there is a dependency
 * between ->producer and the data.
 *
 * (A) is a control dependency that separates the load of ->consumer
 * from the stores of $data. If ->consumer indicates there is no room
 * in the buffer, we do not store $data at all. So no explicit barrier
 * is needed.
 *
 * (D) ensures that the load of the data is observed to happen before
 * the store of the consumer pointer. If we did not have this memory
 * barrier, the producer could observe the consumer pointer being set
 * and overwrite the data with a new value before the consumer got the
 * chance to read the old value. The consumer would thus miss reading
 * the old entry and very likely read the new entry twice, once right
 * now and again after circling through the ring.
 */
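
/* As a minimal illustration of the pairing above (a hypothetical sketch,
 * not used anywhere below; the real implementations are
 * __xskq_prod_submit(), __xskq_cons_peek() and __xskq_cons_release()),
 * a producer and a consumer of a single entry in an xdp_umem_ring could
 * look like this, assuming a power-of-two nentries:
 */
static inline bool xskq_example_produce_one(struct xdp_umem_ring *ring,
                                            u32 nentries, u64 addr)
{
        u32 prod = ring->ptrs.producer;
        u32 cons = READ_ONCE(ring->ptrs.consumer);     /* LOAD ->consumer (A) */

        if (prod - cons == nentries)                   /* ring full, do not store */
                return false;

        ring->desc[prod & (nentries - 1)] = addr;      /* STORE $data */
        smp_wmb();                                     /* B, matches C */
        WRITE_ONCE(ring->ptrs.producer, prod + 1);     /* STORE ->producer */
        return true;
}

static inline bool xskq_example_consume_one(struct xdp_umem_ring *ring,
                                            u32 nentries, u64 *addr)
{
        u32 cons = ring->ptrs.consumer;
        u32 prod = READ_ONCE(ring->ptrs.producer);     /* LOAD ->producer */

        if (cons == prod)                              /* ring empty */
                return false;

        smp_rmb();                                     /* C, matches B */
        *addr = ring->desc[cons & (nentries - 1)];     /* LOAD $data */
        smp_mb();                                      /* D, matches A */
        WRITE_ONCE(ring->ptrs.consumer, cons + 1);     /* STORE ->consumer */
        return true;
}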

/* The operations on the rings are the following:
 *
 * producer                           consumer
 *
 * RESERVE entries                    PEEK in the ring for entries
 * WRITE data into the ring           READ data from the ring
 * SUBMIT entries                     RELEASE entries
 *
 * The producer reserves one or more entries in the ring. It can then
 * fill in these entries and finally submit them so that they can be
 * seen and read by the consumer.
 *
 * The consumer peeks into the ring to see if the producer has written
 * any new entries. If so, the consumer can then read these entries
 * and, when it is done reading them, release them back to the producer
 * so that the producer can use these slots to fill in new entries.
 *
 * The function names below reflect these operations.
 */

/* Functions that read and validate content from consumer rings. */

static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
        struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

        if (q->cached_cons != q->cached_prod) {
                u32 idx = q->cached_cons & q->ring_mask;

                *addr = ring->desc[idx];
                return true;
        }

        return false;
}

static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
                                            struct xdp_desc *desc)
{
        u64 chunk, chunk_end;

        chunk = xp_aligned_extract_addr(pool, desc->addr);
        chunk_end = xp_aligned_extract_addr(pool, desc->addr + desc->len);
        if (chunk != chunk_end)
                return false;

        if (chunk >= pool->addrs_cnt)
                return false;

        if (desc->options)
                return false;
        return true;
}

static inline bool xp_unaligned_validate_desc(struct xsk_buff_pool *pool,
                                              struct xdp_desc *desc)
{
        u64 addr, base_addr;

        base_addr = xp_unaligned_extract_addr(desc->addr);
        addr = xp_unaligned_add_offset_to_addr(desc->addr);

        if (desc->len > pool->chunk_size)
                return false;

        if (base_addr >= pool->addrs_cnt || addr >= pool->addrs_cnt ||
            xp_desc_crosses_non_contig_pg(pool, addr, desc->len))
                return false;

        if (desc->options)
                return false;
        return true;
}

static inline bool xp_validate_desc(struct xsk_buff_pool *pool,
                                    struct xdp_desc *desc)
{
        return pool->unaligned ? xp_unaligned_validate_desc(pool, desc) :
                                 xp_aligned_validate_desc(pool, desc);
}
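
/* A hypothetical sketch (not part of the API; the function name and the
 * concrete numbers are made up for illustration): assuming an aligned pool
 * (pool->unaligned == false) with a power-of-two chunk_size of 2048 and a
 * UMEM larger than three chunks, the checks above accept a frame that stays
 * within one chunk and reject one that crosses a chunk boundary.
 */
static inline bool xskq_example_validate(struct xsk_buff_pool *pool)
{
        /* Fits entirely within chunk 3: accepted. */
        struct xdp_desc fits = { .addr = 3 * 2048, .len = 1500 };
        /* Starts 1024 bytes into chunk 3 and spills into chunk 4: rejected. */
        struct xdp_desc crosses = { .addr = 3 * 2048 + 1024, .len = 1500 };

        return xp_validate_desc(pool, &fits) && !xp_validate_desc(pool, &crosses);
}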

static inline bool xskq_cons_is_valid_desc(struct xsk_queue *q,
                                           struct xdp_desc *d,
                                           struct xsk_buff_pool *pool)
{
        if (!xp_validate_desc(pool, d)) {
                q->invalid_descs++;
                return false;
        }
        return true;
}

static inline bool xskq_cons_read_desc(struct xsk_queue *q,
                                       struct xdp_desc *desc,
                                       struct xsk_buff_pool *pool)
{
        while (q->cached_cons != q->cached_prod) {
                struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
                u32 idx = q->cached_cons & q->ring_mask;

                *desc = ring->desc[idx];
                if (xskq_cons_is_valid_desc(q, desc, pool))
                        return true;

                q->cached_cons++;
        }

        return false;
}

static inline u32 xskq_cons_read_desc_batch(struct xsk_queue *q,
                                            struct xdp_desc *descs,
                                            struct xsk_buff_pool *pool, u32 max)
{
        u32 cached_cons = q->cached_cons, nb_entries = 0;

        while (cached_cons != q->cached_prod && nb_entries < max) {
                struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
                u32 idx = cached_cons & q->ring_mask;

                descs[nb_entries] = ring->desc[idx];
                if (unlikely(!xskq_cons_is_valid_desc(q, &descs[nb_entries], pool))) {
                        /* Skip the entry */
                        cached_cons++;
                        continue;
                }

                nb_entries++;
                cached_cons++;
        }

        return nb_entries;
}

/* Functions for consumers */

static inline void __xskq_cons_release(struct xsk_queue *q)
{
        smp_mb(); /* D, matches A */
        WRITE_ONCE(q->ring->consumer, q->cached_cons);
}

static inline void __xskq_cons_peek(struct xsk_queue *q)
{
        /* Refresh the local pointer */
        q->cached_prod = READ_ONCE(q->ring->producer);
        smp_rmb(); /* C, matches B */
}

static inline void xskq_cons_get_entries(struct xsk_queue *q)
{
        __xskq_cons_release(q);
        __xskq_cons_peek(q);
}

static inline u32 xskq_cons_nb_entries(struct xsk_queue *q, u32 max)
{
        u32 entries = q->cached_prod - q->cached_cons;

        if (entries >= max)
                return max;

        __xskq_cons_peek(q);
        entries = q->cached_prod - q->cached_cons;

        return entries >= max ? max : entries;
}

static inline bool xskq_cons_has_entries(struct xsk_queue *q, u32 cnt)
{
        return xskq_cons_nb_entries(q, cnt) >= cnt ? true : false;
}

static inline bool xskq_cons_peek_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
        if (q->cached_prod == q->cached_cons)
                xskq_cons_get_entries(q);
        return xskq_cons_read_addr_unchecked(q, addr);
}

static inline bool xskq_cons_peek_desc(struct xsk_queue *q,
                                       struct xdp_desc *desc,
                                       struct xsk_buff_pool *pool)
{
        if (q->cached_prod == q->cached_cons)
                xskq_cons_get_entries(q);
        return xskq_cons_read_desc(q, desc, pool);
}

static inline u32 xskq_cons_peek_desc_batch(struct xsk_queue *q, struct xdp_desc *descs,
                                            struct xsk_buff_pool *pool, u32 max)
{
        u32 entries = xskq_cons_nb_entries(q, max);

        return xskq_cons_read_desc_batch(q, descs, pool, entries);
}

/* To improve performance in the xskq_cons_release functions, only update local state here.
 * Reflect this to global state when we get new entries from the ring in
 * xskq_cons_get_entries() and whenever Rx or Tx processing is completed in the NAPI loop.
 */
static inline void xskq_cons_release(struct xsk_queue *q)
{
        q->cached_cons++;
}

static inline void xskq_cons_release_n(struct xsk_queue *q, u32 cnt)
{
        q->cached_cons += cnt;
}
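
/* A hypothetical consumer-side sketch (not part of the API; the names
 * xskq_example_drain_tx() and xmit_one() are made up for illustration):
 * draining Tx descriptors produced by user space follows the
 * PEEK/READ/RELEASE steps described at the top of this file.
 */
static inline u32 xskq_example_drain_tx(struct xsk_queue *tx,
                                        struct xsk_buff_pool *pool,
                                        u32 budget)
{
        struct xdp_desc desc;
        u32 sent = 0;

        while (sent < budget && xskq_cons_peek_desc(tx, &desc, pool)) {
                /* READ: desc.addr and desc.len now describe one frame. A
                 * real driver would map and transmit it here, e.g.
                 * xmit_one(desc.addr, desc.len).
                 */
                xskq_cons_release(tx); /* RELEASE: local state only */
                sent++;
        }

        /* Make the released entries visible to the user-space producer. */
        __xskq_cons_release(tx);

        return sent;
}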

static inline bool xskq_cons_is_full(struct xsk_queue *q)
{
        /* No barriers needed since data is not accessed */
        return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer) ==
               q->nentries;
}

static inline u32 xskq_cons_present_entries(struct xsk_queue *q)
{
        /* No barriers needed since data is not accessed */
        return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer);
}

/* Functions for producers */

static inline u32 xskq_prod_nb_free(struct xsk_queue *q, u32 max)
{
        u32 free_entries = q->nentries - (q->cached_prod - q->cached_cons);

        if (free_entries >= max)
                return max;

        /* Refresh the local tail pointer */
        q->cached_cons = READ_ONCE(q->ring->consumer);
        free_entries = q->nentries - (q->cached_prod - q->cached_cons);

        return free_entries >= max ? max : free_entries;
}

static inline bool xskq_prod_is_full(struct xsk_queue *q)
{
        return xskq_prod_nb_free(q, 1) ? false : true;
}

static inline int xskq_prod_reserve(struct xsk_queue *q)
{
        if (xskq_prod_is_full(q))
                return -ENOSPC;

        /* A, matches D */
        q->cached_prod++;
        return 0;
}

static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr)
{
        struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

        if (xskq_prod_is_full(q))
                return -ENOSPC;

        /* A, matches D */
        ring->desc[q->cached_prod++ & q->ring_mask] = addr;
        return 0;
}

static inline u32 xskq_prod_reserve_addr_batch(struct xsk_queue *q, struct xdp_desc *descs,
                                               u32 max)
{
        struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
        u32 nb_entries, i, cached_prod;

        nb_entries = xskq_prod_nb_free(q, max);

        /* A, matches D */
        cached_prod = q->cached_prod;
        for (i = 0; i < nb_entries; i++)
                ring->desc[cached_prod++ & q->ring_mask] = descs[i].addr;
        q->cached_prod = cached_prod;

        return nb_entries;
}

static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
                                         u64 addr, u32 len)
{
        struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
        u32 idx;

        if (xskq_prod_is_full(q))
                return -ENOSPC;

        /* A, matches D */
        idx = q->cached_prod++ & q->ring_mask;
        ring->desc[idx].addr = addr;
        ring->desc[idx].len = len;

        return 0;
}

static inline void __xskq_prod_submit(struct xsk_queue *q, u32 idx)
{
        smp_wmb(); /* B, matches C */

        WRITE_ONCE(q->ring->producer, idx);
}

static inline void xskq_prod_submit(struct xsk_queue *q)
{
        __xskq_prod_submit(q, q->cached_prod);
}

static inline void xskq_prod_submit_addr(struct xsk_queue *q, u64 addr)
{
        struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
        u32 idx = q->ring->producer;

        ring->desc[idx++ & q->ring_mask] = addr;

        __xskq_prod_submit(q, idx);
}

static inline void xskq_prod_submit_n(struct xsk_queue *q, u32 nb_entries)
{
        __xskq_prod_submit(q, q->ring->producer + nb_entries);
}

static inline bool xskq_prod_is_empty(struct xsk_queue *q)
{
        /* No barriers needed since data is not accessed */
        return READ_ONCE(q->ring->consumer) == READ_ONCE(q->ring->producer);
}
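
/* A hypothetical producer-side sketch (not part of the API; the name
 * xskq_example_post_rx() is made up for illustration): posting a received
 * frame to the Rx ring follows the RESERVE/WRITE/SUBMIT steps described at
 * the top of this file, where xskq_prod_reserve_desc() performs RESERVE and
 * WRITE in one go.
 */
static inline int xskq_example_post_rx(struct xsk_queue *rx, u64 addr, u32 len)
{
        int err;

        /* RESERVE one entry and WRITE the descriptor into it. Only the
         * local cached_prod is advanced, so nothing is visible to user
         * space yet.
         */
        err = xskq_prod_reserve_desc(rx, addr, len);
        if (err)
                return err; /* -ENOSPC: user space has not released entries */

        /* SUBMIT: publish the new producer pointer (with barrier B) so the
         * user-space consumer can see and read the descriptor.
         */
        xskq_prod_submit(rx);
        return 0;
}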

/* For both producers and consumers */

static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
{
        return q ? q->invalid_descs : 0;
}

static inline u64 xskq_nb_queue_empty_descs(struct xsk_queue *q)
{
        return q ? q->queue_empty_descs : 0;
}

struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q_ops);

#endif /* _LINUX_XSK_QUEUE_H */