/* SPDX-License-Identifier: GPL-2.0 */
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XSK_QUEUE_H
#define _LINUX_XSK_QUEUE_H

#include <linux/types.h>
#include <linux/if_xdp.h>
#include <net/xdp_sock.h>
#include <net/xsk_buff_pool.h>

#include "xsk.h"

struct xdp_ring {
	u32 producer ____cacheline_aligned_in_smp;
	/* Hinder the adjacent cache prefetcher from prefetching the consumer
	 * pointer when the producer pointer is touched and vice versa.
	 */
	u32 pad ____cacheline_aligned_in_smp;
	u32 consumer ____cacheline_aligned_in_smp;
	u32 flags;
};

/* Used for the RX and TX queues for packets */
struct xdp_rxtx_ring {
	struct xdp_ring ptrs;
	struct xdp_desc desc[] ____cacheline_aligned_in_smp;
};

/* Used for the fill and completion queues for buffers */
struct xdp_umem_ring {
	struct xdp_ring ptrs;
	u64 desc[] ____cacheline_aligned_in_smp;
};

struct xsk_queue {
	u32 ring_mask;
	u32 nentries;
	u32 cached_prod;
	u32 cached_cons;
	struct xdp_ring *ring;
	u64 invalid_descs;
	u64 queue_empty_descs;
};

/* The structure of the shared state of the rings is the same as the
 * ring buffer in kernel/events/ring_buffer.c. For the Rx and completion
 * ring, the kernel is the producer and user space is the consumer. For
 * the Tx and fill rings, the kernel is the consumer and user space is
 * the producer.
 *
 * producer                         consumer
 *
 * if (LOAD ->consumer) {           LOAD ->producer
 *                    (A)           smp_rmb()       (C)
 *    STORE $data                   LOAD $data
 *    smp_wmb()       (B)           smp_mb()        (D)
 *    STORE ->producer              STORE ->consumer
 * }
 *
 * (A) pairs with (D), and (B) pairs with (C).
 *
 * Starting with (B), it protects the data from being written after
 * the producer pointer. If this barrier was missing, the consumer
 * could observe the producer pointer being set and thus load the data
 * before the producer has written the new data. The consumer would in
 * this case load the old data.
 *
 * (C) protects the consumer from speculatively loading the data before
 * the producer pointer actually has been read. If we do not have this
 * barrier, some architectures could load old data as speculative loads
 * are not discarded as the CPU does not know there is a dependency
 * between ->producer and data.
 *
 * (A) is a control dependency that separates the load of ->consumer
 * from the stores of $data. In case ->consumer indicates there is no
 * room in the buffer to store $data, we do not store it. So no barrier
 * is needed.
 *
 * (D) protects the load of the data to be observed to happen after the
 * store of the consumer pointer. If we did not have this memory
 * barrier, the producer could observe the consumer pointer being set
 * and overwrite the data with a new value before the consumer got the
 * chance to read the old value. The consumer would thus miss reading
 * the old entry and very likely read the new entry twice, once right
 * now and again after circling through the ring.
 */
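/* For reference, the barrier letters above map onto the helpers defined
 * further down in this file: (A) is the control dependency in the
 * xskq_prod_reserve*() helpers (the xskq_prod_is_full() check guards the
 * subsequent stores), (B) is the smp_wmb() in __xskq_prod_submit(), (C) is
 * the smp_rmb() in __xskq_cons_peek(), and (D) is the smp_mb() in
 * __xskq_cons_release().
 */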
/* The operations on the rings are the following:
 *
 * producer                           consumer
 *
 * RESERVE entries                    PEEK in the ring for entries
 * WRITE data into the ring           READ data from the ring
 * SUBMIT entries                     RELEASE entries
 *
 * The producer reserves one or more entries in the ring. It can then
 * fill in these entries and finally submit them so that they can be
 * seen and read by the consumer.
 *
 * The consumer peeks into the ring to see if the producer has written
 * any new entries. If so, the consumer can then read these entries
 * and when it is done reading them release them back to the producer
 * so that the producer can use these slots to fill in new entries.
 *
 * The function names below reflect these operations; illustrative
 * usage sketches follow the consumer and the producer helpers further
 * down in this file.
 */

/* Functions that read and validate content from consumer rings. */

static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	if (q->cached_cons != q->cached_prod) {
		u32 idx = q->cached_cons & q->ring_mask;

		*addr = ring->desc[idx];
		return true;
	}

	return false;
}

static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
					    struct xdp_desc *desc)
{
	u64 chunk, chunk_end;

	chunk = xp_aligned_extract_addr(pool, desc->addr);
	chunk_end = xp_aligned_extract_addr(pool, desc->addr + desc->len);
	if (chunk != chunk_end)
		return false;

	if (chunk >= pool->addrs_cnt)
		return false;

	if (desc->options)
		return false;
	return true;
}

static inline bool xp_unaligned_validate_desc(struct xsk_buff_pool *pool,
					      struct xdp_desc *desc)
{
	u64 addr, base_addr;

	base_addr = xp_unaligned_extract_addr(desc->addr);
	addr = xp_unaligned_add_offset_to_addr(desc->addr);

	if (desc->len > pool->chunk_size)
		return false;

	if (base_addr >= pool->addrs_cnt || addr >= pool->addrs_cnt ||
	    xp_desc_crosses_non_contig_pg(pool, addr, desc->len))
		return false;

	if (desc->options)
		return false;
	return true;
}

static inline bool xp_validate_desc(struct xsk_buff_pool *pool,
				    struct xdp_desc *desc)
{
	return pool->unaligned ? xp_unaligned_validate_desc(pool, desc) :
		xp_aligned_validate_desc(pool, desc);
}

static inline bool xskq_cons_is_valid_desc(struct xsk_queue *q,
					   struct xdp_desc *d,
					   struct xsk_buff_pool *pool)
{
	if (!xp_validate_desc(pool, d)) {
		q->invalid_descs++;
		return false;
	}
	return true;
}

static inline bool xskq_cons_read_desc(struct xsk_queue *q,
				       struct xdp_desc *desc,
				       struct xsk_buff_pool *pool)
{
	while (q->cached_cons != q->cached_prod) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		u32 idx = q->cached_cons & q->ring_mask;

		*desc = ring->desc[idx];
		if (xskq_cons_is_valid_desc(q, desc, pool))
			return true;

		q->cached_cons++;
	}

	return false;
}

/* Functions for consumers */

static inline void __xskq_cons_release(struct xsk_queue *q)
{
	smp_mb(); /* D, matches A */
	WRITE_ONCE(q->ring->consumer, q->cached_cons);
}

static inline void __xskq_cons_peek(struct xsk_queue *q)
{
	/* Refresh the local pointer */
	q->cached_prod = READ_ONCE(q->ring->producer);
	smp_rmb(); /* C, matches B */
}

static inline void xskq_cons_get_entries(struct xsk_queue *q)
{
	__xskq_cons_release(q);
	__xskq_cons_peek(q);
}

static inline bool xskq_cons_has_entries(struct xsk_queue *q, u32 cnt)
{
	u32 entries = q->cached_prod - q->cached_cons;

	if (entries >= cnt)
		return true;

	__xskq_cons_peek(q);
	entries = q->cached_prod - q->cached_cons;

	return entries >= cnt;
}

static inline bool xskq_cons_peek_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
	if (q->cached_prod == q->cached_cons)
		xskq_cons_get_entries(q);
	return xskq_cons_read_addr_unchecked(q, addr);
}

static inline bool xskq_cons_peek_desc(struct xsk_queue *q,
				       struct xdp_desc *desc,
				       struct xsk_buff_pool *pool)
{
	if (q->cached_prod == q->cached_cons)
		xskq_cons_get_entries(q);
	return xskq_cons_read_desc(q, desc, pool);
}

static inline void xskq_cons_release(struct xsk_queue *q)
{
	/* To improve performance, only update local state here.
	 * Reflect this to global state when we get new entries
	 * from the ring in xskq_cons_get_entries() and whenever
	 * Rx or Tx processing is completed in the NAPI loop.
	 */
	q->cached_cons++;
}

static inline bool xskq_cons_is_full(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer) ==
		q->nentries;
}
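/* Illustrative sketch only, not part of the kernel API: how a driver-side
 * consumer of a Tx ring could combine the PEEK/READ/RELEASE helpers above.
 * The function name, the budget parameter and the "post to hardware" step
 * are hypothetical placeholders.
 */
static inline u32 xskq_example_consume_tx(struct xsk_queue *tx,
					  struct xsk_buff_pool *pool,
					  u32 budget)
{
	struct xdp_desc desc;
	u32 done = 0;

	while (done < budget && xskq_cons_peek_desc(tx, &desc, pool)) {
		/* READ: desc.addr/desc.len describe one validated buffer;
		 * a real driver would hand it to the hardware here.
		 */
		xskq_cons_release(tx);	/* RELEASE: updates local state only */
		done++;
	}

	/* Publish the releases to the user-space producer (barrier D). */
	__xskq_cons_release(tx);
	return done;
}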
/* Functions for producers */

static inline bool xskq_prod_is_full(struct xsk_queue *q)
{
	u32 free_entries = q->nentries - (q->cached_prod - q->cached_cons);

	if (free_entries)
		return false;

	/* Refresh the local tail pointer */
	q->cached_cons = READ_ONCE(q->ring->consumer);
	free_entries = q->nentries - (q->cached_prod - q->cached_cons);

	return !free_entries;
}

static inline int xskq_prod_reserve(struct xsk_queue *q)
{
	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	q->cached_prod++;
	return 0;
}

static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	ring->desc[q->cached_prod++ & q->ring_mask] = addr;
	return 0;
}

static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
					 u64 addr, u32 len)
{
	struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
	u32 idx;

	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	idx = q->cached_prod++ & q->ring_mask;
	ring->desc[idx].addr = addr;
	ring->desc[idx].len = len;

	return 0;
}

static inline void __xskq_prod_submit(struct xsk_queue *q, u32 idx)
{
	smp_wmb(); /* B, matches C */

	WRITE_ONCE(q->ring->producer, idx);
}

static inline void xskq_prod_submit(struct xsk_queue *q)
{
	__xskq_prod_submit(q, q->cached_prod);
}

static inline void xskq_prod_submit_addr(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
	u32 idx = q->ring->producer;

	ring->desc[idx++ & q->ring_mask] = addr;

	__xskq_prod_submit(q, idx);
}

static inline void xskq_prod_submit_n(struct xsk_queue *q, u32 nb_entries)
{
	__xskq_prod_submit(q, q->ring->producer + nb_entries);
}

static inline bool xskq_prod_is_empty(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->consumer) == READ_ONCE(q->ring->producer);
}
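/* Illustrative sketch only, not part of the kernel API: how a driver-side
 * producer of an Rx ring could combine the RESERVE/WRITE/SUBMIT helpers
 * above. The function name and the addr/len arguments are hypothetical; in
 * real use they would describe a received frame, and a driver would
 * typically reserve many descriptors and submit once per NAPI poll rather
 * than per packet.
 */
static inline int xskq_example_produce_rx(struct xsk_queue *rx, u64 addr,
					  u32 len)
{
	int err;

	/* RESERVE + WRITE: claim one slot and fill in the descriptor. */
	err = xskq_prod_reserve_desc(rx, addr, len);
	if (err)
		return err;	/* -ENOSPC: the ring is full */

	/* SUBMIT: make the new entry visible to the user-space consumer
	 * (barrier B in the scheme above).
	 */
	xskq_prod_submit(rx);
	return 0;
}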
/* For both producers and consumers */

static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
{
	return q ? q->invalid_descs : 0;
}

static inline u64 xskq_nb_queue_empty_descs(struct xsk_queue *q)
{
	return q ? q->queue_empty_descs : 0;
}

struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q_ops);

#endif /* _LINUX_XSK_QUEUE_H */