/* SPDX-License-Identifier: GPL-2.0 */
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XSK_QUEUE_H
#define _LINUX_XSK_QUEUE_H

#include <linux/types.h>
#include <linux/if_xdp.h>
#include <net/xdp_sock.h>
#include <net/xsk_buff_pool.h>

#include "xsk.h"

struct xdp_ring {
	u32 producer ____cacheline_aligned_in_smp;
	u32 consumer ____cacheline_aligned_in_smp;
	u32 flags;
};

/* Used for the RX and TX queues for packets */
struct xdp_rxtx_ring {
	struct xdp_ring ptrs;
	struct xdp_desc desc[] ____cacheline_aligned_in_smp;
};

/* Used for the fill and completion queues for buffers */
struct xdp_umem_ring {
	struct xdp_ring ptrs;
	u64 desc[] ____cacheline_aligned_in_smp;
};

struct xsk_queue {
	u32 ring_mask;
	u32 nentries;
	u32 cached_prod;
	u32 cached_cons;
	struct xdp_ring *ring;
	u64 invalid_descs;
	u64 queue_empty_descs;
};

/* The structure of the shared state of the rings is the same as the
 * ring buffer in kernel/events/ring_buffer.c. For the Rx and completion
 * ring, the kernel is the producer and user space is the consumer. For
 * the Tx and fill rings, the kernel is the consumer and user space is
 * the producer.
 *
 * producer                         consumer
 *
 * if (LOAD ->consumer) {           LOAD ->producer
 *                    (A)           smp_rmb()       (C)
 *    STORE $data                   LOAD $data
 *    smp_wmb()       (B)           smp_mb()        (D)
 *    STORE ->producer              STORE ->consumer
 * }
 *
 * (A) pairs with (D), and (B) pairs with (C).
 *
 * Starting with (B), it protects the data from being written after
 * the producer pointer. If this barrier was missing, the consumer
 * could observe the producer pointer being set and thus load the data
 * before the producer has written the new data. The consumer would in
 * this case load the old data.
 *
 * (C) protects the consumer from speculatively loading the data before
 * the producer pointer actually has been read. If we do not have this
 * barrier, some architectures could load old data as speculative loads
 * are not discarded as the CPU does not know there is a dependency
 * between ->producer and data.
 *
 * (A) is a control dependency that separates the load of ->consumer
 * from the stores of $data. In case ->consumer indicates there is no
 * room in the buffer to store $data, we do not store it. So no barrier
 * is needed.
 *
 * (D) protects the load of the data from being observed to happen after
 * the store of the consumer pointer. If we did not have this memory
 * barrier, the producer could observe the consumer pointer being set
 * and overwrite the data with a new value before the consumer got the
 * chance to read the old value. The consumer would thus miss reading
 * the old entry and very likely read the new entry twice, once right
 * now and again after circling through the ring.
 */

/* The operations on the rings are the following:
 *
 * producer                           consumer
 *
 * RESERVE entries                    PEEK in the ring for entries
 * WRITE data into the ring           READ data from the ring
 * SUBMIT entries                     RELEASE entries
 *
 * The producer reserves one or more entries in the ring. It can then
 * fill in these entries and finally submit them so that they can be
 * seen and read by the consumer.
 *
 * The consumer peeks into the ring to see if the producer has written
 * any new entries. If so, the consumer can then read these entries
 * and, when it is done reading them, release them back to the producer
 * so that the producer can use these slots to fill in new entries.
 *
 * The function names below reflect these operations.
 */
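/* Illustrative sketch only (not kernel code): how the verbs above map onto
 * the helpers defined in this file. The names "rx_q", "tx_q", "umem",
 * "addr", "len" and "desc" are hypothetical local variables.
 *
 *	producer side (e.g. the kernel filling an Rx ring):
 *
 *		if (xskq_prod_reserve_desc(rx_q, addr, len))	RESERVE + WRITE
 *			return -ENOSPC;
 *		xskq_prod_submit(rx_q);				SUBMIT
 *
 *	consumer side (e.g. the kernel draining a Tx ring):
 *
 *		while (xskq_cons_peek_desc(tx_q, &desc, umem)) {   PEEK + READ
 *			...transmit desc...
 *			xskq_cons_release(tx_q);	RELEASE (local only)
 *		}
 *		__xskq_cons_release(tx_q);	propagate the release to the ring
 */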
/* Functions that read and validate content from consumer rings. */

static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	if (q->cached_cons != q->cached_prod) {
		u32 idx = q->cached_cons & q->ring_mask;

		*addr = ring->desc[idx];
		return true;
	}

	return false;
}

static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
					    struct xdp_desc *desc)
{
	u64 chunk, chunk_end;

	chunk = xp_aligned_extract_addr(pool, desc->addr);
	chunk_end = xp_aligned_extract_addr(pool, desc->addr + desc->len);
	if (chunk != chunk_end)
		return false;

	if (chunk >= pool->addrs_cnt)
		return false;

	if (desc->options)
		return false;
	return true;
}

static inline bool xp_unaligned_validate_desc(struct xsk_buff_pool *pool,
					      struct xdp_desc *desc)
{
	u64 addr, base_addr;

	base_addr = xp_unaligned_extract_addr(desc->addr);
	addr = xp_unaligned_add_offset_to_addr(desc->addr);

	if (desc->len > pool->chunk_size)
		return false;

	if (base_addr >= pool->addrs_cnt || addr >= pool->addrs_cnt ||
	    xp_desc_crosses_non_contig_pg(pool, addr, desc->len))
		return false;

	if (desc->options)
		return false;
	return true;
}

static inline bool xp_validate_desc(struct xsk_buff_pool *pool,
				    struct xdp_desc *desc)
{
	return pool->unaligned ? xp_unaligned_validate_desc(pool, desc) :
		xp_aligned_validate_desc(pool, desc);
}
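/* Worked example for the aligned case (illustrative; assumes a pool with a
 * 2048-byte chunk_size and that xp_aligned_extract_addr() rounds an address
 * down to the start of its chunk):
 *
 *	addr = 6144, len = 512:  chunk = 6144, chunk_end = 6144 -> valid
 *	addr = 7680, len = 1024: chunk = 6144, chunk_end = 8192 -> rejected,
 *	as the descriptor would straddle a chunk boundary.
 *
 * In the unaligned case a descriptor may start anywhere, so the checks are
 * instead that it fits within the umem and does not cross a non-contiguous
 * page boundary.
 */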
static inline bool xskq_cons_is_valid_desc(struct xsk_queue *q,
					   struct xdp_desc *d,
					   struct xdp_umem *umem)
{
	if (!xp_validate_desc(umem->pool, d)) {
		q->invalid_descs++;
		return false;
	}
	return true;
}

static inline bool xskq_cons_read_desc(struct xsk_queue *q,
				       struct xdp_desc *desc,
				       struct xdp_umem *umem)
{
	while (q->cached_cons != q->cached_prod) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		u32 idx = q->cached_cons & q->ring_mask;

		*desc = ring->desc[idx];
		if (xskq_cons_is_valid_desc(q, desc, umem))
			return true;

		q->cached_cons++;
	}

	return false;
}

/* Functions for consumers */

static inline void __xskq_cons_release(struct xsk_queue *q)
{
	smp_mb(); /* D, matches A */
	WRITE_ONCE(q->ring->consumer, q->cached_cons);
}

static inline void __xskq_cons_peek(struct xsk_queue *q)
{
	/* Refresh the local pointer */
	q->cached_prod = READ_ONCE(q->ring->producer);
	smp_rmb(); /* C, matches B */
}

static inline void xskq_cons_get_entries(struct xsk_queue *q)
{
	__xskq_cons_release(q);
	__xskq_cons_peek(q);
}

static inline bool xskq_cons_has_entries(struct xsk_queue *q, u32 cnt)
{
	u32 entries = q->cached_prod - q->cached_cons;

	if (entries >= cnt)
		return true;

	__xskq_cons_peek(q);
	entries = q->cached_prod - q->cached_cons;

	return entries >= cnt;
}

static inline bool xskq_cons_peek_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
	if (q->cached_prod == q->cached_cons)
		xskq_cons_get_entries(q);
	return xskq_cons_read_addr_unchecked(q, addr);
}

static inline bool xskq_cons_peek_desc(struct xsk_queue *q,
				       struct xdp_desc *desc,
				       struct xdp_umem *umem)
{
	if (q->cached_prod == q->cached_cons)
		xskq_cons_get_entries(q);
	return xskq_cons_read_desc(q, desc, umem);
}

static inline void xskq_cons_release(struct xsk_queue *q)
{
	/* To improve performance, only update local state here.
	 * Reflect this to global state when we get new entries
	 * from the ring in xskq_cons_get_entries() and whenever
	 * Rx or Tx processing is completed in the NAPI loop.
	 */
	q->cached_cons++;
}

static inline bool xskq_cons_is_full(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer) ==
		q->nentries;
}
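/* Note on the index arithmetic used above and below (illustrative; follows
 * from the unsigned types): producer, consumer, cached_prod and cached_cons
 * are free-running u32 counters that are only masked with ring_mask when a
 * slot is actually addressed, so unsigned subtraction yields the number of
 * filled entries even across u32 wraparound. For a 4-entry ring:
 *
 *	producer = 0x00000001, consumer = 0xffffffff (already wrapped)
 *	producer - consumer = 2 entries outstanding
 *	slot of the oldest entry = consumer & ring_mask = 3
 */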
/* Functions for producers */

static inline bool xskq_prod_is_full(struct xsk_queue *q)
{
	u32 free_entries = q->nentries - (q->cached_prod - q->cached_cons);

	if (free_entries)
		return false;

	/* Refresh the local tail pointer */
	q->cached_cons = READ_ONCE(q->ring->consumer);
	free_entries = q->nentries - (q->cached_prod - q->cached_cons);

	return !free_entries;
}

static inline int xskq_prod_reserve(struct xsk_queue *q)
{
	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	q->cached_prod++;
	return 0;
}

static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	ring->desc[q->cached_prod++ & q->ring_mask] = addr;
	return 0;
}

static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
					 u64 addr, u32 len)
{
	struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
	u32 idx;

	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	idx = q->cached_prod++ & q->ring_mask;
	ring->desc[idx].addr = addr;
	ring->desc[idx].len = len;

	return 0;
}

static inline void __xskq_prod_submit(struct xsk_queue *q, u32 idx)
{
	smp_wmb(); /* B, matches C */

	WRITE_ONCE(q->ring->producer, idx);
}

static inline void xskq_prod_submit(struct xsk_queue *q)
{
	__xskq_prod_submit(q, q->cached_prod);
}

static inline void xskq_prod_submit_addr(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
	u32 idx = q->ring->producer;

	ring->desc[idx++ & q->ring_mask] = addr;

	__xskq_prod_submit(q, idx);
}

static inline void xskq_prod_submit_n(struct xsk_queue *q, u32 nb_entries)
{
	__xskq_prod_submit(q, q->ring->producer + nb_entries);
}

static inline bool xskq_prod_is_empty(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->consumer) == READ_ONCE(q->ring->producer);
}

/* For both producers and consumers */

static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
{
	return q ? q->invalid_descs : 0;
}

static inline u64 xskq_nb_queue_empty_descs(struct xsk_queue *q)
{
	return q ? q->queue_empty_descs : 0;
}

struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q_ops);

#endif /* _LINUX_XSK_QUEUE_H */