--- xsk_queue.h (3bec5b6aae830355e786e204b20a7cea38c3a8ed)
+++ xsk_queue.h (1c1efc2af158869795d3334a12fed2afd9c51539)
 /* SPDX-License-Identifier: GPL-2.0 */
 /* XDP user-space ring structure
  * Copyright(c) 2018 Intel Corporation.
  */

 #ifndef _LINUX_XSK_QUEUE_H
 #define _LINUX_XSK_QUEUE_H

--- 152 unchanged lines hidden ---

                                    struct xdp_desc *desc)
 {
         return pool->unaligned ? xp_unaligned_validate_desc(pool, desc) :
                 xp_aligned_validate_desc(pool, desc);
 }

 static inline bool xskq_cons_is_valid_desc(struct xsk_queue *q,
                                            struct xdp_desc *d,
-                                           struct xdp_umem *umem)
+                                           struct xsk_buff_pool *pool)
 {
-        if (!xp_validate_desc(umem->pool, d)) {
+        if (!xp_validate_desc(pool, d)) {
                 q->invalid_descs++;
                 return false;
         }
         return true;
 }
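The behavioural contract in xskq_cons_is_valid_desc() is that an invalid descriptor bumps q->invalid_descs and is skipped rather than delivered to the consumer. The aligned-mode rule that xp_aligned_validate_desc() enforces is, roughly, that a descriptor must lie inside the umem and must not straddle a chunk boundary. Here is a standalone sketch of that rule for illustration only; the stand-in types, field names and geometry are assumptions, not the kernel's implementation:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel structures; fields are assumed for illustration. */
struct xdp_desc { uint64_t addr; uint32_t len; };
struct fake_pool { uint64_t chunk_size; uint64_t umem_size; };

/* Aligned mode, roughly: the descriptor must fit inside the umem and must
 * not cross a chunk boundary. Not the kernel's xp_aligned_validate_desc(). */
static bool aligned_desc_ok(const struct fake_pool *p, const struct xdp_desc *d)
{
        if (!d->len || d->addr + d->len > p->umem_size)
                return false;
        return d->addr / p->chunk_size ==
               (d->addr + (uint64_t)d->len - 1) / p->chunk_size;
}

int main(void)
{
        struct fake_pool p = { .chunk_size = 2048, .umem_size = 1 << 20 };
        struct xdp_desc ok  = { .addr = 0,    .len = 1500 };
        struct xdp_desc bad = { .addr = 2000, .len = 1500 }; /* straddles 2K */

        printf("ok=%d bad=%d\n", aligned_desc_ok(&p, &ok),
               aligned_desc_ok(&p, &bad));
        return 0;
}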
 static inline bool xskq_cons_read_desc(struct xsk_queue *q,
                                        struct xdp_desc *desc,
-                                       struct xdp_umem *umem)
+                                       struct xsk_buff_pool *pool)
 {
         while (q->cached_cons != q->cached_prod) {
                 struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
                 u32 idx = q->cached_cons & q->ring_mask;

                 *desc = ring->desc[idx];
-                if (xskq_cons_is_valid_desc(q, desc, umem))
+                if (xskq_cons_is_valid_desc(q, desc, pool))
                         return true;

                 q->cached_cons++;
         }

         return false;
 }

--- 35 unchanged lines hidden ---
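Worth noting in xskq_cons_read_desc() above: cached_cons and cached_prod are free-running u32 counters that are never wrapped back into [0, ring size). The `idx = q->cached_cons & q->ring_mask` line recovers the slot because the ring size is a power of two, and unsigned overflow keeps both the index and the prod - cons fill level correct. A minimal standalone demo of that indexing idiom (ring size and values invented):

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8u                    /* power of two, like xsk rings */
#define RING_MASK (RING_SIZE - 1)

int main(void)
{
        uint32_t slots[RING_SIZE];
        /* Start near UINT32_MAX to show that wraparound is harmless. */
        uint32_t cons = UINT32_MAX - 3, prod = UINT32_MAX - 3;

        for (int i = 0; i < 8; i++)     /* producer side */
                slots[prod++ & RING_MASK] = 100 + i;

        while (cons != prod) {          /* consumer side */
                printf("idx %u -> %u\n", cons & RING_MASK,
                       slots[cons & RING_MASK]);
                cons++;
        }
        return 0;
}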
 {
         if (q->cached_prod == q->cached_cons)
                 xskq_cons_get_entries(q);
         return xskq_cons_read_addr_unchecked(q, addr);
 }

 static inline bool xskq_cons_peek_desc(struct xsk_queue *q,
                                        struct xdp_desc *desc,
-                                       struct xdp_umem *umem)
+                                       struct xsk_buff_pool *pool)
 {
         if (q->cached_prod == q->cached_cons)
                 xskq_cons_get_entries(q);
-        return xskq_cons_read_desc(q, desc, umem);
+        return xskq_cons_read_desc(q, desc, pool);
 }

 static inline void xskq_cons_release(struct xsk_queue *q)
 {
         /* To improve performance, only update local state here.
          * Reflect this to global state when we get new entries
          * from the ring in xskq_cons_get_entries() and whenever
          * Rx or Tx processing are completed in the NAPI loop.
          */

--- 115 unchanged lines hidden ---
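The peek/release pair above is how the lazy-update comment plays out in a consumer: each descriptor is peeked (which runs the validation shown earlier and leaves cached_cons untouched), handed off, then released, which only advances the local cached consumer. A hypothetical Tx loop against these helpers; example_hw_post() and the budget argument are invented for illustration, not part of the kernel API:

/* Hypothetical consumer loop, for illustration only; example_hw_post()
 * and "budget" are assumptions. */
static int example_xsk_xmit(struct xsk_queue *q, struct xsk_buff_pool *pool,
                            int budget)
{
        struct xdp_desc desc;
        int sent = 0;

        while (sent < budget && xskq_cons_peek_desc(q, &desc, pool)) {
                /* desc has already passed xskq_cons_is_valid_desc(). */
                example_hw_post(desc.addr, desc.len);   /* assumed helper */

                /* Advances only the local cached consumer; the global
                 * ring state is reflected later, as the comment in
                 * xskq_cons_release() explains. */
                xskq_cons_release(q);
                sent++;
        }

        return sent;
}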