/* SPDX-License-Identifier: GPL-2.0 */
/* AF_XDP internal functions
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_H
#define _LINUX_XDP_SOCK_H

#include <linux/workqueue.h>
#include <linux/if_xdp.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <net/sock.h>

struct net_device;
struct xsk_queue;

/* Masks for xdp_umem_page flags.
 * The low 12 bits of the addr will be 0 since this is the page address, so we
 * can use them for flags.
 */
#define XSK_NEXT_PG_CONTIG_SHIFT 0
#define XSK_NEXT_PG_CONTIG_MASK (1ULL << XSK_NEXT_PG_CONTIG_SHIFT)
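
/* Illustrative sketch (hypothetical driver-side check, not an API): a page
 * whose addr has the contig flag set is virtually contiguous with the next
 * page, so a frame may safely straddle the page boundary:
 *
 *	if ((unsigned long)umem->pages[i].addr & XSK_NEXT_PG_CONTIG_MASK)
 *		frame_can_cross_page = true;
 */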

struct xdp_umem_page {
	void *addr;
	dma_addr_t dma;
};

struct xdp_umem_fq_reuse {
	u32 nentries;
	u32 length;
	u64 handles[];
};

/* Flags for the umem flags field.
 *
 * The NEED_WAKEUP flag starts at bit 1 because the field is shared with the
 * public umem flags. See include/uapi/linux/if_xdp.h.
 */
#define XDP_UMEM_USES_NEED_WAKEUP (1 << 1)
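
/* Resulting bit layout (assuming XDP_UMEM_UNALIGNED_CHUNK_FLAG is still
 * bit 0 in the uapi header):
 *
 *	bit 0: XDP_UMEM_UNALIGNED_CHUNK_FLAG	(public, uapi)
 *	bit 1: XDP_UMEM_USES_NEED_WAKEUP	(internal, this header)
 */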

struct xdp_umem {
	struct xsk_queue *fq;
	struct xsk_queue *cq;
	struct xdp_umem_page *pages;
	u64 chunk_mask;
	u64 size;
	u32 headroom;
	u32 chunk_size_nohr;
	struct user_struct *user;
	unsigned long address;
	refcount_t users;
	struct work_struct work;
	struct page **pgs;
	u32 npgs;
	u16 queue_id;
	u8 need_wakeup;
	u8 flags;
	int id;
	struct net_device *dev;
	struct xdp_umem_fq_reuse *fq_reuse;
	bool zc;
	spinlock_t xsk_list_lock;
	struct list_head xsk_list;
};

/* Nodes are linked in the struct xdp_sock map_list field, and used to
 * track which maps a certain socket resides in.
 */

struct xsk_map {
	struct bpf_map map;
	struct list_head __percpu *flush_list;
	spinlock_t lock; /* Synchronize map updates */
	struct xdp_sock *xsk_map[];
};

struct xsk_map_node {
	struct list_head node;
	struct xsk_map *map;
	struct xdp_sock **map_entry;
};

struct xdp_sock {
	/* struct sock must be the first member of struct xdp_sock */
	struct sock sk;
	struct xsk_queue *rx;
	struct net_device *dev;
	struct xdp_umem *umem;
	struct list_head flush_node;
	u16 queue_id;
	bool zc;
	enum {
		XSK_READY = 0,
		XSK_BOUND,
		XSK_UNBOUND,
	} state;
	/* Protects multiple processes in the control path */
	struct mutex mutex;
	struct xsk_queue *tx ____cacheline_aligned_in_smp;
	struct list_head list;
	/* Mutual exclusion of NAPI TX thread and sendmsg error paths
	 * in the SKB destructor callback.
	 */
	spinlock_t tx_completion_lock;
	/* Protects generic receive. */
	spinlock_t rx_lock;
	u64 rx_dropped;
	struct list_head map_list;
	/* Protects map_list */
	spinlock_t map_list_lock;
};

struct xdp_buff;
#ifdef CONFIG_XDP_SOCKETS
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs);
/* Used from netdev driver */
bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt);
u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
void xsk_umem_discard_addr(struct xdp_umem *umem);
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc);
void xsk_umem_consume_tx_done(struct xdp_umem *umem);
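
/* Minimal sketch of how a zero-copy driver might drain the TX ring with the
 * helpers above (hypothetical driver code, error handling omitted):
 *
 *	struct xdp_desc desc;
 *	u32 sent = 0;
 *
 *	while (xsk_umem_consume_tx(umem, &desc)) {
 *		dma_addr_t dma = xdp_umem_get_dma(umem, desc.addr);
 *
 *		... post dma and desc.len to the hardware TX ring ...
 *		sent++;
 *	}
 *	xsk_umem_consume_tx_done(umem);
 *
 * Later, when the hardware reports those frames as transmitted, the driver
 * hands the entries back with xsk_umem_complete_tx(umem, sent).
 */
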
struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries);
struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
					  struct xdp_umem_fq_reuse *newq);
void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq);
struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, u16 queue_id);
void xsk_set_rx_need_wakeup(struct xdp_umem *umem);
void xsk_set_tx_need_wakeup(struct xdp_umem *umem);
void xsk_clear_rx_need_wakeup(struct xdp_umem *umem);
void xsk_clear_tx_need_wakeup(struct xdp_umem *umem);
bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem);
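
/* Sketch of the need_wakeup protocol from the driver side (hypothetical
 * flow; only takes effect when the application opted in and
 * xsk_umem_uses_need_wakeup() is true):
 *
 *	if (xsk_umem_uses_need_wakeup(umem)) {
 *		if (driver is about to stop processing the TX ring)
 *			xsk_set_tx_need_wakeup(umem);
 *		else
 *			xsk_clear_tx_need_wakeup(umem);
 *	}
 *
 * With the flag set, user space knows it must kick the kernel, e.g. via
 * sendmsg(), to get TX processing going again.
 */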

void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
			     struct xdp_sock **map_entry);
int xsk_map_inc(struct xsk_map *map);
void xsk_map_put(struct xsk_map *map);
int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
		       struct xdp_sock *xs);
void __xsk_map_flush(struct bpf_map *map);

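/* Look up the socket bound at @key in an XSKMAP. Returns NULL when the key
 * is out of range or the slot is empty. Called from the XDP fast path, which
 * runs under RCU protection.
 */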
static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
						     u32 key)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	struct xdp_sock *xs;

	if (key >= map->max_entries)
		return NULL;

	xs = READ_ONCE(m->xsk_map[key]);
	return xs;
}

static inline u64 xsk_umem_extract_addr(u64 addr)
{
	return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
}

static inline u64 xsk_umem_extract_offset(u64 addr)
{
	return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
}

static inline u64 xsk_umem_add_offset_to_addr(u64 addr)
{
	return xsk_umem_extract_addr(addr) + xsk_umem_extract_offset(addr);
}
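
/* Worked example, assuming the uapi values XSK_UNALIGNED_BUF_OFFSET_SHIFT
 * == 48 and XSK_UNALIGNED_BUF_ADDR_MASK == (1ULL << 48) - 1:
 *
 *	addr = (0x30ULL << 48) | 0x12000;	   base 0x12000, offset 0x30
 *	xsk_umem_extract_addr(addr)		== 0x12000
 *	xsk_umem_extract_offset(addr)		== 0x30
 *	xsk_umem_add_offset_to_addr(addr)	== 0x12030
 */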

static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{
	unsigned long page_addr;

	addr = xsk_umem_add_offset_to_addr(addr);
	page_addr = (unsigned long)umem->pages[addr >> PAGE_SHIFT].addr;

	return (char *)(page_addr & PAGE_MASK) + (addr & ~PAGE_MASK);
}

static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{
	addr = xsk_umem_add_offset_to_addr(addr);

	return umem->pages[addr >> PAGE_SHIFT].dma + (addr & ~PAGE_MASK);
}

/* Reuse-queue aware version of FILL queue helpers */
static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (rq->length >= cnt)
		return true;

	return xsk_umem_has_addrs(umem, cnt - rq->length);
}

static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (!rq->length)
		return xsk_umem_peek_addr(umem, addr);

	*addr = rq->handles[rq->length - 1];
	return addr;
}

static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (!rq->length)
		xsk_umem_discard_addr(umem);
	else
		rq->length--;
}

static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	rq->handles[rq->length++] = addr;
}
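
/* Minimal sketch of an RX buffer allocation path built on the reuse-queue
 * aware helpers (hypothetical driver code):
 *
 *	u64 addr;
 *
 *	if (!xsk_umem_peek_addr_rq(umem, &addr))
 *		return -ENOMEM;			   FILL queue exhausted
 *	xsk_umem_discard_addr_rq(umem);
 *	dma = xdp_umem_get_dma(umem, addr);
 *	... give dma to the hardware RX ring ...
 *
 * If the buffer cannot be handed to hardware after all, the driver returns
 * it with xsk_umem_fq_reuse(umem, addr) rather than leaking the address.
 */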

/* Handle the offset appropriately depending on aligned or unaligned mode.
 * For unaligned mode, we store the offset in the upper 16 bits of the address.
 * For aligned mode, we simply add the offset to the address.
 */
static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 address,
					 u64 offset)
{
	if (umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG)
		return address + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
	else
		return address + offset;
}
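
/* Worked example for a 256 byte offset (assuming
 * XSK_UNALIGNED_BUF_OFFSET_SHIFT == 48):
 *
 *	xsk_umem_adjust_offset(umem, 0x4000, 256)
 *		aligned mode:	0x4100
 *		unaligned mode:	(256ULL << 48) | 0x4000
 */
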
#else
static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
	return false;
}

static inline bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
{
	return false;
}

static inline u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{
	return NULL;
}

static inline void xsk_umem_discard_addr(struct xdp_umem *umem)
{
}

static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
}

static inline bool xsk_umem_consume_tx(struct xdp_umem *umem,
				       struct xdp_desc *desc)
{
	return false;
}

static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
}

static inline struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
{
	return NULL;
}

static inline struct xdp_umem_fq_reuse *xsk_reuseq_swap(
	struct xdp_umem *umem,
	struct xdp_umem_fq_reuse *newq)
{
	return NULL;
}

static inline void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
{
}

static inline struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
						     u16 queue_id)
{
	return NULL;
}

static inline u64 xsk_umem_extract_addr(u64 addr)
{
	return 0;
}

static inline u64 xsk_umem_extract_offset(u64 addr)
{
	return 0;
}

static inline u64 xsk_umem_add_offset_to_addr(u64 addr)
{
	return 0;
}

static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{
	return NULL;
}

static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{
	return 0;
}

static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
{
	return false;
}

static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
	return NULL;
}

static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
{
}

static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
{
}

static inline void xsk_set_rx_need_wakeup(struct xdp_umem *umem)
{
}

static inline void xsk_set_tx_need_wakeup(struct xdp_umem *umem)
{
}

static inline void xsk_clear_rx_need_wakeup(struct xdp_umem *umem)
{
}

static inline void xsk_clear_tx_need_wakeup(struct xdp_umem *umem)
{
}

static inline bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem)
{
	return false;
}

static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 handle,
					 u64 offset)
{
	return 0;
}

static inline int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
				     struct xdp_sock *xs)
{
	return -EOPNOTSUPP;
}

static inline void __xsk_map_flush(struct bpf_map *map)
{
}

static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
						     u32 key)
{
	return NULL;
}
#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_H */