/* SPDX-License-Identifier: GPL-2.0 */
/* Interface for implementing AF_XDP zero-copy support in drivers.
 * Copyright(c) 2020 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_DRV_H
#define _LINUX_XDP_SOCK_DRV_H

#include <net/xdp_sock.h>
#include <net/xsk_buff_pool.h>

#ifdef CONFIG_XDP_SOCKETS

void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc);
u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max);
void xsk_tx_release(struct xsk_buff_pool *pool);
struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id);
void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool);
bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool);
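
/* The need_wakeup helpers above drive the flags that tell user space when it
 * must kick the kernel with a syscall. A minimal sketch of the usual driver
 * pattern, assuming a hypothetical out_of_rx_buffers condition computed by
 * the driver's RX clean-up loop:
 *
 *	if (xsk_uses_need_wakeup(pool)) {
 *		if (out_of_rx_buffers)
 *			xsk_set_rx_need_wakeup(pool);
 *		else
 *			xsk_clear_rx_need_wakeup(pool);
 *	}
 */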

static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
	return XDP_PACKET_HEADROOM + pool->headroom;
}

static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
	return pool->chunk_size;
}

static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
	return xsk_pool_get_chunk_size(pool) - xsk_pool_get_headroom(pool);
}
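
/* Worked example for the helper above: with a 4 KiB chunk (the default umem
 * frame size), no user-configured headroom and XDP_PACKET_HEADROOM of 256
 * bytes, the largest frame one RX buffer can hold is 4096 - 256 = 3840 bytes.
 */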

static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
					 struct xdp_rxq_info *rxq)
{
	xp_set_rxq_info(pool, rxq);
}

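/* All buffers in a pool share one xdp_rxq_info (see xsk_pool_set_rxq_info()
 * above), so the NAPI id recorded in the first buffer is representative for
 * the whole pool when busy polling is enabled.
 */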
static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	return pool->heads[0].xdp.rxq->napi_id;
#else
	return 0;
#endif
}

static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
				      unsigned long attrs)
{
	xp_dma_unmap(pool, attrs);
}

static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
				   struct device *dev, unsigned long attrs)
{
	struct xdp_umem *umem = pool->umem;

	return xp_dma_map(pool, dev, attrs, umem->pgs, umem->npgs);
}
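
/* A hedged sketch of the setup a driver typically performs when a pool is
 * attached to a queue (e.g. from its XDP_SETUP_XSK_POOL handler), using the
 * two helpers above; "rxq" is assumed to be the queue's already-registered
 * xdp_rxq_info, and attrs of 0 is for brevity (drivers may pass DMA_ATTR_*
 * flags):
 *
 *	err = xsk_pool_dma_map(pool, dev, 0);
 *	if (err)
 *		return err;
 *	xsk_pool_set_rxq_info(pool, &rxq);
 */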

static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_dma(xskb);
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_frame_dma(xskb);
}

static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
	return xp_alloc(pool);
}

/* Returns as many entries as possible up to max. 0 <= N <= max. */
static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	return xp_alloc_batch(pool, xdp, max);
}

static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	return xp_can_alloc(pool, count);
}
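
/* Illustrative RX ring refill loop built on the batch allocator above;
 * my_rx_desc and next_free_slot() are hypothetical driver details:
 *
 *	u32 i, n = xsk_buff_alloc_batch(pool, xdp_bufs, free_ring_slots);
 *
 *	for (i = 0; i < n; i++) {
 *		struct my_rx_desc *rxd = next_free_slot(ring);
 *
 *		rxd->addr = xsk_buff_xdp_get_dma(xdp_bufs[i]);
 *	}
 */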

static inline void xsk_buff_free(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	xp_free(xskb);
}

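/* In contrast to xsk_buff_free(), which recycles the buffer for future
 * allocations, this only releases the xskb handle; use it when the
 * underlying buffer has been handed off elsewhere.
 */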
static inline void xsk_buff_discard(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	xp_release(xskb);
}

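/* Point the xdp_buff at a received frame of @size bytes, starting
 * XDP_PACKET_HEADROOM bytes into the buffer and carrying no metadata.
 */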
static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
{
	xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
	xdp->data_meta = xdp->data;
	xdp->data_end = xdp->data + size;
}

static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
					      u64 addr)
{
	return xp_raw_get_dma(pool, addr);
}

static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	return xp_raw_get_data(pool, addr);
}

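/* No-op for DMA mappings that never require syncing, which keeps the hot
 * RX path cheap in the common case.
 */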
static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	if (!pool->dma_need_sync)
		return;

	xp_dma_sync_for_cpu(xskb);
}
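
/* Putting the RX helpers together, a hedged per-frame sequence (len would
 * come from the hardware descriptor) before handing the frame to the XDP
 * program:
 *
 *	xsk_buff_set_size(xdp, len);
 *	xsk_buff_dma_sync_for_cpu(xdp, pool);
 *	act = bpf_prog_run_xdp(prog, xdp);
 */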

static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
						    dma_addr_t dma,
						    size_t size)
{
	xp_dma_sync_for_device(pool, dma, size);
}
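
/* The raw variants above take descriptor addresses rather than xdp_buffs,
 * which is what the TX path sees. A minimal zero-copy transmit sketch, with
 * my_queue_tx_frame() standing in for driver-specific descriptor writes:
 *
 *	while (xsk_tx_peek_desc(pool, &desc)) {
 *		dma_addr_t dma = xsk_buff_raw_get_dma(pool, desc.addr);
 *
 *		xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);
 *		my_queue_tx_frame(dma, desc.len);
 *	}
 *	xsk_tx_release(pool);
 */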

#else

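/* Stub implementations so that drivers build when CONFIG_XDP_SOCKETS is
 * disabled; all of them are no-ops or report "nothing available".
 */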
static inline void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
}

static inline bool xsk_tx_peek_desc(struct xsk_buff_pool *pool,
				    struct xdp_desc *desc)
{
	return false;
}

static inline u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max)
{
	return 0;
}

static inline void xsk_tx_release(struct xsk_buff_pool *pool)
{
}

static inline struct xsk_buff_pool *
xsk_get_pool_from_qid(struct net_device *dev, u16 queue_id)
{
	return NULL;
}

static inline void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return false;
}

static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
					 struct xdp_rxq_info *rxq)
{
}

static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
				      unsigned long attrs)
{
}

static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
				   struct device *dev, unsigned long attrs)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
	return NULL;
}

static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	return 0;
}

static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	return false;
}

static inline void xsk_buff_free(struct xdp_buff *xdp)
{
}

static inline void xsk_buff_discard(struct xdp_buff *xdp)
{
}

static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
{
}

static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
					      u64 addr)
{
	return 0;
}

static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	return NULL;
}

static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool)
{
}

static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
						    dma_addr_t dma,
						    size_t size)
{
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_DRV_H */