// SPDX-License-Identifier: GPL-2.0

#include <net/xsk_buff_pool.h>
#include <net/xdp_sock.h>
#include <net/xdp_sock_drv.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

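/* Add a socket with a Tx ring to the pool's RCU-protected list of Tx
 * sockets. Rx-only sockets are ignored.
 */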
void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
{
	unsigned long flags;

	if (!xs->tx)
		return;

	spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);
	list_add_rcu(&xs->tx_list, &pool->xsk_tx_list);
	spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);
}

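/* Remove a Tx socket from the pool's Tx list. */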
void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
{
	unsigned long flags;

	if (!xs->tx)
		return;

	spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);
	list_del_rcu(&xs->tx_list);
	spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);
}

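/* Free the pool and its descriptor/head arrays. Tolerates a NULL or
 * partially constructed pool.
 */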
void xp_destroy(struct xsk_buff_pool *pool)
{
	if (!pool)
		return;

	kvfree(pool->tx_descs);
	kvfree(pool->heads);
	kvfree(pool);
}

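/* Allocate a buffer pool for the umem and set up one xdp_buff_xsk head
 * per chunk. In unaligned mode the heads go onto a free stack; in
 * aligned mode each head is bound to its fixed chunk address right away.
 */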
struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
						struct xdp_umem *umem)
{
	bool unaligned = umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
	struct xsk_buff_pool *pool;
	struct xdp_buff_xsk *xskb;
	u32 i, entries;

	entries = unaligned ? umem->chunks : 0;
	pool = kvzalloc(struct_size(pool, free_heads, entries), GFP_KERNEL);
	if (!pool)
		goto out;

	pool->heads = kvcalloc(umem->chunks, sizeof(*pool->heads), GFP_KERNEL);
	if (!pool->heads)
		goto out;

	if (xs->tx) {
		pool->tx_descs = kcalloc(xs->tx->nentries, sizeof(*pool->tx_descs), GFP_KERNEL);
		if (!pool->tx_descs)
			goto out;
	}

	pool->chunk_mask = ~((u64)umem->chunk_size - 1);
	pool->addrs_cnt = umem->size;
	pool->heads_cnt = umem->chunks;
	pool->free_heads_cnt = umem->chunks;
	pool->headroom = umem->headroom;
	pool->chunk_size = umem->chunk_size;
	pool->chunk_shift = ffs(umem->chunk_size) - 1;
	pool->unaligned = unaligned;
	pool->frame_len = umem->chunk_size - umem->headroom -
		XDP_PACKET_HEADROOM;
	pool->umem = umem;
	pool->addrs = umem->addrs;
	INIT_LIST_HEAD(&pool->free_list);
	INIT_LIST_HEAD(&pool->xsk_tx_list);
	spin_lock_init(&pool->xsk_tx_list_lock);
	spin_lock_init(&pool->cq_lock);
	refcount_set(&pool->users, 1);

	pool->fq = xs->fq_tmp;
	pool->cq = xs->cq_tmp;

	for (i = 0; i < pool->free_heads_cnt; i++) {
		xskb = &pool->heads[i];
		xskb->pool = pool;
		xskb->xdp.frame_sz = umem->chunk_size - umem->headroom;
		INIT_LIST_HEAD(&xskb->free_list_node);
		if (pool->unaligned)
			pool->free_heads[i] = xskb;
		else
			xp_init_xskb_addr(xskb, pool, i * pool->chunk_size);
	}

	return pool;

out:
	xp_destroy(pool);
	return NULL;
}

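/* Point every buffer in the pool at the driver's Rx queue info. */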
void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq)
{
	u32 i;

	for (i = 0; i < pool->heads_cnt; i++)
		pool->heads[i].xdp.rxq = rxq;
}
EXPORT_SYMBOL(xp_set_rxq_info);

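/* Tell the driver to tear down zero-copy state for this queue by
 * issuing XDP_SETUP_XSK_POOL with a NULL pool.
 */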
static void xp_disable_drv_zc(struct xsk_buff_pool *pool)
{
	struct netdev_bpf bpf;
	int err;

	ASSERT_RTNL();

	if (pool->umem->zc) {
		bpf.command = XDP_SETUP_XSK_POOL;
		bpf.xsk.pool = NULL;
		bpf.xsk.queue_id = pool->queue_id;

		err = pool->netdev->netdev_ops->ndo_bpf(pool->netdev, &bpf);

		if (err)
			WARN(1, "Failed to disable zero-copy!\n");
	}
}

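/* Bind the pool to a netdev queue. Copy mode is handled entirely in
 * software; zero-copy additionally requires the driver to implement
 * ndo_bpf and ndo_xsk_wakeup and to DMA map the buffers.
 */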
int xp_assign_dev(struct xsk_buff_pool *pool,
		  struct net_device *netdev, u16 queue_id, u16 flags)
{
	bool force_zc, force_copy;
	struct netdev_bpf bpf;
	int err = 0;

	ASSERT_RTNL();

	force_zc = flags & XDP_ZEROCOPY;
	force_copy = flags & XDP_COPY;

	if (force_zc && force_copy)
		return -EINVAL;

	if (xsk_get_pool_from_qid(netdev, queue_id))
		return -EBUSY;

	pool->netdev = netdev;
	pool->queue_id = queue_id;
	err = xsk_reg_pool_at_qid(netdev, pool, queue_id);
	if (err)
		return err;

	if (flags & XDP_USE_NEED_WAKEUP)
		pool->uses_need_wakeup = true;
	/* Tx needs to be explicitly woken up the first time.  Also
	 * for supporting drivers that do not implement this
	 * feature. They will always have to call sendto() or poll().
	 */
	pool->cached_need_wakeup = XDP_WAKEUP_TX;

	dev_hold(netdev);

	if (force_copy)
		/* For copy-mode, we are done. */
		return 0;

	if (!netdev->netdev_ops->ndo_bpf ||
	    !netdev->netdev_ops->ndo_xsk_wakeup) {
		err = -EOPNOTSUPP;
		goto err_unreg_pool;
	}

	bpf.command = XDP_SETUP_XSK_POOL;
	bpf.xsk.pool = pool;
	bpf.xsk.queue_id = queue_id;

	err = netdev->netdev_ops->ndo_bpf(netdev, &bpf);
	if (err)
		goto err_unreg_pool;

	if (!pool->dma_pages) {
		WARN(1, "Driver did not DMA map zero-copy buffers");
		err = -EINVAL;
		goto err_unreg_xsk;
	}
	pool->umem->zc = true;
	return 0;

err_unreg_xsk:
	xp_disable_drv_zc(pool);
err_unreg_pool:
	if (!force_zc)
		err = 0; /* fallback to copy mode */
	if (err) {
		xsk_clear_pool_at_qid(netdev, queue_id);
		dev_put(netdev);
	}
	return err;
}

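/* Bind a pool that shares an already-bound umem, reusing the mode
 * (zero-copy or copy) negotiated for the original binding.
 */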
int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem,
			 struct net_device *dev, u16 queue_id)
{
	u16 flags;

	/* One fill and completion ring required for each queue id. */
	if (!pool->fq || !pool->cq)
		return -EINVAL;

	flags = umem->zc ? XDP_ZEROCOPY : XDP_COPY;
	if (pool->uses_need_wakeup)
		flags |= XDP_USE_NEED_WAKEUP;

	return xp_assign_dev(pool, dev, queue_id, flags);
}

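/* Undo xp_assign_dev(): disable zero-copy in the driver, unregister the
 * pool from its queue id and drop the netdev reference.
 */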
void xp_clear_dev(struct xsk_buff_pool *pool)
{
	if (!pool->netdev)
		return;

	xp_disable_drv_zc(pool);
	xsk_clear_pool_at_qid(pool->netdev, pool->queue_id);
	dev_put(pool->netdev);
	pool->netdev = NULL;
}

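/* Final teardown, run from a workqueue so that the rtnl lock can be
 * taken: detach from the device, destroy the fill and completion rings,
 * release the umem and free the pool.
 */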
static void xp_release_deferred(struct work_struct *work)
{
	struct xsk_buff_pool *pool = container_of(work, struct xsk_buff_pool,
						  work);

	rtnl_lock();
	xp_clear_dev(pool);
	rtnl_unlock();

	if (pool->fq) {
		xskq_destroy(pool->fq);
		pool->fq = NULL;
	}

	if (pool->cq) {
		xskq_destroy(pool->cq);
		pool->cq = NULL;
	}

	xdp_put_umem(pool->umem, false);
	xp_destroy(pool);
}

void xp_get_pool(struct xsk_buff_pool *pool)
{
	refcount_inc(&pool->users);
}

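/* Drop a reference to the pool. The last put schedules the deferred
 * release work and returns true.
 */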
bool xp_put_pool(struct xsk_buff_pool *pool)
{
	if (!pool)
		return false;

	if (refcount_dec_and_test(&pool->users)) {
		INIT_WORK(&pool->work, xp_release_deferred);
		schedule_work(&pool->work);
		return true;
	}

	return false;
}

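/* Find an existing DMA mapping of this umem for the pool's netdev. */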
static struct xsk_dma_map *xp_find_dma_map(struct xsk_buff_pool *pool)
{
	struct xsk_dma_map *dma_map;

	list_for_each_entry(dma_map, &pool->umem->xsk_dma_list, list) {
		if (dma_map->netdev == pool->netdev)
			return dma_map;
	}

	return NULL;
}

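/* Allocate a DMA mapping descriptor for nr_pages umem pages and link it
 * into the umem's list of per-device mappings.
 */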
static struct xsk_dma_map *xp_create_dma_map(struct device *dev, struct net_device *netdev,
					     u32 nr_pages, struct xdp_umem *umem)
{
	struct xsk_dma_map *dma_map;

	dma_map = kzalloc(sizeof(*dma_map), GFP_KERNEL);
	if (!dma_map)
		return NULL;

	dma_map->dma_pages = kvcalloc(nr_pages, sizeof(*dma_map->dma_pages), GFP_KERNEL);
	if (!dma_map->dma_pages) {
		kfree(dma_map);
		return NULL;
	}

	dma_map->netdev = netdev;
	dma_map->dev = dev;
	dma_map->dma_need_sync = false;
	dma_map->dma_pages_cnt = nr_pages;
	refcount_set(&dma_map->users, 1);
	list_add(&dma_map->list, &umem->xsk_dma_list);
	return dma_map;
}

static void xp_destroy_dma_map(struct xsk_dma_map *dma_map)
{
	list_del(&dma_map->list);
	kvfree(dma_map->dma_pages);
	kfree(dma_map);
}

static void __xp_dma_unmap(struct xsk_dma_map *dma_map, unsigned long attrs)
{
	dma_addr_t *dma;
	u32 i;

	for (i = 0; i < dma_map->dma_pages_cnt; i++) {
		dma = &dma_map->dma_pages[i];
		if (*dma) {
			dma_unmap_page_attrs(dma_map->dev, *dma, PAGE_SIZE,
					     DMA_BIDIRECTIONAL, attrs);
			*dma = 0;
		}
	}

	xp_destroy_dma_map(dma_map);
}

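/* Drop the pool's reference to the DMA mapping. The pages are only
 * unmapped once the last pool sharing the mapping has released it.
 */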
void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs)
{
	struct xsk_dma_map *dma_map;

	if (pool->dma_pages_cnt == 0)
		return;

	dma_map = xp_find_dma_map(pool);
	if (!dma_map) {
		WARN(1, "Could not find dma_map for device");
		return;
	}

	if (!refcount_dec_and_test(&dma_map->users))
		return;

	__xp_dma_unmap(dma_map, attrs);
	kvfree(pool->dma_pages);
	pool->dma_pages_cnt = 0;
	pool->dev = NULL;
}
EXPORT_SYMBOL(xp_dma_unmap);

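/* Mark each page whose successor is contiguous in DMA address space so
 * that unaligned buffers spanning a page boundary can be used directly.
 */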
static void xp_check_dma_contiguity(struct xsk_dma_map *dma_map)
{
	u32 i;

	for (i = 0; i < dma_map->dma_pages_cnt - 1; i++) {
		if (dma_map->dma_pages[i] + PAGE_SIZE == dma_map->dma_pages[i + 1])
			dma_map->dma_pages[i] |= XSK_NEXT_PG_CONTIG_MASK;
		else
			dma_map->dma_pages[i] &= ~XSK_NEXT_PG_CONTIG_MASK;
	}
}

static int xp_init_dma_info(struct xsk_buff_pool *pool, struct xsk_dma_map *dma_map)
{
	pool->dma_pages = kvcalloc(dma_map->dma_pages_cnt, sizeof(*pool->dma_pages), GFP_KERNEL);
	if (!pool->dma_pages)
		return -ENOMEM;

	pool->dev = dma_map->dev;
	pool->dma_pages_cnt = dma_map->dma_pages_cnt;
	pool->dma_need_sync = dma_map->dma_need_sync;
	memcpy(pool->dma_pages, dma_map->dma_pages,
	       pool->dma_pages_cnt * sizeof(*pool->dma_pages));

	return 0;
}

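/* DMA map all umem pages for the given device, reusing an existing
 * mapping when the umem is already mapped for this netdev.
 */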
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
	       unsigned long attrs, struct page **pages, u32 nr_pages)
{
	struct xsk_dma_map *dma_map;
	dma_addr_t dma;
	int err;
	u32 i;

	dma_map = xp_find_dma_map(pool);
	if (dma_map) {
		err = xp_init_dma_info(pool, dma_map);
		if (err)
			return err;

		refcount_inc(&dma_map->users);
		return 0;
	}

	dma_map = xp_create_dma_map(dev, pool->netdev, nr_pages, pool->umem);
	if (!dma_map)
		return -ENOMEM;

	for (i = 0; i < dma_map->dma_pages_cnt; i++) {
		dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
					 DMA_BIDIRECTIONAL, attrs);
		if (dma_mapping_error(dev, dma)) {
			__xp_dma_unmap(dma_map, attrs);
			return -ENOMEM;
		}
		if (dma_need_sync(dev, dma))
			dma_map->dma_need_sync = true;
		dma_map->dma_pages[i] = dma;
	}

	if (pool->unaligned)
		xp_check_dma_contiguity(dma_map);
	else
		for (i = 0; i < pool->heads_cnt; i++) {
			struct xdp_buff_xsk *xskb = &pool->heads[i];

			xp_init_xskb_dma(xskb, pool, dma_map->dma_pages, xskb->orig_addr);
		}

	err = xp_init_dma_info(pool, dma_map);
	if (err) {
		__xp_dma_unmap(dma_map, attrs);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL(xp_dma_map);

static bool xp_addr_crosses_non_contig_pg(struct xsk_buff_pool *pool,
					  u64 addr)
{
	return xp_desc_crosses_non_contig_pg(pool, addr, pool->chunk_size);
}

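/* Validate a fill ring address in unaligned mode: the whole chunk must
 * lie inside the umem and must not straddle non-contiguous pages.
 */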
static bool xp_check_unaligned(struct xsk_buff_pool *pool, u64 *addr)
{
	*addr = xp_unaligned_extract_addr(*addr);
	if (*addr >= pool->addrs_cnt ||
	    *addr + pool->chunk_size > pool->addrs_cnt ||
	    xp_addr_crosses_non_contig_pg(pool, *addr))
		return false;
	return true;
}

static bool xp_check_aligned(struct xsk_buff_pool *pool, u64 *addr)
{
	*addr = xp_aligned_extract_addr(pool, *addr);
	return *addr < pool->addrs_cnt;
}

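/* Take one address from the fill ring and turn it into an initialized
 * xdp_buff_xsk, skipping over any invalid descriptors.
 */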
static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool)
{
	struct xdp_buff_xsk *xskb;
	u64 addr;
	bool ok;

	if (pool->free_heads_cnt == 0)
		return NULL;

	for (;;) {
		if (!xskq_cons_peek_addr_unchecked(pool->fq, &addr)) {
			pool->fq->queue_empty_descs++;
			return NULL;
		}

		ok = pool->unaligned ? xp_check_unaligned(pool, &addr) :
		     xp_check_aligned(pool, &addr);
		if (!ok) {
			pool->fq->invalid_descs++;
			xskq_cons_release(pool->fq);
			continue;
		}
		break;
	}

	if (pool->unaligned) {
		xskb = pool->free_heads[--pool->free_heads_cnt];
		xp_init_xskb_addr(xskb, pool, addr);
		if (pool->dma_pages_cnt)
			xp_init_xskb_dma(xskb, pool, pool->dma_pages, addr);
	} else {
		xskb = &pool->heads[xp_aligned_extract_idx(pool, addr)];
	}

	xskq_cons_release(pool->fq);
	return xskb;
}

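/* Allocate a single buffer, preferring the recycled free list over the
 * fill ring, and set up its data pointers. Syncs the buffer for device
 * DMA when the device requires it.
 */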
struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
{
	struct xdp_buff_xsk *xskb;

	if (!pool->free_list_cnt) {
		xskb = __xp_alloc(pool);
		if (!xskb)
			return NULL;
	} else {
		pool->free_list_cnt--;
		xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk,
					free_list_node);
		list_del_init(&xskb->free_list_node);
	}

	xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM;
	xskb->xdp.data_meta = xskb->xdp.data;

	if (pool->dma_need_sync) {
		dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,
						 pool->frame_len,
						 DMA_BIDIRECTIONAL);
	}
	return &xskb->xdp;
}
EXPORT_SYMBOL(xp_alloc);

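/* Batched variant of __xp_alloc(): consume up to max addresses from the
 * fill ring and return the number of buffers actually produced.
 */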
static u32 xp_alloc_new_from_fq(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	u32 i, cached_cons, nb_entries;

	if (max > pool->free_heads_cnt)
		max = pool->free_heads_cnt;
	max = xskq_cons_nb_entries(pool->fq, max);

	cached_cons = pool->fq->cached_cons;
	nb_entries = max;
	i = max;
	while (i--) {
		struct xdp_buff_xsk *xskb;
		u64 addr;
		bool ok;

		__xskq_cons_read_addr_unchecked(pool->fq, cached_cons++, &addr);

		ok = pool->unaligned ? xp_check_unaligned(pool, &addr) :
			xp_check_aligned(pool, &addr);
		if (unlikely(!ok)) {
			pool->fq->invalid_descs++;
			nb_entries--;
			continue;
		}

		if (pool->unaligned) {
			xskb = pool->free_heads[--pool->free_heads_cnt];
			xp_init_xskb_addr(xskb, pool, addr);
			if (pool->dma_pages_cnt)
				xp_init_xskb_dma(xskb, pool, pool->dma_pages, addr);
		} else {
			xskb = &pool->heads[xp_aligned_extract_idx(pool, addr)];
		}

		*xdp = &xskb->xdp;
		xdp++;
	}

	xskq_cons_release_n(pool->fq, max);
	return nb_entries;
}

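/* Grab up to nb_entries buffers from the recycled free list. */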
static u32 xp_alloc_reused(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 nb_entries)
{
	struct xdp_buff_xsk *xskb;
	u32 i;

	nb_entries = min_t(u32, nb_entries, pool->free_list_cnt);

	i = nb_entries;
	while (i--) {
		xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk, free_list_node);
		list_del_init(&xskb->free_list_node);

		*xdp = &xskb->xdp;
		xdp++;
	}
	pool->free_list_cnt -= nb_entries;

	return nb_entries;
}

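/* Batch allocation entry point: fall back to the single-buffer slow path
 * when DMA syncing is needed, otherwise serve from the free list first
 * and top up from the fill ring.
 */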
u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	u32 nb_entries1 = 0, nb_entries2;

	if (unlikely(pool->dma_need_sync)) {
		struct xdp_buff *buff;

		/* Slow path */
		buff = xp_alloc(pool);
		if (buff)
			*xdp = buff;
		return !!buff;
	}

	if (unlikely(pool->free_list_cnt)) {
		nb_entries1 = xp_alloc_reused(pool, xdp, max);
		if (nb_entries1 == max)
			return nb_entries1;

		max -= nb_entries1;
		xdp += nb_entries1;
	}

	nb_entries2 = xp_alloc_new_from_fq(pool, xdp, max);
	if (!nb_entries2)
		pool->fq->queue_empty_descs++;

	return nb_entries1 + nb_entries2;
}
EXPORT_SYMBOL(xp_alloc_batch);

bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	if (pool->free_list_cnt >= count)
		return true;
	return xskq_cons_has_entries(pool->fq, count - pool->free_list_cnt);
}
EXPORT_SYMBOL(xp_can_alloc);

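/* Return a buffer to the pool's free list unless it is already there. */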
void xp_free(struct xdp_buff_xsk *xskb)
{
	if (!list_empty(&xskb->free_list_node))
		return;

	xskb->pool->free_list_cnt++;
	list_add(&xskb->free_list_node, &xskb->pool->free_list);
}
EXPORT_SYMBOL(xp_free);

void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	addr = pool->unaligned ? xp_unaligned_add_offset_to_addr(addr) : addr;
	return pool->addrs + addr;
}
EXPORT_SYMBOL(xp_raw_get_data);

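/* Translate a raw umem address into the DMA address the device should
 * use, masking off the contiguity flag kept in the per-page DMA table.
 */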
dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr)
{
	addr = pool->unaligned ? xp_unaligned_add_offset_to_addr(addr) : addr;
	return (pool->dma_pages[addr >> PAGE_SHIFT] &
		~XSK_NEXT_PG_CONTIG_MASK) +
		(addr & ~PAGE_MASK);
}
EXPORT_SYMBOL(xp_raw_get_dma);

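/* Slow-path sync helpers for devices whose DMA is not cache coherent. */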
void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb)
{
	dma_sync_single_range_for_cpu(xskb->pool->dev, xskb->dma, 0,
				      xskb->pool->frame_len, DMA_BIDIRECTIONAL);
}
EXPORT_SYMBOL(xp_dma_sync_for_cpu_slow);

void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
				 size_t size)
{
	dma_sync_single_range_for_device(pool->dev, dma, 0,
					 size, DMA_BIDIRECTIONAL);
}
EXPORT_SYMBOL(xp_dma_sync_for_device_slow);