xdp_umem.c, unified diff: f734607e819b951bae3b436b026ec672082e9241 (before) vs 84c6b86875e01a08a0daa6fdd4a01b36bf0bf0b2 (after)
 // SPDX-License-Identifier: GPL-2.0
 /* XDP user-space packet buffer
  * Copyright(c) 2018 Intel Corporation.
  */

 #include <linux/init.h>
 #include <linux/sched/mm.h>
 #include <linux/sched/signal.h>
 #include <linux/sched/task.h>
 #include <linux/uaccess.h>
 #include <linux/slab.h>
 #include <linux/bpf.h>
 #include <linux/mm.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>

 #include "xdp_umem.h"
 #include "xsk_queue.h"

 #define XDP_UMEM_MIN_CHUNK_SIZE 2048

 void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
 {
 --- 13 unchanged lines hidden ---
 	list_del_rcu(&xs->list);
 	spin_unlock_irqrestore(&umem->xsk_list_lock, flags);

 	if (umem->zc)
 		synchronize_net();
 	}
 }

+int xdp_umem_query(struct net_device *dev, u16 queue_id)
+{
+	struct netdev_bpf bpf;
+
+	ASSERT_RTNL();
+
+	memset(&bpf, 0, sizeof(bpf));
+	bpf.command = XDP_QUERY_XSK_UMEM;
+	bpf.xsk.queue_id = queue_id;
+
+	if (!dev->netdev_ops->ndo_bpf)
+		return 0;
+	return dev->netdev_ops->ndo_bpf(dev, &bpf) ?: !!bpf.xsk.umem;
+}
+
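Note (illustrative, not part of the diff): xdp_umem_query() uses the GNU C "x ?: y" extension, so it returns the driver's negative error unchanged, otherwise !!bpf.xsk.umem, i.e. 1 if a umem is already bound to the queue and 0 if the queue is free. A minimal sketch of a driver-side ndo_bpf handler that such a query could reach is shown below; struct my_priv and its fields are hypothetical names invented for the sketch, not taken from any in-tree driver.

/* Illustrative only: a hypothetical driver answering XDP_QUERY_XSK_UMEM.
 * struct my_priv, num_queues and xsk_umems are made-up names. */
#include <linux/netdevice.h>

struct my_priv {
	unsigned int num_queues;
	struct xdp_umem **xsk_umems;	/* umem bound per queue, or NULL */
};

static int my_ndo_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	struct my_priv *priv = netdev_priv(dev);

	switch (bpf->command) {
	case XDP_QUERY_XSK_UMEM:
		if (bpf->xsk.queue_id >= priv->num_queues)
			return -EINVAL;
		/* xdp_umem_query() turns a non-NULL answer into "busy". */
		bpf->xsk.umem = priv->xsk_umems[bpf->xsk.queue_id];
		return 0;
	case XDP_SETUP_XSK_UMEM:
		/* Bind/unbind bpf->xsk.umem to the queue; omitted here. */
		return -EOPNOTSUPP;
	default:
		return -EINVAL;
	}
}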
 int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
 			u32 queue_id, u16 flags)
 {
 	bool force_zc, force_copy;
 	struct netdev_bpf bpf;
 	int err;

 	force_zc = flags & XDP_ZEROCOPY;
 --- 6 unchanged lines hidden ---
 		return 0;

 	if (!dev->netdev_ops->ndo_bpf || !dev->netdev_ops->ndo_xsk_async_xmit)
 		return force_zc ? -ENOTSUPP : 0; /* fail or fallback */

 	bpf.command = XDP_QUERY_XSK_UMEM;

 	rtnl_lock();
-	err = dev->netdev_ops->ndo_bpf(dev, &bpf);
-	rtnl_unlock();
+	err = xdp_umem_query(dev, queue_id);
+	if (err) {
+		err = err < 0 ? -ENOTSUPP : -EBUSY;
+		goto err_rtnl_unlock;
+	}

-	if (err)
-		return force_zc ? -ENOTSUPP : 0;
-
 	bpf.command = XDP_SETUP_XSK_UMEM;
 	bpf.xsk.umem = umem;
 	bpf.xsk.queue_id = queue_id;

-	rtnl_lock();
 	err = dev->netdev_ops->ndo_bpf(dev, &bpf);
+	if (err)
+		goto err_rtnl_unlock;
 	rtnl_unlock();

-	if (err)
-		return force_zc ? err : 0; /* fail or fallback */
-
 	dev_hold(dev);
 	umem->dev = dev;
 	umem->queue_id = queue_id;
 	umem->zc = true;
 	return 0;
+
+err_rtnl_unlock:
+	rtnl_unlock();
+	return force_zc ? err : 0; /* fail or fallback */
 }

 static void xdp_umem_clear_dev(struct xdp_umem *umem)
 {
 	struct netdev_bpf bpf;
 	int err;

 	if (umem->dev) {
 --- 263 unchanged lines hidden ---
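Note (illustrative, not part of the diff): the XDP_ZEROCOPY and XDP_COPY flags that xdp_umem_assign_dev() checks come from the sxdp_flags field user space supplies when binding an AF_XDP socket. The following user-space sketch shows that call under stated assumptions: it relies on the <linux/if_xdp.h> UAPI of this kernel generation, "eth0" and queue 0 are placeholders, and the UMEM registration plus fill/completion ring setup that a real program must do before bind() can succeed are omitted.

/* User-space sketch: request zero-copy mode when binding an AF_XDP socket.
 * UMEM and ring setup (required in practice before bind()) are left out. */
#include <linux/if_xdp.h>
#include <net/if.h>
#include <sys/socket.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#ifndef AF_XDP
#define AF_XDP 44	/* older libcs may not define it yet */
#endif

int main(void)
{
	struct sockaddr_xdp sxdp;
	int fd;

	fd = socket(AF_XDP, SOCK_RAW, 0);
	if (fd < 0) {
		perror("socket(AF_XDP)");
		return 1;
	}

	memset(&sxdp, 0, sizeof(sxdp));
	sxdp.sxdp_family = AF_XDP;
	sxdp.sxdp_ifindex = if_nametoindex("eth0");	/* placeholder interface */
	sxdp.sxdp_queue_id = 0;
	/* XDP_ZEROCOPY: fail instead of silently falling back to copy mode;
	 * XDP_COPY would force the copy fallback instead. */
	sxdp.sxdp_flags = XDP_ZEROCOPY;

	if (bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp)) < 0)
		perror("bind(AF_XDP)");

	close(fd);
	return 0;
}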