/* SPDX-License-Identifier: GPL-2.0 */
/* AF_XDP internal functions
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_H
#define _LINUX_XDP_SOCK_H

#include <linux/workqueue.h>
#include <linux/if_xdp.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <net/sock.h>

struct net_device;
struct xsk_queue;
/* Per-page bookkeeping for a UMEM: the kernel mapping of the page and,
 * for zero-copy mode, its DMA address.
 */
struct xdp_umem_page {
	void *addr;
	dma_addr_t dma;
};

/* Addresses the driver has taken from the FILL queue but wants to hand
 * out again. Treated as a LIFO stack layered on top of the FILL queue
 * by the *_rq helpers below.
 */
struct xdp_umem_fq_reuse {
	u32 nentries;	/* capacity of handles[] */
	u32 length;	/* number of addresses currently stacked */
	u64 handles[];
};
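
/* Example: replacing the reuse queue when a ring is reconfigured. The
 * prepare/swap/free functions are declared further down; this is only a
 * sketch of the intended lifecycle, with error handling omitted and
 * "ring_size" a hypothetical driver parameter. xsk_reuseq_swap()
 * installs the new queue and returns the previous one for freeing:
 *
 *	struct xdp_umem_fq_reuse *rq, *old;
 *
 *	rq = xsk_reuseq_prepare(ring_size);
 *	old = xsk_reuseq_swap(umem, rq);
 *	xsk_reuseq_free(old);
 */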

struct xdp_umem {
	struct xsk_queue *fq;		/* FILL queue */
	struct xsk_queue *cq;		/* COMPLETION queue */
	struct xdp_umem_page *pages;	/* per-page address bookkeeping */
	u64 chunk_mask;			/* masks an address to its chunk start */
	u64 size;			/* total size of the area in bytes */
	u32 headroom;
	u32 chunk_size_nohr;		/* chunk size minus headroom */
	struct user_struct *user;
	struct pid *pid;
	unsigned long address;		/* userspace start address */
	refcount_t users;
	struct work_struct work;
	struct page **pgs;		/* pinned pages backing the area */
	u32 npgs;
	int id;
	struct net_device *dev;
	struct xdp_umem_fq_reuse *fq_reuse;
	u16 queue_id;
	bool zc;			/* zero-copy mode enabled */
	spinlock_t xsk_list_lock;	/* protects xsk_list */
	struct list_head xsk_list;	/* sockets bound to this umem */
};

struct xdp_sock {
	/* struct sock must be the first member of struct xdp_sock */
	struct sock sk;
	struct xsk_queue *rx;
	struct net_device *dev;
	struct xdp_umem *umem;
	struct list_head flush_node;
	u16 queue_id;
	struct xsk_queue *tx ____cacheline_aligned_in_smp;
	struct list_head list;
	bool zc;
	/* Protects multiple processes in the control path */
	struct mutex mutex;
	/* Mutual exclusion of NAPI TX thread and sendmsg error paths
	 * in the SKB destructor callback.
	 */
	spinlock_t tx_completion_lock;
	u64 rx_dropped;
};

struct xdp_buff;
#ifdef CONFIG_XDP_SOCKETS
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
void xsk_flush(struct xdp_sock *xs);
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs);
/* Used from netdev driver */
u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
void xsk_umem_discard_addr(struct xdp_umem *umem);
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len);
void xsk_umem_consume_tx_done(struct xdp_umem *umem);
struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries);
struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
					  struct xdp_umem_fq_reuse *newq);
void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq);
struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, u16 queue_id);

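/* Example: the TX half of a hypothetical zero-copy driver. Descriptors
 * are drained from the socket's TX ring with xsk_umem_consume_tx(),
 * posted to hardware, then acknowledged on the COMPLETION queue once
 * sent. A sketch only; mydrv_tx_ring_put() and "completed" are
 * illustrative names, not part of this API:
 *
 *	dma_addr_t dma;
 *	u32 len;
 *
 *	while (xsk_umem_consume_tx(umem, &dma, &len))
 *		mydrv_tx_ring_put(dma, len);
 *	xsk_umem_consume_tx_done(umem);
 *
 * Later, when hardware reports that "completed" frames went out:
 *
 *	xsk_umem_complete_tx(umem, completed);
 */
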
/* Translate a UMEM address into a kernel virtual address for CPU access */
static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{
	return umem->pages[addr >> PAGE_SHIFT].addr + (addr & (PAGE_SIZE - 1));
}

/* Translate a UMEM address into a DMA address for device access */
static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{
	return umem->pages[addr >> PAGE_SHIFT].dma + (addr & (PAGE_SIZE - 1));
}

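/* Example: both helpers split a descriptor address into a page index
 * (addr >> PAGE_SHIFT) and an in-page offset (addr & (PAGE_SIZE - 1)).
 * A sketch of using the pair on one buffer, where "addr" comes from a
 * FILL queue entry:
 *
 *	char *data = xdp_umem_get_data(umem, addr);	(CPU access)
 *	dma_addr_t dma = xdp_umem_get_dma(umem, addr);	(device access)
 */
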
/* Reuse-queue aware version of FILL queue helpers */
static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (!rq->length)
		return xsk_umem_peek_addr(umem, addr);

	*addr = rq->handles[rq->length - 1];
	return addr;
}

static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (!rq->length)
		xsk_umem_discard_addr(umem);
	else
		rq->length--;
}

/* Push an address back onto the reuse stack. There is no bounds check;
 * the caller must ensure the stack never grows beyond rq->nentries.
 */
static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	rq->handles[rq->length++] = addr;
}
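
/* Example: a hypothetical RX refill loop built from the helpers above.
 * Addresses come from the reuse stack first, then from the FILL queue.
 * A buffer that cannot be posted to hardware is pushed back with
 * xsk_umem_fq_reuse() so it is not lost. Sketch only;
 * mydrv_rx_ring_put() is an illustrative name, not part of this API:
 *
 *	u64 addr;
 *
 *	while (xsk_umem_peek_addr_rq(umem, &addr)) {
 *		xsk_umem_discard_addr_rq(umem);
 *		if (mydrv_rx_ring_put(umem, addr) < 0) {
 *			xsk_umem_fq_reuse(umem, addr);
 *			break;
 *		}
 *	}
 */
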
#else
static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline void xsk_flush(struct xdp_sock *xs)
{
}

static inline bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
	return false;
}

static inline u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{
	return NULL;
}

static inline void xsk_umem_discard_addr(struct xdp_umem *umem)
{
}

static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
}

static inline bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma,
				       u32 *len)
{
	return false;
}

static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
}

static inline struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
{
	return NULL;
}

static inline struct xdp_umem_fq_reuse *xsk_reuseq_swap(
	struct xdp_umem *umem,
	struct xdp_umem_fq_reuse *newq)
{
	return NULL;
}

static inline void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
{
}

static inline struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
						     u16 queue_id)
{
	return NULL;
}

static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{
	return NULL;
}

static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{
	return 0;
}

static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
	return NULL;
}

static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
{
}

static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
{
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_H */