// SPDX-License-Identifier: GPL-2.0-only
/* net/core/xdp.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <linux/bug.h>
#include <net/page_pool.h>

#include <net/xdp.h>
#include <net/xdp_priv.h> /* struct xdp_mem_allocator */
#include <trace/events/xdp.h>
#include <net/xdp_sock_drv.h>

#define REG_STATE_NEW		0x0
#define REG_STATE_REGISTERED	0x1
#define REG_STATE_UNREGISTERED	0x2
#define REG_STATE_UNUSED	0x3

static DEFINE_IDA(mem_id_pool);
static DEFINE_MUTEX(mem_id_lock);
#define MEM_ID_MAX 0xFFFE
#define MEM_ID_MIN 1
static int mem_id_next = MEM_ID_MIN;

static bool mem_id_init; /* false */
static struct rhashtable *mem_id_ht;

static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed)
{
	const u32 *k = data;
	const u32 key = *k;

	BUILD_BUG_ON(sizeof_field(struct xdp_mem_allocator, mem.id)
		     != sizeof(u32));

	/* Use cyclic increasing ID as direct hash key */
	return key;
}

static int xdp_mem_id_cmp(struct rhashtable_compare_arg *arg,
			  const void *ptr)
{
	const struct xdp_mem_allocator *xa = ptr;
	u32 mem_id = *(u32 *)arg->key;

	return xa->mem.id != mem_id;
}

static const struct rhashtable_params mem_id_rht_params = {
	.nelem_hint = 64,
	.head_offset = offsetof(struct xdp_mem_allocator, node),
	.key_offset  = offsetof(struct xdp_mem_allocator, mem.id),
	.key_len = sizeof_field(struct xdp_mem_allocator, mem.id),
	.max_size = MEM_ID_MAX,
	.min_size = 8,
	.automatic_shrinking = true,
	.hashfn    = xdp_mem_id_hashfn,
	.obj_cmpfn = xdp_mem_id_cmp,
};

static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
{
	struct xdp_mem_allocator *xa;

	xa = container_of(rcu, struct xdp_mem_allocator, rcu);

	/* Allow this ID to be reused */
	ida_simple_remove(&mem_id_pool, xa->mem.id);

	kfree(xa);
}

static void mem_xa_remove(struct xdp_mem_allocator *xa)
{
	trace_mem_disconnect(xa);

	if (!rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
		call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);
}

static void mem_allocator_disconnect(void *allocator)
{
	struct xdp_mem_allocator *xa;
	struct rhashtable_iter iter;

	mutex_lock(&mem_id_lock);

	rhashtable_walk_enter(mem_id_ht, &iter);
	do {
		rhashtable_walk_start(&iter);

		while ((xa = rhashtable_walk_next(&iter)) && !IS_ERR(xa)) {
			if (xa->allocator == allocator)
				mem_xa_remove(xa);
		}

		rhashtable_walk_stop(&iter);

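		/* A concurrent resize makes rhashtable_walk_next() return
		 * ERR_PTR(-EAGAIN); restart the walk in that case.
		 */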
	} while (xa == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);

	mutex_unlock(&mem_id_lock);
}

void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
{
	struct xdp_mem_allocator *xa;
	int id = xdp_rxq->mem.id;

	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return;
	}

	if (id == 0)
		return;

	if (xdp_rxq->mem.type == MEM_TYPE_PAGE_POOL) {
		rcu_read_lock();
		xa = rhashtable_lookup(mem_id_ht, &id, mem_id_rht_params);
		page_pool_destroy(xa->page_pool);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg_mem_model);

void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
{
	/* Simplify driver cleanup code paths, allow unreg "unused" */
	if (xdp_rxq->reg_state == REG_STATE_UNUSED)
		return;

	WARN(!(xdp_rxq->reg_state == REG_STATE_REGISTERED), "Driver BUG");

	xdp_rxq_info_unreg_mem_model(xdp_rxq);

	xdp_rxq->reg_state = REG_STATE_UNREGISTERED;
	xdp_rxq->dev = NULL;

	/* Reset mem info to defaults */
	xdp_rxq->mem.id = 0;
	xdp_rxq->mem.type = 0;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg);

static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq)
{
	memset(xdp_rxq, 0, sizeof(*xdp_rxq));
}

/* Returns 0 on success, negative on failure */
int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
		     struct net_device *dev, u32 queue_index)
{
	if (xdp_rxq->reg_state == REG_STATE_UNUSED) {
		WARN(1, "Driver promised not to register this");
		return -EINVAL;
	}

	if (xdp_rxq->reg_state == REG_STATE_REGISTERED) {
		WARN(1, "Missing unregister, handled but fix driver");
		xdp_rxq_info_unreg(xdp_rxq);
	}

	if (!dev) {
		WARN(1, "Missing net_device from driver");
		return -ENODEV;
	}

	/* State either UNREGISTERED or NEW */
	xdp_rxq_info_init(xdp_rxq);
	xdp_rxq->dev = dev;
	xdp_rxq->queue_index = queue_index;

	xdp_rxq->reg_state = REG_STATE_REGISTERED;
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg);
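
/* Illustrative sketch, not part of this file: a driver registers each
 * RX queue once at setup time and unregisters it on teardown.  The
 * names rxq, netdev and qid below are hypothetical:
 *
 *	err = xdp_rxq_info_reg(&rxq->xdp_rxq, netdev, qid);
 *	if (err)
 *		return err;
 *	...
 *	xdp_rxq_info_unreg(&rxq->xdp_rxq);	// teardown path
 */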

void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq)
{
	xdp_rxq->reg_state = REG_STATE_UNUSED;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unused);

bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq)
{
	return (xdp_rxq->reg_state == REG_STATE_REGISTERED);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_is_reg);

static int __mem_id_init_hash_table(void)
{
	struct rhashtable *rht;
	int ret;

	if (unlikely(mem_id_init))
		return 0;

	rht = kzalloc(sizeof(*rht), GFP_KERNEL);
	if (!rht)
		return -ENOMEM;

	ret = rhashtable_init(rht, &mem_id_rht_params);
	if (ret < 0) {
		kfree(rht);
		return ret;
	}
	mem_id_ht = rht;
	smp_mb(); /* mutex lock should provide enough pairing */
	mem_id_init = true;

	return 0;
}

/* Allocate a cyclic ID that maps to allocator pointer.
 * See: https://www.kernel.org/doc/html/latest/core-api/idr.html
 *
 * Caller must lock mem_id_lock.
 */
static int __mem_id_cyclic_get(gfp_t gfp)
{
	int retries = 1;
	int id;

again:
	id = ida_simple_get(&mem_id_pool, mem_id_next, MEM_ID_MAX, gfp);
	if (id < 0) {
		if (id == -ENOSPC) {
			/* Cyclic allocator, reset next id */
			if (retries--) {
				mem_id_next = MEM_ID_MIN;
				goto again;
			}
		}
		return id; /* errno */
	}
	mem_id_next = id + 1;

	return id;
}
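
/* Example of the cyclic behaviour above: once the ID space at or above
 * mem_id_next is exhausted, ida_simple_get() returns -ENOSPC and the
 * single retry rescans from MEM_ID_MIN, picking up IDs that were
 * released via ida_simple_remove() in __xdp_mem_allocator_rcu_free().
 */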

static bool __is_supported_mem_type(enum xdp_mem_type type)
{
	if (type == MEM_TYPE_PAGE_POOL)
		return is_page_pool_compiled_in();

	if (type >= MEM_TYPE_MAX)
		return false;

	return true;
}

int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
			       enum xdp_mem_type type, void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;
	gfp_t gfp = GFP_KERNEL;
	int id, errno, ret;
	void *ptr;

	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return -EFAULT;
	}

	if (!__is_supported_mem_type(type))
		return -EOPNOTSUPP;

	xdp_rxq->mem.type = type;

	if (!allocator) {
		if (type == MEM_TYPE_PAGE_POOL)
			return -EINVAL; /* Setup time check page_pool req */
		return 0;
	}

	/* Delay init of rhashtable to save memory if feature isn't used */
	if (!mem_id_init) {
		mutex_lock(&mem_id_lock);
		ret = __mem_id_init_hash_table();
		mutex_unlock(&mem_id_lock);
		if (ret < 0) {
			WARN_ON(1);
			return ret;
		}
	}

	xdp_alloc = kzalloc(sizeof(*xdp_alloc), gfp);
	if (!xdp_alloc)
		return -ENOMEM;

	mutex_lock(&mem_id_lock);
	id = __mem_id_cyclic_get(gfp);
	if (id < 0) {
		errno = id;
		goto err;
	}
	xdp_rxq->mem.id = id;
	xdp_alloc->mem  = xdp_rxq->mem;
	xdp_alloc->allocator = allocator;

	/* Insert allocator into ID lookup table */
	ptr = rhashtable_insert_slow(mem_id_ht, &id, &xdp_alloc->node);
	if (IS_ERR(ptr)) {
		ida_simple_remove(&mem_id_pool, xdp_rxq->mem.id);
		xdp_rxq->mem.id = 0;
		errno = PTR_ERR(ptr);
		goto err;
	}

	if (type == MEM_TYPE_PAGE_POOL)
		page_pool_use_xdp_mem(allocator, mem_allocator_disconnect);

	mutex_unlock(&mem_id_lock);

	trace_mem_connect(xdp_alloc, xdp_rxq);
	return 0;
err:
	mutex_unlock(&mem_id_lock);
	kfree(xdp_alloc);
	return errno;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);
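
/* Illustrative sketch with hypothetical driver names (priv, rxq):
 * wiring a page_pool into an already-registered rxq.  Assumes the
 * page_pool API of this kernel, where page_pool_create() returns an
 * ERR_PTR on failure:
 *
 *	struct page_pool_params pp_params = {
 *		.order		= 0,
 *		.pool_size	= 256,
 *		.nid		= NUMA_NO_NODE,
 *		.dev		= priv->dev,
 *	};
 *	struct page_pool *pool = page_pool_create(&pp_params);
 *
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 *	err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
 *					 MEM_TYPE_PAGE_POOL, pool);
 *	if (err)
 *		page_pool_destroy(pool);
 */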

/* XDP RX runs under NAPI protection, and in different delivery error
 * scenarios (e.g. queue full), it is possible to return the xdp_frame
 * while still leveraging this protection.  The @napi_direct boolean
 * is used for those call sites, allowing faster recycling of
 * xdp_frames/pages in those cases.  This path is never used by the
 * MEM_TYPE_XSK_BUFF_POOL memory type, so it's explicitly not part of
 * the switch-statement.
 */
static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct)
{
	struct xdp_mem_allocator *xa;
	struct page *page;

	switch (mem->type) {
	case MEM_TYPE_PAGE_POOL:
		rcu_read_lock();
		/* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
		page = virt_to_head_page(data);
		napi_direct &= !xdp_return_frame_no_direct();
		page_pool_put_full_page(xa->page_pool, page, napi_direct);
		rcu_read_unlock();
		break;
	case MEM_TYPE_PAGE_SHARED:
		page_frag_free(data);
		break;
	case MEM_TYPE_PAGE_ORDER0:
		page = virt_to_page(data); /* Assumes order0 page */
		put_page(page);
		break;
	default:
		/* Not possible, checked in xdp_rxq_info_reg_mem_model() */
		WARN(1, "Incorrect XDP memory type (%d) usage", mem->type);
		break;
	}
}

void xdp_return_frame(struct xdp_frame *xdpf)
{
	__xdp_return(xdpf->data, &xdpf->mem, false);
}
EXPORT_SYMBOL_GPL(xdp_return_frame);

void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
{
	__xdp_return(xdpf->data, &xdpf->mem, true);
}
EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);

void xdp_return_buff(struct xdp_buff *xdp)
{
	__xdp_return(xdp->data, &xdp->rxq->mem, true);
}
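
/* Choosing between the two frame-return variants above: use
 * xdp_return_frame() from any context; xdp_return_frame_rx_napi() is
 * only safe when the caller still runs under the RX NAPI/softirq
 * protection described above, e.g. an ndo_xdp_xmit() drop while still
 * inside the redirecting CPU's napi poll loop.
 */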

/* Only called for MEM_TYPE_PAGE_POOL, see xdp.h */
void __xdp_release_frame(void *data, struct xdp_mem_info *mem)
{
	struct xdp_mem_allocator *xa;
	struct page *page;

	rcu_read_lock();
	xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
	page = virt_to_head_page(data);
	if (xa)
		page_pool_release_page(xa->page_pool, page);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(__xdp_release_frame);

int xdp_attachment_query(struct xdp_attachment_info *info,
			 struct netdev_bpf *bpf)
{
	bpf->prog_id = info->prog ? info->prog->aux->id : 0;
	bpf->prog_flags = info->prog ? info->flags : 0;
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_attachment_query);

bool xdp_attachment_flags_ok(struct xdp_attachment_info *info,
			     struct netdev_bpf *bpf)
{
	if (info->prog && (bpf->flags ^ info->flags) & XDP_FLAGS_MODES) {
		NL_SET_ERR_MSG(bpf->extack,
			       "program loaded with different flags");
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(xdp_attachment_flags_ok);

void xdp_attachment_setup(struct xdp_attachment_info *info,
			  struct netdev_bpf *bpf)
{
	if (info->prog)
		bpf_prog_put(info->prog);
	info->prog = bpf->prog;
	info->flags = bpf->flags;
}
EXPORT_SYMBOL_GPL(xdp_attachment_setup);
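
/* Illustrative sketch of how a driver's ndo_bpf() handler might use
 * the attachment helpers above (priv->xdp_info is hypothetical):
 *
 *	case XDP_SETUP_PROG:
 *		if (!xdp_attachment_flags_ok(&priv->xdp_info, bpf))
 *			return -EBUSY;
 *		...	// swap the program in the datapath here
 *		xdp_attachment_setup(&priv->xdp_info, bpf);
 *		return 0;
 */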

struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp)
{
	unsigned int metasize, totsize;
	void *addr, *data_to_copy;
	struct xdp_frame *xdpf;
	struct page *page;

	/* Clone into a MEM_TYPE_PAGE_ORDER0 xdp_frame. */
	metasize = xdp_data_meta_unsupported(xdp) ? 0 :
		   xdp->data - xdp->data_meta;
	totsize = xdp->data_end - xdp->data + metasize;

	if (sizeof(*xdpf) + totsize > PAGE_SIZE)
		return NULL;

	page = dev_alloc_page();
	if (!page)
		return NULL;

	addr = page_to_virt(page);
	xdpf = addr;
	memset(xdpf, 0, sizeof(*xdpf));

	addr += sizeof(*xdpf);
	data_to_copy = metasize ? xdp->data_meta : xdp->data;
	memcpy(addr, data_to_copy, totsize);

	xdpf->data = addr + metasize;
	xdpf->len = totsize - metasize;
	xdpf->headroom = 0;
	xdpf->metasize = metasize;
	xdpf->frame_sz = PAGE_SIZE;
	xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;

	xsk_buff_free(xdp);
	return xdpf;
}
EXPORT_SYMBOL_GPL(xdp_convert_zc_to_xdp_frame);
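
/* Worked example of the size check above, assuming PAGE_SIZE is 4096:
 * a zero-copy buff with 32 bytes of metadata and 1500 bytes of packet
 * data needs sizeof(struct xdp_frame) + 1532 bytes and converts fine;
 * once metadata + data exceed PAGE_SIZE - sizeof(struct xdp_frame),
 * the function returns NULL and the caller must drop the frame.
 */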

/* Used by XDP_WARN macro, to avoid inlining WARN() in fast-path */
void xdp_warn(const char *msg, const char *func, const int line)
{
	WARN(1, "XDP_WARN: %s(line:%d): %s\n", func, line, msg);
}
EXPORT_SYMBOL_GPL(xdp_warn);