/* SPDX-License-Identifier: GPL-2.0 */
/* net/core/flow_offload.c (revision 1a340825) */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/act_api.h>
#include <net/flow_offload.h>
#include <linux/rtnetlink.h>
#include <linux/mutex.h>
#include <linux/rhashtable.h>

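/**
 * flow_rule_alloc - allocate a flow rule with room for @num_actions actions
 * @num_actions: number of entries to reserve in the action array
 *
 * Returns a zeroed rule with every action's hw_stats preset to
 * FLOW_ACTION_HW_STATS_DONT_CARE, or NULL on allocation failure.
 */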
struct flow_rule *flow_rule_alloc(unsigned int num_actions)
{
	struct flow_rule *rule;
	int i;

	rule = kzalloc(struct_size(rule, action.entries, num_actions),
		       GFP_KERNEL);
	if (!rule)
		return NULL;

	rule->action.num_entries = num_actions;
	/* Pre-fill each action hw_stats with DONT_CARE.
	 * Caller can override this if it wants stats for a given action.
	 */
	for (i = 0; i < num_actions; i++)
		rule->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE;

	return rule;
}
EXPORT_SYMBOL(flow_rule_alloc);

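/**
 * offload_action_alloc - allocate a flow_offload_action with room for
 *			  @num_actions actions
 * @num_actions: number of entries to reserve in the action array
 *
 * Mirrors flow_rule_alloc(): the returned structure is zeroed and every
 * action's hw_stats is preset to FLOW_ACTION_HW_STATS_DONT_CARE.
 * Returns NULL on allocation failure.
 */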
struct flow_offload_action *offload_action_alloc(unsigned int num_actions)
{
	struct flow_offload_action *fl_action;
	int i;

	fl_action = kzalloc(struct_size(fl_action, action.entries, num_actions),
			    GFP_KERNEL);
	if (!fl_action)
		return NULL;

	fl_action->action.num_entries = num_actions;
	/* Pre-fill each action hw_stats with DONT_CARE.
	 * Caller can override this if it wants stats for a given action.
	 */
	for (i = 0; i < num_actions; i++)
		fl_action->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE;

	return fl_action;
}

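/* Resolve the key/mask pair for dissector key @__type out of @__rule's
 * match and store the pointers in @__out.  The macro expands to
 * declarations followed by statements, so it may only appear at the
 * start of a block, which is how the flow_rule_match_*() helpers below
 * use it.
 */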
#define FLOW_DISSECTOR_MATCH(__rule, __type, __out)				\
	const struct flow_match *__m = &(__rule)->match;			\
	struct flow_dissector *__d = (__m)->dissector;				\
										\
	(__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key);	\
	(__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask);

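/* Thin, exported wrappers around FLOW_DISSECTOR_MATCH(), one per
 * dissector key, so drivers can extract a typed key/mask pair without
 * reaching into the dissector internals.
 */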
void flow_rule_match_meta(const struct flow_rule *rule,
			  struct flow_match_meta *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_META, out);
}
EXPORT_SYMBOL(flow_rule_match_meta);

void flow_rule_match_basic(const struct flow_rule *rule,
			   struct flow_match_basic *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out);
}
EXPORT_SYMBOL(flow_rule_match_basic);

void flow_rule_match_control(const struct flow_rule *rule,
			     struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_control);

void flow_rule_match_eth_addrs(const struct flow_rule *rule,
			       struct flow_match_eth_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_eth_addrs);

void flow_rule_match_vlan(const struct flow_rule *rule,
			  struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_vlan);

void flow_rule_match_cvlan(const struct flow_rule *rule,
			   struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CVLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_cvlan);

void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
				struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv4_addrs);

void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
				struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv6_addrs);

void flow_rule_match_ip(const struct flow_rule *rule,
			struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_ip);

void flow_rule_match_ports(const struct flow_rule *rule,
			   struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_ports);

void flow_rule_match_tcp(const struct flow_rule *rule,
			 struct flow_match_tcp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_TCP, out);
}
EXPORT_SYMBOL(flow_rule_match_tcp);

void flow_rule_match_icmp(const struct flow_rule *rule,
			  struct flow_match_icmp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ICMP, out);
}
EXPORT_SYMBOL(flow_rule_match_icmp);

void flow_rule_match_mpls(const struct flow_rule *rule,
			  struct flow_match_mpls *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_MPLS, out);
}
EXPORT_SYMBOL(flow_rule_match_mpls);

void flow_rule_match_enc_control(const struct flow_rule *rule,
				 struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_control);

void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv4_addrs);

void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv6_addrs);

void flow_rule_match_enc_ip(const struct flow_rule *rule,
			    struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ip);

void flow_rule_match_enc_ports(const struct flow_rule *rule,
			       struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ports);

void flow_rule_match_enc_keyid(const struct flow_rule *rule,
			       struct flow_match_enc_keyid *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_keyid);

void flow_rule_match_enc_opts(const struct flow_rule *rule,
			      struct flow_match_enc_opts *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_OPTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_opts);

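/**
 * flow_action_cookie_create - copy user cookie data into a new cookie
 * @data: cookie payload to copy
 * @len: payload length in bytes
 * @gfp: allocation flags
 *
 * Returns a freshly allocated flow_action_cookie carrying a private copy
 * of @data, or NULL on allocation failure.  Release it with
 * flow_action_cookie_destroy().
 */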
struct flow_action_cookie *flow_action_cookie_create(void *data,
						     unsigned int len,
						     gfp_t gfp)
{
	struct flow_action_cookie *cookie;

	cookie = kmalloc(sizeof(*cookie) + len, gfp);
	if (!cookie)
		return NULL;
	cookie->cookie_len = len;
	memcpy(cookie->cookie, data, len);
	return cookie;
}
EXPORT_SYMBOL(flow_action_cookie_create);

void flow_action_cookie_destroy(struct flow_action_cookie *cookie)
{
	kfree(cookie);
}
EXPORT_SYMBOL(flow_action_cookie_destroy);

void flow_rule_match_ct(const struct flow_rule *rule,
			struct flow_match_ct *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CT, out);
}
EXPORT_SYMBOL(flow_rule_match_ct);

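/**
 * flow_block_cb_alloc - allocate a flow block callback descriptor
 * @cb: callback invoked for each rule offloaded to this block
 * @cb_ident: opaque identity cookie, compared on lookup
 * @cb_priv: private data handed back to @cb
 * @release: optional destructor for @cb_priv, run by flow_block_cb_free()
 *
 * Returns the new descriptor or ERR_PTR(-ENOMEM).  Note the ERR_PTR
 * convention: callers must check with IS_ERR(), not against NULL.
 */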
struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
					  void *cb_ident, void *cb_priv,
					  void (*release)(void *cb_priv))
{
	struct flow_block_cb *block_cb;

	block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
	if (!block_cb)
		return ERR_PTR(-ENOMEM);

	block_cb->cb = cb;
	block_cb->cb_ident = cb_ident;
	block_cb->cb_priv = cb_priv;
	block_cb->release = release;

	return block_cb;
}
EXPORT_SYMBOL(flow_block_cb_alloc);

void flow_block_cb_free(struct flow_block_cb *block_cb)
{
	if (block_cb->release)
		block_cb->release(block_cb->cb_priv);

	kfree(block_cb);
}
EXPORT_SYMBOL(flow_block_cb_free);

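/**
 * flow_block_cb_lookup - find a callback on a block's cb_list
 * @block: flow block to search
 * @cb: callback to match
 * @cb_ident: identity cookie to match
 *
 * Returns the matching flow_block_cb, or NULL if the (@cb, @cb_ident)
 * pair was never bound to @block.
 */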
struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
					   flow_setup_cb_t *cb, void *cb_ident)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, &block->cb_list, list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return block_cb;
	}

	return NULL;
}
EXPORT_SYMBOL(flow_block_cb_lookup);

void *flow_block_cb_priv(struct flow_block_cb *block_cb)
{
	return block_cb->cb_priv;
}
EXPORT_SYMBOL(flow_block_cb_priv);

void flow_block_cb_incref(struct flow_block_cb *block_cb)
{
	block_cb->refcnt++;
}
EXPORT_SYMBOL(flow_block_cb_incref);

unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb)
{
	return --block_cb->refcnt;
}
EXPORT_SYMBOL(flow_block_cb_decref);

bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
			   struct list_head *driver_block_list)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, driver_block_list, driver_list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return true;
	}

	return false;
}
EXPORT_SYMBOL(flow_block_cb_is_busy);

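/**
 * flow_block_cb_setup_simple - boilerplate BIND/UNBIND handling for drivers
 * @f: block offload request from the stack
 * @driver_block_list: per-driver list used to detect duplicate binds
 * @cb: driver setup callback
 * @cb_ident: identity cookie (typically the driver's private structure)
 * @cb_priv: private data handed back to @cb
 * @ingress_only: reject bindings other than clsact ingress when true
 *
 * Handles the common FLOW_BLOCK_BIND/FLOW_BLOCK_UNBIND pattern so that
 * simple drivers need not open-code it.  A minimal sketch of a driver's
 * ndo_setup_tc() handling TC_SETUP_BLOCK, with hypothetical names:
 *
 *	static LIST_HEAD(foo_block_cb_list);
 *
 *	return flow_block_cb_setup_simple(type_data, &foo_block_cb_list,
 *					  foo_setup_tc_block_cb,
 *					  priv, priv, true);
 */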
int flow_block_cb_setup_simple(struct flow_block_offload *f,
			       struct list_head *driver_block_list,
			       flow_setup_cb_t *cb,
			       void *cb_ident, void *cb_priv,
			       bool ingress_only)
{
	struct flow_block_cb *block_cb;

	if (ingress_only &&
	    f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	f->driver_block_list = driver_block_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, cb_ident, driver_block_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, driver_block_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, cb_ident);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(flow_block_cb_setup_simple);

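/* Indirect block offload: lets a hardware driver offload blocks that are
 * bound to software devices it does not itself implement (e.g. tunnel
 * netdevs).  All of the state below is protected by flow_indr_block_lock.
 */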
static DEFINE_MUTEX(flow_indr_block_lock);
static LIST_HEAD(flow_block_indr_list);
static LIST_HEAD(flow_block_indr_dev_list);
static LIST_HEAD(flow_indir_dev_list);

struct flow_indr_dev {
	struct list_head		list;
	flow_indr_block_bind_cb_t	*cb;
	void				*cb_priv;
	refcount_t			refcnt;
};

static struct flow_indr_dev *flow_indr_dev_alloc(flow_indr_block_bind_cb_t *cb,
						 void *cb_priv)
{
	struct flow_indr_dev *indr_dev;

	indr_dev = kmalloc(sizeof(*indr_dev), GFP_KERNEL);
	if (!indr_dev)
		return NULL;

	indr_dev->cb		= cb;
	indr_dev->cb_priv	= cb_priv;
	refcount_set(&indr_dev->refcnt, 1);

	return indr_dev;
}

struct flow_indir_dev_info {
	void *data;
	struct net_device *dev;
	struct Qdisc *sch;
	enum tc_setup_type type;
	void (*cleanup)(struct flow_block_cb *block_cb);
	struct list_head list;
	enum flow_block_command command;
	enum flow_block_binder_type binder_type;
	struct list_head *cb_list;
};

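/* Replay the recorded bind requests against a callback that registered
 * after the qdiscs/blocks were set up, so a late-loading driver still
 * sees them.  Called with flow_indr_block_lock held.
 */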
static void existing_qdiscs_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
{
	struct flow_block_offload bo;
	struct flow_indir_dev_info *cur;

	list_for_each_entry(cur, &flow_indir_dev_list, list) {
		memset(&bo, 0, sizeof(bo));
		bo.command = cur->command;
		bo.binder_type = cur->binder_type;
		INIT_LIST_HEAD(&bo.cb_list);
		cb(cur->dev, cur->sch, cb_priv, cur->type, &bo, cur->data, cur->cleanup);
		list_splice(&bo.cb_list, cur->cb_list);
	}
}

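/**
 * flow_indr_dev_register - register an indirect block offload callback
 * @cb: driver callback invoked for setup requests on foreign devices
 * @cb_priv: driver private data
 *
 * Takes a reference if the (@cb, @cb_priv) pair is already registered;
 * otherwise allocates a new entry, replays the existing bindings to it
 * and asks the TC action layer to re-offload its actions.
 *
 * Returns 0 on success or -ENOMEM.
 */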
int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
{
	struct flow_indr_dev *indr_dev;

	mutex_lock(&flow_indr_block_lock);
	list_for_each_entry(indr_dev, &flow_block_indr_dev_list, list) {
		if (indr_dev->cb == cb &&
		    indr_dev->cb_priv == cb_priv) {
			refcount_inc(&indr_dev->refcnt);
			mutex_unlock(&flow_indr_block_lock);
			return 0;
		}
	}

	indr_dev = flow_indr_dev_alloc(cb, cb_priv);
	if (!indr_dev) {
		mutex_unlock(&flow_indr_block_lock);
		return -ENOMEM;
	}

	list_add(&indr_dev->list, &flow_block_indr_dev_list);
	existing_qdiscs_register(cb, cb_priv);
	mutex_unlock(&flow_indr_block_lock);

	tcf_action_reoffload_cb(cb, cb_priv, true);

	return 0;
}
EXPORT_SYMBOL(flow_indr_dev_register);

static void __flow_block_indr_cleanup(void (*release)(void *cb_priv),
				      void *cb_priv,
				      struct list_head *cleanup_list)
{
	struct flow_block_cb *this, *next;

	list_for_each_entry_safe(this, next, &flow_block_indr_list, indr.list) {
		if (this->release == release &&
		    this->indr.cb_priv == cb_priv)
			list_move(&this->indr.list, cleanup_list);
	}
}

static void flow_block_indr_notify(struct list_head *cleanup_list)
{
	struct flow_block_cb *this, *next;

	list_for_each_entry_safe(this, next, cleanup_list, indr.list) {
		list_del(&this->indr.list);
		this->indr.cleanup(this);
	}
}

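/**
 * flow_indr_dev_unregister - drop a reference to an indirect block callback
 * @cb: callback passed at registration time
 * @cb_priv: private data passed at registration time
 * @release: matched against flow_block_cb->release to find this driver's
 *	     blocks
 *
 * When the last reference goes away, every block the driver had bound is
 * moved to a temporary list and its indr.cleanup() hook is run outside
 * the lock before the registration entry is freed.
 */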
void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
			      void (*release)(void *cb_priv))
{
	struct flow_indr_dev *this, *next, *indr_dev = NULL;
	LIST_HEAD(cleanup_list);

	mutex_lock(&flow_indr_block_lock);
	list_for_each_entry_safe(this, next, &flow_block_indr_dev_list, list) {
		if (this->cb == cb &&
		    this->cb_priv == cb_priv &&
		    refcount_dec_and_test(&this->refcnt)) {
			indr_dev = this;
			list_del(&indr_dev->list);
			break;
		}
	}

	if (!indr_dev) {
		mutex_unlock(&flow_indr_block_lock);
		return;
	}

	__flow_block_indr_cleanup(release, cb_priv, &cleanup_list);
	mutex_unlock(&flow_indr_block_lock);

	tcf_action_reoffload_cb(cb, cb_priv, false);
	flow_block_indr_notify(&cleanup_list);
	kfree(indr_dev);
}
EXPORT_SYMBOL(flow_indr_dev_unregister);

static void flow_block_indr_init(struct flow_block_cb *flow_block,
				 struct flow_block_offload *bo,
				 struct net_device *dev, struct Qdisc *sch, void *data,
				 void *cb_priv,
				 void (*cleanup)(struct flow_block_cb *block_cb))
{
	flow_block->indr.binder_type = bo->binder_type;
	flow_block->indr.data = data;
	flow_block->indr.cb_priv = cb_priv;
	flow_block->indr.dev = dev;
	flow_block->indr.sch = sch;
	flow_block->indr.cleanup = cleanup;
}

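/* Like flow_block_cb_alloc(), but additionally records the indirect
 * binding state (@dev, @sch, @data, @indr_cb_priv, @cleanup) and links
 * the new callback into flow_block_indr_list so that it can be torn down
 * via @cleanup if the driver unregisters while still bound.  Returns the
 * descriptor or an ERR_PTR().
 */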
struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
					       void *cb_ident, void *cb_priv,
					       void (*release)(void *cb_priv),
					       struct flow_block_offload *bo,
					       struct net_device *dev,
					       struct Qdisc *sch, void *data,
					       void *indr_cb_priv,
					       void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct flow_block_cb *block_cb;

	block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, release);
	if (IS_ERR(block_cb))
		goto out;

	flow_block_indr_init(block_cb, bo, dev, sch, data, indr_cb_priv, cleanup);
	list_add(&block_cb->indr.list, &flow_block_indr_list);

out:
	return block_cb;
}
EXPORT_SYMBOL(flow_indr_block_cb_alloc);

static struct flow_indir_dev_info *find_indir_dev(void *data)
{
	struct flow_indir_dev_info *cur;

	list_for_each_entry(cur, &flow_indir_dev_list, list) {
		if (cur->data == data)
			return cur;
	}
	return NULL;
}

static int indir_dev_add(void *data, struct net_device *dev, struct Qdisc *sch,
			 enum tc_setup_type type, void (*cleanup)(struct flow_block_cb *block_cb),
			 struct flow_block_offload *bo)
{
	struct flow_indir_dev_info *info;

	info = find_indir_dev(data);
	if (info)
		return -EEXIST;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->data = data;
	info->dev = dev;
	info->sch = sch;
	info->type = type;
	info->cleanup = cleanup;
	info->command = bo->command;
	info->binder_type = bo->binder_type;
	info->cb_list = bo->cb_list_head;

	list_add(&info->list, &flow_indir_dev_list);
	return 0;
}

static int indir_dev_remove(void *data)
{
	struct flow_indir_dev_info *info;

	info = find_indir_dev(data);
	if (!info)
		return -ENOENT;

	list_del(&info->list);

	kfree(info);
	return 0;
}

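/**
 * flow_indr_dev_setup_offload - offer a block to all indirect callbacks
 * @dev: device the block is attached to
 * @sch: qdisc, when bound to one
 * @type: classifier type (e.g. TC_SETUP_CLSFLOWER)
 * @data: opaque data passed through to the callbacks
 * @bo: block offload request, or NULL for a driver-initiated replay
 * @cleanup: hook run on each accepted callback at driver unregister
 *
 * Records BIND requests (and forgets them on UNBIND) so they can be
 * replayed to late registrants, then offers the request to every
 * registered callback.
 *
 * Returns the number of callbacks that accepted the request, or
 * -EOPNOTSUPP if a request was made and nobody attached to it.
 */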
int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
				enum tc_setup_type type, void *data,
				struct flow_block_offload *bo,
				void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct flow_indr_dev *this;
	u32 count = 0;
	int err;

	mutex_lock(&flow_indr_block_lock);
	if (bo) {
		if (bo->command == FLOW_BLOCK_BIND)
			indir_dev_add(data, dev, sch, type, cleanup, bo);
		else if (bo->command == FLOW_BLOCK_UNBIND)
			indir_dev_remove(data);
	}

	list_for_each_entry(this, &flow_block_indr_dev_list, list) {
		err = this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup);
		if (!err)
			count++;
	}

	mutex_unlock(&flow_indr_block_lock);

	return (bo && list_empty(&bo->cb_list)) ? -EOPNOTSUPP : count;
}
EXPORT_SYMBOL(flow_indr_dev_setup_offload);

bool flow_indr_dev_exists(void)
{
	return !list_empty(&flow_block_indr_dev_list);
}
EXPORT_SYMBOL(flow_indr_dev_exists);