/* SPDX-License-Identifier: GPL-2.0 */
/* net/core/flow_offload.c (revision 8622a0e5) */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/flow_offload.h>
#include <linux/rtnetlink.h>
#include <linux/mutex.h>

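/**
 * flow_rule_alloc - allocate a flow rule with room for a given number
 *		     of actions
 * @num_actions: number of entries to reserve in the action array
 *
 * Returns a zeroed rule with action.num_entries initialized, or NULL if
 * the allocation fails. The caller owns the returned rule.
 */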
struct flow_rule *flow_rule_alloc(unsigned int num_actions)
{
	struct flow_rule *rule;

	rule = kzalloc(struct_size(rule, action.entries, num_actions),
		       GFP_KERNEL);
	if (!rule)
		return NULL;

	rule->action.num_entries = num_actions;

	return rule;
}
EXPORT_SYMBOL(flow_rule_alloc);
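
/* Example (illustrative, not taken from an in-tree caller): allocating
 * a rule for two actions; the caller owns the rule and releases it with
 * kfree() once the offload request has been processed.
 *
 *	struct flow_rule *rule;
 *
 *	rule = flow_rule_alloc(2);
 *	if (!rule)
 *		return -ENOMEM;
 *	...
 *	kfree(rule);
 */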

#define FLOW_DISSECTOR_MATCH(__rule, __type, __out)				\
	const struct flow_match *__m = &(__rule)->match;			\
	struct flow_dissector *__d = (__m)->dissector;				\
										\
	(__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key);	\
	(__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask);	\

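/* Each flow_rule_match_*() helper below expands FLOW_DISSECTOR_MATCH to
 * point @out->key and @out->mask at the dissector targets for one key id.
 */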
void flow_rule_match_meta(const struct flow_rule *rule,
			  struct flow_match_meta *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_META, out);
}
EXPORT_SYMBOL(flow_rule_match_meta);

void flow_rule_match_basic(const struct flow_rule *rule,
			   struct flow_match_basic *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out);
}
EXPORT_SYMBOL(flow_rule_match_basic);
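
/* Example (illustrative driver-side use, assuming a populated rule):
 * check that the key is present with flow_rule_match_key() before
 * extracting it.
 *
 *	struct flow_match_basic match;
 *
 *	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
 *		flow_rule_match_basic(rule, &match);
 *		if (match.mask->ip_proto)
 *			... program the hardware filter ...
 *	}
 */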

void flow_rule_match_control(const struct flow_rule *rule,
			     struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_control);

void flow_rule_match_eth_addrs(const struct flow_rule *rule,
			       struct flow_match_eth_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_eth_addrs);

void flow_rule_match_vlan(const struct flow_rule *rule,
			  struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_vlan);

void flow_rule_match_cvlan(const struct flow_rule *rule,
			   struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CVLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_cvlan);

void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
				struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv4_addrs);

void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
				struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv6_addrs);

void flow_rule_match_ip(const struct flow_rule *rule,
			struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_ip);

void flow_rule_match_ports(const struct flow_rule *rule,
			   struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_ports);

void flow_rule_match_tcp(const struct flow_rule *rule,
			 struct flow_match_tcp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_TCP, out);
}
EXPORT_SYMBOL(flow_rule_match_tcp);

void flow_rule_match_icmp(const struct flow_rule *rule,
			  struct flow_match_icmp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ICMP, out);
}
EXPORT_SYMBOL(flow_rule_match_icmp);

void flow_rule_match_mpls(const struct flow_rule *rule,
			  struct flow_match_mpls *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_MPLS, out);
}
EXPORT_SYMBOL(flow_rule_match_mpls);

void flow_rule_match_enc_control(const struct flow_rule *rule,
				 struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_control);

void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv4_addrs);

void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv6_addrs);

void flow_rule_match_enc_ip(const struct flow_rule *rule,
			    struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ip);

void flow_rule_match_enc_ports(const struct flow_rule *rule,
			       struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ports);

void flow_rule_match_enc_keyid(const struct flow_rule *rule,
			       struct flow_match_enc_keyid *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_keyid);

void flow_rule_match_enc_opts(const struct flow_rule *rule,
			      struct flow_match_enc_opts *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_OPTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_opts);

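/**
 * flow_action_cookie_create - allocate an action cookie
 * @data: opaque cookie bytes to copy
 * @len: number of bytes in @data
 * @gfp: allocation flags
 *
 * Copies @len bytes of @data into a freshly allocated cookie. Returns
 * NULL if the allocation fails; release with flow_action_cookie_destroy().
 */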
struct flow_action_cookie *flow_action_cookie_create(void *data,
						     unsigned int len,
						     gfp_t gfp)
{
	struct flow_action_cookie *cookie;

	cookie = kmalloc(sizeof(*cookie) + len, gfp);
	if (!cookie)
		return NULL;
	cookie->cookie_len = len;
	memcpy(cookie->cookie, data, len);
	return cookie;
}
EXPORT_SYMBOL(flow_action_cookie_create);

void flow_action_cookie_destroy(struct flow_action_cookie *cookie)
{
	kfree(cookie);
}
EXPORT_SYMBOL(flow_action_cookie_destroy);
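
/* Example (illustrative; the field names of the source cookie are
 * hypothetical): duplicating a user-supplied cookie for an offloaded
 * action and releasing it on teardown.
 *
 *	entry->cookie = flow_action_cookie_create(cookie->data, cookie->len,
 *						  GFP_ATOMIC);
 *	if (!entry->cookie)
 *		return -ENOMEM;
 *	...
 *	flow_action_cookie_destroy(entry->cookie);
 */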

struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
					  void *cb_ident, void *cb_priv,
					  void (*release)(void *cb_priv))
{
	struct flow_block_cb *block_cb;

	block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
	if (!block_cb)
		return ERR_PTR(-ENOMEM);

	block_cb->cb = cb;
	block_cb->cb_ident = cb_ident;
	block_cb->cb_priv = cb_priv;
	block_cb->release = release;

	return block_cb;
}
EXPORT_SYMBOL(flow_block_cb_alloc);
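
/* Example (illustrative; the "foo" symbols are hypothetical driver
 * code): allocating a block callback with a release hook that frees
 * the driver's private state.
 *
 *	block_cb = flow_block_cb_alloc(foo_setup_cb, foo, foo_priv,
 *				       foo_release);
 *	if (IS_ERR(block_cb))
 *		return PTR_ERR(block_cb);
 */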

void flow_block_cb_free(struct flow_block_cb *block_cb)
{
	if (block_cb->release)
		block_cb->release(block_cb->cb_priv);

	kfree(block_cb);
}
EXPORT_SYMBOL(flow_block_cb_free);

struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
					   flow_setup_cb_t *cb, void *cb_ident)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, &block->cb_list, list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return block_cb;
	}

	return NULL;
}
EXPORT_SYMBOL(flow_block_cb_lookup);

void *flow_block_cb_priv(struct flow_block_cb *block_cb)
{
	return block_cb->cb_priv;
}
EXPORT_SYMBOL(flow_block_cb_priv);

void flow_block_cb_incref(struct flow_block_cb *block_cb)
{
	block_cb->refcnt++;
}
EXPORT_SYMBOL(flow_block_cb_incref);

unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb)
{
	return --block_cb->refcnt;
}
EXPORT_SYMBOL(flow_block_cb_decref);
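
/* The refcount above is a plain integer: flow_block_cb_incref() and
 * flow_block_cb_decref() rely on the caller to serialize block setup,
 * typically under RTNL.
 */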

bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
			   struct list_head *driver_block_list)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, driver_block_list, driver_list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return true;
	}

	return false;
}
EXPORT_SYMBOL(flow_block_cb_is_busy);

int flow_block_cb_setup_simple(struct flow_block_offload *f,
			       struct list_head *driver_block_list,
			       flow_setup_cb_t *cb,
			       void *cb_ident, void *cb_priv,
			       bool ingress_only)
{
	struct flow_block_cb *block_cb;

	if (ingress_only &&
	    f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	f->driver_block_list = driver_block_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, cb_ident, driver_block_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, driver_block_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, cb_ident);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(flow_block_cb_setup_simple);
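
/* Example (illustrative; the "foo" symbols are hypothetical driver
 * code): wiring a driver's ->ndo_setup_tc() through this helper for an
 * ingress-only block, using the netdev as both cb_ident and cb_priv.
 *
 *	static LIST_HEAD(foo_block_cb_list);
 *
 *	static int foo_setup_tc(struct net_device *dev,
 *				enum tc_setup_type type, void *type_data)
 *	{
 *		switch (type) {
 *		case TC_SETUP_BLOCK:
 *			return flow_block_cb_setup_simple(type_data,
 *							  &foo_block_cb_list,
 *							  foo_setup_tc_block_cb,
 *							  dev, dev, true);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */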

static LIST_HEAD(block_cb_list);

static struct rhashtable indr_setup_block_ht;

struct flow_indr_block_cb {
	struct list_head list;
	void *cb_priv;
	flow_indr_block_bind_cb_t *cb;
	void *cb_ident;
};

struct flow_indr_block_dev {
	struct rhash_head ht_node;
	struct net_device *dev;
	unsigned int refcnt;
	struct list_head cb_list;
};

static const struct rhashtable_params flow_indr_setup_block_ht_params = {
	.key_offset	= offsetof(struct flow_indr_block_dev, dev),
	.head_offset	= offsetof(struct flow_indr_block_dev, ht_node),
	.key_len	= sizeof(struct net_device *),
};
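
/* Entries are hashed on the net_device pointer value itself. */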

static struct flow_indr_block_dev *
flow_indr_block_dev_lookup(struct net_device *dev)
{
	return rhashtable_lookup_fast(&indr_setup_block_ht, &dev,
				      flow_indr_setup_block_ht_params);
}

static struct flow_indr_block_dev *
flow_indr_block_dev_get(struct net_device *dev)
{
	struct flow_indr_block_dev *indr_dev;

	indr_dev = flow_indr_block_dev_lookup(dev);
	if (indr_dev)
		goto inc_ref;

	indr_dev = kzalloc(sizeof(*indr_dev), GFP_KERNEL);
	if (!indr_dev)
		return NULL;

	INIT_LIST_HEAD(&indr_dev->cb_list);
	indr_dev->dev = dev;
	if (rhashtable_insert_fast(&indr_setup_block_ht, &indr_dev->ht_node,
				   flow_indr_setup_block_ht_params)) {
		kfree(indr_dev);
		return NULL;
	}

inc_ref:
	indr_dev->refcnt++;
	return indr_dev;
}

static void flow_indr_block_dev_put(struct flow_indr_block_dev *indr_dev)
{
	if (--indr_dev->refcnt)
		return;

	rhashtable_remove_fast(&indr_setup_block_ht, &indr_dev->ht_node,
			       flow_indr_setup_block_ht_params);
	kfree(indr_dev);
}

static struct flow_indr_block_cb *
flow_indr_block_cb_lookup(struct flow_indr_block_dev *indr_dev,
			  flow_indr_block_bind_cb_t *cb, void *cb_ident)
{
	struct flow_indr_block_cb *indr_block_cb;

	list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
		if (indr_block_cb->cb == cb &&
		    indr_block_cb->cb_ident == cb_ident)
			return indr_block_cb;
	return NULL;
}

static struct flow_indr_block_cb *
flow_indr_block_cb_add(struct flow_indr_block_dev *indr_dev, void *cb_priv,
		       flow_indr_block_bind_cb_t *cb, void *cb_ident)
{
	struct flow_indr_block_cb *indr_block_cb;

	indr_block_cb = flow_indr_block_cb_lookup(indr_dev, cb, cb_ident);
	if (indr_block_cb)
		return ERR_PTR(-EEXIST);

	indr_block_cb = kzalloc(sizeof(*indr_block_cb), GFP_KERNEL);
	if (!indr_block_cb)
		return ERR_PTR(-ENOMEM);

	indr_block_cb->cb_priv = cb_priv;
	indr_block_cb->cb = cb;
	indr_block_cb->cb_ident = cb_ident;
	list_add(&indr_block_cb->list, &indr_dev->cb_list);

	return indr_block_cb;
}

static void flow_indr_block_cb_del(struct flow_indr_block_cb *indr_block_cb)
{
	list_del(&indr_block_cb->list);
	kfree(indr_block_cb);
}

static DEFINE_MUTEX(flow_indr_block_cb_lock);
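
/* flow_indr_block_cb_lock protects additions, removals and walks of
 * block_cb_list.
 */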

static void flow_block_cmd(struct net_device *dev,
			   flow_indr_block_bind_cb_t *cb, void *cb_priv,
			   enum flow_block_command command)
{
	struct flow_indr_block_entry *entry;

	mutex_lock(&flow_indr_block_cb_lock);
	list_for_each_entry(entry, &block_cb_list, list) {
		entry->cb(dev, cb, cb_priv, command);
	}
	mutex_unlock(&flow_indr_block_cb_lock);
}

int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				  flow_indr_block_bind_cb_t *cb,
				  void *cb_ident)
{
	struct flow_indr_block_cb *indr_block_cb;
	struct flow_indr_block_dev *indr_dev;
	int err;

	indr_dev = flow_indr_block_dev_get(dev);
	if (!indr_dev)
		return -ENOMEM;

	indr_block_cb = flow_indr_block_cb_add(indr_dev, cb_priv, cb, cb_ident);
	err = PTR_ERR_OR_ZERO(indr_block_cb);
	if (err)
		goto err_dev_put;

	flow_block_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv,
		       FLOW_BLOCK_BIND);

	return 0;

err_dev_put:
	flow_indr_block_dev_put(indr_dev);
	return err;
}
EXPORT_SYMBOL_GPL(__flow_indr_block_cb_register);

int flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				flow_indr_block_bind_cb_t *cb,
				void *cb_ident)
{
	int err;

	rtnl_lock();
	err = __flow_indr_block_cb_register(dev, cb_priv, cb, cb_ident);
	rtnl_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(flow_indr_block_cb_register);
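
/* Example (illustrative; the "foo" symbols are hypothetical driver
 * code): a driver registering for indirect block binds on a tunnel
 * device it noticed via a netdevice notifier.
 *
 *	err = flow_indr_block_cb_register(netdev, foo_priv,
 *					  foo_indr_setup_tc_cb, foo_priv);
 *	if (err)
 *		return err;
 */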

void __flow_indr_block_cb_unregister(struct net_device *dev,
				     flow_indr_block_bind_cb_t *cb,
				     void *cb_ident)
{
	struct flow_indr_block_cb *indr_block_cb;
	struct flow_indr_block_dev *indr_dev;

	indr_dev = flow_indr_block_dev_lookup(dev);
	if (!indr_dev)
		return;

	indr_block_cb = flow_indr_block_cb_lookup(indr_dev, cb, cb_ident);
	if (!indr_block_cb)
		return;

	flow_block_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv,
		       FLOW_BLOCK_UNBIND);

	flow_indr_block_cb_del(indr_block_cb);
	flow_indr_block_dev_put(indr_dev);
}
EXPORT_SYMBOL_GPL(__flow_indr_block_cb_unregister);

void flow_indr_block_cb_unregister(struct net_device *dev,
				   flow_indr_block_bind_cb_t *cb,
				   void *cb_ident)
{
	rtnl_lock();
	__flow_indr_block_cb_unregister(dev, cb, cb_ident);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(flow_indr_block_cb_unregister);

void flow_indr_block_call(struct net_device *dev,
			  struct flow_block_offload *bo,
			  enum flow_block_command command)
{
	struct flow_indr_block_cb *indr_block_cb;
	struct flow_indr_block_dev *indr_dev;

	indr_dev = flow_indr_block_dev_lookup(dev);
	if (!indr_dev)
		return;

	list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
		indr_block_cb->cb(dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
				  bo);
}
EXPORT_SYMBOL_GPL(flow_indr_block_call);
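
/* flow_indr_block_call() is invoked by the subsystem that owns the
 * block (e.g. tc or netfilter) to replay a bind/unbind request, carried
 * in @bo, to every driver callback registered against @dev.
 */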

void flow_indr_add_block_cb(struct flow_indr_block_entry *entry)
{
	mutex_lock(&flow_indr_block_cb_lock);
	list_add_tail(&entry->list, &block_cb_list);
	mutex_unlock(&flow_indr_block_cb_lock);
}
EXPORT_SYMBOL_GPL(flow_indr_add_block_cb);

void flow_indr_del_block_cb(struct flow_indr_block_entry *entry)
{
	mutex_lock(&flow_indr_block_cb_lock);
	list_del(&entry->list);
	mutex_unlock(&flow_indr_block_cb_lock);
}
EXPORT_SYMBOL_GPL(flow_indr_del_block_cb);
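
/* Example (illustrative; the "foo" symbols are hypothetical subsystem
 * code): exposing a get-and-command helper through an entry so that
 * indirect registrations replay the subsystem's existing blocks.
 *
 *	static struct flow_indr_block_entry foo_block_entry = {
 *		.cb	= foo_indr_block_get_and_cmd,
 *		.list	= LIST_HEAD_INIT(foo_block_entry.list),
 *	};
 *
 *	flow_indr_add_block_cb(&foo_block_entry);
 */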

static int __init init_flow_indr_rhashtable(void)
{
	return rhashtable_init(&indr_setup_block_ht,
			       &flow_indr_setup_block_ht_params);
}
subsys_initcall(init_flow_indr_rhashtable);