/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/flow_offload.h>
#include <linux/rtnetlink.h>
#include <linux/mutex.h>

struct flow_rule *flow_rule_alloc(unsigned int num_actions)
{
	struct flow_rule *rule;

	rule = kzalloc(struct_size(rule, action.entries, num_actions),
		       GFP_KERNEL);
	if (!rule)
		return NULL;

	rule->action.num_entries = num_actions;

	return rule;
}
EXPORT_SYMBOL(flow_rule_alloc);
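
/*
 * Usage sketch (hypothetical, not from this file): a classifier front-end
 * sizes the rule by its action count and fills the entries in before
 * handing the rule to a driver. The FLOW_ACTION_* ids come from
 * <net/flow_offload.h>; the two picked here are only an example.
 *
 *	struct flow_rule *rule;
 *
 *	rule = flow_rule_alloc(2);
 *	if (!rule)
 *		return -ENOMEM;
 *	rule->action.entries[0].id = FLOW_ACTION_VLAN_PUSH;
 *	rule->action.entries[1].id = FLOW_ACTION_REDIRECT;
 */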

#define FLOW_DISSECTOR_MATCH(__rule, __type, __out)				\
	const struct flow_match *__m = &(__rule)->match;			\
	struct flow_dissector *__d = (__m)->dissector;				\
										\
	(__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key);	\
	(__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask);	\

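/*
 * FLOW_DISSECTOR_MATCH opens with local declarations, so it is only usable
 * as the body of the flow_rule_match_*() helpers below: it looks up the
 * dissector target for @__type and points @__out at the key and mask
 * within the rule's match data.
 */
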
void flow_rule_match_meta(const struct flow_rule *rule,
			  struct flow_match_meta *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_META, out);
}
EXPORT_SYMBOL(flow_rule_match_meta);

void flow_rule_match_basic(const struct flow_rule *rule,
			   struct flow_match_basic *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out);
}
EXPORT_SYMBOL(flow_rule_match_basic);
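
/*
 * Driver-side sketch (hypothetical names): each helper is normally guarded
 * by flow_rule_match_key() from <net/flow_offload.h>, and only masked
 * fields are consumed:
 *
 *	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
 *		struct flow_match_basic match;
 *
 *		flow_rule_match_basic(rule, &match);
 *		if (match.mask->n_proto)
 *			cfg.ethertype = match.key->n_proto;
 *	}
 */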

void flow_rule_match_control(const struct flow_rule *rule,
			     struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_control);

void flow_rule_match_eth_addrs(const struct flow_rule *rule,
			       struct flow_match_eth_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_eth_addrs);

void flow_rule_match_vlan(const struct flow_rule *rule,
			  struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_vlan);

void flow_rule_match_cvlan(const struct flow_rule *rule,
			   struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CVLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_cvlan);

void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
				struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv4_addrs);

void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
				struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv6_addrs);

void flow_rule_match_ip(const struct flow_rule *rule,
			struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_ip);

void flow_rule_match_ports(const struct flow_rule *rule,
			   struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_ports);

void flow_rule_match_tcp(const struct flow_rule *rule,
			 struct flow_match_tcp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_TCP, out);
}
EXPORT_SYMBOL(flow_rule_match_tcp);

void flow_rule_match_icmp(const struct flow_rule *rule,
			  struct flow_match_icmp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ICMP, out);
}
EXPORT_SYMBOL(flow_rule_match_icmp);

void flow_rule_match_mpls(const struct flow_rule *rule,
			  struct flow_match_mpls *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_MPLS, out);
}
EXPORT_SYMBOL(flow_rule_match_mpls);

void flow_rule_match_enc_control(const struct flow_rule *rule,
				 struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_control);

void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv4_addrs);

void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv6_addrs);

void flow_rule_match_enc_ip(const struct flow_rule *rule,
			    struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ip);

void flow_rule_match_enc_ports(const struct flow_rule *rule,
			       struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ports);

void flow_rule_match_enc_keyid(const struct flow_rule *rule,
			       struct flow_match_enc_keyid *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_keyid);

void flow_rule_match_enc_opts(const struct flow_rule *rule,
			      struct flow_match_enc_opts *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_OPTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_opts);

struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
					  void *cb_ident, void *cb_priv,
					  void (*release)(void *cb_priv))
{
	struct flow_block_cb *block_cb;

	block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
	if (!block_cb)
		return ERR_PTR(-ENOMEM);

	block_cb->cb = cb;
	block_cb->cb_ident = cb_ident;
	block_cb->cb_priv = cb_priv;
	block_cb->release = release;

	return block_cb;
}
EXPORT_SYMBOL(flow_block_cb_alloc);
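
/*
 * The @release callback passed above owns the teardown of @cb_priv:
 * flow_block_cb_free() invokes it (when set) before freeing the
 * flow_block_cb, so callers must not free cb_priv again themselves.
 */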

void flow_block_cb_free(struct flow_block_cb *block_cb)
{
	if (block_cb->release)
		block_cb->release(block_cb->cb_priv);

	kfree(block_cb);
}
EXPORT_SYMBOL(flow_block_cb_free);

struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
					   flow_setup_cb_t *cb, void *cb_ident)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, &block->cb_list, list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return block_cb;
	}

	return NULL;
}
EXPORT_SYMBOL(flow_block_cb_lookup);

void *flow_block_cb_priv(struct flow_block_cb *block_cb)
{
	return block_cb->cb_priv;
}
EXPORT_SYMBOL(flow_block_cb_priv);

void flow_block_cb_incref(struct flow_block_cb *block_cb)
{
	block_cb->refcnt++;
}
EXPORT_SYMBOL(flow_block_cb_incref);

unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb)
{
	return --block_cb->refcnt;
}
EXPORT_SYMBOL(flow_block_cb_decref);
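
/*
 * The refcnt manipulated by the incref/decref pair is a plain integer, not
 * a refcount_t; callers such as drivers sharing one flow_block_cb across
 * several bindings are expected to serialize bind/unbind themselves
 * (typically under RTNL).
 */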

bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
			   struct list_head *driver_block_list)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, driver_block_list, driver_list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return true;
	}

	return false;
}
EXPORT_SYMBOL(flow_block_cb_is_busy);

int flow_block_cb_setup_simple(struct flow_block_offload *f,
			       struct list_head *driver_block_list,
			       flow_setup_cb_t *cb,
			       void *cb_ident, void *cb_priv,
			       bool ingress_only)
{
	struct flow_block_cb *block_cb;

	if (ingress_only &&
	    f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	f->driver_block_list = driver_block_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, cb_ident, driver_block_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, driver_block_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, cb_ident);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(flow_block_cb_setup_simple);
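
/*
 * Usage sketch (hypothetical driver): flow_block_cb_setup_simple() covers
 * the common case of one callback per device, wired up from
 * ndo_setup_tc():
 *
 *	static LIST_HEAD(foo_block_cb_list);
 *
 *	static int foo_setup_tc(struct net_device *dev,
 *				enum tc_setup_type type, void *type_data)
 *	{
 *		if (type != TC_SETUP_BLOCK)
 *			return -EOPNOTSUPP;
 *		return flow_block_cb_setup_simple(type_data,
 *						  &foo_block_cb_list,
 *						  foo_setup_tc_block_cb,
 *						  dev, dev, true);
 *	}
 */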

static LIST_HEAD(block_cb_list);

static struct rhashtable indr_setup_block_ht;

struct flow_indr_block_cb {
	struct list_head list;
	void *cb_priv;
	flow_indr_block_bind_cb_t *cb;
	void *cb_ident;
};

struct flow_indr_block_dev {
	struct rhash_head ht_node;
	struct net_device *dev;
	unsigned int refcnt;
	struct list_head cb_list;
};

static const struct rhashtable_params flow_indr_setup_block_ht_params = {
	.key_offset	= offsetof(struct flow_indr_block_dev, dev),
	.head_offset	= offsetof(struct flow_indr_block_dev, ht_node),
	.key_len	= sizeof(struct net_device *),
};
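
/*
 * The hash key is the net_device pointer value itself (key_len is
 * sizeof(struct net_device *)), so lookups hash the pointer rather than
 * any device contents; entries must be torn down before the device is
 * freed.
 */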

static struct flow_indr_block_dev *
flow_indr_block_dev_lookup(struct net_device *dev)
{
	return rhashtable_lookup_fast(&indr_setup_block_ht, &dev,
				      flow_indr_setup_block_ht_params);
}

static struct flow_indr_block_dev *
flow_indr_block_dev_get(struct net_device *dev)
{
	struct flow_indr_block_dev *indr_dev;

	indr_dev = flow_indr_block_dev_lookup(dev);
	if (indr_dev)
		goto inc_ref;

	indr_dev = kzalloc(sizeof(*indr_dev), GFP_KERNEL);
	if (!indr_dev)
		return NULL;

	INIT_LIST_HEAD(&indr_dev->cb_list);
	indr_dev->dev = dev;
	if (rhashtable_insert_fast(&indr_setup_block_ht, &indr_dev->ht_node,
				   flow_indr_setup_block_ht_params)) {
		kfree(indr_dev);
		return NULL;
	}

inc_ref:
	indr_dev->refcnt++;
	return indr_dev;
}
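
/*
 * flow_indr_block_dev_get() returns an entry with an elevated refcnt;
 * every successful call must be balanced by flow_indr_block_dev_put(),
 * which removes and frees the entry once the last reference drops.
 */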

static void flow_indr_block_dev_put(struct flow_indr_block_dev *indr_dev)
{
	if (--indr_dev->refcnt)
		return;

	rhashtable_remove_fast(&indr_setup_block_ht, &indr_dev->ht_node,
			       flow_indr_setup_block_ht_params);
	kfree(indr_dev);
}

static struct flow_indr_block_cb *
flow_indr_block_cb_lookup(struct flow_indr_block_dev *indr_dev,
			  flow_indr_block_bind_cb_t *cb, void *cb_ident)
{
	struct flow_indr_block_cb *indr_block_cb;

	list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
		if (indr_block_cb->cb == cb &&
		    indr_block_cb->cb_ident == cb_ident)
			return indr_block_cb;
	return NULL;
}

static struct flow_indr_block_cb *
flow_indr_block_cb_add(struct flow_indr_block_dev *indr_dev, void *cb_priv,
		       flow_indr_block_bind_cb_t *cb, void *cb_ident)
{
	struct flow_indr_block_cb *indr_block_cb;

	indr_block_cb = flow_indr_block_cb_lookup(indr_dev, cb, cb_ident);
	if (indr_block_cb)
		return ERR_PTR(-EEXIST);

	indr_block_cb = kzalloc(sizeof(*indr_block_cb), GFP_KERNEL);
	if (!indr_block_cb)
		return ERR_PTR(-ENOMEM);

	indr_block_cb->cb_priv = cb_priv;
	indr_block_cb->cb = cb;
	indr_block_cb->cb_ident = cb_ident;
	list_add(&indr_block_cb->list, &indr_dev->cb_list);

	return indr_block_cb;
}

static void flow_indr_block_cb_del(struct flow_indr_block_cb *indr_block_cb)
{
	list_del(&indr_block_cb->list);
	kfree(indr_block_cb);
}

static DEFINE_MUTEX(flow_indr_block_cb_lock);

static void flow_block_cmd(struct net_device *dev,
			   flow_indr_block_bind_cb_t *cb, void *cb_priv,
			   enum flow_block_command command)
{
	struct flow_indr_block_entry *entry;

	mutex_lock(&flow_indr_block_cb_lock);
	list_for_each_entry(entry, &block_cb_list, list) {
		entry->cb(dev, cb, cb_priv, command);
	}
	mutex_unlock(&flow_indr_block_cb_lock);
}
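
/*
 * flow_block_cmd() fans a BIND/UNBIND out to every subsystem that
 * registered a flow_indr_block_entry via flow_indr_add_block_cb() (at this
 * revision, TC and netfilter), letting each replay or tear down its blocks
 * for @dev.
 */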

int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				  flow_indr_block_bind_cb_t *cb,
				  void *cb_ident)
{
	struct flow_indr_block_cb *indr_block_cb;
	struct flow_indr_block_dev *indr_dev;
	int err;

	indr_dev = flow_indr_block_dev_get(dev);
	if (!indr_dev)
		return -ENOMEM;

	indr_block_cb = flow_indr_block_cb_add(indr_dev, cb_priv, cb, cb_ident);
	err = PTR_ERR_OR_ZERO(indr_block_cb);
	if (err)
		goto err_dev_put;

	flow_block_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv,
		       FLOW_BLOCK_BIND);

	return 0;

err_dev_put:
	flow_indr_block_dev_put(indr_dev);
	return err;
}
EXPORT_SYMBOL_GPL(__flow_indr_block_cb_register);

int flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				flow_indr_block_bind_cb_t *cb,
				void *cb_ident)
{
	int err;

	rtnl_lock();
	err = __flow_indr_block_cb_register(dev, cb_priv, cb, cb_ident);
	rtnl_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(flow_indr_block_cb_register);
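
/*
 * Registration sketch (hypothetical names): a driver offloading rules on a
 * device it does not own, e.g. a tunnel netdev, registers an indirect
 * callback against that device:
 *
 *	err = flow_indr_block_cb_register(tunnel_dev, priv,
 *					  foo_indr_setup_cb, priv);
 *
 * and later tears it down with flow_indr_block_cb_unregister() using the
 * same cb/cb_ident pair.
 */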

void __flow_indr_block_cb_unregister(struct net_device *dev,
				     flow_indr_block_bind_cb_t *cb,
				     void *cb_ident)
{
	struct flow_indr_block_cb *indr_block_cb;
	struct flow_indr_block_dev *indr_dev;

	indr_dev = flow_indr_block_dev_lookup(dev);
	if (!indr_dev)
		return;

	indr_block_cb = flow_indr_block_cb_lookup(indr_dev, cb, cb_ident);
	if (!indr_block_cb)
		return;

	flow_block_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv,
		       FLOW_BLOCK_UNBIND);

	flow_indr_block_cb_del(indr_block_cb);
	flow_indr_block_dev_put(indr_dev);
}
EXPORT_SYMBOL_GPL(__flow_indr_block_cb_unregister);

void flow_indr_block_cb_unregister(struct net_device *dev,
				   flow_indr_block_bind_cb_t *cb,
				   void *cb_ident)
{
	rtnl_lock();
	__flow_indr_block_cb_unregister(dev, cb, cb_ident);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(flow_indr_block_cb_unregister);

void flow_indr_block_call(struct net_device *dev,
			  struct flow_block_offload *bo,
			  enum flow_block_command command)
{
	struct flow_indr_block_cb *indr_block_cb;
	struct flow_indr_block_dev *indr_dev;

	indr_dev = flow_indr_block_dev_lookup(dev);
	if (!indr_dev)
		return;

	list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
		indr_block_cb->cb(dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
				  bo);
}
EXPORT_SYMBOL_GPL(flow_indr_block_call);
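
/*
 * flow_indr_block_call() is invoked by the block owner (e.g. a tcf block)
 * to propagate a bind or unbind to every indirect callback attached to
 * @dev; each callback receives the flow_block_offload and may add its own
 * flow_block_cb to bo->cb_list.
 */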

void flow_indr_add_block_cb(struct flow_indr_block_entry *entry)
{
	mutex_lock(&flow_indr_block_cb_lock);
	list_add_tail(&entry->list, &block_cb_list);
	mutex_unlock(&flow_indr_block_cb_lock);
}
EXPORT_SYMBOL_GPL(flow_indr_add_block_cb);

void flow_indr_del_block_cb(struct flow_indr_block_entry *entry)
{
	mutex_lock(&flow_indr_block_cb_lock);
	list_del(&entry->list);
	mutex_unlock(&flow_indr_block_cb_lock);
}
EXPORT_SYMBOL_GPL(flow_indr_del_block_cb);

static int __init init_flow_indr_rhashtable(void)
{
	return rhashtable_init(&indr_setup_block_ht,
			       &flow_indr_setup_block_ht_params);
}
subsys_initcall(init_flow_indr_rhashtable);
523