/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <net/flow_offload.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_offload.h>
#include <net/pkt_cls.h>

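/* Allocate a nft_flow_rule with room for @num_actions flow actions and point
 * the flow_rule match at the embedded nft_flow_match, so that expression
 * ->offload() callbacks can fill in key/mask data directly.
 */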
static struct nft_flow_rule *nft_flow_rule_alloc(int num_actions)
{
	struct nft_flow_rule *flow;

	flow = kzalloc(sizeof(struct nft_flow_rule), GFP_KERNEL);
	if (!flow)
		return NULL;

	flow->rule = flow_rule_alloc(num_actions);
	if (!flow->rule) {
		kfree(flow);
		return NULL;
	}

	flow->rule->match.dissector	= &flow->match.dissector;
	flow->rule->match.mask		= &flow->match.mask;
	flow->rule->match.key		= &flow->match.key;

	return flow;
}

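/* Set the control key's addr_type unless a control key is already present.
 * Typically called from an expression's ->offload() callback once the L3
 * protocol of the match is known, e.g. (illustrative):
 *
 *	nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
 */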
void nft_flow_rule_set_addr_type(struct nft_flow_rule *flow,
				 enum flow_dissector_key_id addr_type)
{
	struct nft_flow_match *match = &flow->match;
	struct nft_flow_key *mask = &match->mask;
	struct nft_flow_key *key = &match->key;

	if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL))
		return;

	key->control.addr_type = addr_type;
	mask->control.addr_type = 0xffff;
	match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL);
	match->dissector.offset[FLOW_DISSECTOR_KEY_CONTROL] =
		offsetof(struct nft_flow_key, control);
}

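/* Translate an nft rule into a flow_rule suitable for hardware offload:
 * count the expressions that emit flow actions, then run each expression's
 * ->offload() callback to populate the match and the action list. Returns
 * -EOPNOTSUPP if the rule has no offloadable action or contains an
 * expression that cannot be offloaded.
 */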
struct nft_flow_rule *nft_flow_rule_create(struct net *net,
					   const struct nft_rule *rule)
{
	struct nft_offload_ctx *ctx;
	struct nft_flow_rule *flow;
	int num_actions = 0, err;
	struct nft_expr *expr;

	expr = nft_expr_first(rule);
	while (nft_expr_more(rule, expr)) {
		if (expr->ops->offload_flags & NFT_OFFLOAD_F_ACTION)
			num_actions++;

		expr = nft_expr_next(expr);
	}

	if (num_actions == 0)
		return ERR_PTR(-EOPNOTSUPP);

	flow = nft_flow_rule_alloc(num_actions);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	expr = nft_expr_first(rule);

	ctx = kzalloc(sizeof(struct nft_offload_ctx), GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto err_out;
	}
	ctx->net = net;
	ctx->dep.type = NFT_OFFLOAD_DEP_UNSPEC;

	while (nft_expr_more(rule, expr)) {
		if (!expr->ops->offload) {
			err = -EOPNOTSUPP;
			goto err_out;
		}
		err = expr->ops->offload(ctx, flow, expr);
		if (err < 0)
			goto err_out;

		expr = nft_expr_next(expr);
	}
	flow->proto = ctx->dep.l3num;
	kfree(ctx);

	return flow;
err_out:
	kfree(ctx);
	nft_flow_rule_destroy(flow);

	return ERR_PTR(err);
}

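/* Release a nft_flow_rule: drop the device references held by redirect and
 * mirror actions before freeing the flow_rule and its container.
 */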
void nft_flow_rule_destroy(struct nft_flow_rule *flow)
{
	struct flow_action_entry *entry;
	int i;

	flow_action_for_each(i, entry, &flow->rule->action) {
		switch (entry->id) {
		case FLOW_ACTION_REDIRECT:
		case FLOW_ACTION_MIRRED:
			dev_put(entry->dev);
			break;
		default:
			break;
		}
	}
	kfree(flow->rule);
	kfree(flow);
}

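/* Dependency tracking between expressions: the expression that loads a
 * header field records which layer the following comparison resolves via
 * nft_offload_set_dependency(), and nft_offload_update_dependency() then
 * captures the protocol value (l3num or protonum) and resets the
 * dependency to NFT_OFFLOAD_DEP_UNSPEC.
 */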
void nft_offload_set_dependency(struct nft_offload_ctx *ctx,
				enum nft_offload_dep_type type)
{
	ctx->dep.type = type;
}

void nft_offload_update_dependency(struct nft_offload_ctx *ctx,
				   const void *data, u32 len)
{
	switch (ctx->dep.type) {
	case NFT_OFFLOAD_DEP_NETWORK:
		WARN_ON(len != sizeof(__u16));
		memcpy(&ctx->dep.l3num, data, sizeof(__u16));
		break;
	case NFT_OFFLOAD_DEP_TRANSPORT:
		WARN_ON(len != sizeof(__u8));
		memcpy(&ctx->dep.protonum, data, sizeof(__u8));
		break;
	default:
		break;
	}
	ctx->dep.type = NFT_OFFLOAD_DEP_UNSPEC;
}

static void nft_flow_offload_common_init(struct flow_cls_common_offload *common,
					 __be16 proto, int priority,
					 struct netlink_ext_ack *extack)
{
	common->protocol = proto;
	common->prio = priority;
	common->extack = extack;
}

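/* Invoke every callback registered on the chain's flow block, stopping at
 * the first driver that reports an error.
 */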
static int nft_setup_cb_call(enum tc_setup_type type, void *type_data,
			     struct list_head *cb_list)
{
	struct flow_block_cb *block_cb;
	int err;

	list_for_each_entry(block_cb, cb_list, list) {
		err = block_cb->cb(type, type_data, block_cb->cb_priv);
		if (err < 0)
			return err;
	}
	return 0;
}

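/* The chain priority is reused as the flow_cls_offload priority; only
 * values in the range 1..USHRT_MAX can be expressed there.
 */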
int nft_chain_offload_priority(struct nft_base_chain *basechain)
{
	if (basechain->ops.priority <= 0 ||
	    basechain->ops.priority > USHRT_MAX)
		return -1;

	return 0;
}

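/* Fill in a flow_cls_offload request for @rule. The rule pointer doubles as
 * the driver-visible cookie; @flow may be NULL for destroy requests, in
 * which case the protocol defaults to ETH_P_ALL.
 */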
static void nft_flow_cls_offload_setup(struct flow_cls_offload *cls_flow,
				       const struct nft_base_chain *basechain,
				       const struct nft_rule *rule,
				       const struct nft_flow_rule *flow,
				       struct netlink_ext_ack *extack,
				       enum flow_cls_command command)
{
	__be16 proto = ETH_P_ALL;

	memset(cls_flow, 0, sizeof(*cls_flow));

	if (flow)
		proto = flow->proto;

	nft_flow_offload_common_init(&cls_flow->common, proto,
				     basechain->ops.priority, extack);
	cls_flow->command = command;
	cls_flow->cookie = (unsigned long) rule;
	if (flow)
		cls_flow->rule = flow->rule;
}

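/* Push a single rule to the drivers bound to the base chain's flow block as
 * a TC_SETUP_CLSFLOWER request (replace or destroy).
 */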
static int nft_flow_offload_rule(struct nft_chain *chain,
				 struct nft_rule *rule,
				 struct nft_flow_rule *flow,
				 enum flow_cls_command command)
{
	struct netlink_ext_ack extack = {};
	struct flow_cls_offload cls_flow;
	struct nft_base_chain *basechain;

	if (!nft_is_base_chain(chain))
		return -EOPNOTSUPP;

	basechain = nft_base_chain(chain);
	nft_flow_cls_offload_setup(&cls_flow, basechain, rule, flow, &extack,
				   command);

	return nft_setup_cb_call(TC_SETUP_CLSFLOWER, &cls_flow,
				 &basechain->flow_block.cb_list);
}

static int nft_flow_offload_bind(struct flow_block_offload *bo,
				 struct nft_base_chain *basechain)
{
	list_splice(&bo->cb_list, &basechain->flow_block.cb_list);
	return 0;
}

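/* Tear down a flow block binding: ask the driver to destroy every rule in
 * the chain, then drop and free the block callbacks it registered.
 */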
static int nft_flow_offload_unbind(struct flow_block_offload *bo,
				   struct nft_base_chain *basechain)
{
	struct flow_block_cb *block_cb, *next;
	struct flow_cls_offload cls_flow;
	struct netlink_ext_ack extack;
	struct nft_chain *chain;
	struct nft_rule *rule;

	chain = &basechain->chain;
	list_for_each_entry(rule, &chain->rules, list) {
		memset(&extack, 0, sizeof(extack));
		nft_flow_cls_offload_setup(&cls_flow, basechain, rule, NULL,
					   &extack, FLOW_CLS_DESTROY);
		nft_setup_cb_call(TC_SETUP_CLSFLOWER, &cls_flow, &bo->cb_list);
	}

	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
	}

	return 0;
}

static int nft_block_setup(struct nft_base_chain *basechain,
			   struct flow_block_offload *bo,
			   enum flow_block_command cmd)
{
	int err;

	switch (cmd) {
	case FLOW_BLOCK_BIND:
		err = nft_flow_offload_bind(bo, basechain);
		break;
	case FLOW_BLOCK_UNBIND:
		err = nft_flow_offload_unbind(bo, basechain);
		break;
	default:
		WARN_ON_ONCE(1);
		err = -EOPNOTSUPP;
	}

	return err;
}

static void nft_flow_block_offload_init(struct flow_block_offload *bo,
					struct net *net,
					enum flow_block_command cmd,
					struct nft_base_chain *basechain,
					struct netlink_ext_ack *extack)
{
	memset(bo, 0, sizeof(*bo));
	bo->net		= net;
	bo->block	= &basechain->flow_block;
	bo->command	= cmd;
	bo->binder_type	= FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
	bo->extack	= extack;
	INIT_LIST_HEAD(&bo->cb_list);
}

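/* Bind or unbind the flow block on a device that implements ->ndo_setup_tc,
 * then splice (or release) the callbacks the driver registered.
 */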
static int nft_block_offload_cmd(struct nft_base_chain *chain,
				 struct net_device *dev,
				 enum flow_block_command cmd)
{
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo;
	int err;

	nft_flow_block_offload_init(&bo, dev_net(dev), cmd, chain, &extack);

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
	if (err < 0)
		return err;

	return nft_block_setup(chain, &bo, cmd);
}

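/* Cleanup callback run by the flow_indr infrastructure when an indirect
 * block goes away (e.g. the offloading driver unregisters): undo the
 * binding under the nft commit mutex.
 */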
static void nft_indr_block_cleanup(struct flow_block_cb *block_cb)
{
	struct nft_base_chain *basechain = block_cb->indr.data;
	struct net_device *dev = block_cb->indr.dev;
	struct netlink_ext_ack extack = {};
	struct net *net = dev_net(dev);
	struct flow_block_offload bo;

	nft_flow_block_offload_init(&bo, dev_net(dev), FLOW_BLOCK_UNBIND,
				    basechain, &extack);
	mutex_lock(&net->nft.commit_mutex);
	list_del(&block_cb->driver_list);
	list_move(&block_cb->list, &bo.cb_list);
	nft_flow_offload_unbind(&bo, basechain);
	mutex_unlock(&net->nft.commit_mutex);
}

static int nft_indr_block_offload_cmd(struct nft_base_chain *basechain,
				      struct net_device *dev,
				      enum flow_block_command cmd)
{
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo;
	int err;

	nft_flow_block_offload_init(&bo, dev_net(dev), cmd, basechain, &extack);

	err = flow_indr_dev_setup_offload(dev, NULL, TC_SETUP_BLOCK, basechain, &bo,
					  nft_indr_block_cleanup);
	if (err < 0)
		return err;

	if (list_empty(&bo.cb_list))
		return -EOPNOTSUPP;

	return nft_block_setup(basechain, &bo, cmd);
}

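/* Prefer the device's own ->ndo_setup_tc; fall back to the indirect block
 * infrastructure for devices whose offload is handled by another driver
 * (e.g. tunnel netdevices).
 */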
static int nft_chain_offload_cmd(struct nft_base_chain *basechain,
				 struct net_device *dev,
				 enum flow_block_command cmd)
{
	int err;

	if (dev->netdev_ops->ndo_setup_tc)
		err = nft_block_offload_cmd(basechain, dev, cmd);
	else
		err = nft_indr_block_offload_cmd(basechain, dev, cmd);

	return err;
}

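/* Apply @cmd to every hook of the base chain, or only to @this_dev if one
 * is given. If binding fails part-way through, unbind the devices that were
 * already set up.
 */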
static int nft_flow_block_chain(struct nft_base_chain *basechain,
				const struct net_device *this_dev,
				enum flow_block_command cmd)
{
	struct net_device *dev;
	struct nft_hook *hook;
	int err, i = 0;

	list_for_each_entry(hook, &basechain->hook_list, list) {
		dev = hook->ops.dev;
		if (this_dev && this_dev != dev)
			continue;

		err = nft_chain_offload_cmd(basechain, dev, cmd);
		if (err < 0 && cmd == FLOW_BLOCK_BIND) {
			if (!this_dev)
				goto err_flow_block;

			return err;
		}
		i++;
	}

	return 0;

err_flow_block:
	list_for_each_entry(hook, &basechain->hook_list, list) {
		if (i-- <= 0)
			break;

		dev = hook->ops.dev;
		nft_chain_offload_cmd(basechain, dev, FLOW_BLOCK_UNBIND);
	}
	return err;
}

static int nft_flow_offload_chain(struct nft_chain *chain, u8 *ppolicy,
				  enum flow_block_command cmd)
{
	struct nft_base_chain *basechain;
	u8 policy;

	if (!nft_is_base_chain(chain))
		return -EOPNOTSUPP;

	basechain = nft_base_chain(chain);
	policy = ppolicy ? *ppolicy : basechain->policy;

	/* Only default policy to accept is supported for now. */
	if (cmd == FLOW_BLOCK_BIND && policy == NF_DROP)
		return -EOPNOTSUPP;

	return nft_flow_block_chain(basechain, NULL, cmd);
}

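/* Roll back the offload state for the transactions that were already pushed
 * to hardware before @trans failed, replaying the inverse operation for
 * each one.
 */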
static void nft_flow_rule_offload_abort(struct net *net,
					struct nft_trans *trans)
{
	int err = 0;

	list_for_each_entry_continue_reverse(trans, &net->nft.commit_list, list) {
		if (trans->ctx.family != NFPROTO_NETDEV)
			continue;

		switch (trans->msg_type) {
		case NFT_MSG_NEWCHAIN:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) ||
			    nft_trans_chain_update(trans))
				continue;

			err = nft_flow_offload_chain(trans->ctx.chain, NULL,
						     FLOW_BLOCK_UNBIND);
			break;
		case NFT_MSG_DELCHAIN:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			err = nft_flow_offload_chain(trans->ctx.chain, NULL,
						     FLOW_BLOCK_BIND);
			break;
		case NFT_MSG_NEWRULE:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			err = nft_flow_offload_rule(trans->ctx.chain,
						    nft_trans_rule(trans),
						    NULL, FLOW_CLS_DESTROY);
			break;
		case NFT_MSG_DELRULE:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			err = nft_flow_offload_rule(trans->ctx.chain,
						    nft_trans_rule(trans),
						    nft_trans_flow_rule(trans),
						    FLOW_CLS_REPLACE);
			break;
		}

		if (WARN_ON_ONCE(err))
			break;
	}
}

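/* Walk the pending transaction list and mirror chain and rule changes on
 * NFPROTO_NETDEV chains flagged with NFT_CHAIN_HW_OFFLOAD into hardware; on
 * failure, abort the offload requests made so far. A second pass releases
 * the software copies of the flow rules once the drivers have been notified.
 */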
int nft_flow_rule_offload_commit(struct net *net)
{
	struct nft_trans *trans;
	int err = 0;
	u8 policy;

	list_for_each_entry(trans, &net->nft.commit_list, list) {
		if (trans->ctx.family != NFPROTO_NETDEV)
			continue;

		switch (trans->msg_type) {
		case NFT_MSG_NEWCHAIN:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) ||
			    nft_trans_chain_update(trans))
				continue;

			policy = nft_trans_chain_policy(trans);
			err = nft_flow_offload_chain(trans->ctx.chain, &policy,
						     FLOW_BLOCK_BIND);
			break;
		case NFT_MSG_DELCHAIN:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			policy = nft_trans_chain_policy(trans);
			err = nft_flow_offload_chain(trans->ctx.chain, &policy,
						     FLOW_BLOCK_UNBIND);
			break;
		case NFT_MSG_NEWRULE:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			if (trans->ctx.flags & NLM_F_REPLACE ||
			    !(trans->ctx.flags & NLM_F_APPEND)) {
				err = -EOPNOTSUPP;
				break;
			}
			err = nft_flow_offload_rule(trans->ctx.chain,
						    nft_trans_rule(trans),
						    nft_trans_flow_rule(trans),
						    FLOW_CLS_REPLACE);
			break;
		case NFT_MSG_DELRULE:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			err = nft_flow_offload_rule(trans->ctx.chain,
						    nft_trans_rule(trans),
						    NULL, FLOW_CLS_DESTROY);
			break;
		}

		if (err) {
			nft_flow_rule_offload_abort(net, trans);
			break;
		}
	}

	list_for_each_entry(trans, &net->nft.commit_list, list) {
		if (trans->ctx.family != NFPROTO_NETDEV)
			continue;

		switch (trans->msg_type) {
		case NFT_MSG_NEWRULE:
		case NFT_MSG_DELRULE:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			nft_flow_rule_destroy(nft_trans_flow_rule(trans));
			break;
		default:
			break;
		}
	}

	return err;
}

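/* Find a hardware-offloaded netdev base chain that hooks @dev. Called with
 * the nft commit mutex held.
 */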
static struct nft_chain *__nft_offload_get_chain(struct net_device *dev)
{
	struct nft_base_chain *basechain;
	struct net *net = dev_net(dev);
	struct nft_hook *hook, *found;
	const struct nft_table *table;
	struct nft_chain *chain;

	list_for_each_entry(table, &net->nft.tables, list) {
		if (table->family != NFPROTO_NETDEV)
			continue;

		list_for_each_entry(chain, &table->chains, list) {
			if (!nft_is_base_chain(chain) ||
			    !(chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			found = NULL;
			basechain = nft_base_chain(chain);
			list_for_each_entry(hook, &basechain->hook_list, list) {
				if (hook->ops.dev != dev)
					continue;

				found = hook;
				break;
			}
			if (!found)
				continue;

			return chain;
		}
	}

	return NULL;
}

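/* Netdevice notifier: when a device hooked by an offloaded base chain is
 * unregistered, unbind the flow block from that device.
 */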
static int nft_offload_netdev_event(struct notifier_block *this,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct nft_chain *chain;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	mutex_lock(&net->nft.commit_mutex);
	chain = __nft_offload_get_chain(dev);
	if (chain)
		nft_flow_block_chain(nft_base_chain(chain), dev,
				     FLOW_BLOCK_UNBIND);

	mutex_unlock(&net->nft.commit_mutex);

	return NOTIFY_DONE;
}

static struct notifier_block nft_offload_netdev_notifier = {
	.notifier_call	= nft_offload_netdev_event,
};

int nft_offload_init(void)
{
	return register_netdevice_notifier(&nft_offload_netdev_notifier);
}

void nft_offload_exit(void)
{
	unregister_netdevice_notifier(&nft_offload_netdev_notifier);
}