/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <net/flow_offload.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_offload.h>
#include <net/pkt_cls.h>

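/* Allocate a nft_flow_rule and its embedded flow_rule with room for
 * num_actions action entries, pointing the match dissector/mask/key at
 * the storage embedded in the nft_flow_rule itself.
 */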
static struct nft_flow_rule *nft_flow_rule_alloc(int num_actions)
{
	struct nft_flow_rule *flow;

	flow = kzalloc(sizeof(struct nft_flow_rule), GFP_KERNEL);
	if (!flow)
		return NULL;

	flow->rule = flow_rule_alloc(num_actions);
	if (!flow->rule) {
		kfree(flow);
		return NULL;
	}

	flow->rule->match.dissector	= &flow->match.dissector;
	flow->rule->match.mask		= &flow->match.mask;
	flow->rule->match.key		= &flow->match.key;

	return flow;
}

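/* Translate an nftables rule into a flow_rule for hardware offload:
 * count the expressions that map to actions, allocate the flow rule,
 * then let each expression's ->offload() callback fill in the match
 * and action entries. Returns an ERR_PTR() if any expression cannot
 * be offloaded.
 */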
struct nft_flow_rule *nft_flow_rule_create(struct net *net,
					   const struct nft_rule *rule)
{
	struct nft_offload_ctx *ctx;
	struct nft_flow_rule *flow;
	int num_actions = 0, err;
	struct nft_expr *expr;

	expr = nft_expr_first(rule);
	while (expr->ops && expr != nft_expr_last(rule)) {
		if (expr->ops->offload_flags & NFT_OFFLOAD_F_ACTION)
			num_actions++;

		expr = nft_expr_next(expr);
	}

	if (num_actions == 0)
		return ERR_PTR(-EOPNOTSUPP);

	flow = nft_flow_rule_alloc(num_actions);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	expr = nft_expr_first(rule);

	ctx = kzalloc(sizeof(struct nft_offload_ctx), GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto err_out;
	}
	ctx->net = net;
	ctx->dep.type = NFT_OFFLOAD_DEP_UNSPEC;

	while (expr->ops && expr != nft_expr_last(rule)) {
		if (!expr->ops->offload) {
			err = -EOPNOTSUPP;
			goto err_out;
		}
		err = expr->ops->offload(ctx, flow, expr);
		if (err < 0)
			goto err_out;

		expr = nft_expr_next(expr);
	}
	flow->proto = ctx->dep.l3num;
	kfree(ctx);

	return flow;
err_out:
	kfree(ctx);
	nft_flow_rule_destroy(flow);

	return ERR_PTR(err);
}

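/* Release the device references taken by redirect/mirror actions and
 * free the flow rule.
 */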
void nft_flow_rule_destroy(struct nft_flow_rule *flow)
{
	struct flow_action_entry *entry;
	int i;

	flow_action_for_each(i, entry, &flow->rule->action) {
		switch (entry->id) {
		case FLOW_ACTION_REDIRECT:
		case FLOW_ACTION_MIRRED:
			dev_put(entry->dev);
			break;
		default:
			break;
		}
	}
	kfree(flow->rule);
	kfree(flow);
}

void nft_offload_set_dependency(struct nft_offload_ctx *ctx,
				enum nft_offload_dep_type type)
{
	ctx->dep.type = type;
}

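/* Record the L3 protocol number or L4 protocol, depending on the
 * dependency type announced via nft_offload_set_dependency(), then
 * reset the dependency back to unspecified.
 */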
void nft_offload_update_dependency(struct nft_offload_ctx *ctx,
				   const void *data, u32 len)
{
	switch (ctx->dep.type) {
	case NFT_OFFLOAD_DEP_NETWORK:
		WARN_ON(len != sizeof(__u16));
		memcpy(&ctx->dep.l3num, data, sizeof(__u16));
		break;
	case NFT_OFFLOAD_DEP_TRANSPORT:
		WARN_ON(len != sizeof(__u8));
		memcpy(&ctx->dep.protonum, data, sizeof(__u8));
		break;
	default:
		break;
	}
	ctx->dep.type = NFT_OFFLOAD_DEP_UNSPEC;
}

static void nft_flow_offload_common_init(struct flow_cls_common_offload *common,
					 __be16 proto, int priority,
					 struct netlink_ext_ack *extack)
{
	common->protocol = proto;
	common->prio = priority;
	common->extack = extack;
}

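/* Invoke every flow block callback registered on this list, stopping
 * at the first callback that returns an error.
 */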
static int nft_setup_cb_call(enum tc_setup_type type, void *type_data,
			     struct list_head *cb_list)
{
	struct flow_block_cb *block_cb;
	int err;

	list_for_each_entry(block_cb, cb_list, list) {
		err = block_cb->cb(type, type_data, block_cb->cb_priv);
		if (err < 0)
			return err;
	}
	return 0;
}

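/* Offloaded base chains must use a chain priority in the range
 * 1..USHRT_MAX; anything else is rejected.
 */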
int nft_chain_offload_priority(struct nft_base_chain *basechain)
{
	if (basechain->ops.priority <= 0 ||
	    basechain->ops.priority > USHRT_MAX)
		return -1;

	return 0;
}

static void nft_flow_cls_offload_setup(struct flow_cls_offload *cls_flow,
				       const struct nft_base_chain *basechain,
				       const struct nft_rule *rule,
				       const struct nft_flow_rule *flow,
				       struct netlink_ext_ack *extack,
				       enum flow_cls_command command)
{
	__be16 proto = ETH_P_ALL;

	memset(cls_flow, 0, sizeof(*cls_flow));

	if (flow)
		proto = flow->proto;

	nft_flow_offload_common_init(&cls_flow->common, proto,
				     basechain->ops.priority, extack);
	cls_flow->command = command;
	cls_flow->cookie = (unsigned long) rule;
	if (flow)
		cls_flow->rule = flow->rule;
}

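/* Push a single rule to the flow block callbacks of its base chain as
 * a TC_SETUP_CLSFLOWER request (replace or destroy).
 */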
static int nft_flow_offload_rule(struct nft_chain *chain,
				 struct nft_rule *rule,
				 struct nft_flow_rule *flow,
				 enum flow_cls_command command)
{
	struct netlink_ext_ack extack = {};
	struct flow_cls_offload cls_flow;
	struct nft_base_chain *basechain;

	if (!nft_is_base_chain(chain))
		return -EOPNOTSUPP;

	basechain = nft_base_chain(chain);
	nft_flow_cls_offload_setup(&cls_flow, basechain, rule, flow, &extack,
				   command);

	return nft_setup_cb_call(TC_SETUP_CLSFLOWER, &cls_flow,
				 &basechain->flow_block.cb_list);
}

static int nft_flow_offload_bind(struct flow_block_offload *bo,
				 struct nft_base_chain *basechain)
{
	list_splice(&bo->cb_list, &basechain->flow_block.cb_list);
	return 0;
}

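/* On unbind, ask the driver to destroy every rule installed on this
 * chain, then release the flow block callbacks.
 */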
static int nft_flow_offload_unbind(struct flow_block_offload *bo,
				   struct nft_base_chain *basechain)
{
	struct flow_block_cb *block_cb, *next;
	struct flow_cls_offload cls_flow;
	struct netlink_ext_ack extack;
	struct nft_chain *chain;
	struct nft_rule *rule;

	chain = &basechain->chain;
	list_for_each_entry(rule, &chain->rules, list) {
		memset(&extack, 0, sizeof(extack));
		nft_flow_cls_offload_setup(&cls_flow, basechain, rule, NULL,
					   &extack, FLOW_CLS_DESTROY);
		nft_setup_cb_call(TC_SETUP_CLSFLOWER, &cls_flow, &bo->cb_list);
	}

	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
	}

	return 0;
}

static int nft_block_setup(struct nft_base_chain *basechain,
			   struct flow_block_offload *bo,
			   enum flow_block_command cmd)
{
	int err;

	switch (cmd) {
	case FLOW_BLOCK_BIND:
		err = nft_flow_offload_bind(bo, basechain);
		break;
	case FLOW_BLOCK_UNBIND:
		err = nft_flow_offload_unbind(bo, basechain);
		break;
	default:
		WARN_ON_ONCE(1);
		err = -EOPNOTSUPP;
	}

	return err;
}

static void nft_flow_block_offload_init(struct flow_block_offload *bo,
					struct net *net,
					enum flow_block_command cmd,
					struct nft_base_chain *basechain,
					struct netlink_ext_ack *extack)
{
	memset(bo, 0, sizeof(*bo));
	bo->net		= net;
	bo->block	= &basechain->flow_block;
	bo->command	= cmd;
	bo->binder_type	= FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
	bo->extack	= extack;
	INIT_LIST_HEAD(&bo->cb_list);
}

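/* Bind or unbind the chain's flow block through the device's
 * ndo_setup_tc() callback.
 */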
static int nft_block_offload_cmd(struct nft_base_chain *chain,
				 struct net_device *dev,
				 enum flow_block_command cmd)
{
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo;
	int err;

	nft_flow_block_offload_init(&bo, dev_net(dev), cmd, chain, &extack);

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
	if (err < 0)
		return err;

	return nft_block_setup(chain, &bo, cmd);
}

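/* Cleanup callback passed to the indirect block infrastructure: when
 * the offloading driver goes away, unbind the rules installed on this
 * base chain under the nftables commit mutex.
 */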
static void nft_indr_block_cleanup(struct flow_block_cb *block_cb)
{
	struct nft_base_chain *basechain = block_cb->indr.data;
	struct net_device *dev = block_cb->indr.dev;
	struct netlink_ext_ack extack = {};
	struct net *net = dev_net(dev);
	struct flow_block_offload bo;

	nft_flow_block_offload_init(&bo, dev_net(dev), FLOW_BLOCK_UNBIND,
				    basechain, &extack);
	mutex_lock(&net->nft.commit_mutex);
	list_move(&block_cb->list, &bo.cb_list);
	nft_flow_offload_unbind(&bo, basechain);
	mutex_unlock(&net->nft.commit_mutex);
}

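/* Bind or unbind via the indirect block infrastructure, used for
 * devices that do not expose ndo_setup_tc() themselves (typically
 * devices whose offload is handled by another driver).
 */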
static int nft_indr_block_offload_cmd(struct nft_base_chain *basechain,
				      struct net_device *dev,
				      enum flow_block_command cmd)
{
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo;
	int err;

	nft_flow_block_offload_init(&bo, dev_net(dev), cmd, basechain, &extack);

	err = flow_indr_dev_setup_offload(dev, TC_SETUP_BLOCK, basechain, &bo,
					  nft_indr_block_cleanup);
	if (err < 0)
		return err;

	if (list_empty(&bo.cb_list))
		return -EOPNOTSUPP;

	return nft_block_setup(basechain, &bo, cmd);
}

#define FLOW_SETUP_BLOCK TC_SETUP_BLOCK

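/* Dispatch a bind/unbind command either directly via ndo_setup_tc() or
 * via the indirect block infrastructure.
 */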
static int nft_chain_offload_cmd(struct nft_base_chain *basechain,
				 struct net_device *dev,
				 enum flow_block_command cmd)
{
	int err;

	if (dev->netdev_ops->ndo_setup_tc)
		err = nft_block_offload_cmd(basechain, dev, cmd);
	else
		err = nft_indr_block_offload_cmd(basechain, dev, cmd);

	return err;
}

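/* Apply the flow block command to every hook device of the base chain,
 * or only to this_dev when one is given. A failed bind rolls back the
 * devices that were already bound.
 */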
static int nft_flow_block_chain(struct nft_base_chain *basechain,
				const struct net_device *this_dev,
				enum flow_block_command cmd)
{
	struct net_device *dev;
	struct nft_hook *hook;
	int err, i = 0;

	list_for_each_entry(hook, &basechain->hook_list, list) {
		dev = hook->ops.dev;
		if (this_dev && this_dev != dev)
			continue;

		err = nft_chain_offload_cmd(basechain, dev, cmd);
		if (err < 0 && cmd == FLOW_BLOCK_BIND) {
			if (!this_dev)
				goto err_flow_block;

			return err;
		}
		i++;
	}

	return 0;

err_flow_block:
	list_for_each_entry(hook, &basechain->hook_list, list) {
		if (i-- <= 0)
			break;

		dev = hook->ops.dev;
		nft_chain_offload_cmd(basechain, dev, FLOW_BLOCK_UNBIND);
	}
	return err;
}

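/* Bind or unbind hardware offload for a base chain. Only chains with
 * an accept policy can be bound for now.
 */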
static int nft_flow_offload_chain(struct nft_chain *chain, u8 *ppolicy,
				  enum flow_block_command cmd)
{
	struct nft_base_chain *basechain;
	u8 policy;

	if (!nft_is_base_chain(chain))
		return -EOPNOTSUPP;

	basechain = nft_base_chain(chain);
	policy = ppolicy ? *ppolicy : basechain->policy;

	/* Only the default accept policy is supported for now. */
	if (cmd == FLOW_BLOCK_BIND && policy == NF_DROP)
		return -EOPNOTSUPP;

	return nft_flow_block_chain(basechain, NULL, cmd);
}

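/* Walk back through the transactions already pushed to hardware and
 * undo them after a failure in nft_flow_rule_offload_commit().
 */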
static void nft_flow_rule_offload_abort(struct net *net,
					struct nft_trans *trans)
{
	int err = 0;

	list_for_each_entry_continue_reverse(trans, &net->nft.commit_list, list) {
		if (trans->ctx.family != NFPROTO_NETDEV)
			continue;

		switch (trans->msg_type) {
		case NFT_MSG_NEWCHAIN:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) ||
			    nft_trans_chain_update(trans))
				continue;

			err = nft_flow_offload_chain(trans->ctx.chain, NULL,
						     FLOW_BLOCK_UNBIND);
			break;
		case NFT_MSG_DELCHAIN:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			err = nft_flow_offload_chain(trans->ctx.chain, NULL,
						     FLOW_BLOCK_BIND);
			break;
		case NFT_MSG_NEWRULE:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			err = nft_flow_offload_rule(trans->ctx.chain,
						    nft_trans_rule(trans),
						    NULL, FLOW_CLS_DESTROY);
			break;
		case NFT_MSG_DELRULE:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			err = nft_flow_offload_rule(trans->ctx.chain,
						    nft_trans_rule(trans),
						    nft_trans_flow_rule(trans),
						    FLOW_CLS_REPLACE);
			break;
		}

		if (WARN_ON_ONCE(err))
			break;
	}
}

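/* Replay the netdev-family transactions of the current commit to the
 * offload path: bind/unbind chains and install/remove rules, aborting
 * on the first error. A second pass releases the software-side flow
 * rule representations.
 */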
int nft_flow_rule_offload_commit(struct net *net)
{
	struct nft_trans *trans;
	int err = 0;
	u8 policy;

	list_for_each_entry(trans, &net->nft.commit_list, list) {
		if (trans->ctx.family != NFPROTO_NETDEV)
			continue;

		switch (trans->msg_type) {
		case NFT_MSG_NEWCHAIN:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) ||
			    nft_trans_chain_update(trans))
				continue;

			policy = nft_trans_chain_policy(trans);
			err = nft_flow_offload_chain(trans->ctx.chain, &policy,
						     FLOW_BLOCK_BIND);
			break;
		case NFT_MSG_DELCHAIN:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			policy = nft_trans_chain_policy(trans);
			err = nft_flow_offload_chain(trans->ctx.chain, &policy,
						     FLOW_BLOCK_UNBIND);
			break;
		case NFT_MSG_NEWRULE:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			if (trans->ctx.flags & NLM_F_REPLACE ||
			    !(trans->ctx.flags & NLM_F_APPEND)) {
				err = -EOPNOTSUPP;
				break;
			}
			err = nft_flow_offload_rule(trans->ctx.chain,
						    nft_trans_rule(trans),
						    nft_trans_flow_rule(trans),
						    FLOW_CLS_REPLACE);
			break;
		case NFT_MSG_DELRULE:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			err = nft_flow_offload_rule(trans->ctx.chain,
						    nft_trans_rule(trans),
						    NULL, FLOW_CLS_DESTROY);
			break;
		}

		if (err) {
			nft_flow_rule_offload_abort(net, trans);
			break;
		}
	}

	list_for_each_entry(trans, &net->nft.commit_list, list) {
		if (trans->ctx.family != NFPROTO_NETDEV)
			continue;

		switch (trans->msg_type) {
		case NFT_MSG_NEWRULE:
		case NFT_MSG_DELRULE:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			nft_flow_rule_destroy(nft_trans_flow_rule(trans));
			break;
		default:
			break;
		}
	}

	return err;
}

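/* Find the offloaded netdev base chain, if any, that is hooked to the
 * given device. The caller holds the nftables commit mutex.
 */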
static struct nft_chain *__nft_offload_get_chain(struct net_device *dev)
{
	struct nft_base_chain *basechain;
	struct net *net = dev_net(dev);
	struct nft_hook *hook, *found;
	const struct nft_table *table;
	struct nft_chain *chain;

	list_for_each_entry(table, &net->nft.tables, list) {
		if (table->family != NFPROTO_NETDEV)
			continue;

		list_for_each_entry(chain, &table->chains, list) {
			if (!nft_is_base_chain(chain) ||
			    !(chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			found = NULL;
			basechain = nft_base_chain(chain);
			list_for_each_entry(hook, &basechain->hook_list, list) {
				if (hook->ops.dev != dev)
					continue;

				found = hook;
				break;
			}
			if (!found)
				continue;

			return chain;
		}
	}

	return NULL;
}

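/* Netdevice notifier: when a device with an offloaded base chain is
 * unregistered, unbind the chain's flow block from that device.
 */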
static int nft_offload_netdev_event(struct notifier_block *this,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct nft_chain *chain;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	mutex_lock(&net->nft.commit_mutex);
	chain = __nft_offload_get_chain(dev);
	if (chain)
		nft_flow_block_chain(nft_base_chain(chain), dev,
				     FLOW_BLOCK_UNBIND);

	mutex_unlock(&net->nft.commit_mutex);

	return NOTIFY_DONE;
}

static struct notifier_block nft_offload_netdev_notifier = {
	.notifier_call	= nft_offload_netdev_event,
};

int nft_offload_init(void)
{
	return register_netdevice_notifier(&nft_offload_netdev_notifier);
}

void nft_offload_exit(void)
{
	unregister_netdevice_notifier(&nft_offload_netdev_notifier);
}