// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2020 Felix Fietkau <nbd@nbd.name>
 */

#include <linux/if_ether.h>
#include <linux/rhashtable.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/flow_offload.h>
#include <net/pkt_cls.h>
#include <net/dsa.h>
#include "mtk_eth_soc.h"
#include "mtk_wed.h"

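/* Flow key and action data parsed from a TC flower rule while it is being
 * translated into a hardware PPE (packet processing engine) flow entry.
 */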
struct mtk_flow_data {
	struct ethhdr eth;

	union {
		struct {
			__be32 src_addr;
			__be32 dst_addr;
		} v4;

		struct {
			struct in6_addr src_addr;
			struct in6_addr dst_addr;
		} v6;
	};

	__be16 src_port;
	__be16 dst_port;

	u16 vlan_in;

	struct {
		u16 id;
		__be16 proto;
		u8 num;
	} vlan;
	struct {
		u16 sid;
		u8 num;
	} pppoe;
};

static const struct rhashtable_params mtk_flow_ht_params = {
	.head_offset = offsetof(struct mtk_flow_entry, node),
	.key_offset = offsetof(struct mtk_flow_entry, cookie),
	.key_len = sizeof(unsigned long),
	.automatic_shrinking = true,
};

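/* Helpers that copy the collected 5-tuple into the FOE entry. For IPv4 the
 * tuple can be written to either the ingress or the egress side of the
 * entry, selected by @egress.
 */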
static int
mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data,
		       bool egress)
{
	return mtk_foe_entry_set_ipv4_tuple(foe, egress,
					    data->v4.src_addr, data->src_port,
					    data->v4.dst_addr, data->dst_port);
}

static int
mtk_flow_set_ipv6_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data)
{
	return mtk_foe_entry_set_ipv6_tuple(foe,
					    data->v6.src_addr.s6_addr32, data->src_port,
					    data->v6.dst_addr.s6_addr32, data->dst_port);
}

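/* Apply an ethernet-header pedit action directly to the cached ethhdr.
 * Only offsets within the MAC addresses are handled; depending on the
 * mangle mask either a 16-bit or a 32-bit chunk is rewritten.
 */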
static void
mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth)
{
	void *dest = eth + act->mangle.offset;
	const void *src = &act->mangle.val;

	if (act->mangle.offset > 8)
		return;

	if (act->mangle.mask == 0xffff) {
		src += 2;
		dest += 2;
	}

	memcpy(dest, src, act->mangle.mask ? 2 : 4);
}

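/* Look up the WDMA forwarding info for @dev/@addr by querying the device's
 * forward path (used for Wireless Ethernet Dispatch). Returns 0 and fills
 * @info when the path ends in MTK WDMA, a negative value otherwise.
 */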
static int
mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_info *info)
{
	struct net_device_path_ctx ctx = {
		.dev = dev,
	};
	struct net_device_path path = {};

	if (!ctx.dev)
		return -ENODEV;

	memcpy(ctx.daddr, addr, sizeof(ctx.daddr));

	if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED))
		return -1;

	if (!dev->netdev_ops->ndo_fill_forward_path)
		return -1;

	if (dev->netdev_ops->ndo_fill_forward_path(&ctx, &path))
		return -1;

	if (path.type != DEV_PATH_MTK_WDMA)
		return -1;

	info->wdma_idx = path.mtk_wdma.wdma_idx;
	info->queue = path.mtk_wdma.queue;
	info->bss = path.mtk_wdma.bss;
	info->wcid = path.mtk_wdma.wcid;

	return 0;
}

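/* Translate a TCP/UDP port pedit action into the tracked source or
 * destination port, based on the offset and mask inside the L4 header.
 */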
static int
mtk_flow_mangle_ports(const struct flow_action_entry *act,
		      struct mtk_flow_data *data)
{
	u32 val = ntohl(act->mangle.val);

	switch (act->mangle.offset) {
	case 0:
		if (act->mangle.mask == ~htonl(0xffff))
			data->dst_port = cpu_to_be16(val);
		else
			data->src_port = cpu_to_be16(val >> 16);
		break;
	case 2:
		data->dst_port = cpu_to_be16(val);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

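/* Translate an IPv4 address pedit action into the tracked source or
 * destination address.
 */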
static int
mtk_flow_mangle_ipv4(const struct flow_action_entry *act,
		     struct mtk_flow_data *data)
{
	__be32 *dest;

	switch (act->mangle.offset) {
	case offsetof(struct iphdr, saddr):
		dest = &data->v4.src_addr;
		break;
	case offsetof(struct iphdr, daddr):
		dest = &data->v4.dst_addr;
		break;
	default:
		return -EINVAL;
	}

	memcpy(dest, &act->mangle.val, sizeof(u32));

	return 0;
}

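/* If @dev is a user port of a DSA switch using MediaTek tags, rewrite @dev
 * to the CPU port's master device and return the switch port index;
 * -ENODEV otherwise.
 */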
static int
mtk_flow_get_dsa_port(struct net_device **dev)
{
#if IS_ENABLED(CONFIG_NET_DSA)
	struct dsa_port *dp;

	dp = dsa_port_from_netdev(*dev);
	if (IS_ERR(dp))
		return -ENODEV;

	if (dp->cpu_dp->tag_ops->proto != DSA_TAG_PROTO_MTK)
		return -ENODEV;

	*dev = dp->cpu_dp->master;

	return dp->index;
#else
	return -ENODEV;
#endif
}

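/* Pick the PSE destination port for the offloaded flow. Paths ending on a
 * MediaTek WLAN device are redirected to WDMA; otherwise the flow egresses
 * through one of the two GMACs, with an additional DSA tag when the target
 * is a port of an MTK DSA switch.
 */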
static int
mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
			   struct net_device *dev, const u8 *dest_mac,
			   int *wed_index)
{
	struct mtk_wdma_info info = {};
	int pse_port, dsa_port;

	if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
		mtk_foe_entry_set_wdma(foe, info.wdma_idx, info.queue, info.bss,
				       info.wcid);
		pse_port = 3;
		*wed_index = info.wdma_idx;
		goto out;
	}

	dsa_port = mtk_flow_get_dsa_port(&dev);
	if (dsa_port >= 0)
		mtk_foe_entry_set_dsa(foe, dsa_port);

	if (dev == eth->netdev[0])
		pse_port = 1;
	else if (dev == eth->netdev[1])
		pse_port = 2;
	else
		return -EOPNOTSUPP;

out:
	mtk_foe_entry_set_pse_port(foe, pse_port);

	return 0;
}

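/* Translate a FLOW_CLS_REPLACE request into a PPE flow entry: parse the
 * match keys and actions, resolve the output device and commit the entry,
 * tracking it in the flow table keyed by the flower cookie.
 */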
static int
mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_action_entry *act;
	struct mtk_flow_data data = {};
	struct mtk_foe_entry foe;
	struct net_device *odev = NULL;
	struct mtk_flow_entry *entry;
	int offload_type = 0;
	int wed_index = -1;
	u16 addr_type = 0;
	u8 l4proto = 0;
	int err = 0;
	int i;

	if (rhashtable_lookup(&eth->flow_table, &f->cookie, mtk_flow_ht_params))
		return -EEXIST;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) {
		struct flow_match_meta match;

		flow_rule_match_meta(rule, &match);
	} else {
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
	} else {
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		l4proto = match.key->ip_proto;
	} else {
		return -EOPNOTSUPP;
	}

	switch (addr_type) {
	case 0:
		offload_type = MTK_PPE_PKT_TYPE_BRIDGE;
		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
			struct flow_match_eth_addrs match;

			flow_rule_match_eth_addrs(rule, &match);
			memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN);
			memcpy(data.eth.h_source, match.key->src, ETH_ALEN);
		} else {
			return -EOPNOTSUPP;
		}

		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
			struct flow_match_vlan match;

			flow_rule_match_vlan(rule, &match);

			if (match.key->vlan_tpid != cpu_to_be16(ETH_P_8021Q))
				return -EOPNOTSUPP;

			data.vlan_in = match.key->vlan_id;
		}
		break;
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T;
		break;
	default:
		return -EOPNOTSUPP;
	}

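	/* First pass over the actions: apply ethernet mangling to the cached
	 * header and record the redirect target as well as any VLAN/PPPoE
	 * encapsulation to add on egress.
	 */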
	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_MANGLE:
			if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
				return -EOPNOTSUPP;
			if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
				mtk_flow_offload_mangle_eth(act, &data.eth);
			break;
		case FLOW_ACTION_REDIRECT:
			odev = act->dev;
			break;
		case FLOW_ACTION_CSUM:
			break;
		case FLOW_ACTION_VLAN_PUSH:
			if (data.vlan.num == 1 ||
			    act->vlan.proto != htons(ETH_P_8021Q))
				return -EOPNOTSUPP;

			data.vlan.id = act->vlan.vid;
			data.vlan.proto = act->vlan.proto;
			data.vlan.num++;
			break;
		case FLOW_ACTION_VLAN_POP:
			break;
		case FLOW_ACTION_PPPOE_PUSH:
			if (data.pppoe.num == 1)
				return -EOPNOTSUPP;

			data.pppoe.sid = act->pppoe.sid;
			data.pppoe.num++;
			break;
		default:
			return -EOPNOTSUPP;
		}
	}

	if (!is_valid_ether_addr(data.eth.h_source) ||
	    !is_valid_ether_addr(data.eth.h_dest))
		return -EINVAL;

	err = mtk_foe_entry_prepare(&foe, offload_type, l4proto, 0,
				    data.eth.h_source,
				    data.eth.h_dest);
	if (err)
		return err;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports ports;

		if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
			return -EOPNOTSUPP;

		flow_rule_match_ports(rule, &ports);
		data.src_port = ports.key->src;
		data.dst_port = ports.key->dst;
	} else if (offload_type != MTK_PPE_PKT_TYPE_BRIDGE) {
		return -EOPNOTSUPP;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs addrs;

		flow_rule_match_ipv4_addrs(rule, &addrs);

		data.v4.src_addr = addrs.key->src;
		data.v4.dst_addr = addrs.key->dst;

		mtk_flow_set_ipv4_addr(&foe, &data, false);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs addrs;

		flow_rule_match_ipv6_addrs(rule, &addrs);

		data.v6.src_addr = addrs.key->src;
		data.v6.dst_addr = addrs.key->dst;

		mtk_flow_set_ipv6_addr(&foe, &data);
	}

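	/* Second pass: apply IP address and L4 port mangling on top of the
	 * tuple taken from the match keys; ethernet mangling was already
	 * handled above.
	 */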
	flow_action_for_each(i, act, &rule->action) {
		if (act->id != FLOW_ACTION_MANGLE)
			continue;

		if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
			return -EOPNOTSUPP;

		switch (act->mangle.htype) {
		case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
		case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
			err = mtk_flow_mangle_ports(act, &data);
			break;
		case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
			err = mtk_flow_mangle_ipv4(act, &data);
			break;
		case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
			/* handled earlier */
			break;
		default:
			return -EOPNOTSUPP;
		}

		if (err)
			return err;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		err = mtk_flow_set_ipv4_addr(&foe, &data, true);
		if (err)
			return err;
	}

	if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
		foe.bridge.vlan = data.vlan_in;

	if (data.vlan.num == 1) {
		if (data.vlan.proto != htons(ETH_P_8021Q))
			return -EOPNOTSUPP;

		mtk_foe_entry_set_vlan(&foe, data.vlan.id);
	}
	if (data.pppoe.num == 1)
		mtk_foe_entry_set_pppoe(&foe, data.pppoe.sid);

	err = mtk_flow_set_output_device(eth, &foe, odev, data.eth.h_dest,
					 &wed_index);
	if (err)
		return err;

	if (wed_index >= 0 && (err = mtk_wed_flow_add(wed_index)) < 0)
		return err;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->cookie = f->cookie;
	memcpy(&entry->data, &foe, sizeof(entry->data));
	entry->wed_index = wed_index;

	err = mtk_foe_entry_commit(eth->ppe, entry);
	if (err < 0)
		goto free;

	err = rhashtable_insert_fast(&eth->flow_table, &entry->node,
				     mtk_flow_ht_params);
	if (err < 0)
		goto clear;

	return 0;

clear:
	mtk_foe_entry_clear(eth->ppe, entry);
free:
	kfree(entry);
	if (wed_index >= 0)
		mtk_wed_flow_remove(wed_index);
	return err;
}

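/* Tear down an offloaded flow: invalidate the PPE entry, remove it from the
 * flow table and release any WED resources held for it.
 */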
static int
mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f)
{
	struct mtk_flow_entry *entry;

	entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
				  mtk_flow_ht_params);
	if (!entry)
		return -ENOENT;

	mtk_foe_entry_clear(eth->ppe, entry);
	rhashtable_remove_fast(&eth->flow_table, &entry->node,
			       mtk_flow_ht_params);
	if (entry->wed_index >= 0)
		mtk_wed_flow_remove(entry->wed_index);
	kfree(entry);

	return 0;
}

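/* Report when the flow was last used, based on the idle time of its PPE
 * entry.
 */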
static int
mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
{
	struct mtk_flow_entry *entry;
	u32 idle;

	entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
				  mtk_flow_ht_params);
	if (!entry)
		return -ENOENT;

	idle = mtk_foe_entry_idle_time(eth->ppe, entry);
	f->stats.lastused = jiffies - idle * HZ;

	return 0;
}

static DEFINE_MUTEX(mtk_flow_offload_mutex);

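/* flow_block callback: dispatch flower replace/destroy/stats requests to the
 * handlers above, serialized by a global mutex.
 */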
static int
mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{
	struct flow_cls_offload *cls = type_data;
	struct net_device *dev = cb_priv;
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int err;

	if (!tc_can_offload(dev))
		return -EOPNOTSUPP;

	if (type != TC_SETUP_CLSFLOWER)
		return -EOPNOTSUPP;

	mutex_lock(&mtk_flow_offload_mutex);
	switch (cls->command) {
	case FLOW_CLS_REPLACE:
		err = mtk_flow_offload_replace(eth, cls);
		break;
	case FLOW_CLS_DESTROY:
		err = mtk_flow_offload_destroy(eth, cls);
		break;
	case FLOW_CLS_STATS:
		err = mtk_flow_offload_stats(eth, cls);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	mutex_unlock(&mtk_flow_offload_mutex);

	return err;
}

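/* Bind or unbind the flower offload callback for an ingress block. The same
 * block_cb is shared (and refcounted) across multiple binds to the same
 * device, e.g. for the clsact and flowtable offload paths.
 */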
static int
mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	static LIST_HEAD(block_cb_list);
	struct flow_block_cb *block_cb;
	flow_setup_cb_t *cb;

	if (!eth->ppe || !eth->ppe->foe_table)
		return -EOPNOTSUPP;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	cb = mtk_eth_setup_tc_block_cb;
	f->driver_block_list = &block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (block_cb) {
			flow_block_cb_incref(block_cb);
			return 0;
		}
		block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_incref(block_cb);
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (!block_cb)
			return -ENOENT;

		if (!flow_block_cb_decref(block_cb)) {
			flow_block_cb_remove(block_cb, f);
			list_del(&block_cb->driver_list);
		}
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

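/* tc offload entry point: both classifier blocks and flowtable (TC_SETUP_FT)
 * offload requests use the same block setup path.
 */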
int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
		     void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
	case TC_SETUP_FT:
		return mtk_eth_setup_tc_block(dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

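/* Initialize the flow table used to track offloaded flows. A missing PPE or
 * FOE table just means offloading stays disabled.
 */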
int mtk_eth_offload_init(struct mtk_eth *eth)
{
	if (!eth->ppe || !eth->ppe->foe_table)
		return 0;

	return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params);
}