// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/dst_metadata.h>
#include <net/dsa.h>
#include "mtk_eth_soc.h"
#include "mtk_ppe.h"
#include "mtk_ppe_regs.h"

static DEFINE_SPINLOCK(ppe_lock);

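/* L2 (bridged) flows are tracked in a software rhashtable, keyed on the
 * bridge tuple stored in mtk_flow_entry::data.bridge up to key_end
 * (MAC addresses and VLAN, matching the key built in __mtk_ppe_check_skb()).
 */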
static const struct rhashtable_params mtk_flow_l2_ht_params = {
	.head_offset = offsetof(struct mtk_flow_entry, l2_node),
	.key_offset = offsetof(struct mtk_flow_entry, data.bridge),
	.key_len = offsetof(struct mtk_foe_bridge, key_end),
	.automatic_shrinking = true,
};

static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	writel(val, ppe->base + reg);
}

static u32 ppe_r32(struct mtk_ppe *ppe, u32 reg)
{
	return readl(ppe->base + reg);
}

static u32 ppe_m32(struct mtk_ppe *ppe, u32 reg, u32 mask, u32 set)
{
	u32 val;

	val = ppe_r32(ppe, reg);
	val &= ~mask;
	val |= set;
	ppe_w32(ppe, reg, val);

	return val;
}

static u32 ppe_set(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	return ppe_m32(ppe, reg, 0, val);
}

static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	return ppe_m32(ppe, reg, val, 0);
}

static u32 mtk_eth_timestamp(struct mtk_eth *eth)
{
	return mtk_r32(eth, 0x0010) & mtk_get_ib1_ts_mask(eth);
}

static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
{
	int ret;
	u32 val;

	ret = readl_poll_timeout(ppe->base + MTK_PPE_GLO_CFG, val,
				 !(val & MTK_PPE_GLO_CFG_BUSY),
				 20, MTK_PPE_WAIT_TIMEOUT_US);

	if (ret)
		dev_err(ppe->dev, "PPE table busy");

	return ret;
}

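/* The PPE keeps an internal lookup cache in front of the FOE table.
 * Pulsing the CLEAR bit appears to flush it; callers do this whenever a
 * table entry is modified behind the hardware's back.
 */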
static void mtk_ppe_cache_clear(struct mtk_ppe *ppe)
{
	ppe_set(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
	ppe_clear(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
}

static void mtk_ppe_cache_enable(struct mtk_ppe *ppe, bool enable)
{
	mtk_ppe_cache_clear(ppe);

	ppe_m32(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_EN,
		enable * MTK_PPE_CACHE_CTL_EN);
}

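/* Software mirror of the hash the PPE presumably uses to index the FOE
 * table, so that new software entries can be linked to the bucket the
 * hardware will look up. Only IPv4 route/HNAPT and IPv6 3T/5T types are
 * handled; anything else returns MTK_PPE_HASH_MASK as an invalid marker.
 */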
static u32 mtk_ppe_hash_entry(struct mtk_eth *eth, struct mtk_foe_entry *e)
{
	u32 hv1, hv2, hv3;
	u32 hash;

	switch (mtk_get_ib1_pkt_type(eth, e->ib1)) {
	case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
	case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
		hv1 = e->ipv4.orig.ports;
		hv2 = e->ipv4.orig.dest_ip;
		hv3 = e->ipv4.orig.src_ip;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
		hv1 = e->ipv6.src_ip[3] ^ e->ipv6.dest_ip[3];
		hv1 ^= e->ipv6.ports;

		hv2 = e->ipv6.src_ip[2] ^ e->ipv6.dest_ip[2];
		hv2 ^= e->ipv6.dest_ip[0];

		hv3 = e->ipv6.src_ip[1] ^ e->ipv6.dest_ip[1];
		hv3 ^= e->ipv6.src_ip[0];
		break;
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
	default:
		WARN_ON_ONCE(1);
		return MTK_PPE_HASH_MASK;
	}

	hash = (hv1 & hv2) | ((~hv1) & hv3);
	hash = (hash >> 24) | ((hash & 0xffffff) << 8);
	hash ^= hv1 ^ hv2 ^ hv3;
	hash ^= hash >> 16;
	hash <<= (ffs(eth->soc->hash_offset) - 1);
	hash &= MTK_PPE_ENTRIES - 1;

	return hash;
}

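/* The offsets of the l2 and ib2 fields depend on the packet type encoded
 * in ib1; these helpers return pointers into the right union member so
 * callers do not have to repeat the layout checks.
 */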
static inline struct mtk_foe_mac_info *
mtk_foe_entry_l2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
{
	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);

	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
		return &entry->bridge.l2;

	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		return &entry->ipv6.l2;

	return &entry->ipv4.l2;
}

static inline u32 *
mtk_foe_entry_ib2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
{
	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);

	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
		return &entry->bridge.ib2;

	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		return &entry->ipv6.ib2;

	return &entry->ipv4.ib2;
}

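/* Initialize a FOE entry in BIND state for the given packet type and L4
 * protocol, filling in the destination PSE port, the L2 rewrite info
 * (MAC addresses, ethertype) and, for route-only types, a padding value
 * in place of the L4 ports.
 */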
int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			  int type, int l4proto, u8 pse_port, u8 *src_mac,
			  u8 *dest_mac)
{
	struct mtk_foe_mac_info *l2;
	u32 ports_pad, val;

	memset(entry, 0, sizeof(*entry));

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
		      FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE_V2, type) |
		      FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
		      MTK_FOE_IB1_BIND_CACHE_V2 | MTK_FOE_IB1_BIND_TTL_V2;
		entry->ib1 = val;

		val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, pse_port) |
		      FIELD_PREP(MTK_FOE_IB2_PORT_AG_V2, 0xf);
	} else {
		int port_mg = eth->soc->offload_version > 1 ? 0 : 0x3f;

		val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
		      FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
		      FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
		      MTK_FOE_IB1_BIND_CACHE | MTK_FOE_IB1_BIND_TTL;
		entry->ib1 = val;

		val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port) |
		      FIELD_PREP(MTK_FOE_IB2_PORT_MG, port_mg) |
		      FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f);
	}

	if (is_multicast_ether_addr(dest_mac))
		val |= mtk_get_ib2_multicast_mask(eth);

	ports_pad = 0xa5a5a500 | (l4proto & 0xff);
	if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
		entry->ipv4.orig.ports = ports_pad;
	if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
		entry->ipv6.ports = ports_pad;

	if (type == MTK_PPE_PKT_TYPE_BRIDGE) {
		ether_addr_copy(entry->bridge.src_mac, src_mac);
		ether_addr_copy(entry->bridge.dest_mac, dest_mac);
		entry->bridge.ib2 = val;
		l2 = &entry->bridge.l2;
	} else if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
		entry->ipv6.ib2 = val;
		l2 = &entry->ipv6.l2;
	} else {
		entry->ipv4.ib2 = val;
		l2 = &entry->ipv4.l2;
	}

	l2->dest_mac_hi = get_unaligned_be32(dest_mac);
	l2->dest_mac_lo = get_unaligned_be16(dest_mac + 4);
	l2->src_mac_hi = get_unaligned_be32(src_mac);
	l2->src_mac_lo = get_unaligned_be16(src_mac + 4);

	if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
		l2->etype = ETH_P_IPV6;
	else
		l2->etype = ETH_P_IP;

	return 0;
}

int mtk_foe_entry_set_pse_port(struct mtk_eth *eth,
			       struct mtk_foe_entry *entry, u8 port)
{
	u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
	u32 val = *ib2;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		val &= ~MTK_FOE_IB2_DEST_PORT_V2;
		val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, port);
	} else {
		val &= ~MTK_FOE_IB2_DEST_PORT;
		val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port);
	}
	*ib2 = val;

	return 0;
}

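/* Fill in the IPv4 address and port tuple. For HNAPT entries the egress
 * flag selects between the original and the translated (new) tuple; for
 * 6RD the addresses describe the IPv4 tunnel endpoints and no ports are
 * stored; for plain IPv4 route entries the ports are left untouched.
 */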
int mtk_foe_entry_set_ipv4_tuple(struct mtk_eth *eth,
				 struct mtk_foe_entry *entry, bool egress,
				 __be32 src_addr, __be16 src_port,
				 __be32 dest_addr, __be16 dest_port)
{
	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
	struct mtk_ipv4_tuple *t;

	switch (type) {
	case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
		if (egress) {
			t = &entry->ipv4.new;
			break;
		}
		fallthrough;
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
	case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
		t = &entry->ipv4.orig;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
		entry->ipv6_6rd.tunnel_src_ip = be32_to_cpu(src_addr);
		entry->ipv6_6rd.tunnel_dest_ip = be32_to_cpu(dest_addr);
		return 0;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	t->src_ip = be32_to_cpu(src_addr);
	t->dest_ip = be32_to_cpu(dest_addr);

	if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
		return 0;

	t->src_port = be16_to_cpu(src_port);
	t->dest_port = be16_to_cpu(dest_port);

	return 0;
}

int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth,
				 struct mtk_foe_entry *entry,
				 __be32 *src_addr, __be16 src_port,
				 __be32 *dest_addr, __be16 dest_port)
{
	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
	u32 *src, *dest;
	int i;

	switch (type) {
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
		src = entry->dslite.tunnel_src_ip;
		dest = entry->dslite.tunnel_dest_ip;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
		entry->ipv6.src_port = be16_to_cpu(src_port);
		entry->ipv6.dest_port = be16_to_cpu(dest_port);
		fallthrough;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
		src = entry->ipv6.src_ip;
		dest = entry->ipv6.dest_ip;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	for (i = 0; i < 4; i++)
		src[i] = be32_to_cpu(src_addr[i]);
	for (i = 0; i < 4; i++)
		dest[i] = be32_to_cpu(dest_addr[i]);

	return 0;
}

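/* DSA transmit path: the etype field is reused to carry the MTK special
 * tag, with BIT(port) selecting the switch egress port and BIT(8)
 * apparently flagging an additional VLAN header. A VLAN layer is
 * accounted for in ib1 so the hardware inserts the tag, while the VLAN
 * tag flag itself is cleared.
 */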
int mtk_foe_entry_set_dsa(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			  int port)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);

	l2->etype = BIT(port);

	if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)))
		entry->ib1 |= mtk_prep_ib1_vlan_layer(eth, 1);
	else
		l2->etype |= BIT(8);

	entry->ib1 &= ~mtk_get_ib1_vlan_tag_mask(eth);

	return 0;
}

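/* Add a VLAN tag to the L2 rewrite info. The first tag is stored in
 * vlan1 and flagged in ib1; when a DSA special tag already occupies the
 * first layer, the tag still goes into vlan1 and BIT(8) is set in etype,
 * while a genuine second tag lands in vlan2 and bumps the layer count.
 * More than two layers are rejected with -ENOSPC.
 */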
int mtk_foe_entry_set_vlan(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			   int vid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);

	switch (mtk_get_ib1_vlan_layer(eth, entry->ib1)) {
	case 0:
		entry->ib1 |= mtk_get_ib1_vlan_tag_mask(eth) |
			      mtk_prep_ib1_vlan_layer(eth, 1);
		l2->vlan1 = vid;
		return 0;
	case 1:
		if (!(entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth))) {
			l2->vlan1 = vid;
			l2->etype |= BIT(8);
		} else {
			l2->vlan2 = vid;
			entry->ib1 += mtk_prep_ib1_vlan_layer(eth, 1);
		}
		return 0;
	default:
		return -ENOSPC;
	}
}

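/* Add a PPPoE session header to the rewrite info. The ethertype is
 * rewritten to ETH_P_PPP_SES unless a DSA special tag (a VLAN layer
 * without a VLAN tag) is already occupying the field.
 */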
int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			    int sid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);

	if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)) ||
	    (entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth)))
		l2->etype = ETH_P_PPP_SES;

	entry->ib1 |= mtk_get_ib1_ppoe_mask(eth);
	l2->pppoe_id = sid;

	return 0;
}

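/* Route the flow to a WDMA (wireless) device so it can be handed off to
 * the WLAN card directly. On NETSYS v2 the target (wcid/bss) goes into
 * the dedicated winfo field; on v1 it is packed into vlan2, which
 * appears to be repurposed for this in that layout.
 */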
int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			   int wdma_idx, int txq, int bss, int wcid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
	u32 *ib2 = mtk_foe_entry_ib2(eth, entry);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		*ib2 &= ~MTK_FOE_IB2_PORT_MG_V2;
		*ib2 |= FIELD_PREP(MTK_FOE_IB2_RX_IDX, txq) |
			MTK_FOE_IB2_WDMA_WINFO_V2;
		l2->winfo = FIELD_PREP(MTK_FOE_WINFO_WCID, wcid) |
			    FIELD_PREP(MTK_FOE_WINFO_BSS, bss);
	} else {
		*ib2 &= ~MTK_FOE_IB2_PORT_MG;
		*ib2 |= MTK_FOE_IB2_WDMA_WINFO;
		if (wdma_idx)
			*ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;
		l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
			    FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
			    FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);
	}

	return 0;
}

int mtk_foe_entry_set_queue(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			    unsigned int queue)
{
	u32 *ib2 = mtk_foe_entry_ib2(eth, entry);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		*ib2 &= ~MTK_FOE_IB2_QID_V2;
		*ib2 |= FIELD_PREP(MTK_FOE_IB2_QID_V2, queue);
		*ib2 |= MTK_FOE_IB2_PSE_QOS_V2;
	} else {
		*ib2 &= ~MTK_FOE_IB2_QID;
		*ib2 |= FIELD_PREP(MTK_FOE_IB2_QID, queue);
		*ib2 |= MTK_FOE_IB2_PSE_QOS;
	}

	return 0;
}

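/* Compare a software flow entry against a FOE entry: the UDP bit in ib1
 * must match, and the type-dependent portion of the data union up to
 * (but excluding) ib2/_rsv must be identical. The "- 4" accounts for ib1
 * preceding the data union in the offsetof() calculations.
 */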
static bool
mtk_flow_entry_match(struct mtk_eth *eth, struct mtk_flow_entry *entry,
		     struct mtk_foe_entry *data)
{
	int type, len;

	if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
		return false;

	type = mtk_get_ib1_pkt_type(eth, entry->data.ib1);
	if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		len = offsetof(struct mtk_foe_entry, ipv6._rsv);
	else
		len = offsetof(struct mtk_foe_entry, ipv4.ib2);

	return !memcmp(&entry->data.data, &data->data, len - 4);
}

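/* Unlink a flow entry and invalidate its hardware state. For an L2 flow
 * this recurses over all of its subflows; subflow entries themselves are
 * freed here since they are allocated internally in
 * mtk_foe_entry_commit_subflow(). Must be called with ppe_lock held.
 */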
static void
__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	struct hlist_head *head;
	struct hlist_node *tmp;

	if (entry->type == MTK_FLOW_TYPE_L2) {
		rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node,
				       mtk_flow_l2_ht_params);

		head = &entry->l2_flows;
		hlist_for_each_entry_safe(entry, tmp, head, l2_data.list)
			__mtk_foe_entry_clear(ppe, entry);
		return;
	}

	hlist_del_init(&entry->list);
	if (entry->hash != 0xffff) {
		struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, entry->hash);

		hwe->ib1 &= ~MTK_FOE_IB1_STATE;
		hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_INVALID);
		dma_wmb();
		mtk_ppe_cache_clear(ppe);
	}
	entry->hash = 0xffff;

	if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW)
		return;

	hlist_del_init(&entry->l2_data.list);
	kfree(entry);
}

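/* Idle time in hardware timestamp ticks, derived from the difference
 * between the current frame engine timestamp and the one stored in ib1,
 * with wraparound of the truncated counter taken into account.
 */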
static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1)
{
	u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
	u16 now = mtk_eth_timestamp(ppe->eth);
	u16 timestamp = ib1 & ib1_ts_mask;

	if (timestamp > now)
		return ib1_ts_mask + 1 - timestamp + now;
	else
		return now - timestamp;
}

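/* Refresh an L2 flow from its subflows: drop subflows whose hardware
 * entries are no longer in BIND state and propagate the most recent
 * (least idle) timestamp back into the parent entry's ib1.
 */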
static void
mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
	struct mtk_flow_entry *cur;
	struct mtk_foe_entry *hwe;
	struct hlist_node *tmp;
	int idle;

	idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
	hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) {
		int cur_idle;
		u32 ib1;

		hwe = mtk_foe_get_entry(ppe, cur->hash);
		ib1 = READ_ONCE(hwe->ib1);

		if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) {
			cur->hash = 0xffff;
			__mtk_foe_entry_clear(ppe, cur);
			continue;
		}

		cur_idle = __mtk_foe_entry_idle_time(ppe, ib1);
		if (cur_idle >= idle)
			continue;

		idle = cur_idle;
		entry->data.ib1 &= ~ib1_ts_mask;
		entry->data.ib1 |= hwe->ib1 & ib1_ts_mask;
	}
}

static void
mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	struct mtk_foe_entry foe = {};
	struct mtk_foe_entry *hwe;

	spin_lock_bh(&ppe_lock);

	if (entry->type == MTK_FLOW_TYPE_L2) {
		mtk_flow_entry_update_l2(ppe, entry);
		goto out;
	}

	if (entry->hash == 0xffff)
		goto out;

	hwe = mtk_foe_get_entry(ppe, entry->hash);
	memcpy(&foe, hwe, ppe->eth->soc->foe_entry_size);
	if (!mtk_flow_entry_match(ppe->eth, entry, &foe)) {
		entry->hash = 0xffff;
		goto out;
	}

	entry->data.ib1 = foe.ib1;

out:
	spin_unlock_bh(&ppe_lock);
}

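/* Write a prepared entry into the FOE table at the given hash index.
 * The bind timestamp is refreshed first, and the data portion is written
 * before ib1 with a wmb() in between, so the hardware never sees a BIND
 * state paired with stale data; the lookup cache is flushed afterwards.
 */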
static void
__mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
		       u16 hash)
{
	struct mtk_eth *eth = ppe->eth;
	u16 timestamp = mtk_eth_timestamp(eth);
	struct mtk_foe_entry *hwe;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP_V2;
		entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP_V2,
					 timestamp);
	} else {
		entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
		entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP,
					 timestamp);
	}

	hwe = mtk_foe_get_entry(ppe, hash);
	memcpy(&hwe->data, &entry->data, eth->soc->foe_entry_size - sizeof(hwe->ib1));
	wmb();
	hwe->ib1 = entry->ib1;

	dma_wmb();

	mtk_ppe_cache_clear(ppe);
}

void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	spin_lock_bh(&ppe_lock);
	__mtk_foe_entry_clear(ppe, entry);
	spin_unlock_bh(&ppe_lock);
}

static int
mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	entry->type = MTK_FLOW_TYPE_L2;

	return rhashtable_insert_fast(&ppe->l2_flows, &entry->l2_node,
				      mtk_flow_l2_ht_params);
}

int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	const struct mtk_soc_data *soc = ppe->eth->soc;
	int type = mtk_get_ib1_pkt_type(ppe->eth, entry->data.ib1);
	u32 hash;

	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
		return mtk_foe_entry_commit_l2(ppe, entry);

	hash = mtk_ppe_hash_entry(ppe->eth, &entry->data);
	entry->hash = 0xffff;
	spin_lock_bh(&ppe_lock);
	hlist_add_head(&entry->list, &ppe->foe_flow[hash / soc->hash_offset]);
	spin_unlock_bh(&ppe_lock);

	return 0;
}

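/* Materialize a hardware entry for one subflow of an L2 (bridge) flow:
 * take the L3/L4 keys from the entry the PPE just looked up, overlay the
 * L2 rewrite info and ib2 from the parent bridge flow, and commit the
 * result under the same hash. The bookkeeping struct allocated here is
 * freed in __mtk_foe_entry_clear().
 */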
static void
mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
			     u16 hash)
{
	const struct mtk_soc_data *soc = ppe->eth->soc;
	struct mtk_flow_entry *flow_info;
	struct mtk_foe_entry foe = {}, *hwe;
	struct mtk_foe_mac_info *l2;
	u32 ib1_mask = mtk_get_ib1_pkt_type_mask(ppe->eth) | MTK_FOE_IB1_UDP;
	int type;

	flow_info = kzalloc(sizeof(*flow_info), GFP_ATOMIC);
	if (!flow_info)
		return;

	flow_info->l2_data.base_flow = entry;
	flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW;
	flow_info->hash = hash;
	hlist_add_head(&flow_info->list,
		       &ppe->foe_flow[hash / soc->hash_offset]);
	hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);

	hwe = mtk_foe_get_entry(ppe, hash);
	memcpy(&foe, hwe, soc->foe_entry_size);
	foe.ib1 &= ib1_mask;
	foe.ib1 |= entry->data.ib1 & ~ib1_mask;

	l2 = mtk_foe_entry_l2(ppe->eth, &foe);
	memcpy(l2, &entry->data.bridge.l2, sizeof(*l2));

	type = mtk_get_ib1_pkt_type(ppe->eth, foe.ib1);
	if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT)
		memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new));
	else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP)
		l2->etype = ETH_P_IPV6;

	*mtk_foe_entry_ib2(ppe->eth, &foe) = entry->data.bridge.ib2;

	__mtk_foe_entry_commit(ppe, &foe, hash);
}

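/* Called from the RX path for packets that hit an unbound FOE entry.
 * Try to bind a matching software flow to this hash; if none matches,
 * fall back to an L2 flow lookup keyed on MAC addresses and VLAN
 * (parsing the DSA special tag when present) and create a subflow.
 */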
void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
{
	const struct mtk_soc_data *soc = ppe->eth->soc;
	struct hlist_head *head = &ppe->foe_flow[hash / soc->hash_offset];
	struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, hash);
	struct mtk_flow_entry *entry;
	struct mtk_foe_bridge key = {};
	struct hlist_node *n;
	struct ethhdr *eh;
	bool found = false;
	u8 *tag;

	spin_lock_bh(&ppe_lock);

	if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND)
		goto out;

	hlist_for_each_entry_safe(entry, n, head, list) {
		if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) {
			if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) ==
				     MTK_FOE_STATE_BIND))
				continue;

			entry->hash = 0xffff;
			__mtk_foe_entry_clear(ppe, entry);
			continue;
		}

		if (found || !mtk_flow_entry_match(ppe->eth, entry, hwe)) {
			if (entry->hash != 0xffff)
				entry->hash = 0xffff;
			continue;
		}

		entry->hash = hash;
		__mtk_foe_entry_commit(ppe, &entry->data, hash);
		found = true;
	}

	if (found)
		goto out;

	eh = eth_hdr(skb);
	ether_addr_copy(key.dest_mac, eh->h_dest);
	ether_addr_copy(key.src_mac, eh->h_source);
	tag = skb->data - 2;
	key.vlan = 0;
	switch (skb->protocol) {
#if IS_ENABLED(CONFIG_NET_DSA)
	case htons(ETH_P_XDSA):
		if (!netdev_uses_dsa(skb->dev) ||
		    skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
			goto out;

		if (!skb_metadata_dst(skb))
			tag += 4;

		if (get_unaligned_be16(tag) != ETH_P_8021Q)
			break;

		fallthrough;
#endif
	case htons(ETH_P_8021Q):
		key.vlan = get_unaligned_be16(tag + 2) & VLAN_VID_MASK;
		break;
	default:
		break;
	}

	entry = rhashtable_lookup_fast(&ppe->l2_flows, &key, mtk_flow_l2_ht_params);
	if (!entry)
		goto out;

	mtk_foe_entry_commit_subflow(ppe, entry, hash);

out:
	spin_unlock_bh(&ppe_lock);
}

int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	mtk_flow_entry_update(ppe, entry);

	return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
}

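/* Quiesce the PPE before a hardware reset: turn off keepalive and scan
 * mode, give the engine time to settle, then wait for the table to go
 * idle. The delays and ordering follow the existing sequence and are
 * presumably hardware requirements.
 */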
int mtk_ppe_prepare_reset(struct mtk_ppe *ppe)
{
	if (!ppe)
		return -EINVAL;

	/* disable KA */
	ppe_clear(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_KEEPALIVE);
	ppe_clear(ppe, MTK_PPE_BIND_LMT1, MTK_PPE_NTU_KEEPALIVE);
	ppe_w32(ppe, MTK_PPE_KEEPALIVE, 0);
	usleep_range(10000, 11000);

	/* set KA timer to maximum */
	ppe_set(ppe, MTK_PPE_BIND_LMT1, MTK_PPE_NTU_KEEPALIVE);
	ppe_w32(ppe, MTK_PPE_KEEPALIVE, 0xffffffff);

	/* set KA tick select */
	ppe_set(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_TICK_SEL);
	ppe_set(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_KEEPALIVE);
	usleep_range(10000, 11000);

	/* disable scan mode */
	ppe_clear(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_SCAN_MODE);
	usleep_range(10000, 11000);

	return mtk_ppe_wait_busy(ppe);
}

struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
			     int version, int index)
{
	const struct mtk_soc_data *soc = eth->soc;
	struct device *dev = eth->dev;
	struct mtk_ppe *ppe;
	u32 foe_flow_size;
	void *foe;

	ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL);
	if (!ppe)
		return NULL;

	rhashtable_init(&ppe->l2_flows, &mtk_flow_l2_ht_params);

	/* need to allocate a separate device, since the PPE DMA access is
	 * not coherent.
	 */
	ppe->base = base;
	ppe->eth = eth;
	ppe->dev = dev;
	ppe->version = version;

	foe = dmam_alloc_coherent(ppe->dev,
				  MTK_PPE_ENTRIES * soc->foe_entry_size,
				  &ppe->foe_phys, GFP_KERNEL);
	if (!foe)
		goto err_free_l2_flows;

	ppe->foe_table = foe;

	foe_flow_size = (MTK_PPE_ENTRIES / soc->hash_offset) *
			sizeof(*ppe->foe_flow);
	ppe->foe_flow = devm_kzalloc(dev, foe_flow_size, GFP_KERNEL);
	if (!ppe->foe_flow)
		goto err_free_l2_flows;

	mtk_ppe_debugfs_init(ppe, index);

	return ppe;

err_free_l2_flows:
	rhashtable_destroy(&ppe->l2_flows);
	return NULL;
}

void mtk_ppe_deinit(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) {
		if (!eth->ppe[i])
			return;
		rhashtable_destroy(&eth->ppe[i]->l2_flows);
	}
}

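/* Clear the FOE table. On MT7621 entries that cross a 1024-byte boundary
 * cannot be used (per the comment below), so those slots are marked
 * static and left out of normal allocation.
 */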
static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
{
	static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
	int i, k;

	memset(ppe->foe_table, 0,
	       MTK_PPE_ENTRIES * ppe->eth->soc->foe_entry_size);

	if (!IS_ENABLED(CONFIG_SOC_MT7621))
		return;

	/* skip all entries that cross the 1024 byte boundary */
	for (i = 0; i < MTK_PPE_ENTRIES; i += 128) {
		for (k = 0; k < ARRAY_SIZE(skip); k++) {
			struct mtk_foe_entry *hwe;

			hwe = mtk_foe_get_entry(ppe, i + skip[k]);
			hwe->ib1 |= MTK_FOE_IB1_STATIC;
		}
	}
}

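/* Bring up the PPE: point it at the DMA-coherent FOE table, configure
 * table, aging and binding parameters, enable the lookup cache and the
 * flow types that may be offloaded, then set the enable bit. The NETSYS
 * v2 branch also writes default CPU port and bandwidth values (0xcb777,
 * 0x7f) whose encoding is hardware specific.
 */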
void mtk_ppe_start(struct mtk_ppe *ppe)
{
	u32 val;

	if (!ppe)
		return;

	mtk_ppe_init_foe_table(ppe);
	ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys);

	val = MTK_PPE_TB_CFG_ENTRY_80B |
	      MTK_PPE_TB_CFG_AGE_NON_L4 |
	      MTK_PPE_TB_CFG_AGE_UNBIND |
	      MTK_PPE_TB_CFG_AGE_TCP |
	      MTK_PPE_TB_CFG_AGE_UDP |
	      MTK_PPE_TB_CFG_AGE_TCP_FIN |
	      FIELD_PREP(MTK_PPE_TB_CFG_SEARCH_MISS,
			 MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD) |
	      FIELD_PREP(MTK_PPE_TB_CFG_KEEPALIVE,
			 MTK_PPE_KEEPALIVE_DISABLE) |
	      FIELD_PREP(MTK_PPE_TB_CFG_HASH_MODE, 1) |
	      FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE,
			 MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) |
	      FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
			 MTK_PPE_ENTRIES_SHIFT);
	if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2))
		val |= MTK_PPE_TB_CFG_INFO_SEL;
	ppe_w32(ppe, MTK_PPE_TB_CFG, val);

	ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK,
		MTK_PPE_IP_PROTO_CHK_IPV4 | MTK_PPE_IP_PROTO_CHK_IPV6);

	mtk_ppe_cache_enable(ppe, true);

	val = MTK_PPE_FLOW_CFG_IP6_3T_ROUTE |
	      MTK_PPE_FLOW_CFG_IP6_5T_ROUTE |
	      MTK_PPE_FLOW_CFG_IP6_6RD |
	      MTK_PPE_FLOW_CFG_IP4_NAT |
	      MTK_PPE_FLOW_CFG_IP4_NAPT |
	      MTK_PPE_FLOW_CFG_IP4_DSLITE |
	      MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
	if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2))
		val |= MTK_PPE_MD_TOAP_BYP_CRSN0 |
		       MTK_PPE_MD_TOAP_BYP_CRSN1 |
		       MTK_PPE_MD_TOAP_BYP_CRSN2 |
		       MTK_PPE_FLOW_CFG_IP4_HASH_GRE_KEY;
	else
		val |= MTK_PPE_FLOW_CFG_IP4_TCP_FRAG |
		       MTK_PPE_FLOW_CFG_IP4_UDP_FRAG;
	ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);

	val = FIELD_PREP(MTK_PPE_UNBIND_AGE_MIN_PACKETS, 1000) |
	      FIELD_PREP(MTK_PPE_UNBIND_AGE_DELTA, 3);
	ppe_w32(ppe, MTK_PPE_UNBIND_AGE, val);

	val = FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_UDP, 12) |
	      FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_NON_L4, 1);
	ppe_w32(ppe, MTK_PPE_BIND_AGE0, val);

	val = FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
	      FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP, 7);
	ppe_w32(ppe, MTK_PPE_BIND_AGE1, val);

	val = MTK_PPE_BIND_LIMIT0_QUARTER | MTK_PPE_BIND_LIMIT0_HALF;
	ppe_w32(ppe, MTK_PPE_BIND_LIMIT0, val);

	val = MTK_PPE_BIND_LIMIT1_FULL |
	      FIELD_PREP(MTK_PPE_BIND_LIMIT1_NON_L4, 1);
	ppe_w32(ppe, MTK_PPE_BIND_LIMIT1, val);

	val = FIELD_PREP(MTK_PPE_BIND_RATE_BIND, 30) |
	      FIELD_PREP(MTK_PPE_BIND_RATE_PREBIND, 1);
	ppe_w32(ppe, MTK_PPE_BIND_RATE, val);

	/* enable PPE */
	val = MTK_PPE_GLO_CFG_EN |
	      MTK_PPE_GLO_CFG_IP4_L4_CS_DROP |
	      MTK_PPE_GLO_CFG_IP4_CS_DROP |
	      MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE;
	ppe_w32(ppe, MTK_PPE_GLO_CFG, val);

	ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);

	if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2)) {
		ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT1, 0xcb777);
		ppe_w32(ppe, MTK_PPE_SBW_CTRL, 0x7f);
	}
}

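/* Tear down offload: invalidate every FOE entry, disable the cache, the
 * engine and all aging, then wait for the hardware to go idle.
 */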
int mtk_ppe_stop(struct mtk_ppe *ppe)
{
	u32 val;
	int i;

	if (!ppe)
		return 0;

	for (i = 0; i < MTK_PPE_ENTRIES; i++) {
		struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, i);

		hwe->ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
				      MTK_FOE_STATE_INVALID);
	}

	mtk_ppe_cache_enable(ppe, false);

	/* disable offload engine */
	ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN);
	ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0);

	/* disable aging */
	val = MTK_PPE_TB_CFG_AGE_NON_L4 |
	      MTK_PPE_TB_CFG_AGE_UNBIND |
	      MTK_PPE_TB_CFG_AGE_TCP |
	      MTK_PPE_TB_CFG_AGE_UDP |
	      MTK_PPE_TB_CFG_AGE_TCP_FIN;
	ppe_clear(ppe, MTK_PPE_TB_CFG, val);

	return mtk_ppe_wait_busy(ppe);
}