xref: /openbmc/linux/drivers/net/ethernet/mediatek/mtk_ppe.c (revision 2b3082c6ef3b0104d822f6f18d2afbe5fc9a5c2c)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
3 
4 #include <linux/kernel.h>
5 #include <linux/io.h>
6 #include <linux/iopoll.h>
7 #include <linux/etherdevice.h>
8 #include <linux/platform_device.h>
9 #include <linux/if_ether.h>
10 #include <linux/if_vlan.h>
11 #include <net/dst_metadata.h>
12 #include <net/dsa.h>
13 #include "mtk_eth_soc.h"
14 #include "mtk_ppe.h"
15 #include "mtk_ppe_regs.h"
16 
17 static DEFINE_SPINLOCK(ppe_lock);
18 
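/* L2 (bridge) flows are tracked in a separate rhashtable keyed on the
 * bridge tuple stored in the software flow entry (everything up to
 * key_end), so hardware-learned subflows can be matched back to their
 * parent L2 entry.
 */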
19 static const struct rhashtable_params mtk_flow_l2_ht_params = {
20 	.head_offset = offsetof(struct mtk_flow_entry, l2_node),
21 	.key_offset = offsetof(struct mtk_flow_entry, data.bridge),
22 	.key_len = offsetof(struct mtk_foe_bridge, key_end),
23 	.automatic_shrinking = true,
24 };
25 
26 static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
27 {
28 	writel(val, ppe->base + reg);
29 }
30 
31 static u32 ppe_r32(struct mtk_ppe *ppe, u32 reg)
32 {
33 	return readl(ppe->base + reg);
34 }
35 
36 static u32 ppe_m32(struct mtk_ppe *ppe, u32 reg, u32 mask, u32 set)
37 {
38 	u32 val;
39 
40 	val = ppe_r32(ppe, reg);
41 	val &= ~mask;
42 	val |= set;
43 	ppe_w32(ppe, reg, val);
44 
45 	return val;
46 }
47 
48 static u32 ppe_set(struct mtk_ppe *ppe, u32 reg, u32 val)
49 {
50 	return ppe_m32(ppe, reg, 0, val);
51 }
52 
53 static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val)
54 {
55 	return ppe_m32(ppe, reg, val, 0);
56 }
57 
58 static u32 mtk_eth_timestamp(struct mtk_eth *eth)
59 {
60 	return mtk_r32(eth, 0x0010) & mtk_get_ib1_ts_mask(eth);
61 }
62 
63 static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
64 {
65 	int ret;
66 	u32 val;
67 
68 	ret = readl_poll_timeout(ppe->base + MTK_PPE_GLO_CFG, val,
69 				 !(val & MTK_PPE_GLO_CFG_BUSY),
70 				 20, MTK_PPE_WAIT_TIMEOUT_US);
71 
72 	if (ret)
73 		dev_err(ppe->dev, "PPE table busy\n");
74 
75 	return ret;
76 }
77 
78 static int mtk_ppe_mib_wait_busy(struct mtk_ppe *ppe)
79 {
80 	int ret;
81 	u32 val;
82 
83 	ret = readl_poll_timeout(ppe->base + MTK_PPE_MIB_SER_CR, val,
84 				 !(val & MTK_PPE_MIB_SER_CR_ST),
85 				 20, MTK_PPE_WAIT_TIMEOUT_US);
86 
87 	if (ret)
88 		dev_err(ppe->dev, "MIB table busy\n");
89 
90 	return ret;
91 }
92 
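/* Read one entry of the hardware MIB counters: kick off an indirect read by
 * writing the entry index and the start bit to MTK_PPE_MIB_SER_CR, wait for
 * the hardware to finish, then assemble the 64-bit byte counter and the
 * packet counter from the SER_R0..R2 result registers.
 */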
93 static int mtk_mib_entry_read(struct mtk_ppe *ppe, u16 index, u64 *bytes, u64 *packets)
94 {
95 	u32 byte_cnt_low, byte_cnt_high, pkt_cnt_low, pkt_cnt_high;
96 	u32 val, cnt_r0, cnt_r1, cnt_r2;
97 	int ret;
98 
99 	val = FIELD_PREP(MTK_PPE_MIB_SER_CR_ADDR, index) | MTK_PPE_MIB_SER_CR_ST;
100 	ppe_w32(ppe, MTK_PPE_MIB_SER_CR, val);
101 
102 	ret = mtk_ppe_mib_wait_busy(ppe);
103 	if (ret)
104 		return ret;
105 
106 	cnt_r0 = readl(ppe->base + MTK_PPE_MIB_SER_R0);
107 	cnt_r1 = readl(ppe->base + MTK_PPE_MIB_SER_R1);
108 	cnt_r2 = readl(ppe->base + MTK_PPE_MIB_SER_R2);
109 
110 	byte_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R0_BYTE_CNT_LOW, cnt_r0);
111 	byte_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R1_BYTE_CNT_HIGH, cnt_r1);
112 	pkt_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R1_PKT_CNT_LOW, cnt_r1);
113 	pkt_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R2_PKT_CNT_HIGH, cnt_r2);
114 	*bytes = ((u64)byte_cnt_high << 32) | byte_cnt_low;
115 	*packets = (pkt_cnt_high << 16) | pkt_cnt_low;
116 
117 	return 0;
118 }
119 
120 static void mtk_ppe_cache_clear(struct mtk_ppe *ppe)
121 {
122 	ppe_set(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
123 	ppe_clear(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
124 }
125 
126 static void mtk_ppe_cache_enable(struct mtk_ppe *ppe, bool enable)
127 {
128 	mtk_ppe_cache_clear(ppe);
129 
130 	ppe_m32(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_EN,
131 		enable * MTK_PPE_CACHE_CTL_EN);
132 }
133 
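/* Compute the FOE table bucket for a software-built entry from its address
 * and port tuple. The mixing below is presumably the same folding the PPE
 * applies when it hashes incoming packets, so the driver can predict where
 * the hardware will look the flow up; unsupported packet types return
 * MTK_PPE_HASH_MASK.
 */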
134 static u32 mtk_ppe_hash_entry(struct mtk_eth *eth, struct mtk_foe_entry *e)
135 {
136 	u32 hv1, hv2, hv3;
137 	u32 hash;
138 
139 	switch (mtk_get_ib1_pkt_type(eth, e->ib1)) {
140 	case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
141 	case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
142 		hv1 = e->ipv4.orig.ports;
143 		hv2 = e->ipv4.orig.dest_ip;
144 		hv3 = e->ipv4.orig.src_ip;
145 		break;
146 	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
147 	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
148 		hv1 = e->ipv6.src_ip[3] ^ e->ipv6.dest_ip[3];
149 		hv1 ^= e->ipv6.ports;
150 
151 		hv2 = e->ipv6.src_ip[2] ^ e->ipv6.dest_ip[2];
152 		hv2 ^= e->ipv6.dest_ip[0];
153 
154 		hv3 = e->ipv6.src_ip[1] ^ e->ipv6.dest_ip[1];
155 		hv3 ^= e->ipv6.src_ip[0];
156 		break;
157 	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
158 	case MTK_PPE_PKT_TYPE_IPV6_6RD:
159 	default:
160 		WARN_ON_ONCE(1);
161 		return MTK_PPE_HASH_MASK;
162 	}
163 
164 	hash = (hv1 & hv2) | ((~hv1) & hv3);
165 	hash = (hash >> 24) | ((hash & 0xffffff) << 8);
166 	hash ^= hv1 ^ hv2 ^ hv3;
167 	hash ^= hash >> 16;
168 	hash <<= (ffs(eth->soc->hash_offset) - 1);
169 	hash &= MTK_PPE_ENTRIES - 1;
170 
171 	return hash;
172 }
173 
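/* The offsets of the MAC info block and the IB2 word inside a FOE entry
 * depend on the packet type; these two helpers return a pointer to the
 * correct union member.
 */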
174 static inline struct mtk_foe_mac_info *
175 mtk_foe_entry_l2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
176 {
177 	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
178 
179 	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
180 		return &entry->bridge.l2;
181 
182 	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
183 		return &entry->ipv6.l2;
184 
185 	return &entry->ipv4.l2;
186 }
187 
188 static inline u32 *
189 mtk_foe_entry_ib2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
190 {
191 	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
192 
193 	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
194 		return &entry->bridge.ib2;
195 
196 	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
197 		return &entry->ipv6.ib2;
198 
199 	return &entry->ipv4.ib2;
200 }
201 
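/* Build the invariant part of a FOE entry: IB1 in BIND state for the given
 * packet type and L4 protocol, IB2 pointing at the destination PSE port,
 * plus the L2 header (source/destination MAC and ethertype). The IB1/IB2
 * field layout differs between NETSYS v1 and v2+.
 */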
202 int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry,
203 			  int type, int l4proto, u8 pse_port, u8 *src_mac,
204 			  u8 *dest_mac)
205 {
206 	struct mtk_foe_mac_info *l2;
207 	u32 ports_pad, val;
208 
209 	memset(entry, 0, sizeof(*entry));
210 
211 	if (mtk_is_netsys_v2_or_greater(eth)) {
212 		val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
213 		      FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE_V2, type) |
214 		      FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
215 		      MTK_FOE_IB1_BIND_CACHE_V2 | MTK_FOE_IB1_BIND_TTL_V2;
216 		entry->ib1 = val;
217 
218 		val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, pse_port) |
219 		      FIELD_PREP(MTK_FOE_IB2_PORT_AG_V2, 0xf);
220 	} else {
221 		int port_mg = eth->soc->offload_version > 1 ? 0 : 0x3f;
222 
223 		val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
224 		      FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
225 		      FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
226 		      MTK_FOE_IB1_BIND_CACHE | MTK_FOE_IB1_BIND_TTL;
227 		entry->ib1 = val;
228 
229 		val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port) |
230 		      FIELD_PREP(MTK_FOE_IB2_PORT_MG, port_mg) |
231 		      FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f);
232 	}
233 
234 	if (is_multicast_ether_addr(dest_mac))
235 		val |= mtk_get_ib2_multicast_mask(eth);
236 
237 	ports_pad = 0xa5a5a500 | (l4proto & 0xff);
238 	if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
239 		entry->ipv4.orig.ports = ports_pad;
240 	if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
241 		entry->ipv6.ports = ports_pad;
242 
243 	if (type == MTK_PPE_PKT_TYPE_BRIDGE) {
244 		ether_addr_copy(entry->bridge.src_mac, src_mac);
245 		ether_addr_copy(entry->bridge.dest_mac, dest_mac);
246 		entry->bridge.ib2 = val;
247 		l2 = &entry->bridge.l2;
248 	} else if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
249 		entry->ipv6.ib2 = val;
250 		l2 = &entry->ipv6.l2;
251 	} else {
252 		entry->ipv4.ib2 = val;
253 		l2 = &entry->ipv4.l2;
254 	}
255 
256 	l2->dest_mac_hi = get_unaligned_be32(dest_mac);
257 	l2->dest_mac_lo = get_unaligned_be16(dest_mac + 4);
258 	l2->src_mac_hi = get_unaligned_be32(src_mac);
259 	l2->src_mac_lo = get_unaligned_be16(src_mac + 4);
260 
261 	if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
262 		l2->etype = ETH_P_IPV6;
263 	else
264 		l2->etype = ETH_P_IP;
265 
266 	return 0;
267 }
268 
269 int mtk_foe_entry_set_pse_port(struct mtk_eth *eth,
270 			       struct mtk_foe_entry *entry, u8 port)
271 {
272 	u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
273 	u32 val = *ib2;
274 
275 	if (mtk_is_netsys_v2_or_greater(eth)) {
276 		val &= ~MTK_FOE_IB2_DEST_PORT_V2;
277 		val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, port);
278 	} else {
279 		val &= ~MTK_FOE_IB2_DEST_PORT;
280 		val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port);
281 	}
282 	*ib2 = val;
283 
284 	return 0;
285 }
286 
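/* Set the IPv4 address/port tuple. For HNAPT entries the translated
 * (egress) tuple goes into ipv4.new while the original tuple is used for
 * lookup; plain IPv4 route entries carry no L4 ports, and for 6RD the
 * IPv4 addresses describe the tunnel endpoints instead.
 */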
287 int mtk_foe_entry_set_ipv4_tuple(struct mtk_eth *eth,
288 				 struct mtk_foe_entry *entry, bool egress,
289 				 __be32 src_addr, __be16 src_port,
290 				 __be32 dest_addr, __be16 dest_port)
291 {
292 	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
293 	struct mtk_ipv4_tuple *t;
294 
295 	switch (type) {
296 	case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
297 		if (egress) {
298 			t = &entry->ipv4.new;
299 			break;
300 		}
301 		fallthrough;
302 	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
303 	case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
304 		t = &entry->ipv4.orig;
305 		break;
306 	case MTK_PPE_PKT_TYPE_IPV6_6RD:
307 		entry->ipv6_6rd.tunnel_src_ip = be32_to_cpu(src_addr);
308 		entry->ipv6_6rd.tunnel_dest_ip = be32_to_cpu(dest_addr);
309 		return 0;
310 	default:
311 		WARN_ON_ONCE(1);
312 		return -EINVAL;
313 	}
314 
315 	t->src_ip = be32_to_cpu(src_addr);
316 	t->dest_ip = be32_to_cpu(dest_addr);
317 
318 	if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
319 		return 0;
320 
321 	t->src_port = be16_to_cpu(src_port);
322 	t->dest_port = be16_to_cpu(dest_port);
323 
324 	return 0;
325 }
326 
327 int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth,
328 				 struct mtk_foe_entry *entry,
329 				 __be32 *src_addr, __be16 src_port,
330 				 __be32 *dest_addr, __be16 dest_port)
331 {
332 	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
333 	u32 *src, *dest;
334 	int i;
335 
336 	switch (type) {
337 	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
338 		src = entry->dslite.tunnel_src_ip;
339 		dest = entry->dslite.tunnel_dest_ip;
340 		break;
341 	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
342 	case MTK_PPE_PKT_TYPE_IPV6_6RD:
343 		entry->ipv6.src_port = be16_to_cpu(src_port);
344 		entry->ipv6.dest_port = be16_to_cpu(dest_port);
345 		fallthrough;
346 	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
347 		src = entry->ipv6.src_ip;
348 		dest = entry->ipv6.dest_ip;
349 		break;
350 	default:
351 		WARN_ON_ONCE(1);
352 		return -EINVAL;
353 	}
354 
355 	for (i = 0; i < 4; i++)
356 		src[i] = be32_to_cpu(src_addr[i]);
357 	for (i = 0; i < 4; i++)
358 		dest[i] = be32_to_cpu(dest_addr[i]);
359 
360 	return 0;
361 }
362 
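/* Egress via a DSA switch port: the MTK special tag is carried in the etype
 * field, with BIT(port) selecting the outgoing switch port and BIT(8)
 * apparently flagging that a VLAN tag follows the special tag. One VLAN
 * layer is reserved for the tag if none is present yet.
 */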
363 int mtk_foe_entry_set_dsa(struct mtk_eth *eth, struct mtk_foe_entry *entry,
364 			  int port)
365 {
366 	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
367 
368 	l2->etype = BIT(port);
369 
370 	if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)))
371 		entry->ib1 |= mtk_prep_ib1_vlan_layer(eth, 1);
372 	else
373 		l2->etype |= BIT(8);
374 
375 	entry->ib1 &= ~mtk_get_ib1_vlan_tag_mask(eth);
376 
377 	return 0;
378 }
379 
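/* Push a VLAN tag onto the entry: the first tag goes into vlan1, a second
 * one into vlan2; more than two VLAN layers cannot be offloaded and return
 * -ENOSPC.
 */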
380 int mtk_foe_entry_set_vlan(struct mtk_eth *eth, struct mtk_foe_entry *entry,
381 			   int vid)
382 {
383 	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
384 
385 	switch (mtk_get_ib1_vlan_layer(eth, entry->ib1)) {
386 	case 0:
387 		entry->ib1 |= mtk_get_ib1_vlan_tag_mask(eth) |
388 			      mtk_prep_ib1_vlan_layer(eth, 1);
389 		l2->vlan1 = vid;
390 		return 0;
391 	case 1:
392 		if (!(entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth))) {
393 			l2->vlan1 = vid;
394 			l2->etype |= BIT(8);
395 		} else {
396 			l2->vlan2 = vid;
397 			entry->ib1 += mtk_prep_ib1_vlan_layer(eth, 1);
398 		}
399 		return 0;
400 	default:
401 		return -ENOSPC;
402 	}
403 }
404 
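/* PPPoE encapsulation: store the session id and set the PPPoE flag in IB1.
 * The ethertype is only rewritten to ETH_P_PPP_SES when the etype field is
 * not already holding an untagged DSA special tag.
 */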
405 int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry,
406 			    int sid)
407 {
408 	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
409 
410 	if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)) ||
411 	    (entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth)))
412 		l2->etype = ETH_P_PPP_SES;
413 
414 	entry->ib1 |= mtk_get_ib1_ppoe_mask(eth);
415 	l2->pppoe_id = sid;
416 
417 	return 0;
418 }
419 
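/* Hand the flow off to a Wi-Fi offload (WDMA) device. Where the WCID/BSS
 * and receive queue information lives depends on the NETSYS version:
 * w3info on v3, winfo on v2, and packed into the vlan2 field on v1.
 */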
420 int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
421 			   int wdma_idx, int txq, int bss, int wcid)
422 {
423 	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
424 	u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
425 
426 	switch (eth->soc->version) {
427 	case 3:
428 		*ib2 &= ~MTK_FOE_IB2_PORT_MG_V2;
429 		*ib2 |=  FIELD_PREP(MTK_FOE_IB2_RX_IDX, txq) |
430 			 MTK_FOE_IB2_WDMA_WINFO_V2;
431 		l2->w3info = FIELD_PREP(MTK_FOE_WINFO_WCID_V3, wcid) |
432 			     FIELD_PREP(MTK_FOE_WINFO_BSS_V3, bss);
433 		break;
434 	case 2:
435 		*ib2 &= ~MTK_FOE_IB2_PORT_MG_V2;
436 		*ib2 |=  FIELD_PREP(MTK_FOE_IB2_RX_IDX, txq) |
437 			 MTK_FOE_IB2_WDMA_WINFO_V2;
438 		l2->winfo = FIELD_PREP(MTK_FOE_WINFO_WCID, wcid) |
439 			    FIELD_PREP(MTK_FOE_WINFO_BSS, bss);
440 		break;
441 	default:
442 		*ib2 &= ~MTK_FOE_IB2_PORT_MG;
443 		*ib2 |= MTK_FOE_IB2_WDMA_WINFO;
444 		if (wdma_idx)
445 			*ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;
446 		l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
447 			    FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
448 			    FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);
449 		break;
450 	}
451 
452 	return 0;
453 }
454 
455 int mtk_foe_entry_set_queue(struct mtk_eth *eth, struct mtk_foe_entry *entry,
456 			    unsigned int queue)
457 {
458 	u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
459 
460 	if (mtk_is_netsys_v2_or_greater(eth)) {
461 		*ib2 &= ~MTK_FOE_IB2_QID_V2;
462 		*ib2 |= FIELD_PREP(MTK_FOE_IB2_QID_V2, queue);
463 		*ib2 |= MTK_FOE_IB2_PSE_QOS_V2;
464 	} else {
465 		*ib2 &= ~MTK_FOE_IB2_QID;
466 		*ib2 |= FIELD_PREP(MTK_FOE_IB2_QID, queue);
467 		*ib2 |= MTK_FOE_IB2_PSE_QOS;
468 	}
469 
470 	return 0;
471 }
472 
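/* A software flow matches a hardware FOE entry when the UDP flag in IB1 is
 * equal and the tuple data following IB1 (up to the IB2 word for IPv4
 * entries, up to the reserved field for IPv6 entries) is identical.
 */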
473 static bool
474 mtk_flow_entry_match(struct mtk_eth *eth, struct mtk_flow_entry *entry,
475 		     struct mtk_foe_entry *data)
476 {
477 	int type, len;
478 
479 	if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
480 		return false;
481 
482 	type = mtk_get_ib1_pkt_type(eth, entry->data.ib1);
483 	if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
484 		len = offsetof(struct mtk_foe_entry, ipv6._rsv);
485 	else
486 		len = offsetof(struct mtk_foe_entry, ipv4.ib2);
487 
488 	return !memcmp(&entry->data.data, &data->data, len - 4);
489 }
490 
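/* Detach a flow from the hardware table; called with ppe_lock held.
 * L2 entries are removed from the l2_flows hashtable and all of their
 * subflows are cleared recursively. For bound entries the hardware state is
 * set to INVALID, the PPE cache is flushed and the cached MIB counters are
 * reset; L2 subflow entries are freed afterwards.
 */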
491 static void
492 __mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
493 {
494 	struct hlist_head *head;
495 	struct hlist_node *tmp;
496 
497 	if (entry->type == MTK_FLOW_TYPE_L2) {
498 		rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node,
499 				       mtk_flow_l2_ht_params);
500 
501 		head = &entry->l2_flows;
502 		hlist_for_each_entry_safe(entry, tmp, head, l2_data.list)
503 			__mtk_foe_entry_clear(ppe, entry);
504 		return;
505 	}
506 
507 	hlist_del_init(&entry->list);
508 	if (entry->hash != 0xffff) {
509 		struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, entry->hash);
510 
511 		hwe->ib1 &= ~MTK_FOE_IB1_STATE;
512 		hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_INVALID);
513 		dma_wmb();
514 		mtk_ppe_cache_clear(ppe);
515 
516 		if (ppe->accounting) {
517 			struct mtk_foe_accounting *acct;
518 
519 			acct = ppe->acct_table + entry->hash * sizeof(*acct);
520 			acct->packets = 0;
521 			acct->bytes = 0;
522 		}
523 	}
524 	entry->hash = 0xffff;
525 
526 	if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW)
527 		return;
528 
529 	hlist_del_init(&entry->l2_data.list);
530 	kfree(entry);
531 }
532 
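/* Idle time of a flow in hardware timestamp units, derived from the
 * difference between the current frame engine timestamp and the bind
 * timestamp in IB1, taking wrap-around of the timestamp field into account.
 */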
533 static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1)
534 {
535 	u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
536 	u16 now = mtk_eth_timestamp(ppe->eth);
537 	u16 timestamp = ib1 & ib1_ts_mask;
538 
539 	if (timestamp > now)
540 		return ib1_ts_mask + 1 - timestamp + now;
541 	else
542 		return now - timestamp;
543 }
544 
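/* Refresh an L2 entry from its subflows: subflows whose hardware entry is
 * no longer in BIND state are dropped, and the timestamp of the most
 * recently active remaining subflow is copied into the parent entry's IB1.
 */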
545 static void
546 mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
547 {
548 	u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
549 	struct mtk_flow_entry *cur;
550 	struct mtk_foe_entry *hwe;
551 	struct hlist_node *tmp;
552 	int idle;
553 
554 	idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
555 	hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) {
556 		int cur_idle;
557 		u32 ib1;
558 
559 		hwe = mtk_foe_get_entry(ppe, cur->hash);
560 		ib1 = READ_ONCE(hwe->ib1);
561 
562 		if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) {
563 			cur->hash = 0xffff;
564 			__mtk_foe_entry_clear(ppe, cur);
565 			continue;
566 		}
567 
568 		cur_idle = __mtk_foe_entry_idle_time(ppe, ib1);
569 		if (cur_idle >= idle)
570 			continue;
571 
572 		idle = cur_idle;
573 		entry->data.ib1 &= ~ib1_ts_mask;
574 		entry->data.ib1 |= hwe->ib1 & ib1_ts_mask;
575 	}
576 }
577 
578 static void
579 mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
580 {
581 	struct mtk_foe_entry foe = {};
582 	struct mtk_foe_entry *hwe;
583 
584 	spin_lock_bh(&ppe_lock);
585 
586 	if (entry->type == MTK_FLOW_TYPE_L2) {
587 		mtk_flow_entry_update_l2(ppe, entry);
588 		goto out;
589 	}
590 
591 	if (entry->hash == 0xffff)
592 		goto out;
593 
594 	hwe = mtk_foe_get_entry(ppe, entry->hash);
595 	memcpy(&foe, hwe, ppe->eth->soc->foe_entry_size);
596 	if (!mtk_flow_entry_match(ppe->eth, entry, &foe)) {
597 		entry->hash = 0xffff;
598 		goto out;
599 	}
600 
601 	entry->data.ib1 = foe.ib1;
602 
603 out:
604 	spin_unlock_bh(&ppe_lock);
605 }
606 
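/* Write a prepared entry into the hardware table at @hash: refresh the bind
 * timestamp, copy everything after IB1 first and publish IB1 (which holds
 * the state field) only after a write barrier, so the PPE never observes a
 * half-written entry. MIB counting is enabled for the entry when accounting
 * is supported, and the PPE cache is flushed at the end.
 */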
607 static void
608 __mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
609 		       u16 hash)
610 {
611 	struct mtk_eth *eth = ppe->eth;
612 	u16 timestamp = mtk_eth_timestamp(eth);
613 	struct mtk_foe_entry *hwe;
614 	u32 val;
615 
616 	if (mtk_is_netsys_v2_or_greater(eth)) {
617 		entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP_V2;
618 		entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP_V2,
619 					 timestamp);
620 	} else {
621 		entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
622 		entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP,
623 					 timestamp);
624 	}
625 
626 	hwe = mtk_foe_get_entry(ppe, hash);
627 	memcpy(&hwe->data, &entry->data, eth->soc->foe_entry_size - sizeof(hwe->ib1));
628 	wmb();
629 	hwe->ib1 = entry->ib1;
630 
631 	if (ppe->accounting) {
632 		if (mtk_is_netsys_v2_or_greater(eth))
633 			val = MTK_FOE_IB2_MIB_CNT_V2;
634 		else
635 			val = MTK_FOE_IB2_MIB_CNT;
636 		*mtk_foe_entry_ib2(eth, hwe) |= val;
637 	}
638 
639 	dma_wmb();
640 
641 	mtk_ppe_cache_clear(ppe);
642 }
643 
644 void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
645 {
646 	spin_lock_bh(&ppe_lock);
647 	__mtk_foe_entry_clear(ppe, entry);
648 	spin_unlock_bh(&ppe_lock);
649 }
650 
651 static int
652 mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
653 {
654 	struct mtk_flow_entry *prev;
655 
656 	entry->type = MTK_FLOW_TYPE_L2;
657 
658 	prev = rhashtable_lookup_get_insert_fast(&ppe->l2_flows, &entry->l2_node,
659 						 mtk_flow_l2_ht_params);
660 	if (likely(!prev))
661 		return 0;
662 
663 	if (IS_ERR(prev))
664 		return PTR_ERR(prev);
665 
666 	return rhashtable_replace_fast(&ppe->l2_flows, &prev->l2_node,
667 				       &entry->l2_node, mtk_flow_l2_ht_params);
668 }
669 
670 int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
671 {
672 	const struct mtk_soc_data *soc = ppe->eth->soc;
673 	int type = mtk_get_ib1_pkt_type(ppe->eth, entry->data.ib1);
674 	u32 hash;
675 
676 	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
677 		return mtk_foe_entry_commit_l2(ppe, entry);
678 
679 	hash = mtk_ppe_hash_entry(ppe->eth, &entry->data);
680 	entry->hash = 0xffff;
681 	spin_lock_bh(&ppe_lock);
682 	hlist_add_head(&entry->list, &ppe->foe_flow[hash / soc->hash_offset]);
683 	spin_unlock_bh(&ppe_lock);
684 
685 	return 0;
686 }
687 
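/* An L2 (bridge) flow acts as a template. When the hardware reports an
 * unbound lookup at @hash, clone that hardware entry's tuple, overlay the
 * template's L2 header and IB2 word, and commit the result as a subflow
 * tracked under the parent entry so it can be aged and cleared with it.
 */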
688 static void
689 mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
690 			     u16 hash)
691 {
692 	const struct mtk_soc_data *soc = ppe->eth->soc;
693 	struct mtk_flow_entry *flow_info;
694 	struct mtk_foe_entry foe = {}, *hwe;
695 	struct mtk_foe_mac_info *l2;
696 	u32 ib1_mask = mtk_get_ib1_pkt_type_mask(ppe->eth) | MTK_FOE_IB1_UDP;
697 	int type;
698 
699 	flow_info = kzalloc(sizeof(*flow_info), GFP_ATOMIC);
700 	if (!flow_info)
701 		return;
702 
703 	flow_info->l2_data.base_flow = entry;
704 	flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW;
705 	flow_info->hash = hash;
706 	hlist_add_head(&flow_info->list,
707 		       &ppe->foe_flow[hash / soc->hash_offset]);
708 	hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);
709 
710 	hwe = mtk_foe_get_entry(ppe, hash);
711 	memcpy(&foe, hwe, soc->foe_entry_size);
712 	foe.ib1 &= ib1_mask;
713 	foe.ib1 |= entry->data.ib1 & ~ib1_mask;
714 
715 	l2 = mtk_foe_entry_l2(ppe->eth, &foe);
716 	memcpy(l2, &entry->data.bridge.l2, sizeof(*l2));
717 
718 	type = mtk_get_ib1_pkt_type(ppe->eth, foe.ib1);
719 	if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT)
720 		memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new));
721 	else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP)
722 		l2->etype = ETH_P_IPV6;
723 
724 	*mtk_foe_entry_ib2(ppe->eth, &foe) = entry->data.bridge.ib2;
725 
726 	__mtk_foe_entry_commit(ppe, &foe, hash);
727 }
728 
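/* Called from the RX path with the FOE hash the hardware reported for @skb.
 * If the hardware entry at that hash is not yet bound, try to bind a
 * matching software flow to it; failing that, fall back to looking up an L2
 * flow by the packet's MAC addresses and VLAN (taking MTK DSA tags into
 * account) and create a subflow for it.
 */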
729 void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
730 {
731 	const struct mtk_soc_data *soc = ppe->eth->soc;
732 	struct hlist_head *head = &ppe->foe_flow[hash / soc->hash_offset];
733 	struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, hash);
734 	struct mtk_flow_entry *entry;
735 	struct mtk_foe_bridge key = {};
736 	struct hlist_node *n;
737 	struct ethhdr *eh;
738 	bool found = false;
739 	u8 *tag;
740 
741 	spin_lock_bh(&ppe_lock);
742 
743 	if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND)
744 		goto out;
745 
746 	hlist_for_each_entry_safe(entry, n, head, list) {
747 		if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) {
748 			if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) ==
749 				     MTK_FOE_STATE_BIND))
750 				continue;
751 
752 			entry->hash = 0xffff;
753 			__mtk_foe_entry_clear(ppe, entry);
754 			continue;
755 		}
756 
757 		if (found || !mtk_flow_entry_match(ppe->eth, entry, hwe)) {
758 			if (entry->hash != 0xffff)
759 				entry->hash = 0xffff;
760 			continue;
761 		}
762 
763 		entry->hash = hash;
764 		__mtk_foe_entry_commit(ppe, &entry->data, hash);
765 		found = true;
766 	}
767 
768 	if (found)
769 		goto out;
770 
771 	eh = eth_hdr(skb);
772 	ether_addr_copy(key.dest_mac, eh->h_dest);
773 	ether_addr_copy(key.src_mac, eh->h_source);
774 	tag = skb->data - 2;
775 	key.vlan = 0;
776 	switch (skb->protocol) {
777 #if IS_ENABLED(CONFIG_NET_DSA)
778 	case htons(ETH_P_XDSA):
779 		if (!netdev_uses_dsa(skb->dev) ||
780 		    skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
781 			goto out;
782 
783 		if (!skb_metadata_dst(skb))
784 			tag += 4;
785 
786 		if (get_unaligned_be16(tag) != ETH_P_8021Q)
787 			break;
788 
789 		fallthrough;
790 #endif
791 	case htons(ETH_P_8021Q):
792 		key.vlan = get_unaligned_be16(tag + 2) & VLAN_VID_MASK;
793 		break;
794 	default:
795 		break;
796 	}
797 
798 	entry = rhashtable_lookup_fast(&ppe->l2_flows, &key, mtk_flow_l2_ht_params);
799 	if (!entry)
800 		goto out;
801 
802 	mtk_foe_entry_commit_subflow(ppe, entry, hash);
803 
804 out:
805 	spin_unlock_bh(&ppe_lock);
806 }
807 
808 int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
809 {
810 	mtk_flow_entry_update(ppe, entry);
811 
812 	return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
813 }
814 
815 int mtk_ppe_prepare_reset(struct mtk_ppe *ppe)
816 {
817 	if (!ppe)
818 		return -EINVAL;
819 
820 	/* disable KA */
821 	ppe_clear(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_KEEPALIVE);
822 	ppe_clear(ppe, MTK_PPE_BIND_LMT1, MTK_PPE_NTU_KEEPALIVE);
823 	ppe_w32(ppe, MTK_PPE_KEEPALIVE, 0);
824 	usleep_range(10000, 11000);
825 
826 	/* set KA timer to maximum */
827 	ppe_set(ppe, MTK_PPE_BIND_LMT1, MTK_PPE_NTU_KEEPALIVE);
828 	ppe_w32(ppe, MTK_PPE_KEEPALIVE, 0xffffffff);
829 
830 	/* set KA tick select */
831 	ppe_set(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_TICK_SEL);
832 	ppe_set(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_KEEPALIVE);
833 	usleep_range(10000, 11000);
834 
835 	/* disable scan mode */
836 	ppe_clear(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_SCAN_MODE);
837 	usleep_range(10000, 11000);
838 
839 	return mtk_ppe_wait_busy(ppe);
840 }
841 
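/* Read the hardware MIB counters for one entry and fold them into the
 * software accumulator. The MIB is configured for clear-on-read in
 * mtk_ppe_start(), so each read returns the traffic since the previous
 * read; the per-call delta is optionally returned through @diff.
 */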
842 struct mtk_foe_accounting *mtk_foe_entry_get_mib(struct mtk_ppe *ppe, u32 index,
843 						 struct mtk_foe_accounting *diff)
844 {
845 	struct mtk_foe_accounting *acct;
846 	int size = sizeof(struct mtk_foe_accounting);
847 	u64 bytes, packets;
848 
849 	if (!ppe->accounting)
850 		return NULL;
851 
852 	if (mtk_mib_entry_read(ppe, index, &bytes, &packets))
853 		return NULL;
854 
855 	acct = ppe->acct_table + index * size;
856 
857 	acct->bytes += bytes;
858 	acct->packets += packets;
859 
860 	if (diff) {
861 		diff->bytes = bytes;
862 		diff->packets = packets;
863 	}
864 
865 	return acct;
866 }
867 
868 struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int index)
869 {
870 	bool accounting = eth->soc->has_accounting;
871 	const struct mtk_soc_data *soc = eth->soc;
872 	struct mtk_foe_accounting *acct;
873 	struct device *dev = eth->dev;
874 	struct mtk_mib_entry *mib;
875 	struct mtk_ppe *ppe;
876 	u32 foe_flow_size;
877 	void *foe;
878 
879 	ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL);
880 	if (!ppe)
881 		return NULL;
882 
883 	rhashtable_init(&ppe->l2_flows, &mtk_flow_l2_ht_params);
884 
885 	/* need to allocate a separate device, since the PPE DMA access is
886 	 * not coherent.
887 	 */
888 	ppe->base = base;
889 	ppe->eth = eth;
890 	ppe->dev = dev;
891 	ppe->version = eth->soc->offload_version;
892 	ppe->accounting = accounting;
893 
894 	foe = dmam_alloc_coherent(ppe->dev,
895 				  MTK_PPE_ENTRIES * soc->foe_entry_size,
896 				  &ppe->foe_phys, GFP_KERNEL);
897 	if (!foe)
898 		goto err_free_l2_flows;
899 
900 	ppe->foe_table = foe;
901 
902 	foe_flow_size = (MTK_PPE_ENTRIES / soc->hash_offset) *
903 			sizeof(*ppe->foe_flow);
904 	ppe->foe_flow = devm_kzalloc(dev, foe_flow_size, GFP_KERNEL);
905 	if (!ppe->foe_flow)
906 		goto err_free_l2_flows;
907 
908 	if (accounting) {
909 		mib = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*mib),
910 					  &ppe->mib_phys, GFP_KERNEL);
911 		if (!mib)
912 			return NULL;
913 
914 		ppe->mib_table = mib;
915 
916 		acct = devm_kzalloc(dev, MTK_PPE_ENTRIES * sizeof(*acct),
917 				    GFP_KERNEL);
918 
919 		if (!acct)
920 			return NULL;
921 
922 		ppe->acct_table = acct;
923 	}
924 
925 	mtk_ppe_debugfs_init(ppe, index);
926 
927 	return ppe;
928 
929 err_free_l2_flows:
930 	rhashtable_destroy(&ppe->l2_flows);
931 	return NULL;
932 }
933 
934 void mtk_ppe_deinit(struct mtk_eth *eth)
935 {
936 	int i;
937 
938 	for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) {
939 		if (!eth->ppe[i])
940 			return;
941 		rhashtable_destroy(&eth->ppe[i]->l2_flows);
942 	}
943 }
944 
945 static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
946 {
947 	static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
948 	int i, k;
949 
950 	memset(ppe->foe_table, 0,
951 	       MTK_PPE_ENTRIES * ppe->eth->soc->foe_entry_size);
952 
953 	if (!IS_ENABLED(CONFIG_SOC_MT7621))
954 		return;
955 
956 	/* skip all entries that cross the 1024 byte boundary */
957 	for (i = 0; i < MTK_PPE_ENTRIES; i += 128) {
958 		for (k = 0; k < ARRAY_SIZE(skip); k++) {
959 			struct mtk_foe_entry *hwe;
960 
961 			hwe = mtk_foe_get_entry(ppe, i + skip[k]);
962 			hwe->ib1 |= MTK_FOE_IB1_STATIC;
963 		}
964 	}
965 }
966 
967 void mtk_ppe_start(struct mtk_ppe *ppe)
968 {
969 	u32 val;
970 
971 	if (!ppe)
972 		return;
973 
974 	mtk_ppe_init_foe_table(ppe);
975 	ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys);
976 
977 	val = MTK_PPE_TB_CFG_AGE_NON_L4 |
978 	      MTK_PPE_TB_CFG_AGE_UNBIND |
979 	      MTK_PPE_TB_CFG_AGE_TCP |
980 	      MTK_PPE_TB_CFG_AGE_UDP |
981 	      MTK_PPE_TB_CFG_AGE_TCP_FIN |
982 	      FIELD_PREP(MTK_PPE_TB_CFG_SEARCH_MISS,
983 			 MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD) |
984 	      FIELD_PREP(MTK_PPE_TB_CFG_KEEPALIVE,
985 			 MTK_PPE_KEEPALIVE_DISABLE) |
986 	      FIELD_PREP(MTK_PPE_TB_CFG_HASH_MODE, 1) |
987 	      FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE,
988 			 MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) |
989 	      FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
990 			 MTK_PPE_ENTRIES_SHIFT);
991 	if (mtk_is_netsys_v2_or_greater(ppe->eth))
992 		val |= MTK_PPE_TB_CFG_INFO_SEL;
993 	if (!mtk_is_netsys_v3_or_greater(ppe->eth))
994 		val |= MTK_PPE_TB_CFG_ENTRY_80B;
995 	ppe_w32(ppe, MTK_PPE_TB_CFG, val);
996 
997 	ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK,
998 		MTK_PPE_IP_PROTO_CHK_IPV4 | MTK_PPE_IP_PROTO_CHK_IPV6);
999 
1000 	mtk_ppe_cache_enable(ppe, true);
1001 
1002 	val = MTK_PPE_FLOW_CFG_IP6_3T_ROUTE |
1003 	      MTK_PPE_FLOW_CFG_IP6_5T_ROUTE |
1004 	      MTK_PPE_FLOW_CFG_IP6_6RD |
1005 	      MTK_PPE_FLOW_CFG_IP4_NAT |
1006 	      MTK_PPE_FLOW_CFG_IP4_NAPT |
1007 	      MTK_PPE_FLOW_CFG_IP4_DSLITE |
1008 	      MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
1009 	if (mtk_is_netsys_v2_or_greater(ppe->eth))
1010 		val |= MTK_PPE_MD_TOAP_BYP_CRSN0 |
1011 		       MTK_PPE_MD_TOAP_BYP_CRSN1 |
1012 		       MTK_PPE_MD_TOAP_BYP_CRSN2 |
1013 		       MTK_PPE_FLOW_CFG_IP4_HASH_GRE_KEY;
1014 	else
1015 		val |= MTK_PPE_FLOW_CFG_IP4_TCP_FRAG |
1016 		       MTK_PPE_FLOW_CFG_IP4_UDP_FRAG;
1017 	ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);
1018 
1019 	val = FIELD_PREP(MTK_PPE_UNBIND_AGE_MIN_PACKETS, 1000) |
1020 	      FIELD_PREP(MTK_PPE_UNBIND_AGE_DELTA, 3);
1021 	ppe_w32(ppe, MTK_PPE_UNBIND_AGE, val);
1022 
1023 	val = FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_UDP, 12) |
1024 	      FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_NON_L4, 1);
1025 	ppe_w32(ppe, MTK_PPE_BIND_AGE0, val);
1026 
1027 	val = FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
1028 	      FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP, 7);
1029 	ppe_w32(ppe, MTK_PPE_BIND_AGE1, val);
1030 
1031 	val = MTK_PPE_BIND_LIMIT0_QUARTER | MTK_PPE_BIND_LIMIT0_HALF;
1032 	ppe_w32(ppe, MTK_PPE_BIND_LIMIT0, val);
1033 
1034 	val = MTK_PPE_BIND_LIMIT1_FULL |
1035 	      FIELD_PREP(MTK_PPE_BIND_LIMIT1_NON_L4, 1);
1036 	ppe_w32(ppe, MTK_PPE_BIND_LIMIT1, val);
1037 
1038 	val = FIELD_PREP(MTK_PPE_BIND_RATE_BIND, 30) |
1039 	      FIELD_PREP(MTK_PPE_BIND_RATE_PREBIND, 1);
1040 	ppe_w32(ppe, MTK_PPE_BIND_RATE, val);
1041 
1042 	/* enable PPE */
1043 	val = MTK_PPE_GLO_CFG_EN |
1044 	      MTK_PPE_GLO_CFG_IP4_L4_CS_DROP |
1045 	      MTK_PPE_GLO_CFG_IP4_CS_DROP |
1046 	      MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE;
1047 	ppe_w32(ppe, MTK_PPE_GLO_CFG, val);
1048 
1049 	ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);
1050 
1051 	if (mtk_is_netsys_v2_or_greater(ppe->eth)) {
1052 		ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT1, 0xcb777);
1053 		ppe_w32(ppe, MTK_PPE_SBW_CTRL, 0x7f);
1054 	}
1055 
1056 	if (ppe->accounting && ppe->mib_phys) {
1057 		ppe_w32(ppe, MTK_PPE_MIB_TB_BASE, ppe->mib_phys);
1058 		ppe_m32(ppe, MTK_PPE_MIB_CFG, MTK_PPE_MIB_CFG_EN,
1059 			MTK_PPE_MIB_CFG_EN);
1060 		ppe_m32(ppe, MTK_PPE_MIB_CFG, MTK_PPE_MIB_CFG_RD_CLR,
1061 			MTK_PPE_MIB_CFG_RD_CLR);
1062 		ppe_m32(ppe, MTK_PPE_MIB_CACHE_CTL, MTK_PPE_MIB_CACHE_CTL_EN,
1063 			MTK_PPE_MIB_CFG_RD_CLR);
1064 	}
1065 }
1066 
1067 int mtk_ppe_stop(struct mtk_ppe *ppe)
1068 {
1069 	u32 val;
1070 	int i;
1071 
1072 	if (!ppe)
1073 		return 0;
1074 
1075 	for (i = 0; i < MTK_PPE_ENTRIES; i++) {
1076 		struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, i);
1077 
1078 		hwe->ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
1079 				      MTK_FOE_STATE_INVALID);
1080 	}
1081 
1082 	mtk_ppe_cache_enable(ppe, false);
1083 
1084 	/* disable offload engine */
1085 	ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN);
1086 	ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0);
1087 
1088 	/* disable aging */
1089 	val = MTK_PPE_TB_CFG_AGE_NON_L4 |
1090 	      MTK_PPE_TB_CFG_AGE_UNBIND |
1091 	      MTK_PPE_TB_CFG_AGE_TCP |
1092 	      MTK_PPE_TB_CFG_AGE_UDP |
1093 	      MTK_PPE_TB_CFG_AGE_TCP_FIN;
1094 	ppe_clear(ppe, MTK_PPE_TB_CFG, val);
1095 
1096 	return mtk_ppe_wait_busy(ppe);
1097 }
1098