// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/dsa.h>
#include "mtk_eth_soc.h"
#include "mtk_ppe.h"
#include "mtk_ppe_regs.h"

static DEFINE_SPINLOCK(ppe_lock);

static const struct rhashtable_params mtk_flow_l2_ht_params = {
	.head_offset = offsetof(struct mtk_flow_entry, l2_node),
	.key_offset = offsetof(struct mtk_flow_entry, data.bridge),
	.key_len = offsetof(struct mtk_foe_bridge, key_end),
	.automatic_shrinking = true,
};

static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	writel(val, ppe->base + reg);
}

static u32 ppe_r32(struct mtk_ppe *ppe, u32 reg)
{
	return readl(ppe->base + reg);
}

static u32 ppe_m32(struct mtk_ppe *ppe, u32 reg, u32 mask, u32 set)
{
	u32 val;

	val = ppe_r32(ppe, reg);
	val &= ~mask;
	val |= set;
	ppe_w32(ppe, reg, val);

	return val;
}

static u32 ppe_set(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	return ppe_m32(ppe, reg, 0, val);
}

static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	return ppe_m32(ppe, reg, val, 0);
}

static u32 mtk_eth_timestamp(struct mtk_eth *eth)
{
	return mtk_r32(eth, 0x0010) & mtk_get_ib1_ts_mask(eth);
}

static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
{
	int ret;
	u32 val;

	ret = readl_poll_timeout(ppe->base + MTK_PPE_GLO_CFG, val,
				 !(val & MTK_PPE_GLO_CFG_BUSY),
				 20, MTK_PPE_WAIT_TIMEOUT_US);

	if (ret)
		dev_err(ppe->dev, "PPE table busy\n");

	return ret;
}

static void mtk_ppe_cache_clear(struct mtk_ppe *ppe)
{
	ppe_set(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
	ppe_clear(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
}

static void mtk_ppe_cache_enable(struct mtk_ppe *ppe, bool enable)
{
	mtk_ppe_cache_clear(ppe);

	ppe_m32(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_EN,
		enable * MTK_PPE_CACHE_CTL_EN);
}

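/* Compute the FOE table bucket index for an entry from its IPv4/IPv6 address
 * and port tuple. Entry types that are not hashed here (DS-Lite, 6RD) trigger
 * a one-time warning and return MTK_PPE_HASH_MASK.
 */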
static u32 mtk_ppe_hash_entry(struct mtk_eth *eth, struct mtk_foe_entry *e)
{
	u32 hv1, hv2, hv3;
	u32 hash;

	switch (mtk_get_ib1_pkt_type(eth, e->ib1)) {
	case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
	case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
		hv1 = e->ipv4.orig.ports;
		hv2 = e->ipv4.orig.dest_ip;
		hv3 = e->ipv4.orig.src_ip;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
		hv1 = e->ipv6.src_ip[3] ^ e->ipv6.dest_ip[3];
		hv1 ^= e->ipv6.ports;

		hv2 = e->ipv6.src_ip[2] ^ e->ipv6.dest_ip[2];
		hv2 ^= e->ipv6.dest_ip[0];

		hv3 = e->ipv6.src_ip[1] ^ e->ipv6.dest_ip[1];
		hv3 ^= e->ipv6.src_ip[0];
		break;
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
	default:
		WARN_ON_ONCE(1);
		return MTK_PPE_HASH_MASK;
	}

	hash = (hv1 & hv2) | ((~hv1) & hv3);
	hash = (hash >> 24) | ((hash & 0xffffff) << 8);
	hash ^= hv1 ^ hv2 ^ hv3;
	hash ^= hash >> 16;
	hash <<= (ffs(eth->soc->hash_offset) - 1);
	hash &= MTK_PPE_ENTRIES - 1;

	return hash;
}

static inline struct mtk_foe_mac_info *
mtk_foe_entry_l2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
{
	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);

	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
		return &entry->bridge.l2;

	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		return &entry->ipv6.l2;

	return &entry->ipv4.l2;
}

static inline u32 *
mtk_foe_entry_ib2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
{
	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);

	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
		return &entry->bridge.ib2;

	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		return &entry->ipv6.ib2;

	return &entry->ipv4.ib2;
}

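/* Initialize a software FOE entry: set up ib1/ib2 for the given packet type,
 * L4 protocol and PSE destination port, and fill in the L2 MAC and ethertype
 * information. The field layout differs between NETSYS v1 and v2 SoCs.
 */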
int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			  int type, int l4proto, u8 pse_port, u8 *src_mac,
			  u8 *dest_mac)
{
	struct mtk_foe_mac_info *l2;
	u32 ports_pad, val;

	memset(entry, 0, sizeof(*entry));

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
		      FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE_V2, type) |
		      FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
		      MTK_FOE_IB1_BIND_CACHE_V2 | MTK_FOE_IB1_BIND_TTL_V2;
		entry->ib1 = val;

		val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, pse_port) |
		      FIELD_PREP(MTK_FOE_IB2_PORT_AG_V2, 0xf);
	} else {
		int port_mg = eth->soc->offload_version > 1 ? 0 : 0x3f;

		val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
		      FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
		      FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
		      MTK_FOE_IB1_BIND_CACHE | MTK_FOE_IB1_BIND_TTL;
		entry->ib1 = val;

		val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port) |
		      FIELD_PREP(MTK_FOE_IB2_PORT_MG, port_mg) |
		      FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f);
	}

	if (is_multicast_ether_addr(dest_mac))
		val |= mtk_get_ib2_multicast_mask(eth);

	ports_pad = 0xa5a5a500 | (l4proto & 0xff);
	if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
		entry->ipv4.orig.ports = ports_pad;
	if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
		entry->ipv6.ports = ports_pad;

	if (type == MTK_PPE_PKT_TYPE_BRIDGE) {
		ether_addr_copy(entry->bridge.src_mac, src_mac);
		ether_addr_copy(entry->bridge.dest_mac, dest_mac);
		entry->bridge.ib2 = val;
		l2 = &entry->bridge.l2;
	} else if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
		entry->ipv6.ib2 = val;
		l2 = &entry->ipv6.l2;
	} else {
		entry->ipv4.ib2 = val;
		l2 = &entry->ipv4.l2;
	}

	l2->dest_mac_hi = get_unaligned_be32(dest_mac);
	l2->dest_mac_lo = get_unaligned_be16(dest_mac + 4);
	l2->src_mac_hi = get_unaligned_be32(src_mac);
	l2->src_mac_lo = get_unaligned_be16(src_mac + 4);

	if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
		l2->etype = ETH_P_IPV6;
	else
		l2->etype = ETH_P_IP;

	return 0;
}

int mtk_foe_entry_set_pse_port(struct mtk_eth *eth,
			       struct mtk_foe_entry *entry, u8 port)
{
	u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
	u32 val = *ib2;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		val &= ~MTK_FOE_IB2_DEST_PORT_V2;
		val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, port);
	} else {
		val &= ~MTK_FOE_IB2_DEST_PORT;
		val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port);
	}
	*ib2 = val;

	return 0;
}

int mtk_foe_entry_set_ipv4_tuple(struct mtk_eth *eth,
				 struct mtk_foe_entry *entry, bool egress,
				 __be32 src_addr, __be16 src_port,
				 __be32 dest_addr, __be16 dest_port)
{
	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
	struct mtk_ipv4_tuple *t;

	switch (type) {
	case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
		if (egress) {
			t = &entry->ipv4.new;
			break;
		}
		fallthrough;
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
	case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
		t = &entry->ipv4.orig;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
		entry->ipv6_6rd.tunnel_src_ip = be32_to_cpu(src_addr);
		entry->ipv6_6rd.tunnel_dest_ip = be32_to_cpu(dest_addr);
		return 0;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	t->src_ip = be32_to_cpu(src_addr);
	t->dest_ip = be32_to_cpu(dest_addr);

	if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
		return 0;

	t->src_port = be16_to_cpu(src_port);
	t->dest_port = be16_to_cpu(dest_port);

	return 0;
}

int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth,
				 struct mtk_foe_entry *entry,
				 __be32 *src_addr, __be16 src_port,
				 __be32 *dest_addr, __be16 dest_port)
{
	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
	u32 *src, *dest;
	int i;

	switch (type) {
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
		src = entry->dslite.tunnel_src_ip;
		dest = entry->dslite.tunnel_dest_ip;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
		entry->ipv6.src_port = be16_to_cpu(src_port);
		entry->ipv6.dest_port = be16_to_cpu(dest_port);
		fallthrough;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
		src = entry->ipv6.src_ip;
		dest = entry->ipv6.dest_ip;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	for (i = 0; i < 4; i++)
		src[i] = be32_to_cpu(src_addr[i]);
	for (i = 0; i < 4; i++)
		dest[i] = be32_to_cpu(dest_addr[i]);

	return 0;
}

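/* Encode the MTK DSA special tag for the given switch port in the l2 etype
 * field and account for it in the entry's VLAN layer count.
 */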
int mtk_foe_entry_set_dsa(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			  int port)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);

	l2->etype = BIT(port);

	if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)))
		entry->ib1 |= mtk_prep_ib1_vlan_layer(eth, 1);
	else
		l2->etype |= BIT(8);

	entry->ib1 &= ~mtk_get_ib1_vlan_tag_mask(eth);

	return 0;
}

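/* Add a VLAN tag to the entry. Up to two tags are supported; a third request
 * fails with -ENOSPC.
 */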
int mtk_foe_entry_set_vlan(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			   int vid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);

	switch (mtk_get_ib1_vlan_layer(eth, entry->ib1)) {
	case 0:
		entry->ib1 |= mtk_get_ib1_vlan_tag_mask(eth) |
			      mtk_prep_ib1_vlan_layer(eth, 1);
		l2->vlan1 = vid;
		return 0;
	case 1:
		if (!(entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth))) {
			l2->vlan1 = vid;
			l2->etype |= BIT(8);
		} else {
			l2->vlan2 = vid;
			entry->ib1 += mtk_prep_ib1_vlan_layer(eth, 1);
		}
		return 0;
	default:
		return -ENOSPC;
	}
}

int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			    int sid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);

	if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)) ||
	    (entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth)))
		l2->etype = ETH_P_PPP_SES;

	entry->ib1 |= mtk_get_ib1_ppoe_mask(eth);
	l2->pppoe_id = sid;

	return 0;
}

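/* Point the entry at a WDMA (WiFi offload) destination: store the target
 * ring/queue, BSS index and WCID so matching packets can be forwarded to the
 * wireless path. NETSYS v1 keeps this info in the vlan2 field, v2 in the
 * dedicated winfo field.
 */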
int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			   int wdma_idx, int txq, int bss, int wcid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
	u32 *ib2 = mtk_foe_entry_ib2(eth, entry);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		*ib2 &= ~MTK_FOE_IB2_PORT_MG_V2;
		*ib2 |=  FIELD_PREP(MTK_FOE_IB2_RX_IDX, txq) |
			 MTK_FOE_IB2_WDMA_WINFO_V2;
		l2->winfo = FIELD_PREP(MTK_FOE_WINFO_WCID, wcid) |
			    FIELD_PREP(MTK_FOE_WINFO_BSS, bss);
	} else {
		*ib2 &= ~MTK_FOE_IB2_PORT_MG;
		*ib2 |= MTK_FOE_IB2_WDMA_WINFO;
		if (wdma_idx)
			*ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;
		l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
			    FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
			    FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);
	}

	return 0;
}

int mtk_foe_entry_set_queue(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			    unsigned int queue)
{
	u32 *ib2 = mtk_foe_entry_ib2(eth, entry);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		*ib2 &= ~MTK_FOE_IB2_QID_V2;
		*ib2 |= FIELD_PREP(MTK_FOE_IB2_QID_V2, queue);
		*ib2 |= MTK_FOE_IB2_PSE_QOS_V2;
	} else {
		*ib2 &= ~MTK_FOE_IB2_QID;
		*ib2 |= FIELD_PREP(MTK_FOE_IB2_QID, queue);
		*ib2 |= MTK_FOE_IB2_PSE_QOS;
	}

	return 0;
}

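/* Check whether a hardware FOE entry describes the same flow as a software
 * entry: the UDP bit in ib1 and the tuple data up to (but not including) ib2
 * must match.
 */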
static bool
mtk_flow_entry_match(struct mtk_eth *eth, struct mtk_flow_entry *entry,
		     struct mtk_foe_entry *data)
{
	int type, len;

	if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
		return false;

	type = mtk_get_ib1_pkt_type(eth, entry->data.ib1);
	if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		len = offsetof(struct mtk_foe_entry, ipv6._rsv);
	else
		len = offsetof(struct mtk_foe_entry, ipv4.ib2);

	return !memcmp(&entry->data.data, &data->data, len - 4);
}

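/* Unlink a flow entry and invalidate its hardware FOE entry. For an L2 flow,
 * recursively clear all of its subflows as well. Must be called with ppe_lock
 * held.
 */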
static void
__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	struct hlist_head *head;
	struct hlist_node *tmp;

	if (entry->type == MTK_FLOW_TYPE_L2) {
		rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node,
				       mtk_flow_l2_ht_params);

		head = &entry->l2_flows;
		hlist_for_each_entry_safe(entry, tmp, head, l2_data.list)
			__mtk_foe_entry_clear(ppe, entry);
		return;
	}

	hlist_del_init(&entry->list);
	if (entry->hash != 0xffff) {
		struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, entry->hash);

		hwe->ib1 &= ~MTK_FOE_IB1_STATE;
		hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_INVALID);
		dma_wmb();
	}
	entry->hash = 0xffff;

	if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW)
		return;

	hlist_del_init(&entry->l2_data.list);
	kfree(entry);
}

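/* Return the number of timestamp ticks since the entry was last refreshed,
 * taking wraparound of the hardware timestamp counter into account.
 */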
static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1)
{
	u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
	u16 now = mtk_eth_timestamp(ppe->eth);
	u16 timestamp = ib1 & ib1_ts_mask;

	if (timestamp > now)
		return ib1_ts_mask + 1 - timestamp + now;
	else
		return now - timestamp;
}

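/* Refresh the timestamp of an L2 flow from its most recently used subflow and
 * drop subflows whose hardware entries are no longer in the BIND state.
 */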
static void
mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
	struct mtk_flow_entry *cur;
	struct mtk_foe_entry *hwe;
	struct hlist_node *tmp;
	int idle;

	idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
	hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) {
		int cur_idle;
		u32 ib1;

		hwe = mtk_foe_get_entry(ppe, cur->hash);
		ib1 = READ_ONCE(hwe->ib1);

		if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) {
			cur->hash = 0xffff;
			__mtk_foe_entry_clear(ppe, cur);
			continue;
		}

		cur_idle = __mtk_foe_entry_idle_time(ppe, ib1);
		if (cur_idle >= idle)
			continue;

		idle = cur_idle;
		entry->data.ib1 &= ~ib1_ts_mask;
		entry->data.ib1 |= hwe->ib1 & ib1_ts_mask;
	}
}

static void
mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	struct mtk_foe_entry foe = {};
	struct mtk_foe_entry *hwe;

	spin_lock_bh(&ppe_lock);

	if (entry->type == MTK_FLOW_TYPE_L2) {
		mtk_flow_entry_update_l2(ppe, entry);
		goto out;
	}

	if (entry->hash == 0xffff)
		goto out;

	hwe = mtk_foe_get_entry(ppe, entry->hash);
	memcpy(&foe, hwe, ppe->eth->soc->foe_entry_size);
	if (!mtk_flow_entry_match(ppe->eth, entry, &foe)) {
		entry->hash = 0xffff;
		goto out;
	}

	entry->data.ib1 = foe.ib1;

out:
	spin_unlock_bh(&ppe_lock);
}

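/* Write an entry into the hardware FOE table at the given hash index. The
 * data portion is written first and ib1 (containing the state field) last,
 * with barriers in between, so a partially written entry is never seen as
 * valid.
 */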
static void
__mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
		       u16 hash)
{
	struct mtk_eth *eth = ppe->eth;
	u16 timestamp = mtk_eth_timestamp(eth);
	struct mtk_foe_entry *hwe;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP_V2;
		entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP_V2,
					 timestamp);
	} else {
		entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
		entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP,
					 timestamp);
	}

	hwe = mtk_foe_get_entry(ppe, hash);
	memcpy(&hwe->data, &entry->data, eth->soc->foe_entry_size - sizeof(hwe->ib1));
	wmb();
	hwe->ib1 = entry->ib1;

	dma_wmb();

	mtk_ppe_cache_clear(ppe);
}

void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	spin_lock_bh(&ppe_lock);
	__mtk_foe_entry_clear(ppe, entry);
	spin_unlock_bh(&ppe_lock);
}

static int
mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	entry->type = MTK_FLOW_TYPE_L2;

	return rhashtable_insert_fast(&ppe->l2_flows, &entry->l2_node,
				      mtk_flow_l2_ht_params);
}

int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	const struct mtk_soc_data *soc = ppe->eth->soc;
	int type = mtk_get_ib1_pkt_type(ppe->eth, entry->data.ib1);
	u32 hash;

	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
		return mtk_foe_entry_commit_l2(ppe, entry);

	hash = mtk_ppe_hash_entry(ppe->eth, &entry->data);
	entry->hash = 0xffff;
	spin_lock_bh(&ppe_lock);
	hlist_add_head(&entry->list, &ppe->foe_flow[hash / soc->hash_offset]);
	spin_unlock_bh(&ppe_lock);

	return 0;
}

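/* Create a hardware entry for a single flow covered by an L2 (bridge) flow:
 * copy the tuple from the unbound hardware entry at @hash, apply the L2
 * flow's MAC/ib2 info and commit the result as a subflow.
 */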
static void
mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
			     u16 hash)
{
	const struct mtk_soc_data *soc = ppe->eth->soc;
	struct mtk_flow_entry *flow_info;
	struct mtk_foe_entry foe = {}, *hwe;
	struct mtk_foe_mac_info *l2;
	u32 ib1_mask = mtk_get_ib1_pkt_type_mask(ppe->eth) | MTK_FOE_IB1_UDP;
	int type;

	flow_info = kzalloc(sizeof(*flow_info), GFP_ATOMIC);
	if (!flow_info)
		return;

	flow_info->l2_data.base_flow = entry;
	flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW;
	flow_info->hash = hash;
	hlist_add_head(&flow_info->list,
		       &ppe->foe_flow[hash / soc->hash_offset]);
	hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);

	hwe = mtk_foe_get_entry(ppe, hash);
	memcpy(&foe, hwe, soc->foe_entry_size);
	foe.ib1 &= ib1_mask;
	foe.ib1 |= entry->data.ib1 & ~ib1_mask;

	l2 = mtk_foe_entry_l2(ppe->eth, &foe);
	memcpy(l2, &entry->data.bridge.l2, sizeof(*l2));

	type = mtk_get_ib1_pkt_type(ppe->eth, foe.ib1);
	if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT)
		memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new));
	else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP)
		l2->etype = ETH_P_IPV6;

	*mtk_foe_entry_ib2(ppe->eth, &foe) = entry->data.bridge.ib2;

	__mtk_foe_entry_commit(ppe, &foe, hash);
}

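/* Called for received packets that carry an unbound FOE hash: look for a
 * matching software flow (or an L2 flow, keyed by MAC addresses and VLAN) and
 * bind the hardware entry at @hash to it.
 */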
void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
{
	const struct mtk_soc_data *soc = ppe->eth->soc;
	struct hlist_head *head = &ppe->foe_flow[hash / soc->hash_offset];
	struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, hash);
	struct mtk_flow_entry *entry;
	struct mtk_foe_bridge key = {};
	struct hlist_node *n;
	struct ethhdr *eh;
	bool found = false;
	u8 *tag;

	spin_lock_bh(&ppe_lock);

	if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND)
		goto out;

	hlist_for_each_entry_safe(entry, n, head, list) {
		if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) {
			if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) ==
				     MTK_FOE_STATE_BIND))
				continue;

			entry->hash = 0xffff;
			__mtk_foe_entry_clear(ppe, entry);
			continue;
		}

		if (found || !mtk_flow_entry_match(ppe->eth, entry, hwe)) {
			if (entry->hash != 0xffff)
				entry->hash = 0xffff;
			continue;
		}

		entry->hash = hash;
		__mtk_foe_entry_commit(ppe, &entry->data, hash);
		found = true;
	}

	if (found)
		goto out;

	eh = eth_hdr(skb);
	ether_addr_copy(key.dest_mac, eh->h_dest);
	ether_addr_copy(key.src_mac, eh->h_source);
	tag = skb->data - 2;
	key.vlan = 0;
	switch (skb->protocol) {
#if IS_ENABLED(CONFIG_NET_DSA)
	case htons(ETH_P_XDSA):
		if (!netdev_uses_dsa(skb->dev) ||
		    skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
			goto out;

		tag += 4;
		if (get_unaligned_be16(tag) != ETH_P_8021Q)
			break;

		fallthrough;
#endif
	case htons(ETH_P_8021Q):
		key.vlan = get_unaligned_be16(tag + 2) & VLAN_VID_MASK;
		break;
	default:
		break;
	}

	entry = rhashtable_lookup_fast(&ppe->l2_flows, &key, mtk_flow_l2_ht_params);
	if (!entry)
		goto out;

	mtk_foe_entry_commit_subflow(ppe, entry, hash);

out:
	spin_unlock_bh(&ppe_lock);
}

int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	mtk_flow_entry_update(ppe, entry);

	return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
}

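/* Prepare the PPE for a hardware reset: park the keepalive logic, disable
 * scan mode and wait until the table is no longer busy.
 */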
int mtk_ppe_prepare_reset(struct mtk_ppe *ppe)
{
	if (!ppe)
		return -EINVAL;

	/* disable KA */
	ppe_clear(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_KEEPALIVE);
	ppe_clear(ppe, MTK_PPE_BIND_LMT1, MTK_PPE_NTU_KEEPALIVE);
	ppe_w32(ppe, MTK_PPE_KEEPALIVE, 0);
	usleep_range(10000, 11000);

	/* set KA timer to maximum */
	ppe_set(ppe, MTK_PPE_BIND_LMT1, MTK_PPE_NTU_KEEPALIVE);
	ppe_w32(ppe, MTK_PPE_KEEPALIVE, 0xffffffff);

	/* set KA tick select */
	ppe_set(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_TICK_SEL);
	ppe_set(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_KEEPALIVE);
	usleep_range(10000, 11000);

	/* disable scan mode */
	ppe_clear(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_SCAN_MODE);
	usleep_range(10000, 11000);

	return mtk_ppe_wait_busy(ppe);
}

struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
			     int version, int index)
{
	const struct mtk_soc_data *soc = eth->soc;
	struct device *dev = eth->dev;
	struct mtk_ppe *ppe;
	u32 foe_flow_size;
	void *foe;

	ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL);
	if (!ppe)
		return NULL;

	rhashtable_init(&ppe->l2_flows, &mtk_flow_l2_ht_params);

	/* need to allocate a separate device, since the PPE DMA access is
	 * not coherent.
	 */
	ppe->base = base;
	ppe->eth = eth;
	ppe->dev = dev;
	ppe->version = version;

	foe = dmam_alloc_coherent(ppe->dev,
				  MTK_PPE_ENTRIES * soc->foe_entry_size,
				  &ppe->foe_phys, GFP_KERNEL);
	if (!foe)
		goto err_free_l2_flows;

	ppe->foe_table = foe;

	foe_flow_size = (MTK_PPE_ENTRIES / soc->hash_offset) *
			sizeof(*ppe->foe_flow);
	ppe->foe_flow = devm_kzalloc(dev, foe_flow_size, GFP_KERNEL);
	if (!ppe->foe_flow)
		goto err_free_l2_flows;

	mtk_ppe_debugfs_init(ppe, index);

	return ppe;

err_free_l2_flows:
	rhashtable_destroy(&ppe->l2_flows);
	return NULL;
}

void mtk_ppe_deinit(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) {
		if (!eth->ppe[i])
			return;
		rhashtable_destroy(&eth->ppe[i]->l2_flows);
	}
}

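/* Zero the FOE table. On MT7621, additionally mark all entries that cross a
 * 1024 byte boundary as static, so they are skipped and never used for new
 * flows.
 */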
static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
{
	static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
	int i, k;

	memset(ppe->foe_table, 0,
	       MTK_PPE_ENTRIES * ppe->eth->soc->foe_entry_size);

	if (!IS_ENABLED(CONFIG_SOC_MT7621))
		return;

	/* skip all entries that cross the 1024 byte boundary */
	for (i = 0; i < MTK_PPE_ENTRIES; i += 128) {
		for (k = 0; k < ARRAY_SIZE(skip); k++) {
			struct mtk_foe_entry *hwe;

			hwe = mtk_foe_get_entry(ppe, i + skip[k]);
			hwe->ib1 |= MTK_FOE_IB1_STATIC;
		}
	}
}

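/* Program the table base and all configuration registers, clear the cache and
 * enable the PPE.
 */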
void mtk_ppe_start(struct mtk_ppe *ppe)
{
	u32 val;

	if (!ppe)
		return;

	mtk_ppe_init_foe_table(ppe);
	ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys);

	val = MTK_PPE_TB_CFG_ENTRY_80B |
	      MTK_PPE_TB_CFG_AGE_NON_L4 |
	      MTK_PPE_TB_CFG_AGE_UNBIND |
	      MTK_PPE_TB_CFG_AGE_TCP |
	      MTK_PPE_TB_CFG_AGE_UDP |
	      MTK_PPE_TB_CFG_AGE_TCP_FIN |
	      FIELD_PREP(MTK_PPE_TB_CFG_SEARCH_MISS,
			 MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD) |
	      FIELD_PREP(MTK_PPE_TB_CFG_KEEPALIVE,
			 MTK_PPE_KEEPALIVE_DISABLE) |
	      FIELD_PREP(MTK_PPE_TB_CFG_HASH_MODE, 1) |
	      FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE,
			 MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) |
	      FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
			 MTK_PPE_ENTRIES_SHIFT);
	if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2))
		val |= MTK_PPE_TB_CFG_INFO_SEL;
	ppe_w32(ppe, MTK_PPE_TB_CFG, val);

	ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK,
		MTK_PPE_IP_PROTO_CHK_IPV4 | MTK_PPE_IP_PROTO_CHK_IPV6);

	mtk_ppe_cache_enable(ppe, true);

	val = MTK_PPE_FLOW_CFG_IP6_3T_ROUTE |
	      MTK_PPE_FLOW_CFG_IP6_5T_ROUTE |
	      MTK_PPE_FLOW_CFG_IP6_6RD |
	      MTK_PPE_FLOW_CFG_IP4_NAT |
	      MTK_PPE_FLOW_CFG_IP4_NAPT |
	      MTK_PPE_FLOW_CFG_IP4_DSLITE |
	      MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
	if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2))
		val |= MTK_PPE_MD_TOAP_BYP_CRSN0 |
		       MTK_PPE_MD_TOAP_BYP_CRSN1 |
		       MTK_PPE_MD_TOAP_BYP_CRSN2 |
		       MTK_PPE_FLOW_CFG_IP4_HASH_GRE_KEY;
	else
		val |= MTK_PPE_FLOW_CFG_IP4_TCP_FRAG |
		       MTK_PPE_FLOW_CFG_IP4_UDP_FRAG;
	ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);

	val = FIELD_PREP(MTK_PPE_UNBIND_AGE_MIN_PACKETS, 1000) |
	      FIELD_PREP(MTK_PPE_UNBIND_AGE_DELTA, 3);
	ppe_w32(ppe, MTK_PPE_UNBIND_AGE, val);

	val = FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_UDP, 12) |
	      FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_NON_L4, 1);
	ppe_w32(ppe, MTK_PPE_BIND_AGE0, val);

	val = FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
	      FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP, 7);
	ppe_w32(ppe, MTK_PPE_BIND_AGE1, val);

	val = MTK_PPE_BIND_LIMIT0_QUARTER | MTK_PPE_BIND_LIMIT0_HALF;
	ppe_w32(ppe, MTK_PPE_BIND_LIMIT0, val);

	val = MTK_PPE_BIND_LIMIT1_FULL |
	      FIELD_PREP(MTK_PPE_BIND_LIMIT1_NON_L4, 1);
	ppe_w32(ppe, MTK_PPE_BIND_LIMIT1, val);

	val = FIELD_PREP(MTK_PPE_BIND_RATE_BIND, 30) |
	      FIELD_PREP(MTK_PPE_BIND_RATE_PREBIND, 1);
	ppe_w32(ppe, MTK_PPE_BIND_RATE, val);

	/* enable PPE */
	val = MTK_PPE_GLO_CFG_EN |
	      MTK_PPE_GLO_CFG_IP4_L4_CS_DROP |
	      MTK_PPE_GLO_CFG_IP4_CS_DROP |
	      MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE;
	ppe_w32(ppe, MTK_PPE_GLO_CFG, val);

	ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);

	if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2)) {
		ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT1, 0xcb777);
		ppe_w32(ppe, MTK_PPE_SBW_CTRL, 0x7f);
	}
}

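/* Invalidate all FOE entries, disable the cache, the offload engine and
 * aging, then wait for the hardware to go idle.
 */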
int mtk_ppe_stop(struct mtk_ppe *ppe)
{
	u32 val;
	int i;

	if (!ppe)
		return 0;

	for (i = 0; i < MTK_PPE_ENTRIES; i++) {
		struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, i);

		hwe->ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
				      MTK_FOE_STATE_INVALID);
	}

	mtk_ppe_cache_enable(ppe, false);

	/* disable offload engine */
	ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN);
	ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0);

	/* disable aging */
	val = MTK_PPE_TB_CFG_AGE_NON_L4 |
	      MTK_PPE_TB_CFG_AGE_UNBIND |
	      MTK_PPE_TB_CFG_AGE_TCP |
	      MTK_PPE_TB_CFG_AGE_UDP |
	      MTK_PPE_TB_CFG_AGE_TCP_FIN;
	ppe_clear(ppe, MTK_PPE_TB_CFG, val);

	return mtk_ppe_wait_busy(ppe);
}