// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include "mtk_ppe.h"
#include "mtk_ppe_regs.h"

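/* Simple MMIO accessors for the PPE register block. ppe_m32() is a
 * read-modify-write helper used by ppe_set()/ppe_clear() below.
 */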
static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	writel(val, ppe->base + reg);
}

static u32 ppe_r32(struct mtk_ppe *ppe, u32 reg)
{
	return readl(ppe->base + reg);
}

static u32 ppe_m32(struct mtk_ppe *ppe, u32 reg, u32 mask, u32 set)
{
	u32 val;

	val = ppe_r32(ppe, reg);
	val &= ~mask;
	val |= set;
	ppe_w32(ppe, reg, val);

	return val;
}

static u32 ppe_set(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	return ppe_m32(ppe, reg, 0, val);
}

static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	return ppe_m32(ppe, reg, val, 0);
}

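/* Poll until the PPE clears its busy flag, giving up after roughly one
 * second (HZ jiffies).
 */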
static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
{
	unsigned long timeout = jiffies + HZ;

	while (time_is_after_jiffies(timeout)) {
		if (!(ppe_r32(ppe, MTK_PPE_GLO_CFG) & MTK_PPE_GLO_CFG_BUSY))
			return 0;

		usleep_range(10, 20);
	}

	dev_err(ppe->dev, "PPE table busy\n");

	return -ETIMEDOUT;
}

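/* Pulse the cache clear bit to invalidate the PPE's internal entry cache. */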
static void mtk_ppe_cache_clear(struct mtk_ppe *ppe)
{
	ppe_set(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
	ppe_clear(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
}

static void mtk_ppe_cache_enable(struct mtk_ppe *ppe, bool enable)
{
	mtk_ppe_cache_clear(ppe);

	ppe_m32(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_EN,
		enable * MTK_PPE_CACHE_CTL_EN);
}

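/* Derive the FOE table index from the flow tuple. Three 32-bit values are
 * picked per packet type, then mixed and folded down to an index; the final
 * left shift makes the index even, so each flow maps to a pair of adjacent
 * table slots (see mtk_foe_entry_commit()).
 */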
static u32 mtk_ppe_hash_entry(struct mtk_foe_entry *e)
{
	u32 hv1, hv2, hv3;
	u32 hash;

	switch (FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, e->ib1)) {
	case MTK_PPE_PKT_TYPE_BRIDGE:
		hv1 = e->bridge.src_mac_lo;
		hv1 ^= ((e->bridge.src_mac_hi & 0xffff) << 16);
		hv2 = e->bridge.src_mac_hi >> 16;
		hv2 ^= e->bridge.dest_mac_lo;
		hv3 = e->bridge.dest_mac_hi;
		break;
	case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
	case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
		hv1 = e->ipv4.orig.ports;
		hv2 = e->ipv4.orig.dest_ip;
		hv3 = e->ipv4.orig.src_ip;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
		hv1 = e->ipv6.src_ip[3] ^ e->ipv6.dest_ip[3];
		hv1 ^= e->ipv6.ports;

		hv2 = e->ipv6.src_ip[2] ^ e->ipv6.dest_ip[2];
		hv2 ^= e->ipv6.dest_ip[0];

		hv3 = e->ipv6.src_ip[1] ^ e->ipv6.dest_ip[1];
		hv3 ^= e->ipv6.src_ip[0];
		break;
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
	default:
		WARN_ON_ONCE(1);
		return MTK_PPE_HASH_MASK;
	}

	hash = (hv1 & hv2) | ((~hv1) & hv3);
	hash = (hash >> 24) | ((hash & 0xffffff) << 8);
	hash ^= hv1 ^ hv2 ^ hv3;
	hash ^= hash >> 16;
	hash <<= 1;
	hash &= MTK_PPE_ENTRIES - 1;

	return hash;
}

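/* Return the L2 rewrite info of an entry; its offset inside the FOE entry
 * depends on the packet type (the IPv6/DS-Lite layout differs from the
 * IPv4 one). mtk_foe_entry_ib2() does the same for the ib2 word.
 */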
static inline struct mtk_foe_mac_info *
mtk_foe_entry_l2(struct mtk_foe_entry *entry)
{
	int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);

	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		return &entry->ipv6.l2;

	return &entry->ipv4.l2;
}

static inline u32 *
mtk_foe_entry_ib2(struct mtk_foe_entry *entry)
{
	int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);

	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		return &entry->ipv6.ib2;

	return &entry->ipv4.ib2;
}

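/* Initialize a software FOE entry: bound state, packet type, L4 protocol
 * flag, destination PSE port and the L2 rewrite info (MAC addresses and
 * ethertype). For 3-tuple packet types the unused port field is filled
 * with a fixed padding pattern.
 */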
int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
			  u8 pse_port, u8 *src_mac, u8 *dest_mac)
{
	struct mtk_foe_mac_info *l2;
	u32 ports_pad, val;

	memset(entry, 0, sizeof(*entry));

	val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
	      FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
	      FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
	      MTK_FOE_IB1_BIND_TTL |
	      MTK_FOE_IB1_BIND_CACHE;
	entry->ib1 = val;

	val = FIELD_PREP(MTK_FOE_IB2_PORT_MG, 0x3f) |
	      FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f) |
	      FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port);

	if (is_multicast_ether_addr(dest_mac))
		val |= MTK_FOE_IB2_MULTICAST;

	ports_pad = 0xa5a5a500 | (l4proto & 0xff);
	if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
		entry->ipv4.orig.ports = ports_pad;
	if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
		entry->ipv6.ports = ports_pad;

	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
		entry->ipv6.ib2 = val;
		l2 = &entry->ipv6.l2;
	} else {
		entry->ipv4.ib2 = val;
		l2 = &entry->ipv4.l2;
	}

	l2->dest_mac_hi = get_unaligned_be32(dest_mac);
	l2->dest_mac_lo = get_unaligned_be16(dest_mac + 4);
	l2->src_mac_hi = get_unaligned_be32(src_mac);
	l2->src_mac_lo = get_unaligned_be16(src_mac + 4);

	if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
		l2->etype = ETH_P_IPV6;
	else
		l2->etype = ETH_P_IP;

	return 0;
}

int mtk_foe_entry_set_pse_port(struct mtk_foe_entry *entry, u8 port)
{
	u32 *ib2 = mtk_foe_entry_ib2(entry);
	u32 val;

	val = *ib2;
	val &= ~MTK_FOE_IB2_DEST_PORT;
	val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port);
	*ib2 = val;

	return 0;
}

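/* Fill in the IPv4 part of an entry. For HNAPT the egress direction uses
 * the translated (new) tuple and everything else the original one; for 6RD
 * the IPv4 addresses describe the tunnel endpoints.
 */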
int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool egress,
				 __be32 src_addr, __be16 src_port,
				 __be32 dest_addr, __be16 dest_port)
{
	int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
	struct mtk_ipv4_tuple *t;

	switch (type) {
	case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
		if (egress) {
			t = &entry->ipv4.new;
			break;
		}
		fallthrough;
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
	case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
		t = &entry->ipv4.orig;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
		entry->ipv6_6rd.tunnel_src_ip = be32_to_cpu(src_addr);
		entry->ipv6_6rd.tunnel_dest_ip = be32_to_cpu(dest_addr);
		return 0;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	t->src_ip = be32_to_cpu(src_addr);
	t->dest_ip = be32_to_cpu(dest_addr);

	if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
		return 0;

	t->src_port = be16_to_cpu(src_port);
	t->dest_port = be16_to_cpu(dest_port);

	return 0;
}

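/* Fill in the IPv6 part of an entry. For DS-Lite the addresses describe
 * the tunnel endpoints; ports are only stored for the 5-tuple and 6RD
 * packet types.
 */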
int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
				 __be32 *src_addr, __be16 src_port,
				 __be32 *dest_addr, __be16 dest_port)
{
	int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
	u32 *src, *dest;
	int i;

	switch (type) {
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
		src = entry->dslite.tunnel_src_ip;
		dest = entry->dslite.tunnel_dest_ip;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
		entry->ipv6.src_port = be16_to_cpu(src_port);
		entry->ipv6.dest_port = be16_to_cpu(dest_port);
		fallthrough;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
		src = entry->ipv6.src_ip;
		dest = entry->ipv6.dest_ip;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	for (i = 0; i < 4; i++)
		src[i] = be32_to_cpu(src_addr[i]);
	for (i = 0; i < 4; i++)
		dest[i] = be32_to_cpu(dest_addr[i]);

	return 0;
}

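/* Set up DSA tagging towards a switch port: the port number is encoded in
 * the etype field and a VLAN layer is reserved for carrying the tag.
 */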
int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);

	l2->etype = BIT(port);

	if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER))
		entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
	else
		l2->etype |= BIT(8);

	entry->ib1 &= ~MTK_FOE_IB1_BIND_VLAN_TAG;

	return 0;
}

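/* Add a VLAN tag to the entry. Up to two tags (vlan1/vlan2) are supported;
 * a third request returns -ENOSPC.
 */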
int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);

	switch (FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, entry->ib1)) {
	case 0:
		entry->ib1 |= MTK_FOE_IB1_BIND_VLAN_TAG |
			      FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
		l2->vlan1 = vid;
		return 0;
	case 1:
		if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG)) {
			l2->vlan1 = vid;
			l2->etype |= BIT(8);
		} else {
			l2->vlan2 = vid;
			entry->ib1 += FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
		}
		return 0;
	default:
		return -ENOSPC;
	}
}

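/* Add a PPPoE header: store the session id and, unless an untagged VLAN
 * layer is already in use, switch the ethertype to ETH_P_PPP_SES.
 */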
int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);

	if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER) ||
	    (entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG))
		l2->etype = ETH_P_PPP_SES;

	entry->ib1 |= MTK_FOE_IB1_BIND_PPPOE;
	l2->pppoe_id = sid;

	return 0;
}

static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry)
{
	return !(entry->ib1 & MTK_FOE_IB1_STATIC) &&
	       FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND;
}

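/* Write a prepared entry into the hardware FOE table. The slot is picked
 * by mtk_ppe_hash_entry(); if it is unusable, the adjacent slot is tried.
 * The entry data is written before ib1 so the hardware never sees a bound
 * ib1 together with stale data, and the PPE cache is flushed afterwards.
 * Returns the table index on success.
 */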
int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
			 u16 timestamp)
{
	struct mtk_foe_entry *hwe;
	u32 hash;

	timestamp &= MTK_FOE_IB1_BIND_TIMESTAMP;
	entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
	entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, timestamp);

	hash = mtk_ppe_hash_entry(entry);
	hwe = &ppe->foe_table[hash];
	if (!mtk_foe_entry_usable(hwe)) {
		hwe++;
		hash++;

		if (!mtk_foe_entry_usable(hwe))
			return -ENOSPC;
	}

	memcpy(&hwe->data, &entry->data, sizeof(hwe->data));
	wmb();
	hwe->ib1 = entry->ib1;

	dma_wmb();

	mtk_ppe_cache_clear(ppe);

	return hash;
}

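/* Set up the PPE context: store register base, device and version, and
 * allocate the DMA-coherent FOE table shared with the hardware.
 */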
int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base,
		 int version)
{
	struct mtk_foe_entry *foe;

	/* need to allocate a separate device, since PPE DMA access is
	 * not coherent.
	 */
	ppe->base = base;
	ppe->dev = dev;
	ppe->version = version;

	foe = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*foe),
				  &ppe->foe_phys, GFP_KERNEL);
	if (!foe)
		return -ENOMEM;

	ppe->foe_table = foe;

	mtk_ppe_debugfs_init(ppe);

	return 0;
}

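/* Clear the FOE table. On MT7621, entries that would cross a 1024 byte
 * boundary are marked static so they are never bound by the hardware.
 */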
static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
{
	static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
	int i, k;

	memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(*ppe->foe_table));

	if (!IS_ENABLED(CONFIG_SOC_MT7621))
		return;

	/* skip all entries that cross the 1024 byte boundary */
	for (i = 0; i < MTK_PPE_ENTRIES; i += 128)
		for (k = 0; k < ARRAY_SIZE(skip); k++)
			ppe->foe_table[i + skip[k]].ib1 |= MTK_FOE_IB1_STATIC;
}

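/* Program the table base and geometry, aging parameters, supported flow
 * types and binding rate limits, then enable the offload engine.
 */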
int mtk_ppe_start(struct mtk_ppe *ppe)
{
	u32 val;

	mtk_ppe_init_foe_table(ppe);
	ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys);

	val = MTK_PPE_TB_CFG_ENTRY_80B |
	      MTK_PPE_TB_CFG_AGE_NON_L4 |
	      MTK_PPE_TB_CFG_AGE_UNBIND |
	      MTK_PPE_TB_CFG_AGE_TCP |
	      MTK_PPE_TB_CFG_AGE_UDP |
	      MTK_PPE_TB_CFG_AGE_TCP_FIN |
	      FIELD_PREP(MTK_PPE_TB_CFG_SEARCH_MISS,
			 MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD) |
	      FIELD_PREP(MTK_PPE_TB_CFG_KEEPALIVE,
			 MTK_PPE_KEEPALIVE_DISABLE) |
	      FIELD_PREP(MTK_PPE_TB_CFG_HASH_MODE, 1) |
	      FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE,
			 MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) |
	      FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
			 MTK_PPE_ENTRIES_SHIFT);
	ppe_w32(ppe, MTK_PPE_TB_CFG, val);

	ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK,
		MTK_PPE_IP_PROTO_CHK_IPV4 | MTK_PPE_IP_PROTO_CHK_IPV6);

	mtk_ppe_cache_enable(ppe, true);

	val = MTK_PPE_FLOW_CFG_IP4_TCP_FRAG |
	      MTK_PPE_FLOW_CFG_IP4_UDP_FRAG |
	      MTK_PPE_FLOW_CFG_IP6_3T_ROUTE |
	      MTK_PPE_FLOW_CFG_IP6_5T_ROUTE |
	      MTK_PPE_FLOW_CFG_IP6_6RD |
	      MTK_PPE_FLOW_CFG_IP4_NAT |
	      MTK_PPE_FLOW_CFG_IP4_NAPT |
	      MTK_PPE_FLOW_CFG_IP4_DSLITE |
	      MTK_PPE_FLOW_CFG_L2_BRIDGE |
	      MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
	ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);

	val = FIELD_PREP(MTK_PPE_UNBIND_AGE_MIN_PACKETS, 1000) |
	      FIELD_PREP(MTK_PPE_UNBIND_AGE_DELTA, 3);
	ppe_w32(ppe, MTK_PPE_UNBIND_AGE, val);

	val = FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_UDP, 12) |
	      FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_NON_L4, 1);
	ppe_w32(ppe, MTK_PPE_BIND_AGE0, val);

	val = FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
	      FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP, 7);
	ppe_w32(ppe, MTK_PPE_BIND_AGE1, val);

	val = MTK_PPE_BIND_LIMIT0_QUARTER | MTK_PPE_BIND_LIMIT0_HALF;
	ppe_w32(ppe, MTK_PPE_BIND_LIMIT0, val);

	val = MTK_PPE_BIND_LIMIT1_FULL |
	      FIELD_PREP(MTK_PPE_BIND_LIMIT1_NON_L4, 1);
	ppe_w32(ppe, MTK_PPE_BIND_LIMIT1, val);

	val = FIELD_PREP(MTK_PPE_BIND_RATE_BIND, 30) |
	      FIELD_PREP(MTK_PPE_BIND_RATE_PREBIND, 1);
	ppe_w32(ppe, MTK_PPE_BIND_RATE, val);

	/* enable PPE */
	val = MTK_PPE_GLO_CFG_EN |
	      MTK_PPE_GLO_CFG_IP4_L4_CS_DROP |
	      MTK_PPE_GLO_CFG_IP4_CS_DROP |
	      MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE;
	ppe_w32(ppe, MTK_PPE_GLO_CFG, val);

	ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);

	return 0;
}

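/* Invalidate all table entries, disable the cache, the offload engine and
 * aging, and wait for the hardware to finish any pending table access.
 */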
int mtk_ppe_stop(struct mtk_ppe *ppe)
{
	u32 val;
	int i;

	for (i = 0; i < MTK_PPE_ENTRIES; i++)
		ppe->foe_table[i].ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
						   MTK_FOE_STATE_INVALID);

	mtk_ppe_cache_enable(ppe, false);

	/* disable offload engine */
	ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN);
	ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0);

	/* disable aging */
	val = MTK_PPE_TB_CFG_AGE_NON_L4 |
	      MTK_PPE_TB_CFG_AGE_UNBIND |
	      MTK_PPE_TB_CFG_AGE_TCP |
	      MTK_PPE_TB_CFG_AGE_UDP |
	      MTK_PPE_TB_CFG_AGE_TCP_FIN;
	ppe_clear(ppe, MTK_PPE_TB_CFG, val);

	return mtk_ppe_wait_busy(ppe);
}