// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/dst_metadata.h>
#include <net/dsa.h>
#include "mtk_eth_soc.h"
#include "mtk_ppe.h"
#include "mtk_ppe_regs.h"

static DEFINE_SPINLOCK(ppe_lock);

static const struct rhashtable_params mtk_flow_l2_ht_params = {
	.head_offset = offsetof(struct mtk_flow_entry, l2_node),
	.key_offset = offsetof(struct mtk_flow_entry, data.bridge),
	.key_len = offsetof(struct mtk_foe_bridge, key_end),
	.automatic_shrinking = true,
};

static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	writel(val, ppe->base + reg);
}

static u32 ppe_r32(struct mtk_ppe *ppe, u32 reg)
{
	return readl(ppe->base + reg);
}

static u32 ppe_m32(struct mtk_ppe *ppe, u32 reg, u32 mask, u32 set)
{
	u32 val;

	val = ppe_r32(ppe, reg);
	val &= ~mask;
	val |= set;
	ppe_w32(ppe, reg, val);

	return val;
}

static u32 ppe_set(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	return ppe_m32(ppe, reg, 0, val);
}

static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	return ppe_m32(ppe, reg, val, 0);
}

static u32 mtk_eth_timestamp(struct mtk_eth *eth)
{
	return mtk_r32(eth, 0x0010) & mtk_get_ib1_ts_mask(eth);
}

static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
{
	int ret;
	u32 val;

	ret = readl_poll_timeout(ppe->base + MTK_PPE_GLO_CFG, val,
				 !(val & MTK_PPE_GLO_CFG_BUSY),
				 20, MTK_PPE_WAIT_TIMEOUT_US);

	if (ret)
		dev_err(ppe->dev, "PPE table busy");

	return ret;
}

static int mtk_ppe_mib_wait_busy(struct mtk_ppe *ppe)
{
	int ret;
	u32 val;

	ret = readl_poll_timeout(ppe->base + MTK_PPE_MIB_SER_CR, val,
				 !(val & MTK_PPE_MIB_SER_CR_ST),
				 20, MTK_PPE_WAIT_TIMEOUT_US);

	if (ret)
		dev_err(ppe->dev, "MIB table busy");

	return ret;
}

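/* Read one entry from the MIB counter table via the serial command register
 * and return the accumulated byte and packet counters for @index.
 */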
static int mtk_mib_entry_read(struct mtk_ppe *ppe, u16 index, u64 *bytes, u64 *packets)
{
	u32 val, cnt_r0, cnt_r1, cnt_r2;
	int ret;

	val = FIELD_PREP(MTK_PPE_MIB_SER_CR_ADDR, index) | MTK_PPE_MIB_SER_CR_ST;
	ppe_w32(ppe, MTK_PPE_MIB_SER_CR, val);

	ret = mtk_ppe_mib_wait_busy(ppe);
	if (ret)
		return ret;

	cnt_r0 = readl(ppe->base + MTK_PPE_MIB_SER_R0);
	cnt_r1 = readl(ppe->base + MTK_PPE_MIB_SER_R1);
	cnt_r2 = readl(ppe->base + MTK_PPE_MIB_SER_R2);

	if (mtk_is_netsys_v3_or_greater(ppe->eth)) {
		/* 64 bit for each counter */
		u32 cnt_r3 = readl(ppe->base + MTK_PPE_MIB_SER_R3);
		*bytes = ((u64)cnt_r1 << 32) | cnt_r0;
		*packets = ((u64)cnt_r3 << 32) | cnt_r2;
	} else {
		/* 48 bit byte counter, 40 bit packet counter */
		u32 byte_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R0_BYTE_CNT_LOW, cnt_r0);
		u32 byte_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R1_BYTE_CNT_HIGH, cnt_r1);
		u32 pkt_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R1_PKT_CNT_LOW, cnt_r1);
		u32 pkt_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R2_PKT_CNT_HIGH, cnt_r2);
		*bytes = ((u64)byte_cnt_high << 32) | byte_cnt_low;
		*packets = ((u64)pkt_cnt_high << 16) | pkt_cnt_low;
	}

	return 0;
}

static void mtk_ppe_cache_clear(struct mtk_ppe *ppe)
{
	ppe_set(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
	ppe_clear(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
}

static void mtk_ppe_cache_enable(struct mtk_ppe *ppe, bool enable)
{
	mtk_ppe_cache_clear(ppe);

	ppe_m32(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_EN,
		enable * MTK_PPE_CACHE_CTL_EN);
}

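/* Compute the FOE table hash for an entry the same way the PPE hardware
 * does, so that the software flow can be filed under the bucket the
 * hardware will report. Only IPv4 route/HNAPT and IPv6 3T/5T types are
 * handled here.
 */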
static u32 mtk_ppe_hash_entry(struct mtk_eth *eth, struct mtk_foe_entry *e)
{
	u32 hv1, hv2, hv3;
	u32 hash;

	switch (mtk_get_ib1_pkt_type(eth, e->ib1)) {
	case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
	case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
		hv1 = e->ipv4.orig.ports;
		hv2 = e->ipv4.orig.dest_ip;
		hv3 = e->ipv4.orig.src_ip;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
		hv1 = e->ipv6.src_ip[3] ^ e->ipv6.dest_ip[3];
		hv1 ^= e->ipv6.ports;

		hv2 = e->ipv6.src_ip[2] ^ e->ipv6.dest_ip[2];
		hv2 ^= e->ipv6.dest_ip[0];

		hv3 = e->ipv6.src_ip[1] ^ e->ipv6.dest_ip[1];
		hv3 ^= e->ipv6.src_ip[0];
		break;
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
	default:
		WARN_ON_ONCE(1);
		return MTK_PPE_HASH_MASK;
	}

	hash = (hv1 & hv2) | ((~hv1) & hv3);
	hash = (hash >> 24) | ((hash & 0xffffff) << 8);
	hash ^= hv1 ^ hv2 ^ hv3;
	hash ^= hash >> 16;
	hash <<= (ffs(eth->soc->hash_offset) - 1);
	hash &= MTK_PPE_ENTRIES - 1;

	return hash;
}

static inline struct mtk_foe_mac_info *
mtk_foe_entry_l2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
{
	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);

	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
		return &entry->bridge.l2;

	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		return &entry->ipv6.l2;

	return &entry->ipv4.l2;
}

static inline u32 *
mtk_foe_entry_ib2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
{
	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);

	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
		return &entry->bridge.ib2;

	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		return &entry->ipv6.ib2;

	return &entry->ipv4.ib2;
}

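/* Initialize a FOE entry in the bind state: fill the ib1/ib2 control words
 * for the given packet type, L4 protocol and PSE destination port, and set
 * the L2 MAC addresses and ethertype.
 */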
int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			  int type, int l4proto, u8 pse_port, u8 *src_mac,
			  u8 *dest_mac)
{
	struct mtk_foe_mac_info *l2;
	u32 ports_pad, val;

	memset(entry, 0, sizeof(*entry));

	if (mtk_is_netsys_v2_or_greater(eth)) {
		val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
		      FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE_V2, type) |
		      FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
		      MTK_FOE_IB1_BIND_CACHE_V2 | MTK_FOE_IB1_BIND_TTL_V2;
		entry->ib1 = val;

		val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, pse_port) |
		      FIELD_PREP(MTK_FOE_IB2_PORT_AG_V2, 0xf);
	} else {
		int port_mg = eth->soc->offload_version > 1 ? 0 : 0x3f;

		val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
		      FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
		      FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
		      MTK_FOE_IB1_BIND_CACHE | MTK_FOE_IB1_BIND_TTL;
		entry->ib1 = val;

		val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port) |
		      FIELD_PREP(MTK_FOE_IB2_PORT_MG, port_mg) |
		      FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f);
	}

	if (is_multicast_ether_addr(dest_mac))
		val |= mtk_get_ib2_multicast_mask(eth);

	ports_pad = 0xa5a5a500 | (l4proto & 0xff);
	if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
		entry->ipv4.orig.ports = ports_pad;
	if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
		entry->ipv6.ports = ports_pad;

	if (type == MTK_PPE_PKT_TYPE_BRIDGE) {
		ether_addr_copy(entry->bridge.src_mac, src_mac);
		ether_addr_copy(entry->bridge.dest_mac, dest_mac);
		entry->bridge.ib2 = val;
		l2 = &entry->bridge.l2;
	} else if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
		entry->ipv6.ib2 = val;
		l2 = &entry->ipv6.l2;
	} else {
		entry->ipv4.ib2 = val;
		l2 = &entry->ipv4.l2;
	}

	l2->dest_mac_hi = get_unaligned_be32(dest_mac);
	l2->dest_mac_lo = get_unaligned_be16(dest_mac + 4);
	l2->src_mac_hi = get_unaligned_be32(src_mac);
	l2->src_mac_lo = get_unaligned_be16(src_mac + 4);

	if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
		l2->etype = ETH_P_IPV6;
	else
		l2->etype = ETH_P_IP;

	return 0;
}

int mtk_foe_entry_set_pse_port(struct mtk_eth *eth,
			       struct mtk_foe_entry *entry, u8 port)
{
	u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
	u32 val = *ib2;

	if (mtk_is_netsys_v2_or_greater(eth)) {
		val &= ~MTK_FOE_IB2_DEST_PORT_V2;
		val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, port);
	} else {
		val &= ~MTK_FOE_IB2_DEST_PORT;
		val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port);
	}
	*ib2 = val;

	return 0;
}

int mtk_foe_entry_set_ipv4_tuple(struct mtk_eth *eth,
				 struct mtk_foe_entry *entry, bool egress,
				 __be32 src_addr, __be16 src_port,
				 __be32 dest_addr, __be16 dest_port)
{
	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
	struct mtk_ipv4_tuple *t;

	switch (type) {
	case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
		if (egress) {
			t = &entry->ipv4.new;
			break;
		}
		fallthrough;
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
	case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
		t = &entry->ipv4.orig;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
		entry->ipv6_6rd.tunnel_src_ip = be32_to_cpu(src_addr);
		entry->ipv6_6rd.tunnel_dest_ip = be32_to_cpu(dest_addr);
		return 0;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	t->src_ip = be32_to_cpu(src_addr);
	t->dest_ip = be32_to_cpu(dest_addr);

	if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
		return 0;

	t->src_port = be16_to_cpu(src_port);
	t->dest_port = be16_to_cpu(dest_port);

	return 0;
}

int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth,
				 struct mtk_foe_entry *entry,
				 __be32 *src_addr, __be16 src_port,
				 __be32 *dest_addr, __be16 dest_port)
{
	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
	u32 *src, *dest;
	int i;

	switch (type) {
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
		src = entry->dslite.tunnel_src_ip;
		dest = entry->dslite.tunnel_dest_ip;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
		entry->ipv6.src_port = be16_to_cpu(src_port);
		entry->ipv6.dest_port = be16_to_cpu(dest_port);
		fallthrough;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
		src = entry->ipv6.src_ip;
		dest = entry->ipv6.dest_ip;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	for (i = 0; i < 4; i++)
		src[i] = be32_to_cpu(src_addr[i]);
	for (i = 0; i < 4; i++)
		dest[i] = be32_to_cpu(dest_addr[i]);

	return 0;
}

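/* Insert a MTK DSA special tag: the DSA port is carried as BIT(port) in the
 * etype field, and BIT(8) is set when a VLAN layer is already present.
 */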
int mtk_foe_entry_set_dsa(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			  int port)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);

	l2->etype = BIT(port);

	if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)))
		entry->ib1 |= mtk_prep_ib1_vlan_layer(eth, 1);
	else
		l2->etype |= BIT(8);

	entry->ib1 &= ~mtk_get_ib1_vlan_tag_mask(eth);

	return 0;
}

int mtk_foe_entry_set_vlan(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			   int vid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);

	switch (mtk_get_ib1_vlan_layer(eth, entry->ib1)) {
	case 0:
		entry->ib1 |= mtk_get_ib1_vlan_tag_mask(eth) |
			      mtk_prep_ib1_vlan_layer(eth, 1);
		l2->vlan1 = vid;
		return 0;
	case 1:
		if (!(entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth))) {
			l2->vlan1 = vid;
			l2->etype |= BIT(8);
		} else {
			l2->vlan2 = vid;
			entry->ib1 += mtk_prep_ib1_vlan_layer(eth, 1);
		}
		return 0;
	default:
		return -ENOSPC;
	}
}

int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			    int sid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);

	if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)) ||
	    (entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth)))
		l2->etype = ETH_P_PPP_SES;

	entry->ib1 |= mtk_get_ib1_ppoe_mask(eth);
	l2->pppoe_id = sid;

	return 0;
}

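/* Direct the flow to a wireless device: store the tx queue, BSS and WCID in
 * the WDMA info fields, whose layout depends on the SoC version.
 */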
int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			   int wdma_idx, int txq, int bss, int wcid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
	u32 *ib2 = mtk_foe_entry_ib2(eth, entry);

	switch (eth->soc->version) {
	case 3:
		*ib2 &= ~MTK_FOE_IB2_PORT_MG_V2;
		*ib2 |= FIELD_PREP(MTK_FOE_IB2_RX_IDX, txq) |
			MTK_FOE_IB2_WDMA_WINFO_V2;
		l2->w3info = FIELD_PREP(MTK_FOE_WINFO_WCID_V3, wcid) |
			     FIELD_PREP(MTK_FOE_WINFO_BSS_V3, bss);
		break;
	case 2:
		*ib2 &= ~MTK_FOE_IB2_PORT_MG_V2;
		*ib2 |= FIELD_PREP(MTK_FOE_IB2_RX_IDX, txq) |
			MTK_FOE_IB2_WDMA_WINFO_V2;
		l2->winfo = FIELD_PREP(MTK_FOE_WINFO_WCID, wcid) |
			    FIELD_PREP(MTK_FOE_WINFO_BSS, bss);
		break;
	default:
		*ib2 &= ~MTK_FOE_IB2_PORT_MG;
		*ib2 |= MTK_FOE_IB2_WDMA_WINFO;
		if (wdma_idx)
			*ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;
		l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
			    FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
			    FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);
		break;
	}

	return 0;
}

int mtk_foe_entry_set_queue(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			    unsigned int queue)
{
	u32 *ib2 = mtk_foe_entry_ib2(eth, entry);

	if (mtk_is_netsys_v2_or_greater(eth)) {
		*ib2 &= ~MTK_FOE_IB2_QID_V2;
		*ib2 |= FIELD_PREP(MTK_FOE_IB2_QID_V2, queue);
		*ib2 |= MTK_FOE_IB2_PSE_QOS_V2;
	} else {
		*ib2 &= ~MTK_FOE_IB2_QID;
		*ib2 |= FIELD_PREP(MTK_FOE_IB2_QID, queue);
		*ib2 |= MTK_FOE_IB2_PSE_QOS;
	}

	return 0;
}

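/* Compare a hardware FOE entry against the tuple stored in a software flow
 * entry: the UDP flag in ib1 and the tuple data following ib1, up to but
 * not including the ib2/_rsv word.
 */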
static bool
mtk_flow_entry_match(struct mtk_eth *eth, struct mtk_flow_entry *entry,
		     struct mtk_foe_entry *data)
{
	int type, len;

	if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
		return false;

	type = mtk_get_ib1_pkt_type(eth, entry->data.ib1);
	if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		len = offsetof(struct mtk_foe_entry, ipv6._rsv);
	else
		len = offsetof(struct mtk_foe_entry, ipv4.ib2);

	return !memcmp(&entry->data.data, &data->data, len - 4);
}

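/* Unlink a flow entry and invalidate its hardware FOE entry. For an L2
 * flow, also remove the rhashtable node and clear all attached subflows.
 * Must be called with ppe_lock held.
 */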
static void
__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	struct hlist_head *head;
	struct hlist_node *tmp;

	if (entry->type == MTK_FLOW_TYPE_L2) {
		rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node,
				       mtk_flow_l2_ht_params);

		head = &entry->l2_flows;
		hlist_for_each_entry_safe(entry, tmp, head, l2_data.list)
			__mtk_foe_entry_clear(ppe, entry);
		return;
	}

	hlist_del_init(&entry->list);
	if (entry->hash != 0xffff) {
		struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, entry->hash);

		hwe->ib1 &= ~MTK_FOE_IB1_STATE;
		hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_INVALID);
		dma_wmb();
		mtk_ppe_cache_clear(ppe);

		if (ppe->accounting) {
			struct mtk_foe_accounting *acct;

			acct = ppe->acct_table + entry->hash * sizeof(*acct);
			acct->packets = 0;
			acct->bytes = 0;
		}
	}
	entry->hash = 0xffff;

	if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW)
		return;

	hlist_del_init(&entry->l2_data.list);
	kfree(entry);
}

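/* Return the idle time of an entry in hardware timestamp units, taking
 * wraparound of the timestamp counter into account.
 */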
static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1)
{
	u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
	u16 now = mtk_eth_timestamp(ppe->eth);
	u16 timestamp = ib1 & ib1_ts_mask;

	if (timestamp > now)
		return ib1_ts_mask + 1 - timestamp + now;
	else
		return now - timestamp;
}

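/* Refresh an L2 flow: drop subflows whose hardware entries are no longer
 * bound, and copy the most recent timestamp (smallest idle time) among the
 * remaining subflows back into the base entry.
 */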
static void
mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
	struct mtk_flow_entry *cur;
	struct mtk_foe_entry *hwe;
	struct hlist_node *tmp;
	int idle;

	idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
	hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) {
		int cur_idle;
		u32 ib1;

		hwe = mtk_foe_get_entry(ppe, cur->hash);
		ib1 = READ_ONCE(hwe->ib1);

		if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) {
			cur->hash = 0xffff;
			__mtk_foe_entry_clear(ppe, cur);
			continue;
		}

		cur_idle = __mtk_foe_entry_idle_time(ppe, ib1);
		if (cur_idle >= idle)
			continue;

		idle = cur_idle;
		entry->data.ib1 &= ~ib1_ts_mask;
		entry->data.ib1 |= hwe->ib1 & ib1_ts_mask;
	}
}

static void
mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	struct mtk_foe_entry foe = {};
	struct mtk_foe_entry *hwe;

	spin_lock_bh(&ppe_lock);

	if (entry->type == MTK_FLOW_TYPE_L2) {
		mtk_flow_entry_update_l2(ppe, entry);
		goto out;
	}

	if (entry->hash == 0xffff)
		goto out;

	hwe = mtk_foe_get_entry(ppe, entry->hash);
	memcpy(&foe, hwe, ppe->eth->soc->foe_entry_size);
	if (!mtk_flow_entry_match(ppe->eth, entry, &foe)) {
		entry->hash = 0xffff;
		goto out;
	}

	entry->data.ib1 = foe.ib1;

out:
	spin_unlock_bh(&ppe_lock);
}

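/* Write an entry into the hardware FOE table at @hash: stamp it with the
 * current timestamp, copy the data words before publishing ib1 (so the
 * hardware never sees a bound ib1 with stale data), then flush the PPE
 * cache.
 */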
static void
__mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
		       u16 hash)
{
	struct mtk_eth *eth = ppe->eth;
	u16 timestamp = mtk_eth_timestamp(eth);
	struct mtk_foe_entry *hwe;
	u32 val;

	if (mtk_is_netsys_v2_or_greater(eth)) {
		entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP_V2;
		entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP_V2,
					 timestamp);
	} else {
		entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
		entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP,
					 timestamp);
	}

	hwe = mtk_foe_get_entry(ppe, hash);
	memcpy(&hwe->data, &entry->data, eth->soc->foe_entry_size - sizeof(hwe->ib1));
	wmb();
	hwe->ib1 = entry->ib1;

	if (ppe->accounting) {
		if (mtk_is_netsys_v2_or_greater(eth))
			val = MTK_FOE_IB2_MIB_CNT_V2;
		else
			val = MTK_FOE_IB2_MIB_CNT;
		*mtk_foe_entry_ib2(eth, hwe) |= val;
	}

	dma_wmb();

	mtk_ppe_cache_clear(ppe);
}

void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	spin_lock_bh(&ppe_lock);
	__mtk_foe_entry_clear(ppe, entry);
	spin_unlock_bh(&ppe_lock);
}

static int
mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	struct mtk_flow_entry *prev;

	entry->type = MTK_FLOW_TYPE_L2;

	prev = rhashtable_lookup_get_insert_fast(&ppe->l2_flows, &entry->l2_node,
						 mtk_flow_l2_ht_params);
	if (likely(!prev))
		return 0;

	if (IS_ERR(prev))
		return PTR_ERR(prev);

	return rhashtable_replace_fast(&ppe->l2_flows, &prev->l2_node,
				       &entry->l2_node, mtk_flow_l2_ht_params);
}

int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	const struct mtk_soc_data *soc = ppe->eth->soc;
	int type = mtk_get_ib1_pkt_type(ppe->eth, entry->data.ib1);
	u32 hash;

	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
		return mtk_foe_entry_commit_l2(ppe, entry);

	hash = mtk_ppe_hash_entry(ppe->eth, &entry->data);
	entry->hash = 0xffff;
	spin_lock_bh(&ppe_lock);
	hlist_add_head(&entry->list, &ppe->foe_flow[hash / soc->hash_offset]);
	spin_unlock_bh(&ppe_lock);

	return 0;
}

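/* Create and commit a subflow for an L2 (bridge) flow: combine the tuple
 * from the hardware entry at @hash with the L2 rewrite info and ib2 of the
 * base entry, then bind the result in hardware.
 */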
static void
mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
			     u16 hash)
{
	const struct mtk_soc_data *soc = ppe->eth->soc;
	struct mtk_flow_entry *flow_info;
	struct mtk_foe_entry foe = {}, *hwe;
	struct mtk_foe_mac_info *l2;
	u32 ib1_mask = mtk_get_ib1_pkt_type_mask(ppe->eth) | MTK_FOE_IB1_UDP;
	int type;

	flow_info = kzalloc(sizeof(*flow_info), GFP_ATOMIC);
	if (!flow_info)
		return;

	flow_info->l2_data.base_flow = entry;
	flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW;
	flow_info->hash = hash;
	hlist_add_head(&flow_info->list,
		       &ppe->foe_flow[hash / soc->hash_offset]);
	hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);

	hwe = mtk_foe_get_entry(ppe, hash);
	memcpy(&foe, hwe, soc->foe_entry_size);
	foe.ib1 &= ib1_mask;
	foe.ib1 |= entry->data.ib1 & ~ib1_mask;

	l2 = mtk_foe_entry_l2(ppe->eth, &foe);
	memcpy(l2, &entry->data.bridge.l2, sizeof(*l2));

	type = mtk_get_ib1_pkt_type(ppe->eth, foe.ib1);
	if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT)
		memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new));
	else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP)
		l2->etype = ETH_P_IPV6;

	*mtk_foe_entry_ib2(ppe->eth, &foe) = entry->data.bridge.ib2;

	__mtk_foe_entry_commit(ppe, &foe, hash);
}

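/* Called for packets that hit an unbound FOE entry: bind a matching
 * software flow for @hash in hardware, or, for bridged traffic, look up the
 * L2 flow by MAC addresses and VLAN and create a subflow for it.
 */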
void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
{
	const struct mtk_soc_data *soc = ppe->eth->soc;
	struct hlist_head *head = &ppe->foe_flow[hash / soc->hash_offset];
	struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, hash);
	struct mtk_flow_entry *entry;
	struct mtk_foe_bridge key = {};
	struct hlist_node *n;
	struct ethhdr *eh;
	bool found = false;
	u8 *tag;

	spin_lock_bh(&ppe_lock);

	if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND)
		goto out;

	hlist_for_each_entry_safe(entry, n, head, list) {
		if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) {
			if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) ==
				     MTK_FOE_STATE_BIND))
				continue;

			entry->hash = 0xffff;
			__mtk_foe_entry_clear(ppe, entry);
			continue;
		}

		if (found || !mtk_flow_entry_match(ppe->eth, entry, hwe)) {
			if (entry->hash != 0xffff)
				entry->hash = 0xffff;
			continue;
		}

		entry->hash = hash;
		__mtk_foe_entry_commit(ppe, &entry->data, hash);
		found = true;
	}

	if (found)
		goto out;

	eh = eth_hdr(skb);
	ether_addr_copy(key.dest_mac, eh->h_dest);
	ether_addr_copy(key.src_mac, eh->h_source);
	tag = skb->data - 2;
	key.vlan = 0;
	switch (skb->protocol) {
#if IS_ENABLED(CONFIG_NET_DSA)
	case htons(ETH_P_XDSA):
		if (!netdev_uses_dsa(skb->dev) ||
		    skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
			goto out;

		if (!skb_metadata_dst(skb))
			tag += 4;

		if (get_unaligned_be16(tag) != ETH_P_8021Q)
			break;

		fallthrough;
#endif
	case htons(ETH_P_8021Q):
		key.vlan = get_unaligned_be16(tag + 2) & VLAN_VID_MASK;
		break;
	default:
		break;
	}

	entry = rhashtable_lookup_fast(&ppe->l2_flows, &key, mtk_flow_l2_ht_params);
	if (!entry)
		goto out;

	mtk_foe_entry_commit_subflow(ppe, entry, hash);

out:
	spin_unlock_bh(&ppe_lock);
}

int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	mtk_flow_entry_update(ppe, entry);

	return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
}

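/* Quiesce the PPE before a hardware reset: disable keepalive and scan mode,
 * then wait for the table to become idle.
 */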
int mtk_ppe_prepare_reset(struct mtk_ppe *ppe)
{
	if (!ppe)
		return -EINVAL;

	/* disable KA */
	ppe_clear(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_KEEPALIVE);
	ppe_clear(ppe, MTK_PPE_BIND_LMT1, MTK_PPE_NTU_KEEPALIVE);
	ppe_w32(ppe, MTK_PPE_KEEPALIVE, 0);
	usleep_range(10000, 11000);

	/* set KA timer to maximum */
	ppe_set(ppe, MTK_PPE_BIND_LMT1, MTK_PPE_NTU_KEEPALIVE);
	ppe_w32(ppe, MTK_PPE_KEEPALIVE, 0xffffffff);

	/* set KA tick select */
	ppe_set(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_TICK_SEL);
	ppe_set(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_KEEPALIVE);
	usleep_range(10000, 11000);

	/* disable scan mode */
	ppe_clear(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_SCAN_MODE);
	usleep_range(10000, 11000);

	return mtk_ppe_wait_busy(ppe);
}

struct mtk_foe_accounting *mtk_foe_entry_get_mib(struct mtk_ppe *ppe, u32 index,
						 struct mtk_foe_accounting *diff)
{
	struct mtk_foe_accounting *acct;
	int size = sizeof(struct mtk_foe_accounting);
	u64 bytes, packets;

	if (!ppe->accounting)
		return NULL;

	if (mtk_mib_entry_read(ppe, index, &bytes, &packets))
		return NULL;

	acct = ppe->acct_table + index * size;

	acct->bytes += bytes;
	acct->packets += packets;

	if (diff) {
		diff->bytes = bytes;
		diff->packets = packets;
	}

	return acct;
}

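/* Allocate the per-PPE state, the DMA-coherent FOE and MIB tables, and the
 * software flow tracking structures for one PPE instance.
 */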
struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int index)
{
	bool accounting = eth->soc->has_accounting;
	const struct mtk_soc_data *soc = eth->soc;
	struct mtk_foe_accounting *acct;
	struct device *dev = eth->dev;
	struct mtk_mib_entry *mib;
	struct mtk_ppe *ppe;
	u32 foe_flow_size;
	void *foe;

	ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL);
	if (!ppe)
		return NULL;

	rhashtable_init(&ppe->l2_flows, &mtk_flow_l2_ht_params);

	/* need to allocate a separate device, since the PPE DMA access is
	 * not coherent.
	 */
	ppe->base = base;
	ppe->eth = eth;
	ppe->dev = dev;
	ppe->version = eth->soc->offload_version;
	ppe->accounting = accounting;

	foe = dmam_alloc_coherent(ppe->dev,
				  MTK_PPE_ENTRIES * soc->foe_entry_size,
				  &ppe->foe_phys, GFP_KERNEL);
	if (!foe)
		goto err_free_l2_flows;

	ppe->foe_table = foe;

	foe_flow_size = (MTK_PPE_ENTRIES / soc->hash_offset) *
			sizeof(*ppe->foe_flow);
	ppe->foe_flow = devm_kzalloc(dev, foe_flow_size, GFP_KERNEL);
	if (!ppe->foe_flow)
		goto err_free_l2_flows;

	if (accounting) {
		mib = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*mib),
					  &ppe->mib_phys, GFP_KERNEL);
		if (!mib)
			goto err_free_l2_flows;

		ppe->mib_table = mib;

		acct = devm_kzalloc(dev, MTK_PPE_ENTRIES * sizeof(*acct),
				    GFP_KERNEL);
		if (!acct)
			goto err_free_l2_flows;

		ppe->acct_table = acct;
	}

	mtk_ppe_debugfs_init(ppe, index);

	return ppe;

err_free_l2_flows:
	rhashtable_destroy(&ppe->l2_flows);
	return NULL;
}

void mtk_ppe_deinit(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) {
		if (!eth->ppe[i])
			return;
		rhashtable_destroy(&eth->ppe[i]->l2_flows);
	}
}

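/* Zero the FOE table and, on MT7621, mark the entries that would cross a
 * 1024 byte boundary as static.
 */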
static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
{
	static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
	int i, k;

	memset(ppe->foe_table, 0,
	       MTK_PPE_ENTRIES * ppe->eth->soc->foe_entry_size);

	if (!IS_ENABLED(CONFIG_SOC_MT7621))
		return;

	/* skip all entries that cross the 1024 byte boundary */
	for (i = 0; i < MTK_PPE_ENTRIES; i += 128) {
		for (k = 0; k < ARRAY_SIZE(skip); k++) {
			struct mtk_foe_entry *hwe;

			hwe = mtk_foe_get_entry(ppe, i + skip[k]);
			hwe->ib1 |= MTK_FOE_IB1_STATIC;
		}
	}
}

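/* Program the FOE table base and the PPE configuration registers (aging,
 * bind thresholds, supported flow types) and enable the offload engine.
 */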
void mtk_ppe_start(struct mtk_ppe *ppe)
{
	u32 val;

	if (!ppe)
		return;

	mtk_ppe_init_foe_table(ppe);
	ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys);

	val = MTK_PPE_TB_CFG_AGE_NON_L4 |
	      MTK_PPE_TB_CFG_AGE_UNBIND |
	      MTK_PPE_TB_CFG_AGE_TCP |
	      MTK_PPE_TB_CFG_AGE_UDP |
	      MTK_PPE_TB_CFG_AGE_TCP_FIN |
	      FIELD_PREP(MTK_PPE_TB_CFG_SEARCH_MISS,
			 MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD) |
	      FIELD_PREP(MTK_PPE_TB_CFG_KEEPALIVE,
			 MTK_PPE_KEEPALIVE_DISABLE) |
	      FIELD_PREP(MTK_PPE_TB_CFG_HASH_MODE, 1) |
	      FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE,
			 MTK_PPE_SCAN_MODE_CHECK_AGE) |
	      FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
			 MTK_PPE_ENTRIES_SHIFT);
	if (mtk_is_netsys_v2_or_greater(ppe->eth))
		val |= MTK_PPE_TB_CFG_INFO_SEL;
	if (!mtk_is_netsys_v3_or_greater(ppe->eth))
		val |= MTK_PPE_TB_CFG_ENTRY_80B;
	ppe_w32(ppe, MTK_PPE_TB_CFG, val);

	ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK,
		MTK_PPE_IP_PROTO_CHK_IPV4 | MTK_PPE_IP_PROTO_CHK_IPV6);

	mtk_ppe_cache_enable(ppe, true);

	val = MTK_PPE_FLOW_CFG_IP6_3T_ROUTE |
	      MTK_PPE_FLOW_CFG_IP6_5T_ROUTE |
	      MTK_PPE_FLOW_CFG_IP6_6RD |
	      MTK_PPE_FLOW_CFG_IP4_NAT |
	      MTK_PPE_FLOW_CFG_IP4_NAPT |
	      MTK_PPE_FLOW_CFG_IP4_DSLITE |
	      MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
	if (mtk_is_netsys_v2_or_greater(ppe->eth))
		val |= MTK_PPE_MD_TOAP_BYP_CRSN0 |
		       MTK_PPE_MD_TOAP_BYP_CRSN1 |
		       MTK_PPE_MD_TOAP_BYP_CRSN2 |
		       MTK_PPE_FLOW_CFG_IP4_HASH_GRE_KEY;
	else
		val |= MTK_PPE_FLOW_CFG_IP4_TCP_FRAG |
		       MTK_PPE_FLOW_CFG_IP4_UDP_FRAG;
	ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);

	val = FIELD_PREP(MTK_PPE_UNBIND_AGE_MIN_PACKETS, 1000) |
	      FIELD_PREP(MTK_PPE_UNBIND_AGE_DELTA, 3);
	ppe_w32(ppe, MTK_PPE_UNBIND_AGE, val);

	val = FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_UDP, 12) |
	      FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_NON_L4, 1);
	ppe_w32(ppe, MTK_PPE_BIND_AGE0, val);

	val = FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
	      FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP, 7);
	ppe_w32(ppe, MTK_PPE_BIND_AGE1, val);

	val = MTK_PPE_BIND_LIMIT0_QUARTER | MTK_PPE_BIND_LIMIT0_HALF;
	ppe_w32(ppe, MTK_PPE_BIND_LIMIT0, val);

	val = MTK_PPE_BIND_LIMIT1_FULL |
	      FIELD_PREP(MTK_PPE_BIND_LIMIT1_NON_L4, 1);
	ppe_w32(ppe, MTK_PPE_BIND_LIMIT1, val);

	val = FIELD_PREP(MTK_PPE_BIND_RATE_BIND, 30) |
	      FIELD_PREP(MTK_PPE_BIND_RATE_PREBIND, 1);
	ppe_w32(ppe, MTK_PPE_BIND_RATE, val);

	/* enable PPE */
	val = MTK_PPE_GLO_CFG_EN |
	      MTK_PPE_GLO_CFG_IP4_L4_CS_DROP |
	      MTK_PPE_GLO_CFG_IP4_CS_DROP |
	      MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE;
	ppe_w32(ppe, MTK_PPE_GLO_CFG, val);

	ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);

	if (mtk_is_netsys_v2_or_greater(ppe->eth)) {
		ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT1, 0xcb777);
		ppe_w32(ppe, MTK_PPE_SBW_CTRL, 0x7f);
	}

	if (ppe->accounting && ppe->mib_phys) {
		ppe_w32(ppe, MTK_PPE_MIB_TB_BASE, ppe->mib_phys);
		ppe_m32(ppe, MTK_PPE_MIB_CFG, MTK_PPE_MIB_CFG_EN,
			MTK_PPE_MIB_CFG_EN);
		ppe_m32(ppe, MTK_PPE_MIB_CFG, MTK_PPE_MIB_CFG_RD_CLR,
			MTK_PPE_MIB_CFG_RD_CLR);
		ppe_m32(ppe, MTK_PPE_MIB_CACHE_CTL, MTK_PPE_MIB_CACHE_CTL_EN,
			MTK_PPE_MIB_CFG_RD_CLR);
	}
}

int mtk_ppe_stop(struct mtk_ppe *ppe)
{
	u32 val;
	int i;

	if (!ppe)
		return 0;

	for (i = 0; i < MTK_PPE_ENTRIES; i++) {
		struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, i);

		hwe->ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
				      MTK_FOE_STATE_INVALID);
	}

	mtk_ppe_cache_enable(ppe, false);

	/* disable aging */
	val = MTK_PPE_TB_CFG_AGE_NON_L4 |
	      MTK_PPE_TB_CFG_AGE_UNBIND |
	      MTK_PPE_TB_CFG_AGE_TCP |
	      MTK_PPE_TB_CFG_AGE_UDP |
	      MTK_PPE_TB_CFG_AGE_TCP_FIN |
	      MTK_PPE_TB_CFG_SCAN_MODE;
	ppe_clear(ppe, MTK_PPE_TB_CFG, val);

	if (mtk_ppe_wait_busy(ppe))
		return -ETIMEDOUT;

	/* disable offload engine */
	ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN);
	ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0);

	return 0;
}