// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/dsa.h>
#include "mtk_eth_soc.h"
#include "mtk_ppe.h"
#include "mtk_ppe_regs.h"

static DEFINE_SPINLOCK(ppe_lock);

static const struct rhashtable_params mtk_flow_l2_ht_params = {
	.head_offset = offsetof(struct mtk_flow_entry, l2_node),
	.key_offset = offsetof(struct mtk_flow_entry, data.bridge),
	.key_len = offsetof(struct mtk_foe_bridge, key_end),
	.automatic_shrinking = true,
};

static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	writel(val, ppe->base + reg);
}

static u32 ppe_r32(struct mtk_ppe *ppe, u32 reg)
{
	return readl(ppe->base + reg);
}

static u32 ppe_m32(struct mtk_ppe *ppe, u32 reg, u32 mask, u32 set)
{
	u32 val;

	val = ppe_r32(ppe, reg);
	val &= ~mask;
	val |= set;
	ppe_w32(ppe, reg, val);

	return val;
}

static u32 ppe_set(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	return ppe_m32(ppe, reg, 0, val);
}

static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	return ppe_m32(ppe, reg, val, 0);
}

static u32 mtk_eth_timestamp(struct mtk_eth *eth)
{
	return mtk_r32(eth, 0x0010) & mtk_get_ib1_ts_mask(eth);
}

static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
{
	int ret;
	u32 val;

	ret = readl_poll_timeout(ppe->base + MTK_PPE_GLO_CFG, val,
				 !(val & MTK_PPE_GLO_CFG_BUSY),
				 20, MTK_PPE_WAIT_TIMEOUT_US);

	if (ret)
		dev_err(ppe->dev, "PPE table busy");

	return ret;
}

static void mtk_ppe_cache_clear(struct mtk_ppe *ppe)
{
	ppe_set(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
	ppe_clear(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
}

static void mtk_ppe_cache_enable(struct mtk_ppe *ppe, bool enable)
{
	mtk_ppe_cache_clear(ppe);

	ppe_m32(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_EN,
		enable * MTK_PPE_CACHE_CTL_EN);
}

static u32 mtk_ppe_hash_entry(struct mtk_eth *eth, struct mtk_foe_entry *e)
{
	u32 hv1, hv2, hv3;
	u32 hash;

	switch (mtk_get_ib1_pkt_type(eth, e->ib1)) {
	case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
	case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
		hv1 = e->ipv4.orig.ports;
		hv2 = e->ipv4.orig.dest_ip;
		hv3 = e->ipv4.orig.src_ip;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
		hv1 = e->ipv6.src_ip[3] ^ e->ipv6.dest_ip[3];
		hv1 ^= e->ipv6.ports;

		hv2 = e->ipv6.src_ip[2] ^ e->ipv6.dest_ip[2];
		hv2 ^= e->ipv6.dest_ip[0];

		hv3 = e->ipv6.src_ip[1] ^ e->ipv6.dest_ip[1];
		hv3 ^= e->ipv6.src_ip[0];
		break;
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
	default:
		WARN_ON_ONCE(1);
		return MTK_PPE_HASH_MASK;
	}

	hash = (hv1 & hv2) | ((~hv1) & hv3);
	hash = (hash >> 24) | ((hash & 0xffffff) << 8);
	hash ^= hv1 ^ hv2 ^ hv3;
	hash ^= hash >> 16;
	hash <<= (ffs(eth->soc->hash_offset) - 1);
	hash &= MTK_PPE_ENTRIES - 1;

	return hash;
}

static inline struct mtk_foe_mac_info *
mtk_foe_entry_l2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
{
	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);

	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
		return &entry->bridge.l2;

	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		return &entry->ipv6.l2;

	return &entry->ipv4.l2;
}

static inline u32 *
mtk_foe_entry_ib2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
{
	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);

	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
		return &entry->bridge.ib2;

	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		return &entry->ipv6.ib2;

	return &entry->ipv4.ib2;
}

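/* Prepare a FOE entry for the given packet type: encode the packet type,
 * L4 protocol and bind/aging flags in IB1, the destination PSE port in IB2,
 * and the L2 MAC addresses and ethertype in the mac_info block.
 */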
int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			  int type, int l4proto, u8 pse_port, u8 *src_mac,
			  u8 *dest_mac)
{
	struct mtk_foe_mac_info *l2;
	u32 ports_pad, val;

	memset(entry, 0, sizeof(*entry));

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
		      FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE_V2, type) |
		      FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
		      MTK_FOE_IB1_BIND_CACHE_V2 | MTK_FOE_IB1_BIND_TTL_V2;
		entry->ib1 = val;

		val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, pse_port) |
		      FIELD_PREP(MTK_FOE_IB2_PORT_AG_V2, 0xf);
	} else {
		val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
		      FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
		      FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
		      MTK_FOE_IB1_BIND_CACHE | MTK_FOE_IB1_BIND_TTL;
		entry->ib1 = val;

		val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port) |
		      FIELD_PREP(MTK_FOE_IB2_PORT_MG, 0x3f) |
		      FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f);
	}

	if (is_multicast_ether_addr(dest_mac))
		val |= mtk_get_ib2_multicast_mask(eth);

	ports_pad = 0xa5a5a500 | (l4proto & 0xff);
	if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
		entry->ipv4.orig.ports = ports_pad;
	if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
		entry->ipv6.ports = ports_pad;

	if (type == MTK_PPE_PKT_TYPE_BRIDGE) {
		ether_addr_copy(entry->bridge.src_mac, src_mac);
		ether_addr_copy(entry->bridge.dest_mac, dest_mac);
		entry->bridge.ib2 = val;
		l2 = &entry->bridge.l2;
	} else if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
		entry->ipv6.ib2 = val;
		l2 = &entry->ipv6.l2;
	} else {
		entry->ipv4.ib2 = val;
		l2 = &entry->ipv4.l2;
	}

	l2->dest_mac_hi = get_unaligned_be32(dest_mac);
	l2->dest_mac_lo = get_unaligned_be16(dest_mac + 4);
	l2->src_mac_hi = get_unaligned_be32(src_mac);
	l2->src_mac_lo = get_unaligned_be16(src_mac + 4);

	if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
		l2->etype = ETH_P_IPV6;
	else
		l2->etype = ETH_P_IP;

	return 0;
}

int mtk_foe_entry_set_pse_port(struct mtk_eth *eth,
			       struct mtk_foe_entry *entry, u8 port)
{
	u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
	u32 val = *ib2;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		val &= ~MTK_FOE_IB2_DEST_PORT_V2;
		val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, port);
	} else {
		val &= ~MTK_FOE_IB2_DEST_PORT;
		val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port);
	}
	*ib2 = val;

	return 0;
}

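/* Fill in the IPv4 tuple. For HNAPT entries, egress selects the translated
 * ("new") tuple while ingress fills the original one; for 6RD the addresses
 * describe the IPv4 tunnel endpoints, and plain IPv4 route entries carry no
 * L4 ports.
 */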
int mtk_foe_entry_set_ipv4_tuple(struct mtk_eth *eth,
				 struct mtk_foe_entry *entry, bool egress,
				 __be32 src_addr, __be16 src_port,
				 __be32 dest_addr, __be16 dest_port)
{
	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
	struct mtk_ipv4_tuple *t;

	switch (type) {
	case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
		if (egress) {
			t = &entry->ipv4.new;
			break;
		}
		fallthrough;
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
	case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
		t = &entry->ipv4.orig;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
		entry->ipv6_6rd.tunnel_src_ip = be32_to_cpu(src_addr);
		entry->ipv6_6rd.tunnel_dest_ip = be32_to_cpu(dest_addr);
		return 0;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	t->src_ip = be32_to_cpu(src_addr);
	t->dest_ip = be32_to_cpu(dest_addr);

	if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
		return 0;

	t->src_port = be16_to_cpu(src_port);
	t->dest_port = be16_to_cpu(dest_port);

	return 0;
}

int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth,
				 struct mtk_foe_entry *entry,
				 __be32 *src_addr, __be16 src_port,
				 __be32 *dest_addr, __be16 dest_port)
{
	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
	u32 *src, *dest;
	int i;

	switch (type) {
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
		src = entry->dslite.tunnel_src_ip;
		dest = entry->dslite.tunnel_dest_ip;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
		entry->ipv6.src_port = be16_to_cpu(src_port);
		entry->ipv6.dest_port = be16_to_cpu(dest_port);
		fallthrough;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
		src = entry->ipv6.src_ip;
		dest = entry->ipv6.dest_ip;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	for (i = 0; i < 4; i++)
		src[i] = be32_to_cpu(src_addr[i]);
	for (i = 0; i < 4; i++)
		dest[i] = be32_to_cpu(dest_addr[i]);

	return 0;
}

int mtk_foe_entry_set_dsa(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			  int port)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);

	l2->etype = BIT(port);

	if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)))
		entry->ib1 |= mtk_prep_ib1_vlan_layer(eth, 1);
	else
		l2->etype |= BIT(8);

	entry->ib1 &= ~mtk_get_ib1_vlan_tag_mask(eth);

	return 0;
}

int mtk_foe_entry_set_vlan(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			   int vid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);

	switch (mtk_get_ib1_vlan_layer(eth, entry->ib1)) {
	case 0:
		entry->ib1 |= mtk_get_ib1_vlan_tag_mask(eth) |
			      mtk_prep_ib1_vlan_layer(eth, 1);
		l2->vlan1 = vid;
		return 0;
	case 1:
		if (!(entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth))) {
			l2->vlan1 = vid;
			l2->etype |= BIT(8);
		} else {
			l2->vlan2 = vid;
			entry->ib1 += mtk_prep_ib1_vlan_layer(eth, 1);
		}
		return 0;
	default:
		return -ENOSPC;
	}
}

int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			    int sid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);

	if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)) ||
	    (entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth)))
		l2->etype = ETH_P_PPP_SES;

	entry->ib1 |= mtk_get_ib1_ppoe_mask(eth);
	l2->pppoe_id = sid;

	return 0;
}

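/* Bind the entry to a WDMA (wireless) destination. On NETSYS v2 the WLAN
 * info (WCID/BSS) goes into the dedicated winfo field and the ring index is
 * encoded in IB2; on v1 everything is packed into the vlan2 field instead.
 */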
int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			   int wdma_idx, int txq, int bss, int wcid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
	u32 *ib2 = mtk_foe_entry_ib2(eth, entry);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		*ib2 &= ~MTK_FOE_IB2_PORT_MG_V2;
		*ib2 |= FIELD_PREP(MTK_FOE_IB2_RX_IDX, txq) |
			MTK_FOE_IB2_WDMA_WINFO_V2;
		l2->winfo = FIELD_PREP(MTK_FOE_WINFO_WCID, wcid) |
			    FIELD_PREP(MTK_FOE_WINFO_BSS, bss);
	} else {
		*ib2 &= ~MTK_FOE_IB2_PORT_MG;
		*ib2 |= MTK_FOE_IB2_WDMA_WINFO;
		if (wdma_idx)
			*ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;
		l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
			    FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
			    FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);
	}

	return 0;
}

static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry)
{
	return !(entry->ib1 & MTK_FOE_IB1_STATIC) &&
	       FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND;
}

static bool
mtk_flow_entry_match(struct mtk_eth *eth, struct mtk_flow_entry *entry,
		     struct mtk_foe_entry *data)
{
	int type, len;

	if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
		return false;

	type = mtk_get_ib1_pkt_type(eth, entry->data.ib1);
	if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		len = offsetof(struct mtk_foe_entry, ipv6._rsv);
	else
		len = offsetof(struct mtk_foe_entry, ipv4.ib2);

	return !memcmp(&entry->data.data, &data->data, len - 4);
}

static void
__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	struct hlist_head *head;
	struct hlist_node *tmp;

	if (entry->type == MTK_FLOW_TYPE_L2) {
		rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node,
				       mtk_flow_l2_ht_params);

		head = &entry->l2_flows;
		hlist_for_each_entry_safe(entry, tmp, head, l2_data.list)
			__mtk_foe_entry_clear(ppe, entry);
		return;
	}

	hlist_del_init(&entry->list);
	if (entry->hash != 0xffff) {
		struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, entry->hash);

		hwe->ib1 &= ~MTK_FOE_IB1_STATE;
		hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_INVALID);
		dma_wmb();
	}
	entry->hash = 0xffff;

	if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW)
		return;

	hlist_del_init(&entry->l2_data.list);
	kfree(entry);
}

static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1)
{
	u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
	u16 now = mtk_eth_timestamp(ppe->eth);
	u16 timestamp = ib1 & ib1_ts_mask;

	if (timestamp > now)
		return ib1_ts_mask + 1 - timestamp + now;
	else
		return now - timestamp;
}

static void
mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
	struct mtk_flow_entry *cur;
	struct mtk_foe_entry *hwe;
	struct hlist_node *tmp;
	int idle;

	idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
	hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) {
		int cur_idle;
		u32 ib1;

		hwe = mtk_foe_get_entry(ppe, cur->hash);
		ib1 = READ_ONCE(hwe->ib1);

		if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) {
			cur->hash = 0xffff;
			__mtk_foe_entry_clear(ppe, cur);
			continue;
		}

		cur_idle = __mtk_foe_entry_idle_time(ppe, ib1);
		if (cur_idle >= idle)
			continue;

		idle = cur_idle;
		entry->data.ib1 &= ~ib1_ts_mask;
		entry->data.ib1 |= hwe->ib1 & ib1_ts_mask;
	}
}

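/* Refresh the software copy of a flow from its hardware FOE entry, mainly to
 * pick up the hardware timestamp in IB1; L2 flows instead aggregate the most
 * recent timestamp of their subflows. If the hardware slot no longer matches
 * this flow, drop the cached hash so the flow can be re-bound later.
 */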
static void
mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	struct mtk_foe_entry foe = {};
	struct mtk_foe_entry *hwe;

	spin_lock_bh(&ppe_lock);

	if (entry->type == MTK_FLOW_TYPE_L2) {
		mtk_flow_entry_update_l2(ppe, entry);
		goto out;
	}

	if (entry->hash == 0xffff)
		goto out;

	hwe = mtk_foe_get_entry(ppe, entry->hash);
	memcpy(&foe, hwe, ppe->eth->soc->foe_entry_size);
	if (!mtk_flow_entry_match(ppe->eth, entry, &foe)) {
		entry->hash = 0xffff;
		goto out;
	}

	entry->data.ib1 = foe.ib1;

out:
	spin_unlock_bh(&ppe_lock);
}

static void
__mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
		       u16 hash)
{
	struct mtk_eth *eth = ppe->eth;
	u16 timestamp = mtk_eth_timestamp(eth);
	struct mtk_foe_entry *hwe;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP_V2;
		entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP_V2,
					 timestamp);
	} else {
		entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
		entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP,
					 timestamp);
	}

	hwe = mtk_foe_get_entry(ppe, hash);
	memcpy(&hwe->data, &entry->data, eth->soc->foe_entry_size - sizeof(hwe->ib1));
	wmb();
	hwe->ib1 = entry->ib1;

	dma_wmb();

	mtk_ppe_cache_clear(ppe);
}

void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	spin_lock_bh(&ppe_lock);
	__mtk_foe_entry_clear(ppe, entry);
	spin_unlock_bh(&ppe_lock);
}

static int
mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	entry->type = MTK_FLOW_TYPE_L2;

	return rhashtable_insert_fast(&ppe->l2_flows, &entry->l2_node,
				      mtk_flow_l2_ht_params);
}

int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	const struct mtk_soc_data *soc = ppe->eth->soc;
	int type = mtk_get_ib1_pkt_type(ppe->eth, entry->data.ib1);
	u32 hash;

	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
		return mtk_foe_entry_commit_l2(ppe, entry);

	hash = mtk_ppe_hash_entry(ppe->eth, &entry->data);
	entry->hash = 0xffff;
	spin_lock_bh(&ppe_lock);
	hlist_add_head(&entry->list, &ppe->foe_flow[hash / soc->hash_offset]);
	spin_unlock_bh(&ppe_lock);

	return 0;
}

static void
mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
			     u16 hash)
{
	const struct mtk_soc_data *soc = ppe->eth->soc;
	struct mtk_flow_entry *flow_info;
	struct mtk_foe_entry foe = {}, *hwe;
	struct mtk_foe_mac_info *l2;
	u32 ib1_mask = mtk_get_ib1_pkt_type_mask(ppe->eth) | MTK_FOE_IB1_UDP;
	int type;

	flow_info = kzalloc(offsetof(struct mtk_flow_entry, l2_data.end),
			    GFP_ATOMIC);
	if (!flow_info)
		return;

	flow_info->l2_data.base_flow = entry;
	flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW;
	flow_info->hash = hash;
	hlist_add_head(&flow_info->list,
		       &ppe->foe_flow[hash / soc->hash_offset]);
	hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);

	hwe = mtk_foe_get_entry(ppe, hash);
	memcpy(&foe, hwe, soc->foe_entry_size);
	foe.ib1 &= ib1_mask;
	foe.ib1 |= entry->data.ib1 & ~ib1_mask;

	l2 = mtk_foe_entry_l2(ppe->eth, &foe);
	memcpy(l2, &entry->data.bridge.l2, sizeof(*l2));

	type = mtk_get_ib1_pkt_type(ppe->eth, foe.ib1);
	if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT)
		memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new));
	else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP)
		l2->etype = ETH_P_IPV6;

	*mtk_foe_entry_ib2(ppe->eth, &foe) = entry->data.bridge.ib2;

	__mtk_foe_entry_commit(ppe, &foe, hash);
}

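/* Called for packets that hit an unbound hardware entry at the given hash.
 * Bind a matching software flow to that slot, or fall back to looking up the
 * L2 (bridge) flow table by MAC address and VLAN and creating a per-hash
 * subflow from it.
 */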
void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
{
	const struct mtk_soc_data *soc = ppe->eth->soc;
	struct hlist_head *head = &ppe->foe_flow[hash / soc->hash_offset];
	struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, hash);
	struct mtk_flow_entry *entry;
	struct mtk_foe_bridge key = {};
	struct hlist_node *n;
	struct ethhdr *eh;
	bool found = false;
	u8 *tag;

	spin_lock_bh(&ppe_lock);

	if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND)
		goto out;

	hlist_for_each_entry_safe(entry, n, head, list) {
		if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) {
			if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) ==
				     MTK_FOE_STATE_BIND))
				continue;

			entry->hash = 0xffff;
			__mtk_foe_entry_clear(ppe, entry);
			continue;
		}

		if (found || !mtk_flow_entry_match(ppe->eth, entry, hwe)) {
			if (entry->hash != 0xffff)
				entry->hash = 0xffff;
			continue;
		}

		entry->hash = hash;
		__mtk_foe_entry_commit(ppe, &entry->data, hash);
		found = true;
	}

	if (found)
		goto out;

	eh = eth_hdr(skb);
	ether_addr_copy(key.dest_mac, eh->h_dest);
	ether_addr_copy(key.src_mac, eh->h_source);
	tag = skb->data - 2;
	key.vlan = 0;
	switch (skb->protocol) {
#if IS_ENABLED(CONFIG_NET_DSA)
	case htons(ETH_P_XDSA):
		if (!netdev_uses_dsa(skb->dev) ||
		    skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
			goto out;

		tag += 4;
		if (get_unaligned_be16(tag) != ETH_P_8021Q)
			break;

		fallthrough;
#endif
	case htons(ETH_P_8021Q):
		key.vlan = get_unaligned_be16(tag + 2) & VLAN_VID_MASK;
		break;
	default:
		break;
	}

	entry = rhashtable_lookup_fast(&ppe->l2_flows, &key, mtk_flow_l2_ht_params);
	if (!entry)
		goto out;

	mtk_foe_entry_commit_subflow(ppe, entry, hash);

out:
	spin_unlock_bh(&ppe_lock);
}

int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	mtk_flow_entry_update(ppe, entry);

	return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
}

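/* Allocate the PPE instance, the DMA-coherent FOE table and the per-bucket
 * software flow lists, and register the debugfs interface.
 */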
struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
			     int version, int index)
{
	const struct mtk_soc_data *soc = eth->soc;
	struct device *dev = eth->dev;
	struct mtk_ppe *ppe;
	u32 foe_flow_size;
	void *foe;

	ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL);
	if (!ppe)
		return NULL;

	rhashtable_init(&ppe->l2_flows, &mtk_flow_l2_ht_params);

	/* need to allocate a separate device, since the PPE DMA access is
	 * not coherent.
	 */
	ppe->base = base;
	ppe->eth = eth;
	ppe->dev = dev;
	ppe->version = version;

	foe = dmam_alloc_coherent(ppe->dev,
				  MTK_PPE_ENTRIES * soc->foe_entry_size,
				  &ppe->foe_phys, GFP_KERNEL);
	if (!foe)
		return NULL;

	ppe->foe_table = foe;

	foe_flow_size = (MTK_PPE_ENTRIES / soc->hash_offset) *
			sizeof(*ppe->foe_flow);
	ppe->foe_flow = devm_kzalloc(dev, foe_flow_size, GFP_KERNEL);
	if (!ppe->foe_flow)
		return NULL;

	mtk_ppe_debugfs_init(ppe, index);

	return ppe;
}

static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
{
	static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
	int i, k;

	memset(ppe->foe_table, 0,
	       MTK_PPE_ENTRIES * ppe->eth->soc->foe_entry_size);

	if (!IS_ENABLED(CONFIG_SOC_MT7621))
		return;

	/* skip all entries that cross the 1024 byte boundary */
	for (i = 0; i < MTK_PPE_ENTRIES; i += 128) {
		for (k = 0; k < ARRAY_SIZE(skip); k++) {
			struct mtk_foe_entry *hwe;

			hwe = mtk_foe_get_entry(ppe, i + skip[k]);
			hwe->ib1 |= MTK_FOE_IB1_STATIC;
		}
	}
}

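/* Program the FOE table base and size, hashing/aging behaviour, supported
 * flow types and binding rate limits, then enable the offload engine.
 */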
void mtk_ppe_start(struct mtk_ppe *ppe)
{
	u32 val;

	if (!ppe)
		return;

	mtk_ppe_init_foe_table(ppe);
	ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys);

	val = MTK_PPE_TB_CFG_ENTRY_80B |
	      MTK_PPE_TB_CFG_AGE_NON_L4 |
	      MTK_PPE_TB_CFG_AGE_UNBIND |
	      MTK_PPE_TB_CFG_AGE_TCP |
	      MTK_PPE_TB_CFG_AGE_UDP |
	      MTK_PPE_TB_CFG_AGE_TCP_FIN |
	      FIELD_PREP(MTK_PPE_TB_CFG_SEARCH_MISS,
			 MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD) |
	      FIELD_PREP(MTK_PPE_TB_CFG_KEEPALIVE,
			 MTK_PPE_KEEPALIVE_DISABLE) |
	      FIELD_PREP(MTK_PPE_TB_CFG_HASH_MODE, 1) |
	      FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE,
			 MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) |
	      FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
			 MTK_PPE_ENTRIES_SHIFT);
	if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2))
		val |= MTK_PPE_TB_CFG_INFO_SEL;
	ppe_w32(ppe, MTK_PPE_TB_CFG, val);

	ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK,
		MTK_PPE_IP_PROTO_CHK_IPV4 | MTK_PPE_IP_PROTO_CHK_IPV6);

	mtk_ppe_cache_enable(ppe, true);

	val = MTK_PPE_FLOW_CFG_IP6_3T_ROUTE |
	      MTK_PPE_FLOW_CFG_IP6_5T_ROUTE |
	      MTK_PPE_FLOW_CFG_IP6_6RD |
	      MTK_PPE_FLOW_CFG_IP4_NAT |
	      MTK_PPE_FLOW_CFG_IP4_NAPT |
	      MTK_PPE_FLOW_CFG_IP4_DSLITE |
	      MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
	if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2))
		val |= MTK_PPE_MD_TOAP_BYP_CRSN0 |
		       MTK_PPE_MD_TOAP_BYP_CRSN1 |
		       MTK_PPE_MD_TOAP_BYP_CRSN2 |
		       MTK_PPE_FLOW_CFG_IP4_HASH_GRE_KEY;
	else
		val |= MTK_PPE_FLOW_CFG_IP4_TCP_FRAG |
		       MTK_PPE_FLOW_CFG_IP4_UDP_FRAG;
	ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);

	val = FIELD_PREP(MTK_PPE_UNBIND_AGE_MIN_PACKETS, 1000) |
	      FIELD_PREP(MTK_PPE_UNBIND_AGE_DELTA, 3);
	ppe_w32(ppe, MTK_PPE_UNBIND_AGE, val);

	val = FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_UDP, 12) |
	      FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_NON_L4, 1);
	ppe_w32(ppe, MTK_PPE_BIND_AGE0, val);

	val = FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
	      FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP, 7);
	ppe_w32(ppe, MTK_PPE_BIND_AGE1, val);

	val = MTK_PPE_BIND_LIMIT0_QUARTER | MTK_PPE_BIND_LIMIT0_HALF;
	ppe_w32(ppe, MTK_PPE_BIND_LIMIT0, val);

	val = MTK_PPE_BIND_LIMIT1_FULL |
	      FIELD_PREP(MTK_PPE_BIND_LIMIT1_NON_L4, 1);
	ppe_w32(ppe, MTK_PPE_BIND_LIMIT1, val);

	val = FIELD_PREP(MTK_PPE_BIND_RATE_BIND, 30) |
	      FIELD_PREP(MTK_PPE_BIND_RATE_PREBIND, 1);
	ppe_w32(ppe, MTK_PPE_BIND_RATE, val);

	/* enable PPE */
	val = MTK_PPE_GLO_CFG_EN |
	      MTK_PPE_GLO_CFG_IP4_L4_CS_DROP |
	      MTK_PPE_GLO_CFG_IP4_CS_DROP |
	      MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE;
	ppe_w32(ppe, MTK_PPE_GLO_CFG, val);

	ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);

	if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2)) {
		ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT1, 0xcb777);
		ppe_w32(ppe, MTK_PPE_SBW_CTRL, 0x7f);
	}
}

int mtk_ppe_stop(struct mtk_ppe *ppe)
{
	u32 val;
	int i;

	if (!ppe)
		return 0;

	for (i = 0; i < MTK_PPE_ENTRIES; i++) {
		struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, i);

		hwe->ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
				      MTK_FOE_STATE_INVALID);
	}

	mtk_ppe_cache_enable(ppe, false);

	/* disable offload engine */
	ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN);
	ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0);

	/* disable aging */
	val = MTK_PPE_TB_CFG_AGE_NON_L4 |
	      MTK_PPE_TB_CFG_AGE_UNBIND |
	      MTK_PPE_TB_CFG_AGE_TCP |
	      MTK_PPE_TB_CFG_AGE_UDP |
	      MTK_PPE_TB_CFG_AGE_TCP_FIN;
	ppe_clear(ppe, MTK_PPE_TB_CFG, val);

	return mtk_ppe_wait_busy(ppe);
}