// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Felix Fietkau <nbd@nbd.name>
 */

#include <linux/if_ether.h>
#include <linux/rhashtable.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/flow_offload.h>
#include <net/pkt_cls.h>
#include <net/dsa.h>
#include "mtk_eth_soc.h"
#include "mtk_wed.h"

struct mtk_flow_data {
	struct ethhdr eth;

	union {
		struct {
			__be32 src_addr;
			__be32 dst_addr;
		} v4;

		struct {
			struct in6_addr src_addr;
			struct in6_addr dst_addr;
		} v6;
	};

	__be16 src_port;
	__be16 dst_port;

	u16 vlan_in;

	struct {
		u16 id;
		__be16 proto;
		u8 num;
	} vlan;
	struct {
		u16 sid;
		u8 num;
	} pppoe;
};

static const struct rhashtable_params mtk_flow_ht_params = {
	.head_offset = offsetof(struct mtk_flow_entry, node),
	.key_offset = offsetof(struct mtk_flow_entry, cookie),
	.key_len = sizeof(unsigned long),
	.automatic_shrinking = true,
};

static int
mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data,
		       bool egress)
{
	return mtk_foe_entry_set_ipv4_tuple(foe, egress,
					    data->v4.src_addr, data->src_port,
					    data->v4.dst_addr, data->dst_port);
}

static int
mtk_flow_set_ipv6_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data)
{
	return mtk_foe_entry_set_ipv6_tuple(foe,
					    data->v6.src_addr.s6_addr32, data->src_port,
					    data->v6.dst_addr.s6_addr32, data->dst_port);
}

static void
mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth)
{
	void *dest = eth + act->mangle.offset;
	const void *src = &act->mangle.val;

	if (act->mangle.offset > 8)
		return;

	if (act->mangle.mask == 0xffff) {
		src += 2;
		dest += 2;
	}

	memcpy(dest, src, act->mangle.mask ? 2 : 4);
}

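/* Resolve the forwarding path for @addr behind @dev via
 * ndo_fill_forward_path() and, if it terminates in a MediaTek WDMA (WED)
 * device, report the WDMA index, queue, BSS and WCID in @info.
 * Returns 0 on success, -1 otherwise.
 */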
static int
mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_info *info)
{
	struct net_device_path_ctx ctx = {
		.dev = dev,
		.daddr = addr,
	};
	struct net_device_path path = {};

	if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED))
		return -1;

	if (!dev->netdev_ops->ndo_fill_forward_path)
		return -1;

	if (dev->netdev_ops->ndo_fill_forward_path(&ctx, &path))
		return -1;

	if (path.type != DEV_PATH_MTK_WDMA)
		return -1;

	info->wdma_idx = path.mtk_wdma.wdma_idx;
	info->queue = path.mtk_wdma.queue;
	info->bss = path.mtk_wdma.bss;
	info->wcid = path.mtk_wdma.wcid;

	return 0;
}

static int
mtk_flow_mangle_ports(const struct flow_action_entry *act,
		      struct mtk_flow_data *data)
{
	u32 val = ntohl(act->mangle.val);

	switch (act->mangle.offset) {
	case 0:
		if (act->mangle.mask == ~htonl(0xffff))
			data->dst_port = cpu_to_be16(val);
		else
			data->src_port = cpu_to_be16(val >> 16);
		break;
	case 2:
		data->dst_port = cpu_to_be16(val);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int
mtk_flow_mangle_ipv4(const struct flow_action_entry *act,
		     struct mtk_flow_data *data)
{
	__be32 *dest;

	switch (act->mangle.offset) {
	case offsetof(struct iphdr, saddr):
		dest = &data->v4.src_addr;
		break;
	case offsetof(struct iphdr, daddr):
		dest = &data->v4.dst_addr;
		break;
	default:
		return -EINVAL;
	}

	memcpy(dest, &act->mangle.val, sizeof(u32));

	return 0;
}

static int
mtk_flow_get_dsa_port(struct net_device **dev)
{
#if IS_ENABLED(CONFIG_NET_DSA)
	struct dsa_port *dp;

	dp = dsa_port_from_netdev(*dev);
	if (IS_ERR(dp))
		return -ENODEV;

	if (dp->cpu_dp->tag_ops->proto != DSA_TAG_PROTO_MTK)
		return -ENODEV;

	*dev = dp->cpu_dp->master;

	return dp->index;
#else
	return -ENODEV;
#endif
}

static int
mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
			   struct net_device *dev, const u8 *dest_mac,
			   int *wed_index)
{
	struct mtk_wdma_info info = {};
	int pse_port, dsa_port;

	if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
		mtk_foe_entry_set_wdma(foe, info.wdma_idx, info.queue, info.bss,
				       info.wcid);
		pse_port = 3;
		*wed_index = info.wdma_idx;
		goto out;
	}

	dsa_port = mtk_flow_get_dsa_port(&dev);
	if (dsa_port >= 0)
		mtk_foe_entry_set_dsa(foe, dsa_port);

	if (dev == eth->netdev[0])
		pse_port = 1;
	else if (dev == eth->netdev[1])
		pse_port = 2;
	else
		return -EOPNOTSUPP;

out:
	mtk_foe_entry_set_pse_port(foe, pse_port);

	return 0;
}

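/* Translate a FLOW_CLS_REPLACE request into a PPE entry: parse the flower
 * match (meta, control, basic, addresses, ports, VLAN), fold the mangle
 * actions into the tuple, bind the egress device, then commit the entry
 * and track it in eth->flow_table keyed by f->cookie.
 */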
static int
mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_action_entry *act;
	struct mtk_flow_data data = {};
	struct mtk_foe_entry foe;
	struct net_device *odev = NULL;
	struct mtk_flow_entry *entry;
	int offload_type = 0;
	int wed_index = -1;
	u16 addr_type = 0;
	u8 l4proto = 0;
	int err = 0;
	int i;

	if (rhashtable_lookup(&eth->flow_table, &f->cookie, mtk_flow_ht_params))
		return -EEXIST;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) {
		struct flow_match_meta match;

		flow_rule_match_meta(rule, &match);
	} else {
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
	} else {
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		l4proto = match.key->ip_proto;
	} else {
		return -EOPNOTSUPP;
	}

	switch (addr_type) {
	case 0:
		offload_type = MTK_PPE_PKT_TYPE_BRIDGE;
		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
			struct flow_match_eth_addrs match;

			flow_rule_match_eth_addrs(rule, &match);
			memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN);
			memcpy(data.eth.h_source, match.key->src, ETH_ALEN);
		} else {
			return -EOPNOTSUPP;
		}

		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
			struct flow_match_vlan match;

			flow_rule_match_vlan(rule, &match);

			if (match.key->vlan_tpid != cpu_to_be16(ETH_P_8021Q))
				return -EOPNOTSUPP;

			data.vlan_in = match.key->vlan_id;
		}
		break;
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T;
		break;
	default:
		return -EOPNOTSUPP;
	}

	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_MANGLE:
			if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
				return -EOPNOTSUPP;
			if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
				mtk_flow_offload_mangle_eth(act, &data.eth);
			break;
		case FLOW_ACTION_REDIRECT:
			odev = act->dev;
			break;
		case FLOW_ACTION_CSUM:
			break;
		case FLOW_ACTION_VLAN_PUSH:
			if (data.vlan.num == 1 ||
			    act->vlan.proto != htons(ETH_P_8021Q))
				return -EOPNOTSUPP;

			data.vlan.id = act->vlan.vid;
			data.vlan.proto = act->vlan.proto;
			data.vlan.num++;
			break;
		case FLOW_ACTION_VLAN_POP:
			break;
		case FLOW_ACTION_PPPOE_PUSH:
			if (data.pppoe.num == 1)
				return -EOPNOTSUPP;

			data.pppoe.sid = act->pppoe.sid;
			data.pppoe.num++;
			break;
		default:
			return -EOPNOTSUPP;
		}
	}

	if (!is_valid_ether_addr(data.eth.h_source) ||
	    !is_valid_ether_addr(data.eth.h_dest))
		return -EINVAL;

	err = mtk_foe_entry_prepare(&foe, offload_type, l4proto, 0,
				    data.eth.h_source,
				    data.eth.h_dest);
	if (err)
		return err;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports ports;

		if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
			return -EOPNOTSUPP;

		flow_rule_match_ports(rule, &ports);
		data.src_port = ports.key->src;
		data.dst_port = ports.key->dst;
	} else if (offload_type != MTK_PPE_PKT_TYPE_BRIDGE) {
		return -EOPNOTSUPP;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs addrs;

		flow_rule_match_ipv4_addrs(rule, &addrs);

		data.v4.src_addr = addrs.key->src;
		data.v4.dst_addr = addrs.key->dst;

		mtk_flow_set_ipv4_addr(&foe, &data, false);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs addrs;

		flow_rule_match_ipv6_addrs(rule, &addrs);

		data.v6.src_addr = addrs.key->src;
		data.v6.dst_addr = addrs.key->dst;

		mtk_flow_set_ipv6_addr(&foe, &data);
	}

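	/* Second pass over the actions: apply TCP/UDP port and IPv4 address
	 * mangles on top of the tuple recorded above. Ethernet mangles were
	 * already folded into data.eth during the first pass.
	 */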
	flow_action_for_each(i, act, &rule->action) {
		if (act->id != FLOW_ACTION_MANGLE)
			continue;

		if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
			return -EOPNOTSUPP;

		switch (act->mangle.htype) {
		case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
		case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
			err = mtk_flow_mangle_ports(act, &data);
			break;
		case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
			err = mtk_flow_mangle_ipv4(act, &data);
			break;
		case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
			/* handled earlier */
			break;
		default:
			return -EOPNOTSUPP;
		}

		if (err)
			return err;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		err = mtk_flow_set_ipv4_addr(&foe, &data, true);
		if (err)
			return err;
	}

	if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
		foe.bridge.vlan = data.vlan_in;

	if (data.vlan.num == 1) {
		if (data.vlan.proto != htons(ETH_P_8021Q))
			return -EOPNOTSUPP;

		mtk_foe_entry_set_vlan(&foe, data.vlan.id);
	}
	if (data.pppoe.num == 1)
		mtk_foe_entry_set_pppoe(&foe, data.pppoe.sid);

	err = mtk_flow_set_output_device(eth, &foe, odev, data.eth.h_dest,
					 &wed_index);
	if (err)
		return err;

	if (wed_index >= 0 && (err = mtk_wed_flow_add(wed_index)) < 0)
		return err;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->cookie = f->cookie;
	memcpy(&entry->data, &foe, sizeof(entry->data));
	entry->wed_index = wed_index;

	if (mtk_foe_entry_commit(eth->ppe, entry) < 0)
		goto free;

	err = rhashtable_insert_fast(&eth->flow_table, &entry->node,
				     mtk_flow_ht_params);
	if (err < 0)
		goto clear;

	return 0;

clear:
	mtk_foe_entry_clear(eth->ppe, entry);
free:
	kfree(entry);
	if (wed_index >= 0)
		mtk_wed_flow_remove(wed_index);
	return err;
}

static int
mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f)
{
	struct mtk_flow_entry *entry;

	entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
				  mtk_flow_ht_params);
	if (!entry)
		return -ENOENT;

	mtk_foe_entry_clear(eth->ppe, entry);
	rhashtable_remove_fast(&eth->flow_table, &entry->node,
			       mtk_flow_ht_params);
	if (entry->wed_index >= 0)
		mtk_wed_flow_remove(entry->wed_index);
	kfree(entry);

	return 0;
}

static int
mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
{
	struct mtk_flow_entry *entry;
	u32 idle;

	entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
				  mtk_flow_ht_params);
	if (!entry)
		return -ENOENT;

	idle = mtk_foe_entry_idle_time(eth->ppe, entry);
	f->stats.lastused = jiffies - idle * HZ;

	return 0;
}

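/* All flower commands are dispatched through a single block callback and
 * serialized by one global mutex.
 */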
static DEFINE_MUTEX(mtk_flow_offload_mutex);

static int
mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{
	struct flow_cls_offload *cls = type_data;
	struct net_device *dev = cb_priv;
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int err;

	if (!tc_can_offload(dev))
		return -EOPNOTSUPP;

	if (type != TC_SETUP_CLSFLOWER)
		return -EOPNOTSUPP;

	mutex_lock(&mtk_flow_offload_mutex);
	switch (cls->command) {
	case FLOW_CLS_REPLACE:
		err = mtk_flow_offload_replace(eth, cls);
		break;
	case FLOW_CLS_DESTROY:
		err = mtk_flow_offload_destroy(eth, cls);
		break;
	case FLOW_CLS_STATS:
		err = mtk_flow_offload_stats(eth, cls);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	mutex_unlock(&mtk_flow_offload_mutex);

	return err;
}

static int
mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	static LIST_HEAD(block_cb_list);
	struct flow_block_cb *block_cb;
	flow_setup_cb_t *cb;

	if (!eth->ppe || !eth->ppe->foe_table)
		return -EOPNOTSUPP;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	cb = mtk_eth_setup_tc_block_cb;
	f->driver_block_list = &block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (block_cb) {
			flow_block_cb_incref(block_cb);
			return 0;
		}
		block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (!block_cb)
			return -ENOENT;

		if (flow_block_cb_decref(block_cb)) {
			flow_block_cb_remove(block_cb, f);
			list_del(&block_cb->driver_list);
		}
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
		     void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
	case TC_SETUP_FT:
		return mtk_eth_setup_tc_block(dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

int mtk_eth_offload_init(struct mtk_eth *eth)
{
	if (!eth->ppe || !eth->ppe->foe_table)
		return 0;

	return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params);
}