/*
 * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
 *
 * Copyright (c) 2017 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_vlan.h>

#include "cxgb4.h"
#include "cxgb4_filter.h"
#include "cxgb4_tc_flower.h"

#define STATS_CHECK_PERIOD (HZ / 2)

static struct ch_tc_pedit_fields pedits[] = {
	PEDIT_FIELDS(ETH_, DMAC_31_0, 4, dmac, 0),
	PEDIT_FIELDS(ETH_, DMAC_47_32, 2, dmac, 4),
	PEDIT_FIELDS(ETH_, SMAC_15_0, 2, smac, 0),
	PEDIT_FIELDS(ETH_, SMAC_47_16, 4, smac, 2),
	PEDIT_FIELDS(IP4_, SRC, 4, nat_fip, 0),
	PEDIT_FIELDS(IP4_, DST, 4, nat_lip, 0),
	PEDIT_FIELDS(IP6_, SRC_31_0, 4, nat_fip, 0),
	PEDIT_FIELDS(IP6_, SRC_63_32, 4, nat_fip, 4),
	PEDIT_FIELDS(IP6_, SRC_95_64, 4, nat_fip, 8),
	PEDIT_FIELDS(IP6_, SRC_127_96, 4, nat_fip, 12),
	PEDIT_FIELDS(IP6_, DST_31_0, 4, nat_lip, 0),
	PEDIT_FIELDS(IP6_, DST_63_32, 4, nat_lip, 4),
	PEDIT_FIELDS(IP6_, DST_95_64, 4, nat_lip, 8),
	PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12),
	PEDIT_FIELDS(TCP_, SPORT, 2, nat_fport, 0),
	PEDIT_FIELDS(TCP_, DPORT, 2, nat_lport, 0),
	PEDIT_FIELDS(UDP_, SPORT, 2, nat_fport, 0),
	PEDIT_FIELDS(UDP_, DPORT, 2, nat_lport, 0),
};

static struct ch_tc_flower_entry *allocate_flower_entry(void)
{
	struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);

	/* kzalloc() can fail; only initialize the lock on success */
	if (new)
		spin_lock_init(&new->lock);
	return new;
}

/* Must be called with either RTNL or rcu_read_lock */
static struct ch_tc_flower_entry *ch_flower_lookup(struct adapter *adap,
						   unsigned long flower_cookie)
{
	return rhashtable_lookup_fast(&adap->flower_tbl, &flower_cookie,
				      adap->flower_ht_params);
}

static void cxgb4_process_flow_match(struct net_device *dev,
				     struct tc_cls_flower_offload *cls,
				     struct ch_filter_specification *fs)
{
	u16 addr_type = 0;

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  cls->key);

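		/* The control key carries the address family; addr_type
		 * selects which address block (IPv4 or IPv6) is parsed
		 * further below.
		 */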
		addr_type = key->addr_type;
	}

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  cls->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  cls->mask);
		u16 ethtype_key = ntohs(key->n_proto);
		u16 ethtype_mask = ntohs(mask->n_proto);

		if (ethtype_key == ETH_P_ALL) {
			ethtype_key = 0;
			ethtype_mask = 0;
		}

		fs->val.ethtype = ethtype_key;
		fs->mask.ethtype = ethtype_mask;
		fs->val.proto = key->ip_proto;
		fs->mask.proto = mask->ip_proto;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  cls->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  cls->mask);

		fs->type = 0;
		memcpy(&fs->val.lip[0], &key->dst, sizeof(key->dst));
		memcpy(&fs->val.fip[0], &key->src, sizeof(key->src));
		memcpy(&fs->mask.lip[0], &mask->dst, sizeof(mask->dst));
		memcpy(&fs->mask.fip[0], &mask->src, sizeof(mask->src));

		/* also initialize nat_lip/fip to same values */
		memcpy(&fs->nat_lip[0], &key->dst, sizeof(key->dst));
		memcpy(&fs->nat_fip[0], &key->src, sizeof(key->src));
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  cls->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  cls->mask);

		fs->type = 1;
		memcpy(&fs->val.lip[0], key->dst.s6_addr, sizeof(key->dst));
		memcpy(&fs->val.fip[0], key->src.s6_addr, sizeof(key->src));
		memcpy(&fs->mask.lip[0], mask->dst.s6_addr, sizeof(mask->dst));
		memcpy(&fs->mask.fip[0], mask->src.s6_addr, sizeof(mask->src));

		/* also initialize nat_lip/fip to same values */
		memcpy(&fs->nat_lip[0], key->dst.s6_addr, sizeof(key->dst));
		memcpy(&fs->nat_fip[0], key->src.s6_addr, sizeof(key->src));
	}

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key, *mask;

		key = skb_flow_dissector_target(cls->dissector,
						FLOW_DISSECTOR_KEY_PORTS,
						cls->key);
		mask = skb_flow_dissector_target(cls->dissector,
						 FLOW_DISSECTOR_KEY_PORTS,
						 cls->mask);
		/* key->dst/src are network order; the filter specification
		 * stores ports in host order
		 */
		fs->val.lport = be16_to_cpu(key->dst);
		fs->mask.lport = be16_to_cpu(mask->dst);
		fs->val.fport = be16_to_cpu(key->src);
		fs->mask.fport = be16_to_cpu(mask->src);

		/* also initialize nat_lport/fport to same values */
		fs->nat_lport = be16_to_cpu(key->dst);
		fs->nat_fport = be16_to_cpu(key->src);
	}

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_dissector_key_ip *key, *mask;

		key = skb_flow_dissector_target(cls->dissector,
						FLOW_DISSECTOR_KEY_IP,
						cls->key);
		mask = skb_flow_dissector_target(cls->dissector,
						 FLOW_DISSECTOR_KEY_IP,
						 cls->mask);
		fs->val.tos = key->tos;
		fs->mask.tos = mask->tos;
	}

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key, *mask;
		u16 vlan_tci, vlan_tci_mask;

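		/* TC supplies the VLAN ID and priority as separate fields;
		 * combine them into a 16-bit TCI (priority in the top 3
		 * bits, ID in the low 12) as expected by the filter
		 * specification.
		 */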
		key = skb_flow_dissector_target(cls->dissector,
						FLOW_DISSECTOR_KEY_VLAN,
						cls->key);
		mask = skb_flow_dissector_target(cls->dissector,
						 FLOW_DISSECTOR_KEY_VLAN,
						 cls->mask);
		vlan_tci = key->vlan_id | (key->vlan_priority <<
					   VLAN_PRIO_SHIFT);
		vlan_tci_mask = mask->vlan_id | (mask->vlan_priority <<
						 VLAN_PRIO_SHIFT);
		fs->val.ivlan = vlan_tci;
		fs->mask.ivlan = vlan_tci_mask;

		/* Chelsio adapters use the ivlan_vld bit to match VLAN
		 * (802.1Q) packets. When a VLAN tag is present, the
		 * ethtype field is matched against the ethertype of the
		 * inner header, i.e. the header following the VLAN header.
		 * So if TC supplied ethtype 802.1Q, set ivlan_vld and
		 * clear the ethtype match; otherwise the hardware would
		 * try to match the supplied ethtype against the inner
		 * header's ethertype.
		 */
		if (fs->val.ethtype == ETH_P_8021Q) {
			fs->val.ivlan_vld = 1;
			fs->mask.ivlan_vld = 1;
			fs->val.ethtype = 0;
			fs->mask.ethtype = 0;
		}
	}

	/* Match only packets coming from the ingress port where this
	 * filter will be created.
	 */
	fs->val.iport = netdev2pinfo(dev)->port_id;
	fs->mask.iport = ~0;
}

static int cxgb4_validate_flow_match(struct net_device *dev,
				     struct tc_cls_flower_offload *cls)
{
	u16 ethtype_mask = 0;
	u16 ethtype_key = 0;

	if (cls->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IP))) {
		netdev_warn(dev, "Unsupported key used: 0x%x\n",
			    cls->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  cls->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  cls->mask);

		ethtype_key = ntohs(key->n_proto);
		ethtype_mask = ntohs(mask->n_proto);
	}

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_IP)) {
		u16 eth_ip_type = ethtype_key & ethtype_mask;
		struct flow_dissector_key_ip *mask;

		if (eth_ip_type != ETH_P_IP && eth_ip_type != ETH_P_IPV6) {
			netdev_err(dev, "IP Key supported only with IPv4/v6\n");
			return -EINVAL;
		}

		mask = skb_flow_dissector_target(cls->dissector,
						 FLOW_DISSECTOR_KEY_IP,
						 cls->mask);
		if (mask->ttl) {
			netdev_warn(dev, "ttl match unsupported for offload\n");
			return -EOPNOTSUPP;
		}
	}

	return 0;
}

static void offload_pedit(struct ch_filter_specification *fs, u32 val,
			  u32 mask, u8 field)
{
	u32 set_val = val & ~mask;
	u32 offset = 0;
	u8 size = 1;
	int i;

	for (i = 0; i < ARRAY_SIZE(pedits); i++) {
		if (pedits[i].field == field) {
			offset = pedits[i].offset;
			size = pedits[i].size;
			break;
		}
	}
	memcpy((u8 *)fs + offset, &set_val, size);
}

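/* Translate one 32-bit pedit value/mask at a given header offset into
 * the matching field(s) of the filter specification. Rewrites of L3/L4
 * headers are carried out via NAT, so nat_mode is set for those header
 * types.
 */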
static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
				u32 mask, u32 offset, u8 htype)
{
	switch (htype) {
	case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
		switch (offset) {
		case PEDIT_ETH_DMAC_31_0:
			fs->newdmac = 1;
			offload_pedit(fs, val, mask, ETH_DMAC_31_0);
			break;
		case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
			if (~mask & PEDIT_ETH_DMAC_MASK)
				offload_pedit(fs, val, mask, ETH_DMAC_47_32);
			else
				offload_pedit(fs, val >> 16, mask >> 16,
					      ETH_SMAC_15_0);
			break;
		case PEDIT_ETH_SMAC_47_16:
			fs->newsmac = 1;
			offload_pedit(fs, val, mask, ETH_SMAC_47_16);
		}
		break;
	case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
		switch (offset) {
		case PEDIT_IP4_SRC:
			offload_pedit(fs, val, mask, IP4_SRC);
			break;
		case PEDIT_IP4_DST:
			offload_pedit(fs, val, mask, IP4_DST);
		}
		fs->nat_mode = NAT_MODE_ALL;
		break;
	case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
		switch (offset) {
		case PEDIT_IP6_SRC_31_0:
			offload_pedit(fs, val, mask, IP6_SRC_31_0);
			break;
		case PEDIT_IP6_SRC_63_32:
			offload_pedit(fs, val, mask, IP6_SRC_63_32);
			break;
		case PEDIT_IP6_SRC_95_64:
			offload_pedit(fs, val, mask, IP6_SRC_95_64);
			break;
		case PEDIT_IP6_SRC_127_96:
			offload_pedit(fs, val, mask, IP6_SRC_127_96);
			break;
		case PEDIT_IP6_DST_31_0:
			offload_pedit(fs, val, mask, IP6_DST_31_0);
			break;
		case PEDIT_IP6_DST_63_32:
			offload_pedit(fs, val, mask, IP6_DST_63_32);
			break;
		case PEDIT_IP6_DST_95_64:
			offload_pedit(fs, val, mask, IP6_DST_95_64);
			break;
		case PEDIT_IP6_DST_127_96:
			offload_pedit(fs, val, mask, IP6_DST_127_96);
		}
		fs->nat_mode = NAT_MODE_ALL;
		break;
	case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
		switch (offset) {
		case PEDIT_TCP_SPORT_DPORT:
			if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
				offload_pedit(fs, cpu_to_be32(val) >> 16,
					      cpu_to_be32(mask) >> 16,
					      TCP_SPORT);
			else
				offload_pedit(fs, cpu_to_be32(val),
					      cpu_to_be32(mask), TCP_DPORT);
		}
		fs->nat_mode = NAT_MODE_ALL;
		break;
	case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
		switch (offset) {
		case PEDIT_UDP_SPORT_DPORT:
			if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
				offload_pedit(fs, cpu_to_be32(val) >> 16,
					      cpu_to_be32(mask) >> 16,
					      UDP_SPORT);
			else
				offload_pedit(fs, cpu_to_be32(val),
					      cpu_to_be32(mask), UDP_DPORT);
		}
		fs->nat_mode = NAT_MODE_ALL;
	}
}

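/* Walk the TC action list and translate each action (pass, drop,
 * redirect, vlan push/pop/modify, pedit) into the corresponding fields
 * of the hardware filter specification. The actions are expected to
 * have been checked by cxgb4_validate_flow_actions() already.
 */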
static void cxgb4_process_flow_actions(struct net_device *in,
				       struct tc_cls_flower_offload *cls,
				       struct ch_filter_specification *fs)
{
	const struct tc_action *a;
	LIST_HEAD(actions);

	tcf_exts_to_list(cls->exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_ok(a)) {
			fs->action = FILTER_PASS;
		} else if (is_tcf_gact_shot(a)) {
			fs->action = FILTER_DROP;
		} else if (is_tcf_mirred_egress_redirect(a)) {
			struct net_device *out = tcf_mirred_dev(a);
			struct port_info *pi = netdev_priv(out);

			fs->action = FILTER_SWITCH;
			fs->eport = pi->port_id;
		} else if (is_tcf_vlan(a)) {
			u32 vlan_action = tcf_vlan_action(a);
			u8 prio = tcf_vlan_push_prio(a);
			u16 vid = tcf_vlan_push_vid(a);
			u16 vlan_tci = (prio << VLAN_PRIO_SHIFT) | vid;

			switch (vlan_action) {
			case TCA_VLAN_ACT_POP:
				fs->newvlan |= VLAN_REMOVE;
				break;
			case TCA_VLAN_ACT_PUSH:
				fs->newvlan |= VLAN_INSERT;
				fs->vlan = vlan_tci;
				break;
			case TCA_VLAN_ACT_MODIFY:
				fs->newvlan |= VLAN_REWRITE;
				fs->vlan = vlan_tci;
				break;
			default:
				break;
			}
		} else if (is_tcf_pedit(a)) {
			u32 mask, val, offset;
			int nkeys, i;
			u8 htype;

			nkeys = tcf_pedit_nkeys(a);
			for (i = 0; i < nkeys; i++) {
				htype = tcf_pedit_htype(a, i);
				mask = tcf_pedit_mask(a, i);
				val = tcf_pedit_val(a, i);
				offset = tcf_pedit_offset(a, i);

				process_pedit_field(fs, val, mask, offset,
						    htype);
			}
		}
	}
}

static bool valid_l4_mask(u32 mask)
{
	u16 hi, lo;

	/* Either the upper 16-bits (SPORT) OR the lower
	 * 16-bits (DPORT) can be set, but NOT BOTH.
	 */
	hi = (mask >> 16) & 0xFFFF;
	lo = mask & 0xFFFF;

	return !(hi && lo);
}

static bool valid_pedit_action(struct net_device *dev,
			       const struct tc_action *a)
{
	u32 mask, offset;
	u8 cmd, htype;
	int nkeys, i;

	nkeys = tcf_pedit_nkeys(a);
	for (i = 0; i < nkeys; i++) {
		htype = tcf_pedit_htype(a, i);
		cmd = tcf_pedit_cmd(a, i);
		mask = tcf_pedit_mask(a, i);
		offset = tcf_pedit_offset(a, i);

		if (cmd != TCA_PEDIT_KEY_EX_CMD_SET) {
			netdev_err(dev, "%s: Unsupported pedit cmd\n",
				   __func__);
			return false;
		}

		switch (htype) {
		case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
			switch (offset) {
			case PEDIT_ETH_DMAC_31_0:
			case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
			case PEDIT_ETH_SMAC_47_16:
				break;
			default:
				netdev_err(dev, "%s: Unsupported pedit field\n",
					   __func__);
				return false;
			}
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
			switch (offset) {
			case PEDIT_IP4_SRC:
			case PEDIT_IP4_DST:
				break;
			default:
				netdev_err(dev, "%s: Unsupported pedit field\n",
					   __func__);
				return false;
			}
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
			switch (offset) {
			case PEDIT_IP6_SRC_31_0:
			case PEDIT_IP6_SRC_63_32:
			case PEDIT_IP6_SRC_95_64:
			case PEDIT_IP6_SRC_127_96:
			case PEDIT_IP6_DST_31_0:
			case PEDIT_IP6_DST_63_32:
			case PEDIT_IP6_DST_95_64:
			case PEDIT_IP6_DST_127_96:
				break;
			default:
				netdev_err(dev, "%s: Unsupported pedit field\n",
					   __func__);
				return false;
			}
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
			switch (offset) {
			case PEDIT_TCP_SPORT_DPORT:
				if (!valid_l4_mask(~mask)) {
					netdev_err(dev, "%s: Unsupported mask for TCP L4 ports\n",
						   __func__);
					return false;
				}
				break;
			default:
				netdev_err(dev, "%s: Unsupported pedit field\n",
					   __func__);
				return false;
			}
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
			switch (offset) {
			case PEDIT_UDP_SPORT_DPORT:
				if (!valid_l4_mask(~mask)) {
					netdev_err(dev, "%s: Unsupported mask for UDP L4 ports\n",
						   __func__);
					return false;
				}
				break;
			default:
				netdev_err(dev, "%s: Unsupported pedit field\n",
					   __func__);
				return false;
			}
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit type\n",
				   __func__);
			return false;
		}
	}
	return true;
}

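/* Check that every action in the TC rule is one the hardware can
 * perform. pedit and vlan rewrites are only accepted together with an
 * egress redirect, since the filter must switch the packet for the
 * rewrite to be applied.
 */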
static int cxgb4_validate_flow_actions(struct net_device *dev,
				       struct tc_cls_flower_offload *cls)
{
	const struct tc_action *a;
	bool act_redir = false;
	bool act_pedit = false;
	bool act_vlan = false;
	LIST_HEAD(actions);

	tcf_exts_to_list(cls->exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_ok(a)) {
			/* Do nothing */
		} else if (is_tcf_gact_shot(a)) {
			/* Do nothing */
		} else if (is_tcf_mirred_egress_redirect(a)) {
			struct adapter *adap = netdev2adap(dev);
			struct net_device *n_dev, *target_dev;
			bool found = false;
			unsigned int i;

			target_dev = tcf_mirred_dev(a);
			for_each_port(adap, i) {
				n_dev = adap->port[i];
				if (target_dev == n_dev) {
					found = true;
					break;
				}
			}

			/* If the interface doesn't belong to our hardware,
			 * the provided output port is not valid.
			 */
			if (!found) {
				netdev_err(dev, "%s: Out port invalid\n",
					   __func__);
				return -EINVAL;
			}
			act_redir = true;
		} else if (is_tcf_vlan(a)) {
			u16 proto = be16_to_cpu(tcf_vlan_push_proto(a));
			u32 vlan_action = tcf_vlan_action(a);

			switch (vlan_action) {
			case TCA_VLAN_ACT_POP:
				break;
			case TCA_VLAN_ACT_PUSH:
			case TCA_VLAN_ACT_MODIFY:
				if (proto != ETH_P_8021Q) {
					netdev_err(dev, "%s: Unsupported vlan proto\n",
						   __func__);
					return -EOPNOTSUPP;
				}
				break;
			default:
				netdev_err(dev, "%s: Unsupported vlan action\n",
					   __func__);
				return -EOPNOTSUPP;
			}
			act_vlan = true;
		} else if (is_tcf_pedit(a)) {
			bool pedit_valid = valid_pedit_action(dev, a);

			if (!pedit_valid)
				return -EOPNOTSUPP;
			act_pedit = true;
		} else {
			netdev_err(dev, "%s: Unsupported action\n", __func__);
			return -EOPNOTSUPP;
		}
	}

	if ((act_pedit || act_vlan) && !act_redir) {
		netdev_err(dev, "%s: pedit/vlan rewrite invalid without egress redirect\n",
			   __func__);
		return -EINVAL;
	}

	return 0;
}

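/* Offload a TC flower rule: validate the match and actions, build the
 * filter specification, install the filter in hardware (waiting on a
 * completion for the reply), and track the rule in the cookie ->
 * filter-id hash table.
 */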
int cxgb4_tc_flower_replace(struct net_device *dev,
			    struct tc_cls_flower_offload *cls)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_entry *ch_flower;
	struct ch_filter_specification *fs;
	struct filter_ctx ctx;
	int fidx;
	int ret;

	if (cxgb4_validate_flow_actions(dev, cls))
		return -EOPNOTSUPP;

	if (cxgb4_validate_flow_match(dev, cls))
		return -EOPNOTSUPP;

	ch_flower = allocate_flower_entry();
	if (!ch_flower) {
		netdev_err(dev, "%s: ch_flower alloc failed.\n", __func__);
		return -ENOMEM;
	}

	fs = &ch_flower->fs;
	fs->hitcnts = 1;
	cxgb4_process_flow_match(dev, cls, fs);
	cxgb4_process_flow_actions(dev, cls, fs);

	fs->hash = is_filter_exact_match(adap, fs);
	if (fs->hash) {
		fidx = 0;
	} else {
		fidx = cxgb4_get_free_ftid(dev, fs->type ?
					   PF_INET6 : PF_INET);
		if (fidx < 0) {
			netdev_err(dev, "%s: No fidx for offload.\n",
				   __func__);
			ret = -ENOMEM;
			goto free_entry;
		}
	}

	init_completion(&ctx.completion);
	ret = __cxgb4_set_filter(dev, fidx, fs, &ctx);
	if (ret) {
		netdev_err(dev, "%s: filter creation err %d\n",
			   __func__, ret);
		goto free_entry;
	}

	/* Wait for reply */
	ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
	if (!ret) {
		ret = -ETIMEDOUT;
		goto free_entry;
	}

	ret = ctx.result;
	/* Check if hw returned error for filter creation */
	if (ret) {
		netdev_err(dev, "%s: filter creation err %d\n",
			   __func__, ret);
		goto free_entry;
	}

	ch_flower->tc_flower_cookie = cls->cookie;
	ch_flower->filter_id = ctx.tid;
	ret = rhashtable_insert_fast(&adap->flower_tbl, &ch_flower->node,
				     adap->flower_ht_params);
	if (ret)
		goto del_filter;

	return 0;

del_filter:
	cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs);

free_entry:
	kfree(ch_flower);
	return ret;
}

int cxgb4_tc_flower_destroy(struct net_device *dev,
			    struct tc_cls_flower_offload *cls)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_entry *ch_flower;
	int ret;

	ch_flower = ch_flower_lookup(adap, cls->cookie);
	if (!ch_flower)
		return -ENOENT;

	ret = cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs);
	if (ret)
		goto err;

	ret = rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
				     adap->flower_ht_params);
	if (ret) {
		netdev_err(dev, "Flow remove from rhashtable failed\n");
		goto err;
	}
	kfree_rcu(ch_flower, rcu);

err:
	return ret;
}

static void ch_flower_stats_handler(struct work_struct *work)
{
	struct adapter *adap = container_of(work, struct adapter,
					    flower_stats_work);
	struct ch_tc_flower_entry *flower_entry;
	struct ch_tc_flower_stats *ofld_stats;
	struct rhashtable_iter iter;
	u64 packets;
	u64 bytes;
	int ret;

	rhashtable_walk_enter(&adap->flower_tbl, &iter);
	do {
		rhashtable_walk_start(&iter);

		while ((flower_entry = rhashtable_walk_next(&iter)) &&
		       !IS_ERR(flower_entry)) {
			ret = cxgb4_get_filter_counters(adap->port[0],
							flower_entry->filter_id,
							&packets, &bytes,
							flower_entry->fs.hash);
			if (!ret) {
				spin_lock(&flower_entry->lock);
				ofld_stats = &flower_entry->stats;

				if (ofld_stats->prev_packet_count != packets) {
					ofld_stats->prev_packet_count = packets;
					ofld_stats->last_used = jiffies;
				}
				spin_unlock(&flower_entry->lock);
			}
		}

		rhashtable_walk_stop(&iter);

	} while (flower_entry == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);
	mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
}

static void ch_flower_stats_cb(struct timer_list *t)
{
	struct adapter *adap = from_timer(adap, t, flower_stats_timer);

	schedule_work(&adap->flower_stats_work);
}

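/* Report hardware hit counters for a flower rule back to TC; deltas
 * are computed against the counts cached from the previous query.
 */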
int cxgb4_tc_flower_stats(struct net_device *dev,
			  struct tc_cls_flower_offload *cls)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_stats *ofld_stats;
	struct ch_tc_flower_entry *ch_flower;
	u64 packets;
	u64 bytes;
	int ret;

	ch_flower = ch_flower_lookup(adap, cls->cookie);
	if (!ch_flower) {
		ret = -ENOENT;
		goto err;
	}

	ret = cxgb4_get_filter_counters(dev, ch_flower->filter_id,
					&packets, &bytes,
					ch_flower->fs.hash);
	if (ret < 0)
		goto err;

	spin_lock_bh(&ch_flower->lock);
	ofld_stats = &ch_flower->stats;
	if (ofld_stats->packet_count != packets) {
		if (ofld_stats->prev_packet_count != packets)
			ofld_stats->last_used = jiffies;
		tcf_exts_stats_update(cls->exts, bytes - ofld_stats->byte_count,
				      packets - ofld_stats->packet_count,
				      ofld_stats->last_used);

		ofld_stats->packet_count = packets;
		ofld_stats->byte_count = bytes;
		ofld_stats->prev_packet_count = packets;
	}
	spin_unlock_bh(&ch_flower->lock);
	return 0;

err:
	return ret;
}

static const struct rhashtable_params cxgb4_tc_flower_ht_params = {
	.nelem_hint = 384,
	.head_offset = offsetof(struct ch_tc_flower_entry, node),
	.key_offset = offsetof(struct ch_tc_flower_entry, tc_flower_cookie),
	.key_len = sizeof(((struct ch_tc_flower_entry *)0)->tc_flower_cookie),
	.max_size = 524288,
	.min_size = 512,
	.automatic_shrinking = true
};

int cxgb4_init_tc_flower(struct adapter *adap)
{
	int ret;

	adap->flower_ht_params = cxgb4_tc_flower_ht_params;
	ret = rhashtable_init(&adap->flower_tbl, &adap->flower_ht_params);
	if (ret)
		return ret;

	INIT_WORK(&adap->flower_stats_work, ch_flower_stats_handler);
	timer_setup(&adap->flower_stats_timer, ch_flower_stats_cb, 0);
	mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
	return 0;
}

void cxgb4_cleanup_tc_flower(struct adapter *adap)
{
	if (adap->flower_stats_timer.function)
		del_timer_sync(&adap->flower_stats_timer);
	cancel_work_sync(&adap->flower_stats_work);
	rhashtable_destroy(&adap->flower_tbl);
}