// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019-2021, Intel Corporation. */

#include "ice.h"
#include "ice_tc_lib.h"
#include "ice_fltr.h"
#include "ice_lib.h"
#include "ice_protocol_type.h"

/**
 * ice_tc_count_lkups - determine lookup count for switch filter
 * @flags: TC-flower flags
 * @headers: Pointer to TC flower filter header structure
 * @fltr: Pointer to outer TC filter structure
 *
 * Determine lookup count based on TC flower input for switch filter.
 */
static int
ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
		   struct ice_tc_flower_fltr *fltr)
{
	int lkups_cnt = 0;

	if (flags & ICE_TC_FLWR_FIELD_TENANT_ID)
		lkups_cnt++;

	if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC)
		lkups_cnt++;

	if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS)
		lkups_cnt++;

	if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 |
		     ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV6))
		lkups_cnt++;

	if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT)
		lkups_cnt++;

	if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID)
		lkups_cnt++;

	/* are MAC fields specified? */
	if (flags & (ICE_TC_FLWR_FIELD_DST_MAC | ICE_TC_FLWR_FIELD_SRC_MAC))
		lkups_cnt++;

	/* is VLAN specified? */
	if (flags & ICE_TC_FLWR_FIELD_VLAN)
		lkups_cnt++;

	/* is CVLAN specified? */
	if (flags & ICE_TC_FLWR_FIELD_CVLAN)
		lkups_cnt++;

	/* are IPv[4|6] fields specified? */
	if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 | ICE_TC_FLWR_FIELD_SRC_IPV4 |
		     ICE_TC_FLWR_FIELD_DEST_IPV6 | ICE_TC_FLWR_FIELD_SRC_IPV6))
		lkups_cnt++;

	/* is L4 (TCP/UDP/any other L4 protocol fields) specified? */
	if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT |
		     ICE_TC_FLWR_FIELD_SRC_L4_PORT))
		lkups_cnt++;

	return lkups_cnt;
}
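
/* Example of how the counting above works (illustrative, not from the
 * driver): a filter such as
 *   tc filter add dev eth0 protocol ip ingress flower \
 *       dst_mac 01:02:03:04:05:06 dst_ip 192.168.1.1 \
 *       ip_proto tcp dst_port 80 skip_sw hw_tc 1
 * sets ICE_TC_FLWR_FIELD_ETH_TYPE_ID, ICE_TC_FLWR_FIELD_DST_MAC,
 * ICE_TC_FLWR_FIELD_DEST_IPV4 and ICE_TC_FLWR_FIELD_DEST_L4_PORT, so
 * ice_tc_count_lkups() returns 4 (one lookup element per group above).
 */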

static enum ice_protocol_type ice_proto_type_from_mac(bool inner)
{
	return inner ? ICE_MAC_IL : ICE_MAC_OFOS;
}

static enum ice_protocol_type ice_proto_type_from_etype(bool inner)
{
	return inner ? ICE_ETYPE_IL : ICE_ETYPE_OL;
}

static enum ice_protocol_type ice_proto_type_from_ipv4(bool inner)
{
	return inner ? ICE_IPV4_IL : ICE_IPV4_OFOS;
}

static enum ice_protocol_type ice_proto_type_from_ipv6(bool inner)
{
	return inner ? ICE_IPV6_IL : ICE_IPV6_OFOS;
}

static enum ice_protocol_type ice_proto_type_from_l4_port(u16 ip_proto)
{
	switch (ip_proto) {
	case IPPROTO_TCP:
		return ICE_TCP_IL;
	case IPPROTO_UDP:
		return ICE_UDP_ILOS;
	}

	return 0;
}

static enum ice_protocol_type
ice_proto_type_from_tunnel(enum ice_tunnel_type type)
{
	switch (type) {
	case TNL_VXLAN:
		return ICE_VXLAN;
	case TNL_GENEVE:
		return ICE_GENEVE;
	case TNL_GRETAP:
		return ICE_NVGRE;
	case TNL_GTPU:
		/* NO_PAY profiles will not work with GTP-U */
		return ICE_GTP;
	case TNL_GTPC:
		return ICE_GTP_NO_PAY;
	default:
		return 0;
	}
}

static enum ice_sw_tunnel_type
ice_sw_type_from_tunnel(enum ice_tunnel_type type)
{
	switch (type) {
	case TNL_VXLAN:
		return ICE_SW_TUN_VXLAN;
	case TNL_GENEVE:
		return ICE_SW_TUN_GENEVE;
	case TNL_GRETAP:
		return ICE_SW_TUN_NVGRE;
	case TNL_GTPU:
		return ICE_SW_TUN_GTPU;
	case TNL_GTPC:
		return ICE_SW_TUN_GTPC;
	default:
		return ICE_NON_TUN;
	}
}

static u16 ice_check_supported_vlan_tpid(u16 vlan_tpid)
{
	switch (vlan_tpid) {
	case ETH_P_8021Q:
	case ETH_P_8021AD:
	case ETH_P_QINQ1:
		return vlan_tpid;
	default:
		return 0;
	}
}
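
/**
 * ice_tc_fill_tunnel_outer - fill tunnel (outer header) lookup elements
 * @flags: TC-flower flags
 * @fltr: Pointer to TC flower filter structure
 * @list: list of advanced lookup elements to fill
 *
 * Fill the outer (encapsulation) portion of @list based on the tunnel
 * fields set in @flags and return the number of elements consumed.
 * Note that a VXLAN/GENEVE VNI is only 24 bits wide, hence the 8-bit
 * shift and the \xff\xff\xff\x00 mask used in that case below.
 */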
static int
ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
			 struct ice_adv_lkup_elem *list)
{
	struct ice_tc_flower_lyr_2_4_hdrs *hdr = &fltr->outer_headers;
	int i = 0;

	if (flags & ICE_TC_FLWR_FIELD_TENANT_ID) {
		u32 tenant_id;

		list[i].type = ice_proto_type_from_tunnel(fltr->tunnel_type);
		switch (fltr->tunnel_type) {
		case TNL_VXLAN:
		case TNL_GENEVE:
			tenant_id = be32_to_cpu(fltr->tenant_id) << 8;
			list[i].h_u.tnl_hdr.vni = cpu_to_be32(tenant_id);
			memcpy(&list[i].m_u.tnl_hdr.vni, "\xff\xff\xff\x00", 4);
			i++;
			break;
		case TNL_GRETAP:
			list[i].h_u.nvgre_hdr.tni_flow = fltr->tenant_id;
			memcpy(&list[i].m_u.nvgre_hdr.tni_flow,
			       "\xff\xff\xff\xff", 4);
			i++;
			break;
		case TNL_GTPC:
		case TNL_GTPU:
			list[i].h_u.gtp_hdr.teid = fltr->tenant_id;
			memcpy(&list[i].m_u.gtp_hdr.teid,
			       "\xff\xff\xff\xff", 4);
			i++;
			break;
		default:
			break;
		}
	}

	if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC) {
		list[i].type = ice_proto_type_from_mac(false);
		ether_addr_copy(list[i].h_u.eth_hdr.dst_addr,
				hdr->l2_key.dst_mac);
		ether_addr_copy(list[i].m_u.eth_hdr.dst_addr,
				hdr->l2_mask.dst_mac);
		i++;
	}

	if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS &&
	    (fltr->tunnel_type == TNL_GTPU || fltr->tunnel_type == TNL_GTPC)) {
		list[i].type = ice_proto_type_from_tunnel(fltr->tunnel_type);

		if (fltr->gtp_pdu_info_masks.pdu_type) {
			list[i].h_u.gtp_hdr.pdu_type =
				fltr->gtp_pdu_info_keys.pdu_type << 4;
			memcpy(&list[i].m_u.gtp_hdr.pdu_type, "\xf0", 1);
		}

		if (fltr->gtp_pdu_info_masks.qfi) {
			list[i].h_u.gtp_hdr.qfi = fltr->gtp_pdu_info_keys.qfi;
			memcpy(&list[i].m_u.gtp_hdr.qfi, "\x3f", 1);
		}

		i++;
	}

	if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV4)) {
		list[i].type = ice_proto_type_from_ipv4(false);

		if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_IPV4) {
			list[i].h_u.ipv4_hdr.src_addr = hdr->l3_key.src_ipv4;
			list[i].m_u.ipv4_hdr.src_addr = hdr->l3_mask.src_ipv4;
		}
		if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_IPV4) {
			list[i].h_u.ipv4_hdr.dst_addr = hdr->l3_key.dst_ipv4;
			list[i].m_u.ipv4_hdr.dst_addr = hdr->l3_mask.dst_ipv4;
		}
		i++;
	}

	if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV6)) {
		list[i].type = ice_proto_type_from_ipv6(false);

		if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_IPV6) {
			memcpy(&list[i].h_u.ipv6_hdr.src_addr,
			       &hdr->l3_key.src_ipv6_addr,
			       sizeof(hdr->l3_key.src_ipv6_addr));
			memcpy(&list[i].m_u.ipv6_hdr.src_addr,
			       &hdr->l3_mask.src_ipv6_addr,
			       sizeof(hdr->l3_mask.src_ipv6_addr));
		}
		if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_IPV6) {
			memcpy(&list[i].h_u.ipv6_hdr.dst_addr,
			       &hdr->l3_key.dst_ipv6_addr,
			       sizeof(hdr->l3_key.dst_ipv6_addr));
			memcpy(&list[i].m_u.ipv6_hdr.dst_addr,
			       &hdr->l3_mask.dst_ipv6_addr,
			       sizeof(hdr->l3_mask.dst_ipv6_addr));
		}
		i++;
	}

	if ((flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) &&
	    hdr->l3_key.ip_proto == IPPROTO_UDP) {
		list[i].type = ICE_UDP_OF;
		list[i].h_u.l4_hdr.dst_port = hdr->l4_key.dst_port;
		list[i].m_u.l4_hdr.dst_port = hdr->l4_mask.dst_port;
		i++;
	}

	return i;
}
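
/* Illustrative example (assumed iproute2 syntax): a tunnel match such as
 *   tc filter add dev vxlan0 ingress flower enc_key_id 42 \
 *       enc_dst_ip 10.0.0.1 ...
 * consumes two elements above: ICE_VXLAN for the VNI and ICE_IPV4_OFOS
 * for the outer destination IP.
 */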

/**
 * ice_tc_fill_rules - fill filter rules based on TC fltr
 * @hw: pointer to HW structure
 * @flags: tc flower field flags
 * @tc_fltr: pointer to TC flower filter
 * @list: list of advanced rule elements
 * @rule_info: pointer to information about rule
 * @l4_proto: pointer to information such as L4 proto type
 *
 * Fill ice_adv_lkup_elem list based on TC flower flags and
 * TC flower headers. This list should be used to add an
 * advanced filter in hardware.
 */
static int
ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
		  struct ice_tc_flower_fltr *tc_fltr,
		  struct ice_adv_lkup_elem *list,
		  struct ice_adv_rule_info *rule_info,
		  u16 *l4_proto)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
	bool inner = false;
	u16 vlan_tpid = 0;
	int i = 0;

	rule_info->vlan_type = vlan_tpid;

	rule_info->tun_type = ice_sw_type_from_tunnel(tc_fltr->tunnel_type);
	if (tc_fltr->tunnel_type != TNL_LAST) {
		i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list);

		headers = &tc_fltr->inner_headers;
		inner = true;
	}

	if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) {
		list[i].type = ice_proto_type_from_etype(inner);
		list[i].h_u.ethertype.ethtype_id = headers->l2_key.n_proto;
		list[i].m_u.ethertype.ethtype_id = headers->l2_mask.n_proto;
		i++;
	}

	if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
		     ICE_TC_FLWR_FIELD_SRC_MAC)) {
		struct ice_tc_l2_hdr *l2_key, *l2_mask;

		l2_key = &headers->l2_key;
		l2_mask = &headers->l2_mask;

		list[i].type = ice_proto_type_from_mac(inner);
		if (flags & ICE_TC_FLWR_FIELD_DST_MAC) {
			ether_addr_copy(list[i].h_u.eth_hdr.dst_addr,
					l2_key->dst_mac);
			ether_addr_copy(list[i].m_u.eth_hdr.dst_addr,
					l2_mask->dst_mac);
		}
		if (flags & ICE_TC_FLWR_FIELD_SRC_MAC) {
			ether_addr_copy(list[i].h_u.eth_hdr.src_addr,
					l2_key->src_mac);
			ether_addr_copy(list[i].m_u.eth_hdr.src_addr,
					l2_mask->src_mac);
		}
		i++;
	}
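
	/* With both an outer and an inner tag matched (QinQ), the outer
	 * tag is programmed as ICE_VLAN_EX and the inner tag as
	 * ICE_VLAN_IN; a lone tag is programmed as ICE_VLAN_OFOS.
	 */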
	/* copy VLAN info */
	if (flags & ICE_TC_FLWR_FIELD_VLAN) {
		vlan_tpid = be16_to_cpu(headers->vlan_hdr.vlan_tpid);
		rule_info->vlan_type =
			ice_check_supported_vlan_tpid(vlan_tpid);

		if (flags & ICE_TC_FLWR_FIELD_CVLAN)
			list[i].type = ICE_VLAN_EX;
		else
			list[i].type = ICE_VLAN_OFOS;
		list[i].h_u.vlan_hdr.vlan = headers->vlan_hdr.vlan_id;
		list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF);
		i++;
	}

	if (flags & ICE_TC_FLWR_FIELD_CVLAN) {
		list[i].type = ICE_VLAN_IN;
		list[i].h_u.vlan_hdr.vlan = headers->cvlan_hdr.vlan_id;
		list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF);
		i++;
	}

	/* copy L3 (IPv[4|6]: src, dest) address */
	if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 |
		     ICE_TC_FLWR_FIELD_SRC_IPV4)) {
		struct ice_tc_l3_hdr *l3_key, *l3_mask;

		list[i].type = ice_proto_type_from_ipv4(inner);
		l3_key = &headers->l3_key;
		l3_mask = &headers->l3_mask;
		if (flags & ICE_TC_FLWR_FIELD_DEST_IPV4) {
			list[i].h_u.ipv4_hdr.dst_addr = l3_key->dst_ipv4;
			list[i].m_u.ipv4_hdr.dst_addr = l3_mask->dst_ipv4;
		}
		if (flags & ICE_TC_FLWR_FIELD_SRC_IPV4) {
			list[i].h_u.ipv4_hdr.src_addr = l3_key->src_ipv4;
			list[i].m_u.ipv4_hdr.src_addr = l3_mask->src_ipv4;
		}
		i++;
	} else if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV6 |
			    ICE_TC_FLWR_FIELD_SRC_IPV6)) {
		struct ice_ipv6_hdr *ipv6_hdr, *ipv6_mask;
		struct ice_tc_l3_hdr *l3_key, *l3_mask;

		list[i].type = ice_proto_type_from_ipv6(inner);
		ipv6_hdr = &list[i].h_u.ipv6_hdr;
		ipv6_mask = &list[i].m_u.ipv6_hdr;
		l3_key = &headers->l3_key;
		l3_mask = &headers->l3_mask;

		if (flags & ICE_TC_FLWR_FIELD_DEST_IPV6) {
			memcpy(&ipv6_hdr->dst_addr, &l3_key->dst_ipv6_addr,
			       sizeof(l3_key->dst_ipv6_addr));
			memcpy(&ipv6_mask->dst_addr, &l3_mask->dst_ipv6_addr,
			       sizeof(l3_mask->dst_ipv6_addr));
		}
		if (flags & ICE_TC_FLWR_FIELD_SRC_IPV6) {
			memcpy(&ipv6_hdr->src_addr, &l3_key->src_ipv6_addr,
			       sizeof(l3_key->src_ipv6_addr));
			memcpy(&ipv6_mask->src_addr, &l3_mask->src_ipv6_addr,
			       sizeof(l3_mask->src_ipv6_addr));
		}
		i++;
	}

	/* copy L4 (src, dest) port */
	if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT |
		     ICE_TC_FLWR_FIELD_SRC_L4_PORT)) {
		struct ice_tc_l4_hdr *l4_key, *l4_mask;

		list[i].type = ice_proto_type_from_l4_port(headers->l3_key.ip_proto);
		l4_key = &headers->l4_key;
		l4_mask = &headers->l4_mask;

		if (flags & ICE_TC_FLWR_FIELD_DEST_L4_PORT) {
			list[i].h_u.l4_hdr.dst_port = l4_key->dst_port;
			list[i].m_u.l4_hdr.dst_port = l4_mask->dst_port;
		}
		if (flags & ICE_TC_FLWR_FIELD_SRC_L4_PORT) {
			list[i].h_u.l4_hdr.src_port = l4_key->src_port;
			list[i].m_u.l4_hdr.src_port = l4_mask->src_port;
		}
		i++;
	}

	return i;
}

/**
 * ice_tc_tun_get_type - get the tunnel type
 * @tunnel_dev: ptr to tunnel device
 *
 * This function detects the appropriate tunnel_type if the specified
 * device is a tunnel device such as VXLAN/GENEVE.
 */
static int ice_tc_tun_get_type(struct net_device *tunnel_dev)
{
	if (netif_is_vxlan(tunnel_dev))
		return TNL_VXLAN;
	if (netif_is_geneve(tunnel_dev))
		return TNL_GENEVE;
	if (netif_is_gretap(tunnel_dev) ||
	    netif_is_ip6gretap(tunnel_dev))
		return TNL_GRETAP;

	/* Assume GTP-U by default in case of GTP netdev.
	 * GTP-C may be selected later, based on enc_dst_port.
	 */
	if (netif_is_gtp(tunnel_dev))
		return TNL_GTPU;
	return TNL_LAST;
}

bool ice_is_tunnel_supported(struct net_device *dev)
{
	return ice_tc_tun_get_type(dev) != TNL_LAST;
}
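
/**
 * ice_eswitch_tc_parse_action - parse a TC action in switchdev mode
 * @fltr: pointer to TC flower filter structure
 * @act: action entry to parse
 *
 * Translate a single flow action into the filter's action and direction:
 * DROP maps to ICE_DROP_PACKET, and REDIRECT maps to ICE_FWD_TO_VSI
 * (ingress when the target is a port representor, egress when it is the
 * uplink or a supported tunnel device).
 */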
static int
ice_eswitch_tc_parse_action(struct ice_tc_flower_fltr *fltr,
			    struct flow_action_entry *act)
{
	struct ice_repr *repr;

	switch (act->id) {
	case FLOW_ACTION_DROP:
		fltr->action.fltr_act = ICE_DROP_PACKET;
		break;

	case FLOW_ACTION_REDIRECT:
		fltr->action.fltr_act = ICE_FWD_TO_VSI;

		if (ice_is_port_repr_netdev(act->dev)) {
			repr = ice_netdev_to_repr(act->dev);

			fltr->dest_vsi = repr->src_vsi;
			fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
		} else if (netif_is_ice(act->dev) ||
			   ice_is_tunnel_supported(act->dev)) {
			fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
		} else {
			NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported netdevice in switchdev mode");
			return -EINVAL;
		}

		break;

	default:
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action in switchdev mode");
		return -EINVAL;
	}

	return 0;
}
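
/**
 * ice_eswitch_add_tc_fltr - add a TC flower filter in switchdev mode
 * @vsi: pointer to the source VSI
 * @fltr: pointer to TC flower filter structure
 *
 * Build the lookup element list for the filter and program it as an
 * advanced switch rule. Egress traffic is always redirected to the
 * uplink VSI.
 */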
static int
ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
	struct ice_adv_rule_info rule_info = { 0 };
	struct ice_rule_query_data rule_added;
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_adv_lkup_elem *list;
	u32 flags = fltr->flags;
	int lkups_cnt;
	int ret;
	int i;

	if (!flags || (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT)) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported encap field(s)");
		return -EOPNOTSUPP;
	}

	lkups_cnt = ice_tc_count_lkups(flags, headers, fltr);
	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	i = ice_tc_fill_rules(hw, flags, fltr, list, &rule_info, NULL);
	if (i != lkups_cnt) {
		ret = -EINVAL;
		goto exit;
	}

	/* egress traffic is always redirected to the uplink */
	if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS)
		fltr->dest_vsi = vsi->back->switchdev.uplink_vsi;

	rule_info.sw_act.fltr_act = fltr->action.fltr_act;
	if (fltr->action.fltr_act != ICE_DROP_PACKET)
		rule_info.sw_act.vsi_handle = fltr->dest_vsi->idx;
	/* For now, make the priority the highest; it also becomes the
	 * priority of the recipe that gets created as a result of the
	 * new extraction sequence based on the input set.
	 * Priority 7 is the max value for a switch recipe; the higher
	 * the number, the earlier the switch rule is evaluated.
	 */
	rule_info.priority = 7;

	if (fltr->direction == ICE_ESWITCH_FLTR_INGRESS) {
		rule_info.sw_act.flag |= ICE_FLTR_RX;
		rule_info.sw_act.src = hw->pf_id;
		rule_info.rx = true;
	} else {
		rule_info.sw_act.flag |= ICE_FLTR_TX;
		rule_info.sw_act.src = vsi->idx;
		rule_info.rx = false;
		rule_info.flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
		rule_info.flags_info.act_valid = true;
	}

	/* specify the cookie as filter_rule_id */
	rule_info.fltr_rule_id = fltr->cookie;

	ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
	if (ret == -EEXIST) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because it already exists");
		ret = -EINVAL;
		goto exit;
	} else if (ret) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter due to error");
		goto exit;
	}

	/* store the output params, which are needed later for removing
	 * the advanced switch filter
	 */
	fltr->rid = rule_added.rid;
	fltr->rule_id = rule_added.rule_id;
	fltr->dest_id = rule_added.vsi_handle;

exit:
	kfree(list);
	return ret;
}
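
/* Illustrative switchdev usage (interface names assumed): redirecting
 * traffic from a VF port representor to the uplink, e.g.
 *   tc filter add dev pf0vf0_repr ingress flower dst_ip 10.0.0.2 \
 *       action mirred egress redirect dev eth0
 * is parsed by ice_eswitch_tc_parse_action() and programmed by
 * ice_eswitch_add_tc_fltr() above as a single advanced switch rule.
 */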

/**
 * ice_add_tc_flower_adv_fltr - add appropriate filter rules
 * @vsi: Pointer to VSI
 * @tc_fltr: Pointer to TC flower filter structure
 *
 * Add filter rules based on the filter parameters, using advanced
 * recipes supported by the OS package.
 */
static int
ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
			   struct ice_tc_flower_fltr *tc_fltr)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
	struct ice_adv_rule_info rule_info = {0};
	struct ice_rule_query_data rule_added;
	struct ice_adv_lkup_elem *list;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 flags = tc_fltr->flags;
	struct ice_vsi *ch_vsi;
	struct device *dev;
	u16 lkups_cnt = 0;
	u16 l4_proto = 0;
	int ret = 0;
	u16 i = 0;

	dev = ice_pf_to_dev(pf);
	if (ice_is_safe_mode(pf)) {
		NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter because driver is in safe mode");
		return -EOPNOTSUPP;
	}

	if (!flags || (flags & (ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 |
				ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
				ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 |
				ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
				ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT))) {
		NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unsupported encap field(s)");
		return -EOPNOTSUPP;
	}

	/* get the channel (aka ADQ VSI) */
	if (tc_fltr->dest_vsi)
		ch_vsi = tc_fltr->dest_vsi;
	else
		ch_vsi = vsi->tc_map_vsi[tc_fltr->action.tc_class];

	lkups_cnt = ice_tc_count_lkups(flags, headers, tc_fltr);
	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	i = ice_tc_fill_rules(hw, flags, tc_fltr, list, &rule_info, &l4_proto);
	if (i != lkups_cnt) {
		ret = -EINVAL;
		goto exit;
	}

	rule_info.sw_act.fltr_act = tc_fltr->action.fltr_act;
	if (tc_fltr->action.tc_class >= ICE_CHNL_START_TC) {
		if (!ch_vsi) {
			NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter because specified destination doesn't exist");
			ret = -EINVAL;
			goto exit;
		}

		rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
		rule_info.sw_act.vsi_handle = ch_vsi->idx;
		rule_info.priority = 7;
		rule_info.sw_act.src = hw->pf_id;
		rule_info.rx = true;
		dev_dbg(dev, "add switch rule for TC:%u vsi_idx:%u, lkups_cnt:%u\n",
			tc_fltr->action.tc_class,
			rule_info.sw_act.vsi_handle, lkups_cnt);
	} else {
		rule_info.sw_act.flag |= ICE_FLTR_TX;
		rule_info.sw_act.src = vsi->idx;
		rule_info.rx = false;
	}

	/* specify the cookie as filter_rule_id */
	rule_info.fltr_rule_id = tc_fltr->cookie;

	ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
	if (ret == -EEXIST) {
		NL_SET_ERR_MSG_MOD(tc_fltr->extack,
				   "Unable to add filter because it already exists");
		ret = -EINVAL;
		goto exit;
	} else if (ret) {
		NL_SET_ERR_MSG_MOD(tc_fltr->extack,
				   "Unable to add filter due to error");
		goto exit;
	}

	/* store the output params, which are needed later for removing
	 * the advanced switch filter
	 */
	tc_fltr->rid = rule_added.rid;
	tc_fltr->rule_id = rule_added.rule_id;
	if (tc_fltr->action.tc_class > 0 && ch_vsi) {
		/* For PF ADQ, VSI type is set as ICE_VSI_CHNL, and
		 * for PF ADQ filter, it is not yet set in tc_fltr,
		 * hence store the dest_vsi ptr in tc_fltr
		 */
		if (ch_vsi->type == ICE_VSI_CHNL)
			tc_fltr->dest_vsi = ch_vsi;
		/* keep track of advanced switch filter for
		 * destination VSI (channel VSI)
		 */
		ch_vsi->num_chnl_fltr++;
		/* in this case, dest_id is VSI handle (sw handle) */
		tc_fltr->dest_id = rule_added.vsi_handle;

		/* keeps track of channel filters for PF VSI */
		if (vsi->type == ICE_VSI_PF &&
		    (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
			      ICE_TC_FLWR_FIELD_ENC_DST_MAC)))
			pf->num_dmac_chnl_fltrs++;
	}
	dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x) for TC %u, rid %u, rule_id %u, vsi_idx %u\n",
		lkups_cnt, flags,
		tc_fltr->action.tc_class, rule_added.rid,
		rule_added.rule_id, rule_added.vsi_handle);
exit:
	kfree(list);
	return ret;
}

/**
 * ice_tc_set_ipv4 - Parse IPv4 addresses from TC flower filter
 * @match: Pointer to flow match structure
 * @fltr: Pointer to filter structure
 * @headers: inner or outer header fields
 * @is_encap: set true for tunnel IPv4 address
 */
static int
ice_tc_set_ipv4(struct flow_match_ipv4_addrs *match,
		struct ice_tc_flower_fltr *fltr,
		struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
{
	if (match->key->dst) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_IPV4;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV4;
		headers->l3_key.dst_ipv4 = match->key->dst;
		headers->l3_mask.dst_ipv4 = match->mask->dst;
	}
	if (match->key->src) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_IPV4;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV4;
		headers->l3_key.src_ipv4 = match->key->src;
		headers->l3_mask.src_ipv4 = match->mask->src;
	}
	return 0;
}

/**
 * ice_tc_set_ipv6 - Parse IPv6 addresses from TC flower filter
 * @match: Pointer to flow match structure
 * @fltr: Pointer to filter structure
 * @headers: inner or outer header fields
 * @is_encap: set true for tunnel IPv6 address
 */
static int
ice_tc_set_ipv6(struct flow_match_ipv6_addrs *match,
		struct ice_tc_flower_fltr *fltr,
		struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
{
	struct ice_tc_l3_hdr *l3_key, *l3_mask;

	/* src and dest IPv6 addresses should not be LOOPBACK
	 * (0:0:0:0:0:0:0:1), which can be represented as ::1
	 */
	if (ipv6_addr_loopback(&match->key->dst) ||
	    ipv6_addr_loopback(&match->key->src)) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Bad IPv6, addr is LOOPBACK");
		return -EINVAL;
	}
	/* if both src and dest IPv6 addresses are wildcards, error */
	if (ipv6_addr_any(&match->mask->dst) &&
	    ipv6_addr_any(&match->mask->src)) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Bad src/dest IPv6, addr is any");
		return -EINVAL;
	}
	if (!ipv6_addr_any(&match->mask->dst)) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_IPV6;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV6;
	}
	if (!ipv6_addr_any(&match->mask->src)) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_IPV6;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV6;
	}

	l3_key = &headers->l3_key;
	l3_mask = &headers->l3_mask;

	if (fltr->flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
			   ICE_TC_FLWR_FIELD_SRC_IPV6)) {
		memcpy(&l3_key->src_ipv6_addr, &match->key->src.s6_addr,
		       sizeof(match->key->src.s6_addr));
		memcpy(&l3_mask->src_ipv6_addr, &match->mask->src.s6_addr,
		       sizeof(match->mask->src.s6_addr));
	}
	if (fltr->flags & (ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 |
			   ICE_TC_FLWR_FIELD_DEST_IPV6)) {
		memcpy(&l3_key->dst_ipv6_addr, &match->key->dst.s6_addr,
		       sizeof(match->key->dst.s6_addr));
		memcpy(&l3_mask->dst_ipv6_addr, &match->mask->dst.s6_addr,
		       sizeof(match->mask->dst.s6_addr));
	}

	return 0;
}

/**
 * ice_tc_set_port - Parse ports from TC flower filter
 * @match: Flow match structure
 * @fltr: Pointer to filter structure
 * @headers: inner or outer header fields
 * @is_encap: set true for tunnel port
 */
static int
ice_tc_set_port(struct flow_match_ports match,
		struct ice_tc_flower_fltr *fltr,
		struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
{
	if (match.key->dst) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT;

		headers->l4_key.dst_port = match.key->dst;
		headers->l4_mask.dst_port = match.mask->dst;
	}
	if (match.key->src) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT;

		headers->l4_key.src_port = match.key->src;
		headers->l4_mask.src_port = match.mask->src;
	}
	return 0;
}
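
/**
 * ice_get_tunnel_device - find the tunnel device for a rule
 * @dev: device on which the filter is being added
 * @rule: flow rule to scan for redirect actions
 *
 * Return @dev itself if it is a supported tunnel device; otherwise
 * return the first redirect target in @rule that is one, or NULL when
 * the rule involves no tunnel device.
 */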
static struct net_device *
ice_get_tunnel_device(struct net_device *dev, struct flow_rule *rule)
{
	struct flow_action_entry *act;
	int i;

	if (ice_is_tunnel_supported(dev))
		return dev;

	flow_action_for_each(i, act, &rule->action) {
		if (act->id == FLOW_ACTION_REDIRECT &&
		    ice_is_tunnel_supported(act->dev))
			return act->dev;
	}

	return NULL;
}

/**
 * ice_parse_gtp_type - Sets GTP tunnel type to GTP-U or GTP-C
 * @match: Flow match structure
 * @fltr: Pointer to filter structure
 *
 * GTP-C/GTP-U is selected based on destination port number (enc_dst_port).
 * Before calling this function, fltr->tunnel_type should be set to TNL_GTPU,
 * therefore making GTP-U the default choice (when the destination port
 * number is not specified).
 */
static int
ice_parse_gtp_type(struct flow_match_ports match,
		   struct ice_tc_flower_fltr *fltr)
{
	u16 dst_port;

	if (match.key->dst) {
		dst_port = be16_to_cpu(match.key->dst);

		switch (dst_port) {
		case 2152:
			break;
		case 2123:
			fltr->tunnel_type = TNL_GTPC;
			break;
		default:
			NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported GTP port number");
			return -EINVAL;
		}
	}

	return 0;
}
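
/**
 * ice_parse_tunnel_attr - Parse tunnel attributes from TC flower filter
 * @dev: Pointer to tunnel device
 * @rule: Pointer to flow rule
 * @fltr: Pointer to filter structure
 *
 * Parse the encapsulation keys (keyid, IP addresses, ToS/TTL, ports and
 * tunnel options) into the outer headers of @fltr. For GTP, the
 * destination port selects between GTP-U and GTP-C instead of being
 * matched directly.
 */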
static int
ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
		      struct ice_tc_flower_fltr *fltr)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
	struct flow_match_control enc_control;

	fltr->tunnel_type = ice_tc_tun_get_type(dev);
	headers->l3_key.ip_proto = IPPROTO_UDP;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_match_enc_keyid enc_keyid;

		flow_rule_match_enc_keyid(rule, &enc_keyid);

		if (!enc_keyid.mask->keyid ||
		    enc_keyid.mask->keyid != cpu_to_be32(ICE_TC_FLOWER_MASK_32))
			return -EINVAL;

		fltr->flags |= ICE_TC_FLWR_FIELD_TENANT_ID;
		fltr->tenant_id = enc_keyid.key->keyid;
	}

	flow_rule_match_enc_control(rule, &enc_control);

	if (enc_control.key->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_enc_ipv4_addrs(rule, &match);
		if (ice_tc_set_ipv4(&match, fltr, headers, true))
			return -EINVAL;
	} else if (enc_control.key->addr_type ==
		   FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_enc_ipv6_addrs(rule, &match);
		if (ice_tc_set_ipv6(&match, fltr, headers, true))
			return -EINVAL;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
		struct flow_match_ip match;

		flow_rule_match_enc_ip(rule, &match);
		headers->l3_key.tos = match.key->tos;
		headers->l3_key.ttl = match.key->ttl;
		headers->l3_mask.tos = match.mask->tos;
		headers->l3_mask.ttl = match.mask->ttl;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS) &&
	    fltr->tunnel_type != TNL_VXLAN && fltr->tunnel_type != TNL_GENEVE) {
		struct flow_match_ports match;

		flow_rule_match_enc_ports(rule, &match);

		if (fltr->tunnel_type != TNL_GTPU) {
			if (ice_tc_set_port(match, fltr, headers, true))
				return -EINVAL;
		} else {
			if (ice_parse_gtp_type(match, fltr))
				return -EINVAL;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
		struct flow_match_enc_opts match;

		flow_rule_match_enc_opts(rule, &match);

		memcpy(&fltr->gtp_pdu_info_keys, &match.key->data[0],
		       sizeof(struct gtp_pdu_session_info));

		memcpy(&fltr->gtp_pdu_info_masks, &match.mask->data[0],
		       sizeof(struct gtp_pdu_session_info));

		fltr->flags |= ICE_TC_FLWR_FIELD_ENC_OPTS;
	}

	return 0;
}
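
/* Illustrative GTP match (assumed iproute2 syntax): selecting GTP-U by
 * port and matching PDU session options, e.g.
 *   tc filter add dev gtp0 ingress flower enc_key_id 1337 \
 *       enc_dst_port 2152 gtp_opts 1:8/ff:ff ...
 * populates fltr->gtp_pdu_info_keys/masks and sets
 * ICE_TC_FLWR_FIELD_ENC_OPTS in ice_parse_tunnel_attr() above.
 */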

/**
 * ice_parse_cls_flower - Parse TC flower filters provided by kernel
 * @filter_dev: Pointer to device on which filter is being added
 * @vsi: Pointer to the VSI
 * @f: Pointer to struct flow_cls_offload
 * @fltr: Pointer to filter structure
 */
static int
ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
		     struct flow_cls_offload *f,
		     struct ice_tc_flower_fltr *fltr)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
	struct flow_dissector *dissector;
	struct net_device *tunnel_dev;

	dissector = rule->match.dissector;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_CVLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS))) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported key used");
		return -EOPNOTSUPP;
	}

	tunnel_dev = ice_get_tunnel_device(filter_dev, rule);
	if (tunnel_dev) {
		int err;

		filter_dev = tunnel_dev;

		err = ice_parse_tunnel_attr(filter_dev, rule, fltr);
		if (err) {
			NL_SET_ERR_MSG_MOD(fltr->extack, "Failed to parse TC flower tunnel attributes");
			return err;
		}

		/* header pointers should point to the inner headers; the
		 * outer headers were already set by ice_parse_tunnel_attr
		 */
		headers = &fltr->inner_headers;
	} else if (dissector->used_keys &
		   (BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
		    BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
		    BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
		    BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Tunnel key used, but device isn't a tunnel");
		return -EOPNOTSUPP;
	} else {
		fltr->tunnel_type = TNL_LAST;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);

		n_proto_key = ntohs(match.key->n_proto);
		n_proto_mask = ntohs(match.mask->n_proto);

		if (n_proto_key == ETH_P_ALL || n_proto_key == 0 ||
		    fltr->tunnel_type == TNL_GTPU ||
		    fltr->tunnel_type == TNL_GTPC) {
			n_proto_key = 0;
			n_proto_mask = 0;
		} else {
			fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID;
		}

		headers->l2_key.n_proto = cpu_to_be16(n_proto_key);
		headers->l2_mask.n_proto = cpu_to_be16(n_proto_mask);
		headers->l3_key.ip_proto = match.key->ip_proto;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);

		if (!is_zero_ether_addr(match.key->dst)) {
			ether_addr_copy(headers->l2_key.dst_mac,
					match.key->dst);
			ether_addr_copy(headers->l2_mask.dst_mac,
					match.mask->dst);
			fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC;
		}

		if (!is_zero_ether_addr(match.key->src)) {
			ether_addr_copy(headers->l2_key.src_mac,
					match.key->src);
			ether_addr_copy(headers->l2_mask.src_mac,
					match.mask->src);
			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_MAC;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
	    is_vlan_dev(filter_dev)) {
		struct flow_dissector_key_vlan mask;
		struct flow_dissector_key_vlan key;
		struct flow_match_vlan match;

		if (is_vlan_dev(filter_dev)) {
			match.key = &key;
			match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
			match.key->vlan_priority = 0;
			match.mask = &mask;
			memset(match.mask, 0xff, sizeof(*match.mask));
			match.mask->vlan_priority = 0;
		} else {
			flow_rule_match_vlan(rule, &match);
		}

		if (match.mask->vlan_id) {
			if (match.mask->vlan_id == VLAN_VID_MASK) {
				fltr->flags |= ICE_TC_FLWR_FIELD_VLAN;
			} else {
				NL_SET_ERR_MSG_MOD(fltr->extack, "Bad VLAN mask");
				return -EINVAL;
			}
		}

		headers->vlan_hdr.vlan_id =
			cpu_to_be16(match.key->vlan_id & VLAN_VID_MASK);
		if (match.mask->vlan_priority)
			headers->vlan_hdr.vlan_prio = match.key->vlan_priority;
		if (match.mask->vlan_tpid)
			headers->vlan_hdr.vlan_tpid = match.key->vlan_tpid;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
		struct flow_match_vlan match;

		if (!ice_is_dvm_ena(&vsi->back->hw)) {
			NL_SET_ERR_MSG_MOD(fltr->extack, "Double VLAN mode is not enabled");
			return -EINVAL;
		}

		flow_rule_match_cvlan(rule, &match);

		if (match.mask->vlan_id) {
			if (match.mask->vlan_id == VLAN_VID_MASK) {
				fltr->flags |= ICE_TC_FLWR_FIELD_CVLAN;
			} else {
				NL_SET_ERR_MSG_MOD(fltr->extack,
						   "Bad CVLAN mask");
				return -EINVAL;
			}
		}

		headers->cvlan_hdr.vlan_id =
			cpu_to_be16(match.key->vlan_id & VLAN_VID_MASK);
		if (match.mask->vlan_priority)
			headers->cvlan_hdr.vlan_prio = match.key->vlan_priority;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);

		addr_type = match.key->addr_type;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		if (ice_tc_set_ipv4(&match, fltr, headers, false))
			return -EINVAL;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);
		if (ice_tc_set_ipv6(&match, fltr, headers, false))
			return -EINVAL;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		if (ice_tc_set_port(match, fltr, headers, false))
			return -EINVAL;
		switch (headers->l3_key.ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
			break;
		default:
			NL_SET_ERR_MSG_MOD(fltr->extack, "Only UDP and TCP transport are supported");
			return -EINVAL;
		}
	}
	return 0;
}

/**
 * ice_add_switch_fltr - Add TC flower filters
 * @vsi: Pointer to VSI
 * @fltr: Pointer to struct ice_tc_flower_fltr
 *
 * Add filter in HW switch block
 */
static int
ice_add_switch_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
{
	if (fltr->action.fltr_act == ICE_FWD_TO_QGRP)
		return -EOPNOTSUPP;

	if (ice_is_eswitch_mode_switchdev(vsi->back))
		return ice_eswitch_add_tc_fltr(vsi, fltr);

	return ice_add_tc_flower_adv_fltr(vsi, fltr);
}
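
/* Illustrative ADQ usage (assumed setup): after configuring mqprio
 * channels on the PF, traffic is steered to a traffic class with e.g.
 *   tc filter add dev eth0 protocol ip ingress prio 1 flower \
 *       dst_ip 192.168.1.10 ip_proto tcp dst_port 5001 skip_sw hw_tc 1
 * The classid carried in the offload request is translated to a TC
 * number by tc_classid_to_hwtc() in ice_handle_tclass_action() below.
 */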

/**
 * ice_handle_tclass_action - Support directing to a traffic class
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to TC flower offload structure
 * @fltr: Pointer to TC flower filter structure
 *
 * Support directing traffic to a traffic class
 */
static int
ice_handle_tclass_action(struct ice_vsi *vsi,
			 struct flow_cls_offload *cls_flower,
			 struct ice_tc_flower_fltr *fltr)
{
	int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
	struct ice_vsi *main_vsi;

	if (tc < 0) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because specified destination is invalid");
		return -EINVAL;
	}
	if (!tc) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because of invalid destination");
		return -EINVAL;
	}

	if (!(vsi->all_enatc & BIT(tc))) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because of non-existent destination");
		return -EINVAL;
	}

	/* Redirect to a TC class or Queue Group */
	main_vsi = ice_get_main_vsi(vsi->back);
	if (!main_vsi || !main_vsi->netdev) {
		NL_SET_ERR_MSG_MOD(fltr->extack,
				   "Unable to add filter because of invalid netdevice");
		return -EINVAL;
	}

	if ((fltr->flags & ICE_TC_FLWR_FIELD_TENANT_ID) &&
	    (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
			    ICE_TC_FLWR_FIELD_SRC_MAC))) {
		NL_SET_ERR_MSG_MOD(fltr->extack,
				   "Unable to add filter because filter using tunnel key and inner MAC is unsupported combination");
		return -EOPNOTSUPP;
	}

	/* For ADQ, the filter must include the dest MAC address, otherwise
	 * unwanted packets with unrelated MAC addresses get delivered to ADQ
	 * VSIs as long as the remaining filter criteria are satisfied, such
	 * as dest IP address and dest/src L4 port. The code below handles
	 * three cases:
	 * 1. For non-tunnel, if the user specified MAC addresses, use them
	 *    (i.e. this code does nothing).
	 * 2. For non-tunnel, if the user didn't specify a MAC address, add
	 *    an implicit dest MAC: the lower netdev's active unicast MAC
	 *    address.
	 * 3. For tunnel, as of now the TC filter through the flower
	 *    classifier has no provision for the user to specify the outer
	 *    DMAC, hence the driver implicitly adds the outer dest MAC: the
	 *    lower netdev's active unicast MAC address.
	 */
	if (fltr->tunnel_type != TNL_LAST &&
	    !(fltr->flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC))
		fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DST_MAC;

	if (fltr->tunnel_type == TNL_LAST &&
	    !(fltr->flags & ICE_TC_FLWR_FIELD_DST_MAC))
		fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC;

	if (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
			   ICE_TC_FLWR_FIELD_ENC_DST_MAC)) {
		ether_addr_copy(fltr->outer_headers.l2_key.dst_mac,
				vsi->netdev->dev_addr);
		eth_broadcast_addr(fltr->outer_headers.l2_mask.dst_mac);
	}

	/* validate the specified dest MAC address: make sure it either
	 * belongs to the lower netdev or to one of its MACVLANs. MACVLAN
	 * MAC addresses are added as unicast MAC filters destined to the
	 * main VSI.
	 */
	if (!ice_mac_fltr_exist(&main_vsi->back->hw,
				fltr->outer_headers.l2_key.dst_mac,
				main_vsi->idx)) {
		NL_SET_ERR_MSG_MOD(fltr->extack,
				   "Unable to add filter because legacy MAC filter for specified destination doesn't exist");
		return -EINVAL;
	}

	/* Make sure the VLAN is already added to the main VSI before
	 * allowing ADQ to add a VLAN-based filter such as MAC + VLAN +
	 * L4 port.
	 */
	if (fltr->flags & ICE_TC_FLWR_FIELD_VLAN) {
		u16 vlan_id = be16_to_cpu(fltr->outer_headers.vlan_hdr.vlan_id);

		if (!ice_vlan_fltr_exist(&main_vsi->back->hw, vlan_id,
					 main_vsi->idx)) {
			NL_SET_ERR_MSG_MOD(fltr->extack,
					   "Unable to add filter because legacy VLAN filter for specified destination doesn't exist");
			return -EINVAL;
		}
	}
	fltr->action.fltr_act = ICE_FWD_TO_VSI;
	fltr->action.tc_class = tc;

	return 0;
}

/**
 * ice_parse_tc_flower_actions - Parse the actions for a TC filter
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to TC flower offload structure
 * @fltr: Pointer to TC flower filter structure
 *
 * Parse the actions for a TC filter
 */
static int
ice_parse_tc_flower_actions(struct ice_vsi *vsi,
			    struct flow_cls_offload *cls_flower,
			    struct ice_tc_flower_fltr *fltr)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower);
	struct flow_action *flow_action = &rule->action;
	struct flow_action_entry *act;
	int i;

	if (cls_flower->classid)
		return ice_handle_tclass_action(vsi, cls_flower, fltr);

	if (!flow_action_has_entries(flow_action))
		return -EINVAL;

	flow_action_for_each(i, act, flow_action) {
		if (ice_is_eswitch_mode_switchdev(vsi->back)) {
			int err = ice_eswitch_tc_parse_action(fltr, act);

			if (err)
				return err;
			continue;
		}
		/* Allow only one rule per filter */

		/* Drop action */
		if (act->id == FLOW_ACTION_DROP) {
			NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action DROP");
			return -EINVAL;
		}
		fltr->action.fltr_act = ICE_FWD_TO_VSI;
	}
	return 0;
}

/**
 * ice_del_tc_fltr - deletes a filter from HW table
 * @vsi: Pointer to VSI
 * @fltr: Pointer to struct ice_tc_flower_fltr
 *
 * This function deletes a filter from HW table and manages book-keeping
 */
static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
{
	struct ice_rule_query_data rule_rem;
	struct ice_pf *pf = vsi->back;
	int err;

	rule_rem.rid = fltr->rid;
	rule_rem.rule_id = fltr->rule_id;
	rule_rem.vsi_handle = fltr->dest_id;
	err = ice_rem_adv_rule_by_id(&pf->hw, &rule_rem);
	if (err) {
		if (err == -ENOENT) {
			NL_SET_ERR_MSG_MOD(fltr->extack, "Filter does not exist");
			return -ENOENT;
		}
		NL_SET_ERR_MSG_MOD(fltr->extack, "Failed to delete TC flower filter");
		return -EIO;
	}

	/* update advanced switch filter count for destination
	 * VSI if filter destination was VSI
	 */
	if (fltr->dest_vsi) {
		if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
			fltr->dest_vsi->num_chnl_fltr--;

			/* keeps track of channel filters for PF VSI */
			if (vsi->type == ICE_VSI_PF &&
			    (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
					    ICE_TC_FLWR_FIELD_ENC_DST_MAC)))
				pf->num_dmac_chnl_fltrs--;
		}
	}
	return 0;
}

/**
 * ice_add_tc_fltr - adds a TC flower filter
 * @netdev: Pointer to netdev
 * @vsi: Pointer to VSI
 * @f: Pointer to flower offload structure
 * @__fltr: Pointer to struct ice_tc_flower_fltr
 *
 * This function parses TC-flower input fields, parses action,
 * and adds a filter.
 */
static int
ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi,
		struct flow_cls_offload *f,
		struct ice_tc_flower_fltr **__fltr)
{
	struct ice_tc_flower_fltr *fltr;
	int err;

	/* by default, set output to be INVALID */
	*__fltr = NULL;

	fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
	if (!fltr)
		return -ENOMEM;

	fltr->cookie = f->cookie;
	fltr->extack = f->common.extack;
	fltr->src_vsi = vsi;
	INIT_HLIST_NODE(&fltr->tc_flower_node);

	err = ice_parse_cls_flower(netdev, vsi, f, fltr);
	if (err < 0)
		goto err;

	err = ice_parse_tc_flower_actions(vsi, f, fltr);
	if (err < 0)
		goto err;

	err = ice_add_switch_fltr(vsi, fltr);
	if (err < 0)
		goto err;

	/* return the newly created filter */
	*__fltr = fltr;

	return 0;
err:
	kfree(fltr);
	return err;
}

/**
 * ice_find_tc_flower_fltr - Find the TC flower filter in the list
 * @pf: Pointer to PF
 * @cookie: filter specific cookie
 */
static struct ice_tc_flower_fltr *
ice_find_tc_flower_fltr(struct ice_pf *pf, unsigned long cookie)
{
	struct ice_tc_flower_fltr *fltr;

	hlist_for_each_entry(fltr, &pf->tc_flower_fltr_list, tc_flower_node)
		if (cookie == fltr->cookie)
			return fltr;

	return NULL;
}

/**
 * ice_add_cls_flower - add TC flower filters
 * @netdev: Pointer to filter device
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to flower offload structure
 */
int
ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
		   struct flow_cls_offload *cls_flower)
{
	struct netlink_ext_ack *extack = cls_flower->common.extack;
	struct net_device *vsi_netdev = vsi->netdev;
	struct ice_tc_flower_fltr *fltr;
	struct ice_pf *pf = vsi->back;
	int err;

	if (ice_is_reset_in_progress(pf->state))
		return -EBUSY;
	if (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
		return -EINVAL;

	if (ice_is_port_repr_netdev(netdev))
		vsi_netdev = netdev;

	if (!(vsi_netdev->features & NETIF_F_HW_TC) &&
	    !test_bit(ICE_FLAG_CLS_FLOWER, pf->flags)) {
		/* Based on TC indirect notifications from the kernel, all
		 * ice devices get an instance of the rule from the
		 * higher-level device. Avoid triggering an explicit error
		 * in this case.
		 */
		if (netdev == vsi_netdev)
			NL_SET_ERR_MSG_MOD(extack, "can't apply TC flower filters, turn ON hw-tc-offload and try again");
		return -EINVAL;
	}

	/* avoid duplicate entries; if one exists, return an error */
	fltr = ice_find_tc_flower_fltr(pf, cls_flower->cookie);
	if (fltr) {
		NL_SET_ERR_MSG_MOD(extack, "filter cookie already exists, ignoring");
		return -EEXIST;
	}

	/* prep and add TC-flower filter in HW */
	err = ice_add_tc_fltr(netdev, vsi, cls_flower, &fltr);
	if (err)
		return err;

	/* add filter into an ordered list */
	hlist_add_head(&fltr->tc_flower_node, &pf->tc_flower_fltr_list);
	return 0;
}

/**
 * ice_del_cls_flower - delete TC flower filters
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to struct flow_cls_offload
 */
int
ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower)
{
	struct ice_tc_flower_fltr *fltr;
	struct ice_pf *pf = vsi->back;
	int err;

	/* find filter */
	fltr = ice_find_tc_flower_fltr(pf, cls_flower->cookie);
	if (!fltr) {
		if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) &&
		    hlist_empty(&pf->tc_flower_fltr_list))
			return 0;

		NL_SET_ERR_MSG_MOD(cls_flower->common.extack, "failed to delete TC flower filter because unable to find it");
		return -EINVAL;
	}

	fltr->extack = cls_flower->common.extack;
	/* delete filter from HW */
	err = ice_del_tc_fltr(vsi, fltr);
	if (err)
		return err;

	/* delete filter from an ordered list */
	hlist_del(&fltr->tc_flower_node);

	/* free the filter node */
	kfree(fltr);

	return 0;
}

/**
 * ice_replay_tc_fltrs - replay TC filters
 * @pf: pointer to PF struct
 */
void ice_replay_tc_fltrs(struct ice_pf *pf)
{
	struct ice_tc_flower_fltr *fltr;
	struct hlist_node *node;

	hlist_for_each_entry_safe(fltr, node,
				  &pf->tc_flower_fltr_list,
				  tc_flower_node) {
		fltr->extack = NULL;
		ice_add_switch_fltr(fltr->src_vsi, fltr);
	}
}