// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019-2021, Intel Corporation. */

#include "ice.h"
#include "ice_tc_lib.h"
#include "ice_fltr.h"
#include "ice_lib.h"
#include "ice_protocol_type.h"

/**
 * ice_tc_count_lkups - determine lookup count for switch filter
 * @flags: TC-flower flags
 * @headers: Pointer to TC flower filter header structure
 * @fltr: Pointer to outer TC filter structure
 *
 * Determine lookup count based on TC flower input for switch filter.
 */
static int
ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
		   struct ice_tc_flower_fltr *fltr)
{
	int lkups_cnt = 0;

	if (flags & ICE_TC_FLWR_FIELD_TENANT_ID)
		lkups_cnt++;

	if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC)
		lkups_cnt++;

	if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS)
		lkups_cnt++;

	if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 |
		     ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV6))
		lkups_cnt++;

	if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT)
		lkups_cnt++;

	if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID)
		lkups_cnt++;

	/* are MAC fields specified? */
	if (flags & (ICE_TC_FLWR_FIELD_DST_MAC | ICE_TC_FLWR_FIELD_SRC_MAC))
		lkups_cnt++;

	/* is VLAN specified? */
	if (flags & ICE_TC_FLWR_FIELD_VLAN)
		lkups_cnt++;

	/* is CVLAN specified? */
	if (flags & ICE_TC_FLWR_FIELD_CVLAN)
		lkups_cnt++;

	/* are PPPoE options specified? */
	if (flags & (ICE_TC_FLWR_FIELD_PPPOE_SESSID |
		     ICE_TC_FLWR_FIELD_PPP_PROTO))
		lkups_cnt++;

	/* are IPv[4|6] fields specified? */
	if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 | ICE_TC_FLWR_FIELD_SRC_IPV4 |
		     ICE_TC_FLWR_FIELD_DEST_IPV6 | ICE_TC_FLWR_FIELD_SRC_IPV6))
		lkups_cnt++;

	/* is L4 (TCP/UDP/any other L4 protocol fields) specified? */
	if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT |
		     ICE_TC_FLWR_FIELD_SRC_L4_PORT))
		lkups_cnt++;

	return lkups_cnt;
}
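
/* Example (illustrative): a rule matching dst_mac, src/dst IPv4 and a TCP
 * dst port sets ICE_TC_FLWR_FIELD_DST_MAC, the IPv4 flags and
 * ICE_TC_FLWR_FIELD_DEST_L4_PORT, so ice_tc_count_lkups() returns 3;
 * grouped fields (both IPv4 addresses, both L4 ports) share one lookup.
 */

/* Helpers mapping inner/outer placement and tunnel type to the switch
 * protocol IDs used in lookup elements.
 */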
static enum ice_protocol_type ice_proto_type_from_mac(bool inner)
{
	return inner ? ICE_MAC_IL : ICE_MAC_OFOS;
}

static enum ice_protocol_type ice_proto_type_from_etype(bool inner)
{
	return inner ? ICE_ETYPE_IL : ICE_ETYPE_OL;
}

static enum ice_protocol_type ice_proto_type_from_ipv4(bool inner)
{
	return inner ? ICE_IPV4_IL : ICE_IPV4_OFOS;
}

static enum ice_protocol_type ice_proto_type_from_ipv6(bool inner)
{
	return inner ? ICE_IPV6_IL : ICE_IPV6_OFOS;
}

static enum ice_protocol_type ice_proto_type_from_l4_port(u16 ip_proto)
{
	switch (ip_proto) {
	case IPPROTO_TCP:
		return ICE_TCP_IL;
	case IPPROTO_UDP:
		return ICE_UDP_ILOS;
	}

	return 0;
}

static enum ice_protocol_type
ice_proto_type_from_tunnel(enum ice_tunnel_type type)
{
	switch (type) {
	case TNL_VXLAN:
		return ICE_VXLAN;
	case TNL_GENEVE:
		return ICE_GENEVE;
	case TNL_GRETAP:
		return ICE_NVGRE;
	case TNL_GTPU:
		/* NO_PAY profiles will not work with GTP-U */
		return ICE_GTP;
	case TNL_GTPC:
		return ICE_GTP_NO_PAY;
	default:
		return 0;
	}
}

static enum ice_sw_tunnel_type
ice_sw_type_from_tunnel(enum ice_tunnel_type type)
{
	switch (type) {
	case TNL_VXLAN:
		return ICE_SW_TUN_VXLAN;
	case TNL_GENEVE:
		return ICE_SW_TUN_GENEVE;
	case TNL_GRETAP:
		return ICE_SW_TUN_NVGRE;
	case TNL_GTPU:
		return ICE_SW_TUN_GTPU;
	case TNL_GTPC:
		return ICE_SW_TUN_GTPC;
	default:
		return ICE_NON_TUN;
	}
}

static u16 ice_check_supported_vlan_tpid(u16 vlan_tpid)
{
	switch (vlan_tpid) {
	case ETH_P_8021Q:
	case ETH_P_8021AD:
	case ETH_P_QINQ1:
		return vlan_tpid;
	default:
		return 0;
	}
}
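
/**
 * ice_tc_fill_tunnel_outer - fill tunnel/outer lookup elements
 * @flags: TC-flower flags
 * @fltr: Pointer to TC flower filter structure
 * @list: list of advanced lookup elements to fill
 *
 * Fill the outer (encap) portion of the lookup list from the filter's
 * outer headers and tunnel fields. Returns the number of elements filled.
 */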
static int
ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
			 struct ice_adv_lkup_elem *list)
{
	struct ice_tc_flower_lyr_2_4_hdrs *hdr = &fltr->outer_headers;
	int i = 0;

	if (flags & ICE_TC_FLWR_FIELD_TENANT_ID) {
		u32 tenant_id;

		list[i].type = ice_proto_type_from_tunnel(fltr->tunnel_type);
		switch (fltr->tunnel_type) {
		case TNL_VXLAN:
		case TNL_GENEVE:
			tenant_id = be32_to_cpu(fltr->tenant_id) << 8;
			list[i].h_u.tnl_hdr.vni = cpu_to_be32(tenant_id);
			memcpy(&list[i].m_u.tnl_hdr.vni, "\xff\xff\xff\x00", 4);
			i++;
			break;
		case TNL_GRETAP:
			list[i].h_u.nvgre_hdr.tni_flow = fltr->tenant_id;
			memcpy(&list[i].m_u.nvgre_hdr.tni_flow,
			       "\xff\xff\xff\xff", 4);
			i++;
			break;
		case TNL_GTPC:
		case TNL_GTPU:
			list[i].h_u.gtp_hdr.teid = fltr->tenant_id;
			memcpy(&list[i].m_u.gtp_hdr.teid,
			       "\xff\xff\xff\xff", 4);
			i++;
			break;
		default:
			break;
		}
	}

	if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC) {
		list[i].type = ice_proto_type_from_mac(false);
		ether_addr_copy(list[i].h_u.eth_hdr.dst_addr,
				hdr->l2_key.dst_mac);
		ether_addr_copy(list[i].m_u.eth_hdr.dst_addr,
				hdr->l2_mask.dst_mac);
		i++;
	}

	if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS &&
	    (fltr->tunnel_type == TNL_GTPU || fltr->tunnel_type == TNL_GTPC)) {
		list[i].type = ice_proto_type_from_tunnel(fltr->tunnel_type);

		if (fltr->gtp_pdu_info_masks.pdu_type) {
			list[i].h_u.gtp_hdr.pdu_type =
				fltr->gtp_pdu_info_keys.pdu_type << 4;
			memcpy(&list[i].m_u.gtp_hdr.pdu_type, "\xf0", 1);
		}

		if (fltr->gtp_pdu_info_masks.qfi) {
			list[i].h_u.gtp_hdr.qfi = fltr->gtp_pdu_info_keys.qfi;
			memcpy(&list[i].m_u.gtp_hdr.qfi, "\x3f", 1);
		}

		i++;
	}

	if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV4)) {
		list[i].type = ice_proto_type_from_ipv4(false);

		if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_IPV4) {
			list[i].h_u.ipv4_hdr.src_addr = hdr->l3_key.src_ipv4;
			list[i].m_u.ipv4_hdr.src_addr = hdr->l3_mask.src_ipv4;
		}
		if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_IPV4) {
			list[i].h_u.ipv4_hdr.dst_addr = hdr->l3_key.dst_ipv4;
			list[i].m_u.ipv4_hdr.dst_addr = hdr->l3_mask.dst_ipv4;
		}
		i++;
	}

	if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV6)) {
		list[i].type = ice_proto_type_from_ipv6(false);

		if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_IPV6) {
			memcpy(&list[i].h_u.ipv6_hdr.src_addr,
			       &hdr->l3_key.src_ipv6_addr,
			       sizeof(hdr->l3_key.src_ipv6_addr));
			memcpy(&list[i].m_u.ipv6_hdr.src_addr,
			       &hdr->l3_mask.src_ipv6_addr,
			       sizeof(hdr->l3_mask.src_ipv6_addr));
		}
		if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_IPV6) {
			memcpy(&list[i].h_u.ipv6_hdr.dst_addr,
			       &hdr->l3_key.dst_ipv6_addr,
			       sizeof(hdr->l3_key.dst_ipv6_addr));
			memcpy(&list[i].m_u.ipv6_hdr.dst_addr,
			       &hdr->l3_mask.dst_ipv6_addr,
			       sizeof(hdr->l3_mask.dst_ipv6_addr));
		}
		i++;
	}

	if ((flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) &&
	    hdr->l3_key.ip_proto == IPPROTO_UDP) {
		list[i].type = ICE_UDP_OF;
		list[i].h_u.l4_hdr.dst_port = hdr->l4_key.dst_port;
		list[i].m_u.l4_hdr.dst_port = hdr->l4_mask.dst_port;
		i++;
	}

	return i;
}
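
/* Note: the order and grouping of lookup elements filled here and in
 * ice_tc_fill_rules() must mirror ice_tc_count_lkups(); both callers
 * below verify that the fill count matches the allocated lkups_cnt.
 */
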
/**
 * ice_tc_fill_rules - fill filter rules based on TC fltr
 * @hw: pointer to HW structure
 * @flags: tc flower field flags
 * @tc_fltr: pointer to TC flower filter
 * @list: list of advanced rule elements
 * @rule_info: pointer to information about rule
 * @l4_proto: pointer to information such as L4 proto type
 *
 * Fill ice_adv_lkup_elem list based on TC flower flags and
 * TC flower headers. This list should be used to add an
 * advanced filter in hardware.
 */
static int
ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
		  struct ice_tc_flower_fltr *tc_fltr,
		  struct ice_adv_lkup_elem *list,
		  struct ice_adv_rule_info *rule_info,
		  u16 *l4_proto)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
	bool inner = false;
	u16 vlan_tpid = 0;
	int i = 0;

	rule_info->vlan_type = vlan_tpid;

	rule_info->tun_type = ice_sw_type_from_tunnel(tc_fltr->tunnel_type);
	if (tc_fltr->tunnel_type != TNL_LAST) {
		i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list);

		headers = &tc_fltr->inner_headers;
		inner = true;
	}

	if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) {
		list[i].type = ice_proto_type_from_etype(inner);
		list[i].h_u.ethertype.ethtype_id = headers->l2_key.n_proto;
		list[i].m_u.ethertype.ethtype_id = headers->l2_mask.n_proto;
		i++;
	}

	if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
		     ICE_TC_FLWR_FIELD_SRC_MAC)) {
		struct ice_tc_l2_hdr *l2_key, *l2_mask;

		l2_key = &headers->l2_key;
		l2_mask = &headers->l2_mask;

		list[i].type = ice_proto_type_from_mac(inner);
		if (flags & ICE_TC_FLWR_FIELD_DST_MAC) {
			ether_addr_copy(list[i].h_u.eth_hdr.dst_addr,
					l2_key->dst_mac);
			ether_addr_copy(list[i].m_u.eth_hdr.dst_addr,
					l2_mask->dst_mac);
		}
		if (flags & ICE_TC_FLWR_FIELD_SRC_MAC) {
			ether_addr_copy(list[i].h_u.eth_hdr.src_addr,
					l2_key->src_mac);
			ether_addr_copy(list[i].m_u.eth_hdr.src_addr,
					l2_mask->src_mac);
		}
		i++;
	}

	/* copy VLAN info */
	if (flags & ICE_TC_FLWR_FIELD_VLAN) {
		vlan_tpid = be16_to_cpu(headers->vlan_hdr.vlan_tpid);
		rule_info->vlan_type =
			ice_check_supported_vlan_tpid(vlan_tpid);

		if (flags & ICE_TC_FLWR_FIELD_CVLAN)
			list[i].type = ICE_VLAN_EX;
		else
			list[i].type = ICE_VLAN_OFOS;
		list[i].h_u.vlan_hdr.vlan = headers->vlan_hdr.vlan_id;
		list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF);
		i++;
	}

	if (flags & ICE_TC_FLWR_FIELD_CVLAN) {
		list[i].type = ICE_VLAN_IN;
		list[i].h_u.vlan_hdr.vlan = headers->cvlan_hdr.vlan_id;
		list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF);
		i++;
	}

	if (flags & (ICE_TC_FLWR_FIELD_PPPOE_SESSID |
		     ICE_TC_FLWR_FIELD_PPP_PROTO)) {
		struct ice_pppoe_hdr *vals, *masks;

		vals = &list[i].h_u.pppoe_hdr;
		masks = &list[i].m_u.pppoe_hdr;

		list[i].type = ICE_PPPOE;

		if (flags & ICE_TC_FLWR_FIELD_PPPOE_SESSID) {
			vals->session_id = headers->pppoe_hdr.session_id;
			masks->session_id = cpu_to_be16(0xFFFF);
		}

		if (flags & ICE_TC_FLWR_FIELD_PPP_PROTO) {
			vals->ppp_prot_id = headers->pppoe_hdr.ppp_proto;
			masks->ppp_prot_id = cpu_to_be16(0xFFFF);
		}

		i++;
	}

	/* copy L3 (IPv[4|6]: src, dest) address */
	if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 |
		     ICE_TC_FLWR_FIELD_SRC_IPV4)) {
		struct ice_tc_l3_hdr *l3_key, *l3_mask;

		list[i].type = ice_proto_type_from_ipv4(inner);
		l3_key = &headers->l3_key;
		l3_mask = &headers->l3_mask;
		if (flags & ICE_TC_FLWR_FIELD_DEST_IPV4) {
			list[i].h_u.ipv4_hdr.dst_addr = l3_key->dst_ipv4;
			list[i].m_u.ipv4_hdr.dst_addr = l3_mask->dst_ipv4;
		}
		if (flags & ICE_TC_FLWR_FIELD_SRC_IPV4) {
			list[i].h_u.ipv4_hdr.src_addr = l3_key->src_ipv4;
			list[i].m_u.ipv4_hdr.src_addr = l3_mask->src_ipv4;
		}
		i++;
	} else if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV6 |
			    ICE_TC_FLWR_FIELD_SRC_IPV6)) {
		struct ice_ipv6_hdr *ipv6_hdr, *ipv6_mask;
		struct ice_tc_l3_hdr *l3_key, *l3_mask;

		list[i].type = ice_proto_type_from_ipv6(inner);
		ipv6_hdr = &list[i].h_u.ipv6_hdr;
		ipv6_mask = &list[i].m_u.ipv6_hdr;
		l3_key = &headers->l3_key;
		l3_mask = &headers->l3_mask;

		if (flags & ICE_TC_FLWR_FIELD_DEST_IPV6) {
			memcpy(&ipv6_hdr->dst_addr, &l3_key->dst_ipv6_addr,
			       sizeof(l3_key->dst_ipv6_addr));
			memcpy(&ipv6_mask->dst_addr, &l3_mask->dst_ipv6_addr,
			       sizeof(l3_mask->dst_ipv6_addr));
		}
		if (flags & ICE_TC_FLWR_FIELD_SRC_IPV6) {
			memcpy(&ipv6_hdr->src_addr, &l3_key->src_ipv6_addr,
			       sizeof(l3_key->src_ipv6_addr));
			memcpy(&ipv6_mask->src_addr, &l3_mask->src_ipv6_addr,
			       sizeof(l3_mask->src_ipv6_addr));
		}
		i++;
	}

	/* copy L4 (src, dest) port */
	if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT |
		     ICE_TC_FLWR_FIELD_SRC_L4_PORT)) {
		struct ice_tc_l4_hdr *l4_key, *l4_mask;

		list[i].type = ice_proto_type_from_l4_port(headers->l3_key.ip_proto);
		l4_key = &headers->l4_key;
		l4_mask = &headers->l4_mask;

		if (flags & ICE_TC_FLWR_FIELD_DEST_L4_PORT) {
			list[i].h_u.l4_hdr.dst_port = l4_key->dst_port;
			list[i].m_u.l4_hdr.dst_port = l4_mask->dst_port;
		}
		if (flags & ICE_TC_FLWR_FIELD_SRC_L4_PORT) {
			list[i].h_u.l4_hdr.src_port = l4_key->src_port;
			list[i].m_u.l4_hdr.src_port = l4_mask->src_port;
		}
		i++;
	}

	return i;
}

/**
 * ice_tc_tun_get_type - get the tunnel type
 * @tunnel_dev: ptr to tunnel device
 *
 * This function returns the appropriate tunnel type if the specified
 * device is a tunnel device such as VXLAN/Geneve.
 */
static int ice_tc_tun_get_type(struct net_device *tunnel_dev)
{
	if (netif_is_vxlan(tunnel_dev))
		return TNL_VXLAN;
	if (netif_is_geneve(tunnel_dev))
		return TNL_GENEVE;
	if (netif_is_gretap(tunnel_dev) ||
	    netif_is_ip6gretap(tunnel_dev))
		return TNL_GRETAP;

	/* Assume GTP-U by default in case of GTP netdev.
	 * GTP-C may be selected later, based on enc_dst_port.
	 */
	if (netif_is_gtp(tunnel_dev))
		return TNL_GTPU;
	return TNL_LAST;
}
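
/**
 * ice_is_tunnel_supported - check whether a netdev is a supported tunnel
 * @dev: pointer to the netdevice
 */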
bool ice_is_tunnel_supported(struct net_device *dev)
{
	return ice_tc_tun_get_type(dev) != TNL_LAST;
}
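
/**
 * ice_eswitch_tc_parse_action - parse a TC action in switchdev mode
 * @fltr: Pointer to TC flower filter structure
 * @act: Pointer to the flow action entry to parse
 *
 * Translate a drop or redirect action into the filter's action fields.
 */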
static int
ice_eswitch_tc_parse_action(struct ice_tc_flower_fltr *fltr,
			    struct flow_action_entry *act)
{
	struct ice_repr *repr;

	switch (act->id) {
	case FLOW_ACTION_DROP:
		fltr->action.fltr_act = ICE_DROP_PACKET;
		break;

	case FLOW_ACTION_REDIRECT:
		fltr->action.fltr_act = ICE_FWD_TO_VSI;

		if (ice_is_port_repr_netdev(act->dev)) {
			repr = ice_netdev_to_repr(act->dev);

			fltr->dest_vsi = repr->src_vsi;
			fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
		} else if (netif_is_ice(act->dev) ||
			   ice_is_tunnel_supported(act->dev)) {
			fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
		} else {
			NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported netdevice in switchdev mode");
			return -EINVAL;
		}

		break;

	default:
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action in switchdev mode");
		return -EINVAL;
	}

	return 0;
}
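
/**
 * ice_eswitch_add_tc_fltr - add a TC flower filter in switchdev mode
 * @vsi: Pointer to VSI
 * @fltr: Pointer to TC flower filter structure
 *
 * Build the lookup list and rule info for an eswitch filter and program
 * it into hardware. Returns 0 on success or a negative error code.
 */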
static int
ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
	struct ice_adv_rule_info rule_info = { 0 };
	struct ice_rule_query_data rule_added;
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_adv_lkup_elem *list;
	u32 flags = fltr->flags;
	int lkups_cnt;
	int ret;
	int i;

	if (!flags || (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT)) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported encap field(s)");
		return -EOPNOTSUPP;
	}

	lkups_cnt = ice_tc_count_lkups(flags, headers, fltr);
	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	i = ice_tc_fill_rules(hw, flags, fltr, list, &rule_info, NULL);
	if (i != lkups_cnt) {
		ret = -EINVAL;
		goto exit;
	}

	/* egress traffic is always redirected to the uplink */
	if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS)
		fltr->dest_vsi = vsi->back->switchdev.uplink_vsi;

	rule_info.sw_act.fltr_act = fltr->action.fltr_act;
	if (fltr->action.fltr_act != ICE_DROP_PACKET)
		rule_info.sw_act.vsi_handle = fltr->dest_vsi->idx;
	/* For now, set the priority to the highest value; it also becomes
	 * the priority of the recipe that will be created as a result of
	 * the new extraction sequence based on the input set.
	 * Priority 7 is the maximum for a switch recipe; a higher number
	 * gives the rule higher precedence during switch rule evaluation.
	 */
	rule_info.priority = 7;

	if (fltr->direction == ICE_ESWITCH_FLTR_INGRESS) {
		rule_info.sw_act.flag |= ICE_FLTR_RX;
		rule_info.sw_act.src = hw->pf_id;
		rule_info.rx = true;
	} else {
		rule_info.sw_act.flag |= ICE_FLTR_TX;
		rule_info.sw_act.src = vsi->idx;
		rule_info.rx = false;
		rule_info.flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
		rule_info.flags_info.act_valid = true;
	}

	/* specify the cookie as filter_rule_id */
	rule_info.fltr_rule_id = fltr->cookie;

	ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
	if (ret == -EEXIST) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because it already exists");
		ret = -EINVAL;
		goto exit;
	} else if (ret) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter due to error");
		goto exit;
	}

	/* store the output params, which are needed later for removing
	 * the advanced switch filter
	 */
	fltr->rid = rule_added.rid;
	fltr->rule_id = rule_added.rule_id;
	fltr->dest_id = rule_added.vsi_handle;

exit:
	kfree(list);
	return ret;
}
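
/* Illustrative usage (device names hypothetical): with the PF in
 * switchdev mode, a rule such as
 *   tc filter add dev $UPLINK ingress protocol ip flower \
 *       dst_ip 192.168.1.10 action mirred egress redirect dev $VF_REPR
 * is parsed by ice_eswitch_tc_parse_action() and programmed by
 * ice_eswitch_add_tc_fltr() above.
 */
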
/**
 * ice_add_tc_flower_adv_fltr - add appropriate filter rules
 * @vsi: Pointer to VSI
 * @tc_fltr: Pointer to TC flower filter structure
 *
 * Add filter rules, as appropriate, based on the filter parameters and
 * using the advanced recipes supported by the OS package.
 */
static int
ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
			   struct ice_tc_flower_fltr *tc_fltr)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
	struct ice_adv_rule_info rule_info = {0};
	struct ice_rule_query_data rule_added;
	struct ice_adv_lkup_elem *list;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 flags = tc_fltr->flags;
	struct ice_vsi *ch_vsi;
	struct device *dev;
	u16 lkups_cnt = 0;
	u16 l4_proto = 0;
	int ret = 0;
	u16 i = 0;

	dev = ice_pf_to_dev(pf);
	if (ice_is_safe_mode(pf)) {
		NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter because driver is in safe mode");
		return -EOPNOTSUPP;
	}

	if (!flags || (flags & (ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 |
				ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
				ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 |
				ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
				ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT))) {
		NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unsupported encap field(s)");
		return -EOPNOTSUPP;
	}

	/* get the channel (aka ADQ VSI) */
	if (tc_fltr->dest_vsi)
		ch_vsi = tc_fltr->dest_vsi;
	else
		ch_vsi = vsi->tc_map_vsi[tc_fltr->action.tc_class];

	lkups_cnt = ice_tc_count_lkups(flags, headers, tc_fltr);
	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	i = ice_tc_fill_rules(hw, flags, tc_fltr, list, &rule_info, &l4_proto);
	if (i != lkups_cnt) {
		ret = -EINVAL;
		goto exit;
	}

	rule_info.sw_act.fltr_act = tc_fltr->action.fltr_act;
	if (tc_fltr->action.tc_class >= ICE_CHNL_START_TC) {
		if (!ch_vsi) {
			NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter because specified destination doesn't exist");
			ret = -EINVAL;
			goto exit;
		}

		rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
		rule_info.sw_act.vsi_handle = ch_vsi->idx;
		rule_info.priority = 7;
		rule_info.sw_act.src = hw->pf_id;
		rule_info.rx = true;
		dev_dbg(dev, "add switch rule for TC:%u vsi_idx:%u, lkups_cnt:%u\n",
			tc_fltr->action.tc_class,
			rule_info.sw_act.vsi_handle, lkups_cnt);
	} else {
		rule_info.sw_act.flag |= ICE_FLTR_TX;
		rule_info.sw_act.src = vsi->idx;
		rule_info.rx = false;
	}

	/* specify the cookie as filter_rule_id */
	rule_info.fltr_rule_id = tc_fltr->cookie;

	ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
	if (ret == -EEXIST) {
		NL_SET_ERR_MSG_MOD(tc_fltr->extack,
				   "Unable to add filter because it already exists");
		ret = -EINVAL;
		goto exit;
	} else if (ret) {
		NL_SET_ERR_MSG_MOD(tc_fltr->extack,
				   "Unable to add filter due to error");
		goto exit;
	}

	/* store the output params, which are needed later for removing
	 * the advanced switch filter
	 */
	tc_fltr->rid = rule_added.rid;
	tc_fltr->rule_id = rule_added.rule_id;
	if (tc_fltr->action.tc_class > 0 && ch_vsi) {
		/* For PF ADQ, the VSI type is set as ICE_VSI_CHNL, and
		 * for the PF ADQ filter, it is not yet set in tc_fltr,
		 * hence store the dest_vsi ptr in tc_fltr
		 */
		if (ch_vsi->type == ICE_VSI_CHNL)
			tc_fltr->dest_vsi = ch_vsi;
		/* keep track of advanced switch filter for
		 * destination VSI (channel VSI)
		 */
		ch_vsi->num_chnl_fltr++;
		/* in this case, dest_id is the VSI handle (SW handle) */
		tc_fltr->dest_id = rule_added.vsi_handle;

		/* keeps track of channel filters for PF VSI */
		if (vsi->type == ICE_VSI_PF &&
		    (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
			      ICE_TC_FLWR_FIELD_ENC_DST_MAC)))
			pf->num_dmac_chnl_fltrs++;
	}
	dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x) for TC %u, rid %u, rule_id %u, vsi_idx %u\n",
		lkups_cnt, flags,
		tc_fltr->action.tc_class, rule_added.rid,
		rule_added.rule_id, rule_added.vsi_handle);
exit:
	kfree(list);
	return ret;
}
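
/* Illustrative ADQ usage (hypothetical values): in legacy (non-switchdev)
 * mode, with mqprio channels configured on the PF, a rule such as
 *   tc filter add dev $PF ingress protocol ip flower \
 *       dst_ip 10.0.0.1 ip_proto tcp dst_port 80 hw_tc 1
 * is offloaded through ice_add_tc_flower_adv_fltr(), forwarding matches
 * to the channel (ADQ) VSI for TC 1.
 */
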
/**
 * ice_tc_set_pppoe - Parse PPPoE fields from TC flower filter
 * @match: Pointer to flow match structure
 * @fltr: Pointer to filter structure
 * @headers: Pointer to outer header fields
 *
 * Return: PPP protocol used in filter (ppp_ses or ppp_disc)
 */
static u16
ice_tc_set_pppoe(struct flow_match_pppoe *match,
		 struct ice_tc_flower_fltr *fltr,
		 struct ice_tc_flower_lyr_2_4_hdrs *headers)
{
	if (match->mask->session_id) {
		fltr->flags |= ICE_TC_FLWR_FIELD_PPPOE_SESSID;
		headers->pppoe_hdr.session_id = match->key->session_id;
	}

	if (match->mask->ppp_proto) {
		fltr->flags |= ICE_TC_FLWR_FIELD_PPP_PROTO;
		headers->pppoe_hdr.ppp_proto = match->key->ppp_proto;
	}

	return be16_to_cpu(match->key->type);
}

/**
 * ice_tc_set_ipv4 - Parse IPv4 addresses from TC flower filter
 * @match: Pointer to flow match structure
 * @fltr: Pointer to filter structure
 * @headers: inner or outer header fields
 * @is_encap: set true for tunnel IPv4 address
 */
static int
ice_tc_set_ipv4(struct flow_match_ipv4_addrs *match,
		struct ice_tc_flower_fltr *fltr,
		struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
{
	if (match->key->dst) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_IPV4;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV4;
		headers->l3_key.dst_ipv4 = match->key->dst;
		headers->l3_mask.dst_ipv4 = match->mask->dst;
	}
	if (match->key->src) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_IPV4;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV4;
		headers->l3_key.src_ipv4 = match->key->src;
		headers->l3_mask.src_ipv4 = match->mask->src;
	}
	return 0;
}

/**
 * ice_tc_set_ipv6 - Parse IPv6 addresses from TC flower filter
 * @match: Pointer to flow match structure
 * @fltr: Pointer to filter structure
 * @headers: inner or outer header fields
 * @is_encap: set true for tunnel IPv6 address
 */
static int
ice_tc_set_ipv6(struct flow_match_ipv6_addrs *match,
		struct ice_tc_flower_fltr *fltr,
		struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
{
	struct ice_tc_l3_hdr *l3_key, *l3_mask;

	/* src and dest IPV6 address should not be LOOPBACK
	 * (0:0:0:0:0:0:0:1), which can be represented as ::1
	 */
	if (ipv6_addr_loopback(&match->key->dst) ||
	    ipv6_addr_loopback(&match->key->src)) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Bad IPv6, addr is LOOPBACK");
		return -EINVAL;
	}
	/* error if both src and dest IPv6 addresses are unspecified */
	if (ipv6_addr_any(&match->mask->dst) &&
	    ipv6_addr_any(&match->mask->src)) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Bad src/dest IPv6, addr is any");
		return -EINVAL;
	}
	if (!ipv6_addr_any(&match->mask->dst)) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_IPV6;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV6;
	}
	if (!ipv6_addr_any(&match->mask->src)) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_IPV6;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV6;
	}

	l3_key = &headers->l3_key;
	l3_mask = &headers->l3_mask;

	if (fltr->flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
			   ICE_TC_FLWR_FIELD_SRC_IPV6)) {
		memcpy(&l3_key->src_ipv6_addr, &match->key->src.s6_addr,
		       sizeof(match->key->src.s6_addr));
		memcpy(&l3_mask->src_ipv6_addr, &match->mask->src.s6_addr,
		       sizeof(match->mask->src.s6_addr));
	}
	if (fltr->flags & (ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 |
			   ICE_TC_FLWR_FIELD_DEST_IPV6)) {
		memcpy(&l3_key->dst_ipv6_addr, &match->key->dst.s6_addr,
		       sizeof(match->key->dst.s6_addr));
		memcpy(&l3_mask->dst_ipv6_addr, &match->mask->dst.s6_addr,
		       sizeof(match->mask->dst.s6_addr));
	}

	return 0;
}

/**
 * ice_tc_set_port - Parse ports from TC flower filter
 * @match: Flow match structure
 * @fltr: Pointer to filter structure
 * @headers: inner or outer header fields
 * @is_encap: set true for tunnel port
 */
static int
ice_tc_set_port(struct flow_match_ports match,
		struct ice_tc_flower_fltr *fltr,
		struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
{
	if (match.key->dst) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT;

		headers->l4_key.dst_port = match.key->dst;
		headers->l4_mask.dst_port = match.mask->dst;
	}
	if (match.key->src) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT;

		headers->l4_key.src_port = match.key->src;
		headers->l4_mask.src_port = match.mask->src;
	}
	return 0;
}
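
/**
 * ice_get_tunnel_device - find the tunnel netdev for a rule, if any
 * @dev: pointer to the netdevice the rule arrived on
 * @rule: pointer to the flow rule
 *
 * Return @dev itself if it is a supported tunnel device; otherwise scan
 * the rule's redirect actions for one. Returns NULL when no tunnel
 * device is involved.
 */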
static struct net_device *
ice_get_tunnel_device(struct net_device *dev, struct flow_rule *rule)
{
	struct flow_action_entry *act;
	int i;

	if (ice_is_tunnel_supported(dev))
		return dev;

	flow_action_for_each(i, act, &rule->action) {
		if (act->id == FLOW_ACTION_REDIRECT &&
		    ice_is_tunnel_supported(act->dev))
			return act->dev;
	}

	return NULL;
}

/**
 * ice_parse_gtp_type - Sets GTP tunnel type to GTP-U or GTP-C
 * @match: Flow match structure
 * @fltr: Pointer to filter structure
 *
 * GTP-C/GTP-U is selected based on the destination port number
 * (enc_dst_port). Before calling this function, fltr->tunnel_type should
 * be set to TNL_GTPU, therefore making GTP-U the default choice (when the
 * destination port number is not specified).
 */
static int
ice_parse_gtp_type(struct flow_match_ports match,
		   struct ice_tc_flower_fltr *fltr)
{
	u16 dst_port;

	if (match.key->dst) {
		dst_port = be16_to_cpu(match.key->dst);

		switch (dst_port) {
		case 2152:
			break;
		case 2123:
			fltr->tunnel_type = TNL_GTPC;
			break;
		default:
			NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported GTP port number");
			return -EINVAL;
		}
	}

	return 0;
}
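
/**
 * ice_parse_tunnel_attr - Parse tunnel attributes from TC flower filter
 * @dev: Pointer to tunnel device
 * @rule: Pointer to flow rule
 * @fltr: Pointer to filter structure
 *
 * Populate the outer headers and tunnel fields of @fltr from the rule's
 * enc_* matches.
 */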
static int
ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
		      struct ice_tc_flower_fltr *fltr)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
	struct flow_match_control enc_control;

	fltr->tunnel_type = ice_tc_tun_get_type(dev);
	headers->l3_key.ip_proto = IPPROTO_UDP;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_match_enc_keyid enc_keyid;

		flow_rule_match_enc_keyid(rule, &enc_keyid);

		if (!enc_keyid.mask->keyid ||
		    enc_keyid.mask->keyid != cpu_to_be32(ICE_TC_FLOWER_MASK_32))
			return -EINVAL;

		fltr->flags |= ICE_TC_FLWR_FIELD_TENANT_ID;
		fltr->tenant_id = enc_keyid.key->keyid;
	}

	flow_rule_match_enc_control(rule, &enc_control);

	if (enc_control.key->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_enc_ipv4_addrs(rule, &match);
		if (ice_tc_set_ipv4(&match, fltr, headers, true))
			return -EINVAL;
	} else if (enc_control.key->addr_type ==
					FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_enc_ipv6_addrs(rule, &match);
		if (ice_tc_set_ipv6(&match, fltr, headers, true))
			return -EINVAL;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
		struct flow_match_ip match;

		flow_rule_match_enc_ip(rule, &match);
		headers->l3_key.tos = match.key->tos;
		headers->l3_key.ttl = match.key->ttl;
		headers->l3_mask.tos = match.mask->tos;
		headers->l3_mask.ttl = match.mask->ttl;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS) &&
	    fltr->tunnel_type != TNL_VXLAN && fltr->tunnel_type != TNL_GENEVE) {
		struct flow_match_ports match;

		flow_rule_match_enc_ports(rule, &match);

		if (fltr->tunnel_type != TNL_GTPU) {
			if (ice_tc_set_port(match, fltr, headers, true))
				return -EINVAL;
		} else {
			if (ice_parse_gtp_type(match, fltr))
				return -EINVAL;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
		struct flow_match_enc_opts match;

		flow_rule_match_enc_opts(rule, &match);

		memcpy(&fltr->gtp_pdu_info_keys, &match.key->data[0],
		       sizeof(struct gtp_pdu_session_info));

		memcpy(&fltr->gtp_pdu_info_masks, &match.mask->data[0],
		       sizeof(struct gtp_pdu_session_info));

		fltr->flags |= ICE_TC_FLWR_FIELD_ENC_OPTS;
	}

	return 0;
}
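
/* Note: only an exact (fully masked) tunnel key ID is accepted above;
 * e.g. (illustrative) "enc_key_id 42" offloads, while a partially masked
 * key ID is rejected with -EINVAL.
 */
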
/**
 * ice_parse_cls_flower - Parse TC flower filters provided by kernel
 * @filter_dev: Pointer to device on which filter is being added
 * @vsi: Pointer to the VSI
 * @f: Pointer to struct flow_cls_offload
 * @fltr: Pointer to filter structure
 */
static int
ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
		     struct flow_cls_offload *f,
		     struct ice_tc_flower_fltr *fltr)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
	struct flow_dissector *dissector;
	struct net_device *tunnel_dev;

	dissector = rule->match.dissector;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_CVLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_PPPOE))) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported key used");
		return -EOPNOTSUPP;
	}

	tunnel_dev = ice_get_tunnel_device(filter_dev, rule);
	if (tunnel_dev) {
		int err;

		filter_dev = tunnel_dev;

		err = ice_parse_tunnel_attr(filter_dev, rule, fltr);
		if (err) {
			NL_SET_ERR_MSG_MOD(fltr->extack, "Failed to parse TC flower tunnel attributes");
			return err;
		}

		/* header pointers should point to the inner headers; the
		 * outer headers were already set by ice_parse_tunnel_attr
		 */
		headers = &fltr->inner_headers;
	} else if (dissector->used_keys &
		   (BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
		    BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
		    BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
		    BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Tunnel key used, but device isn't a tunnel");
		return -EOPNOTSUPP;
	} else {
		fltr->tunnel_type = TNL_LAST;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);

		n_proto_key = ntohs(match.key->n_proto);
		n_proto_mask = ntohs(match.mask->n_proto);

		if (n_proto_key == ETH_P_ALL || n_proto_key == 0 ||
		    fltr->tunnel_type == TNL_GTPU ||
		    fltr->tunnel_type == TNL_GTPC) {
			n_proto_key = 0;
			n_proto_mask = 0;
		} else {
			fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID;
		}

		headers->l2_key.n_proto = cpu_to_be16(n_proto_key);
		headers->l2_mask.n_proto = cpu_to_be16(n_proto_mask);
		headers->l3_key.ip_proto = match.key->ip_proto;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);

		if (!is_zero_ether_addr(match.key->dst)) {
			ether_addr_copy(headers->l2_key.dst_mac,
					match.key->dst);
			ether_addr_copy(headers->l2_mask.dst_mac,
					match.mask->dst);
			fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC;
		}

		if (!is_zero_ether_addr(match.key->src)) {
			ether_addr_copy(headers->l2_key.src_mac,
					match.key->src);
			ether_addr_copy(headers->l2_mask.src_mac,
					match.mask->src);
			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_MAC;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
	    is_vlan_dev(filter_dev)) {
		struct flow_dissector_key_vlan mask;
		struct flow_dissector_key_vlan key;
		struct flow_match_vlan match;

		if (is_vlan_dev(filter_dev)) {
			match.key = &key;
			match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
			match.key->vlan_priority = 0;
			match.mask = &mask;
			memset(match.mask, 0xff, sizeof(*match.mask));
			match.mask->vlan_priority = 0;
		} else {
			flow_rule_match_vlan(rule, &match);
		}

		if (match.mask->vlan_id) {
			if (match.mask->vlan_id == VLAN_VID_MASK) {
				fltr->flags |= ICE_TC_FLWR_FIELD_VLAN;
			} else {
				NL_SET_ERR_MSG_MOD(fltr->extack, "Bad VLAN mask");
				return -EINVAL;
			}
		}

		headers->vlan_hdr.vlan_id =
			cpu_to_be16(match.key->vlan_id & VLAN_VID_MASK);
		if (match.mask->vlan_priority)
			headers->vlan_hdr.vlan_prio = match.key->vlan_priority;
		if (match.mask->vlan_tpid)
			headers->vlan_hdr.vlan_tpid = match.key->vlan_tpid;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
		struct flow_match_vlan match;

		if (!ice_is_dvm_ena(&vsi->back->hw)) {
			NL_SET_ERR_MSG_MOD(fltr->extack, "Double VLAN mode is not enabled");
			return -EINVAL;
		}

		flow_rule_match_cvlan(rule, &match);

		if (match.mask->vlan_id) {
			if (match.mask->vlan_id == VLAN_VID_MASK) {
				fltr->flags |= ICE_TC_FLWR_FIELD_CVLAN;
			} else {
				NL_SET_ERR_MSG_MOD(fltr->extack,
						   "Bad CVLAN mask");
				return -EINVAL;
			}
		}

		headers->cvlan_hdr.vlan_id =
			cpu_to_be16(match.key->vlan_id & VLAN_VID_MASK);
		if (match.mask->vlan_priority)
			headers->cvlan_hdr.vlan_prio = match.key->vlan_priority;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PPPOE)) {
		struct flow_match_pppoe match;

		flow_rule_match_pppoe(rule, &match);
		n_proto_key = ice_tc_set_pppoe(&match, fltr, headers);

		/* If the ethertype equals ETH_P_PPP_SES, n_proto might be
		 * overwritten by the encapsulated protocol (ppp_proto field)
		 * or set to 0. To correct this, flow_match_pppoe provides the
		 * type field, which contains the actual ethertype
		 * (ETH_P_PPP_SES).
		 */
		headers->l2_key.n_proto = cpu_to_be16(n_proto_key);
		headers->l2_mask.n_proto = cpu_to_be16(0xFFFF);
		fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);

		addr_type = match.key->addr_type;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		if (ice_tc_set_ipv4(&match, fltr, headers, false))
			return -EINVAL;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);
		if (ice_tc_set_ipv6(&match, fltr, headers, false))
			return -EINVAL;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		if (ice_tc_set_port(match, fltr, headers, false))
			return -EINVAL;
		switch (headers->l3_key.ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
			break;
		default:
			NL_SET_ERR_MSG_MOD(fltr->extack, "Only UDP and TCP transport are supported");
			return -EINVAL;
		}
	}
	return 0;
}
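
/* Illustrative QinQ match (hypothetical values): double VLAN mode must be
 * enabled for CVLAN matching, e.g.
 *   tc filter add dev $PF ingress protocol 802.1ad flower \
 *       vlan_id 100 vlan_ethtype 802.1q cvlan_id 200 hw_tc 1
 */
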
/**
 * ice_add_switch_fltr - Add TC flower filters
 * @vsi: Pointer to VSI
 * @fltr: Pointer to struct ice_tc_flower_fltr
 *
 * Add filter in HW switch block
 */
static int
ice_add_switch_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
{
	if (fltr->action.fltr_act == ICE_FWD_TO_QGRP)
		return -EOPNOTSUPP;

	if (ice_is_eswitch_mode_switchdev(vsi->back))
		return ice_eswitch_add_tc_fltr(vsi, fltr);

	return ice_add_tc_flower_adv_fltr(vsi, fltr);
}

/**
 * ice_handle_tclass_action - Support directing to a traffic class
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to TC flower offload structure
 * @fltr: Pointer to TC flower filter structure
 *
 * Support directing traffic to a traffic class
 */
static int
ice_handle_tclass_action(struct ice_vsi *vsi,
			 struct flow_cls_offload *cls_flower,
			 struct ice_tc_flower_fltr *fltr)
{
	int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
	struct ice_vsi *main_vsi;

	if (tc < 0) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because specified destination is invalid");
		return -EINVAL;
	}
	if (!tc) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because of invalid destination");
		return -EINVAL;
	}

	if (!(vsi->all_enatc & BIT(tc))) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because of non-existent destination");
		return -EINVAL;
	}

	/* Redirect to a TC class or Queue Group */
	main_vsi = ice_get_main_vsi(vsi->back);
	if (!main_vsi || !main_vsi->netdev) {
		NL_SET_ERR_MSG_MOD(fltr->extack,
				   "Unable to add filter because of invalid netdevice");
		return -EINVAL;
	}

	if ((fltr->flags & ICE_TC_FLWR_FIELD_TENANT_ID) &&
	    (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
			    ICE_TC_FLWR_FIELD_SRC_MAC))) {
		NL_SET_ERR_MSG_MOD(fltr->extack,
				   "Unable to add filter because filter using tunnel key and inner MAC is unsupported combination");
		return -EOPNOTSUPP;
	}

	/* For ADQ, the filter must include the dest MAC address; otherwise
	 * unwanted packets with unrelated MAC addresses get delivered to ADQ
	 * VSIs as long as the remaining filter criteria are satisfied, such
	 * as dest IP address and dest/src L4 port. The following code
	 * handles:
	 * 1. For non-tunnel, if the user specified MAC addresses, use them
	 *    (this code then does nothing).
	 * 2. For non-tunnel, if the user didn't specify a MAC address, add
	 *    an implicit dest MAC, namely the lower netdev's active unicast
	 *    MAC address.
	 * 3. For tunnel, as of now the TC filter through the flower
	 *    classifier has no provision for the user to specify an outer
	 *    DMAC, hence the driver implicitly adds the outer dest MAC,
	 *    namely the lower netdev's active unicast MAC address.
	 */
	if (fltr->tunnel_type != TNL_LAST &&
	    !(fltr->flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC))
		fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DST_MAC;

	if (fltr->tunnel_type == TNL_LAST &&
	    !(fltr->flags & ICE_TC_FLWR_FIELD_DST_MAC))
		fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC;

	if (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
			   ICE_TC_FLWR_FIELD_ENC_DST_MAC)) {
		ether_addr_copy(fltr->outer_headers.l2_key.dst_mac,
				vsi->netdev->dev_addr);
		eth_broadcast_addr(fltr->outer_headers.l2_mask.dst_mac);
	}

	/* validate the specified dest MAC address; make sure it belongs
	 * either to the lower netdev or to any of the MACVLANs. MACVLAN MAC
	 * addresses are added as unicast MAC filters destined to the main
	 * VSI.
	 */
	if (!ice_mac_fltr_exist(&main_vsi->back->hw,
				fltr->outer_headers.l2_key.dst_mac,
				main_vsi->idx)) {
		NL_SET_ERR_MSG_MOD(fltr->extack,
				   "Unable to add filter because legacy MAC filter for specified destination doesn't exist");
		return -EINVAL;
	}

	/* Make sure the VLAN is already added to the main VSI before
	 * allowing ADQ to add a VLAN-based filter such as MAC + VLAN +
	 * L4 port.
	 */
	if (fltr->flags & ICE_TC_FLWR_FIELD_VLAN) {
		u16 vlan_id = be16_to_cpu(fltr->outer_headers.vlan_hdr.vlan_id);

		if (!ice_vlan_fltr_exist(&main_vsi->back->hw, vlan_id,
					 main_vsi->idx)) {
			NL_SET_ERR_MSG_MOD(fltr->extack,
					   "Unable to add filter because legacy VLAN filter for specified destination doesn't exist");
			return -EINVAL;
		}
	}
	fltr->action.fltr_act = ICE_FWD_TO_VSI;
	fltr->action.tc_class = tc;

	return 0;
}
/**
 * ice_parse_tc_flower_actions - Parse the actions for a TC filter
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to TC flower offload structure
 * @fltr: Pointer to TC flower filter structure
 *
 * Parse the actions for a TC filter
 */
static int
ice_parse_tc_flower_actions(struct ice_vsi *vsi,
			    struct flow_cls_offload *cls_flower,
			    struct ice_tc_flower_fltr *fltr)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower);
	struct flow_action *flow_action = &rule->action;
	struct flow_action_entry *act;
	int i;

	if (cls_flower->classid)
		return ice_handle_tclass_action(vsi, cls_flower, fltr);

	if (!flow_action_has_entries(flow_action))
		return -EINVAL;

	flow_action_for_each(i, act, flow_action) {
		if (ice_is_eswitch_mode_switchdev(vsi->back)) {
			int err = ice_eswitch_tc_parse_action(fltr, act);

			if (err)
				return err;
			continue;
		}
		/* Allow only one rule per filter */

		/* Drop action */
		if (act->id == FLOW_ACTION_DROP) {
			NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action DROP");
			return -EINVAL;
		}
		fltr->action.fltr_act = ICE_FWD_TO_VSI;
	}
	return 0;
}

/**
 * ice_del_tc_fltr - deletes a filter from HW table
 * @vsi: Pointer to VSI
 * @fltr: Pointer to struct ice_tc_flower_fltr
 *
 * This function deletes a filter from HW table and manages book-keeping
 */
static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
{
	struct ice_rule_query_data rule_rem;
	struct ice_pf *pf = vsi->back;
	int err;

	rule_rem.rid = fltr->rid;
	rule_rem.rule_id = fltr->rule_id;
	rule_rem.vsi_handle = fltr->dest_id;
	err = ice_rem_adv_rule_by_id(&pf->hw, &rule_rem);
	if (err) {
		if (err == -ENOENT) {
			NL_SET_ERR_MSG_MOD(fltr->extack, "Filter does not exist");
			return -ENOENT;
		}
		NL_SET_ERR_MSG_MOD(fltr->extack, "Failed to delete TC flower filter");
		return -EIO;
	}

	/* update advanced switch filter count for destination
	 * VSI if filter destination was VSI
	 */
	if (fltr->dest_vsi) {
		if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
			fltr->dest_vsi->num_chnl_fltr--;

			/* keeps track of channel filters for PF VSI */
			if (vsi->type == ICE_VSI_PF &&
			    (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
					    ICE_TC_FLWR_FIELD_ENC_DST_MAC)))
				pf->num_dmac_chnl_fltrs--;
		}
	}
	return 0;
}
/**
 * ice_add_tc_fltr - adds a TC flower filter
 * @netdev: Pointer to netdev
 * @vsi: Pointer to VSI
 * @f: Pointer to flower offload structure
 * @__fltr: Pointer to struct ice_tc_flower_fltr
 *
 * This function parses TC-flower input fields, parses action,
 * and adds a filter.
 */
static int
ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi,
		struct flow_cls_offload *f,
		struct ice_tc_flower_fltr **__fltr)
{
	struct ice_tc_flower_fltr *fltr;
	int err;

	/* by default, set output to be INVALID */
	*__fltr = NULL;

	fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
	if (!fltr)
		return -ENOMEM;

	fltr->cookie = f->cookie;
	fltr->extack = f->common.extack;
	fltr->src_vsi = vsi;
	INIT_HLIST_NODE(&fltr->tc_flower_node);

	err = ice_parse_cls_flower(netdev, vsi, f, fltr);
	if (err < 0)
		goto err;

	err = ice_parse_tc_flower_actions(vsi, f, fltr);
	if (err < 0)
		goto err;

	err = ice_add_switch_fltr(vsi, fltr);
	if (err < 0)
		goto err;

	/* return the newly created filter */
	*__fltr = fltr;

	return 0;
err:
	kfree(fltr);
	return err;
}

/**
 * ice_find_tc_flower_fltr - Find the TC flower filter in the list
 * @pf: Pointer to PF
 * @cookie: filter specific cookie
 */
static struct ice_tc_flower_fltr *
ice_find_tc_flower_fltr(struct ice_pf *pf, unsigned long cookie)
{
	struct ice_tc_flower_fltr *fltr;

	hlist_for_each_entry(fltr, &pf->tc_flower_fltr_list, tc_flower_node)
		if (cookie == fltr->cookie)
			return fltr;

	return NULL;
}
/**
 * ice_add_cls_flower - add TC flower filters
 * @netdev: Pointer to filter device
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to flower offload structure
 */
int
ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
		   struct flow_cls_offload *cls_flower)
{
	struct netlink_ext_ack *extack = cls_flower->common.extack;
	struct net_device *vsi_netdev = vsi->netdev;
	struct ice_tc_flower_fltr *fltr;
	struct ice_pf *pf = vsi->back;
	int err;

	if (ice_is_reset_in_progress(pf->state))
		return -EBUSY;
	if (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
		return -EINVAL;

	if (ice_is_port_repr_netdev(netdev))
		vsi_netdev = netdev;

	if (!(vsi_netdev->features & NETIF_F_HW_TC) &&
	    !test_bit(ICE_FLAG_CLS_FLOWER, pf->flags)) {
		/* Based on TC indirect notifications from the kernel, all ice
		 * devices get an instance of the rule from a higher-level
		 * device. Avoid triggering an explicit error in this case.
		 */
		if (netdev == vsi_netdev)
			NL_SET_ERR_MSG_MOD(extack, "can't apply TC flower filters, turn ON hw-tc-offload and try again");
		return -EINVAL;
	}

	/* avoid duplicate entries; if one exists, return an error */
	fltr = ice_find_tc_flower_fltr(pf, cls_flower->cookie);
	if (fltr) {
		NL_SET_ERR_MSG_MOD(extack, "filter cookie already exists, ignoring");
		return -EEXIST;
	}

	/* prep and add TC-flower filter in HW */
	err = ice_add_tc_fltr(netdev, vsi, cls_flower, &fltr);
	if (err)
		return err;

	/* add filter into an ordered list */
	hlist_add_head(&fltr->tc_flower_node, &pf->tc_flower_fltr_list);
	return 0;
}

/**
 * ice_del_cls_flower - delete TC flower filters
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to struct flow_cls_offload
 */
int
ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower)
{
	struct ice_tc_flower_fltr *fltr;
	struct ice_pf *pf = vsi->back;
	int err;

	/* find filter */
	fltr = ice_find_tc_flower_fltr(pf, cls_flower->cookie);
	if (!fltr) {
		if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) &&
		    hlist_empty(&pf->tc_flower_fltr_list))
			return 0;

		NL_SET_ERR_MSG_MOD(cls_flower->common.extack, "failed to delete TC flower filter because unable to find it");
		return -EINVAL;
	}

	fltr->extack = cls_flower->common.extack;
	/* delete filter from HW */
	err = ice_del_tc_fltr(vsi, fltr);
	if (err)
		return err;

	/* delete filter from an ordered list */
	hlist_del(&fltr->tc_flower_node);

	/* free the filter node */
	kfree(fltr);

	return 0;
}

/**
 * ice_replay_tc_fltrs - replay TC filters
 * @pf: pointer to PF struct
 */
void ice_replay_tc_fltrs(struct ice_pf *pf)
{
	struct ice_tc_flower_fltr *fltr;
	struct hlist_node *node;

	hlist_for_each_entry_safe(fltr, node,
				  &pf->tc_flower_fltr_list,
				  tc_flower_node) {
		fltr->extack = NULL;
		ice_add_switch_fltr(fltr->src_vsi, fltr);
	}
}