// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2020, Intel Corporation. */

/* flow director ethtool support for ice */

#include "ice.h"
#include "ice_lib.h"
#include "ice_flow.h"

static struct in6_addr full_ipv6_addr_mask = {
	.in6_u = {
		.u6_addr8 = {
			0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
			0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		}
	}
};

static struct in6_addr zero_ipv6_addr_mask = {
	.in6_u = {
		.u6_addr8 = {
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		}
	}
};

/* calls to ice_flow_add_prof require the number of segments in the array
 * for segs_cnt. In this code that is one more than the index.
 */
#define TNL_SEG_CNT(_TNL_) ((_TNL_) + 1)
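/* For example, assuming ICE_FD_HW_SEG_NON_TUN == 0 and
 * ICE_FD_HW_SEG_TUN == 1 (see enum ice_fd_hw_seg):
 *   TNL_SEG_CNT(ICE_FD_HW_SEG_NON_TUN) == 1 segment (outer headers only)
 *   TNL_SEG_CNT(ICE_FD_HW_SEG_TUN) == 2 segments (outer + inner headers)
 */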
/**
 * ice_fltr_to_ethtool_flow - convert filter type values to ethtool
 * flow type values
 * @flow: filter type to be converted
 *
 * Returns the corresponding ethtool flow type.
 */
static int ice_fltr_to_ethtool_flow(enum ice_fltr_ptype flow)
{
	switch (flow) {
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		return TCP_V4_FLOW;
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		return UDP_V4_FLOW;
	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
		return SCTP_V4_FLOW;
	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
		return IPV4_USER_FLOW;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		return TCP_V6_FLOW;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		return UDP_V6_FLOW;
	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
		return SCTP_V6_FLOW;
	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
		return IPV6_USER_FLOW;
	default:
		/* 0 is undefined ethtool flow */
		return 0;
	}
}

/**
 * ice_ethtool_flow_to_fltr - convert ethtool flow type to filter enum
 * @eth: Ethtool flow type to be converted
 *
 * Returns flow enum
 */
static enum ice_fltr_ptype ice_ethtool_flow_to_fltr(int eth)
{
	switch (eth) {
	case TCP_V4_FLOW:
		return ICE_FLTR_PTYPE_NONF_IPV4_TCP;
	case UDP_V4_FLOW:
		return ICE_FLTR_PTYPE_NONF_IPV4_UDP;
	case SCTP_V4_FLOW:
		return ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
	case IPV4_USER_FLOW:
		return ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
	case TCP_V6_FLOW:
		return ICE_FLTR_PTYPE_NONF_IPV6_TCP;
	case UDP_V6_FLOW:
		return ICE_FLTR_PTYPE_NONF_IPV6_UDP;
	case SCTP_V6_FLOW:
		return ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
	case IPV6_USER_FLOW:
		return ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
	default:
		return ICE_FLTR_PTYPE_NONF_NONE;
	}
}

/**
 * ice_is_mask_valid - check mask field set
 * @mask: full mask to check
 * @field: field for which mask should be valid
 *
 * If the mask is fully set return true. If it is not valid for field return
 * false.
 */
static bool ice_is_mask_valid(u64 mask, u64 field)
{
	return (mask & field) == field;
}
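/* For example, ice_is_mask_valid(0xFFFFFFFF, GENMASK_ULL(31, 0)) is true,
 * while a partial mask such as 0x00FFFFFF over that same field is not.
 */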
/**
 * ice_get_ethtool_fdir_entry - fill ethtool structure with fdir filter data
 * @hw: hardware structure that contains filter list
 * @cmd: ethtool command data structure to receive the filter data
 *
 * Returns 0 on success and -EINVAL on failure
 */
int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp;
	struct ice_fdir_fltr *rule;
	int ret = 0;
	u16 idx;

	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;

	mutex_lock(&hw->fdir_fltr_lock);

	rule = ice_fdir_find_fltr_by_idx(hw, fsp->location);

	if (!rule || fsp->location != rule->fltr_id) {
		ret = -EINVAL;
		goto release_lock;
	}

	fsp->flow_type = ice_fltr_to_ethtool_flow(rule->flow_type);

	memset(&fsp->m_u, 0, sizeof(fsp->m_u));
	memset(&fsp->m_ext, 0, sizeof(fsp->m_ext));

	switch (fsp->flow_type) {
	case IPV4_USER_FLOW:
		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
		fsp->h_u.usr_ip4_spec.proto = 0;
		fsp->h_u.usr_ip4_spec.l4_4_bytes = rule->ip.v4.l4_header;
		fsp->h_u.usr_ip4_spec.tos = rule->ip.v4.tos;
		fsp->h_u.usr_ip4_spec.ip4src = rule->ip.v4.src_ip;
		fsp->h_u.usr_ip4_spec.ip4dst = rule->ip.v4.dst_ip;
		fsp->m_u.usr_ip4_spec.ip4src = rule->mask.v4.src_ip;
		fsp->m_u.usr_ip4_spec.ip4dst = rule->mask.v4.dst_ip;
		fsp->m_u.usr_ip4_spec.ip_ver = 0xFF;
		fsp->m_u.usr_ip4_spec.proto = 0;
		fsp->m_u.usr_ip4_spec.l4_4_bytes = rule->mask.v4.l4_header;
		fsp->m_u.usr_ip4_spec.tos = rule->mask.v4.tos;
		break;
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		fsp->h_u.tcp_ip4_spec.psrc = rule->ip.v4.src_port;
		fsp->h_u.tcp_ip4_spec.pdst = rule->ip.v4.dst_port;
		fsp->h_u.tcp_ip4_spec.ip4src = rule->ip.v4.src_ip;
		fsp->h_u.tcp_ip4_spec.ip4dst = rule->ip.v4.dst_ip;
		fsp->m_u.tcp_ip4_spec.psrc = rule->mask.v4.src_port;
		fsp->m_u.tcp_ip4_spec.pdst = rule->mask.v4.dst_port;
		fsp->m_u.tcp_ip4_spec.ip4src = rule->mask.v4.src_ip;
		fsp->m_u.tcp_ip4_spec.ip4dst = rule->mask.v4.dst_ip;
		break;
	case IPV6_USER_FLOW:
		fsp->h_u.usr_ip6_spec.l4_4_bytes = rule->ip.v6.l4_header;
		fsp->h_u.usr_ip6_spec.tclass = rule->ip.v6.tc;
		fsp->h_u.usr_ip6_spec.l4_proto = rule->ip.v6.proto;
		memcpy(fsp->h_u.tcp_ip6_spec.ip6src, rule->ip.v6.src_ip,
		       sizeof(struct in6_addr));
		memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, rule->ip.v6.dst_ip,
		       sizeof(struct in6_addr));
		memcpy(fsp->m_u.tcp_ip6_spec.ip6src, rule->mask.v6.src_ip,
		       sizeof(struct in6_addr));
		memcpy(fsp->m_u.tcp_ip6_spec.ip6dst, rule->mask.v6.dst_ip,
		       sizeof(struct in6_addr));
		fsp->m_u.usr_ip6_spec.l4_4_bytes = rule->mask.v6.l4_header;
		fsp->m_u.usr_ip6_spec.tclass = rule->mask.v6.tc;
		fsp->m_u.usr_ip6_spec.l4_proto = rule->mask.v6.proto;
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
		memcpy(fsp->h_u.tcp_ip6_spec.ip6src, rule->ip.v6.src_ip,
		       sizeof(struct in6_addr));
		memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, rule->ip.v6.dst_ip,
		       sizeof(struct in6_addr));
		fsp->h_u.tcp_ip6_spec.psrc = rule->ip.v6.src_port;
		fsp->h_u.tcp_ip6_spec.pdst = rule->ip.v6.dst_port;
		memcpy(fsp->m_u.tcp_ip6_spec.ip6src,
		       rule->mask.v6.src_ip,
		       sizeof(struct in6_addr));
		memcpy(fsp->m_u.tcp_ip6_spec.ip6dst,
		       rule->mask.v6.dst_ip,
		       sizeof(struct in6_addr));
		fsp->m_u.tcp_ip6_spec.psrc = rule->mask.v6.src_port;
		fsp->m_u.tcp_ip6_spec.pdst = rule->mask.v6.dst_port;
		fsp->h_u.tcp_ip6_spec.tclass = rule->ip.v6.tc;
		fsp->m_u.tcp_ip6_spec.tclass = rule->mask.v6.tc;
		break;
	default:
		break;
	}

	if (rule->dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DROP_PKT)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
	else
		fsp->ring_cookie = rule->q_index;

	idx = ice_ethtool_flow_to_fltr(fsp->flow_type);
	if (idx == ICE_FLTR_PTYPE_NONF_NONE) {
		dev_err(ice_hw_to_dev(hw), "Missing input index for flow_type %d\n",
			rule->flow_type);
		ret = -EINVAL;
	}

release_lock:
	mutex_unlock(&hw->fdir_fltr_lock);
	return ret;
}

/**
 * ice_get_fdir_fltr_ids - fill buffer with filter IDs of active filters
 * @hw: hardware structure containing the filter list
 * @cmd: ethtool command data structure
 * @rule_locs: ethtool array passed in from OS to receive filter IDs
 *
 * Returns 0 as expected for success by ethtool
 */
int
ice_get_fdir_fltr_ids(struct ice_hw *hw, struct ethtool_rxnfc *cmd,
		      u32 *rule_locs)
{
	struct ice_fdir_fltr *f_rule;
	unsigned int cnt = 0;
	int val = 0;

	/* report total rule count */
	cmd->data = ice_get_fdir_cnt_all(hw);

	mutex_lock(&hw->fdir_fltr_lock);

	list_for_each_entry(f_rule, &hw->fdir_list_head, fltr_node) {
		if (cnt == cmd->rule_cnt) {
			val = -EMSGSIZE;
			goto release_lock;
		}
		rule_locs[cnt] = f_rule->fltr_id;
		cnt++;
	}

release_lock:
	mutex_unlock(&hw->fdir_fltr_lock);
	if (!val)
		cmd->rule_cnt = cnt;
	return val;
}

/**
 * ice_fdir_get_hw_prof - return the ice_fd_hw_prof associated with a flow
 * @hw: hardware structure containing the filter list
 * @blk: hardware block
 * @flow: FDir flow type to look up
 */
static struct ice_fd_hw_prof *
ice_fdir_get_hw_prof(struct ice_hw *hw, enum ice_block blk, int flow)
{
	if (blk == ICE_BLK_FD && hw->fdir_prof)
		return hw->fdir_prof[flow];

	return NULL;
}

/**
 * ice_fdir_erase_flow_from_hw - remove a flow from the HW profile tables
 * @hw: hardware structure containing the filter list
 * @blk: hardware block
 * @flow: FDir flow type to release
 */
static void
ice_fdir_erase_flow_from_hw(struct ice_hw *hw, enum ice_block blk, int flow)
{
	struct ice_fd_hw_prof *prof = ice_fdir_get_hw_prof(hw, blk, flow);
	int tun;

	if (!prof)
		return;

	for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
		u64 prof_id;
		int j;

		prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
		for (j = 0; j < prof->cnt; j++) {
			u16 vsi_num;

			if (!prof->entry_h[j][tun] || !prof->vsi_h[j])
				continue;
			vsi_num = ice_get_hw_vsi_num(hw, prof->vsi_h[j]);
			ice_rem_prof_id_flow(hw, blk, vsi_num, prof_id);
			ice_flow_rem_entry(hw, blk, prof->entry_h[j][tun]);
			prof->entry_h[j][tun] = 0;
		}
		ice_flow_rem_prof(hw, blk, prof_id);
	}
}

/**
 * ice_fdir_rem_flow - release the ice_flow structures for a filter type
 * @hw: hardware structure containing the filter list
 * @blk: hardware block
 * @flow_type: FDir flow type to release
 */
static void
ice_fdir_rem_flow(struct ice_hw *hw, enum ice_block blk,
		  enum ice_fltr_ptype flow_type)
{
	int flow = (int)flow_type & ~FLOW_EXT;
	struct ice_fd_hw_prof *prof;
	int tun, i;

	prof = ice_fdir_get_hw_prof(hw, blk, flow);
	if (!prof)
		return;

	ice_fdir_erase_flow_from_hw(hw, blk, flow);
	for (i = 0; i < prof->cnt; i++)
		prof->vsi_h[i] = 0;
	for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
		if (!prof->fdir_seg[tun])
			continue;
		devm_kfree(ice_hw_to_dev(hw), prof->fdir_seg[tun]);
		prof->fdir_seg[tun] = NULL;
	}
	prof->cnt = 0;
}

/**
 * ice_fdir_release_flows - release all flows in use for later replay
 * @hw: pointer to HW instance
 */
void ice_fdir_release_flows(struct ice_hw *hw)
{
	int flow;

	/* release Flow Director HW table entries */
	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++)
		ice_fdir_erase_flow_from_hw(hw, ICE_BLK_FD, flow);
}

/**
 * ice_fdir_replay_flows - replay HW Flow Director filter info
 * @hw: pointer to HW instance
 */
void ice_fdir_replay_flows(struct ice_hw *hw)
{
	int flow;

	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
		int tun;

		if (!hw->fdir_prof[flow] || !hw->fdir_prof[flow]->cnt)
			continue;
		for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
			struct ice_flow_prof *hw_prof;
			struct ice_fd_hw_prof *prof;
			u64 prof_id;
			int j;

			prof = hw->fdir_prof[flow];
			prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
			ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id,
					  prof->fdir_seg[tun], TNL_SEG_CNT(tun),
					  &hw_prof);
			for (j = 0; j < prof->cnt; j++) {
				enum ice_flow_priority prio;
				u64 entry_h = 0;
				int err;

				prio = ICE_FLOW_PRIO_NORMAL;
				err = ice_flow_add_entry(hw, ICE_BLK_FD,
							 prof_id,
							 prof->vsi_h[0],
							 prof->vsi_h[j],
							 prio, prof->fdir_seg,
							 &entry_h);
				if (err) {
					dev_err(ice_hw_to_dev(hw), "Could not replay Flow Director, flow type %d\n",
						flow);
					continue;
				}
				prof->entry_h[j][tun] = entry_h;
			}
		}
	}
}

/**
 * ice_parse_rx_flow_user_data - deconstruct user-defined data
 * @fsp: pointer to ethtool Rx flow specification
 * @data: pointer to userdef data structure for storage
 *
 * Returns 0 on success, negative error value on failure
 */
static int
ice_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
			    struct ice_rx_flow_userdef *data)
{
	u64 value, mask;

	memset(data, 0, sizeof(*data));
	if (!(fsp->flow_type & FLOW_EXT))
		return 0;

	value = be64_to_cpu(*((__force __be64 *)fsp->h_ext.data));
	mask = be64_to_cpu(*((__force __be64 *)fsp->m_ext.data));
	if (!mask)
		return 0;

#define ICE_USERDEF_FLEX_WORD_M	GENMASK_ULL(15, 0)
#define ICE_USERDEF_FLEX_OFFS_S	16
#define ICE_USERDEF_FLEX_OFFS_M	GENMASK_ULL(31, ICE_USERDEF_FLEX_OFFS_S)
#define ICE_USERDEF_FLEX_FLTR_M	GENMASK_ULL(31, 0)

	/* 0x1fe is the maximum value for offsets stored in the internal
	 * filtering tables.
	 */
#define ICE_USERDEF_FLEX_MAX_OFFS_VAL 0x1fe

	if (!ice_is_mask_valid(mask, ICE_USERDEF_FLEX_FLTR_M) ||
	    value > ICE_USERDEF_FLEX_FLTR_M)
		return -EINVAL;

	data->flex_word = value & ICE_USERDEF_FLEX_WORD_M;
	data->flex_offset = (value & ICE_USERDEF_FLEX_OFFS_M) >>
			    ICE_USERDEF_FLEX_OFFS_S;
	if (data->flex_offset > ICE_USERDEF_FLEX_MAX_OFFS_VAL)
		return -EINVAL;

	data->flex_fltr = true;

	return 0;
}
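/* Worked example (values are illustrative only): a user-def field of
 * 0x0042ABCD with mask 0xFFFFFFFF decodes as
 *   flex_offset = (0x0042ABCD & ICE_USERDEF_FLEX_OFFS_M) >> 16 = 0x42
 *   flex_word   =  0x0042ABCD & ICE_USERDEF_FLEX_WORD_M        = 0xABCD
 * i.e. match the two-byte pattern 0xABCD at offset 0x42, as later consumed
 * by ice_flow_add_fld_raw().
 */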
/**
 * ice_fdir_num_avail_fltr - return the number of unused flow director filters
 * @hw: pointer to hardware structure
 * @vsi: software VSI structure
 *
 * There are two filter pools: guaranteed and best effort (shared). Each VSI
 * can use filters from either pool. The guaranteed pool is divided between
 * VSIs. The best effort filter pool is common to all VSIs and is a device
 * shared resource pool. The number of filters available to this VSI is the
 * sum of the VSI's guaranteed filter pool and the global available best
 * effort filter pool.
 *
 * Returns the number of available flow director filters to this VSI
 */
static int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi)
{
	u16 vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
	u16 num_guar;
	u16 num_be;

	/* total guaranteed filters assigned to this VSI */
	num_guar = vsi->num_gfltr;

	/* minus the guaranteed filters programmed by this VSI */
	num_guar -= (rd32(hw, VSIQF_FD_CNT(vsi_num)) &
		     VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S;

	/* total global best effort filters */
	num_be = hw->func_caps.fd_fltr_best_effort;

	/* minus the global best effort filters programmed */
	num_be -= (rd32(hw, GLQF_FD_CNT) & GLQF_FD_CNT_FD_BCNT_M) >>
		  GLQF_FD_CNT_FD_BCNT_S;

	return num_guar + num_be;
}
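/* Example with made-up numbers: a VSI with num_gfltr = 64 of which 10 are
 * already programmed, on a device exposing 2048 best-effort filters of
 * which 500 are in use, could still add (64 - 10) + (2048 - 500) = 1602
 * filters.
 */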
/**
 * ice_fdir_alloc_flow_prof - allocate FDir flow profile structure(s)
 * @hw: HW structure containing the FDir flow profile structure(s)
 * @flow: flow type to allocate the flow profile for
 *
 * Allocate the fdir_prof and fdir_prof[flow] if not already created. Return 0
 * on success and negative on error.
 */
static int
ice_fdir_alloc_flow_prof(struct ice_hw *hw, enum ice_fltr_ptype flow)
{
	if (!hw)
		return -EINVAL;

	if (!hw->fdir_prof) {
		hw->fdir_prof = devm_kcalloc(ice_hw_to_dev(hw),
					     ICE_FLTR_PTYPE_MAX,
					     sizeof(*hw->fdir_prof),
					     GFP_KERNEL);
		if (!hw->fdir_prof)
			return -ENOMEM;
	}

	if (!hw->fdir_prof[flow]) {
		hw->fdir_prof[flow] = devm_kzalloc(ice_hw_to_dev(hw),
						   sizeof(**hw->fdir_prof),
						   GFP_KERNEL);
		if (!hw->fdir_prof[flow])
			return -ENOMEM;
	}

	return 0;
}

/**
 * ice_fdir_set_hw_fltr_rule - Configure HW tables to generate a FDir rule
 * @pf: pointer to the PF structure
 * @seg: protocol header description pointer
 * @flow: filter enum
 * @tun: FDir segment to program
 */
static int
ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
			  enum ice_fltr_ptype flow, enum ice_fd_hw_seg tun)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_vsi *main_vsi, *ctrl_vsi;
	struct ice_flow_seg_info *old_seg;
	struct ice_flow_prof *prof = NULL;
	struct ice_fd_hw_prof *hw_prof;
	struct ice_hw *hw = &pf->hw;
	u64 entry1_h = 0;
	u64 entry2_h = 0;
	u64 prof_id;
	int err;

	main_vsi = ice_get_main_vsi(pf);
	if (!main_vsi)
		return -EINVAL;

	ctrl_vsi = ice_get_ctrl_vsi(pf);
	if (!ctrl_vsi)
		return -EINVAL;

	err = ice_fdir_alloc_flow_prof(hw, flow);
	if (err)
		return err;

	hw_prof = hw->fdir_prof[flow];
	old_seg = hw_prof->fdir_seg[tun];
	if (old_seg) {
		/* This flow_type already has a changed input set.
		 * If it matches the requested input set then we are
		 * done. Or, if it's different then it's an error.
		 */
		if (!memcmp(old_seg, seg, sizeof(*seg)))
			return -EEXIST;

		/* if there are FDir filters using this flow,
		 * then return error.
		 */
		if (hw->fdir_fltr_cnt[flow]) {
			dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n");
			return -EINVAL;
		}

		if (ice_is_arfs_using_perfect_flow(hw, flow)) {
			dev_err(dev, "aRFS using perfect flow type %d, cannot change input set\n",
				flow);
			return -EINVAL;
		}

		/* remove HW filter definition */
		ice_fdir_rem_flow(hw, ICE_BLK_FD, flow);
	}

	/* Adding a profile, but only one header is supported. That is, the
	 * final parameters are one header (segment), no actions (NULL), and
	 * an action count of zero.
	 */
	prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
	err = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
				TNL_SEG_CNT(tun), &prof);
	if (err)
		return err;
	err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
				 main_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, &entry1_h);
	if (err)
		goto err_prof;
	err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, &entry2_h);
	if (err)
		goto err_entry;

	hw_prof->fdir_seg[tun] = seg;
	hw_prof->entry_h[0][tun] = entry1_h;
	hw_prof->entry_h[1][tun] = entry2_h;
	hw_prof->vsi_h[0] = main_vsi->idx;
	hw_prof->vsi_h[1] = ctrl_vsi->idx;
	if (!hw_prof->cnt)
		hw_prof->cnt = 2;

	return 0;

err_entry:
	ice_rem_prof_id_flow(hw, ICE_BLK_FD,
			     ice_get_hw_vsi_num(hw, main_vsi->idx), prof_id);
	ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
err_prof:
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
	dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n");

	return err;
}
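/* Note on profile IDs: prof_id = flow + tun * ICE_FLTR_PTYPE_MAX gives the
 * non-tunnel and tunnel profiles of each flow type disjoint ID ranges. As a
 * hypothetical illustration, if ICE_FLTR_PTYPE_MAX were 32, flow type 3
 * would use profile 3 for its outer-header rule and profile 35 for its
 * tunnel rule.
 */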
/**
 * ice_set_init_fdir_seg
 * @seg: flow segment for programming
 * @l3_proto: ICE_FLOW_SEG_HDR_IPV4 or ICE_FLOW_SEG_HDR_IPV6
 * @l4_proto: ICE_FLOW_SEG_HDR_TCP or ICE_FLOW_SEG_HDR_UDP
 *
 * Set the configuration for perfect filters to the provided flow segment for
 * programming the HW filter. This is to be called only when initializing
 * filters, as it assumes no filters exist.
 */
static int
ice_set_init_fdir_seg(struct ice_flow_seg_info *seg,
		      enum ice_flow_seg_hdr l3_proto,
		      enum ice_flow_seg_hdr l4_proto)
{
	enum ice_flow_field src_addr, dst_addr, src_port, dst_port;

	if (!seg)
		return -EINVAL;

	if (l3_proto == ICE_FLOW_SEG_HDR_IPV4) {
		src_addr = ICE_FLOW_FIELD_IDX_IPV4_SA;
		dst_addr = ICE_FLOW_FIELD_IDX_IPV4_DA;
	} else if (l3_proto == ICE_FLOW_SEG_HDR_IPV6) {
		src_addr = ICE_FLOW_FIELD_IDX_IPV6_SA;
		dst_addr = ICE_FLOW_FIELD_IDX_IPV6_DA;
	} else {
		return -EINVAL;
	}

	if (l4_proto == ICE_FLOW_SEG_HDR_TCP) {
		src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT;
		dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT;
	} else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) {
		src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT;
		dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT;
	} else {
		return -EINVAL;
	}

	ICE_FLOW_SET_HDRS(seg, l3_proto | l4_proto);

	/* IP source address */
	ice_flow_set_fld(seg, src_addr, ICE_FLOW_FLD_OFF_INVAL,
			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);

	/* IP destination address */
	ice_flow_set_fld(seg, dst_addr, ICE_FLOW_FLD_OFF_INVAL,
			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);

	/* Layer 4 source port */
	ice_flow_set_fld(seg, src_port, ICE_FLOW_FLD_OFF_INVAL,
			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);

	/* Layer 4 destination port */
	ice_flow_set_fld(seg, dst_port, ICE_FLOW_FLD_OFF_INVAL,
			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);

	return 0;
}
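/* The segment built above matches the full 4-tuple (source/destination IP
 * and source/destination port) exactly, which is what makes the default TCP
 * and UDP rules created at init time "perfect" filters.
 */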
/**
 * ice_create_init_fdir_rule
 * @pf: PF structure
 * @flow: filter enum
 *
 * Return error value or 0 on success.
 */
static int
ice_create_init_fdir_rule(struct ice_pf *pf, enum ice_fltr_ptype flow)
{
	struct ice_flow_seg_info *seg, *tun_seg;
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	int ret;

	/* if there is already a filter rule for this kind, return -EINVAL */
	if (hw->fdir_prof && hw->fdir_prof[flow] &&
	    hw->fdir_prof[flow]->fdir_seg[0])
		return -EINVAL;

	seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
	if (!seg)
		return -ENOMEM;

	tun_seg = devm_kcalloc(dev, ICE_FD_HW_SEG_MAX, sizeof(*seg),
			       GFP_KERNEL);
	if (!tun_seg) {
		devm_kfree(dev, seg);
		return -ENOMEM;
	}

	if (flow == ICE_FLTR_PTYPE_NONF_IPV4_TCP)
		ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV4,
					    ICE_FLOW_SEG_HDR_TCP);
	else if (flow == ICE_FLTR_PTYPE_NONF_IPV4_UDP)
		ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV4,
					    ICE_FLOW_SEG_HDR_UDP);
	else if (flow == ICE_FLTR_PTYPE_NONF_IPV6_TCP)
		ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV6,
					    ICE_FLOW_SEG_HDR_TCP);
	else if (flow == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
		ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV6,
					    ICE_FLOW_SEG_HDR_UDP);
	else
		ret = -EINVAL;
	if (ret)
		goto err_exit;

	/* add filter for outer headers */
	ret = ice_fdir_set_hw_fltr_rule(pf, seg, flow, ICE_FD_HW_SEG_NON_TUN);
	if (ret)
		/* could not write filter, free memory */
		goto err_exit;

	/* make tunneled filter HW entries if possible */
	memcpy(&tun_seg[1], seg, sizeof(*seg));
	ret = ice_fdir_set_hw_fltr_rule(pf, tun_seg, flow, ICE_FD_HW_SEG_TUN);
	if (ret)
		/* could not write tunnel filter, but outer header filter
		 * exists
		 */
		devm_kfree(dev, tun_seg);

	set_bit(flow, hw->fdir_perfect_fltr);
	return ret;
err_exit:
	devm_kfree(dev, tun_seg);
	devm_kfree(dev, seg);

	return -EOPNOTSUPP;
}

/**
 * ice_set_fdir_ip4_seg
 * @seg: flow segment for programming
 * @tcp_ip4_spec: mask data from ethtool
 * @l4_proto: Layer 4 protocol to program
 * @perfect_fltr: only valid on success; returns true if perfect filter,
 *		  false if not
 *
 * Set the mask data into the flow segment to be used to program HW
 * table based on provided L4 protocol for IPv4
 */
static int
ice_set_fdir_ip4_seg(struct ice_flow_seg_info *seg,
		     struct ethtool_tcpip4_spec *tcp_ip4_spec,
		     enum ice_flow_seg_hdr l4_proto, bool *perfect_fltr)
{
	enum ice_flow_field src_port, dst_port;

	/* make sure we don't have any empty rule */
	if (!tcp_ip4_spec->psrc && !tcp_ip4_spec->ip4src &&
	    !tcp_ip4_spec->pdst && !tcp_ip4_spec->ip4dst)
		return -EINVAL;

	/* filtering on TOS not supported */
	if (tcp_ip4_spec->tos)
		return -EOPNOTSUPP;

	if (l4_proto == ICE_FLOW_SEG_HDR_TCP) {
		src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT;
		dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT;
	} else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) {
		src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT;
		dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT;
	} else if (l4_proto == ICE_FLOW_SEG_HDR_SCTP) {
		src_port = ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT;
		dst_port = ICE_FLOW_FIELD_IDX_SCTP_DST_PORT;
	} else {
		return -EOPNOTSUPP;
	}

	*perfect_fltr = true;
	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 | l4_proto);

	/* IP source address */
	if (tcp_ip4_spec->ip4src == htonl(0xFFFFFFFF))
		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	else if (!tcp_ip4_spec->ip4src)
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	/* IP destination address */
	if (tcp_ip4_spec->ip4dst == htonl(0xFFFFFFFF))
		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	else if (!tcp_ip4_spec->ip4dst)
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	/* Layer 4 source port */
	if (tcp_ip4_spec->psrc == htons(0xFFFF))
		ice_flow_set_fld(seg, src_port, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 false);
	else if (!tcp_ip4_spec->psrc)
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	/* Layer 4 destination port */
	if (tcp_ip4_spec->pdst == htons(0xFFFF))
		ice_flow_set_fld(seg, dst_port, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 false);
	else if (!tcp_ip4_spec->pdst)
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	return 0;
}
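/* Mask handling above, by example: a rule specifying only src-ip and
 * dst-port reaches this function with ip4src and pdst fully masked
 * (0xFFFFFFFF and 0xFFFF) and ip4dst/psrc zero, so only those two fields
 * are added to the segment and *perfect_fltr is returned false. Any other
 * (partial) mask value on a field fails with -EOPNOTSUPP.
 */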
/**
 * ice_set_fdir_ip4_usr_seg
 * @seg: flow segment for programming
 * @usr_ip4_spec: ethtool userdef packet offset
 * @perfect_fltr: only valid on success; returns true if perfect filter,
 *		  false if not
 *
 * Set the offset data into the flow segment to be used to program HW
 * table for IPv4
 */
static int
ice_set_fdir_ip4_usr_seg(struct ice_flow_seg_info *seg,
			 struct ethtool_usrip4_spec *usr_ip4_spec,
			 bool *perfect_fltr)
{
	/* first 4 bytes of Layer 4 header */
	if (usr_ip4_spec->l4_4_bytes)
		return -EINVAL;
	if (usr_ip4_spec->tos)
		return -EINVAL;
	if (usr_ip4_spec->ip_ver)
		return -EINVAL;
	/* Filtering on Layer 4 protocol not supported */
	if (usr_ip4_spec->proto)
		return -EOPNOTSUPP;
	/* empty rules are not valid */
	if (!usr_ip4_spec->ip4src && !usr_ip4_spec->ip4dst)
		return -EINVAL;

	*perfect_fltr = true;
	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);

	/* IP source address */
	if (usr_ip4_spec->ip4src == htonl(0xFFFFFFFF))
		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	else if (!usr_ip4_spec->ip4src)
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	/* IP destination address */
	if (usr_ip4_spec->ip4dst == htonl(0xFFFFFFFF))
		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	else if (!usr_ip4_spec->ip4dst)
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	return 0;
}

/**
 * ice_set_fdir_ip6_seg
 * @seg: flow segment for programming
 * @tcp_ip6_spec: mask data from ethtool
 * @l4_proto: Layer 4 protocol to program
 * @perfect_fltr: only valid on success; returns true if perfect filter,
 *		  false if not
 *
 * Set the mask data into the flow segment to be used to program HW
 * table based on provided L4 protocol for IPv6
 */
static int
ice_set_fdir_ip6_seg(struct ice_flow_seg_info *seg,
		     struct ethtool_tcpip6_spec *tcp_ip6_spec,
		     enum ice_flow_seg_hdr l4_proto, bool *perfect_fltr)
{
	enum ice_flow_field src_port, dst_port;

	/* make sure we don't have any empty rule */
	if (!memcmp(tcp_ip6_spec->ip6src, &zero_ipv6_addr_mask,
		    sizeof(struct in6_addr)) &&
	    !memcmp(tcp_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
		    sizeof(struct in6_addr)) &&
	    !tcp_ip6_spec->psrc && !tcp_ip6_spec->pdst)
		return -EINVAL;

	/* filtering on TC not supported */
	if (tcp_ip6_spec->tclass)
		return -EOPNOTSUPP;

	if (l4_proto == ICE_FLOW_SEG_HDR_TCP) {
		src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT;
		dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT;
	} else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) {
		src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT;
		dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT;
	} else if (l4_proto == ICE_FLOW_SEG_HDR_SCTP) {
		src_port = ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT;
		dst_port = ICE_FLOW_FIELD_IDX_SCTP_DST_PORT;
	} else {
		return -EINVAL;
	}

	*perfect_fltr = true;
	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 | l4_proto);

	if (!memcmp(tcp_ip6_spec->ip6src, &full_ipv6_addr_mask,
		    sizeof(struct in6_addr)))
		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_SA,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	else if (!memcmp(tcp_ip6_spec->ip6src, &zero_ipv6_addr_mask,
			 sizeof(struct in6_addr)))
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	if (!memcmp(tcp_ip6_spec->ip6dst, &full_ipv6_addr_mask,
		    sizeof(struct in6_addr)))
		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_DA,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	else if (!memcmp(tcp_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
			 sizeof(struct in6_addr)))
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	/* Layer 4 source port */
	if (tcp_ip6_spec->psrc == htons(0xFFFF))
		ice_flow_set_fld(seg, src_port, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 false);
	else if (!tcp_ip6_spec->psrc)
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	/* Layer 4 destination port */
	if (tcp_ip6_spec->pdst == htons(0xFFFF))
		ice_flow_set_fld(seg, dst_port, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 false);
	else if (!tcp_ip6_spec->pdst)
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	return 0;
}

/**
 * ice_set_fdir_ip6_usr_seg
 * @seg: flow segment for programming
 * @usr_ip6_spec: ethtool userdef packet offset
 * @perfect_fltr: only valid on success; returns true if perfect filter,
 *		  false if not
 *
 * Set the offset data into the flow segment to be used to program HW
 * table for IPv6
 */
static int
ice_set_fdir_ip6_usr_seg(struct ice_flow_seg_info *seg,
			 struct ethtool_usrip6_spec *usr_ip6_spec,
			 bool *perfect_fltr)
{
	/* filtering on Layer 4 bytes not supported */
	if (usr_ip6_spec->l4_4_bytes)
		return -EOPNOTSUPP;
	/* filtering on TC not supported */
	if (usr_ip6_spec->tclass)
		return -EOPNOTSUPP;
	/* filtering on Layer 4 protocol not supported */
	if (usr_ip6_spec->l4_proto)
		return -EOPNOTSUPP;
	/* empty rules are not valid */
	if (!memcmp(usr_ip6_spec->ip6src, &zero_ipv6_addr_mask,
		    sizeof(struct in6_addr)) &&
	    !memcmp(usr_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
		    sizeof(struct in6_addr)))
		return -EINVAL;

	*perfect_fltr = true;
	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);

	if (!memcmp(usr_ip6_spec->ip6src, &full_ipv6_addr_mask,
		    sizeof(struct in6_addr)))
		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_SA,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	else if (!memcmp(usr_ip6_spec->ip6src, &zero_ipv6_addr_mask,
			 sizeof(struct in6_addr)))
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	if (!memcmp(usr_ip6_spec->ip6dst, &full_ipv6_addr_mask,
		    sizeof(struct in6_addr)))
		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_DA,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	else if (!memcmp(usr_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
			 sizeof(struct in6_addr)))
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	return 0;
}
/**
 * ice_cfg_fdir_xtrct_seq - Configure extraction sequence for the given filter
 * @pf: PF structure
 * @fsp: pointer to ethtool Rx flow specification
 * @user: user defined data from flow specification
 *
 * Returns 0 on success.
 */
static int
ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp,
		       struct ice_rx_flow_userdef *user)
{
	struct ice_flow_seg_info *seg, *tun_seg;
	struct device *dev = ice_pf_to_dev(pf);
	enum ice_fltr_ptype fltr_idx;
	struct ice_hw *hw = &pf->hw;
	bool perfect_filter;
	int ret;

	seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
	if (!seg)
		return -ENOMEM;

	tun_seg = devm_kcalloc(dev, ICE_FD_HW_SEG_MAX, sizeof(*seg),
			       GFP_KERNEL);
	if (!tun_seg) {
		devm_kfree(dev, seg);
		return -ENOMEM;
	}

	switch (fsp->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec,
					   ICE_FLOW_SEG_HDR_TCP,
					   &perfect_filter);
		break;
	case UDP_V4_FLOW:
		ret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec,
					   ICE_FLOW_SEG_HDR_UDP,
					   &perfect_filter);
		break;
	case SCTP_V4_FLOW:
		ret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec,
					   ICE_FLOW_SEG_HDR_SCTP,
					   &perfect_filter);
		break;
	case IPV4_USER_FLOW:
		ret = ice_set_fdir_ip4_usr_seg(seg, &fsp->m_u.usr_ip4_spec,
					       &perfect_filter);
		break;
	case TCP_V6_FLOW:
		ret = ice_set_fdir_ip6_seg(seg, &fsp->m_u.tcp_ip6_spec,
					   ICE_FLOW_SEG_HDR_TCP,
					   &perfect_filter);
		break;
	case UDP_V6_FLOW:
		ret = ice_set_fdir_ip6_seg(seg, &fsp->m_u.tcp_ip6_spec,
					   ICE_FLOW_SEG_HDR_UDP,
					   &perfect_filter);
		break;
	case SCTP_V6_FLOW:
		ret = ice_set_fdir_ip6_seg(seg, &fsp->m_u.tcp_ip6_spec,
					   ICE_FLOW_SEG_HDR_SCTP,
					   &perfect_filter);
		break;
	case IPV6_USER_FLOW:
		ret = ice_set_fdir_ip6_usr_seg(seg, &fsp->m_u.usr_ip6_spec,
					       &perfect_filter);
		break;
	default:
		ret = -EINVAL;
	}
	if (ret)
		goto err_exit;

	/* tunnel segments are shifted up one; make the tunnel copy before
	 * any flex fields are added below
	 */
	memcpy(&tun_seg[1], seg, sizeof(*seg));

	if (user && user->flex_fltr) {
		perfect_filter = false;
		ice_flow_add_fld_raw(seg, user->flex_offset,
				     ICE_FLTR_PRGM_FLEX_WORD_SIZE,
				     ICE_FLOW_FLD_OFF_INVAL,
				     ICE_FLOW_FLD_OFF_INVAL);
		ice_flow_add_fld_raw(&tun_seg[1], user->flex_offset,
				     ICE_FLTR_PRGM_FLEX_WORD_SIZE,
				     ICE_FLOW_FLD_OFF_INVAL,
				     ICE_FLOW_FLD_OFF_INVAL);
	}

	/* add filter for outer headers */
	fltr_idx = ice_ethtool_flow_to_fltr(fsp->flow_type & ~FLOW_EXT);
	ret = ice_fdir_set_hw_fltr_rule(pf, seg, fltr_idx,
					ICE_FD_HW_SEG_NON_TUN);
	if (ret == -EEXIST)
		/* Rule already exists, free memory and continue */
		devm_kfree(dev, seg);
	else if (ret)
		/* could not write filter, free memory */
		goto err_exit;

	/* make tunneled filter HW entries if possible; tun_seg[1] was already
	 * populated from seg above, so do not copy again here (seg may have
	 * been freed on the -EEXIST path).
	 */
	ret = ice_fdir_set_hw_fltr_rule(pf, tun_seg, fltr_idx,
					ICE_FD_HW_SEG_TUN);
	if (ret == -EEXIST) {
		/* Rule already exists, free memory and count as success */
		devm_kfree(dev, tun_seg);
		ret = 0;
	} else if (ret) {
		/* could not write tunnel filter, but outer filter exists */
		devm_kfree(dev, tun_seg);
	}

	if (perfect_filter)
		set_bit(fltr_idx, hw->fdir_perfect_fltr);
	else
		clear_bit(fltr_idx, hw->fdir_perfect_fltr);

	return ret;

err_exit:
	devm_kfree(dev, tun_seg);
	devm_kfree(dev, seg);

	return -EOPNOTSUPP;
}

/**
 * ice_fdir_write_fltr - send a flow director filter to the hardware
 * @pf: PF data structure
 * @input: filter structure
 * @add: true adds the filter, false removes it
 * @is_tun: true programs the tunneled (inner) filter, false the outer
 *	    headers
 *
 * returns 0 on success and negative value on error
 */
int
ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
		    bool is_tun)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_fltr_desc desc;
	struct ice_vsi *ctrl_vsi;
	u8 *pkt, *frag_pkt;
	bool has_frag;
	int err;

	ctrl_vsi = ice_get_ctrl_vsi(pf);
	if (!ctrl_vsi)
		return -EINVAL;

	pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;
	frag_pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
	if (!frag_pkt) {
		err = -ENOMEM;
		goto err_free;
	}

	ice_fdir_get_prgm_desc(hw, input, &desc, add);
	err = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
	if (err)
		goto err_free_all;
	err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
	if (err)
		goto err_free_all;

	/* repeat for fragment packet */
	has_frag = ice_fdir_has_frag(input->flow_type);
	if (has_frag) {
		/* does not return error */
		ice_fdir_get_prgm_desc(hw, input, &desc, add);
		err = ice_fdir_get_gen_prgm_pkt(hw, input, frag_pkt, true,
						is_tun);
		if (err)
			goto err_frag;
		err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, frag_pkt);
		if (err)
			goto err_frag;
	} else {
		devm_kfree(dev, frag_pkt);
	}

	return 0;

err_free_all:
	devm_kfree(dev, frag_pkt);
err_free:
	devm_kfree(dev, pkt);
	return err;

err_frag:
	devm_kfree(dev, frag_pkt);
	return err;
}
/**
 * ice_fdir_write_all_fltr - send a flow director filter to the hardware
 * @pf: PF data structure
 * @input: filter structure
 * @add: true adds the filter, false removes it
 *
 * returns 0 on success and negative value on error
 */
static int
ice_fdir_write_all_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input,
			bool add)
{
	u16 port_num;
	int tun;

	for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
		bool is_tun = tun == ICE_FD_HW_SEG_TUN;
		int err;

		if (is_tun && !ice_get_open_tunnel_port(&pf->hw, &port_num, TNL_ALL))
			continue;
		err = ice_fdir_write_fltr(pf, input, add, is_tun);
		if (err)
			return err;
	}
	return 0;
}

/**
 * ice_fdir_replay_fltrs - replay filters from the HW filter list
 * @pf: board private structure
 */
void ice_fdir_replay_fltrs(struct ice_pf *pf)
{
	struct ice_fdir_fltr *f_rule;
	struct ice_hw *hw = &pf->hw;

	list_for_each_entry(f_rule, &hw->fdir_list_head, fltr_node) {
		int err = ice_fdir_write_all_fltr(pf, f_rule, true);

		if (err)
			dev_dbg(ice_pf_to_dev(pf), "Flow Director error %d, could not reprogram filter %d\n",
				err, f_rule->fltr_id);
	}
}

/**
 * ice_fdir_create_dflt_rules - create default perfect filters
 * @pf: PF data structure
 *
 * Returns 0 for success or error.
 */
int ice_fdir_create_dflt_rules(struct ice_pf *pf)
{
	int err;

	/* Create perfect TCP and UDP rules in hardware. */
	err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_TCP);
	if (err)
		return err;

	err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_UDP);
	if (err)
		return err;

	err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_TCP);
	if (err)
		return err;

	err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_UDP);

	return err;
}

/**
 * ice_vsi_manage_fdir - turn on/off flow director
 * @vsi: the VSI being changed
 * @ena: boolean value indicating if this is an enable or disable request
 */
void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena)
{
	struct ice_fdir_fltr *f_rule, *tmp;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	enum ice_fltr_ptype flow;

	if (ena) {
		set_bit(ICE_FLAG_FD_ENA, pf->flags);
		ice_fdir_create_dflt_rules(pf);
		return;
	}

	mutex_lock(&hw->fdir_fltr_lock);
	if (!test_and_clear_bit(ICE_FLAG_FD_ENA, pf->flags))
		goto release_lock;
	list_for_each_entry_safe(f_rule, tmp, &hw->fdir_list_head, fltr_node) {
		/* ignore return value */
		ice_fdir_write_all_fltr(pf, f_rule, false);
		ice_fdir_update_cntrs(hw, f_rule->flow_type, false);
		list_del(&f_rule->fltr_node);
		devm_kfree(ice_hw_to_dev(hw), f_rule);
	}

	if (hw->fdir_prof)
		for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX;
		     flow++)
			if (hw->fdir_prof[flow])
				ice_fdir_rem_flow(hw, ICE_BLK_FD, flow);

release_lock:
	mutex_unlock(&hw->fdir_fltr_lock);
}

/**
 * ice_fdir_do_rem_flow - delete flow and possibly add perfect flow
 * @pf: PF structure
 * @flow_type: FDir flow type to release
 */
static void
ice_fdir_do_rem_flow(struct ice_pf *pf, enum ice_fltr_ptype flow_type)
{
	struct ice_hw *hw = &pf->hw;
	bool need_perfect = false;

	if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
	    flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
	    flow_type == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
	    flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
		need_perfect = true;

	if (need_perfect && test_bit(flow_type, hw->fdir_perfect_fltr))
		return;

	ice_fdir_rem_flow(hw, ICE_BLK_FD, flow_type);
	if (need_perfect)
		ice_create_init_fdir_rule(pf, flow_type);
}

/**
 * ice_fdir_update_list_entry - add or delete a filter from the filter list
 * @pf: PF structure
 * @input: filter structure
 * @fltr_idx: ethtool index of filter to modify
 *
 * returns 0 on success and negative on errors
 */
static int
ice_fdir_update_list_entry(struct ice_pf *pf, struct ice_fdir_fltr *input,
			   int fltr_idx)
{
	struct ice_fdir_fltr *old_fltr;
	struct ice_hw *hw = &pf->hw;
	int err = -ENOENT;

	/* Do not update filters during reset */
	if (ice_is_reset_in_progress(pf->state))
		return -EBUSY;

	old_fltr = ice_fdir_find_fltr_by_idx(hw, fltr_idx);
	if (old_fltr) {
		err = ice_fdir_write_all_fltr(pf, old_fltr, false);
		if (err)
			return err;
		ice_fdir_update_cntrs(hw, old_fltr->flow_type, false);
		if (!input && !hw->fdir_fltr_cnt[old_fltr->flow_type])
			/* we just deleted the last filter of flow_type so we
			 * should also delete the HW filter info.
			 */
			ice_fdir_do_rem_flow(pf, old_fltr->flow_type);
		list_del(&old_fltr->fltr_node);
		devm_kfree(ice_hw_to_dev(hw), old_fltr);
	}
	if (!input)
		return err;
	ice_fdir_list_add_fltr(hw, input);
	ice_fdir_update_cntrs(hw, input->flow_type, true);
	return 0;
}

/**
 * ice_del_fdir_ethtool - delete Flow Director filter
 * @vsi: pointer to target VSI
 * @cmd: command containing the filter to delete
 *
 * Returns 0 on success and negative values for failure
 */
int ice_del_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	int val;

	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
		return -EOPNOTSUPP;

	/* Do not delete filters during reset */
	if (ice_is_reset_in_progress(pf->state)) {
		dev_err(ice_pf_to_dev(pf), "Device is resetting - deleting Flow Director filters not supported during reset\n");
		return -EBUSY;
	}

	if (test_bit(ICE_FD_FLUSH_REQ, pf->state))
		return -EBUSY;

	mutex_lock(&hw->fdir_fltr_lock);
	val = ice_fdir_update_list_entry(pf, NULL, fsp->location);
	mutex_unlock(&hw->fdir_fltr_lock);

	return val;
}

/**
 * ice_set_fdir_input_set - Set the input set for Flow Director
 * @vsi: pointer to target VSI
 * @fsp: pointer to ethtool Rx flow specification
 * @input: filter structure
 */
static int
ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp,
		       struct ice_fdir_fltr *input)
{
	u16 dest_vsi, q_index = 0;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int flow_type;
	u8 dest_ctl;

	if (!vsi || !fsp || !input)
		return -EINVAL;

	pf = vsi->back;
	hw = &pf->hw;

	dest_vsi = vsi->idx;
	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
		dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
	} else {
		u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
		u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);

		if (vf) {
			dev_err(ice_pf_to_dev(pf), "Failed to add filter. Flow director filters are not supported on VF queues.\n");
			return -EINVAL;
		}

		if (ring >= vsi->num_rxq)
			return -EINVAL;

		dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
		q_index = ring;
	}

	input->fltr_id = fsp->location;
	input->q_index = q_index;
	flow_type = fsp->flow_type & ~FLOW_EXT;

	input->dest_vsi = dest_vsi;
	input->dest_ctl = dest_ctl;
	input->fltr_status = ICE_FLTR_PRGM_DESC_FD_STATUS_FD_ID;
	input->cnt_index = ICE_FD_SB_STAT_IDX(hw->fd_ctr_base);
	input->flow_type = ice_ethtool_flow_to_fltr(flow_type);

	if (fsp->flow_type & FLOW_EXT) {
		memcpy(input->ext_data.usr_def, fsp->h_ext.data,
		       sizeof(input->ext_data.usr_def));
		input->ext_data.vlan_type = fsp->h_ext.vlan_etype;
		input->ext_data.vlan_tag = fsp->h_ext.vlan_tci;
		memcpy(input->ext_mask.usr_def, fsp->m_ext.data,
		       sizeof(input->ext_mask.usr_def));
		input->ext_mask.vlan_type = fsp->m_ext.vlan_etype;
		input->ext_mask.vlan_tag = fsp->m_ext.vlan_tci;
	}

	switch (flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		input->ip.v4.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
		input->ip.v4.src_port = fsp->h_u.tcp_ip4_spec.psrc;
		input->ip.v4.dst_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
		input->ip.v4.src_ip = fsp->h_u.tcp_ip4_spec.ip4src;
		input->mask.v4.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
		input->mask.v4.src_port = fsp->m_u.tcp_ip4_spec.psrc;
		input->mask.v4.dst_ip = fsp->m_u.tcp_ip4_spec.ip4dst;
		input->mask.v4.src_ip = fsp->m_u.tcp_ip4_spec.ip4src;
		break;
	case IPV4_USER_FLOW:
		input->ip.v4.dst_ip = fsp->h_u.usr_ip4_spec.ip4dst;
		input->ip.v4.src_ip = fsp->h_u.usr_ip4_spec.ip4src;
		input->ip.v4.l4_header = fsp->h_u.usr_ip4_spec.l4_4_bytes;
		input->ip.v4.proto = fsp->h_u.usr_ip4_spec.proto;
		input->ip.v4.ip_ver = fsp->h_u.usr_ip4_spec.ip_ver;
		input->ip.v4.tos = fsp->h_u.usr_ip4_spec.tos;
		input->mask.v4.dst_ip = fsp->m_u.usr_ip4_spec.ip4dst;
		input->mask.v4.src_ip = fsp->m_u.usr_ip4_spec.ip4src;
		input->mask.v4.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes;
		input->mask.v4.proto = fsp->m_u.usr_ip4_spec.proto;
		input->mask.v4.ip_ver = fsp->m_u.usr_ip4_spec.ip_ver;
		input->mask.v4.tos = fsp->m_u.usr_ip4_spec.tos;
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
		memcpy(input->ip.v6.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
		       sizeof(struct in6_addr));
		memcpy(input->ip.v6.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
		       sizeof(struct in6_addr));
		input->ip.v6.dst_port = fsp->h_u.tcp_ip6_spec.pdst;
		input->ip.v6.src_port = fsp->h_u.tcp_ip6_spec.psrc;
		input->ip.v6.tc = fsp->h_u.tcp_ip6_spec.tclass;
		memcpy(input->mask.v6.dst_ip, fsp->m_u.tcp_ip6_spec.ip6dst,
		       sizeof(struct in6_addr));
		memcpy(input->mask.v6.src_ip, fsp->m_u.tcp_ip6_spec.ip6src,
		       sizeof(struct in6_addr));
		input->mask.v6.dst_port = fsp->m_u.tcp_ip6_spec.pdst;
		input->mask.v6.src_port = fsp->m_u.tcp_ip6_spec.psrc;
		input->mask.v6.tc = fsp->m_u.tcp_ip6_spec.tclass;
		break;
	case IPV6_USER_FLOW:
		memcpy(input->ip.v6.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
		       sizeof(struct in6_addr));
		memcpy(input->ip.v6.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
		       sizeof(struct in6_addr));
		input->ip.v6.l4_header = fsp->h_u.usr_ip6_spec.l4_4_bytes;
		input->ip.v6.tc = fsp->h_u.usr_ip6_spec.tclass;

		/* if no protocol requested, use IPPROTO_NONE */
		if (!fsp->m_u.usr_ip6_spec.l4_proto)
			input->ip.v6.proto = IPPROTO_NONE;
		else
			input->ip.v6.proto = fsp->h_u.usr_ip6_spec.l4_proto;

		memcpy(input->mask.v6.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
		       sizeof(struct in6_addr));
		memcpy(input->mask.v6.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
		       sizeof(struct in6_addr));
		input->mask.v6.l4_header = fsp->m_u.usr_ip6_spec.l4_4_bytes;
		input->mask.v6.tc = fsp->m_u.usr_ip6_spec.tclass;
		input->mask.v6.proto = fsp->m_u.usr_ip6_spec.l4_proto;
		break;
	default:
		/* not doing un-parsed flow types */
		return -EINVAL;
	}

	return 0;
}

/**
 * ice_add_fdir_ethtool - Add a Flow Director filter
 * @vsi: pointer to target VSI
 * @cmd: command containing the filter to add
 *
 * Returns 0 on success and negative values for failure
 */
int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
{
	struct ice_rx_flow_userdef userdata;
	struct ethtool_rx_flow_spec *fsp;
	struct ice_fdir_fltr *input;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int fltrs_needed;
	u16 tunnel_port;
	int ret;

	if (!vsi)
		return -EINVAL;

	pf = vsi->back;
	hw = &pf->hw;
	dev = ice_pf_to_dev(pf);

	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
		return -EOPNOTSUPP;

	/* Do not program filters during reset */
	if (ice_is_reset_in_progress(pf->state)) {
		dev_err(dev, "Device is resetting - adding Flow Director filters not supported during reset\n");
		return -EBUSY;
	}

	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;

	if (ice_parse_rx_flow_user_data(fsp, &userdata))
		return -EINVAL;

	if (fsp->flow_type & FLOW_MAC_EXT)
		return -EINVAL;

	ret = ice_cfg_fdir_xtrct_seq(pf, fsp, &userdata);
	if (ret)
		return ret;

	if (fsp->location >= ice_get_fdir_cnt_all(hw)) {
		dev_err(dev, "Failed to add filter. The maximum number of flow director filters has been reached.\n");
		return -ENOSPC;
	}

	/* return error if not an update and no available filters */
	fltrs_needed = ice_get_open_tunnel_port(hw, &tunnel_port, TNL_ALL) ? 2 : 1;
	if (!ice_fdir_find_fltr_by_idx(hw, fsp->location) &&
	    ice_fdir_num_avail_fltr(hw, pf->vsi[vsi->idx]) < fltrs_needed) {
		dev_err(dev, "Failed to add filter. The maximum number of flow director filters has been reached.\n");
		return -ENOSPC;
	}

	input = devm_kzalloc(dev, sizeof(*input), GFP_KERNEL);
	if (!input)
		return -ENOMEM;

	ret = ice_set_fdir_input_set(vsi, fsp, input);
	if (ret)
		goto free_input;

	mutex_lock(&hw->fdir_fltr_lock);
	if (ice_fdir_is_dup_fltr(hw, input)) {
		ret = -EINVAL;
		goto release_lock;
	}

	if (userdata.flex_fltr) {
		input->flex_fltr = true;
		input->flex_word = cpu_to_be16(userdata.flex_word);
		input->flex_offset = userdata.flex_offset;
	}

	input->cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
	input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE;
	input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL;

	/* input struct is added to the HW filter list */
	ice_fdir_update_list_entry(pf, input, fsp->location);

	ret = ice_fdir_write_all_fltr(pf, input, true);
	if (ret)
		goto remove_sw_rule;

	goto release_lock;

remove_sw_rule:
	ice_fdir_update_cntrs(hw, input->flow_type, false);
	list_del(&input->fltr_node);
release_lock:
	mutex_unlock(&hw->fdir_fltr_lock);
free_input:
	if (ret)
		devm_kfree(dev, input);

	return ret;
}
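/* Illustrative ethtool usage exercising the add and delete paths above
 * (interface name, addresses, queue and filter indices are examples only):
 *
 *   # steer a fully specified TCP/IPv4 4-tuple to Rx queue 2, filter index 5
 *   ethtool -N eth0 flow-type tcp4 src-ip 192.168.10.2 dst-ip 192.168.10.1 \
 *           src-port 5000 dst-port 5001 action 2 loc 5
 *
 *   # drop matching packets instead of steering them (action -1 maps to
 *   # RX_CLS_FLOW_DISC / ICE_FLTR_PRGM_DESC_DEST_DROP_PKT)
 *   ethtool -N eth0 flow-type udp4 dst-port 319 action -1 loc 6
 *
 *   # delete the filter at index 5
 *   ethtool -N eth0 delete 5
 */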