// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2020, Intel Corporation. */

/* flow director ethtool support for ice */

#include "ice.h"
#include "ice_lib.h"
#include "ice_flow.h"

static struct in6_addr full_ipv6_addr_mask = {
	.in6_u = {
		.u6_addr8 = {
			0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
			0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		}
	}
};

static struct in6_addr zero_ipv6_addr_mask = {
	.in6_u = {
		.u6_addr8 = {
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		}
	}
};

/* calls to ice_flow_add_prof require the number of segments in the array
 * for segs_cnt. In this code that is one more than the index.
 */
#define TNL_SEG_CNT(_TNL_) ((_TNL_) + 1)

/**
 * ice_fltr_to_ethtool_flow - convert filter type values to ethtool
 * flow type values
 * @flow: filter type to be converted
 *
 * Returns the corresponding ethtool flow type.
 */
static int ice_fltr_to_ethtool_flow(enum ice_fltr_ptype flow)
{
	switch (flow) {
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		return TCP_V4_FLOW;
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		return UDP_V4_FLOW;
	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
		return SCTP_V4_FLOW;
	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
		return IPV4_USER_FLOW;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		return TCP_V6_FLOW;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		return UDP_V6_FLOW;
	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
		return SCTP_V6_FLOW;
	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
		return IPV6_USER_FLOW;
	default:
		/* 0 is undefined ethtool flow */
		return 0;
	}
}

/**
 * ice_ethtool_flow_to_fltr - convert ethtool flow type to filter enum
 * @eth: Ethtool flow type to be converted
 *
 * Returns flow enum
 */
static enum ice_fltr_ptype ice_ethtool_flow_to_fltr(int eth)
{
	switch (eth) {
	case TCP_V4_FLOW:
		return ICE_FLTR_PTYPE_NONF_IPV4_TCP;
	case UDP_V4_FLOW:
		return ICE_FLTR_PTYPE_NONF_IPV4_UDP;
	case SCTP_V4_FLOW:
		return ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
	case IPV4_USER_FLOW:
		return ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
	case TCP_V6_FLOW:
		return ICE_FLTR_PTYPE_NONF_IPV6_TCP;
	case UDP_V6_FLOW:
		return ICE_FLTR_PTYPE_NONF_IPV6_UDP;
	case SCTP_V6_FLOW:
		return ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
	case IPV6_USER_FLOW:
		return ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
	default:
		return ICE_FLTR_PTYPE_NONF_NONE;
	}
}
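/* Illustrative note: the two helpers above are inverses over the supported
 * flow types, e.g. ice_fltr_to_ethtool_flow(ICE_FLTR_PTYPE_NONF_IPV4_TCP)
 * yields TCP_V4_FLOW and ice_ethtool_flow_to_fltr(TCP_V4_FLOW) maps back to
 * ICE_FLTR_PTYPE_NONF_IPV4_TCP; ice_get_ethtool_fdir_entry() below relies on
 * this round trip to validate the rules it reports.
 */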
/**
 * ice_is_mask_valid - check whether a mask covers a field
 * @mask: full mask to check
 * @field: field for which mask should be valid
 *
 * Returns true if every bit of @field is set in @mask, false otherwise.
 */
static bool ice_is_mask_valid(u64 mask, u64 field)
{
	return (mask & field) == field;
}

/**
 * ice_get_ethtool_fdir_entry - fill ethtool structure with fdir filter data
 * @hw: hardware structure that contains filter list
 * @cmd: ethtool command data structure to receive the filter data
 *
 * Returns 0 on success and -EINVAL on failure
 */
int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp;
	struct ice_fdir_fltr *rule;
	int ret = 0;
	u16 idx;

	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;

	mutex_lock(&hw->fdir_fltr_lock);

	rule = ice_fdir_find_fltr_by_idx(hw, fsp->location);

	if (!rule || fsp->location != rule->fltr_id) {
		ret = -EINVAL;
		goto release_lock;
	}

	fsp->flow_type = ice_fltr_to_ethtool_flow(rule->flow_type);

	memset(&fsp->m_u, 0, sizeof(fsp->m_u));
	memset(&fsp->m_ext, 0, sizeof(fsp->m_ext));

	switch (fsp->flow_type) {
	case IPV4_USER_FLOW:
		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
		fsp->h_u.usr_ip4_spec.proto = 0;
		fsp->h_u.usr_ip4_spec.l4_4_bytes = rule->ip.v4.l4_header;
		fsp->h_u.usr_ip4_spec.tos = rule->ip.v4.tos;
		fsp->h_u.usr_ip4_spec.ip4src = rule->ip.v4.src_ip;
		fsp->h_u.usr_ip4_spec.ip4dst = rule->ip.v4.dst_ip;
		fsp->m_u.usr_ip4_spec.ip4src = rule->mask.v4.src_ip;
		fsp->m_u.usr_ip4_spec.ip4dst = rule->mask.v4.dst_ip;
		fsp->m_u.usr_ip4_spec.ip_ver = 0xFF;
		fsp->m_u.usr_ip4_spec.proto = 0;
		fsp->m_u.usr_ip4_spec.l4_4_bytes = rule->mask.v4.l4_header;
		fsp->m_u.usr_ip4_spec.tos = rule->mask.v4.tos;
		break;
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		fsp->h_u.tcp_ip4_spec.psrc = rule->ip.v4.src_port;
		fsp->h_u.tcp_ip4_spec.pdst = rule->ip.v4.dst_port;
		fsp->h_u.tcp_ip4_spec.ip4src = rule->ip.v4.src_ip;
		fsp->h_u.tcp_ip4_spec.ip4dst = rule->ip.v4.dst_ip;
		fsp->m_u.tcp_ip4_spec.psrc = rule->mask.v4.src_port;
		fsp->m_u.tcp_ip4_spec.pdst = rule->mask.v4.dst_port;
		fsp->m_u.tcp_ip4_spec.ip4src = rule->mask.v4.src_ip;
		fsp->m_u.tcp_ip4_spec.ip4dst = rule->mask.v4.dst_ip;
		break;
	case IPV6_USER_FLOW:
		fsp->h_u.usr_ip6_spec.l4_4_bytes = rule->ip.v6.l4_header;
		fsp->h_u.usr_ip6_spec.tclass = rule->ip.v6.tc;
		fsp->h_u.usr_ip6_spec.l4_proto = rule->ip.v6.proto;
		memcpy(fsp->h_u.tcp_ip6_spec.ip6src, rule->ip.v6.src_ip,
		       sizeof(struct in6_addr));
		memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, rule->ip.v6.dst_ip,
		       sizeof(struct in6_addr));
		memcpy(fsp->m_u.tcp_ip6_spec.ip6src, rule->mask.v6.src_ip,
		       sizeof(struct in6_addr));
		memcpy(fsp->m_u.tcp_ip6_spec.ip6dst, rule->mask.v6.dst_ip,
		       sizeof(struct in6_addr));
		fsp->m_u.usr_ip6_spec.l4_4_bytes = rule->mask.v6.l4_header;
		fsp->m_u.usr_ip6_spec.tclass = rule->mask.v6.tc;
		fsp->m_u.usr_ip6_spec.l4_proto = rule->mask.v6.proto;
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
		memcpy(fsp->h_u.tcp_ip6_spec.ip6src, rule->ip.v6.src_ip,
		       sizeof(struct in6_addr));
		memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, rule->ip.v6.dst_ip,
		       sizeof(struct in6_addr));
		fsp->h_u.tcp_ip6_spec.psrc = rule->ip.v6.src_port;
		fsp->h_u.tcp_ip6_spec.pdst = rule->ip.v6.dst_port;
		memcpy(fsp->m_u.tcp_ip6_spec.ip6src,
		       rule->mask.v6.src_ip,
		       sizeof(struct in6_addr));
		memcpy(fsp->m_u.tcp_ip6_spec.ip6dst,
		       rule->mask.v6.dst_ip,
		       sizeof(struct in6_addr));
		fsp->m_u.tcp_ip6_spec.psrc = rule->mask.v6.src_port;
		fsp->m_u.tcp_ip6_spec.pdst = rule->mask.v6.dst_port;
		fsp->h_u.tcp_ip6_spec.tclass = rule->ip.v6.tc;
		fsp->m_u.tcp_ip6_spec.tclass = rule->mask.v6.tc;
		break;
	default:
		break;
	}

	if (rule->dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DROP_PKT)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
	else
		fsp->ring_cookie = rule->q_index;

	idx = ice_ethtool_flow_to_fltr(fsp->flow_type);
	if (idx == ICE_FLTR_PTYPE_NONF_NONE) {
		dev_err(ice_hw_to_dev(hw), "Missing input index for flow_type %d\n",
			rule->flow_type);
		ret = -EINVAL;
	}

release_lock:
	mutex_unlock(&hw->fdir_fltr_lock);
	return ret;
}
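/* Illustrative user-space counterpart (interface name hypothetical):
 *   ethtool -n eth0 rule 5
 * queries the rxnfc API and lands in the handler above to dump the filter
 * stored at location 5.
 */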
/**
 * ice_get_fdir_fltr_ids - fill buffer with filter IDs of active filters
 * @hw: hardware structure containing the filter list
 * @cmd: ethtool command data structure
 * @rule_locs: ethtool array passed in from OS to receive filter IDs
 *
 * Returns 0 as expected for success by ethtool
 */
int
ice_get_fdir_fltr_ids(struct ice_hw *hw, struct ethtool_rxnfc *cmd,
		      u32 *rule_locs)
{
	struct ice_fdir_fltr *f_rule;
	unsigned int cnt = 0;
	int val = 0;

	/* report total rule count */
	cmd->data = ice_get_fdir_cnt_all(hw);

	mutex_lock(&hw->fdir_fltr_lock);

	list_for_each_entry(f_rule, &hw->fdir_list_head, fltr_node) {
		if (cnt == cmd->rule_cnt) {
			val = -EMSGSIZE;
			goto release_lock;
		}
		rule_locs[cnt] = f_rule->fltr_id;
		cnt++;
	}

release_lock:
	mutex_unlock(&hw->fdir_fltr_lock);
	if (!val)
		cmd->rule_cnt = cnt;
	return val;
}

/**
 * ice_fdir_get_hw_prof - return the ice_fd_hw_prof associated with a flow
 * @hw: hardware structure containing the filter list
 * @blk: hardware block
 * @flow: FDir flow type to look up the profile for
 */
static struct ice_fd_hw_prof *
ice_fdir_get_hw_prof(struct ice_hw *hw, enum ice_block blk, int flow)
{
	if (blk == ICE_BLK_FD && hw->fdir_prof)
		return hw->fdir_prof[flow];

	return NULL;
}

/**
 * ice_fdir_erase_flow_from_hw - remove a flow from the HW profile tables
 * @hw: hardware structure containing the filter list
 * @blk: hardware block
 * @flow: FDir flow type to release
 */
static void
ice_fdir_erase_flow_from_hw(struct ice_hw *hw, enum ice_block blk, int flow)
{
	struct ice_fd_hw_prof *prof = ice_fdir_get_hw_prof(hw, blk, flow);
	int tun;

	if (!prof)
		return;

	for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
		u64 prof_id;
		int j;

		prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
		for (j = 0; j < prof->cnt; j++) {
			u16 vsi_num;

			if (!prof->entry_h[j][tun] || !prof->vsi_h[j])
				continue;
			vsi_num = ice_get_hw_vsi_num(hw, prof->vsi_h[j]);
			ice_rem_prof_id_flow(hw, blk, vsi_num, prof_id);
			ice_flow_rem_entry(hw, blk, prof->entry_h[j][tun]);
			prof->entry_h[j][tun] = 0;
		}
		ice_flow_rem_prof(hw, blk, prof_id);
	}
}
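/* Note (illustrative): profile IDs are derived as
 *   prof_id = flow + tun * ICE_FLTR_PTYPE_MAX
 * so the non-tunneled and tunneled variants of the same flow type occupy
 * disjoint ID ranges; the same arithmetic appears in the erase, replay and
 * programming paths.
 */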
/**
 * ice_fdir_rem_flow - release the ice_flow structures for a filter type
 * @hw: hardware structure containing the filter list
 * @blk: hardware block
 * @flow_type: FDir flow type to release
 */
static void
ice_fdir_rem_flow(struct ice_hw *hw, enum ice_block blk,
		  enum ice_fltr_ptype flow_type)
{
	int flow = (int)flow_type & ~FLOW_EXT;
	struct ice_fd_hw_prof *prof;
	int tun, i;

	prof = ice_fdir_get_hw_prof(hw, blk, flow);
	if (!prof)
		return;

	ice_fdir_erase_flow_from_hw(hw, blk, flow);
	for (i = 0; i < prof->cnt; i++)
		prof->vsi_h[i] = 0;
	for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
		if (!prof->fdir_seg[tun])
			continue;
		devm_kfree(ice_hw_to_dev(hw), prof->fdir_seg[tun]);
		prof->fdir_seg[tun] = NULL;
	}
	prof->cnt = 0;
}

/**
 * ice_fdir_release_flows - release all flows in use for later replay
 * @hw: pointer to HW instance
 */
void ice_fdir_release_flows(struct ice_hw *hw)
{
	int flow;

	/* release Flow Director HW table entries */
	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++)
		ice_fdir_erase_flow_from_hw(hw, ICE_BLK_FD, flow);
}

/**
 * ice_fdir_replay_flows - replay HW Flow Director filter info
 * @hw: pointer to HW instance
 */
void ice_fdir_replay_flows(struct ice_hw *hw)
{
	int flow;

	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
		int tun;

		if (!hw->fdir_prof[flow] || !hw->fdir_prof[flow]->cnt)
			continue;
		for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
			struct ice_flow_prof *hw_prof;
			struct ice_fd_hw_prof *prof;
			u64 prof_id;
			int j;

			prof = hw->fdir_prof[flow];
			prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
			ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id,
					  prof->fdir_seg[tun], TNL_SEG_CNT(tun),
					  &hw_prof);
			for (j = 0; j < prof->cnt; j++) {
				enum ice_flow_priority prio;
				u64 entry_h = 0;
				int err;

				prio = ICE_FLOW_PRIO_NORMAL;
				err = ice_flow_add_entry(hw, ICE_BLK_FD,
							 prof_id,
							 prof->vsi_h[0],
							 prof->vsi_h[j],
							 prio, prof->fdir_seg,
							 &entry_h);
				if (err) {
					dev_err(ice_hw_to_dev(hw), "Could not replay Flow Director, flow type %d\n",
						flow);
					continue;
				}
				prof->entry_h[j][tun] = entry_h;
			}
		}
	}
}

/**
 * ice_parse_rx_flow_user_data - deconstruct user-defined data
 * @fsp: pointer to ethtool Rx flow specification
 * @data: pointer to userdef data structure for storage
 *
 * Returns 0 on success, negative error value on failure
 */
static int
ice_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
			    struct ice_rx_flow_userdef *data)
{
	u64 value, mask;

	memset(data, 0, sizeof(*data));
	if (!(fsp->flow_type & FLOW_EXT))
		return 0;

	value = be64_to_cpu(*((__force __be64 *)fsp->h_ext.data));
	mask = be64_to_cpu(*((__force __be64 *)fsp->m_ext.data));
	if (!mask)
		return 0;

#define ICE_USERDEF_FLEX_WORD_M	GENMASK_ULL(15, 0)
#define ICE_USERDEF_FLEX_OFFS_S	16
#define ICE_USERDEF_FLEX_OFFS_M	GENMASK_ULL(31, ICE_USERDEF_FLEX_OFFS_S)
#define ICE_USERDEF_FLEX_FLTR_M	GENMASK_ULL(31, 0)

	/* 0x1fe is the maximum value for offsets stored in the internal
	 * filtering tables.
	 */
#define ICE_USERDEF_FLEX_MAX_OFFS_VAL 0x1fe

	if (!ice_is_mask_valid(mask, ICE_USERDEF_FLEX_FLTR_M) ||
	    value > ICE_USERDEF_FLEX_FLTR_M)
		return -EINVAL;

	data->flex_word = value & ICE_USERDEF_FLEX_WORD_M;
	data->flex_offset = (value & ICE_USERDEF_FLEX_OFFS_M) >>
			    ICE_USERDEF_FLEX_OFFS_S;
	if (data->flex_offset > ICE_USERDEF_FLEX_MAX_OFFS_VAL)
		return -EINVAL;

	data->flex_fltr = true;

	return 0;
}
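/* Illustrative encoding for the parser above (values hypothetical): a
 * user-def value of 0x004500a5 with a fully-set 32-bit mask yields
 * flex_word = 0x00a5 (bits 15:0) and flex_offset = 0x0045 (bits 31:16),
 * i.e. match the 16-bit word 0x00a5 at offset 0x45, which is within the
 * ICE_USERDEF_FLEX_MAX_OFFS_VAL limit of 0x1fe.
 */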
/**
 * ice_fdir_num_avail_fltr - return the number of unused flow director filters
 * @hw: pointer to hardware structure
 * @vsi: software VSI structure
 *
 * There are two filter pools: guaranteed and best effort (shared). Each VSI
 * can use filters from either pool. The guaranteed pool is divided between
 * VSIs. The best effort filter pool is common to all VSIs and is a device
 * shared resource pool. The number of filters available to this VSI is the
 * sum of the VSI's guaranteed filter pool and the global available best
 * effort filter pool.
 *
 * Returns the number of available flow director filters to this VSI
 */
static int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi)
{
	u16 vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
	u16 num_guar;
	u16 num_be;

	/* total guaranteed filters assigned to this VSI */
	num_guar = vsi->num_gfltr;

	/* minus the guaranteed filters programmed by this VSI */
	num_guar -= (rd32(hw, VSIQF_FD_CNT(vsi_num)) &
		     VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S;

	/* total global best effort filters */
	num_be = hw->func_caps.fd_fltr_best_effort;

	/* minus the global best effort filters programmed */
	num_be -= (rd32(hw, GLQF_FD_CNT) & GLQF_FD_CNT_FD_BCNT_M) >>
		  GLQF_FD_CNT_FD_BCNT_S;

	return num_guar + num_be;
}
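/* Worked example for the accounting above (numbers hypothetical): with 64
 * guaranteed filters of which 10 are programmed, and 2048 best effort
 * filters of which 100 are in use device-wide, this VSI may still add
 * (64 - 10) + (2048 - 100) = 2002 filters.
 */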
/**
 * ice_fdir_alloc_flow_prof - allocate FDir flow profile structure(s)
 * @hw: HW structure containing the FDir flow profile structure(s)
 * @flow: flow type to allocate the flow profile for
 *
 * Allocate the fdir_prof and fdir_prof[flow] if not already created. Return 0
 * on success and negative on error.
 */
static int
ice_fdir_alloc_flow_prof(struct ice_hw *hw, enum ice_fltr_ptype flow)
{
	if (!hw)
		return -EINVAL;

	if (!hw->fdir_prof) {
		hw->fdir_prof = devm_kcalloc(ice_hw_to_dev(hw),
					     ICE_FLTR_PTYPE_MAX,
					     sizeof(*hw->fdir_prof),
					     GFP_KERNEL);
		if (!hw->fdir_prof)
			return -ENOMEM;
	}

	if (!hw->fdir_prof[flow]) {
		hw->fdir_prof[flow] = devm_kzalloc(ice_hw_to_dev(hw),
						   sizeof(**hw->fdir_prof),
						   GFP_KERNEL);
		if (!hw->fdir_prof[flow])
			return -ENOMEM;
	}

	return 0;
}

/**
 * ice_fdir_set_hw_fltr_rule - Configure HW tables to generate a FDir rule
 * @pf: pointer to the PF structure
 * @seg: protocol header description pointer
 * @flow: filter enum
 * @tun: FDir segment to program
 */
static int
ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
			  enum ice_fltr_ptype flow, enum ice_fd_hw_seg tun)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_vsi *main_vsi, *ctrl_vsi;
	struct ice_flow_seg_info *old_seg;
	struct ice_flow_prof *prof = NULL;
	struct ice_fd_hw_prof *hw_prof;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status;
	u64 entry1_h = 0;
	u64 entry2_h = 0;
	u64 prof_id;
	int err;

	main_vsi = ice_get_main_vsi(pf);
	if (!main_vsi)
		return -EINVAL;

	ctrl_vsi = ice_get_ctrl_vsi(pf);
	if (!ctrl_vsi)
		return -EINVAL;

	err = ice_fdir_alloc_flow_prof(hw, flow);
	if (err)
		return err;

	hw_prof = hw->fdir_prof[flow];
	old_seg = hw_prof->fdir_seg[tun];
	if (old_seg) {
		/* This flow_type already has a changed input set.
		 * If it matches the requested input set then we are
		 * done. Otherwise it's an error.
		 */
		if (!memcmp(old_seg, seg, sizeof(*seg)))
			return -EEXIST;

		/* if there are FDir filters using this flow,
		 * then return error.
		 */
		if (hw->fdir_fltr_cnt[flow]) {
			dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n");
			return -EINVAL;
		}

		if (ice_is_arfs_using_perfect_flow(hw, flow)) {
			dev_err(dev, "aRFS using perfect flow type %d, cannot change input set\n",
				flow);
			return -EINVAL;
		}

		/* remove HW filter definition */
		ice_fdir_rem_flow(hw, ICE_BLK_FD, flow);
	}

	/* Add a profile. Only one header (segment) is supported here, so the
	 * final parameters are one segment, no actions (NULL) and an action
	 * count of zero.
	 */
	prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
	status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
				   TNL_SEG_CNT(tun), &prof);
	if (status)
		return ice_status_to_errno(status);
	status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
				    main_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				    seg, &entry1_h);
	if (status) {
		err = ice_status_to_errno(status);
		goto err_prof;
	}
	status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
				    ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				    seg, &entry2_h);
	if (status) {
		err = ice_status_to_errno(status);
		goto err_entry;
	}

	hw_prof->fdir_seg[tun] = seg;
	hw_prof->entry_h[0][tun] = entry1_h;
	hw_prof->entry_h[1][tun] = entry2_h;
	hw_prof->vsi_h[0] = main_vsi->idx;
	hw_prof->vsi_h[1] = ctrl_vsi->idx;
	if (!hw_prof->cnt)
		hw_prof->cnt = 2;

	return 0;

err_entry:
	ice_rem_prof_id_flow(hw, ICE_BLK_FD,
			     ice_get_hw_vsi_num(hw, main_vsi->idx), prof_id);
	ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
err_prof:
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
	dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n");

	return err;
}

/**
 * ice_set_init_fdir_seg
 * @seg: flow segment for programming
 * @l3_proto: ICE_FLOW_SEG_HDR_IPV4 or ICE_FLOW_SEG_HDR_IPV6
 * @l4_proto: ICE_FLOW_SEG_HDR_TCP or ICE_FLOW_SEG_HDR_UDP
 *
 * Set the configuration for perfect filters to the provided flow segment for
 * programming the HW filter. This is to be called only when initializing
 * filters as this function assumes no filters exist.
 */
633 */ 634 static int 635 ice_set_init_fdir_seg(struct ice_flow_seg_info *seg, 636 enum ice_flow_seg_hdr l3_proto, 637 enum ice_flow_seg_hdr l4_proto) 638 { 639 enum ice_flow_field src_addr, dst_addr, src_port, dst_port; 640 641 if (!seg) 642 return -EINVAL; 643 644 if (l3_proto == ICE_FLOW_SEG_HDR_IPV4) { 645 src_addr = ICE_FLOW_FIELD_IDX_IPV4_SA; 646 dst_addr = ICE_FLOW_FIELD_IDX_IPV4_DA; 647 } else if (l3_proto == ICE_FLOW_SEG_HDR_IPV6) { 648 src_addr = ICE_FLOW_FIELD_IDX_IPV6_SA; 649 dst_addr = ICE_FLOW_FIELD_IDX_IPV6_DA; 650 } else { 651 return -EINVAL; 652 } 653 654 if (l4_proto == ICE_FLOW_SEG_HDR_TCP) { 655 src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT; 656 dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT; 657 } else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) { 658 src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT; 659 dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT; 660 } else { 661 return -EINVAL; 662 } 663 664 ICE_FLOW_SET_HDRS(seg, l3_proto | l4_proto); 665 666 /* IP source address */ 667 ice_flow_set_fld(seg, src_addr, ICE_FLOW_FLD_OFF_INVAL, 668 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false); 669 670 /* IP destination address */ 671 ice_flow_set_fld(seg, dst_addr, ICE_FLOW_FLD_OFF_INVAL, 672 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false); 673 674 /* Layer 4 source port */ 675 ice_flow_set_fld(seg, src_port, ICE_FLOW_FLD_OFF_INVAL, 676 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false); 677 678 /* Layer 4 destination port */ 679 ice_flow_set_fld(seg, dst_port, ICE_FLOW_FLD_OFF_INVAL, 680 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false); 681 682 return 0; 683 } 684 685 /** 686 * ice_create_init_fdir_rule 687 * @pf: PF structure 688 * @flow: filter enum 689 * 690 * Return error value or 0 on success. 691 */ 692 static int 693 ice_create_init_fdir_rule(struct ice_pf *pf, enum ice_fltr_ptype flow) 694 { 695 struct ice_flow_seg_info *seg, *tun_seg; 696 struct device *dev = ice_pf_to_dev(pf); 697 struct ice_hw *hw = &pf->hw; 698 int ret; 699 700 /* if there is already a filter rule for kind return -EINVAL */ 701 if (hw->fdir_prof && hw->fdir_prof[flow] && 702 hw->fdir_prof[flow]->fdir_seg[0]) 703 return -EINVAL; 704 705 seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL); 706 if (!seg) 707 return -ENOMEM; 708 709 tun_seg = devm_kcalloc(dev, sizeof(*seg), ICE_FD_HW_SEG_MAX, 710 GFP_KERNEL); 711 if (!tun_seg) { 712 devm_kfree(dev, seg); 713 return -ENOMEM; 714 } 715 716 if (flow == ICE_FLTR_PTYPE_NONF_IPV4_TCP) 717 ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV4, 718 ICE_FLOW_SEG_HDR_TCP); 719 else if (flow == ICE_FLTR_PTYPE_NONF_IPV4_UDP) 720 ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV4, 721 ICE_FLOW_SEG_HDR_UDP); 722 else if (flow == ICE_FLTR_PTYPE_NONF_IPV6_TCP) 723 ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV6, 724 ICE_FLOW_SEG_HDR_TCP); 725 else if (flow == ICE_FLTR_PTYPE_NONF_IPV6_UDP) 726 ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV6, 727 ICE_FLOW_SEG_HDR_UDP); 728 else 729 ret = -EINVAL; 730 if (ret) 731 goto err_exit; 732 733 /* add filter for outer headers */ 734 ret = ice_fdir_set_hw_fltr_rule(pf, seg, flow, ICE_FD_HW_SEG_NON_TUN); 735 if (ret) 736 /* could not write filter, free memory */ 737 goto err_exit; 738 739 /* make tunneled filter HW entries if possible */ 740 memcpy(&tun_seg[1], seg, sizeof(*seg)); 741 ret = ice_fdir_set_hw_fltr_rule(pf, tun_seg, flow, ICE_FD_HW_SEG_TUN); 742 if (ret) 743 /* could not write tunnel filter, but outer header filter 744 * exists 745 */ 746 devm_kfree(dev, tun_seg); 747 748 set_bit(flow, 
/**
 * ice_create_init_fdir_rule
 * @pf: PF structure
 * @flow: filter enum
 *
 * Return error value or 0 on success.
 */
static int
ice_create_init_fdir_rule(struct ice_pf *pf, enum ice_fltr_ptype flow)
{
	struct ice_flow_seg_info *seg, *tun_seg;
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	int ret;

	/* if there is already a filter rule for this kind return -EINVAL */
	if (hw->fdir_prof && hw->fdir_prof[flow] &&
	    hw->fdir_prof[flow]->fdir_seg[0])
		return -EINVAL;

	seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
	if (!seg)
		return -ENOMEM;

	tun_seg = devm_kcalloc(dev, sizeof(*seg), ICE_FD_HW_SEG_MAX,
			       GFP_KERNEL);
	if (!tun_seg) {
		devm_kfree(dev, seg);
		return -ENOMEM;
	}

	if (flow == ICE_FLTR_PTYPE_NONF_IPV4_TCP)
		ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV4,
					    ICE_FLOW_SEG_HDR_TCP);
	else if (flow == ICE_FLTR_PTYPE_NONF_IPV4_UDP)
		ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV4,
					    ICE_FLOW_SEG_HDR_UDP);
	else if (flow == ICE_FLTR_PTYPE_NONF_IPV6_TCP)
		ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV6,
					    ICE_FLOW_SEG_HDR_TCP);
	else if (flow == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
		ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV6,
					    ICE_FLOW_SEG_HDR_UDP);
	else
		ret = -EINVAL;
	if (ret)
		goto err_exit;

	/* add filter for outer headers */
	ret = ice_fdir_set_hw_fltr_rule(pf, seg, flow, ICE_FD_HW_SEG_NON_TUN);
	if (ret)
		/* could not write filter, free memory */
		goto err_exit;

	/* make tunneled filter HW entries if possible */
	memcpy(&tun_seg[1], seg, sizeof(*seg));
	ret = ice_fdir_set_hw_fltr_rule(pf, tun_seg, flow, ICE_FD_HW_SEG_TUN);
	if (ret)
		/* could not write tunnel filter, but outer header filter
		 * exists
		 */
		devm_kfree(dev, tun_seg);

	set_bit(flow, hw->fdir_perfect_fltr);
	return ret;
err_exit:
	devm_kfree(dev, tun_seg);
	devm_kfree(dev, seg);

	return -EOPNOTSUPP;
}

/**
 * ice_set_fdir_ip4_seg
 * @seg: flow segment for programming
 * @tcp_ip4_spec: mask data from ethtool
 * @l4_proto: Layer 4 protocol to program
 * @perfect_fltr: only valid on success; returns true if perfect filter,
 *		  false if not
 *
 * Set the mask data into the flow segment to be used to program HW
 * table based on provided L4 protocol for IPv4
 */
static int
ice_set_fdir_ip4_seg(struct ice_flow_seg_info *seg,
		     struct ethtool_tcpip4_spec *tcp_ip4_spec,
		     enum ice_flow_seg_hdr l4_proto, bool *perfect_fltr)
{
	enum ice_flow_field src_port, dst_port;

	/* make sure we don't have any empty rule */
	if (!tcp_ip4_spec->psrc && !tcp_ip4_spec->ip4src &&
	    !tcp_ip4_spec->pdst && !tcp_ip4_spec->ip4dst)
		return -EINVAL;

	/* filtering on TOS not supported */
	if (tcp_ip4_spec->tos)
		return -EOPNOTSUPP;

	if (l4_proto == ICE_FLOW_SEG_HDR_TCP) {
		src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT;
		dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT;
	} else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) {
		src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT;
		dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT;
	} else if (l4_proto == ICE_FLOW_SEG_HDR_SCTP) {
		src_port = ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT;
		dst_port = ICE_FLOW_FIELD_IDX_SCTP_DST_PORT;
	} else {
		return -EOPNOTSUPP;
	}

	*perfect_fltr = true;
	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 | l4_proto);

	/* IP source address */
	if (tcp_ip4_spec->ip4src == htonl(0xFFFFFFFF))
		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	else if (!tcp_ip4_spec->ip4src)
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	/* IP destination address */
	if (tcp_ip4_spec->ip4dst == htonl(0xFFFFFFFF))
		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	else if (!tcp_ip4_spec->ip4dst)
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	/* Layer 4 source port */
	if (tcp_ip4_spec->psrc == htons(0xFFFF))
		ice_flow_set_fld(seg, src_port, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 false);
	else if (!tcp_ip4_spec->psrc)
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	/* Layer 4 destination port */
	if (tcp_ip4_spec->pdst == htons(0xFFFF))
		ice_flow_set_fld(seg, dst_port, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 false);
	else if (!tcp_ip4_spec->pdst)
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	return 0;
}
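/* Mask handling above, illustrated: a fully-set field mask (0xFFFFFFFF for
 * an address, 0xFFFF for a port) programs that field into the segment; an
 * all-zero mask skips the field and demotes the rule from a perfect filter;
 * any partial mask is rejected with -EOPNOTSUPP. The IPv4 user and IPv6
 * variants below follow the same pattern.
 */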
/**
 * ice_set_fdir_ip4_usr_seg
 * @seg: flow segment for programming
 * @usr_ip4_spec: ethtool userdef packet offset
 * @perfect_fltr: only valid on success; returns true if perfect filter,
 *		  false if not
 *
 * Set the offset data into the flow segment to be used to program HW
 * table for IPv4
 */
static int
ice_set_fdir_ip4_usr_seg(struct ice_flow_seg_info *seg,
			 struct ethtool_usrip4_spec *usr_ip4_spec,
			 bool *perfect_fltr)
{
	/* first 4 bytes of Layer 4 header */
	if (usr_ip4_spec->l4_4_bytes)
		return -EINVAL;
	if (usr_ip4_spec->tos)
		return -EINVAL;
	if (usr_ip4_spec->ip_ver)
		return -EINVAL;
	/* Filtering on Layer 4 protocol not supported */
	if (usr_ip4_spec->proto)
		return -EOPNOTSUPP;
	/* empty rules are not valid */
	if (!usr_ip4_spec->ip4src && !usr_ip4_spec->ip4dst)
		return -EINVAL;

	*perfect_fltr = true;
	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);

	/* IP source address */
	if (usr_ip4_spec->ip4src == htonl(0xFFFFFFFF))
		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	else if (!usr_ip4_spec->ip4src)
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	/* IP destination address */
	if (usr_ip4_spec->ip4dst == htonl(0xFFFFFFFF))
		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	else if (!usr_ip4_spec->ip4dst)
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	return 0;
}
/**
 * ice_set_fdir_ip6_seg
 * @seg: flow segment for programming
 * @tcp_ip6_spec: mask data from ethtool
 * @l4_proto: Layer 4 protocol to program
 * @perfect_fltr: only valid on success; returns true if perfect filter,
 *		  false if not
 *
 * Set the mask data into the flow segment to be used to program HW
 * table based on provided L4 protocol for IPv6
 */
static int
ice_set_fdir_ip6_seg(struct ice_flow_seg_info *seg,
		     struct ethtool_tcpip6_spec *tcp_ip6_spec,
		     enum ice_flow_seg_hdr l4_proto, bool *perfect_fltr)
{
	enum ice_flow_field src_port, dst_port;

	/* make sure we don't have any empty rule */
	if (!memcmp(tcp_ip6_spec->ip6src, &zero_ipv6_addr_mask,
		    sizeof(struct in6_addr)) &&
	    !memcmp(tcp_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
		    sizeof(struct in6_addr)) &&
	    !tcp_ip6_spec->psrc && !tcp_ip6_spec->pdst)
		return -EINVAL;

	/* filtering on TC not supported */
	if (tcp_ip6_spec->tclass)
		return -EOPNOTSUPP;

	if (l4_proto == ICE_FLOW_SEG_HDR_TCP) {
		src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT;
		dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT;
	} else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) {
		src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT;
		dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT;
	} else if (l4_proto == ICE_FLOW_SEG_HDR_SCTP) {
		src_port = ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT;
		dst_port = ICE_FLOW_FIELD_IDX_SCTP_DST_PORT;
	} else {
		return -EINVAL;
	}

	*perfect_fltr = true;
	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 | l4_proto);

	if (!memcmp(tcp_ip6_spec->ip6src, &full_ipv6_addr_mask,
		    sizeof(struct in6_addr)))
		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_SA,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	else if (!memcmp(tcp_ip6_spec->ip6src, &zero_ipv6_addr_mask,
			 sizeof(struct in6_addr)))
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	if (!memcmp(tcp_ip6_spec->ip6dst, &full_ipv6_addr_mask,
		    sizeof(struct in6_addr)))
		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_DA,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	else if (!memcmp(tcp_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
			 sizeof(struct in6_addr)))
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	/* Layer 4 source port */
	if (tcp_ip6_spec->psrc == htons(0xFFFF))
		ice_flow_set_fld(seg, src_port, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 false);
	else if (!tcp_ip6_spec->psrc)
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	/* Layer 4 destination port */
	if (tcp_ip6_spec->pdst == htons(0xFFFF))
		ice_flow_set_fld(seg, dst_port, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 false);
	else if (!tcp_ip6_spec->pdst)
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	return 0;
}

/**
 * ice_set_fdir_ip6_usr_seg
 * @seg: flow segment for programming
 * @usr_ip6_spec: ethtool userdef packet offset
 * @perfect_fltr: only valid on success; returns true if perfect filter,
 *		  false if not
 *
 * Set the offset data into the flow segment to be used to program HW
 * table for IPv6
 */
static int
ice_set_fdir_ip6_usr_seg(struct ice_flow_seg_info *seg,
			 struct ethtool_usrip6_spec *usr_ip6_spec,
			 bool *perfect_fltr)
{
	/* filtering on Layer 4 bytes not supported */
	if (usr_ip6_spec->l4_4_bytes)
		return -EOPNOTSUPP;
	/* filtering on TC not supported */
	if (usr_ip6_spec->tclass)
		return -EOPNOTSUPP;
	/* filtering on Layer 4 protocol not supported */
	if (usr_ip6_spec->l4_proto)
		return -EOPNOTSUPP;
	/* empty rules are not valid */
	if (!memcmp(usr_ip6_spec->ip6src, &zero_ipv6_addr_mask,
		    sizeof(struct in6_addr)) &&
	    !memcmp(usr_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
		    sizeof(struct in6_addr)))
		return -EINVAL;

	*perfect_fltr = true;
	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);

	if (!memcmp(usr_ip6_spec->ip6src, &full_ipv6_addr_mask,
		    sizeof(struct in6_addr)))
		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_SA,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	else if (!memcmp(usr_ip6_spec->ip6src, &zero_ipv6_addr_mask,
			 sizeof(struct in6_addr)))
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	if (!memcmp(usr_ip6_spec->ip6dst, &full_ipv6_addr_mask,
		    sizeof(struct in6_addr)))
		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_DA,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	else if (!memcmp(usr_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
			 sizeof(struct in6_addr)))
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	return 0;
}

/**
 * ice_cfg_fdir_xtrct_seq - Configure extraction sequence for the given filter
 * @pf: PF structure
 * @fsp: pointer to ethtool Rx flow specification
 * @user: user defined data from flow specification
 *
 * Returns 0 on success.
 */
static int
ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp,
		       struct ice_rx_flow_userdef *user)
{
	struct ice_flow_seg_info *seg, *tun_seg;
	struct device *dev = ice_pf_to_dev(pf);
	enum ice_fltr_ptype fltr_idx;
	struct ice_hw *hw = &pf->hw;
	bool perfect_filter;
	int ret;

	seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
	if (!seg)
		return -ENOMEM;

	tun_seg = devm_kcalloc(dev, sizeof(*seg), ICE_FD_HW_SEG_MAX,
			       GFP_KERNEL);
	if (!tun_seg) {
		devm_kfree(dev, seg);
		return -ENOMEM;
	}

	switch (fsp->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec,
					   ICE_FLOW_SEG_HDR_TCP,
					   &perfect_filter);
		break;
	case UDP_V4_FLOW:
		ret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec,
					   ICE_FLOW_SEG_HDR_UDP,
					   &perfect_filter);
		break;
	case SCTP_V4_FLOW:
		ret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec,
					   ICE_FLOW_SEG_HDR_SCTP,
					   &perfect_filter);
		break;
	case IPV4_USER_FLOW:
		ret = ice_set_fdir_ip4_usr_seg(seg, &fsp->m_u.usr_ip4_spec,
					       &perfect_filter);
		break;
	case TCP_V6_FLOW:
		ret = ice_set_fdir_ip6_seg(seg, &fsp->m_u.tcp_ip6_spec,
					   ICE_FLOW_SEG_HDR_TCP,
					   &perfect_filter);
		break;
	case UDP_V6_FLOW:
		ret = ice_set_fdir_ip6_seg(seg, &fsp->m_u.tcp_ip6_spec,
					   ICE_FLOW_SEG_HDR_UDP,
					   &perfect_filter);
		break;
	case SCTP_V6_FLOW:
		ret = ice_set_fdir_ip6_seg(seg, &fsp->m_u.tcp_ip6_spec,
					   ICE_FLOW_SEG_HDR_SCTP,
					   &perfect_filter);
		break;
	case IPV6_USER_FLOW:
		ret = ice_set_fdir_ip6_usr_seg(seg, &fsp->m_u.usr_ip6_spec,
					       &perfect_filter);
		break;
	default:
		ret = -EINVAL;
	}
	if (ret)
		goto err_exit;
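	/* From here on, seg describes the outer-header match and tun_seg[1]
	 * its tunneled (inner-header) counterpart; a user-defined flex word,
	 * if present, is appended to both before the HW tables are written.
	 */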
	/* tunnel segments are shifted up one. */
	memcpy(&tun_seg[1], seg, sizeof(*seg));

	if (user && user->flex_fltr) {
		perfect_filter = false;
		ice_flow_add_fld_raw(seg, user->flex_offset,
				     ICE_FLTR_PRGM_FLEX_WORD_SIZE,
				     ICE_FLOW_FLD_OFF_INVAL,
				     ICE_FLOW_FLD_OFF_INVAL);
		ice_flow_add_fld_raw(&tun_seg[1], user->flex_offset,
				     ICE_FLTR_PRGM_FLEX_WORD_SIZE,
				     ICE_FLOW_FLD_OFF_INVAL,
				     ICE_FLOW_FLD_OFF_INVAL);
	}

	/* add filter for outer headers */
	fltr_idx = ice_ethtool_flow_to_fltr(fsp->flow_type & ~FLOW_EXT);
	ret = ice_fdir_set_hw_fltr_rule(pf, seg, fltr_idx,
					ICE_FD_HW_SEG_NON_TUN);
	if (ret == -EEXIST)
		/* Rule already exists, free memory and continue */
		devm_kfree(dev, seg);
	else if (ret)
		/* could not write filter, free memory */
		goto err_exit;

	/* make tunneled filter HW entries if possible; tun_seg[1] was
	 * already populated above, and seg may have been freed on the
	 * -EEXIST path, so it must not be copied again here.
	 */
	ret = ice_fdir_set_hw_fltr_rule(pf, tun_seg, fltr_idx,
					ICE_FD_HW_SEG_TUN);
	if (ret == -EEXIST) {
		/* Rule already exists, free memory and count as success */
		devm_kfree(dev, tun_seg);
		ret = 0;
	} else if (ret) {
		/* could not write tunnel filter, but outer filter exists */
		devm_kfree(dev, tun_seg);
	}

	if (perfect_filter)
		set_bit(fltr_idx, hw->fdir_perfect_fltr);
	else
		clear_bit(fltr_idx, hw->fdir_perfect_fltr);

	return ret;

err_exit:
	devm_kfree(dev, tun_seg);
	devm_kfree(dev, seg);

	return -EOPNOTSUPP;
}
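/* The write path below programs a filter by building a programming
 * descriptor and a generated dummy packet and handing both to the control
 * VSI; flow types with fragment handling repeat the sequence with a
 * fragment packet.
 */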
/**
 * ice_fdir_write_fltr - send a flow director filter to the hardware
 * @pf: PF data structure
 * @input: filter structure
 * @add: true adds the filter and false removes the filter
 * @is_tun: true adds inner filter on tunnel and false outer headers
 *
 * returns 0 on success and negative value on error
 */
int
ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
		    bool is_tun)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_fltr_desc desc;
	struct ice_vsi *ctrl_vsi;
	enum ice_status status;
	u8 *pkt, *frag_pkt;
	bool has_frag;
	int err;

	ctrl_vsi = ice_get_ctrl_vsi(pf);
	if (!ctrl_vsi)
		return -EINVAL;

	pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;
	frag_pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
	if (!frag_pkt) {
		err = -ENOMEM;
		goto err_free;
	}

	ice_fdir_get_prgm_desc(hw, input, &desc, add);
	status = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
	if (status) {
		err = ice_status_to_errno(status);
		goto err_free_all;
	}
	err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
	if (err)
		goto err_free_all;

	/* repeat for fragment packet */
	has_frag = ice_fdir_has_frag(input->flow_type);
	if (has_frag) {
		/* does not return error */
		ice_fdir_get_prgm_desc(hw, input, &desc, add);
		status = ice_fdir_get_gen_prgm_pkt(hw, input, frag_pkt, true,
						   is_tun);
		if (status) {
			err = ice_status_to_errno(status);
			goto err_frag;
		}
		err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, frag_pkt);
		if (err)
			goto err_frag;
	} else {
		devm_kfree(dev, frag_pkt);
	}

	return 0;

err_free_all:
	devm_kfree(dev, frag_pkt);
err_free:
	devm_kfree(dev, pkt);
	return err;

err_frag:
	devm_kfree(dev, frag_pkt);
	return err;
}

/**
 * ice_fdir_write_all_fltr - send a flow director filter to the hardware
 * @pf: PF data structure
 * @input: filter structure
 * @add: true adds the filter and false removes the filter
 *
 * Writes the non-tunneled variant and, if a tunnel port is open, the
 * tunneled variant as well. Returns 0 on success and negative value on
 * error.
 */
static int
ice_fdir_write_all_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input,
			bool add)
{
	u16 port_num;
	int tun;

	for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
		bool is_tun = tun == ICE_FD_HW_SEG_TUN;
		int err;

		if (is_tun && !ice_get_open_tunnel_port(&pf->hw, &port_num))
			continue;
		err = ice_fdir_write_fltr(pf, input, add, is_tun);
		if (err)
			return err;
	}
	return 0;
}

/**
 * ice_fdir_replay_fltrs - replay filters from the HW filter list
 * @pf: board private structure
 */
void ice_fdir_replay_fltrs(struct ice_pf *pf)
{
	struct ice_fdir_fltr *f_rule;
	struct ice_hw *hw = &pf->hw;

	list_for_each_entry(f_rule, &hw->fdir_list_head, fltr_node) {
		int err = ice_fdir_write_all_fltr(pf, f_rule, true);

		if (err)
			dev_dbg(ice_pf_to_dev(pf), "Flow Director error %d, could not reprogram filter %d\n",
				err, f_rule->fltr_id);
	}
}

/**
 * ice_fdir_create_dflt_rules - create default perfect filters
 * @pf: PF data structure
 *
 * Returns 0 for success or error.
 */
int ice_fdir_create_dflt_rules(struct ice_pf *pf)
{
	int err;

	/* Create perfect TCP and UDP rules in hardware. */
	err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_TCP);
	if (err)
		return err;

	err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_UDP);
	if (err)
		return err;

	err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_TCP);
	if (err)
		return err;

	err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_UDP);

	return err;
}
/**
 * ice_vsi_manage_fdir - turn on/off flow director
 * @vsi: the VSI being changed
 * @ena: boolean value indicating if this is an enable or disable request
 */
void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena)
{
	struct ice_fdir_fltr *f_rule, *tmp;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	enum ice_fltr_ptype flow;

	if (ena) {
		set_bit(ICE_FLAG_FD_ENA, pf->flags);
		ice_fdir_create_dflt_rules(pf);
		return;
	}

	mutex_lock(&hw->fdir_fltr_lock);
	if (!test_and_clear_bit(ICE_FLAG_FD_ENA, pf->flags))
		goto release_lock;
	list_for_each_entry_safe(f_rule, tmp, &hw->fdir_list_head, fltr_node) {
		/* ignore return value */
		ice_fdir_write_all_fltr(pf, f_rule, false);
		ice_fdir_update_cntrs(hw, f_rule->flow_type, false);
		list_del(&f_rule->fltr_node);
		devm_kfree(ice_hw_to_dev(hw), f_rule);
	}

	if (hw->fdir_prof)
		for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX;
		     flow++)
			if (hw->fdir_prof[flow])
				ice_fdir_rem_flow(hw, ICE_BLK_FD, flow);

release_lock:
	mutex_unlock(&hw->fdir_fltr_lock);
}

/**
 * ice_fdir_do_rem_flow - delete flow and possibly add perfect flow
 * @pf: PF structure
 * @flow_type: FDir flow type to release
 */
static void
ice_fdir_do_rem_flow(struct ice_pf *pf, enum ice_fltr_ptype flow_type)
{
	struct ice_hw *hw = &pf->hw;
	bool need_perfect = false;

	if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
	    flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
	    flow_type == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
	    flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
		need_perfect = true;

	if (need_perfect && test_bit(flow_type, hw->fdir_perfect_fltr))
		return;

	ice_fdir_rem_flow(hw, ICE_BLK_FD, flow_type);
	if (need_perfect)
		ice_create_init_fdir_rule(pf, flow_type);
}

/**
 * ice_fdir_update_list_entry - add or delete a filter from the filter list
 * @pf: PF structure
 * @input: filter structure
 * @fltr_idx: ethtool index of filter to modify
 *
 * returns 0 on success and negative on errors
 */
static int
ice_fdir_update_list_entry(struct ice_pf *pf, struct ice_fdir_fltr *input,
			   int fltr_idx)
{
	struct ice_fdir_fltr *old_fltr;
	struct ice_hw *hw = &pf->hw;
	int err = -ENOENT;

	/* Do not update filters during reset */
	if (ice_is_reset_in_progress(pf->state))
		return -EBUSY;

	old_fltr = ice_fdir_find_fltr_by_idx(hw, fltr_idx);
	if (old_fltr) {
		err = ice_fdir_write_all_fltr(pf, old_fltr, false);
		if (err)
			return err;
		ice_fdir_update_cntrs(hw, old_fltr->flow_type, false);
		if (!input && !hw->fdir_fltr_cnt[old_fltr->flow_type])
			/* we just deleted the last filter of flow_type so we
			 * should also delete the HW filter info.
			 */
			ice_fdir_do_rem_flow(pf, old_fltr->flow_type);
		list_del(&old_fltr->fltr_node);
		devm_kfree(ice_hw_to_dev(hw), old_fltr);
	}
	if (!input)
		return err;
	ice_fdir_list_add_fltr(hw, input);
	ice_fdir_update_cntrs(hw, input->flow_type, true);
	return 0;
}

/**
 * ice_del_fdir_ethtool - delete Flow Director filter
 * @vsi: pointer to target VSI
 * @cmd: command to add or delete Flow Director filter
 *
 * Returns 0 on success and negative values for failure
 */
int ice_del_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	int val;

	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
		return -EOPNOTSUPP;

	/* Do not delete filters during reset */
	if (ice_is_reset_in_progress(pf->state)) {
		dev_err(ice_pf_to_dev(pf), "Device is resetting - deleting Flow Director filters not supported during reset\n");
		return -EBUSY;
	}

	if (test_bit(ICE_FD_FLUSH_REQ, pf->state))
		return -EBUSY;

	mutex_lock(&hw->fdir_fltr_lock);
	val = ice_fdir_update_list_entry(pf, NULL, fsp->location);
	mutex_unlock(&hw->fdir_fltr_lock);

	return val;
}
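/* Illustrative user-space counterpart (interface name hypothetical):
 *   ethtool -N eth0 delete 5
 * reaches ice_del_fdir_ethtool() above and removes the filter stored at
 * location 5.
 */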
/**
 * ice_set_fdir_input_set - Set the input set for Flow Director
 * @vsi: pointer to target VSI
 * @fsp: pointer to ethtool Rx flow specification
 * @input: filter structure
 */
static int
ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp,
		       struct ice_fdir_fltr *input)
{
	u16 dest_vsi, q_index = 0;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int flow_type;
	u8 dest_ctl;

	if (!vsi || !fsp || !input)
		return -EINVAL;

	pf = vsi->back;
	hw = &pf->hw;

	dest_vsi = vsi->idx;
	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
		dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
	} else {
		u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
		u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);

		if (vf) {
			dev_err(ice_pf_to_dev(pf), "Failed to add filter. Flow director filters are not supported on VF queues.\n");
			return -EINVAL;
		}

		if (ring >= vsi->num_rxq)
			return -EINVAL;

		dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
		q_index = ring;
	}

	input->fltr_id = fsp->location;
	input->q_index = q_index;
	flow_type = fsp->flow_type & ~FLOW_EXT;

	input->dest_vsi = dest_vsi;
	input->dest_ctl = dest_ctl;
	input->fltr_status = ICE_FLTR_PRGM_DESC_FD_STATUS_FD_ID;
	input->cnt_index = ICE_FD_SB_STAT_IDX(hw->fd_ctr_base);
	input->flow_type = ice_ethtool_flow_to_fltr(flow_type);

	if (fsp->flow_type & FLOW_EXT) {
		memcpy(input->ext_data.usr_def, fsp->h_ext.data,
		       sizeof(input->ext_data.usr_def));
		input->ext_data.vlan_type = fsp->h_ext.vlan_etype;
		input->ext_data.vlan_tag = fsp->h_ext.vlan_tci;
		memcpy(input->ext_mask.usr_def, fsp->m_ext.data,
		       sizeof(input->ext_mask.usr_def));
		input->ext_mask.vlan_type = fsp->m_ext.vlan_etype;
		input->ext_mask.vlan_tag = fsp->m_ext.vlan_tci;
	}

	switch (flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		input->ip.v4.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
		input->ip.v4.src_port = fsp->h_u.tcp_ip4_spec.psrc;
		input->ip.v4.dst_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
		input->ip.v4.src_ip = fsp->h_u.tcp_ip4_spec.ip4src;
		input->mask.v4.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
		input->mask.v4.src_port = fsp->m_u.tcp_ip4_spec.psrc;
		input->mask.v4.dst_ip = fsp->m_u.tcp_ip4_spec.ip4dst;
		input->mask.v4.src_ip = fsp->m_u.tcp_ip4_spec.ip4src;
		break;
	case IPV4_USER_FLOW:
		input->ip.v4.dst_ip = fsp->h_u.usr_ip4_spec.ip4dst;
		input->ip.v4.src_ip = fsp->h_u.usr_ip4_spec.ip4src;
		input->ip.v4.l4_header = fsp->h_u.usr_ip4_spec.l4_4_bytes;
		input->ip.v4.proto = fsp->h_u.usr_ip4_spec.proto;
		input->ip.v4.ip_ver = fsp->h_u.usr_ip4_spec.ip_ver;
		input->ip.v4.tos = fsp->h_u.usr_ip4_spec.tos;
		input->mask.v4.dst_ip = fsp->m_u.usr_ip4_spec.ip4dst;
		input->mask.v4.src_ip = fsp->m_u.usr_ip4_spec.ip4src;
		input->mask.v4.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes;
		input->mask.v4.proto = fsp->m_u.usr_ip4_spec.proto;
		input->mask.v4.ip_ver = fsp->m_u.usr_ip4_spec.ip_ver;
		input->mask.v4.tos = fsp->m_u.usr_ip4_spec.tos;
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
		memcpy(input->ip.v6.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
		       sizeof(struct in6_addr));
		memcpy(input->ip.v6.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
		       sizeof(struct in6_addr));
		input->ip.v6.dst_port = fsp->h_u.tcp_ip6_spec.pdst;
		input->ip.v6.src_port = fsp->h_u.tcp_ip6_spec.psrc;
		input->ip.v6.tc = fsp->h_u.tcp_ip6_spec.tclass;
		memcpy(input->mask.v6.dst_ip, fsp->m_u.tcp_ip6_spec.ip6dst,
		       sizeof(struct in6_addr));
		memcpy(input->mask.v6.src_ip, fsp->m_u.tcp_ip6_spec.ip6src,
		       sizeof(struct in6_addr));
		input->mask.v6.dst_port = fsp->m_u.tcp_ip6_spec.pdst;
		input->mask.v6.src_port = fsp->m_u.tcp_ip6_spec.psrc;
		input->mask.v6.tc = fsp->m_u.tcp_ip6_spec.tclass;
		break;
	case IPV6_USER_FLOW:
		memcpy(input->ip.v6.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
		       sizeof(struct in6_addr));
		memcpy(input->ip.v6.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
		       sizeof(struct in6_addr));
		input->ip.v6.l4_header = fsp->h_u.usr_ip6_spec.l4_4_bytes;
		input->ip.v6.tc = fsp->h_u.usr_ip6_spec.tclass;

		/* if no protocol requested, use IPPROTO_NONE */
		if (!fsp->m_u.usr_ip6_spec.l4_proto)
			input->ip.v6.proto = IPPROTO_NONE;
		else
			input->ip.v6.proto = fsp->h_u.usr_ip6_spec.l4_proto;

		memcpy(input->mask.v6.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
		       sizeof(struct in6_addr));
		memcpy(input->mask.v6.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
		       sizeof(struct in6_addr));
		input->mask.v6.l4_header = fsp->m_u.usr_ip6_spec.l4_4_bytes;
		input->mask.v6.tc = fsp->m_u.usr_ip6_spec.tclass;
		input->mask.v6.proto = fsp->m_u.usr_ip6_spec.l4_proto;
		break;
	default:
		/* not doing un-parsed flow types */
		return -EINVAL;
	}

	return 0;
}
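/* ring_cookie handling above, illustrated: RX_CLS_FLOW_DISC programs a drop
 * action, while a plain queue number such as 3 directs matching packets to
 * Rx queue 3 of this VSI; cookies that encode a VF queue are rejected.
 */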
/**
 * ice_add_fdir_ethtool - Add/Remove Flow Director filter
 * @vsi: pointer to target VSI
 * @cmd: command to add or delete Flow Director filter
 *
 * Returns 0 on success and negative values for failure
 */
int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
{
	struct ice_rx_flow_userdef userdata;
	struct ethtool_rx_flow_spec *fsp;
	struct ice_fdir_fltr *input;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int fltrs_needed;
	u16 tunnel_port;
	int ret;

	if (!vsi)
		return -EINVAL;

	pf = vsi->back;
	hw = &pf->hw;
	dev = ice_pf_to_dev(pf);

	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
		return -EOPNOTSUPP;

	/* Do not program filters during reset */
	if (ice_is_reset_in_progress(pf->state)) {
		dev_err(dev, "Device is resetting - adding Flow Director filters not supported during reset\n");
		return -EBUSY;
	}

	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;

	if (ice_parse_rx_flow_user_data(fsp, &userdata))
		return -EINVAL;

	if (fsp->flow_type & FLOW_MAC_EXT)
		return -EINVAL;

	ret = ice_cfg_fdir_xtrct_seq(pf, fsp, &userdata);
	if (ret)
		return ret;

	if (fsp->location >= ice_get_fdir_cnt_all(hw)) {
		dev_err(dev, "Failed to add filter. The maximum number of flow director filters has been reached.\n");
		return -ENOSPC;
	}

	/* return error if not an update and no available filters */
	fltrs_needed = ice_get_open_tunnel_port(hw, &tunnel_port) ? 2 : 1;
	if (!ice_fdir_find_fltr_by_idx(hw, fsp->location) &&
	    ice_fdir_num_avail_fltr(hw, pf->vsi[vsi->idx]) < fltrs_needed) {
		dev_err(dev, "Failed to add filter. The maximum number of flow director filters has been reached.\n");
		return -ENOSPC;
	}

	input = devm_kzalloc(dev, sizeof(*input), GFP_KERNEL);
	if (!input)
		return -ENOMEM;

	ret = ice_set_fdir_input_set(vsi, fsp, input);
	if (ret)
		goto free_input;

	mutex_lock(&hw->fdir_fltr_lock);
	if (ice_fdir_is_dup_fltr(hw, input)) {
		ret = -EINVAL;
		goto release_lock;
	}

	if (userdata.flex_fltr) {
		input->flex_fltr = true;
		input->flex_word = cpu_to_be16(userdata.flex_word);
		input->flex_offset = userdata.flex_offset;
	}

	input->cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
	input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE;
	input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL;

	/* input struct is added to the HW filter list */
	ice_fdir_update_list_entry(pf, input, fsp->location);

	ret = ice_fdir_write_all_fltr(pf, input, true);
	if (ret)
		goto remove_sw_rule;

	goto release_lock;

remove_sw_rule:
	ice_fdir_update_cntrs(hw, input->flow_type, false);
	list_del(&input->fltr_node);
release_lock:
	mutex_unlock(&hw->fdir_fltr_lock);
free_input:
	if (ret)
		devm_kfree(dev, input);

	return ret;
}
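/* Illustrative end-to-end add (all values hypothetical):
 *   ethtool -N eth0 flow-type tcp4 src-ip 192.168.10.1 dst-ip 192.168.10.2 \
 *           src-port 1024 dst-port 80 action 6 loc 5
 * builds an ice_fdir_fltr via ice_set_fdir_input_set(), configures the
 * extraction sequence with ice_cfg_fdir_xtrct_seq(), and writes the filter
 * with ice_fdir_write_all_fltr().
 */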