// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_switch.h"

#define ICE_ETH_DA_OFFSET		0
#define ICE_ETH_ETHTYPE_OFFSET		12
#define ICE_ETH_VLAN_TCI_OFFSET		14
#define ICE_MAX_VLAN_ID			0xFFF

/* Dummy Ethernet header needed in the ice_aqc_sw_rules_elem
 * struct to configure any switch filter rules.
 * {DA (6 bytes), SA (6 bytes),
 * Ether type (2 bytes for header without VLAN tag) OR
 * VLAN tag (4 bytes for header with VLAN tag) }
 *
 * A note on the hardcoded values:
 * byte 0 = 0x2: identifies the DA MAC as locally administered
 * byte 6 = 0x2: identifies the SA MAC as locally administered
 * byte 12 = 0x81 & byte 13 = 0x00:
 *	For a VLAN filter, the first two bytes define the Ether type (0x8100)
 *	and the remaining two bytes are a placeholder for programming a given
 *	VLAN ID.
 *	For an Ether type filter, the header is treated as having no VLAN tag
 *	and bytes 12 and 13 are used to program the given Ether type instead.
 */
#define DUMMY_ETH_HDR_LEN		16
static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
							0x2, 0, 0, 0, 0, 0,
							0x81, 0, 0, 0};

#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
#define ICE_SW_RULE_LG_ACT_SIZE(n) \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_lg_act) - \
	 sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
	 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
#define ICE_SW_RULE_VSI_LIST_SIZE(n) \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_vsi_list) - \
	 sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
	 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
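
/* Illustrative note (not part of the original comments): for a VLAN filter
 * with VLAN ID 100, the patched dummy header carries 0x81 0x00 0x00 0x64 at
 * bytes 12-15, while an Ether type filter for e.g. 0x88F7 overwrites bytes
 * 12-13 with 0x88 0xF7 and leaves bytes 14-15 alone (see ice_fill_sw_rule()
 * below). Rule buffers that carry this header are sized with
 * ICE_SW_RULE_RX_TX_ETH_HDR_SIZE; header-less remove requests use
 * ICE_SW_RULE_RX_TX_NO_HDR_SIZE.
 */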

/**
 * ice_init_def_sw_recp - initialize the recipe book keeping tables
 * @hw: pointer to the HW struct
 *
 * Allocate memory for the entire recipe table and initialize the structures/
 * entries corresponding to basic recipes.
 */
enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
{
	struct ice_sw_recipe *recps;
	u8 i;

	recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES,
			     sizeof(*recps), GFP_KERNEL);
	if (!recps)
		return ICE_ERR_NO_MEMORY;

	for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
		recps[i].root_rid = i;
		INIT_LIST_HEAD(&recps[i].filt_rules);
		INIT_LIST_HEAD(&recps[i].filt_replay_rules);
		mutex_init(&recps[i].filt_rule_lock);
	}

	hw->switch_info->recp_list = recps;

	return 0;
}

/**
 * ice_aq_get_sw_cfg - get switch configuration
 * @hw: pointer to the hardware structure
 * @buf: pointer to the result buffer
 * @buf_size: length of the buffer available for response
 * @req_desc: pointer to requested descriptor
 * @num_elems: pointer to number of elements
 * @cd: pointer to command details structure or NULL
 *
 * Get switch configuration (0x0200) to be placed in 'buf'.
 * This admin command returns information such as initial VSI/port number
 * and switch ID it belongs to.
 *
 * NOTE: *req_desc is both an input/output parameter.
 * The caller of this function first calls this function with *req_desc set
 * to 0. If the response from f/w has *req_desc set to 0, all the switch
 * configuration information has been returned; if non-zero (meaning not all
 * the information was returned), the caller should call this function again
 * with *req_desc set to the previous value returned by f/w to get the
 * next block of switch configuration information.
 *
 * *num_elems is an output-only parameter. It reflects the number of elements
 * in the response buffer. The caller should use *num_elems while parsing the
 * response buffer.
 */
static enum ice_status
ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
		  u16 buf_size, u16 *req_desc, u16 *num_elems,
		  struct ice_sq_cd *cd)
{
	struct ice_aqc_get_sw_cfg *cmd;
	enum ice_status status;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
	cmd = &desc.params.get_sw_conf;
	cmd->element = cpu_to_le16(*req_desc);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status) {
		*req_desc = le16_to_cpu(cmd->element);
		*num_elems = le16_to_cpu(cmd->num_elems);
	}

	return status;
}

/**
 * ice_aq_add_vsi
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Add a VSI context to the hardware (0x0210)
 */
static enum ice_status
ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_add_update_free_vsi_resp *res;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.vsi_cmd;
	res = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);

	if (!vsi_ctx->alloc_from_pool)
		cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |
					   ICE_AQ_VSI_IS_VALID);
	cmd->vf_id = vsi_ctx->vf_num;

	cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
				 sizeof(vsi_ctx->info), cd);

	if (!status) {
		vsi_ctx->vsi_num = le16_to_cpu(res->vsi_num) & ICE_AQ_VSI_NUM_M;
		vsi_ctx->vsis_allocd = le16_to_cpu(res->vsi_used);
		vsi_ctx->vsis_unallocated = le16_to_cpu(res->vsi_free);
	}

	return status;
}

/**
 * ice_aq_free_vsi
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
 * @cd: pointer to command details structure or NULL
 *
 * Free VSI context info from hardware (0x0213)
 */
static enum ice_status
ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		bool keep_vsi_alloc, struct ice_sq_cd *cd)
{
	struct ice_aqc_add_update_free_vsi_resp *resp;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.vsi_cmd;
	resp = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);

	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
	if (keep_vsi_alloc)
		cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (!status) {
		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
	}

	return status;
}

/**
 * ice_aq_update_vsi
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Update VSI context in the hardware (0x0211)
 */
static enum ice_status
ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		  struct ice_sq_cd *cd)
{
	struct ice_aqc_add_update_free_vsi_resp *resp;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.vsi_cmd;
	resp = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);

	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
				 sizeof(vsi_ctx->info), cd);

	if (!status) {
		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
	}

	return status;
}

/**
 * ice_is_vsi_valid - check whether the VSI is valid or not
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * check whether the VSI is valid or not
 */
bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
{
	return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
}

/**
 * ice_get_hw_vsi_num - return the HW VSI number
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * return the HW VSI number
 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
 */
u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
{
	return hw->vsi_ctx[vsi_handle]->vsi_num;
}
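
/* Illustrative sketch (not part of the driver): callers are expected to
 * validate a software VSI handle before translating it to the hardware VSI
 * number, e.g.
 *
 *	if (!ice_is_vsi_valid(hw, vsi_handle))
 *		return ICE_ERR_PARAM;
 *	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
 *
 * This is the pattern used by ice_free_vsi(), ice_update_vsi() and the rule
 * add/remove helpers later in this file.
 */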

/**
 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * return the VSI context entry for a given VSI handle
 */
struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
{
	return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
}

/**
 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 * @vsi: VSI context pointer
 *
 * save the VSI context entry for a given VSI handle
 */
static void
ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
{
	hw->vsi_ctx[vsi_handle] = vsi;
}

/**
 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 */
static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_vsi_ctx *vsi;
	u8 i;

	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi)
		return;
	ice_for_each_traffic_class(i) {
		if (vsi->lan_q_ctx[i]) {
			devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]);
			vsi->lan_q_ctx[i] = NULL;
		}
	}
}

/**
 * ice_clear_vsi_ctx - clear the VSI context entry
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * clear the VSI context entry
 */
static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_vsi_ctx *vsi;

	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	if (vsi) {
		ice_clear_vsi_q_ctx(hw, vsi_handle);
		devm_kfree(ice_hw_to_dev(hw), vsi);
		hw->vsi_ctx[vsi_handle] = NULL;
	}
}

/**
 * ice_clear_all_vsi_ctx - clear all the VSI context entries
 * @hw: pointer to the HW struct
 */
void ice_clear_all_vsi_ctx(struct ice_hw *hw)
{
	u16 i;

	for (i = 0; i < ICE_MAX_VSI; i++)
		ice_clear_vsi_ctx(hw, i);
}

/**
 * ice_add_vsi - add VSI context to the hardware and VSI handle list
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle provided by drivers
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Add a VSI context to the hardware and also add it into the VSI handle list.
 * If this function gets called after reset for existing VSIs, then update
 * the corresponding VSI handle list entry with the new HW VSI number.
 */
enum ice_status
ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	    struct ice_sq_cd *cd)
{
	struct ice_vsi_ctx *tmp_vsi_ctx;
	enum ice_status status;

	if (vsi_handle >= ICE_MAX_VSI)
		return ICE_ERR_PARAM;
	status = ice_aq_add_vsi(hw, vsi_ctx, cd);
	if (status)
		return status;
	tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!tmp_vsi_ctx) {
		/* Create a new VSI context */
		tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw),
					   sizeof(*tmp_vsi_ctx), GFP_KERNEL);
		if (!tmp_vsi_ctx) {
			ice_aq_free_vsi(hw, vsi_ctx, false, cd);
			return ICE_ERR_NO_MEMORY;
		}
		*tmp_vsi_ctx = *vsi_ctx;
		ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
	} else {
		/* update with new HW VSI num */
		tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
	}

	return 0;
}

/**
 * ice_free_vsi - free VSI context from hardware and VSI handle list
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle
 * @vsi_ctx: pointer to a VSI context struct
 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
 * @cd: pointer to command details structure or NULL
 *
 * Free VSI context info from hardware as well as from VSI handle list
 */
enum ice_status
ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	     bool keep_vsi_alloc, struct ice_sq_cd *cd)
{
	enum ice_status status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
	status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
	if (!status)
		ice_clear_vsi_ctx(hw, vsi_handle);
	return status;
}

/**
 * ice_update_vsi
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Update VSI context in the hardware
 */
enum ice_status
ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	       struct ice_sq_cd *cd)
{
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
	return ice_aq_update_vsi(hw, vsi_ctx, cd);
}
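
/* Illustrative usage sketch (assumption: the caller owns an ice_vsi_ctx that
 * has already been filled out by the VSI setup code; error handling trimmed):
 *
 *	status = ice_add_vsi(hw, vsi_handle, &ctx, NULL);
 *	...
 *	status = ice_update_vsi(hw, vsi_handle, &ctx, NULL);
 *	...
 *	status = ice_free_vsi(hw, vsi_handle, &ctx, false, NULL);
 *
 * ice_add_vsi() records the context against the software handle; the later
 * calls translate that handle back to the HW VSI number before talking to
 * firmware.
 */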

/**
 * ice_aq_alloc_free_vsi_list
 * @hw: pointer to the HW struct
 * @vsi_list_id: VSI list ID returned or used for lookup
 * @lkup_type: switch rule filter lookup type
 * @opc: switch rules population command type - pass in the command opcode
 *
 * Allocates or frees a VSI list resource
 */
static enum ice_status
ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
			   enum ice_sw_lkup_type lkup_type,
			   enum ice_adminq_opc opc)
{
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	struct ice_aqc_res_elem *vsi_ele;
	enum ice_status status;
	u16 buf_len;

	buf_len = sizeof(*sw_buf);
	sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
	if (!sw_buf)
		return ICE_ERR_NO_MEMORY;
	sw_buf->num_elems = cpu_to_le16(1);

	if (lkup_type == ICE_SW_LKUP_MAC ||
	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
	    lkup_type == ICE_SW_LKUP_PROMISC ||
	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN) {
		sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
	} else if (lkup_type == ICE_SW_LKUP_VLAN) {
		sw_buf->res_type =
			cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
	} else {
		status = ICE_ERR_PARAM;
		goto ice_aq_alloc_free_vsi_list_exit;
	}

	if (opc == ice_aqc_opc_free_res)
		sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id);

	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
	if (status)
		goto ice_aq_alloc_free_vsi_list_exit;

	if (opc == ice_aqc_opc_alloc_res) {
		vsi_ele = &sw_buf->elem[0];
		*vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp);
	}

ice_aq_alloc_free_vsi_list_exit:
	devm_kfree(ice_hw_to_dev(hw), sw_buf);
	return status;
}

/**
 * ice_aq_sw_rules - add/update/remove switch rules
 * @hw: pointer to the HW struct
 * @rule_list: pointer to switch rule population list
 * @rule_list_sz: total size of the rule list in bytes
 * @num_rules: number of switch rules in the rule_list
 * @opc: switch rules population command type - pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
 */
static enum ice_status
ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
		u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;

	if (opc != ice_aqc_opc_add_sw_rules &&
	    opc != ice_aqc_opc_update_sw_rules &&
	    opc != ice_aqc_opc_remove_sw_rules)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	desc.params.sw_rules.num_rules_fltr_entry_index =
		cpu_to_le16(num_rules);
	return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
}

/**
 * ice_init_port_info - Initialize port_info with switch configuration data
 * @pi: pointer to port_info
 * @vsi_port_num: VSI number or port number
 * @type: Type of switch element (port or VSI)
 * @swid: switch ID of the switch the element is attached to
 * @pf_vf_num: PF or VF number
 * @is_vf: true if the element is a VF, false otherwise
 */
static void
ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
		   u16 swid, u16 pf_vf_num, bool is_vf)
{
	switch (type) {
	case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
		pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
		pi->sw_id = swid;
		pi->pf_vf_num = pf_vf_num;
		pi->is_vf = is_vf;
		pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
		pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
		break;
	default:
		ice_debug(pi->hw, ICE_DBG_SW,
			  "incorrect VSI/port type received\n");
		break;
	}
}

/**
 * ice_get_initial_sw_cfg - Get initial port and default VSI data
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
{
	struct ice_aqc_get_sw_cfg_resp *rbuf;
	enum ice_status status;
	u16 req_desc = 0;
	u16 num_elems;
	u16 i;

	rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN,
			    GFP_KERNEL);

	if (!rbuf)
		return ICE_ERR_NO_MEMORY;

	/* Multiple calls to ice_aq_get_sw_cfg may be required
	 * to get all the switch configuration information. The need
	 * for additional calls is indicated by ice_aq_get_sw_cfg
	 * writing a non-zero value in req_desc
	 */
	do {
		status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
					   &req_desc, &num_elems, NULL);

		if (status)
			break;

		for (i = 0; i < num_elems; i++) {
			struct ice_aqc_get_sw_cfg_resp_elem *ele;
			u16 pf_vf_num, swid, vsi_port_num;
			bool is_vf = false;
			u8 type;

			ele = rbuf[i].elements;
			vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
				ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;

			pf_vf_num = le16_to_cpu(ele->pf_vf_num) &
				ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;

			swid = le16_to_cpu(ele->swid);

			if (le16_to_cpu(ele->pf_vf_num) &
			    ICE_AQC_GET_SW_CONF_RESP_IS_VF)
				is_vf = true;

			type = le16_to_cpu(ele->vsi_port_num) >>
				ICE_AQC_GET_SW_CONF_RESP_TYPE_S;

			if (type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
				/* FW VSI is not needed. Just continue. */
				continue;
			}

			ice_init_port_info(hw->port_info, vsi_port_num,
					   type, swid, pf_vf_num, is_vf);
		}
	} while (req_desc && !status);

	devm_kfree(ice_hw_to_dev(hw), (void *)rbuf);
	return status;
}

/**
 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
 * @hw: pointer to the hardware structure
 * @fi: filter info structure to fill/update
 *
 * This helper function populates the lb_en and lan_en elements of the provided
 * ice_fltr_info struct using the switch's type and characteristics of the
 * switch rule being configured.
 */
static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
{
	fi->lb_en = false;
	fi->lan_en = false;
	if ((fi->flag & ICE_FLTR_TX) &&
	    (fi->fltr_act == ICE_FWD_TO_VSI ||
	     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
	     fi->fltr_act == ICE_FWD_TO_Q ||
	     fi->fltr_act == ICE_FWD_TO_QGRP)) {
		/* Setting LB for prune actions will result in replicated
		 * packets to the internal switch that will be dropped.
		 */
		if (fi->lkup_type != ICE_SW_LKUP_VLAN)
			fi->lb_en = true;

		/* Set lan_en to TRUE if
		 * 1. The switch is a VEB AND
		 * 2. One of the following is true:
		 *	2.1 The lookup is a directional lookup like ethertype,
		 *	    promiscuous, ethertype-MAC, promiscuous-VLAN
		 *	    or default-port, OR
		 *	2.2 The lookup is VLAN, OR
		 *	2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
		 *	2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
		 *
		 * OR
		 *
		 * The switch is a VEPA.
		 *
		 * In all other cases, the LAN enable has to be set to false.
		 */
		if (hw->evb_veb) {
			if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
			    fi->lkup_type == ICE_SW_LKUP_PROMISC ||
			    fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
			    fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
			    fi->lkup_type == ICE_SW_LKUP_DFLT ||
			    fi->lkup_type == ICE_SW_LKUP_VLAN ||
			    (fi->lkup_type == ICE_SW_LKUP_MAC &&
			     !is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
			    (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
			     !is_unicast_ether_addr(fi->l_data.mac.mac_addr)))
				fi->lan_en = true;
		} else {
			fi->lan_en = true;
		}
	}
}
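
/* Illustrative summary of the policy above (not from the original comments):
 * for a Tx forwarding rule on a VEB, a unicast-MAC lookup ends up with
 * lb_en = true and lan_en = false (the frame is only looped back inside the
 * switch), while ethertype, promiscuous, default-port, VLAN and mcast/bcast
 * MAC lookups also get lan_en = true. On a VEPA, lan_en is always true for
 * these actions. VLAN lookups never set lb_en because they drive prune
 * actions.
 */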

/**
 * ice_fill_sw_rule - Helper function to fill switch rule structure
 * @hw: pointer to the hardware structure
 * @f_info: entry containing packet forwarding information
 * @s_rule: switch rule structure to be filled in based on mac_entry
 * @opc: switch rules population command type - pass in the command opcode
 */
static void
ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
		 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
{
	u16 vlan_id = ICE_MAX_VLAN_ID + 1;
	void *daddr = NULL;
	u16 eth_hdr_sz;
	u8 *eth_hdr;
	u32 act = 0;
	__be16 *off;
	u8 q_rgn;

	if (opc == ice_aqc_opc_remove_sw_rules) {
		s_rule->pdata.lkup_tx_rx.act = 0;
		s_rule->pdata.lkup_tx_rx.index =
			cpu_to_le16(f_info->fltr_rule_id);
		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
		return;
	}

	eth_hdr_sz = sizeof(dummy_eth_header);
	eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;

	/* initialize the ether header with a dummy header */
	memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz);
	ice_fill_sw_info(hw, f_info);

	switch (f_info->fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
			ICE_SINGLE_ACT_VSI_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_VSI_LIST:
		act |= ICE_SINGLE_ACT_VSI_LIST;
		act |= (f_info->fwd_id.vsi_list_id <<
			ICE_SINGLE_ACT_VSI_LIST_ID_S) &
			ICE_SINGLE_ACT_VSI_LIST_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_Q:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		break;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
			ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_QGRP:
		q_rgn = f_info->qgrp_size > 0 ?
			(u8)ilog2(f_info->qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
			ICE_SINGLE_ACT_Q_REGION_M;
		break;
	default:
		return;
	}

	if (f_info->lb_en)
		act |= ICE_SINGLE_ACT_LB_ENABLE;
	if (f_info->lan_en)
		act |= ICE_SINGLE_ACT_LAN_ENABLE;

	switch (f_info->lkup_type) {
	case ICE_SW_LKUP_MAC:
		daddr = f_info->l_data.mac.mac_addr;
		break;
	case ICE_SW_LKUP_VLAN:
		vlan_id = f_info->l_data.vlan.vlan_id;
		if (f_info->fltr_act == ICE_FWD_TO_VSI ||
		    f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
			act |= ICE_SINGLE_ACT_PRUNE;
			act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
		}
		break;
	case ICE_SW_LKUP_ETHERTYPE_MAC:
		daddr = f_info->l_data.ethertype_mac.mac_addr;
		/* fall-through */
	case ICE_SW_LKUP_ETHERTYPE:
		off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
		*off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		break;
	case ICE_SW_LKUP_PROMISC_VLAN:
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		/* fall-through */
	case ICE_SW_LKUP_PROMISC:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		break;
	default:
		break;
	}

	s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX) :
		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);

	/* Recipe set depending on lookup type */
	s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(f_info->lkup_type);
	s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(f_info->src);
	s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);

	if (daddr)
		ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr);

	if (!(vlan_id > ICE_MAX_VLAN_ID)) {
		off = (__force __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
		*off = cpu_to_be16(vlan_id);
	}

	/* Create the switch rule with the final dummy Ethernet header */
	if (opc != ice_aqc_opc_update_sw_rules)
		s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(eth_hdr_sz);
}
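
/* Illustrative example (field values are hypothetical): a unicast MAC rule
 * built the way ice_add_mac() sets it up could look like
 *
 *	struct ice_fltr_info fi = { 0 };
 *
 *	fi.lkup_type = ICE_SW_LKUP_MAC;
 *	fi.fltr_act = ICE_FWD_TO_VSI;
 *	fi.fwd_id.hw_vsi_id = hw_vsi_id;
 *	fi.flag = ICE_FLTR_TX;
 *	fi.src = hw_vsi_id;
 *	ether_addr_copy(fi.l_data.mac.mac_addr, mac);
 *
 * ice_fill_sw_rule() then copies the dummy header, writes the MAC at
 * ICE_ETH_DA_OFFSET, sets recipe_id to ICE_SW_LKUP_MAC and builds the single
 * action word (VSI forwarding + valid bit, plus lb_en/lan_en from
 * ice_fill_sw_info() above).
 */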

/**
 * ice_add_marker_act
 * @hw: pointer to the hardware structure
 * @m_ent: the management entry for which sw marker needs to be added
 * @sw_marker: sw marker to tag the Rx descriptor with
 * @l_id: large action resource ID
 *
 * Create a large action to hold software marker and update the switch rule
 * entry pointed by m_ent with newly created large action
 */
static enum ice_status
ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
		   u16 sw_marker, u16 l_id)
{
	struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
	/* For software marker we need 3 large actions
	 * 1. FWD action: FWD TO VSI or VSI LIST
	 * 2. GENERIC VALUE action to hold the profile ID
	 * 3. GENERIC VALUE action to hold the software marker ID
	 */
	const u16 num_lg_acts = 3;
	enum ice_status status;
	u16 lg_act_size;
	u16 rules_size;
	u32 act;
	u16 id;

	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;

	/* Create two back-to-back switch rules and submit them to the HW using
	 * one memory buffer:
	 *    1. Large Action
	 *    2. Look up Tx Rx
	 */
	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL);
	if (!lg_act)
		return ICE_ERR_NO_MEMORY;

	rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);

	/* Fill in the first switch rule i.e. large action */
	lg_act->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT);
	lg_act->pdata.lg_act.index = cpu_to_le16(l_id);
	lg_act->pdata.lg_act.size = cpu_to_le16(num_lg_acts);

	/* First action VSI forwarding or VSI list forwarding depending on how
	 * many VSIs
	 */
	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
		m_ent->fltr_info.fwd_id.hw_vsi_id;

	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
		ICE_LG_ACT_VSI_LIST_ID_M;
	if (m_ent->vsi_count > 1)
		act |= ICE_LG_ACT_VSI_LIST;
	lg_act->pdata.lg_act.act[0] = cpu_to_le32(act);

	/* Second action descriptor type */
	act = ICE_LG_ACT_GENERIC;

	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
	lg_act->pdata.lg_act.act[1] = cpu_to_le32(act);

	act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
	       ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;

	/* Third action Marker value */
	act |= ICE_LG_ACT_GENERIC;
	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
		ICE_LG_ACT_GENERIC_VALUE_M;

	lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);

	/* call the fill switch rule to fill the lookup Tx Rx structure */
	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
			 ice_aqc_opc_update_sw_rules);

	/* Update the action to point to the large action ID */
	rx_tx->pdata.lkup_tx_rx.act =
		cpu_to_le32(ICE_SINGLE_ACT_PTR |
			    ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
			     ICE_SINGLE_ACT_PTR_VAL_M));

	/* Use the filter rule ID of the previously created rule with single
	 * act. Once the update happens, hardware will treat this as large
	 * action
	 */
	rx_tx->pdata.lkup_tx_rx.index =
		cpu_to_le16(m_ent->fltr_info.fltr_rule_id);

	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
				 ice_aqc_opc_update_sw_rules, NULL);
	if (!status) {
		m_ent->lg_act_idx = l_id;
		m_ent->sw_marker_id = sw_marker;
	}

	devm_kfree(ice_hw_to_dev(hw), lg_act);
	return status;
}

/**
 * ice_create_vsi_list_map
 * @hw: pointer to the hardware structure
 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
 * @num_vsi: number of VSI handles in the array
 * @vsi_list_id: VSI list ID generated as part of allocate resource
 *
 * Helper function to create a new entry of VSI list ID to VSI mapping
 * using the given VSI list ID
 */
static struct ice_vsi_list_map_info *
ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
			u16 vsi_list_id)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_map;
	int i;

	v_map = devm_kcalloc(ice_hw_to_dev(hw), 1, sizeof(*v_map), GFP_KERNEL);
	if (!v_map)
		return NULL;

	v_map->vsi_list_id = vsi_list_id;
	v_map->ref_cnt = 1;
	for (i = 0; i < num_vsi; i++)
		set_bit(vsi_handle_arr[i], v_map->vsi_map);

	list_add(&v_map->list_entry, &sw->vsi_list_map_head);
	return v_map;
}

/**
 * ice_update_vsi_list_rule
 * @hw: pointer to the hardware structure
 * @vsi_handle_arr: array of VSI handles to form a VSI list
 * @num_vsi: number of VSI handles in the array
 * @vsi_list_id: VSI list ID generated as part of allocate resource
 * @remove: Boolean value to indicate if this is a remove action
 * @opc: switch rules population command type - pass in the command opcode
 * @lkup_type: lookup type of the filter
 *
 * Call AQ command to add a new switch rule or update existing switch rule
 * using the given VSI list ID
 */
static enum ice_status
ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
			 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
			 enum ice_sw_lkup_type lkup_type)
{
	struct ice_aqc_sw_rules_elem *s_rule;
	enum ice_status status;
	u16 s_rule_size;
	u16 type;
	int i;

	if (!num_vsi)
		return ICE_ERR_PARAM;

	if (lkup_type == ICE_SW_LKUP_MAC ||
	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
	    lkup_type == ICE_SW_LKUP_PROMISC ||
	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN)
		type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
			ICE_AQC_SW_RULES_T_VSI_LIST_SET;
	else if (lkup_type == ICE_SW_LKUP_VLAN)
		type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
			ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
	else
		return ICE_ERR_PARAM;

	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;
	for (i = 0; i < num_vsi; i++) {
		if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
			status = ICE_ERR_PARAM;
			goto exit;
		}
		/* AQ call requires hw_vsi_id(s) */
		s_rule->pdata.vsi_list.vsi[i] =
			cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
	}

	s_rule->type = cpu_to_le16(type);
	s_rule->pdata.vsi_list.number_vsi = cpu_to_le16(num_vsi);
	s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);

	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);

exit:
	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}

/**
 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
 * @hw: pointer to the HW struct
 * @vsi_handle_arr: array of VSI handles to form a VSI list
 * @num_vsi: number of VSI handles in the array
 * @vsi_list_id: stores the ID of the VSI list to be created
 * @lkup_type: switch rule filter's lookup type
 */
static enum ice_status
ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
			 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
{
	enum ice_status status;

	status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
					    ice_aqc_opc_alloc_res);
	if (status)
		return status;

	/* Update the newly created VSI list to include the specified VSIs */
	return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
					*vsi_list_id, false,
					ice_aqc_opc_add_sw_rules, lkup_type);
}

/**
 * ice_create_pkt_fwd_rule
 * @hw: pointer to the hardware structure
 * @f_entry: entry containing packet forwarding information
 *
 * Create switch rule with given filter information and add an entry
 * to the corresponding filter management list to track this switch rule
 * and VSI mapping
 */
static enum ice_status
ice_create_pkt_fwd_rule(struct ice_hw *hw,
			struct ice_fltr_list_entry *f_entry)
{
	struct ice_fltr_mgmt_list_entry *fm_entry;
	struct ice_aqc_sw_rules_elem *s_rule;
	enum ice_sw_lkup_type l_type;
	struct ice_sw_recipe *recp;
	enum ice_status status;

	s_rule = devm_kzalloc(ice_hw_to_dev(hw),
			      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;
	fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry),
				GFP_KERNEL);
	if (!fm_entry) {
		status = ICE_ERR_NO_MEMORY;
		goto ice_create_pkt_fwd_rule_exit;
	}

	fm_entry->fltr_info = f_entry->fltr_info;

	/* Initialize all the fields for the management entry */
	fm_entry->vsi_count = 1;
	fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
	fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
	fm_entry->counter_index = ICE_INVAL_COUNTER_ID;

	ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
			 ice_aqc_opc_add_sw_rules);

	status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
				 ice_aqc_opc_add_sw_rules, NULL);
	if (status) {
		devm_kfree(ice_hw_to_dev(hw), fm_entry);
		goto ice_create_pkt_fwd_rule_exit;
	}

	f_entry->fltr_info.fltr_rule_id =
		le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
	fm_entry->fltr_info.fltr_rule_id =
		le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);

	/* The book keeping entries will get removed when base driver
	 * calls remove filter AQ command
	 */
	l_type = fm_entry->fltr_info.lkup_type;
	recp = &hw->switch_info->recp_list[l_type];
	list_add(&fm_entry->list_entry, &recp->filt_rules);

ice_create_pkt_fwd_rule_exit:
	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}

/**
 * ice_update_pkt_fwd_rule
 * @hw: pointer to the hardware structure
 * @f_info: filter information for switch rule
 *
 * Call AQ command to update a previously created switch rule with a
 * VSI list ID
 */
static enum ice_status
ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
{
	struct ice_aqc_sw_rules_elem *s_rule;
	enum ice_status status;

	s_rule = devm_kzalloc(ice_hw_to_dev(hw),
			      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;

	ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);

	s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(f_info->fltr_rule_id);

	/* Update switch rule with new rule set to forward VSI list */
	status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
				 ice_aqc_opc_update_sw_rules, NULL);

	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}

/**
 * ice_update_sw_rule_bridge_mode
 * @hw: pointer to the HW struct
 *
 * Updates unicast switch filter rules based on VEB/VEPA mode
 */
enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *fm_entry;
	enum ice_status status = 0;
	struct list_head *rule_head;
	struct mutex *rule_lock; /* Lock to protect filter rule list */

	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;

	mutex_lock(rule_lock);
	list_for_each_entry(fm_entry, rule_head, list_entry) {
		struct ice_fltr_info *fi = &fm_entry->fltr_info;
		u8 *addr = fi->l_data.mac.mac_addr;

		/* Update unicast Tx rules to reflect the selected
		 * VEB/VEPA mode
		 */
		if ((fi->flag & ICE_FLTR_TX) && is_unicast_ether_addr(addr) &&
		    (fi->fltr_act == ICE_FWD_TO_VSI ||
		     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
		     fi->fltr_act == ICE_FWD_TO_Q ||
		     fi->fltr_act == ICE_FWD_TO_QGRP)) {
			status = ice_update_pkt_fwd_rule(hw, fi);
			if (status)
				break;
		}
	}

	mutex_unlock(rule_lock);

	return status;
}

/**
 * ice_add_update_vsi_list
 * @hw: pointer to the hardware structure
 * @m_entry: pointer to current filter management list entry
 * @cur_fltr: filter information from the book keeping entry
 * @new_fltr: filter information with the new VSI to be added
 *
 * Call AQ command to add or update previously created VSI list with new VSI.
 *
 * Helper function to do book keeping associated with adding filter information
 * The algorithm to do the book keeping is described below:
 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
 *	if only one VSI has been added till now
 *		Allocate a new VSI list and add two VSIs
 *		to this list using switch rule command
 *		Update the previously created switch rule with the
 *		newly created VSI list ID
 *	if a VSI list was previously created
 *		Add the new VSI to the previously created VSI list set
 *		using the update switch rule command
 */
static enum ice_status
ice_add_update_vsi_list(struct ice_hw *hw,
			struct ice_fltr_mgmt_list_entry *m_entry,
			struct ice_fltr_info *cur_fltr,
			struct ice_fltr_info *new_fltr)
{
	enum ice_status status = 0;
	u16 vsi_list_id = 0;

	if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
	     cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
		return ICE_ERR_NOT_IMPL;

	if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
	     new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
	    (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
	     cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
		return ICE_ERR_NOT_IMPL;

	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
		/* Only one entry existed in the mapping and it was not already
		 * a part of a VSI list. So, create a VSI list with the old and
		 * new VSIs.
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];

		/* A rule already exists with the new VSI being added */
		if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
			return ICE_ERR_ALREADY_EXISTS;

		vsi_handle_arr[0] = cur_fltr->vsi_handle;
		vsi_handle_arr[1] = new_fltr->vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id,
						  new_fltr->lkup_type);
		if (status)
			return status;

		tmp_fltr = *new_fltr;
		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		/* Update the previous switch rule of "MAC forward to VSI" to
		 * "MAC fwd to VSI list"
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			return status;

		cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
		cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
		m_entry->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);

		/* If this entry was large action then the large action needs
		 * to be updated to point to FWD to VSI list
		 */
		if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
			status =
			    ice_add_marker_act(hw, m_entry,
					       m_entry->sw_marker_id,
					       m_entry->lg_act_idx);
	} else {
		u16 vsi_handle = new_fltr->vsi_handle;
		enum ice_adminq_opc opcode;

		if (!m_entry->vsi_list_info)
			return ICE_ERR_CFG;

		/* A rule already exists with the new VSI being added */
		if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
			return 0;

		/* Update the previously created VSI list set with
		 * the new VSI ID passed in
		 */
		vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
		opcode = ice_aqc_opc_update_sw_rules;

		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
						  vsi_list_id, false, opcode,
						  new_fltr->lkup_type);
		/* update VSI list mapping info with new VSI ID */
		if (!status)
			set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
	}
	if (!status)
		m_entry->vsi_count++;
	return status;
}
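
/* Illustrative walk-through of the bookkeeping above (handles and IDs are
 * hypothetical): VSI handle 3 adds a MAC filter first, creating a plain
 * ICE_FWD_TO_VSI rule. When VSI handle 5 later subscribes to the same MAC,
 * ice_add_update_vsi_list() allocates a VSI list, programs it with the HW
 * VSI numbers of handles 3 and 5, rewrites the existing rule to
 * ICE_FWD_TO_VSI_LIST pointing at that list, and records the mapping via
 * ice_create_vsi_list_map(). A third subscriber only needs the cheaper
 * "update VSI list" path.
 */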

/**
 * ice_find_rule_entry - Search a rule entry
 * @hw: pointer to the hardware structure
 * @recp_id: lookup type for which the specified rule needs to be searched
 * @f_info: rule information
 *
 * Helper function to search for a given rule entry
 * Returns pointer to entry storing the rule if found
 */
static struct ice_fltr_mgmt_list_entry *
ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
{
	struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
	struct ice_switch_info *sw = hw->switch_info;
	struct list_head *list_head;

	list_head = &sw->recp_list[recp_id].filt_rules;
	list_for_each_entry(list_itr, list_head, list_entry) {
		if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
			    sizeof(f_info->l_data)) &&
		    f_info->flag == list_itr->fltr_info.flag) {
			ret = list_itr;
			break;
		}
	}
	return ret;
}

/**
 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
 * @hw: pointer to the hardware structure
 * @recp_id: lookup type for which VSI lists need to be searched
 * @vsi_handle: VSI handle to be found in VSI list
 * @vsi_list_id: VSI list ID found containing vsi_handle
 *
 * Helper function to search a VSI list with single entry containing given VSI
 * handle element. This can be extended further to search VSI list with more
 * than 1 vsi_count. Returns pointer to VSI list entry if found.
 */
static struct ice_vsi_list_map_info *
ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
			u16 *vsi_list_id)
{
	struct ice_vsi_list_map_info *map_info = NULL;
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *list_itr;
	struct list_head *list_head;

	list_head = &sw->recp_list[recp_id].filt_rules;
	list_for_each_entry(list_itr, list_head, list_entry) {
		if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) {
			map_info = list_itr->vsi_list_info;
			if (test_bit(vsi_handle, map_info->vsi_map)) {
				*vsi_list_id = map_info->vsi_list_id;
				return map_info;
			}
		}
	}
	return NULL;
}

/**
 * ice_add_rule_internal - add rule for a given lookup type
 * @hw: pointer to the hardware structure
 * @recp_id: lookup type (recipe ID) for which rule has to be added
 * @f_entry: structure containing MAC forwarding information
 *
 * Adds or updates the rule lists for a given recipe
 */
static enum ice_status
ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
		      struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_info *new_fltr, *cur_fltr;
	struct ice_fltr_mgmt_list_entry *m_entry;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = 0;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;
	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);

	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;

	mutex_lock(rule_lock);
	new_fltr = &f_entry->fltr_info;
	if (new_fltr->flag & ICE_FLTR_RX)
		new_fltr->src = hw->port_info->lport;
	else if (new_fltr->flag & ICE_FLTR_TX)
		new_fltr->src = f_entry->fltr_info.fwd_id.hw_vsi_id;

	m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
	if (!m_entry) {
		mutex_unlock(rule_lock);
		return ice_create_pkt_fwd_rule(hw, f_entry);
	}

	cur_fltr = &m_entry->fltr_info;
	status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
	mutex_unlock(rule_lock);

	return status;
}

/**
 * ice_remove_vsi_list_rule
 * @hw: pointer to the hardware structure
 * @vsi_list_id: VSI list ID generated as part of allocate resource
 * @lkup_type: switch rule filter lookup type
 *
 * The VSI list should be emptied before this function is called to remove the
 * VSI list.
 */
static enum ice_status
ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
			 enum ice_sw_lkup_type lkup_type)
{
	struct ice_aqc_sw_rules_elem *s_rule;
	enum ice_status status;
	u16 s_rule_size;

	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;

	s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
	s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);

	/* Free the vsi_list resource that we allocated. It is assumed that the
	 * list is empty at this point.
	 */
	status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
					    ice_aqc_opc_free_res);

	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}

/**
 * ice_rem_update_vsi_list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle of the VSI to remove
 * @fm_list: filter management entry for which the VSI list management needs to
 *	     be done
 */
static enum ice_status
ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
			struct ice_fltr_mgmt_list_entry *fm_list)
{
	enum ice_sw_lkup_type lkup_type;
	enum ice_status status = 0;
	u16 vsi_list_id;

	if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)
		return ICE_ERR_PARAM;

	/* A rule with the VSI being removed does not exist */
	if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
		return ICE_ERR_DOES_NOT_EXIST;

	lkup_type = fm_list->fltr_info.lkup_type;
	vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,
					  lkup_type);
	if (status)
		return status;

	fm_list->vsi_count--;
	clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);

	if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
		struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;
		u16 rem_vsi_handle;

		rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
						ICE_MAX_VSI);
		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
			return ICE_ERR_OUT_OF_RANGE;

		/* Make sure VSI list is empty before removing it below */
		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
						  vsi_list_id, true,
						  ice_aqc_opc_update_sw_rules,
						  lkup_type);
		if (status)
			return status;

		tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr_info.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		tmp_fltr_info.vsi_handle = rem_vsi_handle;
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
		if (status) {
			ice_debug(hw, ICE_DBG_SW,
				  "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
				  tmp_fltr_info.fwd_id.hw_vsi_id, status);
			return status;
		}

		fm_list->fltr_info = tmp_fltr_info;
	}

	if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
	    (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;

		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
		if (status) {
			ice_debug(hw, ICE_DBG_SW,
				  "Failed to remove VSI list %d, error %d\n",
				  vsi_list_id, status);
			return status;
		}

		list_del(&vsi_list_info->list_entry);
		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
		fm_list->vsi_list_info = NULL;
	}

	return status;
}

/**
 * ice_remove_rule_internal - Remove a filter rule of a given type
 * @hw: pointer to the hardware structure
 * @recp_id: recipe ID for which the rule needs to be removed
 * @f_entry: rule entry containing filter information
 */
static enum ice_status
ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
			 struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *list_elem;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = 0;
	bool remove_rule = false;
	u16 vsi_handle;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;
	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);

	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
	mutex_lock(rule_lock);
	list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
	if (!list_elem) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto exit;
	}

	if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
		remove_rule = true;
	} else if (!list_elem->vsi_list_info) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto exit;
	} else if (list_elem->vsi_list_info->ref_cnt > 1) {
		/* a ref_cnt > 1 indicates that the vsi_list is being
		 * shared by multiple rules. Decrement the ref_cnt and
		 * remove this rule, but do not modify the list, as it
		 * is in-use by other rules.
		 */
		list_elem->vsi_list_info->ref_cnt--;
		remove_rule = true;
	} else {
		/* a ref_cnt of 1 indicates the vsi_list is only used
		 * by one rule. However, the original removal request is only
		 * for a single VSI. Update the vsi_list first, and only
		 * remove the rule if there are no further VSIs in this list.
		 */
		vsi_handle = f_entry->fltr_info.vsi_handle;
		status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
		if (status)
			goto exit;
		/* if VSI count goes to zero after updating the VSI list */
		if (list_elem->vsi_count == 0)
			remove_rule = true;
	}

	if (remove_rule) {
		/* Remove the lookup rule */
		struct ice_aqc_sw_rules_elem *s_rule;

		s_rule = devm_kzalloc(ice_hw_to_dev(hw),
				      ICE_SW_RULE_RX_TX_NO_HDR_SIZE,
				      GFP_KERNEL);
		if (!s_rule) {
			status = ICE_ERR_NO_MEMORY;
			goto exit;
		}

		ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
				 ice_aqc_opc_remove_sw_rules);

		status = ice_aq_sw_rules(hw, s_rule,
					 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
					 ice_aqc_opc_remove_sw_rules, NULL);

		/* Remove a book keeping from the list */
		devm_kfree(ice_hw_to_dev(hw), s_rule);

		if (status)
			goto exit;

		list_del(&list_elem->list_entry);
		devm_kfree(ice_hw_to_dev(hw), list_elem);
	}
exit:
	mutex_unlock(rule_lock);
	return status;
}

/**
 * ice_add_mac - Add a MAC address based filter rule
 * @hw: pointer to the hardware structure
 * @m_list: list of MAC addresses and forwarding information
 *
 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
 * multiple unicast addresses, the function assumes that all the
 * addresses are unique in a given add_mac call. It doesn't
 * check for duplicates in this case; removing duplicates from a given
 * list should be taken care of by the caller of this function.
 */
enum ice_status
ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
{
	struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
	struct ice_fltr_list_entry *m_list_itr;
	struct list_head *rule_head;
	u16 elem_sent, total_elem_left;
	struct ice_switch_info *sw;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = 0;
	u16 num_unicast = 0;
	u16 s_rule_size;

	if (!m_list || !hw)
		return ICE_ERR_PARAM;

	s_rule = NULL;
	sw = hw->switch_info;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
		u16 vsi_handle;
		u16 hw_vsi_id;

		m_list_itr->fltr_info.flag = ICE_FLTR_TX;
		vsi_handle = m_list_itr->fltr_info.vsi_handle;
		if (!ice_is_vsi_valid(hw, vsi_handle))
			return ICE_ERR_PARAM;
		hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
		m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
		/* update the src in case it is VSI num */
		if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
			return ICE_ERR_PARAM;
		m_list_itr->fltr_info.src = hw_vsi_id;
		if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
		    is_zero_ether_addr(add))
			return ICE_ERR_PARAM;
		if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
			/* Don't overwrite the unicast address */
			mutex_lock(rule_lock);
			if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
						&m_list_itr->fltr_info)) {
				mutex_unlock(rule_lock);
				return ICE_ERR_ALREADY_EXISTS;
			}
			mutex_unlock(rule_lock);
			num_unicast++;
		} else if (is_multicast_ether_addr(add) ||
			   (is_unicast_ether_addr(add) && hw->ucast_shared)) {
			m_list_itr->status =
				ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
						      m_list_itr);
			if (m_list_itr->status)
				return m_list_itr->status;
		}
	}

	mutex_lock(rule_lock);
	/* Exit if no suitable entries were found for adding bulk switch rule */
	if (!num_unicast) {
		status = 0;
		goto ice_add_mac_exit;
	}

	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;

	/* Allocate switch rule buffer for the bulk update for unicast */
	s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
			      GFP_KERNEL);
	if (!s_rule) {
		status = ICE_ERR_NO_MEMORY;
		goto ice_add_mac_exit;
	}

	r_iter = s_rule;
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];

		if (is_unicast_ether_addr(mac_addr)) {
			ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
					 ice_aqc_opc_add_sw_rules);
			r_iter = (struct ice_aqc_sw_rules_elem *)
				((u8 *)r_iter + s_rule_size);
		}
	}

	/* Call AQ bulk switch rule update for all unicast addresses */
	r_iter = s_rule;
	/* Call AQ switch rule in AQ_MAX chunk */
	for (total_elem_left = num_unicast; total_elem_left > 0;
	     total_elem_left -= elem_sent) {
		struct ice_aqc_sw_rules_elem *entry = r_iter;

		elem_sent = min(total_elem_left,
				(u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
		status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
					 elem_sent, ice_aqc_opc_add_sw_rules,
					 NULL);
		if (status)
			goto ice_add_mac_exit;
		r_iter = (struct ice_aqc_sw_rules_elem *)
			((u8 *)r_iter + (elem_sent * s_rule_size));
	}

	/* Fill up rule ID based on the value returned from FW */
	r_iter = s_rule;
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
		struct ice_fltr_mgmt_list_entry *fm_entry;

		if (is_unicast_ether_addr(mac_addr)) {
			f_info->fltr_rule_id =
				le16_to_cpu(r_iter->pdata.lkup_tx_rx.index);
			f_info->fltr_act = ICE_FWD_TO_VSI;
			/* Create an entry to track this MAC address */
			fm_entry = devm_kzalloc(ice_hw_to_dev(hw),
						sizeof(*fm_entry), GFP_KERNEL);
			if (!fm_entry) {
				status = ICE_ERR_NO_MEMORY;
				goto ice_add_mac_exit;
			}
			fm_entry->fltr_info = *f_info;
			fm_entry->vsi_count = 1;
			/* The book keeping entries will get removed when
			 * base driver calls remove filter AQ command
			 */

			list_add(&fm_entry->list_entry, rule_head);
			r_iter = (struct ice_aqc_sw_rules_elem *)
				((u8 *)r_iter + s_rule_size);
		}
	}

ice_add_mac_exit:
	mutex_unlock(rule_lock);
	if (s_rule)
		devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}
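
/* Illustrative caller sketch ("vsi_handle" and "mac" are placeholders and
 * error handling is trimmed; the real callers live in the core driver code):
 *
 *	struct ice_fltr_list_entry entry = { 0 };
 *	LIST_HEAD(m_list);
 *
 *	entry.fltr_info.lkup_type = ICE_SW_LKUP_MAC;
 *	entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
 *	entry.fltr_info.src_id = ICE_SRC_ID_VSI;
 *	entry.fltr_info.vsi_handle = vsi_handle;
 *	ether_addr_copy(entry.fltr_info.l_data.mac.mac_addr, mac);
 *	list_add(&entry.list_entry, &m_list);
 *
 *	status = ice_add_mac(hw, &m_list);
 *
 * ice_add_mac() fills in flag, src and fwd_id.hw_vsi_id itself, batches
 * unicast entries into bulk AQ calls and adds multicast entries one by one
 * through ice_add_rule_internal().
 */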
(!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;

	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
	new_fltr = &f_entry->fltr_info;

	/* VLAN ID should only be 12 bits */
	if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
		return ICE_ERR_PARAM;

	if (new_fltr->src_id != ICE_SRC_ID_VSI)
		return ICE_ERR_PARAM;

	new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
	lkup_type = new_fltr->lkup_type;
	vsi_handle = new_fltr->vsi_handle;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	mutex_lock(rule_lock);
	v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
	if (!v_list_itr) {
		struct ice_vsi_list_map_info *map_info = NULL;

		if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
			/* All VLAN pruning rules use a VSI list. Check if
			 * there is already a VSI list containing the VSI that
			 * we want to add. If found, use the same vsi_list_id
			 * for this new VLAN rule; otherwise create a new list.
			 */
			map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
							   vsi_handle,
							   &vsi_list_id);
			if (!map_info) {
				status = ice_create_vsi_list_rule(hw,
								  &vsi_handle,
								  1,
								  &vsi_list_id,
								  lkup_type);
				if (status)
					goto exit;
			}
			/* Convert the action to forwarding to a VSI list. */
			new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
			new_fltr->fwd_id.vsi_list_id = vsi_list_id;
		}

		status = ice_create_pkt_fwd_rule(hw, f_entry);
		if (!status) {
			v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
							 new_fltr);
			if (!v_list_itr) {
				status = ICE_ERR_DOES_NOT_EXIST;
				goto exit;
			}
			/* reuse VSI list for new rule and increment ref_cnt */
			if (map_info) {
				v_list_itr->vsi_list_info = map_info;
				map_info->ref_cnt++;
			} else {
				v_list_itr->vsi_list_info =
					ice_create_vsi_list_map(hw, &vsi_handle,
								1, vsi_list_id);
			}
		}
	} else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
		/* Update the existing VSI list to add the new VSI ID only if
		 * it is used by exactly one VLAN rule.
		 */
		cur_fltr = &v_list_itr->fltr_info;
		status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
						 new_fltr);
	} else {
		/* The VLAN rule exists, but the VSI list it uses is referenced
		 * by more than one VLAN rule. Create a new VSI list that
		 * contains the previous VSI plus the new VSI, and update the
		 * existing VLAN rule to point to the new VSI list ID.
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];
		u16 cur_handle;

		/* The current implementation only supports reusing a VSI list
		 * that contains a single VSI; the condition below should never
		 * be hit.
		 */
		if (v_list_itr->vsi_count > 1 &&
		    v_list_itr->vsi_list_info->ref_cnt > 1) {
			ice_debug(hw, ICE_DBG_SW,
				  "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
			status = ICE_ERR_CFG;
			goto exit;
		}

		cur_handle =
			find_first_bit(v_list_itr->vsi_list_info->vsi_map,
				       ICE_MAX_VSI);

		/* A rule already exists with the new VSI being added */
		if (cur_handle == vsi_handle) {
			status = ICE_ERR_ALREADY_EXISTS;
			goto exit;
		}

		vsi_handle_arr[0] = cur_handle;
		vsi_handle_arr[1] = vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id, lkup_type);
		if (status)
			goto exit;

		tmp_fltr = v_list_itr->fltr_info;
		tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		/* Update the previous switch rule to point to the new VSI list
		 * that includes the currently requested VSI.
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			goto exit;

		/* Before overriding the VSI list map info, decrement the
		 * ref_cnt of the previous VSI list.
		 */
		v_list_itr->vsi_list_info->ref_cnt--;

		/* now update to newly created list */
		v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
		v_list_itr->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);
		v_list_itr->vsi_count++;
	}

exit:
	mutex_unlock(rule_lock);
	return status;
}

/**
 * ice_add_vlan - Add VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @v_list: list of VLAN entries and forwarding information
 */
enum ice_status
ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
{
	struct ice_fltr_list_entry *v_list_itr;

	if (!v_list || !hw)
		return ICE_ERR_PARAM;

	list_for_each_entry(v_list_itr, v_list, list_entry) {
		if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
			return ICE_ERR_PARAM;
		v_list_itr->fltr_info.flag = ICE_FLTR_TX;
		v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
		if (v_list_itr->status)
			return v_list_itr->status;
	}
	return 0;
}

/**
 * ice_add_eth_mac - Add ethertype and MAC based filter rule
 * @hw: pointer to the hardware structure
 * @em_list: list of ethertype and MAC filters; the MAC address is optional
 *
 * This function requires the caller to populate the entries in
 * the filter list with the necessary fields (including flags to
 * indicate Tx or Rx rules).
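 *
 * A minimal sketch of how one entry might be populated before calling this
 * function (illustrative only, not taken from an actual caller; it assumes
 * the l_data.ethertype_mac layout declared in ice_switch.h):
 *
 *	entry->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
 *	entry->fltr_info.flag = ICE_FLTR_TX;
 *	entry->fltr_info.src_id = ICE_SRC_ID_VSI;
 *	entry->fltr_info.fltr_act = ICE_FWD_TO_VSI;
 *	entry->fltr_info.vsi_handle = vsi_handle;
 *	entry->fltr_info.l_data.ethertype_mac.ethertype = ethertype;
 *	list_add(&entry->list_entry, em_list);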
1944 */ 1945 enum ice_status 1946 ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list) 1947 { 1948 struct ice_fltr_list_entry *em_list_itr; 1949 1950 if (!em_list || !hw) 1951 return ICE_ERR_PARAM; 1952 1953 list_for_each_entry(em_list_itr, em_list, list_entry) { 1954 enum ice_sw_lkup_type l_type = 1955 em_list_itr->fltr_info.lkup_type; 1956 1957 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC && 1958 l_type != ICE_SW_LKUP_ETHERTYPE) 1959 return ICE_ERR_PARAM; 1960 1961 em_list_itr->status = ice_add_rule_internal(hw, l_type, 1962 em_list_itr); 1963 if (em_list_itr->status) 1964 return em_list_itr->status; 1965 } 1966 return 0; 1967 } 1968 1969 /** 1970 * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule 1971 * @hw: pointer to the hardware structure 1972 * @em_list: list of ethertype or ethertype MAC entries 1973 */ 1974 enum ice_status 1975 ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list) 1976 { 1977 struct ice_fltr_list_entry *em_list_itr, *tmp; 1978 1979 if (!em_list || !hw) 1980 return ICE_ERR_PARAM; 1981 1982 list_for_each_entry_safe(em_list_itr, tmp, em_list, list_entry) { 1983 enum ice_sw_lkup_type l_type = 1984 em_list_itr->fltr_info.lkup_type; 1985 1986 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC && 1987 l_type != ICE_SW_LKUP_ETHERTYPE) 1988 return ICE_ERR_PARAM; 1989 1990 em_list_itr->status = ice_remove_rule_internal(hw, l_type, 1991 em_list_itr); 1992 if (em_list_itr->status) 1993 return em_list_itr->status; 1994 } 1995 return 0; 1996 } 1997 1998 /** 1999 * ice_rem_sw_rule_info 2000 * @hw: pointer to the hardware structure 2001 * @rule_head: pointer to the switch list structure that we want to delete 2002 */ 2003 static void 2004 ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head) 2005 { 2006 if (!list_empty(rule_head)) { 2007 struct ice_fltr_mgmt_list_entry *entry; 2008 struct ice_fltr_mgmt_list_entry *tmp; 2009 2010 list_for_each_entry_safe(entry, tmp, rule_head, list_entry) { 2011 list_del(&entry->list_entry); 2012 devm_kfree(ice_hw_to_dev(hw), entry); 2013 } 2014 } 2015 } 2016 2017 /** 2018 * ice_cfg_dflt_vsi - change state of VSI to set/clear default 2019 * @hw: pointer to the hardware structure 2020 * @vsi_handle: VSI handle to set as default 2021 * @set: true to add the above mentioned switch rule, false to remove it 2022 * @direction: ICE_FLTR_RX or ICE_FLTR_TX 2023 * 2024 * add filter rule to set/unset given VSI as default VSI for the switch 2025 * (represented by swid) 2026 */ 2027 enum ice_status 2028 ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction) 2029 { 2030 struct ice_aqc_sw_rules_elem *s_rule; 2031 struct ice_fltr_info f_info; 2032 enum ice_adminq_opc opcode; 2033 enum ice_status status; 2034 u16 s_rule_size; 2035 u16 hw_vsi_id; 2036 2037 if (!ice_is_vsi_valid(hw, vsi_handle)) 2038 return ICE_ERR_PARAM; 2039 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); 2040 2041 s_rule_size = set ? 
ICE_SW_RULE_RX_TX_ETH_HDR_SIZE : 2042 ICE_SW_RULE_RX_TX_NO_HDR_SIZE; 2043 s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL); 2044 if (!s_rule) 2045 return ICE_ERR_NO_MEMORY; 2046 2047 memset(&f_info, 0, sizeof(f_info)); 2048 2049 f_info.lkup_type = ICE_SW_LKUP_DFLT; 2050 f_info.flag = direction; 2051 f_info.fltr_act = ICE_FWD_TO_VSI; 2052 f_info.fwd_id.hw_vsi_id = hw_vsi_id; 2053 2054 if (f_info.flag & ICE_FLTR_RX) { 2055 f_info.src = hw->port_info->lport; 2056 f_info.src_id = ICE_SRC_ID_LPORT; 2057 if (!set) 2058 f_info.fltr_rule_id = 2059 hw->port_info->dflt_rx_vsi_rule_id; 2060 } else if (f_info.flag & ICE_FLTR_TX) { 2061 f_info.src_id = ICE_SRC_ID_VSI; 2062 f_info.src = hw_vsi_id; 2063 if (!set) 2064 f_info.fltr_rule_id = 2065 hw->port_info->dflt_tx_vsi_rule_id; 2066 } 2067 2068 if (set) 2069 opcode = ice_aqc_opc_add_sw_rules; 2070 else 2071 opcode = ice_aqc_opc_remove_sw_rules; 2072 2073 ice_fill_sw_rule(hw, &f_info, s_rule, opcode); 2074 2075 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL); 2076 if (status || !(f_info.flag & ICE_FLTR_TX_RX)) 2077 goto out; 2078 if (set) { 2079 u16 index = le16_to_cpu(s_rule->pdata.lkup_tx_rx.index); 2080 2081 if (f_info.flag & ICE_FLTR_TX) { 2082 hw->port_info->dflt_tx_vsi_num = hw_vsi_id; 2083 hw->port_info->dflt_tx_vsi_rule_id = index; 2084 } else if (f_info.flag & ICE_FLTR_RX) { 2085 hw->port_info->dflt_rx_vsi_num = hw_vsi_id; 2086 hw->port_info->dflt_rx_vsi_rule_id = index; 2087 } 2088 } else { 2089 if (f_info.flag & ICE_FLTR_TX) { 2090 hw->port_info->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL; 2091 hw->port_info->dflt_tx_vsi_rule_id = ICE_INVAL_ACT; 2092 } else if (f_info.flag & ICE_FLTR_RX) { 2093 hw->port_info->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL; 2094 hw->port_info->dflt_rx_vsi_rule_id = ICE_INVAL_ACT; 2095 } 2096 } 2097 2098 out: 2099 devm_kfree(ice_hw_to_dev(hw), s_rule); 2100 return status; 2101 } 2102 2103 /** 2104 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry 2105 * @hw: pointer to the hardware structure 2106 * @recp_id: lookup type for which the specified rule needs to be searched 2107 * @f_info: rule information 2108 * 2109 * Helper function to search for a unicast rule entry - this is to be used 2110 * to remove unicast MAC filter that is not shared with other VSIs on the 2111 * PF switch. 2112 * 2113 * Returns pointer to entry storing the rule if found 2114 */ 2115 static struct ice_fltr_mgmt_list_entry * 2116 ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id, 2117 struct ice_fltr_info *f_info) 2118 { 2119 struct ice_switch_info *sw = hw->switch_info; 2120 struct ice_fltr_mgmt_list_entry *list_itr; 2121 struct list_head *list_head; 2122 2123 list_head = &sw->recp_list[recp_id].filt_rules; 2124 list_for_each_entry(list_itr, list_head, list_entry) { 2125 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data, 2126 sizeof(f_info->l_data)) && 2127 f_info->fwd_id.hw_vsi_id == 2128 list_itr->fltr_info.fwd_id.hw_vsi_id && 2129 f_info->flag == list_itr->fltr_info.flag) 2130 return list_itr; 2131 } 2132 return NULL; 2133 } 2134 2135 /** 2136 * ice_remove_mac - remove a MAC address based filter rule 2137 * @hw: pointer to the hardware structure 2138 * @m_list: list of MAC addresses and forwarding information 2139 * 2140 * This function removes either a MAC filter rule or a specific VSI from a 2141 * VSI list for a multicast MAC address. 2142 * 2143 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by 2144 * ice_add_mac. 
Caller should be aware that this call will only work if all 2145 * the entries passed into m_list were added previously. It will not attempt to 2146 * do a partial remove of entries that were found. 2147 */ 2148 enum ice_status 2149 ice_remove_mac(struct ice_hw *hw, struct list_head *m_list) 2150 { 2151 struct ice_fltr_list_entry *list_itr, *tmp; 2152 struct mutex *rule_lock; /* Lock to protect filter rule list */ 2153 2154 if (!m_list) 2155 return ICE_ERR_PARAM; 2156 2157 rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock; 2158 list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) { 2159 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type; 2160 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0]; 2161 u16 vsi_handle; 2162 2163 if (l_type != ICE_SW_LKUP_MAC) 2164 return ICE_ERR_PARAM; 2165 2166 vsi_handle = list_itr->fltr_info.vsi_handle; 2167 if (!ice_is_vsi_valid(hw, vsi_handle)) 2168 return ICE_ERR_PARAM; 2169 2170 list_itr->fltr_info.fwd_id.hw_vsi_id = 2171 ice_get_hw_vsi_num(hw, vsi_handle); 2172 if (is_unicast_ether_addr(add) && !hw->ucast_shared) { 2173 /* Don't remove the unicast address that belongs to 2174 * another VSI on the switch, since it is not being 2175 * shared... 2176 */ 2177 mutex_lock(rule_lock); 2178 if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC, 2179 &list_itr->fltr_info)) { 2180 mutex_unlock(rule_lock); 2181 return ICE_ERR_DOES_NOT_EXIST; 2182 } 2183 mutex_unlock(rule_lock); 2184 } 2185 list_itr->status = ice_remove_rule_internal(hw, 2186 ICE_SW_LKUP_MAC, 2187 list_itr); 2188 if (list_itr->status) 2189 return list_itr->status; 2190 } 2191 return 0; 2192 } 2193 2194 /** 2195 * ice_remove_vlan - Remove VLAN based filter rule 2196 * @hw: pointer to the hardware structure 2197 * @v_list: list of VLAN entries and forwarding information 2198 */ 2199 enum ice_status 2200 ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list) 2201 { 2202 struct ice_fltr_list_entry *v_list_itr, *tmp; 2203 2204 if (!v_list || !hw) 2205 return ICE_ERR_PARAM; 2206 2207 list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) { 2208 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type; 2209 2210 if (l_type != ICE_SW_LKUP_VLAN) 2211 return ICE_ERR_PARAM; 2212 v_list_itr->status = ice_remove_rule_internal(hw, 2213 ICE_SW_LKUP_VLAN, 2214 v_list_itr); 2215 if (v_list_itr->status) 2216 return v_list_itr->status; 2217 } 2218 return 0; 2219 } 2220 2221 /** 2222 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter 2223 * @fm_entry: filter entry to inspect 2224 * @vsi_handle: VSI handle to compare with filter info 2225 */ 2226 static bool 2227 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle) 2228 { 2229 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI && 2230 fm_entry->fltr_info.vsi_handle == vsi_handle) || 2231 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST && 2232 (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map)))); 2233 } 2234 2235 /** 2236 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list 2237 * @hw: pointer to the hardware structure 2238 * @vsi_handle: VSI handle to remove filters from 2239 * @vsi_list_head: pointer to the list to add entry to 2240 * @fi: pointer to fltr_info of filter entry to copy & add 2241 * 2242 * Helper function, used when creating a list of filters to remove from 2243 * a specific VSI. 
The entry added to vsi_list_head is a COPY of the
 * original filter entry, with the exception of fltr_info.fltr_act and
 * fltr_info.fwd_id fields. These are set such that later logic can
 * extract which VSI to remove the fltr from, and pass on that information.
 */
static enum ice_status
ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
			       struct list_head *vsi_list_head,
			       struct ice_fltr_info *fi)
{
	struct ice_fltr_list_entry *tmp;

	/* this memory is freed up in the caller function
	 * once filters for this VSI are removed
	 */
	tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return ICE_ERR_NO_MEMORY;

	tmp->fltr_info = *fi;

	/* Overwrite these fields to indicate which VSI to remove filter from,
	 * so find and remove logic can extract the information from the
	 * list entries. Note that original entries will still have proper
	 * values.
	 */
	tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	tmp->fltr_info.vsi_handle = vsi_handle;
	tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	list_add(&tmp->list_entry, vsi_list_head);

	return 0;
}

/**
 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @lkup_list_head: pointer to the list that has certain lookup type filters
 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
 *
 * Locates all filters in lkup_list_head that are used by the given VSI,
 * and adds COPIES of those entries to vsi_list_head (intended to be used
 * to remove the listed filters).
 * Note that this means all entries in vsi_list_head must be explicitly
 * deallocated by the caller when done with the list.
 */
static enum ice_status
ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
			 struct list_head *lkup_list_head,
			 struct list_head *vsi_list_head)
{
	struct ice_fltr_mgmt_list_entry *fm_entry;
	enum ice_status status = 0;

	/* check to make sure VSI ID is valid and within boundary */
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
		struct ice_fltr_info *fi;

		fi = &fm_entry->fltr_info;
		if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
			continue;

		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
							vsi_list_head, fi);
		if (status)
			return status;
	}
	return status;
}

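/* Usage sketch for the two helpers above (illustrative; it mirrors the
 * pattern used by ice_clear_vsi_promisc() and ice_remove_vsi_lkup_fltr()
 * later in this file): collect copies under the recipe's filt_rule_lock,
 * drop the lock, remove the rules, then free every copied entry.
 *
 *	INIT_LIST_HEAD(&remove_list_head);
 *	mutex_lock(rule_lock);
 *	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
 *					  &remove_list_head);
 *	mutex_unlock(rule_lock);
 *	(issue the removals for the entries on remove_list_head)
 *	list_for_each_entry_safe(entry, tmp, &remove_list_head, list_entry) {
 *		list_del(&entry->list_entry);
 *		devm_kfree(ice_hw_to_dev(hw), entry);
 *	}
 */
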
/**
 * ice_determine_promisc_mask
 * @fi: filter info to parse
 *
 * Helper function to determine which ICE_PROMISC_ mask corresponds
 * to a given filter info.
 */
static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
{
	u16 vid = fi->l_data.mac_vlan.vlan_id;
	u8 *macaddr = fi->l_data.mac.mac_addr;
	bool is_tx_fltr = false;
	u8 promisc_mask = 0;

	if (fi->flag == ICE_FLTR_TX)
		is_tx_fltr = true;

	if (is_broadcast_ether_addr(macaddr))
		promisc_mask |= is_tx_fltr ?
			ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
	else if (is_multicast_ether_addr(macaddr))
		promisc_mask |= is_tx_fltr ?
			ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
	else if (is_unicast_ether_addr(macaddr))
		promisc_mask |= is_tx_fltr ?
			ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
	if (vid)
		promisc_mask |= is_tx_fltr ?
			ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;

	return promisc_mask;
}

/**
 * ice_remove_promisc - Remove promisc based filter rules
 * @hw: pointer to the hardware structure
 * @recp_id: recipe ID for which the rule needs to be removed
 * @v_list: list of promisc entries
 */
static enum ice_status
ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
		   struct list_head *v_list)
{
	struct ice_fltr_list_entry *v_list_itr, *tmp;

	list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
		v_list_itr->status =
			ice_remove_rule_internal(hw, recp_id, v_list_itr);
		if (v_list_itr->status)
			return v_list_itr->status;
	}
	return 0;
}

/**
 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to clear mode
 * @promisc_mask: mask of promiscuous config bits to clear
 * @vid: VLAN ID to clear VLAN promiscuous
 */
enum ice_status
ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
		      u16 vid)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *fm_entry, *tmp;
	struct list_head remove_list_head;
	struct ice_fltr_mgmt_list_entry *itr;
	struct list_head *rule_head;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = 0;
	u8 recipe_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
	else
		recipe_id = ICE_SW_LKUP_PROMISC;

	rule_head = &sw->recp_list[recipe_id].filt_rules;
	rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;

	INIT_LIST_HEAD(&remove_list_head);

	mutex_lock(rule_lock);
	list_for_each_entry(itr, rule_head, list_entry) {
		struct ice_fltr_info *fltr_info;
		u8 fltr_promisc_mask = 0;

		if (!ice_vsi_uses_fltr(itr, vsi_handle))
			continue;
		fltr_info = &itr->fltr_info;

		if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
		    vid != fltr_info->l_data.mac_vlan.vlan_id)
			continue;

		fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);

		/* Skip if filter is not completely specified by given mask */
		if (fltr_promisc_mask & ~promisc_mask)
			continue;

		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
							&remove_list_head,
							fltr_info);
		if (status) {
			mutex_unlock(rule_lock);
			goto free_fltr_list;
		}
	}
	mutex_unlock(rule_lock);

	status = ice_remove_promisc(hw, recipe_id, &remove_list_head);

free_fltr_list:
	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
		list_del(&fm_entry->list_entry);
		devm_kfree(ice_hw_to_dev(hw), fm_entry);
	}

	return status;
}

/**
 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: mask of promiscuous config bits
 * @vid: VLAN ID to set VLAN promiscuous
 */
enum ice_status
ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
{
	enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR
}; 2456 struct ice_fltr_list_entry f_list_entry; 2457 struct ice_fltr_info new_fltr; 2458 enum ice_status status = 0; 2459 bool is_tx_fltr; 2460 u16 hw_vsi_id; 2461 int pkt_type; 2462 u8 recipe_id; 2463 2464 if (!ice_is_vsi_valid(hw, vsi_handle)) 2465 return ICE_ERR_PARAM; 2466 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); 2467 2468 memset(&new_fltr, 0, sizeof(new_fltr)); 2469 2470 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) { 2471 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN; 2472 new_fltr.l_data.mac_vlan.vlan_id = vid; 2473 recipe_id = ICE_SW_LKUP_PROMISC_VLAN; 2474 } else { 2475 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC; 2476 recipe_id = ICE_SW_LKUP_PROMISC; 2477 } 2478 2479 /* Separate filters must be set for each direction/packet type 2480 * combination, so we will loop over the mask value, store the 2481 * individual type, and clear it out in the input mask as it 2482 * is found. 2483 */ 2484 while (promisc_mask) { 2485 u8 *mac_addr; 2486 2487 pkt_type = 0; 2488 is_tx_fltr = false; 2489 2490 if (promisc_mask & ICE_PROMISC_UCAST_RX) { 2491 promisc_mask &= ~ICE_PROMISC_UCAST_RX; 2492 pkt_type = UCAST_FLTR; 2493 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) { 2494 promisc_mask &= ~ICE_PROMISC_UCAST_TX; 2495 pkt_type = UCAST_FLTR; 2496 is_tx_fltr = true; 2497 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) { 2498 promisc_mask &= ~ICE_PROMISC_MCAST_RX; 2499 pkt_type = MCAST_FLTR; 2500 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) { 2501 promisc_mask &= ~ICE_PROMISC_MCAST_TX; 2502 pkt_type = MCAST_FLTR; 2503 is_tx_fltr = true; 2504 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) { 2505 promisc_mask &= ~ICE_PROMISC_BCAST_RX; 2506 pkt_type = BCAST_FLTR; 2507 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) { 2508 promisc_mask &= ~ICE_PROMISC_BCAST_TX; 2509 pkt_type = BCAST_FLTR; 2510 is_tx_fltr = true; 2511 } 2512 2513 /* Check for VLAN promiscuous flag */ 2514 if (promisc_mask & ICE_PROMISC_VLAN_RX) { 2515 promisc_mask &= ~ICE_PROMISC_VLAN_RX; 2516 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) { 2517 promisc_mask &= ~ICE_PROMISC_VLAN_TX; 2518 is_tx_fltr = true; 2519 } 2520 2521 /* Set filter DA based on packet type */ 2522 mac_addr = new_fltr.l_data.mac.mac_addr; 2523 if (pkt_type == BCAST_FLTR) { 2524 eth_broadcast_addr(mac_addr); 2525 } else if (pkt_type == MCAST_FLTR || 2526 pkt_type == UCAST_FLTR) { 2527 /* Use the dummy ether header DA */ 2528 ether_addr_copy(mac_addr, dummy_eth_header); 2529 if (pkt_type == MCAST_FLTR) 2530 mac_addr[0] |= 0x1; /* Set multicast bit */ 2531 } 2532 2533 /* Need to reset this to zero for all iterations */ 2534 new_fltr.flag = 0; 2535 if (is_tx_fltr) { 2536 new_fltr.flag |= ICE_FLTR_TX; 2537 new_fltr.src = hw_vsi_id; 2538 } else { 2539 new_fltr.flag |= ICE_FLTR_RX; 2540 new_fltr.src = hw->port_info->lport; 2541 } 2542 2543 new_fltr.fltr_act = ICE_FWD_TO_VSI; 2544 new_fltr.vsi_handle = vsi_handle; 2545 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id; 2546 f_list_entry.fltr_info = new_fltr; 2547 2548 status = ice_add_rule_internal(hw, recipe_id, &f_list_entry); 2549 if (status) 2550 goto set_promisc_exit; 2551 } 2552 2553 set_promisc_exit: 2554 return status; 2555 } 2556 2557 /** 2558 * ice_set_vlan_vsi_promisc 2559 * @hw: pointer to the hardware structure 2560 * @vsi_handle: VSI handle to configure 2561 * @promisc_mask: mask of promiscuous config bits 2562 * @rm_vlan_promisc: Clear VLANs VSI promisc mode 2563 * 2564 * Configure VSI with all associated VLANs to given promiscuous mode(s) 2565 */ 2566 enum ice_status 2567 
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, 2568 bool rm_vlan_promisc) 2569 { 2570 struct ice_switch_info *sw = hw->switch_info; 2571 struct ice_fltr_list_entry *list_itr, *tmp; 2572 struct list_head vsi_list_head; 2573 struct list_head *vlan_head; 2574 struct mutex *vlan_lock; /* Lock to protect filter rule list */ 2575 enum ice_status status; 2576 u16 vlan_id; 2577 2578 INIT_LIST_HEAD(&vsi_list_head); 2579 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock; 2580 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules; 2581 mutex_lock(vlan_lock); 2582 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head, 2583 &vsi_list_head); 2584 mutex_unlock(vlan_lock); 2585 if (status) 2586 goto free_fltr_list; 2587 2588 list_for_each_entry(list_itr, &vsi_list_head, list_entry) { 2589 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id; 2590 if (rm_vlan_promisc) 2591 status = ice_clear_vsi_promisc(hw, vsi_handle, 2592 promisc_mask, vlan_id); 2593 else 2594 status = ice_set_vsi_promisc(hw, vsi_handle, 2595 promisc_mask, vlan_id); 2596 if (status) 2597 break; 2598 } 2599 2600 free_fltr_list: 2601 list_for_each_entry_safe(list_itr, tmp, &vsi_list_head, list_entry) { 2602 list_del(&list_itr->list_entry); 2603 devm_kfree(ice_hw_to_dev(hw), list_itr); 2604 } 2605 return status; 2606 } 2607 2608 /** 2609 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI 2610 * @hw: pointer to the hardware structure 2611 * @vsi_handle: VSI handle to remove filters from 2612 * @lkup: switch rule filter lookup type 2613 */ 2614 static void 2615 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle, 2616 enum ice_sw_lkup_type lkup) 2617 { 2618 struct ice_switch_info *sw = hw->switch_info; 2619 struct ice_fltr_list_entry *fm_entry; 2620 struct list_head remove_list_head; 2621 struct list_head *rule_head; 2622 struct ice_fltr_list_entry *tmp; 2623 struct mutex *rule_lock; /* Lock to protect filter rule list */ 2624 enum ice_status status; 2625 2626 INIT_LIST_HEAD(&remove_list_head); 2627 rule_lock = &sw->recp_list[lkup].filt_rule_lock; 2628 rule_head = &sw->recp_list[lkup].filt_rules; 2629 mutex_lock(rule_lock); 2630 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head, 2631 &remove_list_head); 2632 mutex_unlock(rule_lock); 2633 if (status) 2634 return; 2635 2636 switch (lkup) { 2637 case ICE_SW_LKUP_MAC: 2638 ice_remove_mac(hw, &remove_list_head); 2639 break; 2640 case ICE_SW_LKUP_VLAN: 2641 ice_remove_vlan(hw, &remove_list_head); 2642 break; 2643 case ICE_SW_LKUP_PROMISC: 2644 case ICE_SW_LKUP_PROMISC_VLAN: 2645 ice_remove_promisc(hw, lkup, &remove_list_head); 2646 break; 2647 case ICE_SW_LKUP_MAC_VLAN: 2648 case ICE_SW_LKUP_ETHERTYPE: 2649 case ICE_SW_LKUP_ETHERTYPE_MAC: 2650 case ICE_SW_LKUP_DFLT: 2651 case ICE_SW_LKUP_LAST: 2652 default: 2653 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup); 2654 break; 2655 } 2656 2657 list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) { 2658 list_del(&fm_entry->list_entry); 2659 devm_kfree(ice_hw_to_dev(hw), fm_entry); 2660 } 2661 } 2662 2663 /** 2664 * ice_remove_vsi_fltr - Remove all filters for a VSI 2665 * @hw: pointer to the hardware structure 2666 * @vsi_handle: VSI handle to remove filters from 2667 */ 2668 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle) 2669 { 2670 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC); 2671 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN); 2672 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC); 2673 
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
}

/**
 * ice_replay_vsi_fltr - Replay filters for requested VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: driver VSI handle
 * @recp_id: Recipe ID for which rules need to be replayed
 * @list_head: list for which filters need to be replayed
 *
 * Replays the filters of recipe recp_id for a VSI represented via vsi_handle.
 * A valid VSI handle must be passed.
 */
static enum ice_status
ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
		    struct list_head *list_head)
{
	struct ice_fltr_mgmt_list_entry *itr;
	enum ice_status status = 0;
	u16 hw_vsi_id;

	if (list_empty(list_head))
		return status;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	list_for_each_entry(itr, list_head, list_entry) {
		struct ice_fltr_list_entry f_entry;

		f_entry.fltr_info = itr->fltr_info;
		if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
		    itr->fltr_info.vsi_handle == vsi_handle) {
			/* update the src in case it is VSI num */
			if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
				f_entry.fltr_info.src = hw_vsi_id;
			status = ice_add_rule_internal(hw, recp_id, &f_entry);
			if (status)
				goto end;
			continue;
		}
		if (!itr->vsi_list_info ||
		    !test_bit(vsi_handle, itr->vsi_list_info->vsi_map))
			continue;
		/* Clearing it so that the logic can add it back */
		clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
		f_entry.fltr_info.vsi_handle = vsi_handle;
		f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
		/* update the src in case it is VSI num */
		if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
			f_entry.fltr_info.src = hw_vsi_id;
		if (recp_id == ICE_SW_LKUP_VLAN)
			status = ice_add_vlan_internal(hw, &f_entry);
		else
			status = ice_add_rule_internal(hw, recp_id, &f_entry);
		if (status)
			goto end;
	}
end:
	return status;
}

/**
 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
 * @hw: pointer to the hardware structure
 * @vsi_handle: driver VSI handle
 *
 * Replays filters for requested VSI via vsi_handle.
 */
enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_switch_info *sw = hw->switch_info;
	enum ice_status status = 0;
	u8 i;

	for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
		struct list_head *head;

		head = &sw->recp_list[i].filt_replay_rules;
		status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
		if (status)
			return status;
	}
	return status;
}

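/* Replay flow sketch (illustrative; the actual call sites are outside this
 * file): after a reset, a caller would typically invoke
 * ice_replay_vsi_all_fltr() once per active VSI handle to re-program the
 * saved rules, and later call ice_rm_all_sw_replay_rule_info() once the
 * replay bookkeeping is no longer needed.
 */
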
/**
 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
 * @hw: pointer to the HW struct
 *
 * Deletes the filter replay rules.
 */
void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	u8 i;

	if (!sw)
		return;

	for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
		if (!list_empty(&sw->recp_list[i].filt_replay_rules)) {
			struct list_head *l_head;

			l_head = &sw->recp_list[i].filt_replay_rules;
			ice_rem_sw_rule_info(hw, l_head);
		}
	}
}
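
/* Example (illustrative only, not part of the driver): a minimal sketch of
 * how a caller might add a single unicast MAC forwarding rule with
 * ice_add_mac(), based on the checks that function performs above
 * (ICE_SW_LKUP_MAC lookup, ICE_SRC_ID_VSI source, non-zero address):
 *
 *	struct ice_fltr_list_entry *entry;
 *	LIST_HEAD(m_list);
 *
 *	entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*entry), GFP_KERNEL);
 *	if (!entry)
 *		return ICE_ERR_NO_MEMORY;
 *	entry->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
 *	entry->fltr_info.src_id = ICE_SRC_ID_VSI;
 *	entry->fltr_info.fltr_act = ICE_FWD_TO_VSI;
 *	entry->fltr_info.vsi_handle = vsi_handle;
 *	ether_addr_copy(entry->fltr_info.l_data.mac.mac_addr, mac);
 *	list_add(&entry->list_entry, &m_list);
 *
 *	status = ice_add_mac(hw, &m_list);
 */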