// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_switch.h"

#define ICE_ETH_DA_OFFSET		0
#define ICE_ETH_ETHTYPE_OFFSET		12
#define ICE_ETH_VLAN_TCI_OFFSET		14
#define ICE_MAX_VLAN_ID			0xFFF

/* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
 * struct to configure any switch filter rules.
 * {DA (6 bytes), SA(6 bytes),
 * Ether type (2 bytes for header without VLAN tag) OR
 * VLAN tag (4 bytes for header with VLAN tag) }
 *
 * Word on Hardcoded values
 * byte 0 = 0x2: to identify it as locally administered DA MAC
 * byte 6 = 0x2: to identify it as locally administered SA MAC
 * byte 12 = 0x81 & byte 13 = 0x00:
 *	In case of VLAN filter the first two bytes define the ether type
 *	(0x8100) and the remaining two bytes are a placeholder for programming
 *	a given VLAN ID.
 *	In case of Ether type filter it is treated as header without VLAN tag
 *	and bytes 12 and 13 are used to program a given Ether type instead.
 */
#define DUMMY_ETH_HDR_LEN		16
static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
							0x2, 0, 0, 0, 0, 0,
							0x81, 0, 0, 0};

#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
	(offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr) + \
	 (DUMMY_ETH_HDR_LEN * \
	  sizeof(((struct ice_sw_rule_lkup_rx_tx *)0)->hdr[0])))
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
	(offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr))
#define ICE_SW_RULE_LG_ACT_SIZE(n) \
	(offsetof(struct ice_aqc_sw_rules_elem, pdata.lg_act.act) + \
	 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act[0])))
#define ICE_SW_RULE_VSI_LIST_SIZE(n) \
	(offsetof(struct ice_aqc_sw_rules_elem, pdata.vsi_list.vsi) + \
	 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi[0])))

/**
 * ice_init_def_sw_recp - initialize the recipe book keeping tables
 * @hw: pointer to the HW struct
 *
 * Allocate memory for the entire recipe table and initialize the structures/
 * entries corresponding to basic recipes.
 */
enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
{
	struct ice_sw_recipe *recps;
	u8 i;

	recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES,
			     sizeof(*recps), GFP_KERNEL);
	if (!recps)
		return ICE_ERR_NO_MEMORY;

	for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
		recps[i].root_rid = i;
		INIT_LIST_HEAD(&recps[i].filt_rules);
		INIT_LIST_HEAD(&recps[i].filt_replay_rules);
		mutex_init(&recps[i].filt_rule_lock);
	}

	hw->switch_info->recp_list = recps;

	return 0;
}

/**
 * ice_aq_get_sw_cfg - get switch configuration
 * @hw: pointer to the hardware structure
 * @buf: pointer to the result buffer
 * @buf_size: length of the buffer available for response
 * @req_desc: pointer to requested descriptor
 * @num_elems: pointer to number of elements
 * @cd: pointer to command details structure or NULL
 *
 * Get switch configuration (0x0200) to be placed in buf.
 * This admin command returns information such as initial VSI/port number
 * and switch ID it belongs to.
 *
 * NOTE: *req_desc is both an input/output parameter.
 * The caller of this function first calls this function with *req_desc set
 * to 0. If the response from f/w has *req_desc set to 0, all the switch
 * configuration information has been returned; if non-zero (meaning not all
 * the information was returned), the caller should call this function again
 * with *req_desc set to the previous value returned by f/w to get the
 * next block of switch configuration information.
 *
 * *num_elems is an output-only parameter. It reflects the number of elements
 * in the response buffer. The caller should use *num_elems while parsing the
 * response buffer.
 */
static enum ice_status
ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
		  u16 buf_size, u16 *req_desc, u16 *num_elems,
		  struct ice_sq_cd *cd)
{
	struct ice_aqc_get_sw_cfg *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
	cmd = &desc.params.get_sw_conf;
	cmd->element = cpu_to_le16(*req_desc);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status) {
		*req_desc = le16_to_cpu(cmd->element);
		*num_elems = le16_to_cpu(cmd->num_elems);
	}

	return status;
}

/**
 * ice_aq_add_vsi
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Add a VSI context to the hardware (0x0210)
 */
static enum ice_status
ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_add_update_free_vsi_resp *res;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.vsi_cmd;
	res = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);

	if (!vsi_ctx->alloc_from_pool)
		cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |
					   ICE_AQ_VSI_IS_VALID);
	cmd->vf_id = vsi_ctx->vf_num;

	cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
				 sizeof(vsi_ctx->info), cd);

	if (!status) {
		vsi_ctx->vsi_num = le16_to_cpu(res->vsi_num) & ICE_AQ_VSI_NUM_M;
		vsi_ctx->vsis_allocd = le16_to_cpu(res->vsi_used);
		vsi_ctx->vsis_unallocated = le16_to_cpu(res->vsi_free);
	}

	return status;
}

/**
 * ice_aq_free_vsi
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
 * @cd: pointer to command details structure or NULL
 *
 * Free VSI context info from hardware (0x0213)
 */
static enum ice_status
ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		bool keep_vsi_alloc, struct ice_sq_cd *cd)
{
	struct ice_aqc_add_update_free_vsi_resp *resp;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.vsi_cmd;
	resp = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);

	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
	if (keep_vsi_alloc)
		cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (!status) {
		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
	}

	return status;
}

/**
 * ice_aq_update_vsi
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Update VSI context in the hardware (0x0211)
 */
static enum ice_status
ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		  struct ice_sq_cd *cd)
{
	struct ice_aqc_add_update_free_vsi_resp *resp;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.vsi_cmd;
	resp = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);

	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
				 sizeof(vsi_ctx->info), cd);

	if (!status) {
		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
	}

	return status;
}

/**
 * ice_is_vsi_valid - check whether the VSI is valid or not
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * check whether the VSI is valid or not
 */
bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
{
	return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
}

/**
 * ice_get_hw_vsi_num - return the HW VSI number
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * return the HW VSI number
 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
 */
u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
{
	return hw->vsi_ctx[vsi_handle]->vsi_num;
}

/**
 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * return the VSI context entry for a given VSI handle
 */
struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
{
	return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
}

/**
 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 * @vsi: VSI context pointer
 *
 * save the VSI context entry for a given VSI handle
 */
static void
ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
{
	hw->vsi_ctx[vsi_handle] = vsi;
}

/**
 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 */
static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_vsi_ctx *vsi;
	u8 i;

	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi)
		return;
	ice_for_each_traffic_class(i) {
		if (vsi->lan_q_ctx[i]) {
			devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]);
			vsi->lan_q_ctx[i] = NULL;
		}
	}
}

/**
 * ice_clear_vsi_ctx - clear the VSI context entry
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * clear the VSI context entry
 */
static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_vsi_ctx *vsi;

	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	if (vsi) {
		ice_clear_vsi_q_ctx(hw, vsi_handle);
		devm_kfree(ice_hw_to_dev(hw), vsi);
		hw->vsi_ctx[vsi_handle] = NULL;
	}
}

/**
 * ice_clear_all_vsi_ctx - clear all the VSI context entries
 * @hw: pointer to the HW struct
 */
void ice_clear_all_vsi_ctx(struct ice_hw *hw)
{
	u16 i;

	for (i = 0; i < ICE_MAX_VSI; i++)
		ice_clear_vsi_ctx(hw, i);
}

/**
 * ice_add_vsi - add VSI context to the hardware and VSI handle list
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle provided by drivers
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Add a VSI context to the hardware also add it into the VSI handle list.
 * If this function gets called after reset for existing VSIs then update
 * with the new HW VSI number in the corresponding VSI handle list entry.
 */
enum ice_status
ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	    struct ice_sq_cd *cd)
{
	struct ice_vsi_ctx *tmp_vsi_ctx;
	enum ice_status status;

	if (vsi_handle >= ICE_MAX_VSI)
		return ICE_ERR_PARAM;
	status = ice_aq_add_vsi(hw, vsi_ctx, cd);
	if (status)
		return status;
	tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!tmp_vsi_ctx) {
		/* Create a new VSI context */
		tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw),
					   sizeof(*tmp_vsi_ctx), GFP_KERNEL);
		if (!tmp_vsi_ctx) {
			ice_aq_free_vsi(hw, vsi_ctx, false, cd);
			return ICE_ERR_NO_MEMORY;
		}
		*tmp_vsi_ctx = *vsi_ctx;
		ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
	} else {
		/* update with new HW VSI num */
		tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
	}

	return 0;
}

/**
 * ice_free_vsi - free VSI context from hardware and VSI handle list
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle
 * @vsi_ctx: pointer to a VSI context struct
 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
 * @cd: pointer to command details structure or NULL
 *
 * Free VSI context info from hardware as well as from VSI handle list
 */
enum ice_status
ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	     bool keep_vsi_alloc, struct ice_sq_cd *cd)
{
	enum ice_status status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
	status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
	if (!status)
		ice_clear_vsi_ctx(hw, vsi_handle);
	return status;
}

/**
 * ice_update_vsi
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Update VSI context in the hardware
 */
enum ice_status
ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	       struct ice_sq_cd *cd)
{
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
	return ice_aq_update_vsi(hw, vsi_ctx, cd);
}

/**
 * ice_aq_alloc_free_vsi_list
 * @hw: pointer to the HW struct
 * @vsi_list_id: VSI list ID returned or used for lookup
 * @lkup_type: switch rule filter lookup type
 * @opc: switch rules population command type - pass in the command opcode
 *
 * allocates or frees a VSI list resource
 */
static enum ice_status
ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
			   enum ice_sw_lkup_type lkup_type,
			   enum ice_adminq_opc opc)
{
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	struct ice_aqc_res_elem *vsi_ele;
	enum ice_status status;
	u16 buf_len;

	buf_len = struct_size(sw_buf, elem, 1);
	sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
	if (!sw_buf)
		return ICE_ERR_NO_MEMORY;
	sw_buf->num_elems = cpu_to_le16(1);

	if (lkup_type == ICE_SW_LKUP_MAC ||
	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
	    lkup_type == ICE_SW_LKUP_PROMISC ||
	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN) {
		sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
	} else if (lkup_type == ICE_SW_LKUP_VLAN) {
		sw_buf->res_type =
			cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
	} else {
		status = ICE_ERR_PARAM;
		goto ice_aq_alloc_free_vsi_list_exit;
	}

	if (opc == ice_aqc_opc_free_res)
		sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id);

	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
	if (status)
		goto ice_aq_alloc_free_vsi_list_exit;

	if (opc == ice_aqc_opc_alloc_res) {
		vsi_ele = &sw_buf->elem[0];
		*vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp);
	}

ice_aq_alloc_free_vsi_list_exit:
	devm_kfree(ice_hw_to_dev(hw), sw_buf);
	return status;
}

/**
 * ice_aq_sw_rules - add/update/remove switch rules
 * @hw: pointer to the HW struct
 * @rule_list: pointer to switch rule population list
 * @rule_list_sz: total size of the rule list in bytes
 * @num_rules: number of switch rules in the rule_list
 * @opc: switch rules population command type - pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
 */
static enum ice_status
ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
		u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;
	enum ice_status status;

	if (opc != ice_aqc_opc_add_sw_rules &&
	    opc != ice_aqc_opc_update_sw_rules &&
	    opc != ice_aqc_opc_remove_sw_rules)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	desc.params.sw_rules.num_rules_fltr_entry_index =
		cpu_to_le16(num_rules);
	status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
	if (opc != ice_aqc_opc_add_sw_rules &&
	    hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
		status = ICE_ERR_DOES_NOT_EXIST;

	return status;
}

/* ice_init_port_info - Initialize port_info with switch configuration data
 * @pi: pointer to port_info
 * @vsi_port_num: VSI number or port number
 * @type: Type of switch element (port or VSI)
 * @swid: switch ID of the switch the element is attached to
 * @pf_vf_num: PF or VF number
 * @is_vf: true if the element is a VF, false otherwise
 */
static void
ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
		   u16 swid, u16 pf_vf_num, bool is_vf)
{
	switch (type) {
	case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
		pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
		pi->sw_id = swid;
		pi->pf_vf_num = pf_vf_num;
		pi->is_vf = is_vf;
		pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
		pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
		break;
	default:
		ice_debug(pi->hw, ICE_DBG_SW,
			  "incorrect VSI/port type received\n");
		break;
	}
}

/* ice_get_initial_sw_cfg - Get initial port and default VSI data
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
{
	struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
	enum ice_status status;
	u16 req_desc = 0;
	u16 num_elems;
	u16 i;

	rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN,
			    GFP_KERNEL);

	if (!rbuf)
		return ICE_ERR_NO_MEMORY;

	/* Multiple calls to ice_aq_get_sw_cfg may be required
	 * to get all the switch configuration information. The need
	 * for additional calls is indicated by ice_aq_get_sw_cfg
	 * writing a non-zero value in req_desc
	 */
	do {
		struct ice_aqc_get_sw_cfg_resp_elem *ele;

		status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
					   &req_desc, &num_elems, NULL);

		if (status)
			break;

		for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
			u16 pf_vf_num, swid, vsi_port_num;
			bool is_vf = false;
			u8 res_type;

			vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
				ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;

			pf_vf_num = le16_to_cpu(ele->pf_vf_num) &
				ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;

			swid = le16_to_cpu(ele->swid);

			if (le16_to_cpu(ele->pf_vf_num) &
			    ICE_AQC_GET_SW_CONF_RESP_IS_VF)
				is_vf = true;

			res_type = (u8)(le16_to_cpu(ele->vsi_port_num) >>
					ICE_AQC_GET_SW_CONF_RESP_TYPE_S);

			if (res_type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
				/* FW VSI is not needed. Just continue. */
				continue;
			}

			ice_init_port_info(hw->port_info, vsi_port_num,
					   res_type, swid, pf_vf_num, is_vf);
		}
	} while (req_desc && !status);

	devm_kfree(ice_hw_to_dev(hw), (void *)rbuf);
	return status;
}

/**
 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
 * @hw: pointer to the hardware structure
 * @fi: filter info structure to fill/update
 *
 * This helper function populates the lb_en and lan_en elements of the provided
 * ice_fltr_info struct using the switch's type and characteristics of the
 * switch rule being configured.
 */
static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
{
	fi->lb_en = false;
	fi->lan_en = false;
	if ((fi->flag & ICE_FLTR_TX) &&
	    (fi->fltr_act == ICE_FWD_TO_VSI ||
	     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
	     fi->fltr_act == ICE_FWD_TO_Q ||
	     fi->fltr_act == ICE_FWD_TO_QGRP)) {
		/* Setting LB for prune actions will result in replicated
		 * packets to the internal switch that will be dropped.
		 */
		if (fi->lkup_type != ICE_SW_LKUP_VLAN)
			fi->lb_en = true;

		/* Set lan_en to TRUE if
		 * 1. The switch is a VEB AND
		 * 2. Any one of the following is true:
		 * 2.1 The lookup is a directional lookup like ethertype,
		 *     promiscuous, ethertype-MAC, promiscuous-VLAN
		 *     and default-port OR
		 * 2.2 The lookup is VLAN, OR
		 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
		 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
		 *
		 * OR
		 *
		 * The switch is a VEPA.
		 *
		 * In all other cases, the LAN enable has to be set to false.
		 */
		if (hw->evb_veb) {
			if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
			    fi->lkup_type == ICE_SW_LKUP_PROMISC ||
			    fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
			    fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
			    fi->lkup_type == ICE_SW_LKUP_DFLT ||
			    fi->lkup_type == ICE_SW_LKUP_VLAN ||
			    (fi->lkup_type == ICE_SW_LKUP_MAC &&
			     !is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
			    (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
			     !is_unicast_ether_addr(fi->l_data.mac.mac_addr)))
				fi->lan_en = true;
		} else {
			fi->lan_en = true;
		}
	}
}

/**
 * ice_fill_sw_rule - Helper function to fill switch rule structure
 * @hw: pointer to the hardware structure
 * @f_info: entry containing packet forwarding information
 * @s_rule: switch rule structure to be filled in based on mac_entry
 * @opc: switch rules population command type - pass in the command opcode
 */
static void
ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
		 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
{
	u16 vlan_id = ICE_MAX_VLAN_ID + 1;
	void *daddr = NULL;
	u16 eth_hdr_sz;
	u8 *eth_hdr;
	u32 act = 0;
	__be16 *off;
	u8 q_rgn;

	if (opc == ice_aqc_opc_remove_sw_rules) {
		s_rule->pdata.lkup_tx_rx.act = 0;
		s_rule->pdata.lkup_tx_rx.index =
			cpu_to_le16(f_info->fltr_rule_id);
		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
		return;
	}

	eth_hdr_sz = sizeof(dummy_eth_header);
	eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;

	/* initialize the ether header with a dummy header */
	memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz);
	ice_fill_sw_info(hw, f_info);

	switch (f_info->fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
			ICE_SINGLE_ACT_VSI_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_VSI_LIST:
		act |= ICE_SINGLE_ACT_VSI_LIST;
		act |= (f_info->fwd_id.vsi_list_id <<
			ICE_SINGLE_ACT_VSI_LIST_ID_S) &
			ICE_SINGLE_ACT_VSI_LIST_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_Q:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		break;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
			ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_QGRP:
		q_rgn = f_info->qgrp_size > 0 ?
			(u8)ilog2(f_info->qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
			ICE_SINGLE_ACT_Q_REGION_M;
		break;
	default:
		return;
	}

	if (f_info->lb_en)
		act |= ICE_SINGLE_ACT_LB_ENABLE;
	if (f_info->lan_en)
		act |= ICE_SINGLE_ACT_LAN_ENABLE;

	switch (f_info->lkup_type) {
	case ICE_SW_LKUP_MAC:
		daddr = f_info->l_data.mac.mac_addr;
		break;
	case ICE_SW_LKUP_VLAN:
		vlan_id = f_info->l_data.vlan.vlan_id;
		if (f_info->fltr_act == ICE_FWD_TO_VSI ||
		    f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
			act |= ICE_SINGLE_ACT_PRUNE;
			act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
		}
		break;
	case ICE_SW_LKUP_ETHERTYPE_MAC:
		daddr = f_info->l_data.ethertype_mac.mac_addr;
		fallthrough;
	case ICE_SW_LKUP_ETHERTYPE:
		off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
		*off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		break;
	case ICE_SW_LKUP_PROMISC_VLAN:
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		fallthrough;
	case ICE_SW_LKUP_PROMISC:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		break;
	default:
		break;
	}

	s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX) :
		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);

	/* Recipe set depending on lookup type */
	s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(f_info->lkup_type);
	s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(f_info->src);
	s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);

	if (daddr)
		ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr);

	/* vlan_id was initialized above the valid range; only program the
	 * VLAN TCI when one of the lookup types set a valid VLAN ID.
	 */
	if (!(vlan_id > ICE_MAX_VLAN_ID)) {
		off = (__force __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
		*off = cpu_to_be16(vlan_id);
	}

	/* Create the switch rule with the final dummy Ethernet header */
	if (opc != ice_aqc_opc_update_sw_rules)
		s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(eth_hdr_sz);
}

/**
 * ice_add_marker_act
 * @hw: pointer to the hardware structure
 * @m_ent: the management entry for which sw marker needs to be added
 * @sw_marker: sw marker to tag the Rx descriptor with
 * @l_id: large action resource ID
 *
 * Create a large action to hold software marker and update the switch rule
 * entry pointed by m_ent with newly created large action
 */
static enum ice_status
ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
		   u16 sw_marker, u16 l_id)
{
	struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
	/* For software marker we need 3 large actions
	 * 1. FWD action: FWD TO VSI or VSI LIST
	 * 2. GENERIC VALUE action to hold the profile ID
	 * 3. GENERIC VALUE action to hold the software marker ID
	 */
	const u16 num_lg_acts = 3;
	enum ice_status status;
	u16 lg_act_size;
	u16 rules_size;
	u32 act;
	u16 id;

	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;

	/* Create two back-to-back switch rules and submit them to the HW using
	 * one memory buffer:
	 * 1. Large Action
	 * 2. Look up Tx Rx
	 */
	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL);
	if (!lg_act)
		return ICE_ERR_NO_MEMORY;

	rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);

	/* Fill in the first switch rule i.e. large action */
	lg_act->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT);
	lg_act->pdata.lg_act.index = cpu_to_le16(l_id);
	lg_act->pdata.lg_act.size = cpu_to_le16(num_lg_acts);

	/* First action VSI forwarding or VSI list forwarding depending on how
	 * many VSIs
	 */
	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
		m_ent->fltr_info.fwd_id.hw_vsi_id;

	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
	if (m_ent->vsi_count > 1)
		act |= ICE_LG_ACT_VSI_LIST;
	lg_act->pdata.lg_act.act[0] = cpu_to_le32(act);

	/* Second action descriptor type */
	act = ICE_LG_ACT_GENERIC;

	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
	lg_act->pdata.lg_act.act[1] = cpu_to_le32(act);

	act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
	       ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;

	/* Third action Marker value */
	act |= ICE_LG_ACT_GENERIC;
	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
		ICE_LG_ACT_GENERIC_VALUE_M;

	lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);

	/* call the fill switch rule to fill the lookup Tx Rx structure */
	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
			 ice_aqc_opc_update_sw_rules);

	/* Update the action to point to the large action ID */
	rx_tx->pdata.lkup_tx_rx.act =
		cpu_to_le32(ICE_SINGLE_ACT_PTR |
			    ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
			     ICE_SINGLE_ACT_PTR_VAL_M));

	/* Use the filter rule ID of the previously created rule with single
	 * act. Once the update happens, hardware will treat this as large
	 * action
	 */
	rx_tx->pdata.lkup_tx_rx.index =
		cpu_to_le16(m_ent->fltr_info.fltr_rule_id);

	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
				 ice_aqc_opc_update_sw_rules, NULL);
	if (!status) {
		m_ent->lg_act_idx = l_id;
		m_ent->sw_marker_id = sw_marker;
	}

	devm_kfree(ice_hw_to_dev(hw), lg_act);
	return status;
}

/**
 * ice_create_vsi_list_map
 * @hw: pointer to the hardware structure
 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
 * @num_vsi: number of VSI handles in the array
 * @vsi_list_id: VSI list ID generated as part of allocate resource
 *
 * Helper function to create a new entry of VSI list ID to VSI mapping
 * using the given VSI list ID
 */
static struct ice_vsi_list_map_info *
ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
			u16 vsi_list_id)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_map;
	int i;

	v_map = devm_kcalloc(ice_hw_to_dev(hw), 1, sizeof(*v_map), GFP_KERNEL);
	if (!v_map)
		return NULL;

	v_map->vsi_list_id = vsi_list_id;
	v_map->ref_cnt = 1;
	for (i = 0; i < num_vsi; i++)
		set_bit(vsi_handle_arr[i], v_map->vsi_map);

	list_add(&v_map->list_entry, &sw->vsi_list_map_head);
	return v_map;
}

/**
 * ice_update_vsi_list_rule
 * @hw: pointer to the hardware structure
 * @vsi_handle_arr: array of VSI handles to form a VSI list
 * @num_vsi: number of VSI handles in the array
 * @vsi_list_id: VSI list ID generated as part of allocate resource
 * @remove: Boolean value to indicate if this is a remove action
 * @opc: switch rules population command type - pass in the command opcode
 * @lkup_type: lookup type of the filter
 *
 * Call AQ command to add a new switch rule or update existing switch rule
 * using the given VSI list ID
 */
static enum ice_status
ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
			 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
			 enum ice_sw_lkup_type lkup_type)
{
	struct ice_aqc_sw_rules_elem *s_rule;
	enum ice_status status;
	u16 s_rule_size;
	u16 rule_type;
	int i;

	if (!num_vsi)
		return ICE_ERR_PARAM;

	if (lkup_type == ICE_SW_LKUP_MAC ||
	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
	    lkup_type == ICE_SW_LKUP_PROMISC ||
	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN)
		rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
			ICE_AQC_SW_RULES_T_VSI_LIST_SET;
	else if (lkup_type == ICE_SW_LKUP_VLAN)
		rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
			ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
	else
		return ICE_ERR_PARAM;

	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;
	for (i = 0; i < num_vsi; i++) {
		if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
			status = ICE_ERR_PARAM;
			goto exit;
		}
		/* AQ call requires hw_vsi_id(s) */
		s_rule->pdata.vsi_list.vsi[i] =
			cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
	}

	s_rule->type = cpu_to_le16(rule_type);
	s_rule->pdata.vsi_list.number_vsi = cpu_to_le16(num_vsi);
	s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);

	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);

exit:
	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}

/**
 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
 * @hw: pointer to the HW struct
 * @vsi_handle_arr: array of VSI handles to form a VSI list
 * @num_vsi: number of VSI handles in the array
 * @vsi_list_id: stores the ID of the VSI list to be created
 * @lkup_type: switch rule filter's lookup type
 */
static enum ice_status
ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
			 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
{
	enum ice_status status;

	status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
					    ice_aqc_opc_alloc_res);
	if (status)
		return status;

	/* Update the newly created VSI list to include the specified VSIs */
	return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
					*vsi_list_id, false,
					ice_aqc_opc_add_sw_rules, lkup_type);
}

/**
 * ice_create_pkt_fwd_rule
 * @hw: pointer to the hardware structure
 * @f_entry: entry containing packet forwarding information
 *
 * Create switch rule with given filter information and add an entry
 * to the corresponding filter management list to track this switch rule
 * and VSI mapping
 */
static enum ice_status
ice_create_pkt_fwd_rule(struct ice_hw *hw,
			struct ice_fltr_list_entry *f_entry)
{
	struct ice_fltr_mgmt_list_entry *fm_entry;
	struct ice_aqc_sw_rules_elem *s_rule;
	enum ice_sw_lkup_type l_type;
	struct ice_sw_recipe *recp;
	enum ice_status status;

	s_rule = devm_kzalloc(ice_hw_to_dev(hw),
			      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;
	fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry),
				GFP_KERNEL);
	if (!fm_entry) {
		status = ICE_ERR_NO_MEMORY;
		goto ice_create_pkt_fwd_rule_exit;
	}

	fm_entry->fltr_info = f_entry->fltr_info;

	/* Initialize all the fields for the management entry */
	fm_entry->vsi_count = 1;
	fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
	fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
	fm_entry->counter_index = ICE_INVAL_COUNTER_ID;

	ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
			 ice_aqc_opc_add_sw_rules);

	status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
				 ice_aqc_opc_add_sw_rules, NULL);
	if (status) {
		devm_kfree(ice_hw_to_dev(hw), fm_entry);
		goto ice_create_pkt_fwd_rule_exit;
	}

	f_entry->fltr_info.fltr_rule_id =
		le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
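	/* The firmware-assigned rule index is also recorded in the tracked
	 * management entry below so that later update/remove requests can
	 * reference this rule.
	 */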
	fm_entry->fltr_info.fltr_rule_id =
		le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);

	/* The book keeping entries will get removed when base driver
	 * calls remove filter AQ command
	 */
	l_type = fm_entry->fltr_info.lkup_type;
	recp = &hw->switch_info->recp_list[l_type];
	list_add(&fm_entry->list_entry, &recp->filt_rules);

ice_create_pkt_fwd_rule_exit:
	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}

/**
 * ice_update_pkt_fwd_rule
 * @hw: pointer to the hardware structure
 * @f_info: filter information for switch rule
 *
 * Call AQ command to update a previously created switch rule with a
 * VSI list ID
 */
static enum ice_status
ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
{
	struct ice_aqc_sw_rules_elem *s_rule;
	enum ice_status status;

	s_rule = devm_kzalloc(ice_hw_to_dev(hw),
			      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;

	ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);

	s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(f_info->fltr_rule_id);

	/* Update switch rule with new rule set to forward VSI list */
	status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
				 ice_aqc_opc_update_sw_rules, NULL);

	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}

/**
 * ice_update_sw_rule_bridge_mode
 * @hw: pointer to the HW struct
 *
 * Updates unicast switch filter rules based on VEB/VEPA mode
 */
enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *fm_entry;
	enum ice_status status = 0;
	struct list_head *rule_head;
	struct mutex *rule_lock; /* Lock to protect filter rule list */

	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;

	mutex_lock(rule_lock);
	list_for_each_entry(fm_entry, rule_head, list_entry) {
		struct ice_fltr_info *fi = &fm_entry->fltr_info;
		u8 *addr = fi->l_data.mac.mac_addr;

		/* Update unicast Tx rules to reflect the selected
		 * VEB/VEPA mode
		 */
		if ((fi->flag & ICE_FLTR_TX) && is_unicast_ether_addr(addr) &&
		    (fi->fltr_act == ICE_FWD_TO_VSI ||
		     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
		     fi->fltr_act == ICE_FWD_TO_Q ||
		     fi->fltr_act == ICE_FWD_TO_QGRP)) {
			status = ice_update_pkt_fwd_rule(hw, fi);
			if (status)
				break;
		}
	}

	mutex_unlock(rule_lock);

	return status;
}

/**
 * ice_add_update_vsi_list
 * @hw: pointer to the hardware structure
 * @m_entry: pointer to current filter management list entry
 * @cur_fltr: filter information from the book keeping entry
 * @new_fltr: filter information with the new VSI to be added
 *
 * Call AQ command to add or update previously created VSI list with new VSI.
 *
 * Helper function to do book keeping associated with adding filter information
 * The algorithm to do the book keeping is described below :
 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
 *	if only one VSI has been added till now
 *		Allocate a new VSI list and add two VSIs
 *		to this list using switch rule command
 *		Update the previously created switch rule with the
 *		newly created VSI list ID
 *	if a VSI list was previously created
 *		Add the new VSI to the previously created VSI list set
 *		using the update switch rule command
 */
static enum ice_status
ice_add_update_vsi_list(struct ice_hw *hw,
			struct ice_fltr_mgmt_list_entry *m_entry,
			struct ice_fltr_info *cur_fltr,
			struct ice_fltr_info *new_fltr)
{
	enum ice_status status = 0;
	u16 vsi_list_id = 0;

	if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
	     cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
		return ICE_ERR_NOT_IMPL;

	if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
	     new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
	    (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
	     cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
		return ICE_ERR_NOT_IMPL;

	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
		/* Only one entry existed in the mapping and it was not already
		 * a part of a VSI list. So, create a VSI list with the old and
		 * new VSIs.
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];

		/* A rule already exists with the new VSI being added */
		if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
			return ICE_ERR_ALREADY_EXISTS;

		vsi_handle_arr[0] = cur_fltr->vsi_handle;
		vsi_handle_arr[1] = new_fltr->vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id,
						  new_fltr->lkup_type);
		if (status)
			return status;

		tmp_fltr = *new_fltr;
		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		/* Update the previous switch rule of "MAC forward to VSI" to
		 * "MAC fwd to VSI list"
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			return status;

		cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
		cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
		m_entry->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);

		/* If this entry was large action then the large action needs
		 * to be updated to point to FWD to VSI list
		 */
		if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
			status =
			    ice_add_marker_act(hw, m_entry,
					       m_entry->sw_marker_id,
					       m_entry->lg_act_idx);
	} else {
		u16 vsi_handle = new_fltr->vsi_handle;
		enum ice_adminq_opc opcode;

		if (!m_entry->vsi_list_info)
			return ICE_ERR_CFG;

		/* A rule already exists with the new VSI being added */
		if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
			return 0;

		/* Update the previously created VSI list set with
		 * the new VSI ID passed in
		 */
		vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
		opcode = ice_aqc_opc_update_sw_rules;

		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
						  vsi_list_id, false, opcode,
						  new_fltr->lkup_type);
		/* update VSI list mapping info with new VSI ID */
		if (!status)
			set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
	}
	if (!status)
		m_entry->vsi_count++;
	return status;
}

/**
 * ice_find_rule_entry - Search a rule entry
 * @hw: pointer to the hardware structure
 * @recp_id: lookup type for which the specified rule needs to be searched
 * @f_info: rule information
 *
 * Helper function to search for a given rule entry
 * Returns pointer to entry storing the rule if found
 */
static struct ice_fltr_mgmt_list_entry *
ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
{
	struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
	struct ice_switch_info *sw = hw->switch_info;
	struct list_head *list_head;

	list_head = &sw->recp_list[recp_id].filt_rules;
	list_for_each_entry(list_itr, list_head, list_entry) {
		if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
			    sizeof(f_info->l_data)) &&
		    f_info->flag == list_itr->fltr_info.flag) {
			ret = list_itr;
			break;
		}
	}
	return ret;
}

/**
 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
 * @hw: pointer to the hardware structure
 * @recp_id: lookup type for which VSI lists needs to be searched
 * @vsi_handle: VSI handle to be found in VSI list
 * @vsi_list_id: VSI list ID found containing vsi_handle
 *
 * Helper function to search a VSI list with single entry containing given VSI
 * handle element. This can be extended further to search VSI list with more
 * than 1 vsi_count. Returns pointer to VSI list entry if found.
 */
static struct ice_vsi_list_map_info *
ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
			u16 *vsi_list_id)
{
	struct ice_vsi_list_map_info *map_info = NULL;
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *list_itr;
	struct list_head *list_head;

	list_head = &sw->recp_list[recp_id].filt_rules;
	list_for_each_entry(list_itr, list_head, list_entry) {
		if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) {
			map_info = list_itr->vsi_list_info;
			if (test_bit(vsi_handle, map_info->vsi_map)) {
				*vsi_list_id = map_info->vsi_list_id;
				return map_info;
			}
		}
	}
	return NULL;
}

/**
 * ice_add_rule_internal - add rule for a given lookup type
 * @hw: pointer to the hardware structure
 * @recp_id: lookup type (recipe ID) for which rule has to be added
 * @f_entry: structure containing MAC forwarding information
 *
 * Adds or updates the rule lists for a given recipe
 */
static enum ice_status
ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
		      struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_info *new_fltr, *cur_fltr;
	struct ice_fltr_mgmt_list_entry *m_entry;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = 0;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;
	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);

	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;

	mutex_lock(rule_lock);
	new_fltr = &f_entry->fltr_info;
	if (new_fltr->flag & ICE_FLTR_RX)
		new_fltr->src = hw->port_info->lport;
	else if (new_fltr->flag & ICE_FLTR_TX)
		new_fltr->src = f_entry->fltr_info.fwd_id.hw_vsi_id;

	m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
	if (!m_entry) {
		mutex_unlock(rule_lock);
		return ice_create_pkt_fwd_rule(hw, f_entry);
	}

	cur_fltr = &m_entry->fltr_info;
	status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
	mutex_unlock(rule_lock);

	return status;
}

/**
 * ice_remove_vsi_list_rule
 * @hw: pointer to the hardware structure
 * @vsi_list_id: VSI list ID generated as part of allocate resource
 * @lkup_type: switch rule filter lookup type
 *
 * The VSI list should be emptied before this function is called to remove the
 * VSI list.
 */
static enum ice_status
ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
			 enum ice_sw_lkup_type lkup_type)
{
	struct ice_aqc_sw_rules_elem *s_rule;
	enum ice_status status;
	u16 s_rule_size;

	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;

	s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
	s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);

	/* Free the vsi_list resource that we allocated. It is assumed that the
	 * list is empty at this point.
	 */
	status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
					    ice_aqc_opc_free_res);

	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}

/**
 * ice_rem_update_vsi_list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle of the VSI to remove
 * @fm_list: filter management entry for which the VSI list management needs to
 *	     be done
 */
static enum ice_status
ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
			struct ice_fltr_mgmt_list_entry *fm_list)
{
	enum ice_sw_lkup_type lkup_type;
	enum ice_status status = 0;
	u16 vsi_list_id;

	if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)
		return ICE_ERR_PARAM;

	/* A rule with the VSI being removed does not exist */
	if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
		return ICE_ERR_DOES_NOT_EXIST;

	lkup_type = fm_list->fltr_info.lkup_type;
	vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,
					  lkup_type);
	if (status)
		return status;

	fm_list->vsi_count--;
	clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);

	if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
		struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;
		u16 rem_vsi_handle;

		rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
						ICE_MAX_VSI);
		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
			return ICE_ERR_OUT_OF_RANGE;

		/* Make sure VSI list is empty before removing it below */
		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
						  vsi_list_id, true,
						  ice_aqc_opc_update_sw_rules,
						  lkup_type);
		if (status)
			return status;

		tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr_info.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		tmp_fltr_info.vsi_handle = rem_vsi_handle;
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
		if (status) {
			ice_debug(hw, ICE_DBG_SW,
				  "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
				  tmp_fltr_info.fwd_id.hw_vsi_id, status);
			return status;
		}

		fm_list->fltr_info = tmp_fltr_info;
	}

	if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
	    (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;

		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
		if (status) {
			ice_debug(hw, ICE_DBG_SW,
				  "Failed to remove VSI list %d, error %d\n",
				  vsi_list_id, status);
			return status;
		}

		list_del(&vsi_list_info->list_entry);
		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
		fm_list->vsi_list_info = NULL;
	}

	return status;
}

/**
 * ice_remove_rule_internal - Remove a filter rule of a given type
 * @hw: pointer to the hardware structure
 * @recp_id: recipe ID for which the rule needs to be removed
 * @f_entry: rule entry containing filter information
 */
static enum ice_status
ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
			 struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *list_elem;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = 0;
	bool remove_rule = false;
	u16 vsi_handle;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;
	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);

	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
	mutex_lock(rule_lock);
	list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
	if (!list_elem) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto exit;
	}

	if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
		remove_rule = true;
	} else if (!list_elem->vsi_list_info) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto exit;
	} else if (list_elem->vsi_list_info->ref_cnt > 1) {
		/* a ref_cnt > 1 indicates that the vsi_list is being
		 * shared by multiple rules. Decrement the ref_cnt and
		 * remove this rule, but do not modify the list, as it
		 * is in-use by other rules.
		 */
		list_elem->vsi_list_info->ref_cnt--;
		remove_rule = true;
	} else {
		/* a ref_cnt of 1 indicates the vsi_list is only used
		 * by one rule. However, the original removal request is only
		 * for a single VSI. Update the vsi_list first, and only
		 * remove the rule if there are no further VSIs in this list.
		 */
		vsi_handle = f_entry->fltr_info.vsi_handle;
		status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
		if (status)
			goto exit;
		/* if VSI count goes to zero after updating the VSI list */
		if (list_elem->vsi_count == 0)
			remove_rule = true;
	}

	if (remove_rule) {
		/* Remove the lookup rule */
		struct ice_aqc_sw_rules_elem *s_rule;

		s_rule = devm_kzalloc(ice_hw_to_dev(hw),
				      ICE_SW_RULE_RX_TX_NO_HDR_SIZE,
				      GFP_KERNEL);
		if (!s_rule) {
			status = ICE_ERR_NO_MEMORY;
			goto exit;
		}

		ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
				 ice_aqc_opc_remove_sw_rules);

		status = ice_aq_sw_rules(hw, s_rule,
					 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
					 ice_aqc_opc_remove_sw_rules, NULL);

		/* Remove a book keeping from the list */
		devm_kfree(ice_hw_to_dev(hw), s_rule);

		if (status)
			goto exit;

		list_del(&list_elem->list_entry);
		devm_kfree(ice_hw_to_dev(hw), list_elem);
	}
exit:
	mutex_unlock(rule_lock);
	return status;
}

/**
 * ice_add_mac - Add a MAC address based filter rule
 * @hw: pointer to the hardware structure
 * @m_list: list of MAC addresses and forwarding information
 *
 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
 * multiple unicast addresses, the function assumes that all the
 * addresses are unique in a given add_mac call. It doesn't
 * check for duplicates in this case, removing duplicates from a given
 * list should be taken care of in the caller of this function.
 */
enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
{
	struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
	struct ice_fltr_list_entry *m_list_itr;
	struct list_head *rule_head;
	u16 total_elem_left, s_rule_size;
	struct ice_switch_info *sw;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = 0;
	u16 num_unicast = 0;
	u8 elem_sent;

	if (!m_list || !hw)
		return ICE_ERR_PARAM;

	s_rule = NULL;
	sw = hw->switch_info;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
		u16 vsi_handle;
		u16 hw_vsi_id;

		m_list_itr->fltr_info.flag = ICE_FLTR_TX;
		vsi_handle = m_list_itr->fltr_info.vsi_handle;
		if (!ice_is_vsi_valid(hw, vsi_handle))
			return ICE_ERR_PARAM;
		hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
		m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
		/* update the src in case it is VSI num */
		if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
			return ICE_ERR_PARAM;
		m_list_itr->fltr_info.src = hw_vsi_id;
		if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
		    is_zero_ether_addr(add))
			return ICE_ERR_PARAM;
		if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
			/* Don't overwrite the unicast address */
			mutex_lock(rule_lock);
			if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
						&m_list_itr->fltr_info)) {
				mutex_unlock(rule_lock);
				return ICE_ERR_ALREADY_EXISTS;
			}
			mutex_unlock(rule_lock);
			num_unicast++;
		} else if (is_multicast_ether_addr(add) ||
			   (is_unicast_ether_addr(add) && hw->ucast_shared)) {
			m_list_itr->status =
				ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
						      m_list_itr);
			if (m_list_itr->status)
				return m_list_itr->status;
		}
	}

	mutex_lock(rule_lock);
	/* Exit if no suitable entries were found for adding bulk switch rule */
	if (!num_unicast) {
		status = 0;
		goto ice_add_mac_exit;
	}

	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;

	/* Allocate switch rule buffer for the bulk update for unicast */
	s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
			      GFP_KERNEL);
	if (!s_rule) {
		status = ICE_ERR_NO_MEMORY;
		goto ice_add_mac_exit;
	}

	r_iter = s_rule;
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];

		if (is_unicast_ether_addr(mac_addr)) {
			ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
					 ice_aqc_opc_add_sw_rules);
			r_iter = (struct ice_aqc_sw_rules_elem *)
				((u8 *)r_iter + s_rule_size);
		}
	}

	/* Call AQ bulk switch rule update for all unicast addresses */
	r_iter = s_rule;
	/* Call AQ switch rule in AQ_MAX chunk */
	for (total_elem_left = num_unicast; total_elem_left > 0;
	     total_elem_left -= elem_sent) {
		struct ice_aqc_sw_rules_elem *entry = r_iter;

		elem_sent = min_t(u8, total_elem_left,
				  (ICE_AQ_MAX_BUF_LEN / s_rule_size));
		status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
					 elem_sent, ice_aqc_opc_add_sw_rules,
					 NULL);
		if (status)
			goto ice_add_mac_exit;
		r_iter = (struct ice_aqc_sw_rules_elem *)
			((u8 *)r_iter + (elem_sent * s_rule_size));
	}

	/* Fill up rule ID based on the value returned from FW */
	r_iter = s_rule;
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
		struct ice_fltr_mgmt_list_entry *fm_entry;

		if (is_unicast_ether_addr(mac_addr)) {
			f_info->fltr_rule_id =
				le16_to_cpu(r_iter->pdata.lkup_tx_rx.index);
			f_info->fltr_act = ICE_FWD_TO_VSI;
			/* Create an entry to track this MAC address */
			fm_entry = devm_kzalloc(ice_hw_to_dev(hw),
						sizeof(*fm_entry), GFP_KERNEL);
			if (!fm_entry) {
				status = ICE_ERR_NO_MEMORY;
				goto ice_add_mac_exit;
			}
			fm_entry->fltr_info = *f_info;
			fm_entry->vsi_count = 1;
			/* The book keeping entries will get removed when
			 * base driver calls remove filter AQ command
			 */

			list_add(&fm_entry->list_entry, rule_head);
			r_iter = (struct ice_aqc_sw_rules_elem *)
				((u8 *)r_iter + s_rule_size);
		}
	}

ice_add_mac_exit:
	mutex_unlock(rule_lock);
	if (s_rule)
		devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}

/**
 * ice_add_vlan_internal - Add one VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @f_entry: filter entry containing one VLAN information
 */
static enum ice_status
ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *v_list_itr;
	struct ice_fltr_info *new_fltr, *cur_fltr;
	enum ice_sw_lkup_type lkup_type;
	u16 vsi_list_id = 0, vsi_handle;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = 0;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;
(!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle)) 1771 return ICE_ERR_PARAM; 1772 1773 f_entry->fltr_info.fwd_id.hw_vsi_id = 1774 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle); 1775 new_fltr = &f_entry->fltr_info; 1776 1777 /* VLAN ID should only be 12 bits */ 1778 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID) 1779 return ICE_ERR_PARAM; 1780 1781 if (new_fltr->src_id != ICE_SRC_ID_VSI) 1782 return ICE_ERR_PARAM; 1783 1784 new_fltr->src = new_fltr->fwd_id.hw_vsi_id; 1785 lkup_type = new_fltr->lkup_type; 1786 vsi_handle = new_fltr->vsi_handle; 1787 rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock; 1788 mutex_lock(rule_lock); 1789 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr); 1790 if (!v_list_itr) { 1791 struct ice_vsi_list_map_info *map_info = NULL; 1792 1793 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) { 1794 /* All VLAN pruning rules use a VSI list. Check if 1795 * there is already a VSI list containing the VSI that we 1796 * want to add. If found, use the same vsi_list_id for 1797 * this new VLAN rule or else create a new list. 1798 */ 1799 map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN, 1800 vsi_handle, 1801 &vsi_list_id); 1802 if (!map_info) { 1803 status = ice_create_vsi_list_rule(hw, 1804 &vsi_handle, 1805 1, 1806 &vsi_list_id, 1807 lkup_type); 1808 if (status) 1809 goto exit; 1810 } 1811 /* Convert the action to forwarding to a VSI list. */ 1812 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST; 1813 new_fltr->fwd_id.vsi_list_id = vsi_list_id; 1814 } 1815 1816 status = ice_create_pkt_fwd_rule(hw, f_entry); 1817 if (!status) { 1818 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, 1819 new_fltr); 1820 if (!v_list_itr) { 1821 status = ICE_ERR_DOES_NOT_EXIST; 1822 goto exit; 1823 } 1824 /* reuse VSI list for new rule and increment ref_cnt */ 1825 if (map_info) { 1826 v_list_itr->vsi_list_info = map_info; 1827 map_info->ref_cnt++; 1828 } else { 1829 v_list_itr->vsi_list_info = 1830 ice_create_vsi_list_map(hw, &vsi_handle, 1831 1, vsi_list_id); 1832 } 1833 } 1834 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) { 1835 /* Update the existing VSI list to add the new VSI ID only if it is used 1836 * by one VLAN rule. 1837 */ 1838 cur_fltr = &v_list_itr->fltr_info; 1839 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr, 1840 new_fltr); 1841 } else { 1842 /* If the VLAN rule exists and the VSI list used by this rule is 1843 * referenced by more than one VLAN rule, then create a new VSI 1844 * list that appends the new VSI to the previous one and update the 1845 * existing VLAN rule to point to the new VSI list ID 1846 */ 1847 struct ice_fltr_info tmp_fltr; 1848 u16 vsi_handle_arr[2]; 1849 u16 cur_handle; 1850 1851 /* Current implementation only supports reusing VSI list with 1852 * one VSI count.
We should never hit below condition 1853 */ 1854 if (v_list_itr->vsi_count > 1 && 1855 v_list_itr->vsi_list_info->ref_cnt > 1) { 1856 ice_debug(hw, ICE_DBG_SW, 1857 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n"); 1858 status = ICE_ERR_CFG; 1859 goto exit; 1860 } 1861 1862 cur_handle = 1863 find_first_bit(v_list_itr->vsi_list_info->vsi_map, 1864 ICE_MAX_VSI); 1865 1866 /* A rule already exists with the new VSI being added */ 1867 if (cur_handle == vsi_handle) { 1868 status = ICE_ERR_ALREADY_EXISTS; 1869 goto exit; 1870 } 1871 1872 vsi_handle_arr[0] = cur_handle; 1873 vsi_handle_arr[1] = vsi_handle; 1874 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2, 1875 &vsi_list_id, lkup_type); 1876 if (status) 1877 goto exit; 1878 1879 tmp_fltr = v_list_itr->fltr_info; 1880 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id; 1881 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id; 1882 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST; 1883 /* Update the previous switch rule to a new VSI list which 1884 * includes current VSI that is requested 1885 */ 1886 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr); 1887 if (status) 1888 goto exit; 1889 1890 /* before overriding VSI list map info. decrement ref_cnt of 1891 * previous VSI list 1892 */ 1893 v_list_itr->vsi_list_info->ref_cnt--; 1894 1895 /* now update to newly created list */ 1896 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id; 1897 v_list_itr->vsi_list_info = 1898 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2, 1899 vsi_list_id); 1900 v_list_itr->vsi_count++; 1901 } 1902 1903 exit: 1904 mutex_unlock(rule_lock); 1905 return status; 1906 } 1907 1908 /** 1909 * ice_add_vlan - Add VLAN based filter rule 1910 * @hw: pointer to the hardware structure 1911 * @v_list: list of VLAN entries and forwarding information 1912 */ 1913 enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *v_list) 1914 { 1915 struct ice_fltr_list_entry *v_list_itr; 1916 1917 if (!v_list || !hw) 1918 return ICE_ERR_PARAM; 1919 1920 list_for_each_entry(v_list_itr, v_list, list_entry) { 1921 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN) 1922 return ICE_ERR_PARAM; 1923 v_list_itr->fltr_info.flag = ICE_FLTR_TX; 1924 v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr); 1925 if (v_list_itr->status) 1926 return v_list_itr->status; 1927 } 1928 return 0; 1929 } 1930 1931 /** 1932 * ice_add_eth_mac - Add ethertype and MAC based filter rule 1933 * @hw: pointer to the hardware structure 1934 * @em_list: list of ether type MAC filter, MAC is optional 1935 * 1936 * This function requires the caller to populate the entries in 1937 * the filter list with the necessary fields (including flags to 1938 * indicate Tx or Rx rules). 
1939 */ 1940 enum ice_status 1941 ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list) 1942 { 1943 struct ice_fltr_list_entry *em_list_itr; 1944 1945 if (!em_list || !hw) 1946 return ICE_ERR_PARAM; 1947 1948 list_for_each_entry(em_list_itr, em_list, list_entry) { 1949 enum ice_sw_lkup_type l_type = 1950 em_list_itr->fltr_info.lkup_type; 1951 1952 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC && 1953 l_type != ICE_SW_LKUP_ETHERTYPE) 1954 return ICE_ERR_PARAM; 1955 1956 em_list_itr->status = ice_add_rule_internal(hw, l_type, 1957 em_list_itr); 1958 if (em_list_itr->status) 1959 return em_list_itr->status; 1960 } 1961 return 0; 1962 } 1963 1964 /** 1965 * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule 1966 * @hw: pointer to the hardware structure 1967 * @em_list: list of ethertype or ethertype MAC entries 1968 */ 1969 enum ice_status 1970 ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list) 1971 { 1972 struct ice_fltr_list_entry *em_list_itr, *tmp; 1973 1974 if (!em_list || !hw) 1975 return ICE_ERR_PARAM; 1976 1977 list_for_each_entry_safe(em_list_itr, tmp, em_list, list_entry) { 1978 enum ice_sw_lkup_type l_type = 1979 em_list_itr->fltr_info.lkup_type; 1980 1981 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC && 1982 l_type != ICE_SW_LKUP_ETHERTYPE) 1983 return ICE_ERR_PARAM; 1984 1985 em_list_itr->status = ice_remove_rule_internal(hw, l_type, 1986 em_list_itr); 1987 if (em_list_itr->status) 1988 return em_list_itr->status; 1989 } 1990 return 0; 1991 } 1992 1993 /** 1994 * ice_rem_sw_rule_info 1995 * @hw: pointer to the hardware structure 1996 * @rule_head: pointer to the switch list structure that we want to delete 1997 */ 1998 static void 1999 ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head) 2000 { 2001 if (!list_empty(rule_head)) { 2002 struct ice_fltr_mgmt_list_entry *entry; 2003 struct ice_fltr_mgmt_list_entry *tmp; 2004 2005 list_for_each_entry_safe(entry, tmp, rule_head, list_entry) { 2006 list_del(&entry->list_entry); 2007 devm_kfree(ice_hw_to_dev(hw), entry); 2008 } 2009 } 2010 } 2011 2012 /** 2013 * ice_cfg_dflt_vsi - change state of VSI to set/clear default 2014 * @hw: pointer to the hardware structure 2015 * @vsi_handle: VSI handle to set as default 2016 * @set: true to add the above mentioned switch rule, false to remove it 2017 * @direction: ICE_FLTR_RX or ICE_FLTR_TX 2018 * 2019 * add filter rule to set/unset given VSI as default VSI for the switch 2020 * (represented by swid) 2021 */ 2022 enum ice_status 2023 ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction) 2024 { 2025 struct ice_aqc_sw_rules_elem *s_rule; 2026 struct ice_fltr_info f_info; 2027 enum ice_adminq_opc opcode; 2028 enum ice_status status; 2029 u16 s_rule_size; 2030 u16 hw_vsi_id; 2031 2032 if (!ice_is_vsi_valid(hw, vsi_handle)) 2033 return ICE_ERR_PARAM; 2034 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); 2035 2036 s_rule_size = set ? 
ICE_SW_RULE_RX_TX_ETH_HDR_SIZE : 2037 ICE_SW_RULE_RX_TX_NO_HDR_SIZE; 2038 2039 s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL); 2040 if (!s_rule) 2041 return ICE_ERR_NO_MEMORY; 2042 2043 memset(&f_info, 0, sizeof(f_info)); 2044 2045 f_info.lkup_type = ICE_SW_LKUP_DFLT; 2046 f_info.flag = direction; 2047 f_info.fltr_act = ICE_FWD_TO_VSI; 2048 f_info.fwd_id.hw_vsi_id = hw_vsi_id; 2049 2050 if (f_info.flag & ICE_FLTR_RX) { 2051 f_info.src = hw->port_info->lport; 2052 f_info.src_id = ICE_SRC_ID_LPORT; 2053 if (!set) 2054 f_info.fltr_rule_id = 2055 hw->port_info->dflt_rx_vsi_rule_id; 2056 } else if (f_info.flag & ICE_FLTR_TX) { 2057 f_info.src_id = ICE_SRC_ID_VSI; 2058 f_info.src = hw_vsi_id; 2059 if (!set) 2060 f_info.fltr_rule_id = 2061 hw->port_info->dflt_tx_vsi_rule_id; 2062 } 2063 2064 if (set) 2065 opcode = ice_aqc_opc_add_sw_rules; 2066 else 2067 opcode = ice_aqc_opc_remove_sw_rules; 2068 2069 ice_fill_sw_rule(hw, &f_info, s_rule, opcode); 2070 2071 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL); 2072 if (status || !(f_info.flag & ICE_FLTR_TX_RX)) 2073 goto out; 2074 if (set) { 2075 u16 index = le16_to_cpu(s_rule->pdata.lkup_tx_rx.index); 2076 2077 if (f_info.flag & ICE_FLTR_TX) { 2078 hw->port_info->dflt_tx_vsi_num = hw_vsi_id; 2079 hw->port_info->dflt_tx_vsi_rule_id = index; 2080 } else if (f_info.flag & ICE_FLTR_RX) { 2081 hw->port_info->dflt_rx_vsi_num = hw_vsi_id; 2082 hw->port_info->dflt_rx_vsi_rule_id = index; 2083 } 2084 } else { 2085 if (f_info.flag & ICE_FLTR_TX) { 2086 hw->port_info->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL; 2087 hw->port_info->dflt_tx_vsi_rule_id = ICE_INVAL_ACT; 2088 } else if (f_info.flag & ICE_FLTR_RX) { 2089 hw->port_info->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL; 2090 hw->port_info->dflt_rx_vsi_rule_id = ICE_INVAL_ACT; 2091 } 2092 } 2093 2094 out: 2095 devm_kfree(ice_hw_to_dev(hw), s_rule); 2096 return status; 2097 } 2098 2099 /** 2100 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry 2101 * @hw: pointer to the hardware structure 2102 * @recp_id: lookup type for which the specified rule needs to be searched 2103 * @f_info: rule information 2104 * 2105 * Helper function to search for a unicast rule entry - this is to be used 2106 * to remove unicast MAC filter that is not shared with other VSIs on the 2107 * PF switch. 2108 * 2109 * Returns pointer to entry storing the rule if found 2110 */ 2111 static struct ice_fltr_mgmt_list_entry * 2112 ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id, 2113 struct ice_fltr_info *f_info) 2114 { 2115 struct ice_switch_info *sw = hw->switch_info; 2116 struct ice_fltr_mgmt_list_entry *list_itr; 2117 struct list_head *list_head; 2118 2119 list_head = &sw->recp_list[recp_id].filt_rules; 2120 list_for_each_entry(list_itr, list_head, list_entry) { 2121 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data, 2122 sizeof(f_info->l_data)) && 2123 f_info->fwd_id.hw_vsi_id == 2124 list_itr->fltr_info.fwd_id.hw_vsi_id && 2125 f_info->flag == list_itr->fltr_info.flag) 2126 return list_itr; 2127 } 2128 return NULL; 2129 } 2130 2131 /** 2132 * ice_remove_mac - remove a MAC address based filter rule 2133 * @hw: pointer to the hardware structure 2134 * @m_list: list of MAC addresses and forwarding information 2135 * 2136 * This function removes either a MAC filter rule or a specific VSI from a 2137 * VSI list for a multicast MAC address. 2138 * 2139 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by 2140 * ice_add_mac. 
Caller should be aware that this call will only work if all 2141 * the entries passed into m_list were added previously. It will not attempt to 2142 * do a partial remove of entries that were found. 2143 */ 2144 enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_list) 2145 { 2146 struct ice_fltr_list_entry *list_itr, *tmp; 2147 struct mutex *rule_lock; /* Lock to protect filter rule list */ 2148 2149 if (!m_list) 2150 return ICE_ERR_PARAM; 2151 2152 rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock; 2153 list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) { 2154 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type; 2155 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0]; 2156 u16 vsi_handle; 2157 2158 if (l_type != ICE_SW_LKUP_MAC) 2159 return ICE_ERR_PARAM; 2160 2161 vsi_handle = list_itr->fltr_info.vsi_handle; 2162 if (!ice_is_vsi_valid(hw, vsi_handle)) 2163 return ICE_ERR_PARAM; 2164 2165 list_itr->fltr_info.fwd_id.hw_vsi_id = 2166 ice_get_hw_vsi_num(hw, vsi_handle); 2167 if (is_unicast_ether_addr(add) && !hw->ucast_shared) { 2168 /* Don't remove the unicast address that belongs to 2169 * another VSI on the switch, since it is not being 2170 * shared... 2171 */ 2172 mutex_lock(rule_lock); 2173 if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC, 2174 &list_itr->fltr_info)) { 2175 mutex_unlock(rule_lock); 2176 return ICE_ERR_DOES_NOT_EXIST; 2177 } 2178 mutex_unlock(rule_lock); 2179 } 2180 list_itr->status = ice_remove_rule_internal(hw, 2181 ICE_SW_LKUP_MAC, 2182 list_itr); 2183 if (list_itr->status) 2184 return list_itr->status; 2185 } 2186 return 0; 2187 } 2188 2189 /** 2190 * ice_remove_vlan - Remove VLAN based filter rule 2191 * @hw: pointer to the hardware structure 2192 * @v_list: list of VLAN entries and forwarding information 2193 */ 2194 enum ice_status 2195 ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list) 2196 { 2197 struct ice_fltr_list_entry *v_list_itr, *tmp; 2198 2199 if (!v_list || !hw) 2200 return ICE_ERR_PARAM; 2201 2202 list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) { 2203 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type; 2204 2205 if (l_type != ICE_SW_LKUP_VLAN) 2206 return ICE_ERR_PARAM; 2207 v_list_itr->status = ice_remove_rule_internal(hw, 2208 ICE_SW_LKUP_VLAN, 2209 v_list_itr); 2210 if (v_list_itr->status) 2211 return v_list_itr->status; 2212 } 2213 return 0; 2214 } 2215 2216 /** 2217 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter 2218 * @fm_entry: filter entry to inspect 2219 * @vsi_handle: VSI handle to compare with filter info 2220 */ 2221 static bool 2222 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle) 2223 { 2224 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI && 2225 fm_entry->fltr_info.vsi_handle == vsi_handle) || 2226 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST && 2227 (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map)))); 2228 } 2229 2230 /** 2231 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list 2232 * @hw: pointer to the hardware structure 2233 * @vsi_handle: VSI handle to remove filters from 2234 * @vsi_list_head: pointer to the list to add entry to 2235 * @fi: pointer to fltr_info of filter entry to copy & add 2236 * 2237 * Helper function, used when creating a list of filters to remove from 2238 * a specific VSI. 
The entry added to vsi_list_head is a COPY of the 2239 * original filter entry, with the exception of fltr_info.fltr_act and 2240 * fltr_info.fwd_id fields. These are set such that later logic can 2241 * extract which VSI to remove the fltr from, and pass on that information. 2242 */ 2243 static enum ice_status 2244 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle, 2245 struct list_head *vsi_list_head, 2246 struct ice_fltr_info *fi) 2247 { 2248 struct ice_fltr_list_entry *tmp; 2249 2250 /* this memory is freed up in the caller function 2251 * once filters for this VSI are removed 2252 */ 2253 tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL); 2254 if (!tmp) 2255 return ICE_ERR_NO_MEMORY; 2256 2257 tmp->fltr_info = *fi; 2258 2259 /* Overwrite these fields to indicate which VSI to remove filter from, 2260 * so find and remove logic can extract the information from the 2261 * list entries. Note that original entries will still have proper 2262 * values. 2263 */ 2264 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI; 2265 tmp->fltr_info.vsi_handle = vsi_handle; 2266 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); 2267 2268 list_add(&tmp->list_entry, vsi_list_head); 2269 2270 return 0; 2271 } 2272 2273 /** 2274 * ice_add_to_vsi_fltr_list - Add VSI filters to the list 2275 * @hw: pointer to the hardware structure 2276 * @vsi_handle: VSI handle to remove filters from 2277 * @lkup_list_head: pointer to the list that has certain lookup type filters 2278 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle 2279 * 2280 * Locates all filters in lkup_list_head that are used by the given VSI, 2281 * and adds COPIES of those entries to vsi_list_head (intended to be used 2282 * to remove the listed filters). 2283 * Note that this means all entries in vsi_list_head must be explicitly 2284 * deallocated by the caller when done with the list. 2285 */ 2286 static enum ice_status 2287 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle, 2288 struct list_head *lkup_list_head, 2289 struct list_head *vsi_list_head) 2290 { 2291 struct ice_fltr_mgmt_list_entry *fm_entry; 2292 enum ice_status status = 0; 2293 2294 /* check to make sure VSI ID is valid and within boundary */ 2295 if (!ice_is_vsi_valid(hw, vsi_handle)) 2296 return ICE_ERR_PARAM; 2297 2298 list_for_each_entry(fm_entry, lkup_list_head, list_entry) { 2299 struct ice_fltr_info *fi; 2300 2301 fi = &fm_entry->fltr_info; 2302 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle)) 2303 continue; 2304 2305 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle, 2306 vsi_list_head, fi); 2307 if (status) 2308 return status; 2309 } 2310 return status; 2311 } 2312 2313 /** 2314 * ice_determine_promisc_mask - determine the promiscuous mask for a filter 2315 * @fi: filter info to parse 2316 * 2317 * Helper function to determine which ICE_PROMISC_ mask corresponds 2318 * to the given filter info. 2319 */ 2320 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi) 2321 { 2322 u16 vid = fi->l_data.mac_vlan.vlan_id; 2323 u8 *macaddr = fi->l_data.mac.mac_addr; 2324 bool is_tx_fltr = false; 2325 u8 promisc_mask = 0; 2326 2327 if (fi->flag == ICE_FLTR_TX) 2328 is_tx_fltr = true; 2329 2330 if (is_broadcast_ether_addr(macaddr)) 2331 promisc_mask |= is_tx_fltr ? 2332 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX; 2333 else if (is_multicast_ether_addr(macaddr)) 2334 promisc_mask |= is_tx_fltr ? 2335 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX; 2336 else if (is_unicast_ether_addr(macaddr)) 2337 promisc_mask |= is_tx_fltr ?
2338 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX; 2339 if (vid) 2340 promisc_mask |= is_tx_fltr ? 2341 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX; 2342 2343 return promisc_mask; 2344 } 2345 2346 /** 2347 * ice_remove_promisc - Remove promisc based filter rules 2348 * @hw: pointer to the hardware structure 2349 * @recp_id: recipe ID for which the rule needs to removed 2350 * @v_list: list of promisc entries 2351 */ 2352 static enum ice_status 2353 ice_remove_promisc(struct ice_hw *hw, u8 recp_id, 2354 struct list_head *v_list) 2355 { 2356 struct ice_fltr_list_entry *v_list_itr, *tmp; 2357 2358 list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) { 2359 v_list_itr->status = 2360 ice_remove_rule_internal(hw, recp_id, v_list_itr); 2361 if (v_list_itr->status) 2362 return v_list_itr->status; 2363 } 2364 return 0; 2365 } 2366 2367 /** 2368 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI 2369 * @hw: pointer to the hardware structure 2370 * @vsi_handle: VSI handle to clear mode 2371 * @promisc_mask: mask of promiscuous config bits to clear 2372 * @vid: VLAN ID to clear VLAN promiscuous 2373 */ 2374 enum ice_status 2375 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, 2376 u16 vid) 2377 { 2378 struct ice_switch_info *sw = hw->switch_info; 2379 struct ice_fltr_list_entry *fm_entry, *tmp; 2380 struct list_head remove_list_head; 2381 struct ice_fltr_mgmt_list_entry *itr; 2382 struct list_head *rule_head; 2383 struct mutex *rule_lock; /* Lock to protect filter rule list */ 2384 enum ice_status status = 0; 2385 u8 recipe_id; 2386 2387 if (!ice_is_vsi_valid(hw, vsi_handle)) 2388 return ICE_ERR_PARAM; 2389 2390 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) 2391 recipe_id = ICE_SW_LKUP_PROMISC_VLAN; 2392 else 2393 recipe_id = ICE_SW_LKUP_PROMISC; 2394 2395 rule_head = &sw->recp_list[recipe_id].filt_rules; 2396 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock; 2397 2398 INIT_LIST_HEAD(&remove_list_head); 2399 2400 mutex_lock(rule_lock); 2401 list_for_each_entry(itr, rule_head, list_entry) { 2402 struct ice_fltr_info *fltr_info; 2403 u8 fltr_promisc_mask = 0; 2404 2405 if (!ice_vsi_uses_fltr(itr, vsi_handle)) 2406 continue; 2407 fltr_info = &itr->fltr_info; 2408 2409 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN && 2410 vid != fltr_info->l_data.mac_vlan.vlan_id) 2411 continue; 2412 2413 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info); 2414 2415 /* Skip if filter is not completely specified by given mask */ 2416 if (fltr_promisc_mask & ~promisc_mask) 2417 continue; 2418 2419 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle, 2420 &remove_list_head, 2421 fltr_info); 2422 if (status) { 2423 mutex_unlock(rule_lock); 2424 goto free_fltr_list; 2425 } 2426 } 2427 mutex_unlock(rule_lock); 2428 2429 status = ice_remove_promisc(hw, recipe_id, &remove_list_head); 2430 2431 free_fltr_list: 2432 list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) { 2433 list_del(&fm_entry->list_entry); 2434 devm_kfree(ice_hw_to_dev(hw), fm_entry); 2435 } 2436 2437 return status; 2438 } 2439 2440 /** 2441 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s) 2442 * @hw: pointer to the hardware structure 2443 * @vsi_handle: VSI handle to configure 2444 * @promisc_mask: mask of promiscuous config bits 2445 * @vid: VLAN ID to set VLAN promiscuous 2446 */ 2447 enum ice_status 2448 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid) 2449 { 2450 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR 
}; 2451 struct ice_fltr_list_entry f_list_entry; 2452 struct ice_fltr_info new_fltr; 2453 enum ice_status status = 0; 2454 bool is_tx_fltr; 2455 u16 hw_vsi_id; 2456 int pkt_type; 2457 u8 recipe_id; 2458 2459 if (!ice_is_vsi_valid(hw, vsi_handle)) 2460 return ICE_ERR_PARAM; 2461 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); 2462 2463 memset(&new_fltr, 0, sizeof(new_fltr)); 2464 2465 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) { 2466 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN; 2467 new_fltr.l_data.mac_vlan.vlan_id = vid; 2468 recipe_id = ICE_SW_LKUP_PROMISC_VLAN; 2469 } else { 2470 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC; 2471 recipe_id = ICE_SW_LKUP_PROMISC; 2472 } 2473 2474 /* Separate filters must be set for each direction/packet type 2475 * combination, so we will loop over the mask value, store the 2476 * individual type, and clear it out in the input mask as it 2477 * is found. 2478 */ 2479 while (promisc_mask) { 2480 u8 *mac_addr; 2481 2482 pkt_type = 0; 2483 is_tx_fltr = false; 2484 2485 if (promisc_mask & ICE_PROMISC_UCAST_RX) { 2486 promisc_mask &= ~ICE_PROMISC_UCAST_RX; 2487 pkt_type = UCAST_FLTR; 2488 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) { 2489 promisc_mask &= ~ICE_PROMISC_UCAST_TX; 2490 pkt_type = UCAST_FLTR; 2491 is_tx_fltr = true; 2492 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) { 2493 promisc_mask &= ~ICE_PROMISC_MCAST_RX; 2494 pkt_type = MCAST_FLTR; 2495 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) { 2496 promisc_mask &= ~ICE_PROMISC_MCAST_TX; 2497 pkt_type = MCAST_FLTR; 2498 is_tx_fltr = true; 2499 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) { 2500 promisc_mask &= ~ICE_PROMISC_BCAST_RX; 2501 pkt_type = BCAST_FLTR; 2502 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) { 2503 promisc_mask &= ~ICE_PROMISC_BCAST_TX; 2504 pkt_type = BCAST_FLTR; 2505 is_tx_fltr = true; 2506 } 2507 2508 /* Check for VLAN promiscuous flag */ 2509 if (promisc_mask & ICE_PROMISC_VLAN_RX) { 2510 promisc_mask &= ~ICE_PROMISC_VLAN_RX; 2511 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) { 2512 promisc_mask &= ~ICE_PROMISC_VLAN_TX; 2513 is_tx_fltr = true; 2514 } 2515 2516 /* Set filter DA based on packet type */ 2517 mac_addr = new_fltr.l_data.mac.mac_addr; 2518 if (pkt_type == BCAST_FLTR) { 2519 eth_broadcast_addr(mac_addr); 2520 } else if (pkt_type == MCAST_FLTR || 2521 pkt_type == UCAST_FLTR) { 2522 /* Use the dummy ether header DA */ 2523 ether_addr_copy(mac_addr, dummy_eth_header); 2524 if (pkt_type == MCAST_FLTR) 2525 mac_addr[0] |= 0x1; /* Set multicast bit */ 2526 } 2527 2528 /* Need to reset this to zero for all iterations */ 2529 new_fltr.flag = 0; 2530 if (is_tx_fltr) { 2531 new_fltr.flag |= ICE_FLTR_TX; 2532 new_fltr.src = hw_vsi_id; 2533 } else { 2534 new_fltr.flag |= ICE_FLTR_RX; 2535 new_fltr.src = hw->port_info->lport; 2536 } 2537 2538 new_fltr.fltr_act = ICE_FWD_TO_VSI; 2539 new_fltr.vsi_handle = vsi_handle; 2540 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id; 2541 f_list_entry.fltr_info = new_fltr; 2542 2543 status = ice_add_rule_internal(hw, recipe_id, &f_list_entry); 2544 if (status) 2545 goto set_promisc_exit; 2546 } 2547 2548 set_promisc_exit: 2549 return status; 2550 } 2551 2552 /** 2553 * ice_set_vlan_vsi_promisc 2554 * @hw: pointer to the hardware structure 2555 * @vsi_handle: VSI handle to configure 2556 * @promisc_mask: mask of promiscuous config bits 2557 * @rm_vlan_promisc: Clear VLANs VSI promisc mode 2558 * 2559 * Configure VSI with all associated VLANs to given promiscuous mode(s) 2560 */ 2561 enum ice_status 2562 
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, 2563 bool rm_vlan_promisc) 2564 { 2565 struct ice_switch_info *sw = hw->switch_info; 2566 struct ice_fltr_list_entry *list_itr, *tmp; 2567 struct list_head vsi_list_head; 2568 struct list_head *vlan_head; 2569 struct mutex *vlan_lock; /* Lock to protect filter rule list */ 2570 enum ice_status status; 2571 u16 vlan_id; 2572 2573 INIT_LIST_HEAD(&vsi_list_head); 2574 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock; 2575 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules; 2576 mutex_lock(vlan_lock); 2577 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head, 2578 &vsi_list_head); 2579 mutex_unlock(vlan_lock); 2580 if (status) 2581 goto free_fltr_list; 2582 2583 list_for_each_entry(list_itr, &vsi_list_head, list_entry) { 2584 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id; 2585 if (rm_vlan_promisc) 2586 status = ice_clear_vsi_promisc(hw, vsi_handle, 2587 promisc_mask, vlan_id); 2588 else 2589 status = ice_set_vsi_promisc(hw, vsi_handle, 2590 promisc_mask, vlan_id); 2591 if (status) 2592 break; 2593 } 2594 2595 free_fltr_list: 2596 list_for_each_entry_safe(list_itr, tmp, &vsi_list_head, list_entry) { 2597 list_del(&list_itr->list_entry); 2598 devm_kfree(ice_hw_to_dev(hw), list_itr); 2599 } 2600 return status; 2601 } 2602 2603 /** 2604 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI 2605 * @hw: pointer to the hardware structure 2606 * @vsi_handle: VSI handle to remove filters from 2607 * @lkup: switch rule filter lookup type 2608 */ 2609 static void 2610 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle, 2611 enum ice_sw_lkup_type lkup) 2612 { 2613 struct ice_switch_info *sw = hw->switch_info; 2614 struct ice_fltr_list_entry *fm_entry; 2615 struct list_head remove_list_head; 2616 struct list_head *rule_head; 2617 struct ice_fltr_list_entry *tmp; 2618 struct mutex *rule_lock; /* Lock to protect filter rule list */ 2619 enum ice_status status; 2620 2621 INIT_LIST_HEAD(&remove_list_head); 2622 rule_lock = &sw->recp_list[lkup].filt_rule_lock; 2623 rule_head = &sw->recp_list[lkup].filt_rules; 2624 mutex_lock(rule_lock); 2625 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head, 2626 &remove_list_head); 2627 mutex_unlock(rule_lock); 2628 if (status) 2629 return; 2630 2631 switch (lkup) { 2632 case ICE_SW_LKUP_MAC: 2633 ice_remove_mac(hw, &remove_list_head); 2634 break; 2635 case ICE_SW_LKUP_VLAN: 2636 ice_remove_vlan(hw, &remove_list_head); 2637 break; 2638 case ICE_SW_LKUP_PROMISC: 2639 case ICE_SW_LKUP_PROMISC_VLAN: 2640 ice_remove_promisc(hw, lkup, &remove_list_head); 2641 break; 2642 case ICE_SW_LKUP_MAC_VLAN: 2643 case ICE_SW_LKUP_ETHERTYPE: 2644 case ICE_SW_LKUP_ETHERTYPE_MAC: 2645 case ICE_SW_LKUP_DFLT: 2646 case ICE_SW_LKUP_LAST: 2647 default: 2648 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup); 2649 break; 2650 } 2651 2652 list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) { 2653 list_del(&fm_entry->list_entry); 2654 devm_kfree(ice_hw_to_dev(hw), fm_entry); 2655 } 2656 } 2657 2658 /** 2659 * ice_remove_vsi_fltr - Remove all filters for a VSI 2660 * @hw: pointer to the hardware structure 2661 * @vsi_handle: VSI handle to remove filters from 2662 */ 2663 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle) 2664 { 2665 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC); 2666 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN); 2667 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC); 2668 
ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN); 2669 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT); 2670 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE); 2671 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC); 2672 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN); 2673 } 2674 2675 /** 2676 * ice_alloc_res_cntr - allocating resource counter 2677 * @hw: pointer to the hardware structure 2678 * @type: type of resource 2679 * @alloc_shared: if set it is shared else dedicated 2680 * @num_items: number of entries requested for FD resource type 2681 * @counter_id: counter index returned by AQ call 2682 */ 2683 enum ice_status 2684 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, 2685 u16 *counter_id) 2686 { 2687 struct ice_aqc_alloc_free_res_elem *buf; 2688 enum ice_status status; 2689 u16 buf_len; 2690 2691 /* Allocate resource */ 2692 buf_len = struct_size(buf, elem, 1); 2693 buf = kzalloc(buf_len, GFP_KERNEL); 2694 if (!buf) 2695 return ICE_ERR_NO_MEMORY; 2696 2697 buf->num_elems = cpu_to_le16(num_items); 2698 buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) & 2699 ICE_AQC_RES_TYPE_M) | alloc_shared); 2700 2701 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len, 2702 ice_aqc_opc_alloc_res, NULL); 2703 if (status) 2704 goto exit; 2705 2706 *counter_id = le16_to_cpu(buf->elem[0].e.sw_resp); 2707 2708 exit: 2709 kfree(buf); 2710 return status; 2711 } 2712 2713 /** 2714 * ice_free_res_cntr - free resource counter 2715 * @hw: pointer to the hardware structure 2716 * @type: type of resource 2717 * @alloc_shared: if set it is shared else dedicated 2718 * @num_items: number of entries to be freed for FD resource type 2719 * @counter_id: counter ID resource which needs to be freed 2720 */ 2721 enum ice_status 2722 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, 2723 u16 counter_id) 2724 { 2725 struct ice_aqc_alloc_free_res_elem *buf; 2726 enum ice_status status; 2727 u16 buf_len; 2728 2729 /* Free resource */ 2730 buf_len = struct_size(buf, elem, 1); 2731 buf = kzalloc(buf_len, GFP_KERNEL); 2732 if (!buf) 2733 return ICE_ERR_NO_MEMORY; 2734 2735 buf->num_elems = cpu_to_le16(num_items); 2736 buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) & 2737 ICE_AQC_RES_TYPE_M) | alloc_shared); 2738 buf->elem[0].e.sw_resp = cpu_to_le16(counter_id); 2739 2740 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len, 2741 ice_aqc_opc_free_res, NULL); 2742 if (status) 2743 ice_debug(hw, ICE_DBG_SW, 2744 "counter resource could not be freed\n"); 2745 2746 kfree(buf); 2747 return status; 2748 } 2749 2750 /** 2751 * ice_replay_vsi_fltr - Replay filters for requested VSI 2752 * @hw: pointer to the hardware structure 2753 * @vsi_handle: driver VSI handle 2754 * @recp_id: Recipe ID for which rules need to be replayed 2755 * @list_head: list for which filters need to be replayed 2756 * 2757 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle. 2758 * It is required to pass valid VSI handle. 
2759 */ 2760 static enum ice_status 2761 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id, 2762 struct list_head *list_head) 2763 { 2764 struct ice_fltr_mgmt_list_entry *itr; 2765 enum ice_status status = 0; 2766 u16 hw_vsi_id; 2767 2768 if (list_empty(list_head)) 2769 return status; 2770 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); 2771 2772 list_for_each_entry(itr, list_head, list_entry) { 2773 struct ice_fltr_list_entry f_entry; 2774 2775 f_entry.fltr_info = itr->fltr_info; 2776 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN && 2777 itr->fltr_info.vsi_handle == vsi_handle) { 2778 /* update the src in case it is VSI num */ 2779 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI) 2780 f_entry.fltr_info.src = hw_vsi_id; 2781 status = ice_add_rule_internal(hw, recp_id, &f_entry); 2782 if (status) 2783 goto end; 2784 continue; 2785 } 2786 if (!itr->vsi_list_info || 2787 !test_bit(vsi_handle, itr->vsi_list_info->vsi_map)) 2788 continue; 2789 /* Clearing it so that the logic can add it back */ 2790 clear_bit(vsi_handle, itr->vsi_list_info->vsi_map); 2791 f_entry.fltr_info.vsi_handle = vsi_handle; 2792 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI; 2793 /* update the src in case it is VSI num */ 2794 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI) 2795 f_entry.fltr_info.src = hw_vsi_id; 2796 if (recp_id == ICE_SW_LKUP_VLAN) 2797 status = ice_add_vlan_internal(hw, &f_entry); 2798 else 2799 status = ice_add_rule_internal(hw, recp_id, &f_entry); 2800 if (status) 2801 goto end; 2802 } 2803 end: 2804 return status; 2805 } 2806 2807 /** 2808 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists 2809 * @hw: pointer to the hardware structure 2810 * @vsi_handle: driver VSI handle 2811 * 2812 * Replays filters for requested VSI via vsi_handle. 2813 */ 2814 enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle) 2815 { 2816 struct ice_switch_info *sw = hw->switch_info; 2817 enum ice_status status = 0; 2818 u8 i; 2819 2820 for (i = 0; i < ICE_SW_LKUP_LAST; i++) { 2821 struct list_head *head; 2822 2823 head = &sw->recp_list[i].filt_replay_rules; 2824 status = ice_replay_vsi_fltr(hw, vsi_handle, i, head); 2825 if (status) 2826 return status; 2827 } 2828 return status; 2829 } 2830 2831 /** 2832 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules 2833 * @hw: pointer to the HW struct 2834 * 2835 * Deletes the filter replay rules. 2836 */ 2837 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw) 2838 { 2839 struct ice_switch_info *sw = hw->switch_info; 2840 u8 i; 2841 2842 if (!sw) 2843 return; 2844 2845 for (i = 0; i < ICE_SW_LKUP_LAST; i++) { 2846 if (!list_empty(&sw->recp_list[i].filt_replay_rules)) { 2847 struct list_head *l_head; 2848 2849 l_head = &sw->recp_list[i].filt_replay_rules; 2850 ice_rem_sw_rule_info(hw, l_head); 2851 } 2852 } 2853 } 2854
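/* Usage sketch (illustrative only, not part of the driver): how a caller
 * might build an m_list for ice_add_mac()/ice_remove_mac(). The hw pointer,
 * example_vsi_handle and example_mac are assumed to come from the caller's
 * context; error unwinding and list cleanup are trimmed.
 *
 *	struct ice_fltr_list_entry *entry;
 *	enum ice_status status;
 *	LIST_HEAD(m_list);
 *
 *	entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*entry), GFP_KERNEL);
 *	if (!entry)
 *		return ICE_ERR_NO_MEMORY;
 *	entry->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
 *	entry->fltr_info.fltr_act = ICE_FWD_TO_VSI;
 *	entry->fltr_info.src_id = ICE_SRC_ID_VSI;
 *	entry->fltr_info.vsi_handle = example_vsi_handle;
 *	ether_addr_copy(entry->fltr_info.l_data.mac.mac_addr, example_mac);
 *	list_add(&entry->list_entry, &m_list);
 *
 *	status = ice_add_mac(hw, &m_list);
 *	...
 *	status = ice_remove_mac(hw, &m_list);
 */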
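/* Usage sketch (illustrative only, not part of the driver): enabling and
 * later clearing unicast and multicast Rx promiscuous mode on a VSI via
 * ice_set_vsi_promisc() and ice_clear_vsi_promisc(). The hw pointer and
 * vsi_handle are assumed to come from the caller; a VLAN ID of 0 is passed
 * because no VLAN promiscuous bits are set in the mask.
 *
 *	u8 promisc_mask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_MCAST_RX;
 *	enum ice_status status;
 *
 *	status = ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, 0);
 *	if (status)
 *		return status;
 *	...
 *	status = ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask, 0);
 */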