// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_switch.h"

#define ICE_ETH_DA_OFFSET		0
#define ICE_ETH_ETHTYPE_OFFSET		12
#define ICE_ETH_VLAN_TCI_OFFSET		14
#define ICE_MAX_VLAN_ID			0xFFF

/* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
 * struct to configure any switch filter rules.
 * {DA (6 bytes), SA(6 bytes),
 * Ether type (2 bytes for header without VLAN tag) OR
 * VLAN tag (4 bytes for header with VLAN tag) }
 *
 * Word on Hardcoded values
 * byte 0 = 0x2: to identify it as locally administered DA MAC
 * byte 6 = 0x2: to identify it as locally administered SA MAC
 * byte 12 = 0x81 & byte 13 = 0x00:
 *	In case of VLAN filter the first two bytes define the ether type
 *	(0x8100) and the remaining two bytes are a placeholder for programming
 *	a given VLAN ID.
 *	In case of Ether type filter it is treated as header without VLAN tag
 *	and bytes 12 and 13 are used to program a given Ether type instead.
 */
#define DUMMY_ETH_HDR_LEN		16
static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
							0x2, 0, 0, 0, 0, 0,
							0x81, 0, 0, 0};

#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
#define ICE_SW_RULE_LG_ACT_SIZE(n) \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_lg_act) - \
	 sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
	 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
#define ICE_SW_RULE_VSI_LIST_SIZE(n) \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_vsi_list) - \
	 sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
	 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
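
/* Each ICE_SW_RULE_*_SIZE value above is the length in bytes of a single
 * switch rule element as sent to the admin queue: the fixed part of
 * ice_aqc_sw_rules_elem with the pdata union replaced by the variable-length
 * rule data that follows it (lookup rules additionally carry the dummy
 * Ethernet header; large-action and VSI list rules carry 'n' entries).
 */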

/**
 * ice_aq_alloc_free_res - command to allocate/free resources
 * @hw: pointer to the HW struct
 * @num_entries: number of resource entries in buffer
 * @buf: Indirect buffer to hold data parameters and response
 * @buf_size: size of buffer for indirect commands
 * @opc: pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Helper function to allocate/free resources using the admin queue commands
 */
static enum ice_status
ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
		      struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
		      enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aqc_alloc_free_res_cmd *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.sw_res_ctrl;

	if (!buf)
		return ICE_ERR_PARAM;

	if (buf_size < (num_entries * sizeof(buf->elem[0])))
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	cmd->num_entries = cpu_to_le16(num_entries);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_init_def_sw_recp - initialize the recipe book keeping tables
 * @hw: pointer to the HW struct
 *
 * Allocate memory for the entire recipe table and initialize the structures/
 * entries corresponding to basic recipes.
 */
enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
{
	struct ice_sw_recipe *recps;
	u8 i;

	recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES,
			     sizeof(*recps), GFP_KERNEL);
	if (!recps)
		return ICE_ERR_NO_MEMORY;

	for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
		recps[i].root_rid = i;
		INIT_LIST_HEAD(&recps[i].filt_rules);
		INIT_LIST_HEAD(&recps[i].filt_replay_rules);
		mutex_init(&recps[i].filt_rule_lock);
	}

	hw->switch_info->recp_list = recps;

	return 0;
}

/**
 * ice_aq_get_sw_cfg - get switch configuration
 * @hw: pointer to the hardware structure
 * @buf: pointer to the result buffer
 * @buf_size: length of the buffer available for response
 * @req_desc: pointer to requested descriptor
 * @num_elems: pointer to number of elements
 * @cd: pointer to command details structure or NULL
 *
 * Get switch configuration (0x0200) to be placed in 'buf'.
 * This admin command returns information such as initial VSI/port number
 * and switch ID it belongs to.
 *
 * NOTE: *req_desc is both an input/output parameter.
 * The caller of this function first calls this function with *req_desc set
 * to 0. If the response from f/w has *req_desc set to 0, all the switch
 * configuration information has been returned; if non-zero (meaning not all
 * the information was returned), the caller should call this function again
 * with *req_desc set to the previous value returned by f/w to get the
 * next block of switch configuration information.
 *
 * *num_elems is an output-only parameter. It reflects the number of elements
 * in the response buffer. The caller should use *num_elems while parsing the
 * response buffer.
 */
static enum ice_status
ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
		  u16 buf_size, u16 *req_desc, u16 *num_elems,
		  struct ice_sq_cd *cd)
{
	struct ice_aqc_get_sw_cfg *cmd;
	enum ice_status status;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
	cmd = &desc.params.get_sw_conf;
	cmd->element = cpu_to_le16(*req_desc);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status) {
		*req_desc = le16_to_cpu(cmd->element);
		*num_elems = le16_to_cpu(cmd->num_elems);
	}

	return status;
}

/**
 * ice_aq_add_vsi
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Add a VSI context to the hardware (0x0210)
 */
static enum ice_status
ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_add_update_free_vsi_resp *res;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.vsi_cmd;
	res = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);

	if (!vsi_ctx->alloc_from_pool)
		cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |
					   ICE_AQ_VSI_IS_VALID);
	cmd->vf_id = vsi_ctx->vf_num;

	cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
				 sizeof(vsi_ctx->info), cd);

	if (!status) {
		vsi_ctx->vsi_num = le16_to_cpu(res->vsi_num) & ICE_AQ_VSI_NUM_M;
		vsi_ctx->vsis_allocd = le16_to_cpu(res->vsi_used);
		vsi_ctx->vsis_unallocated = le16_to_cpu(res->vsi_free);
	}

	return status;
}

/**
 * ice_aq_free_vsi
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
 * @cd: pointer to command details structure or NULL
 *
 * Free VSI context info from hardware (0x0213)
 */
static enum ice_status
ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		bool keep_vsi_alloc, struct ice_sq_cd *cd)
{
	struct ice_aqc_add_update_free_vsi_resp *resp;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.vsi_cmd;
	resp = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);

	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
	if (keep_vsi_alloc)
		cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (!status) {
		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
	}

	return status;
}

/**
 * ice_aq_update_vsi
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Update VSI context in the hardware (0x0211)
 */
static enum ice_status
ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		  struct ice_sq_cd *cd)
{
	struct ice_aqc_add_update_free_vsi_resp *resp;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.vsi_cmd;
	resp = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);

	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
				 sizeof(vsi_ctx->info), cd);

	if (!status) {
		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
	}

	return status;
}

/**
 * ice_is_vsi_valid - check whether the VSI is valid or not
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * check whether the VSI is valid or not
 */
bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
{
	return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
}

/**
 * ice_get_hw_vsi_num - return the HW VSI number
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * return the HW VSI number
 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
 */
u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
{
	return hw->vsi_ctx[vsi_handle]->vsi_num;
}

/**
 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * return the VSI context entry for a given VSI handle
 */
struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
{
	return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
}

/**
 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 * @vsi: VSI context pointer
 *
 * save the VSI context entry for a given VSI handle
 */
static void
ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
{
	hw->vsi_ctx[vsi_handle] = vsi;
}

/**
 * ice_clear_vsi_ctx - clear the VSI context entry
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * clear the VSI context entry
 */
static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_vsi_ctx *vsi;

	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	if (vsi) {
		devm_kfree(ice_hw_to_dev(hw), vsi);
		hw->vsi_ctx[vsi_handle] = NULL;
	}
}

/**
 * ice_clear_all_vsi_ctx - clear all the VSI context entries
 * @hw: pointer to the HW struct
 */
void ice_clear_all_vsi_ctx(struct ice_hw *hw)
{
	u16 i;

	for (i = 0; i < ICE_MAX_VSI; i++)
		ice_clear_vsi_ctx(hw, i);
}
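
/* The helpers above maintain hw->vsi_ctx[], the table that maps a driver VSI
 * handle to the VSI context saved for it (and therefore to the HW VSI number
 * returned by firmware). ice_is_vsi_valid() and ice_get_hw_vsi_num() depend
 * on ice_save_vsi_ctx()/ice_clear_vsi_ctx() keeping this table in sync.
 */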

/**
 * ice_add_vsi - add VSI context to the hardware and VSI handle list
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle provided by drivers
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Add a VSI context to the hardware and also add it into the VSI handle list.
 * If this function gets called after reset for existing VSIs then update
 * the corresponding VSI handle list entry with the new HW VSI number.
 */
enum ice_status
ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	    struct ice_sq_cd *cd)
{
	struct ice_vsi_ctx *tmp_vsi_ctx;
	enum ice_status status;

	if (vsi_handle >= ICE_MAX_VSI)
		return ICE_ERR_PARAM;
	status = ice_aq_add_vsi(hw, vsi_ctx, cd);
	if (status)
		return status;
	tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!tmp_vsi_ctx) {
		/* Create a new VSI context */
		tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw),
					   sizeof(*tmp_vsi_ctx), GFP_KERNEL);
		if (!tmp_vsi_ctx) {
			ice_aq_free_vsi(hw, vsi_ctx, false, cd);
			return ICE_ERR_NO_MEMORY;
		}
		*tmp_vsi_ctx = *vsi_ctx;
		ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
	} else {
		/* update with new HW VSI num */
		if (tmp_vsi_ctx->vsi_num != vsi_ctx->vsi_num)
			tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
	}

	return 0;
}

/**
 * ice_free_vsi - free VSI context from hardware and VSI handle list
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle
 * @vsi_ctx: pointer to a VSI context struct
 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
 * @cd: pointer to command details structure or NULL
 *
 * Free VSI context info from hardware as well as from VSI handle list
 */
enum ice_status
ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	     bool keep_vsi_alloc, struct ice_sq_cd *cd)
{
	enum ice_status status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
	status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
	if (!status)
		ice_clear_vsi_ctx(hw, vsi_handle);
	return status;
}

/**
 * ice_update_vsi
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Update VSI context in the hardware
 */
enum ice_status
ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	       struct ice_sq_cd *cd)
{
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
	return ice_aq_update_vsi(hw, vsi_ctx, cd);
}
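
/* Typical caller-side flow for the wrappers above (a sketch; the exact
 * sequence depends on the caller): fill an ice_vsi_ctx, call ice_add_vsi()
 * with a driver-chosen vsi_handle, later adjust vsi_ctx->info and call
 * ice_update_vsi(), and finally call ice_free_vsi() to release the HW VSI
 * and drop the handle-to-context mapping.
 */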

/**
 * ice_aq_alloc_free_vsi_list
 * @hw: pointer to the HW struct
 * @vsi_list_id: VSI list ID returned or used for lookup
 * @lkup_type: switch rule filter lookup type
 * @opc: switch rules population command type - pass in the command opcode
 *
 * Allocate or free a VSI list resource
 */
static enum ice_status
ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
			   enum ice_sw_lkup_type lkup_type,
			   enum ice_adminq_opc opc)
{
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	struct ice_aqc_res_elem *vsi_ele;
	enum ice_status status;
	u16 buf_len;

	buf_len = sizeof(*sw_buf);
	sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
	if (!sw_buf)
		return ICE_ERR_NO_MEMORY;
	sw_buf->num_elems = cpu_to_le16(1);

	if (lkup_type == ICE_SW_LKUP_MAC ||
	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
	    lkup_type == ICE_SW_LKUP_PROMISC ||
	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN) {
		sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
	} else if (lkup_type == ICE_SW_LKUP_VLAN) {
		sw_buf->res_type =
			cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
	} else {
		status = ICE_ERR_PARAM;
		goto ice_aq_alloc_free_vsi_list_exit;
	}

	if (opc == ice_aqc_opc_free_res)
		sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id);

	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
	if (status)
		goto ice_aq_alloc_free_vsi_list_exit;

	if (opc == ice_aqc_opc_alloc_res) {
		vsi_ele = &sw_buf->elem[0];
		*vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp);
	}

ice_aq_alloc_free_vsi_list_exit:
	devm_kfree(ice_hw_to_dev(hw), sw_buf);
	return status;
}

/**
 * ice_aq_sw_rules - add/update/remove switch rules
 * @hw: pointer to the HW struct
 * @rule_list: pointer to switch rule population list
 * @rule_list_sz: total size of the rule list in bytes
 * @num_rules: number of switch rules in the rule_list
 * @opc: switch rules population command type - pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
 */
static enum ice_status
ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
		u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;

	if (opc != ice_aqc_opc_add_sw_rules &&
	    opc != ice_aqc_opc_update_sw_rules &&
	    opc != ice_aqc_opc_remove_sw_rules)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	desc.params.sw_rules.num_rules_fltr_entry_index =
		cpu_to_le16(num_rules);
	return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
}

/* ice_init_port_info - Initialize port_info with switch configuration data
 * @pi: pointer to port_info
 * @vsi_port_num: VSI number or port number
 * @type: Type of switch element (port or VSI)
 * @swid: switch ID of the switch the element is attached to
 * @pf_vf_num: PF or VF number
 * @is_vf: true if the element is a VF, false otherwise
 */
static void
ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
		   u16 swid, u16 pf_vf_num, bool is_vf)
{
	switch (type) {
	case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
		pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
		pi->sw_id = swid;
		pi->pf_vf_num = pf_vf_num;
		pi->is_vf = is_vf;
		pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
		pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
		break;
	default:
		ice_debug(pi->hw, ICE_DBG_SW,
			  "incorrect VSI/port type received\n");
		break;
	}
}

/* ice_get_initial_sw_cfg - Get initial port and default VSI data
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
{
	struct ice_aqc_get_sw_cfg_resp *rbuf;
	enum ice_status status;
	u16 req_desc = 0;
	u16 num_elems;
	u16 i;

	rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN,
			    GFP_KERNEL);

	if (!rbuf)
		return ICE_ERR_NO_MEMORY;

	/* Multiple calls to ice_aq_get_sw_cfg may be required
	 * to get all the switch configuration information. The need
	 * for additional calls is indicated by ice_aq_get_sw_cfg
	 * writing a non-zero value in req_desc
	 */
	do {
		status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
					   &req_desc, &num_elems, NULL);

		if (status)
			break;

		for (i = 0; i < num_elems; i++) {
			struct ice_aqc_get_sw_cfg_resp_elem *ele;
			u16 pf_vf_num, swid, vsi_port_num;
			bool is_vf = false;
			u8 type;

			ele = rbuf[i].elements;
			vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
				ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;

			pf_vf_num = le16_to_cpu(ele->pf_vf_num) &
				ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;

			swid = le16_to_cpu(ele->swid);

			if (le16_to_cpu(ele->pf_vf_num) &
			    ICE_AQC_GET_SW_CONF_RESP_IS_VF)
				is_vf = true;

			type = le16_to_cpu(ele->vsi_port_num) >>
				ICE_AQC_GET_SW_CONF_RESP_TYPE_S;

			if (type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
				/* FW VSI is not needed. Just continue. */
				continue;
			}

			ice_init_port_info(hw->port_info, vsi_port_num,
					   type, swid, pf_vf_num, is_vf);
		}
	} while (req_desc && !status);

	devm_kfree(ice_hw_to_dev(hw), (void *)rbuf);
	return status;
}

/**
 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
 * @hw: pointer to the hardware structure
 * @fi: filter info structure to fill/update
 *
 * This helper function populates the lb_en and lan_en elements of the provided
 * ice_fltr_info struct using the switch's type and characteristics of the
 * switch rule being configured.
 */
static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
{
	fi->lb_en = false;
	fi->lan_en = false;
	if ((fi->flag & ICE_FLTR_TX) &&
	    (fi->fltr_act == ICE_FWD_TO_VSI ||
	     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
	     fi->fltr_act == ICE_FWD_TO_Q ||
	     fi->fltr_act == ICE_FWD_TO_QGRP)) {
		/* Setting LB for prune actions will result in replicated
		 * packets to the internal switch that will be dropped.
		 */
		if (fi->lkup_type != ICE_SW_LKUP_VLAN)
			fi->lb_en = true;

		/* Set lan_en to TRUE if
		 * 1. The switch is a VEB AND
		 * 2
		 * 2.1 The lookup is a directional lookup like ethertype,
		 *     promiscuous, ethertype-MAC, promiscuous-VLAN
		 *     and default-port OR
		 * 2.2 The lookup is VLAN, OR
		 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
		 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
		 *
		 * OR
		 *
		 * The switch is a VEPA.
		 *
		 * In all other cases, the LAN enable has to be set to false.
		 */
		if (hw->evb_veb) {
			if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
			    fi->lkup_type == ICE_SW_LKUP_PROMISC ||
			    fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
			    fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
			    fi->lkup_type == ICE_SW_LKUP_DFLT ||
			    fi->lkup_type == ICE_SW_LKUP_VLAN ||
			    (fi->lkup_type == ICE_SW_LKUP_MAC &&
			     !is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
			    (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
			     !is_unicast_ether_addr(fi->l_data.mac.mac_addr)))
				fi->lan_en = true;
		} else {
			fi->lan_en = true;
		}
	}
}

/**
 * ice_fill_sw_rule - Helper function to fill switch rule structure
 * @hw: pointer to the hardware structure
 * @f_info: entry containing packet forwarding information
 * @s_rule: switch rule structure to be filled in based on f_info
 * @opc: switch rules population command type - pass in the command opcode
 */
static void
ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
		 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
{
	u16 vlan_id = ICE_MAX_VLAN_ID + 1;
	void *daddr = NULL;
	u16 eth_hdr_sz;
	u8 *eth_hdr;
	u32 act = 0;
	__be16 *off;
	u8 q_rgn;

	if (opc == ice_aqc_opc_remove_sw_rules) {
		s_rule->pdata.lkup_tx_rx.act = 0;
		s_rule->pdata.lkup_tx_rx.index =
			cpu_to_le16(f_info->fltr_rule_id);
		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
		return;
	}

	eth_hdr_sz = sizeof(dummy_eth_header);
	eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;

	/* initialize the ether header with a dummy header */
	memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz);
	ice_fill_sw_info(hw, f_info);

	switch (f_info->fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
			ICE_SINGLE_ACT_VSI_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_VSI_LIST:
		act |= ICE_SINGLE_ACT_VSI_LIST;
		act |= (f_info->fwd_id.vsi_list_id <<
			ICE_SINGLE_ACT_VSI_LIST_ID_S) &
			ICE_SINGLE_ACT_VSI_LIST_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_Q:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		break;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
			ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_QGRP:
		q_rgn = f_info->qgrp_size > 0 ?
			(u8)ilog2(f_info->qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
			ICE_SINGLE_ACT_Q_REGION_M;
		break;
	default:
		return;
	}

	if (f_info->lb_en)
		act |= ICE_SINGLE_ACT_LB_ENABLE;
	if (f_info->lan_en)
		act |= ICE_SINGLE_ACT_LAN_ENABLE;

	switch (f_info->lkup_type) {
	case ICE_SW_LKUP_MAC:
		daddr = f_info->l_data.mac.mac_addr;
		break;
	case ICE_SW_LKUP_VLAN:
		vlan_id = f_info->l_data.vlan.vlan_id;
		if (f_info->fltr_act == ICE_FWD_TO_VSI ||
		    f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
			act |= ICE_SINGLE_ACT_PRUNE;
			act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
		}
		break;
	case ICE_SW_LKUP_ETHERTYPE_MAC:
		daddr = f_info->l_data.ethertype_mac.mac_addr;
		/* fall-through */
	case ICE_SW_LKUP_ETHERTYPE:
		off = (__be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
		*off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		break;
	case ICE_SW_LKUP_PROMISC_VLAN:
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		/* fall-through */
	case ICE_SW_LKUP_PROMISC:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		break;
	default:
		break;
	}

	s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX) :
		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);

	/* Recipe set depending on lookup type */
	s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(f_info->lkup_type);
	s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(f_info->src);
	s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);

	if (daddr)
		ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr);

	if (!(vlan_id > ICE_MAX_VLAN_ID)) {
		off = (__be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
		*off = cpu_to_be16(vlan_id);
	}

	/* Create the switch rule with the final dummy Ethernet header */
	if (opc != ice_aqc_opc_update_sw_rules)
		s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(eth_hdr_sz);
}
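
/* After ice_fill_sw_rule() the dummy Ethernet header inside the rule carries
 * the match data for the chosen lookup: the destination MAC (when one is
 * used) at ICE_ETH_DA_OFFSET, the Ethertype at ICE_ETH_ETHTYPE_OFFSET for
 * Ethertype based rules, and the VLAN ID at ICE_ETH_VLAN_TCI_OFFSET for
 * VLAN based rules.
 */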

/**
 * ice_add_marker_act
 * @hw: pointer to the hardware structure
 * @m_ent: the management entry for which sw marker needs to be added
 * @sw_marker: sw marker to tag the Rx descriptor with
 * @l_id: large action resource ID
 *
 * Create a large action to hold software marker and update the switch rule
 * entry pointed by m_ent with newly created large action
 */
static enum ice_status
ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
		   u16 sw_marker, u16 l_id)
{
	struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
	/* For software marker we need 3 large actions
	 * 1. FWD action: FWD TO VSI or VSI LIST
	 * 2. GENERIC VALUE action to hold the profile ID
	 * 3. GENERIC VALUE action to hold the software marker ID
	 */
	const u16 num_lg_acts = 3;
	enum ice_status status;
	u16 lg_act_size;
	u16 rules_size;
	u32 act;
	u16 id;

	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;

	/* Create two back-to-back switch rules and submit them to the HW using
	 * one memory buffer:
	 * 1. Large Action
	 * 2. Look up Tx Rx
	 */
	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL);
	if (!lg_act)
		return ICE_ERR_NO_MEMORY;

	rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);

	/* Fill in the first switch rule i.e. large action */
	lg_act->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT);
	lg_act->pdata.lg_act.index = cpu_to_le16(l_id);
	lg_act->pdata.lg_act.size = cpu_to_le16(num_lg_acts);

	/* First action VSI forwarding or VSI list forwarding depending on how
	 * many VSIs
	 */
	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
		m_ent->fltr_info.fwd_id.hw_vsi_id;

	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
		ICE_LG_ACT_VSI_LIST_ID_M;
	if (m_ent->vsi_count > 1)
		act |= ICE_LG_ACT_VSI_LIST;
	lg_act->pdata.lg_act.act[0] = cpu_to_le32(act);

	/* Second action descriptor type */
	act = ICE_LG_ACT_GENERIC;

	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
	lg_act->pdata.lg_act.act[1] = cpu_to_le32(act);

	act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
	       ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;

	/* Third action Marker value */
	act |= ICE_LG_ACT_GENERIC;
	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
		ICE_LG_ACT_GENERIC_VALUE_M;

	lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);

	/* call the fill switch rule to fill the lookup Tx Rx structure */
	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
			 ice_aqc_opc_update_sw_rules);

	/* Update the action to point to the large action ID */
	rx_tx->pdata.lkup_tx_rx.act =
		cpu_to_le32(ICE_SINGLE_ACT_PTR |
			    ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
			     ICE_SINGLE_ACT_PTR_VAL_M));

	/* Use the filter rule ID of the previously created rule with single
	 * act. Once the update happens, hardware will treat this as large
	 * action
	 */
	rx_tx->pdata.lkup_tx_rx.index =
		cpu_to_le16(m_ent->fltr_info.fltr_rule_id);

	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
				 ice_aqc_opc_update_sw_rules, NULL);
	if (!status) {
		m_ent->lg_act_idx = l_id;
		m_ent->sw_marker_id = sw_marker;
	}

	devm_kfree(ice_hw_to_dev(hw), lg_act);
	return status;
}

/**
 * ice_create_vsi_list_map
 * @hw: pointer to the hardware structure
 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
 * @num_vsi: number of VSI handles in the array
 * @vsi_list_id: VSI list ID generated as part of allocate resource
 *
 * Helper function to create a new entry of VSI list ID to VSI mapping
 * using the given VSI list ID
 */
static struct ice_vsi_list_map_info *
ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
			u16 vsi_list_id)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_map;
	int i;

	v_map = devm_kcalloc(ice_hw_to_dev(hw), 1, sizeof(*v_map), GFP_KERNEL);
	if (!v_map)
		return NULL;

	v_map->vsi_list_id = vsi_list_id;
	v_map->ref_cnt = 1;
	for (i = 0; i < num_vsi; i++)
		set_bit(vsi_handle_arr[i], v_map->vsi_map);

	list_add(&v_map->list_entry, &sw->vsi_list_map_head);
	return v_map;
}

/**
 * ice_update_vsi_list_rule
 * @hw: pointer to the hardware structure
 * @vsi_handle_arr: array of VSI handles to form a VSI list
 * @num_vsi: number of VSI handles in the array
 * @vsi_list_id: VSI list ID generated as part of allocate resource
 * @remove: Boolean value to indicate if this is a remove action
 * @opc: switch rules population command type - pass in the command opcode
 * @lkup_type: lookup type of the filter
 *
 * Call AQ command to add a new switch rule or update existing switch rule
 * using the given VSI list ID
 */
static enum ice_status
ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
			 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
			 enum ice_sw_lkup_type lkup_type)
{
	struct ice_aqc_sw_rules_elem *s_rule;
	enum ice_status status;
	u16 s_rule_size;
	u16 type;
	int i;

	if (!num_vsi)
		return ICE_ERR_PARAM;

	if (lkup_type == ICE_SW_LKUP_MAC ||
	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
	    lkup_type == ICE_SW_LKUP_PROMISC ||
	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN)
		type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
			ICE_AQC_SW_RULES_T_VSI_LIST_SET;
	else if (lkup_type == ICE_SW_LKUP_VLAN)
		type = remove ?
			ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
			ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
	else
		return ICE_ERR_PARAM;

	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;
	for (i = 0; i < num_vsi; i++) {
		if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
			status = ICE_ERR_PARAM;
			goto exit;
		}
		/* AQ call requires hw_vsi_id(s) */
		s_rule->pdata.vsi_list.vsi[i] =
			cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
	}

	s_rule->type = cpu_to_le16(type);
	s_rule->pdata.vsi_list.number_vsi = cpu_to_le16(num_vsi);
	s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);

	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);

exit:
	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}

/**
 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
 * @hw: pointer to the HW struct
 * @vsi_handle_arr: array of VSI handles to form a VSI list
 * @num_vsi: number of VSI handles in the array
 * @vsi_list_id: stores the ID of the VSI list to be created
 * @lkup_type: switch rule filter's lookup type
 */
static enum ice_status
ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
			 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
{
	enum ice_status status;

	status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
					    ice_aqc_opc_alloc_res);
	if (status)
		return status;

	/* Update the newly created VSI list to include the specified VSIs */
	return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
					*vsi_list_id, false,
					ice_aqc_opc_add_sw_rules, lkup_type);
}

/**
 * ice_create_pkt_fwd_rule
 * @hw: pointer to the hardware structure
 * @f_entry: entry containing packet forwarding information
 *
 * Create switch rule with given filter information and add an entry
 * to the corresponding filter management list to track this switch rule
 * and VSI mapping
 */
static enum ice_status
ice_create_pkt_fwd_rule(struct ice_hw *hw,
			struct ice_fltr_list_entry *f_entry)
{
	struct ice_fltr_mgmt_list_entry *fm_entry;
	struct ice_aqc_sw_rules_elem *s_rule;
	enum ice_sw_lkup_type l_type;
	struct ice_sw_recipe *recp;
	enum ice_status status;

	s_rule = devm_kzalloc(ice_hw_to_dev(hw),
			      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;
	fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry),
				GFP_KERNEL);
	if (!fm_entry) {
		status = ICE_ERR_NO_MEMORY;
		goto ice_create_pkt_fwd_rule_exit;
	}

	fm_entry->fltr_info = f_entry->fltr_info;

	/* Initialize all the fields for the management entry */
	fm_entry->vsi_count = 1;
	fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
	fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
	fm_entry->counter_index = ICE_INVAL_COUNTER_ID;

	ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
			 ice_aqc_opc_add_sw_rules);

	status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
				 ice_aqc_opc_add_sw_rules, NULL);
	if (status) {
		devm_kfree(ice_hw_to_dev(hw), fm_entry);
		goto ice_create_pkt_fwd_rule_exit;
	}

	f_entry->fltr_info.fltr_rule_id =
		le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
	fm_entry->fltr_info.fltr_rule_id =
		le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);

	/* The book keeping entries will get removed when base driver
	 * calls remove filter AQ command
	 */
	l_type = fm_entry->fltr_info.lkup_type;
	recp = &hw->switch_info->recp_list[l_type];
	list_add(&fm_entry->list_entry, &recp->filt_rules);

ice_create_pkt_fwd_rule_exit:
	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}

/**
 * ice_update_pkt_fwd_rule
 * @hw: pointer to the hardware structure
 * @f_info: filter information for switch rule
 *
 * Call AQ command to update a previously created switch rule with a
 * VSI list ID
 */
static enum ice_status
ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
{
	struct ice_aqc_sw_rules_elem *s_rule;
	enum ice_status status;

	s_rule = devm_kzalloc(ice_hw_to_dev(hw),
			      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;

	ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);

	s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(f_info->fltr_rule_id);

	/* Update switch rule with new rule set to forward VSI list */
	status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
				 ice_aqc_opc_update_sw_rules, NULL);

	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}

/**
 * ice_update_sw_rule_bridge_mode
 * @hw: pointer to the HW struct
 *
 * Updates unicast switch filter rules based on VEB/VEPA mode
 */
enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *fm_entry;
	enum ice_status status = 0;
	struct list_head *rule_head;
	struct mutex *rule_lock; /* Lock to protect filter rule list */

	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;

	mutex_lock(rule_lock);
	list_for_each_entry(fm_entry, rule_head, list_entry) {
		struct ice_fltr_info *fi = &fm_entry->fltr_info;
		u8 *addr = fi->l_data.mac.mac_addr;

		/* Update unicast Tx rules to reflect the selected
		 * VEB/VEPA mode
		 */
		if ((fi->flag & ICE_FLTR_TX) && is_unicast_ether_addr(addr) &&
		    (fi->fltr_act == ICE_FWD_TO_VSI ||
		     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
		     fi->fltr_act == ICE_FWD_TO_Q ||
		     fi->fltr_act == ICE_FWD_TO_QGRP)) {
			status = ice_update_pkt_fwd_rule(hw, fi);
			if (status)
				break;
		}
	}

	mutex_unlock(rule_lock);

	return status;
}
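
/* ice_update_sw_rule_bridge_mode() above reprograms each existing unicast Tx
 * rule through ice_update_pkt_fwd_rule(), which refills the rule via
 * ice_fill_sw_rule()/ice_fill_sw_info() so that the LAN enable bit reflects
 * the currently selected VEB/VEPA bridge mode.
 */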

/**
 * ice_add_update_vsi_list
 * @hw: pointer to the hardware structure
 * @m_entry: pointer to current filter management list entry
 * @cur_fltr: filter information from the book keeping entry
 * @new_fltr: filter information with the new VSI to be added
 *
 * Call AQ command to add or update previously created VSI list with new VSI.
 *
 * Helper function to do book keeping associated with adding filter information
 * The algorithm to do the book keeping is described below:
 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
 *	if only one VSI has been added till now
 *		Allocate a new VSI list and add two VSIs
 *		to this list using switch rule command
 *		Update the previously created switch rule with the
 *		newly created VSI list ID
 *	if a VSI list was previously created
 *		Add the new VSI to the previously created VSI list set
 *		using the update switch rule command
 */
static enum ice_status
ice_add_update_vsi_list(struct ice_hw *hw,
			struct ice_fltr_mgmt_list_entry *m_entry,
			struct ice_fltr_info *cur_fltr,
			struct ice_fltr_info *new_fltr)
{
	enum ice_status status = 0;
	u16 vsi_list_id = 0;

	if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
	     cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
		return ICE_ERR_NOT_IMPL;

	if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
	     new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
	    (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
	     cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
		return ICE_ERR_NOT_IMPL;

	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
		/* Only one entry existed in the mapping and it was not already
		 * a part of a VSI list. So, create a VSI list with the old and
		 * new VSIs.
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];

		/* A rule already exists with the new VSI being added */
		if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
			return ICE_ERR_ALREADY_EXISTS;

		vsi_handle_arr[0] = cur_fltr->vsi_handle;
		vsi_handle_arr[1] = new_fltr->vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id,
						  new_fltr->lkup_type);
		if (status)
			return status;

		tmp_fltr = *new_fltr;
		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		/* Update the previous switch rule of "MAC forward to VSI" to
		 * "MAC fwd to VSI list"
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			return status;

		cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
		cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
		m_entry->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);

		/* If this entry was large action then the large action needs
		 * to be updated to point to FWD to VSI list
		 */
		if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
			status =
			    ice_add_marker_act(hw, m_entry,
					       m_entry->sw_marker_id,
					       m_entry->lg_act_idx);
	} else {
		u16 vsi_handle = new_fltr->vsi_handle;
		enum ice_adminq_opc opcode;

		if (!m_entry->vsi_list_info)
			return ICE_ERR_CFG;

		/* A rule already exists with the new VSI being added */
		if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
			return 0;

		/* Update the previously created VSI list set with
		 * the new VSI ID passed in
		 */
		vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
		opcode = ice_aqc_opc_update_sw_rules;

		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
						  vsi_list_id, false, opcode,
						  new_fltr->lkup_type);
		/* update VSI list mapping info with new VSI ID */
		if (!status)
			set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
	}
	if (!status)
		m_entry->vsi_count++;
	return status;
}

/**
 * ice_find_rule_entry - Search a rule entry
 * @hw: pointer to the hardware structure
 * @recp_id: lookup type for which the specified rule needs to be searched
 * @f_info: rule information
 *
 * Helper function to search for a given rule entry
 * Returns pointer to entry storing the rule if found
 */
static struct ice_fltr_mgmt_list_entry *
ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
{
	struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
	struct ice_switch_info *sw = hw->switch_info;
	struct list_head *list_head;

	list_head = &sw->recp_list[recp_id].filt_rules;
	list_for_each_entry(list_itr, list_head, list_entry) {
		if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
			    sizeof(f_info->l_data)) &&
		    f_info->flag == list_itr->fltr_info.flag) {
			ret = list_itr;
			break;
		}
	}
	return ret;
}

/**
 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
 * @hw: pointer to the hardware structure
 * @recp_id: lookup type for which VSI lists need to be searched
 * @vsi_handle: VSI handle to be found in VSI list
 * @vsi_list_id: VSI list ID found containing vsi_handle
 *
 * Helper function to search a VSI list with single entry containing given VSI
 * handle element. This can be extended further to search VSI list with more
 * than 1 vsi_count. Returns pointer to VSI list entry if found.
 */
static struct ice_vsi_list_map_info *
ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
			u16 *vsi_list_id)
{
	struct ice_vsi_list_map_info *map_info = NULL;
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *list_itr;
	struct list_head *list_head;

	list_head = &sw->recp_list[recp_id].filt_rules;
	list_for_each_entry(list_itr, list_head, list_entry) {
		if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) {
			map_info = list_itr->vsi_list_info;
			if (test_bit(vsi_handle, map_info->vsi_map)) {
				*vsi_list_id = map_info->vsi_list_id;
				return map_info;
			}
		}
	}
	return NULL;
}

/**
 * ice_add_rule_internal - add rule for a given lookup type
 * @hw: pointer to the hardware structure
 * @recp_id: lookup type (recipe ID) for which rule has to be added
 * @f_entry: structure containing MAC forwarding information
 *
 * Adds or updates the rule lists for a given recipe
 */
static enum ice_status
ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
		      struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_info *new_fltr, *cur_fltr;
	struct ice_fltr_mgmt_list_entry *m_entry;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = 0;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;
	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);

	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;

	mutex_lock(rule_lock);
	new_fltr = &f_entry->fltr_info;
	if (new_fltr->flag & ICE_FLTR_RX)
		new_fltr->src = hw->port_info->lport;
	else if (new_fltr->flag & ICE_FLTR_TX)
		new_fltr->src = f_entry->fltr_info.fwd_id.hw_vsi_id;

	m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
	if (!m_entry) {
		mutex_unlock(rule_lock);
		return ice_create_pkt_fwd_rule(hw, f_entry);
	}

	cur_fltr = &m_entry->fltr_info;
	status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
	mutex_unlock(rule_lock);

	return status;
}
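
/* ice_add_rule_internal() above either creates a brand new forwarding rule
 * (when no rule exists yet for the filter data) or hands the existing entry
 * to ice_add_update_vsi_list(), which converts it to or extends a VSI list
 * when additional VSIs subscribe to the same filter.
 */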

/**
 * ice_remove_vsi_list_rule
 * @hw: pointer to the hardware structure
 * @vsi_list_id: VSI list ID generated as part of allocate resource
 * @lkup_type: switch rule filter lookup type
 *
 * The VSI list should be emptied before this function is called to remove the
 * VSI list.
 */
static enum ice_status
ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
			 enum ice_sw_lkup_type lkup_type)
{
	struct ice_aqc_sw_rules_elem *s_rule;
	enum ice_status status;
	u16 s_rule_size;

	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;

	s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
	s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);

	/* Free the vsi_list resource that we allocated. It is assumed that the
	 * list is empty at this point.
	 */
	status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
					    ice_aqc_opc_free_res);

	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}

/**
 * ice_rem_update_vsi_list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle of the VSI to remove
 * @fm_list: filter management entry for which the VSI list management needs to
 * be done
 */
static enum ice_status
ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
			struct ice_fltr_mgmt_list_entry *fm_list)
{
	enum ice_sw_lkup_type lkup_type;
	enum ice_status status = 0;
	u16 vsi_list_id;

	if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)
		return ICE_ERR_PARAM;

	/* A rule with the VSI being removed does not exist */
	if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
		return ICE_ERR_DOES_NOT_EXIST;

	lkup_type = fm_list->fltr_info.lkup_type;
	vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,
					  lkup_type);
	if (status)
		return status;

	fm_list->vsi_count--;
	clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);

	if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
		struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;
		u16 rem_vsi_handle;

		rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
						ICE_MAX_VSI);
		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
			return ICE_ERR_OUT_OF_RANGE;

		/* Make sure VSI list is empty before removing it below */
		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
						  vsi_list_id, true,
						  ice_aqc_opc_update_sw_rules,
						  lkup_type);
		if (status)
			return status;

		tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr_info.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		tmp_fltr_info.vsi_handle = rem_vsi_handle;
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
		if (status) {
			ice_debug(hw, ICE_DBG_SW,
				  "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
				  tmp_fltr_info.fwd_id.hw_vsi_id, status);
			return status;
		}

		fm_list->fltr_info = tmp_fltr_info;
	}

	if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
	    (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;

		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
		if (status) {
			ice_debug(hw, ICE_DBG_SW,
				  "Failed to remove VSI list %d, error %d\n",
				  vsi_list_id, status);
			return status;
		}

		list_del(&vsi_list_info->list_entry);
		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
		fm_list->vsi_list_info = NULL;
	}

	return status;
}
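
/* Removal book keeping done by ice_rem_update_vsi_list() above: when a
 * non-VLAN list drops to a single VSI the rule is converted back to a plain
 * ICE_FWD_TO_VSI rule and the now-unused VSI list is freed; VLAN (prune)
 * lists are only freed once the last VSI has been removed from them.
 */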

/**
 * ice_remove_rule_internal - Remove a filter rule of a given type
 * @hw: pointer to the hardware structure
 * @recp_id: recipe ID for which the rule needs to be removed
 * @f_entry: rule entry containing filter information
 */
static enum ice_status
ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
			 struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *list_elem;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = 0;
	bool remove_rule = false;
	u16 vsi_handle;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;
	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);

	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
	mutex_lock(rule_lock);
	list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
	if (!list_elem) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto exit;
	}

	if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
		remove_rule = true;
	} else if (!list_elem->vsi_list_info) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto exit;
	} else if (list_elem->vsi_list_info->ref_cnt > 1) {
		/* a ref_cnt > 1 indicates that the vsi_list is being
		 * shared by multiple rules. Decrement the ref_cnt and
		 * remove this rule, but do not modify the list, as it
		 * is in-use by other rules.
		 */
		list_elem->vsi_list_info->ref_cnt--;
		remove_rule = true;
	} else {
		/* a ref_cnt of 1 indicates the vsi_list is only used
		 * by one rule. However, the original removal request is only
		 * for a single VSI. Update the vsi_list first, and only
		 * remove the rule if there are no further VSIs in this list.
		 */
		vsi_handle = f_entry->fltr_info.vsi_handle;
		status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
		if (status)
			goto exit;
		/* if VSI count goes to zero after updating the VSI list */
		if (list_elem->vsi_count == 0)
			remove_rule = true;
	}

	if (remove_rule) {
		/* Remove the lookup rule */
		struct ice_aqc_sw_rules_elem *s_rule;

		s_rule = devm_kzalloc(ice_hw_to_dev(hw),
				      ICE_SW_RULE_RX_TX_NO_HDR_SIZE,
				      GFP_KERNEL);
		if (!s_rule) {
			status = ICE_ERR_NO_MEMORY;
			goto exit;
		}

		ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
				 ice_aqc_opc_remove_sw_rules);

		status = ice_aq_sw_rules(hw, s_rule,
					 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
					 ice_aqc_opc_remove_sw_rules, NULL);
		if (status)
			goto exit;

		/* Remove a book keeping from the list */
		devm_kfree(ice_hw_to_dev(hw), s_rule);

		list_del(&list_elem->list_entry);
		devm_kfree(ice_hw_to_dev(hw), list_elem);
	}
exit:
	mutex_unlock(rule_lock);
	return status;
}

/**
 * ice_add_mac - Add a MAC address based filter rule
 * @hw: pointer to the hardware structure
 * @m_list: list of MAC addresses and forwarding information
 *
 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
 * multiple unicast addresses, the function assumes that all the
 * addresses are unique in a given add_mac call. It doesn't
 * check for duplicates in this case; removing duplicates from a given
 * list should be taken care of in the caller of this function.
 */
enum ice_status
ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
{
	struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
	struct ice_fltr_list_entry *m_list_itr;
	struct list_head *rule_head;
	u16 elem_sent, total_elem_left;
	struct ice_switch_info *sw;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = 0;
	u16 num_unicast = 0;
	u16 s_rule_size;

	if (!m_list || !hw)
		return ICE_ERR_PARAM;

	s_rule = NULL;
	sw = hw->switch_info;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
		u16 vsi_handle;
		u16 hw_vsi_id;

		m_list_itr->fltr_info.flag = ICE_FLTR_TX;
		vsi_handle = m_list_itr->fltr_info.vsi_handle;
		if (!ice_is_vsi_valid(hw, vsi_handle))
			return ICE_ERR_PARAM;
		hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
		m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
		/* update the src in case it is VSI num */
		if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
			return ICE_ERR_PARAM;
		m_list_itr->fltr_info.src = hw_vsi_id;
		if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
		    is_zero_ether_addr(add))
			return ICE_ERR_PARAM;
		if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
			/* Don't overwrite the unicast address */
			mutex_lock(rule_lock);
			if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
						&m_list_itr->fltr_info)) {
				mutex_unlock(rule_lock);
				return ICE_ERR_ALREADY_EXISTS;
			}
			mutex_unlock(rule_lock);
			num_unicast++;
		} else if (is_multicast_ether_addr(add) ||
			   (is_unicast_ether_addr(add) && hw->ucast_shared)) {
			m_list_itr->status =
				ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
						      m_list_itr);
			if (m_list_itr->status)
				return m_list_itr->status;
		}
	}

	mutex_lock(rule_lock);
	/* Exit if no suitable entries were found for adding bulk switch rule */
	if (!num_unicast) {
		status = 0;
		goto ice_add_mac_exit;
	}

	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;

	/* Allocate switch rule buffer for the bulk update for unicast */
	s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
			      GFP_KERNEL);
	if (!s_rule) {
		status = ICE_ERR_NO_MEMORY;
		goto ice_add_mac_exit;
	}

	r_iter = s_rule;
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];

		if (is_unicast_ether_addr(mac_addr)) {
			ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
					 ice_aqc_opc_add_sw_rules);
			r_iter = (struct ice_aqc_sw_rules_elem *)
				((u8 *)r_iter + s_rule_size);
		}
	}

	/* Call AQ bulk switch rule update for all unicast addresses */
	r_iter = s_rule;
	/* Call AQ switch rule in AQ_MAX chunk */
	for (total_elem_left = num_unicast; total_elem_left > 0;
	     total_elem_left -= elem_sent) {
		struct ice_aqc_sw_rules_elem *entry = r_iter;

		elem_sent = min(total_elem_left,
				(u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
		status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
					 elem_sent, ice_aqc_opc_add_sw_rules,
					 NULL);
		if (status)
			goto ice_add_mac_exit;
		r_iter = (struct ice_aqc_sw_rules_elem *)
			((u8 *)r_iter + (elem_sent * s_rule_size));
	}

	/* Fill up rule ID based on the value returned from FW */
	r_iter = s_rule;
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
		struct ice_fltr_mgmt_list_entry *fm_entry;

		if (is_unicast_ether_addr(mac_addr)) {
			f_info->fltr_rule_id =
				le16_to_cpu(r_iter->pdata.lkup_tx_rx.index);
			f_info->fltr_act = ICE_FWD_TO_VSI;
			/* Create an entry to track this MAC address */
			fm_entry = devm_kzalloc(ice_hw_to_dev(hw),
						sizeof(*fm_entry), GFP_KERNEL);
			if (!fm_entry) {
				status = ICE_ERR_NO_MEMORY;
				goto ice_add_mac_exit;
			}
			fm_entry->fltr_info = *f_info;
			fm_entry->vsi_count = 1;
			/* The book keeping entries will get removed when
			 * base driver calls remove filter AQ command
			 */

			list_add(&fm_entry->list_entry, rule_head);
			r_iter = (struct ice_aqc_sw_rules_elem *)
				((u8 *)r_iter + s_rule_size);
		}
	}

ice_add_mac_exit:
	mutex_unlock(rule_lock);
	if (s_rule)
		devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}
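
/* Sketch of a typical ice_add_mac() call (caller side; exact setup depends on
 * the caller): build a list of ice_fltr_list_entry items with lkup_type
 * ICE_SW_LKUP_MAC, src_id ICE_SRC_ID_VSI, fltr_act ICE_FWD_TO_VSI, and the
 * vsi_handle and mac_addr filled in, then call ice_add_mac(hw, &list) and
 * check both the return value and each entry's ->status field.
 */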

/**
 * ice_add_vlan_internal - Add one VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @f_entry: filter entry containing one VLAN's information
 */
static enum ice_status
ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *v_list_itr;
	struct ice_fltr_info *new_fltr, *cur_fltr;
	enum ice_sw_lkup_type lkup_type;
	u16 vsi_list_id = 0, vsi_handle;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = 0;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;

	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
	new_fltr = &f_entry->fltr_info;

	/* VLAN ID should only be 12 bits */
	if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
		return ICE_ERR_PARAM;

	if (new_fltr->src_id != ICE_SRC_ID_VSI)
		return ICE_ERR_PARAM;

	new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
	lkup_type = new_fltr->lkup_type;
	vsi_handle = new_fltr->vsi_handle;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	mutex_lock(rule_lock);
	v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
	if (!v_list_itr) {
		struct ice_vsi_list_map_info *map_info = NULL;

		if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
			/* All VLAN pruning rules use a VSI list. Check if
			 * there is already a VSI list containing the VSI that
			 * we want to add. If found, reuse the same vsi_list_id
			 * for this new VLAN rule; otherwise create a new list.
			 */
			map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
							   vsi_handle,
							   &vsi_list_id);
			if (!map_info) {
				status = ice_create_vsi_list_rule(hw,
								  &vsi_handle,
								  1,
								  &vsi_list_id,
								  lkup_type);
				if (status)
					goto exit;
			}
			/* Convert the action to forwarding to a VSI list. */
			new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
			new_fltr->fwd_id.vsi_list_id = vsi_list_id;
		}

		status = ice_create_pkt_fwd_rule(hw, f_entry);
		if (!status) {
			v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
							 new_fltr);
			if (!v_list_itr) {
				status = ICE_ERR_DOES_NOT_EXIST;
				goto exit;
			}
			/* reuse VSI list for new rule and increment ref_cnt */
			if (map_info) {
				v_list_itr->vsi_list_info = map_info;
				map_info->ref_cnt++;
			} else {
				v_list_itr->vsi_list_info =
					ice_create_vsi_list_map(hw, &vsi_handle,
								1, vsi_list_id);
			}
		}
	} else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
		/* Update the existing VSI list to add the new VSI ID only if
		 * it is used by exactly one VLAN rule.
		 */
		cur_fltr = &v_list_itr->fltr_info;
		status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
						 new_fltr);
	} else {
		/* The VLAN rule exists and the VSI list used by this rule is
		 * referenced by more than one VLAN rule: create a new VSI
		 * list that appends the new VSI to the previous VSI and
		 * update the existing VLAN rule to point to the new VSI
		 * list ID.
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];
		u16 cur_handle;

		/* The current implementation only supports reusing a VSI list
		 * with a single VSI; we should never hit the condition below.
		 */
		if (v_list_itr->vsi_count > 1 &&
		    v_list_itr->vsi_list_info->ref_cnt > 1) {
			ice_debug(hw, ICE_DBG_SW,
				  "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
			status = ICE_ERR_CFG;
			goto exit;
		}

		cur_handle =
			find_first_bit(v_list_itr->vsi_list_info->vsi_map,
				       ICE_MAX_VSI);

		/* A rule already exists with the new VSI being added */
		if (cur_handle == vsi_handle) {
			status = ICE_ERR_ALREADY_EXISTS;
			goto exit;
		}

		vsi_handle_arr[0] = cur_handle;
		vsi_handle_arr[1] = vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id, lkup_type);
		if (status)
			goto exit;

		tmp_fltr = v_list_itr->fltr_info;
		tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		/* Update the previous switch rule to the new VSI list, which
		 * includes the currently requested VSI.
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			goto exit;

		/* Before overriding the VSI list map info, decrement the
		 * ref_cnt of the previous VSI list.
		 */
		v_list_itr->vsi_list_info->ref_cnt--;

		/* now update to newly created list */
		v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
		v_list_itr->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);
		v_list_itr->vsi_count++;
	}

exit:
	mutex_unlock(rule_lock);
	return status;
}

/**
 * ice_add_vlan - Add VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @v_list: list of VLAN entries and forwarding information
 */
enum ice_status
ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
{
	struct ice_fltr_list_entry *v_list_itr;

	if (!v_list || !hw)
		return ICE_ERR_PARAM;

	list_for_each_entry(v_list_itr, v_list, list_entry) {
		if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
			return ICE_ERR_PARAM;
		v_list_itr->fltr_info.flag = ICE_FLTR_TX;
		v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
		if (v_list_itr->status)
			return v_list_itr->status;
	}
	return 0;
}
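
/* Example (illustrative sketch only, not part of the driver): adding a
 * single VLAN filter for a VSI. The vsi_handle variable and VLAN ID 100
 * are assumptions for demonstration.
 *
 *	struct ice_fltr_list_entry entry = { 0 };
 *	enum ice_status status;
 *	LIST_HEAD(v_list);
 *
 *	entry.fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
 *	entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
 *	entry.fltr_info.src_id = ICE_SRC_ID_VSI;
 *	entry.fltr_info.vsi_handle = vsi_handle;
 *	entry.fltr_info.l_data.vlan.vlan_id = 100;
 *	list_add(&entry.list_entry, &v_list);
 *
 *	status = ice_add_vlan(hw, &v_list);
 */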

/**
 * ice_rem_sw_rule_info - delete the bookkeeping entries on a rule list
 * @hw: pointer to the hardware structure
 * @rule_head: pointer to the switch list structure that we want to delete
 */
static void
ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head)
{
	if (!list_empty(rule_head)) {
		struct ice_fltr_mgmt_list_entry *entry;
		struct ice_fltr_mgmt_list_entry *tmp;

		list_for_each_entry_safe(entry, tmp, rule_head, list_entry) {
			list_del(&entry->list_entry);
			devm_kfree(ice_hw_to_dev(hw), entry);
		}
	}
}

/**
 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to set as default
 * @set: true to add the above mentioned switch rule, false to remove it
 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
 *
 * Add or remove the filter rule that sets/unsets the given VSI as the
 * default VSI for the switch (represented by the SWID).
 */
enum ice_status
ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
{
	struct ice_aqc_sw_rules_elem *s_rule;
	struct ice_fltr_info f_info;
	enum ice_adminq_opc opcode;
	enum ice_status status;
	u16 s_rule_size;
	u16 hw_vsi_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
			    ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;

	memset(&f_info, 0, sizeof(f_info));

	f_info.lkup_type = ICE_SW_LKUP_DFLT;
	f_info.flag = direction;
	f_info.fltr_act = ICE_FWD_TO_VSI;
	f_info.fwd_id.hw_vsi_id = hw_vsi_id;

	if (f_info.flag & ICE_FLTR_RX) {
		f_info.src = hw->port_info->lport;
		f_info.src_id = ICE_SRC_ID_LPORT;
		if (!set)
			f_info.fltr_rule_id =
				hw->port_info->dflt_rx_vsi_rule_id;
	} else if (f_info.flag & ICE_FLTR_TX) {
		f_info.src_id = ICE_SRC_ID_VSI;
		f_info.src = hw_vsi_id;
		if (!set)
			f_info.fltr_rule_id =
				hw->port_info->dflt_tx_vsi_rule_id;
	}

	if (set)
		opcode = ice_aqc_opc_add_sw_rules;
	else
		opcode = ice_aqc_opc_remove_sw_rules;

	ice_fill_sw_rule(hw, &f_info, s_rule, opcode);

	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
	if (status || !(f_info.flag & ICE_FLTR_TX_RX))
		goto out;
	if (set) {
		u16 index = le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);

		if (f_info.flag & ICE_FLTR_TX) {
			hw->port_info->dflt_tx_vsi_num = hw_vsi_id;
			hw->port_info->dflt_tx_vsi_rule_id = index;
		} else if (f_info.flag & ICE_FLTR_RX) {
			hw->port_info->dflt_rx_vsi_num = hw_vsi_id;
			hw->port_info->dflt_rx_vsi_rule_id = index;
		}
	} else {
		if (f_info.flag & ICE_FLTR_TX) {
			hw->port_info->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
			hw->port_info->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
		} else if (f_info.flag & ICE_FLTR_RX) {
			hw->port_info->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
			hw->port_info->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
		}
	}

out:
	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}
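
/* Example (illustrative sketch only, not part of the driver): setting a
 * VSI as the default VSI for the switch in the RX direction and clearing
 * it again later. The vsi_handle variable is an assumption.
 *
 *	status = ice_cfg_dflt_vsi(hw, vsi_handle, true, ICE_FLTR_RX);
 *	// ... traffic not matched by other rules is forwarded to the VSI ...
 *	status = ice_cfg_dflt_vsi(hw, vsi_handle, false, ICE_FLTR_RX);
 */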

/**
 * ice_remove_mac - remove a MAC address based filter rule
 * @hw: pointer to the hardware structure
 * @m_list: list of MAC addresses and forwarding information
 *
 * This function removes either a MAC filter rule or a specific VSI from a
 * VSI list for a multicast MAC address.
 *
 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
 * ice_add_mac. The caller should be aware that this call will only work if
 * all the entries passed into m_list were added previously; it will not
 * attempt a partial remove of the entries that were found.
 */
enum ice_status
ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
{
	struct ice_fltr_list_entry *list_itr, *tmp;

	if (!m_list)
		return ICE_ERR_PARAM;

	list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) {
		enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;

		if (l_type != ICE_SW_LKUP_MAC)
			return ICE_ERR_PARAM;
		list_itr->status = ice_remove_rule_internal(hw,
							    ICE_SW_LKUP_MAC,
							    list_itr);
		if (list_itr->status)
			return list_itr->status;
	}
	return 0;
}
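
/* Example (illustrative sketch only, not part of the driver): removing a
 * previously added MAC filter. The list is built the same way as for
 * ice_add_mac() above; vsi_handle and mac_addr are assumptions.
 *
 *	struct ice_fltr_list_entry entry = { 0 };
 *	enum ice_status status;
 *	LIST_HEAD(m_list);
 *
 *	entry.fltr_info.lkup_type = ICE_SW_LKUP_MAC;
 *	entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
 *	entry.fltr_info.src_id = ICE_SRC_ID_VSI;
 *	entry.fltr_info.vsi_handle = vsi_handle;
 *	ether_addr_copy(entry.fltr_info.l_data.mac.mac_addr, mac_addr);
 *	list_add(&entry.list_entry, &m_list);
 *
 *	status = ice_remove_mac(hw, &m_list);
 */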

/**
 * ice_remove_vlan - Remove VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @v_list: list of VLAN entries and forwarding information
 */
enum ice_status
ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
{
	struct ice_fltr_list_entry *v_list_itr, *tmp;

	if (!v_list || !hw)
		return ICE_ERR_PARAM;

	list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
		enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;

		if (l_type != ICE_SW_LKUP_VLAN)
			return ICE_ERR_PARAM;
		v_list_itr->status = ice_remove_rule_internal(hw,
							      ICE_SW_LKUP_VLAN,
							      v_list_itr);
		if (v_list_itr->status)
			return v_list_itr->status;
	}
	return 0;
}

/**
 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
 * @fm_entry: filter entry to inspect
 * @vsi_handle: VSI handle to compare with filter info
 */
static bool
ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
{
	return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
		 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
		(fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
		 (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
}

/**
 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @vsi_list_head: pointer to the list to add entry to
 * @fi: pointer to fltr_info of filter entry to copy & add
 *
 * Helper function, used when creating a list of filters to remove from
 * a specific VSI. The entry added to vsi_list_head is a COPY of the
 * original filter entry, with the exception of the fltr_info.fltr_act and
 * fltr_info.fwd_id fields. These are set such that later logic can
 * extract which VSI to remove the filter from, and pass on that information.
 */
static enum ice_status
ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
			       struct list_head *vsi_list_head,
			       struct ice_fltr_info *fi)
{
	struct ice_fltr_list_entry *tmp;

	/* this memory is freed up in the caller function
	 * once filters for this VSI are removed
	 */
	tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return ICE_ERR_NO_MEMORY;

	tmp->fltr_info = *fi;

	/* Overwrite these fields to indicate which VSI to remove filter from,
	 * so find and remove logic can extract the information from the
	 * list entries. Note that original entries will still have proper
	 * values.
	 */
	tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	tmp->fltr_info.vsi_handle = vsi_handle;
	tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	list_add(&tmp->list_entry, vsi_list_head);

	return 0;
}

/**
 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @lkup_list_head: pointer to the list that has certain lookup type filters
 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
 *
 * Locates all filters in lkup_list_head that are used by the given VSI,
 * and adds COPIES of those entries to vsi_list_head (intended to be used
 * to remove the listed filters).
 * Note that this means all entries in vsi_list_head must be explicitly
 * deallocated by the caller when done with the list.
 */
static enum ice_status
ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
			 struct list_head *lkup_list_head,
			 struct list_head *vsi_list_head)
{
	struct ice_fltr_mgmt_list_entry *fm_entry;
	enum ice_status status = 0;

	/* check to make sure VSI ID is valid and within boundary */
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
		struct ice_fltr_info *fi;

		fi = &fm_entry->fltr_info;
		if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
			continue;

		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
							vsi_list_head, fi);
		if (status)
			return status;
	}
	return status;
}
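
/* Example (illustrative sketch only): the caller-side pattern expected by
 * ice_add_to_vsi_fltr_list(). The copies placed on the local list must be
 * freed by the caller once consumed, exactly as the functions later in
 * this file do; rule_lock and rule_head are assumptions standing in for
 * the recipe's lock and filter list.
 *
 *	struct ice_fltr_list_entry *itr, *tmp;
 *	LIST_HEAD(remove_list);
 *
 *	mutex_lock(rule_lock);
 *	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
 *					  &remove_list);
 *	mutex_unlock(rule_lock);
 *
 *	// ... hand remove_list to the matching remove helper ...
 *
 *	list_for_each_entry_safe(itr, tmp, &remove_list, list_entry) {
 *		list_del(&itr->list_entry);
 *		devm_kfree(ice_hw_to_dev(hw), itr);
 *	}
 */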

/**
 * ice_determine_promisc_mask
 * @fi: filter info to parse
 *
 * Helper function to determine which ICE_PROMISC_ mask corresponds
 * to the given filter info.
 */
static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
{
	u16 vid = fi->l_data.mac_vlan.vlan_id;
	u8 *macaddr = fi->l_data.mac.mac_addr;
	bool is_tx_fltr = false;
	u8 promisc_mask = 0;

	if (fi->flag == ICE_FLTR_TX)
		is_tx_fltr = true;

	if (is_broadcast_ether_addr(macaddr))
		promisc_mask |= is_tx_fltr ?
			ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
	else if (is_multicast_ether_addr(macaddr))
		promisc_mask |= is_tx_fltr ?
			ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
	else if (is_unicast_ether_addr(macaddr))
		promisc_mask |= is_tx_fltr ?
			ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
	if (vid)
		promisc_mask |= is_tx_fltr ?
			ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;

	return promisc_mask;
}

/**
 * ice_remove_promisc - Remove promisc based filter rules
 * @hw: pointer to the hardware structure
 * @recp_id: recipe ID for which the rule needs to be removed
 * @v_list: list of promisc entries
 */
static enum ice_status
ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
		   struct list_head *v_list)
{
	struct ice_fltr_list_entry *v_list_itr, *tmp;

	list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
		v_list_itr->status =
			ice_remove_rule_internal(hw, recp_id, v_list_itr);
		if (v_list_itr->status)
			return v_list_itr->status;
	}
	return 0;
}

/**
 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to clear mode
 * @promisc_mask: mask of promiscuous config bits to clear
 * @vid: VLAN ID to clear VLAN promiscuous
 */
enum ice_status
ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
		      u16 vid)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *fm_entry, *tmp;
	struct list_head remove_list_head;
	struct ice_fltr_mgmt_list_entry *itr;
	struct list_head *rule_head;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = 0;
	u8 recipe_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	if (vid)
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
	else
		recipe_id = ICE_SW_LKUP_PROMISC;

	rule_head = &sw->recp_list[recipe_id].filt_rules;
	rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;

	INIT_LIST_HEAD(&remove_list_head);

	mutex_lock(rule_lock);
	list_for_each_entry(itr, rule_head, list_entry) {
		u8 fltr_promisc_mask = 0;

		if (!ice_vsi_uses_fltr(itr, vsi_handle))
			continue;

		fltr_promisc_mask |=
			ice_determine_promisc_mask(&itr->fltr_info);

		/* Skip if filter is not completely specified by given mask */
		if (fltr_promisc_mask & ~promisc_mask)
			continue;

		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
							&remove_list_head,
							&itr->fltr_info);
		if (status) {
			mutex_unlock(rule_lock);
			goto free_fltr_list;
		}
	}
	mutex_unlock(rule_lock);

	status = ice_remove_promisc(hw, recipe_id, &remove_list_head);

free_fltr_list:
	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
		list_del(&fm_entry->list_entry);
		devm_kfree(ice_hw_to_dev(hw), fm_entry);
	}

	return status;
}
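
/* Example (illustrative sketch only): clearing unicast and multicast RX
 * promiscuous mode on a VSI for non-VLAN traffic (vid of 0 selects the
 * ICE_SW_LKUP_PROMISC recipe above). The vsi_handle variable is an
 * assumption.
 *
 *	u8 mask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_MCAST_RX;
 *	enum ice_status status;
 *
 *	status = ice_clear_vsi_promisc(hw, vsi_handle, mask, 0);
 */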

/**
 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: mask of promiscuous config bits
 * @vid: VLAN ID to set VLAN promiscuous
 */
enum ice_status
ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
{
	enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
	struct ice_fltr_list_entry f_list_entry;
	struct ice_fltr_info new_fltr;
	enum ice_status status = 0;
	bool is_tx_fltr;
	u16 hw_vsi_id;
	int pkt_type;
	u8 recipe_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	memset(&new_fltr, 0, sizeof(new_fltr));

	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
		new_fltr.l_data.mac_vlan.vlan_id = vid;
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
	} else {
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
		recipe_id = ICE_SW_LKUP_PROMISC;
	}

	/* Separate filters must be set for each direction/packet type
	 * combination, so we will loop over the mask value, store the
	 * individual type, and clear it out in the input mask as it
	 * is found.
	 */
	while (promisc_mask) {
		u8 *mac_addr;

		pkt_type = 0;
		is_tx_fltr = false;

		if (promisc_mask & ICE_PROMISC_UCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_RX;
			pkt_type = UCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_TX;
			pkt_type = UCAST_FLTR;
			is_tx_fltr = true;
		} else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_RX;
			pkt_type = MCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_TX;
			pkt_type = MCAST_FLTR;
			is_tx_fltr = true;
		} else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_RX;
			pkt_type = BCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_TX;
			pkt_type = BCAST_FLTR;
			is_tx_fltr = true;
		}

		/* Check for VLAN promiscuous flag */
		if (promisc_mask & ICE_PROMISC_VLAN_RX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_RX;
		} else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_TX;
			is_tx_fltr = true;
		}

		/* Set filter DA based on packet type */
		mac_addr = new_fltr.l_data.mac.mac_addr;
		if (pkt_type == BCAST_FLTR) {
			eth_broadcast_addr(mac_addr);
		} else if (pkt_type == MCAST_FLTR ||
			   pkt_type == UCAST_FLTR) {
			/* Use the dummy ether header DA */
			ether_addr_copy(mac_addr, dummy_eth_header);
			if (pkt_type == MCAST_FLTR)
				mac_addr[0] |= 0x1; /* Set multicast bit */
		}

		/* Need to reset this to zero for all iterations */
		new_fltr.flag = 0;
		if (is_tx_fltr) {
			new_fltr.flag |= ICE_FLTR_TX;
			new_fltr.src = hw_vsi_id;
		} else {
			new_fltr.flag |= ICE_FLTR_RX;
			new_fltr.src = hw->port_info->lport;
		}

		new_fltr.fltr_act = ICE_FWD_TO_VSI;
		new_fltr.vsi_handle = vsi_handle;
		new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
		f_list_entry.fltr_info = new_fltr;

		status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
		if (status)
			goto set_promisc_exit;
	}

set_promisc_exit:
	return status;
}
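
/* Example (illustrative sketch only): enabling unicast and multicast RX
 * promiscuous reception on a VSI. The loop above adds roughly one switch
 * rule per packet-type/direction bit in the mask; vsi_handle is an
 * assumption.
 *
 *	u8 mask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_MCAST_RX;
 *	enum ice_status status;
 *
 *	status = ice_set_vsi_promisc(hw, vsi_handle, mask, 0);
 */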

/**
 * ice_set_vlan_vsi_promisc - set/clear promiscuous mode(s) on a VSI's VLANs
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: mask of promiscuous config bits
 * @rm_vlan_promisc: true to clear the given mode(s), false to set them
 *
 * Configure the VSI with all of its associated VLANs to the given
 * promiscuous mode(s).
 */
enum ice_status
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
			 bool rm_vlan_promisc)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *list_itr, *tmp;
	struct list_head vsi_list_head;
	struct list_head *vlan_head;
	struct mutex *vlan_lock; /* Lock to protect filter rule list */
	enum ice_status status;
	u16 vlan_id;

	INIT_LIST_HEAD(&vsi_list_head);
	vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
	mutex_lock(vlan_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
					  &vsi_list_head);
	mutex_unlock(vlan_lock);
	if (status)
		goto free_fltr_list;

	list_for_each_entry(list_itr, &vsi_list_head, list_entry) {
		vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
		if (rm_vlan_promisc)
			status = ice_clear_vsi_promisc(hw, vsi_handle,
						       promisc_mask, vlan_id);
		else
			status = ice_set_vsi_promisc(hw, vsi_handle,
						     promisc_mask, vlan_id);
		if (status)
			break;
	}

free_fltr_list:
	list_for_each_entry_safe(list_itr, tmp, &vsi_list_head, list_entry) {
		list_del(&list_itr->list_entry);
		devm_kfree(ice_hw_to_dev(hw), list_itr);
	}
	return status;
}

/**
 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @lkup: switch rule filter lookup type
 */
static void
ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
			 enum ice_sw_lkup_type lkup)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *fm_entry;
	struct list_head remove_list_head;
	struct list_head *rule_head;
	struct ice_fltr_list_entry *tmp;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status;

	INIT_LIST_HEAD(&remove_list_head);
	rule_lock = &sw->recp_list[lkup].filt_rule_lock;
	rule_head = &sw->recp_list[lkup].filt_rules;
	mutex_lock(rule_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
					  &remove_list_head);
	mutex_unlock(rule_lock);
	if (status)
		return;

	switch (lkup) {
	case ICE_SW_LKUP_MAC:
		ice_remove_mac(hw, &remove_list_head);
		break;
	case ICE_SW_LKUP_VLAN:
		ice_remove_vlan(hw, &remove_list_head);
		break;
	case ICE_SW_LKUP_PROMISC:
	case ICE_SW_LKUP_PROMISC_VLAN:
		ice_remove_promisc(hw, lkup, &remove_list_head);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
	case ICE_SW_LKUP_ETHERTYPE:
	case ICE_SW_LKUP_ETHERTYPE_MAC:
	case ICE_SW_LKUP_DFLT:
	case ICE_SW_LKUP_LAST:
	default:
		ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup);
		break;
	}

	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
		list_del(&fm_entry->list_entry);
		devm_kfree(ice_hw_to_dev(hw), fm_entry);
	}
}

/**
 * ice_remove_vsi_fltr - Remove all filters for a VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 */
void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
{
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
}
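
/* Example (illustrative sketch only): typical calls to the helpers above.
 * The vsi_handle variable and the chosen mask are assumptions.
 *
 *	// enable RX promiscuous mode on every VLAN currently on the VSI
 *	status = ice_set_vlan_vsi_promisc(hw, vsi_handle,
 *					  ICE_PROMISC_UCAST_RX |
 *					  ICE_PROMISC_VLAN_RX, false);
 *
 *	// drop every filter owned by the VSI, e.g. when the VSI is released
 *	ice_remove_vsi_fltr(hw, vsi_handle);
 */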

/**
 * ice_replay_vsi_fltr - Replay filters for requested VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: driver VSI handle
 * @recp_id: Recipe ID for which rules need to be replayed
 * @list_head: list for which filters need to be replayed
 *
 * Replays the filters of recipe recp_id for the VSI represented by
 * vsi_handle. A valid VSI handle must be passed.
 */
static enum ice_status
ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
		    struct list_head *list_head)
{
	struct ice_fltr_mgmt_list_entry *itr;
	enum ice_status status = 0;
	u16 hw_vsi_id;

	if (list_empty(list_head))
		return status;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	list_for_each_entry(itr, list_head, list_entry) {
		struct ice_fltr_list_entry f_entry;

		f_entry.fltr_info = itr->fltr_info;
		if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
		    itr->fltr_info.vsi_handle == vsi_handle) {
			/* update the src in case it is VSI num */
			if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
				f_entry.fltr_info.src = hw_vsi_id;
			status = ice_add_rule_internal(hw, recp_id, &f_entry);
			if (status)
				goto end;
			continue;
		}
		if (!itr->vsi_list_info ||
		    !test_bit(vsi_handle, itr->vsi_list_info->vsi_map))
			continue;
		/* Clearing it so that the logic can add it back */
		clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
		f_entry.fltr_info.vsi_handle = vsi_handle;
		f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
		/* update the src in case it is VSI num */
		if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
			f_entry.fltr_info.src = hw_vsi_id;
		if (recp_id == ICE_SW_LKUP_VLAN)
			status = ice_add_vlan_internal(hw, &f_entry);
		else
			status = ice_add_rule_internal(hw, recp_id, &f_entry);
		if (status)
			goto end;
	}
end:
	return status;
}

/**
 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
 * @hw: pointer to the hardware structure
 * @vsi_handle: driver VSI handle
 *
 * Replays filters for requested VSI via vsi_handle.
 */
enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_switch_info *sw = hw->switch_info;
	enum ice_status status = 0;
	u8 i;

	for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
		struct list_head *head;

		head = &sw->recp_list[i].filt_replay_rules;
		status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
		if (status)
			return status;
	}
	return status;
}

/**
 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
 * @hw: pointer to the HW struct
 *
 * Deletes the filter replay rules.
 */
void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	u8 i;

	if (!sw)
		return;

	for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
		if (!list_empty(&sw->recp_list[i].filt_replay_rules)) {
			struct list_head *l_head;

			l_head = &sw->recp_list[i].filt_replay_rules;
			ice_rem_sw_rule_info(hw, l_head);
		}
	}
}
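
/* Example (illustrative sketch only): replaying and cleaning up the saved
 * filters around a reset. Iterating over VSI handles and the exact point
 * in the reset flow are assumptions; only the two calls below are defined
 * in this file.
 *
 *	// after the hardware comes back, re-program saved filters per VSI
 *	status = ice_replay_vsi_all_fltr(hw, vsi_handle);
 *
 *	// once replay is complete for all VSIs, drop the replay bookkeeping
 *	ice_rm_all_sw_replay_rule_info(hw);
 */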