// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_lib.h"
#include "ice_switch.h"

/* Byte offsets into dummy_eth_header below */
#define ICE_ETH_DA_OFFSET		0
#define ICE_ETH_ETHTYPE_OFFSET		12
#define ICE_ETH_VLAN_TCI_OFFSET		14
#define ICE_MAX_VLAN_ID			0xFFF
#define ICE_IPV6_ETHER_ID		0x86DD

/* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
 * struct to configure any switch filter rules.
 * {DA (6 bytes), SA(6 bytes),
 * Ether type (2 bytes for header without VLAN tag) OR
 * VLAN tag (4 bytes for header with VLAN tag) }
 *
 * Word on Hardcoded values
 * byte 0 = 0x2: to identify it as locally administered DA MAC
 * byte 6 = 0x2: to identify it as locally administered SA MAC
 * byte 12 = 0x81 & byte 13 = 0x00:
 *	In case of VLAN filter first two bytes defines ether type (0x8100)
 *	and remaining two bytes are placeholder for programming a given VLAN ID
 *	In case of Ether type filter it is treated as header without VLAN tag
 *	and byte 12 and 13 is used to program a given Ether type instead
 */
#define DUMMY_ETH_HDR_LEN		16
static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
							0x2, 0, 0, 0, 0, 0,
							0x81, 0, 0, 0};

/* One entry of a dummy packet's protocol-header layout description */
struct ice_dummy_pkt_offsets {
	enum ice_protocol_type type;
	u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
};

/* offset info for MAC + IPv4 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_ILOS,		34 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + UDP */
static const u8 dummy_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x1c,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 34 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};

/* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_OFOS,	12 },
	{ ICE_ETYPE_OL,		16 },
	{ ICE_IPV4_OFOS,	18 },
	{ ICE_UDP_ILOS,		38 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* C-tag (802.1Q), IPv4:UDP dummy packet */
static const u8 dummy_vlan_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, 0x00, 0x00,	/* ICE_VLAN_OFOS 12 */

	0x08, 0x00,		/* ICE_ETYPE_OL 16 */

	0x45, 0x00, 0x00, 0x1c,	/* ICE_IPV4_OFOS 18 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 38 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};

/* offset info for MAC + IPv4 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_TCP_IL,		34 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + TCP */
static const u8 dummy_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x28,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};

/* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_OFOS,	12 },
	{ ICE_ETYPE_OL,		16 },
	{ ICE_IPV4_OFOS,	18 },
	{ ICE_TCP_IL,		38 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* C-tag (802.1Q), IPv4:TCP dummy packet */
static const u8 dummy_vlan_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, 0x00, 0x00,	/* ICE_VLAN_OFOS 12 */

	0x08, 0x00,		/* ICE_ETYPE_OL 16 */

	0x45, 0x00, 0x00, 0x28,	/* ICE_IPV4_OFOS 18 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 38 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};

/* offset info for MAC + IPv6 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_TCP_IL,		54 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* IPv6 + TCP dummy packet */
static const u8 dummy_tcp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_OFOS 14 */
	0x00, 0x14, 0x06, 0x00,	/* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};

/* C-tag (802.1Q): IPv6 + TCP */
static const struct ice_dummy_pkt_offsets
dummy_vlan_tcp_ipv6_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_OFOS,	12 },
	{ ICE_ETYPE_OL,		16 },
	{ ICE_IPV6_OFOS,	18 },
	{ ICE_TCP_IL,		58 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* C-tag (802.1Q), IPv6 + TCP dummy packet */
static const u8 dummy_vlan_tcp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, 0x00, 0x00,	/* ICE_VLAN_OFOS 12 */

	0x86, 0xDD,		/* ICE_ETYPE_OL 16 */

	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_OFOS 18 */
	0x00, 0x14, 0x06, 0x00,	/* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 58 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};

/* IPv6 + UDP */
static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_ILOS,		54 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* IPv6 + UDP dummy packet */
static const u8 dummy_udp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_OFOS 14 */
	0x00, 0x10, 0x11, 0x00,	/* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 54 */
	0x00, 0x10, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* needed for ESP packets */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};

/* C-tag (802.1Q): IPv6 + UDP */
static const struct ice_dummy_pkt_offsets
dummy_vlan_udp_ipv6_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_OFOS,	12 },
	{ ICE_ETYPE_OL,		16 },
	{ ICE_IPV6_OFOS,	18 },
	{ ICE_UDP_ILOS,		58 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* C-tag (802.1Q), IPv6 + UDP dummy packet */
static const u8 dummy_vlan_udp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, 0x00, 0x00,	/* ICE_VLAN_OFOS 12 */

	0x86, 0xDD,		/* ICE_ETYPE_OL 16 */

	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_OFOS 18 */
	0x00, 0x08, 0x11, 0x00,	/* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 58 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};

/* Sizes (in bytes) of the variable-length AQ switch-rule buffers; each is the
 * fixed descriptor prefix plus the flexible trailing array sized for 'n'
 * entries (or for the dummy Ethernet header in the RX_TX case).
 */
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
	(offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr) + \
	 (DUMMY_ETH_HDR_LEN * \
	  sizeof(((struct ice_sw_rule_lkup_rx_tx *)0)->hdr[0])))
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
	(offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr))
#define ICE_SW_RULE_LG_ACT_SIZE(n) \
	(offsetof(struct ice_aqc_sw_rules_elem, pdata.lg_act.act) + \
	 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act[0])))
#define ICE_SW_RULE_VSI_LIST_SIZE(n) \
	(offsetof(struct ice_aqc_sw_rules_elem, pdata.vsi_list.vsi) + \
	 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi[0])))

/* this is a recipe to profile association bitmap */
static DECLARE_BITMAP(recipe_to_profile[ICE_MAX_NUM_RECIPES],
		      ICE_MAX_NUM_PROFILES);

/* this is a profile to recipe association bitmap */
static DECLARE_BITMAP(profile_to_recipe[ICE_MAX_NUM_PROFILES],
		      ICE_MAX_NUM_RECIPES);

/**
 * ice_init_def_sw_recp - initialize the recipe book keeping tables
 * @hw: pointer to the HW struct
 *
 * Allocate memory for the entire recipe table and initialize the structures/
 * entries corresponding to basic recipes.
 */
enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
{
	struct ice_sw_recipe *recps;
	u8 i;

	recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES,
			     sizeof(*recps), GFP_KERNEL);
	if (!recps)
		return ICE_ERR_NO_MEMORY;

	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		recps[i].root_rid = i;
		INIT_LIST_HEAD(&recps[i].filt_rules);
		INIT_LIST_HEAD(&recps[i].filt_replay_rules);
		INIT_LIST_HEAD(&recps[i].rg_list);
		mutex_init(&recps[i].filt_rule_lock);
	}

	hw->switch_info->recp_list = recps;

	return 0;
}

/**
 * ice_aq_get_sw_cfg - get switch configuration
 * @hw: pointer to the hardware structure
 * @buf: pointer to the result buffer
 * @buf_size: length of the buffer available for response
 * @req_desc: pointer to requested descriptor
 * @num_elems: pointer to number of elements
 * @cd: pointer to command details structure or NULL
 *
 * Get switch configuration (0x0200) to be placed in buf.
 * This admin command returns information such as initial VSI/port number
 * and switch ID it belongs to.
 *
 * NOTE: *req_desc is both an input/output parameter.
 * The caller of this function first calls this function with *req_desc set
 * to 0. If the response from f/w has *req_desc set to 0, all the switch
 * configuration information has been returned; if non-zero (meaning not all
 * the information was returned), the caller should call this function again
 * with *req_desc set to the previous value returned by f/w to get the
 * next block of switch configuration information.
 *
 * *num_elems is output only parameter. This reflects the number of elements
 * in response buffer. The caller of this function to use *num_elems while
 * parsing the response buffer.
 */
static enum ice_status
ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
		  u16 buf_size, u16 *req_desc, u16 *num_elems,
		  struct ice_sq_cd *cd)
{
	struct ice_aqc_get_sw_cfg *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
	cmd = &desc.params.get_sw_conf;
	cmd->element = cpu_to_le16(*req_desc);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status) {
		/* f/w echoes the continuation token and element count back
		 * through the same descriptor fields
		 */
		*req_desc = le16_to_cpu(cmd->element);
		*num_elems = le16_to_cpu(cmd->num_elems);
	}

	return status;
}

/**
 * ice_aq_add_vsi
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Add a VSI context to the hardware (0x0210)
 */
static enum ice_status
ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_add_update_free_vsi_resp *res;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.vsi_cmd;
	res = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);

	/* only pass an explicit VSI number when the caller is not letting
	 * f/w allocate one from its pool
	 */
	if (!vsi_ctx->alloc_from_pool)
		cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |
					   ICE_AQ_VSI_IS_VALID);
	cmd->vf_id = vsi_ctx->vf_num;

	cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
				 sizeof(vsi_ctx->info), cd);

	if (!status) {
		vsi_ctx->vsi_num = le16_to_cpu(res->vsi_num) & ICE_AQ_VSI_NUM_M;
		vsi_ctx->vsis_allocd = le16_to_cpu(res->vsi_used);
		vsi_ctx->vsis_unallocated = le16_to_cpu(res->vsi_free);
	}

	return status;
}

/**
 * ice_aq_free_vsi
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
 * @cd: pointer to command details structure or NULL
 *
 * Free VSI context info from hardware (0x0213)
 */
static enum ice_status
ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		bool keep_vsi_alloc, struct ice_sq_cd *cd)
{
	struct ice_aqc_add_update_free_vsi_resp *resp;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.vsi_cmd;
	resp = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);

	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
	if (keep_vsi_alloc)
		cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (!status) {
		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
	}

	return status;
}

/**
 * ice_aq_update_vsi
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Update VSI context in the hardware (0x0211)
 */
static enum ice_status
ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		  struct ice_sq_cd *cd)
{
	struct ice_aqc_add_update_free_vsi_resp *resp;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.vsi_cmd;
	resp = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);

	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
				 sizeof(vsi_ctx->info), cd);

	if (!status) {
		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
	}

	return status;
}

/**
 * ice_is_vsi_valid - check whether the VSI is valid or not
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * check whether the VSI is valid or not
 */
bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
{
	return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
}

/**
 * ice_get_hw_vsi_num - return the HW VSI number
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * return the HW VSI number
 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
 */
u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
{
	return hw->vsi_ctx[vsi_handle]->vsi_num;
}

/**
 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * return the VSI context entry for a given VSI handle
 */
struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
{
	return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
}

/**
 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 * @vsi: VSI context pointer
 *
 * save the VSI context entry for a given VSI handle
 */
static void
ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
{
	hw->vsi_ctx[vsi_handle] = vsi;
}

/**
 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 */
static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_vsi_ctx *vsi;
	u8 i;

	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi)
		return;
	ice_for_each_traffic_class(i) {
		if (vsi->lan_q_ctx[i]) {
			devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]);
			vsi->lan_q_ctx[i] = NULL;
		}
		if (vsi->rdma_q_ctx[i]) {
			devm_kfree(ice_hw_to_dev(hw), vsi->rdma_q_ctx[i]);
			vsi->rdma_q_ctx[i] = NULL;
		}
	}
}

/**
 * ice_clear_vsi_ctx - clear the VSI context entry
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * clear the VSI context entry
 */
static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_vsi_ctx *vsi;

	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	if (vsi) {
		ice_clear_vsi_q_ctx(hw, vsi_handle);
		devm_kfree(ice_hw_to_dev(hw), vsi);
		hw->vsi_ctx[vsi_handle] = NULL;
	}
}

/**
 * ice_clear_all_vsi_ctx - clear all the VSI context entries
 * @hw: pointer to the HW struct
 */
void ice_clear_all_vsi_ctx(struct ice_hw *hw)
{
	u16 i;

	for (i = 0; i < ICE_MAX_VSI; i++)
		ice_clear_vsi_ctx(hw, i);
}

/**
 * ice_add_vsi - add VSI context to the hardware and VSI handle list
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle provided by drivers
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Add a VSI context to the hardware also add it into the VSI handle list.
 * If this function gets called after reset for existing VSIs then update
 * with the new HW VSI number in the corresponding VSI handle list entry.
 */
enum ice_status
ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	    struct ice_sq_cd *cd)
{
	struct ice_vsi_ctx *tmp_vsi_ctx;
	enum ice_status status;

	if (vsi_handle >= ICE_MAX_VSI)
		return ICE_ERR_PARAM;
	status = ice_aq_add_vsi(hw, vsi_ctx, cd);
	if (status)
		return status;
	tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!tmp_vsi_ctx) {
		/* Create a new VSI context */
		tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw),
					   sizeof(*tmp_vsi_ctx), GFP_KERNEL);
		if (!tmp_vsi_ctx) {
			/* undo the AQ add so HW and SW state stay in sync */
			ice_aq_free_vsi(hw, vsi_ctx, false, cd);
			return ICE_ERR_NO_MEMORY;
		}
		*tmp_vsi_ctx = *vsi_ctx;
		ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
	} else {
		/* update with new HW VSI num */
		tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
	}

	return 0;
}

/**
 * ice_free_vsi- free VSI context from hardware and VSI handle list
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle
 * @vsi_ctx: pointer to a VSI context struct
 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
 * @cd: pointer to command details structure or NULL
 *
 * Free VSI context info from hardware as well as from VSI handle list
 */
enum ice_status
ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	     bool keep_vsi_alloc, struct ice_sq_cd *cd)
{
	enum ice_status status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
	status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
	if (!status)
		ice_clear_vsi_ctx(hw, vsi_handle);
	return status;
}

/**
 * ice_update_vsi
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Update VSI context in the hardware
 */
enum ice_status
ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	       struct ice_sq_cd *cd)
{
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
	return ice_aq_update_vsi(hw, vsi_ctx, cd);
}

/**
 * ice_cfg_rdma_fltr - enable/disable RDMA filtering on VSI
 * @hw: pointer to HW struct
 * @vsi_handle: VSI SW index
 * @enable: boolean for enable/disable
 *
 * Returns 0 on success or a negative errno (note: errno, not ice_status).
 */
int
ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
{
	struct ice_vsi_ctx *ctx;

	ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!ctx)
		return -EIO;

	if (enable)
		ctx->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
	else
		ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;

	return ice_status_to_errno(ice_update_vsi(hw, vsi_handle, ctx, NULL));
}

/**
 * ice_aq_alloc_free_vsi_list
 * @hw: pointer to the HW struct
 * @vsi_list_id: VSI list ID returned or used for lookup
 * @lkup_type: switch rule filter lookup type
 * @opc: switch rules population command type - pass in the command opcode
 *
 * allocates or free a VSI list resource
 */
static enum ice_status
ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
			   enum ice_sw_lkup_type lkup_type,
			   enum ice_adminq_opc opc)
{
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	struct ice_aqc_res_elem *vsi_ele;
	enum ice_status status;
	u16 buf_len;

	buf_len = struct_size(sw_buf, elem, 1);
	sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
	if (!sw_buf)
		return ICE_ERR_NO_MEMORY;
	sw_buf->num_elems = cpu_to_le16(1);

	/* VLAN lookups use the "prune" list resource; all other supported
	 * lookup types use the "replication" list resource
	 */
	if (lkup_type == ICE_SW_LKUP_MAC ||
	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
	    lkup_type == ICE_SW_LKUP_PROMISC ||
	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN) {
		sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
	} else if (lkup_type == ICE_SW_LKUP_VLAN) {
		sw_buf->res_type =
			cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
	} else {
		status = ICE_ERR_PARAM;
		goto ice_aq_alloc_free_vsi_list_exit;
	}

	if (opc == ice_aqc_opc_free_res)
		sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id);

	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
	if (status)
		goto ice_aq_alloc_free_vsi_list_exit;

	if (opc == ice_aqc_opc_alloc_res) {
		vsi_ele = &sw_buf->elem[0];
		*vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp);
	}

ice_aq_alloc_free_vsi_list_exit:
	devm_kfree(ice_hw_to_dev(hw), sw_buf);
	return status;
}

/**
 * ice_aq_sw_rules - add/update/remove switch rules
 * @hw: pointer to the HW struct
 * @rule_list: pointer to switch rule population list
 * @rule_list_sz: total size of the rule list in bytes
 * @num_rules: number of switch rules in the rule_list
 * @opc: switch rules population command type - pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
 */
enum ice_status
ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
		u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;
	enum ice_status status;

	if (opc != ice_aqc_opc_add_sw_rules &&
	    opc != ice_aqc_opc_update_sw_rules &&
	    opc != ice_aqc_opc_remove_sw_rules)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	desc.params.sw_rules.num_rules_fltr_entry_index =
		cpu_to_le16(num_rules);
	status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
	/* for update/remove, translate f/w ENOENT into a distinct error so
	 * callers can tell "rule not found" apart from other AQ failures
	 */
	if (opc != ice_aqc_opc_add_sw_rules &&
	    hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
		status = ICE_ERR_DOES_NOT_EXIST;

	return status;
}

/**
 * ice_aq_add_recipe - add switch recipe
 * @hw: pointer to the HW struct
 * @s_recipe_list: pointer to switch rule population list
 * @num_recipes: number of switch recipes in the list
 * @cd: pointer to command details structure or NULL
 *
 * Add(0x0290)
 */
static enum ice_status
ice_aq_add_recipe(struct ice_hw *hw,
		  struct ice_aqc_recipe_data_elem *s_recipe_list,
		  u16 num_recipes, struct ice_sq_cd *cd)
{
	struct ice_aqc_add_get_recipe *cmd;
	struct ice_aq_desc desc;
	u16 buf_size;

	cmd = &desc.params.add_get_recipe;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);

	cmd->num_sub_recipes = cpu_to_le16(num_recipes);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	buf_size = num_recipes * sizeof(*s_recipe_list);

	return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
}

/**
 * ice_aq_get_recipe - get switch recipe
 * @hw: pointer to the HW struct
 * @s_recipe_list: pointer to switch rule population list
 * @num_recipes: pointer to the number of recipes (input and output)
 * @recipe_root: root recipe number of recipe(s) to retrieve
 * @cd: pointer to command details structure or NULL
 *
 * Get(0x0292)
 *
 * On input, *num_recipes should equal the number of entries in s_recipe_list.
 * On output, *num_recipes will equal the number of entries returned in
 * s_recipe_list.
 *
 * The caller must supply enough space in s_recipe_list to hold all possible
 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
 */
static enum ice_status
ice_aq_get_recipe(struct ice_hw *hw,
		  struct ice_aqc_recipe_data_elem *s_recipe_list,
		  u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
{
	struct ice_aqc_add_get_recipe *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 buf_size;

	if (*num_recipes != ICE_MAX_NUM_RECIPES)
		return ICE_ERR_PARAM;

	cmd = &desc.params.add_get_recipe;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);

	cmd->return_index = cpu_to_le16(recipe_root);
	cmd->num_sub_recipes = 0;

	buf_size = *num_recipes * sizeof(*s_recipe_list);

	status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
	*num_recipes = le16_to_cpu(cmd->num_sub_recipes);

	return status;
}

/**
 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
 * @hw: pointer to the HW struct
 * @profile_id: package profile ID to associate the recipe with
 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
 * @cd: pointer to command details structure or NULL
 * Recipe to profile association (0x0291)
 */
static enum ice_status
ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
			     struct ice_sq_cd *cd)
{
	struct ice_aqc_recipe_to_profile *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.recipe_to_profile;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
	cmd->profile_id = cpu_to_le16(profile_id);
	/* Set the recipe ID bit in the bitmask to let the device know which
	 * profile we are associating the recipe to
	 */
	memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc));

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
 * @hw: pointer to the HW struct
 * @profile_id: package profile ID to associate the recipe with
 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
 * @cd: pointer to command details structure or NULL
 * Associate profile ID with given recipe (0x0293)
 */
static enum ice_status
ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
			     struct ice_sq_cd *cd)
{
	struct ice_aqc_recipe_to_profile *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.recipe_to_profile;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
	cmd->profile_id = cpu_to_le16(profile_id);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (!status)
		memcpy(r_bitmap, cmd->recipe_assoc, sizeof(cmd->recipe_assoc));

	return status;
}

/**
 * ice_alloc_recipe - add recipe resource
 * @hw: pointer to the hardware structure
 * @rid: recipe ID returned as response to AQ call
 */
static enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
{
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	enum ice_status status;
	u16 buf_len;

	buf_len = struct_size(sw_buf, elem, 1);
	sw_buf = kzalloc(buf_len, GFP_KERNEL);
	if (!sw_buf)
		return ICE_ERR_NO_MEMORY;

	sw_buf->num_elems = cpu_to_le16(1);
	sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE <<
					ICE_AQC_RES_TYPE_S) |
				       ICE_AQC_RES_TYPE_FLAG_SHARED);
	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
	if (!status)
		*rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp);
	kfree(sw_buf);

	return status;
}

/**
 * ice_get_recp_to_prof_map - updates recipe to profile mapping
 * @hw: pointer to hardware structure
 *
 * This function is used to populate recipe_to_profile matrix where index to
 * this array is the recipe ID and the element is the mapping of which profiles
 * is this recipe mapped to.
 */
static void ice_get_recp_to_prof_map(struct ice_hw *hw)
{
	DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
	u16 i;

	for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
		u16 j;

		bitmap_zero(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
		bitmap_zero(r_bitmap, ICE_MAX_NUM_RECIPES);
		/* a failed query leaves this profile's mapping cleared */
		if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
			continue;
		bitmap_copy(profile_to_recipe[i], r_bitmap,
			    ICE_MAX_NUM_RECIPES);
		/* also maintain the reverse (recipe -> profiles) mapping */
		for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
			set_bit(i, recipe_to_profile[j]);
	}
}

/**
 * ice_collect_result_idx - copy result index values
 * @buf: buffer that contains the result index
 * @recp: the recipe struct to copy data into
 */
static void
ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
		       struct ice_sw_recipe *recp)
{
	if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
		set_bit(buf->content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
			recp->res_idxs);
}

/**
 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
 * @hw: pointer to hardware structure
 * @recps: struct that we need to populate
 * @rid: recipe ID that we are populating
 * @refresh_required: true if we should get recipe to profile mapping from FW
 *
 * This function is used to populate all the necessary entries into our
 * bookkeeping so that we have a current list of all the recipes that are
 * programmed in the firmware.
 */
static enum ice_status
ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
		    bool *refresh_required)
{
	DECLARE_BITMAP(result_bm, ICE_MAX_FV_WORDS);
	struct ice_aqc_recipe_data_elem *tmp;
	u16 num_recps = ICE_MAX_NUM_RECIPES;
	struct ice_prot_lkup_ext *lkup_exts;
	enum ice_status status;
	u8 fv_word_idx = 0;
	u16 sub_recps;

	bitmap_zero(result_bm, ICE_MAX_FV_WORDS);

	/* we need a buffer big enough to accommodate all the recipes */
	tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return ICE_ERR_NO_MEMORY;

	tmp[0].recipe_indx = rid;
	status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
	/* non-zero status meaning recipe doesn't exist */
	if (status)
		goto err_unroll;

	/* Get recipe to profile map so that we can get the fv from lkups that
	 * we read for a recipe from FW. Since we want to minimize the number of
	 * times we make this FW call, just make one call and cache the copy
	 * until a new recipe is added. This operation is only required the
	 * first time to get the changes from FW. Then to search existing
	 * entries we don't need to update the cache again until another recipe
	 * gets added.
	 */
	if (*refresh_required) {
		ice_get_recp_to_prof_map(hw);
		*refresh_required = false;
	}

	/* Start populating all the entries for recps[rid] based on lkups from
	 * firmware. Note that we are only creating the root recipe in our
	 * database.
	 */
	lkup_exts = &recps[rid].lkup_exts;

	for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
		struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
		struct ice_recp_grp_entry *rg_entry;
		u8 i, prof, idx, prot = 0;
		bool is_root;
		u16 off = 0;

		rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry),
					GFP_KERNEL);
		if (!rg_entry) {
			status = ICE_ERR_NO_MEMORY;
			goto err_unroll;
		}

		idx = root_bufs.recipe_indx;
		is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;

		/* Mark all result indices in this chain */
		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
			set_bit(root_bufs.content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
				result_bm);

		/* get the first profile that is associated with rid */
		prof = find_first_bit(recipe_to_profile[idx],
				      ICE_MAX_NUM_PROFILES);
		for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
			u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];

			rg_entry->fv_idx[i] = lkup_indx;
			rg_entry->fv_mask[i] =
				le16_to_cpu(root_bufs.content.mask[i + 1]);

			/* If the recipe is a chained recipe then all its
			 * child recipe's result will have a result index.
			 * To fill fv_words we should not use those result
			 * index, we only need the protocol ids and offsets.
			 * We will skip all the fv_idx which stores result
			 * index in them. We also need to skip any fv_idx which
			 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
			 * valid offset value.
1131 */ 1132 if (test_bit(rg_entry->fv_idx[i], hw->switch_info->prof_res_bm[prof]) || 1133 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE || 1134 rg_entry->fv_idx[i] == 0) 1135 continue; 1136 1137 ice_find_prot_off(hw, ICE_BLK_SW, prof, 1138 rg_entry->fv_idx[i], &prot, &off); 1139 lkup_exts->fv_words[fv_word_idx].prot_id = prot; 1140 lkup_exts->fv_words[fv_word_idx].off = off; 1141 lkup_exts->field_mask[fv_word_idx] = 1142 rg_entry->fv_mask[i]; 1143 fv_word_idx++; 1144 } 1145 /* populate rg_list with the data from the child entry of this 1146 * recipe 1147 */ 1148 list_add(&rg_entry->l_entry, &recps[rid].rg_list); 1149 1150 /* Propagate some data to the recipe database */ 1151 recps[idx].is_root = !!is_root; 1152 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority; 1153 bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS); 1154 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) { 1155 recps[idx].chain_idx = root_bufs.content.result_indx & 1156 ~ICE_AQ_RECIPE_RESULT_EN; 1157 set_bit(recps[idx].chain_idx, recps[idx].res_idxs); 1158 } else { 1159 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND; 1160 } 1161 1162 if (!is_root) 1163 continue; 1164 1165 /* Only do the following for root recipes entries */ 1166 memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap, 1167 sizeof(recps[idx].r_bitmap)); 1168 recps[idx].root_rid = root_bufs.content.rid & 1169 ~ICE_AQ_RECIPE_ID_IS_ROOT; 1170 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority; 1171 } 1172 1173 /* Complete initialization of the root recipe entry */ 1174 lkup_exts->n_val_words = fv_word_idx; 1175 recps[rid].big_recp = (num_recps > 1); 1176 recps[rid].n_grp_count = (u8)num_recps; 1177 recps[rid].root_buf = devm_kmemdup(ice_hw_to_dev(hw), tmp, 1178 recps[rid].n_grp_count * sizeof(*recps[rid].root_buf), 1179 GFP_KERNEL); 1180 if (!recps[rid].root_buf) 1181 goto err_unroll; 1182 1183 /* Copy result indexes */ 1184 bitmap_copy(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS); 1185 
recps[rid].recp_created = true; 1186 1187 err_unroll: 1188 kfree(tmp); 1189 return status; 1190 } 1191 1192 /* ice_init_port_info - Initialize port_info with switch configuration data 1193 * @pi: pointer to port_info 1194 * @vsi_port_num: VSI number or port number 1195 * @type: Type of switch element (port or VSI) 1196 * @swid: switch ID of the switch the element is attached to 1197 * @pf_vf_num: PF or VF number 1198 * @is_vf: true if the element is a VF, false otherwise 1199 */ 1200 static void 1201 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type, 1202 u16 swid, u16 pf_vf_num, bool is_vf) 1203 { 1204 switch (type) { 1205 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT: 1206 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK); 1207 pi->sw_id = swid; 1208 pi->pf_vf_num = pf_vf_num; 1209 pi->is_vf = is_vf; 1210 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL; 1211 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL; 1212 break; 1213 default: 1214 ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n"); 1215 break; 1216 } 1217 } 1218 1219 /* ice_get_initial_sw_cfg - Get initial port and default VSI data 1220 * @hw: pointer to the hardware structure 1221 */ 1222 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw) 1223 { 1224 struct ice_aqc_get_sw_cfg_resp_elem *rbuf; 1225 enum ice_status status; 1226 u16 req_desc = 0; 1227 u16 num_elems; 1228 u16 i; 1229 1230 rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN, 1231 GFP_KERNEL); 1232 1233 if (!rbuf) 1234 return ICE_ERR_NO_MEMORY; 1235 1236 /* Multiple calls to ice_aq_get_sw_cfg may be required 1237 * to get all the switch configuration information. 
The need
	 * for additional calls is indicated by ice_aq_get_sw_cfg
	 * writing a non-zero value in req_desc
	 */
	do {
		struct ice_aqc_get_sw_cfg_resp_elem *ele;

		status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
					   &req_desc, &num_elems, NULL);

		if (status)
			break;

		for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
			u16 pf_vf_num, swid, vsi_port_num;
			bool is_vf = false;
			u8 res_type;

			/* low bits carry the VSI/port number, high bits the
			 * element type (extracted below)
			 */
			vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
				       ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;

			pf_vf_num = le16_to_cpu(ele->pf_vf_num) &
				    ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;

			swid = le16_to_cpu(ele->swid);

			if (le16_to_cpu(ele->pf_vf_num) &
			    ICE_AQC_GET_SW_CONF_RESP_IS_VF)
				is_vf = true;

			res_type = (u8)(le16_to_cpu(ele->vsi_port_num) >>
					ICE_AQC_GET_SW_CONF_RESP_TYPE_S);

			if (res_type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
				/* FW VSI is not needed. Just continue. */
				continue;
			}

			ice_init_port_info(hw->port_info, vsi_port_num,
					   res_type, swid, pf_vf_num, is_vf);
		}
	} while (req_desc && !status);

	devm_kfree(ice_hw_to_dev(hw), rbuf);
	return status;
}

/**
 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
 * @hw: pointer to the hardware structure
 * @fi: filter info structure to fill/update
 *
 * This helper function populates the lb_en and lan_en elements of the provided
 * ice_fltr_info struct using the switch's type and characteristics of the
 * switch rule being configured.
1292 */ 1293 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi) 1294 { 1295 fi->lb_en = false; 1296 fi->lan_en = false; 1297 if ((fi->flag & ICE_FLTR_TX) && 1298 (fi->fltr_act == ICE_FWD_TO_VSI || 1299 fi->fltr_act == ICE_FWD_TO_VSI_LIST || 1300 fi->fltr_act == ICE_FWD_TO_Q || 1301 fi->fltr_act == ICE_FWD_TO_QGRP)) { 1302 /* Setting LB for prune actions will result in replicated 1303 * packets to the internal switch that will be dropped. 1304 */ 1305 if (fi->lkup_type != ICE_SW_LKUP_VLAN) 1306 fi->lb_en = true; 1307 1308 /* Set lan_en to TRUE if 1309 * 1. The switch is a VEB AND 1310 * 2 1311 * 2.1 The lookup is a directional lookup like ethertype, 1312 * promiscuous, ethertype-MAC, promiscuous-VLAN 1313 * and default-port OR 1314 * 2.2 The lookup is VLAN, OR 1315 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR 1316 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC. 1317 * 1318 * OR 1319 * 1320 * The switch is a VEPA. 1321 * 1322 * In all other cases, the LAN enable has to be set to false. 
1323 */ 1324 if (hw->evb_veb) { 1325 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE || 1326 fi->lkup_type == ICE_SW_LKUP_PROMISC || 1327 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC || 1328 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN || 1329 fi->lkup_type == ICE_SW_LKUP_DFLT || 1330 fi->lkup_type == ICE_SW_LKUP_VLAN || 1331 (fi->lkup_type == ICE_SW_LKUP_MAC && 1332 !is_unicast_ether_addr(fi->l_data.mac.mac_addr)) || 1333 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN && 1334 !is_unicast_ether_addr(fi->l_data.mac.mac_addr))) 1335 fi->lan_en = true; 1336 } else { 1337 fi->lan_en = true; 1338 } 1339 } 1340 } 1341 1342 /** 1343 * ice_fill_sw_rule - Helper function to fill switch rule structure 1344 * @hw: pointer to the hardware structure 1345 * @f_info: entry containing packet forwarding information 1346 * @s_rule: switch rule structure to be filled in based on mac_entry 1347 * @opc: switch rules population command type - pass in the command opcode 1348 */ 1349 static void 1350 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info, 1351 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc) 1352 { 1353 u16 vlan_id = ICE_MAX_VLAN_ID + 1; 1354 void *daddr = NULL; 1355 u16 eth_hdr_sz; 1356 u8 *eth_hdr; 1357 u32 act = 0; 1358 __be16 *off; 1359 u8 q_rgn; 1360 1361 if (opc == ice_aqc_opc_remove_sw_rules) { 1362 s_rule->pdata.lkup_tx_rx.act = 0; 1363 s_rule->pdata.lkup_tx_rx.index = 1364 cpu_to_le16(f_info->fltr_rule_id); 1365 s_rule->pdata.lkup_tx_rx.hdr_len = 0; 1366 return; 1367 } 1368 1369 eth_hdr_sz = sizeof(dummy_eth_header); 1370 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr; 1371 1372 /* initialize the ether header with a dummy header */ 1373 memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz); 1374 ice_fill_sw_info(hw, f_info); 1375 1376 switch (f_info->fltr_act) { 1377 case ICE_FWD_TO_VSI: 1378 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) & 1379 ICE_SINGLE_ACT_VSI_ID_M; 1380 if (f_info->lkup_type != ICE_SW_LKUP_VLAN) 1381 act |= ICE_SINGLE_ACT_VSI_FORWARDING 
| 1382 ICE_SINGLE_ACT_VALID_BIT; 1383 break; 1384 case ICE_FWD_TO_VSI_LIST: 1385 act |= ICE_SINGLE_ACT_VSI_LIST; 1386 act |= (f_info->fwd_id.vsi_list_id << 1387 ICE_SINGLE_ACT_VSI_LIST_ID_S) & 1388 ICE_SINGLE_ACT_VSI_LIST_ID_M; 1389 if (f_info->lkup_type != ICE_SW_LKUP_VLAN) 1390 act |= ICE_SINGLE_ACT_VSI_FORWARDING | 1391 ICE_SINGLE_ACT_VALID_BIT; 1392 break; 1393 case ICE_FWD_TO_Q: 1394 act |= ICE_SINGLE_ACT_TO_Q; 1395 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) & 1396 ICE_SINGLE_ACT_Q_INDEX_M; 1397 break; 1398 case ICE_DROP_PACKET: 1399 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP | 1400 ICE_SINGLE_ACT_VALID_BIT; 1401 break; 1402 case ICE_FWD_TO_QGRP: 1403 q_rgn = f_info->qgrp_size > 0 ? 1404 (u8)ilog2(f_info->qgrp_size) : 0; 1405 act |= ICE_SINGLE_ACT_TO_Q; 1406 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) & 1407 ICE_SINGLE_ACT_Q_INDEX_M; 1408 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) & 1409 ICE_SINGLE_ACT_Q_REGION_M; 1410 break; 1411 default: 1412 return; 1413 } 1414 1415 if (f_info->lb_en) 1416 act |= ICE_SINGLE_ACT_LB_ENABLE; 1417 if (f_info->lan_en) 1418 act |= ICE_SINGLE_ACT_LAN_ENABLE; 1419 1420 switch (f_info->lkup_type) { 1421 case ICE_SW_LKUP_MAC: 1422 daddr = f_info->l_data.mac.mac_addr; 1423 break; 1424 case ICE_SW_LKUP_VLAN: 1425 vlan_id = f_info->l_data.vlan.vlan_id; 1426 if (f_info->fltr_act == ICE_FWD_TO_VSI || 1427 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) { 1428 act |= ICE_SINGLE_ACT_PRUNE; 1429 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS; 1430 } 1431 break; 1432 case ICE_SW_LKUP_ETHERTYPE_MAC: 1433 daddr = f_info->l_data.ethertype_mac.mac_addr; 1434 fallthrough; 1435 case ICE_SW_LKUP_ETHERTYPE: 1436 off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET); 1437 *off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype); 1438 break; 1439 case ICE_SW_LKUP_MAC_VLAN: 1440 daddr = f_info->l_data.mac_vlan.mac_addr; 1441 vlan_id = f_info->l_data.mac_vlan.vlan_id; 1442 break; 1443 case 
ICE_SW_LKUP_PROMISC_VLAN: 1444 vlan_id = f_info->l_data.mac_vlan.vlan_id; 1445 fallthrough; 1446 case ICE_SW_LKUP_PROMISC: 1447 daddr = f_info->l_data.mac_vlan.mac_addr; 1448 break; 1449 default: 1450 break; 1451 } 1452 1453 s_rule->type = (f_info->flag & ICE_FLTR_RX) ? 1454 cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX) : 1455 cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX); 1456 1457 /* Recipe set depending on lookup type */ 1458 s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(f_info->lkup_type); 1459 s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(f_info->src); 1460 s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act); 1461 1462 if (daddr) 1463 ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr); 1464 1465 if (!(vlan_id > ICE_MAX_VLAN_ID)) { 1466 off = (__force __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET); 1467 *off = cpu_to_be16(vlan_id); 1468 } 1469 1470 /* Create the switch rule with the final dummy Ethernet header */ 1471 if (opc != ice_aqc_opc_update_sw_rules) 1472 s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(eth_hdr_sz); 1473 } 1474 1475 /** 1476 * ice_add_marker_act 1477 * @hw: pointer to the hardware structure 1478 * @m_ent: the management entry for which sw marker needs to be added 1479 * @sw_marker: sw marker to tag the Rx descriptor with 1480 * @l_id: large action resource ID 1481 * 1482 * Create a large action to hold software marker and update the switch rule 1483 * entry pointed by m_ent with newly created large action 1484 */ 1485 static enum ice_status 1486 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, 1487 u16 sw_marker, u16 l_id) 1488 { 1489 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx; 1490 /* For software marker we need 3 large actions 1491 * 1. FWD action: FWD TO VSI or VSI LIST 1492 * 2. GENERIC VALUE action to hold the profile ID 1493 * 3. 
GENERIC VALUE action to hold the software marker ID 1494 */ 1495 const u16 num_lg_acts = 3; 1496 enum ice_status status; 1497 u16 lg_act_size; 1498 u16 rules_size; 1499 u32 act; 1500 u16 id; 1501 1502 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC) 1503 return ICE_ERR_PARAM; 1504 1505 /* Create two back-to-back switch rules and submit them to the HW using 1506 * one memory buffer: 1507 * 1. Large Action 1508 * 2. Look up Tx Rx 1509 */ 1510 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts); 1511 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE; 1512 lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL); 1513 if (!lg_act) 1514 return ICE_ERR_NO_MEMORY; 1515 1516 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size); 1517 1518 /* Fill in the first switch rule i.e. large action */ 1519 lg_act->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT); 1520 lg_act->pdata.lg_act.index = cpu_to_le16(l_id); 1521 lg_act->pdata.lg_act.size = cpu_to_le16(num_lg_acts); 1522 1523 /* First action VSI forwarding or VSI list forwarding depending on how 1524 * many VSIs 1525 */ 1526 id = (m_ent->vsi_count > 1) ? 
m_ent->fltr_info.fwd_id.vsi_list_id : 1527 m_ent->fltr_info.fwd_id.hw_vsi_id; 1528 1529 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT; 1530 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M; 1531 if (m_ent->vsi_count > 1) 1532 act |= ICE_LG_ACT_VSI_LIST; 1533 lg_act->pdata.lg_act.act[0] = cpu_to_le32(act); 1534 1535 /* Second action descriptor type */ 1536 act = ICE_LG_ACT_GENERIC; 1537 1538 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M; 1539 lg_act->pdata.lg_act.act[1] = cpu_to_le32(act); 1540 1541 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX << 1542 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M; 1543 1544 /* Third action Marker value */ 1545 act |= ICE_LG_ACT_GENERIC; 1546 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) & 1547 ICE_LG_ACT_GENERIC_VALUE_M; 1548 1549 lg_act->pdata.lg_act.act[2] = cpu_to_le32(act); 1550 1551 /* call the fill switch rule to fill the lookup Tx Rx structure */ 1552 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx, 1553 ice_aqc_opc_update_sw_rules); 1554 1555 /* Update the action to point to the large action ID */ 1556 rx_tx->pdata.lkup_tx_rx.act = 1557 cpu_to_le32(ICE_SINGLE_ACT_PTR | 1558 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) & 1559 ICE_SINGLE_ACT_PTR_VAL_M)); 1560 1561 /* Use the filter rule ID of the previously created rule with single 1562 * act. 
Once the update happens, hardware will treat this as large 1563 * action 1564 */ 1565 rx_tx->pdata.lkup_tx_rx.index = 1566 cpu_to_le16(m_ent->fltr_info.fltr_rule_id); 1567 1568 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2, 1569 ice_aqc_opc_update_sw_rules, NULL); 1570 if (!status) { 1571 m_ent->lg_act_idx = l_id; 1572 m_ent->sw_marker_id = sw_marker; 1573 } 1574 1575 devm_kfree(ice_hw_to_dev(hw), lg_act); 1576 return status; 1577 } 1578 1579 /** 1580 * ice_create_vsi_list_map 1581 * @hw: pointer to the hardware structure 1582 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping 1583 * @num_vsi: number of VSI handles in the array 1584 * @vsi_list_id: VSI list ID generated as part of allocate resource 1585 * 1586 * Helper function to create a new entry of VSI list ID to VSI mapping 1587 * using the given VSI list ID 1588 */ 1589 static struct ice_vsi_list_map_info * 1590 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, 1591 u16 vsi_list_id) 1592 { 1593 struct ice_switch_info *sw = hw->switch_info; 1594 struct ice_vsi_list_map_info *v_map; 1595 int i; 1596 1597 v_map = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*v_map), GFP_KERNEL); 1598 if (!v_map) 1599 return NULL; 1600 1601 v_map->vsi_list_id = vsi_list_id; 1602 v_map->ref_cnt = 1; 1603 for (i = 0; i < num_vsi; i++) 1604 set_bit(vsi_handle_arr[i], v_map->vsi_map); 1605 1606 list_add(&v_map->list_entry, &sw->vsi_list_map_head); 1607 return v_map; 1608 } 1609 1610 /** 1611 * ice_update_vsi_list_rule 1612 * @hw: pointer to the hardware structure 1613 * @vsi_handle_arr: array of VSI handles to form a VSI list 1614 * @num_vsi: number of VSI handles in the array 1615 * @vsi_list_id: VSI list ID generated as part of allocate resource 1616 * @remove: Boolean value to indicate if this is a remove action 1617 * @opc: switch rules population command type - pass in the command opcode 1618 * @lkup_type: lookup type of the filter 1619 * 1620 * Call AQ command to add a new switch rule or 
update existing switch rule 1621 * using the given VSI list ID 1622 */ 1623 static enum ice_status 1624 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, 1625 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc, 1626 enum ice_sw_lkup_type lkup_type) 1627 { 1628 struct ice_aqc_sw_rules_elem *s_rule; 1629 enum ice_status status; 1630 u16 s_rule_size; 1631 u16 rule_type; 1632 int i; 1633 1634 if (!num_vsi) 1635 return ICE_ERR_PARAM; 1636 1637 if (lkup_type == ICE_SW_LKUP_MAC || 1638 lkup_type == ICE_SW_LKUP_MAC_VLAN || 1639 lkup_type == ICE_SW_LKUP_ETHERTYPE || 1640 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC || 1641 lkup_type == ICE_SW_LKUP_PROMISC || 1642 lkup_type == ICE_SW_LKUP_PROMISC_VLAN) 1643 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR : 1644 ICE_AQC_SW_RULES_T_VSI_LIST_SET; 1645 else if (lkup_type == ICE_SW_LKUP_VLAN) 1646 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR : 1647 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET; 1648 else 1649 return ICE_ERR_PARAM; 1650 1651 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi); 1652 s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL); 1653 if (!s_rule) 1654 return ICE_ERR_NO_MEMORY; 1655 for (i = 0; i < num_vsi; i++) { 1656 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) { 1657 status = ICE_ERR_PARAM; 1658 goto exit; 1659 } 1660 /* AQ call requires hw_vsi_id(s) */ 1661 s_rule->pdata.vsi_list.vsi[i] = 1662 cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i])); 1663 } 1664 1665 s_rule->type = cpu_to_le16(rule_type); 1666 s_rule->pdata.vsi_list.number_vsi = cpu_to_le16(num_vsi); 1667 s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id); 1668 1669 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL); 1670 1671 exit: 1672 devm_kfree(ice_hw_to_dev(hw), s_rule); 1673 return status; 1674 } 1675 1676 /** 1677 * ice_create_vsi_list_rule - Creates and populates a VSI list rule 1678 * @hw: pointer to the HW struct 1679 * @vsi_handle_arr: array of VSI handles to form 
a VSI list 1680 * @num_vsi: number of VSI handles in the array 1681 * @vsi_list_id: stores the ID of the VSI list to be created 1682 * @lkup_type: switch rule filter's lookup type 1683 */ 1684 static enum ice_status 1685 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, 1686 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type) 1687 { 1688 enum ice_status status; 1689 1690 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type, 1691 ice_aqc_opc_alloc_res); 1692 if (status) 1693 return status; 1694 1695 /* Update the newly created VSI list to include the specified VSIs */ 1696 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi, 1697 *vsi_list_id, false, 1698 ice_aqc_opc_add_sw_rules, lkup_type); 1699 } 1700 1701 /** 1702 * ice_create_pkt_fwd_rule 1703 * @hw: pointer to the hardware structure 1704 * @f_entry: entry containing packet forwarding information 1705 * 1706 * Create switch rule with given filter information and add an entry 1707 * to the corresponding filter management list to track this switch rule 1708 * and VSI mapping 1709 */ 1710 static enum ice_status 1711 ice_create_pkt_fwd_rule(struct ice_hw *hw, 1712 struct ice_fltr_list_entry *f_entry) 1713 { 1714 struct ice_fltr_mgmt_list_entry *fm_entry; 1715 struct ice_aqc_sw_rules_elem *s_rule; 1716 enum ice_sw_lkup_type l_type; 1717 struct ice_sw_recipe *recp; 1718 enum ice_status status; 1719 1720 s_rule = devm_kzalloc(ice_hw_to_dev(hw), 1721 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL); 1722 if (!s_rule) 1723 return ICE_ERR_NO_MEMORY; 1724 fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry), 1725 GFP_KERNEL); 1726 if (!fm_entry) { 1727 status = ICE_ERR_NO_MEMORY; 1728 goto ice_create_pkt_fwd_rule_exit; 1729 } 1730 1731 fm_entry->fltr_info = f_entry->fltr_info; 1732 1733 /* Initialize all the fields for the management entry */ 1734 fm_entry->vsi_count = 1; 1735 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX; 1736 fm_entry->sw_marker_id = 
ICE_INVAL_SW_MARKER_ID; 1737 fm_entry->counter_index = ICE_INVAL_COUNTER_ID; 1738 1739 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule, 1740 ice_aqc_opc_add_sw_rules); 1741 1742 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1, 1743 ice_aqc_opc_add_sw_rules, NULL); 1744 if (status) { 1745 devm_kfree(ice_hw_to_dev(hw), fm_entry); 1746 goto ice_create_pkt_fwd_rule_exit; 1747 } 1748 1749 f_entry->fltr_info.fltr_rule_id = 1750 le16_to_cpu(s_rule->pdata.lkup_tx_rx.index); 1751 fm_entry->fltr_info.fltr_rule_id = 1752 le16_to_cpu(s_rule->pdata.lkup_tx_rx.index); 1753 1754 /* The book keeping entries will get removed when base driver 1755 * calls remove filter AQ command 1756 */ 1757 l_type = fm_entry->fltr_info.lkup_type; 1758 recp = &hw->switch_info->recp_list[l_type]; 1759 list_add(&fm_entry->list_entry, &recp->filt_rules); 1760 1761 ice_create_pkt_fwd_rule_exit: 1762 devm_kfree(ice_hw_to_dev(hw), s_rule); 1763 return status; 1764 } 1765 1766 /** 1767 * ice_update_pkt_fwd_rule 1768 * @hw: pointer to the hardware structure 1769 * @f_info: filter information for switch rule 1770 * 1771 * Call AQ command to update a previously created switch rule with a 1772 * VSI list ID 1773 */ 1774 static enum ice_status 1775 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info) 1776 { 1777 struct ice_aqc_sw_rules_elem *s_rule; 1778 enum ice_status status; 1779 1780 s_rule = devm_kzalloc(ice_hw_to_dev(hw), 1781 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL); 1782 if (!s_rule) 1783 return ICE_ERR_NO_MEMORY; 1784 1785 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules); 1786 1787 s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(f_info->fltr_rule_id); 1788 1789 /* Update switch rule with new rule set to forward VSI list */ 1790 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1, 1791 ice_aqc_opc_update_sw_rules, NULL); 1792 1793 devm_kfree(ice_hw_to_dev(hw), s_rule); 1794 return status; 1795 } 1796 1797 /** 1798 * 
ice_update_sw_rule_bridge_mode
 * @hw: pointer to the HW struct
 *
 * Updates unicast switch filter rules based on VEB/VEPA mode
 *
 * Walks the MAC-lookup filter list under its lock and re-issues an update AQ
 * command for each unicast Tx forwarding rule; the update path re-runs
 * ice_fill_sw_info, which recomputes lan_en from the current hw->evb_veb
 * setting. Stops at the first AQ failure and returns that status.
 */
enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *fm_entry;
	enum ice_status status = 0;
	struct list_head *rule_head;
	struct mutex *rule_lock; /* Lock to protect filter rule list */

	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;

	mutex_lock(rule_lock);
	list_for_each_entry(fm_entry, rule_head, list_entry) {
		struct ice_fltr_info *fi = &fm_entry->fltr_info;
		u8 *addr = fi->l_data.mac.mac_addr;

		/* Update unicast Tx rules to reflect the selected
		 * VEB/VEPA mode
		 */
		if ((fi->flag & ICE_FLTR_TX) && is_unicast_ether_addr(addr) &&
		    (fi->fltr_act == ICE_FWD_TO_VSI ||
		     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
		     fi->fltr_act == ICE_FWD_TO_Q ||
		     fi->fltr_act == ICE_FWD_TO_QGRP)) {
			status = ice_update_pkt_fwd_rule(hw, fi);
			if (status)
				break;
		}
	}

	mutex_unlock(rule_lock);

	return status;
}

/**
 * ice_add_update_vsi_list
 * @hw: pointer to the hardware structure
 * @m_entry: pointer to current filter management list entry
 * @cur_fltr: filter information from the book keeping entry
 * @new_fltr: filter information with the new VSI to be added
 *
 * Call AQ command to add or update previously created VSI list with new VSI.
 *
 * Helper function to do book keeping associated with adding filter information
 * The algorithm to do the book keeping is described below :
 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
1850 * if only one VSI has been added till now 1851 * Allocate a new VSI list and add two VSIs 1852 * to this list using switch rule command 1853 * Update the previously created switch rule with the 1854 * newly created VSI list ID 1855 * if a VSI list was previously created 1856 * Add the new VSI to the previously created VSI list set 1857 * using the update switch rule command 1858 */ 1859 static enum ice_status 1860 ice_add_update_vsi_list(struct ice_hw *hw, 1861 struct ice_fltr_mgmt_list_entry *m_entry, 1862 struct ice_fltr_info *cur_fltr, 1863 struct ice_fltr_info *new_fltr) 1864 { 1865 enum ice_status status = 0; 1866 u16 vsi_list_id = 0; 1867 1868 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q || 1869 cur_fltr->fltr_act == ICE_FWD_TO_QGRP)) 1870 return ICE_ERR_NOT_IMPL; 1871 1872 if ((new_fltr->fltr_act == ICE_FWD_TO_Q || 1873 new_fltr->fltr_act == ICE_FWD_TO_QGRP) && 1874 (cur_fltr->fltr_act == ICE_FWD_TO_VSI || 1875 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST)) 1876 return ICE_ERR_NOT_IMPL; 1877 1878 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) { 1879 /* Only one entry existed in the mapping and it was not already 1880 * a part of a VSI list. So, create a VSI list with the old and 1881 * new VSIs. 
1882 */ 1883 struct ice_fltr_info tmp_fltr; 1884 u16 vsi_handle_arr[2]; 1885 1886 /* A rule already exists with the new VSI being added */ 1887 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id) 1888 return ICE_ERR_ALREADY_EXISTS; 1889 1890 vsi_handle_arr[0] = cur_fltr->vsi_handle; 1891 vsi_handle_arr[1] = new_fltr->vsi_handle; 1892 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2, 1893 &vsi_list_id, 1894 new_fltr->lkup_type); 1895 if (status) 1896 return status; 1897 1898 tmp_fltr = *new_fltr; 1899 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id; 1900 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST; 1901 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id; 1902 /* Update the previous switch rule of "MAC forward to VSI" to 1903 * "MAC fwd to VSI list" 1904 */ 1905 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr); 1906 if (status) 1907 return status; 1908 1909 cur_fltr->fwd_id.vsi_list_id = vsi_list_id; 1910 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST; 1911 m_entry->vsi_list_info = 1912 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2, 1913 vsi_list_id); 1914 1915 if (!m_entry->vsi_list_info) 1916 return ICE_ERR_NO_MEMORY; 1917 1918 /* If this entry was large action then the large action needs 1919 * to be updated to point to FWD to VSI list 1920 */ 1921 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) 1922 status = 1923 ice_add_marker_act(hw, m_entry, 1924 m_entry->sw_marker_id, 1925 m_entry->lg_act_idx); 1926 } else { 1927 u16 vsi_handle = new_fltr->vsi_handle; 1928 enum ice_adminq_opc opcode; 1929 1930 if (!m_entry->vsi_list_info) 1931 return ICE_ERR_CFG; 1932 1933 /* A rule already exists with the new VSI being added */ 1934 if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map)) 1935 return 0; 1936 1937 /* Update the previously created VSI list set with 1938 * the new VSI ID passed in 1939 */ 1940 vsi_list_id = cur_fltr->fwd_id.vsi_list_id; 1941 opcode = ice_aqc_opc_update_sw_rules; 1942 1943 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, 1944 
vsi_list_id, false, opcode, 1945 new_fltr->lkup_type); 1946 /* update VSI list mapping info with new VSI ID */ 1947 if (!status) 1948 set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map); 1949 } 1950 if (!status) 1951 m_entry->vsi_count++; 1952 return status; 1953 } 1954 1955 /** 1956 * ice_find_rule_entry - Search a rule entry 1957 * @hw: pointer to the hardware structure 1958 * @recp_id: lookup type for which the specified rule needs to be searched 1959 * @f_info: rule information 1960 * 1961 * Helper function to search for a given rule entry 1962 * Returns pointer to entry storing the rule if found 1963 */ 1964 static struct ice_fltr_mgmt_list_entry * 1965 ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info) 1966 { 1967 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL; 1968 struct ice_switch_info *sw = hw->switch_info; 1969 struct list_head *list_head; 1970 1971 list_head = &sw->recp_list[recp_id].filt_rules; 1972 list_for_each_entry(list_itr, list_head, list_entry) { 1973 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data, 1974 sizeof(f_info->l_data)) && 1975 f_info->flag == list_itr->fltr_info.flag) { 1976 ret = list_itr; 1977 break; 1978 } 1979 } 1980 return ret; 1981 } 1982 1983 /** 1984 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1 1985 * @hw: pointer to the hardware structure 1986 * @recp_id: lookup type for which VSI lists needs to be searched 1987 * @vsi_handle: VSI handle to be found in VSI list 1988 * @vsi_list_id: VSI list ID found containing vsi_handle 1989 * 1990 * Helper function to search a VSI list with single entry containing given VSI 1991 * handle element. This can be extended further to search VSI list with more 1992 * than 1 vsi_count. Returns pointer to VSI list entry if found. 
 */
static struct ice_vsi_list_map_info *
ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
			u16 *vsi_list_id)
{
	struct ice_vsi_list_map_info *map_info = NULL;
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *list_itr;
	struct list_head *list_head;

	list_head = &sw->recp_list[recp_id].filt_rules;
	list_for_each_entry(list_itr, list_head, list_entry) {
		/* only consider lists that currently track a single VSI */
		if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) {
			map_info = list_itr->vsi_list_info;
			if (test_bit(vsi_handle, map_info->vsi_map)) {
				*vsi_list_id = map_info->vsi_list_id;
				return map_info;
			}
		}
	}
	return NULL;
}

/**
 * ice_add_rule_internal - add rule for a given lookup type
 * @hw: pointer to the hardware structure
 * @recp_id: lookup type (recipe ID) for which rule has to be added
 * @f_entry: structure containing MAC forwarding information
 *
 * Adds or updates the rule lists for a given recipe
 *
 * Return: 0 on success; ICE_ERR_PARAM for an invalid VSI handle, otherwise
 * the status of the rule creation/update helpers.
 */
static enum ice_status
ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
		      struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_info *new_fltr, *cur_fltr;
	struct ice_fltr_mgmt_list_entry *m_entry;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = 0;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;
	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);

	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;

	mutex_lock(rule_lock);
	new_fltr = &f_entry->fltr_info;
	/* the rule "source" depends on direction: port for Rx, VSI for Tx */
	if (new_fltr->flag & ICE_FLTR_RX)
		new_fltr->src = hw->port_info->lport;
	else if (new_fltr->flag & ICE_FLTR_TX)
		new_fltr->src = f_entry->fltr_info.fwd_id.hw_vsi_id;

	m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
	if (!m_entry) {
		/* no matching rule yet - create a fresh forwarding rule */
		mutex_unlock(rule_lock);
		return ice_create_pkt_fwd_rule(hw, f_entry);
	}

	/* rule exists - fold the new VSI into its VSI list */
	cur_fltr = &m_entry->fltr_info;
	status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
	mutex_unlock(rule_lock);

	return status;
}

/**
 * ice_remove_vsi_list_rule
 * @hw: pointer to the hardware structure
 * @vsi_list_id: VSI list ID generated as part of allocate resource
 * @lkup_type: switch rule filter lookup type
 *
 * The VSI list should be emptied before this function is called to remove the
 * VSI list.
 */
static enum ice_status
ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
			 enum ice_sw_lkup_type lkup_type)
{
	struct ice_aqc_sw_rules_elem *s_rule;
	enum ice_status status;
	u16 s_rule_size;

	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;

	s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
	s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);

	/* Free the vsi_list resource that we allocated. It is assumed that the
	 * list is empty at this point.
	 */
	status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
					    ice_aqc_opc_free_res);

	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}

/**
 * ice_rem_update_vsi_list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle of the VSI to remove
 * @fm_list: filter management entry for which the VSI list management needs to
 *           be done
 *
 * Removes @vsi_handle from the rule's VSI list. When only one VSI remains
 * (non-VLAN lookups) the rule is converted back to a direct FWD_TO_VSI rule
 * and the now-unused VSI list is freed.
 */
static enum ice_status
ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
			struct ice_fltr_mgmt_list_entry *fm_list)
{
	enum ice_sw_lkup_type lkup_type;
	enum ice_status status = 0;
	u16 vsi_list_id;

	if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)
		return ICE_ERR_PARAM;

	/* A rule with the VSI being removed does not exist */
	if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
		return ICE_ERR_DOES_NOT_EXIST;

	lkup_type = fm_list->fltr_info.lkup_type;
	vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,
					  lkup_type);
	if (status)
		return status;

	fm_list->vsi_count--;
	clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);

	/* one VSI left (non-VLAN): demote the rule back to FWD_TO_VSI */
	if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
		struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;
		u16 rem_vsi_handle;

		rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
						ICE_MAX_VSI);
		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
			return ICE_ERR_OUT_OF_RANGE;

		/* Make sure VSI list is empty before removing it below */
		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
						  vsi_list_id, true,
						  ice_aqc_opc_update_sw_rules,
						  lkup_type);
		if (status)
			return status;

		tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr_info.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		tmp_fltr_info.vsi_handle = rem_vsi_handle;
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
				  tmp_fltr_info.fwd_id.hw_vsi_id, status);
			return status;
		}

		fm_list->fltr_info = tmp_fltr_info;
	}

	/* the VSI list is now unused in both the demoted case above and the
	 * fully-drained VLAN case - release it and drop the mapping
	 */
	if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
	    (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;

		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
				  vsi_list_id, status);
			return status;
		}

		list_del(&vsi_list_info->list_entry);
		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
		fm_list->vsi_list_info = NULL;
	}

	return status;
}

/**
 * ice_remove_rule_internal - Remove a filter rule of a given type
 * @hw: pointer to the hardware structure
 * @recp_id: recipe ID for which the rule needs to removed
 * @f_entry: rule entry containing filter information
 *
 * Return: 0 on success, ICE_ERR_DOES_NOT_EXIST if no matching rule is
 * tracked, otherwise the status from the AQ/VSI-list helpers.
 */
static enum ice_status
ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
			 struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *list_elem;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = 0;
	bool remove_rule = false;
	u16 vsi_handle;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;
	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);

	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
	mutex_lock(rule_lock);
	list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
	if (!list_elem) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto exit;
	}

	if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
		/* direct-forwarding rule: just remove it */
		remove_rule = true;
	} else if (!list_elem->vsi_list_info) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto exit;
	} else if (list_elem->vsi_list_info->ref_cnt > 1) {
		/* a ref_cnt > 1 indicates that the vsi_list is being
		 * shared by multiple rules. Decrement the ref_cnt and
		 * remove this rule, but do not modify the list, as it
		 * is in-use by other rules.
		 */
		list_elem->vsi_list_info->ref_cnt--;
		remove_rule = true;
	} else {
		/* a ref_cnt of 1 indicates the vsi_list is only used
		 * by one rule. However, the original removal request is only
		 * for a single VSI. Update the vsi_list first, and only
		 * remove the rule if there are no further VSIs in this list.
		 */
		vsi_handle = f_entry->fltr_info.vsi_handle;
		status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
		if (status)
			goto exit;
		/* if VSI count goes to zero after updating the VSI list */
		if (list_elem->vsi_count == 0)
			remove_rule = true;
	}

	if (remove_rule) {
		/* Remove the lookup rule */
		struct ice_aqc_sw_rules_elem *s_rule;

		s_rule = devm_kzalloc(ice_hw_to_dev(hw),
				      ICE_SW_RULE_RX_TX_NO_HDR_SIZE,
				      GFP_KERNEL);
		if (!s_rule) {
			status = ICE_ERR_NO_MEMORY;
			goto exit;
		}

		ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
				 ice_aqc_opc_remove_sw_rules);

		status = ice_aq_sw_rules(hw, s_rule,
					 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
					 ice_aqc_opc_remove_sw_rules, NULL);

		/* Remove a book keeping from the list */
		devm_kfree(ice_hw_to_dev(hw), s_rule);

		if (status)
			goto exit;

		list_del(&list_elem->list_entry);
		devm_kfree(ice_hw_to_dev(hw), list_elem);
	}
exit:
	mutex_unlock(rule_lock);
	return status;
}

/**
 * ice_add_mac - Add a MAC address based filter rule
 * @hw: pointer to the hardware structure
 * @m_list: list of MAC addresses and forwarding information
 *
 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
 * multiple unicast addresses, the function assumes that all the
 * addresses are unique in a given add_mac call. It doesn't
 * check for duplicates in this case, removing duplicates from a given
 * list should be taken care of in the caller of this function.
 *
 * Non-shared unicast addresses are programmed in bulk (one AQ buffer per
 * chunk); multicast and shared-unicast addresses go through
 * ice_add_rule_internal() one at a time.
 */
enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
{
	struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
	struct ice_fltr_list_entry *m_list_itr;
	struct list_head *rule_head;
	u16 total_elem_left, s_rule_size;
	struct ice_switch_info *sw;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = 0;
	u16 num_unicast = 0;
	u8 elem_sent;

	if (!m_list || !hw)
		return ICE_ERR_PARAM;

	s_rule = NULL;
	sw = hw->switch_info;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	/* Pass 1: validate every entry, count unicast addresses for the bulk
	 * add, and add multicast/shared-unicast entries immediately.
	 */
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
		u16 vsi_handle;
		u16 hw_vsi_id;

		m_list_itr->fltr_info.flag = ICE_FLTR_TX;
		vsi_handle = m_list_itr->fltr_info.vsi_handle;
		if (!ice_is_vsi_valid(hw, vsi_handle))
			return ICE_ERR_PARAM;
		hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
		m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
		/* update the src in case it is VSI num */
		if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
			return ICE_ERR_PARAM;
		m_list_itr->fltr_info.src = hw_vsi_id;
		if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
		    is_zero_ether_addr(add))
			return ICE_ERR_PARAM;
		if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
			/* Don't overwrite the unicast address */
			mutex_lock(rule_lock);
			if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
						&m_list_itr->fltr_info)) {
				mutex_unlock(rule_lock);
				return ICE_ERR_ALREADY_EXISTS;
			}
			mutex_unlock(rule_lock);
			num_unicast++;
		} else if (is_multicast_ether_addr(add) ||
			   (is_unicast_ether_addr(add) && hw->ucast_shared)) {
			m_list_itr->status =
				ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
						      m_list_itr);
			if (m_list_itr->status)
				return m_list_itr->status;
		}
	}

	mutex_lock(rule_lock);
	/* Exit if no suitable entries were found for adding bulk switch rule */
	if (!num_unicast) {
		status = 0;
		goto ice_add_mac_exit;
	}

	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;

	/* Allocate switch rule buffer for the bulk update for unicast */
	s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
			      GFP_KERNEL);
	if (!s_rule) {
		status = ICE_ERR_NO_MEMORY;
		goto ice_add_mac_exit;
	}

	/* Pass 2: fill one switch-rule element per unicast address */
	r_iter = s_rule;
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];

		if (is_unicast_ether_addr(mac_addr)) {
			ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
					 ice_aqc_opc_add_sw_rules);
			r_iter = (struct ice_aqc_sw_rules_elem *)
				((u8 *)r_iter + s_rule_size);
		}
	}

	/* Call AQ bulk switch rule update for all unicast addresses */
	r_iter = s_rule;
	/* Call AQ switch rule in AQ_MAX chunk */
	for (total_elem_left = num_unicast; total_elem_left > 0;
	     total_elem_left -= elem_sent) {
		struct ice_aqc_sw_rules_elem *entry = r_iter;

		/* cap each AQ call at what fits in one AQ buffer */
		elem_sent = min_t(u8, total_elem_left,
				  (ICE_AQ_MAX_BUF_LEN / s_rule_size));
		status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
					 elem_sent, ice_aqc_opc_add_sw_rules,
					 NULL);
		if (status)
			goto ice_add_mac_exit;
		r_iter = (struct ice_aqc_sw_rules_elem *)
			((u8 *)r_iter + (elem_sent * s_rule_size));
	}

	/* Pass 3: fill up rule ID based on the value returned from FW and
	 * create a book-keeping entry for each programmed unicast address.
	 */
	r_iter = s_rule;
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
		struct ice_fltr_mgmt_list_entry *fm_entry;

		if (is_unicast_ether_addr(mac_addr)) {
			f_info->fltr_rule_id =
				le16_to_cpu(r_iter->pdata.lkup_tx_rx.index);
			f_info->fltr_act = ICE_FWD_TO_VSI;
			/* Create an entry to track this MAC address */
			fm_entry = devm_kzalloc(ice_hw_to_dev(hw),
						sizeof(*fm_entry), GFP_KERNEL);
			if (!fm_entry) {
				status = ICE_ERR_NO_MEMORY;
				goto ice_add_mac_exit;
			}
			fm_entry->fltr_info = *f_info;
			fm_entry->vsi_count = 1;
			/* The book keeping entries will get removed when
			 * base driver calls remove filter AQ command
			 */

			list_add(&fm_entry->list_entry, rule_head);
			r_iter = (struct ice_aqc_sw_rules_elem *)
				((u8 *)r_iter + s_rule_size);
		}
	}

ice_add_mac_exit:
	mutex_unlock(rule_lock);
	if (s_rule)
		devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}

/**
 * ice_add_vlan_internal - Add one VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @f_entry: filter entry containing one VLAN information
 */
static enum ice_status
ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *v_list_itr;
	struct ice_fltr_info *new_fltr, *cur_fltr;
	enum ice_sw_lkup_type lkup_type;
	u16 vsi_list_id = 0, vsi_handle;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = 0;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;

	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
	new_fltr = &f_entry->fltr_info;

	/* VLAN ID should only be 12 bits */
	if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
		return ICE_ERR_PARAM;

	if (new_fltr->src_id != ICE_SRC_ID_VSI)
		return ICE_ERR_PARAM;

	new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
	lkup_type = new_fltr->lkup_type;
vsi_handle = new_fltr->vsi_handle; 2461 rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock; 2462 mutex_lock(rule_lock); 2463 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr); 2464 if (!v_list_itr) { 2465 struct ice_vsi_list_map_info *map_info = NULL; 2466 2467 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) { 2468 /* All VLAN pruning rules use a VSI list. Check if 2469 * there is already a VSI list containing VSI that we 2470 * want to add. If found, use the same vsi_list_id for 2471 * this new VLAN rule or else create a new list. 2472 */ 2473 map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN, 2474 vsi_handle, 2475 &vsi_list_id); 2476 if (!map_info) { 2477 status = ice_create_vsi_list_rule(hw, 2478 &vsi_handle, 2479 1, 2480 &vsi_list_id, 2481 lkup_type); 2482 if (status) 2483 goto exit; 2484 } 2485 /* Convert the action to forwarding to a VSI list. */ 2486 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST; 2487 new_fltr->fwd_id.vsi_list_id = vsi_list_id; 2488 } 2489 2490 status = ice_create_pkt_fwd_rule(hw, f_entry); 2491 if (!status) { 2492 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, 2493 new_fltr); 2494 if (!v_list_itr) { 2495 status = ICE_ERR_DOES_NOT_EXIST; 2496 goto exit; 2497 } 2498 /* reuse VSI list for new rule and increment ref_cnt */ 2499 if (map_info) { 2500 v_list_itr->vsi_list_info = map_info; 2501 map_info->ref_cnt++; 2502 } else { 2503 v_list_itr->vsi_list_info = 2504 ice_create_vsi_list_map(hw, &vsi_handle, 2505 1, vsi_list_id); 2506 } 2507 } 2508 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) { 2509 /* Update existing VSI list to add new VSI ID only if it used 2510 * by one VLAN rule. 2511 */ 2512 cur_fltr = &v_list_itr->fltr_info; 2513 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr, 2514 new_fltr); 2515 } else { 2516 /* If VLAN rule exists and VSI list being used by this rule is 2517 * referenced by more than 1 VLAN rule. 
Then create a new VSI 2518 * list appending previous VSI with new VSI and update existing 2519 * VLAN rule to point to new VSI list ID 2520 */ 2521 struct ice_fltr_info tmp_fltr; 2522 u16 vsi_handle_arr[2]; 2523 u16 cur_handle; 2524 2525 /* Current implementation only supports reusing VSI list with 2526 * one VSI count. We should never hit below condition 2527 */ 2528 if (v_list_itr->vsi_count > 1 && 2529 v_list_itr->vsi_list_info->ref_cnt > 1) { 2530 ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n"); 2531 status = ICE_ERR_CFG; 2532 goto exit; 2533 } 2534 2535 cur_handle = 2536 find_first_bit(v_list_itr->vsi_list_info->vsi_map, 2537 ICE_MAX_VSI); 2538 2539 /* A rule already exists with the new VSI being added */ 2540 if (cur_handle == vsi_handle) { 2541 status = ICE_ERR_ALREADY_EXISTS; 2542 goto exit; 2543 } 2544 2545 vsi_handle_arr[0] = cur_handle; 2546 vsi_handle_arr[1] = vsi_handle; 2547 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2, 2548 &vsi_list_id, lkup_type); 2549 if (status) 2550 goto exit; 2551 2552 tmp_fltr = v_list_itr->fltr_info; 2553 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id; 2554 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id; 2555 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST; 2556 /* Update the previous switch rule to a new VSI list which 2557 * includes current VSI that is requested 2558 */ 2559 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr); 2560 if (status) 2561 goto exit; 2562 2563 /* before overriding VSI list map info. 
decrement ref_cnt of 2564 * previous VSI list 2565 */ 2566 v_list_itr->vsi_list_info->ref_cnt--; 2567 2568 /* now update to newly created list */ 2569 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id; 2570 v_list_itr->vsi_list_info = 2571 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2, 2572 vsi_list_id); 2573 v_list_itr->vsi_count++; 2574 } 2575 2576 exit: 2577 mutex_unlock(rule_lock); 2578 return status; 2579 } 2580 2581 /** 2582 * ice_add_vlan - Add VLAN based filter rule 2583 * @hw: pointer to the hardware structure 2584 * @v_list: list of VLAN entries and forwarding information 2585 */ 2586 enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *v_list) 2587 { 2588 struct ice_fltr_list_entry *v_list_itr; 2589 2590 if (!v_list || !hw) 2591 return ICE_ERR_PARAM; 2592 2593 list_for_each_entry(v_list_itr, v_list, list_entry) { 2594 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN) 2595 return ICE_ERR_PARAM; 2596 v_list_itr->fltr_info.flag = ICE_FLTR_TX; 2597 v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr); 2598 if (v_list_itr->status) 2599 return v_list_itr->status; 2600 } 2601 return 0; 2602 } 2603 2604 /** 2605 * ice_add_eth_mac - Add ethertype and MAC based filter rule 2606 * @hw: pointer to the hardware structure 2607 * @em_list: list of ether type MAC filter, MAC is optional 2608 * 2609 * This function requires the caller to populate the entries in 2610 * the filter list with the necessary fields (including flags to 2611 * indicate Tx or Rx rules). 
 */
enum ice_status
ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
{
	struct ice_fltr_list_entry *em_list_itr;

	if (!em_list || !hw)
		return ICE_ERR_PARAM;

	list_for_each_entry(em_list_itr, em_list, list_entry) {
		enum ice_sw_lkup_type l_type =
			em_list_itr->fltr_info.lkup_type;

		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
		    l_type != ICE_SW_LKUP_ETHERTYPE)
			return ICE_ERR_PARAM;

		/* per-entry status is recorded; first failure stops the loop */
		em_list_itr->status = ice_add_rule_internal(hw, l_type,
							    em_list_itr);
		if (em_list_itr->status)
			return em_list_itr->status;
	}
	return 0;
}

/**
 * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
 * @hw: pointer to the hardware structure
 * @em_list: list of ethertype or ethertype MAC entries
 */
enum ice_status
ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list)
{
	struct ice_fltr_list_entry *em_list_itr, *tmp;

	if (!em_list || !hw)
		return ICE_ERR_PARAM;

	list_for_each_entry_safe(em_list_itr, tmp, em_list, list_entry) {
		enum ice_sw_lkup_type l_type =
			em_list_itr->fltr_info.lkup_type;

		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
		    l_type != ICE_SW_LKUP_ETHERTYPE)
			return ICE_ERR_PARAM;

		em_list_itr->status = ice_remove_rule_internal(hw, l_type,
							       em_list_itr);
		if (em_list_itr->status)
			return em_list_itr->status;
	}
	return 0;
}

/**
 * ice_rem_sw_rule_info
 * @hw: pointer to the hardware structure
 * @rule_head: pointer to the switch list structure that we want to delete
 *
 * Frees every book-keeping entry on @rule_head. (The list_empty() guard is
 * defensive; the loop itself handles an empty list.)
 */
static void
ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head)
{
	if (!list_empty(rule_head)) {
		struct ice_fltr_mgmt_list_entry *entry;
		struct ice_fltr_mgmt_list_entry *tmp;

		list_for_each_entry_safe(entry, tmp, rule_head, list_entry) {
			list_del(&entry->list_entry);
			devm_kfree(ice_hw_to_dev(hw), entry);
		}
	}
}

/**
 * ice_rem_adv_rule_info
 * @hw: pointer to the hardware structure
 * @rule_head: pointer to the switch list structure that we want to delete
 *
 * Frees every advanced-rule entry on @rule_head, including each entry's
 * separately allocated lookup array.
 */
static void
ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head)
{
	struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
	struct ice_adv_fltr_mgmt_list_entry *lst_itr;

	if (list_empty(rule_head))
		return;

	list_for_each_entry_safe(lst_itr, tmp_entry, rule_head, list_entry) {
		list_del(&lst_itr->list_entry);
		devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
		devm_kfree(ice_hw_to_dev(hw), lst_itr);
	}
}

/**
 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to set as default
 * @set: true to add the above mentioned switch rule, false to remove it
 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
 *
 * add filter rule to set/unset given VSI as default VSI for the switch
 * (represented by swid)
 */
enum ice_status
ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
{
	struct ice_aqc_sw_rules_elem *s_rule;
	struct ice_fltr_info f_info;
	enum ice_adminq_opc opcode;
	enum ice_status status;
	u16 s_rule_size;
	u16 hw_vsi_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	/* adding a rule carries a full dummy ethernet header; removal does
	 * not need one
	 */
	s_rule_size = set ?
		      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
		      ICE_SW_RULE_RX_TX_NO_HDR_SIZE;

	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;

	memset(&f_info, 0, sizeof(f_info));

	f_info.lkup_type = ICE_SW_LKUP_DFLT;
	f_info.flag = direction;
	f_info.fltr_act = ICE_FWD_TO_VSI;
	f_info.fwd_id.hw_vsi_id = hw_vsi_id;

	if (f_info.flag & ICE_FLTR_RX) {
		f_info.src = hw->port_info->lport;
		f_info.src_id = ICE_SRC_ID_LPORT;
		if (!set)
			/* removal must reference the rule ID recorded when the
			 * default-VSI rule was added
			 */
			f_info.fltr_rule_id =
				hw->port_info->dflt_rx_vsi_rule_id;
	} else if (f_info.flag & ICE_FLTR_TX) {
		f_info.src_id = ICE_SRC_ID_VSI;
		f_info.src = hw_vsi_id;
		if (!set)
			f_info.fltr_rule_id =
				hw->port_info->dflt_tx_vsi_rule_id;
	}

	if (set)
		opcode = ice_aqc_opc_add_sw_rules;
	else
		opcode = ice_aqc_opc_remove_sw_rules;

	ice_fill_sw_rule(hw, &f_info, s_rule, opcode);

	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
	if (status || !(f_info.flag & ICE_FLTR_TX_RX))
		goto out;
	/* cache (or invalidate) the default-VSI number and rule ID in the
	 * port info so later set/clear calls can find them
	 */
	if (set) {
		u16 index = le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);

		if (f_info.flag & ICE_FLTR_TX) {
			hw->port_info->dflt_tx_vsi_num = hw_vsi_id;
			hw->port_info->dflt_tx_vsi_rule_id = index;
		} else if (f_info.flag & ICE_FLTR_RX) {
			hw->port_info->dflt_rx_vsi_num = hw_vsi_id;
			hw->port_info->dflt_rx_vsi_rule_id = index;
		}
	} else {
		if (f_info.flag & ICE_FLTR_TX) {
			hw->port_info->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
			hw->port_info->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
		} else if (f_info.flag & ICE_FLTR_RX) {
			hw->port_info->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
			hw->port_info->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
		}
	}

out:
	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}

/**
 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
 * @hw: pointer to the hardware structure
 * @recp_id: lookup type for which the specified rule needs to be searched
 * @f_info: rule information
 *
 * Helper function to search for a unicast rule entry - this is to be used
 * to remove unicast MAC filter that is not shared with other VSIs on the
 * PF switch.
 *
 * Returns pointer to entry storing the rule if found
 */
static struct ice_fltr_mgmt_list_entry *
ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
			  struct ice_fltr_info *f_info)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *list_itr;
	struct list_head *list_head;

	list_head = &sw->recp_list[recp_id].filt_rules;
	list_for_each_entry(list_itr, list_head, list_entry) {
		/* unlike ice_find_rule_entry(), also require the same
		 * destination hw VSI, so one VSI cannot match (and remove)
		 * another VSI's unicast filter
		 */
		if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
			    sizeof(f_info->l_data)) &&
		    f_info->fwd_id.hw_vsi_id ==
		    list_itr->fltr_info.fwd_id.hw_vsi_id &&
		    f_info->flag == list_itr->fltr_info.flag)
			return list_itr;
	}
	return NULL;
}

/**
 * ice_remove_mac - remove a MAC address based filter rule
 * @hw: pointer to the hardware structure
 * @m_list: list of MAC addresses and forwarding information
 *
 * This function removes either a MAC filter rule or a specific VSI from a
 * VSI list for a multicast MAC address.
 *
 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
 * ice_add_mac. Caller should be aware that this call will only work if all
 * the entries passed into m_list were added previously. It will not attempt to
 * do a partial remove of entries that were found.
2837 */ 2838 enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_list) 2839 { 2840 struct ice_fltr_list_entry *list_itr, *tmp; 2841 struct mutex *rule_lock; /* Lock to protect filter rule list */ 2842 2843 if (!m_list) 2844 return ICE_ERR_PARAM; 2845 2846 rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock; 2847 list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) { 2848 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type; 2849 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0]; 2850 u16 vsi_handle; 2851 2852 if (l_type != ICE_SW_LKUP_MAC) 2853 return ICE_ERR_PARAM; 2854 2855 vsi_handle = list_itr->fltr_info.vsi_handle; 2856 if (!ice_is_vsi_valid(hw, vsi_handle)) 2857 return ICE_ERR_PARAM; 2858 2859 list_itr->fltr_info.fwd_id.hw_vsi_id = 2860 ice_get_hw_vsi_num(hw, vsi_handle); 2861 if (is_unicast_ether_addr(add) && !hw->ucast_shared) { 2862 /* Don't remove the unicast address that belongs to 2863 * another VSI on the switch, since it is not being 2864 * shared... 
2865 */ 2866 mutex_lock(rule_lock); 2867 if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC, 2868 &list_itr->fltr_info)) { 2869 mutex_unlock(rule_lock); 2870 return ICE_ERR_DOES_NOT_EXIST; 2871 } 2872 mutex_unlock(rule_lock); 2873 } 2874 list_itr->status = ice_remove_rule_internal(hw, 2875 ICE_SW_LKUP_MAC, 2876 list_itr); 2877 if (list_itr->status) 2878 return list_itr->status; 2879 } 2880 return 0; 2881 } 2882 2883 /** 2884 * ice_remove_vlan - Remove VLAN based filter rule 2885 * @hw: pointer to the hardware structure 2886 * @v_list: list of VLAN entries and forwarding information 2887 */ 2888 enum ice_status 2889 ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list) 2890 { 2891 struct ice_fltr_list_entry *v_list_itr, *tmp; 2892 2893 if (!v_list || !hw) 2894 return ICE_ERR_PARAM; 2895 2896 list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) { 2897 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type; 2898 2899 if (l_type != ICE_SW_LKUP_VLAN) 2900 return ICE_ERR_PARAM; 2901 v_list_itr->status = ice_remove_rule_internal(hw, 2902 ICE_SW_LKUP_VLAN, 2903 v_list_itr); 2904 if (v_list_itr->status) 2905 return v_list_itr->status; 2906 } 2907 return 0; 2908 } 2909 2910 /** 2911 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter 2912 * @fm_entry: filter entry to inspect 2913 * @vsi_handle: VSI handle to compare with filter info 2914 */ 2915 static bool 2916 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle) 2917 { 2918 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI && 2919 fm_entry->fltr_info.vsi_handle == vsi_handle) || 2920 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST && 2921 fm_entry->vsi_list_info && 2922 (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map)))); 2923 } 2924 2925 /** 2926 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list 2927 * @hw: pointer to the hardware structure 2928 * @vsi_handle: VSI handle to remove filters from 2929 * 
 * @vsi_list_head: pointer to the list to add entry to
 * @fi: pointer to fltr_info of filter entry to copy & add
 *
 * Helper function, used when creating a list of filters to remove from
 * a specific VSI. The entry added to vsi_list_head is a COPY of the
 * original filter entry, with the exception of fltr_info.fltr_act and
 * fltr_info.fwd_id fields. These are set such that later logic can
 * extract which VSI to remove the fltr from, and pass on that information.
 */
static enum ice_status
ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
			       struct list_head *vsi_list_head,
			       struct ice_fltr_info *fi)
{
	struct ice_fltr_list_entry *tmp;

	/* this memory is freed up in the caller function
	 * once filters for this VSI are removed
	 */
	tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return ICE_ERR_NO_MEMORY;

	tmp->fltr_info = *fi;

	/* Overwrite these fields to indicate which VSI to remove filter from,
	 * so find and remove logic can extract the information from the
	 * list entries. Note that original entries will still have proper
	 * values.
	 */
	tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	tmp->fltr_info.vsi_handle = vsi_handle;
	tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	list_add(&tmp->list_entry, vsi_list_head);

	return 0;
}

/**
 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @lkup_list_head: pointer to the list that has certain lookup type filters
 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
 *
 * Locates all filters in lkup_list_head that are used by the given VSI,
 * and adds COPIES of those entries to vsi_list_head (intended to be used
 * to remove the listed filters).
 * Note that this means all entries in vsi_list_head must be explicitly
 * deallocated by the caller when done with list.
 *
 * Locking: the caller must hold the filt_rule_lock protecting
 * @lkup_list_head while this traversal runs (callers in this file do).
 */
static enum ice_status
ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
			 struct list_head *lkup_list_head,
			 struct list_head *vsi_list_head)
{
	struct ice_fltr_mgmt_list_entry *fm_entry;
	enum ice_status status = 0;

	/* check to make sure VSI ID is valid and within boundary */
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
		if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
			continue;

		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
							vsi_list_head,
							&fm_entry->fltr_info);
		if (status)
			return status;
	}
	return status;
}

/**
 * ice_determine_promisc_mask
 * @fi: filter info to parse
 *
 * Helper function to determine which ICE_PROMISC_ mask corresponds
 * to given filter info.
 */
static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
{
	u16 vid = fi->l_data.mac_vlan.vlan_id;
	u8 *macaddr = fi->l_data.mac.mac_addr;
	bool is_tx_fltr = false;
	u8 promisc_mask = 0;

	if (fi->flag == ICE_FLTR_TX)
		is_tx_fltr = true;

	/* classify by destination MAC: broadcast / multicast / unicast */
	if (is_broadcast_ether_addr(macaddr))
		promisc_mask |= is_tx_fltr ?
			ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
	else if (is_multicast_ether_addr(macaddr))
		promisc_mask |= is_tx_fltr ?
			ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
	else if (is_unicast_ether_addr(macaddr))
		promisc_mask |= is_tx_fltr ?
			ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
	/* a non-zero VLAN ID additionally marks the filter VLAN-promisc */
	if (vid)
		promisc_mask |= is_tx_fltr ?
			ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;

	return promisc_mask;
}

/**
 * ice_remove_promisc - Remove promisc based filter rules
 * @hw: pointer to the hardware structure
 * @recp_id: recipe ID for which the rule needs to removed
 * @v_list: list of promisc entries
 *
 * Stops at the first failing entry and returns its status; each entry's
 * individual result is stored in its ->status field.
 */
static enum ice_status
ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
		   struct list_head *v_list)
{
	struct ice_fltr_list_entry *v_list_itr, *tmp;

	list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
		v_list_itr->status =
			ice_remove_rule_internal(hw, recp_id, v_list_itr);
		if (v_list_itr->status)
			return v_list_itr->status;
	}
	return 0;
}

/**
 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to clear mode
 * @promisc_mask: mask of promiscuous config bits to clear
 * @vid: VLAN ID to clear VLAN promiscuous
 */
enum ice_status
ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
		      u16 vid)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *fm_entry, *tmp;
	struct list_head remove_list_head;
	struct ice_fltr_mgmt_list_entry *itr;
	struct list_head *rule_head;
	struct mutex *rule_lock;	/* Lock to protect filter rule list */
	enum ice_status status = 0;
	u8 recipe_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* VLAN bits in the mask select the PROMISC_VLAN recipe list */
	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
	else
		recipe_id = ICE_SW_LKUP_PROMISC;

	rule_head = &sw->recp_list[recipe_id].filt_rules;
	rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;

	INIT_LIST_HEAD(&remove_list_head);

	/* Collect (copies of) matching rules under the lock, then remove
	 * them after dropping it; ice_remove_promisc takes the lock itself.
	 */
	mutex_lock(rule_lock);
	list_for_each_entry(itr, rule_head, list_entry) {
		struct ice_fltr_info *fltr_info;
		u8 fltr_promisc_mask = 0;

		if (!ice_vsi_uses_fltr(itr, vsi_handle))
			continue;
		fltr_info = &itr->fltr_info;

		if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
		    vid != fltr_info->l_data.mac_vlan.vlan_id)
			continue;

		fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);

		/* Skip if filter is not completely specified by given mask */
		if (fltr_promisc_mask & ~promisc_mask)
			continue;

		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
							&remove_list_head,
							fltr_info);
		if (status) {
			mutex_unlock(rule_lock);
			goto free_fltr_list;
		}
	}
	mutex_unlock(rule_lock);

	status = ice_remove_promisc(hw, recipe_id, &remove_list_head);

free_fltr_list:
	/* free the temporary copies made by ice_add_entry_to_vsi_fltr_list */
	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
		list_del(&fm_entry->list_entry);
		devm_kfree(ice_hw_to_dev(hw), fm_entry);
	}

	return status;
}

/**
 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: mask of promiscuous config bits
 * @vid: VLAN ID to set
 *	VLAN promiscuous
 */
enum ice_status
ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
{
	enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
	struct ice_fltr_list_entry f_list_entry;
	struct ice_fltr_info new_fltr;
	enum ice_status status = 0;
	bool is_tx_fltr;
	u16 hw_vsi_id;
	int pkt_type;
	u8 recipe_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	memset(&new_fltr, 0, sizeof(new_fltr));

	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
		new_fltr.l_data.mac_vlan.vlan_id = vid;
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
	} else {
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
		recipe_id = ICE_SW_LKUP_PROMISC;
	}

	/* Separate filters must be set for each direction/packet type
	 * combination, so we will loop over the mask value, store the
	 * individual type, and clear it out in the input mask as it
	 * is found.
	 */
	while (promisc_mask) {
		u8 *mac_addr;

		pkt_type = 0;
		is_tx_fltr = false;

		/* consume exactly one UCAST/MCAST/BCAST direction bit per
		 * iteration, in fixed priority order
		 */
		if (promisc_mask & ICE_PROMISC_UCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_RX;
			pkt_type = UCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_TX;
			pkt_type = UCAST_FLTR;
			is_tx_fltr = true;
		} else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_RX;
			pkt_type = MCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_TX;
			pkt_type = MCAST_FLTR;
			is_tx_fltr = true;
		} else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_RX;
			pkt_type = BCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_TX;
			pkt_type = BCAST_FLTR;
			is_tx_fltr = true;
		}

		/* Check for VLAN promiscuous flag */
		if (promisc_mask & ICE_PROMISC_VLAN_RX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_RX;
		} else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_TX;
			is_tx_fltr = true;
		}

		/* Set filter DA based on packet type.
		 * NOTE(review): if only VLAN bits were set, pkt_type stays 0
		 * and the DA remains all-zero from the memset — confirm this
		 * is the intended VLAN-only promiscuous encoding.
		 */
		mac_addr = new_fltr.l_data.mac.mac_addr;
		if (pkt_type == BCAST_FLTR) {
			eth_broadcast_addr(mac_addr);
		} else if (pkt_type == MCAST_FLTR ||
			   pkt_type == UCAST_FLTR) {
			/* Use the dummy ether header DA */
			ether_addr_copy(mac_addr, dummy_eth_header);
			if (pkt_type == MCAST_FLTR)
				mac_addr[0] |= 0x1;	/* Set multicast bit */
		}

		/* Need to reset this to zero for all iterations */
		new_fltr.flag = 0;
		if (is_tx_fltr) {
			new_fltr.flag |= ICE_FLTR_TX;
			new_fltr.src = hw_vsi_id;
		} else {
			new_fltr.flag |= ICE_FLTR_RX;
			new_fltr.src = hw->port_info->lport;
		}

		new_fltr.fltr_act = ICE_FWD_TO_VSI;
		new_fltr.vsi_handle = vsi_handle;
		new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
		f_list_entry.fltr_info = new_fltr;

		status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
		if (status)
			goto set_promisc_exit;
	}

set_promisc_exit:
	return status;
}

/**
 * ice_set_vlan_vsi_promisc
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: mask of promiscuous config bits
 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
 *
 * Configure VSI with all associated VLANs to given promiscuous mode(s).
 * Walks a snapshot of the VSI's VLAN filters and applies (or clears) the
 * promiscuous mask per VLAN ID.
 */
enum ice_status
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
			 bool rm_vlan_promisc)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *list_itr, *tmp;
	struct list_head vsi_list_head;
	struct list_head *vlan_head;
	struct mutex *vlan_lock;	/* Lock to protect filter rule list */
	enum ice_status status;
	u16 vlan_id;

	INIT_LIST_HEAD(&vsi_list_head);
	vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
	/* copy the VLAN rules under the lock; the copies are processed
	 * afterwards so the per-VLAN promisc calls can take locks themselves
	 */
	mutex_lock(vlan_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
					  &vsi_list_head);
	mutex_unlock(vlan_lock);
	if (status)
		goto free_fltr_list;

	list_for_each_entry(list_itr, &vsi_list_head, list_entry) {
		vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
		if (rm_vlan_promisc)
			status = ice_clear_vsi_promisc(hw, vsi_handle,
						       promisc_mask, vlan_id);
		else
			status = ice_set_vsi_promisc(hw, vsi_handle,
						     promisc_mask, vlan_id);
		if (status)
			break;
	}

free_fltr_list:
	list_for_each_entry_safe(list_itr, tmp, &vsi_list_head, list_entry) {
		list_del(&list_itr->list_entry);
		devm_kfree(ice_hw_to_dev(hw), list_itr);
	}
	return status;
}

/**
 * ice_remove_vsi_lkup_fltr
 *	- Remove lookup type filters for a VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @lkup: switch rule filter lookup type
 *
 * Removal failures are logged via the per-entry ->status fields but not
 * propagated; this helper is intentionally void (used from VSI teardown).
 */
static void
ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
			 enum ice_sw_lkup_type lkup)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *fm_entry;
	struct list_head remove_list_head;
	struct list_head *rule_head;
	struct ice_fltr_list_entry *tmp;
	struct mutex *rule_lock;	/* Lock to protect filter rule list */
	enum ice_status status;

	INIT_LIST_HEAD(&remove_list_head);
	rule_lock = &sw->recp_list[lkup].filt_rule_lock;
	rule_head = &sw->recp_list[lkup].filt_rules;
	/* snapshot the VSI's rules under the lock, then remove the copies
	 * with the lock released (the removers lock internally)
	 */
	mutex_lock(rule_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
					  &remove_list_head);
	mutex_unlock(rule_lock);
	if (status)
		goto free_fltr_list;

	switch (lkup) {
	case ICE_SW_LKUP_MAC:
		ice_remove_mac(hw, &remove_list_head);
		break;
	case ICE_SW_LKUP_VLAN:
		ice_remove_vlan(hw, &remove_list_head);
		break;
	case ICE_SW_LKUP_PROMISC:
	case ICE_SW_LKUP_PROMISC_VLAN:
		ice_remove_promisc(hw, lkup, &remove_list_head);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
	case ICE_SW_LKUP_ETHERTYPE:
	case ICE_SW_LKUP_ETHERTYPE_MAC:
	case ICE_SW_LKUP_DFLT:
	case ICE_SW_LKUP_LAST:
	default:
		ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup);
		break;
	}

free_fltr_list:
	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
		list_del(&fm_entry->list_entry);
		devm_kfree(ice_hw_to_dev(hw), fm_entry);
	}
}

/**
 * ice_remove_vsi_fltr - Remove all filters for a VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 */
void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
{
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
}

/**
 * ice_alloc_res_cntr - allocating resource counter
 * @hw: pointer to the hardware structure
 * @type: type of resource
 * @alloc_shared: if set it is shared else dedicated
 * @num_items: number of entries requested for FD resource type
 * @counter_id: counter index returned by AQ call
 */
enum ice_status
ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
		   u16 *counter_id)
{
	struct ice_aqc_alloc_free_res_elem *buf;
	enum ice_status status;
	u16 buf_len;

	/* Allocate resource */
	buf_len = struct_size(buf, elem, 1);
	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	buf->num_elems = cpu_to_le16(num_items);
	buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
				     ICE_AQC_RES_TYPE_M) | alloc_shared);

	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
	if (status)
		goto exit;

	/* firmware returns the allocated counter index in the first elem */
	*counter_id = le16_to_cpu(buf->elem[0].e.sw_resp);

exit:
	kfree(buf);
	return status;
}

/**
 * ice_free_res_cntr - free resource counter
 * @hw: pointer to the hardware structure
 * @type: type of resource
 * @alloc_shared: if set it is shared else dedicated
 * @num_items: number of entries to be freed for FD resource type
 * @counter_id: counter ID resource
which needs to be freed 3414 */ 3415 enum ice_status 3416 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, 3417 u16 counter_id) 3418 { 3419 struct ice_aqc_alloc_free_res_elem *buf; 3420 enum ice_status status; 3421 u16 buf_len; 3422 3423 /* Free resource */ 3424 buf_len = struct_size(buf, elem, 1); 3425 buf = kzalloc(buf_len, GFP_KERNEL); 3426 if (!buf) 3427 return ICE_ERR_NO_MEMORY; 3428 3429 buf->num_elems = cpu_to_le16(num_items); 3430 buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) & 3431 ICE_AQC_RES_TYPE_M) | alloc_shared); 3432 buf->elem[0].e.sw_resp = cpu_to_le16(counter_id); 3433 3434 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len, 3435 ice_aqc_opc_free_res, NULL); 3436 if (status) 3437 ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n"); 3438 3439 kfree(buf); 3440 return status; 3441 } 3442 3443 /* This is mapping table entry that maps every word within a given protocol 3444 * structure to the real byte offset as per the specification of that 3445 * protocol header. 3446 * for example dst address is 3 words in ethertype header and corresponding 3447 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8 3448 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a 3449 * matching entry describing its field. This needs to be updated if new 3450 * structure is added to that union. 
 */
static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		{ 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_MAC_IL,		{ 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_ETYPE_OL,		{ 0 } },
	{ ICE_VLAN_OFOS,	{ 2, 0 } },
	{ ICE_IPV4_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV4_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV6_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
				  26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_IPV6_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
				  26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_TCP_IL,		{ 0, 2 } },
	{ ICE_UDP_OF,		{ 0, 2 } },
	{ ICE_UDP_ILOS,		{ 0, 2 } },
};

/* Maps each software protocol type to the hardware protocol ID used in
 * field vectors and recipes.
 * NOTE(review): ice_fill_valid_words() indexes both tables directly with
 * the enum value, so entry order must match enum ice_protocol_type —
 * keep these tables in enum order.
 */
static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		ICE_MAC_OFOS_HW },
	{ ICE_MAC_IL,		ICE_MAC_IL_HW },
	{ ICE_ETYPE_OL,		ICE_ETYPE_OL_HW },
	{ ICE_VLAN_OFOS,	ICE_VLAN_OL_HW },
	{ ICE_IPV4_OFOS,	ICE_IPV4_OFOS_HW },
	{ ICE_IPV4_IL,		ICE_IPV4_IL_HW },
	{ ICE_IPV6_OFOS,	ICE_IPV6_OFOS_HW },
	{ ICE_IPV6_IL,		ICE_IPV6_IL_HW },
	{ ICE_TCP_IL,		ICE_TCP_IL_HW },
	{ ICE_UDP_OF,		ICE_UDP_OF_HW },
	{ ICE_UDP_ILOS,		ICE_UDP_ILOS_HW },
};

/**
 * ice_find_recp - find a recipe
 * @hw: pointer to the hardware structure
 * @lkup_exts: extension sequence to match
 *
 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
 */
static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
{
	bool refresh_required = true;
	struct ice_sw_recipe *recp;
	u8 i;

	/* Walk through existing recipes to find a match */
	recp = hw->switch_info->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		/* If recipe was not created for this ID, in SW bookkeeping,
		 * check if FW has an entry for this recipe. If the FW has an
		 * entry update it in our SW bookkeeping and continue with the
		 * matching.
		 */
		if (!recp[i].recp_created)
			if (ice_get_recp_frm_fw(hw,
						hw->switch_info->recp_list, i,
						&refresh_required))
				continue;

		/* Skip inverse action recipes */
		if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
		    ICE_AQ_RECIPE_ACT_INV_ACT)
			continue;

		/* if number of words we are looking for match */
		if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
			struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
			struct ice_fv_word *be = lkup_exts->fv_words;
			u16 *cr = recp[i].lkup_exts.field_mask;
			u16 *de = lkup_exts->field_mask;
			bool found = true;
			u8 pe, qr;

			/* ar, cr, and qr are related to the recipe words, while
			 * be, de, and pe are related to the lookup words
			 */
			for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
				for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
				     qr++) {
					if (ar[qr].off == be[pe].off &&
					    ar[qr].prot_id == be[pe].prot_id &&
					    cr[qr] == de[pe])
						/* Found the "pe"th word in the
						 * given recipe
						 */
						break;
				}
				/* After walking through all the words in the
				 * "i"th recipe if "pe"th word was not found
				 * then this recipe is not what we are looking
				 * for. So break out from this loop and try the
				 * next recipe
				 */
				if (qr >= recp[i].lkup_exts.n_val_words) {
					found = false;
					break;
				}
			}
			/* If for "i"th recipe the found was never set to false
			 * then it means we found our match
			 */
			if (found)
				return i; /* Return the recipe ID */
		}
	}
	return ICE_MAX_NUM_RECIPES;
}

/**
 * ice_prot_type_to_id - get protocol ID from protocol type
 * @type: protocol type
 * @id: pointer to variable that will receive the ID
 *
 * Returns true if found, false otherwise
 */
static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
{
	u8 i;

	for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
		if (ice_prot_id_tbl[i].type == type) {
			*id = ice_prot_id_tbl[i].protocol_id;
			return true;
		}
	return false;
}

/**
 * ice_fill_valid_words - count valid words
 * @rule: advanced rule with lookup information
 * @lkup_exts: byte offset extractions of the words that are valid
 *
 * calculate valid words in a lookup rule using mask value.
 * Returns the number of words appended to @lkup_exts (0 on error or if
 * the protocol type is unknown).
 */
static u8
ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
		     struct ice_prot_lkup_ext *lkup_exts)
{
	u8 j, word, prot_id, ret_val;

	/* prot_id is only used here to validate the type is known; the
	 * table is indexed directly by rule->type below
	 */
	if (!ice_prot_type_to_id(rule->type, &prot_id))
		return 0;

	word = lkup_exts->n_val_words;

	/* every non-zero 16-bit mask word contributes one extraction word */
	for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
		if (((u16 *)&rule->m_u)[j] &&
		    rule->type < ARRAY_SIZE(ice_prot_ext)) {
			/* No more space to accommodate */
			if (word >= ICE_MAX_CHAIN_WORDS)
				return 0;
			lkup_exts->fv_words[word].off =
				ice_prot_ext[rule->type].offs[j];
			lkup_exts->fv_words[word].prot_id =
				ice_prot_id_tbl[rule->type].protocol_id;
			lkup_exts->field_mask[word] =
				be16_to_cpu(((__force __be16 *)&rule->m_u)[j]);
			word++;
		}

	ret_val = word - lkup_exts->n_val_words;
	lkup_exts->n_val_words = word;

	return ret_val;
}

/**
 * ice_create_first_fit_recp_def - Create a recipe grouping
 * @hw: pointer to the hardware structure
 * @lkup_exts: an array of protocol header extractions
 * @rg_list: pointer to a list that stores new recipe groups
 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
 *
 * Using first fit algorithm, take all the words that are still not done
 * and start grouping them in 4-word groups. Each group makes up one
 * recipe.
 */
static enum ice_status
ice_create_first_fit_recp_def(struct ice_hw *hw,
			      struct ice_prot_lkup_ext *lkup_exts,
			      struct list_head *rg_list,
			      u8 *recp_cnt)
{
	struct ice_pref_recipe_group *grp = NULL;
	u8 j;

	*recp_cnt = 0;

	/* Walk through every word in the rule to check if it is not done. If so
	 * then this word needs to be part of a new recipe.
	 */
	for (j = 0; j < lkup_exts->n_val_words; j++)
		if (!test_bit(j, lkup_exts->done)) {
			/* open a new group when none exists or the current
			 * one is full (ICE_NUM_WORDS_RECIPE words)
			 */
			if (!grp ||
			    grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
				struct ice_recp_grp_entry *entry;

				entry = devm_kzalloc(ice_hw_to_dev(hw),
						     sizeof(*entry),
						     GFP_KERNEL);
				if (!entry)
					return ICE_ERR_NO_MEMORY;
				list_add(&entry->l_entry, rg_list);
				grp = &entry->r_group;
				(*recp_cnt)++;
			}

			grp->pairs[grp->n_val_pairs].prot_id =
				lkup_exts->fv_words[j].prot_id;
			grp->pairs[grp->n_val_pairs].off =
				lkup_exts->fv_words[j].off;
			grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
			grp->n_val_pairs++;
		}

	return 0;
}

/**
 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
 * @hw: pointer to the hardware structure
 * @fv_list: field vector with the extraction sequence information
 * @rg_list: recipe groupings with protocol-offset pairs
 *
 * Helper
function to fill in the field vector indices for protocol-offset 3675 * pairs. These indexes are then ultimately programmed into a recipe. 3676 */ 3677 static enum ice_status 3678 ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list, 3679 struct list_head *rg_list) 3680 { 3681 struct ice_sw_fv_list_entry *fv; 3682 struct ice_recp_grp_entry *rg; 3683 struct ice_fv_word *fv_ext; 3684 3685 if (list_empty(fv_list)) 3686 return 0; 3687 3688 fv = list_first_entry(fv_list, struct ice_sw_fv_list_entry, 3689 list_entry); 3690 fv_ext = fv->fv_ptr->ew; 3691 3692 list_for_each_entry(rg, rg_list, l_entry) { 3693 u8 i; 3694 3695 for (i = 0; i < rg->r_group.n_val_pairs; i++) { 3696 struct ice_fv_word *pr; 3697 bool found = false; 3698 u16 mask; 3699 u8 j; 3700 3701 pr = &rg->r_group.pairs[i]; 3702 mask = rg->r_group.mask[i]; 3703 3704 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) 3705 if (fv_ext[j].prot_id == pr->prot_id && 3706 fv_ext[j].off == pr->off) { 3707 found = true; 3708 3709 /* Store index of field vector */ 3710 rg->fv_idx[i] = j; 3711 rg->fv_mask[i] = mask; 3712 break; 3713 } 3714 3715 /* Protocol/offset could not be found, caller gave an 3716 * invalid pair 3717 */ 3718 if (!found) 3719 return ICE_ERR_PARAM; 3720 } 3721 } 3722 3723 return 0; 3724 } 3725 3726 /** 3727 * ice_find_free_recp_res_idx - find free result indexes for recipe 3728 * @hw: pointer to hardware structure 3729 * @profiles: bitmap of profiles that will be associated with the new recipe 3730 * @free_idx: pointer to variable to receive the free index bitmap 3731 * 3732 * The algorithm used here is: 3733 * 1. When creating a new recipe, create a set P which contains all 3734 * Profiles that will be associated with our new recipe 3735 * 3736 * 2. For each Profile p in set P: 3737 * a. Add all recipes associated with Profile p into set R 3738 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes 3739 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF] 3740 * i. 
Or just assume they all have the same possible indexes: 3741 * 44, 45, 46, 47 3742 * i.e., PossibleIndexes = 0x0000F00000000000 3743 * 3744 * 3. For each Recipe r in set R: 3745 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes 3746 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes 3747 * 3748 * FreeIndexes will contain the bits indicating the indexes free for use, 3749 * then the code needs to update the recipe[r].used_result_idx_bits to 3750 * indicate which indexes were selected for use by this recipe. 3751 */ 3752 static u16 3753 ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles, 3754 unsigned long *free_idx) 3755 { 3756 DECLARE_BITMAP(possible_idx, ICE_MAX_FV_WORDS); 3757 DECLARE_BITMAP(recipes, ICE_MAX_NUM_RECIPES); 3758 DECLARE_BITMAP(used_idx, ICE_MAX_FV_WORDS); 3759 u16 bit; 3760 3761 bitmap_zero(possible_idx, ICE_MAX_FV_WORDS); 3762 bitmap_zero(recipes, ICE_MAX_NUM_RECIPES); 3763 bitmap_zero(used_idx, ICE_MAX_FV_WORDS); 3764 bitmap_zero(free_idx, ICE_MAX_FV_WORDS); 3765 3766 bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS); 3767 3768 /* For each profile we are going to associate the recipe with, add the 3769 * recipes that are associated with that profile. This will give us 3770 * the set of recipes that our recipe may collide with. Also, determine 3771 * what possible result indexes are usable given this set of profiles. 3772 */ 3773 for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) { 3774 bitmap_or(recipes, recipes, profile_to_recipe[bit], 3775 ICE_MAX_NUM_RECIPES); 3776 bitmap_and(possible_idx, possible_idx, 3777 hw->switch_info->prof_res_bm[bit], 3778 ICE_MAX_FV_WORDS); 3779 } 3780 3781 /* For each recipe that our new recipe may collide with, determine 3782 * which indexes have been used. 
3783 */ 3784 for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES) 3785 bitmap_or(used_idx, used_idx, 3786 hw->switch_info->recp_list[bit].res_idxs, 3787 ICE_MAX_FV_WORDS); 3788 3789 bitmap_xor(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS); 3790 3791 /* return number of free indexes */ 3792 return (u16)bitmap_weight(free_idx, ICE_MAX_FV_WORDS); 3793 } 3794 3795 /** 3796 * ice_add_sw_recipe - function to call AQ calls to create switch recipe 3797 * @hw: pointer to hardware structure 3798 * @rm: recipe management list entry 3799 * @match_tun_mask: tunnel mask that needs to be programmed 3800 * @profiles: bitmap of profiles that will be associated. 3801 */ 3802 static enum ice_status 3803 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, 3804 u16 match_tun_mask, unsigned long *profiles) 3805 { 3806 DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS); 3807 struct ice_aqc_recipe_data_elem *tmp; 3808 struct ice_aqc_recipe_data_elem *buf; 3809 struct ice_recp_grp_entry *entry; 3810 enum ice_status status; 3811 u16 free_res_idx; 3812 u16 recipe_count; 3813 u8 chain_idx; 3814 u8 recps = 0; 3815 3816 /* When more than one recipe are required, another recipe is needed to 3817 * chain them together. Matching a tunnel metadata ID takes up one of 3818 * the match fields in the chaining recipe reducing the number of 3819 * chained recipes by one. 
3820 */ 3821 /* check number of free result indices */ 3822 bitmap_zero(result_idx_bm, ICE_MAX_FV_WORDS); 3823 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm); 3824 3825 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n", 3826 free_res_idx, rm->n_grp_count); 3827 3828 if (rm->n_grp_count > 1) { 3829 if (rm->n_grp_count > free_res_idx) 3830 return ICE_ERR_MAX_LIMIT; 3831 3832 rm->n_grp_count++; 3833 } 3834 3835 if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE) 3836 return ICE_ERR_MAX_LIMIT; 3837 3838 tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL); 3839 if (!tmp) 3840 return ICE_ERR_NO_MEMORY; 3841 3842 buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf), 3843 GFP_KERNEL); 3844 if (!buf) { 3845 status = ICE_ERR_NO_MEMORY; 3846 goto err_mem; 3847 } 3848 3849 bitmap_zero(rm->r_bitmap, ICE_MAX_NUM_RECIPES); 3850 recipe_count = ICE_MAX_NUM_RECIPES; 3851 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC, 3852 NULL); 3853 if (status || recipe_count == 0) 3854 goto err_unroll; 3855 3856 /* Allocate the recipe resources, and configure them according to the 3857 * match fields from protocol headers and extracted field vectors. 3858 */ 3859 chain_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS); 3860 list_for_each_entry(entry, &rm->rg_list, l_entry) { 3861 u8 i; 3862 3863 status = ice_alloc_recipe(hw, &entry->rid); 3864 if (status) 3865 goto err_unroll; 3866 3867 /* Clear the result index of the located recipe, as this will be 3868 * updated, if needed, later in the recipe creation process. 3869 */ 3870 tmp[0].content.result_indx = 0; 3871 3872 buf[recps] = tmp[0]; 3873 buf[recps].recipe_indx = (u8)entry->rid; 3874 /* if the recipe is a non-root recipe RID should be programmed 3875 * as 0 for the rules to be applied correctly. 
3876 */ 3877 buf[recps].content.rid = 0; 3878 memset(&buf[recps].content.lkup_indx, 0, 3879 sizeof(buf[recps].content.lkup_indx)); 3880 3881 /* All recipes use look-up index 0 to match switch ID. */ 3882 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX; 3883 buf[recps].content.mask[0] = 3884 cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK); 3885 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask 3886 * to be 0 3887 */ 3888 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) { 3889 buf[recps].content.lkup_indx[i] = 0x80; 3890 buf[recps].content.mask[i] = 0; 3891 } 3892 3893 for (i = 0; i < entry->r_group.n_val_pairs; i++) { 3894 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i]; 3895 buf[recps].content.mask[i + 1] = 3896 cpu_to_le16(entry->fv_mask[i]); 3897 } 3898 3899 if (rm->n_grp_count > 1) { 3900 /* Checks to see if there really is a valid result index 3901 * that can be used. 3902 */ 3903 if (chain_idx >= ICE_MAX_FV_WORDS) { 3904 ice_debug(hw, ICE_DBG_SW, "No chain index available\n"); 3905 status = ICE_ERR_MAX_LIMIT; 3906 goto err_unroll; 3907 } 3908 3909 entry->chain_idx = chain_idx; 3910 buf[recps].content.result_indx = 3911 ICE_AQ_RECIPE_RESULT_EN | 3912 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) & 3913 ICE_AQ_RECIPE_RESULT_DATA_M); 3914 clear_bit(chain_idx, result_idx_bm); 3915 chain_idx = find_first_bit(result_idx_bm, 3916 ICE_MAX_FV_WORDS); 3917 } 3918 3919 /* fill recipe dependencies */ 3920 bitmap_zero((unsigned long *)buf[recps].recipe_bitmap, 3921 ICE_MAX_NUM_RECIPES); 3922 set_bit(buf[recps].recipe_indx, 3923 (unsigned long *)buf[recps].recipe_bitmap); 3924 buf[recps].content.act_ctrl_fwd_priority = rm->priority; 3925 recps++; 3926 } 3927 3928 if (rm->n_grp_count == 1) { 3929 rm->root_rid = buf[0].recipe_indx; 3930 set_bit(buf[0].recipe_indx, rm->r_bitmap); 3931 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT; 3932 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) { 3933 memcpy(buf[0].recipe_bitmap, rm->r_bitmap, 3934 
sizeof(buf[0].recipe_bitmap)); 3935 } else { 3936 status = ICE_ERR_BAD_PTR; 3937 goto err_unroll; 3938 } 3939 /* Applicable only for ROOT_RECIPE, set the fwd_priority for 3940 * the recipe which is getting created if specified 3941 * by user. Usually any advanced switch filter, which results 3942 * into new extraction sequence, ended up creating a new recipe 3943 * of type ROOT and usually recipes are associated with profiles 3944 * Switch rule referreing newly created recipe, needs to have 3945 * either/or 'fwd' or 'join' priority, otherwise switch rule 3946 * evaluation will not happen correctly. In other words, if 3947 * switch rule to be evaluated on priority basis, then recipe 3948 * needs to have priority, otherwise it will be evaluated last. 3949 */ 3950 buf[0].content.act_ctrl_fwd_priority = rm->priority; 3951 } else { 3952 struct ice_recp_grp_entry *last_chain_entry; 3953 u16 rid, i; 3954 3955 /* Allocate the last recipe that will chain the outcomes of the 3956 * other recipes together 3957 */ 3958 status = ice_alloc_recipe(hw, &rid); 3959 if (status) 3960 goto err_unroll; 3961 3962 buf[recps].recipe_indx = (u8)rid; 3963 buf[recps].content.rid = (u8)rid; 3964 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT; 3965 /* the new entry created should also be part of rg_list to 3966 * make sure we have complete recipe 3967 */ 3968 last_chain_entry = devm_kzalloc(ice_hw_to_dev(hw), 3969 sizeof(*last_chain_entry), 3970 GFP_KERNEL); 3971 if (!last_chain_entry) { 3972 status = ICE_ERR_NO_MEMORY; 3973 goto err_unroll; 3974 } 3975 last_chain_entry->rid = rid; 3976 memset(&buf[recps].content.lkup_indx, 0, 3977 sizeof(buf[recps].content.lkup_indx)); 3978 /* All recipes use look-up index 0 to match switch ID. 
*/ 3979 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX; 3980 buf[recps].content.mask[0] = 3981 cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK); 3982 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) { 3983 buf[recps].content.lkup_indx[i] = 3984 ICE_AQ_RECIPE_LKUP_IGNORE; 3985 buf[recps].content.mask[i] = 0; 3986 } 3987 3988 i = 1; 3989 /* update r_bitmap with the recp that is used for chaining */ 3990 set_bit(rid, rm->r_bitmap); 3991 /* this is the recipe that chains all the other recipes so it 3992 * should not have a chaining ID to indicate the same 3993 */ 3994 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND; 3995 list_for_each_entry(entry, &rm->rg_list, l_entry) { 3996 last_chain_entry->fv_idx[i] = entry->chain_idx; 3997 buf[recps].content.lkup_indx[i] = entry->chain_idx; 3998 buf[recps].content.mask[i++] = cpu_to_le16(0xFFFF); 3999 set_bit(entry->rid, rm->r_bitmap); 4000 } 4001 list_add(&last_chain_entry->l_entry, &rm->rg_list); 4002 if (sizeof(buf[recps].recipe_bitmap) >= 4003 sizeof(rm->r_bitmap)) { 4004 memcpy(buf[recps].recipe_bitmap, rm->r_bitmap, 4005 sizeof(buf[recps].recipe_bitmap)); 4006 } else { 4007 status = ICE_ERR_BAD_PTR; 4008 goto err_unroll; 4009 } 4010 buf[recps].content.act_ctrl_fwd_priority = rm->priority; 4011 4012 /* To differentiate among different UDP tunnels, a meta data ID 4013 * flag is used. 
4014 */ 4015 if (match_tun_mask) { 4016 buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND; 4017 buf[recps].content.mask[i] = 4018 cpu_to_le16(match_tun_mask); 4019 } 4020 4021 recps++; 4022 rm->root_rid = (u8)rid; 4023 } 4024 status = ice_acquire_change_lock(hw, ICE_RES_WRITE); 4025 if (status) 4026 goto err_unroll; 4027 4028 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL); 4029 ice_release_change_lock(hw); 4030 if (status) 4031 goto err_unroll; 4032 4033 /* Every recipe that just got created add it to the recipe 4034 * book keeping list 4035 */ 4036 list_for_each_entry(entry, &rm->rg_list, l_entry) { 4037 struct ice_switch_info *sw = hw->switch_info; 4038 bool is_root, idx_found = false; 4039 struct ice_sw_recipe *recp; 4040 u16 idx, buf_idx = 0; 4041 4042 /* find buffer index for copying some data */ 4043 for (idx = 0; idx < rm->n_grp_count; idx++) 4044 if (buf[idx].recipe_indx == entry->rid) { 4045 buf_idx = idx; 4046 idx_found = true; 4047 } 4048 4049 if (!idx_found) { 4050 status = ICE_ERR_OUT_OF_RANGE; 4051 goto err_unroll; 4052 } 4053 4054 recp = &sw->recp_list[entry->rid]; 4055 is_root = (rm->root_rid == entry->rid); 4056 recp->is_root = is_root; 4057 4058 recp->root_rid = entry->rid; 4059 recp->big_recp = (is_root && rm->n_grp_count > 1); 4060 4061 memcpy(&recp->ext_words, entry->r_group.pairs, 4062 entry->r_group.n_val_pairs * sizeof(struct ice_fv_word)); 4063 4064 memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap, 4065 sizeof(recp->r_bitmap)); 4066 4067 /* Copy non-result fv index values and masks to recipe. This 4068 * call will also update the result recipe bitmask. 
	 */
	ice_collect_result_idx(&buf[buf_idx], recp);

	/* for non-root recipes, also copy to the root, this allows
	 * easier matching of a complete chained recipe
	 */
	if (!is_root)
		ice_collect_result_idx(&buf[buf_idx],
				       &sw->recp_list[rm->root_rid]);

	/* Cache everything needed to recognize/reuse this recipe later */
	recp->n_ext_words = entry->r_group.n_val_pairs;
	recp->chain_idx = entry->chain_idx;
	recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
	recp->n_grp_count = rm->n_grp_count;
	recp->recp_created = true;
	}
	/* Success: keep the programmed buffer around on rm->root_buf; the
	 * temporary recipe read buffer is no longer needed.
	 */
	rm->root_buf = buf;
	kfree(tmp);
	return status;

err_unroll:
err_mem:
	kfree(tmp);
	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}

/**
 * ice_create_recipe_group - creates recipe group
 * @hw: pointer to hardware structure
 * @rm: recipe management list entry
 * @lkup_exts: lookup elements
 *
 * Packs the valid lookup words from @lkup_exts into one or more recipe
 * groups on @rm->rg_list and caches the extraction words and field masks
 * on @rm for later recipe matching.
 */
static enum ice_status
ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
			struct ice_prot_lkup_ext *lkup_exts)
{
	enum ice_status status;
	u8 recp_count = 0;

	rm->n_grp_count = 0;

	/* Create recipes for words that are marked not done by packing them
	 * as best fit.
	 */
	status = ice_create_first_fit_recp_def(hw, lkup_exts,
					       &rm->rg_list, &recp_count);
	if (!status) {
		rm->n_grp_count += recp_count;
		rm->n_ext_words = lkup_exts->n_val_words;
		memcpy(&rm->ext_words, lkup_exts->fv_words,
		       sizeof(rm->ext_words));
		memcpy(rm->word_masks, lkup_exts->field_mask,
		       sizeof(rm->word_masks));
	}

	return status;
}

/**
 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
 * @hw: pointer to hardware structure
 * @lkups: lookup elements or match criteria for the advanced recipe, one
 *	   structure per protocol header
 * @lkups_cnt: number of protocols
 * @bm: bitmap of field vectors to consider
 * @fv_list: pointer to a list that holds the returned field vectors
 */
static enum ice_status
ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
	   unsigned long *bm, struct list_head *fv_list)
{
	enum ice_status status;
	u8 *prot_ids;
	u16 i;

	prot_ids = kcalloc(lkups_cnt, sizeof(*prot_ids), GFP_KERNEL);
	if (!prot_ids)
		return ICE_ERR_NO_MEMORY;

	/* Translate each lookup type to its HW protocol ID; any type with
	 * no mapping invalidates the whole request.
	 */
	for (i = 0; i < lkups_cnt; i++)
		if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
			status = ICE_ERR_CFG;
			goto free_mem;
		}

	/* Find field vectors that include all specified protocol types */
	status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);

free_mem:
	kfree(prot_ids);
	return status;
}

/* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
 * @hw: pointer to hardware structure
 * @rinfo: other information regarding the rule e.g. priority and action info
 * @bm: pointer to memory for returning the bitmap of field vectors
 */
static void
ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
			 unsigned long *bm)
{
	bitmap_zero(bm, ICE_MAX_NUM_PROFILES);

	/* Only non-tunneled profiles are selected here; @rinfo is not
	 * consulted yet — presumably kept for future rule-type based
	 * profile filtering.
	 */
	ice_get_sw_fv_bitmap(hw, ICE_PROF_NON_TUN, bm);
}

/**
 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
 * @hw: pointer to hardware structure
 * @lkups: lookup elements or match criteria for the advanced recipe, one
 *	   structure per protocol header
 * @lkups_cnt: number of protocols
 * @rinfo: other information regarding the rule e.g.
 *	   priority and action info
 * @rid: return the recipe ID of the recipe created
 */
static enum ice_status
ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		   u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
{
	DECLARE_BITMAP(fv_bitmap, ICE_MAX_NUM_PROFILES);
	DECLARE_BITMAP(profiles, ICE_MAX_NUM_PROFILES);
	struct ice_prot_lkup_ext *lkup_exts;
	struct ice_recp_grp_entry *r_entry;
	struct ice_sw_fv_list_entry *fvit;
	struct ice_recp_grp_entry *r_tmp;
	struct ice_sw_fv_list_entry *tmp;
	enum ice_status status = 0;
	struct ice_sw_recipe *rm;
	u16 match_tun_mask = 0;	/* never set here, so no tunnel metadata */
	u8 i;

	if (!lkups_cnt)
		return ICE_ERR_PARAM;

	lkup_exts = kzalloc(sizeof(*lkup_exts), GFP_KERNEL);
	if (!lkup_exts)
		return ICE_ERR_NO_MEMORY;

	/* Determine the number of words to be matched and if it exceeds a
	 * recipe's restrictions
	 */
	for (i = 0; i < lkups_cnt; i++) {
		u16 count;

		if (lkups[i].type >= ICE_PROTOCOL_LAST) {
			status = ICE_ERR_CFG;
			goto err_free_lkup_exts;
		}

		count = ice_fill_valid_words(&lkups[i], lkup_exts);
		if (!count) {
			status = ICE_ERR_CFG;
			goto err_free_lkup_exts;
		}
	}

	rm = kzalloc(sizeof(*rm), GFP_KERNEL);
	if (!rm) {
		status = ICE_ERR_NO_MEMORY;
		goto err_free_lkup_exts;
	}

	/* Get field vectors that contain fields extracted from all the protocol
	 * headers being programmed.
	 */
	INIT_LIST_HEAD(&rm->fv_list);
	INIT_LIST_HEAD(&rm->rg_list);

	/* Get bitmap of field vectors (profiles) that are compatible with the
	 * rule request; only these will be searched in the subsequent call to
	 * ice_get_fv.
	 */
	ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);

	status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
	if (status)
		goto err_unroll;

	/* Group match words into recipes using preferred recipe grouping
	 * criteria.
	 */
	status = ice_create_recipe_group(hw, rm, lkup_exts);
	if (status)
		goto err_unroll;

	/* set the recipe priority if specified */
	rm->priority = (u8)rinfo->priority;

	/* Find offsets from the field vector. Pick the first one for all the
	 * recipes.
	 */
	status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
	if (status)
		goto err_unroll;

	/* get bitmap of all profiles the recipe will be associated with */
	bitmap_zero(profiles, ICE_MAX_NUM_PROFILES);
	list_for_each_entry(fvit, &rm->fv_list, list_entry) {
		ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
		set_bit((u16)fvit->profile_id, profiles);
	}

	/* Look for a recipe which matches our requested fv / mask list */
	*rid = ice_find_recp(hw, lkup_exts);
	if (*rid < ICE_MAX_NUM_RECIPES)
		/* Success if found a recipe that match the existing criteria */
		goto err_unroll;

	/* Recipe we need does not exist, add a recipe */
	status = ice_add_sw_recipe(hw, rm, match_tun_mask, profiles);
	if (status)
		goto err_unroll;

	/* Associate all the recipes created with all the profiles in the
	 * common field vector.
	 */
	list_for_each_entry(fvit, &rm->fv_list, list_entry) {
		DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
		u16 j;

		status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
						      (u8 *)r_bitmap, NULL);
		if (status)
			goto err_unroll;

		/* Merge the new recipes into the profile's existing
		 * association bitmap before writing it back.
		 */
		bitmap_or(r_bitmap, r_bitmap, rm->r_bitmap,
			  ICE_MAX_NUM_RECIPES);
		status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
		if (status)
			goto err_unroll;

		status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
						      (u8 *)r_bitmap,
						      NULL);
		ice_release_change_lock(hw);

		if (status)
			goto err_unroll;

		/* Update profile to recipe bitmap array */
		bitmap_copy(profile_to_recipe[fvit->profile_id], r_bitmap,
			    ICE_MAX_NUM_RECIPES);

		/* Update recipe to profile bitmap array */
		for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
			set_bit((u16)fvit->profile_id, recipe_to_profile[j]);
	}

	*rid = rm->root_rid;
	memcpy(&hw->switch_info->recp_list[*rid].lkup_exts, lkup_exts,
	       sizeof(*lkup_exts));
	/* NOTE: the unwind below runs on success as well — rg_list/fv_list
	 * entries and the temporary recipe struct are only needed during
	 * creation; the permanent state lives in hw->switch_info.
	 */
err_unroll:
	list_for_each_entry_safe(r_entry, r_tmp, &rm->rg_list, l_entry) {
		list_del(&r_entry->l_entry);
		devm_kfree(ice_hw_to_dev(hw), r_entry);
	}

	list_for_each_entry_safe(fvit, tmp, &rm->fv_list, list_entry) {
		list_del(&fvit->list_entry);
		devm_kfree(ice_hw_to_dev(hw), fvit);
	}

	if (rm->root_buf)
		devm_kfree(ice_hw_to_dev(hw), rm->root_buf);

	kfree(rm);

err_free_lkup_exts:
	kfree(lkup_exts);

	return status;
}

/**
 * ice_find_dummy_packet - find dummy packet
 *
 * @lkups: lookup elements or match criteria for the advanced recipe, one
 *	   structure per protocol header
 * @lkups_cnt: number of protocols
 * @pkt: dummy packet to fill according to filter match criteria
 * @pkt_len: packet length of dummy packet
 * @offsets: pointer to receive the pointer to the offsets for
the packet 4353 */ 4354 static void 4355 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, 4356 const u8 **pkt, u16 *pkt_len, 4357 const struct ice_dummy_pkt_offsets **offsets) 4358 { 4359 bool tcp = false, udp = false, ipv6 = false, vlan = false; 4360 u16 i; 4361 4362 for (i = 0; i < lkups_cnt; i++) { 4363 if (lkups[i].type == ICE_UDP_ILOS) 4364 udp = true; 4365 else if (lkups[i].type == ICE_TCP_IL) 4366 tcp = true; 4367 else if (lkups[i].type == ICE_IPV6_OFOS) 4368 ipv6 = true; 4369 else if (lkups[i].type == ICE_VLAN_OFOS) 4370 vlan = true; 4371 else if (lkups[i].type == ICE_ETYPE_OL && 4372 lkups[i].h_u.ethertype.ethtype_id == 4373 cpu_to_be16(ICE_IPV6_ETHER_ID) && 4374 lkups[i].m_u.ethertype.ethtype_id == 4375 cpu_to_be16(0xFFFF)) 4376 ipv6 = true; 4377 } 4378 4379 if (udp && !ipv6) { 4380 if (vlan) { 4381 *pkt = dummy_vlan_udp_packet; 4382 *pkt_len = sizeof(dummy_vlan_udp_packet); 4383 *offsets = dummy_vlan_udp_packet_offsets; 4384 return; 4385 } 4386 *pkt = dummy_udp_packet; 4387 *pkt_len = sizeof(dummy_udp_packet); 4388 *offsets = dummy_udp_packet_offsets; 4389 return; 4390 } else if (udp && ipv6) { 4391 if (vlan) { 4392 *pkt = dummy_vlan_udp_ipv6_packet; 4393 *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet); 4394 *offsets = dummy_vlan_udp_ipv6_packet_offsets; 4395 return; 4396 } 4397 *pkt = dummy_udp_ipv6_packet; 4398 *pkt_len = sizeof(dummy_udp_ipv6_packet); 4399 *offsets = dummy_udp_ipv6_packet_offsets; 4400 return; 4401 } else if ((tcp && ipv6) || ipv6) { 4402 if (vlan) { 4403 *pkt = dummy_vlan_tcp_ipv6_packet; 4404 *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet); 4405 *offsets = dummy_vlan_tcp_ipv6_packet_offsets; 4406 return; 4407 } 4408 *pkt = dummy_tcp_ipv6_packet; 4409 *pkt_len = sizeof(dummy_tcp_ipv6_packet); 4410 *offsets = dummy_tcp_ipv6_packet_offsets; 4411 return; 4412 } 4413 4414 if (vlan) { 4415 *pkt = dummy_vlan_tcp_packet; 4416 *pkt_len = sizeof(dummy_vlan_tcp_packet); 4417 *offsets = dummy_vlan_tcp_packet_offsets; 4418 } 
else { 4419 *pkt = dummy_tcp_packet; 4420 *pkt_len = sizeof(dummy_tcp_packet); 4421 *offsets = dummy_tcp_packet_offsets; 4422 } 4423 } 4424 4425 /** 4426 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria 4427 * 4428 * @lkups: lookup elements or match criteria for the advanced recipe, one 4429 * structure per protocol header 4430 * @lkups_cnt: number of protocols 4431 * @s_rule: stores rule information from the match criteria 4432 * @dummy_pkt: dummy packet to fill according to filter match criteria 4433 * @pkt_len: packet length of dummy packet 4434 * @offsets: offset info for the dummy packet 4435 */ 4436 static enum ice_status 4437 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, 4438 struct ice_aqc_sw_rules_elem *s_rule, 4439 const u8 *dummy_pkt, u16 pkt_len, 4440 const struct ice_dummy_pkt_offsets *offsets) 4441 { 4442 u8 *pkt; 4443 u16 i; 4444 4445 /* Start with a packet with a pre-defined/dummy content. Then, fill 4446 * in the header values to be looked up or matched. 
	 */
	pkt = s_rule->pdata.lkup_tx_rx.hdr;

	memcpy(pkt, dummy_pkt, pkt_len);

	for (i = 0; i < lkups_cnt; i++) {
		enum ice_protocol_type type;
		u16 offset = 0, len = 0, j;
		bool found = false;

		/* find the start of this layer; it should be found since this
		 * was already checked when search for the dummy packet
		 */
		type = lkups[i].type;
		for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
			if (type == offsets[j].type) {
				offset = offsets[j].offset;
				found = true;
				break;
			}
		}
		/* this should never happen in a correct calling sequence */
		if (!found)
			return ICE_ERR_PARAM;

		/* determine the header length for this protocol layer */
		switch (lkups[i].type) {
		case ICE_MAC_OFOS:
		case ICE_MAC_IL:
			len = sizeof(struct ice_ether_hdr);
			break;
		case ICE_ETYPE_OL:
			len = sizeof(struct ice_ethtype_hdr);
			break;
		case ICE_VLAN_OFOS:
			len = sizeof(struct ice_vlan_hdr);
			break;
		case ICE_IPV4_OFOS:
		case ICE_IPV4_IL:
			len = sizeof(struct ice_ipv4_hdr);
			break;
		case ICE_IPV6_OFOS:
		case ICE_IPV6_IL:
			len = sizeof(struct ice_ipv6_hdr);
			break;
		case ICE_TCP_IL:
		case ICE_UDP_OF:
		case ICE_UDP_ILOS:
			len = sizeof(struct ice_l4_hdr);
			break;
		case ICE_SCTP_IL:
			len = sizeof(struct ice_sctp_hdr);
			break;
		default:
			return ICE_ERR_PARAM;
		}

		/* the length should be a word multiple */
		if (len % ICE_BYTES_PER_WORD)
			return ICE_ERR_CFG;

		/* We have the offset to the header start, the length, the
		 * caller's header values and mask. Use this information to
		 * copy the data into the dummy packet appropriately based on
		 * the mask. Note that we need to only write the bits as
		 * indicated by the mask to make sure we don't improperly write
		 * over any significant packet data.
		 */
		for (j = 0; j < len / sizeof(u16); j++)
			if (((u16 *)&lkups[i].m_u)[j])
				((u16 *)(pkt + offset))[j] =
					(((u16 *)(pkt + offset))[j] &
					 ~((u16 *)&lkups[i].m_u)[j]) |
					(((u16 *)&lkups[i].h_u)[j] &
					 ((u16 *)&lkups[i].m_u)[j]);
	}

	s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(pkt_len);

	return 0;
}

/**
 * ice_find_adv_rule_entry - Search a rule entry
 * @hw: pointer to the hardware structure
 * @lkups: lookup elements or match criteria for the advanced recipe, one
 *	   structure per protocol header
 * @lkups_cnt: number of protocols
 * @recp_id: recipe ID for which we are finding the rule
 * @rinfo: other information regarding the rule e.g. priority and action info
 *
 * Helper function to search for a given advance rule entry
 * Returns pointer to entry storing the rule if found
 */
static struct ice_adv_fltr_mgmt_list_entry *
ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
			u16 lkups_cnt, u16 recp_id,
			struct ice_adv_rule_info *rinfo)
{
	struct ice_adv_fltr_mgmt_list_entry *list_itr;
	struct ice_switch_info *sw = hw->switch_info;
	int i;

	list_for_each_entry(list_itr, &sw->recp_list[recp_id].filt_rules,
			    list_entry) {
		bool lkups_matched = true;

		if (lkups_cnt != list_itr->lkups_cnt)
			continue;
		/* lookups must match element-for-element, in order */
		for (i = 0; i < list_itr->lkups_cnt; i++)
			if (memcmp(&list_itr->lkups[i], &lkups[i],
				   sizeof(*lkups))) {
				lkups_matched = false;
				break;
			}
		/* only the sw_act flag is compared from rinfo here */
		if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
		    lkups_matched)
			return list_itr;
	}
	return NULL;
}

/**
 * ice_adv_add_update_vsi_list
 * @hw: pointer to the hardware structure
 * @m_entry: pointer to current adv filter management list entry
 * @cur_fltr: filter information from the book keeping entry
 * @new_fltr: filter information with
 *	      the new VSI to be added
 *
 * Call AQ command to add or update previously created VSI list with new VSI.
 *
 * Helper function to do book keeping associated with adding filter information
 * The algorithm to do the booking keeping is described below :
 * When a VSI needs to subscribe to a given advanced filter
 *	if only one VSI has been added till now
 *		Allocate a new VSI list and add two VSIs
 *		to this list using switch rule command
 *		Update the previously created switch rule with the
 *		newly created VSI list ID
 *	if a VSI list was previously created
 *		Add the new VSI to the previously created VSI list set
 *		using the update switch rule command
 */
static enum ice_status
ice_adv_add_update_vsi_list(struct ice_hw *hw,
			    struct ice_adv_fltr_mgmt_list_entry *m_entry,
			    struct ice_adv_rule_info *cur_fltr,
			    struct ice_adv_rule_info *new_fltr)
{
	enum ice_status status;
	u16 vsi_list_id = 0;

	/* queue/queue-group/drop actions cannot be shared via a VSI list */
	if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
	    cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
	    cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
		return ICE_ERR_NOT_IMPL;

	/* mixing a queue-based action with an existing VSI forward is not
	 * supported either
	 */
	if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
	     new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
	    (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
	     cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
		return ICE_ERR_NOT_IMPL;

	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
		/* Only one entry existed in the mapping and it was not already
		 * a part of a VSI list. So, create a VSI list with the old and
		 * new VSIs.
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];

		/* A rule already exists with the new VSI being added */
		if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
		    new_fltr->sw_act.fwd_id.hw_vsi_id)
			return ICE_ERR_ALREADY_EXISTS;

		vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
		vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id,
						  ICE_SW_LKUP_LAST);
		if (status)
			return status;

		memset(&tmp_fltr, 0, sizeof(tmp_fltr));
		tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;

		/* Update the previous switch rule of "forward to VSI" to
		 * "fwd to VSI list"
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			return status;

		cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
		cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
		m_entry->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);
	} else {
		u16 vsi_handle = new_fltr->sw_act.vsi_handle;

		if (!m_entry->vsi_list_info)
			return ICE_ERR_CFG;

		/* A rule already exists with the new VSI being added */
		if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
			return 0;

		/* Update the previously created VSI list set with
		 * the new VSI ID passed in
		 */
		vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;

		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
						  vsi_list_id, false,
						  ice_aqc_opc_update_sw_rules,
						  ICE_SW_LKUP_LAST);
		/* update VSI list mapping info with new VSI ID */
		if (!status)
			set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
	}
	if (!status)
		m_entry->vsi_count++;
	return status;
}

/**
 * ice_add_adv_rule - helper function to create an advanced switch rule
 * @hw: pointer to the hardware structure
 * @lkups: information on the words that needs to be looked up. All words
 *	   together makes one recipe
 * @lkups_cnt: num of entries in the lkups array
 * @rinfo: other information related to the rule that needs to be programmed
 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
 *		 ignored is case of error.
 *
 * This function can program only 1 rule at a time. The lkups is used to
 * describe the all the words that forms the "lookup" portion of the recipe.
 * These words can span multiple protocols. Callers to this function need to
 * pass in a list of protocol headers with lookup information along and mask
 * that determines which words are valid from the given protocol header.
 * rinfo describes other information related to this rule such as forwarding
 * IDs, priority of this rule, etc.
 */
enum ice_status
ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
		 struct ice_rule_query_data *added_entry)
{
	struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
	u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
	const struct ice_dummy_pkt_offsets *pkt_offsets;
	struct ice_aqc_sw_rules_elem *s_rule = NULL;
	struct list_head *rule_head;
	struct ice_switch_info *sw;
	enum ice_status status;
	const u8 *pkt = NULL;
	u16 word_cnt;
	u32 act = 0;
	u8 q_rgn;

	/* Initialize profile to result index bitmap */
	if (!hw->switch_info->prof_res_bm_init) {
		hw->switch_info->prof_res_bm_init = 1;
		ice_init_prof_result_bm(hw);
	}

	if (!lkups_cnt)
		return ICE_ERR_PARAM;

	/* get # of words we need to match */
	word_cnt = 0;
	for (i = 0; i < lkups_cnt; i++) {
		u16 j, *ptr;

		/* count every non-zero 16-bit mask word in this lookup */
		ptr = (u16 *)&lkups[i].m_u;
		for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
			if (ptr[j] != 0)
				word_cnt++;
	}

	if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
		return ICE_ERR_PARAM;

	/* make sure that we can locate a dummy packet */
	ice_find_dummy_packet(lkups, lkups_cnt, &pkt, &pkt_len,
			      &pkt_offsets);
	if (!pkt) {
		status = ICE_ERR_PARAM;
		goto err_ice_add_adv_rule;
	}

	if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
	      rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
	      rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
	      rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
		return ICE_ERR_CFG;

	vsi_handle = rinfo->sw_act.vsi_handle;
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
		rinfo->sw_act.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, vsi_handle);
	if (rinfo->sw_act.flag & ICE_FLTR_TX)
		rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);

	status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
	if (status)
		return status;
	m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
	if (m_entry) {
		/* we have to add VSI to VSI_LIST and increment vsi_count.
		 * Also Update VSI list so that we can change forwarding rule
		 * if the rule already exists, we will check if it exists with
		 * same vsi_id, if not then add it to the VSI list if it already
		 * exists if not then create a VSI list and add the existing VSI
		 * ID and the new VSI ID to the list
		 * We will add that VSI to the list
		 */
		status = ice_adv_add_update_vsi_list(hw, m_entry,
						     &m_entry->rule_info,
						     rinfo);
		if (added_entry) {
			added_entry->rid = rid;
			added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
			added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
		}
		return status;
	}
	rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
	s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;
	act |= ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
	/* encode the requested action into the rule's 'act' word */
	switch (rinfo->sw_act.fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
			ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_Q:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
		       ICE_SINGLE_ACT_Q_INDEX_M;
		break;
	case ICE_FWD_TO_QGRP:
		/* queue region size is encoded as log2 of the group size */
		q_rgn = rinfo->sw_act.qgrp_size > 0 ?
			(u8)ilog2(rinfo->sw_act.qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
		       ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
		       ICE_SINGLE_ACT_Q_REGION_M;
		break;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
		       ICE_SINGLE_ACT_VALID_BIT;
		break;
	default:
		status = ICE_ERR_CFG;
		goto err_ice_add_adv_rule;
	}

	/* set the rule LOOKUP type based on caller specified 'Rx'
	 * instead of hardcoding it to be either LOOKUP_TX/RX
	 *
	 * for 'Rx' set the source to be the port number
	 * for 'Tx' set the source to be the source HW VSI number (determined
	 * by caller)
	 */
	if (rinfo->rx) {
		s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
		s_rule->pdata.lkup_tx_rx.src =
			cpu_to_le16(hw->port_info->lport);
	} else {
		s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
		s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(rinfo->sw_act.src);
	}

	s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(rid);
	s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);

	status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
					   pkt_len, pkt_offsets);
	if (status)
		goto err_ice_add_adv_rule;

	status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
				 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
				 NULL);
	if (status)
		goto err_ice_add_adv_rule;
	adv_fltr = devm_kzalloc(ice_hw_to_dev(hw),
				sizeof(struct ice_adv_fltr_mgmt_list_entry),
				GFP_KERNEL);
	if (!adv_fltr) {
		status = ICE_ERR_NO_MEMORY;
		goto err_ice_add_adv_rule;
	}

	adv_fltr->lkups = devm_kmemdup(ice_hw_to_dev(hw), lkups,
				       lkups_cnt * sizeof(*lkups), GFP_KERNEL);
	if (!adv_fltr->lkups) {
		status = ICE_ERR_NO_MEMORY;
		goto err_ice_add_adv_rule;
	}

	adv_fltr->lkups_cnt = lkups_cnt;
	adv_fltr->rule_info = *rinfo;
	/* the rule ID assigned by firmware is returned in the AQ buffer */
	adv_fltr->rule_info.fltr_rule_id =
		le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
	sw = hw->switch_info;
	sw->recp_list[rid].adv_rule = true;
	rule_head = &sw->recp_list[rid].filt_rules;

	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
		adv_fltr->vsi_count = 1;

	/* Add rule entry to book keeping list */
	list_add(&adv_fltr->list_entry, rule_head);
	if (added_entry) {
		added_entry->rid = rid;
		added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
		added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
	}
err_ice_add_adv_rule:
	/* on failure, free the partial bookkeeping entry; on success the
	 * entry stays on rule_head and only the AQ buffer is released
	 */
	if (status && adv_fltr) {
		devm_kfree(ice_hw_to_dev(hw), adv_fltr->lkups);
		devm_kfree(ice_hw_to_dev(hw), adv_fltr);
	}

	kfree(s_rule);

	return status;
}

/**
 * ice_replay_vsi_fltr - Replay filters for requested VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: driver VSI handle
 * @recp_id: Recipe ID for which rules need to be replayed
 * @list_head: list for which filters need to be replayed
 *
 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
 * It is required to pass valid VSI handle.
 */
static enum ice_status
ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
		    struct list_head *list_head)
{
	struct ice_fltr_mgmt_list_entry *itr;
	enum ice_status status = 0;
	u16 hw_vsi_id;

	/* Nothing recorded for this recipe, nothing to replay */
	if (list_empty(list_head))
		return status;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	list_for_each_entry(itr, list_head, list_entry) {
		struct ice_fltr_list_entry f_entry;

		f_entry.fltr_info = itr->fltr_info;
		/* Rule used by at most one VSI and owned by the VSI being
		 * replayed: re-add it directly (VLAN rules always take the
		 * VSI-list path below).
		 */
		if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
		    itr->fltr_info.vsi_handle == vsi_handle) {
			/* update the src in case it is VSI num */
			if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
				f_entry.fltr_info.src = hw_vsi_id;
			status = ice_add_rule_internal(hw, recp_id, &f_entry);
			if (status)
				goto end;
			continue;
		}
		/* Skip rules whose VSI list does not include this VSI */
		if (!itr->vsi_list_info ||
		    !test_bit(vsi_handle, itr->vsi_list_info->vsi_map))
			continue;
		/* Clearing it so that the logic can add it back */
		clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
		f_entry.fltr_info.vsi_handle = vsi_handle;
		f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
		/* update the src in case it is VSI num */
		if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
			f_entry.fltr_info.src = hw_vsi_id;
		if (recp_id == ICE_SW_LKUP_VLAN)
			status = ice_add_vlan_internal(hw, &f_entry);
		else
			status = ice_add_rule_internal(hw, recp_id, &f_entry);
		if (status)
			goto end;
	}
end:
	return status;
}

/**
 * ice_adv_rem_update_vsi_list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle of the VSI to remove
 * @fm_list: filter management entry for which the VSI list management needs to
 *	     be done
 *
 * Removes @vsi_handle from the VSI list of an advanced rule. When exactly one
 * VSI remains afterwards, the switch rule is rewritten to forward directly to
 * that VSI and the now-unused VSI list is deleted and freed.
 */
static enum ice_status
ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
			    struct ice_adv_fltr_mgmt_list_entry *fm_list)
{
	struct ice_vsi_list_map_info *vsi_list_info;
	enum ice_sw_lkup_type lkup_type;
	enum ice_status status;
	u16 vsi_list_id;

	if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)
		return ICE_ERR_PARAM;

	/* A rule with the VSI being removed does not exist
	 *
	 * NOTE(review): assumes fm_list->vsi_list_info is non-NULL for any
	 * ICE_FWD_TO_VSI_LIST rule with a non-zero vsi_count — confirm with
	 * callers.
	 */
	if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
		return ICE_ERR_DOES_NOT_EXIST;

	lkup_type = ICE_SW_LKUP_LAST;
	vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
	/* Remove this VSI from the HW VSI list (remove flag set to true) */
	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,
					  lkup_type);
	if (status)
		return status;

	fm_list->vsi_count--;
	clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
	vsi_list_info = fm_list->vsi_list_info;
	if (fm_list->vsi_count == 1) {
		struct ice_fltr_info tmp_fltr;
		u16 rem_vsi_handle;

		/* Exactly one VSI left: convert the rule to a plain
		 * forward-to-VSI action and retire the VSI list.
		 */
		rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
						ICE_MAX_VSI);
		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
			return ICE_ERR_OUT_OF_RANGE;

		/* Make sure VSI list is empty before removing it below */
		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
						  vsi_list_id, true,
						  ice_aqc_opc_update_sw_rules,
						  lkup_type);
		if (status)
			return status;

		memset(&tmp_fltr, 0, sizeof(tmp_fltr));
		tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
		tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
		fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;

		/* Update the previous switch rule of "forward to VSI list"
		 * to "forward to VSI" targeting the remaining VSI
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
				  tmp_fltr.fwd_id.hw_vsi_id, status);
			return status;
		}
		fm_list->vsi_list_info->ref_cnt--;

		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
				  vsi_list_id, status);
			return status;
		}

		list_del(&vsi_list_info->list_entry);
		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
		fm_list->vsi_list_info = NULL;
	}

	return status;
}

/**
 * ice_rem_adv_rule - removes existing advanced switch rule
 * @hw: pointer to the hardware structure
 * @lkups: information on the words that needs to be looked up. All words
 *	   together makes one recipe
 * @lkups_cnt: num of entries in the lkups array
 * @rinfo: Its the pointer to the rule information for the rule
 *
 * This function can be used to remove 1 rule at a time. The lkups is
 * used to describe all the words that forms the "lookup" portion of the
 * rule. These words can span multiple protocols. Callers to this function
 * need to pass in a list of protocol headers with lookup information along
 * and mask that determines which words are valid from the given protocol
 * header. rinfo describes other information related to this rule such as
 * forwarding IDs, priority of this rule, etc.
 */
static enum ice_status
ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
{
	struct ice_adv_fltr_mgmt_list_entry *list_elem;
	struct ice_prot_lkup_ext lkup_exts;
	enum ice_status status = 0;
	bool remove_rule = false;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	u16 i, rid, vsi_handle;

	/* Re-derive the recipe from the lookup words so the rule can be
	 * located in the bookkeeping lists.
	 */
	memset(&lkup_exts, 0, sizeof(lkup_exts));
	for (i = 0; i < lkups_cnt; i++) {
		u16 count;

		if (lkups[i].type >= ICE_PROTOCOL_LAST)
			return ICE_ERR_CFG;

		count = ice_fill_valid_words(&lkups[i], &lkup_exts);
		if (!count)
			return ICE_ERR_CFG;
	}

	rid = ice_find_recp(hw, &lkup_exts);
	/* If did not find a recipe that match the existing criteria */
	if (rid == ICE_MAX_NUM_RECIPES)
		return ICE_ERR_PARAM;

	rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
	/* NOTE(review): the entry lookup happens outside rule_lock — confirm
	 * that callers serialize add/remove for the same rule.
	 */
	list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
	/* the rule is already removed */
	if (!list_elem)
		return 0;
	mutex_lock(rule_lock);
	if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
		/* Not a shared VSI-list rule: the HW rule itself goes away */
		remove_rule = true;
	} else if (list_elem->vsi_count > 1) {
		/* Other VSIs still reference the rule: only unlink this VSI */
		remove_rule = false;
		vsi_handle = rinfo->sw_act.vsi_handle;
		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
	} else {
		vsi_handle = rinfo->sw_act.vsi_handle;
		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
		if (status) {
			mutex_unlock(rule_lock);
			return status;
		}
		if (list_elem->vsi_count == 0)
			remove_rule = true;
	}
	mutex_unlock(rule_lock);
	if (remove_rule) {
		struct ice_aqc_sw_rules_elem *s_rule;
		u16 rule_buf_sz;

		/* A remove request carries no packet header payload */
		rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
		s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
		if (!s_rule)
			return ICE_ERR_NO_MEMORY;
		s_rule->pdata.lkup_tx_rx.act = 0;
		s_rule->pdata.lkup_tx_rx.index =
			cpu_to_le16(list_elem->rule_info.fltr_rule_id);
		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
		status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
					 rule_buf_sz, 1,
					 ice_aqc_opc_remove_sw_rules, NULL);
		if (!status || status == ICE_ERR_DOES_NOT_EXIST) {
			struct ice_switch_info *sw = hw->switch_info;

			/* Drop the bookkeeping entry; DOES_NOT_EXIST means
			 * HW reports the rule is already gone, so cleanup
			 * proceeds the same as on success.
			 */
			mutex_lock(rule_lock);
			list_del(&list_elem->list_entry);
			devm_kfree(ice_hw_to_dev(hw), list_elem->lkups);
			devm_kfree(ice_hw_to_dev(hw), list_elem);
			mutex_unlock(rule_lock);
			if (list_empty(&sw->recp_list[rid].filt_rules))
				sw->recp_list[rid].adv_rule = false;
		}
		kfree(s_rule);
	}
	return status;
}

/**
 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
 * @hw: pointer to the hardware structure
 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
 *
 * This function is used to remove 1 rule at a time. The removal is based on
 * the remove_entry parameter.
This function will remove rule for a given 5141 * vsi_handle with a given rule_id which is passed as parameter in remove_entry 5142 */ 5143 enum ice_status 5144 ice_rem_adv_rule_by_id(struct ice_hw *hw, 5145 struct ice_rule_query_data *remove_entry) 5146 { 5147 struct ice_adv_fltr_mgmt_list_entry *list_itr; 5148 struct list_head *list_head; 5149 struct ice_adv_rule_info rinfo; 5150 struct ice_switch_info *sw; 5151 5152 sw = hw->switch_info; 5153 if (!sw->recp_list[remove_entry->rid].recp_created) 5154 return ICE_ERR_PARAM; 5155 list_head = &sw->recp_list[remove_entry->rid].filt_rules; 5156 list_for_each_entry(list_itr, list_head, list_entry) { 5157 if (list_itr->rule_info.fltr_rule_id == 5158 remove_entry->rule_id) { 5159 rinfo = list_itr->rule_info; 5160 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle; 5161 return ice_rem_adv_rule(hw, list_itr->lkups, 5162 list_itr->lkups_cnt, &rinfo); 5163 } 5164 } 5165 /* either list is empty or unable to find rule */ 5166 return ICE_ERR_DOES_NOT_EXIST; 5167 } 5168 5169 /** 5170 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists 5171 * @hw: pointer to the hardware structure 5172 * @vsi_handle: driver VSI handle 5173 * 5174 * Replays filters for requested VSI via vsi_handle. 5175 */ 5176 enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle) 5177 { 5178 struct ice_switch_info *sw = hw->switch_info; 5179 enum ice_status status = 0; 5180 u8 i; 5181 5182 for (i = 0; i < ICE_SW_LKUP_LAST; i++) { 5183 struct list_head *head; 5184 5185 head = &sw->recp_list[i].filt_replay_rules; 5186 status = ice_replay_vsi_fltr(hw, vsi_handle, i, head); 5187 if (status) 5188 return status; 5189 } 5190 return status; 5191 } 5192 5193 /** 5194 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules 5195 * @hw: pointer to the HW struct 5196 * 5197 * Deletes the filter replay rules. 
5198 */ 5199 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw) 5200 { 5201 struct ice_switch_info *sw = hw->switch_info; 5202 u8 i; 5203 5204 if (!sw) 5205 return; 5206 5207 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { 5208 if (!list_empty(&sw->recp_list[i].filt_replay_rules)) { 5209 struct list_head *l_head; 5210 5211 l_head = &sw->recp_list[i].filt_replay_rules; 5212 if (!sw->recp_list[i].adv_rule) 5213 ice_rem_sw_rule_info(hw, l_head); 5214 else 5215 ice_rem_adv_rule_info(hw, l_head); 5216 } 5217 } 5218 } 5219