// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include "ice_common.h"
#include "ice_flex_pipe.h"
#include "ice_flow.h"

static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = {
	/* SWITCH */
	{
		ICE_SID_XLT0_SW,
		ICE_SID_XLT_KEY_BUILDER_SW,
		ICE_SID_XLT1_SW,
		ICE_SID_XLT2_SW,
		ICE_SID_PROFID_TCAM_SW,
		ICE_SID_PROFID_REDIR_SW,
		ICE_SID_FLD_VEC_SW,
		ICE_SID_CDID_KEY_BUILDER_SW,
		ICE_SID_CDID_REDIR_SW
	},

	/* ACL */
	{
		ICE_SID_XLT0_ACL,
		ICE_SID_XLT_KEY_BUILDER_ACL,
		ICE_SID_XLT1_ACL,
		ICE_SID_XLT2_ACL,
		ICE_SID_PROFID_TCAM_ACL,
		ICE_SID_PROFID_REDIR_ACL,
		ICE_SID_FLD_VEC_ACL,
		ICE_SID_CDID_KEY_BUILDER_ACL,
		ICE_SID_CDID_REDIR_ACL
	},

	/* FD */
	{
		ICE_SID_XLT0_FD,
		ICE_SID_XLT_KEY_BUILDER_FD,
		ICE_SID_XLT1_FD,
		ICE_SID_XLT2_FD,
		ICE_SID_PROFID_TCAM_FD,
		ICE_SID_PROFID_REDIR_FD,
		ICE_SID_FLD_VEC_FD,
		ICE_SID_CDID_KEY_BUILDER_FD,
		ICE_SID_CDID_REDIR_FD
	},

	/* RSS */
	{
		ICE_SID_XLT0_RSS,
		ICE_SID_XLT_KEY_BUILDER_RSS,
		ICE_SID_XLT1_RSS,
		ICE_SID_XLT2_RSS,
		ICE_SID_PROFID_TCAM_RSS,
		ICE_SID_PROFID_REDIR_RSS,
		ICE_SID_FLD_VEC_RSS,
		ICE_SID_CDID_KEY_BUILDER_RSS,
		ICE_SID_CDID_REDIR_RSS
	},

	/* PE */
	{
		ICE_SID_XLT0_PE,
		ICE_SID_XLT_KEY_BUILDER_PE,
		ICE_SID_XLT1_PE,
		ICE_SID_XLT2_PE,
		ICE_SID_PROFID_TCAM_PE,
		ICE_SID_PROFID_REDIR_PE,
		ICE_SID_FLD_VEC_PE,
		ICE_SID_CDID_KEY_BUILDER_PE,
		ICE_SID_CDID_REDIR_PE
	}
};

/**
 * ice_sect_id - returns section ID
 * @blk: block type
 * @sect: section type
 *
 * This helper function returns the proper section ID given a block type and a
 * section type.
 */
static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect)
{
	return ice_sect_lkup[blk][sect];
}

/**
 * ice_pkg_val_buf
 * @buf: pointer to the ice buffer
 *
 * This helper function validates a buffer's header.
 */
static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
{
	struct ice_buf_hdr *hdr;
	u16 section_count;
	u16 data_end;

	hdr = (struct ice_buf_hdr *)buf->buf;
	/* verify data */
	section_count = le16_to_cpu(hdr->section_count);
	if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
		return NULL;

	data_end = le16_to_cpu(hdr->data_end);
	if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
		return NULL;

	return hdr;
}

/**
 * ice_find_buf_table
 * @ice_seg: pointer to the ice segment
 *
 * Returns the address of the buffer table within the ice segment.
 */
static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
{
	struct ice_nvm_table *nvms;

	nvms = (struct ice_nvm_table *)
		(ice_seg->device_table +
		 le32_to_cpu(ice_seg->device_table_count));

	return (__force struct ice_buf_table *)
		(nvms->vers + le32_to_cpu(nvms->table_count));
}

/**
 * ice_pkg_enum_buf
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 *
 * This function will enumerate all the buffers in the ice segment. The first
 * call is made with the ice_seg parameter non-NULL; on subsequent calls,
 * ice_seg is set to NULL which continues the enumeration. When the function
 * returns a NULL pointer, then the end of the buffers has been reached, or an
 * unexpected value has been detected (for example an invalid section count or
 * an invalid buffer end value).
 */
static struct ice_buf_hdr *
ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
{
	if (ice_seg) {
		state->buf_table = ice_find_buf_table(ice_seg);
		if (!state->buf_table)
			return NULL;

		state->buf_idx = 0;
		return ice_pkg_val_buf(state->buf_table->buf_array);
	}

	if (++state->buf_idx < le32_to_cpu(state->buf_table->buf_count))
		return ice_pkg_val_buf(state->buf_table->buf_array +
				       state->buf_idx);
	else
		return NULL;
}

/**
 * ice_pkg_advance_sect
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 *
 * This helper function will advance the section within the ice segment,
 * also advancing the buffer if needed.
 */
static bool
ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
{
	if (!ice_seg && !state->buf)
		return false;

	if (!ice_seg && state->buf)
		if (++state->sect_idx < le16_to_cpu(state->buf->section_count))
			return true;

	state->buf = ice_pkg_enum_buf(ice_seg, state);
	if (!state->buf)
		return false;

	/* start of new buffer, reset section index */
	state->sect_idx = 0;
	return true;
}

/**
 * ice_pkg_enum_section
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 * @sect_type: section type to enumerate
 *
 * This function will enumerate all the sections of a particular type in the
 * ice segment. The first call is made with the ice_seg parameter non-NULL;
 * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
 * When the function returns a NULL pointer, then the end of the matching
 * sections has been reached.
 */
static void *
ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
		     u32 sect_type)
{
	u16 offset, size;

	if (ice_seg)
		state->type = sect_type;

	if (!ice_pkg_advance_sect(ice_seg, state))
		return NULL;

	/* scan for next matching section */
	while (state->buf->section_entry[state->sect_idx].type !=
	       cpu_to_le32(state->type))
		if (!ice_pkg_advance_sect(NULL, state))
			return NULL;

	/* validate section */
	offset = le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
	if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
		return NULL;

	size = le16_to_cpu(state->buf->section_entry[state->sect_idx].size);
	if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
		return NULL;

	/* make sure the section fits in the buffer */
	if (offset + size > ICE_PKG_BUF_SIZE)
		return NULL;

	state->sect_type =
		le32_to_cpu(state->buf->section_entry[state->sect_idx].type);

	/* calc pointer to this section */
	state->sect = ((u8 *)state->buf) +
		le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);

	return state->sect;
}
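
/* Illustrative sketch (not part of the original driver): a minimal user of
 * ice_pkg_enum_section() showing the first-call/continuation idiom described
 * above. The helper name and the ICE_SID_XLT1_SW section type are example
 * choices only.
 */
static u32 __maybe_unused ice_count_sects_example(struct ice_seg *ice_seg)
{
	struct ice_pkg_enum state = { 0 };
	u32 count = 0;
	void *sect;

	/* first call takes the segment; pass NULL afterwards to continue */
	sect = ice_pkg_enum_section(ice_seg, &state, ICE_SID_XLT1_SW);
	while (sect) {
		count++;
		sect = ice_pkg_enum_section(NULL, &state, ICE_SID_XLT1_SW);
	}

	return count;
}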

/* Key creation */

#define ICE_DC_KEY	0x1	/* don't care */
#define ICE_DC_KEYINV	0x1
#define ICE_NM_KEY	0x0	/* never match */
#define ICE_NM_KEYINV	0x0
#define ICE_0_KEY	0x1	/* match 0 */
#define ICE_0_KEYINV	0x0
#define ICE_1_KEY	0x0	/* match 1 */
#define ICE_1_KEYINV	0x1

/**
 * ice_gen_key_word - generate 16-bits of a key/mask word
 * @val: the value
 * @valid: valid bits mask (change only the valid bits)
 * @dont_care: don't care mask
 * @nvr_mtch: never match mask
 * @key: pointer to an array where the resulting key portion will be stored
 * @key_inv: pointer to an array where the resulting key invert portion will
 *	     be stored
 *
 * This function generates 16 bits from an 8-bit value, an 8-bit don't care
 * mask and an 8-bit never match mask. The 16 bits of output are divided into
 * 8 bits of key and 8 bits of key invert.
 *
 * '0' = b01, always match a 0 bit
 * '1' = b10, always match a 1 bit
 * '?' = b11, don't care bit (always matches)
 * '~' = b00, never match bit
 *
 * Input:
 *          val:        b0  1  0  1  0  1
 *          dont_care:  b0  0  1  1  0  0
 *          nvr_mtch:   b0  0  0  0  1  1
 *          ------------------------------
 * Result:  key:       b01 10 11 11 00 00
 */
static enum ice_status
ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
		 u8 *key_inv)
{
	u8 in_key = *key, in_key_inv = *key_inv;
	u8 i;

	/* 'dont_care' and 'nvr_mtch' masks cannot overlap */
	if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch))
		return ICE_ERR_CFG;

	*key = 0;
	*key_inv = 0;

	/* encode the 8 bits into 8-bit key and 8-bit key invert */
	for (i = 0; i < 8; i++) {
		*key >>= 1;
		*key_inv >>= 1;

		if (!(valid & 0x1)) { /* change only valid bits */
			*key |= (in_key & 0x1) << 7;
			*key_inv |= (in_key_inv & 0x1) << 7;
		} else if (dont_care & 0x1) { /* don't care bit */
			*key |= ICE_DC_KEY << 7;
			*key_inv |= ICE_DC_KEYINV << 7;
		} else if (nvr_mtch & 0x1) { /* never match bit */
			*key |= ICE_NM_KEY << 7;
			*key_inv |= ICE_NM_KEYINV << 7;
		} else if (val & 0x01) { /* exact 1 match */
			*key |= ICE_1_KEY << 7;
			*key_inv |= ICE_1_KEYINV << 7;
		} else { /* exact 0 match */
			*key |= ICE_0_KEY << 7;
			*key_inv |= ICE_0_KEYINV << 7;
		}

		dont_care >>= 1;
		nvr_mtch >>= 1;
		valid >>= 1;
		val >>= 1;
		in_key >>= 1;
		in_key_inv >>= 1;
	}

	return 0;
}

/**
 * ice_bits_max_set - determine if the number of bits set is within a maximum
 * @mask: pointer to the byte array which is the mask
 * @size: the number of bytes in the mask
 * @max: the max number of set bits
 *
 * This function determines if there are at most 'max' bits set in an array.
 * Returns true if the number of bits set is <= max, false otherwise.
 */
static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
{
	u16 count = 0;
	u16 i;

	/* check each byte */
	for (i = 0; i < size; i++) {
		/* if 0, go to next byte */
		if (!mask[i])
			continue;

		/* We know there is at least one set bit in this byte because
		 * of the above check; if we already have found 'max' number
		 * of bits set, then we can return failure now.
		 */
		if (count == max)
			return false;

		/* count the bits in this byte, checking threshold */
		count += hweight8(mask[i]);
		if (count > max)
			return false;
	}

	return true;
}
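
/* Worked example (illustrative, not in the original source): with
 * val = 0x01, valid = 0xFF, dont_care = 0x02 and nvr_mtch = 0x00, bit 0
 * encodes "match 1", bit 1 encodes "don't care" and bits 2-7 encode
 * "match 0", so the expected outputs are key = 0xFE and key_inv = 0x03.
 */
static void __maybe_unused ice_gen_key_word_example(void)
{
	u8 key = 0, key_inv = 0;

	if (!ice_gen_key_word(0x01, 0xFF, 0x02, 0x00, &key, &key_inv)) {
		/* here key == 0xFE and key_inv == 0x03 */
	}
}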

/**
 * ice_set_key - generate a variable sized key with multiples of 16-bits
 * @key: pointer to where the key will be stored
 * @size: the size of the complete key in bytes (must be even)
 * @val: array of 8-bit values that makes up the value portion of the key
 * @upd: array of 8-bit masks that determine what key portion to update
 * @dc: array of 8-bit masks that make up the don't care mask
 * @nm: array of 8-bit masks that make up the never match mask
 * @off: the offset of the first byte in the key to update
 * @len: the number of bytes in the key update
 *
 * This function generates a key from a value, a don't care mask and a never
 * match mask.
 * upd, dc, and nm are optional parameters, and can be NULL:
 *	upd == NULL --> upd mask is all 1's (update all bits)
 *	dc == NULL --> dc mask is all 0's (no don't care bits)
 *	nm == NULL --> nm mask is all 0's (no never match bits)
 */
static enum ice_status
ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
	    u16 len)
{
	u16 half_size;
	u16 i;

	/* size must be a multiple of 2 bytes. */
	if (size % 2)
		return ICE_ERR_CFG;

	half_size = size / 2;
	if (off + len > half_size)
		return ICE_ERR_CFG;

	/* Make sure at most one bit is set in the never match mask. Having
	 * more than one never match mask bit set will cause HW to consume
	 * excessive power otherwise; this is a power management efficiency
	 * check.
	 */
#define ICE_NVR_MTCH_BITS_MAX	1
	if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX))
		return ICE_ERR_CFG;

	for (i = 0; i < len; i++)
		if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff,
				     dc ? dc[i] : 0, nm ? nm[i] : 0,
				     key + off + i, key + half_size + off + i))
			return ICE_ERR_CFG;

	return 0;
}
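
/* Illustrative sketch (helper name is hypothetical): build a 4-byte key
 * (2 key bytes followed by 2 invert bytes) that exact-matches 0xAB in the
 * first byte and treats the second byte as don't care.
 */
static void __maybe_unused ice_set_key_example(void)
{
	u8 key[4] = { 0 };		/* [0..1] key, [2..3] key invert */
	u8 val[2] = { 0xAB, 0x00 };
	u8 dc[2] = { 0x00, 0xFF };	/* second byte is all don't care */

	/* update both bytes; a NULL upd mask means "update all bits" */
	if (ice_set_key(key, sizeof(key), val, NULL, dc, NULL, 0, sizeof(val)))
		return;		/* ICE_ERR_CFG on invalid parameters */
}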

/**
 * ice_acquire_global_cfg_lock
 * @hw: pointer to the HW structure
 * @access: access type (read or write)
 *
 * This function will request ownership of the global config lock for reading
 * or writing of the package. When attempting to obtain write access, the
 * caller must check for the following two return values:
 *
 * ICE_SUCCESS        - Means the caller has acquired the global config lock
 *                      and can perform writing of the package.
 * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the
 *                      package or has found that no update was necessary; in
 *                      this case, the caller can just skip performing any
 *                      update of the package.
 */
static enum ice_status
ice_acquire_global_cfg_lock(struct ice_hw *hw,
			    enum ice_aq_res_access_type access)
{
	enum ice_status status;

	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
				 ICE_GLOBAL_CFG_LOCK_TIMEOUT);

	if (!status)
		mutex_lock(&ice_global_cfg_lock_sw);
	else if (status == ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_PKG,
			  "Global config lock: No work to do\n");

	return status;
}

/**
 * ice_release_global_cfg_lock
 * @hw: pointer to the HW structure
 *
 * This function will release the global config lock.
 */
static void ice_release_global_cfg_lock(struct ice_hw *hw)
{
	mutex_unlock(&ice_global_cfg_lock_sw);
	ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
}

/**
 * ice_acquire_change_lock
 * @hw: pointer to the HW structure
 * @access: access type (read or write)
 *
 * This function will request ownership of the change lock.
 */
static enum ice_status
ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
	return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
			       ICE_CHANGE_LOCK_TIMEOUT);
}

/**
 * ice_release_change_lock
 * @hw: pointer to the HW structure
 *
 * This function will release the change lock using the proper Admin Command.
 */
static void ice_release_change_lock(struct ice_hw *hw)
{
	ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
}

/**
 * ice_aq_download_pkg
 * @hw: pointer to the hardware structure
 * @pkg_buf: the package buffer to transfer
 * @buf_size: the size of the package buffer
 * @last_buf: last buffer indicator
 * @error_offset: returns error offset
 * @error_info: returns error information
 * @cd: pointer to command details structure or NULL
 *
 * Download Package (0x0C40)
 */
static enum ice_status
ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
		    u16 buf_size, bool last_buf, u32 *error_offset,
		    u32 *error_info, struct ice_sq_cd *cd)
{
	struct ice_aqc_download_pkg *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	if (error_offset)
		*error_offset = 0;
	if (error_info)
		*error_info = 0;

	cmd = &desc.params.download_pkg;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	if (last_buf)
		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;

	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
	if (status == ICE_ERR_AQ_ERROR) {
		/* Read error from buffer only when the FW returned an error */
		struct ice_aqc_download_pkg_resp *resp;

		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
		if (error_offset)
			*error_offset = le32_to_cpu(resp->error_offset);
		if (error_info)
			*error_info = le32_to_cpu(resp->error_info);
	}

	return status;
}

/**
 * ice_aq_update_pkg
 * @hw: pointer to the hardware structure
 * @pkg_buf: the package cmd buffer
 * @buf_size: the size of the package cmd buffer
 * @last_buf: last buffer indicator
 * @error_offset: returns error offset
 * @error_info: returns error information
 * @cd: pointer to command details structure or NULL
 *
 * Update Package (0x0C42)
 */
static enum ice_status
ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
		  bool last_buf, u32 *error_offset, u32 *error_info,
		  struct ice_sq_cd *cd)
{
	struct ice_aqc_download_pkg *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	if (error_offset)
		*error_offset = 0;
	if (error_info)
		*error_info = 0;

	cmd = &desc.params.download_pkg;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	if (last_buf)
		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;

	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
	if (status == ICE_ERR_AQ_ERROR) {
		/* Read error from buffer only when the FW returned an error */
		struct ice_aqc_download_pkg_resp *resp;

		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
		if (error_offset)
			*error_offset = le32_to_cpu(resp->error_offset);
		if (error_info)
			*error_info = le32_to_cpu(resp->error_info);
	}

	return status;
}

/**
 * ice_find_seg_in_pkg
 * @hw: pointer to the hardware structure
 * @seg_type: the segment type to search for (i.e., SEGMENT_TYPE_CPK)
 * @pkg_hdr: pointer to the package header to be searched
 *
 * This function searches a package file for a particular segment type. On
 * success it returns a pointer to the segment header, otherwise it will
 * return NULL.
 */
static struct ice_generic_seg_hdr *
ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
		    struct ice_pkg_hdr *pkg_hdr)
{
	u32 i;

	ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
		  pkg_hdr->format_ver.major, pkg_hdr->format_ver.minor,
		  pkg_hdr->format_ver.update, pkg_hdr->format_ver.draft);

	/* Search all package segments for the requested segment type */
	for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) {
		struct ice_generic_seg_hdr *seg;

		seg = (struct ice_generic_seg_hdr *)
			((u8 *)pkg_hdr + le32_to_cpu(pkg_hdr->seg_offset[i]));

		if (le32_to_cpu(seg->seg_type) == seg_type)
			return seg;
	}

	return NULL;
}

/**
 * ice_update_pkg
 * @hw: pointer to the hardware structure
 * @bufs: pointer to an array of buffers
 * @count: the number of buffers in the array
 *
 * Obtains change lock and updates package.
 */
static enum ice_status
ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
	enum ice_status status;
	u32 offset, info, i;

	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
	if (status)
		return status;

	for (i = 0; i < count; i++) {
		struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
		bool last = ((i + 1) == count);

		status = ice_aq_update_pkg(hw, bh, le16_to_cpu(bh->data_end),
					   last, &offset, &info, NULL);

		if (status) {
			ice_debug(hw, ICE_DBG_PKG,
				  "Update pkg failed: err %d off %d inf %d\n",
				  status, offset, info);
			break;
		}
	}

	ice_release_change_lock(hw);

	return status;
}

/**
 * ice_dwnld_cfg_bufs
 * @hw: pointer to the hardware structure
 * @bufs: pointer to an array of buffers
 * @count: the number of buffers in the array
 *
 * Obtains global config lock and downloads the package configuration buffers
 * to the firmware. Metadata buffers are skipped, and the first metadata buffer
 * found indicates that the rest of the buffers are all metadata buffers.
 */
static enum ice_status
ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
	enum ice_status status;
	struct ice_buf_hdr *bh;
	u32 offset, info, i;

	if (!bufs || !count)
		return ICE_ERR_PARAM;

	/* If the first buffer's first section has its metadata bit set
	 * then there are no buffers to be downloaded, and the operation is
	 * considered a success.
	 */
	bh = (struct ice_buf_hdr *)bufs;
	if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF)
		return 0;

	/* reset pkg_dwnld_status in case this function is called in the
	 * reset/rebuild flow
	 */
	hw->pkg_dwnld_status = ICE_AQ_RC_OK;

	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
	if (status) {
		if (status == ICE_ERR_AQ_NO_WORK)
			hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST;
		else
			hw->pkg_dwnld_status = hw->adminq.sq_last_status;
		return status;
	}

	for (i = 0; i < count; i++) {
		bool last = ((i + 1) == count);

		if (!last) {
			/* check next buffer for metadata flag */
			bh = (struct ice_buf_hdr *)(bufs + i + 1);

			/* A set metadata flag in the next buffer will signal
			 * that the current buffer will be the last buffer
			 * downloaded
			 */
			if (le16_to_cpu(bh->section_count))
				if (le32_to_cpu(bh->section_entry[0].type) &
				    ICE_METADATA_BUF)
					last = true;
		}

		bh = (struct ice_buf_hdr *)(bufs + i);

		status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
					     &offset, &info, NULL);

		/* Save AQ status from download package */
		hw->pkg_dwnld_status = hw->adminq.sq_last_status;
		if (status) {
			ice_debug(hw, ICE_DBG_PKG,
				  "Pkg download failed: err %d off %d inf %d\n",
				  status, offset, info);
			break;
		}

		if (last)
			break;
	}

	ice_release_global_cfg_lock(hw);

	return status;
}

/**
 * ice_aq_get_pkg_info_list
 * @hw: pointer to the hardware structure
 * @pkg_info: the buffer which will receive the information list
 * @buf_size: the size of the pkg_info information buffer
 * @cd: pointer to command details structure or NULL
 *
 * Get Package Info List (0x0C43)
 */
static enum ice_status
ice_aq_get_pkg_info_list(struct ice_hw *hw,
			 struct ice_aqc_get_pkg_info_resp *pkg_info,
			 u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);

	return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
}

/**
 * ice_download_pkg
 * @hw: pointer to the hardware structure
 * @ice_seg: pointer to the segment of the package to be downloaded
 *
 * Handles the download of a complete package.
 */
static enum ice_status
ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
{
	struct ice_buf_table *ice_buf_tbl;

	ice_debug(hw, ICE_DBG_PKG, "Segment version: %d.%d.%d.%d\n",
		  ice_seg->hdr.seg_ver.major, ice_seg->hdr.seg_ver.minor,
		  ice_seg->hdr.seg_ver.update, ice_seg->hdr.seg_ver.draft);

	ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
		  le32_to_cpu(ice_seg->hdr.seg_type),
		  le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_name);

	ice_buf_tbl = ice_find_buf_table(ice_seg);

	ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
		  le32_to_cpu(ice_buf_tbl->buf_count));

	return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
				  le32_to_cpu(ice_buf_tbl->buf_count));
}
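
/* Illustrative sketch (not part of the original flow): find the ice
 * configuration segment in a package header and download it, mirroring two
 * of the steps that ice_init_pkg() performs further below.
 */
static enum ice_status __maybe_unused
ice_download_example(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
{
	struct ice_seg *seg;

	seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE,
						    pkg_hdr);
	if (!seg)
		return ICE_ERR_CFG;

	return ice_download_pkg(hw, seg);
}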

/**
 * ice_init_pkg_info
 * @hw: pointer to the hardware structure
 * @pkg_hdr: pointer to the driver's package hdr
 *
 * Saves off the package details into the HW structure.
 */
static enum ice_status
ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
{
	struct ice_global_metadata_seg *meta_seg;
	struct ice_generic_seg_hdr *seg_hdr;

	if (!pkg_hdr)
		return ICE_ERR_PARAM;

	meta_seg = (struct ice_global_metadata_seg *)
		ice_find_seg_in_pkg(hw, SEGMENT_TYPE_METADATA, pkg_hdr);
	if (meta_seg) {
		hw->pkg_ver = meta_seg->pkg_ver;
		memcpy(hw->pkg_name, meta_seg->pkg_name, sizeof(hw->pkg_name));

		ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
			  meta_seg->pkg_ver.major, meta_seg->pkg_ver.minor,
			  meta_seg->pkg_ver.update, meta_seg->pkg_ver.draft,
			  meta_seg->pkg_name);
	} else {
		ice_debug(hw, ICE_DBG_INIT,
			  "Did not find metadata segment in driver package\n");
		return ICE_ERR_CFG;
	}

	seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
	if (seg_hdr) {
		hw->ice_pkg_ver = seg_hdr->seg_ver;
		memcpy(hw->ice_pkg_name, seg_hdr->seg_name,
		       sizeof(hw->ice_pkg_name));

		ice_debug(hw, ICE_DBG_PKG, "Ice Pkg: %d.%d.%d.%d, %s\n",
			  seg_hdr->seg_ver.major, seg_hdr->seg_ver.minor,
			  seg_hdr->seg_ver.update, seg_hdr->seg_ver.draft,
			  seg_hdr->seg_name);
	} else {
		ice_debug(hw, ICE_DBG_INIT,
			  "Did not find ice segment in driver package\n");
		return ICE_ERR_CFG;
	}

	return 0;
}

/**
 * ice_get_pkg_info
 * @hw: pointer to the hardware structure
 *
 * Store details of the package currently loaded in HW into the HW structure.
 */
static enum ice_status ice_get_pkg_info(struct ice_hw *hw)
{
	struct ice_aqc_get_pkg_info_resp *pkg_info;
	enum ice_status status;
	u16 size;
	u32 i;

	size = sizeof(*pkg_info) + (sizeof(pkg_info->pkg_info[0]) *
				    (ICE_PKG_CNT - 1));
	pkg_info = kzalloc(size, GFP_KERNEL);
	if (!pkg_info)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL);
	if (status)
		goto init_pkg_free_alloc;

	for (i = 0; i < le32_to_cpu(pkg_info->count); i++) {
#define ICE_PKG_FLAG_COUNT	4
		char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
		u8 place = 0;

		if (pkg_info->pkg_info[i].is_active) {
			flags[place++] = 'A';
			hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
			memcpy(hw->active_pkg_name,
			       pkg_info->pkg_info[i].name,
			       sizeof(hw->active_pkg_name));
			hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
		}
		if (pkg_info->pkg_info[i].is_active_at_boot)
			flags[place++] = 'B';
		if (pkg_info->pkg_info[i].is_modified)
			flags[place++] = 'M';
		if (pkg_info->pkg_info[i].is_in_nvm)
			flags[place++] = 'N';

		ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n",
			  i, pkg_info->pkg_info[i].ver.major,
			  pkg_info->pkg_info[i].ver.minor,
			  pkg_info->pkg_info[i].ver.update,
			  pkg_info->pkg_info[i].ver.draft,
			  pkg_info->pkg_info[i].name, flags);
	}

init_pkg_free_alloc:
	kfree(pkg_info);

	return status;
}

/**
 * ice_verify_pkg - verify package
 * @pkg: pointer to the package buffer
 * @len: size of the package buffer
 *
 * Verifies various attributes of the package file, including length, format
 * version, and the requirement of at least one segment.
 */
static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
{
	u32 seg_count;
	u32 i;

	if (len < sizeof(*pkg))
		return ICE_ERR_BUF_TOO_SHORT;

	if (pkg->format_ver.major != ICE_PKG_FMT_VER_MAJ ||
	    pkg->format_ver.minor != ICE_PKG_FMT_VER_MNR ||
	    pkg->format_ver.update != ICE_PKG_FMT_VER_UPD ||
	    pkg->format_ver.draft != ICE_PKG_FMT_VER_DFT)
		return ICE_ERR_CFG;

	/* pkg must have at least one segment */
	seg_count = le32_to_cpu(pkg->seg_count);
	if (seg_count < 1)
		return ICE_ERR_CFG;

	/* make sure segment array fits in package length */
	if (len < sizeof(*pkg) + ((seg_count - 1) * sizeof(pkg->seg_offset)))
		return ICE_ERR_BUF_TOO_SHORT;

	/* all segments must fit within length */
	for (i = 0; i < seg_count; i++) {
		u32 off = le32_to_cpu(pkg->seg_offset[i]);
		struct ice_generic_seg_hdr *seg;

		/* segment header must fit */
		if (len < off + sizeof(*seg))
			return ICE_ERR_BUF_TOO_SHORT;

		seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);

		/* segment body must fit */
		if (len < off + le32_to_cpu(seg->seg_size))
			return ICE_ERR_BUF_TOO_SHORT;
	}

	return 0;
}

/**
 * ice_free_seg - free package segment pointer
 * @hw: pointer to the hardware structure
 *
 * Frees the package segment pointer in the proper manner, depending on
 * whether the segment was allocated or just the passed in pointer was stored.
 */
void ice_free_seg(struct ice_hw *hw)
{
	if (hw->pkg_copy) {
		devm_kfree(ice_hw_to_dev(hw), hw->pkg_copy);
		hw->pkg_copy = NULL;
		hw->pkg_size = 0;
	}
	hw->seg = NULL;
}

/**
 * ice_init_pkg_regs - initialize additional package registers
 * @hw: pointer to the hardware structure
 */
static void ice_init_pkg_regs(struct ice_hw *hw)
{
#define ICE_SW_BLK_INP_MASK_L	0xFFFFFFFF
#define ICE_SW_BLK_INP_MASK_H	0x0000FFFF
#define ICE_SW_BLK_IDX	0

	/* setup Switch block input mask, which is 48-bits in two parts */
	wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
	wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
}

/**
 * ice_chk_pkg_version - check package version for compatibility with driver
 * @pkg_ver: pointer to a version structure to check
 *
 * Check to make sure that the package about to be downloaded is compatible
 * with the driver. To be compatible, the major and minor components of the
 * package version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
 * definitions.
 */
static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
{
	if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ ||
	    pkg_ver->minor != ICE_PKG_SUPP_VER_MNR)
		return ICE_ERR_NOT_SUPPORTED;

	return 0;
}

/**
 * ice_init_pkg - initialize/download package
 * @hw: pointer to the hardware structure
 * @buf: pointer to the package buffer
 * @len: size of the package buffer
 *
 * This function initializes a package. The package contains HW tables
 * required to do packet processing. First, the function extracts package
 * information such as version. Then it finds the ice configuration segment
 * within the package; this function then saves a copy of the segment pointer
 * within the supplied package buffer. Next, the function will cache any hints
 * from the package, followed by downloading the package itself. Note that if
 * a previous PF driver has already downloaded the package successfully, then
 * the current driver will not have to download the package again.
 *
 * The local package contents will be used to query default behavior and to
 * update specific sections of the HW's version of the package (e.g. to update
 * the parse graph to understand new protocols).
 *
 * This function stores a pointer to the package buffer memory, and it is
 * expected that the supplied buffer will not be freed immediately. If the
 * package buffer needs to be freed, such as when read from a file, use
 * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
 * case.
 */
enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
{
	struct ice_pkg_hdr *pkg;
	enum ice_status status;
	struct ice_seg *seg;

	if (!buf || !len)
		return ICE_ERR_PARAM;

	pkg = (struct ice_pkg_hdr *)buf;
	status = ice_verify_pkg(pkg, len);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
			  status);
		return status;
	}

	/* initialize package info */
	status = ice_init_pkg_info(hw, pkg);
	if (status)
		return status;

	/* before downloading the package, check package version for
	 * compatibility with driver
	 */
	status = ice_chk_pkg_version(&hw->pkg_ver);
	if (status)
		return status;

	/* find segment in given package */
	seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg);
	if (!seg) {
		ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
		return ICE_ERR_CFG;
	}

	/* download package */
	status = ice_download_pkg(hw, seg);
	if (status == ICE_ERR_AQ_NO_WORK) {
		ice_debug(hw, ICE_DBG_INIT,
			  "package previously loaded - no work.\n");
		status = 0;
	}

	/* Get information on the package currently loaded in HW, then make
	 * sure the driver is compatible with this version.
	 */
	if (!status) {
		status = ice_get_pkg_info(hw);
		if (!status)
			status = ice_chk_pkg_version(&hw->active_pkg_ver);
	}

	if (!status) {
		hw->seg = seg;
		/* on successful package download update other required
		 * registers to support the package and fill HW tables
		 * with package content.
		 */
		ice_init_pkg_regs(hw);
		ice_fill_blk_tbls(hw);
	} else {
		ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
			  status);
	}

	return status;
}

/**
 * ice_copy_and_init_pkg - initialize/download a copy of the package
 * @hw: pointer to the hardware structure
 * @buf: pointer to the package buffer
 * @len: size of the package buffer
 *
 * This function copies the package buffer, and then calls ice_init_pkg() to
 * initialize the copied package contents.
 *
 * The copying is necessary if the package buffer supplied is constant, or if
 * the memory may disappear shortly after calling this function.
 *
 * If the package buffer resides in the data segment and can be modified, the
 * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg().
 *
 * However, if the package buffer needs to be copied first, such as when being
 * read from a file, the caller should use ice_copy_and_init_pkg().
 *
 * This function will first copy the package buffer before calling
 * ice_init_pkg(). The caller is free to immediately destroy the original
 * package buffer, as the new copy will be managed by this function and
 * related routines.
 */
enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
{
	enum ice_status status;
	u8 *buf_copy;

	if (!buf || !len)
		return ICE_ERR_PARAM;

	buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL);
	if (!buf_copy)
		return ICE_ERR_NO_MEMORY;

	status = ice_init_pkg(hw, buf_copy, len);
	if (status) {
		/* Free the copy, since we failed to initialize the package */
		devm_kfree(ice_hw_to_dev(hw), buf_copy);
	} else {
		/* Track the copied pkg so we can free it later */
		hw->pkg_copy = buf_copy;
		hw->pkg_size = len;
	}

	return status;
}
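
/* Illustrative sketch (assumes the caller obtained the image with
 * request_firmware() from <linux/firmware.h>): firmware data is const and is
 * released after use, which is exactly the case that calls for
 * ice_copy_and_init_pkg() rather than ice_init_pkg().
 */
static enum ice_status __maybe_unused
ice_load_pkg_example(struct ice_hw *hw, const struct firmware *fw)
{
	return ice_copy_and_init_pkg(hw, fw->data, fw->size);
}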

/**
 * ice_pkg_buf_alloc
 * @hw: pointer to the HW structure
 *
 * Allocates a package buffer and returns a pointer to the buffer header.
 * Note: all package contents must be in Little Endian form.
 */
static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
{
	struct ice_buf_build *bld;
	struct ice_buf_hdr *buf;

	bld = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*bld), GFP_KERNEL);
	if (!bld)
		return NULL;

	buf = (struct ice_buf_hdr *)bld;
	buf->data_end = cpu_to_le16(offsetof(struct ice_buf_hdr,
					     section_entry));
	return bld;
}

/**
 * ice_pkg_buf_free
 * @hw: pointer to the HW structure
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 *
 * Frees a package buffer
 */
static void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
{
	devm_kfree(ice_hw_to_dev(hw), bld);
}

/**
 * ice_pkg_buf_reserve_section
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 * @count: the number of sections to reserve
 *
 * Reserves one or more section table entries in a package buffer. This
 * routine can be called multiple times as long as the calls are made before
 * calling ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() has
 * been called, the number of sections that can be allocated can no longer be
 * increased; not using all reserved sections is fine, but this will result in
 * some wasted space in the buffer.
 * Note: all package contents must be in Little Endian form.
 */
static enum ice_status
ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
{
	struct ice_buf_hdr *buf;
	u16 section_count;
	u16 data_end;

	if (!bld)
		return ICE_ERR_PARAM;

	buf = (struct ice_buf_hdr *)&bld->buf;

	/* already an active section, can't increase table size */
	section_count = le16_to_cpu(buf->section_count);
	if (section_count > 0)
		return ICE_ERR_CFG;

	if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
		return ICE_ERR_CFG;
	bld->reserved_section_table_entries += count;

	data_end = le16_to_cpu(buf->data_end) +
		(count * sizeof(buf->section_entry[0]));
	buf->data_end = cpu_to_le16(data_end);

	return 0;
}

/**
 * ice_pkg_buf_alloc_section
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 * @type: the section type value
 * @size: the size of the section to reserve (in bytes)
 *
 * Reserves memory in the buffer for a section's content and updates the
 * buffer's status accordingly. This routine returns a pointer to the first
 * byte of the section start within the buffer, which is used to fill in the
 * section contents.
 * Note: all package contents must be in Little Endian form.
 */
static void *
ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
{
	struct ice_buf_hdr *buf;
	u16 sect_count;
	u16 data_end;

	if (!bld || !type || !size)
		return NULL;

	buf = (struct ice_buf_hdr *)&bld->buf;

	/* check for enough space left in buffer */
	data_end = le16_to_cpu(buf->data_end);

	/* section start must align on 4 byte boundary */
	data_end = ALIGN(data_end, 4);

	if ((data_end + size) > ICE_MAX_S_DATA_END)
		return NULL;

	/* check for more available section table entries */
	sect_count = le16_to_cpu(buf->section_count);
	if (sect_count < bld->reserved_section_table_entries) {
		void *section_ptr = ((u8 *)buf) + data_end;

		buf->section_entry[sect_count].offset = cpu_to_le16(data_end);
		buf->section_entry[sect_count].size = cpu_to_le16(size);
		buf->section_entry[sect_count].type = cpu_to_le32(type);

		data_end += size;
		buf->data_end = cpu_to_le16(data_end);

		buf->section_count = cpu_to_le16(sect_count + 1);
		return section_ptr;
	}

	/* no free section table entries */
	return NULL;
}

/**
 * ice_pkg_buf_get_active_sections
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 *
 * Returns the number of active sections. Before using the package buffer
 * in an update package command, the caller should make sure that there is at
 * least one active section - otherwise, the buffer is not legal and should
 * not be used.
 * Note: all package contents must be in Little Endian form.
 */
static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
{
	struct ice_buf_hdr *buf;

	if (!bld)
		return 0;

	buf = (struct ice_buf_hdr *)&bld->buf;
	return le16_to_cpu(buf->section_count);
}

/**
 * ice_pkg_buf
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 *
 * Return a pointer to the buffer's header
 */
static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
{
	if (!bld)
		return NULL;

	return &bld->buf;
}
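
/* Illustrative sketch of the build flow above (section type and size are
 * arbitrary examples): reserve section table entries before allocating any
 * section data, then hand the finished buffer to an update command.
 */
static void __maybe_unused ice_pkg_buf_example(struct ice_hw *hw)
{
	struct ice_buf_build *bld;
	void *sect;

	bld = ice_pkg_buf_alloc(hw);
	if (!bld)
		return;

	if (ice_pkg_buf_reserve_section(bld, 1))
		goto out;

	sect = ice_pkg_buf_alloc_section(bld, ICE_SID_XLT1_SW, 64);
	if (!sect)
		goto out;

	/* fill 'sect' with 64 bytes of little-endian section content, then: */
	if (ice_pkg_buf_get_active_sections(bld))
		ice_update_pkg(hw, ice_pkg_buf(bld), 1);
out:
	ice_pkg_buf_free(hw, bld);
}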

/* PTG Management */

/**
 * ice_ptg_find_ptype - Search for packet type group using packet type (ptype)
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @ptype: the ptype to search for
 * @ptg: pointer to variable that receives the PTG
 *
 * This function will search the PTGs for a particular ptype, returning the
 * PTG ID that contains it through the PTG parameter, with the value of
 * ICE_DEFAULT_PTG (0) meaning it is part of the default PTG.
 */
static enum ice_status
ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg)
{
	if (ptype >= ICE_XLT1_CNT || !ptg)
		return ICE_ERR_PARAM;

	*ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg;
	return 0;
}

/**
 * ice_ptg_alloc_val - Allocates a new packet type group ID by value
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @ptg: the PTG to allocate
 *
 * This function allocates a given packet type group ID specified by the PTG
 * parameter.
 */
static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg)
{
	hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true;
}

/**
 * ice_ptg_remove_ptype - Removes ptype from a particular packet type group
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @ptype: the ptype to remove
 * @ptg: the PTG to remove the ptype from
 *
 * This function will remove the ptype from the specific PTG, and move it to
 * the default PTG (ICE_DEFAULT_PTG).
 */
static enum ice_status
ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
{
	struct ice_ptg_ptype **ch;
	struct ice_ptg_ptype *p;

	if (ptype > ICE_XLT1_CNT - 1)
		return ICE_ERR_PARAM;

	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use)
		return ICE_ERR_DOES_NOT_EXIST;

	/* Should not happen if .in_use is set, bad config */
	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype)
		return ICE_ERR_CFG;

	/* find the ptype within this PTG, and bypass the link over it */
	p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
	ch = &hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
	while (p) {
		if (ptype == (p - hw->blk[blk].xlt1.ptypes)) {
			*ch = p->next_ptype;
			break;
		}

		ch = &p->next_ptype;
		p = p->next_ptype;
	}

	hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG;
	hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL;

	return 0;
}

/**
 * ice_ptg_add_mv_ptype - Adds/moves ptype to a particular packet type group
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @ptype: the ptype to add or move
 * @ptg: the PTG to add or move the ptype to
 *
 * This function will either add or move a ptype to a particular PTG depending
 * on whether the ptype is already part of another group. Note that using a
 * destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
 * default PTG.
 */
static enum ice_status
ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
{
	enum ice_status status;
	u8 original_ptg;

	if (ptype > ICE_XLT1_CNT - 1)
		return ICE_ERR_PARAM;

	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG)
		return ICE_ERR_DOES_NOT_EXIST;

	status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg);
	if (status)
		return status;

	/* Is ptype already in the correct PTG? */
	if (original_ptg == ptg)
		return 0;

	/* Remove from original PTG and move back to the default PTG */
	if (original_ptg != ICE_DEFAULT_PTG)
		ice_ptg_remove_ptype(hw, blk, ptype, original_ptg);

	/* Moving to default PTG? Then we're done with this request */
	if (ptg == ICE_DEFAULT_PTG)
		return 0;

	/* Add ptype to PTG at beginning of list */
	hw->blk[blk].xlt1.ptypes[ptype].next_ptype =
		hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
	hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype =
		&hw->blk[blk].xlt1.ptypes[ptype];

	hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg;
	hw->blk[blk].xlt1.t[ptype] = ptg;

	return 0;
}
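
/* Illustrative sketch (PTG and ptype values are arbitrary examples): mark a
 * PTG in use, move a ptype into it, and read the mapping back.
 */
static void __maybe_unused ice_ptg_example(struct ice_hw *hw)
{
	u8 ptg = ICE_DEFAULT_PTG;

	ice_ptg_alloc_val(hw, ICE_BLK_RSS, 1);
	if (!ice_ptg_add_mv_ptype(hw, ICE_BLK_RSS, 10, 1))
		ice_ptg_find_ptype(hw, ICE_BLK_RSS, 10, &ptg); /* ptg == 1 */
}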

/* Block / table size info */
struct ice_blk_size_details {
	u16 xlt1;		/* # XLT1 entries */
	u16 xlt2;		/* # XLT2 entries */
	u16 prof_tcam;		/* # profile ID TCAM entries */
	u16 prof_id;		/* # profile IDs */
	u8 prof_cdid_bits;	/* # CDID one-hot bits used in key */
	u16 prof_redir;		/* # profile redirection entries */
	u16 es;			/* # extraction sequence entries */
	u16 fvw;		/* # field vector words */
	u8 overwrite;		/* overwrite existing entries allowed */
	u8 reverse;		/* reverse FV order */
};

static const struct ice_blk_size_details blk_sizes[ICE_BLK_COUNT] = {
	/**
	 * Table Definitions
	 * XLT1 - Number of entries in XLT1 table
	 * XLT2 - Number of entries in XLT2 table
	 * TCAM - Number of entries in the Profile ID TCAM table
	 * CDID - Control Domain ID of the hardware block
	 * PRED - Number of entries in the Profile Redirection Table
	 * FV   - Number of entries in the Field Vector
	 * FVW  - Width (in WORDs) of the Field Vector
	 * OVR  - Overwrite existing table entries
	 * REV  - Reverse FV
	 */
	/*          XLT1        , XLT2        ,TCAM, PID,CDID,PRED,  FV, FVW */
	/*          Overwrite   , Reverse FV */
	/* SW  */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 256,   0, 256, 256,  48,
		    false, false },
	/* ACL */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0, 128, 128,  32,
		    false, false },
	/* FD  */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0, 128, 128,  24,
		    false, true },
	/* RSS */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0, 128, 128,  24,
		    true, true },
	/* PE  */ { ICE_XLT1_CNT, ICE_XLT2_CNT,  64,  32,   0,  32,  32,  24,
		    false, false },
};

enum ice_sid_all {
	ICE_SID_XLT1_OFF = 0,
	ICE_SID_XLT2_OFF,
	ICE_SID_PR_OFF,
	ICE_SID_PR_REDIR_OFF,
	ICE_SID_ES_OFF,
	ICE_SID_OFF_COUNT,
};

/* Characteristic handling */

/**
 * ice_match_prop_lst - determine if properties of two lists match
 * @list1: first properties list
 * @list2: second properties list
 *
 * Count, cookies, and the order must all match for the two lists to be
 * considered equivalent.
 */
static bool
ice_match_prop_lst(struct list_head *list1, struct list_head *list2)
{
	struct ice_vsig_prof *tmp1;
	struct ice_vsig_prof *tmp2;
	u16 chk_count = 0;
	u16 count = 0;

	/* compare counts */
	list_for_each_entry(tmp1, list1, list)
		count++;
	list_for_each_entry(tmp2, list2, list)
		chk_count++;
	if (!count || count != chk_count)
		return false;

	tmp1 = list_first_entry(list1, struct ice_vsig_prof, list);
	tmp2 = list_first_entry(list2, struct ice_vsig_prof, list);

	/* profile cookies must compare, and in the exact same order to take
	 * into account priority
	 */
	while (count--) {
		if (tmp2->profile_cookie != tmp1->profile_cookie)
			return false;

		tmp1 = list_next_entry(tmp1, list);
		tmp2 = list_next_entry(tmp2, list);
	}

	return true;
}

/* VSIG Management */

/**
 * ice_vsig_find_vsi - find a VSIG that contains a specified VSI
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @vsi: VSI of interest
 * @vsig: pointer to receive the VSI group
 *
 * This function will lookup the VSI entry in the XLT2 list and return
 * the VSI group it is associated with.
 */
static enum ice_status
ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig)
{
	if (!vsig || vsi >= ICE_MAX_VSI)
		return ICE_ERR_PARAM;

	/* As long as there's a default or valid VSIG associated with the
	 * input VSI, the function returns success. Any handling of VSIG will
	 * be done by the following add, update or remove functions.
	 */
	*vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;

	return 0;
}

/**
 * ice_vsig_alloc_val - allocate a new VSIG by value
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @vsig: the VSIG to allocate
 *
 * This function will allocate a given VSIG specified by the VSIG parameter.
 */
static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig)
{
	u16 idx = vsig & ICE_VSIG_IDX_M;

	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) {
		INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
		hw->blk[blk].xlt2.vsig_tbl[idx].in_use = true;
	}

	return ICE_VSIG_VALUE(idx, hw->pf_id);
}

/**
 * ice_vsig_alloc - Finds a free entry and allocates a new VSIG
 * @hw: pointer to the hardware structure
 * @blk: HW block
 *
 * This function will iterate through the VSIG list, mark the first unused
 * entry as used, and return that VSIG value.
 */
static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk)
{
	u16 i;

	for (i = 1; i < ICE_MAX_VSIGS; i++)
		if (!hw->blk[blk].xlt2.vsig_tbl[i].in_use)
			return ice_vsig_alloc_val(hw, blk, i);

	return ICE_DEFAULT_VSIG;
}

/**
 * ice_find_dup_props_vsig - find VSI group with a specified set of properties
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @chs: characteristic list
 * @vsig: returns the VSIG with the matching profiles, if found
 *
 * Each VSIG is associated with a characteristic set; i.e. all VSIs under
 * a group have the same characteristic set. To check whether there exists a
 * VSIG which has the same characteristics as the input characteristics, this
 * function will iterate through the XLT2 list and return the VSIG that has a
 * matching configuration. In order to make sure that priorities are accounted
 * for, the list must match exactly, including the order in which the
 * characteristics are listed.
 */
static enum ice_status
ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
			struct list_head *chs, u16 *vsig)
{
	struct ice_xlt2 *xlt2 = &hw->blk[blk].xlt2;
	u16 i;

	for (i = 0; i < xlt2->count; i++)
		if (xlt2->vsig_tbl[i].in_use &&
		    ice_match_prop_lst(chs, &xlt2->vsig_tbl[i].prop_lst)) {
			*vsig = ICE_VSIG_VALUE(i, hw->pf_id);
			return 0;
		}

	return ICE_ERR_DOES_NOT_EXIST;
}

/**
 * ice_vsig_free - free VSI group
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @vsig: VSIG to remove
 *
 * The function will remove all VSIs associated with the input VSIG and move
 * them to the DEFAULT_VSIG and mark the VSIG available.
 */
static enum ice_status
ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
{
	struct ice_vsig_prof *dtmp, *del;
	struct ice_vsig_vsi *vsi_cur;
	u16 idx;

	idx = vsig & ICE_VSIG_IDX_M;
	if (idx >= ICE_MAX_VSIGS)
		return ICE_ERR_PARAM;

	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
		return ICE_ERR_DOES_NOT_EXIST;

	hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false;

	vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
	/* If the VSIG has at least 1 VSI then iterate through the
	 * list and remove the VSIs before deleting the group.
	 */
	if (vsi_cur) {
		/* remove all VSIs associated with this VSIG XLT2 entry */
		do {
			struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;

			vsi_cur->vsig = ICE_DEFAULT_VSIG;
			vsi_cur->changed = 1;
			vsi_cur->next_vsi = NULL;
			vsi_cur = tmp;
		} while (vsi_cur);

		/* NULL terminate head of VSI list */
		hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = NULL;
	}

	/* free characteristic list */
	list_for_each_entry_safe(del, dtmp,
				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
				 list) {
		list_del(&del->list);
		devm_kfree(ice_hw_to_dev(hw), del);
	}

	/* if VSIG characteristic list was cleared for reset
	 * re-initialize the list head
	 */
	INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);

	return 0;
}

/**
 * ice_vsig_remove_vsi - remove VSI from VSIG
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @vsi: VSI to remove
 * @vsig: VSI group to remove from
 *
 * The function will remove the input VSI from its VSI group and move it
 * to the DEFAULT_VSIG.
 */
static enum ice_status
ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
{
	struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt;
	u16 idx;

	idx = vsig & ICE_VSIG_IDX_M;

	if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
		return ICE_ERR_PARAM;

	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
		return ICE_ERR_DOES_NOT_EXIST;

	/* entry already in default VSIG, don't have to remove */
	if (idx == ICE_DEFAULT_VSIG)
		return 0;

	vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
	if (!(*vsi_head))
		return ICE_ERR_CFG;

	vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi];
	vsi_cur = (*vsi_head);

	/* iterate the VSI list, skip over the entry to be removed */
	while (vsi_cur) {
		if (vsi_tgt == vsi_cur) {
			(*vsi_head) = vsi_cur->next_vsi;
			break;
		}
		vsi_head = &vsi_cur->next_vsi;
		vsi_cur = vsi_cur->next_vsi;
	}

	/* verify if VSI was removed from group list */
	if (!vsi_cur)
		return ICE_ERR_DOES_NOT_EXIST;

	vsi_cur->vsig = ICE_DEFAULT_VSIG;
	vsi_cur->changed = 1;
	vsi_cur->next_vsi = NULL;

	return 0;
}

/**
 * ice_vsig_add_mv_vsi - add or move a VSI to a VSI group
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @vsi: VSI to move
 * @vsig: destination VSI group
 *
 * This function will move or add the input VSI to the target VSIG.
 * The function will find the original VSIG the VSI belongs to and
 * move the entry to the DEFAULT_VSIG, update the original VSIG and
 * then move entry to the new VSIG.
 */
static enum ice_status
ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
{
	struct ice_vsig_vsi *tmp;
	enum ice_status status;
	u16 orig_vsig, idx;

	idx = vsig & ICE_VSIG_IDX_M;

	if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
		return ICE_ERR_PARAM;

	/* if the VSIG is not in use and is not the default VSIG, then this
	 * VSIG doesn't exist.
	 */
	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use &&
	    vsig != ICE_DEFAULT_VSIG)
		return ICE_ERR_DOES_NOT_EXIST;

	status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
	if (status)
		return status;

	/* no update required if VSIGs match */
	if (orig_vsig == vsig)
		return 0;

	if (orig_vsig != ICE_DEFAULT_VSIG) {
		/* remove entry from orig_vsig and add to default VSIG */
		status = ice_vsig_remove_vsi(hw, blk, vsi, orig_vsig);
		if (status)
			return status;
	}

	if (idx == ICE_DEFAULT_VSIG)
		return 0;

	/* Create VSI entry and add VSIG and prop_mask values */
	hw->blk[blk].xlt2.vsis[vsi].vsig = vsig;
	hw->blk[blk].xlt2.vsis[vsi].changed = 1;

	/* Add new entry to the head of the VSIG list */
	tmp = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
	hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi =
		&hw->blk[blk].xlt2.vsis[vsi];
	hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp;
	hw->blk[blk].xlt2.t[vsi] = vsig;

	return 0;
}
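
/* Illustrative sketch: allocate a fresh VSIG and move a VSI into it. The
 * VSI number is caller-supplied; ICE_DEFAULT_VSIG signals that no free VSIG
 * entry was available.
 */
static enum ice_status __maybe_unused
ice_vsig_example(struct ice_hw *hw, u16 vsi)
{
	u16 vsig = ice_vsig_alloc(hw, ICE_BLK_RSS);

	if (vsig == ICE_DEFAULT_VSIG)
		return ICE_ERR_NO_MEMORY;

	return ice_vsig_add_mv_vsi(hw, ICE_BLK_RSS, vsi, vsig);
}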
1886 */ 1887 static enum ice_status 1888 ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx) 1889 { 1890 u16 res_type; 1891 1892 if (!ice_tcam_ent_rsrc_type(blk, &res_type)) 1893 return ICE_ERR_PARAM; 1894 1895 return ice_free_hw_res(hw, res_type, 1, &tcam_idx); 1896 } 1897 1898 /** 1899 * ice_alloc_prof_id - allocate profile ID 1900 * @hw: pointer to the HW struct 1901 * @blk: the block to allocate the profile ID for 1902 * @prof_id: pointer to variable to receive the profile ID 1903 * 1904 * This function allocates a new profile ID, which also corresponds to a Field 1905 * Vector (Extraction Sequence) entry. 1906 */ 1907 static enum ice_status 1908 ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id) 1909 { 1910 enum ice_status status; 1911 u16 res_type; 1912 u16 get_prof; 1913 1914 if (!ice_prof_id_rsrc_type(blk, &res_type)) 1915 return ICE_ERR_PARAM; 1916 1917 status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof); 1918 if (!status) 1919 *prof_id = (u8)get_prof; 1920 1921 return status; 1922 } 1923 1924 /** 1925 * ice_free_prof_id - free profile ID 1926 * @hw: pointer to the HW struct 1927 * @blk: the block from which to free the profile ID 1928 * @prof_id: the profile ID to free 1929 * 1930 * This function frees a profile ID, which also corresponds to a Field Vector. 1931 */ 1932 static enum ice_status 1933 ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id) 1934 { 1935 u16 tmp_prof_id = (u16)prof_id; 1936 u16 res_type; 1937 1938 if (!ice_prof_id_rsrc_type(blk, &res_type)) 1939 return ICE_ERR_PARAM; 1940 1941 return ice_free_hw_res(hw, res_type, 1, &tmp_prof_id); 1942 } 1943 1944 /** 1945 * ice_prof_inc_ref - increment reference count for profile 1946 * @hw: pointer to the HW struct 1947 * @blk: the block in which the profile resides 1948 * @prof_id: the profile ID for which to increment the reference count 1949 */ 1950 static enum ice_status 1951 ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id) 1952 { 1953 if (prof_id >= hw->blk[blk].es.count) 1954 return ICE_ERR_PARAM; 1955 1956 hw->blk[blk].es.ref_count[prof_id]++; 1957 1958 return 0; 1959 } 1960 1961 /** 1962 * ice_write_es - write an extraction sequence to hardware 1963 * @hw: pointer to the HW struct 1964 * @blk: the block in which to write the extraction sequence 1965 * @prof_id: the profile ID to write 1966 * @fv: pointer to the extraction sequence to write - NULL to clear extraction 1967 */ 1968 static void 1969 ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id, 1970 struct ice_fv_word *fv) 1971 { 1972 u16 off; 1973 1974 off = prof_id * hw->blk[blk].es.fvw; 1975 if (!fv) { 1976 memset(&hw->blk[blk].es.t[off], 0, 1977 hw->blk[blk].es.fvw * sizeof(*fv)); 1978 hw->blk[blk].es.written[prof_id] = false; 1979 } else { 1980 memcpy(&hw->blk[blk].es.t[off], fv, 1981 hw->blk[blk].es.fvw * sizeof(*fv)); 1982 } 1983 } 1984 1985 /** 1986 * ice_prof_dec_ref - decrement reference count for profile 1987 * @hw: pointer to the HW struct 1988 * @blk: the block in which the profile resides 1989 * @prof_id: the profile ID for which to decrement the reference count 1990 */ 1991 static enum ice_status 1992 ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id) 1993 { 1994 if (prof_id >= hw->blk[blk].es.count) 1995 return ICE_ERR_PARAM; 1996 1997 if (hw->blk[blk].es.ref_count[prof_id] > 0) { 1998 if (!--hw->blk[blk].es.ref_count[prof_id]) { 1999 ice_write_es(hw, blk, prof_id, NULL); 2000 return ice_free_prof_id(hw, blk, prof_id); 2001 }
2002 } 2003 2004 return 0; 2005 } 2006 2007 /* Block / table section IDs */ 2008 static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = { 2009 /* SWITCH */ 2010 { ICE_SID_XLT1_SW, 2011 ICE_SID_XLT2_SW, 2012 ICE_SID_PROFID_TCAM_SW, 2013 ICE_SID_PROFID_REDIR_SW, 2014 ICE_SID_FLD_VEC_SW 2015 }, 2016 2017 /* ACL */ 2018 { ICE_SID_XLT1_ACL, 2019 ICE_SID_XLT2_ACL, 2020 ICE_SID_PROFID_TCAM_ACL, 2021 ICE_SID_PROFID_REDIR_ACL, 2022 ICE_SID_FLD_VEC_ACL 2023 }, 2024 2025 /* FD */ 2026 { ICE_SID_XLT1_FD, 2027 ICE_SID_XLT2_FD, 2028 ICE_SID_PROFID_TCAM_FD, 2029 ICE_SID_PROFID_REDIR_FD, 2030 ICE_SID_FLD_VEC_FD 2031 }, 2032 2033 /* RSS */ 2034 { ICE_SID_XLT1_RSS, 2035 ICE_SID_XLT2_RSS, 2036 ICE_SID_PROFID_TCAM_RSS, 2037 ICE_SID_PROFID_REDIR_RSS, 2038 ICE_SID_FLD_VEC_RSS 2039 }, 2040 2041 /* PE */ 2042 { ICE_SID_XLT1_PE, 2043 ICE_SID_XLT2_PE, 2044 ICE_SID_PROFID_TCAM_PE, 2045 ICE_SID_PROFID_REDIR_PE, 2046 ICE_SID_FLD_VEC_PE 2047 } 2048 }; 2049 2050 /** 2051 * ice_init_sw_xlt1_db - init software XLT1 database from HW tables 2052 * @hw: pointer to the hardware structure 2053 * @blk: the HW block to initialize 2054 */ 2055 static void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk) 2056 { 2057 u16 pt; 2058 2059 for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) { 2060 u8 ptg; 2061 2062 ptg = hw->blk[blk].xlt1.t[pt]; 2063 if (ptg != ICE_DEFAULT_PTG) { 2064 ice_ptg_alloc_val(hw, blk, ptg); 2065 ice_ptg_add_mv_ptype(hw, blk, pt, ptg); 2066 } 2067 } 2068 } 2069 2070 /** 2071 * ice_init_sw_xlt2_db - init software XLT2 database from HW tables 2072 * @hw: pointer to the hardware structure 2073 * @blk: the HW block to initialize 2074 */ 2075 static void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk) 2076 { 2077 u16 vsi; 2078 2079 for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) { 2080 u16 vsig; 2081 2082 vsig = hw->blk[blk].xlt2.t[vsi]; 2083 if (vsig) { 2084 ice_vsig_alloc_val(hw, blk, vsig); 2085 ice_vsig_add_mv_vsi(hw, blk, vsi, vsig); 2086 /* no changes at this time, since this has been 2087 * initialized from the original package 2088 */ 2089 hw->blk[blk].xlt2.vsis[vsi].changed = 0; 2090 } 2091 } 2092 } 2093 2094 /** 2095 * ice_init_sw_db - init software database from HW tables 2096 * @hw: pointer to the hardware structure 2097 */ 2098 static void ice_init_sw_db(struct ice_hw *hw) 2099 { 2100 u16 i; 2101 2102 for (i = 0; i < ICE_BLK_COUNT; i++) { 2103 ice_init_sw_xlt1_db(hw, (enum ice_block)i); 2104 ice_init_sw_xlt2_db(hw, (enum ice_block)i); 2105 } 2106 } 2107 2108 /** 2109 * ice_fill_tbl - Reads content of a single table type into database 2110 * @hw: pointer to the hardware structure 2111 * @block_id: Block ID of the table to copy 2112 * @sid: Section ID of the table to copy 2113 * 2114 * Will attempt to read the entire content of a given table of a single block 2115 * into the driver database. We assume that the buffer will always 2116 * be as large or larger than the data contained in the package. If 2117 * this condition is not met, there is most likely an error in the package 2118 * contents. 
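 *
 * As a worked example with hypothetical sizes: for dst_len of 1024 bytes and
 * a section arriving at offset 900 with sect_len of 200, the copy below is
 * clamped to 1024 - 900 = 124 bytes, so the destination table is never
 * overrun.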
2119 */ 2120 static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid) 2121 { 2122 u32 dst_len, sect_len, offset = 0; 2123 struct ice_prof_redir_section *pr; 2124 struct ice_prof_id_section *pid; 2125 struct ice_xlt1_section *xlt1; 2126 struct ice_xlt2_section *xlt2; 2127 struct ice_sw_fv_section *es; 2128 struct ice_pkg_enum state; 2129 u8 *src, *dst; 2130 void *sect; 2131 2132 /* if the HW segment pointer is NULL then the first iteration of 2133 * ice_pkg_enum_section() will fail. In this case the HW tables will 2134 * not be filled and the function returns without error. 2135 */ 2136 if (!hw->seg) { 2137 ice_debug(hw, ICE_DBG_PKG, "hw->seg is NULL, tables are not filled\n"); 2138 return; 2139 } 2140 2141 memset(&state, 0, sizeof(state)); 2142 2143 sect = ice_pkg_enum_section(hw->seg, &state, sid); 2144 2145 while (sect) { 2146 switch (sid) { 2147 case ICE_SID_XLT1_SW: 2148 case ICE_SID_XLT1_FD: 2149 case ICE_SID_XLT1_RSS: 2150 case ICE_SID_XLT1_ACL: 2151 case ICE_SID_XLT1_PE: 2152 xlt1 = (struct ice_xlt1_section *)sect; 2153 src = xlt1->value; 2154 sect_len = le16_to_cpu(xlt1->count) * 2155 sizeof(*hw->blk[block_id].xlt1.t); 2156 dst = hw->blk[block_id].xlt1.t; 2157 dst_len = hw->blk[block_id].xlt1.count * 2158 sizeof(*hw->blk[block_id].xlt1.t); 2159 break; 2160 case ICE_SID_XLT2_SW: 2161 case ICE_SID_XLT2_FD: 2162 case ICE_SID_XLT2_RSS: 2163 case ICE_SID_XLT2_ACL: 2164 case ICE_SID_XLT2_PE: 2165 xlt2 = (struct ice_xlt2_section *)sect; 2166 src = (__force u8 *)xlt2->value; 2167 sect_len = le16_to_cpu(xlt2->count) * 2168 sizeof(*hw->blk[block_id].xlt2.t); 2169 dst = (u8 *)hw->blk[block_id].xlt2.t; 2170 dst_len = hw->blk[block_id].xlt2.count * 2171 sizeof(*hw->blk[block_id].xlt2.t); 2172 break; 2173 case ICE_SID_PROFID_TCAM_SW: 2174 case ICE_SID_PROFID_TCAM_FD: 2175 case ICE_SID_PROFID_TCAM_RSS: 2176 case ICE_SID_PROFID_TCAM_ACL: 2177 case ICE_SID_PROFID_TCAM_PE: 2178 pid = (struct ice_prof_id_section *)sect; 2179 src = (u8 *)pid->entry; 2180 sect_len = le16_to_cpu(pid->count) * 2181 sizeof(*hw->blk[block_id].prof.t); 2182 dst = (u8 *)hw->blk[block_id].prof.t; 2183 dst_len = hw->blk[block_id].prof.count * 2184 sizeof(*hw->blk[block_id].prof.t); 2185 break; 2186 case ICE_SID_PROFID_REDIR_SW: 2187 case ICE_SID_PROFID_REDIR_FD: 2188 case ICE_SID_PROFID_REDIR_RSS: 2189 case ICE_SID_PROFID_REDIR_ACL: 2190 case ICE_SID_PROFID_REDIR_PE: 2191 pr = (struct ice_prof_redir_section *)sect; 2192 src = pr->redir_value; 2193 sect_len = le16_to_cpu(pr->count) * 2194 sizeof(*hw->blk[block_id].prof_redir.t); 2195 dst = hw->blk[block_id].prof_redir.t; 2196 dst_len = hw->blk[block_id].prof_redir.count * 2197 sizeof(*hw->blk[block_id].prof_redir.t); 2198 break; 2199 case ICE_SID_FLD_VEC_SW: 2200 case ICE_SID_FLD_VEC_FD: 2201 case ICE_SID_FLD_VEC_RSS: 2202 case ICE_SID_FLD_VEC_ACL: 2203 case ICE_SID_FLD_VEC_PE: 2204 es = (struct ice_sw_fv_section *)sect; 2205 src = (u8 *)es->fv; 2206 sect_len = (u32)(le16_to_cpu(es->count) * 2207 hw->blk[block_id].es.fvw) * 2208 sizeof(*hw->blk[block_id].es.t); 2209 dst = (u8 *)hw->blk[block_id].es.t; 2210 dst_len = (u32)(hw->blk[block_id].es.count * 2211 hw->blk[block_id].es.fvw) * 2212 sizeof(*hw->blk[block_id].es.t); 2213 break; 2214 default: 2215 return; 2216 } 2217 2218 /* if the section offset exceeds destination length, terminate 2219 * table fill. 2220 */ 2221 if (offset > dst_len) 2222 return; 2223 2224 /* if the sum of section size and offset exceeds the destination size 2225 * then we are out of bounds of the HW table size for that PF.
2226 * Clamp the section length to fill only the remaining table space 2227 * of that PF. 2228 */ 2229 if ((offset + sect_len) > dst_len) 2230 sect_len = dst_len - offset; 2231 2232 memcpy(dst + offset, src, sect_len); 2233 offset += sect_len; 2234 sect = ice_pkg_enum_section(NULL, &state, sid); 2235 } 2236 } 2237 2238 /** 2239 * ice_fill_blk_tbls - Read package context for tables 2240 * @hw: pointer to the hardware structure 2241 * 2242 * Reads the current package contents and populates the driver 2243 * database with the data iteratively for all advanced feature 2244 * blocks. Assumes that the HW tables have been allocated. 2245 */ 2246 void ice_fill_blk_tbls(struct ice_hw *hw) 2247 { 2248 u8 i; 2249 2250 for (i = 0; i < ICE_BLK_COUNT; i++) { 2251 enum ice_block blk_id = (enum ice_block)i; 2252 2253 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt1.sid); 2254 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt2.sid); 2255 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof.sid); 2256 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof_redir.sid); 2257 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].es.sid); 2258 } 2259 2260 ice_init_sw_db(hw); 2261 } 2262 2263 /** 2264 * ice_free_prof_map - free profile map 2265 * @hw: pointer to the hardware structure 2266 * @blk_idx: HW block index 2267 */ 2268 static void ice_free_prof_map(struct ice_hw *hw, u8 blk_idx) 2269 { 2270 struct ice_es *es = &hw->blk[blk_idx].es; 2271 struct ice_prof_map *del, *tmp; 2272 2273 mutex_lock(&es->prof_map_lock); 2274 list_for_each_entry_safe(del, tmp, &es->prof_map, list) { 2275 list_del(&del->list); 2276 devm_kfree(ice_hw_to_dev(hw), del); 2277 } 2278 INIT_LIST_HEAD(&es->prof_map); 2279 mutex_unlock(&es->prof_map_lock); 2280 } 2281 2282 /** 2283 * ice_free_flow_profs - free flow profile entries 2284 * @hw: pointer to the hardware structure 2285 * @blk_idx: HW block index 2286 */ 2287 static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx) 2288 { 2289 struct ice_flow_prof *p, *tmp; 2290 2291 mutex_lock(&hw->fl_profs_locks[blk_idx]); 2292 list_for_each_entry_safe(p, tmp, &hw->fl_profs[blk_idx], l_entry) { 2293 list_del(&p->l_entry); 2294 devm_kfree(ice_hw_to_dev(hw), p); 2295 } 2296 mutex_unlock(&hw->fl_profs_locks[blk_idx]); 2297 2298 /* if the driver is in reset and the tables are being cleared, 2299 * re-initialize the flow profile list heads 2300 */ 2301 INIT_LIST_HEAD(&hw->fl_profs[blk_idx]); 2302 } 2303 2304 /** 2305 * ice_free_vsig_tbl - free complete VSIG table entries 2306 * @hw: pointer to the hardware structure 2307 * @blk: the HW block on which to free the VSIG table entries 2308 */ 2309 static void ice_free_vsig_tbl(struct ice_hw *hw, enum ice_block blk) 2310 { 2311 u16 i; 2312 2313 if (!hw->blk[blk].xlt2.vsig_tbl) 2314 return; 2315 2316 for (i = 1; i < ICE_MAX_VSIGS; i++) 2317 if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) 2318 ice_vsig_free(hw, blk, i); 2319 } 2320 2321 /** 2322 * ice_free_hw_tbls - free hardware table memory 2323 * @hw: pointer to the hardware structure 2324 */ 2325 void ice_free_hw_tbls(struct ice_hw *hw) 2326 { 2327 struct ice_rss_cfg *r, *rt; 2328 u8 i; 2329 2330 for (i = 0; i < ICE_BLK_COUNT; i++) { 2331 if (hw->blk[i].is_list_init) { 2332 struct ice_es *es = &hw->blk[i].es; 2333 2334 ice_free_prof_map(hw, i); 2335 mutex_destroy(&es->prof_map_lock); 2336 2337 ice_free_flow_profs(hw, i); 2338 mutex_destroy(&hw->fl_profs_locks[i]); 2339 2340 hw->blk[i].is_list_init = false; 2341 } 2342 ice_free_vsig_tbl(hw, (enum ice_block)i); 2343 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptypes); 2344 devm_kfree(ice_hw_to_dev(hw),
hw->blk[i].xlt1.ptg_tbl); 2345 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.t); 2346 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.t); 2347 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsig_tbl); 2348 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsis); 2349 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof.t); 2350 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof_redir.t); 2351 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.t); 2352 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.ref_count); 2353 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.written); 2354 } 2355 2356 list_for_each_entry_safe(r, rt, &hw->rss_list_head, l_entry) { 2357 list_del(&r->l_entry); 2358 devm_kfree(ice_hw_to_dev(hw), r); 2359 } 2360 mutex_destroy(&hw->rss_locks); 2361 memset(hw->blk, 0, sizeof(hw->blk)); 2362 } 2363 2364 /** 2365 * ice_init_flow_profs - init flow profile locks and list heads 2366 * @hw: pointer to the hardware structure 2367 * @blk_idx: HW block index 2368 */ 2369 static void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx) 2370 { 2371 mutex_init(&hw->fl_profs_locks[blk_idx]); 2372 INIT_LIST_HEAD(&hw->fl_profs[blk_idx]); 2373 } 2374 2375 /** 2376 * ice_clear_hw_tbls - clear HW tables and flow profiles 2377 * @hw: pointer to the hardware structure 2378 */ 2379 void ice_clear_hw_tbls(struct ice_hw *hw) 2380 { 2381 u8 i; 2382 2383 for (i = 0; i < ICE_BLK_COUNT; i++) { 2384 struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir; 2385 struct ice_prof_tcam *prof = &hw->blk[i].prof; 2386 struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1; 2387 struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2; 2388 struct ice_es *es = &hw->blk[i].es; 2389 2390 if (hw->blk[i].is_list_init) { 2391 ice_free_prof_map(hw, i); 2392 ice_free_flow_profs(hw, i); 2393 } 2394 2395 ice_free_vsig_tbl(hw, (enum ice_block)i); 2396 2397 memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes)); 2398 memset(xlt1->ptg_tbl, 0, 2399 ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl)); 2400 memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t)); 2401 2402 memset(xlt2->vsis, 0, xlt2->count * sizeof(*xlt2->vsis)); 2403 memset(xlt2->vsig_tbl, 0, 2404 xlt2->count * sizeof(*xlt2->vsig_tbl)); 2405 memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t)); 2406 2407 memset(prof->t, 0, prof->count * sizeof(*prof->t)); 2408 memset(prof_redir->t, 0, 2409 prof_redir->count * sizeof(*prof_redir->t)); 2410 2411 memset(es->t, 0, es->count * sizeof(*es->t)); 2412 memset(es->ref_count, 0, es->count * sizeof(*es->ref_count)); 2413 memset(es->written, 0, es->count * sizeof(*es->written)); 2414 } 2415 } 2416 2417 /** 2418 * ice_init_hw_tbls - init hardware table memory 2419 * @hw: pointer to the hardware structure 2420 */ 2421 enum ice_status ice_init_hw_tbls(struct ice_hw *hw) 2422 { 2423 u8 i; 2424 2425 mutex_init(&hw->rss_locks); 2426 INIT_LIST_HEAD(&hw->rss_list_head); 2427 for (i = 0; i < ICE_BLK_COUNT; i++) { 2428 struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir; 2429 struct ice_prof_tcam *prof = &hw->blk[i].prof; 2430 struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1; 2431 struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2; 2432 struct ice_es *es = &hw->blk[i].es; 2433 u16 j; 2434 2435 if (hw->blk[i].is_list_init) 2436 continue; 2437 2438 ice_init_flow_profs(hw, i); 2439 mutex_init(&es->prof_map_lock); 2440 INIT_LIST_HEAD(&es->prof_map); 2441 hw->blk[i].is_list_init = true; 2442 2443 hw->blk[i].overwrite = blk_sizes[i].overwrite; 2444 es->reverse = blk_sizes[i].reverse; 2445 2446 xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF]; 2447 xlt1->count = blk_sizes[i].xlt1; 2448 2449 xlt1->ptypes = 
devm_kcalloc(ice_hw_to_dev(hw), xlt1->count, 2450 sizeof(*xlt1->ptypes), GFP_KERNEL); 2451 2452 if (!xlt1->ptypes) 2453 goto err; 2454 2455 xlt1->ptg_tbl = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_PTGS, 2456 sizeof(*xlt1->ptg_tbl), 2457 GFP_KERNEL); 2458 2459 if (!xlt1->ptg_tbl) 2460 goto err; 2461 2462 xlt1->t = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count, 2463 sizeof(*xlt1->t), GFP_KERNEL); 2464 if (!xlt1->t) 2465 goto err; 2466 2467 xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF]; 2468 xlt2->count = blk_sizes[i].xlt2; 2469 2470 xlt2->vsis = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count, 2471 sizeof(*xlt2->vsis), GFP_KERNEL); 2472 2473 if (!xlt2->vsis) 2474 goto err; 2475 2476 xlt2->vsig_tbl = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count, 2477 sizeof(*xlt2->vsig_tbl), 2478 GFP_KERNEL); 2479 if (!xlt2->vsig_tbl) 2480 goto err; 2481 2482 for (j = 0; j < xlt2->count; j++) 2483 INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst); 2484 2485 xlt2->t = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count, 2486 sizeof(*xlt2->t), GFP_KERNEL); 2487 if (!xlt2->t) 2488 goto err; 2489 2490 prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF]; 2491 prof->count = blk_sizes[i].prof_tcam; 2492 prof->max_prof_id = blk_sizes[i].prof_id; 2493 prof->cdid_bits = blk_sizes[i].prof_cdid_bits; 2494 prof->t = devm_kcalloc(ice_hw_to_dev(hw), prof->count, 2495 sizeof(*prof->t), GFP_KERNEL); 2496 2497 if (!prof->t) 2498 goto err; 2499 2500 prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF]; 2501 prof_redir->count = blk_sizes[i].prof_redir; 2502 prof_redir->t = devm_kcalloc(ice_hw_to_dev(hw), 2503 prof_redir->count, 2504 sizeof(*prof_redir->t), 2505 GFP_KERNEL); 2506 2507 if (!prof_redir->t) 2508 goto err; 2509 2510 es->sid = ice_blk_sids[i][ICE_SID_ES_OFF]; 2511 es->count = blk_sizes[i].es; 2512 es->fvw = blk_sizes[i].fvw; 2513 es->t = devm_kcalloc(ice_hw_to_dev(hw), 2514 (u32)(es->count * es->fvw), 2515 sizeof(*es->t), GFP_KERNEL); 2516 if (!es->t) 2517 goto err; 2518 2519 es->ref_count = devm_kcalloc(ice_hw_to_dev(hw), es->count, 2520 sizeof(*es->ref_count), 2521 GFP_KERNEL); 2522 2523 es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count, 2524 sizeof(*es->written), GFP_KERNEL); 2525 if (!es->ref_count || !es->written) 2526 goto err; 2527 } 2528 return 0; 2529 2530 err: 2531 ice_free_hw_tbls(hw); 2532 return ICE_ERR_NO_MEMORY; 2533 } 2534 2535 /** 2536 * ice_prof_gen_key - generate profile ID key 2537 * @hw: pointer to the HW struct 2538 * @blk: the block in which to write the profile ID 2539 * @ptg: packet type group (PTG) portion of key 2540 * @vsig: VSIG portion of key 2541 * @cdid: CDID portion of key 2542 * @flags: flag portion of key 2543 * @vl_msk: valid mask 2544 * @dc_msk: don't care mask 2545 * @nm_msk: never match mask 2546 * @key: output of profile ID key 2547 */ 2548 static enum ice_status 2549 ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig, 2550 u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ], 2551 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ], 2552 u8 key[ICE_TCAM_KEY_SZ]) 2553 { 2554 struct ice_prof_id_key inkey; 2555 2556 inkey.xlt1 = ptg; 2557 inkey.xlt2_cdid = cpu_to_le16(vsig); 2558 inkey.flags = cpu_to_le16(flags); 2559 2560 switch (hw->blk[blk].prof.cdid_bits) { 2561 case 0: 2562 break; 2563 case 2: 2564 #define ICE_CD_2_M 0xC000U 2565 #define ICE_CD_2_S 14 2566 inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_2_M); 2567 inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_2_S); 2568 break; 2569 case 4: 2570 #define ICE_CD_4_M 0xF000U 2571 #define ICE_CD_4_S 12 2572 inkey.xlt2_cdid &=
~cpu_to_le16(ICE_CD_4_M); 2573 inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_4_S); 2574 break; 2575 case 8: 2576 #define ICE_CD_8_M 0xFF00U 2577 #define ICE_CD_8_S 8 2578 inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_8_M); 2579 inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_8_S); 2580 break; 2581 default: 2582 ice_debug(hw, ICE_DBG_PKG, "Error in profile config\n"); 2583 break; 2584 } 2585 2586 return ice_set_key(key, ICE_TCAM_KEY_SZ, (u8 *)&inkey, vl_msk, dc_msk, 2587 nm_msk, 0, ICE_TCAM_KEY_SZ / 2); 2588 } 2589 2590 /** 2591 * ice_tcam_write_entry - write TCAM entry 2592 * @hw: pointer to the HW struct 2593 * @blk: the block in which to write the profile ID 2594 * @idx: the entry index to write to 2595 * @prof_id: profile ID 2596 * @ptg: packet type group (PTG) portion of key 2597 * @vsig: VSIG portion of key 2598 * @cdid: CDID portion of key 2599 * @flags: flag portion of key 2600 * @vl_msk: valid mask 2601 * @dc_msk: don't care mask 2602 * @nm_msk: never match mask 2603 */ 2604 static enum ice_status 2605 ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx, 2606 u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags, 2607 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ], 2608 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], 2609 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ]) 2610 { 2612 enum ice_status status; 2613 2614 status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk, 2615 dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key); 2616 if (!status) { 2617 hw->blk[blk].prof.t[idx].addr = cpu_to_le16(idx); 2618 hw->blk[blk].prof.t[idx].prof_id = prof_id; 2619 } 2620 2621 return status; 2622 } 2623 2624 /** 2625 * ice_vsig_get_ref - returns number of VSIs that belong to a VSIG 2626 * @hw: pointer to the hardware structure 2627 * @blk: HW block 2628 * @vsig: VSIG to query 2629 * @refs: pointer to variable to receive the reference count 2630 */ 2631 static enum ice_status 2632 ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs) 2633 { 2634 u16 idx = vsig & ICE_VSIG_IDX_M; 2635 struct ice_vsig_vsi *ptr; 2636 2637 *refs = 0; 2638 2639 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) 2640 return ICE_ERR_DOES_NOT_EXIST; 2641 2642 ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; 2643 while (ptr) { 2644 (*refs)++; 2645 ptr = ptr->next_vsi; 2646 } 2647 2648 return 0; 2649 } 2650 2651 /** 2652 * ice_has_prof_vsig - check to see if VSIG has a specific profile 2653 * @hw: pointer to the hardware structure 2654 * @blk: HW block 2655 * @vsig: VSIG to check against 2656 * @hdl: profile handle 2657 */ 2658 static bool 2659 ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl) 2660 { 2661 u16 idx = vsig & ICE_VSIG_IDX_M; 2662 struct ice_vsig_prof *ent; 2663 2664 list_for_each_entry(ent, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, 2665 list) 2666 if (ent->profile_cookie == hdl) 2667 return true; 2668 2669 ice_debug(hw, ICE_DBG_INIT, 2670 "Characteristic list for VSI group %d not found.\n", 2671 vsig); 2672 return false; 2673 } 2674 2675 /** 2676 * ice_prof_bld_es - build profile ID extraction sequence changes 2677 * @hw: pointer to the HW struct 2678 * @blk: hardware block 2679 * @bld: the update package buffer build to add to 2680 * @chgs: the list of changes to make in hardware 2681 */ 2682 static enum ice_status 2683 ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk, 2684 struct ice_buf_build *bld, struct list_head *chgs) 2685 { 2686 u16 vec_size = hw->blk[blk].es.fvw * sizeof(struct ice_fv_word); 2687 struct ice_chs_chg *tmp; 2688 2689 list_for_each_entry(tmp,
chgs, list_entry) 2690 if (tmp->type == ICE_PTG_ES_ADD && tmp->add_prof) { 2691 u16 off = tmp->prof_id * hw->blk[blk].es.fvw; 2692 struct ice_pkg_es *p; 2693 u32 id; 2694 2695 id = ice_sect_id(blk, ICE_VEC_TBL); 2696 p = (struct ice_pkg_es *) 2697 ice_pkg_buf_alloc_section(bld, id, sizeof(*p) + 2698 vec_size - 2699 sizeof(p->es[0])); 2700 2701 if (!p) 2702 return ICE_ERR_MAX_LIMIT; 2703 2704 p->count = cpu_to_le16(1); 2705 p->offset = cpu_to_le16(tmp->prof_id); 2706 2707 memcpy(p->es, &hw->blk[blk].es.t[off], vec_size); 2708 } 2709 2710 return 0; 2711 } 2712 2713 /** 2714 * ice_prof_bld_tcam - build profile ID TCAM changes 2715 * @hw: pointer to the HW struct 2716 * @blk: hardware block 2717 * @bld: the update package buffer build to add to 2718 * @chgs: the list of changes to make in hardware 2719 */ 2720 static enum ice_status 2721 ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk, 2722 struct ice_buf_build *bld, struct list_head *chgs) 2723 { 2724 struct ice_chs_chg *tmp; 2725 2726 list_for_each_entry(tmp, chgs, list_entry) 2727 if (tmp->type == ICE_TCAM_ADD && tmp->add_tcam_idx) { 2728 struct ice_prof_id_section *p; 2729 u32 id; 2730 2731 id = ice_sect_id(blk, ICE_PROF_TCAM); 2732 p = (struct ice_prof_id_section *) 2733 ice_pkg_buf_alloc_section(bld, id, sizeof(*p)); 2734 2735 if (!p) 2736 return ICE_ERR_MAX_LIMIT; 2737 2738 p->count = cpu_to_le16(1); 2739 p->entry[0].addr = cpu_to_le16(tmp->tcam_idx); 2740 p->entry[0].prof_id = tmp->prof_id; 2741 2742 memcpy(p->entry[0].key, 2743 &hw->blk[blk].prof.t[tmp->tcam_idx].key, 2744 sizeof(hw->blk[blk].prof.t->key)); 2745 } 2746 2747 return 0; 2748 } 2749 2750 /** 2751 * ice_prof_bld_xlt1 - build XLT1 changes 2752 * @blk: hardware block 2753 * @bld: the update package buffer build to add to 2754 * @chgs: the list of changes to make in hardware 2755 */ 2756 static enum ice_status 2757 ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld, 2758 struct list_head *chgs) 2759 { 2760 struct ice_chs_chg *tmp; 2761 2762 list_for_each_entry(tmp, chgs, list_entry) 2763 if (tmp->type == ICE_PTG_ES_ADD && tmp->add_ptg) { 2764 struct ice_xlt1_section *p; 2765 u32 id; 2766 2767 id = ice_sect_id(blk, ICE_XLT1); 2768 p = (struct ice_xlt1_section *) 2769 ice_pkg_buf_alloc_section(bld, id, sizeof(*p)); 2770 2771 if (!p) 2772 return ICE_ERR_MAX_LIMIT; 2773 2774 p->count = cpu_to_le16(1); 2775 p->offset = cpu_to_le16(tmp->ptype); 2776 p->value[0] = tmp->ptg; 2777 } 2778 2779 return 0; 2780 } 2781 2782 /** 2783 * ice_prof_bld_xlt2 - build XLT2 changes 2784 * @blk: hardware block 2785 * @bld: the update package buffer build to add to 2786 * @chgs: the list of changes to make in hardware 2787 */ 2788 static enum ice_status 2789 ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld, 2790 struct list_head *chgs) 2791 { 2792 struct ice_chs_chg *tmp; 2793 2794 list_for_each_entry(tmp, chgs, list_entry) { 2795 struct ice_xlt2_section *p; 2796 u32 id; 2797 2798 switch (tmp->type) { 2799 case ICE_VSIG_ADD: 2800 case ICE_VSI_MOVE: 2801 case ICE_VSIG_REM: 2802 id = ice_sect_id(blk, ICE_XLT2); 2803 p = (struct ice_xlt2_section *) 2804 ice_pkg_buf_alloc_section(bld, id, sizeof(*p)); 2805 2806 if (!p) 2807 return ICE_ERR_MAX_LIMIT; 2808 2809 p->count = cpu_to_le16(1); 2810 p->offset = cpu_to_le16(tmp->vsi); 2811 p->value[0] = cpu_to_le16(tmp->vsig); 2812 break; 2813 default: 2814 break; 2815 } 2816 } 2817 2818 return 0; 2819 } 2820 2821 /** 2822 * ice_upd_prof_hw - update hardware using the change list 2823 * @hw: pointer to the HW struct 2824 * @blk: 
hardware block 2825 * @chgs: the list of changes to make in hardware 2826 */ 2827 static enum ice_status 2828 ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk, 2829 struct list_head *chgs) 2830 { 2831 struct ice_buf_build *b; 2832 struct ice_chs_chg *tmp; 2833 enum ice_status status; 2834 u16 pkg_sects; 2835 u16 xlt1 = 0; 2836 u16 xlt2 = 0; 2837 u16 tcam = 0; 2838 u16 es = 0; 2839 u16 sects; 2840 2841 /* count number of sections we need */ 2842 list_for_each_entry(tmp, chgs, list_entry) { 2843 switch (tmp->type) { 2844 case ICE_PTG_ES_ADD: 2845 if (tmp->add_ptg) 2846 xlt1++; 2847 if (tmp->add_prof) 2848 es++; 2849 break; 2850 case ICE_TCAM_ADD: 2851 tcam++; 2852 break; 2853 case ICE_VSIG_ADD: 2854 case ICE_VSI_MOVE: 2855 case ICE_VSIG_REM: 2856 xlt2++; 2857 break; 2858 default: 2859 break; 2860 } 2861 } 2862 sects = xlt1 + xlt2 + tcam + es; 2863 2864 if (!sects) 2865 return 0; 2866 2867 /* Build update package buffer */ 2868 b = ice_pkg_buf_alloc(hw); 2869 if (!b) 2870 return ICE_ERR_NO_MEMORY; 2871 2872 status = ice_pkg_buf_reserve_section(b, sects); 2873 if (status) 2874 goto error_tmp; 2875 2876 /* Preserve order of table update: ES, TCAM, PTG, VSIG */ 2877 if (es) { 2878 status = ice_prof_bld_es(hw, blk, b, chgs); 2879 if (status) 2880 goto error_tmp; 2881 } 2882 2883 if (tcam) { 2884 status = ice_prof_bld_tcam(hw, blk, b, chgs); 2885 if (status) 2886 goto error_tmp; 2887 } 2888 2889 if (xlt1) { 2890 status = ice_prof_bld_xlt1(blk, b, chgs); 2891 if (status) 2892 goto error_tmp; 2893 } 2894 2895 if (xlt2) { 2896 status = ice_prof_bld_xlt2(blk, b, chgs); 2897 if (status) 2898 goto error_tmp; 2899 } 2900 2901 /* After package buffer build check if the section count in buffer is 2902 * non-zero and matches the number of sections detected for package 2903 * update. 2904 */ 2905 pkg_sects = ice_pkg_buf_get_active_sections(b); 2906 if (!pkg_sects || pkg_sects != sects) { 2907 status = ICE_ERR_INVAL_SIZE; 2908 goto error_tmp; 2909 } 2910 2911 /* update package */ 2912 status = ice_update_pkg(hw, ice_pkg_buf(b), 1); 2913 if (status == ICE_ERR_AQ_ERROR) 2914 ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n"); 2915 2916 error_tmp: 2917 ice_pkg_buf_free(hw, b); 2918 return status; 2919 } 2920 2921 /** 2922 * ice_add_prof - add profile 2923 * @hw: pointer to the HW struct 2924 * @blk: hardware block 2925 * @id: profile tracking ID 2926 * @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits) 2927 * @es: extraction sequence (length of array is determined by the block) 2928 * 2929 * This function registers a profile, which matches a set of PTGs with a 2930 * particular extraction sequence. While the hardware profile is allocated 2931 * it will not be written until the first call to ice_add_flow that specifies 2932 * the ID value used here. 
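 *
 * A minimal calling sketch (hypothetical tracking ID; the ptype bitmap and
 * extraction sequence normally come from the flow layer, and ICE_MAX_FV_WORDS
 * is assumed here to be large enough for the block's fvw):
 *
 *	u8 ptypes[DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE)] = { 0 };
 *	struct ice_fv_word es[ICE_MAX_FV_WORDS] = { 0 };
 *
 *	ptypes[0] = 0x01;
 *	status = ice_add_prof(hw, ICE_BLK_RSS, 0x1234ULL, ptypes, es);
 *
 * where bit 0 of ptypes[0] marks ptype 0 as carried by this profile.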
2933 */ 2934 enum ice_status 2935 ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], 2936 struct ice_fv_word *es) 2937 { 2938 u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE); 2939 DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT); 2940 struct ice_prof_map *prof; 2941 enum ice_status status; 2942 u32 byte = 0; 2943 u8 prof_id; 2944 2945 bitmap_zero(ptgs_used, ICE_XLT1_CNT); 2946 2947 mutex_lock(&hw->blk[blk].es.prof_map_lock); 2948 2949 /* search for existing profile */ 2950 status = ice_find_prof_id(hw, blk, es, &prof_id); 2951 if (status) { 2952 /* allocate profile ID */ 2953 status = ice_alloc_prof_id(hw, blk, &prof_id); 2954 if (status) 2955 goto err_ice_add_prof; 2956 2957 /* and write the new es */ 2958 ice_write_es(hw, blk, prof_id, es); 2959 } 2960 2961 ice_prof_inc_ref(hw, blk, prof_id); 2962 2963 /* add profile info */ 2964 prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*prof), GFP_KERNEL); 2965 if (!prof) { 2966 status = ICE_ERR_NO_MEMORY; goto err_ice_add_prof; } 2967 2968 prof->profile_cookie = id; 2969 prof->prof_id = prof_id; 2970 prof->ptg_cnt = 0; 2971 prof->context = 0; 2972 2973 /* build list of ptgs */ 2974 while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) { 2975 u32 bit; 2976 2977 if (!ptypes[byte]) { 2978 bytes--; 2979 byte++; 2980 continue; 2981 } 2982 2983 /* Examine 8 bits per byte */ 2984 for_each_set_bit(bit, (unsigned long *)&ptypes[byte], 2985 BITS_PER_BYTE) { 2986 u16 ptype; 2987 u8 ptg; 2988 u8 m; 2989 2990 ptype = byte * BITS_PER_BYTE + bit; 2991 2992 /* The package should place all ptypes in a non-zero 2993 * PTG, so the following call should never fail. 2994 */ 2995 if (ice_ptg_find_ptype(hw, blk, ptype, &ptg)) 2996 continue; 2997 2998 /* If PTG is already added, skip and continue */ 2999 if (test_bit(ptg, ptgs_used)) 3000 continue; 3001 3002 set_bit(ptg, ptgs_used); 3003 prof->ptg[prof->ptg_cnt] = ptg; 3004 3005 if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE) 3006 break; 3007 3008 /* if nothing is left in the byte, exit */ 3009 m = ~((1 << (bit + 1)) - 1); 3010 if (!(ptypes[byte] & m)) 3011 break; 3012 } 3013 3014 bytes--; 3015 byte++; 3016 } 3017 3018 list_add(&prof->list, &hw->blk[blk].es.prof_map); 3019 status = 0; 3020 3021 err_ice_add_prof: 3022 mutex_unlock(&hw->blk[blk].es.prof_map_lock); 3023 return status; 3024 } 3025 3026 /** 3027 * ice_search_prof_id_low - Search for a profile tracking ID low level 3028 * @hw: pointer to the HW struct 3029 * @blk: hardware block 3030 * @id: profile tracking ID 3031 * 3032 * This will search for a profile tracking ID which was previously added. This 3033 * version assumes that the caller has already acquired the prof map lock. 3034 */ 3035 static struct ice_prof_map * 3036 ice_search_prof_id_low(struct ice_hw *hw, enum ice_block blk, u64 id) 3037 { 3038 struct ice_prof_map *entry = NULL; 3039 struct ice_prof_map *map; 3040 3041 list_for_each_entry(map, &hw->blk[blk].es.prof_map, list) 3042 if (map->profile_cookie == id) { 3043 entry = map; 3044 break; 3045 } 3046 3047 return entry; 3048 } 3049 3050 /** 3051 * ice_search_prof_id - Search for a profile tracking ID 3052 * @hw: pointer to the HW struct 3053 * @blk: hardware block 3054 * @id: profile tracking ID 3055 * 3056 * This will search for a profile tracking ID which was previously added.
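 *
 * Note that, unlike ice_search_prof_id_low(), this wrapper acquires the
 * profile map lock itself, so it must not be called with that lock already
 * held.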
3057 */ 3058 static struct ice_prof_map * 3059 ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id) 3060 { 3061 struct ice_prof_map *entry; 3062 3063 mutex_lock(&hw->blk[blk].es.prof_map_lock); 3064 entry = ice_search_prof_id_low(hw, blk, id); 3065 mutex_unlock(&hw->blk[blk].es.prof_map_lock); 3066 3067 return entry; 3068 } 3069 3070 /** 3071 * ice_vsig_prof_id_count - count profiles in a VSIG 3072 * @hw: pointer to the HW struct 3073 * @blk: hardware block 3074 * @vsig: VSIG to count the profiles in 3075 */ 3076 static u16 3077 ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig) 3078 { 3079 u16 idx = vsig & ICE_VSIG_IDX_M, count = 0; 3080 struct ice_vsig_prof *p; 3081 3082 list_for_each_entry(p, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, 3083 list) 3084 count++; 3085 3086 return count; 3087 } 3088 3089 /** 3090 * ice_rel_tcam_idx - release a TCAM index 3091 * @hw: pointer to the HW struct 3092 * @blk: hardware block 3093 * @idx: the index to release 3094 */ 3095 static enum ice_status 3096 ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx) 3097 { 3098 /* Masks to invoke a never match entry */ 3099 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 3100 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF }; 3101 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 }; 3102 enum ice_status status; 3103 3104 /* write the TCAM entry */ 3105 status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk, 3106 dc_msk, nm_msk); 3107 if (status) 3108 return status; 3109 3110 /* release the TCAM entry */ 3111 status = ice_free_tcam_ent(hw, blk, idx); 3112 3113 return status; 3114 } 3115 3116 /** 3117 * ice_rem_prof_id - remove one profile from a VSIG 3118 * @hw: pointer to the HW struct 3119 * @blk: hardware block 3120 * @prof: pointer to profile structure to remove 3121 */ 3122 static enum ice_status 3123 ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk, 3124 struct ice_vsig_prof *prof) 3125 { 3126 enum ice_status status; 3127 u16 i; 3128 3129 for (i = 0; i < prof->tcam_count; i++) 3130 if (prof->tcam[i].in_use) { 3131 prof->tcam[i].in_use = false; 3132 status = ice_rel_tcam_idx(hw, blk, 3133 prof->tcam[i].tcam_idx); 3134 if (status) 3135 return ICE_ERR_HW_TABLE; 3136 } 3137 3138 return 0; 3139 } 3140 3141 /** 3142 * ice_rem_vsig - remove VSIG 3143 * @hw: pointer to the HW struct 3144 * @blk: hardware block 3145 * @vsig: the VSIG to remove 3146 * @chg: the change list 3147 */ 3148 static enum ice_status 3149 ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, 3150 struct list_head *chg) 3151 { 3152 u16 idx = vsig & ICE_VSIG_IDX_M; 3153 struct ice_vsig_vsi *vsi_cur; 3154 struct ice_vsig_prof *d, *t; 3155 enum ice_status status; 3156 3157 /* remove TCAM entries */ 3158 list_for_each_entry_safe(d, t, 3159 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, 3160 list) { 3161 status = ice_rem_prof_id(hw, blk, d); 3162 if (status) 3163 return status; 3164 3165 list_del(&d->list); 3166 devm_kfree(ice_hw_to_dev(hw), d); 3167 } 3168 3169 /* Move all VSIs associated with this VSIG to the default VSIG */ 3170 vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; 3171 /* If the VSIG has at least 1 VSI then iterate through the list 3172 * and remove the VSIs before deleting the group.
3173 */ 3174 if (vsi_cur) 3175 do { 3176 struct ice_vsig_vsi *tmp = vsi_cur->next_vsi; 3177 struct ice_chs_chg *p; 3178 3179 p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), 3180 GFP_KERNEL); 3181 if (!p) 3182 return ICE_ERR_NO_MEMORY; 3183 3184 p->type = ICE_VSIG_REM; 3185 p->orig_vsig = vsig; 3186 p->vsig = ICE_DEFAULT_VSIG; 3187 p->vsi = vsi_cur - hw->blk[blk].xlt2.vsis; 3188 3189 list_add(&p->list_entry, chg); 3190 3191 vsi_cur = tmp; 3192 } while (vsi_cur); 3193 3194 return ice_vsig_free(hw, blk, vsig); 3195 } 3196 3197 /** 3198 * ice_rem_prof_id_vsig - remove a specific profile from a VSIG 3199 * @hw: pointer to the HW struct 3200 * @blk: hardware block 3201 * @vsig: VSIG to remove the profile from 3202 * @hdl: profile handle indicating which profile to remove 3203 * @chg: list to receive a record of changes 3204 */ 3205 static enum ice_status 3206 ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, 3207 struct list_head *chg) 3208 { 3209 u16 idx = vsig & ICE_VSIG_IDX_M; 3210 struct ice_vsig_prof *p, *t; 3211 enum ice_status status; 3212 3213 list_for_each_entry_safe(p, t, 3214 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, 3215 list) 3216 if (p->profile_cookie == hdl) { 3217 if (ice_vsig_prof_id_count(hw, blk, vsig) == 1) 3218 /* this is the last profile, remove the VSIG */ 3219 return ice_rem_vsig(hw, blk, vsig, chg); 3220 3221 status = ice_rem_prof_id(hw, blk, p); 3222 if (!status) { 3223 list_del(&p->list); 3224 devm_kfree(ice_hw_to_dev(hw), p); 3225 } 3226 return status; 3227 } 3228 3229 return ICE_ERR_DOES_NOT_EXIST; 3230 } 3231 3232 /** 3233 * ice_rem_flow_all - remove all flows with a particular profile 3234 * @hw: pointer to the HW struct 3235 * @blk: hardware block 3236 * @id: profile tracking ID 3237 */ 3238 static enum ice_status 3239 ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id) 3240 { 3241 struct ice_chs_chg *del, *tmp; 3242 enum ice_status status; 3243 struct list_head chg; 3244 u16 i; 3245 3246 INIT_LIST_HEAD(&chg); 3247 3248 for (i = 1; i < ICE_MAX_VSIGS; i++) 3249 if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) { 3250 if (ice_has_prof_vsig(hw, blk, i, id)) { 3251 status = ice_rem_prof_id_vsig(hw, blk, i, id, 3252 &chg); 3253 if (status) 3254 goto err_ice_rem_flow_all; 3255 } 3256 } 3257 3258 status = ice_upd_prof_hw(hw, blk, &chg); 3259 3260 err_ice_rem_flow_all: 3261 list_for_each_entry_safe(del, tmp, &chg, list_entry) { 3262 list_del(&del->list_entry); 3263 devm_kfree(ice_hw_to_dev(hw), del); 3264 } 3265 3266 return status; 3267 } 3268 3269 /** 3270 * ice_rem_prof - remove profile 3271 * @hw: pointer to the HW struct 3272 * @blk: hardware block 3273 * @id: profile tracking ID 3274 * 3275 * This will remove the profile specified by the ID parameter, which was 3276 * previously created through ice_add_prof. If any existing entries 3277 * are associated with this profile, they will be removed as well. 
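 *
 * Sketch of the expected pairing with ice_add_prof() (hypothetical tracking
 * ID shared by both calls):
 *
 *	status = ice_add_prof(hw, ICE_BLK_RSS, 0x1234ULL, ptypes, es);
 *	...
 *	status = ice_rem_prof(hw, ICE_BLK_RSS, 0x1234ULL);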
3278 */ 3279 enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id) 3280 { 3281 struct ice_prof_map *pmap; 3282 enum ice_status status; 3283 3284 mutex_lock(&hw->blk[blk].es.prof_map_lock); 3285 3286 pmap = ice_search_prof_id_low(hw, blk, id); 3287 if (!pmap) { 3288 status = ICE_ERR_DOES_NOT_EXIST; 3289 goto err_ice_rem_prof; 3290 } 3291 3292 /* remove all flows with this profile */ 3293 status = ice_rem_flow_all(hw, blk, pmap->profile_cookie); 3294 if (status) 3295 goto err_ice_rem_prof; 3296 3297 /* dereference profile, and possibly remove */ 3298 ice_prof_dec_ref(hw, blk, pmap->prof_id); 3299 3300 list_del(&pmap->list); 3301 devm_kfree(ice_hw_to_dev(hw), pmap); 3302 3303 err_ice_rem_prof: 3304 mutex_unlock(&hw->blk[blk].es.prof_map_lock); 3305 return status; 3306 } 3307 3308 /** 3309 * ice_get_prof - get profile 3310 * @hw: pointer to the HW struct 3311 * @blk: hardware block 3312 * @hdl: profile handle 3313 * @chg: change list 3314 */ 3315 static enum ice_status 3316 ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl, 3317 struct list_head *chg) 3318 { 3319 struct ice_prof_map *map; 3320 struct ice_chs_chg *p; 3321 u16 i; 3322 3323 /* Get the details on the profile specified by the handle ID */ 3324 map = ice_search_prof_id(hw, blk, hdl); 3325 if (!map) 3326 return ICE_ERR_DOES_NOT_EXIST; 3327 3328 for (i = 0; i < map->ptg_cnt; i++) 3329 if (!hw->blk[blk].es.written[map->prof_id]) { 3330 /* add ES to change list */ 3331 p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), 3332 GFP_KERNEL); 3333 if (!p) 3334 goto err_ice_get_prof; 3335 3336 p->type = ICE_PTG_ES_ADD; 3337 p->ptype = 0; 3338 p->ptg = map->ptg[i]; 3339 p->add_ptg = 0; 3340 3341 p->add_prof = 1; 3342 p->prof_id = map->prof_id; 3343 3344 hw->blk[blk].es.written[map->prof_id] = true; 3345 3346 list_add(&p->list_entry, chg); 3347 } 3348 3349 return 0; 3350 3351 err_ice_get_prof: 3352 /* let caller clean up the change list */ 3353 return ICE_ERR_NO_MEMORY; 3354 } 3355 3356 /** 3357 * ice_get_profs_vsig - get a copy of the list of profiles from a VSIG 3358 * @hw: pointer to the HW struct 3359 * @blk: hardware block 3360 * @vsig: VSIG from which to copy the list 3361 * @lst: output list 3362 * 3363 * This routine makes a copy of the list of profiles in the specified VSIG. 
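 *
 * The caller owns the copied entries and is expected to release them when
 * done, mirroring the cleanup on the error path below:
 *
 *	list_for_each_entry_safe(ent1, ent2, lst, list) {
 *		list_del(&ent1->list);
 *		devm_kfree(ice_hw_to_dev(hw), ent1);
 *	}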
3364 */ 3365 static enum ice_status 3366 ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, 3367 struct list_head *lst) 3368 { 3369 struct ice_vsig_prof *ent1, *ent2; 3370 u16 idx = vsig & ICE_VSIG_IDX_M; 3371 3372 list_for_each_entry(ent1, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, 3373 list) { 3374 struct ice_vsig_prof *p; 3375 3376 /* copy the entry to the output list */ 3377 p = devm_kmemdup(ice_hw_to_dev(hw), ent1, sizeof(*p), 3378 GFP_KERNEL); 3379 if (!p) 3380 goto err_ice_get_profs_vsig; 3381 3382 list_add_tail(&p->list, lst); 3383 } 3384 3385 return 0; 3386 3387 err_ice_get_profs_vsig: 3388 list_for_each_entry_safe(ent1, ent2, lst, list) { 3389 list_del(&ent1->list); 3390 devm_kfree(ice_hw_to_dev(hw), ent1); 3391 } 3392 3393 return ICE_ERR_NO_MEMORY; 3394 } 3395 3396 /** 3397 * ice_add_prof_to_lst - add profile entry to a list 3398 * @hw: pointer to the HW struct 3399 * @blk: hardware block 3400 * @lst: the list to be added to 3401 * @hdl: profile handle of entry to add 3402 */ 3403 static enum ice_status 3404 ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk, 3405 struct list_head *lst, u64 hdl) 3406 { 3407 struct ice_prof_map *map; 3408 struct ice_vsig_prof *p; 3409 u16 i; 3410 3411 map = ice_search_prof_id(hw, blk, hdl); 3412 if (!map) 3413 return ICE_ERR_DOES_NOT_EXIST; 3414 3415 p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); 3416 if (!p) 3417 return ICE_ERR_NO_MEMORY; 3418 3419 p->profile_cookie = map->profile_cookie; 3420 p->prof_id = map->prof_id; 3421 p->tcam_count = map->ptg_cnt; 3422 3423 for (i = 0; i < map->ptg_cnt; i++) { 3424 p->tcam[i].prof_id = map->prof_id; 3425 p->tcam[i].tcam_idx = ICE_INVALID_TCAM; 3426 p->tcam[i].ptg = map->ptg[i]; 3427 } 3428 3429 list_add(&p->list, lst); 3430 3431 return 0; 3432 } 3433 3434 /** 3435 * ice_move_vsi - move VSI to another VSIG 3436 * @hw: pointer to the HW struct 3437 * @blk: hardware block 3438 * @vsi: the VSI to move 3439 * @vsig: the VSIG to move the VSI to 3440 * @chg: the change list 3441 */ 3442 static enum ice_status 3443 ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig, 3444 struct list_head *chg) 3445 { 3446 enum ice_status status; 3447 struct ice_chs_chg *p; 3448 u16 orig_vsig; 3449 3450 p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); 3451 if (!p) 3452 return ICE_ERR_NO_MEMORY; 3453 3454 status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig); 3455 if (!status) 3456 status = ice_vsig_add_mv_vsi(hw, blk, vsi, vsig); 3457 3458 if (status) { 3459 devm_kfree(ice_hw_to_dev(hw), p); 3460 return status; 3461 } 3462 3463 p->type = ICE_VSI_MOVE; 3464 p->vsi = vsi; 3465 p->orig_vsig = orig_vsig; 3466 p->vsig = vsig; 3467 3468 list_add(&p->list_entry, chg); 3469 3470 return 0; 3471 } 3472 3473 /** 3474 * ice_prof_tcam_ena_dis - add enable or disable TCAM change 3475 * @hw: pointer to the HW struct 3476 * @blk: hardware block 3477 * @enable: true to enable, false to disable 3478 * @vsig: the VSIG of the TCAM entry 3479 * @tcam: pointer to the TCAM info structure of the TCAM to enable or disable 3480 * @chg: the change list 3481 * 3482 * This function appends an enable or disable TCAM entry to the change list 3483 */ 3484 static enum ice_status 3485 ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable, 3486 u16 vsig, struct ice_tcam_inf *tcam, 3487 struct list_head *chg) 3488 { 3489 enum ice_status status; 3490 struct ice_chs_chg *p; 3491 3492 /* Default: enable means change the low flag bit to don't care */ 3493 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00,
0x00, 0x00 }; 3494 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 }; 3495 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 }; 3496 3497 /* if disabling, free the TCAM */ 3498 if (!enable) { 3499 status = ice_free_tcam_ent(hw, blk, tcam->tcam_idx); 3500 tcam->tcam_idx = 0; 3501 tcam->in_use = 0; 3502 return status; 3503 } 3504 3505 /* for re-enabling, reallocate a TCAM */ 3506 status = ice_alloc_tcam_ent(hw, blk, &tcam->tcam_idx); 3507 if (status) 3508 return status; 3509 3510 /* add TCAM to change list */ 3511 p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); 3512 if (!p) 3513 return ICE_ERR_NO_MEMORY; 3514 3515 status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id, 3516 tcam->ptg, vsig, 0, 0, vl_msk, dc_msk, 3517 nm_msk); 3518 if (status) 3519 goto err_ice_prof_tcam_ena_dis; 3520 3521 tcam->in_use = 1; 3522 3523 p->type = ICE_TCAM_ADD; 3524 p->add_tcam_idx = true; 3525 p->prof_id = tcam->prof_id; 3526 p->ptg = tcam->ptg; 3527 p->vsig = 0; 3528 p->tcam_idx = tcam->tcam_idx; 3529 3530 /* log change */ 3531 list_add(&p->list_entry, chg); 3532 3533 return 0; 3534 3535 err_ice_prof_tcam_ena_dis: 3536 devm_kfree(ice_hw_to_dev(hw), p); 3537 return status; 3538 } 3539 3540 /** 3541 * ice_adj_prof_priorities - adjust profile based on priorities 3542 * @hw: pointer to the HW struct 3543 * @blk: hardware block 3544 * @vsig: the VSIG for which to adjust profile priorities 3545 * @chg: the change list 3546 */ 3547 static enum ice_status 3548 ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig, 3549 struct list_head *chg) 3550 { 3551 DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT); 3552 struct ice_vsig_prof *t; 3553 enum ice_status status; 3554 u16 idx; 3555 3556 bitmap_zero(ptgs_used, ICE_XLT1_CNT); 3557 idx = vsig & ICE_VSIG_IDX_M; 3558 3559 /* Priority is based on the order in which the profiles are added. The 3560 * newest added profile has highest priority and the oldest added 3561 * profile has the lowest priority. Since the profile property list for 3562 * a VSIG is sorted from newest to oldest, this code traverses the list 3563 * in order and enables the first of each PTG that it finds (that is not 3564 * already enabled); it also disables any duplicate PTGs that it finds 3565 * in the older profiles (that are currently enabled). 3566 */ 3567 3568 list_for_each_entry(t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, 3569 list) { 3570 u16 i; 3571 3572 for (i = 0; i < t->tcam_count; i++) { 3573 /* Scan the priorities from newest to oldest. 3574 * Make sure that the newest profiles take priority. 
3575 */ 3576 if (test_bit(t->tcam[i].ptg, ptgs_used) && 3577 t->tcam[i].in_use) { 3578 /* need to mark this PTG as never match, as it 3579 * was already in use and therefore duplicate 3580 * (and lower priority) 3581 */ 3582 status = ice_prof_tcam_ena_dis(hw, blk, false, 3583 vsig, 3584 &t->tcam[i], 3585 chg); 3586 if (status) 3587 return status; 3588 } else if (!test_bit(t->tcam[i].ptg, ptgs_used) && 3589 !t->tcam[i].in_use) { 3590 /* need to enable this PTG, as it is not in use 3591 * and not enabled (highest priority) 3592 */ 3593 status = ice_prof_tcam_ena_dis(hw, blk, true, 3594 vsig, 3595 &t->tcam[i], 3596 chg); 3597 if (status) 3598 return status; 3599 } 3600 3601 /* keep track of used PTGs */ 3602 set_bit(t->tcam[i].ptg, ptgs_used); 3603 } 3604 } 3605 3606 return 0; 3607 } 3608 3609 /** 3610 * ice_add_prof_id_vsig - add profile to VSIG 3611 * @hw: pointer to the HW struct 3612 * @blk: hardware block 3613 * @vsig: the VSIG to which this profile is to be added 3614 * @hdl: the profile handle indicating the profile to add 3615 * @chg: the change list 3616 */ 3617 static enum ice_status 3618 ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, 3619 struct list_head *chg) 3620 { 3621 /* Masks that ignore flags */ 3622 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 3623 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 }; 3624 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 }; 3625 struct ice_prof_map *map; 3626 struct ice_vsig_prof *t; 3627 struct ice_chs_chg *p; 3628 u16 i; 3629 3630 /* Get the details on the profile specified by the handle ID */ 3631 map = ice_search_prof_id(hw, blk, hdl); 3632 if (!map) 3633 return ICE_ERR_DOES_NOT_EXIST; 3634 3635 /* Error, if this VSIG already has this profile */ 3636 if (ice_has_prof_vsig(hw, blk, vsig, hdl)) 3637 return ICE_ERR_ALREADY_EXISTS; 3638 3639 /* new VSIG profile structure */ 3640 t = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*t), GFP_KERNEL); 3641 if (!t) 3642 return ICE_ERR_NO_MEMORY; 3643 3644 t->profile_cookie = map->profile_cookie; 3645 t->prof_id = map->prof_id; 3646 t->tcam_count = map->ptg_cnt; 3647 3648 /* create TCAM entries */ 3649 for (i = 0; i < map->ptg_cnt; i++) { 3650 enum ice_status status; 3651 u16 tcam_idx; 3652 3653 /* add TCAM to change list */ 3654 p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); 3655 if (!p) 3656 goto err_ice_add_prof_id_vsig; 3657 3658 /* allocate the TCAM entry index */ 3659 status = ice_alloc_tcam_ent(hw, blk, &tcam_idx); 3660 if (status) { 3661 devm_kfree(ice_hw_to_dev(hw), p); 3662 goto err_ice_add_prof_id_vsig; 3663 } 3664 3665 t->tcam[i].ptg = map->ptg[i]; 3666 t->tcam[i].prof_id = map->prof_id; 3667 t->tcam[i].tcam_idx = tcam_idx; 3668 t->tcam[i].in_use = true; 3669 3670 p->type = ICE_TCAM_ADD; 3671 p->add_tcam_idx = true; 3672 p->prof_id = t->tcam[i].prof_id; 3673 p->ptg = t->tcam[i].ptg; 3674 p->vsig = vsig; 3675 p->tcam_idx = t->tcam[i].tcam_idx; 3676 3677 /* write the TCAM entry */ 3678 status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx, 3679 t->tcam[i].prof_id, 3680 t->tcam[i].ptg, vsig, 0, 0, 3681 vl_msk, dc_msk, nm_msk); 3682 if (status) 3683 goto err_ice_add_prof_id_vsig; 3684 3685 /* log change */ 3686 list_add(&p->list_entry, chg); 3687 } 3688 3689 /* add profile to VSIG */ 3690 list_add(&t->list, 3691 &hw->blk[blk].xlt2.vsig_tbl[(vsig & ICE_VSIG_IDX_M)].prop_lst); 3692 3693 return 0; 3694 3695 err_ice_add_prof_id_vsig: 3696 /* let caller clean up the change list */ 3697
devm_kfree(ice_hw_to_dev(hw), t); 3698 return ICE_ERR_NO_MEMORY; 3699 } 3700 3701 /** 3702 * ice_create_prof_id_vsig - add a new VSIG with a single profile 3703 * @hw: pointer to the HW struct 3704 * @blk: hardware block 3705 * @vsi: the initial VSI that will be in VSIG 3706 * @hdl: the profile handle of the profile that will be added to the VSIG 3707 * @chg: the change list 3708 */ 3709 static enum ice_status 3710 ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl, 3711 struct list_head *chg) 3712 { 3713 enum ice_status status; 3714 struct ice_chs_chg *p; 3715 u16 new_vsig; 3716 3717 p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); 3718 if (!p) 3719 return ICE_ERR_NO_MEMORY; 3720 3721 new_vsig = ice_vsig_alloc(hw, blk); 3722 if (!new_vsig) { 3723 status = ICE_ERR_HW_TABLE; 3724 goto err_ice_create_prof_id_vsig; 3725 } 3726 3727 status = ice_move_vsi(hw, blk, vsi, new_vsig, chg); 3728 if (status) 3729 goto err_ice_create_prof_id_vsig; 3730 3731 status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, chg); 3732 if (status) 3733 goto err_ice_create_prof_id_vsig; 3734 3735 p->type = ICE_VSIG_ADD; 3736 p->vsi = vsi; 3737 p->orig_vsig = ICE_DEFAULT_VSIG; 3738 p->vsig = new_vsig; 3739 3740 list_add(&p->list_entry, chg); 3741 3742 return 0; 3743 3744 err_ice_create_prof_id_vsig: 3745 /* let caller clean up the change list */ 3746 devm_kfree(ice_hw_to_dev(hw), p); 3747 return status; 3748 } 3749 3750 /** 3751 * ice_create_vsig_from_lst - create a new VSIG with a list of profiles 3752 * @hw: pointer to the HW struct 3753 * @blk: hardware block 3754 * @vsi: the initial VSI that will be in VSIG 3755 * @lst: the list of profiles that will be added to the VSIG 3756 * @chg: the change list 3757 */ 3758 static enum ice_status 3759 ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi, 3760 struct list_head *lst, struct list_head *chg) 3761 { 3762 struct ice_vsig_prof *t; 3763 enum ice_status status; 3764 u16 vsig; 3765 3766 vsig = ice_vsig_alloc(hw, blk); 3767 if (!vsig) 3768 return ICE_ERR_HW_TABLE; 3769 3770 status = ice_move_vsi(hw, blk, vsi, vsig, chg); 3771 if (status) 3772 return status; 3773 3774 list_for_each_entry(t, lst, list) { 3775 status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie, 3776 chg); 3777 if (status) 3778 return status; 3779 } 3780 3781 return 0; 3782 } 3783 3784 /** 3785 * ice_find_prof_vsig - find a VSIG with a specific profile handle 3786 * @hw: pointer to the HW struct 3787 * @blk: hardware block 3788 * @hdl: the profile handle of the profile to search for 3789 * @vsig: returns the VSIG with the matching profile 3790 */ 3791 static bool 3792 ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig) 3793 { 3794 struct ice_vsig_prof *t; 3795 enum ice_status status; 3796 struct list_head lst; 3797 3798 INIT_LIST_HEAD(&lst); 3799 3800 t = kzalloc(sizeof(*t), GFP_KERNEL); 3801 if (!t) 3802 return false; 3803 3804 t->profile_cookie = hdl; 3805 list_add(&t->list, &lst); 3806 3807 status = ice_find_dup_props_vsig(hw, blk, &lst, vsig); 3808 3809 list_del(&t->list); 3810 kfree(t); 3811 3812 return !status; 3813 } 3814 3815 /** 3816 * ice_add_prof_id_flow - add profile flow 3817 * @hw: pointer to the HW struct 3818 * @blk: hardware block 3819 * @vsi: the VSI to enable with the profile specified by ID 3820 * @hdl: profile handle 3821 * 3822 * Calling this function will update the hardware tables to enable the 3823 * profile indicated by the ID parameter for the VSI specified by the @vsi 3824 * parameter.
Once successfully called, the flow will be enabled. 3825 */ 3826 enum ice_status 3827 ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl) 3828 { 3829 struct ice_vsig_prof *tmp1, *del1; 3830 struct ice_chs_chg *tmp, *del; 3831 struct list_head union_lst; 3832 enum ice_status status; 3833 struct list_head chg; 3834 u16 vsig; 3835 3836 INIT_LIST_HEAD(&union_lst); 3837 INIT_LIST_HEAD(&chg); 3838 3839 /* Get profile */ 3840 status = ice_get_prof(hw, blk, hdl, &chg); 3841 if (status) 3842 return status; 3843 3844 /* determine if VSI is already part of a VSIG */ 3845 status = ice_vsig_find_vsi(hw, blk, vsi, &vsig); 3846 if (!status && vsig) { 3847 bool only_vsi; 3848 u16 or_vsig; 3849 u16 ref; 3850 3851 /* found in VSIG */ 3852 or_vsig = vsig; 3853 3854 /* make sure that there is no overlap/conflict between the new 3855 * characteristics and the existing ones; we don't support that 3856 * scenario 3857 */ 3858 if (ice_has_prof_vsig(hw, blk, vsig, hdl)) { 3859 status = ICE_ERR_ALREADY_EXISTS; 3860 goto err_ice_add_prof_id_flow; 3861 } 3862 3863 /* last VSI in the VSIG? */ 3864 status = ice_vsig_get_ref(hw, blk, vsig, &ref); 3865 if (status) 3866 goto err_ice_add_prof_id_flow; 3867 only_vsi = (ref == 1); 3868 3869 /* create a union of the current profiles and the one being 3870 * added 3871 */ 3872 status = ice_get_profs_vsig(hw, blk, vsig, &union_lst); 3873 if (status) 3874 goto err_ice_add_prof_id_flow; 3875 3876 status = ice_add_prof_to_lst(hw, blk, &union_lst, hdl); 3877 if (status) 3878 goto err_ice_add_prof_id_flow; 3879 3880 /* search for an existing VSIG with an exact charc match */ 3881 status = ice_find_dup_props_vsig(hw, blk, &union_lst, &vsig); 3882 if (!status) { 3883 /* move VSI to the VSIG that matches */ 3884 status = ice_move_vsi(hw, blk, vsi, vsig, &chg); 3885 if (status) 3886 goto err_ice_add_prof_id_flow; 3887 3888 /* VSI has been moved out of or_vsig. If the or_vsig had 3889 * only that VSI it is now empty and can be removed. 3890 */ 3891 if (only_vsi) { 3892 status = ice_rem_vsig(hw, blk, or_vsig, &chg); 3893 if (status) 3894 goto err_ice_add_prof_id_flow; 3895 } 3896 } else if (only_vsi) { 3897 /* If the original VSIG only contains one VSI, then it 3898 * will be the requesting VSI. In this case the VSI is 3899 * not sharing entries and we can simply add the new 3900 * profile to the VSIG. 
/**
 * ice_rem_prof_from_list - remove a profile from list
 * @hw: pointer to the HW struct
 * @lst: list to remove the profile from
 * @hdl: the profile handle indicating the profile to remove
 */
static enum ice_status
ice_rem_prof_from_list(struct ice_hw *hw, struct list_head *lst, u64 hdl)
{
	struct ice_vsig_prof *ent, *tmp;

	list_for_each_entry_safe(ent, tmp, lst, list)
		if (ent->profile_cookie == hdl) {
			list_del(&ent->list);
			devm_kfree(ice_hw_to_dev(hw), ent);
			return 0;
		}

	return ICE_ERR_DOES_NOT_EXIST;
}
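/*
 * Sketch (illustrative only) of the copy-and-compare pattern used by
 * ice_rem_prof_id_flow() below: copy the VSIG's profile list, drop the
 * profile being removed, then search for an existing VSIG whose profile
 * list matches the remainder. 'vsi' and 'chg' come from the surrounding
 * function, and the copied entries must still be freed afterwards.
 *
 *	struct list_head copy;
 *
 *	INIT_LIST_HEAD(&copy);
 *	if (!ice_get_profs_vsig(hw, blk, vsig, &copy) &&
 *	    !ice_rem_prof_from_list(hw, &copy, hdl) &&
 *	    !ice_find_dup_props_vsig(hw, blk, &copy, &vsig))
 *		status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
 */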
/**
 * ice_rem_prof_id_flow - remove flow
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: the VSI from which to remove the profile specified by ID
 * @hdl: profile tracking handle
 *
 * Calling this function will update the hardware tables to remove the
 * profile indicated by the ID parameter for the VSIs specified in the VSI
 * array. Once successfully called, the flow will be disabled.
 */
enum ice_status
ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
{
	struct ice_vsig_prof *tmp1, *del1;
	struct ice_chs_chg *tmp, *del;
	struct list_head chg, copy;
	enum ice_status status;
	u16 vsig;

	INIT_LIST_HEAD(&copy);
	INIT_LIST_HEAD(&chg);

	/* determine if VSI is already part of a VSIG */
	status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
	if (!status && vsig) {
		bool last_profile;
		bool only_vsi;
		u16 ref;

		/* found in VSIG */
		last_profile = ice_vsig_prof_id_count(hw, blk, vsig) == 1;
		status = ice_vsig_get_ref(hw, blk, vsig, &ref);
		if (status)
			goto err_ice_rem_prof_id_flow;
		only_vsi = (ref == 1);

		if (only_vsi) {
			/* If the original VSIG only contains one reference,
			 * which will be the requesting VSI, then the VSI is not
			 * sharing entries and we can simply remove the specific
			 * characteristics from the VSIG.
			 */

			if (last_profile) {
				/* If there are no profiles left for this VSIG,
				 * then simply remove the VSIG.
				 */
				status = ice_rem_vsig(hw, blk, vsig, &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;
			} else {
				status = ice_rem_prof_id_vsig(hw, blk, vsig,
							      hdl, &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;

				/* Adjust priorities */
				status = ice_adj_prof_priorities(hw, blk, vsig,
								 &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;
			}

		} else {
			/* Make a copy of the VSIG's list of profiles */
			status = ice_get_profs_vsig(hw, blk, vsig, &copy);
			if (status)
				goto err_ice_rem_prof_id_flow;

			/* Remove the specified profile entry from the list */
			status = ice_rem_prof_from_list(hw, &copy, hdl);
			if (status)
				goto err_ice_rem_prof_id_flow;

			if (list_empty(&copy)) {
				status = ice_move_vsi(hw, blk, vsi,
						      ICE_DEFAULT_VSIG, &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;

			} else if (!ice_find_dup_props_vsig(hw, blk, &copy,
							    &vsig)) {
				/* found a VSIG with a matching profile list;
				 * add or move the VSI to that VSIG
				 */
				status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;
			} else {
				/* since no existing VSIG supports this
				 * characteristic pattern, we need to create a
				 * new VSIG and TCAM entries
				 */
				status = ice_create_vsig_from_lst(hw, blk, vsi,
								  &copy, &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;

				/* Adjust priorities */
				status = ice_adj_prof_priorities(hw, blk, vsig,
								 &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;
			}
		}
	} else {
		status = ICE_ERR_DOES_NOT_EXIST;
	}

	/* update hardware tables */
	if (!status)
		status = ice_upd_prof_hw(hw, blk, &chg);

err_ice_rem_prof_id_flow:
	list_for_each_entry_safe(del, tmp, &chg, list_entry) {
		list_del(&del->list_entry);
		devm_kfree(ice_hw_to_dev(hw), del);
	}

	list_for_each_entry_safe(del1, tmp1, &copy, list) {
		list_del(&del1->list);
		devm_kfree(ice_hw_to_dev(hw), del1);
	}

	return status;
}
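/*
 * Example (illustrative only): disabling a flow that was previously enabled
 * with ice_add_prof_id_flow(). The block, hardware VSI number, and profile
 * handle are hypothetical placeholders and must match the values used when
 * the flow was added.
 *
 *	enum ice_status status;
 *
 *	status = ice_rem_prof_id_flow(hw, ICE_BLK_RSS, vsi_num, prof_id);
 *	if (status)
 *		return status;
 */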