1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (c) 2019, Intel Corporation. */ 3 4 #include "ice_common.h" 5 #include "ice_flex_pipe.h" 6 #include "ice_flow.h" 7 8 /* To support tunneling entries by PF, the package will append the PF number to 9 * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc. 10 */ 11 static const struct ice_tunnel_type_scan tnls[] = { 12 { TNL_VXLAN, "TNL_VXLAN_PF" }, 13 { TNL_GENEVE, "TNL_GENEVE_PF" }, 14 { TNL_LAST, "" } 15 }; 16 17 static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = { 18 /* SWITCH */ 19 { 20 ICE_SID_XLT0_SW, 21 ICE_SID_XLT_KEY_BUILDER_SW, 22 ICE_SID_XLT1_SW, 23 ICE_SID_XLT2_SW, 24 ICE_SID_PROFID_TCAM_SW, 25 ICE_SID_PROFID_REDIR_SW, 26 ICE_SID_FLD_VEC_SW, 27 ICE_SID_CDID_KEY_BUILDER_SW, 28 ICE_SID_CDID_REDIR_SW 29 }, 30 31 /* ACL */ 32 { 33 ICE_SID_XLT0_ACL, 34 ICE_SID_XLT_KEY_BUILDER_ACL, 35 ICE_SID_XLT1_ACL, 36 ICE_SID_XLT2_ACL, 37 ICE_SID_PROFID_TCAM_ACL, 38 ICE_SID_PROFID_REDIR_ACL, 39 ICE_SID_FLD_VEC_ACL, 40 ICE_SID_CDID_KEY_BUILDER_ACL, 41 ICE_SID_CDID_REDIR_ACL 42 }, 43 44 /* FD */ 45 { 46 ICE_SID_XLT0_FD, 47 ICE_SID_XLT_KEY_BUILDER_FD, 48 ICE_SID_XLT1_FD, 49 ICE_SID_XLT2_FD, 50 ICE_SID_PROFID_TCAM_FD, 51 ICE_SID_PROFID_REDIR_FD, 52 ICE_SID_FLD_VEC_FD, 53 ICE_SID_CDID_KEY_BUILDER_FD, 54 ICE_SID_CDID_REDIR_FD 55 }, 56 57 /* RSS */ 58 { 59 ICE_SID_XLT0_RSS, 60 ICE_SID_XLT_KEY_BUILDER_RSS, 61 ICE_SID_XLT1_RSS, 62 ICE_SID_XLT2_RSS, 63 ICE_SID_PROFID_TCAM_RSS, 64 ICE_SID_PROFID_REDIR_RSS, 65 ICE_SID_FLD_VEC_RSS, 66 ICE_SID_CDID_KEY_BUILDER_RSS, 67 ICE_SID_CDID_REDIR_RSS 68 }, 69 70 /* PE */ 71 { 72 ICE_SID_XLT0_PE, 73 ICE_SID_XLT_KEY_BUILDER_PE, 74 ICE_SID_XLT1_PE, 75 ICE_SID_XLT2_PE, 76 ICE_SID_PROFID_TCAM_PE, 77 ICE_SID_PROFID_REDIR_PE, 78 ICE_SID_FLD_VEC_PE, 79 ICE_SID_CDID_KEY_BUILDER_PE, 80 ICE_SID_CDID_REDIR_PE 81 } 82 }; 83 84 /** 85 * ice_sect_id - returns section ID 86 * @blk: block type 87 * @sect: section type 88 * 89 * This helper function returns the proper section ID given a block type and a 90 * section type. 91 */ 92 static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect) 93 { 94 return ice_sect_lkup[blk][sect]; 95 } 96 97 /** 98 * ice_pkg_val_buf 99 * @buf: pointer to the ice buffer 100 * 101 * This helper function validates a buffer's header. 102 */ 103 static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf) 104 { 105 struct ice_buf_hdr *hdr; 106 u16 section_count; 107 u16 data_end; 108 109 hdr = (struct ice_buf_hdr *)buf->buf; 110 /* verify data */ 111 section_count = le16_to_cpu(hdr->section_count); 112 if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT) 113 return NULL; 114 115 data_end = le16_to_cpu(hdr->data_end); 116 if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END) 117 return NULL; 118 119 return hdr; 120 } 121 122 /** 123 * ice_find_buf_table 124 * @ice_seg: pointer to the ice segment 125 * 126 * Returns the address of the buffer table within the ice segment. 127 */ 128 static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg) 129 { 130 struct ice_nvm_table *nvms; 131 132 nvms = (struct ice_nvm_table *) 133 (ice_seg->device_table + 134 le32_to_cpu(ice_seg->device_table_count)); 135 136 return (__force struct ice_buf_table *) 137 (nvms->vers + le32_to_cpu(nvms->table_count)); 138 } 139 140 /** 141 * ice_pkg_enum_buf 142 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) 143 * @state: pointer to the enum state 144 * 145 * This function will enumerate all the buffers in the ice segment. 
The first 146 * call is made with the ice_seg parameter non-NULL; on subsequent calls, 147 * ice_seg is set to NULL which continues the enumeration. When the function 148 * returns a NULL pointer, then the end of the buffers has been reached, or an 149 * unexpected value has been detected (for example an invalid section count or 150 * an invalid buffer end value). 151 */ 152 static struct ice_buf_hdr * 153 ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state) 154 { 155 if (ice_seg) { 156 state->buf_table = ice_find_buf_table(ice_seg); 157 if (!state->buf_table) 158 return NULL; 159 160 state->buf_idx = 0; 161 return ice_pkg_val_buf(state->buf_table->buf_array); 162 } 163 164 if (++state->buf_idx < le32_to_cpu(state->buf_table->buf_count)) 165 return ice_pkg_val_buf(state->buf_table->buf_array + 166 state->buf_idx); 167 else 168 return NULL; 169 } 170 171 /** 172 * ice_pkg_advance_sect 173 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) 174 * @state: pointer to the enum state 175 * 176 * This helper function will advance the section within the ice segment, 177 * also advancing the buffer if needed. 178 */ 179 static bool 180 ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state) 181 { 182 if (!ice_seg && !state->buf) 183 return false; 184 185 if (!ice_seg && state->buf) 186 if (++state->sect_idx < le16_to_cpu(state->buf->section_count)) 187 return true; 188 189 state->buf = ice_pkg_enum_buf(ice_seg, state); 190 if (!state->buf) 191 return false; 192 193 /* start of new buffer, reset section index */ 194 state->sect_idx = 0; 195 return true; 196 } 197 198 /** 199 * ice_pkg_enum_section 200 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) 201 * @state: pointer to the enum state 202 * @sect_type: section type to enumerate 203 * 204 * This function will enumerate all the sections of a particular type in the 205 * ice segment. The first call is made with the ice_seg parameter non-NULL; 206 * on subsequent calls, ice_seg is set to NULL which continues the enumeration. 207 * When the function returns a NULL pointer, then the end of the matching 208 * sections has been reached. 
209 */
210 static void *
211 ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
212 u32 sect_type)
213 {
214 u16 offset, size;
215
216 if (ice_seg)
217 state->type = sect_type;
218
219 if (!ice_pkg_advance_sect(ice_seg, state))
220 return NULL;
221
222 /* scan for next matching section */
223 while (state->buf->section_entry[state->sect_idx].type !=
224 cpu_to_le32(state->type))
225 if (!ice_pkg_advance_sect(NULL, state))
226 return NULL;
227
228 /* validate section */
229 offset = le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
230 if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
231 return NULL;
232
233 size = le16_to_cpu(state->buf->section_entry[state->sect_idx].size);
234 if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
235 return NULL;
236
237 /* make sure the section fits in the buffer */
238 if (offset + size > ICE_PKG_BUF_SIZE)
239 return NULL;
240
241 state->sect_type =
242 le32_to_cpu(state->buf->section_entry[state->sect_idx].type);
243
244 /* calc pointer to this section */
245 state->sect = ((u8 *)state->buf) +
246 le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
247
248 return state->sect;
249 }
250
251 /**
252 * ice_pkg_enum_entry
253 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
254 * @state: pointer to the enum state
255 * @sect_type: section type to enumerate
256 * @offset: pointer to variable that receives the offset in the table (optional)
257 * @handler: function that handles access to the entries in the section type
258 *
259 * This function will enumerate all the entries in a particular section type in
260 * the ice segment. The first call is made with the ice_seg parameter non-NULL;
261 * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
262 * When the function returns a NULL pointer, then the end of the entries has
263 * been reached.
264 *
265 * Since each section may have a different header and entry size, the handler
266 * function is needed to determine the number and location of entries in each
267 * section.
268 *
269 * The offset parameter is optional, but should be used for sections that
270 * contain an offset for each section table. For such cases, the section handler
271 * function must return the appropriate offset + index to give the absolute
272 * offset for each entry. For example, if the base for a section's header
273 * indicates a base offset of 10, and the index for the entry is 2, then
274 * the section handler function should set the offset to 10 + 2 = 12.
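 *
 * A minimal caller sketch (this simply mirrors the pattern used by
 * ice_find_boost_entry() later in this file; entry, sect_type and handler are
 * placeholders): zero the state, pass the segment only on the first call,
 * then pass NULL until NULL is returned:
 *
 *	memset(&state, 0, sizeof(state));
 *	do {
 *		entry = ice_pkg_enum_entry(ice_seg, &state, sect_type, NULL,
 *					   handler);
 *		if (entry)
 *			... use the entry ...
 *		ice_seg = NULL;
 *	} while (entry);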
275 */ 276 static void * 277 ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state, 278 u32 sect_type, u32 *offset, 279 void *(*handler)(u32 sect_type, void *section, 280 u32 index, u32 *offset)) 281 { 282 void *entry; 283 284 if (ice_seg) { 285 if (!handler) 286 return NULL; 287 288 if (!ice_pkg_enum_section(ice_seg, state, sect_type)) 289 return NULL; 290 291 state->entry_idx = 0; 292 state->handler = handler; 293 } else { 294 state->entry_idx++; 295 } 296 297 if (!state->handler) 298 return NULL; 299 300 /* get entry */ 301 entry = state->handler(state->sect_type, state->sect, state->entry_idx, 302 offset); 303 if (!entry) { 304 /* end of a section, look for another section of this type */ 305 if (!ice_pkg_enum_section(NULL, state, 0)) 306 return NULL; 307 308 state->entry_idx = 0; 309 entry = state->handler(state->sect_type, state->sect, 310 state->entry_idx, offset); 311 } 312 313 return entry; 314 } 315 316 /** 317 * ice_boost_tcam_handler 318 * @sect_type: section type 319 * @section: pointer to section 320 * @index: index of the boost TCAM entry to be returned 321 * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections 322 * 323 * This is a callback function that can be passed to ice_pkg_enum_entry. 324 * Handles enumeration of individual boost TCAM entries. 325 */ 326 static void * 327 ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset) 328 { 329 struct ice_boost_tcam_section *boost; 330 331 if (!section) 332 return NULL; 333 334 if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM) 335 return NULL; 336 337 if (index > ICE_MAX_BST_TCAMS_IN_BUF) 338 return NULL; 339 340 if (offset) 341 *offset = 0; 342 343 boost = section; 344 if (index >= le16_to_cpu(boost->count)) 345 return NULL; 346 347 return boost->tcam + index; 348 } 349 350 /** 351 * ice_find_boost_entry 352 * @ice_seg: pointer to the ice segment (non-NULL) 353 * @addr: Boost TCAM address of entry to search for 354 * @entry: returns pointer to the entry 355 * 356 * Finds a particular Boost TCAM entry and returns a pointer to that entry 357 * if it is found. The ice_seg parameter must not be NULL since the first call 358 * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure. 359 */ 360 static enum ice_status 361 ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr, 362 struct ice_boost_tcam_entry **entry) 363 { 364 struct ice_boost_tcam_entry *tcam; 365 struct ice_pkg_enum state; 366 367 memset(&state, 0, sizeof(state)); 368 369 if (!ice_seg) 370 return ICE_ERR_PARAM; 371 372 do { 373 tcam = ice_pkg_enum_entry(ice_seg, &state, 374 ICE_SID_RXPARSER_BOOST_TCAM, NULL, 375 ice_boost_tcam_handler); 376 if (tcam && le16_to_cpu(tcam->addr) == addr) { 377 *entry = tcam; 378 return 0; 379 } 380 381 ice_seg = NULL; 382 } while (tcam); 383 384 *entry = NULL; 385 return ICE_ERR_CFG; 386 } 387 388 /** 389 * ice_label_enum_handler 390 * @sect_type: section type 391 * @section: pointer to section 392 * @index: index of the label entry to be returned 393 * @offset: pointer to receive absolute offset, always zero for label sections 394 * 395 * This is a callback function that can be passed to ice_pkg_enum_entry. 396 * Handles enumeration of individual label entries. 
397 */ 398 static void * 399 ice_label_enum_handler(u32 __always_unused sect_type, void *section, u32 index, 400 u32 *offset) 401 { 402 struct ice_label_section *labels; 403 404 if (!section) 405 return NULL; 406 407 if (index > ICE_MAX_LABELS_IN_BUF) 408 return NULL; 409 410 if (offset) 411 *offset = 0; 412 413 labels = section; 414 if (index >= le16_to_cpu(labels->count)) 415 return NULL; 416 417 return labels->label + index; 418 } 419 420 /** 421 * ice_enum_labels 422 * @ice_seg: pointer to the ice segment (NULL on subsequent calls) 423 * @type: the section type that will contain the label (0 on subsequent calls) 424 * @state: ice_pkg_enum structure that will hold the state of the enumeration 425 * @value: pointer to a value that will return the label's value if found 426 * 427 * Enumerates a list of labels in the package. The caller will call 428 * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call 429 * ice_enum_labels(NULL, 0, ...) to continue. When the function returns a NULL 430 * the end of the list has been reached. 431 */ 432 static char * 433 ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state, 434 u16 *value) 435 { 436 struct ice_label *label; 437 438 /* Check for valid label section on first call */ 439 if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST)) 440 return NULL; 441 442 label = ice_pkg_enum_entry(ice_seg, state, type, NULL, 443 ice_label_enum_handler); 444 if (!label) 445 return NULL; 446 447 *value = le16_to_cpu(label->value); 448 return label->name; 449 } 450 451 /** 452 * ice_init_pkg_hints 453 * @hw: pointer to the HW structure 454 * @ice_seg: pointer to the segment of the package scan (non-NULL) 455 * 456 * This function will scan the package and save off relevant information 457 * (hints or metadata) for driver use. The ice_seg parameter must not be NULL 458 * since the first call to ice_enum_labels requires a pointer to an actual 459 * ice_seg structure. 460 */ 461 static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg) 462 { 463 struct ice_pkg_enum state; 464 char *label_name; 465 u16 val; 466 int i; 467 468 memset(&hw->tnl, 0, sizeof(hw->tnl)); 469 memset(&state, 0, sizeof(state)); 470 471 if (!ice_seg) 472 return; 473 474 label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state, 475 &val); 476 477 while (label_name && hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) { 478 for (i = 0; tnls[i].type != TNL_LAST; i++) { 479 size_t len = strlen(tnls[i].label_prefix); 480 481 /* Look for matching label start, before continuing */ 482 if (strncmp(label_name, tnls[i].label_prefix, len)) 483 continue; 484 485 /* Make sure this label matches our PF. Note that the PF 486 * character ('0' - '7') will be located where our 487 * prefix string's null terminator is located. 
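 * For example, with the "TNL_VXLAN_PF" prefix from tnls[] above, a PF whose
 * pf_id is 2 accepts only the label "TNL_VXLAN_PF2" and skips
 * "TNL_VXLAN_PF0", "TNL_VXLAN_PF1", and so on.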
488 */ 489 if ((label_name[len] - '0') == hw->pf_id) { 490 hw->tnl.tbl[hw->tnl.count].type = tnls[i].type; 491 hw->tnl.tbl[hw->tnl.count].valid = false; 492 hw->tnl.tbl[hw->tnl.count].in_use = false; 493 hw->tnl.tbl[hw->tnl.count].marked = false; 494 hw->tnl.tbl[hw->tnl.count].boost_addr = val; 495 hw->tnl.tbl[hw->tnl.count].port = 0; 496 hw->tnl.count++; 497 break; 498 } 499 } 500 501 label_name = ice_enum_labels(NULL, 0, &state, &val); 502 } 503 504 /* Cache the appropriate boost TCAM entry pointers */ 505 for (i = 0; i < hw->tnl.count; i++) { 506 ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr, 507 &hw->tnl.tbl[i].boost_entry); 508 if (hw->tnl.tbl[i].boost_entry) 509 hw->tnl.tbl[i].valid = true; 510 } 511 } 512 513 /* Key creation */ 514 515 #define ICE_DC_KEY 0x1 /* don't care */ 516 #define ICE_DC_KEYINV 0x1 517 #define ICE_NM_KEY 0x0 /* never match */ 518 #define ICE_NM_KEYINV 0x0 519 #define ICE_0_KEY 0x1 /* match 0 */ 520 #define ICE_0_KEYINV 0x0 521 #define ICE_1_KEY 0x0 /* match 1 */ 522 #define ICE_1_KEYINV 0x1 523 524 /** 525 * ice_gen_key_word - generate 16-bits of a key/mask word 526 * @val: the value 527 * @valid: valid bits mask (change only the valid bits) 528 * @dont_care: don't care mask 529 * @nvr_mtch: never match mask 530 * @key: pointer to an array of where the resulting key portion 531 * @key_inv: pointer to an array of where the resulting key invert portion 532 * 533 * This function generates 16-bits from a 8-bit value, an 8-bit don't care mask 534 * and an 8-bit never match mask. The 16-bits of output are divided into 8 bits 535 * of key and 8 bits of key invert. 536 * 537 * '0' = b01, always match a 0 bit 538 * '1' = b10, always match a 1 bit 539 * '?' = b11, don't care bit (always matches) 540 * '~' = b00, never match bit 541 * 542 * Input: 543 * val: b0 1 0 1 0 1 544 * dont_care: b0 0 1 1 0 0 545 * never_mtch: b0 0 0 0 1 1 546 * ------------------------------ 547 * Result: key: b01 10 11 11 00 00 548 */ 549 static enum ice_status 550 ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key, 551 u8 *key_inv) 552 { 553 u8 in_key = *key, in_key_inv = *key_inv; 554 u8 i; 555 556 /* 'dont_care' and 'nvr_mtch' masks cannot overlap */ 557 if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch)) 558 return ICE_ERR_CFG; 559 560 *key = 0; 561 *key_inv = 0; 562 563 /* encode the 8 bits into 8-bit key and 8-bit key invert */ 564 for (i = 0; i < 8; i++) { 565 *key >>= 1; 566 *key_inv >>= 1; 567 568 if (!(valid & 0x1)) { /* change only valid bits */ 569 *key |= (in_key & 0x1) << 7; 570 *key_inv |= (in_key_inv & 0x1) << 7; 571 } else if (dont_care & 0x1) { /* don't care bit */ 572 *key |= ICE_DC_KEY << 7; 573 *key_inv |= ICE_DC_KEYINV << 7; 574 } else if (nvr_mtch & 0x1) { /* never match bit */ 575 *key |= ICE_NM_KEY << 7; 576 *key_inv |= ICE_NM_KEYINV << 7; 577 } else if (val & 0x01) { /* exact 1 match */ 578 *key |= ICE_1_KEY << 7; 579 *key_inv |= ICE_1_KEYINV << 7; 580 } else { /* exact 0 match */ 581 *key |= ICE_0_KEY << 7; 582 *key_inv |= ICE_0_KEYINV << 7; 583 } 584 585 dont_care >>= 1; 586 nvr_mtch >>= 1; 587 valid >>= 1; 588 val >>= 1; 589 in_key >>= 1; 590 in_key_inv >>= 1; 591 } 592 593 return 0; 594 } 595 596 /** 597 * ice_bits_max_set - determine if the number of bits set is within a maximum 598 * @mask: pointer to the byte array which is the mask 599 * @size: the number of bytes in the mask 600 * @max: the max number of set bits 601 * 602 * This function determines if there are at most 'max' number of bits set in an 603 * array. 
Returns true if the number of bits set is <= max, false
604 * otherwise.
605 */
606 static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
607 {
608 u16 count = 0;
609 u16 i;
610
611 /* check each byte */
612 for (i = 0; i < size; i++) {
613 /* if 0, go to next byte */
614 if (!mask[i])
615 continue;
616
617 /* We know there is at least one set bit in this byte because of
618 * the above check; if we already have found 'max' number of
619 * bits set, then we can return failure now.
620 */
621 if (count == max)
622 return false;
623
624 /* count the bits in this byte, checking threshold */
625 count += hweight8(mask[i]);
626 if (count > max)
627 return false;
628 }
629
630 return true;
631 }
632
633 /**
634 * ice_set_key - generate a variable sized key with multiples of 16-bits
635 * @key: pointer to where the key will be stored
636 * @size: the size of the complete key in bytes (must be even)
637 * @val: array of 8-bit values that makes up the value portion of the key
638 * @upd: array of 8-bit masks that determine what key portion to update
639 * @dc: array of 8-bit masks that make up the don't care mask
640 * @nm: array of 8-bit masks that make up the never match mask
641 * @off: the offset of the first byte in the key to update
642 * @len: the number of bytes in the key update
643 *
644 * This function generates a key from a value, a don't care mask and a never
645 * match mask.
646 * upd, dc, and nm are optional parameters, and can be NULL:
647 * upd == NULL --> upd mask is all 1's (update all bits)
648 * dc == NULL --> dc mask is all 0's (no don't care bits)
649 * nm == NULL --> nm mask is all 0's (no never match bits)
650 */
651 static enum ice_status
652 ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
653 u16 len)
654 {
655 u16 half_size;
656 u16 i;
657
658 /* size must be a multiple of 2 bytes. */
659 if (size % 2)
660 return ICE_ERR_CFG;
661
662 half_size = size / 2;
663 if (off + len > half_size)
664 return ICE_ERR_CFG;
665
666 /* Make sure at most one bit is set in the never match mask. Having more
667 * than one never match mask bit set will cause HW to consume excessive
668 * power otherwise; this is a power management efficiency check.
669 */
670 #define ICE_NVR_MTCH_BITS_MAX 1
671 if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX))
672 return ICE_ERR_CFG;
673
674 for (i = 0; i < len; i++)
675 if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff,
676 dc ? dc[i] : 0, nm ? nm[i] : 0,
677 key + off + i, key + half_size + off + i))
678 return ICE_ERR_CFG;
679
680 return 0;
681 }
682
683 /**
684 * ice_acquire_global_cfg_lock
685 * @hw: pointer to the HW structure
686 * @access: access type (read or write)
687 *
688 * This function will request ownership of the global config lock for reading
689 * or writing of the package. When attempting to obtain write access, the
690 * caller must check for the following two return values:
691 *
692 * ICE_SUCCESS - Means the caller has acquired the global config lock
693 * and can perform writing of the package.
694 * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the
695 * package or has found that no update was necessary; in
696 * this case, the caller can just skip performing any
697 * update of the package.
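 *
 * A rough sketch of the expected write-access handling (ice_dwnld_cfg_bufs()
 * below is the real caller; this is illustration only):
 *
 *	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
 *	if (status == ICE_ERR_AQ_NO_WORK)
 *		... package already written, skip the download ...
 *	else if (status)
 *		... genuine failure, bail out ...
 *	else
 *		... write the package, then ice_release_global_cfg_lock(hw) ...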
698 */ 699 static enum ice_status 700 ice_acquire_global_cfg_lock(struct ice_hw *hw, 701 enum ice_aq_res_access_type access) 702 { 703 enum ice_status status; 704 705 status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access, 706 ICE_GLOBAL_CFG_LOCK_TIMEOUT); 707 708 if (!status) 709 mutex_lock(&ice_global_cfg_lock_sw); 710 else if (status == ICE_ERR_AQ_NO_WORK) 711 ice_debug(hw, ICE_DBG_PKG, 712 "Global config lock: No work to do\n"); 713 714 return status; 715 } 716 717 /** 718 * ice_release_global_cfg_lock 719 * @hw: pointer to the HW structure 720 * 721 * This function will release the global config lock. 722 */ 723 static void ice_release_global_cfg_lock(struct ice_hw *hw) 724 { 725 mutex_unlock(&ice_global_cfg_lock_sw); 726 ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID); 727 } 728 729 /** 730 * ice_acquire_change_lock 731 * @hw: pointer to the HW structure 732 * @access: access type (read or write) 733 * 734 * This function will request ownership of the change lock. 735 */ 736 static enum ice_status 737 ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access) 738 { 739 return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access, 740 ICE_CHANGE_LOCK_TIMEOUT); 741 } 742 743 /** 744 * ice_release_change_lock 745 * @hw: pointer to the HW structure 746 * 747 * This function will release the change lock using the proper Admin Command. 748 */ 749 static void ice_release_change_lock(struct ice_hw *hw) 750 { 751 ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID); 752 } 753 754 /** 755 * ice_aq_download_pkg 756 * @hw: pointer to the hardware structure 757 * @pkg_buf: the package buffer to transfer 758 * @buf_size: the size of the package buffer 759 * @last_buf: last buffer indicator 760 * @error_offset: returns error offset 761 * @error_info: returns error information 762 * @cd: pointer to command details structure or NULL 763 * 764 * Download Package (0x0C40) 765 */ 766 static enum ice_status 767 ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, 768 u16 buf_size, bool last_buf, u32 *error_offset, 769 u32 *error_info, struct ice_sq_cd *cd) 770 { 771 struct ice_aqc_download_pkg *cmd; 772 struct ice_aq_desc desc; 773 enum ice_status status; 774 775 if (error_offset) 776 *error_offset = 0; 777 if (error_info) 778 *error_info = 0; 779 780 cmd = &desc.params.download_pkg; 781 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg); 782 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 783 784 if (last_buf) 785 cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; 786 787 status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); 788 if (status == ICE_ERR_AQ_ERROR) { 789 /* Read error from buffer only when the FW returned an error */ 790 struct ice_aqc_download_pkg_resp *resp; 791 792 resp = (struct ice_aqc_download_pkg_resp *)pkg_buf; 793 if (error_offset) 794 *error_offset = le32_to_cpu(resp->error_offset); 795 if (error_info) 796 *error_info = le32_to_cpu(resp->error_info); 797 } 798 799 return status; 800 } 801 802 /** 803 * ice_aq_update_pkg 804 * @hw: pointer to the hardware structure 805 * @pkg_buf: the package cmd buffer 806 * @buf_size: the size of the package cmd buffer 807 * @last_buf: last buffer indicator 808 * @error_offset: returns error offset 809 * @error_info: returns error information 810 * @cd: pointer to command details structure or NULL 811 * 812 * Update Package (0x0C42) 813 */ 814 static enum ice_status 815 ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size, 816 bool last_buf, u32 *error_offset, u32 *error_info, 817 
struct ice_sq_cd *cd) 818 { 819 struct ice_aqc_download_pkg *cmd; 820 struct ice_aq_desc desc; 821 enum ice_status status; 822 823 if (error_offset) 824 *error_offset = 0; 825 if (error_info) 826 *error_info = 0; 827 828 cmd = &desc.params.download_pkg; 829 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg); 830 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 831 832 if (last_buf) 833 cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; 834 835 status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); 836 if (status == ICE_ERR_AQ_ERROR) { 837 /* Read error from buffer only when the FW returned an error */ 838 struct ice_aqc_download_pkg_resp *resp; 839 840 resp = (struct ice_aqc_download_pkg_resp *)pkg_buf; 841 if (error_offset) 842 *error_offset = le32_to_cpu(resp->error_offset); 843 if (error_info) 844 *error_info = le32_to_cpu(resp->error_info); 845 } 846 847 return status; 848 } 849 850 /** 851 * ice_find_seg_in_pkg 852 * @hw: pointer to the hardware structure 853 * @seg_type: the segment type to search for (i.e., SEGMENT_TYPE_CPK) 854 * @pkg_hdr: pointer to the package header to be searched 855 * 856 * This function searches a package file for a particular segment type. On 857 * success it returns a pointer to the segment header, otherwise it will 858 * return NULL. 859 */ 860 static struct ice_generic_seg_hdr * 861 ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type, 862 struct ice_pkg_hdr *pkg_hdr) 863 { 864 u32 i; 865 866 ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n", 867 pkg_hdr->format_ver.major, pkg_hdr->format_ver.minor, 868 pkg_hdr->format_ver.update, pkg_hdr->format_ver.draft); 869 870 /* Search all package segments for the requested segment type */ 871 for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) { 872 struct ice_generic_seg_hdr *seg; 873 874 seg = (struct ice_generic_seg_hdr *) 875 ((u8 *)pkg_hdr + le32_to_cpu(pkg_hdr->seg_offset[i])); 876 877 if (le32_to_cpu(seg->seg_type) == seg_type) 878 return seg; 879 } 880 881 return NULL; 882 } 883 884 /** 885 * ice_update_pkg 886 * @hw: pointer to the hardware structure 887 * @bufs: pointer to an array of buffers 888 * @count: the number of buffers in the array 889 * 890 * Obtains change lock and updates package. 891 */ 892 static enum ice_status 893 ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count) 894 { 895 enum ice_status status; 896 u32 offset, info, i; 897 898 status = ice_acquire_change_lock(hw, ICE_RES_WRITE); 899 if (status) 900 return status; 901 902 for (i = 0; i < count; i++) { 903 struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i); 904 bool last = ((i + 1) == count); 905 906 status = ice_aq_update_pkg(hw, bh, le16_to_cpu(bh->data_end), 907 last, &offset, &info, NULL); 908 909 if (status) { 910 ice_debug(hw, ICE_DBG_PKG, 911 "Update pkg failed: err %d off %d inf %d\n", 912 status, offset, info); 913 break; 914 } 915 } 916 917 ice_release_change_lock(hw); 918 919 return status; 920 } 921 922 /** 923 * ice_dwnld_cfg_bufs 924 * @hw: pointer to the hardware structure 925 * @bufs: pointer to an array of buffers 926 * @count: the number of buffers in the array 927 * 928 * Obtains global config lock and downloads the package configuration buffers 929 * to the firmware. Metadata buffers are skipped, and the first metadata buffer 930 * found indicates that the rest of the buffers are all metadata buffers. 
931 */ 932 static enum ice_status 933 ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count) 934 { 935 enum ice_status status; 936 struct ice_buf_hdr *bh; 937 u32 offset, info, i; 938 939 if (!bufs || !count) 940 return ICE_ERR_PARAM; 941 942 /* If the first buffer's first section has its metadata bit set 943 * then there are no buffers to be downloaded, and the operation is 944 * considered a success. 945 */ 946 bh = (struct ice_buf_hdr *)bufs; 947 if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF) 948 return 0; 949 950 /* reset pkg_dwnld_status in case this function is called in the 951 * reset/rebuild flow 952 */ 953 hw->pkg_dwnld_status = ICE_AQ_RC_OK; 954 955 status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE); 956 if (status) { 957 if (status == ICE_ERR_AQ_NO_WORK) 958 hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST; 959 else 960 hw->pkg_dwnld_status = hw->adminq.sq_last_status; 961 return status; 962 } 963 964 for (i = 0; i < count; i++) { 965 bool last = ((i + 1) == count); 966 967 if (!last) { 968 /* check next buffer for metadata flag */ 969 bh = (struct ice_buf_hdr *)(bufs + i + 1); 970 971 /* A set metadata flag in the next buffer will signal 972 * that the current buffer will be the last buffer 973 * downloaded 974 */ 975 if (le16_to_cpu(bh->section_count)) 976 if (le32_to_cpu(bh->section_entry[0].type) & 977 ICE_METADATA_BUF) 978 last = true; 979 } 980 981 bh = (struct ice_buf_hdr *)(bufs + i); 982 983 status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last, 984 &offset, &info, NULL); 985 986 /* Save AQ status from download package */ 987 hw->pkg_dwnld_status = hw->adminq.sq_last_status; 988 if (status) { 989 ice_debug(hw, ICE_DBG_PKG, 990 "Pkg download failed: err %d off %d inf %d\n", 991 status, offset, info); 992 993 break; 994 } 995 996 if (last) 997 break; 998 } 999 1000 ice_release_global_cfg_lock(hw); 1001 1002 return status; 1003 } 1004 1005 /** 1006 * ice_aq_get_pkg_info_list 1007 * @hw: pointer to the hardware structure 1008 * @pkg_info: the buffer which will receive the information list 1009 * @buf_size: the size of the pkg_info information buffer 1010 * @cd: pointer to command details structure or NULL 1011 * 1012 * Get Package Info List (0x0C43) 1013 */ 1014 static enum ice_status 1015 ice_aq_get_pkg_info_list(struct ice_hw *hw, 1016 struct ice_aqc_get_pkg_info_resp *pkg_info, 1017 u16 buf_size, struct ice_sq_cd *cd) 1018 { 1019 struct ice_aq_desc desc; 1020 1021 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list); 1022 1023 return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd); 1024 } 1025 1026 /** 1027 * ice_download_pkg 1028 * @hw: pointer to the hardware structure 1029 * @ice_seg: pointer to the segment of the package to be downloaded 1030 * 1031 * Handles the download of a complete package. 
1032 */ 1033 static enum ice_status 1034 ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg) 1035 { 1036 struct ice_buf_table *ice_buf_tbl; 1037 1038 ice_debug(hw, ICE_DBG_PKG, "Segment version: %d.%d.%d.%d\n", 1039 ice_seg->hdr.seg_ver.major, ice_seg->hdr.seg_ver.minor, 1040 ice_seg->hdr.seg_ver.update, ice_seg->hdr.seg_ver.draft); 1041 1042 ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n", 1043 le32_to_cpu(ice_seg->hdr.seg_type), 1044 le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_name); 1045 1046 ice_buf_tbl = ice_find_buf_table(ice_seg); 1047 1048 ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n", 1049 le32_to_cpu(ice_buf_tbl->buf_count)); 1050 1051 return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array, 1052 le32_to_cpu(ice_buf_tbl->buf_count)); 1053 } 1054 1055 /** 1056 * ice_init_pkg_info 1057 * @hw: pointer to the hardware structure 1058 * @pkg_hdr: pointer to the driver's package hdr 1059 * 1060 * Saves off the package details into the HW structure. 1061 */ 1062 static enum ice_status 1063 ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) 1064 { 1065 struct ice_global_metadata_seg *meta_seg; 1066 struct ice_generic_seg_hdr *seg_hdr; 1067 1068 if (!pkg_hdr) 1069 return ICE_ERR_PARAM; 1070 1071 meta_seg = (struct ice_global_metadata_seg *) 1072 ice_find_seg_in_pkg(hw, SEGMENT_TYPE_METADATA, pkg_hdr); 1073 if (meta_seg) { 1074 hw->pkg_ver = meta_seg->pkg_ver; 1075 memcpy(hw->pkg_name, meta_seg->pkg_name, sizeof(hw->pkg_name)); 1076 1077 ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n", 1078 meta_seg->pkg_ver.major, meta_seg->pkg_ver.minor, 1079 meta_seg->pkg_ver.update, meta_seg->pkg_ver.draft, 1080 meta_seg->pkg_name); 1081 } else { 1082 ice_debug(hw, ICE_DBG_INIT, 1083 "Did not find metadata segment in driver package\n"); 1084 return ICE_ERR_CFG; 1085 } 1086 1087 seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr); 1088 if (seg_hdr) { 1089 hw->ice_pkg_ver = seg_hdr->seg_ver; 1090 memcpy(hw->ice_pkg_name, seg_hdr->seg_name, 1091 sizeof(hw->ice_pkg_name)); 1092 1093 ice_debug(hw, ICE_DBG_PKG, "Ice Pkg: %d.%d.%d.%d, %s\n", 1094 seg_hdr->seg_ver.major, seg_hdr->seg_ver.minor, 1095 seg_hdr->seg_ver.update, seg_hdr->seg_ver.draft, 1096 seg_hdr->seg_name); 1097 } else { 1098 ice_debug(hw, ICE_DBG_INIT, 1099 "Did not find ice segment in driver package\n"); 1100 return ICE_ERR_CFG; 1101 } 1102 1103 return 0; 1104 } 1105 1106 /** 1107 * ice_get_pkg_info 1108 * @hw: pointer to the hardware structure 1109 * 1110 * Store details of the package currently loaded in HW into the HW structure. 
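 *
 * For each package reported by firmware, the debug trace below tags the entry
 * with single-letter flags: 'A' (currently active), 'B' (active at boot),
 * 'M' (modified) and 'N' (present in NVM).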
1111 */ 1112 static enum ice_status ice_get_pkg_info(struct ice_hw *hw) 1113 { 1114 struct ice_aqc_get_pkg_info_resp *pkg_info; 1115 enum ice_status status; 1116 u16 size; 1117 u32 i; 1118 1119 size = sizeof(*pkg_info) + (sizeof(pkg_info->pkg_info[0]) * 1120 (ICE_PKG_CNT - 1)); 1121 pkg_info = kzalloc(size, GFP_KERNEL); 1122 if (!pkg_info) 1123 return ICE_ERR_NO_MEMORY; 1124 1125 status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL); 1126 if (status) 1127 goto init_pkg_free_alloc; 1128 1129 for (i = 0; i < le32_to_cpu(pkg_info->count); i++) { 1130 #define ICE_PKG_FLAG_COUNT 4 1131 char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 }; 1132 u8 place = 0; 1133 1134 if (pkg_info->pkg_info[i].is_active) { 1135 flags[place++] = 'A'; 1136 hw->active_pkg_ver = pkg_info->pkg_info[i].ver; 1137 memcpy(hw->active_pkg_name, 1138 pkg_info->pkg_info[i].name, 1139 sizeof(hw->active_pkg_name)); 1140 hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm; 1141 } 1142 if (pkg_info->pkg_info[i].is_active_at_boot) 1143 flags[place++] = 'B'; 1144 if (pkg_info->pkg_info[i].is_modified) 1145 flags[place++] = 'M'; 1146 if (pkg_info->pkg_info[i].is_in_nvm) 1147 flags[place++] = 'N'; 1148 1149 ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n", 1150 i, pkg_info->pkg_info[i].ver.major, 1151 pkg_info->pkg_info[i].ver.minor, 1152 pkg_info->pkg_info[i].ver.update, 1153 pkg_info->pkg_info[i].ver.draft, 1154 pkg_info->pkg_info[i].name, flags); 1155 } 1156 1157 init_pkg_free_alloc: 1158 kfree(pkg_info); 1159 1160 return status; 1161 } 1162 1163 /** 1164 * ice_verify_pkg - verify package 1165 * @pkg: pointer to the package buffer 1166 * @len: size of the package buffer 1167 * 1168 * Verifies various attributes of the package file, including length, format 1169 * version, and the requirement of at least one segment. 1170 */ 1171 static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len) 1172 { 1173 u32 seg_count; 1174 u32 i; 1175 1176 if (len < sizeof(*pkg)) 1177 return ICE_ERR_BUF_TOO_SHORT; 1178 1179 if (pkg->format_ver.major != ICE_PKG_FMT_VER_MAJ || 1180 pkg->format_ver.minor != ICE_PKG_FMT_VER_MNR || 1181 pkg->format_ver.update != ICE_PKG_FMT_VER_UPD || 1182 pkg->format_ver.draft != ICE_PKG_FMT_VER_DFT) 1183 return ICE_ERR_CFG; 1184 1185 /* pkg must have at least one segment */ 1186 seg_count = le32_to_cpu(pkg->seg_count); 1187 if (seg_count < 1) 1188 return ICE_ERR_CFG; 1189 1190 /* make sure segment array fits in package length */ 1191 if (len < sizeof(*pkg) + ((seg_count - 1) * sizeof(pkg->seg_offset))) 1192 return ICE_ERR_BUF_TOO_SHORT; 1193 1194 /* all segments must fit within length */ 1195 for (i = 0; i < seg_count; i++) { 1196 u32 off = le32_to_cpu(pkg->seg_offset[i]); 1197 struct ice_generic_seg_hdr *seg; 1198 1199 /* segment header must fit */ 1200 if (len < off + sizeof(*seg)) 1201 return ICE_ERR_BUF_TOO_SHORT; 1202 1203 seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off); 1204 1205 /* segment body must fit */ 1206 if (len < off + le32_to_cpu(seg->seg_size)) 1207 return ICE_ERR_BUF_TOO_SHORT; 1208 } 1209 1210 return 0; 1211 } 1212 1213 /** 1214 * ice_free_seg - free package segment pointer 1215 * @hw: pointer to the hardware structure 1216 * 1217 * Frees the package segment pointer in the proper manner, depending on if the 1218 * segment was allocated or just the passed in pointer was stored. 
1219 */ 1220 void ice_free_seg(struct ice_hw *hw) 1221 { 1222 if (hw->pkg_copy) { 1223 devm_kfree(ice_hw_to_dev(hw), hw->pkg_copy); 1224 hw->pkg_copy = NULL; 1225 hw->pkg_size = 0; 1226 } 1227 hw->seg = NULL; 1228 } 1229 1230 /** 1231 * ice_init_pkg_regs - initialize additional package registers 1232 * @hw: pointer to the hardware structure 1233 */ 1234 static void ice_init_pkg_regs(struct ice_hw *hw) 1235 { 1236 #define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF 1237 #define ICE_SW_BLK_INP_MASK_H 0x0000FFFF 1238 #define ICE_SW_BLK_IDX 0 1239 1240 /* setup Switch block input mask, which is 48-bits in two parts */ 1241 wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L); 1242 wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H); 1243 } 1244 1245 /** 1246 * ice_chk_pkg_version - check package version for compatibility with driver 1247 * @pkg_ver: pointer to a version structure to check 1248 * 1249 * Check to make sure that the package about to be downloaded is compatible with 1250 * the driver. To be compatible, the major and minor components of the package 1251 * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR 1252 * definitions. 1253 */ 1254 static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver) 1255 { 1256 if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ || 1257 pkg_ver->minor != ICE_PKG_SUPP_VER_MNR) 1258 return ICE_ERR_NOT_SUPPORTED; 1259 1260 return 0; 1261 } 1262 1263 /** 1264 * ice_init_pkg - initialize/download package 1265 * @hw: pointer to the hardware structure 1266 * @buf: pointer to the package buffer 1267 * @len: size of the package buffer 1268 * 1269 * This function initializes a package. The package contains HW tables 1270 * required to do packet processing. First, the function extracts package 1271 * information such as version. Then it finds the ice configuration segment 1272 * within the package; this function then saves a copy of the segment pointer 1273 * within the supplied package buffer. Next, the function will cache any hints 1274 * from the package, followed by downloading the package itself. Note, that if 1275 * a previous PF driver has already downloaded the package successfully, then 1276 * the current driver will not have to download the package again. 1277 * 1278 * The local package contents will be used to query default behavior and to 1279 * update specific sections of the HW's version of the package (e.g. to update 1280 * the parse graph to understand new protocols). 1281 * 1282 * This function stores a pointer to the package buffer memory, and it is 1283 * expected that the supplied buffer will not be freed immediately. If the 1284 * package buffer needs to be freed, such as when read from a file, use 1285 * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this 1286 * case. 
1287 */ 1288 enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) 1289 { 1290 struct ice_pkg_hdr *pkg; 1291 enum ice_status status; 1292 struct ice_seg *seg; 1293 1294 if (!buf || !len) 1295 return ICE_ERR_PARAM; 1296 1297 pkg = (struct ice_pkg_hdr *)buf; 1298 status = ice_verify_pkg(pkg, len); 1299 if (status) { 1300 ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n", 1301 status); 1302 return status; 1303 } 1304 1305 /* initialize package info */ 1306 status = ice_init_pkg_info(hw, pkg); 1307 if (status) 1308 return status; 1309 1310 /* before downloading the package, check package version for 1311 * compatibility with driver 1312 */ 1313 status = ice_chk_pkg_version(&hw->pkg_ver); 1314 if (status) 1315 return status; 1316 1317 /* find segment in given package */ 1318 seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg); 1319 if (!seg) { 1320 ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n"); 1321 return ICE_ERR_CFG; 1322 } 1323 1324 /* initialize package hints and then download package */ 1325 ice_init_pkg_hints(hw, seg); 1326 status = ice_download_pkg(hw, seg); 1327 if (status == ICE_ERR_AQ_NO_WORK) { 1328 ice_debug(hw, ICE_DBG_INIT, 1329 "package previously loaded - no work.\n"); 1330 status = 0; 1331 } 1332 1333 /* Get information on the package currently loaded in HW, then make sure 1334 * the driver is compatible with this version. 1335 */ 1336 if (!status) { 1337 status = ice_get_pkg_info(hw); 1338 if (!status) 1339 status = ice_chk_pkg_version(&hw->active_pkg_ver); 1340 } 1341 1342 if (!status) { 1343 hw->seg = seg; 1344 /* on successful package download update other required 1345 * registers to support the package and fill HW tables 1346 * with package content. 1347 */ 1348 ice_init_pkg_regs(hw); 1349 ice_fill_blk_tbls(hw); 1350 } else { 1351 ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n", 1352 status); 1353 } 1354 1355 return status; 1356 } 1357 1358 /** 1359 * ice_copy_and_init_pkg - initialize/download a copy of the package 1360 * @hw: pointer to the hardware structure 1361 * @buf: pointer to the package buffer 1362 * @len: size of the package buffer 1363 * 1364 * This function copies the package buffer, and then calls ice_init_pkg() to 1365 * initialize the copied package contents. 1366 * 1367 * The copying is necessary if the package buffer supplied is constant, or if 1368 * the memory may disappear shortly after calling this function. 1369 * 1370 * If the package buffer resides in the data segment and can be modified, the 1371 * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg(). 1372 * 1373 * However, if the package buffer needs to be copied first, such as when being 1374 * read from a file, the caller should use ice_copy_and_init_pkg(). 1375 * 1376 * This function will first copy the package buffer, before calling 1377 * ice_init_pkg(). The caller is free to immediately destroy the original 1378 * package buffer, as the new copy will be managed by this function and 1379 * related routines. 
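 *
 * For instance, a package image obtained via request_firmware() is owned by
 * the firmware loader and is released again afterwards, so it must go through
 * this routine rather than ice_init_pkg(). A hedged sketch of such a caller
 * (the actual call site lives in the driver's probe path):
 *
 *	ice_copy_and_init_pkg(hw, firmware->data, firmware->size);
 *	release_firmware(firmware);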
1380 */ 1381 enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len) 1382 { 1383 enum ice_status status; 1384 u8 *buf_copy; 1385 1386 if (!buf || !len) 1387 return ICE_ERR_PARAM; 1388 1389 buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL); 1390 1391 status = ice_init_pkg(hw, buf_copy, len); 1392 if (status) { 1393 /* Free the copy, since we failed to initialize the package */ 1394 devm_kfree(ice_hw_to_dev(hw), buf_copy); 1395 } else { 1396 /* Track the copied pkg so we can free it later */ 1397 hw->pkg_copy = buf_copy; 1398 hw->pkg_size = len; 1399 } 1400 1401 return status; 1402 } 1403 1404 /** 1405 * ice_pkg_buf_alloc 1406 * @hw: pointer to the HW structure 1407 * 1408 * Allocates a package buffer and returns a pointer to the buffer header. 1409 * Note: all package contents must be in Little Endian form. 1410 */ 1411 static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw) 1412 { 1413 struct ice_buf_build *bld; 1414 struct ice_buf_hdr *buf; 1415 1416 bld = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*bld), GFP_KERNEL); 1417 if (!bld) 1418 return NULL; 1419 1420 buf = (struct ice_buf_hdr *)bld; 1421 buf->data_end = cpu_to_le16(offsetof(struct ice_buf_hdr, 1422 section_entry)); 1423 return bld; 1424 } 1425 1426 /** 1427 * ice_pkg_buf_free 1428 * @hw: pointer to the HW structure 1429 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) 1430 * 1431 * Frees a package buffer 1432 */ 1433 static void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld) 1434 { 1435 devm_kfree(ice_hw_to_dev(hw), bld); 1436 } 1437 1438 /** 1439 * ice_pkg_buf_reserve_section 1440 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) 1441 * @count: the number of sections to reserve 1442 * 1443 * Reserves one or more section table entries in a package buffer. This routine 1444 * can be called multiple times as long as they are made before calling 1445 * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() 1446 * is called once, the number of sections that can be allocated will not be able 1447 * to be increased; not using all reserved sections is fine, but this will 1448 * result in some wasted space in the buffer. 1449 * Note: all package contents must be in Little Endian form. 1450 */ 1451 static enum ice_status 1452 ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count) 1453 { 1454 struct ice_buf_hdr *buf; 1455 u16 section_count; 1456 u16 data_end; 1457 1458 if (!bld) 1459 return ICE_ERR_PARAM; 1460 1461 buf = (struct ice_buf_hdr *)&bld->buf; 1462 1463 /* already an active section, can't increase table size */ 1464 section_count = le16_to_cpu(buf->section_count); 1465 if (section_count > 0) 1466 return ICE_ERR_CFG; 1467 1468 if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT) 1469 return ICE_ERR_CFG; 1470 bld->reserved_section_table_entries += count; 1471 1472 data_end = le16_to_cpu(buf->data_end) + 1473 (count * sizeof(buf->section_entry[0])); 1474 buf->data_end = cpu_to_le16(data_end); 1475 1476 return 0; 1477 } 1478 1479 /** 1480 * ice_pkg_buf_alloc_section 1481 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) 1482 * @type: the section type value 1483 * @size: the size of the section to reserve (in bytes) 1484 * 1485 * Reserves memory in the buffer for a section's content and updates the 1486 * buffers' status accordingly. This routine returns a pointer to the first 1487 * byte of the section start within the buffer, which is used to fill in the 1488 * section contents. 
1489 * Note: all package contents must be in Little Endian form. 1490 */ 1491 static void * 1492 ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size) 1493 { 1494 struct ice_buf_hdr *buf; 1495 u16 sect_count; 1496 u16 data_end; 1497 1498 if (!bld || !type || !size) 1499 return NULL; 1500 1501 buf = (struct ice_buf_hdr *)&bld->buf; 1502 1503 /* check for enough space left in buffer */ 1504 data_end = le16_to_cpu(buf->data_end); 1505 1506 /* section start must align on 4 byte boundary */ 1507 data_end = ALIGN(data_end, 4); 1508 1509 if ((data_end + size) > ICE_MAX_S_DATA_END) 1510 return NULL; 1511 1512 /* check for more available section table entries */ 1513 sect_count = le16_to_cpu(buf->section_count); 1514 if (sect_count < bld->reserved_section_table_entries) { 1515 void *section_ptr = ((u8 *)buf) + data_end; 1516 1517 buf->section_entry[sect_count].offset = cpu_to_le16(data_end); 1518 buf->section_entry[sect_count].size = cpu_to_le16(size); 1519 buf->section_entry[sect_count].type = cpu_to_le32(type); 1520 1521 data_end += size; 1522 buf->data_end = cpu_to_le16(data_end); 1523 1524 buf->section_count = cpu_to_le16(sect_count + 1); 1525 return section_ptr; 1526 } 1527 1528 /* no free section table entries */ 1529 return NULL; 1530 } 1531 1532 /** 1533 * ice_pkg_buf_get_active_sections 1534 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) 1535 * 1536 * Returns the number of active sections. Before using the package buffer 1537 * in an update package command, the caller should make sure that there is at 1538 * least one active section - otherwise, the buffer is not legal and should 1539 * not be used. 1540 * Note: all package contents must be in Little Endian form. 1541 */ 1542 static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld) 1543 { 1544 struct ice_buf_hdr *buf; 1545 1546 if (!bld) 1547 return 0; 1548 1549 buf = (struct ice_buf_hdr *)&bld->buf; 1550 return le16_to_cpu(buf->section_count); 1551 } 1552 1553 /** 1554 * ice_pkg_buf 1555 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) 1556 * 1557 * Return a pointer to the buffer's header 1558 */ 1559 static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld) 1560 { 1561 if (!bld) 1562 return NULL; 1563 1564 return &bld->buf; 1565 } 1566 1567 /** 1568 * ice_tunnel_port_in_use_hlpr - helper function to determine tunnel usage 1569 * @hw: pointer to the HW structure 1570 * @port: port to search for 1571 * @index: optionally returns index 1572 * 1573 * Returns whether a port is already in use as a tunnel, and optionally its 1574 * index 1575 */ 1576 static bool ice_tunnel_port_in_use_hlpr(struct ice_hw *hw, u16 port, u16 *index) 1577 { 1578 u16 i; 1579 1580 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) 1581 if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) { 1582 if (index) 1583 *index = i; 1584 return true; 1585 } 1586 1587 return false; 1588 } 1589 1590 /** 1591 * ice_tunnel_port_in_use 1592 * @hw: pointer to the HW structure 1593 * @port: port to search for 1594 * @index: optionally returns index 1595 * 1596 * Returns whether a port is already in use as a tunnel, and optionally its 1597 * index 1598 */ 1599 bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index) 1600 { 1601 bool res; 1602 1603 mutex_lock(&hw->tnl_lock); 1604 res = ice_tunnel_port_in_use_hlpr(hw, port, index); 1605 mutex_unlock(&hw->tnl_lock); 1606 1607 return res; 1608 } 1609 1610 /** 1611 * ice_find_free_tunnel_entry 1612 * @hw: pointer to the HW structure 1613 * 
@type: tunnel type 1614 * @index: optionally returns index 1615 * 1616 * Returns whether there is a free tunnel entry, and optionally its index 1617 */ 1618 static bool 1619 ice_find_free_tunnel_entry(struct ice_hw *hw, enum ice_tunnel_type type, 1620 u16 *index) 1621 { 1622 u16 i; 1623 1624 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) 1625 if (hw->tnl.tbl[i].valid && !hw->tnl.tbl[i].in_use && 1626 hw->tnl.tbl[i].type == type) { 1627 if (index) 1628 *index = i; 1629 return true; 1630 } 1631 1632 return false; 1633 } 1634 1635 /** 1636 * ice_create_tunnel 1637 * @hw: pointer to the HW structure 1638 * @type: type of tunnel 1639 * @port: port of tunnel to create 1640 * 1641 * Create a tunnel by updating the parse graph in the parser. We do that by 1642 * creating a package buffer with the tunnel info and issuing an update package 1643 * command. 1644 */ 1645 enum ice_status 1646 ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port) 1647 { 1648 struct ice_boost_tcam_section *sect_rx, *sect_tx; 1649 enum ice_status status = ICE_ERR_MAX_LIMIT; 1650 struct ice_buf_build *bld; 1651 u16 index; 1652 1653 mutex_lock(&hw->tnl_lock); 1654 1655 if (ice_tunnel_port_in_use_hlpr(hw, port, &index)) { 1656 hw->tnl.tbl[index].ref++; 1657 status = 0; 1658 goto ice_create_tunnel_end; 1659 } 1660 1661 if (!ice_find_free_tunnel_entry(hw, type, &index)) { 1662 status = ICE_ERR_OUT_OF_RANGE; 1663 goto ice_create_tunnel_end; 1664 } 1665 1666 bld = ice_pkg_buf_alloc(hw); 1667 if (!bld) { 1668 status = ICE_ERR_NO_MEMORY; 1669 goto ice_create_tunnel_end; 1670 } 1671 1672 /* allocate 2 sections, one for Rx parser, one for Tx parser */ 1673 if (ice_pkg_buf_reserve_section(bld, 2)) 1674 goto ice_create_tunnel_err; 1675 1676 sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM, 1677 sizeof(*sect_rx)); 1678 if (!sect_rx) 1679 goto ice_create_tunnel_err; 1680 sect_rx->count = cpu_to_le16(1); 1681 1682 sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM, 1683 sizeof(*sect_tx)); 1684 if (!sect_tx) 1685 goto ice_create_tunnel_err; 1686 sect_tx->count = cpu_to_le16(1); 1687 1688 /* copy original boost entry to update package buffer */ 1689 memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry, 1690 sizeof(*sect_rx->tcam)); 1691 1692 /* over-write the never-match dest port key bits with the encoded port 1693 * bits 1694 */ 1695 ice_set_key((u8 *)§_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key), 1696 (u8 *)&port, NULL, NULL, NULL, 1697 (u16)offsetof(struct ice_boost_key_value, hv_dst_port_key), 1698 sizeof(sect_rx->tcam[0].key.key.hv_dst_port_key)); 1699 1700 /* exact copy of entry to Tx section entry */ 1701 memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam)); 1702 1703 status = ice_update_pkg(hw, ice_pkg_buf(bld), 1); 1704 if (!status) { 1705 hw->tnl.tbl[index].port = port; 1706 hw->tnl.tbl[index].in_use = true; 1707 hw->tnl.tbl[index].ref = 1; 1708 } 1709 1710 ice_create_tunnel_err: 1711 ice_pkg_buf_free(hw, bld); 1712 1713 ice_create_tunnel_end: 1714 mutex_unlock(&hw->tnl_lock); 1715 1716 return status; 1717 } 1718 1719 /** 1720 * ice_destroy_tunnel 1721 * @hw: pointer to the HW structure 1722 * @port: port of tunnel to destroy (ignored if the all parameter is true) 1723 * @all: flag that states to destroy all tunnels 1724 * 1725 * Destroys a tunnel or all tunnels by creating an update package buffer 1726 * targeting the specific updates requested and then performing an update 1727 * package. 
1728 */ 1729 enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all) 1730 { 1731 struct ice_boost_tcam_section *sect_rx, *sect_tx; 1732 enum ice_status status = ICE_ERR_MAX_LIMIT; 1733 struct ice_buf_build *bld; 1734 u16 count = 0; 1735 u16 index; 1736 u16 size; 1737 u16 i; 1738 1739 mutex_lock(&hw->tnl_lock); 1740 1741 if (!all && ice_tunnel_port_in_use_hlpr(hw, port, &index)) 1742 if (hw->tnl.tbl[index].ref > 1) { 1743 hw->tnl.tbl[index].ref--; 1744 status = 0; 1745 goto ice_destroy_tunnel_end; 1746 } 1747 1748 /* determine count */ 1749 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) 1750 if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use && 1751 (all || hw->tnl.tbl[i].port == port)) 1752 count++; 1753 1754 if (!count) { 1755 status = ICE_ERR_PARAM; 1756 goto ice_destroy_tunnel_end; 1757 } 1758 1759 /* size of section - there is at least one entry */ 1760 size = struct_size(sect_rx, tcam, count - 1); 1761 1762 bld = ice_pkg_buf_alloc(hw); 1763 if (!bld) { 1764 status = ICE_ERR_NO_MEMORY; 1765 goto ice_destroy_tunnel_end; 1766 } 1767 1768 /* allocate 2 sections, one for Rx parser, one for Tx parser */ 1769 if (ice_pkg_buf_reserve_section(bld, 2)) 1770 goto ice_destroy_tunnel_err; 1771 1772 sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM, 1773 size); 1774 if (!sect_rx) 1775 goto ice_destroy_tunnel_err; 1776 sect_rx->count = cpu_to_le16(1); 1777 1778 sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM, 1779 size); 1780 if (!sect_tx) 1781 goto ice_destroy_tunnel_err; 1782 sect_tx->count = cpu_to_le16(1); 1783 1784 /* copy original boost entry to update package buffer, one copy to Rx 1785 * section, another copy to the Tx section 1786 */ 1787 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) 1788 if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use && 1789 (all || hw->tnl.tbl[i].port == port)) { 1790 memcpy(sect_rx->tcam + i, hw->tnl.tbl[i].boost_entry, 1791 sizeof(*sect_rx->tcam)); 1792 memcpy(sect_tx->tcam + i, hw->tnl.tbl[i].boost_entry, 1793 sizeof(*sect_tx->tcam)); 1794 hw->tnl.tbl[i].marked = true; 1795 } 1796 1797 status = ice_update_pkg(hw, ice_pkg_buf(bld), 1); 1798 if (!status) 1799 for (i = 0; i < hw->tnl.count && 1800 i < ICE_TUNNEL_MAX_ENTRIES; i++) 1801 if (hw->tnl.tbl[i].marked) { 1802 hw->tnl.tbl[i].ref = 0; 1803 hw->tnl.tbl[i].port = 0; 1804 hw->tnl.tbl[i].in_use = false; 1805 hw->tnl.tbl[i].marked = false; 1806 } 1807 1808 ice_destroy_tunnel_err: 1809 ice_pkg_buf_free(hw, bld); 1810 1811 ice_destroy_tunnel_end: 1812 mutex_unlock(&hw->tnl_lock); 1813 1814 return status; 1815 } 1816 1817 /* PTG Management */ 1818 1819 /** 1820 * ice_ptg_find_ptype - Search for packet type group using packet type (ptype) 1821 * @hw: pointer to the hardware structure 1822 * @blk: HW block 1823 * @ptype: the ptype to search for 1824 * @ptg: pointer to variable that receives the PTG 1825 * 1826 * This function will search the PTGs for a particular ptype, returning the 1827 * PTG ID that contains it through the PTG parameter, with the value of 1828 * ICE_DEFAULT_PTG (0) meaning it is part the default PTG. 
1829 */ 1830 static enum ice_status 1831 ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg) 1832 { 1833 if (ptype >= ICE_XLT1_CNT || !ptg) 1834 return ICE_ERR_PARAM; 1835 1836 *ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg; 1837 return 0; 1838 } 1839 1840 /** 1841 * ice_ptg_alloc_val - Allocates a new packet type group ID by value 1842 * @hw: pointer to the hardware structure 1843 * @blk: HW block 1844 * @ptg: the PTG to allocate 1845 * 1846 * This function allocates a given packet type group ID specified by the PTG 1847 * parameter. 1848 */ 1849 static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg) 1850 { 1851 hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true; 1852 } 1853 1854 /** 1855 * ice_ptg_remove_ptype - Removes ptype from a particular packet type group 1856 * @hw: pointer to the hardware structure 1857 * @blk: HW block 1858 * @ptype: the ptype to remove 1859 * @ptg: the PTG to remove the ptype from 1860 * 1861 * This function will remove the ptype from the specific PTG, and move it to 1862 * the default PTG (ICE_DEFAULT_PTG). 1863 */ 1864 static enum ice_status 1865 ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg) 1866 { 1867 struct ice_ptg_ptype **ch; 1868 struct ice_ptg_ptype *p; 1869 1870 if (ptype > ICE_XLT1_CNT - 1) 1871 return ICE_ERR_PARAM; 1872 1873 if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use) 1874 return ICE_ERR_DOES_NOT_EXIST; 1875 1876 /* Should not happen if .in_use is set, bad config */ 1877 if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype) 1878 return ICE_ERR_CFG; 1879 1880 /* find the ptype within this PTG, and bypass the link over it */ 1881 p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype; 1882 ch = &hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype; 1883 while (p) { 1884 if (ptype == (p - hw->blk[blk].xlt1.ptypes)) { 1885 *ch = p->next_ptype; 1886 break; 1887 } 1888 1889 ch = &p->next_ptype; 1890 p = p->next_ptype; 1891 } 1892 1893 hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG; 1894 hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL; 1895 1896 return 0; 1897 } 1898 1899 /** 1900 * ice_ptg_add_mv_ptype - Adds/moves ptype to a particular packet type group 1901 * @hw: pointer to the hardware structure 1902 * @blk: HW block 1903 * @ptype: the ptype to add or move 1904 * @ptg: the PTG to add or move the ptype to 1905 * 1906 * This function will either add or move a ptype to a particular PTG depending 1907 * on if the ptype is already part of another group. Note that using a 1908 * a destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the 1909 * default PTG. 1910 */ 1911 static enum ice_status 1912 ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg) 1913 { 1914 enum ice_status status; 1915 u8 original_ptg; 1916 1917 if (ptype > ICE_XLT1_CNT - 1) 1918 return ICE_ERR_PARAM; 1919 1920 if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG) 1921 return ICE_ERR_DOES_NOT_EXIST; 1922 1923 status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg); 1924 if (status) 1925 return status; 1926 1927 /* Is ptype already in the correct PTG? */ 1928 if (original_ptg == ptg) 1929 return 0; 1930 1931 /* Remove from original PTG and move back to the default PTG */ 1932 if (original_ptg != ICE_DEFAULT_PTG) 1933 ice_ptg_remove_ptype(hw, blk, ptype, original_ptg); 1934 1935 /* Moving to default PTG? 
Then we're done with this request */ 1936 if (ptg == ICE_DEFAULT_PTG) 1937 return 0; 1938 1939 /* Add ptype to PTG at beginning of list */ 1940 hw->blk[blk].xlt1.ptypes[ptype].next_ptype = 1941 hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype; 1942 hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype = 1943 &hw->blk[blk].xlt1.ptypes[ptype]; 1944 1945 hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg; 1946 hw->blk[blk].xlt1.t[ptype] = ptg; 1947 1948 return 0; 1949 } 1950 1951 /* Block / table size info */ 1952 struct ice_blk_size_details { 1953 u16 xlt1; /* # XLT1 entries */ 1954 u16 xlt2; /* # XLT2 entries */ 1955 u16 prof_tcam; /* # profile ID TCAM entries */ 1956 u16 prof_id; /* # profile IDs */ 1957 u8 prof_cdid_bits; /* # CDID one-hot bits used in key */ 1958 u16 prof_redir; /* # profile redirection entries */ 1959 u16 es; /* # extraction sequence entries */ 1960 u16 fvw; /* # field vector words */ 1961 u8 overwrite; /* overwrite existing entries allowed */ 1962 u8 reverse; /* reverse FV order */ 1963 }; 1964 1965 static const struct ice_blk_size_details blk_sizes[ICE_BLK_COUNT] = { 1966 /** 1967 * Table Definitions 1968 * XLT1 - Number of entries in XLT1 table 1969 * XLT2 - Number of entries in XLT2 table 1970 * TCAM - Number of entries Profile ID TCAM table 1971 * CDID - Control Domain ID of the hardware block 1972 * PRED - Number of entries in the Profile Redirection Table 1973 * FV - Number of entries in the Field Vector 1974 * FVW - Width (in WORDs) of the Field Vector 1975 * OVR - Overwrite existing table entries 1976 * REV - Reverse FV 1977 */ 1978 /* XLT1 , XLT2 ,TCAM, PID,CDID,PRED, FV, FVW */ 1979 /* Overwrite , Reverse FV */ 1980 /* SW */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 256, 0, 256, 256, 48, 1981 false, false }, 1982 /* ACL */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 32, 1983 false, false }, 1984 /* FD */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24, 1985 false, true }, 1986 /* RSS */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24, 1987 true, true }, 1988 /* PE */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 64, 32, 0, 32, 32, 24, 1989 false, false }, 1990 }; 1991 1992 enum ice_sid_all { 1993 ICE_SID_XLT1_OFF = 0, 1994 ICE_SID_XLT2_OFF, 1995 ICE_SID_PR_OFF, 1996 ICE_SID_PR_REDIR_OFF, 1997 ICE_SID_ES_OFF, 1998 ICE_SID_OFF_COUNT, 1999 }; 2000 2001 /* Characteristic handling */ 2002 2003 /** 2004 * ice_match_prop_lst - determine if properties of two lists match 2005 * @list1: first properties list 2006 * @list2: second properties list 2007 * 2008 * Count, cookies and the order must match in order to be considered equivalent. 
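 *
 * For example (illustrative only), a list with cookies {A, B} does not match
 * a list with cookies {B, A}: the cookies are identical but the priority
 * implied by their order is not.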
2009 */ 2010 static bool 2011 ice_match_prop_lst(struct list_head *list1, struct list_head *list2) 2012 { 2013 struct ice_vsig_prof *tmp1; 2014 struct ice_vsig_prof *tmp2; 2015 u16 chk_count = 0; 2016 u16 count = 0; 2017 2018 /* compare counts */ 2019 list_for_each_entry(tmp1, list1, list) 2020 count++; 2021 list_for_each_entry(tmp2, list2, list) 2022 chk_count++; 2023 if (!count || count != chk_count) 2024 return false; 2025 2026 tmp1 = list_first_entry(list1, struct ice_vsig_prof, list); 2027 tmp2 = list_first_entry(list2, struct ice_vsig_prof, list); 2028 2029 /* profile cookies must compare, and in the exact same order to take 2030 * into account priority 2031 */ 2032 while (count--) { 2033 if (tmp2->profile_cookie != tmp1->profile_cookie) 2034 return false; 2035 2036 tmp1 = list_next_entry(tmp1, list); 2037 tmp2 = list_next_entry(tmp2, list); 2038 } 2039 2040 return true; 2041 } 2042 2043 /* VSIG Management */ 2044 2045 /** 2046 * ice_vsig_find_vsi - find a VSIG that contains a specified VSI 2047 * @hw: pointer to the hardware structure 2048 * @blk: HW block 2049 * @vsi: VSI of interest 2050 * @vsig: pointer to receive the VSI group 2051 * 2052 * This function will lookup the VSI entry in the XLT2 list and return 2053 * the VSI group its associated with. 2054 */ 2055 static enum ice_status 2056 ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig) 2057 { 2058 if (!vsig || vsi >= ICE_MAX_VSI) 2059 return ICE_ERR_PARAM; 2060 2061 /* As long as there's a default or valid VSIG associated with the input 2062 * VSI, the functions returns a success. Any handling of VSIG will be 2063 * done by the following add, update or remove functions. 2064 */ 2065 *vsig = hw->blk[blk].xlt2.vsis[vsi].vsig; 2066 2067 return 0; 2068 } 2069 2070 /** 2071 * ice_vsig_alloc_val - allocate a new VSIG by value 2072 * @hw: pointer to the hardware structure 2073 * @blk: HW block 2074 * @vsig: the VSIG to allocate 2075 * 2076 * This function will allocate a given VSIG specified by the VSIG parameter. 2077 */ 2078 static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig) 2079 { 2080 u16 idx = vsig & ICE_VSIG_IDX_M; 2081 2082 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) { 2083 INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst); 2084 hw->blk[blk].xlt2.vsig_tbl[idx].in_use = true; 2085 } 2086 2087 return ICE_VSIG_VALUE(idx, hw->pf_id); 2088 } 2089 2090 /** 2091 * ice_vsig_alloc - Finds a free entry and allocates a new VSIG 2092 * @hw: pointer to the hardware structure 2093 * @blk: HW block 2094 * 2095 * This function will iterate through the VSIG list and mark the first 2096 * unused entry for the new VSIG entry as used and return that value. 2097 */ 2098 static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk) 2099 { 2100 u16 i; 2101 2102 for (i = 1; i < ICE_MAX_VSIGS; i++) 2103 if (!hw->blk[blk].xlt2.vsig_tbl[i].in_use) 2104 return ice_vsig_alloc_val(hw, blk, i); 2105 2106 return ICE_DEFAULT_VSIG; 2107 } 2108 2109 /** 2110 * ice_find_dup_props_vsig - find VSI group with a specified set of properties 2111 * @hw: pointer to the hardware structure 2112 * @blk: HW block 2113 * @chs: characteristic list 2114 * @vsig: returns the VSIG with the matching profiles, if found 2115 * 2116 * Each VSIG is associated with a characteristic set; i.e. all VSIs under 2117 * a group have the same characteristic set. 
To check if there exists a VSIG 2118 * which has the same characteristics as the input characteristics; this 2119 * function will iterate through the XLT2 list and return the VSIG that has a 2120 * matching configuration. In order to make sure that priorities are accounted 2121 * for, the list must match exactly, including the order in which the 2122 * characteristics are listed. 2123 */ 2124 static enum ice_status 2125 ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk, 2126 struct list_head *chs, u16 *vsig) 2127 { 2128 struct ice_xlt2 *xlt2 = &hw->blk[blk].xlt2; 2129 u16 i; 2130 2131 for (i = 0; i < xlt2->count; i++) 2132 if (xlt2->vsig_tbl[i].in_use && 2133 ice_match_prop_lst(chs, &xlt2->vsig_tbl[i].prop_lst)) { 2134 *vsig = ICE_VSIG_VALUE(i, hw->pf_id); 2135 return 0; 2136 } 2137 2138 return ICE_ERR_DOES_NOT_EXIST; 2139 } 2140 2141 /** 2142 * ice_vsig_free - free VSI group 2143 * @hw: pointer to the hardware structure 2144 * @blk: HW block 2145 * @vsig: VSIG to remove 2146 * 2147 * The function will remove all VSIs associated with the input VSIG and move 2148 * them to the DEFAULT_VSIG and mark the VSIG available. 2149 */ 2150 static enum ice_status 2151 ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig) 2152 { 2153 struct ice_vsig_prof *dtmp, *del; 2154 struct ice_vsig_vsi *vsi_cur; 2155 u16 idx; 2156 2157 idx = vsig & ICE_VSIG_IDX_M; 2158 if (idx >= ICE_MAX_VSIGS) 2159 return ICE_ERR_PARAM; 2160 2161 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) 2162 return ICE_ERR_DOES_NOT_EXIST; 2163 2164 hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false; 2165 2166 vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; 2167 /* If the VSIG has at least 1 VSI then iterate through the 2168 * list and remove the VSIs before deleting the group. 2169 */ 2170 if (vsi_cur) { 2171 /* remove all vsis associated with this VSIG XLT2 entry */ 2172 do { 2173 struct ice_vsig_vsi *tmp = vsi_cur->next_vsi; 2174 2175 vsi_cur->vsig = ICE_DEFAULT_VSIG; 2176 vsi_cur->changed = 1; 2177 vsi_cur->next_vsi = NULL; 2178 vsi_cur = tmp; 2179 } while (vsi_cur); 2180 2181 /* NULL terminate head of VSI list */ 2182 hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = NULL; 2183 } 2184 2185 /* free characteristic list */ 2186 list_for_each_entry_safe(del, dtmp, 2187 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, 2188 list) { 2189 list_del(&del->list); 2190 devm_kfree(ice_hw_to_dev(hw), del); 2191 } 2192 2193 /* if VSIG characteristic list was cleared for reset 2194 * re-initialize the list head 2195 */ 2196 INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst); 2197 2198 return 0; 2199 } 2200 2201 /** 2202 * ice_vsig_remove_vsi - remove VSI from VSIG 2203 * @hw: pointer to the hardware structure 2204 * @blk: HW block 2205 * @vsi: VSI to remove 2206 * @vsig: VSI group to remove from 2207 * 2208 * The function will remove the input VSI from its VSI group and move it 2209 * to the DEFAULT_VSIG. 
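 *
 * Illustrative call pattern (sketch only), mirroring how the add/move path
 * below first detaches a VSI from its current group:
 *
 *	u16 vsig;
 *
 *	if (!ice_vsig_find_vsi(hw, blk, vsi, &vsig) &&
 *	    vsig != ICE_DEFAULT_VSIG)
 *		ice_vsig_remove_vsi(hw, blk, vsi, vsig);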
2210 */ 2211 static enum ice_status 2212 ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) 2213 { 2214 struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt; 2215 u16 idx; 2216 2217 idx = vsig & ICE_VSIG_IDX_M; 2218 2219 if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS) 2220 return ICE_ERR_PARAM; 2221 2222 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) 2223 return ICE_ERR_DOES_NOT_EXIST; 2224 2225 /* entry already in default VSIG, don't have to remove */ 2226 if (idx == ICE_DEFAULT_VSIG) 2227 return 0; 2228 2229 vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; 2230 if (!(*vsi_head)) 2231 return ICE_ERR_CFG; 2232 2233 vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi]; 2234 vsi_cur = (*vsi_head); 2235 2236 /* iterate the VSI list, skip over the entry to be removed */ 2237 while (vsi_cur) { 2238 if (vsi_tgt == vsi_cur) { 2239 (*vsi_head) = vsi_cur->next_vsi; 2240 break; 2241 } 2242 vsi_head = &vsi_cur->next_vsi; 2243 vsi_cur = vsi_cur->next_vsi; 2244 } 2245 2246 /* verify if VSI was removed from group list */ 2247 if (!vsi_cur) 2248 return ICE_ERR_DOES_NOT_EXIST; 2249 2250 vsi_cur->vsig = ICE_DEFAULT_VSIG; 2251 vsi_cur->changed = 1; 2252 vsi_cur->next_vsi = NULL; 2253 2254 return 0; 2255 } 2256 2257 /** 2258 * ice_vsig_add_mv_vsi - add or move a VSI to a VSI group 2259 * @hw: pointer to the hardware structure 2260 * @blk: HW block 2261 * @vsi: VSI to move 2262 * @vsig: destination VSI group 2263 * 2264 * This function will move or add the input VSI to the target VSIG. 2265 * The function will find the original VSIG the VSI belongs to and 2266 * move the entry to the DEFAULT_VSIG, update the original VSIG and 2267 * then move entry to the new VSIG. 2268 */ 2269 static enum ice_status 2270 ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) 2271 { 2272 struct ice_vsig_vsi *tmp; 2273 enum ice_status status; 2274 u16 orig_vsig, idx; 2275 2276 idx = vsig & ICE_VSIG_IDX_M; 2277 2278 if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS) 2279 return ICE_ERR_PARAM; 2280 2281 /* if VSIG not in use and VSIG is not default type this VSIG 2282 * doesn't exist. 
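 * (The default VSIG is always treated as valid here, regardless of its
 * in_use flag.)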
2283 */ 2284 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use && 2285 vsig != ICE_DEFAULT_VSIG) 2286 return ICE_ERR_DOES_NOT_EXIST; 2287 2288 status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig); 2289 if (status) 2290 return status; 2291 2292 /* no update required if vsigs match */ 2293 if (orig_vsig == vsig) 2294 return 0; 2295 2296 if (orig_vsig != ICE_DEFAULT_VSIG) { 2297 /* remove entry from orig_vsig and add to default VSIG */ 2298 status = ice_vsig_remove_vsi(hw, blk, vsi, orig_vsig); 2299 if (status) 2300 return status; 2301 } 2302 2303 if (idx == ICE_DEFAULT_VSIG) 2304 return 0; 2305 2306 /* Create VSI entry and add VSIG and prop_mask values */ 2307 hw->blk[blk].xlt2.vsis[vsi].vsig = vsig; 2308 hw->blk[blk].xlt2.vsis[vsi].changed = 1; 2309 2310 /* Add new entry to the head of the VSIG list */ 2311 tmp = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; 2312 hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = 2313 &hw->blk[blk].xlt2.vsis[vsi]; 2314 hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp; 2315 hw->blk[blk].xlt2.t[vsi] = vsig; 2316 2317 return 0; 2318 } 2319 2320 /** 2321 * ice_find_prof_id - find profile ID for a given field vector 2322 * @hw: pointer to the hardware structure 2323 * @blk: HW block 2324 * @fv: field vector to search for 2325 * @prof_id: receives the profile ID 2326 */ 2327 static enum ice_status 2328 ice_find_prof_id(struct ice_hw *hw, enum ice_block blk, 2329 struct ice_fv_word *fv, u8 *prof_id) 2330 { 2331 struct ice_es *es = &hw->blk[blk].es; 2332 u16 off; 2333 u8 i; 2334 2335 for (i = 0; i < (u8)es->count; i++) { 2336 off = i * es->fvw; 2337 2338 if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv))) 2339 continue; 2340 2341 *prof_id = i; 2342 return 0; 2343 } 2344 2345 return ICE_ERR_DOES_NOT_EXIST; 2346 } 2347 2348 /** 2349 * ice_prof_id_rsrc_type - get profile ID resource type for a block type 2350 * @blk: the block type 2351 * @rsrc_type: pointer to variable to receive the resource type 2352 */ 2353 static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type) 2354 { 2355 switch (blk) { 2356 case ICE_BLK_FD: 2357 *rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID; 2358 break; 2359 case ICE_BLK_RSS: 2360 *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID; 2361 break; 2362 default: 2363 return false; 2364 } 2365 return true; 2366 } 2367 2368 /** 2369 * ice_tcam_ent_rsrc_type - get TCAM entry resource type for a block type 2370 * @blk: the block type 2371 * @rsrc_type: pointer to variable to receive the resource type 2372 */ 2373 static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type) 2374 { 2375 switch (blk) { 2376 case ICE_BLK_FD: 2377 *rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM; 2378 break; 2379 case ICE_BLK_RSS: 2380 *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM; 2381 break; 2382 default: 2383 return false; 2384 } 2385 return true; 2386 } 2387 2388 /** 2389 * ice_alloc_tcam_ent - allocate hardware TCAM entry 2390 * @hw: pointer to the HW struct 2391 * @blk: the block to allocate the TCAM for 2392 * @tcam_idx: pointer to variable to receive the TCAM entry 2393 * 2394 * This function allocates a new entry in a Profile ID TCAM for a specific 2395 * block. 
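 *
 * Typical pairing with a key write (sketch only; prof_id, ptg, vsig and the
 * mask arrays are assumed to come from the caller's context):
 *
 *	u16 tcam_idx;
 *
 *	if (!ice_alloc_tcam_ent(hw, blk, &tcam_idx))
 *		ice_tcam_write_entry(hw, blk, tcam_idx, prof_id, ptg, vsig,
 *				     0, 0, vl_msk, dc_msk, nm_msk);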
2396 */ 2397 static enum ice_status 2398 ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 *tcam_idx) 2399 { 2400 u16 res_type; 2401 2402 if (!ice_tcam_ent_rsrc_type(blk, &res_type)) 2403 return ICE_ERR_PARAM; 2404 2405 return ice_alloc_hw_res(hw, res_type, 1, true, tcam_idx); 2406 } 2407 2408 /** 2409 * ice_free_tcam_ent - free hardware TCAM entry 2410 * @hw: pointer to the HW struct 2411 * @blk: the block from which to free the TCAM entry 2412 * @tcam_idx: the TCAM entry to free 2413 * 2414 * This function frees an entry in a Profile ID TCAM for a specific block. 2415 */ 2416 static enum ice_status 2417 ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx) 2418 { 2419 u16 res_type; 2420 2421 if (!ice_tcam_ent_rsrc_type(blk, &res_type)) 2422 return ICE_ERR_PARAM; 2423 2424 return ice_free_hw_res(hw, res_type, 1, &tcam_idx); 2425 } 2426 2427 /** 2428 * ice_alloc_prof_id - allocate profile ID 2429 * @hw: pointer to the HW struct 2430 * @blk: the block to allocate the profile ID for 2431 * @prof_id: pointer to variable to receive the profile ID 2432 * 2433 * This function allocates a new profile ID, which also corresponds to a Field 2434 * Vector (Extraction Sequence) entry. 2435 */ 2436 static enum ice_status 2437 ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id) 2438 { 2439 enum ice_status status; 2440 u16 res_type; 2441 u16 get_prof; 2442 2443 if (!ice_prof_id_rsrc_type(blk, &res_type)) 2444 return ICE_ERR_PARAM; 2445 2446 status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof); 2447 if (!status) 2448 *prof_id = (u8)get_prof; 2449 2450 return status; 2451 } 2452 2453 /** 2454 * ice_free_prof_id - free profile ID 2455 * @hw: pointer to the HW struct 2456 * @blk: the block from which to free the profile ID 2457 * @prof_id: the profile ID to free 2458 * 2459 * This function frees a profile ID, which also corresponds to a Field Vector. 
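 *
 * Note that most paths do not free a profile ID directly; they go through
 * ice_prof_dec_ref() below, which frees the ID (and clears its extraction
 * sequence) only once the reference count drops to zero.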
2460 */ 2461 static enum ice_status 2462 ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id) 2463 { 2464 u16 tmp_prof_id = (u16)prof_id; 2465 u16 res_type; 2466 2467 if (!ice_prof_id_rsrc_type(blk, &res_type)) 2468 return ICE_ERR_PARAM; 2469 2470 return ice_free_hw_res(hw, res_type, 1, &tmp_prof_id); 2471 } 2472 2473 /** 2474 * ice_prof_inc_ref - increment reference count for profile 2475 * @hw: pointer to the HW struct 2476 * @blk: the block from which to free the profile ID 2477 * @prof_id: the profile ID for which to increment the reference count 2478 */ 2479 static enum ice_status 2480 ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id) 2481 { 2482 if (prof_id > hw->blk[blk].es.count) 2483 return ICE_ERR_PARAM; 2484 2485 hw->blk[blk].es.ref_count[prof_id]++; 2486 2487 return 0; 2488 } 2489 2490 /** 2491 * ice_write_es - write an extraction sequence to hardware 2492 * @hw: pointer to the HW struct 2493 * @blk: the block in which to write the extraction sequence 2494 * @prof_id: the profile ID to write 2495 * @fv: pointer to the extraction sequence to write - NULL to clear extraction 2496 */ 2497 static void 2498 ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id, 2499 struct ice_fv_word *fv) 2500 { 2501 u16 off; 2502 2503 off = prof_id * hw->blk[blk].es.fvw; 2504 if (!fv) { 2505 memset(&hw->blk[blk].es.t[off], 0, 2506 hw->blk[blk].es.fvw * sizeof(*fv)); 2507 hw->blk[blk].es.written[prof_id] = false; 2508 } else { 2509 memcpy(&hw->blk[blk].es.t[off], fv, 2510 hw->blk[blk].es.fvw * sizeof(*fv)); 2511 } 2512 } 2513 2514 /** 2515 * ice_prof_dec_ref - decrement reference count for profile 2516 * @hw: pointer to the HW struct 2517 * @blk: the block from which to free the profile ID 2518 * @prof_id: the profile ID for which to decrement the reference count 2519 */ 2520 static enum ice_status 2521 ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id) 2522 { 2523 if (prof_id > hw->blk[blk].es.count) 2524 return ICE_ERR_PARAM; 2525 2526 if (hw->blk[blk].es.ref_count[prof_id] > 0) { 2527 if (!--hw->blk[blk].es.ref_count[prof_id]) { 2528 ice_write_es(hw, blk, prof_id, NULL); 2529 return ice_free_prof_id(hw, blk, prof_id); 2530 } 2531 } 2532 2533 return 0; 2534 } 2535 2536 /* Block / table section IDs */ 2537 static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = { 2538 /* SWITCH */ 2539 { ICE_SID_XLT1_SW, 2540 ICE_SID_XLT2_SW, 2541 ICE_SID_PROFID_TCAM_SW, 2542 ICE_SID_PROFID_REDIR_SW, 2543 ICE_SID_FLD_VEC_SW 2544 }, 2545 2546 /* ACL */ 2547 { ICE_SID_XLT1_ACL, 2548 ICE_SID_XLT2_ACL, 2549 ICE_SID_PROFID_TCAM_ACL, 2550 ICE_SID_PROFID_REDIR_ACL, 2551 ICE_SID_FLD_VEC_ACL 2552 }, 2553 2554 /* FD */ 2555 { ICE_SID_XLT1_FD, 2556 ICE_SID_XLT2_FD, 2557 ICE_SID_PROFID_TCAM_FD, 2558 ICE_SID_PROFID_REDIR_FD, 2559 ICE_SID_FLD_VEC_FD 2560 }, 2561 2562 /* RSS */ 2563 { ICE_SID_XLT1_RSS, 2564 ICE_SID_XLT2_RSS, 2565 ICE_SID_PROFID_TCAM_RSS, 2566 ICE_SID_PROFID_REDIR_RSS, 2567 ICE_SID_FLD_VEC_RSS 2568 }, 2569 2570 /* PE */ 2571 { ICE_SID_XLT1_PE, 2572 ICE_SID_XLT2_PE, 2573 ICE_SID_PROFID_TCAM_PE, 2574 ICE_SID_PROFID_REDIR_PE, 2575 ICE_SID_FLD_VEC_PE 2576 } 2577 }; 2578 2579 /** 2580 * ice_init_sw_xlt1_db - init software XLT1 database from HW tables 2581 * @hw: pointer to the hardware structure 2582 * @blk: the HW block to initialize 2583 */ 2584 static void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk) 2585 { 2586 u16 pt; 2587 2588 for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) { 2589 u8 ptg; 2590 2591 ptg = hw->blk[blk].xlt1.t[pt]; 
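		/* only non-default PTGs need to be mirrored into the software
		 * database; ptypes left in the default PTG are implicit
		 */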
2592 if (ptg != ICE_DEFAULT_PTG) { 2593 ice_ptg_alloc_val(hw, blk, ptg); 2594 ice_ptg_add_mv_ptype(hw, blk, pt, ptg); 2595 } 2596 } 2597 } 2598 2599 /** 2600 * ice_init_sw_xlt2_db - init software XLT2 database from HW tables 2601 * @hw: pointer to the hardware structure 2602 * @blk: the HW block to initialize 2603 */ 2604 static void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk) 2605 { 2606 u16 vsi; 2607 2608 for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) { 2609 u16 vsig; 2610 2611 vsig = hw->blk[blk].xlt2.t[vsi]; 2612 if (vsig) { 2613 ice_vsig_alloc_val(hw, blk, vsig); 2614 ice_vsig_add_mv_vsi(hw, blk, vsi, vsig); 2615 /* no changes at this time, since this has been 2616 * initialized from the original package 2617 */ 2618 hw->blk[blk].xlt2.vsis[vsi].changed = 0; 2619 } 2620 } 2621 } 2622 2623 /** 2624 * ice_init_sw_db - init software database from HW tables 2625 * @hw: pointer to the hardware structure 2626 */ 2627 static void ice_init_sw_db(struct ice_hw *hw) 2628 { 2629 u16 i; 2630 2631 for (i = 0; i < ICE_BLK_COUNT; i++) { 2632 ice_init_sw_xlt1_db(hw, (enum ice_block)i); 2633 ice_init_sw_xlt2_db(hw, (enum ice_block)i); 2634 } 2635 } 2636 2637 /** 2638 * ice_fill_tbl - Reads content of a single table type into database 2639 * @hw: pointer to the hardware structure 2640 * @block_id: Block ID of the table to copy 2641 * @sid: Section ID of the table to copy 2642 * 2643 * Will attempt to read the entire content of a given table of a single block 2644 * into the driver database. We assume that the buffer is always at least as 2645 * large as the data contained in the package. If 2646 * this condition is not met, there is most likely an error in the package 2647 * contents. 2648 */ 2649 static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid) 2650 { 2651 u32 dst_len, sect_len, offset = 0; 2652 struct ice_prof_redir_section *pr; 2653 struct ice_prof_id_section *pid; 2654 struct ice_xlt1_section *xlt1; 2655 struct ice_xlt2_section *xlt2; 2656 struct ice_sw_fv_section *es; 2657 struct ice_pkg_enum state; 2658 u8 *src, *dst; 2659 void *sect; 2660 2661 /* if the HW segment pointer is NULL then the first iteration of 2662 * ice_pkg_enum_section() will fail. In this case the HW tables are 2663 * left unfilled and the function simply returns.
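 * (hw->seg is typically only set once a DDP package segment has been
 * located, so this path is expected on a device running without a package,
 * e.g. in Safe Mode.)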
2664 */ 2665 if (!hw->seg) { 2666 ice_debug(hw, ICE_DBG_PKG, "hw->seg is NULL, tables are not filled\n"); 2667 return; 2668 } 2669 2670 memset(&state, 0, sizeof(state)); 2671 2672 sect = ice_pkg_enum_section(hw->seg, &state, sid); 2673 2674 while (sect) { 2675 switch (sid) { 2676 case ICE_SID_XLT1_SW: 2677 case ICE_SID_XLT1_FD: 2678 case ICE_SID_XLT1_RSS: 2679 case ICE_SID_XLT1_ACL: 2680 case ICE_SID_XLT1_PE: 2681 xlt1 = (struct ice_xlt1_section *)sect; 2682 src = xlt1->value; 2683 sect_len = le16_to_cpu(xlt1->count) * 2684 sizeof(*hw->blk[block_id].xlt1.t); 2685 dst = hw->blk[block_id].xlt1.t; 2686 dst_len = hw->blk[block_id].xlt1.count * 2687 sizeof(*hw->blk[block_id].xlt1.t); 2688 break; 2689 case ICE_SID_XLT2_SW: 2690 case ICE_SID_XLT2_FD: 2691 case ICE_SID_XLT2_RSS: 2692 case ICE_SID_XLT2_ACL: 2693 case ICE_SID_XLT2_PE: 2694 xlt2 = (struct ice_xlt2_section *)sect; 2695 src = (__force u8 *)xlt2->value; 2696 sect_len = le16_to_cpu(xlt2->count) * 2697 sizeof(*hw->blk[block_id].xlt2.t); 2698 dst = (u8 *)hw->blk[block_id].xlt2.t; 2699 dst_len = hw->blk[block_id].xlt2.count * 2700 sizeof(*hw->blk[block_id].xlt2.t); 2701 break; 2702 case ICE_SID_PROFID_TCAM_SW: 2703 case ICE_SID_PROFID_TCAM_FD: 2704 case ICE_SID_PROFID_TCAM_RSS: 2705 case ICE_SID_PROFID_TCAM_ACL: 2706 case ICE_SID_PROFID_TCAM_PE: 2707 pid = (struct ice_prof_id_section *)sect; 2708 src = (u8 *)pid->entry; 2709 sect_len = le16_to_cpu(pid->count) * 2710 sizeof(*hw->blk[block_id].prof.t); 2711 dst = (u8 *)hw->blk[block_id].prof.t; 2712 dst_len = hw->blk[block_id].prof.count * 2713 sizeof(*hw->blk[block_id].prof.t); 2714 break; 2715 case ICE_SID_PROFID_REDIR_SW: 2716 case ICE_SID_PROFID_REDIR_FD: 2717 case ICE_SID_PROFID_REDIR_RSS: 2718 case ICE_SID_PROFID_REDIR_ACL: 2719 case ICE_SID_PROFID_REDIR_PE: 2720 pr = (struct ice_prof_redir_section *)sect; 2721 src = pr->redir_value; 2722 sect_len = le16_to_cpu(pr->count) * 2723 sizeof(*hw->blk[block_id].prof_redir.t); 2724 dst = hw->blk[block_id].prof_redir.t; 2725 dst_len = hw->blk[block_id].prof_redir.count * 2726 sizeof(*hw->blk[block_id].prof_redir.t); 2727 break; 2728 case ICE_SID_FLD_VEC_SW: 2729 case ICE_SID_FLD_VEC_FD: 2730 case ICE_SID_FLD_VEC_RSS: 2731 case ICE_SID_FLD_VEC_ACL: 2732 case ICE_SID_FLD_VEC_PE: 2733 es = (struct ice_sw_fv_section *)sect; 2734 src = (u8 *)es->fv; 2735 sect_len = (u32)(le16_to_cpu(es->count) * 2736 hw->blk[block_id].es.fvw) * 2737 sizeof(*hw->blk[block_id].es.t); 2738 dst = (u8 *)hw->blk[block_id].es.t; 2739 dst_len = (u32)(hw->blk[block_id].es.count * 2740 hw->blk[block_id].es.fvw) * 2741 sizeof(*hw->blk[block_id].es.t); 2742 break; 2743 default: 2744 return; 2745 } 2746 2747 /* if the section offset exceeds destination length, terminate 2748 * table fill. 2749 */ 2750 if (offset > dst_len) 2751 return; 2752 2753 /* if the sum of section size and offset exceed destination size 2754 * then we are out of bounds of the HW table size for that PF. 2755 * Changing section length to fill the remaining table space 2756 * of that PF. 2757 */ 2758 if ((offset + sect_len) > dst_len) 2759 sect_len = dst_len - offset; 2760 2761 memcpy(dst + offset, src, sect_len); 2762 offset += sect_len; 2763 sect = ice_pkg_enum_section(NULL, &state, sid); 2764 } 2765 } 2766 2767 /** 2768 * ice_fill_blk_tbls - Read package context for tables 2769 * @hw: pointer to the hardware structure 2770 * 2771 * Reads the current package contents and populates the driver 2772 * database with the data iteratively for all advanced feature 2773 * blocks. 
Assume that the HW tables have been allocated. 2774 */ 2775 void ice_fill_blk_tbls(struct ice_hw *hw) 2776 { 2777 u8 i; 2778 2779 for (i = 0; i < ICE_BLK_COUNT; i++) { 2780 enum ice_block blk_id = (enum ice_block)i; 2781 2782 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt1.sid); 2783 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt2.sid); 2784 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof.sid); 2785 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof_redir.sid); 2786 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].es.sid); 2787 } 2788 2789 ice_init_sw_db(hw); 2790 } 2791 2792 /** 2793 * ice_free_prof_map - free profile map 2794 * @hw: pointer to the hardware structure 2795 * @blk_idx: HW block index 2796 */ 2797 static void ice_free_prof_map(struct ice_hw *hw, u8 blk_idx) 2798 { 2799 struct ice_es *es = &hw->blk[blk_idx].es; 2800 struct ice_prof_map *del, *tmp; 2801 2802 mutex_lock(&es->prof_map_lock); 2803 list_for_each_entry_safe(del, tmp, &es->prof_map, list) { 2804 list_del(&del->list); 2805 devm_kfree(ice_hw_to_dev(hw), del); 2806 } 2807 INIT_LIST_HEAD(&es->prof_map); 2808 mutex_unlock(&es->prof_map_lock); 2809 } 2810 2811 /** 2812 * ice_free_flow_profs - free flow profile entries 2813 * @hw: pointer to the hardware structure 2814 * @blk_idx: HW block index 2815 */ 2816 static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx) 2817 { 2818 struct ice_flow_prof *p, *tmp; 2819 2820 mutex_lock(&hw->fl_profs_locks[blk_idx]); 2821 list_for_each_entry_safe(p, tmp, &hw->fl_profs[blk_idx], l_entry) { 2822 struct ice_flow_entry *e, *t; 2823 2824 list_for_each_entry_safe(e, t, &p->entries, l_entry) 2825 ice_flow_rem_entry(hw, (enum ice_block)blk_idx, 2826 ICE_FLOW_ENTRY_HNDL(e)); 2827 2828 list_del(&p->l_entry); 2829 devm_kfree(ice_hw_to_dev(hw), p); 2830 } 2831 mutex_unlock(&hw->fl_profs_locks[blk_idx]); 2832 2833 /* if driver is in reset and tables are being cleared 2834 * re-initialize the flow profile list heads 2835 */ 2836 INIT_LIST_HEAD(&hw->fl_profs[blk_idx]); 2837 } 2838 2839 /** 2840 * ice_free_vsig_tbl - free complete VSIG table entries 2841 * @hw: pointer to the hardware structure 2842 * @blk: the HW block on which to free the VSIG table entries 2843 */ 2844 static void ice_free_vsig_tbl(struct ice_hw *hw, enum ice_block blk) 2845 { 2846 u16 i; 2847 2848 if (!hw->blk[blk].xlt2.vsig_tbl) 2849 return; 2850 2851 for (i = 1; i < ICE_MAX_VSIGS; i++) 2852 if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) 2853 ice_vsig_free(hw, blk, i); 2854 } 2855 2856 /** 2857 * ice_free_hw_tbls - free hardware table memory 2858 * @hw: pointer to the hardware structure 2859 */ 2860 void ice_free_hw_tbls(struct ice_hw *hw) 2861 { 2862 struct ice_rss_cfg *r, *rt; 2863 u8 i; 2864 2865 for (i = 0; i < ICE_BLK_COUNT; i++) { 2866 if (hw->blk[i].is_list_init) { 2867 struct ice_es *es = &hw->blk[i].es; 2868 2869 ice_free_prof_map(hw, i); 2870 mutex_destroy(&es->prof_map_lock); 2871 2872 ice_free_flow_profs(hw, i); 2873 mutex_destroy(&hw->fl_profs_locks[i]); 2874 2875 hw->blk[i].is_list_init = false; 2876 } 2877 ice_free_vsig_tbl(hw, (enum ice_block)i); 2878 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptypes); 2879 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptg_tbl); 2880 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.t); 2881 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.t); 2882 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsig_tbl); 2883 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsis); 2884 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof.t); 2885 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof_redir.t); 2886 
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.t); 2887 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.ref_count); 2888 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.written); 2889 } 2890 2891 list_for_each_entry_safe(r, rt, &hw->rss_list_head, l_entry) { 2892 list_del(&r->l_entry); 2893 devm_kfree(ice_hw_to_dev(hw), r); 2894 } 2895 mutex_destroy(&hw->rss_locks); 2896 memset(hw->blk, 0, sizeof(hw->blk)); 2897 } 2898 2899 /** 2900 * ice_init_flow_profs - init flow profile locks and list heads 2901 * @hw: pointer to the hardware structure 2902 * @blk_idx: HW block index 2903 */ 2904 static void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx) 2905 { 2906 mutex_init(&hw->fl_profs_locks[blk_idx]); 2907 INIT_LIST_HEAD(&hw->fl_profs[blk_idx]); 2908 } 2909 2910 /** 2911 * ice_clear_hw_tbls - clear HW tables and flow profiles 2912 * @hw: pointer to the hardware structure 2913 */ 2914 void ice_clear_hw_tbls(struct ice_hw *hw) 2915 { 2916 u8 i; 2917 2918 for (i = 0; i < ICE_BLK_COUNT; i++) { 2919 struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir; 2920 struct ice_prof_tcam *prof = &hw->blk[i].prof; 2921 struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1; 2922 struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2; 2923 struct ice_es *es = &hw->blk[i].es; 2924 2925 if (hw->blk[i].is_list_init) { 2926 ice_free_prof_map(hw, i); 2927 ice_free_flow_profs(hw, i); 2928 } 2929 2930 ice_free_vsig_tbl(hw, (enum ice_block)i); 2931 2932 memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes)); 2933 memset(xlt1->ptg_tbl, 0, 2934 ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl)); 2935 memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t)); 2936 2937 memset(xlt2->vsis, 0, xlt2->count * sizeof(*xlt2->vsis)); 2938 memset(xlt2->vsig_tbl, 0, 2939 xlt2->count * sizeof(*xlt2->vsig_tbl)); 2940 memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t)); 2941 2942 memset(prof->t, 0, prof->count * sizeof(*prof->t)); 2943 memset(prof_redir->t, 0, 2944 prof_redir->count * sizeof(*prof_redir->t)); 2945 2946 memset(es->t, 0, es->count * sizeof(*es->t)); 2947 memset(es->ref_count, 0, es->count * sizeof(*es->ref_count)); 2948 memset(es->written, 0, es->count * sizeof(*es->written)); 2949 } 2950 } 2951 2952 /** 2953 * ice_init_hw_tbls - init hardware table memory 2954 * @hw: pointer to the hardware structure 2955 */ 2956 enum ice_status ice_init_hw_tbls(struct ice_hw *hw) 2957 { 2958 u8 i; 2959 2960 mutex_init(&hw->rss_locks); 2961 INIT_LIST_HEAD(&hw->rss_list_head); 2962 for (i = 0; i < ICE_BLK_COUNT; i++) { 2963 struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir; 2964 struct ice_prof_tcam *prof = &hw->blk[i].prof; 2965 struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1; 2966 struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2; 2967 struct ice_es *es = &hw->blk[i].es; 2968 u16 j; 2969 2970 if (hw->blk[i].is_list_init) 2971 continue; 2972 2973 ice_init_flow_profs(hw, i); 2974 mutex_init(&es->prof_map_lock); 2975 INIT_LIST_HEAD(&es->prof_map); 2976 hw->blk[i].is_list_init = true; 2977 2978 hw->blk[i].overwrite = blk_sizes[i].overwrite; 2979 es->reverse = blk_sizes[i].reverse; 2980 2981 xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF]; 2982 xlt1->count = blk_sizes[i].xlt1; 2983 2984 xlt1->ptypes = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count, 2985 sizeof(*xlt1->ptypes), GFP_KERNEL); 2986 2987 if (!xlt1->ptypes) 2988 goto err; 2989 2990 xlt1->ptg_tbl = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_PTGS, 2991 sizeof(*xlt1->ptg_tbl), 2992 GFP_KERNEL); 2993 2994 if (!xlt1->ptg_tbl) 2995 goto err; 2996 2997 xlt1->t = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count, 2998 sizeof(*xlt1->t), GFP_KERNEL); 
2999 if (!xlt1->t) 3000 goto err; 3001 3002 xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF]; 3003 xlt2->count = blk_sizes[i].xlt2; 3004 3005 xlt2->vsis = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count, 3006 sizeof(*xlt2->vsis), GFP_KERNEL); 3007 3008 if (!xlt2->vsis) 3009 goto err; 3010 3011 xlt2->vsig_tbl = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count, 3012 sizeof(*xlt2->vsig_tbl), 3013 GFP_KERNEL); 3014 if (!xlt2->vsig_tbl) 3015 goto err; 3016 3017 for (j = 0; j < xlt2->count; j++) 3018 INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst); 3019 3020 xlt2->t = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count, 3021 sizeof(*xlt2->t), GFP_KERNEL); 3022 if (!xlt2->t) 3023 goto err; 3024 3025 prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF]; 3026 prof->count = blk_sizes[i].prof_tcam; 3027 prof->max_prof_id = blk_sizes[i].prof_id; 3028 prof->cdid_bits = blk_sizes[i].prof_cdid_bits; 3029 prof->t = devm_kcalloc(ice_hw_to_dev(hw), prof->count, 3030 sizeof(*prof->t), GFP_KERNEL); 3031 3032 if (!prof->t) 3033 goto err; 3034 3035 prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF]; 3036 prof_redir->count = blk_sizes[i].prof_redir; 3037 prof_redir->t = devm_kcalloc(ice_hw_to_dev(hw), 3038 prof_redir->count, 3039 sizeof(*prof_redir->t), 3040 GFP_KERNEL); 3041 3042 if (!prof_redir->t) 3043 goto err; 3044 3045 es->sid = ice_blk_sids[i][ICE_SID_ES_OFF]; 3046 es->count = blk_sizes[i].es; 3047 es->fvw = blk_sizes[i].fvw; 3048 es->t = devm_kcalloc(ice_hw_to_dev(hw), 3049 (u32)(es->count * es->fvw), 3050 sizeof(*es->t), GFP_KERNEL); 3051 if (!es->t) 3052 goto err; 3053 3054 es->ref_count = devm_kcalloc(ice_hw_to_dev(hw), es->count, 3055 sizeof(*es->ref_count), 3056 GFP_KERNEL); 3057 3058 es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count, 3059 sizeof(*es->written), GFP_KERNEL); 3060 if (!es->ref_count) 3061 goto err; 3062 } 3063 return 0; 3064 3065 err: 3066 ice_free_hw_tbls(hw); 3067 return ICE_ERR_NO_MEMORY; 3068 } 3069 3070 /** 3071 * ice_prof_gen_key - generate profile ID key 3072 * @hw: pointer to the HW struct 3073 * @blk: the block in which to write profile ID to 3074 * @ptg: packet type group (PTG) portion of key 3075 * @vsig: VSIG portion of key 3076 * @cdid: CDID portion of key 3077 * @flags: flag portion of key 3078 * @vl_msk: valid mask 3079 * @dc_msk: don't care mask 3080 * @nm_msk: never match mask 3081 * @key: output of profile ID key 3082 */ 3083 static enum ice_status 3084 ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig, 3085 u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ], 3086 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ], 3087 u8 key[ICE_TCAM_KEY_SZ]) 3088 { 3089 struct ice_prof_id_key inkey; 3090 3091 inkey.xlt1 = ptg; 3092 inkey.xlt2_cdid = cpu_to_le16(vsig); 3093 inkey.flags = cpu_to_le16(flags); 3094 3095 switch (hw->blk[blk].prof.cdid_bits) { 3096 case 0: 3097 break; 3098 case 2: 3099 #define ICE_CD_2_M 0xC000U 3100 #define ICE_CD_2_S 14 3101 inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_2_M); 3102 inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_2_S); 3103 break; 3104 case 4: 3105 #define ICE_CD_4_M 0xF000U 3106 #define ICE_CD_4_S 12 3107 inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_4_M); 3108 inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_4_S); 3109 break; 3110 case 8: 3111 #define ICE_CD_8_M 0xFF00U 3112 #define ICE_CD_8_S 16 3113 inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_8_M); 3114 inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_8_S); 3115 break; 3116 default: 3117 ice_debug(hw, ICE_DBG_PKG, "Error in profile config\n"); 3118 break; 3119 } 3120 3121 return 
ice_set_key(key, ICE_TCAM_KEY_SZ, (u8 *)&inkey, vl_msk, dc_msk, 3122 nm_msk, 0, ICE_TCAM_KEY_SZ / 2); 3123 } 3124 3125 /** 3126 * ice_tcam_write_entry - write TCAM entry 3127 * @hw: pointer to the HW struct 3128 * @blk: the block in which to write profile ID to 3129 * @idx: the entry index to write to 3130 * @prof_id: profile ID 3131 * @ptg: packet type group (PTG) portion of key 3132 * @vsig: VSIG portion of key 3133 * @cdid: CDID portion of key 3134 * @flags: flag portion of key 3135 * @vl_msk: valid mask 3136 * @dc_msk: don't care mask 3137 * @nm_msk: never match mask 3138 */ 3139 static enum ice_status 3140 ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx, 3141 u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags, 3142 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ], 3143 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], 3144 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ]) 3145 { 3146 struct ice_prof_tcam_entry; 3147 enum ice_status status; 3148 3149 status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk, 3150 dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key); 3151 if (!status) { 3152 hw->blk[blk].prof.t[idx].addr = cpu_to_le16(idx); 3153 hw->blk[blk].prof.t[idx].prof_id = prof_id; 3154 } 3155 3156 return status; 3157 } 3158 3159 /** 3160 * ice_vsig_get_ref - returns number of VSIs belong to a VSIG 3161 * @hw: pointer to the hardware structure 3162 * @blk: HW block 3163 * @vsig: VSIG to query 3164 * @refs: pointer to variable to receive the reference count 3165 */ 3166 static enum ice_status 3167 ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs) 3168 { 3169 u16 idx = vsig & ICE_VSIG_IDX_M; 3170 struct ice_vsig_vsi *ptr; 3171 3172 *refs = 0; 3173 3174 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) 3175 return ICE_ERR_DOES_NOT_EXIST; 3176 3177 ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; 3178 while (ptr) { 3179 (*refs)++; 3180 ptr = ptr->next_vsi; 3181 } 3182 3183 return 0; 3184 } 3185 3186 /** 3187 * ice_has_prof_vsig - check to see if VSIG has a specific profile 3188 * @hw: pointer to the hardware structure 3189 * @blk: HW block 3190 * @vsig: VSIG to check against 3191 * @hdl: profile handle 3192 */ 3193 static bool 3194 ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl) 3195 { 3196 u16 idx = vsig & ICE_VSIG_IDX_M; 3197 struct ice_vsig_prof *ent; 3198 3199 list_for_each_entry(ent, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, 3200 list) 3201 if (ent->profile_cookie == hdl) 3202 return true; 3203 3204 ice_debug(hw, ICE_DBG_INIT, 3205 "Characteristic list for VSI group %d not found.\n", 3206 vsig); 3207 return false; 3208 } 3209 3210 /** 3211 * ice_prof_bld_es - build profile ID extraction sequence changes 3212 * @hw: pointer to the HW struct 3213 * @blk: hardware block 3214 * @bld: the update package buffer build to add to 3215 * @chgs: the list of changes to make in hardware 3216 */ 3217 static enum ice_status 3218 ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk, 3219 struct ice_buf_build *bld, struct list_head *chgs) 3220 { 3221 u16 vec_size = hw->blk[blk].es.fvw * sizeof(struct ice_fv_word); 3222 struct ice_chs_chg *tmp; 3223 3224 list_for_each_entry(tmp, chgs, list_entry) 3225 if (tmp->type == ICE_PTG_ES_ADD && tmp->add_prof) { 3226 u16 off = tmp->prof_id * hw->blk[blk].es.fvw; 3227 struct ice_pkg_es *p; 3228 u32 id; 3229 3230 id = ice_sect_id(blk, ICE_VEC_TBL); 3231 p = (struct ice_pkg_es *) 3232 ice_pkg_buf_alloc_section(bld, id, sizeof(*p) + 3233 vec_size - 3234 sizeof(p->es[0])); 3235 3236 if (!p) 3237 return ICE_ERR_MAX_LIMIT; 3238 3239 p->count = 
cpu_to_le16(1); 3240 p->offset = cpu_to_le16(tmp->prof_id); 3241 3242 memcpy(p->es, &hw->blk[blk].es.t[off], vec_size); 3243 } 3244 3245 return 0; 3246 } 3247 3248 /** 3249 * ice_prof_bld_tcam - build profile ID TCAM changes 3250 * @hw: pointer to the HW struct 3251 * @blk: hardware block 3252 * @bld: the update package buffer build to add to 3253 * @chgs: the list of changes to make in hardware 3254 */ 3255 static enum ice_status 3256 ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk, 3257 struct ice_buf_build *bld, struct list_head *chgs) 3258 { 3259 struct ice_chs_chg *tmp; 3260 3261 list_for_each_entry(tmp, chgs, list_entry) 3262 if (tmp->type == ICE_TCAM_ADD && tmp->add_tcam_idx) { 3263 struct ice_prof_id_section *p; 3264 u32 id; 3265 3266 id = ice_sect_id(blk, ICE_PROF_TCAM); 3267 p = (struct ice_prof_id_section *) 3268 ice_pkg_buf_alloc_section(bld, id, sizeof(*p)); 3269 3270 if (!p) 3271 return ICE_ERR_MAX_LIMIT; 3272 3273 p->count = cpu_to_le16(1); 3274 p->entry[0].addr = cpu_to_le16(tmp->tcam_idx); 3275 p->entry[0].prof_id = tmp->prof_id; 3276 3277 memcpy(p->entry[0].key, 3278 &hw->blk[blk].prof.t[tmp->tcam_idx].key, 3279 sizeof(hw->blk[blk].prof.t->key)); 3280 } 3281 3282 return 0; 3283 } 3284 3285 /** 3286 * ice_prof_bld_xlt1 - build XLT1 changes 3287 * @blk: hardware block 3288 * @bld: the update package buffer build to add to 3289 * @chgs: the list of changes to make in hardware 3290 */ 3291 static enum ice_status 3292 ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld, 3293 struct list_head *chgs) 3294 { 3295 struct ice_chs_chg *tmp; 3296 3297 list_for_each_entry(tmp, chgs, list_entry) 3298 if (tmp->type == ICE_PTG_ES_ADD && tmp->add_ptg) { 3299 struct ice_xlt1_section *p; 3300 u32 id; 3301 3302 id = ice_sect_id(blk, ICE_XLT1); 3303 p = (struct ice_xlt1_section *) 3304 ice_pkg_buf_alloc_section(bld, id, sizeof(*p)); 3305 3306 if (!p) 3307 return ICE_ERR_MAX_LIMIT; 3308 3309 p->count = cpu_to_le16(1); 3310 p->offset = cpu_to_le16(tmp->ptype); 3311 p->value[0] = tmp->ptg; 3312 } 3313 3314 return 0; 3315 } 3316 3317 /** 3318 * ice_prof_bld_xlt2 - build XLT2 changes 3319 * @blk: hardware block 3320 * @bld: the update package buffer build to add to 3321 * @chgs: the list of changes to make in hardware 3322 */ 3323 static enum ice_status 3324 ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld, 3325 struct list_head *chgs) 3326 { 3327 struct ice_chs_chg *tmp; 3328 3329 list_for_each_entry(tmp, chgs, list_entry) { 3330 struct ice_xlt2_section *p; 3331 u32 id; 3332 3333 switch (tmp->type) { 3334 case ICE_VSIG_ADD: 3335 case ICE_VSI_MOVE: 3336 case ICE_VSIG_REM: 3337 id = ice_sect_id(blk, ICE_XLT2); 3338 p = (struct ice_xlt2_section *) 3339 ice_pkg_buf_alloc_section(bld, id, sizeof(*p)); 3340 3341 if (!p) 3342 return ICE_ERR_MAX_LIMIT; 3343 3344 p->count = cpu_to_le16(1); 3345 p->offset = cpu_to_le16(tmp->vsi); 3346 p->value[0] = cpu_to_le16(tmp->vsig); 3347 break; 3348 default: 3349 break; 3350 } 3351 } 3352 3353 return 0; 3354 } 3355 3356 /** 3357 * ice_upd_prof_hw - update hardware using the change list 3358 * @hw: pointer to the HW struct 3359 * @blk: hardware block 3360 * @chgs: the list of changes to make in hardware 3361 */ 3362 static enum ice_status 3363 ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk, 3364 struct list_head *chgs) 3365 { 3366 struct ice_buf_build *b; 3367 struct ice_chs_chg *tmp; 3368 enum ice_status status; 3369 u16 pkg_sects; 3370 u16 xlt1 = 0; 3371 u16 xlt2 = 0; 3372 u16 tcam = 0; 3373 u16 es = 0; 3374 u16 sects; 3375 
3376 /* count number of sections we need */ 3377 list_for_each_entry(tmp, chgs, list_entry) { 3378 switch (tmp->type) { 3379 case ICE_PTG_ES_ADD: 3380 if (tmp->add_ptg) 3381 xlt1++; 3382 if (tmp->add_prof) 3383 es++; 3384 break; 3385 case ICE_TCAM_ADD: 3386 tcam++; 3387 break; 3388 case ICE_VSIG_ADD: 3389 case ICE_VSI_MOVE: 3390 case ICE_VSIG_REM: 3391 xlt2++; 3392 break; 3393 default: 3394 break; 3395 } 3396 } 3397 sects = xlt1 + xlt2 + tcam + es; 3398 3399 if (!sects) 3400 return 0; 3401 3402 /* Build update package buffer */ 3403 b = ice_pkg_buf_alloc(hw); 3404 if (!b) 3405 return ICE_ERR_NO_MEMORY; 3406 3407 status = ice_pkg_buf_reserve_section(b, sects); 3408 if (status) 3409 goto error_tmp; 3410 3411 /* Preserve order of table update: ES, TCAM, PTG, VSIG */ 3412 if (es) { 3413 status = ice_prof_bld_es(hw, blk, b, chgs); 3414 if (status) 3415 goto error_tmp; 3416 } 3417 3418 if (tcam) { 3419 status = ice_prof_bld_tcam(hw, blk, b, chgs); 3420 if (status) 3421 goto error_tmp; 3422 } 3423 3424 if (xlt1) { 3425 status = ice_prof_bld_xlt1(blk, b, chgs); 3426 if (status) 3427 goto error_tmp; 3428 } 3429 3430 if (xlt2) { 3431 status = ice_prof_bld_xlt2(blk, b, chgs); 3432 if (status) 3433 goto error_tmp; 3434 } 3435 3436 /* After package buffer build check if the section count in buffer is 3437 * non-zero and matches the number of sections detected for package 3438 * update. 3439 */ 3440 pkg_sects = ice_pkg_buf_get_active_sections(b); 3441 if (!pkg_sects || pkg_sects != sects) { 3442 status = ICE_ERR_INVAL_SIZE; 3443 goto error_tmp; 3444 } 3445 3446 /* update package */ 3447 status = ice_update_pkg(hw, ice_pkg_buf(b), 1); 3448 if (status == ICE_ERR_AQ_ERROR) 3449 ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n"); 3450 3451 error_tmp: 3452 ice_pkg_buf_free(hw, b); 3453 return status; 3454 } 3455 3456 /** 3457 * ice_update_fd_mask - set Flow Director Field Vector mask for a profile 3458 * @hw: pointer to the HW struct 3459 * @prof_id: profile ID 3460 * @mask_sel: mask select 3461 * 3462 * This function enable any of the masks selected by the mask select parameter 3463 * for the profile specified. 
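 *
 * Illustrative use (mirroring the call at the end of the swap setup below):
 *
 *	ice_update_fd_mask(hw, prof_id, 0);
 *
 * which clears all mask selections for a newly created Flow Director profile.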
3464 */ 3465 static void ice_update_fd_mask(struct ice_hw *hw, u16 prof_id, u32 mask_sel) 3466 { 3467 wr32(hw, GLQF_FDMASK_SEL(prof_id), mask_sel); 3468 3469 ice_debug(hw, ICE_DBG_INIT, "fd mask(%d): %x = %x\n", prof_id, 3470 GLQF_FDMASK_SEL(prof_id), mask_sel); 3471 } 3472 3473 struct ice_fd_src_dst_pair { 3474 u8 prot_id; 3475 u8 count; 3476 u16 off; 3477 }; 3478 3479 static const struct ice_fd_src_dst_pair ice_fd_pairs[] = { 3480 /* These are defined in pairs */ 3481 { ICE_PROT_IPV4_OF_OR_S, 2, 12 }, 3482 { ICE_PROT_IPV4_OF_OR_S, 2, 16 }, 3483 3484 { ICE_PROT_IPV4_IL, 2, 12 }, 3485 { ICE_PROT_IPV4_IL, 2, 16 }, 3486 3487 { ICE_PROT_TCP_IL, 1, 0 }, 3488 { ICE_PROT_TCP_IL, 1, 2 }, 3489 3490 { ICE_PROT_UDP_OF, 1, 0 }, 3491 { ICE_PROT_UDP_OF, 1, 2 }, 3492 3493 { ICE_PROT_UDP_IL_OR_S, 1, 0 }, 3494 { ICE_PROT_UDP_IL_OR_S, 1, 2 }, 3495 3496 { ICE_PROT_SCTP_IL, 1, 0 }, 3497 { ICE_PROT_SCTP_IL, 1, 2 } 3498 }; 3499 3500 #define ICE_FD_SRC_DST_PAIR_COUNT ARRAY_SIZE(ice_fd_pairs) 3501 3502 /** 3503 * ice_update_fd_swap - set register appropriately for a FD FV extraction 3504 * @hw: pointer to the HW struct 3505 * @prof_id: profile ID 3506 * @es: extraction sequence (length of array is determined by the block) 3507 */ 3508 static enum ice_status 3509 ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es) 3510 { 3511 DECLARE_BITMAP(pair_list, ICE_FD_SRC_DST_PAIR_COUNT); 3512 u8 pair_start[ICE_FD_SRC_DST_PAIR_COUNT] = { 0 }; 3513 #define ICE_FD_FV_NOT_FOUND (-2) 3514 s8 first_free = ICE_FD_FV_NOT_FOUND; 3515 u8 used[ICE_MAX_FV_WORDS] = { 0 }; 3516 s8 orig_free, si; 3517 u32 mask_sel = 0; 3518 u8 i, j, k; 3519 3520 bitmap_zero(pair_list, ICE_FD_SRC_DST_PAIR_COUNT); 3521 3522 /* This code assumes that the Flow Director field vectors are assigned 3523 * from the end of the FV indexes working towards the zero index, that 3524 * only complete fields will be included and will be consecutive, and 3525 * that there are no gaps between valid indexes. 3526 */ 3527 3528 /* Determine swap fields present */ 3529 for (i = 0; i < hw->blk[ICE_BLK_FD].es.fvw; i++) { 3530 /* Find the first free entry, assuming right to left population. 3531 * This is where we can start adding additional pairs if needed. 
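 *
 * For example (illustrative only): if only the upper words of the sequence
 * are populated, say es[3] through es[fvw - 1], the scan sets first_free to
 * 2, and any missing paired fields are later placed at index 2 and working
 * downward.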
3532 */ 3533 if (first_free == ICE_FD_FV_NOT_FOUND && es[i].prot_id != 3534 ICE_PROT_INVALID) 3535 first_free = i - 1; 3536 3537 for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++) 3538 if (es[i].prot_id == ice_fd_pairs[j].prot_id && 3539 es[i].off == ice_fd_pairs[j].off) { 3540 set_bit(j, pair_list); 3541 pair_start[j] = i; 3542 } 3543 } 3544 3545 orig_free = first_free; 3546 3547 /* determine missing swap fields that need to be added */ 3548 for (i = 0; i < ICE_FD_SRC_DST_PAIR_COUNT; i += 2) { 3549 u8 bit1 = test_bit(i + 1, pair_list); 3550 u8 bit0 = test_bit(i, pair_list); 3551 3552 if (bit0 ^ bit1) { 3553 u8 index; 3554 3555 /* add the appropriate 'paired' entry */ 3556 if (!bit0) 3557 index = i; 3558 else 3559 index = i + 1; 3560 3561 /* check for room */ 3562 if (first_free + 1 < (s8)ice_fd_pairs[index].count) 3563 return ICE_ERR_MAX_LIMIT; 3564 3565 /* place in extraction sequence */ 3566 for (k = 0; k < ice_fd_pairs[index].count; k++) { 3567 es[first_free - k].prot_id = 3568 ice_fd_pairs[index].prot_id; 3569 es[first_free - k].off = 3570 ice_fd_pairs[index].off + (k * 2); 3571 3572 if (k > first_free) 3573 return ICE_ERR_OUT_OF_RANGE; 3574 3575 /* keep track of non-relevant fields */ 3576 mask_sel |= BIT(first_free - k); 3577 } 3578 3579 pair_start[index] = first_free; 3580 first_free -= ice_fd_pairs[index].count; 3581 } 3582 } 3583 3584 /* fill in the swap array */ 3585 si = hw->blk[ICE_BLK_FD].es.fvw - 1; 3586 while (si >= 0) { 3587 u8 indexes_used = 1; 3588 3589 /* assume flat at this index */ 3590 #define ICE_SWAP_VALID 0x80 3591 used[si] = si | ICE_SWAP_VALID; 3592 3593 if (orig_free == ICE_FD_FV_NOT_FOUND || si <= orig_free) { 3594 si -= indexes_used; 3595 continue; 3596 } 3597 3598 /* check for a swap location */ 3599 for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++) 3600 if (es[si].prot_id == ice_fd_pairs[j].prot_id && 3601 es[si].off == ice_fd_pairs[j].off) { 3602 u8 idx; 3603 3604 /* determine the appropriate matching field */ 3605 idx = j + ((j % 2) ? 
-1 : 1); 3606 3607 indexes_used = ice_fd_pairs[idx].count; 3608 for (k = 0; k < indexes_used; k++) { 3609 used[si - k] = (pair_start[idx] - k) | 3610 ICE_SWAP_VALID; 3611 } 3612 3613 break; 3614 } 3615 3616 si -= indexes_used; 3617 } 3618 3619 /* for each set of 4 swap and 4 inset indexes, write the appropriate 3620 * register 3621 */ 3622 for (j = 0; j < hw->blk[ICE_BLK_FD].es.fvw / 4; j++) { 3623 u32 raw_swap = 0; 3624 u32 raw_in = 0; 3625 3626 for (k = 0; k < 4; k++) { 3627 u8 idx; 3628 3629 idx = (j * 4) + k; 3630 if (used[idx] && !(mask_sel & BIT(idx))) { 3631 raw_swap |= used[idx] << (k * BITS_PER_BYTE); 3632 #define ICE_INSET_DFLT 0x9f 3633 raw_in |= ICE_INSET_DFLT << (k * BITS_PER_BYTE); 3634 } 3635 } 3636 3637 /* write the appropriate swap register set */ 3638 wr32(hw, GLQF_FDSWAP(prof_id, j), raw_swap); 3639 3640 ice_debug(hw, ICE_DBG_INIT, "swap wr(%d, %d): %x = %08x\n", 3641 prof_id, j, GLQF_FDSWAP(prof_id, j), raw_swap); 3642 3643 /* write the appropriate inset register set */ 3644 wr32(hw, GLQF_FDINSET(prof_id, j), raw_in); 3645 3646 ice_debug(hw, ICE_DBG_INIT, "inset wr(%d, %d): %x = %08x\n", 3647 prof_id, j, GLQF_FDINSET(prof_id, j), raw_in); 3648 } 3649 3650 /* initially clear the mask select for this profile */ 3651 ice_update_fd_mask(hw, prof_id, 0); 3652 3653 return 0; 3654 } 3655 3656 /** 3657 * ice_add_prof - add profile 3658 * @hw: pointer to the HW struct 3659 * @blk: hardware block 3660 * @id: profile tracking ID 3661 * @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits) 3662 * @es: extraction sequence (length of array is determined by the block) 3663 * 3664 * This function registers a profile, which matches a set of PTGs with a 3665 * particular extraction sequence. While the hardware profile is allocated 3666 * it will not be written until the first call to ice_add_flow that specifies 3667 * the ID value used here. 3668 */ 3669 enum ice_status 3670 ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], 3671 struct ice_fv_word *es) 3672 { 3673 u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE); 3674 DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT); 3675 struct ice_prof_map *prof; 3676 enum ice_status status; 3677 u8 byte = 0; 3678 u8 prof_id; 3679 3680 bitmap_zero(ptgs_used, ICE_XLT1_CNT); 3681 3682 mutex_lock(&hw->blk[blk].es.prof_map_lock); 3683 3684 /* search for existing profile */ 3685 status = ice_find_prof_id(hw, blk, es, &prof_id); 3686 if (status) { 3687 /* allocate profile ID */ 3688 status = ice_alloc_prof_id(hw, blk, &prof_id); 3689 if (status) 3690 goto err_ice_add_prof; 3691 if (blk == ICE_BLK_FD) { 3692 /* For Flow Director block, the extraction sequence may 3693 * need to be altered in the case where there are paired 3694 * fields that have no match. This is necessary because 3695 * for Flow Director, src and dest fields need to paired 3696 * for filter programming and these values are swapped 3697 * during Tx. 
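			 * (See ice_update_fd_swap() for how the swap and
			 * inset registers are programmed to achieve this.)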
3698 */ 3699 status = ice_update_fd_swap(hw, prof_id, es); 3700 if (status) 3701 goto err_ice_add_prof; 3702 } 3703 3704 /* and write new es */ 3705 ice_write_es(hw, blk, prof_id, es); 3706 } 3707 3708 ice_prof_inc_ref(hw, blk, prof_id); 3709 3710 /* add profile info */ 3711 prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*prof), GFP_KERNEL); 3712 if (!prof) { 3713 status = ICE_ERR_NO_MEMORY; 3714 goto err_ice_add_prof; 3715 } 3716 3717 prof->profile_cookie = id; 3718 prof->prof_id = prof_id; 3719 prof->ptg_cnt = 0; 3720 prof->context = 0; 3721 3722 /* build list of ptgs */ 3723 while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) { 3724 u8 bit; 3725 3726 if (!ptypes[byte]) { 3727 bytes--; 3728 byte++; 3729 continue; 3730 } 3731 3732 /* Examine 8 bits per byte */ 3733 for_each_set_bit(bit, (unsigned long *)&ptypes[byte], 3734 BITS_PER_BYTE) { 3735 u16 ptype; 3736 u8 ptg; 3737 u8 m; 3738 3739 ptype = byte * BITS_PER_BYTE + bit; 3740 3741 /* The package should place all ptypes in a non-zero 3742 * PTG, so the following call should never fail. 3743 */ 3744 if (ice_ptg_find_ptype(hw, blk, ptype, &ptg)) 3745 continue; 3746 3747 /* If PTG is already added, skip and continue */ 3748 if (test_bit(ptg, ptgs_used)) 3749 continue; 3750 3751 set_bit(ptg, ptgs_used); 3752 prof->ptg[prof->ptg_cnt] = ptg; 3753 3754 if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE) 3755 break; 3756 3757 /* nothing left in byte, then exit */ 3758 m = ~(u8)((1 << (bit + 1)) - 1); 3759 if (!(ptypes[byte] & m)) 3760 break; 3761 } 3762 3763 bytes--; 3764 byte++; 3765 } 3766 3767 list_add(&prof->list, &hw->blk[blk].es.prof_map); 3768 status = 0; 3769 3770 err_ice_add_prof: 3771 mutex_unlock(&hw->blk[blk].es.prof_map_lock); 3772 return status; 3773 } 3774 3775 /** 3776 * ice_search_prof_id_low - Search for a profile tracking ID low level 3777 * @hw: pointer to the HW struct 3778 * @blk: hardware block 3779 * @id: profile tracking ID 3780 * 3781 * This will search for a profile tracking ID which was previously added. This 3782 * version assumes that the caller has already acquired the prof map lock. 3783 */ 3784 static struct ice_prof_map * 3785 ice_search_prof_id_low(struct ice_hw *hw, enum ice_block blk, u64 id) 3786 { 3787 struct ice_prof_map *entry = NULL; 3788 struct ice_prof_map *map; 3789 3790 list_for_each_entry(map, &hw->blk[blk].es.prof_map, list) 3791 if (map->profile_cookie == id) { 3792 entry = map; 3793 break; 3794 } 3795 3796 return entry; 3797 } 3798 3799 /** 3800 * ice_search_prof_id - Search for a profile tracking ID 3801 * @hw: pointer to the HW struct 3802 * @blk: hardware block 3803 * @id: profile tracking ID 3804 * 3805 * This will search for a profile tracking ID which was previously added. 
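 *
 * This wrapper acquires the prof_map_lock around the lookup; callers that
 * already hold the lock should use ice_search_prof_id_low() above instead.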
3806 */ 3807 static struct ice_prof_map * 3808 ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id) 3809 { 3810 struct ice_prof_map *entry; 3811 3812 mutex_lock(&hw->blk[blk].es.prof_map_lock); 3813 entry = ice_search_prof_id_low(hw, blk, id); 3814 mutex_unlock(&hw->blk[blk].es.prof_map_lock); 3815 3816 return entry; 3817 } 3818 3819 /** 3820 * ice_vsig_prof_id_count - count profiles in a VSIG 3821 * @hw: pointer to the HW struct 3822 * @blk: hardware block 3823 * @vsig: VSIG to remove the profile from 3824 */ 3825 static u16 3826 ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig) 3827 { 3828 u16 idx = vsig & ICE_VSIG_IDX_M, count = 0; 3829 struct ice_vsig_prof *p; 3830 3831 list_for_each_entry(p, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, 3832 list) 3833 count++; 3834 3835 return count; 3836 } 3837 3838 /** 3839 * ice_rel_tcam_idx - release a TCAM index 3840 * @hw: pointer to the HW struct 3841 * @blk: hardware block 3842 * @idx: the index to release 3843 */ 3844 static enum ice_status 3845 ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx) 3846 { 3847 /* Masks to invoke a never match entry */ 3848 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 3849 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF }; 3850 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 }; 3851 enum ice_status status; 3852 3853 /* write the TCAM entry */ 3854 status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk, 3855 dc_msk, nm_msk); 3856 if (status) 3857 return status; 3858 3859 /* release the TCAM entry */ 3860 status = ice_free_tcam_ent(hw, blk, idx); 3861 3862 return status; 3863 } 3864 3865 /** 3866 * ice_rem_prof_id - remove one profile from a VSIG 3867 * @hw: pointer to the HW struct 3868 * @blk: hardware block 3869 * @prof: pointer to profile structure to remove 3870 */ 3871 static enum ice_status 3872 ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk, 3873 struct ice_vsig_prof *prof) 3874 { 3875 enum ice_status status; 3876 u16 i; 3877 3878 for (i = 0; i < prof->tcam_count; i++) 3879 if (prof->tcam[i].in_use) { 3880 prof->tcam[i].in_use = false; 3881 status = ice_rel_tcam_idx(hw, blk, 3882 prof->tcam[i].tcam_idx); 3883 if (status) 3884 return ICE_ERR_HW_TABLE; 3885 } 3886 3887 return 0; 3888 } 3889 3890 /** 3891 * ice_rem_vsig - remove VSIG 3892 * @hw: pointer to the HW struct 3893 * @blk: hardware block 3894 * @vsig: the VSIG to remove 3895 * @chg: the change list 3896 */ 3897 static enum ice_status 3898 ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, 3899 struct list_head *chg) 3900 { 3901 u16 idx = vsig & ICE_VSIG_IDX_M; 3902 struct ice_vsig_vsi *vsi_cur; 3903 struct ice_vsig_prof *d, *t; 3904 enum ice_status status; 3905 3906 /* remove TCAM entries */ 3907 list_for_each_entry_safe(d, t, 3908 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, 3909 list) { 3910 status = ice_rem_prof_id(hw, blk, d); 3911 if (status) 3912 return status; 3913 3914 list_del(&d->list); 3915 devm_kfree(ice_hw_to_dev(hw), d); 3916 } 3917 3918 /* Move all VSIS associated with this VSIG to the default VSIG */ 3919 vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; 3920 /* If the VSIG has at least 1 VSI then iterate through the list 3921 * and remove the VSIs before deleting the group. 
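	 * Each move is recorded as an ICE_VSIG_REM entry on the change list so
	 * that the caller can later push the corresponding XLT2 updates to
	 * hardware (see ice_upd_prof_hw()).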
3922 */ 3923 if (vsi_cur) 3924 do { 3925 struct ice_vsig_vsi *tmp = vsi_cur->next_vsi; 3926 struct ice_chs_chg *p; 3927 3928 p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), 3929 GFP_KERNEL); 3930 if (!p) 3931 return ICE_ERR_NO_MEMORY; 3932 3933 p->type = ICE_VSIG_REM; 3934 p->orig_vsig = vsig; 3935 p->vsig = ICE_DEFAULT_VSIG; 3936 p->vsi = vsi_cur - hw->blk[blk].xlt2.vsis; 3937 3938 list_add(&p->list_entry, chg); 3939 3940 vsi_cur = tmp; 3941 } while (vsi_cur); 3942 3943 return ice_vsig_free(hw, blk, vsig); 3944 } 3945 3946 /** 3947 * ice_rem_prof_id_vsig - remove a specific profile from a VSIG 3948 * @hw: pointer to the HW struct 3949 * @blk: hardware block 3950 * @vsig: VSIG to remove the profile from 3951 * @hdl: profile handle indicating which profile to remove 3952 * @chg: list to receive a record of changes 3953 */ 3954 static enum ice_status 3955 ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, 3956 struct list_head *chg) 3957 { 3958 u16 idx = vsig & ICE_VSIG_IDX_M; 3959 struct ice_vsig_prof *p, *t; 3960 enum ice_status status; 3961 3962 list_for_each_entry_safe(p, t, 3963 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, 3964 list) 3965 if (p->profile_cookie == hdl) { 3966 if (ice_vsig_prof_id_count(hw, blk, vsig) == 1) 3967 /* this is the last profile, remove the VSIG */ 3968 return ice_rem_vsig(hw, blk, vsig, chg); 3969 3970 status = ice_rem_prof_id(hw, blk, p); 3971 if (!status) { 3972 list_del(&p->list); 3973 devm_kfree(ice_hw_to_dev(hw), p); 3974 } 3975 return status; 3976 } 3977 3978 return ICE_ERR_DOES_NOT_EXIST; 3979 } 3980 3981 /** 3982 * ice_rem_flow_all - remove all flows with a particular profile 3983 * @hw: pointer to the HW struct 3984 * @blk: hardware block 3985 * @id: profile tracking ID 3986 */ 3987 static enum ice_status 3988 ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id) 3989 { 3990 struct ice_chs_chg *del, *tmp; 3991 enum ice_status status; 3992 struct list_head chg; 3993 u16 i; 3994 3995 INIT_LIST_HEAD(&chg); 3996 3997 for (i = 1; i < ICE_MAX_VSIGS; i++) 3998 if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) { 3999 if (ice_has_prof_vsig(hw, blk, i, id)) { 4000 status = ice_rem_prof_id_vsig(hw, blk, i, id, 4001 &chg); 4002 if (status) 4003 goto err_ice_rem_flow_all; 4004 } 4005 } 4006 4007 status = ice_upd_prof_hw(hw, blk, &chg); 4008 4009 err_ice_rem_flow_all: 4010 list_for_each_entry_safe(del, tmp, &chg, list_entry) { 4011 list_del(&del->list_entry); 4012 devm_kfree(ice_hw_to_dev(hw), del); 4013 } 4014 4015 return status; 4016 } 4017 4018 /** 4019 * ice_rem_prof - remove profile 4020 * @hw: pointer to the HW struct 4021 * @blk: hardware block 4022 * @id: profile tracking ID 4023 * 4024 * This will remove the profile specified by the ID parameter, which was 4025 * previously created through ice_add_prof. If any existing entries 4026 * are associated with this profile, they will be removed as well. 
4027 */ 4028 enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id) 4029 { 4030 struct ice_prof_map *pmap; 4031 enum ice_status status; 4032 4033 mutex_lock(&hw->blk[blk].es.prof_map_lock); 4034 4035 pmap = ice_search_prof_id_low(hw, blk, id); 4036 if (!pmap) { 4037 status = ICE_ERR_DOES_NOT_EXIST; 4038 goto err_ice_rem_prof; 4039 } 4040 4041 /* remove all flows with this profile */ 4042 status = ice_rem_flow_all(hw, blk, pmap->profile_cookie); 4043 if (status) 4044 goto err_ice_rem_prof; 4045 4046 /* dereference profile, and possibly remove */ 4047 ice_prof_dec_ref(hw, blk, pmap->prof_id); 4048 4049 list_del(&pmap->list); 4050 devm_kfree(ice_hw_to_dev(hw), pmap); 4051 4052 err_ice_rem_prof: 4053 mutex_unlock(&hw->blk[blk].es.prof_map_lock); 4054 return status; 4055 } 4056 4057 /** 4058 * ice_get_prof - get profile 4059 * @hw: pointer to the HW struct 4060 * @blk: hardware block 4061 * @hdl: profile handle 4062 * @chg: change list 4063 */ 4064 static enum ice_status 4065 ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl, 4066 struct list_head *chg) 4067 { 4068 struct ice_prof_map *map; 4069 struct ice_chs_chg *p; 4070 u16 i; 4071 4072 /* Get the details on the profile specified by the handle ID */ 4073 map = ice_search_prof_id(hw, blk, hdl); 4074 if (!map) 4075 return ICE_ERR_DOES_NOT_EXIST; 4076 4077 for (i = 0; i < map->ptg_cnt; i++) 4078 if (!hw->blk[blk].es.written[map->prof_id]) { 4079 /* add ES to change list */ 4080 p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), 4081 GFP_KERNEL); 4082 if (!p) 4083 goto err_ice_get_prof; 4084 4085 p->type = ICE_PTG_ES_ADD; 4086 p->ptype = 0; 4087 p->ptg = map->ptg[i]; 4088 p->add_ptg = 0; 4089 4090 p->add_prof = 1; 4091 p->prof_id = map->prof_id; 4092 4093 hw->blk[blk].es.written[map->prof_id] = true; 4094 4095 list_add(&p->list_entry, chg); 4096 } 4097 4098 return 0; 4099 4100 err_ice_get_prof: 4101 /* let caller clean up the change list */ 4102 return ICE_ERR_NO_MEMORY; 4103 } 4104 4105 /** 4106 * ice_get_profs_vsig - get a copy of the list of profiles from a VSIG 4107 * @hw: pointer to the HW struct 4108 * @blk: hardware block 4109 * @vsig: VSIG from which to copy the list 4110 * @lst: output list 4111 * 4112 * This routine makes a copy of the list of profiles in the specified VSIG. 
4113 */ 4114 static enum ice_status 4115 ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, 4116 struct list_head *lst) 4117 { 4118 struct ice_vsig_prof *ent1, *ent2; 4119 u16 idx = vsig & ICE_VSIG_IDX_M; 4120 4121 list_for_each_entry(ent1, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, 4122 list) { 4123 struct ice_vsig_prof *p; 4124 4125 /* copy to the input list */ 4126 p = devm_kmemdup(ice_hw_to_dev(hw), ent1, sizeof(*p), 4127 GFP_KERNEL); 4128 if (!p) 4129 goto err_ice_get_profs_vsig; 4130 4131 list_add_tail(&p->list, lst); 4132 } 4133 4134 return 0; 4135 4136 err_ice_get_profs_vsig: 4137 list_for_each_entry_safe(ent1, ent2, lst, list) { 4138 list_del(&ent1->list); 4139 devm_kfree(ice_hw_to_dev(hw), ent1); 4140 } 4141 4142 return ICE_ERR_NO_MEMORY; 4143 } 4144 4145 /** 4146 * ice_add_prof_to_lst - add profile entry to a list 4147 * @hw: pointer to the HW struct 4148 * @blk: hardware block 4149 * @lst: the list to be added to 4150 * @hdl: profile handle of entry to add 4151 */ 4152 static enum ice_status 4153 ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk, 4154 struct list_head *lst, u64 hdl) 4155 { 4156 struct ice_prof_map *map; 4157 struct ice_vsig_prof *p; 4158 u16 i; 4159 4160 map = ice_search_prof_id(hw, blk, hdl); 4161 if (!map) 4162 return ICE_ERR_DOES_NOT_EXIST; 4163 4164 p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); 4165 if (!p) 4166 return ICE_ERR_NO_MEMORY; 4167 4168 p->profile_cookie = map->profile_cookie; 4169 p->prof_id = map->prof_id; 4170 p->tcam_count = map->ptg_cnt; 4171 4172 for (i = 0; i < map->ptg_cnt; i++) { 4173 p->tcam[i].prof_id = map->prof_id; 4174 p->tcam[i].tcam_idx = ICE_INVALID_TCAM; 4175 p->tcam[i].ptg = map->ptg[i]; 4176 } 4177 4178 list_add(&p->list, lst); 4179 4180 return 0; 4181 } 4182 4183 /** 4184 * ice_move_vsi - move VSI to another VSIG 4185 * @hw: pointer to the HW struct 4186 * @blk: hardware block 4187 * @vsi: the VSI to move 4188 * @vsig: the VSIG to move the VSI to 4189 * @chg: the change list 4190 */ 4191 static enum ice_status 4192 ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig, 4193 struct list_head *chg) 4194 { 4195 enum ice_status status; 4196 struct ice_chs_chg *p; 4197 u16 orig_vsig; 4198 4199 p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); 4200 if (!p) 4201 return ICE_ERR_NO_MEMORY; 4202 4203 status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig); 4204 if (!status) 4205 status = ice_vsig_add_mv_vsi(hw, blk, vsi, vsig); 4206 4207 if (status) { 4208 devm_kfree(ice_hw_to_dev(hw), p); 4209 return status; 4210 } 4211 4212 p->type = ICE_VSI_MOVE; 4213 p->vsi = vsi; 4214 p->orig_vsig = orig_vsig; 4215 p->vsig = vsig; 4216 4217 list_add(&p->list_entry, chg); 4218 4219 return 0; 4220 } 4221 4222 /** 4223 * ice_rem_chg_tcam_ent - remove a specific TCAM entry from change list 4224 * @hw: pointer to the HW struct 4225 * @idx: the index of the TCAM entry to remove 4226 * @chg: the list of change structures to search 4227 */ 4228 static void 4229 ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct list_head *chg) 4230 { 4231 struct ice_chs_chg *pos, *tmp; 4232 4233 list_for_each_entry_safe(tmp, pos, chg, list_entry) 4234 if (tmp->type == ICE_TCAM_ADD && tmp->tcam_idx == idx) { 4235 list_del(&tmp->list_entry); 4236 devm_kfree(ice_hw_to_dev(hw), tmp); 4237 } 4238 } 4239 4240 /** 4241 * ice_prof_tcam_ena_dis - add enable or disable TCAM change 4242 * @hw: pointer to the HW struct 4243 * @blk: hardware block 4244 * @enable: true to enable, false to disable 4245 * @vsig: the 
VSIG of the TCAM entry 4246 * @tcam: pointer to the TCAM info structure of the TCAM to enable or disable 4247 * @chg: the change list 4248 * 4249 * This function appends an enable or disable TCAM entry to the change log 4250 */ 4251 static enum ice_status 4252 ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable, 4253 u16 vsig, struct ice_tcam_inf *tcam, 4254 struct list_head *chg) 4255 { 4256 enum ice_status status; 4257 struct ice_chs_chg *p; 4258 4259 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 4260 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 }; 4261 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 }; 4262 4263 /* if disabling, free the TCAM */ 4264 if (!enable) { 4265 status = ice_rel_tcam_idx(hw, blk, tcam->tcam_idx); 4266 4267 /* if we have already created a change for this TCAM entry, then 4268 * we need to remove that entry, in order to prevent writing to 4269 * a TCAM entry we no longer will have ownership of. 4270 */ 4271 ice_rem_chg_tcam_ent(hw, tcam->tcam_idx, chg); 4272 tcam->tcam_idx = 0; 4273 tcam->in_use = 0; 4274 return status; 4275 } 4276 4277 /* for re-enabling, reallocate a TCAM */ 4278 status = ice_alloc_tcam_ent(hw, blk, &tcam->tcam_idx); 4279 if (status) 4280 return status; 4281 4282 /* add TCAM to change list */ 4283 p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); 4284 if (!p) 4285 return ICE_ERR_NO_MEMORY; 4286 4287 status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id, 4288 tcam->ptg, vsig, 0, 0, vl_msk, dc_msk, 4289 nm_msk); 4290 if (status) 4291 goto err_ice_prof_tcam_ena_dis; 4292 4293 tcam->in_use = 1; 4294 4295 p->type = ICE_TCAM_ADD; 4296 p->add_tcam_idx = true; 4297 p->prof_id = tcam->prof_id; 4298 p->ptg = tcam->ptg; 4299 p->vsig = 0; 4300 p->tcam_idx = tcam->tcam_idx; 4301 4302 /* log change */ 4303 list_add(&p->list_entry, chg); 4304 4305 return 0; 4306 4307 err_ice_prof_tcam_ena_dis: 4308 devm_kfree(ice_hw_to_dev(hw), p); 4309 return status; 4310 } 4311
4312 /** 4313 * ice_adj_prof_priorities - adjust profile based on priorities 4314 * @hw: pointer to the HW struct 4315 * @blk: hardware block 4316 * @vsig: the VSIG for which to adjust profile priorities 4317 * @chg: the change list 4318 */ 4319 static enum ice_status 4320 ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig, 4321 struct list_head *chg) 4322 { 4323 DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT); 4324 struct ice_vsig_prof *t; 4325 enum ice_status status; 4326 u16 idx; 4327 4328 bitmap_zero(ptgs_used, ICE_XLT1_CNT); 4329 idx = vsig & ICE_VSIG_IDX_M; 4330 4331 /* Priority is based on the order in which the profiles are added. The 4332 * newest added profile has highest priority and the oldest added 4333 * profile has the lowest priority. Since the profile property list for 4334 * a VSIG is sorted from newest to oldest, this code traverses the list 4335 * in order and enables the first of each PTG that it finds (that is not 4336 * already enabled); it also disables any duplicate PTGs that it finds 4337 * in the older profiles (that are currently enabled). 4338 */ 4339 4340 list_for_each_entry(t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, 4341 list) { 4342 u16 i; 4343 4344 for (i = 0; i < t->tcam_count; i++) { 4345 /* Scan the priorities from newest to oldest. 4346 * Make sure that the newest profiles take priority.
4347 */ 4348 if (test_bit(t->tcam[i].ptg, ptgs_used) && 4349 t->tcam[i].in_use) { 4350 /* need to mark this PTG as never match, as it 4351 * was already in use and therefore duplicate 4352 * (and lower priority) 4353 */ 4354 status = ice_prof_tcam_ena_dis(hw, blk, false, 4355 vsig, 4356 &t->tcam[i], 4357 chg); 4358 if (status) 4359 return status; 4360 } else if (!test_bit(t->tcam[i].ptg, ptgs_used) && 4361 !t->tcam[i].in_use) { 4362 /* need to enable this PTG, as it is not in use 4363 * and not enabled (highest priority) 4364 */ 4365 status = ice_prof_tcam_ena_dis(hw, blk, true, 4366 vsig, 4367 &t->tcam[i], 4368 chg); 4369 if (status) 4370 return status; 4371 } 4372 4373 /* keep track of used ptgs */ 4374 set_bit(t->tcam[i].ptg, ptgs_used); 4375 } 4376 } 4377 4378 return 0; 4379 } 4380
4381 /** 4382 * ice_add_prof_id_vsig - add profile to VSIG 4383 * @hw: pointer to the HW struct 4384 * @blk: hardware block 4385 * @vsig: the VSIG to which this profile is to be added 4386 * @hdl: the profile handle indicating the profile to add 4387 * @rev: true to add entries to the end of the list 4388 * @chg: the change list 4389 */ 4390 static enum ice_status 4391 ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, 4392 bool rev, struct list_head *chg) 4393 { 4394 /* Masks that ignore flags */ 4395 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 4396 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 }; 4397 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 }; 4398 struct ice_prof_map *map; 4399 struct ice_vsig_prof *t; 4400 struct ice_chs_chg *p; 4401 u16 vsig_idx, i; 4402 4403 /* Get the details on the profile specified by the handle ID */ 4404 map = ice_search_prof_id(hw, blk, hdl); 4405 if (!map) 4406 return ICE_ERR_DOES_NOT_EXIST; 4407 4408 /* Error, if this VSIG already has this profile */ 4409 if (ice_has_prof_vsig(hw, blk, vsig, hdl)) 4410 return ICE_ERR_ALREADY_EXISTS; 4411 4412 /* new VSIG profile structure */ 4413 t = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*t), GFP_KERNEL); 4414 if (!t) 4415 return ICE_ERR_NO_MEMORY; 4416 4417 t->profile_cookie = map->profile_cookie; 4418 t->prof_id = map->prof_id; 4419 t->tcam_count = map->ptg_cnt; 4420 4421 /* create TCAM entries */ 4422 for (i = 0; i < map->ptg_cnt; i++) { 4423 enum ice_status status; 4424 u16 tcam_idx; 4425 4426 /* add TCAM to change list */ 4427 p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); 4428 if (!p) 4429 goto err_ice_add_prof_id_vsig; 4430 4431 /* allocate the TCAM entry index */ 4432 status = ice_alloc_tcam_ent(hw, blk, &tcam_idx); 4433 if (status) { 4434 devm_kfree(ice_hw_to_dev(hw), p); 4435 goto err_ice_add_prof_id_vsig; 4436 } 4437 4438 t->tcam[i].ptg = map->ptg[i]; 4439 t->tcam[i].prof_id = map->prof_id; 4440 t->tcam[i].tcam_idx = tcam_idx; 4441 t->tcam[i].in_use = true; 4442 4443 p->type = ICE_TCAM_ADD; 4444 p->add_tcam_idx = true; 4445 p->prof_id = t->tcam[i].prof_id; 4446 p->ptg = t->tcam[i].ptg; 4447 p->vsig = vsig; 4448 p->tcam_idx = t->tcam[i].tcam_idx; 4449 4450 /* write the TCAM entry */ 4451 status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx, 4452 t->tcam[i].prof_id, 4453 t->tcam[i].ptg, vsig, 0, 0, 4454 vl_msk, dc_msk, nm_msk); 4455 if (status) { 4456 devm_kfree(ice_hw_to_dev(hw), p); 4457 goto err_ice_add_prof_id_vsig; 4458 } 4459 4460 /* log change */ 4461 list_add(&p->list_entry, chg); 4462 } 4463 4464 /* add profile to VSIG */ 4465 vsig_idx = vsig & ICE_VSIG_IDX_M; 4466 if (rev) 4467 list_add_tail(&t->list, 4468
&hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst); 4469 else 4470 list_add(&t->list, 4471 &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst); 4472 4473 return 0; 4474 4475 err_ice_add_prof_id_vsig: 4476 /* let caller clean up the change list */ 4477 devm_kfree(ice_hw_to_dev(hw), t); 4478 return ICE_ERR_NO_MEMORY; 4479 } 4480
4481 /** 4482 * ice_create_prof_id_vsig - add a new VSIG with a single profile 4483 * @hw: pointer to the HW struct 4484 * @blk: hardware block 4485 * @vsi: the initial VSI that will be in the VSIG 4486 * @hdl: the profile handle of the profile that will be added to the VSIG 4487 * @chg: the change list 4488 */ 4489 static enum ice_status 4490 ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl, 4491 struct list_head *chg) 4492 { 4493 enum ice_status status; 4494 struct ice_chs_chg *p; 4495 u16 new_vsig; 4496 4497 p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); 4498 if (!p) 4499 return ICE_ERR_NO_MEMORY; 4500 4501 new_vsig = ice_vsig_alloc(hw, blk); 4502 if (!new_vsig) { 4503 status = ICE_ERR_HW_TABLE; 4504 goto err_ice_create_prof_id_vsig; 4505 } 4506 4507 status = ice_move_vsi(hw, blk, vsi, new_vsig, chg); 4508 if (status) 4509 goto err_ice_create_prof_id_vsig; 4510 4511 status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, false, chg); 4512 if (status) 4513 goto err_ice_create_prof_id_vsig; 4514 4515 p->type = ICE_VSIG_ADD; 4516 p->vsi = vsi; 4517 p->orig_vsig = ICE_DEFAULT_VSIG; 4518 p->vsig = new_vsig; 4519 4520 list_add(&p->list_entry, chg); 4521 4522 return 0; 4523 4524 err_ice_create_prof_id_vsig: 4525 /* let caller clean up the change list */ 4526 devm_kfree(ice_hw_to_dev(hw), p); 4527 return status; 4528 } 4529
4530 /** 4531 * ice_create_vsig_from_lst - create a new VSIG with a list of profiles 4532 * @hw: pointer to the HW struct 4533 * @blk: hardware block 4534 * @vsi: the initial VSI that will be in the VSIG 4535 * @lst: the list of profiles that will be added to the VSIG 4536 * @new_vsig: returns the new VSIG ID 4537 * @chg: the change list 4538 */ 4539 static enum ice_status 4540 ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi, 4541 struct list_head *lst, u16 *new_vsig, 4542 struct list_head *chg) 4543 { 4544 struct ice_vsig_prof *t; 4545 enum ice_status status; 4546 u16 vsig; 4547 4548 vsig = ice_vsig_alloc(hw, blk); 4549 if (!vsig) 4550 return ICE_ERR_HW_TABLE; 4551 4552 status = ice_move_vsi(hw, blk, vsi, vsig, chg); 4553 if (status) 4554 return status; 4555 4556 list_for_each_entry(t, lst, list) { 4557 /* Reverse the order here since we are copying the list */ 4558 status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie, 4559 true, chg); 4560 if (status) 4561 return status; 4562 } 4563 4564 *new_vsig = vsig; 4565 4566 return 0; 4567 } 4568
4569 /** 4570 * ice_find_prof_vsig - find a VSIG with a specific profile handle 4571 * @hw: pointer to the HW struct 4572 * @blk: hardware block 4573 * @hdl: the profile handle of the profile to search for 4574 * @vsig: returns the VSIG with the matching profile 4575 */ 4576 static bool 4577 ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig) 4578 { 4579 struct ice_vsig_prof *t; 4580 enum ice_status status; 4581 struct list_head lst; 4582 4583 INIT_LIST_HEAD(&lst); 4584 4585 t = kzalloc(sizeof(*t), GFP_KERNEL); 4586 if (!t) 4587 return false; 4588 4589 t->profile_cookie = hdl; 4590 list_add(&t->list, &lst); 4591 4592 status = ice_find_dup_props_vsig(hw, blk, &lst, vsig); 4593 4594 list_del(&t->list); 4595 kfree(t); 4596 4597 return
!status; 4598 } 4599 4600 /** 4601 * ice_add_prof_id_flow - add profile flow 4602 * @hw: pointer to the HW struct 4603 * @blk: hardware block 4604 * @vsi: the VSI to enable with the profile specified by ID 4605 * @hdl: profile handle 4606 * 4607 * Calling this function will update the hardware tables to enable the 4608 * profile indicated by the ID parameter for the VSIs specified in the VSI 4609 * array. Once successfully called, the flow will be enabled. 4610 */ 4611 enum ice_status 4612 ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl) 4613 { 4614 struct ice_vsig_prof *tmp1, *del1; 4615 struct ice_chs_chg *tmp, *del; 4616 struct list_head union_lst; 4617 enum ice_status status; 4618 struct list_head chg; 4619 u16 vsig; 4620 4621 INIT_LIST_HEAD(&union_lst); 4622 INIT_LIST_HEAD(&chg); 4623 4624 /* Get profile */ 4625 status = ice_get_prof(hw, blk, hdl, &chg); 4626 if (status) 4627 return status; 4628 4629 /* determine if VSI is already part of a VSIG */ 4630 status = ice_vsig_find_vsi(hw, blk, vsi, &vsig); 4631 if (!status && vsig) { 4632 bool only_vsi; 4633 u16 or_vsig; 4634 u16 ref; 4635 4636 /* found in VSIG */ 4637 or_vsig = vsig; 4638 4639 /* make sure that there is no overlap/conflict between the new 4640 * characteristics and the existing ones; we don't support that 4641 * scenario 4642 */ 4643 if (ice_has_prof_vsig(hw, blk, vsig, hdl)) { 4644 status = ICE_ERR_ALREADY_EXISTS; 4645 goto err_ice_add_prof_id_flow; 4646 } 4647 4648 /* last VSI in the VSIG? */ 4649 status = ice_vsig_get_ref(hw, blk, vsig, &ref); 4650 if (status) 4651 goto err_ice_add_prof_id_flow; 4652 only_vsi = (ref == 1); 4653 4654 /* create a union of the current profiles and the one being 4655 * added 4656 */ 4657 status = ice_get_profs_vsig(hw, blk, vsig, &union_lst); 4658 if (status) 4659 goto err_ice_add_prof_id_flow; 4660 4661 status = ice_add_prof_to_lst(hw, blk, &union_lst, hdl); 4662 if (status) 4663 goto err_ice_add_prof_id_flow; 4664 4665 /* search for an existing VSIG with an exact charc match */ 4666 status = ice_find_dup_props_vsig(hw, blk, &union_lst, &vsig); 4667 if (!status) { 4668 /* move VSI to the VSIG that matches */ 4669 status = ice_move_vsi(hw, blk, vsi, vsig, &chg); 4670 if (status) 4671 goto err_ice_add_prof_id_flow; 4672 4673 /* VSI has been moved out of or_vsig. If the or_vsig had 4674 * only that VSI it is now empty and can be removed. 4675 */ 4676 if (only_vsi) { 4677 status = ice_rem_vsig(hw, blk, or_vsig, &chg); 4678 if (status) 4679 goto err_ice_add_prof_id_flow; 4680 } 4681 } else if (only_vsi) { 4682 /* If the original VSIG only contains one VSI, then it 4683 * will be the requesting VSI. In this case the VSI is 4684 * not sharing entries and we can simply add the new 4685 * profile to the VSIG. 
4686 */ 4687 status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, false, 4688 &chg); 4689 if (status) 4690 goto err_ice_add_prof_id_flow; 4691 4692 /* Adjust priorities */ 4693 status = ice_adj_prof_priorities(hw, blk, vsig, &chg); 4694 if (status) 4695 goto err_ice_add_prof_id_flow; 4696 } else { 4697 /* No match, so we need a new VSIG */ 4698 status = ice_create_vsig_from_lst(hw, blk, vsi, 4699 &union_lst, &vsig, 4700 &chg); 4701 if (status) 4702 goto err_ice_add_prof_id_flow; 4703 4704 /* Adjust priorities */ 4705 status = ice_adj_prof_priorities(hw, blk, vsig, &chg); 4706 if (status) 4707 goto err_ice_add_prof_id_flow; 4708 } 4709 } else { 4710 /* need to find or add a VSIG */ 4711 /* search for an existing VSIG with an exact charc match */ 4712 if (ice_find_prof_vsig(hw, blk, hdl, &vsig)) { 4713 /* found an exact match */ 4714 /* add or move VSI to the VSIG that matches */ 4715 status = ice_move_vsi(hw, blk, vsi, vsig, &chg); 4716 if (status) 4717 goto err_ice_add_prof_id_flow; 4718 } else { 4719 /* we did not find an exact match */ 4720 /* we need to add a VSIG */ 4721 status = ice_create_prof_id_vsig(hw, blk, vsi, hdl, 4722 &chg); 4723 if (status) 4724 goto err_ice_add_prof_id_flow; 4725 } 4726 } 4727 4728 /* update hardware */ 4729 if (!status) 4730 status = ice_upd_prof_hw(hw, blk, &chg); 4731 4732 err_ice_add_prof_id_flow: 4733 list_for_each_entry_safe(del, tmp, &chg, list_entry) { 4734 list_del(&del->list_entry); 4735 devm_kfree(ice_hw_to_dev(hw), del); 4736 } 4737 4738 list_for_each_entry_safe(del1, tmp1, &union_lst, list) { 4739 list_del(&del1->list); 4740 devm_kfree(ice_hw_to_dev(hw), del1); 4741 } 4742 4743 return status; 4744 } 4745 4746 /** 4747 * ice_rem_prof_from_list - remove a profile from list 4748 * @hw: pointer to the HW struct 4749 * @lst: list to remove the profile from 4750 * @hdl: the profile handle indicating the profile to remove 4751 */ 4752 static enum ice_status 4753 ice_rem_prof_from_list(struct ice_hw *hw, struct list_head *lst, u64 hdl) 4754 { 4755 struct ice_vsig_prof *ent, *tmp; 4756 4757 list_for_each_entry_safe(ent, tmp, lst, list) 4758 if (ent->profile_cookie == hdl) { 4759 list_del(&ent->list); 4760 devm_kfree(ice_hw_to_dev(hw), ent); 4761 return 0; 4762 } 4763 4764 return ICE_ERR_DOES_NOT_EXIST; 4765 } 4766 4767 /** 4768 * ice_rem_prof_id_flow - remove flow 4769 * @hw: pointer to the HW struct 4770 * @blk: hardware block 4771 * @vsi: the VSI from which to remove the profile specified by ID 4772 * @hdl: profile tracking handle 4773 * 4774 * Calling this function will update the hardware tables to remove the 4775 * profile indicated by the ID parameter for the VSIs specified in the VSI 4776 * array. Once successfully called, the flow will be disabled. 
4777 */ 4778 enum ice_status 4779 ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl) 4780 { 4781 struct ice_vsig_prof *tmp1, *del1; 4782 struct ice_chs_chg *tmp, *del; 4783 struct list_head chg, copy; 4784 enum ice_status status; 4785 u16 vsig; 4786 4787 INIT_LIST_HEAD(&copy); 4788 INIT_LIST_HEAD(&chg); 4789 4790 /* determine if VSI is already part of a VSIG */ 4791 status = ice_vsig_find_vsi(hw, blk, vsi, &vsig); 4792 if (!status && vsig) { 4793 bool last_profile; 4794 bool only_vsi; 4795 u16 ref; 4796 4797 /* found in VSIG */ 4798 last_profile = ice_vsig_prof_id_count(hw, blk, vsig) == 1; 4799 status = ice_vsig_get_ref(hw, blk, vsig, &ref); 4800 if (status) 4801 goto err_ice_rem_prof_id_flow; 4802 only_vsi = (ref == 1); 4803 4804 if (only_vsi) { 4805 /* If the original VSIG only contains one reference, 4806 * which will be the requesting VSI, then the VSI is not 4807 * sharing entries and we can simply remove the specific 4808 * characteristics from the VSIG. 4809 */ 4810 4811 if (last_profile) { 4812 /* If there are no profiles left for this VSIG, 4813 * then simply remove the VSIG. 4814 */ 4815 status = ice_rem_vsig(hw, blk, vsig, &chg); 4816 if (status) 4817 goto err_ice_rem_prof_id_flow; 4818 } else { 4819 status = ice_rem_prof_id_vsig(hw, blk, vsig, 4820 hdl, &chg); 4821 if (status) 4822 goto err_ice_rem_prof_id_flow; 4823 4824 /* Adjust priorities */ 4825 status = ice_adj_prof_priorities(hw, blk, vsig, 4826 &chg); 4827 if (status) 4828 goto err_ice_rem_prof_id_flow; 4829 } 4830 4831 } else { 4832 /* Make a copy of the VSIG's list of Profiles */ 4833 status = ice_get_profs_vsig(hw, blk, vsig, &copy); 4834 if (status) 4835 goto err_ice_rem_prof_id_flow; 4836 4837 /* Remove specified profile entry from the list */ 4838 status = ice_rem_prof_from_list(hw, &copy, hdl); 4839 if (status) 4840 goto err_ice_rem_prof_id_flow; 4841 4842 if (list_empty(&copy)) { 4843 status = ice_move_vsi(hw, blk, vsi, 4844 ICE_DEFAULT_VSIG, &chg); 4845 if (status) 4846 goto err_ice_rem_prof_id_flow; 4847 4848 } else if (!ice_find_dup_props_vsig(hw, blk, &copy, 4849 &vsig)) { 4850 /* found an exact match */ 4851 /* add or move VSI to the VSIG that matches */ 4852 /* Search for a VSIG with a matching profile 4853 * list 4854 */ 4855 4856 /* Found match, move VSI to the matching VSIG */ 4857 status = ice_move_vsi(hw, blk, vsi, vsig, &chg); 4858 if (status) 4859 goto err_ice_rem_prof_id_flow; 4860 } else { 4861 /* since no existing VSIG supports this 4862 * characteristic pattern, we need to create a 4863 * new VSIG and TCAM entries 4864 */ 4865 status = ice_create_vsig_from_lst(hw, blk, vsi, 4866 &copy, &vsig, 4867 &chg); 4868 if (status) 4869 goto err_ice_rem_prof_id_flow; 4870 4871 /* Adjust priorities */ 4872 status = ice_adj_prof_priorities(hw, blk, vsig, 4873 &chg); 4874 if (status) 4875 goto err_ice_rem_prof_id_flow; 4876 } 4877 } 4878 } else { 4879 status = ICE_ERR_DOES_NOT_EXIST; 4880 } 4881 4882 /* update hardware tables */ 4883 if (!status) 4884 status = ice_upd_prof_hw(hw, blk, &chg); 4885 4886 err_ice_rem_prof_id_flow: 4887 list_for_each_entry_safe(del, tmp, &chg, list_entry) { 4888 list_del(&del->list_entry); 4889 devm_kfree(ice_hw_to_dev(hw), del); 4890 } 4891 4892 list_for_each_entry_safe(del1, tmp1, &copy, list) { 4893 list_del(&del1->list); 4894 devm_kfree(ice_hw_to_dev(hw), del1); 4895 } 4896 4897 return status; 4898 } 4899
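
/* Usage sketch (illustrative only, not part of the upstream driver): the
 * hypothetical helper below shows how a caller, such as the flow engine in
 * ice_flow.c, might drive the public profile/VSIG entry points defined
 * above. The name ice_example_prof_cycle is an assumption for illustration;
 * ice_add_prof_id_flow(), ice_rem_prof_id_flow() and ice_rem_prof() are the
 * real functions implemented in this file.
 */
static enum ice_status __maybe_unused
ice_example_prof_cycle(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 id)
{
	enum ice_status status;

	/* Enable the previously added profile (tracking ID 'id') for this
	 * VSI. Internally the VSI is moved into a VSIG whose characteristic
	 * list contains the profile, allocating TCAM entries as needed.
	 */
	status = ice_add_prof_id_flow(hw, blk, vsi, id);
	if (status)
		return status;

	/* ... traffic matching the profile's PTGs is now classified ... */

	/* Detach the profile from the VSI again; the VSI is moved to a VSIG
	 * matching its remaining characteristics or to the default VSIG.
	 */
	status = ice_rem_prof_id_flow(hw, blk, vsi, id);
	if (status)
		return status;

	/* Finally remove the profile itself; ice_rem_prof() also removes any
	 * flows that still reference it before freeing the tracking entry.
	 */
	return ice_rem_prof(hw, blk, id);
}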