/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/firmware.h>
#include <linux/stddef.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/log2.h>

#include "csio_hw.h"
#include "csio_lnode.h"
#include "csio_rnode.h"

int csio_dbg_level = 0xFEFF;
unsigned int csio_port_mask = 0xf;
/* Default FW event queue entries. */
static uint32_t csio_evtq_sz = CSIO_EVTQ_SIZE;

/* Default MSI param level */
int csio_msi = 2;

/* FCoE function instances */
static int dev_num;

/* FCoE Adapter types & its description */
static const struct csio_adap_desc csio_t5_fcoe_adapters[] = {
        {"T580-Dbg 10G", "Chelsio T580-Dbg 10G [FCoE]"},
        {"T520-CR 10G", "Chelsio T520-CR 10G [FCoE]"},
        {"T522-CR 10G/1G", "Chelsio T522-CR 10G/1G [FCoE]"},
        {"T540-CR 10G", "Chelsio T540-CR 10G [FCoE]"},
        {"T520-BCH 10G", "Chelsio T520-BCH 10G [FCoE]"},
        {"T540-BCH 10G", "Chelsio T540-BCH 10G [FCoE]"},
        {"T540-CH 10G", "Chelsio T540-CH 10G [FCoE]"},
        {"T520-SO 10G", "Chelsio T520-SO 10G [FCoE]"},
        {"T520-CX4 10G", "Chelsio T520-CX4 10G [FCoE]"},
        {"T520-BT 10G", "Chelsio T520-BT 10G [FCoE]"},
        {"T504-BT 1G", "Chelsio T504-BT 1G [FCoE]"},
        {"B520-SR 10G", "Chelsio B520-SR 10G [FCoE]"},
        {"B504-BT 1G", "Chelsio B504-BT 1G [FCoE]"},
        {"T580-CR 10G", "Chelsio T580-CR 10G [FCoE]"},
        {"T540-LP-CR 10G", "Chelsio T540-LP-CR 10G [FCoE]"},
        {"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"},
        {"T580-LP-CR 40G", "Chelsio T580-LP-CR 40G [FCoE]"},
        {"T520-LL-CR 10G", "Chelsio T520-LL-CR 10G [FCoE]"},
        {"T560-CR 40G", "Chelsio T560-CR 40G [FCoE]"},
        {"T580-CR 40G", "Chelsio T580-CR 40G [FCoE]"},
        {"T580-SO 40G", "Chelsio T580-SO 40G [FCoE]"},
        {"T502-BT 1G", "Chelsio T502-BT 1G [FCoE]"}
};

static void csio_mgmtm_cleanup(struct csio_mgmtm *);
static void csio_hw_mbm_cleanup(struct csio_hw *);

/* State machine forward declarations */
static void csio_hws_uninit(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_configuring(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_initializing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_ready(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_quiescing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_quiesced(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_resetting(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_removing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_pcierr(struct csio_hw *, enum csio_hw_ev);

static void csio_hw_initialize(struct csio_hw *hw);
static void csio_evtq_stop(struct csio_hw *hw);
static void csio_evtq_start(struct csio_hw *hw);

int csio_is_hw_ready(struct csio_hw *hw)
{
        return csio_match_state(hw, csio_hws_ready);
}

int csio_is_hw_removing(struct csio_hw *hw)
{
        return csio_match_state(hw, csio_hws_removing);
}

/*
 * csio_hw_wait_op_done_val - wait until an operation is completed
 * @hw: the HW module
 * @reg: the register to check for completion
 * @mask: a single-bit field within @reg that indicates completion
 * @polarity: the value of the field when the operation is completed
 * @attempts: number of check iterations
 * @delay: delay in usecs between iterations
 * @valp: where to store the value of the register at completion time
 *
 * Wait until an operation is completed by checking a bit in a register
 * up to @attempts times. If @valp is not NULL the value of the register
 * at the time it indicated completion is stored there. Returns 0 if the
 * operation completes and -EAGAIN otherwise.
 */
int
csio_hw_wait_op_done_val(struct csio_hw *hw, int reg, uint32_t mask,
                         int polarity, int attempts, int delay, uint32_t *valp)
{
        uint32_t val;

        while (1) {
                val = csio_rd_reg32(hw, reg);

                if (!!(val & mask) == polarity) {
                        if (valp)
                                *valp = val;
                        return 0;
                }

                if (--attempts == 0)
                        return -EAGAIN;
                if (delay)
                        udelay(delay);
        }
}
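/*
 * Illustrative usage (taken from the serial flash helpers later in this
 * file): poll SF_OP until its BUSY bit deasserts, checking SF_ATTEMPTS
 * times with 10us between polls and discarding the final register value:
 *
 *      ret = csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0,
 *                                     SF_ATTEMPTS, 10, NULL);
 */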
/*
 * csio_hw_tp_wr_bits_indirect - set/clear bits in an indirect TP register
 * @hw: the adapter
 * @addr: the indirect TP register address
 * @mask: specifies the field within the register to modify
 * @val: new value for the field
 *
 * Sets a field of an indirect TP register to the given value.
 */
void
csio_hw_tp_wr_bits_indirect(struct csio_hw *hw, unsigned int addr,
                            unsigned int mask, unsigned int val)
{
        csio_wr_reg32(hw, addr, TP_PIO_ADDR_A);
        val |= csio_rd_reg32(hw, TP_PIO_DATA_A) & ~mask;
        csio_wr_reg32(hw, val, TP_PIO_DATA_A);
}

void
csio_set_reg_field(struct csio_hw *hw, uint32_t reg, uint32_t mask,
                   uint32_t value)
{
        uint32_t val = csio_rd_reg32(hw, reg) & ~mask;

        csio_wr_reg32(hw, val | value, reg);
        /* Flush */
        csio_rd_reg32(hw, reg);
}

static int
csio_memory_write(struct csio_hw *hw, int mtype, u32 addr, u32 len, u32 *buf)
{
        return hw->chip_ops->chip_memory_rw(hw, MEMWIN_CSIOSTOR, mtype,
                                            addr, len, buf, 0);
}

/*
 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
 */
#define EEPROM_MAX_RD_POLL      40
#define EEPROM_MAX_WR_POLL      6
#define EEPROM_STAT_ADDR        0x7bfc
#define VPD_BASE                0x400
#define VPD_BASE_OLD            0
#define VPD_LEN                 1024
#define VPD_INFO_FLD_HDR_SIZE   3

/*
 * csio_hw_seeprom_read - read a serial EEPROM location
 * @hw: hw to read
 * @addr: EEPROM virtual address
 * @data: where to store the read data
 *
 * Read a 32-bit word from a location in serial EEPROM using the card's PCI
 * VPD capability. Note that this function must be called with a virtual
 * address.
 */
static int
csio_hw_seeprom_read(struct csio_hw *hw, uint32_t addr, uint32_t *data)
{
        uint16_t val = 0;
        int attempts = EEPROM_MAX_RD_POLL;
        uint32_t base = hw->params.pci.vpd_cap_addr;

        if (addr >= EEPROMVSIZE || (addr & 3))
                return -EINVAL;

        pci_write_config_word(hw->pdev, base + PCI_VPD_ADDR, (uint16_t)addr);

        do {
                udelay(10);
                pci_read_config_word(hw->pdev, base + PCI_VPD_ADDR, &val);
        } while (!(val & PCI_VPD_ADDR_F) && --attempts);

        if (!(val & PCI_VPD_ADDR_F)) {
                csio_err(hw, "reading EEPROM address 0x%x failed\n", addr);
                return -EINVAL;
        }

        pci_read_config_dword(hw->pdev, base + PCI_VPD_DATA, data);
        *data = le32_to_cpu(*(__le32 *)data);

        return 0;
}
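/*
 * The read above follows the standard PCI VPD handshake: write the
 * word-aligned address to PCI_VPD_ADDR with the PCI_VPD_ADDR_F flag
 * clear, poll until the hardware sets the flag to signal completion,
 * then read four bytes from PCI_VPD_DATA (little-endian on the wire,
 * hence the le32_to_cpu() fixup).
 */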
/*
 * Partial EEPROM Vital Product Data structure. Includes only the ID and
 * VPD-R sections.
 */
struct t4_vpd_hdr {
        u8 id_tag;
        u8 id_len[2];
        u8 id_data[ID_LEN];
        u8 vpdr_tag;
        u8 vpdr_len[2];
};

/*
 * csio_hw_get_vpd_keyword_val - Locates an information field keyword in
 *                               the VPD
 * @v: Pointer to buffered vpd data structure
 * @kw: The keyword to search for
 *
 * Returns the offset of the information field keyword's value within
 * the VPD, or -EINVAL if the keyword is not found.
 */
static int
csio_hw_get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
{
        int32_t i;
        int32_t offset, len;
        const uint8_t *buf = &v->id_tag;
        const uint8_t *vpdr_len = &v->vpdr_tag;

        offset = sizeof(struct t4_vpd_hdr);
        len = (uint16_t)vpdr_len[1] + ((uint16_t)vpdr_len[2] << 8);

        if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN)
                return -EINVAL;

        for (i = offset; (i + VPD_INFO_FLD_HDR_SIZE) <= (offset + len);) {
                if (memcmp(buf + i, kw, 2) == 0) {
                        i += VPD_INFO_FLD_HDR_SIZE;
                        return i;
                }

                i += VPD_INFO_FLD_HDR_SIZE + buf[i + 2];
        }

        return -EINVAL;
}

static int
csio_pci_capability(struct pci_dev *pdev, int cap, int *pos)
{
        *pos = pci_find_capability(pdev, cap);
        if (*pos)
                return 0;

        return -1;
}

/*
 * csio_hw_get_vpd_params - read VPD parameters from VPD EEPROM
 * @hw: HW module
 * @p: where to store the parameters
 *
 * Reads card parameters stored in VPD EEPROM.
 */
static int
csio_hw_get_vpd_params(struct csio_hw *hw, struct csio_vpd *p)
{
        int i, ret, ec, sn, addr;
        uint8_t *vpd, csum;
        const struct t4_vpd_hdr *v;
        /* To get around compilation warning from strstrip */
        char *s;

        if (csio_is_valid_vpd(hw))
                return 0;

        ret = csio_pci_capability(hw->pdev, PCI_CAP_ID_VPD,
                                  &hw->params.pci.vpd_cap_addr);
        if (ret)
                return -EINVAL;

        vpd = kzalloc(VPD_LEN, GFP_ATOMIC);
        if (vpd == NULL)
                return -ENOMEM;

        /*
         * Card information normally starts at VPD_BASE but early cards had
         * it at 0.
         */
        ret = csio_hw_seeprom_read(hw, VPD_BASE, (uint32_t *)(vpd));
        addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;

        for (i = 0; i < VPD_LEN; i += 4) {
                ret = csio_hw_seeprom_read(hw, addr + i, (uint32_t *)(vpd + i));
                if (ret) {
                        kfree(vpd);
                        return ret;
                }
        }

        /* Reset the VPD flag! */
        hw->flags &= (~CSIO_HWF_VPD_VALID);

        v = (const struct t4_vpd_hdr *)vpd;

#define FIND_VPD_KW(var, name) do { \
        var = csio_hw_get_vpd_keyword_val(v, name); \
        if (var < 0) { \
                csio_err(hw, "missing VPD keyword " name "\n"); \
                kfree(vpd); \
                return -EINVAL; \
        } \
} while (0)

        FIND_VPD_KW(i, "RV");
        for (csum = 0; i >= 0; i--)
                csum += vpd[i];

        if (csum) {
                csio_err(hw, "corrupted VPD EEPROM, actual csum %u\n", csum);
                kfree(vpd);
                return -EINVAL;
        }

        FIND_VPD_KW(ec, "EC");
        FIND_VPD_KW(sn, "SN");
#undef FIND_VPD_KW

        memcpy(p->id, v->id_data, ID_LEN);
        s = strstrip(p->id);
        memcpy(p->ec, vpd + ec, EC_LEN);
        s = strstrip(p->ec);
        i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
        memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
        s = strstrip(p->sn);

        csio_valid_vpd_copied(hw);

        kfree(vpd);
        return 0;
}
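/*
 * For reference (per the usual PCI VPD-R layout this parser assumes),
 * each information field is laid out as:
 *
 *      bytes 0-1: two-character keyword, e.g. "SN"
 *      byte  2:   value length
 *      bytes 3+:  value
 *
 * which is why VPD_INFO_FLD_HDR_SIZE is 3, and why the "RV" field is a
 * checksum byte chosen so that all bytes from the start of the VPD up
 * to and including "RV" sum to zero (mod 256), as verified above.
 */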
/*
 * csio_hw_sf1_read - read data from the serial flash
 * @hw: the HW module
 * @byte_cnt: number of bytes to read
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @valp: where to store the read data
 *
 * Reads up to 4 bytes of data from the serial flash. The location of
 * the read needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int
csio_hw_sf1_read(struct csio_hw *hw, uint32_t byte_cnt, int32_t cont,
                 int32_t lock, uint32_t *valp)
{
        int ret;

        if (!byte_cnt || byte_cnt > 4)
                return -EINVAL;
        if (csio_rd_reg32(hw, SF_OP_A) & SF_BUSY_F)
                return -EBUSY;

        csio_wr_reg32(hw, SF_LOCK_V(lock) | SF_CONT_V(cont) |
                      BYTECNT_V(byte_cnt - 1), SF_OP_A);
        ret = csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS,
                                       10, NULL);
        if (!ret)
                *valp = csio_rd_reg32(hw, SF_DATA_A);
        return ret;
}

/*
 * csio_hw_sf1_write - write data to the serial flash
 * @hw: the HW module
 * @byte_cnt: number of bytes to write
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @val: value to write
 *
 * Writes up to 4 bytes of data to the serial flash. The location of
 * the write needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int
csio_hw_sf1_write(struct csio_hw *hw, uint32_t byte_cnt, uint32_t cont,
                  int32_t lock, uint32_t val)
{
        if (!byte_cnt || byte_cnt > 4)
                return -EINVAL;
        if (csio_rd_reg32(hw, SF_OP_A) & SF_BUSY_F)
                return -EBUSY;

        csio_wr_reg32(hw, val, SF_DATA_A);
        csio_wr_reg32(hw, SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) |
                      OP_V(1) | SF_LOCK_V(lock), SF_OP_A);

        return csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS,
                                        10, NULL);
}

/*
 * csio_hw_flash_wait_op - wait for a flash operation to complete
 * @hw: the HW module
 * @attempts: max number of polls of the status register
 * @delay: delay between polls in ms
 *
 * Wait for a flash operation to complete by polling the status register.
 */
static int
csio_hw_flash_wait_op(struct csio_hw *hw, int32_t attempts, int32_t delay)
{
        int ret;
        uint32_t status;

        while (1) {
                ret = csio_hw_sf1_write(hw, 1, 1, 1, SF_RD_STATUS);
                if (ret != 0)
                        return ret;

                ret = csio_hw_sf1_read(hw, 1, 0, 1, &status);
                if (ret != 0)
                        return ret;

                if (!(status & 1))
                        return 0;
                if (--attempts == 0)
                        return -EAGAIN;
                if (delay)
                        msleep(delay);
        }
}
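/*
 * Sketch of the SF_OP sequencing these helpers rely on (inferred from
 * their use in this file): each access programs SF_OP with a byte count
 * plus CONT/LOCK flags and then waits for SF_BUSY to clear. A chained
 * (@cont) operation keeps the flash command open, which is how a
 * multi-byte command such as SF_RD_DATA_FAST streams its result across
 * several consecutive csio_hw_sf1_read() calls.
 */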
/*
 * csio_hw_read_flash - read words from serial flash
 * @hw: the HW module
 * @addr: the start address for the read
 * @nwords: how many 32-bit words to read
 * @data: where to store the read data
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Read the specified number of 32-bit words from the serial flash.
 * If @byte_oriented is set the read data is stored as a byte array
 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * natural endianness.
 */
static int
csio_hw_read_flash(struct csio_hw *hw, uint32_t addr, uint32_t nwords,
                   uint32_t *data, int32_t byte_oriented)
{
        int ret;

        if (addr + nwords * sizeof(uint32_t) > hw->params.sf_size || (addr & 3))
                return -EINVAL;

        addr = swab32(addr) | SF_RD_DATA_FAST;

        ret = csio_hw_sf1_write(hw, 4, 1, 0, addr);
        if (ret != 0)
                return ret;

        ret = csio_hw_sf1_read(hw, 1, 1, 0, data);
        if (ret != 0)
                return ret;

        for ( ; nwords; nwords--, data++) {
                ret = csio_hw_sf1_read(hw, 4, nwords > 1, nwords == 1, data);
                if (nwords == 1)
                        csio_wr_reg32(hw, 0, SF_OP_A);  /* unlock SF */
                if (ret)
                        return ret;
                if (byte_oriented)
                        *data = (__force __u32) htonl(*data);
        }
        return 0;
}

/*
 * csio_hw_write_flash - write up to a page of data to the serial flash
 * @hw: the hw
 * @addr: the start address to write
 * @n: length of data to write in bytes
 * @data: the data to write
 *
 * Writes up to a page of data (256 bytes) to the serial flash starting
 * at the given address. All the data must be written to the same page.
 */
static int
csio_hw_write_flash(struct csio_hw *hw, uint32_t addr,
                    uint32_t n, const uint8_t *data)
{
        int ret = -EINVAL;
        uint32_t buf[64];
        uint32_t i, c, left, val, offset = addr & 0xff;

        if (addr >= hw->params.sf_size || offset + n > SF_PAGE_SIZE)
                return -EINVAL;

        val = swab32(addr) | SF_PROG_PAGE;

        ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE);
        if (ret != 0)
                goto unlock;

        ret = csio_hw_sf1_write(hw, 4, 1, 1, val);
        if (ret != 0)
                goto unlock;

        for (left = n; left; left -= c) {
                c = min(left, 4U);
                for (val = 0, i = 0; i < c; ++i)
                        val = (val << 8) + *data++;

                ret = csio_hw_sf1_write(hw, c, c != left, 1, val);
                if (ret)
                        goto unlock;
        }
        ret = csio_hw_flash_wait_op(hw, 8, 1);
        if (ret)
                goto unlock;

        csio_wr_reg32(hw, 0, SF_OP_A);  /* unlock SF */

        /* Read the page to verify the write succeeded */
        ret = csio_hw_read_flash(hw, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
        if (ret)
                return ret;

        if (memcmp(data - n, (uint8_t *)buf + offset, n)) {
                csio_err(hw,
                         "failed to correctly write the flash page at %#x\n",
                         addr);
                return -EINVAL;
        }

        return 0;

unlock:
        csio_wr_reg32(hw, 0, SF_OP_A);  /* unlock SF */
        return ret;
}
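/*
 * Worked example (illustrative): writing 16 bytes at flash address
 * 0x10008 gives offset = 0x08, so the bytes land at 0x08..0x17 within
 * the 256-byte page at 0x10000. The verification step re-reads that
 * whole page and memcmp()s only the window that was just written.
 */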
/*
 * csio_hw_flash_erase_sectors - erase a range of flash sectors
 * @hw: the HW module
 * @start: the first sector to erase
 * @end: the last sector to erase
 *
 * Erases the sectors in the given inclusive range.
 */
static int
csio_hw_flash_erase_sectors(struct csio_hw *hw, int32_t start, int32_t end)
{
        int ret = 0;

        while (start <= end) {
                ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE);
                if (ret != 0)
                        goto out;

                ret = csio_hw_sf1_write(hw, 4, 0, 1,
                                        SF_ERASE_SECTOR | (start << 8));
                if (ret != 0)
                        goto out;

                ret = csio_hw_flash_wait_op(hw, 14, 500);
                if (ret != 0)
                        goto out;

                start++;
        }
out:
        if (ret)
                csio_err(hw, "erase of flash sector %d failed, error %d\n",
                         start, ret);
        csio_wr_reg32(hw, 0, SF_OP_A);  /* unlock SF */
        return ret;
}

static void
csio_hw_print_fw_version(struct csio_hw *hw, char *str)
{
        csio_info(hw, "%s: %u.%u.%u.%u\n", str,
                  FW_HDR_FW_VER_MAJOR_G(hw->fwrev),
                  FW_HDR_FW_VER_MINOR_G(hw->fwrev),
                  FW_HDR_FW_VER_MICRO_G(hw->fwrev),
                  FW_HDR_FW_VER_BUILD_G(hw->fwrev));
}

/*
 * csio_hw_get_fw_version - read the firmware version
 * @hw: HW module
 * @vers: where to place the version
 *
 * Reads the FW version from flash.
 */
static int
csio_hw_get_fw_version(struct csio_hw *hw, uint32_t *vers)
{
        return csio_hw_read_flash(hw, FLASH_FW_START +
                                  offsetof(struct fw_hdr, fw_ver), 1,
                                  vers, 0);
}

/*
 * csio_hw_get_tp_version - read the TP microcode version
 * @hw: HW module
 * @vers: where to place the version
 *
 * Reads the TP microcode version from flash.
 */
static int
csio_hw_get_tp_version(struct csio_hw *hw, u32 *vers)
{
        return csio_hw_read_flash(hw, FLASH_FW_START +
                                  offsetof(struct fw_hdr, tp_microcode_ver), 1,
                                  vers, 0);
}
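/*
 * Both version readers above depend on struct fw_hdr sitting at the
 * very start of the FLASH_FW_START region, so a single 32-bit flash
 * read at the field's offsetof() recovers the version word of interest.
 */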
/*
 * csio_hw_fw_dload - download firmware.
 * @hw: HW module
 * @fw_data: firmware image to write.
 * @size: image size
 *
 * Write the supplied firmware image to the card's serial flash.
 */
static int
csio_hw_fw_dload(struct csio_hw *hw, uint8_t *fw_data, uint32_t size)
{
        uint32_t csum;
        int32_t addr;
        int ret;
        uint32_t i;
        uint8_t first_page[SF_PAGE_SIZE];
        const __be32 *p = (const __be32 *)fw_data;
        struct fw_hdr *hdr = (struct fw_hdr *)fw_data;
        uint32_t sf_sec_size;

        if ((!hw->params.sf_size) || (!hw->params.sf_nsec)) {
                csio_err(hw, "Serial Flash data invalid\n");
                return -EINVAL;
        }

        if (!size) {
                csio_err(hw, "FW image has no data\n");
                return -EINVAL;
        }

        if (size & 511) {
                csio_err(hw, "FW image size not multiple of 512 bytes\n");
                return -EINVAL;
        }

        if (ntohs(hdr->len512) * 512 != size) {
                csio_err(hw, "FW image size differs from size in FW header\n");
                return -EINVAL;
        }

        if (size > FLASH_FW_MAX_SIZE) {
                csio_err(hw, "FW image too large, max is %u bytes\n",
                         FLASH_FW_MAX_SIZE);
                return -EINVAL;
        }

        for (csum = 0, i = 0; i < size / sizeof(csum); i++)
                csum += ntohl(p[i]);

        if (csum != 0xffffffff) {
                csio_err(hw, "corrupted firmware image, checksum %#x\n", csum);
                return -EINVAL;
        }

        sf_sec_size = hw->params.sf_size / hw->params.sf_nsec;
        i = DIV_ROUND_UP(size, sf_sec_size);    /* # of sectors spanned */

        csio_dbg(hw, "Erasing sectors... start:%d end:%d\n",
                 FLASH_FW_START_SEC, FLASH_FW_START_SEC + i - 1);

        ret = csio_hw_flash_erase_sectors(hw, FLASH_FW_START_SEC,
                                          FLASH_FW_START_SEC + i - 1);
        if (ret) {
                csio_err(hw, "Flash Erase failed\n");
                goto out;
        }

        /*
         * We write the correct version at the end so the driver can see a bad
         * version if the FW write fails. Start by writing a copy of the
         * first page with a bad version.
         */
        memcpy(first_page, fw_data, SF_PAGE_SIZE);
        ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
        ret = csio_hw_write_flash(hw, FLASH_FW_START, SF_PAGE_SIZE, first_page);
        if (ret)
                goto out;

        csio_dbg(hw, "Writing Flash .. start:%d end:%d\n",
                 FW_IMG_START, FW_IMG_START + size);

        addr = FLASH_FW_START;
        for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
                addr += SF_PAGE_SIZE;
                fw_data += SF_PAGE_SIZE;
                ret = csio_hw_write_flash(hw, addr, SF_PAGE_SIZE, fw_data);
                if (ret)
                        goto out;
        }

        ret = csio_hw_write_flash(hw,
                                  FLASH_FW_START +
                                        offsetof(struct fw_hdr, fw_ver),
                                  sizeof(hdr->fw_ver),
                                  (const uint8_t *)&hdr->fw_ver);

out:
        if (ret)
                csio_err(hw, "firmware download failed, error %d\n", ret);
        return ret;
}

static int
csio_hw_get_flash_params(struct csio_hw *hw)
{
        int ret;
        uint32_t info = 0;

        ret = csio_hw_sf1_write(hw, 1, 1, 0, SF_RD_ID);
        if (!ret)
                ret = csio_hw_sf1_read(hw, 3, 0, 1, &info);
        csio_wr_reg32(hw, 0, SF_OP_A);  /* unlock SF */
        if (ret != 0)
                return ret;

        if ((info & 0xff) != 0x20)      /* not a Numonix flash */
                return -EINVAL;
        info >>= 16;                    /* log2 of size */
        if (info >= 0x14 && info < 0x18)
                hw->params.sf_nsec = 1 << (info - 16);
        else if (info == 0x18)
                hw->params.sf_nsec = 64;
        else
                return -EINVAL;
        hw->params.sf_size = 1 << info;

        return 0;
}
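/*
 * Decode example for the ID word above (matching the arithmetic, not a
 * statement about any particular part): with the manufacturer byte 0x20
 * in the low byte and capacity code 0x17 in bits 23:16, we get
 * sf_size = 1 << 0x17 = 8MB and sf_nsec = 1 << (0x17 - 16) = 128,
 * i.e. 128 sectors of 64KB each.
 */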
/*****************************************************************************/
/* HW State machine assists                                                  */
/*****************************************************************************/

static int
csio_hw_dev_ready(struct csio_hw *hw)
{
        uint32_t reg;
        int cnt = 6;
        int src_pf;

        while (((reg = csio_rd_reg32(hw, PL_WHOAMI_A)) == 0xFFFFFFFF) &&
               (--cnt != 0))
                mdelay(100);

        if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK))
                src_pf = SOURCEPF_G(reg);
        else
                src_pf = T6_SOURCEPF_G(reg);

        if ((cnt == 0) && (((int32_t)(src_pf) < 0) ||
                           (src_pf >= CSIO_MAX_PFN))) {
                csio_err(hw, "PL_WHOAMI returned 0x%x, cnt:%d\n", reg, cnt);
                return -EIO;
        }

        hw->pfn = src_pf;

        return 0;
}

/*
 * csio_do_hello - Perform the HELLO FW Mailbox command and process response.
 * @hw: HW module
 * @state: Device state
 *
 * FW_HELLO_CMD has to be polled for completion.
 */
static int
csio_do_hello(struct csio_hw *hw, enum csio_dev_state *state)
{
        struct csio_mb *mbp;
        int rv = 0;
        enum fw_retval retval;
        uint8_t mpfn;
        char state_str[16];
        int retries = FW_CMD_HELLO_RETRIES;

        memset(state_str, 0, sizeof(state_str));

        mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
        if (!mbp) {
                rv = -ENOMEM;
                CSIO_INC_STATS(hw, n_err_nomem);
                goto out;
        }

retry:
        csio_mb_hello(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn,
                      hw->pfn, CSIO_MASTER_MAY, NULL);

        rv = csio_mb_issue(hw, mbp);
        if (rv) {
                csio_err(hw, "failed to issue HELLO cmd. ret:%d.\n", rv);
                goto out_free_mb;
        }

        csio_mb_process_hello_rsp(hw, mbp, &retval, state, &mpfn);
        if (retval != FW_SUCCESS) {
                csio_err(hw, "HELLO cmd failed with ret: %d\n", retval);
                rv = -EINVAL;
                goto out_free_mb;
        }

        /* Firmware has designated us to be master */
        if (hw->pfn == mpfn) {
                hw->flags |= CSIO_HWF_MASTER;
        } else if (*state == CSIO_DEV_STATE_UNINIT) {
                /*
                 * If we're not the Master PF then we need to wait around for
                 * the Master PF Driver to finish setting up the adapter.
                 *
                 * Note that we also do this wait if we're a non-Master-capable
                 * PF and there is no current Master PF; a Master PF may show up
                 * momentarily and we wouldn't want to fail pointlessly. (This
                 * can happen when an OS loads lots of different drivers rapidly
                 * at the same time). In this case, the Master PF returned by
                 * the firmware will be PCIE_FW_MASTER_MASK so the test below
                 * will work ...
                 */

                int waiting = FW_CMD_HELLO_TIMEOUT;

                /*
                 * Wait for the firmware to either indicate an error or
                 * initialized state. If we see either of these we bail out
                 * and report the issue to the caller. If we exhaust the
                 * "hello timeout" and we haven't exhausted our retries, try
                 * again. Otherwise bail with a timeout error.
                 */
                for (;;) {
                        uint32_t pcie_fw;

                        spin_unlock_irq(&hw->lock);
                        msleep(50);
                        spin_lock_irq(&hw->lock);
                        waiting -= 50;

                        /*
                         * If neither Error nor Initialized are indicated
                         * by the firmware keep waiting till we exhaust our
                         * timeout ... and then retry if we haven't exhausted
                         * our retries ...
                         */
                        pcie_fw = csio_rd_reg32(hw, PCIE_FW_A);
                        if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
                                if (waiting <= 0) {
                                        if (retries-- > 0)
                                                goto retry;

                                        rv = -ETIMEDOUT;
                                        break;
                                }
                                continue;
                        }

                        /*
                         * We either have an Error or Initialized condition;
                         * report errors preferentially.
                         */
                        if (state) {
                                if (pcie_fw & PCIE_FW_ERR_F) {
                                        *state = CSIO_DEV_STATE_ERR;
                                        rv = -ETIMEDOUT;
                                } else if (pcie_fw & PCIE_FW_INIT_F)
                                        *state = CSIO_DEV_STATE_INIT;
                        }

                        /*
                         * If we arrived before a Master PF was selected and
                         * there's now a valid Master PF, grab its identity
                         * for our caller.
                         */
                        if (mpfn == PCIE_FW_MASTER_M &&
                            (pcie_fw & PCIE_FW_MASTER_VLD_F))
                                mpfn = PCIE_FW_MASTER_G(pcie_fw);
                        break;
                }
                hw->flags &= ~CSIO_HWF_MASTER;
        }

        switch (*state) {
        case CSIO_DEV_STATE_UNINIT:
                strcpy(state_str, "Initializing");
                break;
        case CSIO_DEV_STATE_INIT:
                strcpy(state_str, "Initialized");
                break;
        case CSIO_DEV_STATE_ERR:
                strcpy(state_str, "Error");
                break;
        default:
                strcpy(state_str, "Unknown");
                break;
        }

        if (hw->pfn == mpfn)
                csio_info(hw, "PF: %d, Coming up as MASTER, HW state: %s\n",
                          hw->pfn, state_str);
        else
                csio_info(hw,
                          "PF: %d, Coming up as SLAVE, Master PF: %d, HW state: %s\n",
                          hw->pfn, mpfn, state_str);

out_free_mb:
        mempool_free(mbp, hw->mb_mempool);
out:
        return rv;
}

/*
 * csio_do_bye - Perform the BYE FW Mailbox command and process response.
 * @hw: HW module
 *
 */
static int
csio_do_bye(struct csio_hw *hw)
{
        struct csio_mb *mbp;
        enum fw_retval retval;

        mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
        if (!mbp) {
                CSIO_INC_STATS(hw, n_err_nomem);
                return -ENOMEM;
        }

        csio_mb_bye(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);

        if (csio_mb_issue(hw, mbp)) {
                csio_err(hw, "Issue of BYE command failed\n");
                mempool_free(mbp, hw->mb_mempool);
                return -EINVAL;
        }

        retval = csio_mb_fw_retval(mbp);
        if (retval != FW_SUCCESS) {
                mempool_free(mbp, hw->mb_mempool);
                return -EINVAL;
        }

        mempool_free(mbp, hw->mb_mempool);

        return 0;
}
/*
 * csio_do_reset - Perform the device reset.
 * @hw: HW module
 * @fw_rst: FW reset
 *
 * If fw_rst is set, issues FW reset mbox cmd, otherwise
 * does PIO reset.
 * Performs reset of the function.
 */
static int
csio_do_reset(struct csio_hw *hw, bool fw_rst)
{
        struct csio_mb *mbp;
        enum fw_retval retval;

        if (!fw_rst) {
                /* PIO reset */
                csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
                mdelay(2000);
                return 0;
        }

        mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
        if (!mbp) {
                CSIO_INC_STATS(hw, n_err_nomem);
                return -ENOMEM;
        }

        csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
                      PIORSTMODE_F | PIORST_F, 0, NULL);

        if (csio_mb_issue(hw, mbp)) {
                csio_err(hw, "Issue of RESET command failed.\n");
                mempool_free(mbp, hw->mb_mempool);
                return -EINVAL;
        }

        retval = csio_mb_fw_retval(mbp);
        if (retval != FW_SUCCESS) {
                csio_err(hw, "RESET cmd failed with ret:0x%x.\n", retval);
                mempool_free(mbp, hw->mb_mempool);
                return -EINVAL;
        }

        mempool_free(mbp, hw->mb_mempool);

        return 0;
}

static int
csio_hw_validate_caps(struct csio_hw *hw, struct csio_mb *mbp)
{
        struct fw_caps_config_cmd *rsp = (struct fw_caps_config_cmd *)mbp->mb;
        uint16_t caps;

        caps = ntohs(rsp->fcoecaps);

        if (!(caps & FW_CAPS_CONFIG_FCOE_INITIATOR)) {
                csio_err(hw, "No FCoE Initiator capability in the firmware.\n");
                return -EINVAL;
        }

        if (!(caps & FW_CAPS_CONFIG_FCOE_CTRL_OFLD)) {
                csio_err(hw, "No FCoE Control Offload capability\n");
                return -EINVAL;
        }

        return 0;
}

/*
 * csio_hw_fw_halt - issue a reset/halt to FW and put uP into RESET
 * @hw: the HW module
 * @mbox: mailbox to use for the FW RESET command (if desired)
 * @force: force uP into RESET even if FW RESET command fails
 *
 * Issues a RESET command to firmware (if desired) with a HALT indication
 * and then puts the microprocessor into RESET state. The RESET command
 * will only be issued if a legitimate mailbox is provided (mbox <=
 * PCIE_FW_MASTER_MASK).
 *
 * This is generally used in order for the host to safely manipulate the
 * adapter without fear of conflicting with whatever the firmware might
 * be doing. The only way out of this state is to RESTART the firmware
 * ...
 */
static int
csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force)
{
        enum fw_retval retval = 0;

        /*
         * If a legitimate mailbox is provided, issue a RESET command
         * with a HALT indication.
         */
        if (mbox <= PCIE_FW_MASTER_M) {
                struct csio_mb *mbp;

                mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
                if (!mbp) {
                        CSIO_INC_STATS(hw, n_err_nomem);
                        return -ENOMEM;
                }

                csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
                              PIORSTMODE_F | PIORST_F, FW_RESET_CMD_HALT_F,
                              NULL);

                if (csio_mb_issue(hw, mbp)) {
                        csio_err(hw, "Issue of RESET command failed!\n");
                        mempool_free(mbp, hw->mb_mempool);
                        return -EINVAL;
                }

                retval = csio_mb_fw_retval(mbp);
                mempool_free(mbp, hw->mb_mempool);
        }

        /*
         * Normally we won't complete the operation if the firmware RESET
         * command fails but if our caller insists we'll go ahead and put the
         * uP into RESET. This can be useful if the firmware is hung or even
         * missing ... We'll have to take the risk of putting the uP into
         * RESET without the cooperation of firmware in that case.
         *
         * We also force the firmware's HALT flag to be on in case we bypassed
         * the firmware RESET command above or we're dealing with old firmware
         * which doesn't have the HALT capability. This will serve as a flag
         * for the incoming firmware to know that it's coming out of a HALT
         * rather than a RESET ... if it's new enough to understand that ...
         */
        if (retval == 0 || force) {
                csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
                csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F,
                                   PCIE_FW_HALT_F);
        }

        /*
         * And we always return the result of the firmware RESET command
         * even when we force the uP into RESET ...
         */
        return retval ? -EINVAL : 0;
}

/*
 * csio_hw_fw_restart - restart the firmware by taking the uP out of RESET
 * @hw: the HW module
 * @reset: if we want to do a RESET to restart things
 *
 * Restart firmware previously halted by csio_hw_fw_halt(). On successful
 * return the previous PF Master remains as the new PF Master and there
 * is no need to issue a new HELLO command, etc.
 *
 * We do this in two ways:
 *
 * 1. If we're dealing with newer firmware we'll simply want to take
 *    the chip's microprocessor out of RESET. This will cause the
 *    firmware to start up from its start vector. And then we'll loop
 *    until the firmware indicates it's started again (PCIE_FW.HALT
 *    reset to 0) or we timeout.
 *
 * 2. If we're dealing with older firmware then we'll need to RESET
 *    the chip since older firmware won't recognize the PCIE_FW.HALT
 *    flag and automatically RESET itself on startup.
 */
static int
csio_hw_fw_restart(struct csio_hw *hw, uint32_t mbox, int32_t reset)
{
        if (reset) {
                /*
                 * Since we're directing the RESET instead of the firmware
                 * doing it automatically, we need to clear the PCIE_FW.HALT
                 * bit.
                 */
                csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F, 0);

                /*
                 * If we've been given a valid mailbox, first try to get the
                 * firmware to do the RESET. If that works, great and we can
                 * return success. Otherwise, if we haven't been given a
                 * valid mailbox or the RESET command failed, fall back to
                 * hitting the chip with a hammer.
                 */
                if (mbox <= PCIE_FW_MASTER_M) {
                        csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0);
                        msleep(100);
                        if (csio_do_reset(hw, true) == 0)
                                return 0;
                }

                csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
                msleep(2000);
        } else {
                int ms;

                csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0);
                for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
                        if (!(csio_rd_reg32(hw, PCIE_FW_A) & PCIE_FW_HALT_F))
                                return 0;
                        msleep(100);
                        ms += 100;
                }
                return -ETIMEDOUT;
        }
        return 0;
}
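/*
 * The halt/restart pairing used by csio_hw_fw_upgrade() below: the halt
 * path leaves the uP in RESET (UPCRST set) with PCIE_FW.HALT flagged,
 * and the restart path undoes exactly those two steps, either by a full
 * chip RESET for older firmware or by clearing UPCRST and waiting for
 * the firmware itself to clear PCIE_FW.HALT.
 */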
/*
 * csio_hw_fw_upgrade - perform all of the steps necessary to upgrade FW
 * @hw: the HW module
 * @mbox: mailbox to use for the FW RESET command (if desired)
 * @fw_data: the firmware image to write
 * @size: image size
 * @force: force upgrade even if firmware doesn't cooperate
 *
 * Perform all of the steps necessary for upgrading an adapter's
 * firmware image. Normally this requires the cooperation of the
 * existing firmware in order to halt all existing activities
 * but if an invalid mailbox token is passed in we skip that step
 * (though we'll still put the adapter microprocessor into RESET in
 * that case).
 *
 * On successful return the new firmware will have been loaded and
 * the adapter will have been fully RESET losing all previous setup
 * state. On unsuccessful return the adapter may be completely hosed ...
 * positive errno indicates that the adapter is ~probably~ intact, a
 * negative errno indicates that things are looking bad ...
 */
static int
csio_hw_fw_upgrade(struct csio_hw *hw, uint32_t mbox,
                   const u8 *fw_data, uint32_t size, int32_t force)
{
        const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
        int reset, ret;

        ret = csio_hw_fw_halt(hw, mbox, force);
        if (ret != 0 && !force)
                return ret;

        ret = csio_hw_fw_dload(hw, (uint8_t *) fw_data, size);
        if (ret != 0)
                return ret;

        /*
         * Older versions of the firmware don't understand the new
         * PCIE_FW.HALT flag and so won't know to perform a RESET when they
         * restart. So for newly loaded older firmware we'll have to do the
         * RESET for it so it starts up on a clean slate. We can tell if
         * the newly loaded firmware will handle this right by checking
         * its header flags to see if it advertises the capability.
         */
        reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
        return csio_hw_fw_restart(hw, mbox, reset);
}

/*
 * csio_get_device_params - Get device parameters.
 * @hw: HW module
 *
 */
static int
csio_get_device_params(struct csio_hw *hw)
{
        struct csio_wrm *wrm = csio_hw_to_wrm(hw);
        struct csio_mb *mbp;
        enum fw_retval retval;
        u32 param[6];
        int i, j = 0;

        /* Initialize portids to -1 */
        for (i = 0; i < CSIO_MAX_PPORTS; i++)
                hw->pport[i].portid = -1;

        mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
        if (!mbp) {
                CSIO_INC_STATS(hw, n_err_nomem);
                return -ENOMEM;
        }

        /* Get port vec information. */
        param[0] = FW_PARAM_DEV(PORTVEC);

        /* Get Core clock. */
        param[1] = FW_PARAM_DEV(CCLK);

        /* Get EQ id start and end. */
        param[2] = FW_PARAM_PFVF(EQ_START);
        param[3] = FW_PARAM_PFVF(EQ_END);

        /* Get IQ id start and end. */
        param[4] = FW_PARAM_PFVF(IQFLINT_START);
        param[5] = FW_PARAM_PFVF(IQFLINT_END);

        csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
                       ARRAY_SIZE(param), param, NULL, false, NULL);
        if (csio_mb_issue(hw, mbp)) {
                csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");
                mempool_free(mbp, hw->mb_mempool);
                return -EINVAL;
        }

        csio_mb_process_read_params_rsp(hw, mbp, &retval,
                                        ARRAY_SIZE(param), param);
        if (retval != FW_SUCCESS) {
                csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",
                         retval);
                mempool_free(mbp, hw->mb_mempool);
                return -EINVAL;
        }

        /* Cache the information. */
        hw->port_vec = param[0];
        hw->vpd.cclk = param[1];
        wrm->fw_eq_start = param[2];
        wrm->fw_iq_start = param[4];

        /* Using FW configured max iqs & eqs */
        if ((hw->flags & CSIO_HWF_USING_SOFT_PARAMS) ||
            !csio_is_hw_master(hw)) {
                hw->cfg_niq = param[5] - param[4] + 1;
                hw->cfg_neq = param[3] - param[2] + 1;
                csio_dbg(hw, "Using fwconfig max niqs %d neqs %d\n",
                         hw->cfg_niq, hw->cfg_neq);
        }

        hw->port_vec &= csio_port_mask;

        hw->num_pports = hweight32(hw->port_vec);

        csio_dbg(hw, "Port vector: 0x%x, #ports: %d\n",
                 hw->port_vec, hw->num_pports);

        for (i = 0; i < hw->num_pports; i++) {
                while ((hw->port_vec & (1 << j)) == 0)
                        j++;
                hw->pport[i].portid = j++;
                csio_dbg(hw, "Found Port:%d\n", hw->pport[i].portid);
        }
        mempool_free(mbp, hw->mb_mempool);

        return 0;
}

/*
 * csio_config_device_caps - Get and set device capabilities.
 * @hw: HW module
 *
 */
static int
csio_config_device_caps(struct csio_hw *hw)
{
        struct csio_mb *mbp;
        enum fw_retval retval;
        int rv = -EINVAL;

        mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
        if (!mbp) {
                CSIO_INC_STATS(hw, n_err_nomem);
                return -ENOMEM;
        }

        /* Get device capabilities */
        csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, 0, 0, 0, 0, NULL);

        if (csio_mb_issue(hw, mbp)) {
                csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(r) failed!\n");
                goto out;
        }

        retval = csio_mb_fw_retval(mbp);
        if (retval != FW_SUCCESS) {
                csio_err(hw, "FW_CAPS_CONFIG_CMD(r) returned %d!\n", retval);
                goto out;
        }

        /* Validate device capabilities */
        rv = csio_hw_validate_caps(hw, mbp);
        if (rv != 0)
                goto out;

        /* Don't config device capabilities if already configured */
        if (hw->fw_state == CSIO_DEV_STATE_INIT) {
                rv = 0;
                goto out;
        }

        /* Write back desired device capabilities */
        csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, true, true,
                            false, true, NULL);

        if (csio_mb_issue(hw, mbp)) {
                csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(w) failed!\n");
                goto out;
        }

        retval = csio_mb_fw_retval(mbp);
        if (retval != FW_SUCCESS) {
                csio_err(hw, "FW_CAPS_CONFIG_CMD(w) returned %d!\n", retval);
                goto out;
        }

        rv = 0;
out:
        mempool_free(mbp, hw->mb_mempool);
        return rv;
}

static inline enum cc_fec fwcap_to_cc_fec(fw_port_cap32_t fw_fec)
{
        enum cc_fec cc_fec = 0;

        if (fw_fec & FW_PORT_CAP32_FEC_RS)
                cc_fec |= FEC_RS;
        if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
                cc_fec |= FEC_BASER_RS;

        return cc_fec;
}

static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause)
{
        fw_port_cap32_t fw_pause = 0;

        if (cc_pause & PAUSE_RX)
                fw_pause |= FW_PORT_CAP32_FC_RX;
        if (cc_pause & PAUSE_TX)
                fw_pause |= FW_PORT_CAP32_FC_TX;

        return fw_pause;
}

static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec)
{
        fw_port_cap32_t fw_fec = 0;

        if (cc_fec & FEC_RS)
                fw_fec |= FW_PORT_CAP32_FEC_RS;
        if (cc_fec & FEC_BASER_RS)
                fw_fec |= FW_PORT_CAP32_FEC_BASER_RS;

        return fw_fec;
}
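/*
 * Example (illustrative): a link_config requesting PAUSE_RX | PAUSE_TX
 * with FEC_RS translates through the helpers above to
 * FW_PORT_CAP32_FC_RX | FW_PORT_CAP32_FC_TX | FW_PORT_CAP32_FEC_RS;
 * fwcap_to_cc_fec() performs the reverse mapping for capabilities the
 * firmware reports back.
 */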
/**
 * fwcap_to_fwspeed - return highest speed in Port Capabilities
 * @acaps: advertised Port Capabilities
 *
 * Get the highest speed for the port from the advertised Port
 * Capabilities.
 */
fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps)
{
#define TEST_SPEED_RETURN(__caps_speed) \
        do { \
                if (acaps & FW_PORT_CAP32_SPEED_##__caps_speed) \
                        return FW_PORT_CAP32_SPEED_##__caps_speed; \
        } while (0)

        TEST_SPEED_RETURN(400G);
        TEST_SPEED_RETURN(200G);
        TEST_SPEED_RETURN(100G);
        TEST_SPEED_RETURN(50G);
        TEST_SPEED_RETURN(40G);
        TEST_SPEED_RETURN(25G);
        TEST_SPEED_RETURN(10G);
        TEST_SPEED_RETURN(1G);
        TEST_SPEED_RETURN(100M);

#undef TEST_SPEED_RETURN

        return 0;
}

/**
 * fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits
 * @caps16: a 16-bit Port Capabilities value
 *
 * Returns the equivalent 32-bit Port Capabilities value.
 */
fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16)
{
        fw_port_cap32_t caps32 = 0;

#define CAP16_TO_CAP32(__cap) \
        do { \
                if (caps16 & FW_PORT_CAP_##__cap) \
                        caps32 |= FW_PORT_CAP32_##__cap; \
        } while (0)

        CAP16_TO_CAP32(SPEED_100M);
        CAP16_TO_CAP32(SPEED_1G);
        CAP16_TO_CAP32(SPEED_25G);
        CAP16_TO_CAP32(SPEED_10G);
        CAP16_TO_CAP32(SPEED_40G);
        CAP16_TO_CAP32(SPEED_100G);
        CAP16_TO_CAP32(FC_RX);
        CAP16_TO_CAP32(FC_TX);
        CAP16_TO_CAP32(ANEG);
        CAP16_TO_CAP32(MDIAUTO);
        CAP16_TO_CAP32(MDISTRAIGHT);
        CAP16_TO_CAP32(FEC_RS);
        CAP16_TO_CAP32(FEC_BASER_RS);
        CAP16_TO_CAP32(802_3_PAUSE);
        CAP16_TO_CAP32(802_3_ASM_DIR);

#undef CAP16_TO_CAP32

        return caps32;
}

/**
 * lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities
 * @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value
 *
 * Translates old FW_PORT_ACTION_GET_PORT_INFO lstatus field into new
 * 32-bit Port Capabilities value.
 */
fw_port_cap32_t lstatus_to_fwcap(u32 lstatus)
{
        fw_port_cap32_t linkattr = 0;

        /* The format of the Link Status in the old
         * 16-bit Port Information message isn't the same as the
         * 16-bit Port Capabilities bitfield used everywhere else.
         */
        if (lstatus & FW_PORT_CMD_RXPAUSE_F)
                linkattr |= FW_PORT_CAP32_FC_RX;
        if (lstatus & FW_PORT_CMD_TXPAUSE_F)
                linkattr |= FW_PORT_CAP32_FC_TX;
        if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
                linkattr |= FW_PORT_CAP32_SPEED_100M;
        if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
                linkattr |= FW_PORT_CAP32_SPEED_1G;
        if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
                linkattr |= FW_PORT_CAP32_SPEED_10G;
        if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G))
                linkattr |= FW_PORT_CAP32_SPEED_25G;
        if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
                linkattr |= FW_PORT_CAP32_SPEED_40G;
        if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G))
                linkattr |= FW_PORT_CAP32_SPEED_100G;

        return linkattr;
}
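/*
 * Worked example (hypothetical lstatus): an old-style status word with
 * RXPAUSE and TXPAUSE set and LSPEED == FW_PORT_CAP_SPEED_10G folds
 * down to FW_PORT_CAP32_FC_RX | FW_PORT_CAP32_FC_TX |
 * FW_PORT_CAP32_SPEED_10G in the 32-bit encoding.
 */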
/**
 * csio_init_link_config - initialize a link's SW state
 * @lc: pointer to structure holding the link state
 * @pcaps: link Port Capabilities
 * @acaps: link current Advertised Port Capabilities
 *
 * Initializes the SW state maintained for each link, including the link's
 * capabilities and default speed/flow-control/autonegotiation settings.
 */
static void csio_init_link_config(struct link_config *lc, fw_port_cap32_t pcaps,
                                  fw_port_cap32_t acaps)
{
        lc->pcaps = pcaps;
        lc->def_acaps = acaps;
        lc->lpacaps = 0;
        lc->speed_caps = 0;
        lc->speed = 0;
        lc->requested_fc = PAUSE_RX | PAUSE_TX;
        lc->fc = lc->requested_fc;

        /*
         * For Forward Error Correction, we default to whatever the Firmware
         * tells us the Link is currently advertising.
         */
        lc->requested_fec = FEC_AUTO;
        lc->fec = fwcap_to_cc_fec(lc->def_acaps);

        /* If the Port is capable of Auto-Negotiation, initialize it as
         * "enabled" and copy over all of the Physical Port Capabilities
         * to the Advertised Port Capabilities. Otherwise mark it as
         * Auto-Negotiate disabled and select the highest supported speed
         * for the link. Note parallel structure in t4_link_l1cfg_core()
         * and t4_handle_get_port_info().
         */
        if (lc->pcaps & FW_PORT_CAP32_ANEG) {
                lc->acaps = lc->pcaps & ADVERT_MASK;
                lc->autoneg = AUTONEG_ENABLE;
                lc->requested_fc |= PAUSE_AUTONEG;
        } else {
                lc->acaps = 0;
                lc->autoneg = AUTONEG_DISABLE;
        }
}

static void csio_link_l1cfg(struct link_config *lc, uint16_t fw_caps,
                            uint32_t *rcaps)
{
        unsigned int fw_mdi = FW_PORT_CAP32_MDI_V(FW_PORT_CAP32_MDI_AUTO);
        fw_port_cap32_t fw_fc, cc_fec, fw_fec, lrcap;

        lc->link_ok = 0;

        /*
         * Convert driver coding of Pause Frame Flow Control settings into the
         * Firmware's API.
         */
        fw_fc = cc_to_fwcap_pause(lc->requested_fc);

        /*
         * Convert Common Code Forward Error Correction settings into the
         * Firmware's API. If the current Requested FEC has "Automatic"
         * (IEEE 802.3) specified, then we use whatever the Firmware
         * sent us as part of its IEEE 802.3-based interpretation of
         * the Transceiver Module EPROM FEC parameters. Otherwise we
         * use whatever is in the current Requested FEC settings.
         */
        if (lc->requested_fec & FEC_AUTO)
                cc_fec = fwcap_to_cc_fec(lc->def_acaps);
        else
                cc_fec = lc->requested_fec;
        fw_fec = cc_to_fwcap_fec(cc_fec);

        /* Figure out what our Requested Port Capabilities are going to be.
         * Note parallel structure in t4_handle_get_port_info() and
         * init_link_config().
         */
        if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
                lrcap = (lc->pcaps & ADVERT_MASK) | fw_fc | fw_fec;
                lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
                lc->fec = cc_fec;
        } else if (lc->autoneg == AUTONEG_DISABLE) {
                lrcap = lc->speed_caps | fw_fc | fw_fec | fw_mdi;
                lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
                lc->fec = cc_fec;
        } else {
                lrcap = lc->acaps | fw_fc | fw_fec | fw_mdi;
        }

        *rcaps = lrcap;
}
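/*
 * Summary of the decision tree above: ports without autonegotiation
 * capability advertise their full pcaps, ANEG-capable ports with
 * autoneg administratively disabled advertise only speed_caps, and
 * autonegotiating ports advertise acaps; the pause and FEC bits are
 * OR'd in for all three cases, MDI only for the ANEG-capable ones.
 */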
1644 * 1645 */ 1646 static int 1647 csio_enable_ports(struct csio_hw *hw) 1648 { 1649 struct csio_mb *mbp; 1650 u16 fw_caps = FW_CAPS_UNKNOWN; 1651 enum fw_retval retval; 1652 uint8_t portid; 1653 fw_port_cap32_t pcaps, acaps, rcaps; 1654 int i; 1655 1656 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1657 if (!mbp) { 1658 CSIO_INC_STATS(hw, n_err_nomem); 1659 return -ENOMEM; 1660 } 1661 1662 for (i = 0; i < hw->num_pports; i++) { 1663 portid = hw->pport[i].portid; 1664 1665 if (fw_caps == FW_CAPS_UNKNOWN) { 1666 u32 param, val; 1667 1668 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | 1669 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_PORT_CAPS32)); 1670 val = 1; 1671 1672 csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, 1673 hw->pfn, 0, 1, ¶m, &val, false, 1674 NULL); 1675 1676 if (csio_mb_issue(hw, mbp)) { 1677 csio_err(hw, "failed to issue FW_PARAMS_CMD(r) port:%d\n", 1678 portid); 1679 mempool_free(mbp, hw->mb_mempool); 1680 return -EINVAL; 1681 } 1682 1683 csio_mb_process_read_params_rsp(hw, mbp, &retval, 1, 1684 &val); 1685 if (retval != FW_SUCCESS) { 1686 csio_err(hw, "FW_PARAMS_CMD(r) port:%d failed: 0x%x\n", 1687 portid, retval); 1688 mempool_free(mbp, hw->mb_mempool); 1689 return -EINVAL; 1690 } 1691 1692 fw_caps = val; 1693 } 1694 1695 /* Read PORT information */ 1696 csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid, 1697 false, 0, fw_caps, NULL); 1698 1699 if (csio_mb_issue(hw, mbp)) { 1700 csio_err(hw, "failed to issue FW_PORT_CMD(r) port:%d\n", 1701 portid); 1702 mempool_free(mbp, hw->mb_mempool); 1703 return -EINVAL; 1704 } 1705 1706 csio_mb_process_read_port_rsp(hw, mbp, &retval, fw_caps, 1707 &pcaps, &acaps); 1708 if (retval != FW_SUCCESS) { 1709 csio_err(hw, "FW_PORT_CMD(r) port:%d failed: 0x%x\n", 1710 portid, retval); 1711 mempool_free(mbp, hw->mb_mempool); 1712 return -EINVAL; 1713 } 1714 1715 csio_init_link_config(&hw->pport[i].link_cfg, pcaps, acaps); 1716 1717 csio_link_l1cfg(&hw->pport[i].link_cfg, fw_caps, &rcaps); 1718 1719 /* Write back PORT information */ 1720 csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid, 1721 true, rcaps, fw_caps, NULL); 1722 1723 if (csio_mb_issue(hw, mbp)) { 1724 csio_err(hw, "failed to issue FW_PORT_CMD(w) port:%d\n", 1725 portid); 1726 mempool_free(mbp, hw->mb_mempool); 1727 return -EINVAL; 1728 } 1729 1730 retval = csio_mb_fw_retval(mbp); 1731 if (retval != FW_SUCCESS) { 1732 csio_err(hw, "FW_PORT_CMD(w) port:%d failed :0x%x\n", 1733 portid, retval); 1734 mempool_free(mbp, hw->mb_mempool); 1735 return -EINVAL; 1736 } 1737 1738 } /* For all ports */ 1739 1740 mempool_free(mbp, hw->mb_mempool); 1741 1742 return 0; 1743 } 1744 1745 /* 1746 * csio_get_fcoe_resinfo - Read fcoe fw resource info. 1747 * @hw: HW module 1748 * Issued with lock held. 
/*
 * csio_get_fcoe_resinfo - Read fcoe fw resource info.
 * @hw: HW module
 * Issued with lock held.
 */
static int
csio_get_fcoe_resinfo(struct csio_hw *hw)
{
        struct csio_fcoe_res_info *res_info = &hw->fres_info;
        struct fw_fcoe_res_info_cmd *rsp;
        struct csio_mb *mbp;
        enum fw_retval retval;

        mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
        if (!mbp) {
                CSIO_INC_STATS(hw, n_err_nomem);
                return -ENOMEM;
        }

        /* Get FCoE FW resource information */
        csio_fcoe_read_res_info_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);

        if (csio_mb_issue(hw, mbp)) {
                csio_err(hw, "failed to issue FW_FCOE_RES_INFO_CMD\n");
                mempool_free(mbp, hw->mb_mempool);
                return -EINVAL;
        }

        rsp = (struct fw_fcoe_res_info_cmd *)(mbp->mb);
        retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16));
        if (retval != FW_SUCCESS) {
                csio_err(hw, "FW_FCOE_RES_INFO_CMD failed with ret x%x\n",
                         retval);
                mempool_free(mbp, hw->mb_mempool);
                return -EINVAL;
        }

        res_info->e_d_tov = ntohs(rsp->e_d_tov);
        res_info->r_a_tov_seq = ntohs(rsp->r_a_tov_seq);
        res_info->r_a_tov_els = ntohs(rsp->r_a_tov_els);
        res_info->r_r_tov = ntohs(rsp->r_r_tov);
        res_info->max_xchgs = ntohl(rsp->max_xchgs);
        res_info->max_ssns = ntohl(rsp->max_ssns);
        res_info->used_xchgs = ntohl(rsp->used_xchgs);
        res_info->used_ssns = ntohl(rsp->used_ssns);
        res_info->max_fcfs = ntohl(rsp->max_fcfs);
        res_info->max_vnps = ntohl(rsp->max_vnps);
        res_info->used_fcfs = ntohl(rsp->used_fcfs);
        res_info->used_vnps = ntohl(rsp->used_vnps);

        csio_dbg(hw, "max ssns:%d max xchgs:%d\n", res_info->max_ssns,
                 res_info->max_xchgs);
        mempool_free(mbp, hw->mb_mempool);

        return 0;
}
static int
csio_hw_check_fwconfig(struct csio_hw *hw, u32 *param)
{
        struct csio_mb *mbp;
        enum fw_retval retval;
        u32 _param[1];

        mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
        if (!mbp) {
                CSIO_INC_STATS(hw, n_err_nomem);
                return -ENOMEM;
        }

        /*
         * Find out whether we're dealing with a version of
         * the firmware which has configuration file support.
         */
        _param[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
                     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));

        csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
                       ARRAY_SIZE(_param), _param, NULL, false, NULL);
        if (csio_mb_issue(hw, mbp)) {
                csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");
                mempool_free(mbp, hw->mb_mempool);
                return -EINVAL;
        }

        csio_mb_process_read_params_rsp(hw, mbp, &retval,
                                        ARRAY_SIZE(_param), _param);
        if (retval != FW_SUCCESS) {
                csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",
                         retval);
                mempool_free(mbp, hw->mb_mempool);
                return -EINVAL;
        }

        mempool_free(mbp, hw->mb_mempool);
        *param = _param[0];

        return 0;
}

static int
csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
{
        int ret = 0;
        const struct firmware *cf;
        struct pci_dev *pci_dev = hw->pdev;
        struct device *dev = &pci_dev->dev;
        unsigned int mtype = 0, maddr = 0;
        uint32_t *cfg_data;
        int value_to_add = 0;
        const char *fw_cfg_file;

        if (csio_is_t5(pci_dev->device & CSIO_HW_CHIP_MASK))
                fw_cfg_file = FW_CFG_NAME_T5;
        else
                fw_cfg_file = FW_CFG_NAME_T6;

        ret = request_firmware(&cf, fw_cfg_file, dev);
        if (ret < 0) {
                csio_err(hw, "could not find config file %s, err: %d\n",
                         fw_cfg_file, ret);
                return -ENOENT;
        }

        if (cf->size % 4 != 0)
                value_to_add = 4 - (cf->size % 4);

        cfg_data = kzalloc(cf->size + value_to_add, GFP_KERNEL);
        if (cfg_data == NULL) {
                ret = -ENOMEM;
                goto leave;
        }

        memcpy((void *)cfg_data, (const void *)cf->data, cf->size);
        if (csio_hw_check_fwconfig(hw, fw_cfg_param) != 0) {
                ret = -EINVAL;
                goto leave;
        }

        mtype = FW_PARAMS_PARAM_Y_G(*fw_cfg_param);
        maddr = FW_PARAMS_PARAM_Z_G(*fw_cfg_param) << 16;

        ret = csio_memory_write(hw, mtype, maddr,
                                cf->size + value_to_add, cfg_data);

        if ((ret == 0) && (value_to_add != 0)) {
                union {
                        u32 word;
                        char buf[4];
                } last;
                size_t size = cf->size & ~0x3;
                int i;

                last.word = cfg_data[size >> 2];
                for (i = 4 - value_to_add; i < 4; i++)
                        last.buf[i] = 0;
                ret = csio_memory_write(hw, mtype, maddr + size, 4, &last.word);
        }
        if (ret == 0) {
                csio_info(hw, "config file upgraded to %s\n", fw_cfg_file);
                snprintf(path, 64, "%s%s", "/lib/firmware/", fw_cfg_file);
        }

leave:
        kfree(cfg_data);
        release_firmware(cf);
        return ret;
}
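/*
 * Note on the tail handling above: the config file is padded up to a
 * 4-byte multiple before the bulk write, and when padding was needed
 * the final word is re-written with its pad bytes cleared so that the
 * flash copy ends on a clean word boundary.
 */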
/*
 * HW initialization: contact FW, obtain config, perform basic init.
 *
 * If the firmware we're dealing with has Configuration File support, then
 * we use that to perform all configuration -- either using the configuration
 * file stored in flash on the adapter or using a filesystem-local file
 * if available.
 *
 * If we don't have configuration file support in the firmware, then we'll
 * have to set things up the old fashioned way with hard-coded register
 * writes and firmware commands ...
 */

/*
 * Attempt to initialize the HW via a Firmware Configuration File.
 */
static int
csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
{
        struct csio_mb *mbp = NULL;
        struct fw_caps_config_cmd *caps_cmd;
        unsigned int mtype, maddr;
        int rv = -EINVAL;
        uint32_t finiver = 0, finicsum = 0, cfcsum = 0;
        char path[64];
        char *config_name = NULL;

        /*
         * Reset device if necessary
         */
        if (reset) {
                rv = csio_do_reset(hw, true);
                if (rv != 0)
                        goto bye;
        }

        /*
         * If we have a configuration file on the host, then use that.
         * Otherwise, use the configuration file stored in the HW flash ...
         */
        spin_unlock_irq(&hw->lock);
        rv = csio_hw_flash_config(hw, fw_cfg_param, path);
        spin_lock_irq(&hw->lock);
        if (rv != 0) {
                /*
                 * config file was not found. Use default
                 * config file from flash.
                 */
                config_name = "On FLASH";
                mtype = FW_MEMTYPE_CF_FLASH;
                maddr = hw->chip_ops->chip_flash_cfg_addr(hw);
        } else {
                config_name = path;
                mtype = FW_PARAMS_PARAM_Y_G(*fw_cfg_param);
                maddr = FW_PARAMS_PARAM_Z_G(*fw_cfg_param) << 16;
        }

        mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
        if (!mbp) {
                CSIO_INC_STATS(hw, n_err_nomem);
                return -ENOMEM;
        }

        /*
         * Tell the firmware to process the indicated Configuration File.
         * If there are no errors and the caller has provided return value
         * pointers for the [fini] section version, checksum and computed
         * checksum, pass those back to the caller.
         */
        caps_cmd = (struct fw_caps_config_cmd *)(mbp->mb);
        CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1);
        caps_cmd->op_to_write =
                htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
                      FW_CMD_REQUEST_F |
                      FW_CMD_READ_F);
        caps_cmd->cfvalid_to_len16 =
                htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
                      FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
                      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
                      FW_LEN16(*caps_cmd));

        if (csio_mb_issue(hw, mbp)) {
                rv = -EINVAL;
                goto bye;
        }

        rv = csio_mb_fw_retval(mbp);
        /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
         * Configuration File in FLASH), our last gasp effort is to use the
         * Firmware Configuration File which is embedded in the
         * firmware. A very few early versions of the firmware didn't
         * have one embedded but we can ignore those.
         */
        if (rv == ENOENT) {
                CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1);
                caps_cmd->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
                                              FW_CMD_REQUEST_F |
                                              FW_CMD_READ_F);
                caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd));

                if (csio_mb_issue(hw, mbp)) {
                        rv = -EINVAL;
                        goto bye;
                }

                rv = csio_mb_fw_retval(mbp);
                config_name = "Firmware Default";
        }
        if (rv != FW_SUCCESS)
                goto bye;

        finiver = ntohl(caps_cmd->finiver);
        finicsum = ntohl(caps_cmd->finicsum);
        cfcsum = ntohl(caps_cmd->cfcsum);

        /*
         * And now tell the firmware to use the configuration we just loaded.
         */
2030 */ 2031 caps_cmd->op_to_write = 2032 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | 2033 FW_CMD_REQUEST_F | 2034 FW_CMD_WRITE_F); 2035 caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd)); 2036 2037 if (csio_mb_issue(hw, mbp)) { 2038 rv = -EINVAL; 2039 goto bye; 2040 } 2041 2042 rv = csio_mb_fw_retval(mbp); 2043 if (rv != FW_SUCCESS) { 2044 csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", rv); 2045 goto bye; 2046 } 2047 2048 if (finicsum != cfcsum) { 2049 csio_warn(hw, 2050 "Config File checksum mismatch: csum=%#x, computed=%#x\n", 2051 finicsum, cfcsum); 2052 } 2053 2054 /* Validate device capabilities */ 2055 rv = csio_hw_validate_caps(hw, mbp); 2056 if (rv != 0) 2057 goto bye; 2058 2059 mempool_free(mbp, hw->mb_mempool); 2060 mbp = NULL; 2061 2062 /* 2063 * Note that we're operating with parameters 2064 * not supplied by the driver, rather than from hard-wired 2065 * initialization constants buried in the driver. 2066 */ 2067 hw->flags |= CSIO_HWF_USING_SOFT_PARAMS; 2068 2069 /* device parameters */ 2070 rv = csio_get_device_params(hw); 2071 if (rv != 0) 2072 goto bye; 2073 2074 /* Configure SGE */ 2075 csio_wr_sge_init(hw); 2076 2077 /* 2078 * And finally tell the firmware to initialize itself using the 2079 * parameters from the Configuration File. 2080 */ 2081 /* Post event to notify completion of configuration */ 2082 csio_post_event(&hw->sm, CSIO_HWE_INIT); 2083 2084 csio_info(hw, "Successfully configure using Firmware " 2085 "Configuration File %s, version %#x, computed checksum %#x\n", 2086 config_name, finiver, cfcsum); 2087 return 0; 2088 2089 /* 2090 * Something bad happened. Return the error ... 2091 */ 2092 bye: 2093 if (mbp) 2094 mempool_free(mbp, hw->mb_mempool); 2095 hw->flags &= ~CSIO_HWF_USING_SOFT_PARAMS; 2096 csio_warn(hw, "Configuration file error %d\n", rv); 2097 return rv; 2098 } 2099 2100 /* Is the given firmware API compatible with the one the driver was compiled 2101 * with? 2102 */ 2103 static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2) 2104 { 2105 2106 /* short circuit if it's the exact same firmware version */ 2107 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver) 2108 return 1; 2109 2110 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x) 2111 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) && 2112 SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe)) 2113 return 1; 2114 #undef SAME_INTF 2115 2116 return 0; 2117 } 2118 2119 /* The firmware in the filesystem is usable, but should it be installed? 2120 * This routine explains itself in detail if it indicates the filesystem 2121 * firmware should be installed. 
2122 */ 2123 static int csio_should_install_fs_fw(struct csio_hw *hw, int card_fw_usable, 2124 int k, int c) 2125 { 2126 const char *reason; 2127 2128 if (!card_fw_usable) { 2129 reason = "incompatible or unusable"; 2130 goto install; 2131 } 2132 2133 if (k > c) { 2134 reason = "older than the version supported with this driver"; 2135 goto install; 2136 } 2137 2138 return 0; 2139 2140 install: 2141 csio_err(hw, "firmware on card (%u.%u.%u.%u) is %s, " 2142 "installing firmware %u.%u.%u.%u on card.\n", 2143 FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c), 2144 FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason, 2145 FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k), 2146 FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k)); 2147 2148 return 1; 2149 } 2150 2151 static struct fw_info fw_info_array[] = { 2152 { 2153 .chip = CHELSIO_T5, 2154 .fs_name = FW_CFG_NAME_T5, 2155 .fw_mod_name = FW_FNAME_T5, 2156 .fw_hdr = { 2157 .chip = FW_HDR_CHIP_T5, 2158 .fw_ver = __cpu_to_be32(FW_VERSION(T5)), 2159 .intfver_nic = FW_INTFVER(T5, NIC), 2160 .intfver_vnic = FW_INTFVER(T5, VNIC), 2161 .intfver_ri = FW_INTFVER(T5, RI), 2162 .intfver_iscsi = FW_INTFVER(T5, ISCSI), 2163 .intfver_fcoe = FW_INTFVER(T5, FCOE), 2164 }, 2165 }, { 2166 .chip = CHELSIO_T6, 2167 .fs_name = FW_CFG_NAME_T6, 2168 .fw_mod_name = FW_FNAME_T6, 2169 .fw_hdr = { 2170 .chip = FW_HDR_CHIP_T6, 2171 .fw_ver = __cpu_to_be32(FW_VERSION(T6)), 2172 .intfver_nic = FW_INTFVER(T6, NIC), 2173 .intfver_vnic = FW_INTFVER(T6, VNIC), 2174 .intfver_ri = FW_INTFVER(T6, RI), 2175 .intfver_iscsi = FW_INTFVER(T6, ISCSI), 2176 .intfver_fcoe = FW_INTFVER(T6, FCOE), 2177 }, 2178 } 2179 }; 2180 2181 static struct fw_info *find_fw_info(int chip) 2182 { 2183 int i; 2184 2185 for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) { 2186 if (fw_info_array[i].chip == chip) 2187 return &fw_info_array[i]; 2188 } 2189 return NULL; 2190 } 2191 2192 static int csio_hw_prep_fw(struct csio_hw *hw, struct fw_info *fw_info, 2193 const u8 *fw_data, unsigned int fw_size, 2194 struct fw_hdr *card_fw, enum csio_dev_state state, 2195 int *reset) 2196 { 2197 int ret, card_fw_usable, fs_fw_usable; 2198 const struct fw_hdr *fs_fw; 2199 const struct fw_hdr *drv_fw; 2200 2201 drv_fw = &fw_info->fw_hdr; 2202 2203 /* Read the header of the firmware on the card */ 2204 ret = csio_hw_read_flash(hw, FLASH_FW_START, 2205 sizeof(*card_fw) / sizeof(uint32_t), 2206 (uint32_t *)card_fw, 1); 2207 if (ret == 0) { 2208 card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw); 2209 } else { 2210 csio_err(hw, 2211 "Unable to read card's firmware header: %d\n", ret); 2212 card_fw_usable = 0; 2213 } 2214 2215 if (fw_data != NULL) { 2216 fs_fw = (const void *)fw_data; 2217 fs_fw_usable = fw_compatible(drv_fw, fs_fw); 2218 } else { 2219 fs_fw = NULL; 2220 fs_fw_usable = 0; 2221 } 2222 2223 if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver && 2224 (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) { 2225 /* Common case: the firmware on the card is an exact match and 2226 * the filesystem one is an exact match too, or the filesystem 2227 * one is absent/incompatible. 
2228 */ 2229 } else if (fs_fw_usable && state == CSIO_DEV_STATE_UNINIT && 2230 csio_should_install_fs_fw(hw, card_fw_usable, 2231 be32_to_cpu(fs_fw->fw_ver), 2232 be32_to_cpu(card_fw->fw_ver))) { 2233 ret = csio_hw_fw_upgrade(hw, hw->pfn, fw_data, 2234 fw_size, 0); 2235 if (ret != 0) { 2236 csio_err(hw, 2237 "failed to install firmware: %d\n", ret); 2238 goto bye; 2239 } 2240 2241 /* Installed successfully, update the cached header too. */ 2242 memcpy(card_fw, fs_fw, sizeof(*card_fw)); 2243 card_fw_usable = 1; 2244 *reset = 0; /* already reset as part of load_fw */ 2245 } 2246 2247 if (!card_fw_usable) { 2248 uint32_t d, c, k; 2249 2250 d = be32_to_cpu(drv_fw->fw_ver); 2251 c = be32_to_cpu(card_fw->fw_ver); 2252 k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0; 2253 2254 csio_err(hw, "Cannot find a usable firmware: " 2255 "chip state %d, " 2256 "driver compiled with %d.%d.%d.%d, " 2257 "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n", 2258 state, 2259 FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d), 2260 FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d), 2261 FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c), 2262 FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), 2263 FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k), 2264 FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k)); 2265 ret = EINVAL; 2266 goto bye; 2267 } 2268 2269 /* We're using whatever's on the card and it's known to be good. */ 2270 hw->fwrev = be32_to_cpu(card_fw->fw_ver); 2271 hw->tp_vers = be32_to_cpu(card_fw->tp_microcode_ver); 2272 2273 bye: 2274 return ret; 2275 } 2276 2277 /* 2278 * Returns -EINVAL if attempts to flash the firmware failed 2279 * else returns 0, 2280 * if flashing was not attempted because the card had the 2281 * latest firmware ECANCELED is returned 2282 */ 2283 static int 2284 csio_hw_flash_fw(struct csio_hw *hw, int *reset) 2285 { 2286 int ret = -ECANCELED; 2287 const struct firmware *fw; 2288 struct fw_info *fw_info; 2289 struct fw_hdr *card_fw; 2290 struct pci_dev *pci_dev = hw->pdev; 2291 struct device *dev = &pci_dev->dev ; 2292 const u8 *fw_data = NULL; 2293 unsigned int fw_size = 0; 2294 const char *fw_bin_file; 2295 2296 /* This is the firmware whose headers the driver was compiled 2297 * against 2298 */ 2299 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(hw->chip_id)); 2300 if (fw_info == NULL) { 2301 csio_err(hw, 2302 "unable to get firmware info for chip %d.\n", 2303 CHELSIO_CHIP_VERSION(hw->chip_id)); 2304 return -EINVAL; 2305 } 2306 2307 if (csio_is_t5(pci_dev->device & CSIO_HW_CHIP_MASK)) 2308 fw_bin_file = FW_FNAME_T5; 2309 else 2310 fw_bin_file = FW_FNAME_T6; 2311 2312 if (request_firmware(&fw, fw_bin_file, dev) < 0) { 2313 csio_err(hw, "could not find firmware image %s, err: %d\n", 2314 fw_bin_file, ret); 2315 } else { 2316 fw_data = fw->data; 2317 fw_size = fw->size; 2318 } 2319 2320 /* allocate memory to read the header of the firmware on the 2321 * card 2322 */ 2323 card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL); 2324 2325 /* upgrade FW logic */ 2326 ret = csio_hw_prep_fw(hw, fw_info, fw_data, fw_size, card_fw, 2327 hw->fw_state, reset); 2328 2329 /* Cleaning up */ 2330 if (fw != NULL) 2331 release_firmware(fw); 2332 kfree(card_fw); 2333 return ret; 2334 } 2335 2336 static int csio_hw_check_fwver(struct csio_hw *hw) 2337 { 2338 if (csio_is_t6(hw->pdev->device & CSIO_HW_CHIP_MASK) && 2339 (hw->fwrev < CSIO_MIN_T6_FW)) { 2340 csio_hw_print_fw_version(hw, "T6 unsupported fw"); 2341 return -1; 2342 } 2343 2344 return 0; 2345 } 2346 2347 /* 2348 * csio_hw_configure - Configure HW 
 * @hw - HW module
 *
 */
static void
csio_hw_configure(struct csio_hw *hw)
{
	int reset = 1;
	int rv;
	u32 param[1];

	rv = csio_hw_dev_ready(hw);
	if (rv != 0) {
		CSIO_INC_STATS(hw, n_err_fatal);
		csio_post_event(&hw->sm, CSIO_HWE_FATAL);
		goto out;
	}

	/* HW version */
	hw->chip_ver = (char)csio_rd_reg32(hw, PL_REV_A);

	/* Needed for FW download */
	rv = csio_hw_get_flash_params(hw);
	if (rv != 0) {
		csio_err(hw, "Failed to get serial flash params rv:%d\n", rv);
		csio_post_event(&hw->sm, CSIO_HWE_FATAL);
		goto out;
	}

	/* Set PCIe completion timeout to 4 seconds */
	if (pci_is_pcie(hw->pdev))
		pcie_capability_clear_and_set_word(hw->pdev, PCI_EXP_DEVCTL2,
				PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd);

	hw->chip_ops->chip_set_mem_win(hw, MEMWIN_CSIOSTOR);

	rv = csio_hw_get_fw_version(hw, &hw->fwrev);
	if (rv != 0)
		goto out;

	csio_hw_print_fw_version(hw, "Firmware revision");

	rv = csio_do_hello(hw, &hw->fw_state);
	if (rv != 0) {
		CSIO_INC_STATS(hw, n_err_fatal);
		csio_post_event(&hw->sm, CSIO_HWE_FATAL);
		goto out;
	}

	/* Read vpd */
	rv = csio_hw_get_vpd_params(hw, &hw->vpd);
	if (rv != 0)
		goto out;

	csio_hw_get_fw_version(hw, &hw->fwrev);
	csio_hw_get_tp_version(hw, &hw->tp_vers);
	if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {

		/* Do firmware update */
		spin_unlock_irq(&hw->lock);
		rv = csio_hw_flash_fw(hw, &reset);
		spin_lock_irq(&hw->lock);

		if (rv != 0)
			goto out;

		rv = csio_hw_check_fwver(hw);
		if (rv < 0)
			goto out;

		/* If the firmware doesn't support Configuration Files,
		 * return an error.
		 */
		rv = csio_hw_check_fwconfig(hw, param);
		if (rv != 0) {
			csio_info(hw, "Firmware doesn't support "
				  "Firmware Configuration files\n");
			goto out;
		}

		/* The firmware provides us with a memory buffer where we can
		 * load a Configuration File from the host if we want to
		 * override the Configuration File in flash.
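		 * The buffer location arrives encoded in the
		 * FW_PARAMS_PARAM_DEV_CF parameter that was just read: the Y
		 * field selects the memory type and the Z field carries the
		 * address in 64KB units, decoded as in csio_hw_use_fwconfig():
		 *
		 *	mtype = FW_PARAMS_PARAM_Y_G(param[0]);
		 *	maddr = FW_PARAMS_PARAM_Z_G(param[0]) << 16;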
2431 */ 2432 rv = csio_hw_use_fwconfig(hw, reset, param); 2433 if (rv == -ENOENT) { 2434 csio_info(hw, "Could not initialize " 2435 "adapter, error%d\n", rv); 2436 goto out; 2437 } 2438 if (rv != 0) { 2439 csio_info(hw, "Could not initialize " 2440 "adapter, error%d\n", rv); 2441 goto out; 2442 } 2443 2444 } else { 2445 rv = csio_hw_check_fwver(hw); 2446 if (rv < 0) 2447 goto out; 2448 2449 if (hw->fw_state == CSIO_DEV_STATE_INIT) { 2450 2451 hw->flags |= CSIO_HWF_USING_SOFT_PARAMS; 2452 2453 /* device parameters */ 2454 rv = csio_get_device_params(hw); 2455 if (rv != 0) 2456 goto out; 2457 2458 /* Get device capabilities */ 2459 rv = csio_config_device_caps(hw); 2460 if (rv != 0) 2461 goto out; 2462 2463 /* Configure SGE */ 2464 csio_wr_sge_init(hw); 2465 2466 /* Post event to notify completion of configuration */ 2467 csio_post_event(&hw->sm, CSIO_HWE_INIT); 2468 goto out; 2469 } 2470 } /* if not master */ 2471 2472 out: 2473 return; 2474 } 2475 2476 /* 2477 * csio_hw_initialize - Initialize HW 2478 * @hw - HW module 2479 * 2480 */ 2481 static void 2482 csio_hw_initialize(struct csio_hw *hw) 2483 { 2484 struct csio_mb *mbp; 2485 enum fw_retval retval; 2486 int rv; 2487 int i; 2488 2489 if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) { 2490 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 2491 if (!mbp) 2492 goto out; 2493 2494 csio_mb_initialize(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL); 2495 2496 if (csio_mb_issue(hw, mbp)) { 2497 csio_err(hw, "Issue of FW_INITIALIZE_CMD failed!\n"); 2498 goto free_and_out; 2499 } 2500 2501 retval = csio_mb_fw_retval(mbp); 2502 if (retval != FW_SUCCESS) { 2503 csio_err(hw, "FW_INITIALIZE_CMD returned 0x%x!\n", 2504 retval); 2505 goto free_and_out; 2506 } 2507 2508 mempool_free(mbp, hw->mb_mempool); 2509 } 2510 2511 rv = csio_get_fcoe_resinfo(hw); 2512 if (rv != 0) { 2513 csio_err(hw, "Failed to read fcoe resource info: %d\n", rv); 2514 goto out; 2515 } 2516 2517 spin_unlock_irq(&hw->lock); 2518 rv = csio_config_queues(hw); 2519 spin_lock_irq(&hw->lock); 2520 2521 if (rv != 0) { 2522 csio_err(hw, "Config of queues failed!: %d\n", rv); 2523 goto out; 2524 } 2525 2526 for (i = 0; i < hw->num_pports; i++) 2527 hw->pport[i].mod_type = FW_PORT_MOD_TYPE_NA; 2528 2529 if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) { 2530 rv = csio_enable_ports(hw); 2531 if (rv != 0) { 2532 csio_err(hw, "Failed to enable ports: %d\n", rv); 2533 goto out; 2534 } 2535 } 2536 2537 csio_post_event(&hw->sm, CSIO_HWE_INIT_DONE); 2538 return; 2539 2540 free_and_out: 2541 mempool_free(mbp, hw->mb_mempool); 2542 out: 2543 return; 2544 } 2545 2546 #define PF_INTR_MASK (PFSW_F | PFCIM_F) 2547 2548 /* 2549 * csio_hw_intr_enable - Enable HW interrupts 2550 * @hw: Pointer to HW module. 2551 * 2552 * Enable interrupts in HW registers. 2553 */ 2554 static void 2555 csio_hw_intr_enable(struct csio_hw *hw) 2556 { 2557 uint16_t vec = (uint16_t)csio_get_mb_intr_idx(csio_hw_to_mbm(hw)); 2558 u32 pf = 0; 2559 uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE_A); 2560 2561 if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK)) 2562 pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A)); 2563 else 2564 pf = T6_SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A)); 2565 2566 /* 2567 * Set aivec for MSI/MSIX. PCIE_PF_CFG.INTXType is set up 2568 * by FW, so do nothing for INTX. 
2569 */ 2570 if (hw->intr_mode == CSIO_IM_MSIX) 2571 csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG_A), 2572 AIVEC_V(AIVEC_M), vec); 2573 else if (hw->intr_mode == CSIO_IM_MSI) 2574 csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG_A), 2575 AIVEC_V(AIVEC_M), 0); 2576 2577 csio_wr_reg32(hw, PF_INTR_MASK, MYPF_REG(PL_PF_INT_ENABLE_A)); 2578 2579 /* Turn on MB interrupts - this will internally flush PIO as well */ 2580 csio_mb_intr_enable(hw); 2581 2582 /* These are common registers - only a master can modify them */ 2583 if (csio_is_hw_master(hw)) { 2584 /* 2585 * Disable the Serial FLASH interrupt, if enabled! 2586 */ 2587 pl &= (~SF_F); 2588 csio_wr_reg32(hw, pl, PL_INT_ENABLE_A); 2589 2590 csio_wr_reg32(hw, ERR_CPL_EXCEED_IQE_SIZE_F | 2591 EGRESS_SIZE_ERR_F | ERR_INVALID_CIDX_INC_F | 2592 ERR_CPL_OPCODE_0_F | ERR_DROPPED_DB_F | 2593 ERR_DATA_CPL_ON_HIGH_QID1_F | 2594 ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F | 2595 ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F | 2596 ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F | 2597 ERR_EGR_CTXT_PRIO_F | INGRESS_SIZE_ERR_F, 2598 SGE_INT_ENABLE3_A); 2599 csio_set_reg_field(hw, PL_INT_MAP0_A, 0, 1 << pf); 2600 } 2601 2602 hw->flags |= CSIO_HWF_HW_INTR_ENABLED; 2603 2604 } 2605 2606 /* 2607 * csio_hw_intr_disable - Disable HW interrupts 2608 * @hw: Pointer to HW module. 2609 * 2610 * Turn off Mailbox and PCI_PF_CFG interrupts. 2611 */ 2612 void 2613 csio_hw_intr_disable(struct csio_hw *hw) 2614 { 2615 u32 pf = 0; 2616 2617 if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK)) 2618 pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A)); 2619 else 2620 pf = T6_SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A)); 2621 2622 if (!(hw->flags & CSIO_HWF_HW_INTR_ENABLED)) 2623 return; 2624 2625 hw->flags &= ~CSIO_HWF_HW_INTR_ENABLED; 2626 2627 csio_wr_reg32(hw, 0, MYPF_REG(PL_PF_INT_ENABLE_A)); 2628 if (csio_is_hw_master(hw)) 2629 csio_set_reg_field(hw, PL_INT_MAP0_A, 1 << pf, 0); 2630 2631 /* Turn off MB interrupts */ 2632 csio_mb_intr_disable(hw); 2633 2634 } 2635 2636 void 2637 csio_hw_fatal_err(struct csio_hw *hw) 2638 { 2639 csio_set_reg_field(hw, SGE_CONTROL_A, GLOBALENABLE_F, 0); 2640 csio_hw_intr_disable(hw); 2641 2642 /* Do not reset HW, we may need FW state for debugging */ 2643 csio_fatal(hw, "HW Fatal error encountered!\n"); 2644 } 2645 2646 /*****************************************************************************/ 2647 /* START: HW SM */ 2648 /*****************************************************************************/ 2649 /* 2650 * csio_hws_uninit - Uninit state 2651 * @hw - HW module 2652 * @evt - Event 2653 * 2654 */ 2655 static void 2656 csio_hws_uninit(struct csio_hw *hw, enum csio_hw_ev evt) 2657 { 2658 hw->prev_evt = hw->cur_evt; 2659 hw->cur_evt = evt; 2660 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2661 2662 switch (evt) { 2663 case CSIO_HWE_CFG: 2664 csio_set_state(&hw->sm, csio_hws_configuring); 2665 csio_hw_configure(hw); 2666 break; 2667 2668 default: 2669 CSIO_INC_STATS(hw, n_evt_unexp); 2670 break; 2671 } 2672 } 2673 2674 /* 2675 * csio_hws_configuring - Configuring state 2676 * @hw - HW module 2677 * @evt - Event 2678 * 2679 */ 2680 static void 2681 csio_hws_configuring(struct csio_hw *hw, enum csio_hw_ev evt) 2682 { 2683 hw->prev_evt = hw->cur_evt; 2684 hw->cur_evt = evt; 2685 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2686 2687 switch (evt) { 2688 case CSIO_HWE_INIT: 2689 csio_set_state(&hw->sm, csio_hws_initializing); 2690 csio_hw_initialize(hw); 2691 break; 2692 2693 case CSIO_HWE_INIT_DONE: 2694 csio_set_state(&hw->sm, csio_hws_ready); 2695 /* Fan out event to all 
		csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY);
		break;

	case CSIO_HWE_FATAL:
		csio_set_state(&hw->sm, csio_hws_uninit);
		break;

	case CSIO_HWE_PCI_REMOVE:
		csio_do_bye(hw);
		break;
	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}

/*
 * csio_hws_initializing - Initializing state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_initializing(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_INIT_DONE:
		csio_set_state(&hw->sm, csio_hws_ready);

		/* Fan out event to all lnode SMs */
		csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY);

		/* Enable interrupts */
		csio_hw_intr_enable(hw);
		break;

	case CSIO_HWE_FATAL:
		csio_set_state(&hw->sm, csio_hws_uninit);
		break;

	case CSIO_HWE_PCI_REMOVE:
		csio_do_bye(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}

/*
 * csio_hws_ready - Ready state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_ready(struct csio_hw *hw, enum csio_hw_ev evt)
{
	/* Remember the event */
	hw->evtflag = evt;

	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_HBA_RESET:
	case CSIO_HWE_FW_DLOAD:
	case CSIO_HWE_SUSPEND:
	case CSIO_HWE_PCI_REMOVE:
	case CSIO_HWE_PCIERR_DETECTED:
		csio_set_state(&hw->sm, csio_hws_quiescing);
		/* cleanup all outstanding cmds */
		if (evt == CSIO_HWE_HBA_RESET ||
		    evt == CSIO_HWE_PCIERR_DETECTED)
			csio_scsim_cleanup_io(csio_hw_to_scsim(hw), false);
		else
			csio_scsim_cleanup_io(csio_hw_to_scsim(hw), true);

		csio_hw_intr_disable(hw);
		csio_hw_mbm_cleanup(hw);
		csio_evtq_stop(hw);
		csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWSTOP);
		csio_evtq_flush(hw);
		csio_mgmtm_cleanup(csio_hw_to_mgmtm(hw));
		csio_post_event(&hw->sm, CSIO_HWE_QUIESCED);
		break;

	case CSIO_HWE_FATAL:
		csio_set_state(&hw->sm, csio_hws_uninit);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}

/*
 * csio_hws_quiescing - Quiescing state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_quiescing(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_QUIESCED:
		switch (hw->evtflag) {
		case CSIO_HWE_FW_DLOAD:
			csio_set_state(&hw->sm, csio_hws_resetting);
			/* Download firmware */
			/* Fall through */

		case CSIO_HWE_HBA_RESET:
			csio_set_state(&hw->sm, csio_hws_resetting);
			/* Start reset of the HBA */
			csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWRESET);
			csio_wr_destroy_queues(hw, false);
			csio_do_reset(hw, false);
			csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET_DONE);
			break;

		case CSIO_HWE_PCI_REMOVE:
			csio_set_state(&hw->sm, csio_hws_removing);
			csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREMOVE);
			csio_wr_destroy_queues(hw, true);
			/* Now send the bye command */
			csio_do_bye(hw);
			break;

		case CSIO_HWE_SUSPEND:
			csio_set_state(&hw->sm, csio_hws_quiesced);
			break;

		case CSIO_HWE_PCIERR_DETECTED:
			csio_set_state(&hw->sm, csio_hws_pcierr);
			csio_wr_destroy_queues(hw, false);
			break;

		default:
			CSIO_INC_STATS(hw, n_evt_unexp);
			break;
		}
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}

/*
 * csio_hws_quiesced - Quiesced state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_quiesced(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_RESUME:
		csio_set_state(&hw->sm, csio_hws_configuring);
		csio_hw_configure(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}

/*
 * csio_hws_resetting - HW Resetting state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_resetting(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_HBA_RESET_DONE:
		csio_evtq_start(hw);
		csio_set_state(&hw->sm, csio_hws_configuring);
		csio_hw_configure(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}

/*
 * csio_hws_removing - PCI Hotplug removing state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_removing(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_HBA_RESET:
		if (!csio_is_hw_master(hw))
			break;
		/*
		 * The BYE should have already been issued, so we can't
		 * use the mailbox interface. Hence we use the PL_RST
		 * register directly.
		 */
		csio_err(hw, "Resetting HW and waiting 2 seconds...\n");
		csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
		mdelay(2000);
		break;

	/* Should never receive any new events */
	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}

/*
 * csio_hws_pcierr - PCI Error state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_pcierr(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_PCIERR_SLOT_RESET:
		csio_evtq_start(hw);
		csio_set_state(&hw->sm, csio_hws_configuring);
		csio_hw_configure(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}

/*****************************************************************************/
/* END: HW SM */
/*****************************************************************************/

/*
 * csio_handle_intr_status - table driven interrupt handler
 * @hw: HW instance
 * @reg: the interrupt status register to process
 * @acts: table of interrupt actions
 *
 * A table driven interrupt handler that applies a set of masks to an
 * interrupt status word and performs the corresponding actions if the
 * interrupts described by the mask have occurred.  The actions include
 * optionally emitting a warning or alert message.
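 *
 * A typical caller builds a static table and escalates on any fatal hit,
 * e.g. this illustrative sketch mirroring the per-module handlers below
 * (PARITY_BIT_F and BLOCK_INT_CAUSE_A are placeholder names):
 *
 *	static struct intr_info example_intr_info[] = {
 *		{ PARITY_BIT_F, "block parity error", -1, 1 },
 *		{ 0, NULL, 0, 0 }	(mask 0 terminates the table)
 *	};
 *
 *	if (csio_handle_intr_status(hw, BLOCK_INT_CAUSE_A,
 *				    example_intr_info))
 *		csio_hw_fatal_err(hw);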
 * The table is terminated by an entry specifying mask 0.  Returns the
 * number of fatal interrupt conditions.
 */
int
csio_handle_intr_status(struct csio_hw *hw, unsigned int reg,
			const struct intr_info *acts)
{
	int fatal = 0;
	unsigned int mask = 0;
	unsigned int status = csio_rd_reg32(hw, reg);

	for ( ; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			csio_fatal(hw, "Fatal %s (0x%x)\n",
				   acts->msg, status & acts->mask);
		} else if (acts->msg)
			csio_info(hw, "%s (0x%x)\n",
				  acts->msg, status & acts->mask);
		mask |= acts->mask;
	}
	status &= mask;
	if (status)	/* clear processed interrupts */
		csio_wr_reg32(hw, status, reg);
	return fatal;
}

/*
 * TP interrupt handler.
 */
static void csio_tp_intr_handler(struct csio_hw *hw)
{
	static struct intr_info tp_intr_info[] = {
		{ 0x3fffffff, "TP parity error", -1, 1 },
		{ FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, TP_INT_CAUSE_A, tp_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * SGE interrupt handler.
 */
static void csio_sge_intr_handler(struct csio_hw *hw)
{
	uint64_t v;

	static struct intr_info sge_intr_info[] = {
		{ ERR_CPL_EXCEED_IQE_SIZE_F,
		  "SGE received CPL exceeding IQE size", -1, 1 },
		{ ERR_INVALID_CIDX_INC_F,
		  "SGE GTS CIDX increment too large", -1, 0 },
		{ ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
		{ ERR_DROPPED_DB_F, "SGE doorbell dropped", -1, 0 },
		{ ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
		{ ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
		  0 },
		{ ERR_ING_CTXT_PRIO_F,
		  "SGE too many priority ingress contexts", -1, 0 },
		{ ERR_EGR_CTXT_PRIO_F,
		  "SGE too many priority egress contexts", -1, 0 },
		{ INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
		{ EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
		{ 0, NULL, 0, 0 }
	};

	v = (uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE1_A) |
	    ((uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE2_A) << 32);
	if (v) {
		csio_fatal(hw, "SGE parity error (%#llx)\n",
			   (unsigned long long)v);
		csio_wr_reg32(hw, (uint32_t)(v & 0xFFFFFFFF),
			      SGE_INT_CAUSE1_A);
		csio_wr_reg32(hw, (uint32_t)(v >> 32), SGE_INT_CAUSE2_A);
	}

	/* Process SGE_INT_CAUSE3 once; a second call would only re-read
	 * the register after the first call has already cleared it.
	 */
	v |= csio_handle_intr_status(hw, SGE_INT_CAUSE3_A, sge_intr_info);
	if (v != 0)
		csio_hw_fatal_err(hw);
}

#define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
		      OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
#define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
		      IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)

/*
 * CIM interrupt handler.
3086 */ 3087 static void csio_cim_intr_handler(struct csio_hw *hw) 3088 { 3089 static struct intr_info cim_intr_info[] = { 3090 { PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 }, 3091 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 }, 3092 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 }, 3093 { MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 }, 3094 { MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 }, 3095 { TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 }, 3096 { TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 }, 3097 { 0, NULL, 0, 0 } 3098 }; 3099 static struct intr_info cim_upintr_info[] = { 3100 { RSVDSPACEINT_F, "CIM reserved space access", -1, 1 }, 3101 { ILLTRANSINT_F, "CIM illegal transaction", -1, 1 }, 3102 { ILLWRINT_F, "CIM illegal write", -1, 1 }, 3103 { ILLRDINT_F, "CIM illegal read", -1, 1 }, 3104 { ILLRDBEINT_F, "CIM illegal read BE", -1, 1 }, 3105 { ILLWRBEINT_F, "CIM illegal write BE", -1, 1 }, 3106 { SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 }, 3107 { SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 }, 3108 { BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 }, 3109 { SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 }, 3110 { SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 }, 3111 { BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 }, 3112 { SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 }, 3113 { SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 }, 3114 { BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 }, 3115 { BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 }, 3116 { SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 }, 3117 { SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 }, 3118 { BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 }, 3119 { BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 }, 3120 { SGLRDPLINT_F, "CIM single read from PL space", -1, 1 }, 3121 { SGLWRPLINT_F, "CIM single write to PL space", -1, 1 }, 3122 { BLKRDPLINT_F, "CIM block read from PL space", -1, 1 }, 3123 { BLKWRPLINT_F, "CIM block write to PL space", -1, 1 }, 3124 { REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 }, 3125 { RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 }, 3126 { TIMEOUTINT_F, "CIM PIF timeout", -1, 1 }, 3127 { TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 }, 3128 { 0, NULL, 0, 0 } 3129 }; 3130 3131 int fat; 3132 3133 fat = csio_handle_intr_status(hw, CIM_HOST_INT_CAUSE_A, 3134 cim_intr_info) + 3135 csio_handle_intr_status(hw, CIM_HOST_UPACC_INT_CAUSE_A, 3136 cim_upintr_info); 3137 if (fat) 3138 csio_hw_fatal_err(hw); 3139 } 3140 3141 /* 3142 * ULP RX interrupt handler. 3143 */ 3144 static void csio_ulprx_intr_handler(struct csio_hw *hw) 3145 { 3146 static struct intr_info ulprx_intr_info[] = { 3147 { 0x1800000, "ULPRX context error", -1, 1 }, 3148 { 0x7fffff, "ULPRX parity error", -1, 1 }, 3149 { 0, NULL, 0, 0 } 3150 }; 3151 3152 if (csio_handle_intr_status(hw, ULP_RX_INT_CAUSE_A, ulprx_intr_info)) 3153 csio_hw_fatal_err(hw); 3154 } 3155 3156 /* 3157 * ULP TX interrupt handler. 
3158 */ 3159 static void csio_ulptx_intr_handler(struct csio_hw *hw) 3160 { 3161 static struct intr_info ulptx_intr_info[] = { 3162 { PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1, 3163 0 }, 3164 { PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1, 3165 0 }, 3166 { PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1, 3167 0 }, 3168 { PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1, 3169 0 }, 3170 { 0xfffffff, "ULPTX parity error", -1, 1 }, 3171 { 0, NULL, 0, 0 } 3172 }; 3173 3174 if (csio_handle_intr_status(hw, ULP_TX_INT_CAUSE_A, ulptx_intr_info)) 3175 csio_hw_fatal_err(hw); 3176 } 3177 3178 /* 3179 * PM TX interrupt handler. 3180 */ 3181 static void csio_pmtx_intr_handler(struct csio_hw *hw) 3182 { 3183 static struct intr_info pmtx_intr_info[] = { 3184 { PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 }, 3185 { PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 }, 3186 { PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 }, 3187 { ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 }, 3188 { 0xffffff0, "PMTX framing error", -1, 1 }, 3189 { OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 }, 3190 { DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error", -1, 3191 1 }, 3192 { ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 }, 3193 { PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1}, 3194 { 0, NULL, 0, 0 } 3195 }; 3196 3197 if (csio_handle_intr_status(hw, PM_TX_INT_CAUSE_A, pmtx_intr_info)) 3198 csio_hw_fatal_err(hw); 3199 } 3200 3201 /* 3202 * PM RX interrupt handler. 3203 */ 3204 static void csio_pmrx_intr_handler(struct csio_hw *hw) 3205 { 3206 static struct intr_info pmrx_intr_info[] = { 3207 { ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 }, 3208 { 0x3ffff0, "PMRX framing error", -1, 1 }, 3209 { OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 }, 3210 { DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error", -1, 3211 1 }, 3212 { IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 }, 3213 { PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1}, 3214 { 0, NULL, 0, 0 } 3215 }; 3216 3217 if (csio_handle_intr_status(hw, PM_RX_INT_CAUSE_A, pmrx_intr_info)) 3218 csio_hw_fatal_err(hw); 3219 } 3220 3221 /* 3222 * CPL switch interrupt handler. 3223 */ 3224 static void csio_cplsw_intr_handler(struct csio_hw *hw) 3225 { 3226 static struct intr_info cplsw_intr_info[] = { 3227 { CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 }, 3228 { CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 }, 3229 { TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 }, 3230 { SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 }, 3231 { CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 }, 3232 { ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 }, 3233 { 0, NULL, 0, 0 } 3234 }; 3235 3236 if (csio_handle_intr_status(hw, CPL_INTR_CAUSE_A, cplsw_intr_info)) 3237 csio_hw_fatal_err(hw); 3238 } 3239 3240 /* 3241 * LE interrupt handler. 
3242 */ 3243 static void csio_le_intr_handler(struct csio_hw *hw) 3244 { 3245 enum chip_type chip = CHELSIO_CHIP_VERSION(hw->chip_id); 3246 3247 static struct intr_info le_intr_info[] = { 3248 { LIPMISS_F, "LE LIP miss", -1, 0 }, 3249 { LIP0_F, "LE 0 LIP error", -1, 0 }, 3250 { PARITYERR_F, "LE parity error", -1, 1 }, 3251 { UNKNOWNCMD_F, "LE unknown command", -1, 1 }, 3252 { REQQPARERR_F, "LE request queue parity error", -1, 1 }, 3253 { 0, NULL, 0, 0 } 3254 }; 3255 3256 static struct intr_info t6_le_intr_info[] = { 3257 { T6_LIPMISS_F, "LE LIP miss", -1, 0 }, 3258 { T6_LIP0_F, "LE 0 LIP error", -1, 0 }, 3259 { TCAMINTPERR_F, "LE parity error", -1, 1 }, 3260 { T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 }, 3261 { SSRAMINTPERR_F, "LE request queue parity error", -1, 1 }, 3262 { 0, NULL, 0, 0 } 3263 }; 3264 3265 if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE_A, 3266 (chip == CHELSIO_T5) ? 3267 le_intr_info : t6_le_intr_info)) 3268 csio_hw_fatal_err(hw); 3269 } 3270 3271 /* 3272 * MPS interrupt handler. 3273 */ 3274 static void csio_mps_intr_handler(struct csio_hw *hw) 3275 { 3276 static struct intr_info mps_rx_intr_info[] = { 3277 { 0xffffff, "MPS Rx parity error", -1, 1 }, 3278 { 0, NULL, 0, 0 } 3279 }; 3280 static struct intr_info mps_tx_intr_info[] = { 3281 { TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 }, 3282 { NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 }, 3283 { TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error", 3284 -1, 1 }, 3285 { TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error", 3286 -1, 1 }, 3287 { BUBBLE_F, "MPS Tx underflow", -1, 1 }, 3288 { SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 }, 3289 { FRMERR_F, "MPS Tx framing error", -1, 1 }, 3290 { 0, NULL, 0, 0 } 3291 }; 3292 static struct intr_info mps_trc_intr_info[] = { 3293 { FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 }, 3294 { PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error", 3295 -1, 1 }, 3296 { MISCPERR_F, "MPS TRC misc parity error", -1, 1 }, 3297 { 0, NULL, 0, 0 } 3298 }; 3299 static struct intr_info mps_stat_sram_intr_info[] = { 3300 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 }, 3301 { 0, NULL, 0, 0 } 3302 }; 3303 static struct intr_info mps_stat_tx_intr_info[] = { 3304 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 }, 3305 { 0, NULL, 0, 0 } 3306 }; 3307 static struct intr_info mps_stat_rx_intr_info[] = { 3308 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 }, 3309 { 0, NULL, 0, 0 } 3310 }; 3311 static struct intr_info mps_cls_intr_info[] = { 3312 { MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 }, 3313 { MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 }, 3314 { HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 }, 3315 { 0, NULL, 0, 0 } 3316 }; 3317 3318 int fat; 3319 3320 fat = csio_handle_intr_status(hw, MPS_RX_PERR_INT_CAUSE_A, 3321 mps_rx_intr_info) + 3322 csio_handle_intr_status(hw, MPS_TX_INT_CAUSE_A, 3323 mps_tx_intr_info) + 3324 csio_handle_intr_status(hw, MPS_TRC_INT_CAUSE_A, 3325 mps_trc_intr_info) + 3326 csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_SRAM_A, 3327 mps_stat_sram_intr_info) + 3328 csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A, 3329 mps_stat_tx_intr_info) + 3330 csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A, 3331 mps_stat_rx_intr_info) + 3332 csio_handle_intr_status(hw, MPS_CLS_INT_CAUSE_A, 3333 mps_cls_intr_info); 3334 3335 csio_wr_reg32(hw, 0, MPS_INT_CAUSE_A); 3336 csio_rd_reg32(hw, MPS_INT_CAUSE_A); /* flush */ 3337 if (fat) 3338 csio_hw_fatal_err(hw); 3339 } 3340 
#define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
		      ECC_UE_INT_CAUSE_F)

/*
 * EDC/MC interrupt handler.
 */
static void csio_mem_intr_handler(struct csio_hw *hw, int idx)
{
	static const char name[3][5] = { "EDC0", "EDC1", "MC" };

	unsigned int addr, cnt_addr, v;

	if (idx <= MEM_EDC1) {
		addr = EDC_REG(EDC_INT_CAUSE_A, idx);
		cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
	} else {
		addr = MC_INT_CAUSE_A;
		cnt_addr = MC_ECC_STATUS_A;
	}

	v = csio_rd_reg32(hw, addr) & MEM_INT_MASK;
	if (v & PERR_INT_CAUSE_F)
		csio_fatal(hw, "%s FIFO parity error\n", name[idx]);
	if (v & ECC_CE_INT_CAUSE_F) {
		uint32_t cnt = ECC_CECNT_G(csio_rd_reg32(hw, cnt_addr));

		csio_wr_reg32(hw, ECC_CECNT_V(ECC_CECNT_M), cnt_addr);
		csio_warn(hw, "%u %s correctable ECC data error%s\n",
			  cnt, name[idx], cnt > 1 ? "s" : "");
	}
	if (v & ECC_UE_INT_CAUSE_F)
		csio_fatal(hw, "%s uncorrectable ECC data error\n", name[idx]);

	csio_wr_reg32(hw, v, addr);
	if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
		csio_hw_fatal_err(hw);
}

/*
 * MA interrupt handler.
 */
static void csio_ma_intr_handler(struct csio_hw *hw)
{
	uint32_t v, status = csio_rd_reg32(hw, MA_INT_CAUSE_A);

	if (status & MEM_PERR_INT_CAUSE_F)
		csio_fatal(hw, "MA parity error, parity status %#x\n",
			   csio_rd_reg32(hw, MA_PARITY_ERROR_STATUS_A));
	if (status & MEM_WRAP_INT_CAUSE_F) {
		v = csio_rd_reg32(hw, MA_INT_WRAP_STATUS_A);
		csio_fatal(hw,
		   "MA address wrap-around error by client %u to address %#x\n",
		   MEM_WRAP_CLIENT_NUM_G(v), MEM_WRAP_ADDRESS_G(v) << 4);
	}
	csio_wr_reg32(hw, status, MA_INT_CAUSE_A);
	csio_hw_fatal_err(hw);
}

/*
 * SMB interrupt handler.
 */
static void csio_smb_intr_handler(struct csio_hw *hw)
{
	static struct intr_info smb_intr_info[] = {
		{ MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 },
		{ MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 },
		{ SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, SMB_INT_CAUSE_A, smb_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * NC-SI interrupt handler.
 */
static void csio_ncsi_intr_handler(struct csio_hw *hw)
{
	static struct intr_info ncsi_intr_info[] = {
		{ CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 },
		{ MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 },
		{ TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 },
		{ RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, NCSI_INT_CAUSE_A, ncsi_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * XGMAC interrupt handler.
 */
static void csio_xgmac_intr_handler(struct csio_hw *hw, int port)
{
	uint32_t v = csio_rd_reg32(hw, T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A));

	v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F;
	if (!v)
		return;

	if (v & TXFIFO_PRTY_ERR_F)
		csio_fatal(hw, "XGMAC %d Tx FIFO parity error\n", port);
	if (v & RXFIFO_PRTY_ERR_F)
		csio_fatal(hw, "XGMAC %d Rx FIFO parity error\n", port);
	csio_wr_reg32(hw, v, T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A));
	csio_hw_fatal_err(hw);
}

/*
 * PL interrupt handler.
3453 */ 3454 static void csio_pl_intr_handler(struct csio_hw *hw) 3455 { 3456 static struct intr_info pl_intr_info[] = { 3457 { FATALPERR_F, "T4 fatal parity error", -1, 1 }, 3458 { PERRVFID_F, "PL VFID_MAP parity error", -1, 1 }, 3459 { 0, NULL, 0, 0 } 3460 }; 3461 3462 if (csio_handle_intr_status(hw, PL_PL_INT_CAUSE_A, pl_intr_info)) 3463 csio_hw_fatal_err(hw); 3464 } 3465 3466 /* 3467 * csio_hw_slow_intr_handler - control path interrupt handler 3468 * @hw: HW module 3469 * 3470 * Interrupt handler for non-data global interrupt events, e.g., errors. 3471 * The designation 'slow' is because it involves register reads, while 3472 * data interrupts typically don't involve any MMIOs. 3473 */ 3474 int 3475 csio_hw_slow_intr_handler(struct csio_hw *hw) 3476 { 3477 uint32_t cause = csio_rd_reg32(hw, PL_INT_CAUSE_A); 3478 3479 if (!(cause & CSIO_GLBL_INTR_MASK)) { 3480 CSIO_INC_STATS(hw, n_plint_unexp); 3481 return 0; 3482 } 3483 3484 csio_dbg(hw, "Slow interrupt! cause: 0x%x\n", cause); 3485 3486 CSIO_INC_STATS(hw, n_plint_cnt); 3487 3488 if (cause & CIM_F) 3489 csio_cim_intr_handler(hw); 3490 3491 if (cause & MPS_F) 3492 csio_mps_intr_handler(hw); 3493 3494 if (cause & NCSI_F) 3495 csio_ncsi_intr_handler(hw); 3496 3497 if (cause & PL_F) 3498 csio_pl_intr_handler(hw); 3499 3500 if (cause & SMB_F) 3501 csio_smb_intr_handler(hw); 3502 3503 if (cause & XGMAC0_F) 3504 csio_xgmac_intr_handler(hw, 0); 3505 3506 if (cause & XGMAC1_F) 3507 csio_xgmac_intr_handler(hw, 1); 3508 3509 if (cause & XGMAC_KR0_F) 3510 csio_xgmac_intr_handler(hw, 2); 3511 3512 if (cause & XGMAC_KR1_F) 3513 csio_xgmac_intr_handler(hw, 3); 3514 3515 if (cause & PCIE_F) 3516 hw->chip_ops->chip_pcie_intr_handler(hw); 3517 3518 if (cause & MC_F) 3519 csio_mem_intr_handler(hw, MEM_MC); 3520 3521 if (cause & EDC0_F) 3522 csio_mem_intr_handler(hw, MEM_EDC0); 3523 3524 if (cause & EDC1_F) 3525 csio_mem_intr_handler(hw, MEM_EDC1); 3526 3527 if (cause & LE_F) 3528 csio_le_intr_handler(hw); 3529 3530 if (cause & TP_F) 3531 csio_tp_intr_handler(hw); 3532 3533 if (cause & MA_F) 3534 csio_ma_intr_handler(hw); 3535 3536 if (cause & PM_TX_F) 3537 csio_pmtx_intr_handler(hw); 3538 3539 if (cause & PM_RX_F) 3540 csio_pmrx_intr_handler(hw); 3541 3542 if (cause & ULP_RX_F) 3543 csio_ulprx_intr_handler(hw); 3544 3545 if (cause & CPL_SWITCH_F) 3546 csio_cplsw_intr_handler(hw); 3547 3548 if (cause & SGE_F) 3549 csio_sge_intr_handler(hw); 3550 3551 if (cause & ULP_TX_F) 3552 csio_ulptx_intr_handler(hw); 3553 3554 /* Clear the interrupts just processed for which we are the master. */ 3555 csio_wr_reg32(hw, cause & CSIO_GLBL_INTR_MASK, PL_INT_CAUSE_A); 3556 csio_rd_reg32(hw, PL_INT_CAUSE_A); /* flush */ 3557 3558 return 1; 3559 } 3560 3561 /***************************************************************************** 3562 * HW <--> mailbox interfacing routines. 3563 ****************************************************************************/ 3564 /* 3565 * csio_mberr_worker - Worker thread (dpc) for mailbox/error completions 3566 * 3567 * @data: Private data pointer. 3568 * 3569 * Called from worker thread context. 
3570 */ 3571 static void 3572 csio_mberr_worker(void *data) 3573 { 3574 struct csio_hw *hw = (struct csio_hw *)data; 3575 struct csio_mbm *mbm = &hw->mbm; 3576 LIST_HEAD(cbfn_q); 3577 struct csio_mb *mbp_next; 3578 int rv; 3579 3580 del_timer_sync(&mbm->timer); 3581 3582 spin_lock_irq(&hw->lock); 3583 if (list_empty(&mbm->cbfn_q)) { 3584 spin_unlock_irq(&hw->lock); 3585 return; 3586 } 3587 3588 list_splice_tail_init(&mbm->cbfn_q, &cbfn_q); 3589 mbm->stats.n_cbfnq = 0; 3590 3591 /* Try to start waiting mailboxes */ 3592 if (!list_empty(&mbm->req_q)) { 3593 mbp_next = list_first_entry(&mbm->req_q, struct csio_mb, list); 3594 list_del_init(&mbp_next->list); 3595 3596 rv = csio_mb_issue(hw, mbp_next); 3597 if (rv != 0) 3598 list_add_tail(&mbp_next->list, &mbm->req_q); 3599 else 3600 CSIO_DEC_STATS(mbm, n_activeq); 3601 } 3602 spin_unlock_irq(&hw->lock); 3603 3604 /* Now callback completions */ 3605 csio_mb_completions(hw, &cbfn_q); 3606 } 3607 3608 /* 3609 * csio_hw_mb_timer - Top-level Mailbox timeout handler. 3610 * 3611 * @data: private data pointer 3612 * 3613 **/ 3614 static void 3615 csio_hw_mb_timer(struct timer_list *t) 3616 { 3617 struct csio_mbm *mbm = from_timer(mbm, t, timer); 3618 struct csio_hw *hw = mbm->hw; 3619 struct csio_mb *mbp = NULL; 3620 3621 spin_lock_irq(&hw->lock); 3622 mbp = csio_mb_tmo_handler(hw); 3623 spin_unlock_irq(&hw->lock); 3624 3625 /* Call back the function for the timed-out Mailbox */ 3626 if (mbp) 3627 mbp->mb_cbfn(hw, mbp); 3628 3629 } 3630 3631 /* 3632 * csio_hw_mbm_cleanup - Cleanup Mailbox module. 3633 * @hw: HW module 3634 * 3635 * Called with lock held, should exit with lock held. 3636 * Cancels outstanding mailboxes (waiting, in-flight) and gathers them 3637 * into a local queue. Drops lock and calls the completions. Holds 3638 * lock and returns. 
3639 */ 3640 static void 3641 csio_hw_mbm_cleanup(struct csio_hw *hw) 3642 { 3643 LIST_HEAD(cbfn_q); 3644 3645 csio_mb_cancel_all(hw, &cbfn_q); 3646 3647 spin_unlock_irq(&hw->lock); 3648 csio_mb_completions(hw, &cbfn_q); 3649 spin_lock_irq(&hw->lock); 3650 } 3651 3652 /***************************************************************************** 3653 * Event handling 3654 ****************************************************************************/ 3655 int 3656 csio_enqueue_evt(struct csio_hw *hw, enum csio_evt type, void *evt_msg, 3657 uint16_t len) 3658 { 3659 struct csio_evt_msg *evt_entry = NULL; 3660 3661 if (type >= CSIO_EVT_MAX) 3662 return -EINVAL; 3663 3664 if (len > CSIO_EVT_MSG_SIZE) 3665 return -EINVAL; 3666 3667 if (hw->flags & CSIO_HWF_FWEVT_STOP) 3668 return -EINVAL; 3669 3670 if (list_empty(&hw->evt_free_q)) { 3671 csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n", 3672 type, len); 3673 return -ENOMEM; 3674 } 3675 3676 evt_entry = list_first_entry(&hw->evt_free_q, 3677 struct csio_evt_msg, list); 3678 list_del_init(&evt_entry->list); 3679 3680 /* copy event msg and queue the event */ 3681 evt_entry->type = type; 3682 memcpy((void *)evt_entry->data, evt_msg, len); 3683 list_add_tail(&evt_entry->list, &hw->evt_active_q); 3684 3685 CSIO_DEC_STATS(hw, n_evt_freeq); 3686 CSIO_INC_STATS(hw, n_evt_activeq); 3687 3688 return 0; 3689 } 3690 3691 static int 3692 csio_enqueue_evt_lock(struct csio_hw *hw, enum csio_evt type, void *evt_msg, 3693 uint16_t len, bool msg_sg) 3694 { 3695 struct csio_evt_msg *evt_entry = NULL; 3696 struct csio_fl_dma_buf *fl_sg; 3697 uint32_t off = 0; 3698 unsigned long flags; 3699 int n, ret = 0; 3700 3701 if (type >= CSIO_EVT_MAX) 3702 return -EINVAL; 3703 3704 if (len > CSIO_EVT_MSG_SIZE) 3705 return -EINVAL; 3706 3707 spin_lock_irqsave(&hw->lock, flags); 3708 if (hw->flags & CSIO_HWF_FWEVT_STOP) { 3709 ret = -EINVAL; 3710 goto out; 3711 } 3712 3713 if (list_empty(&hw->evt_free_q)) { 3714 csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n", 3715 type, len); 3716 ret = -ENOMEM; 3717 goto out; 3718 } 3719 3720 evt_entry = list_first_entry(&hw->evt_free_q, 3721 struct csio_evt_msg, list); 3722 list_del_init(&evt_entry->list); 3723 3724 /* copy event msg and queue the event */ 3725 evt_entry->type = type; 3726 3727 /* If Payload in SG list*/ 3728 if (msg_sg) { 3729 fl_sg = (struct csio_fl_dma_buf *) evt_msg; 3730 for (n = 0; (n < CSIO_MAX_FLBUF_PER_IQWR && off < len); n++) { 3731 memcpy((void *)((uintptr_t)evt_entry->data + off), 3732 fl_sg->flbufs[n].vaddr, 3733 fl_sg->flbufs[n].len); 3734 off += fl_sg->flbufs[n].len; 3735 } 3736 } else 3737 memcpy((void *)evt_entry->data, evt_msg, len); 3738 3739 list_add_tail(&evt_entry->list, &hw->evt_active_q); 3740 CSIO_DEC_STATS(hw, n_evt_freeq); 3741 CSIO_INC_STATS(hw, n_evt_activeq); 3742 out: 3743 spin_unlock_irqrestore(&hw->lock, flags); 3744 return ret; 3745 } 3746 3747 static void 3748 csio_free_evt(struct csio_hw *hw, struct csio_evt_msg *evt_entry) 3749 { 3750 if (evt_entry) { 3751 spin_lock_irq(&hw->lock); 3752 list_del_init(&evt_entry->list); 3753 list_add_tail(&evt_entry->list, &hw->evt_free_q); 3754 CSIO_DEC_STATS(hw, n_evt_activeq); 3755 CSIO_INC_STATS(hw, n_evt_freeq); 3756 spin_unlock_irq(&hw->lock); 3757 } 3758 } 3759 3760 void 3761 csio_evtq_flush(struct csio_hw *hw) 3762 { 3763 uint32_t count; 3764 count = 30; 3765 while (hw->flags & CSIO_HWF_FWEVT_PENDING && count--) { 3766 spin_unlock_irq(&hw->lock); 3767 msleep(2000); 3768 spin_lock_irq(&hw->lock); 3769 } 3770 3771 
	CSIO_DB_ASSERT(!(hw->flags & CSIO_HWF_FWEVT_PENDING));
}

static void
csio_evtq_stop(struct csio_hw *hw)
{
	hw->flags |= CSIO_HWF_FWEVT_STOP;
}

static void
csio_evtq_start(struct csio_hw *hw)
{
	hw->flags &= ~CSIO_HWF_FWEVT_STOP;
}

static void
csio_evtq_cleanup(struct csio_hw *hw)
{
	struct list_head *evt_entry, *next_entry;

	/* Release outstanding events from activeq to freeq */
	if (!list_empty(&hw->evt_active_q))
		list_splice_tail_init(&hw->evt_active_q, &hw->evt_free_q);

	hw->stats.n_evt_activeq = 0;
	hw->flags &= ~CSIO_HWF_FWEVT_PENDING;

	/* Free up each event entry */
	list_for_each_safe(evt_entry, next_entry, &hw->evt_free_q) {
		kfree(evt_entry);
		CSIO_DEC_STATS(hw, n_evt_freeq);
	}

	hw->stats.n_evt_freeq = 0;
}


static void
csio_process_fwevtq_entry(struct csio_hw *hw, void *wr, uint32_t len,
			  struct csio_fl_dma_buf *flb, void *priv)
{
	__u8 op;
	void *msg = NULL;
	uint32_t msg_len = 0;
	bool msg_sg = 0;

	op = ((struct rss_header *) wr)->opcode;
	if (op == CPL_FW6_PLD) {
		CSIO_INC_STATS(hw, n_cpl_fw6_pld);
		if (!flb || !flb->totlen) {
			CSIO_INC_STATS(hw, n_cpl_unexp);
			return;
		}

		msg = (void *) flb;
		msg_len = flb->totlen;
		msg_sg = 1;
	} else if (op == CPL_FW6_MSG || op == CPL_FW4_MSG) {

		CSIO_INC_STATS(hw, n_cpl_fw6_msg);
		/* skip RSS header */
		msg = (void *)((uintptr_t)wr + sizeof(__be64));
		msg_len = (op == CPL_FW6_MSG) ? sizeof(struct cpl_fw6_msg) :
			  sizeof(struct cpl_fw4_msg);
	} else {
		csio_warn(hw, "unexpected CPL %#x on FW event queue\n", op);
		CSIO_INC_STATS(hw, n_cpl_unexp);
		return;
	}

	/*
	 * Enqueue event to EventQ. Events processing happens
	 * in Event worker thread context.
	 */
	if (csio_enqueue_evt_lock(hw, CSIO_EVT_FW, msg,
				  (uint16_t)msg_len, msg_sg))
		CSIO_INC_STATS(hw, n_evt_drop);
}

void
csio_evtq_worker(struct work_struct *work)
{
	struct csio_hw *hw = container_of(work, struct csio_hw, evtq_work);
	struct list_head *evt_entry, *next_entry;
	LIST_HEAD(evt_q);
	struct csio_evt_msg *evt_msg;
	struct cpl_fw6_msg *msg;
	struct csio_rnode *rn;
	int rv = 0;
	uint8_t evtq_stop = 0;

	csio_dbg(hw, "event worker thread active evts#%d\n",
		 hw->stats.n_evt_activeq);

	spin_lock_irq(&hw->lock);
	while (!list_empty(&hw->evt_active_q)) {
		list_splice_tail_init(&hw->evt_active_q, &evt_q);
		spin_unlock_irq(&hw->lock);

		list_for_each_safe(evt_entry, next_entry, &evt_q) {
			evt_msg = (struct csio_evt_msg *) evt_entry;

			/* Drop events if queue is STOPPED */
			spin_lock_irq(&hw->lock);
			if (hw->flags & CSIO_HWF_FWEVT_STOP)
				evtq_stop = 1;
			spin_unlock_irq(&hw->lock);
			if (evtq_stop) {
				CSIO_INC_STATS(hw, n_evt_drop);
				goto free_evt;
			}

			switch (evt_msg->type) {
			case CSIO_EVT_FW:
				msg = (struct cpl_fw6_msg *)(evt_msg->data);

				if ((msg->opcode == CPL_FW6_MSG ||
				     msg->opcode == CPL_FW4_MSG) &&
				    !msg->type) {
					rv = csio_mb_fwevt_handler(hw,
								   msg->data);
					if (!rv)
						break;
					/* Handle any remaining fw events */
					csio_fcoe_fwevt_handler(hw,
							msg->opcode, msg->data);
				} else if (msg->opcode == CPL_FW6_PLD) {

					csio_fcoe_fwevt_handler(hw,
							msg->opcode, msg->data);
				} else {
					csio_warn(hw,
					     "Unhandled FW msg op %x type %x\n",
						  msg->opcode, msg->type);
					CSIO_INC_STATS(hw, n_evt_drop);
				}
				break;

			case CSIO_EVT_MBX:
				csio_mberr_worker(hw);
				break;

			case CSIO_EVT_DEV_LOSS:
				memcpy(&rn, evt_msg->data, sizeof(rn));
				csio_rnode_devloss_handler(rn);
				break;

			default:
				csio_warn(hw, "Unhandled event %x on evtq\n",
					  evt_msg->type);
				CSIO_INC_STATS(hw, n_evt_unexp);
				break;
			}
free_evt:
			csio_free_evt(hw, evt_msg);
		}

		spin_lock_irq(&hw->lock);
	}
	hw->flags &= ~CSIO_HWF_FWEVT_PENDING;
	spin_unlock_irq(&hw->lock);
}

int
csio_fwevtq_handler(struct csio_hw *hw)
{
	int rv;

	if (csio_q_iqid(hw, hw->fwevt_iq_idx) == CSIO_MAX_QID) {
		CSIO_INC_STATS(hw, n_int_stray);
		return -EINVAL;
	}

	rv = csio_wr_process_iq_idx(hw, hw->fwevt_iq_idx,
				    csio_process_fwevtq_entry, NULL);
	return rv;
}

/****************************************************************************
 * Entry points
 ****************************************************************************/

/* Management module */
/*
 * csio_mgmt_req_lookup - Check whether the given IO request exists in the
 * active queue.
 * @mgmtm - mgmt module
 * @io_req - io request
 *
 * Return - 0 if the given IO request exists in the active queue;
 * -EINVAL if the lookup fails.
3961 */ 3962 int 3963 csio_mgmt_req_lookup(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req) 3964 { 3965 struct list_head *tmp; 3966 3967 /* Lookup ioreq in the ACTIVEQ */ 3968 list_for_each(tmp, &mgmtm->active_q) { 3969 if (io_req == (struct csio_ioreq *)tmp) 3970 return 0; 3971 } 3972 return -EINVAL; 3973 } 3974 3975 #define ECM_MIN_TMO 1000 /* Minimum timeout value for req */ 3976 3977 /* 3978 * csio_mgmts_tmo_handler - MGMT IO Timeout handler. 3979 * @data - Event data. 3980 * 3981 * Return - none. 3982 */ 3983 static void 3984 csio_mgmt_tmo_handler(struct timer_list *t) 3985 { 3986 struct csio_mgmtm *mgmtm = from_timer(mgmtm, t, mgmt_timer); 3987 struct list_head *tmp; 3988 struct csio_ioreq *io_req; 3989 3990 csio_dbg(mgmtm->hw, "Mgmt timer invoked!\n"); 3991 3992 spin_lock_irq(&mgmtm->hw->lock); 3993 3994 list_for_each(tmp, &mgmtm->active_q) { 3995 io_req = (struct csio_ioreq *) tmp; 3996 io_req->tmo -= min_t(uint32_t, io_req->tmo, ECM_MIN_TMO); 3997 3998 if (!io_req->tmo) { 3999 /* Dequeue the request from retry Q. */ 4000 tmp = csio_list_prev(tmp); 4001 list_del_init(&io_req->sm.sm_list); 4002 if (io_req->io_cbfn) { 4003 /* io_req will be freed by completion handler */ 4004 io_req->wr_status = -ETIMEDOUT; 4005 io_req->io_cbfn(mgmtm->hw, io_req); 4006 } else { 4007 CSIO_DB_ASSERT(0); 4008 } 4009 } 4010 } 4011 4012 /* If retry queue is not empty, re-arm timer */ 4013 if (!list_empty(&mgmtm->active_q)) 4014 mod_timer(&mgmtm->mgmt_timer, 4015 jiffies + msecs_to_jiffies(ECM_MIN_TMO)); 4016 spin_unlock_irq(&mgmtm->hw->lock); 4017 } 4018 4019 static void 4020 csio_mgmtm_cleanup(struct csio_mgmtm *mgmtm) 4021 { 4022 struct csio_hw *hw = mgmtm->hw; 4023 struct csio_ioreq *io_req; 4024 struct list_head *tmp; 4025 uint32_t count; 4026 4027 count = 30; 4028 /* Wait for all outstanding req to complete gracefully */ 4029 while ((!list_empty(&mgmtm->active_q)) && count--) { 4030 spin_unlock_irq(&hw->lock); 4031 msleep(2000); 4032 spin_lock_irq(&hw->lock); 4033 } 4034 4035 /* release outstanding req from ACTIVEQ */ 4036 list_for_each(tmp, &mgmtm->active_q) { 4037 io_req = (struct csio_ioreq *) tmp; 4038 tmp = csio_list_prev(tmp); 4039 list_del_init(&io_req->sm.sm_list); 4040 mgmtm->stats.n_active--; 4041 if (io_req->io_cbfn) { 4042 /* io_req will be freed by completion handler */ 4043 io_req->wr_status = -ETIMEDOUT; 4044 io_req->io_cbfn(mgmtm->hw, io_req); 4045 } 4046 } 4047 } 4048 4049 /* 4050 * csio_mgmt_init - Mgmt module init entry point 4051 * @mgmtsm - mgmt module 4052 * @hw - HW module 4053 * 4054 * Initialize mgmt timer, resource wait queue, active queue, 4055 * completion q. Allocate Egress and Ingress 4056 * WR queues and save off the queue index returned by the WR 4057 * module for future use. Allocate and save off mgmt reqs in the 4058 * mgmt_req_freelist for future use. Make sure their SM is initialized 4059 * to uninit state. 4060 * Returns: 0 - on success 4061 * -ENOMEM - on error. 4062 */ 4063 static int 4064 csio_mgmtm_init(struct csio_mgmtm *mgmtm, struct csio_hw *hw) 4065 { 4066 timer_setup(&mgmtm->mgmt_timer, csio_mgmt_tmo_handler, 0); 4067 4068 INIT_LIST_HEAD(&mgmtm->active_q); 4069 INIT_LIST_HEAD(&mgmtm->cbfn_q); 4070 4071 mgmtm->hw = hw; 4072 /*mgmtm->iq_idx = hw->fwevt_iq_idx;*/ 4073 4074 return 0; 4075 } 4076 4077 /* 4078 * csio_mgmtm_exit - MGMT module exit entry point 4079 * @mgmtsm - mgmt module 4080 * 4081 * This function called during MGMT module uninit. 4082 * Stop timers, free ioreqs allocated. 
/*
 * csio_mgmtm_exit - MGMT module exit entry point
 * @mgmtm: mgmt module
 *
 * This function is called during MGMT module uninit.
 * Stop timers, free allocated ioreqs.
 * Returns: None
 */
static void
csio_mgmtm_exit(struct csio_mgmtm *mgmtm)
{
	del_timer_sync(&mgmtm->mgmt_timer);
}


/**
 * csio_hw_start - Kicks off the HW state machine
 * @hw:		Pointer to HW module.
 *
 * It is assumed that the initialization is a synchronous operation.
 * So when we return after posting the event, the HW SM should be in
 * the ready state, if there were no errors during init.
 */
int
csio_hw_start(struct csio_hw *hw)
{
	spin_lock_irq(&hw->lock);
	csio_post_event(&hw->sm, CSIO_HWE_CFG);
	spin_unlock_irq(&hw->lock);

	if (csio_is_hw_ready(hw))
		return 0;
	else if (csio_match_state(hw, csio_hws_uninit))
		return -EINVAL;
	else
		return -ENODEV;
}

int
csio_hw_stop(struct csio_hw *hw)
{
	csio_post_event(&hw->sm, CSIO_HWE_PCI_REMOVE);

	if (csio_is_hw_removing(hw))
		return 0;
	else
		return -EINVAL;
}

/* Max reset retries */
#define CSIO_MAX_RESET_RETRIES	3

/**
 * csio_hw_reset - Reset the hardware
 * @hw:		HW module.
 *
 * Caller should hold lock across this function.
 */
int
csio_hw_reset(struct csio_hw *hw)
{
	if (!csio_is_hw_master(hw))
		return -EPERM;

	if (hw->rst_retries >= CSIO_MAX_RESET_RETRIES) {
		csio_dbg(hw, "Max hw reset attempts reached..");
		return -EINVAL;
	}

	hw->rst_retries++;
	csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET);

	if (csio_is_hw_ready(hw)) {
		hw->rst_retries = 0;
		hw->stats.n_reset_start = jiffies_to_msecs(jiffies);
		return 0;
	} else
		return -EINVAL;
}

/*
 * csio_hw_get_device_id - Caches the adapter's vendor & device id.
 * @hw: HW module.
 */
static void
csio_hw_get_device_id(struct csio_hw *hw)
{
	/* Is the adapter device id cached already? */
	if (csio_is_dev_id_cached(hw))
		return;

	/* Get the PCI vendor & device id */
	pci_read_config_word(hw->pdev, PCI_VENDOR_ID,
			     &hw->params.pci.vendor_id);
	pci_read_config_word(hw->pdev, PCI_DEVICE_ID,
			     &hw->params.pci.device_id);

	csio_dev_id_cached(hw);
	hw->chip_id = (hw->params.pci.device_id & CSIO_HW_CHIP_MASK);

} /* csio_hw_get_device_id */

/*
 * csio_hw_set_description - Set the model and description of the hw.
 * @hw: HW module.
 * @ven_id: PCI Vendor ID
 * @dev_id: PCI Device ID
 */
static void
csio_hw_set_description(struct csio_hw *hw, uint16_t ven_id, uint16_t dev_id)
{
	uint32_t adap_type, prot_type;

	if (ven_id == CSIO_VENDOR_ID) {
		prot_type = (dev_id & CSIO_ASIC_DEVID_PROTO_MASK);
		adap_type = (dev_id & CSIO_ASIC_DEVID_TYPE_MASK);

		if (prot_type == CSIO_T5_FCOE_ASIC) {
			memcpy(hw->hw_ver,
			       csio_t5_fcoe_adapters[adap_type].model_no, 16);
			memcpy(hw->model_desc,
			       csio_t5_fcoe_adapters[adap_type].description,
			       32);
		} else {
			char tempName[32] = "Chelsio FCoE Controller";

			memcpy(hw->model_desc, tempName, 32);
		}
	}
} /* csio_hw_set_description */
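
/*
 * Decode sketch for csio_hw_set_description() above (the mask widths
 * are assumptions; the real values live in csio_hw.h): the PCI device
 * id carries both a protocol field and an adapter-type field, and the
 * adapter type directly indexes the csio_t5_fcoe_adapters[] table:
 *
 *	prot_type = dev_id & CSIO_ASIC_DEVID_PROTO_MASK;  // FCoE ASIC?
 *	adap_type = dev_id & CSIO_ASIC_DEVID_TYPE_MASK;   // table index
 *	model     = csio_t5_fcoe_adapters[adap_type].model_no;
 */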
/**
 * csio_hw_init - Initialize HW module.
 * @hw:		Pointer to HW module.
 *
 * Initialize the members of the HW module.
 */
int
csio_hw_init(struct csio_hw *hw)
{
	int rv = -EINVAL;
	uint32_t i;
	uint16_t ven_id, dev_id;
	struct csio_evt_msg *evt_entry;

	INIT_LIST_HEAD(&hw->sm.sm_list);
	csio_init_state(&hw->sm, csio_hws_uninit);
	spin_lock_init(&hw->lock);
	INIT_LIST_HEAD(&hw->sln_head);

	/* Get the PCI vendor & device id */
	csio_hw_get_device_id(hw);

	strcpy(hw->name, CSIO_HW_NAME);

	/* Initialize the HW chip ops with T5-specific ops */
	hw->chip_ops = &t5_ops;

	/* Set the model & its description */
	ven_id = hw->params.pci.vendor_id;
	dev_id = hw->params.pci.device_id;

	csio_hw_set_description(hw, ven_id, dev_id);

	/* Initialize default log level */
	hw->params.log_level = (uint32_t) csio_dbg_level;

	csio_set_fwevt_intr_idx(hw, -1);
	csio_set_nondata_intr_idx(hw, -1);

	/* Init all the modules: Mailbox, WorkRequest and Transport */
	if (csio_mbm_init(csio_hw_to_mbm(hw), hw, csio_hw_mb_timer))
		goto err;

	rv = csio_wrm_init(csio_hw_to_wrm(hw), hw);
	if (rv)
		goto err_mbm_exit;

	rv = csio_scsim_init(csio_hw_to_scsim(hw), hw);
	if (rv)
		goto err_wrm_exit;

	rv = csio_mgmtm_init(csio_hw_to_mgmtm(hw), hw);
	if (rv)
		goto err_scsim_exit;

	/* Pre-allocate event queue entries and add them to the free list */
	INIT_LIST_HEAD(&hw->evt_active_q);
	INIT_LIST_HEAD(&hw->evt_free_q);
	for (i = 0; i < csio_evtq_sz; i++) {
		evt_entry = kzalloc(sizeof(struct csio_evt_msg), GFP_KERNEL);
		if (!evt_entry) {
			rv = -ENOMEM;
			csio_err(hw, "Failed to initialize eventq");
			goto err_evtq_cleanup;
		}

		list_add_tail(&evt_entry->list, &hw->evt_free_q);
		CSIO_INC_STATS(hw, n_evt_freeq);
	}

	hw->dev_num = dev_num;
	dev_num++;

	return 0;

err_evtq_cleanup:
	csio_evtq_cleanup(hw);
	csio_mgmtm_exit(csio_hw_to_mgmtm(hw));
err_scsim_exit:
	csio_scsim_exit(csio_hw_to_scsim(hw));
err_wrm_exit:
	csio_wrm_exit(csio_hw_to_wrm(hw), hw);
err_mbm_exit:
	csio_mbm_exit(csio_hw_to_mbm(hw));
err:
	return rv;
}

/**
 * csio_hw_exit - Un-initialize HW module.
 * @hw:		Pointer to HW module.
 *
 * Tear down the sub-modules in the reverse order of csio_hw_init().
 */
void
csio_hw_exit(struct csio_hw *hw)
{
	csio_evtq_cleanup(hw);
	csio_mgmtm_exit(csio_hw_to_mgmtm(hw));
	csio_scsim_exit(csio_hw_to_scsim(hw));
	csio_wrm_exit(csio_hw_to_wrm(hw), hw);
	csio_mbm_exit(csio_hw_to_mbm(hw));
}
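
/*
 * Usage sketch for the entry points above, as a hypothetical caller on
 * the PCI probe/remove path might invoke them (the real caller lives
 * elsewhere in the driver; the labels below are illustrative only):
 *
 *	if (csio_hw_init(hw))		// allocate sub-modules and evtq
 *		goto bail;
 *	if (csio_hw_start(hw))		// drive the HW SM to ready state
 *		goto hw_exit;
 *	...
 *	csio_hw_stop(hw);		// on remove
 *	csio_hw_exit(hw);
 *
 * Note that the goto ladder in csio_hw_init() unwinds exactly the
 * modules initialized before the failing step, in the same reverse
 * order csio_hw_exit() uses for a full teardown.
 */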