/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/firmware.h>
#include <linux/stddef.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/log2.h>

#include "csio_hw.h"
#include "csio_lnode.h"
#include "csio_rnode.h"

int csio_dbg_level = 0xFEFF;
unsigned int csio_port_mask = 0xf;

/* Default FW event queue entries. */
static uint32_t csio_evtq_sz = CSIO_EVTQ_SIZE;

/* Default MSI param level */
int csio_msi = 2;

/* FCoE function instances */
static int dev_num;

/* FCoE adapter types & their descriptions */
static const struct csio_adap_desc csio_t5_fcoe_adapters[] = {
	{"T580-Dbg 10G", "Chelsio T580-Dbg 10G [FCoE]"},
	{"T520-CR 10G", "Chelsio T520-CR 10G [FCoE]"},
	{"T522-CR 10G/1G", "Chelsio T522-CR 10G/1G [FCoE]"},
	{"T540-CR 10G", "Chelsio T540-CR 10G [FCoE]"},
	{"T520-BCH 10G", "Chelsio T520-BCH 10G [FCoE]"},
	{"T540-BCH 10G", "Chelsio T540-BCH 10G [FCoE]"},
	{"T540-CH 10G", "Chelsio T540-CH 10G [FCoE]"},
	{"T520-SO 10G", "Chelsio T520-SO 10G [FCoE]"},
	{"T520-CX4 10G", "Chelsio T520-CX4 10G [FCoE]"},
	{"T520-BT 10G", "Chelsio T520-BT 10G [FCoE]"},
	{"T504-BT 1G", "Chelsio T504-BT 1G [FCoE]"},
	{"B520-SR 10G", "Chelsio B520-SR 10G [FCoE]"},
	{"B504-BT 1G", "Chelsio B504-BT 1G [FCoE]"},
	{"T580-CR 10G", "Chelsio T580-CR 10G [FCoE]"},
	{"T540-LP-CR 10G", "Chelsio T540-LP-CR 10G [FCoE]"},
	{"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"},
	{"T580-LP-CR 40G", "Chelsio T580-LP-CR 40G [FCoE]"},
	{"T520-LL-CR 10G", "Chelsio T520-LL-CR 10G [FCoE]"},
	{"T560-CR 40G", "Chelsio T560-CR 40G [FCoE]"},
	{"T580-CR 40G", "Chelsio T580-CR 40G [FCoE]"},
	{"T580-SO 40G", "Chelsio T580-SO 40G [FCoE]"},
	{"T502-BT 1G", "Chelsio T502-BT 1G [FCoE]"}
};

static void csio_mgmtm_cleanup(struct csio_mgmtm *);
static void csio_hw_mbm_cleanup(struct csio_hw *);

/* State machine forward declarations */
static void csio_hws_uninit(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_configuring(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_initializing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_ready(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_quiescing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_quiesced(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_resetting(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_removing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_pcierr(struct csio_hw *, enum csio_hw_ev);

static void csio_hw_initialize(struct csio_hw *hw);
static void csio_evtq_stop(struct csio_hw *hw);
static void csio_evtq_start(struct csio_hw *hw);

int csio_is_hw_ready(struct csio_hw *hw)
{
	return csio_match_state(hw, csio_hws_ready);
}

int csio_is_hw_removing(struct csio_hw *hw)
{
	return csio_match_state(hw, csio_hws_removing);
}
/*
 * csio_hw_wait_op_done_val - wait until an operation is completed
 * @hw: the HW module
 * @reg: the register to check for completion
 * @mask: a single-bit field within @reg that indicates completion
 * @polarity: the value of the field when the operation is completed
 * @attempts: number of check iterations
 * @delay: delay in usecs between iterations
 * @valp: where to store the value of the register at completion time
 *
 * Wait until an operation is completed by checking a bit in a register
 * up to @attempts times. If @valp is not NULL the value of the register
 * at the time it indicated completion is stored there. Returns 0 if the
 * operation completes and -EAGAIN otherwise.
 */
int
csio_hw_wait_op_done_val(struct csio_hw *hw, int reg, uint32_t mask,
			 int polarity, int attempts, int delay, uint32_t *valp)
{
	uint32_t val;

	while (1) {
		val = csio_rd_reg32(hw, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}

		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}

/*
 * csio_hw_tp_wr_bits_indirect - set/clear bits in an indirect TP register
 * @hw: the adapter
 * @addr: the indirect TP register address
 * @mask: specifies the field within the register to modify
 * @val: new value for the field
 *
 * Sets a field of an indirect TP register to the given value.
 */
void
csio_hw_tp_wr_bits_indirect(struct csio_hw *hw, unsigned int addr,
			    unsigned int mask, unsigned int val)
{
	csio_wr_reg32(hw, addr, TP_PIO_ADDR_A);
	val |= csio_rd_reg32(hw, TP_PIO_DATA_A) & ~mask;
	csio_wr_reg32(hw, val, TP_PIO_DATA_A);
}

void
csio_set_reg_field(struct csio_hw *hw, uint32_t reg, uint32_t mask,
		   uint32_t value)
{
	uint32_t val = csio_rd_reg32(hw, reg) & ~mask;

	csio_wr_reg32(hw, val | value, reg);
	/* Flush */
	csio_rd_reg32(hw, reg);
}

static int
csio_memory_write(struct csio_hw *hw, int mtype, u32 addr, u32 len, u32 *buf)
{
	return hw->chip_ops->chip_memory_rw(hw, MEMWIN_CSIOSTOR, mtype,
					    addr, len, buf, 0);
}

/*
 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
 */
#define EEPROM_MAX_RD_POLL	40
#define EEPROM_MAX_WR_POLL	6
#define EEPROM_STAT_ADDR	0x7bfc
#define VPD_BASE		0x400
#define VPD_BASE_OLD		0
#define VPD_LEN			1024
#define VPD_INFO_FLD_HDR_SIZE	3

/*
 * csio_hw_seeprom_read - read a serial EEPROM location
 * @hw: hw to read
 * @addr: EEPROM virtual address
 * @data: where to store the read data
 *
 * Read a 32-bit word from a location in serial EEPROM using the card's PCI
 * VPD capability. Note that this function must be called with a virtual
 * address.
 */
static int
csio_hw_seeprom_read(struct csio_hw *hw, uint32_t addr, uint32_t *data)
{
	uint16_t val = 0;
	int attempts = EEPROM_MAX_RD_POLL;
	uint32_t base = hw->params.pci.vpd_cap_addr;

	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	pci_write_config_word(hw->pdev, base + PCI_VPD_ADDR, (uint16_t)addr);

	do {
		udelay(10);
		pci_read_config_word(hw->pdev, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		csio_err(hw, "reading EEPROM address 0x%x failed\n", addr);
		return -EINVAL;
	}

	pci_read_config_dword(hw->pdev, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*(__le32 *)data);

	return 0;
}

/*
 * Partial EEPROM Vital Product Data structure. Includes only the ID and
 * VPD-R sections.
 */
struct t4_vpd_hdr {
	u8  id_tag;
	u8  id_len[2];
	u8  id_data[ID_LEN];
	u8  vpdr_tag;
	u8  vpdr_len[2];
};
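/*
 * For reference: each VPD-R information field parsed below is laid out as
 * a 3-byte header followed by its value,
 *
 *	byte 0..1: keyword (e.g. "SN", "EC", "RV")
 *	byte 2   : length of the value in bytes
 *	byte 3.. : value
 *
 * which is why VPD_INFO_FLD_HDR_SIZE is 3, why the scan in
 * csio_hw_get_vpd_keyword_val() advances by VPD_INFO_FLD_HDR_SIZE +
 * buf[i + 2], and why a hit returns the offset just past the header.
 */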
/*
 * csio_hw_get_vpd_keyword_val - Locates an information field keyword in
 *				 the VPD
 * @v: Pointer to buffered vpd data structure
 * @kw: The keyword to search for
 *
 * Returns the offset of the information field keyword's value within the
 * VPD, or -EINVAL if the keyword is not found.
 */
static int
csio_hw_get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
{
	int32_t i;
	int32_t offset, len;
	const uint8_t *buf = &v->id_tag;
	const uint8_t *vpdr_len = &v->vpdr_tag;

	offset = sizeof(struct t4_vpd_hdr);
	len = (uint16_t)vpdr_len[1] + ((uint16_t)vpdr_len[2] << 8);

	if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN)
		return -EINVAL;

	for (i = offset; (i + VPD_INFO_FLD_HDR_SIZE) <= (offset + len);) {
		if (memcmp(buf + i, kw, 2) == 0) {
			i += VPD_INFO_FLD_HDR_SIZE;
			return i;
		}

		i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
	}

	return -EINVAL;
}

static int
csio_pci_capability(struct pci_dev *pdev, int cap, int *pos)
{
	*pos = pci_find_capability(pdev, cap);
	if (*pos)
		return 0;

	return -1;
}

/*
 * csio_hw_get_vpd_params - read VPD parameters from VPD EEPROM
 * @hw: HW module
 * @p: where to store the parameters
 *
 * Reads card parameters stored in VPD EEPROM.
 */
static int
csio_hw_get_vpd_params(struct csio_hw *hw, struct csio_vpd *p)
{
	int i, ret, ec, sn, addr;
	uint8_t *vpd, csum;
	const struct t4_vpd_hdr *v;
	/* To get around compilation warning from strstrip */
	char *s;

	if (csio_is_valid_vpd(hw))
		return 0;

	ret = csio_pci_capability(hw->pdev, PCI_CAP_ID_VPD,
				  &hw->params.pci.vpd_cap_addr);
	if (ret)
		return -EINVAL;

	vpd = kzalloc(VPD_LEN, GFP_ATOMIC);
	if (vpd == NULL)
		return -ENOMEM;

	/*
	 * Card information normally starts at VPD_BASE but early cards had
	 * it at 0.
	 */
	ret = csio_hw_seeprom_read(hw, VPD_BASE, (uint32_t *)(vpd));
	addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;

	for (i = 0; i < VPD_LEN; i += 4) {
		ret = csio_hw_seeprom_read(hw, addr + i, (uint32_t *)(vpd + i));
		if (ret) {
			kfree(vpd);
			return ret;
		}
	}

	/* Reset the VPD flag! */
	hw->flags &= (~CSIO_HWF_VPD_VALID);

	v = (const struct t4_vpd_hdr *)vpd;

#define FIND_VPD_KW(var, name) do { \
	var = csio_hw_get_vpd_keyword_val(v, name); \
	if (var < 0) { \
		csio_err(hw, "missing VPD keyword " name "\n"); \
		kfree(vpd); \
		return -EINVAL; \
	} \
} while (0)

	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		csio_err(hw, "corrupted VPD EEPROM, actual csum %u\n", csum);
		kfree(vpd);
		return -EINVAL;
	}
	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
#undef FIND_VPD_KW

	memcpy(p->id, v->id_data, ID_LEN);
	s = strstrip(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	s = strstrip(p->ec);
	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	s = strstrip(p->sn);

	csio_valid_vpd_copied(hw);

	kfree(vpd);
	return 0;
}
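/*
 * A note on the "RV" check above (illustrative, not a call site): the RV
 * keyword holds a checksum byte chosen so that the byte-wise sum of
 * everything from the start of the VPD up to and including the RV value
 * is zero mod 256, i.e. the loop
 *
 *	for (csum = 0; i >= 0; i--)
 *		csum += vpd[i];
 *
 * starting at the RV value offset must end with csum == 0 for the VPD to
 * be considered intact.
 */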
/*
 * csio_hw_sf1_read - read data from the serial flash
 * @hw: the HW module
 * @byte_cnt: number of bytes to read
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @valp: where to store the read data
 *
 * Reads up to 4 bytes of data from the serial flash. The location of
 * the read needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int
csio_hw_sf1_read(struct csio_hw *hw, uint32_t byte_cnt, int32_t cont,
		 int32_t lock, uint32_t *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (csio_rd_reg32(hw, SF_OP_A) & SF_BUSY_F)
		return -EBUSY;

	csio_wr_reg32(hw, SF_LOCK_V(lock) | SF_CONT_V(cont) |
		      BYTECNT_V(byte_cnt - 1), SF_OP_A);
	ret = csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS,
				       10, NULL);
	if (!ret)
		*valp = csio_rd_reg32(hw, SF_DATA_A);
	return ret;
}

/*
 * csio_hw_sf1_write - write data to the serial flash
 * @hw: the HW module
 * @byte_cnt: number of bytes to write
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @val: value to write
 *
 * Writes up to 4 bytes of data to the serial flash. The location of
 * the write needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int
csio_hw_sf1_write(struct csio_hw *hw, uint32_t byte_cnt, uint32_t cont,
		  int32_t lock, uint32_t val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (csio_rd_reg32(hw, SF_OP_A) & SF_BUSY_F)
		return -EBUSY;

	csio_wr_reg32(hw, val, SF_DATA_A);
	csio_wr_reg32(hw, SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) |
		      OP_V(1) | SF_LOCK_V(lock), SF_OP_A);

	return csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS,
					10, NULL);
}

/*
 * csio_hw_flash_wait_op - wait for a flash operation to complete
 * @hw: the HW module
 * @attempts: max number of polls of the status register
 * @delay: delay between polls in ms
 *
 * Wait for a flash operation to complete by polling the status register.
 */
static int
csio_hw_flash_wait_op(struct csio_hw *hw, int32_t attempts, int32_t delay)
{
	int ret;
	uint32_t status;

	while (1) {
		ret = csio_hw_sf1_write(hw, 1, 1, 1, SF_RD_STATUS);
		if (ret != 0)
			return ret;

		ret = csio_hw_sf1_read(hw, 1, 0, 1, &status);
		if (ret != 0)
			return ret;

		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}
/*
 * csio_hw_read_flash - read words from serial flash
 * @hw: the HW module
 * @addr: the start address for the read
 * @nwords: how many 32-bit words to read
 * @data: where to store the read data
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Read the specified number of 32-bit words from the serial flash.
 * If @byte_oriented is set the read data is stored as a byte array
 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * natural endianness.
 */
static int
csio_hw_read_flash(struct csio_hw *hw, uint32_t addr, uint32_t nwords,
		   uint32_t *data, int32_t byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(uint32_t) > hw->params.sf_size || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	ret = csio_hw_sf1_write(hw, 4, 1, 0, addr);
	if (ret != 0)
		return ret;

	ret = csio_hw_sf1_read(hw, 1, 1, 0, data);
	if (ret != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = csio_hw_sf1_read(hw, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			csio_wr_reg32(hw, 0, SF_OP_A);	/* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = (__force __u32) htonl(*data);
	}
	return 0;
}

/*
 * csio_hw_write_flash - write up to a page of data to the serial flash
 * @hw: the hw
 * @addr: the start address to write
 * @n: length of data to write in bytes
 * @data: the data to write
 *
 * Writes up to a page of data (256 bytes) to the serial flash starting
 * at the given address. All the data must be written to the same page.
 */
static int
csio_hw_write_flash(struct csio_hw *hw, uint32_t addr,
		    uint32_t n, const uint8_t *data)
{
	int ret = -EINVAL;
	uint32_t buf[64];
	uint32_t i, c, left, val, offset = addr & 0xff;

	if (addr >= hw->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE);
	if (ret != 0)
		goto unlock;

	ret = csio_hw_sf1_write(hw, 4, 1, 1, val);
	if (ret != 0)
		goto unlock;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = csio_hw_sf1_write(hw, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = csio_hw_flash_wait_op(hw, 8, 1);
	if (ret)
		goto unlock;

	csio_wr_reg32(hw, 0, SF_OP_A);	/* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = csio_hw_read_flash(hw, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (uint8_t *)buf + offset, n)) {
		csio_err(hw,
			 "failed to correctly write the flash page at %#x\n",
			 addr);
		return -EINVAL;
	}

	return 0;

unlock:
	csio_wr_reg32(hw, 0, SF_OP_A);	/* unlock SF */
	return ret;
}
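/*
 * Illustrative (assumed) usage of csio_hw_write_flash(): since a single
 * call may not cross a 256-byte page boundary, writing a larger buffer
 * means splitting it on page boundaries, roughly:
 *
 *	while (len) {
 *		n = min(len, SF_PAGE_SIZE - (addr & 0xff));
 *		if (csio_hw_write_flash(hw, addr, n, buf))
 *			break;
 *		addr += n; buf += n; len -= n;
 *	}
 *
 * csio_hw_fw_dload() below side-steps the arithmetic by writing
 * page-aligned, page-sized chunks only.
 */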
/*
 * csio_hw_flash_erase_sectors - erase a range of flash sectors
 * @hw: the HW module
 * @start: the first sector to erase
 * @end: the last sector to erase
 *
 * Erases the sectors in the given inclusive range.
 */
static int
csio_hw_flash_erase_sectors(struct csio_hw *hw, int32_t start, int32_t end)
{
	int ret = 0;

	while (start <= end) {

		ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE);
		if (ret != 0)
			goto out;

		ret = csio_hw_sf1_write(hw, 4, 0, 1,
					SF_ERASE_SECTOR | (start << 8));
		if (ret != 0)
			goto out;

		ret = csio_hw_flash_wait_op(hw, 14, 500);
		if (ret != 0)
			goto out;

		start++;
	}
out:
	if (ret)
		csio_err(hw, "erase of flash sector %d failed, error %d\n",
			 start, ret);
	csio_wr_reg32(hw, 0, SF_OP_A);	/* unlock SF */
	return ret;
}

static void
csio_hw_print_fw_version(struct csio_hw *hw, char *str)
{
	csio_info(hw, "%s: %u.%u.%u.%u\n", str,
		  FW_HDR_FW_VER_MAJOR_G(hw->fwrev),
		  FW_HDR_FW_VER_MINOR_G(hw->fwrev),
		  FW_HDR_FW_VER_MICRO_G(hw->fwrev),
		  FW_HDR_FW_VER_BUILD_G(hw->fwrev));
}

/*
 * csio_hw_get_fw_version - read the firmware version
 * @hw: HW module
 * @vers: where to place the version
 *
 * Reads the FW version from flash.
 */
static int
csio_hw_get_fw_version(struct csio_hw *hw, uint32_t *vers)
{
	return csio_hw_read_flash(hw, FLASH_FW_START +
				  offsetof(struct fw_hdr, fw_ver), 1,
				  vers, 0);
}

/*
 * csio_hw_get_tp_version - read the TP microcode version
 * @hw: HW module
 * @vers: where to place the version
 *
 * Reads the TP microcode version from flash.
 */
static int
csio_hw_get_tp_version(struct csio_hw *hw, u32 *vers)
{
	return csio_hw_read_flash(hw, FLASH_FW_START +
				  offsetof(struct fw_hdr, tp_microcode_ver), 1,
				  vers, 0);
}
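/*
 * Integrity rules that csio_hw_fw_dload() below enforces on an image
 * before flashing it (sketch): the image must be a whole number of
 * 512-byte blocks matching hdr->len512, and the sum of all of its
 * 32-bit big-endian words must be 0xffffffff:
 *
 *	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
 *		csum += ntohl(p[i]);
 *	// valid image <=> csum == 0xffffffff
 */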
/*
 * csio_hw_fw_dload - download firmware.
 * @hw: HW module
 * @fw_data: firmware image to write.
 * @size: image size
 *
 * Write the supplied firmware image to the card's serial flash.
 */
static int
csio_hw_fw_dload(struct csio_hw *hw, uint8_t *fw_data, uint32_t size)
{
	uint32_t csum;
	int32_t addr;
	int ret;
	uint32_t i;
	uint8_t first_page[SF_PAGE_SIZE];
	const __be32 *p = (const __be32 *)fw_data;
	struct fw_hdr *hdr = (struct fw_hdr *)fw_data;
	uint32_t sf_sec_size;

	if ((!hw->params.sf_size) || (!hw->params.sf_nsec)) {
		csio_err(hw, "Serial Flash data invalid\n");
		return -EINVAL;
	}

	if (!size) {
		csio_err(hw, "FW image has no data\n");
		return -EINVAL;
	}

	if (size & 511) {
		csio_err(hw, "FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}

	if (ntohs(hdr->len512) * 512 != size) {
		csio_err(hw, "FW image size differs from size in FW header\n");
		return -EINVAL;
	}

	if (size > FLASH_FW_MAX_SIZE) {
		csio_err(hw, "FW image too large, max is %u bytes\n",
			 FLASH_FW_MAX_SIZE);
		return -EINVAL;
	}

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		csio_err(hw, "corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	sf_sec_size = hw->params.sf_size / hw->params.sf_nsec;
	i = DIV_ROUND_UP(size, sf_sec_size);	/* # of sectors spanned */

	csio_dbg(hw, "Erasing sectors... start:%d end:%d\n",
		 FLASH_FW_START_SEC, FLASH_FW_START_SEC + i - 1);

	ret = csio_hw_flash_erase_sectors(hw, FLASH_FW_START_SEC,
					  FLASH_FW_START_SEC + i - 1);
	if (ret) {
		csio_err(hw, "Flash Erase failed\n");
		goto out;
	}

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails. Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = csio_hw_write_flash(hw, FLASH_FW_START, SF_PAGE_SIZE, first_page);
	if (ret)
		goto out;

	csio_dbg(hw, "Writing Flash .. start:%d end:%d\n",
		 FW_IMG_START, FW_IMG_START + size);

	addr = FLASH_FW_START;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = csio_hw_write_flash(hw, addr, SF_PAGE_SIZE, fw_data);
		if (ret)
			goto out;
	}

	ret = csio_hw_write_flash(hw,
				  FLASH_FW_START +
					offsetof(struct fw_hdr, fw_ver),
				  sizeof(hdr->fw_ver),
				  (const uint8_t *)&hdr->fw_ver);

out:
	if (ret)
		csio_err(hw, "firmware download failed, error %d\n", ret);
	return ret;
}

static int
csio_hw_get_flash_params(struct csio_hw *hw)
{
	int ret;
	uint32_t info = 0;

	ret = csio_hw_sf1_write(hw, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = csio_hw_sf1_read(hw, 3, 0, 1, &info);
	csio_wr_reg32(hw, 0, SF_OP_A);	/* unlock SF */
	if (ret != 0)
		return ret;

	if ((info & 0xff) != 0x20)	/* not a Numonix flash */
		return -EINVAL;
	info >>= 16;			/* log2 of size */
	if (info >= 0x14 && info < 0x18)
		hw->params.sf_nsec = 1 << (info - 16);
	else if (info == 0x18)
		hw->params.sf_nsec = 64;
	else
		return -EINVAL;
	hw->params.sf_size = 1 << info;

	return 0;
}
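/*
 * Worked example for csio_hw_get_flash_params() above: a Numonix ID byte
 * of 0x20 with a size field of 0x17 describes a flash of
 * 1 << 0x17 = 8 MB, organised as 1 << (0x17 - 16) = 128 sectors of
 * 64 KB each.
 */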
/*****************************************************************************/
/* HW State machine assists                                                  */
/*****************************************************************************/

static int
csio_hw_dev_ready(struct csio_hw *hw)
{
	uint32_t reg;
	int cnt = 6;

	while (((reg = csio_rd_reg32(hw, PL_WHOAMI_A)) == 0xFFFFFFFF) &&
	       (--cnt != 0))
		mdelay(100);

	if ((cnt == 0) && (((int32_t)(SOURCEPF_G(reg)) < 0) ||
			   (SOURCEPF_G(reg) >= CSIO_MAX_PFN))) {
		csio_err(hw, "PL_WHOAMI returned 0x%x, cnt:%d\n", reg, cnt);
		return -EIO;
	}

	hw->pfn = SOURCEPF_G(reg);

	return 0;
}

/*
 * csio_do_hello - Perform the HELLO FW Mailbox command and process response.
 * @hw: HW module
 * @state: Device state
 *
 * FW_HELLO_CMD has to be polled for completion.
 */
static int
csio_do_hello(struct csio_hw *hw, enum csio_dev_state *state)
{
	struct csio_mb *mbp;
	int rv = 0;
	enum fw_retval retval;
	uint8_t mpfn;
	char state_str[16];
	int retries = FW_CMD_HELLO_RETRIES;

	memset(state_str, 0, sizeof(state_str));

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		rv = -ENOMEM;
		CSIO_INC_STATS(hw, n_err_nomem);
		goto out;
	}

retry:
	csio_mb_hello(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn,
		      hw->pfn, CSIO_MASTER_MAY, NULL);

	rv = csio_mb_issue(hw, mbp);
	if (rv) {
		csio_err(hw, "failed to issue HELLO cmd. ret:%d.\n", rv);
		goto out_free_mb;
	}

	csio_mb_process_hello_rsp(hw, mbp, &retval, state, &mpfn);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "HELLO cmd failed with ret: %d\n", retval);
		rv = -EINVAL;
		goto out_free_mb;
	}

	/* Firmware has designated us to be master */
	if (hw->pfn == mpfn) {
		hw->flags |= CSIO_HWF_MASTER;
	} else if (*state == CSIO_DEV_STATE_UNINIT) {
		/*
		 * If we're not the Master PF then we need to wait around for
		 * the Master PF Driver to finish setting up the adapter.
		 *
		 * Note that we also do this wait if we're a non-Master-capable
		 * PF and there is no current Master PF; a Master PF may show up
		 * momentarily and we wouldn't want to fail pointlessly. (This
		 * can happen when an OS loads lots of different drivers rapidly
		 * at the same time). In this case, the Master PF returned by
		 * the firmware will be PCIE_FW_MASTER_MASK so the test below
		 * will work ...
		 */

		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state. If we see either of these we bail out
		 * and report the issue to the caller. If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again. Otherwise bail with a timeout error.
		 */
		for (;;) {
			uint32_t pcie_fw;

			spin_unlock_irq(&hw->lock);
			msleep(50);
			spin_lock_irq(&hw->lock);
			waiting -= 50;

			/*
			 * If neither Error nor Initialized are indicated
			 * by the firmware keep waiting till we exhaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = csio_rd_reg32(hw, PCIE_FW_A);
			if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					rv = -ETIMEDOUT;
					break;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition;
			 * report errors preferentially.
			 */
			if (state) {
				if (pcie_fw & PCIE_FW_ERR_F) {
					*state = CSIO_DEV_STATE_ERR;
					rv = -ETIMEDOUT;
				} else if (pcie_fw & PCIE_FW_INIT_F)
					*state = CSIO_DEV_STATE_INIT;
			}

			/*
			 * If we arrived before a Master PF was selected and
			 * one has since become valid, grab its identity for
			 * our caller.
			 */
			if (mpfn == PCIE_FW_MASTER_M &&
			    (pcie_fw & PCIE_FW_MASTER_VLD_F))
				mpfn = PCIE_FW_MASTER_G(pcie_fw);
			break;
		}
		hw->flags &= ~CSIO_HWF_MASTER;
	}

	switch (*state) {
	case CSIO_DEV_STATE_UNINIT:
		strcpy(state_str, "Initializing");
		break;
	case CSIO_DEV_STATE_INIT:
		strcpy(state_str, "Initialized");
		break;
	case CSIO_DEV_STATE_ERR:
		strcpy(state_str, "Error");
		break;
	default:
		strcpy(state_str, "Unknown");
		break;
	}

	if (hw->pfn == mpfn)
		csio_info(hw, "PF: %d, Coming up as MASTER, HW state: %s\n",
			  hw->pfn, state_str);
	else
		csio_info(hw,
			  "PF: %d, Coming up as SLAVE, Master PF: %d, HW state: %s\n",
			  hw->pfn, mpfn, state_str);

out_free_mb:
	mempool_free(mbp, hw->mb_mempool);
out:
	return rv;
}
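/*
 * All mailbox commands in this file follow the same synchronous pattern
 * (sketch; csio_mb_<cmd> stands for the specific csio_mb_* builder):
 *
 *	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
 *	csio_mb_<cmd>(hw, mbp, CSIO_MB_DEFAULT_TMO, ...);  // build command
 *	if (csio_mb_issue(hw, mbp))                        // post and wait
 *		goto err;
 *	retval = csio_mb_fw_retval(mbp);                   // FW's verdict
 *	mempool_free(mbp, hw->mb_mempool);
 *
 * with FW_SUCCESS mapped to 0 and anything else to a negative errno.
 */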
/*
 * csio_do_bye - Perform the BYE FW Mailbox command and process response.
 * @hw: HW module
 *
 */
static int
csio_do_bye(struct csio_hw *hw)
{
	struct csio_mb *mbp;
	enum fw_retval retval;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	csio_mb_bye(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of BYE command failed\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	mempool_free(mbp, hw->mb_mempool);

	return 0;
}

/*
 * csio_do_reset - Perform the device reset.
 * @hw: HW module
 * @fw_rst: FW reset
 *
 * If fw_rst is set, issues the RESET mailbox command to the firmware;
 * otherwise performs a PIO reset of the function.
 */
static int
csio_do_reset(struct csio_hw *hw, bool fw_rst)
{
	struct csio_mb *mbp;
	enum fw_retval retval;

	if (!fw_rst) {
		/* PIO reset */
		csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
		mdelay(2000);
		return 0;
	}

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
		      PIORSTMODE_F | PIORST_F, 0, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of RESET command failed.\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "RESET cmd failed with ret:0x%x.\n", retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	mempool_free(mbp, hw->mb_mempool);

	return 0;
}

static int
csio_hw_validate_caps(struct csio_hw *hw, struct csio_mb *mbp)
{
	struct fw_caps_config_cmd *rsp = (struct fw_caps_config_cmd *)mbp->mb;
	uint16_t caps;

	caps = ntohs(rsp->fcoecaps);

	if (!(caps & FW_CAPS_CONFIG_FCOE_INITIATOR)) {
		csio_err(hw, "No FCoE Initiator capability in the firmware.\n");
		return -EINVAL;
	}

	if (!(caps & FW_CAPS_CONFIG_FCOE_CTRL_OFLD)) {
		csio_err(hw, "No FCoE Control Offload capability\n");
		return -EINVAL;
	}

	return 0;
}
/*
 * csio_hw_fw_halt - issue a reset/halt to FW and put uP into RESET
 * @hw: the HW module
 * @mbox: mailbox to use for the FW RESET command (if desired)
 * @force: force uP into RESET even if FW RESET command fails
 *
 * Issues a RESET command to firmware (if desired) with a HALT indication
 * and then puts the microprocessor into RESET state. The RESET command
 * will only be issued if a legitimate mailbox is provided (mbox <=
 * PCIE_FW_MASTER_M).
 *
 * This is generally used in order for the host to safely manipulate the
 * adapter without fear of conflicting with whatever the firmware might
 * be doing. The only way out of this state is to RESTART the firmware
 * ...
 */
static int
csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force)
{
	enum fw_retval retval = 0;

	/*
	 * If a legitimate mailbox is provided, issue a RESET command
	 * with a HALT indication.
	 */
	if (mbox <= PCIE_FW_MASTER_M) {
		struct csio_mb *mbp;

		mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
		if (!mbp) {
			CSIO_INC_STATS(hw, n_err_nomem);
			return -ENOMEM;
		}

		csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
			      PIORSTMODE_F | PIORST_F, FW_RESET_CMD_HALT_F,
			      NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "Issue of RESET command failed!\n");
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		retval = csio_mb_fw_retval(mbp);
		mempool_free(mbp, hw->mb_mempool);
	}

	/*
	 * Normally we won't complete the operation if the firmware RESET
	 * command fails but if our caller insists we'll go ahead and put the
	 * uP into RESET. This can be useful if the firmware is hung or even
	 * missing ... We'll have to take the risk of putting the uP into
	 * RESET without the cooperation of firmware in that case.
	 *
	 * We also force the firmware's HALT flag to be on in case we bypassed
	 * the firmware RESET command above or we're dealing with old firmware
	 * which doesn't have the HALT capability. This will serve as a flag
	 * for the incoming firmware to know that it's coming out of a HALT
	 * rather than a RESET ... if it's new enough to understand that ...
	 */
	if (retval == 0 || force) {
		csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
		csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F,
				   PCIE_FW_HALT_F);
	}

	/*
	 * And we always return the result of the firmware RESET command
	 * even when we force the uP into RESET ...
	 */
	return retval ? -EINVAL : 0;
}
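/*
 * csio_hw_fw_halt() above and csio_hw_fw_restart() below are meant to be
 * used as a bracketing pair around work that must not race with running
 * firmware; csio_hw_fw_upgrade() further down is the canonical caller:
 *
 *	csio_hw_fw_halt(hw, mbox, force);	// FW halted, uP in RESET
 *	...rewrite the flash...
 *	csio_hw_fw_restart(hw, mbox, reset);	// uP taken out of RESET
 */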
/*
 * csio_hw_fw_restart - restart the firmware by taking the uP out of RESET
 * @hw: the HW module
 * @mbox: mailbox to use for the FW RESET command (if desired)
 * @reset: if we want to do a RESET to restart things
 *
 * Restart firmware previously halted by csio_hw_fw_halt(). On successful
 * return the previous PF Master remains as the new PF Master and there
 * is no need to issue a new HELLO command, etc.
 *
 * We do this in two ways:
 *
 * 1. If we're dealing with newer firmware we'll simply want to take
 *    the chip's microprocessor out of RESET. This will cause the
 *    firmware to start up from its start vector. And then we'll loop
 *    until the firmware indicates it's started again (PCIE_FW.HALT
 *    reset to 0) or we timeout.
 *
 * 2. If we're dealing with older firmware then we'll need to RESET
 *    the chip since older firmware won't recognize the PCIE_FW.HALT
 *    flag and automatically RESET itself on startup.
 */
static int
csio_hw_fw_restart(struct csio_hw *hw, uint32_t mbox, int32_t reset)
{
	if (reset) {
		/*
		 * Since we're directing the RESET instead of the firmware
		 * doing it automatically, we need to clear the PCIE_FW.HALT
		 * bit.
		 */
		csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F, 0);

		/*
		 * If we've been given a valid mailbox, first try to get the
		 * firmware to do the RESET. If that works, great and we can
		 * return success. Otherwise, if we haven't been given a
		 * valid mailbox or the RESET command failed, fall back to
		 * hitting the chip with a hammer.
		 */
		if (mbox <= PCIE_FW_MASTER_M) {
			csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0);
			msleep(100);
			if (csio_do_reset(hw, true) == 0)
				return 0;
		}

		csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
		msleep(2000);
	} else {
		int ms;

		csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0);
		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
			if (!(csio_rd_reg32(hw, PCIE_FW_A) & PCIE_FW_HALT_F))
				return 0;
			msleep(100);
			ms += 100;
		}
		return -ETIMEDOUT;
	}
	return 0;
}

/*
 * csio_hw_fw_upgrade - perform all of the steps necessary to upgrade FW
 * @hw: the HW module
 * @mbox: mailbox to use for the FW RESET command (if desired)
 * @fw_data: the firmware image to write
 * @size: image size
 * @force: force upgrade even if firmware doesn't cooperate
 *
 * Perform all of the steps necessary for upgrading an adapter's
 * firmware image. Normally this requires the cooperation of the
 * existing firmware in order to halt all existing activities
 * but if an invalid mailbox token is passed in we skip that step
 * (though we'll still put the adapter microprocessor into RESET in
 * that case).
 *
 * On successful return the new firmware will have been loaded and
 * the adapter will have been fully RESET losing all previous setup
 * state. On unsuccessful return the adapter may be completely hosed ...
 * positive errno indicates that the adapter is ~probably~ intact, a
 * negative errno indicates that things are looking bad ...
 */
static int
csio_hw_fw_upgrade(struct csio_hw *hw, uint32_t mbox,
		   const u8 *fw_data, uint32_t size, int32_t force)
{
	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
	int reset, ret;

	ret = csio_hw_fw_halt(hw, mbox, force);
	if (ret != 0 && !force)
		return ret;

	ret = csio_hw_fw_dload(hw, (uint8_t *) fw_data, size);
	if (ret != 0)
		return ret;

	/*
	 * Older versions of the firmware don't understand the new
	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
	 * restart. So for newly loaded older firmware we'll have to do the
	 * RESET for it so it starts up on a clean slate. We can tell if
	 * the newly loaded firmware will handle this right by checking
	 * its header flags to see if it advertises the capability.
	 */
	reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
	return csio_hw_fw_restart(hw, mbox, reset);
}
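/*
 * The FW_PARAM_DEV()/FW_PARAM_PFVF() values queried below are parameter
 * mnemonics packed into 32 bits with the FW_PARAMS_* field macros; for
 * example, csio_hw_check_fwconfig() later builds the device "CF"
 * parameter by hand as
 *
 *	FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
 *	FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF)
 *
 * and FW_PARAM_DEV(PORTVEC) is the same encoding with the PORTVEC index.
 */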
/*
 * csio_get_device_params - Get device parameters.
 * @hw: HW module
 *
 */
static int
csio_get_device_params(struct csio_hw *hw)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_mb *mbp;
	enum fw_retval retval;
	u32 param[6];
	int i, j = 0;

	/* Initialize portids to -1 */
	for (i = 0; i < CSIO_MAX_PPORTS; i++)
		hw->pport[i].portid = -1;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get port vec information. */
	param[0] = FW_PARAM_DEV(PORTVEC);

	/* Get Core clock. */
	param[1] = FW_PARAM_DEV(CCLK);

	/* Get EQ id start and end. */
	param[2] = FW_PARAM_PFVF(EQ_START);
	param[3] = FW_PARAM_PFVF(EQ_END);

	/* Get IQ id start and end. */
	param[4] = FW_PARAM_PFVF(IQFLINT_START);
	param[5] = FW_PARAM_PFVF(IQFLINT_END);

	csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
		       ARRAY_SIZE(param), param, NULL, false, NULL);
	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	csio_mb_process_read_params_rsp(hw, mbp, &retval,
					ARRAY_SIZE(param), param);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",
			 retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	/* cache the information. */
	hw->port_vec = param[0];
	hw->vpd.cclk = param[1];
	wrm->fw_eq_start = param[2];
	wrm->fw_iq_start = param[4];

	/* Using FW configured max iqs & eqs */
	if ((hw->flags & CSIO_HWF_USING_SOFT_PARAMS) ||
	    !csio_is_hw_master(hw)) {
		hw->cfg_niq = param[5] - param[4] + 1;
		hw->cfg_neq = param[3] - param[2] + 1;
		csio_dbg(hw, "Using fwconfig max niqs %d neqs %d\n",
			 hw->cfg_niq, hw->cfg_neq);
	}

	hw->port_vec &= csio_port_mask;

	hw->num_pports = hweight32(hw->port_vec);

	csio_dbg(hw, "Port vector: 0x%x, #ports: %d\n",
		 hw->port_vec, hw->num_pports);

	for (i = 0; i < hw->num_pports; i++) {
		while ((hw->port_vec & (1 << j)) == 0)
			j++;
		hw->pport[i].portid = j++;
		csio_dbg(hw, "Found Port:%d\n", hw->pport[i].portid);
	}
	mempool_free(mbp, hw->mb_mempool);

	return 0;
}

/*
 * csio_config_device_caps - Get and set device capabilities.
 * @hw: HW module
 *
 */
static int
csio_config_device_caps(struct csio_hw *hw)
{
	struct csio_mb *mbp;
	enum fw_retval retval;
	int rv = -EINVAL;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get device capabilities */
	csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, 0, 0, 0, 0, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(r) failed!\n");
		goto out;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_CAPS_CONFIG_CMD(r) returned %d!\n", retval);
		goto out;
	}

	/* Validate device capabilities */
	rv = csio_hw_validate_caps(hw, mbp);
	if (rv != 0)
		goto out;

	/* Don't config device capabilities if already configured */
	if (hw->fw_state == CSIO_DEV_STATE_INIT) {
		rv = 0;
		goto out;
	}

	/* Write back desired device capabilities */
	csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, true, true,
			    false, true, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(w) failed!\n");
		goto out;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_CAPS_CONFIG_CMD(w) returned %d!\n", retval);
		goto out;
	}

	rv = 0;
out:
	mempool_free(mbp, hw->mb_mempool);
	return rv;
}
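/*
 * Worked example of the port-vector decode in csio_get_device_params()
 * above: with port_vec == 0x5, hweight32() yields num_pports == 2 and
 * the bit scan assigns pport[0].portid = 0 and pport[1].portid = 2.
 */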
/*
 * csio_enable_ports - Bring up all available ports.
 * @hw: HW module.
 *
 */
static int
csio_enable_ports(struct csio_hw *hw)
{
	struct csio_mb *mbp;
	enum fw_retval retval;
	uint8_t portid;
	int i;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	for (i = 0; i < hw->num_pports; i++) {
		portid = hw->pport[i].portid;

		/* Read PORT information */
		csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid,
			     false, 0, 0, NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "failed to issue FW_PORT_CMD(r) port:%d\n",
				 portid);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		csio_mb_process_read_port_rsp(hw, mbp, &retval,
					      &hw->pport[i].pcap);
		if (retval != FW_SUCCESS) {
			csio_err(hw, "FW_PORT_CMD(r) port:%d failed: 0x%x\n",
				 portid, retval);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		/* Write back PORT information */
		csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid, true,
			     (PAUSE_RX | PAUSE_TX), hw->pport[i].pcap, NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "failed to issue FW_PORT_CMD(w) port:%d\n",
				 portid);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		retval = csio_mb_fw_retval(mbp);
		if (retval != FW_SUCCESS) {
			csio_err(hw, "FW_PORT_CMD(w) port:%d failed :0x%x\n",
				 portid, retval);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

	} /* For all ports */

	mempool_free(mbp, hw->mb_mempool);

	return 0;
}

/*
 * csio_get_fcoe_resinfo - Read fcoe fw resource info.
 * @hw: HW module
 * Issued with lock held.
 */
static int
csio_get_fcoe_resinfo(struct csio_hw *hw)
{
	struct csio_fcoe_res_info *res_info = &hw->fres_info;
	struct fw_fcoe_res_info_cmd *rsp;
	struct csio_mb *mbp;
	enum fw_retval retval;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get FCoE FW resource information */
	csio_fcoe_read_res_info_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "failed to issue FW_FCOE_RES_INFO_CMD\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	rsp = (struct fw_fcoe_res_info_cmd *)(mbp->mb);
	retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16));
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_FCOE_RES_INFO_CMD failed with ret x%x\n",
			 retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	res_info->e_d_tov = ntohs(rsp->e_d_tov);
	res_info->r_a_tov_seq = ntohs(rsp->r_a_tov_seq);
	res_info->r_a_tov_els = ntohs(rsp->r_a_tov_els);
	res_info->r_r_tov = ntohs(rsp->r_r_tov);
	res_info->max_xchgs = ntohl(rsp->max_xchgs);
	res_info->max_ssns = ntohl(rsp->max_ssns);
	res_info->used_xchgs = ntohl(rsp->used_xchgs);
	res_info->used_ssns = ntohl(rsp->used_ssns);
	res_info->max_fcfs = ntohl(rsp->max_fcfs);
	res_info->max_vnps = ntohl(rsp->max_vnps);
	res_info->used_fcfs = ntohl(rsp->used_fcfs);
	res_info->used_vnps = ntohl(rsp->used_vnps);

	csio_dbg(hw, "max ssns:%d max xchgs:%d\n", res_info->max_ssns,
		 res_info->max_xchgs);
	mempool_free(mbp, hw->mb_mempool);

	return 0;
}
static int
csio_hw_check_fwconfig(struct csio_hw *hw, u32 *param)
{
	struct csio_mb *mbp;
	enum fw_retval retval;
	u32 _param[1];

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/*
	 * Find out whether we're dealing with a version of
	 * the firmware which has configuration file support.
	 */
	_param[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));

	csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
		       ARRAY_SIZE(_param), _param, NULL, false, NULL);
	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	csio_mb_process_read_params_rsp(hw, mbp, &retval,
					ARRAY_SIZE(_param), _param);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",
			 retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	mempool_free(mbp, hw->mb_mempool);
	*param = _param[0];

	return 0;
}

static int
csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
{
	int ret = 0;
	const struct firmware *cf;
	struct pci_dev *pci_dev = hw->pdev;
	struct device *dev = &pci_dev->dev;
	unsigned int mtype = 0, maddr = 0;
	uint32_t *cfg_data;
	int value_to_add = 0;

	if (request_firmware(&cf, FW_CFG_NAME_T5, dev) < 0) {
		csio_err(hw, "could not find config file %s, err: %d\n",
			 FW_CFG_NAME_T5, ret);
		return -ENOENT;
	}

	if (cf->size % 4 != 0)
		value_to_add = 4 - (cf->size % 4);

	cfg_data = kzalloc(cf->size + value_to_add, GFP_KERNEL);
	if (cfg_data == NULL) {
		ret = -ENOMEM;
		goto leave;
	}

	memcpy((void *)cfg_data, (const void *)cf->data, cf->size);
	if (csio_hw_check_fwconfig(hw, fw_cfg_param) != 0) {
		ret = -EINVAL;
		goto leave;
	}

	mtype = FW_PARAMS_PARAM_Y_G(*fw_cfg_param);
	maddr = FW_PARAMS_PARAM_Z_G(*fw_cfg_param) << 16;

	ret = csio_memory_write(hw, mtype, maddr,
				cf->size + value_to_add, cfg_data);

	if ((ret == 0) && (value_to_add != 0)) {
		union {
			u32 word;
			char buf[4];
		} last;
		size_t size = cf->size & ~0x3;
		int i;

		last.word = cfg_data[size >> 2];
		for (i = value_to_add; i < 4; i++)
			last.buf[i] = 0;
		ret = csio_memory_write(hw, mtype, maddr + size, 4, &last.word);
	}
	if (ret == 0) {
		csio_info(hw, "config file upgraded to %s\n",
			  FW_CFG_NAME_T5);
		snprintf(path, 64, "%s%s", "/lib/firmware/", FW_CFG_NAME_T5);
	}

leave:
	kfree(cfg_data);
	release_firmware(cf);
	return ret;
}

/*
 * HW initialization: contact FW, obtain config, perform basic init.
 *
 * If the firmware we're dealing with has Configuration File support, then
 * we use that to perform all configuration -- either using the configuration
 * file stored in flash on the adapter or using a filesystem-local file
 * if available.
 *
 * If we don't have configuration file support in the firmware, then we'll
 * have to set things up the old fashioned way with hard-coded register
 * writes and firmware commands ...
 */
/*
 * Attempt to initialize the HW via a Firmware Configuration File.
 */
static int
csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
{
	struct csio_mb *mbp = NULL;
	struct fw_caps_config_cmd *caps_cmd;
	unsigned int mtype, maddr;
	int rv = -EINVAL;
	uint32_t finiver = 0, finicsum = 0, cfcsum = 0;
	char path[64];
	char *config_name = NULL;

	/*
	 * Reset device if necessary
	 */
	if (reset) {
		rv = csio_do_reset(hw, true);
		if (rv != 0)
			goto bye;
	}

	/*
	 * If we have a configuration file in the host, use that. Otherwise,
	 * use the configuration file stored in the HW flash ...
	 */
	spin_unlock_irq(&hw->lock);
	rv = csio_hw_flash_config(hw, fw_cfg_param, path);
	spin_lock_irq(&hw->lock);
	if (rv != 0) {
		/*
		 * config file was not found. Use default
		 * config file from flash.
		 */
		config_name = "On FLASH";
		mtype = FW_MEMTYPE_CF_FLASH;
		maddr = hw->chip_ops->chip_flash_cfg_addr(hw);
	} else {
		config_name = path;
		mtype = FW_PARAMS_PARAM_Y_G(*fw_cfg_param);
		maddr = FW_PARAMS_PARAM_Z_G(*fw_cfg_param) << 16;
	}

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}
	/*
	 * Tell the firmware to process the indicated Configuration File.
	 * If there are no errors and the caller has provided return value
	 * pointers for the [fini] section version, checksum and computed
	 * checksum, pass those back to the caller.
	 */
	caps_cmd = (struct fw_caps_config_cmd *)(mbp->mb);
	CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1);
	caps_cmd->op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_READ_F);
	caps_cmd->cfvalid_to_len16 =
		htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
		      FW_LEN16(*caps_cmd));

	if (csio_mb_issue(hw, mbp)) {
		rv = -EINVAL;
		goto bye;
	}

	rv = csio_mb_fw_retval(mbp);
	/* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
	 * Configuration File in FLASH), our last gasp effort is to use the
	 * Firmware Configuration File which is embedded in the
	 * firmware. A very few early versions of the firmware didn't
	 * have one embedded but we can ignore those.
	 */
	if (rv == ENOENT) {
		CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1);
		caps_cmd->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
					      FW_CMD_REQUEST_F |
					      FW_CMD_READ_F);
		caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd));

		if (csio_mb_issue(hw, mbp)) {
			rv = -EINVAL;
			goto bye;
		}

		rv = csio_mb_fw_retval(mbp);
		config_name = "Firmware Default";
	}
	if (rv != FW_SUCCESS)
		goto bye;

	finiver = ntohl(caps_cmd->finiver);
	finicsum = ntohl(caps_cmd->finicsum);
	cfcsum = ntohl(caps_cmd->cfcsum);

	/*
	 * And now tell the firmware to use the configuration we just loaded.
	 */
	caps_cmd->op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_WRITE_F);
	caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd));

	if (csio_mb_issue(hw, mbp)) {
		rv = -EINVAL;
		goto bye;
	}

	rv = csio_mb_fw_retval(mbp);
	if (rv != FW_SUCCESS) {
		csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", rv);
		goto bye;
	}

	if (finicsum != cfcsum) {
		csio_warn(hw,
			  "Config File checksum mismatch: csum=%#x, computed=%#x\n",
			  finicsum, cfcsum);
	}

	/* Validate device capabilities */
	rv = csio_hw_validate_caps(hw, mbp);
	if (rv != 0)
		goto bye;

	mempool_free(mbp, hw->mb_mempool);
	mbp = NULL;

	/*
	 * Note that we're operating with parameters supplied by the
	 * firmware's Configuration File, rather than from hard-wired
	 * initialization constants buried in the driver.
	 */
	hw->flags |= CSIO_HWF_USING_SOFT_PARAMS;

	/* device parameters */
	rv = csio_get_device_params(hw);
	if (rv != 0)
		goto bye;

	/* Configure SGE */
	csio_wr_sge_init(hw);

	/*
	 * And finally tell the firmware to initialize itself using the
	 * parameters from the Configuration File.
	 */
	/* Post event to notify completion of configuration */
	csio_post_event(&hw->sm, CSIO_HWE_INIT);

	csio_info(hw, "Successfully configured using Firmware "
		  "Configuration File %s, version %#x, computed checksum %#x\n",
		  config_name, finiver, cfcsum);
	return 0;

	/*
	 * Something bad happened. Return the error ...
	 */
bye:
	if (mbp)
		mempool_free(mbp, hw->mb_mempool);
	hw->flags &= ~CSIO_HWF_USING_SOFT_PARAMS;
	csio_warn(hw, "Configuration file error %d\n", rv);
	return rv;
}

/* Is the given firmware API compatible with the one the driver was compiled
 * with?
 */
static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
{

	/* short circuit if it's the exact same firmware version */
	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
		return 1;

#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
	    SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
		return 1;
#undef SAME_INTF

	return 0;
}
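/*
 * Example (version numbers illustrative): a card running 1.16.63.0 and a
 * driver compiled against 1.16.33.0 are considered compatible by
 * fw_compatible() as long as both are for the same chip and export
 * identical nic/vnic/ri/iscsi/fcoe interface versions; an exact fw_ver
 * match short-circuits the interface comparison entirely.
 */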
/* The firmware in the filesystem is usable, but should it be installed?
 * This routine explains itself in detail if it indicates the filesystem
 * firmware should be installed.
 */
static int csio_should_install_fs_fw(struct csio_hw *hw, int card_fw_usable,
				     int k, int c)
{
	const char *reason;

	if (!card_fw_usable) {
		reason = "incompatible or unusable";
		goto install;
	}

	if (k > c) {
		reason = "older than the version supported with this driver";
		goto install;
	}

	return 0;

install:
	csio_err(hw, "firmware on card (%u.%u.%u.%u) is %s, "
		 "installing firmware %u.%u.%u.%u on card.\n",
		 FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
		 FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason,
		 FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
		 FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));

	return 1;
}

static struct fw_info fw_info_array[] = {
	{
		.chip = CHELSIO_T5,
		.fs_name = FW_CFG_NAME_T5,
		.fw_mod_name = FW_FNAME_T5,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T5,
			.fw_ver = __cpu_to_be32(FW_VERSION(T5)),
			.intfver_nic = FW_INTFVER(T5, NIC),
			.intfver_vnic = FW_INTFVER(T5, VNIC),
			.intfver_ri = FW_INTFVER(T5, RI),
			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
			.intfver_fcoe = FW_INTFVER(T5, FCOE),
		},
	}
};

static struct fw_info *find_fw_info(int chip)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
		if (fw_info_array[i].chip == chip)
			return &fw_info_array[i];
	}
	return NULL;
}
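/*
 * Decision summary for csio_hw_prep_fw() below (illustrative):
 *
 *	card FW usable and an exact driver match, filesystem FW absent
 *	or also an exact match			-> keep the card's FW
 *	filesystem FW usable, device still uninitialized and
 *	csio_should_install_fs_fw() agrees	-> flash the filesystem FW
 *	nothing usable				-> fail with EINVAL
 */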
be32_to_cpu(fs_fw->fw_ver) : 0; 1964
1965 csio_err(hw, "Cannot find a usable firmware: " 1966 "chip state %d, " 1967 "driver compiled with %d.%d.%d.%d, " 1968 "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n", 1969 state, 1970 FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d), 1971 FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d), 1972 FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c), 1973 FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), 1974 FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k), 1975 FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k)); 1976 ret = -EINVAL; 1977 goto bye; 1978 } 1979
1980 /* We're using whatever's on the card and it's known to be good. */ 1981 hw->fwrev = be32_to_cpu(card_fw->fw_ver); 1982 hw->tp_vers = be32_to_cpu(card_fw->tp_microcode_ver); 1983
1984 bye: 1985 return ret; 1986 } 1987
1988 /* 1989 * Returns 0 on success, or -EINVAL if an attempt to flash the 1990 * firmware failed. 1991 * If flashing was not attempted because the card already had the 1992 * latest firmware, -ECANCELED is returned. 1993 */ 1994 static int 1995 csio_hw_flash_fw(struct csio_hw *hw, int *reset) 1996 { 1997 int ret = -ECANCELED; 1998 const struct firmware *fw; 1999 struct fw_info *fw_info; 2000 struct fw_hdr *card_fw; 2001 struct pci_dev *pci_dev = hw->pdev; 2002 struct device *dev = &pci_dev->dev; 2003 const u8 *fw_data = NULL; 2004 unsigned int fw_size = 0; 2005
2006 /* This is the firmware whose headers the driver was compiled 2007 * against 2008 */ 2009 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(hw->chip_id)); 2010 if (fw_info == NULL) { 2011 csio_err(hw, 2012 "unable to get firmware info for chip %d.\n", 2013 CHELSIO_CHIP_VERSION(hw->chip_id)); 2014 return -EINVAL; 2015 } 2016
2017 ret = request_firmware(&fw, FW_FNAME_T5, dev); if (ret < 0) { 2018 csio_err(hw, "could not find firmware image %s, err: %d\n", 2019 FW_FNAME_T5, ret); 2020 } else { 2021 fw_data = fw->data; 2022 fw_size = fw->size; 2023 } 2024
2025 /* allocate memory to read the header of the firmware on the 2026 * card 2027 */ 2028 card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL); if (!card_fw) { ret = -ENOMEM; goto out; } 2029
2030 /* upgrade FW logic */ 2031 ret = csio_hw_prep_fw(hw, fw_info, fw_data, fw_size, card_fw, 2032 hw->fw_state, reset); 2033
2034 out: /* Cleaning up */ 2035 if (fw != NULL) 2036 release_firmware(fw); 2037 kfree(card_fw); 2038 return ret; 2039 } 2040
2041 /* 2042 * csio_hw_configure - Configure HW 2043 * @hw - HW module 2044 * 2045 */ 2046 static void 2047 csio_hw_configure(struct csio_hw *hw) 2048 { 2049 int reset = 1; 2050 int rv; 2051 u32 param[1]; 2052
2053 rv = csio_hw_dev_ready(hw); 2054 if (rv != 0) { 2055 CSIO_INC_STATS(hw, n_err_fatal); 2056 csio_post_event(&hw->sm, CSIO_HWE_FATAL); 2057 goto out; 2058 } 2059
2060 /* HW version */ 2061 hw->chip_ver = (char)csio_rd_reg32(hw, PL_REV_A); 2062
2063 /* Needed for FW download */ 2064 rv = csio_hw_get_flash_params(hw); 2065 if (rv != 0) { 2066 csio_err(hw, "Failed to get serial flash params rv:%d\n", rv); 2067 csio_post_event(&hw->sm, CSIO_HWE_FATAL); 2068 goto out; 2069 } 2070
2071 /* Set PCIe completion timeout to 4 seconds */ 2072 if (pci_is_pcie(hw->pdev)) 2073 pcie_capability_clear_and_set_word(hw->pdev, PCI_EXP_DEVCTL2, 2074 PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd); 2075
2076 hw->chip_ops->chip_set_mem_win(hw, MEMWIN_CSIOSTOR); 2077 2078 rv = csio_hw_get_fw_version(hw, &hw->fwrev); 2079 if (rv != 0) 2080 goto out; 2081
2082 csio_hw_print_fw_version(hw, "Firmware revision"); 2083 2084 rv = csio_do_hello(hw, &hw->fw_state); 2085 if (rv != 0) { 2086 CSIO_INC_STATS(hw, n_err_fatal); 2087
csio_post_event(&hw->sm, CSIO_HWE_FATAL); 2088 goto out; 2089 } 2090
2091 /* Read vpd */ 2092 rv = csio_hw_get_vpd_params(hw, &hw->vpd); 2093 if (rv != 0) 2094 goto out; 2095
2096 csio_hw_get_fw_version(hw, &hw->fwrev); 2097 csio_hw_get_tp_version(hw, &hw->tp_vers); 2098 if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) { 2099 2100 /* Do firmware update */ 2101 spin_unlock_irq(&hw->lock); 2102 rv = csio_hw_flash_fw(hw, &reset); 2103 spin_lock_irq(&hw->lock); 2104 2105 if (rv != 0) 2106 goto out; 2107
2108 /* If the firmware doesn't support Configuration Files, 2109 * return an error. 2110 */ 2111 rv = csio_hw_check_fwconfig(hw, param); 2112 if (rv != 0) { 2113 csio_info(hw, "Firmware doesn't support " 2114 "Firmware Configuration files\n"); 2115 goto out; 2116 } 2117
2118 /* The firmware provides us with a memory buffer where we can 2119 * load a Configuration File from the host if we want to 2120 * override the Configuration File in flash. 2121 */ 2122 rv = csio_hw_use_fwconfig(hw, reset, param); 2123 if (rv != 0) { 2124 csio_info(hw, "Could not initialize " 2125 "adapter, error %d\n", rv); 2126 goto out; 2127 } 2128
2134 } else { 2135 if (hw->fw_state == CSIO_DEV_STATE_INIT) { 2136 2137 hw->flags |= CSIO_HWF_USING_SOFT_PARAMS; 2138 2139 /* device parameters */ 2140 rv = csio_get_device_params(hw); 2141 if (rv != 0) 2142 goto out; 2143 2144 /* Get device capabilities */ 2145 rv = csio_config_device_caps(hw); 2146 if (rv != 0) 2147 goto out; 2148 2149 /* Configure SGE */ 2150 csio_wr_sge_init(hw); 2151 2152 /* Post event to notify completion of configuration */ 2153 csio_post_event(&hw->sm, CSIO_HWE_INIT); 2154 goto out; 2155 } 2156 } /* not master, or FW already initialized */ 2157
2158 out: 2159 return; 2160 } 2161
2162 /* 2163 * csio_hw_initialize - Initialize HW 2164 * @hw - HW module 2165 * 2166 */ 2167 static void 2168 csio_hw_initialize(struct csio_hw *hw) 2169 { 2170 struct csio_mb *mbp; 2171 enum fw_retval retval; 2172 int rv; 2173 int i; 2174
2175 if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) { 2176 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 2177 if (!mbp) 2178 goto out; 2179 2180 csio_mb_initialize(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL); 2181 2182 if (csio_mb_issue(hw, mbp)) { 2183 csio_err(hw, "Issue of FW_INITIALIZE_CMD failed!\n"); 2184 goto free_and_out; 2185 } 2186 2187 retval = csio_mb_fw_retval(mbp); 2188 if (retval != FW_SUCCESS) { 2189 csio_err(hw, "FW_INITIALIZE_CMD returned 0x%x!\n", 2190 retval); 2191 goto free_and_out; 2192 } 2193 2194 mempool_free(mbp, hw->mb_mempool); 2195 } 2196
2197 rv = csio_get_fcoe_resinfo(hw); 2198 if (rv != 0) { 2199 csio_err(hw, "Failed to read fcoe resource info: %d\n", rv); 2200 goto out; 2201 } 2202
2203 spin_unlock_irq(&hw->lock); 2204 rv = csio_config_queues(hw); 2205 spin_lock_irq(&hw->lock); 2206 2207 if (rv != 0) { 2208 csio_err(hw, "Config of queues failed!: %d\n", rv); 2209 goto out; 2210 } 2211
2212 for (i = 0; i < hw->num_pports; i++) 2213 hw->pport[i].mod_type = FW_PORT_MOD_TYPE_NA; 2214
2215 if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) { 2216 rv = csio_enable_ports(hw); 2217 if (rv != 0) { 2218 csio_err(hw, "Failed to enable ports: %d\n", rv); 2219 goto out; 2220 } 2221 } 2222
2223 csio_post_event(&hw->sm, CSIO_HWE_INIT_DONE); 2224 return; 2225
2226 free_and_out: 2227 mempool_free(mbp, hw->mb_mempool); 2228 out: 2229 return; 2230 } 2231
2232 #define PF_INTR_MASK (PFSW_F
| PFCIM_F) 2233 2234 /* 2235 * csio_hw_intr_enable - Enable HW interrupts 2236 * @hw: Pointer to HW module. 2237 * 2238 * Enable interrupts in HW registers. 2239 */ 2240 static void 2241 csio_hw_intr_enable(struct csio_hw *hw) 2242 { 2243 uint16_t vec = (uint16_t)csio_get_mb_intr_idx(csio_hw_to_mbm(hw)); 2244 uint32_t pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A)); 2245 uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE_A); 2246 2247 /* 2248 * Set aivec for MSI/MSIX. PCIE_PF_CFG.INTXType is set up 2249 * by FW, so do nothing for INTX. 2250 */ 2251 if (hw->intr_mode == CSIO_IM_MSIX) 2252 csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG_A), 2253 AIVEC_V(AIVEC_M), vec); 2254 else if (hw->intr_mode == CSIO_IM_MSI) 2255 csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG_A), 2256 AIVEC_V(AIVEC_M), 0); 2257 2258 csio_wr_reg32(hw, PF_INTR_MASK, MYPF_REG(PL_PF_INT_ENABLE_A)); 2259 2260 /* Turn on MB interrupts - this will internally flush PIO as well */ 2261 csio_mb_intr_enable(hw); 2262 2263 /* These are common registers - only a master can modify them */ 2264 if (csio_is_hw_master(hw)) { 2265 /* 2266 * Disable the Serial FLASH interrupt, if enabled! 2267 */ 2268 pl &= (~SF_F); 2269 csio_wr_reg32(hw, pl, PL_INT_ENABLE_A); 2270 2271 csio_wr_reg32(hw, ERR_CPL_EXCEED_IQE_SIZE_F | 2272 EGRESS_SIZE_ERR_F | ERR_INVALID_CIDX_INC_F | 2273 ERR_CPL_OPCODE_0_F | ERR_DROPPED_DB_F | 2274 ERR_DATA_CPL_ON_HIGH_QID1_F | 2275 ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F | 2276 ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F | 2277 ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F | 2278 ERR_EGR_CTXT_PRIO_F | INGRESS_SIZE_ERR_F, 2279 SGE_INT_ENABLE3_A); 2280 csio_set_reg_field(hw, PL_INT_MAP0_A, 0, 1 << pf); 2281 } 2282 2283 hw->flags |= CSIO_HWF_HW_INTR_ENABLED; 2284 2285 } 2286 2287 /* 2288 * csio_hw_intr_disable - Disable HW interrupts 2289 * @hw: Pointer to HW module. 2290 * 2291 * Turn off Mailbox and PCI_PF_CFG interrupts. 
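 *
 * Usage sketch (illustrative only, no new API): enable/disable bracket
 * the interrupt path and are guarded by CSIO_HWF_HW_INTR_ENABLED, so a
 * disable without a prior enable is a harmless no-op:
 *
 *	csio_hw_intr_enable(hw);	- on CSIO_HWE_INIT_DONE
 *	...
 *	csio_hw_intr_disable(hw);	- on quiesce, removal or fatal error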
2292 */ 2293 void 2294 csio_hw_intr_disable(struct csio_hw *hw) 2295 { 2296 uint32_t pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A)); 2297
2298 if (!(hw->flags & CSIO_HWF_HW_INTR_ENABLED)) 2299 return; 2300
2301 hw->flags &= ~CSIO_HWF_HW_INTR_ENABLED; 2302
2303 csio_wr_reg32(hw, 0, MYPF_REG(PL_PF_INT_ENABLE_A)); 2304 if (csio_is_hw_master(hw)) 2305 csio_set_reg_field(hw, PL_INT_MAP0_A, 1 << pf, 0); 2306
2307 /* Turn off MB interrupts */ 2308 csio_mb_intr_disable(hw); 2309 2310 } 2311
2312 void 2313 csio_hw_fatal_err(struct csio_hw *hw) 2314 { 2315 csio_set_reg_field(hw, SGE_CONTROL_A, GLOBALENABLE_F, 0); 2316 csio_hw_intr_disable(hw); 2317 2318 /* Do not reset HW, we may need FW state for debugging */ 2319 csio_fatal(hw, "HW Fatal error encountered!\n"); 2320 } 2321
2322 /*****************************************************************************/ 2323 /* START: HW SM */ 2324 /*****************************************************************************/ 2325 /* 2326 * csio_hws_uninit - Uninit state 2327 * @hw - HW module 2328 * @evt - Event 2329 * 2330 */ 2331 static void 2332 csio_hws_uninit(struct csio_hw *hw, enum csio_hw_ev evt) 2333 { 2334 hw->prev_evt = hw->cur_evt; 2335 hw->cur_evt = evt; 2336 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2337
2338 switch (evt) { 2339 case CSIO_HWE_CFG: 2340 csio_set_state(&hw->sm, csio_hws_configuring); 2341 csio_hw_configure(hw); 2342 break; 2343 2344 default: 2345 CSIO_INC_STATS(hw, n_evt_unexp); 2346 break; 2347 } 2348 } 2349
2350 /* 2351 * csio_hws_configuring - Configuring state 2352 * @hw - HW module 2353 * @evt - Event 2354 * 2355 */ 2356 static void 2357 csio_hws_configuring(struct csio_hw *hw, enum csio_hw_ev evt) 2358 { 2359 hw->prev_evt = hw->cur_evt; 2360 hw->cur_evt = evt; 2361 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2362
2363 switch (evt) { 2364 case CSIO_HWE_INIT: 2365 csio_set_state(&hw->sm, csio_hws_initializing); 2366 csio_hw_initialize(hw); 2367 break; 2368 2369 case CSIO_HWE_INIT_DONE: 2370 csio_set_state(&hw->sm, csio_hws_ready); 2371 /* Fan out event to all lnode SMs */ 2372 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY); 2373 break; 2374 2375 case CSIO_HWE_FATAL: 2376 csio_set_state(&hw->sm, csio_hws_uninit); 2377 break; 2378 2379 case CSIO_HWE_PCI_REMOVE: 2380 csio_do_bye(hw); 2381 break; 2382 default: 2383 CSIO_INC_STATS(hw, n_evt_unexp); 2384 break; 2385 } 2386 } 2387
2388 /* 2389 * csio_hws_initializing - Initializing state 2390 * @hw - HW module 2391 * @evt - Event 2392 * 2393 */ 2394 static void 2395 csio_hws_initializing(struct csio_hw *hw, enum csio_hw_ev evt) 2396 { 2397 hw->prev_evt = hw->cur_evt; 2398 hw->cur_evt = evt; 2399 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2400
2401 switch (evt) { 2402 case CSIO_HWE_INIT_DONE: 2403 csio_set_state(&hw->sm, csio_hws_ready); 2404 2405 /* Fan out event to all lnode SMs */ 2406 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY); 2407 2408 /* Enable interrupts */ 2409 csio_hw_intr_enable(hw); 2410 break; 2411 2412 case CSIO_HWE_FATAL: 2413 csio_set_state(&hw->sm, csio_hws_uninit); 2414 break; 2415 2416 case CSIO_HWE_PCI_REMOVE: 2417 csio_do_bye(hw); 2418 break; 2419 2420 default: 2421 CSIO_INC_STATS(hw, n_evt_unexp); 2422 break; 2423 } 2424 } 2425
2426 /* 2427 * csio_hws_ready - Ready state 2428 * @hw - HW module 2429 * @evt - Event 2430 * 2431 */ 2432 static void 2433 csio_hws_ready(struct csio_hw *hw, enum csio_hw_ev evt) 2434 { 2435 /* Remember the event */ 2436 hw->evtflag = evt; 2437
2438 hw->prev_evt = hw->cur_evt; 2439 hw->cur_evt = evt; 2440 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2441 2442 switch
(evt) { 2443 case CSIO_HWE_HBA_RESET: 2444 case CSIO_HWE_FW_DLOAD: 2445 case CSIO_HWE_SUSPEND: 2446 case CSIO_HWE_PCI_REMOVE: 2447 case CSIO_HWE_PCIERR_DETECTED: 2448 csio_set_state(&hw->sm, csio_hws_quiescing); 2449 /* cleanup all outstanding cmds */ 2450 if (evt == CSIO_HWE_HBA_RESET || 2451 evt == CSIO_HWE_PCIERR_DETECTED) 2452 csio_scsim_cleanup_io(csio_hw_to_scsim(hw), false); 2453 else 2454 csio_scsim_cleanup_io(csio_hw_to_scsim(hw), true); 2455 2456 csio_hw_intr_disable(hw); 2457 csio_hw_mbm_cleanup(hw); 2458 csio_evtq_stop(hw); 2459 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWSTOP); 2460 csio_evtq_flush(hw); 2461 csio_mgmtm_cleanup(csio_hw_to_mgmtm(hw)); 2462 csio_post_event(&hw->sm, CSIO_HWE_QUIESCED); 2463 break; 2464 2465 case CSIO_HWE_FATAL: 2466 csio_set_state(&hw->sm, csio_hws_uninit); 2467 break; 2468 2469 default: 2470 CSIO_INC_STATS(hw, n_evt_unexp); 2471 break; 2472 } 2473 } 2474 2475 /* 2476 * csio_hws_quiescing - Quiescing state 2477 * @hw - HW module 2478 * @evt - Event 2479 * 2480 */ 2481 static void 2482 csio_hws_quiescing(struct csio_hw *hw, enum csio_hw_ev evt) 2483 { 2484 hw->prev_evt = hw->cur_evt; 2485 hw->cur_evt = evt; 2486 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2487 2488 switch (evt) { 2489 case CSIO_HWE_QUIESCED: 2490 switch (hw->evtflag) { 2491 case CSIO_HWE_FW_DLOAD: 2492 csio_set_state(&hw->sm, csio_hws_resetting); 2493 /* Download firmware */ 2494 /* Fall through */ 2495 2496 case CSIO_HWE_HBA_RESET: 2497 csio_set_state(&hw->sm, csio_hws_resetting); 2498 /* Start reset of the HBA */ 2499 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWRESET); 2500 csio_wr_destroy_queues(hw, false); 2501 csio_do_reset(hw, false); 2502 csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET_DONE); 2503 break; 2504 2505 case CSIO_HWE_PCI_REMOVE: 2506 csio_set_state(&hw->sm, csio_hws_removing); 2507 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREMOVE); 2508 csio_wr_destroy_queues(hw, true); 2509 /* Now send the bye command */ 2510 csio_do_bye(hw); 2511 break; 2512 2513 case CSIO_HWE_SUSPEND: 2514 csio_set_state(&hw->sm, csio_hws_quiesced); 2515 break; 2516 2517 case CSIO_HWE_PCIERR_DETECTED: 2518 csio_set_state(&hw->sm, csio_hws_pcierr); 2519 csio_wr_destroy_queues(hw, false); 2520 break; 2521 2522 default: 2523 CSIO_INC_STATS(hw, n_evt_unexp); 2524 break; 2525 2526 } 2527 break; 2528 2529 default: 2530 CSIO_INC_STATS(hw, n_evt_unexp); 2531 break; 2532 } 2533 } 2534 2535 /* 2536 * csio_hws_quiesced - Quiesced state 2537 * @hw - HW module 2538 * @evt - Event 2539 * 2540 */ 2541 static void 2542 csio_hws_quiesced(struct csio_hw *hw, enum csio_hw_ev evt) 2543 { 2544 hw->prev_evt = hw->cur_evt; 2545 hw->cur_evt = evt; 2546 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2547 2548 switch (evt) { 2549 case CSIO_HWE_RESUME: 2550 csio_set_state(&hw->sm, csio_hws_configuring); 2551 csio_hw_configure(hw); 2552 break; 2553 2554 default: 2555 CSIO_INC_STATS(hw, n_evt_unexp); 2556 break; 2557 } 2558 } 2559 2560 /* 2561 * csio_hws_resetting - HW Resetting state 2562 * @hw - HW module 2563 * @evt - Event 2564 * 2565 */ 2566 static void 2567 csio_hws_resetting(struct csio_hw *hw, enum csio_hw_ev evt) 2568 { 2569 hw->prev_evt = hw->cur_evt; 2570 hw->cur_evt = evt; 2571 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2572 2573 switch (evt) { 2574 case CSIO_HWE_HBA_RESET_DONE: 2575 csio_evtq_start(hw); 2576 csio_set_state(&hw->sm, csio_hws_configuring); 2577 csio_hw_configure(hw); 2578 break; 2579 2580 default: 2581 CSIO_INC_STATS(hw, n_evt_unexp); 2582 break; 2583 } 2584 } 2585 2586 /* 2587 * csio_hws_removing - PCI Hotplug removing state 2588 * @hw 
- HW module 2589 * @evt - Event 2590 * 2591 */ 2592 static void 2593 csio_hws_removing(struct csio_hw *hw, enum csio_hw_ev evt) 2594 { 2595 hw->prev_evt = hw->cur_evt; 2596 hw->cur_evt = evt; 2597 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2598
2599 switch (evt) { 2600 case CSIO_HWE_HBA_RESET: 2601 if (!csio_is_hw_master(hw)) 2602 break; 2603 /* 2604 * The BYE should have already been issued, so we can't 2605 * use the mailbox interface. Hence we use the PL_RST 2606 * register directly. 2607 */ 2608 csio_err(hw, "Resetting HW and waiting 2 seconds...\n"); 2609 csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A); 2610 mdelay(2000); 2611 break; 2612
2613 /* Should never receive any new events */ 2614 default: 2615 CSIO_INC_STATS(hw, n_evt_unexp); 2616 break; 2617 2618 } 2619 } 2620
2621 /* 2622 * csio_hws_pcierr - PCI Error state 2623 * @hw - HW module 2624 * @evt - Event 2625 * 2626 */ 2627 static void 2628 csio_hws_pcierr(struct csio_hw *hw, enum csio_hw_ev evt) 2629 { 2630 hw->prev_evt = hw->cur_evt; 2631 hw->cur_evt = evt; 2632 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2633
2634 switch (evt) { 2635 case CSIO_HWE_PCIERR_SLOT_RESET: 2636 csio_evtq_start(hw); 2637 csio_set_state(&hw->sm, csio_hws_configuring); 2638 csio_hw_configure(hw); 2639 break; 2640 2641 default: 2642 CSIO_INC_STATS(hw, n_evt_unexp); 2643 break; 2644 } 2645 } 2646
2647 /*****************************************************************************/ 2648 /* END: HW SM */ 2649 /*****************************************************************************/ 2650
2651 /* 2652 * csio_handle_intr_status - table-driven interrupt handler 2653 * @hw: HW instance 2654 * @reg: the interrupt status register to process 2655 * @acts: table of interrupt actions 2656 * 2657 * A table-driven interrupt handler that applies a set of masks to an 2658 * interrupt status word and performs the corresponding actions if the 2659 * interrupts described by the mask have occurred. The actions include 2660 * optionally emitting a warning or alert message. The table is terminated 2661 * by an entry specifying mask 0. Returns the number of fatal interrupt 2662 * conditions. 2663 */ 2664 int 2665 csio_handle_intr_status(struct csio_hw *hw, unsigned int reg, 2666 const struct intr_info *acts) 2667 { 2668 int fatal = 0; 2669 unsigned int mask = 0; 2670 unsigned int status = csio_rd_reg32(hw, reg); 2671
2672 for ( ; acts->mask; ++acts) { 2673 if (!(status & acts->mask)) 2674 continue; 2675 if (acts->fatal) { 2676 fatal++; 2677 csio_fatal(hw, "Fatal %s (0x%x)\n", 2678 acts->msg, status & acts->mask); 2679 } else if (acts->msg) 2680 csio_info(hw, "%s (0x%x)\n", 2681 acts->msg, status & acts->mask); 2682 mask |= acts->mask; 2683 } 2684 status &= mask; 2685 if (status) /* clear processed interrupts */ 2686 csio_wr_reg32(hw, status, reg); 2687 return fatal; 2688 } 2689
2690 /* 2691 * TP interrupt handler. 2692 */ 2693 static void csio_tp_intr_handler(struct csio_hw *hw) 2694 { 2695 static struct intr_info tp_intr_info[] = { 2696 { 0x3fffffff, "TP parity error", -1, 1 }, 2697 { FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 }, 2698 { 0, NULL, 0, 0 } 2699 }; 2700
2701 if (csio_handle_intr_status(hw, TP_INT_CAUSE_A, tp_intr_info)) 2702 csio_hw_fatal_err(hw); 2703 } 2704
2705 /* 2706 * SGE interrupt handler.
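 *
 * Every per-block handler in this section follows the same table-driven
 * pattern around csio_handle_intr_status(). A minimal hedged sketch,
 * where FOO_INT_CAUSE_A and the 0x1 mask are made-up placeholders and
 * the fields are: mask, message, stat index, fatal flag:
 *
 *	static struct intr_info foo_intr_info[] = {
 *		{ 0x00000001, "FOO parity error", -1, 1 },
 *		{ 0, NULL, 0, 0 }	- a zero mask terminates the table
 *	};
 *
 *	if (csio_handle_intr_status(hw, FOO_INT_CAUSE_A, foo_intr_info))
 *		csio_hw_fatal_err(hw);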
2707 */ 2708 static void csio_sge_intr_handler(struct csio_hw *hw) 2709 { 2710 uint64_t v; 2711
2712 static struct intr_info sge_intr_info[] = { 2713 { ERR_CPL_EXCEED_IQE_SIZE_F, 2714 "SGE received CPL exceeding IQE size", -1, 1 }, 2715 { ERR_INVALID_CIDX_INC_F, 2716 "SGE GTS CIDX increment too large", -1, 0 }, 2717 { ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 }, 2718 { ERR_DROPPED_DB_F, "SGE doorbell dropped", -1, 0 }, 2719 { ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F, 2720 "SGE IQID > 1023 received CPL for FL", -1, 0 }, 2721 { ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1, 2722 0 }, 2723 { ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1, 2724 0 }, 2725 { ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1, 2726 0 }, 2727 { ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1, 2728 0 }, 2729 { ERR_ING_CTXT_PRIO_F, 2730 "SGE too many priority ingress contexts", -1, 0 }, 2731 { ERR_EGR_CTXT_PRIO_F, 2732 "SGE too many priority egress contexts", -1, 0 }, 2733 { INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 }, 2734 { EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 }, 2735 { 0, NULL, 0, 0 } 2736 }; 2737
2738 v = (uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE1_A) | 2739 ((uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE2_A) << 32); 2740 if (v) { 2741 csio_fatal(hw, "SGE parity error (%#llx)\n", 2742 (unsigned long long)v); 2743 csio_wr_reg32(hw, (uint32_t)(v & 0xFFFFFFFF), 2744 SGE_INT_CAUSE1_A); 2745 csio_wr_reg32(hw, (uint32_t)(v >> 32), SGE_INT_CAUSE2_A); 2746 } 2747
2748 v |= csio_handle_intr_status(hw, SGE_INT_CAUSE3_A, sge_intr_info); 2749 2750 if (v != 0) 2751 csio_hw_fatal_err(hw); 2752 } 2753 2754
2755 #define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\ 2756 OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F) 2757 #define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\ 2758 IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F) 2759
2760 /* 2761 * CIM interrupt handler.
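 *
 * (Sketch of the idiom used by the SGE handler above: two 32-bit cause
 *  registers are folded into one 64-bit word so a single test covers
 *  both, with LO/HI standing in for SGE_INT_CAUSE1_A/SGE_INT_CAUSE2_A:
 *
 *	v = (uint64_t)csio_rd_reg32(hw, LO) |
 *	    ((uint64_t)csio_rd_reg32(hw, HI) << 32);
 *  )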
2762 */ 2763 static void csio_cim_intr_handler(struct csio_hw *hw) 2764 { 2765 static struct intr_info cim_intr_info[] = { 2766 { PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 }, 2767 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 }, 2768 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 }, 2769 { MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 }, 2770 { MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 }, 2771 { TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 }, 2772 { TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 }, 2773 { 0, NULL, 0, 0 } 2774 }; 2775 static struct intr_info cim_upintr_info[] = { 2776 { RSVDSPACEINT_F, "CIM reserved space access", -1, 1 }, 2777 { ILLTRANSINT_F, "CIM illegal transaction", -1, 1 }, 2778 { ILLWRINT_F, "CIM illegal write", -1, 1 }, 2779 { ILLRDINT_F, "CIM illegal read", -1, 1 }, 2780 { ILLRDBEINT_F, "CIM illegal read BE", -1, 1 }, 2781 { ILLWRBEINT_F, "CIM illegal write BE", -1, 1 }, 2782 { SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 }, 2783 { SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 }, 2784 { BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 }, 2785 { SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 }, 2786 { SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 }, 2787 { BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 }, 2788 { SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 }, 2789 { SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 }, 2790 { BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 }, 2791 { BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 }, 2792 { SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 }, 2793 { SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 }, 2794 { BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 }, 2795 { BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 }, 2796 { SGLRDPLINT_F, "CIM single read from PL space", -1, 1 }, 2797 { SGLWRPLINT_F, "CIM single write to PL space", -1, 1 }, 2798 { BLKRDPLINT_F, "CIM block read from PL space", -1, 1 }, 2799 { BLKWRPLINT_F, "CIM block write to PL space", -1, 1 }, 2800 { REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 }, 2801 { RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 }, 2802 { TIMEOUTINT_F, "CIM PIF timeout", -1, 1 }, 2803 { TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 }, 2804 { 0, NULL, 0, 0 } 2805 }; 2806 2807 int fat; 2808 2809 fat = csio_handle_intr_status(hw, CIM_HOST_INT_CAUSE_A, 2810 cim_intr_info) + 2811 csio_handle_intr_status(hw, CIM_HOST_UPACC_INT_CAUSE_A, 2812 cim_upintr_info); 2813 if (fat) 2814 csio_hw_fatal_err(hw); 2815 } 2816 2817 /* 2818 * ULP RX interrupt handler. 2819 */ 2820 static void csio_ulprx_intr_handler(struct csio_hw *hw) 2821 { 2822 static struct intr_info ulprx_intr_info[] = { 2823 { 0x1800000, "ULPRX context error", -1, 1 }, 2824 { 0x7fffff, "ULPRX parity error", -1, 1 }, 2825 { 0, NULL, 0, 0 } 2826 }; 2827 2828 if (csio_handle_intr_status(hw, ULP_RX_INT_CAUSE_A, ulprx_intr_info)) 2829 csio_hw_fatal_err(hw); 2830 } 2831 2832 /* 2833 * ULP TX interrupt handler. 
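 *
 * (As the CIM handler above shows, csio_handle_intr_status() returns a
 *  count of fatal conditions, so results from several cause registers
 *  can simply be summed; hedged sketch with placeholder REG_A/REG_B
 *  and tables a_info/b_info:
 *
 *	fat = csio_handle_intr_status(hw, REG_A, a_info) +
 *	      csio_handle_intr_status(hw, REG_B, b_info);
 *	if (fat)
 *		csio_hw_fatal_err(hw);
 *  )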
2834 */ 2835 static void csio_ulptx_intr_handler(struct csio_hw *hw) 2836 { 2837 static struct intr_info ulptx_intr_info[] = { 2838 { PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1, 2839 0 }, 2840 { PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1, 2841 0 }, 2842 { PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1, 2843 0 }, 2844 { PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1, 2845 0 }, 2846 { 0xfffffff, "ULPTX parity error", -1, 1 }, 2847 { 0, NULL, 0, 0 } 2848 }; 2849 2850 if (csio_handle_intr_status(hw, ULP_TX_INT_CAUSE_A, ulptx_intr_info)) 2851 csio_hw_fatal_err(hw); 2852 } 2853 2854 /* 2855 * PM TX interrupt handler. 2856 */ 2857 static void csio_pmtx_intr_handler(struct csio_hw *hw) 2858 { 2859 static struct intr_info pmtx_intr_info[] = { 2860 { PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 }, 2861 { PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 }, 2862 { PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 }, 2863 { ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 }, 2864 { 0xffffff0, "PMTX framing error", -1, 1 }, 2865 { OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 }, 2866 { DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error", -1, 2867 1 }, 2868 { ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 }, 2869 { PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1}, 2870 { 0, NULL, 0, 0 } 2871 }; 2872 2873 if (csio_handle_intr_status(hw, PM_TX_INT_CAUSE_A, pmtx_intr_info)) 2874 csio_hw_fatal_err(hw); 2875 } 2876 2877 /* 2878 * PM RX interrupt handler. 2879 */ 2880 static void csio_pmrx_intr_handler(struct csio_hw *hw) 2881 { 2882 static struct intr_info pmrx_intr_info[] = { 2883 { ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 }, 2884 { 0x3ffff0, "PMRX framing error", -1, 1 }, 2885 { OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 }, 2886 { DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error", -1, 2887 1 }, 2888 { IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 }, 2889 { PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1}, 2890 { 0, NULL, 0, 0 } 2891 }; 2892 2893 if (csio_handle_intr_status(hw, PM_RX_INT_CAUSE_A, pmrx_intr_info)) 2894 csio_hw_fatal_err(hw); 2895 } 2896 2897 /* 2898 * CPL switch interrupt handler. 2899 */ 2900 static void csio_cplsw_intr_handler(struct csio_hw *hw) 2901 { 2902 static struct intr_info cplsw_intr_info[] = { 2903 { CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 }, 2904 { CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 }, 2905 { TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 }, 2906 { SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 }, 2907 { CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 }, 2908 { ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 }, 2909 { 0, NULL, 0, 0 } 2910 }; 2911 2912 if (csio_handle_intr_status(hw, CPL_INTR_CAUSE_A, cplsw_intr_info)) 2913 csio_hw_fatal_err(hw); 2914 } 2915 2916 /* 2917 * LE interrupt handler. 2918 */ 2919 static void csio_le_intr_handler(struct csio_hw *hw) 2920 { 2921 static struct intr_info le_intr_info[] = { 2922 { LIPMISS_F, "LE LIP miss", -1, 0 }, 2923 { LIP0_F, "LE 0 LIP error", -1, 0 }, 2924 { PARITYERR_F, "LE parity error", -1, 1 }, 2925 { UNKNOWNCMD_F, "LE unknown command", -1, 1 }, 2926 { REQQPARERR_F, "LE request queue parity error", -1, 1 }, 2927 { 0, NULL, 0, 0 } 2928 }; 2929 2930 if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE_A, le_intr_info)) 2931 csio_hw_fatal_err(hw); 2932 } 2933 2934 /* 2935 * MPS interrupt handler. 
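 *
 * (The MPS tables below mix single-bit flags such as FRMERR_F with
 *  whole multi-bit fields written as FIELD_V(FIELD_M). A hedged sketch
 *  of what such macro pairs typically expand to, with FIELD_S as the
 *  assumed field shift:
 *
 *	#define FIELD_M    0xffU               - unshifted field mask
 *	#define FIELD_V(x) ((x) << FIELD_S)    - shift value into place
 *
 *  so TPFIFO_V(TPFIFO_M) matches any bit of the TP FIFO field at once.)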
2936 */ 2937 static void csio_mps_intr_handler(struct csio_hw *hw) 2938 { 2939 static struct intr_info mps_rx_intr_info[] = { 2940 { 0xffffff, "MPS Rx parity error", -1, 1 }, 2941 { 0, NULL, 0, 0 } 2942 }; 2943 static struct intr_info mps_tx_intr_info[] = { 2944 { TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 }, 2945 { NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 }, 2946 { TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error", 2947 -1, 1 }, 2948 { TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error", 2949 -1, 1 }, 2950 { BUBBLE_F, "MPS Tx underflow", -1, 1 }, 2951 { SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 }, 2952 { FRMERR_F, "MPS Tx framing error", -1, 1 }, 2953 { 0, NULL, 0, 0 } 2954 }; 2955 static struct intr_info mps_trc_intr_info[] = { 2956 { FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 }, 2957 { PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error", 2958 -1, 1 }, 2959 { MISCPERR_F, "MPS TRC misc parity error", -1, 1 }, 2960 { 0, NULL, 0, 0 } 2961 }; 2962 static struct intr_info mps_stat_sram_intr_info[] = { 2963 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 }, 2964 { 0, NULL, 0, 0 } 2965 }; 2966 static struct intr_info mps_stat_tx_intr_info[] = { 2967 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 }, 2968 { 0, NULL, 0, 0 } 2969 }; 2970 static struct intr_info mps_stat_rx_intr_info[] = { 2971 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 }, 2972 { 0, NULL, 0, 0 } 2973 }; 2974 static struct intr_info mps_cls_intr_info[] = { 2975 { MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 }, 2976 { MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 }, 2977 { HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 }, 2978 { 0, NULL, 0, 0 } 2979 }; 2980 2981 int fat; 2982 2983 fat = csio_handle_intr_status(hw, MPS_RX_PERR_INT_CAUSE_A, 2984 mps_rx_intr_info) + 2985 csio_handle_intr_status(hw, MPS_TX_INT_CAUSE_A, 2986 mps_tx_intr_info) + 2987 csio_handle_intr_status(hw, MPS_TRC_INT_CAUSE_A, 2988 mps_trc_intr_info) + 2989 csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_SRAM_A, 2990 mps_stat_sram_intr_info) + 2991 csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A, 2992 mps_stat_tx_intr_info) + 2993 csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A, 2994 mps_stat_rx_intr_info) + 2995 csio_handle_intr_status(hw, MPS_CLS_INT_CAUSE_A, 2996 mps_cls_intr_info); 2997 2998 csio_wr_reg32(hw, 0, MPS_INT_CAUSE_A); 2999 csio_rd_reg32(hw, MPS_INT_CAUSE_A); /* flush */ 3000 if (fat) 3001 csio_hw_fatal_err(hw); 3002 } 3003 3004 #define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \ 3005 ECC_UE_INT_CAUSE_F) 3006 3007 /* 3008 * EDC/MC interrupt handler. 3009 */ 3010 static void csio_mem_intr_handler(struct csio_hw *hw, int idx) 3011 { 3012 static const char name[3][5] = { "EDC0", "EDC1", "MC" }; 3013 3014 unsigned int addr, cnt_addr, v; 3015 3016 if (idx <= MEM_EDC1) { 3017 addr = EDC_REG(EDC_INT_CAUSE_A, idx); 3018 cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx); 3019 } else { 3020 addr = MC_INT_CAUSE_A; 3021 cnt_addr = MC_ECC_STATUS_A; 3022 } 3023 3024 v = csio_rd_reg32(hw, addr) & MEM_INT_MASK; 3025 if (v & PERR_INT_CAUSE_F) 3026 csio_fatal(hw, "%s FIFO parity error\n", name[idx]); 3027 if (v & ECC_CE_INT_CAUSE_F) { 3028 uint32_t cnt = ECC_CECNT_G(csio_rd_reg32(hw, cnt_addr)); 3029 3030 csio_wr_reg32(hw, ECC_CECNT_V(ECC_CECNT_M), cnt_addr); 3031 csio_warn(hw, "%u %s correctable ECC data error%s\n", 3032 cnt, name[idx], cnt > 1 ? 
"s" : ""); 3033 } 3034 if (v & ECC_UE_INT_CAUSE_F) 3035 csio_fatal(hw, "%s uncorrectable ECC data error\n", name[idx]); 3036 3037 csio_wr_reg32(hw, v, addr); 3038 if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F)) 3039 csio_hw_fatal_err(hw); 3040 } 3041 3042 /* 3043 * MA interrupt handler. 3044 */ 3045 static void csio_ma_intr_handler(struct csio_hw *hw) 3046 { 3047 uint32_t v, status = csio_rd_reg32(hw, MA_INT_CAUSE_A); 3048 3049 if (status & MEM_PERR_INT_CAUSE_F) 3050 csio_fatal(hw, "MA parity error, parity status %#x\n", 3051 csio_rd_reg32(hw, MA_PARITY_ERROR_STATUS_A)); 3052 if (status & MEM_WRAP_INT_CAUSE_F) { 3053 v = csio_rd_reg32(hw, MA_INT_WRAP_STATUS_A); 3054 csio_fatal(hw, 3055 "MA address wrap-around error by client %u to address %#x\n", 3056 MEM_WRAP_CLIENT_NUM_G(v), MEM_WRAP_ADDRESS_G(v) << 4); 3057 } 3058 csio_wr_reg32(hw, status, MA_INT_CAUSE_A); 3059 csio_hw_fatal_err(hw); 3060 } 3061 3062 /* 3063 * SMB interrupt handler. 3064 */ 3065 static void csio_smb_intr_handler(struct csio_hw *hw) 3066 { 3067 static struct intr_info smb_intr_info[] = { 3068 { MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 }, 3069 { MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 }, 3070 { SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 }, 3071 { 0, NULL, 0, 0 } 3072 }; 3073 3074 if (csio_handle_intr_status(hw, SMB_INT_CAUSE_A, smb_intr_info)) 3075 csio_hw_fatal_err(hw); 3076 } 3077 3078 /* 3079 * NC-SI interrupt handler. 3080 */ 3081 static void csio_ncsi_intr_handler(struct csio_hw *hw) 3082 { 3083 static struct intr_info ncsi_intr_info[] = { 3084 { CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 }, 3085 { MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 }, 3086 { TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 }, 3087 { RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 }, 3088 { 0, NULL, 0, 0 } 3089 }; 3090 3091 if (csio_handle_intr_status(hw, NCSI_INT_CAUSE_A, ncsi_intr_info)) 3092 csio_hw_fatal_err(hw); 3093 } 3094 3095 /* 3096 * XGMAC interrupt handler. 3097 */ 3098 static void csio_xgmac_intr_handler(struct csio_hw *hw, int port) 3099 { 3100 uint32_t v = csio_rd_reg32(hw, T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A)); 3101 3102 v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F; 3103 if (!v) 3104 return; 3105 3106 if (v & TXFIFO_PRTY_ERR_F) 3107 csio_fatal(hw, "XGMAC %d Tx FIFO parity error\n", port); 3108 if (v & RXFIFO_PRTY_ERR_F) 3109 csio_fatal(hw, "XGMAC %d Rx FIFO parity error\n", port); 3110 csio_wr_reg32(hw, v, T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A)); 3111 csio_hw_fatal_err(hw); 3112 } 3113 3114 /* 3115 * PL interrupt handler. 3116 */ 3117 static void csio_pl_intr_handler(struct csio_hw *hw) 3118 { 3119 static struct intr_info pl_intr_info[] = { 3120 { FATALPERR_F, "T4 fatal parity error", -1, 1 }, 3121 { PERRVFID_F, "PL VFID_MAP parity error", -1, 1 }, 3122 { 0, NULL, 0, 0 } 3123 }; 3124 3125 if (csio_handle_intr_status(hw, PL_PL_INT_CAUSE_A, pl_intr_info)) 3126 csio_hw_fatal_err(hw); 3127 } 3128 3129 /* 3130 * csio_hw_slow_intr_handler - control path interrupt handler 3131 * @hw: HW module 3132 * 3133 * Interrupt handler for non-data global interrupt events, e.g., errors. 3134 * The designation 'slow' is because it involves register reads, while 3135 * data interrupts typically don't involve any MMIOs. 
3136 */ 3137 int 3138 csio_hw_slow_intr_handler(struct csio_hw *hw) 3139 { 3140 uint32_t cause = csio_rd_reg32(hw, PL_INT_CAUSE_A); 3141 3142 if (!(cause & CSIO_GLBL_INTR_MASK)) { 3143 CSIO_INC_STATS(hw, n_plint_unexp); 3144 return 0; 3145 } 3146 3147 csio_dbg(hw, "Slow interrupt! cause: 0x%x\n", cause); 3148 3149 CSIO_INC_STATS(hw, n_plint_cnt); 3150 3151 if (cause & CIM_F) 3152 csio_cim_intr_handler(hw); 3153 3154 if (cause & MPS_F) 3155 csio_mps_intr_handler(hw); 3156 3157 if (cause & NCSI_F) 3158 csio_ncsi_intr_handler(hw); 3159 3160 if (cause & PL_F) 3161 csio_pl_intr_handler(hw); 3162 3163 if (cause & SMB_F) 3164 csio_smb_intr_handler(hw); 3165 3166 if (cause & XGMAC0_F) 3167 csio_xgmac_intr_handler(hw, 0); 3168 3169 if (cause & XGMAC1_F) 3170 csio_xgmac_intr_handler(hw, 1); 3171 3172 if (cause & XGMAC_KR0_F) 3173 csio_xgmac_intr_handler(hw, 2); 3174 3175 if (cause & XGMAC_KR1_F) 3176 csio_xgmac_intr_handler(hw, 3); 3177 3178 if (cause & PCIE_F) 3179 hw->chip_ops->chip_pcie_intr_handler(hw); 3180 3181 if (cause & MC_F) 3182 csio_mem_intr_handler(hw, MEM_MC); 3183 3184 if (cause & EDC0_F) 3185 csio_mem_intr_handler(hw, MEM_EDC0); 3186 3187 if (cause & EDC1_F) 3188 csio_mem_intr_handler(hw, MEM_EDC1); 3189 3190 if (cause & LE_F) 3191 csio_le_intr_handler(hw); 3192 3193 if (cause & TP_F) 3194 csio_tp_intr_handler(hw); 3195 3196 if (cause & MA_F) 3197 csio_ma_intr_handler(hw); 3198 3199 if (cause & PM_TX_F) 3200 csio_pmtx_intr_handler(hw); 3201 3202 if (cause & PM_RX_F) 3203 csio_pmrx_intr_handler(hw); 3204 3205 if (cause & ULP_RX_F) 3206 csio_ulprx_intr_handler(hw); 3207 3208 if (cause & CPL_SWITCH_F) 3209 csio_cplsw_intr_handler(hw); 3210 3211 if (cause & SGE_F) 3212 csio_sge_intr_handler(hw); 3213 3214 if (cause & ULP_TX_F) 3215 csio_ulptx_intr_handler(hw); 3216 3217 /* Clear the interrupts just processed for which we are the master. */ 3218 csio_wr_reg32(hw, cause & CSIO_GLBL_INTR_MASK, PL_INT_CAUSE_A); 3219 csio_rd_reg32(hw, PL_INT_CAUSE_A); /* flush */ 3220 3221 return 1; 3222 } 3223 3224 /***************************************************************************** 3225 * HW <--> mailbox interfacing routines. 3226 ****************************************************************************/ 3227 /* 3228 * csio_mberr_worker - Worker thread (dpc) for mailbox/error completions 3229 * 3230 * @data: Private data pointer. 3231 * 3232 * Called from worker thread context. 3233 */ 3234 static void 3235 csio_mberr_worker(void *data) 3236 { 3237 struct csio_hw *hw = (struct csio_hw *)data; 3238 struct csio_mbm *mbm = &hw->mbm; 3239 LIST_HEAD(cbfn_q); 3240 struct csio_mb *mbp_next; 3241 int rv; 3242 3243 del_timer_sync(&mbm->timer); 3244 3245 spin_lock_irq(&hw->lock); 3246 if (list_empty(&mbm->cbfn_q)) { 3247 spin_unlock_irq(&hw->lock); 3248 return; 3249 } 3250 3251 list_splice_tail_init(&mbm->cbfn_q, &cbfn_q); 3252 mbm->stats.n_cbfnq = 0; 3253 3254 /* Try to start waiting mailboxes */ 3255 if (!list_empty(&mbm->req_q)) { 3256 mbp_next = list_first_entry(&mbm->req_q, struct csio_mb, list); 3257 list_del_init(&mbp_next->list); 3258 3259 rv = csio_mb_issue(hw, mbp_next); 3260 if (rv != 0) 3261 list_add_tail(&mbp_next->list, &mbm->req_q); 3262 else 3263 CSIO_DEC_STATS(mbm, n_activeq); 3264 } 3265 spin_unlock_irq(&hw->lock); 3266 3267 /* Now callback completions */ 3268 csio_mb_completions(hw, &cbfn_q); 3269 } 3270 3271 /* 3272 * csio_hw_mb_timer - Top-level Mailbox timeout handler. 
3273 * 3274 * @data: private data pointer 3275 * 3276 **/ 3277 static void 3278 csio_hw_mb_timer(uintptr_t data) 3279 { 3280 struct csio_hw *hw = (struct csio_hw *)data; 3281 struct csio_mb *mbp = NULL; 3282 3283 spin_lock_irq(&hw->lock); 3284 mbp = csio_mb_tmo_handler(hw); 3285 spin_unlock_irq(&hw->lock); 3286 3287 /* Call back the function for the timed-out Mailbox */ 3288 if (mbp) 3289 mbp->mb_cbfn(hw, mbp); 3290 3291 } 3292 3293 /* 3294 * csio_hw_mbm_cleanup - Cleanup Mailbox module. 3295 * @hw: HW module 3296 * 3297 * Called with lock held, should exit with lock held. 3298 * Cancels outstanding mailboxes (waiting, in-flight) and gathers them 3299 * into a local queue. Drops lock and calls the completions. Holds 3300 * lock and returns. 3301 */ 3302 static void 3303 csio_hw_mbm_cleanup(struct csio_hw *hw) 3304 { 3305 LIST_HEAD(cbfn_q); 3306 3307 csio_mb_cancel_all(hw, &cbfn_q); 3308 3309 spin_unlock_irq(&hw->lock); 3310 csio_mb_completions(hw, &cbfn_q); 3311 spin_lock_irq(&hw->lock); 3312 } 3313 3314 /***************************************************************************** 3315 * Event handling 3316 ****************************************************************************/ 3317 int 3318 csio_enqueue_evt(struct csio_hw *hw, enum csio_evt type, void *evt_msg, 3319 uint16_t len) 3320 { 3321 struct csio_evt_msg *evt_entry = NULL; 3322 3323 if (type >= CSIO_EVT_MAX) 3324 return -EINVAL; 3325 3326 if (len > CSIO_EVT_MSG_SIZE) 3327 return -EINVAL; 3328 3329 if (hw->flags & CSIO_HWF_FWEVT_STOP) 3330 return -EINVAL; 3331 3332 if (list_empty(&hw->evt_free_q)) { 3333 csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n", 3334 type, len); 3335 return -ENOMEM; 3336 } 3337 3338 evt_entry = list_first_entry(&hw->evt_free_q, 3339 struct csio_evt_msg, list); 3340 list_del_init(&evt_entry->list); 3341 3342 /* copy event msg and queue the event */ 3343 evt_entry->type = type; 3344 memcpy((void *)evt_entry->data, evt_msg, len); 3345 list_add_tail(&evt_entry->list, &hw->evt_active_q); 3346 3347 CSIO_DEC_STATS(hw, n_evt_freeq); 3348 CSIO_INC_STATS(hw, n_evt_activeq); 3349 3350 return 0; 3351 } 3352 3353 static int 3354 csio_enqueue_evt_lock(struct csio_hw *hw, enum csio_evt type, void *evt_msg, 3355 uint16_t len, bool msg_sg) 3356 { 3357 struct csio_evt_msg *evt_entry = NULL; 3358 struct csio_fl_dma_buf *fl_sg; 3359 uint32_t off = 0; 3360 unsigned long flags; 3361 int n, ret = 0; 3362 3363 if (type >= CSIO_EVT_MAX) 3364 return -EINVAL; 3365 3366 if (len > CSIO_EVT_MSG_SIZE) 3367 return -EINVAL; 3368 3369 spin_lock_irqsave(&hw->lock, flags); 3370 if (hw->flags & CSIO_HWF_FWEVT_STOP) { 3371 ret = -EINVAL; 3372 goto out; 3373 } 3374 3375 if (list_empty(&hw->evt_free_q)) { 3376 csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n", 3377 type, len); 3378 ret = -ENOMEM; 3379 goto out; 3380 } 3381 3382 evt_entry = list_first_entry(&hw->evt_free_q, 3383 struct csio_evt_msg, list); 3384 list_del_init(&evt_entry->list); 3385 3386 /* copy event msg and queue the event */ 3387 evt_entry->type = type; 3388 3389 /* If Payload in SG list*/ 3390 if (msg_sg) { 3391 fl_sg = (struct csio_fl_dma_buf *) evt_msg; 3392 for (n = 0; (n < CSIO_MAX_FLBUF_PER_IQWR && off < len); n++) { 3393 memcpy((void *)((uintptr_t)evt_entry->data + off), 3394 fl_sg->flbufs[n].vaddr, 3395 fl_sg->flbufs[n].len); 3396 off += fl_sg->flbufs[n].len; 3397 } 3398 } else 3399 memcpy((void *)evt_entry->data, evt_msg, len); 3400 3401 list_add_tail(&evt_entry->list, &hw->evt_active_q); 3402 CSIO_DEC_STATS(hw, n_evt_freeq); 3403 
CSIO_INC_STATS(hw, n_evt_activeq); 3404 out: 3405 spin_unlock_irqrestore(&hw->lock, flags); 3406 return ret; 3407 } 3408 3409 static void 3410 csio_free_evt(struct csio_hw *hw, struct csio_evt_msg *evt_entry) 3411 { 3412 if (evt_entry) { 3413 spin_lock_irq(&hw->lock); 3414 list_del_init(&evt_entry->list); 3415 list_add_tail(&evt_entry->list, &hw->evt_free_q); 3416 CSIO_DEC_STATS(hw, n_evt_activeq); 3417 CSIO_INC_STATS(hw, n_evt_freeq); 3418 spin_unlock_irq(&hw->lock); 3419 } 3420 } 3421 3422 void 3423 csio_evtq_flush(struct csio_hw *hw) 3424 { 3425 uint32_t count; 3426 count = 30; 3427 while (hw->flags & CSIO_HWF_FWEVT_PENDING && count--) { 3428 spin_unlock_irq(&hw->lock); 3429 msleep(2000); 3430 spin_lock_irq(&hw->lock); 3431 } 3432 3433 CSIO_DB_ASSERT(!(hw->flags & CSIO_HWF_FWEVT_PENDING)); 3434 } 3435 3436 static void 3437 csio_evtq_stop(struct csio_hw *hw) 3438 { 3439 hw->flags |= CSIO_HWF_FWEVT_STOP; 3440 } 3441 3442 static void 3443 csio_evtq_start(struct csio_hw *hw) 3444 { 3445 hw->flags &= ~CSIO_HWF_FWEVT_STOP; 3446 } 3447 3448 static void 3449 csio_evtq_cleanup(struct csio_hw *hw) 3450 { 3451 struct list_head *evt_entry, *next_entry; 3452 3453 /* Release outstanding events from activeq to freeq*/ 3454 if (!list_empty(&hw->evt_active_q)) 3455 list_splice_tail_init(&hw->evt_active_q, &hw->evt_free_q); 3456 3457 hw->stats.n_evt_activeq = 0; 3458 hw->flags &= ~CSIO_HWF_FWEVT_PENDING; 3459 3460 /* Freeup event entry */ 3461 list_for_each_safe(evt_entry, next_entry, &hw->evt_free_q) { 3462 kfree(evt_entry); 3463 CSIO_DEC_STATS(hw, n_evt_freeq); 3464 } 3465 3466 hw->stats.n_evt_freeq = 0; 3467 } 3468 3469 3470 static void 3471 csio_process_fwevtq_entry(struct csio_hw *hw, void *wr, uint32_t len, 3472 struct csio_fl_dma_buf *flb, void *priv) 3473 { 3474 __u8 op; 3475 void *msg = NULL; 3476 uint32_t msg_len = 0; 3477 bool msg_sg = 0; 3478 3479 op = ((struct rss_header *) wr)->opcode; 3480 if (op == CPL_FW6_PLD) { 3481 CSIO_INC_STATS(hw, n_cpl_fw6_pld); 3482 if (!flb || !flb->totlen) { 3483 CSIO_INC_STATS(hw, n_cpl_unexp); 3484 return; 3485 } 3486 3487 msg = (void *) flb; 3488 msg_len = flb->totlen; 3489 msg_sg = 1; 3490 } else if (op == CPL_FW6_MSG || op == CPL_FW4_MSG) { 3491 3492 CSIO_INC_STATS(hw, n_cpl_fw6_msg); 3493 /* skip RSS header */ 3494 msg = (void *)((uintptr_t)wr + sizeof(__be64)); 3495 msg_len = (op == CPL_FW6_MSG) ? sizeof(struct cpl_fw6_msg) : 3496 sizeof(struct cpl_fw4_msg); 3497 } else { 3498 csio_warn(hw, "unexpected CPL %#x on FW event queue\n", op); 3499 CSIO_INC_STATS(hw, n_cpl_unexp); 3500 return; 3501 } 3502 3503 /* 3504 * Enqueue event to EventQ. 
Events processing happens 3505 * in Event worker thread context 3506 */ 3507 if (csio_enqueue_evt_lock(hw, CSIO_EVT_FW, msg, 3508 (uint16_t)msg_len, msg_sg)) 3509 CSIO_INC_STATS(hw, n_evt_drop); 3510 } 3511
3512 void 3513 csio_evtq_worker(struct work_struct *work) 3514 { 3515 struct csio_hw *hw = container_of(work, struct csio_hw, evtq_work); 3516 struct list_head *evt_entry, *next_entry; 3517 LIST_HEAD(evt_q); 3518 struct csio_evt_msg *evt_msg; 3519 struct cpl_fw6_msg *msg; 3520 struct csio_rnode *rn; 3521 int rv = 0; 3522 uint8_t evtq_stop = 0; 3523
3524 csio_dbg(hw, "event worker thread active evts#%d\n", 3525 hw->stats.n_evt_activeq); 3526
3527 spin_lock_irq(&hw->lock); 3528 while (!list_empty(&hw->evt_active_q)) { 3529 list_splice_tail_init(&hw->evt_active_q, &evt_q); 3530 spin_unlock_irq(&hw->lock); 3531
3532 list_for_each_safe(evt_entry, next_entry, &evt_q) { 3533 evt_msg = (struct csio_evt_msg *) evt_entry; 3534 3535 /* Drop events if queue is STOPPED */ 3536 spin_lock_irq(&hw->lock); 3537 if (hw->flags & CSIO_HWF_FWEVT_STOP) 3538 evtq_stop = 1; 3539 spin_unlock_irq(&hw->lock); 3540 if (evtq_stop) { 3541 CSIO_INC_STATS(hw, n_evt_drop); 3542 goto free_evt; 3543 } 3544
3545 switch (evt_msg->type) { 3546 case CSIO_EVT_FW: 3547 msg = (struct cpl_fw6_msg *)(evt_msg->data); 3548 3549 if ((msg->opcode == CPL_FW6_MSG || 3550 msg->opcode == CPL_FW4_MSG) && 3551 !msg->type) { 3552 rv = csio_mb_fwevt_handler(hw, 3553 msg->data); 3554 if (!rv) 3555 break; 3556 /* Handle any remaining fw events */ 3557 csio_fcoe_fwevt_handler(hw, 3558 msg->opcode, msg->data); 3559 } else if (msg->opcode == CPL_FW6_PLD) { 3560 3561 csio_fcoe_fwevt_handler(hw, 3562 msg->opcode, msg->data); 3563 } else { 3564 csio_warn(hw, 3565 "Unhandled FW msg op %x type %x\n", 3566 msg->opcode, msg->type); 3567 CSIO_INC_STATS(hw, n_evt_drop); 3568 } 3569 break; 3570
3571 case CSIO_EVT_MBX: 3572 csio_mberr_worker(hw); 3573 break; 3574 3575 case CSIO_EVT_DEV_LOSS: 3576 memcpy(&rn, evt_msg->data, sizeof(rn)); 3577 csio_rnode_devloss_handler(rn); 3578 break; 3579 3580 default: 3581 csio_warn(hw, "Unhandled event %x on evtq\n", 3582 evt_msg->type); 3583 CSIO_INC_STATS(hw, n_evt_unexp); 3584 break; 3585 } 3586 free_evt: 3587 csio_free_evt(hw, evt_msg); 3588 } 3589
3590 spin_lock_irq(&hw->lock); 3591 } 3592 hw->flags &= ~CSIO_HWF_FWEVT_PENDING; 3593 spin_unlock_irq(&hw->lock); 3594 } 3595
3596 int 3597 csio_fwevtq_handler(struct csio_hw *hw) 3598 { 3599 int rv; 3600 3601 if (csio_q_iqid(hw, hw->fwevt_iq_idx) == CSIO_MAX_QID) { 3602 CSIO_INC_STATS(hw, n_int_stray); 3603 return -EINVAL; 3604 } 3605 3606 rv = csio_wr_process_iq_idx(hw, hw->fwevt_iq_idx, 3607 csio_process_fwevtq_entry, NULL); 3608 return rv; 3609 } 3610
3611 /**************************************************************************** 3612 * Entry points 3613 ****************************************************************************/ 3614
3615 /* Management module */ 3616 /* 3617 * csio_mgmt_req_lookup - Look up whether the given IO req exists in the active Q. 3618 * @mgmtm - mgmt module 3619 * @io_req - io request 3620 * 3621 * Return - 0: if the given IO req exists in the active Q. 3622 * -EINVAL: if the lookup fails.
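 *
 * Usage sketch (illustrative): a completion or abort path can validate
 * a request before touching it, e.g.
 *
 *	if (csio_mgmt_req_lookup(mgmtm, io_req) != 0)
 *		return;		- stale or unknown request, ignore it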
3623 */ 3624 int 3625 csio_mgmt_req_lookup(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req) 3626 { 3627 struct list_head *tmp; 3628
3629 /* Lookup ioreq in the ACTIVEQ */ 3630 list_for_each(tmp, &mgmtm->active_q) { 3631 if (io_req == (struct csio_ioreq *)tmp) 3632 return 0; 3633 } 3634 return -EINVAL; 3635 } 3636
3637 #define ECM_MIN_TMO 1000 /* Minimum timeout value for req */ 3638
3639 /* 3640 * csio_mgmt_tmo_handler - MGMT IO Timeout handler. 3641 * @data - Event data. 3642 * 3643 * Return - none. 3644 */ 3645 static void 3646 csio_mgmt_tmo_handler(uintptr_t data) 3647 { 3648 struct csio_mgmtm *mgmtm = (struct csio_mgmtm *) data; 3649 struct list_head *tmp; 3650 struct csio_ioreq *io_req; 3651
3652 csio_dbg(mgmtm->hw, "Mgmt timer invoked!\n"); 3653 3654 spin_lock_irq(&mgmtm->hw->lock); 3655
3656 list_for_each(tmp, &mgmtm->active_q) { 3657 io_req = (struct csio_ioreq *) tmp; 3658 io_req->tmo -= min_t(uint32_t, io_req->tmo, ECM_MIN_TMO); 3659
3660 if (!io_req->tmo) { 3661 /* Dequeue the request from the active Q. */ 3662 tmp = csio_list_prev(tmp); 3663 list_del_init(&io_req->sm.sm_list); 3664 if (io_req->io_cbfn) { 3665 /* io_req will be freed by completion handler */ 3666 io_req->wr_status = -ETIMEDOUT; 3667 io_req->io_cbfn(mgmtm->hw, io_req); 3668 } else { 3669 CSIO_DB_ASSERT(0); 3670 } 3671 } 3672 } 3673
3674 /* If the active queue is not empty, re-arm the timer */ 3675 if (!list_empty(&mgmtm->active_q)) 3676 mod_timer(&mgmtm->mgmt_timer, 3677 jiffies + msecs_to_jiffies(ECM_MIN_TMO)); 3678 spin_unlock_irq(&mgmtm->hw->lock); 3679 } 3680
3681 static void 3682 csio_mgmtm_cleanup(struct csio_mgmtm *mgmtm) 3683 { 3684 struct csio_hw *hw = mgmtm->hw; 3685 struct csio_ioreq *io_req; 3686 struct list_head *tmp; 3687 uint32_t count; 3688
3689 count = 30; 3690 /* Wait for all outstanding req to complete gracefully */ 3691 while ((!list_empty(&mgmtm->active_q)) && count--) { 3692 spin_unlock_irq(&hw->lock); 3693 msleep(2000); 3694 spin_lock_irq(&hw->lock); 3695 } 3696
3697 /* release outstanding req from ACTIVEQ */ 3698 list_for_each(tmp, &mgmtm->active_q) { 3699 io_req = (struct csio_ioreq *) tmp; 3700 tmp = csio_list_prev(tmp); 3701 list_del_init(&io_req->sm.sm_list); 3702 mgmtm->stats.n_active--; 3703 if (io_req->io_cbfn) { 3704 /* io_req will be freed by completion handler */ 3705 io_req->wr_status = -ETIMEDOUT; 3706 io_req->io_cbfn(mgmtm->hw, io_req); 3707 } 3708 } 3709 } 3710
3711 /* 3712 * csio_mgmtm_init - Mgmt module init entry point 3713 * @mgmtm - mgmt module 3714 * @hw - HW module 3715 * 3716 * Initialize mgmt timer, resource wait queue, active queue, 3717 * completion q. Allocate Egress and Ingress 3718 * WR queues and save off the queue index returned by the WR 3719 * module for future use. Allocate and save off mgmt reqs in the 3720 * mgmt_req_freelist for future use. Make sure their SM is initialized 3721 * to uninit state. 3722 * Returns: 0 - on success 3723 * -ENOMEM - on error. 3724 */ 3725 static int 3726 csio_mgmtm_init(struct csio_mgmtm *mgmtm, struct csio_hw *hw) 3727 { 3728 struct timer_list *timer = &mgmtm->mgmt_timer; 3729
3730 init_timer(timer); 3731 timer->function = csio_mgmt_tmo_handler; 3732 timer->data = (unsigned long)mgmtm; 3733
3734 INIT_LIST_HEAD(&mgmtm->active_q); 3735 INIT_LIST_HEAD(&mgmtm->cbfn_q); 3736
3737 mgmtm->hw = hw; 3738 /*mgmtm->iq_idx = hw->fwevt_iq_idx;*/ 3739
3740 return 0; 3741 } 3742
3743 /* 3744 * csio_mgmtm_exit - MGMT module exit entry point 3745 * @mgmtm - mgmt module 3746 * 3747 * This function is called during MGMT module uninit. 3748 * Stop timers, free ioreqs allocated. 3749 * Returns: None 3750 *
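 *
 * (Hedged ordering note: teardown mirrors init - outstanding requests
 *  are drained first, then the timer is stopped, e.g.
 *
 *	csio_mgmtm_cleanup(mgmtm);	- complete or fail pending reqs
 *	csio_mgmtm_exit(mgmtm);		- del_timer_sync on mgmt_timer
 *
 *  the actual call sites are the HW SM quiesce path and csio_hw_exit().)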
3751 */ 3752 static void 3753 csio_mgmtm_exit(struct csio_mgmtm *mgmtm) 3754 { 3755 del_timer_sync(&mgmtm->mgmt_timer); 3756 } 3757 3758
3759 /** 3760 * csio_hw_start - Kicks off the HW State machine 3761 * @hw: Pointer to HW module. 3762 * 3763 * It is assumed that the initialization is a synchronous operation. 3764 * So when we return after posting the event, the HW SM should be in 3765 * the ready state, if there were no errors during init. 3766 */ 3767 int 3768 csio_hw_start(struct csio_hw *hw) 3769 { 3770 spin_lock_irq(&hw->lock); 3771 csio_post_event(&hw->sm, CSIO_HWE_CFG); 3772 spin_unlock_irq(&hw->lock); 3773
3774 if (csio_is_hw_ready(hw)) 3775 return 0; 3776 else 3777 return -EINVAL; 3778 } 3779
3780 int 3781 csio_hw_stop(struct csio_hw *hw) 3782 { 3783 csio_post_event(&hw->sm, CSIO_HWE_PCI_REMOVE); 3784
3785 if (csio_is_hw_removing(hw)) 3786 return 0; 3787 else 3788 return -EINVAL; 3789 } 3790
3791 /* Max reset retries */ 3792 #define CSIO_MAX_RESET_RETRIES 3 3793
3794 /** 3795 * csio_hw_reset - Reset the hardware 3796 * @hw: HW module. 3797 * 3798 * Caller should hold lock across this function. 3799 */ 3800 int 3801 csio_hw_reset(struct csio_hw *hw) 3802 { 3803 if (!csio_is_hw_master(hw)) 3804 return -EPERM; 3805
3806 if (hw->rst_retries >= CSIO_MAX_RESET_RETRIES) { 3807 csio_dbg(hw, "Max hw reset attempts reached\n"); 3808 return -EINVAL; 3809 } 3810
3811 hw->rst_retries++; 3812 csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET); 3813
3814 if (csio_is_hw_ready(hw)) { 3815 hw->rst_retries = 0; 3816 hw->stats.n_reset_start = jiffies_to_msecs(jiffies); 3817 return 0; 3818 } else 3819 return -EINVAL; 3820 } 3821
3822 /* 3823 * csio_hw_get_device_id - Caches the Adapter's vendor & device id. 3824 * @hw: HW module. 3825 */ 3826 static void 3827 csio_hw_get_device_id(struct csio_hw *hw) 3828 { 3829 /* Is the adapter device id cached already? */ 3830 if (csio_is_dev_id_cached(hw)) 3831 return; 3832
3833 /* Get the PCI vendor & device id */ 3834 pci_read_config_word(hw->pdev, PCI_VENDOR_ID, 3835 &hw->params.pci.vendor_id); 3836 pci_read_config_word(hw->pdev, PCI_DEVICE_ID, 3837 &hw->params.pci.device_id); 3838
3839 csio_dev_id_cached(hw); 3840 hw->chip_id = (hw->params.pci.device_id & CSIO_HW_CHIP_MASK); 3841 3842 } /* csio_hw_get_device_id */ 3843
3844 /* 3845 * csio_hw_set_description - Set the model, description of the hw. 3846 * @hw: HW module. 3847 * @ven_id: PCI Vendor ID 3848 * @dev_id: PCI Device ID 3849 */ 3850 static void 3851 csio_hw_set_description(struct csio_hw *hw, uint16_t ven_id, uint16_t dev_id) 3852 { 3853 uint32_t adap_type, prot_type; 3854
3855 if (ven_id == CSIO_VENDOR_ID) { 3856 prot_type = (dev_id & CSIO_ASIC_DEVID_PROTO_MASK); 3857 adap_type = (dev_id & CSIO_ASIC_DEVID_TYPE_MASK); 3858
3859 if (prot_type == CSIO_T5_FCOE_ASIC) { 3860 memcpy(hw->hw_ver, 3861 csio_t5_fcoe_adapters[adap_type].model_no, 16); 3862 memcpy(hw->model_desc, 3863 csio_t5_fcoe_adapters[adap_type].description, 3864 32); 3865 } else { 3866 char tempName[32] = "Chelsio FCoE Controller"; 3867 memcpy(hw->model_desc, tempName, 32); 3868 } 3869 } 3870 } /* csio_hw_set_description */ 3871
3872 /** 3873 * csio_hw_init - Initialize HW module. 3874 * @hw: Pointer to HW module. 3875 * 3876 * Initialize the members of the HW module.
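 *
 * Bring-up sketch (hedged; the real probe lives in the PCI glue, and
 * "bail" is a placeholder label):
 *
 *	rv = csio_hw_init(hw);		- alloc sub-modules, evtq, SM
 *	if (rv)
 *		goto bail;
 *	rv = csio_hw_start(hw);		- posts CSIO_HWE_CFG, runs the SM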
3877 */ 3878 int 3879 csio_hw_init(struct csio_hw *hw) 3880 { 3881 int rv = -EINVAL; 3882 uint32_t i; 3883 uint16_t ven_id, dev_id; 3884 struct csio_evt_msg *evt_entry; 3885 3886 INIT_LIST_HEAD(&hw->sm.sm_list); 3887 csio_init_state(&hw->sm, csio_hws_uninit); 3888 spin_lock_init(&hw->lock); 3889 INIT_LIST_HEAD(&hw->sln_head); 3890 3891 /* Get the PCI vendor & device id */ 3892 csio_hw_get_device_id(hw); 3893 3894 strcpy(hw->name, CSIO_HW_NAME); 3895 3896 /* Initialize the HW chip ops T5 specific ops */ 3897 hw->chip_ops = &t5_ops; 3898 3899 /* Set the model & its description */ 3900 3901 ven_id = hw->params.pci.vendor_id; 3902 dev_id = hw->params.pci.device_id; 3903 3904 csio_hw_set_description(hw, ven_id, dev_id); 3905 3906 /* Initialize default log level */ 3907 hw->params.log_level = (uint32_t) csio_dbg_level; 3908 3909 csio_set_fwevt_intr_idx(hw, -1); 3910 csio_set_nondata_intr_idx(hw, -1); 3911 3912 /* Init all the modules: Mailbox, WorkRequest and Transport */ 3913 if (csio_mbm_init(csio_hw_to_mbm(hw), hw, csio_hw_mb_timer)) 3914 goto err; 3915 3916 rv = csio_wrm_init(csio_hw_to_wrm(hw), hw); 3917 if (rv) 3918 goto err_mbm_exit; 3919 3920 rv = csio_scsim_init(csio_hw_to_scsim(hw), hw); 3921 if (rv) 3922 goto err_wrm_exit; 3923 3924 rv = csio_mgmtm_init(csio_hw_to_mgmtm(hw), hw); 3925 if (rv) 3926 goto err_scsim_exit; 3927 /* Pre-allocate evtq and initialize them */ 3928 INIT_LIST_HEAD(&hw->evt_active_q); 3929 INIT_LIST_HEAD(&hw->evt_free_q); 3930 for (i = 0; i < csio_evtq_sz; i++) { 3931 3932 evt_entry = kzalloc(sizeof(struct csio_evt_msg), GFP_KERNEL); 3933 if (!evt_entry) { 3934 rv = -ENOMEM; 3935 csio_err(hw, "Failed to initialize eventq"); 3936 goto err_evtq_cleanup; 3937 } 3938 3939 list_add_tail(&evt_entry->list, &hw->evt_free_q); 3940 CSIO_INC_STATS(hw, n_evt_freeq); 3941 } 3942 3943 hw->dev_num = dev_num; 3944 dev_num++; 3945 3946 return 0; 3947 3948 err_evtq_cleanup: 3949 csio_evtq_cleanup(hw); 3950 csio_mgmtm_exit(csio_hw_to_mgmtm(hw)); 3951 err_scsim_exit: 3952 csio_scsim_exit(csio_hw_to_scsim(hw)); 3953 err_wrm_exit: 3954 csio_wrm_exit(csio_hw_to_wrm(hw), hw); 3955 err_mbm_exit: 3956 csio_mbm_exit(csio_hw_to_mbm(hw)); 3957 err: 3958 return rv; 3959 } 3960 3961 /** 3962 * csio_hw_exit - Un-initialize HW module. 3963 * @hw: Pointer to HW module. 3964 * 3965 */ 3966 void 3967 csio_hw_exit(struct csio_hw *hw) 3968 { 3969 csio_evtq_cleanup(hw); 3970 csio_mgmtm_exit(csio_hw_to_mgmtm(hw)); 3971 csio_scsim_exit(csio_hw_to_scsim(hw)); 3972 csio_wrm_exit(csio_hw_to_wrm(hw), hw); 3973 csio_mbm_exit(csio_hw_to_mbm(hw)); 3974 } 3975