/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/firmware.h>
#include <linux/stddef.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/log2.h>

#include "csio_hw.h"
#include "csio_lnode.h"
#include "csio_rnode.h"

int csio_dbg_level = 0xFEFF;
unsigned int csio_port_mask = 0xf;
/* Default FW event queue entries. */
static uint32_t csio_evtq_sz = CSIO_EVTQ_SIZE;

/* Default MSI param level */
int csio_msi = 2;

/* FCoE function instances */
static int dev_num;

/* FCoE Adapter types & its description */
static const struct csio_adap_desc csio_t5_fcoe_adapters[] = {
	{"T580-Dbg 10G", "Chelsio T580-Dbg 10G [FCoE]"},
	{"T520-CR 10G", "Chelsio T520-CR 10G [FCoE]"},
	{"T522-CR 10G/1G", "Chelsio T522-CR 10G/1G [FCoE]"},
	{"T540-CR 10G", "Chelsio T540-CR 10G [FCoE]"},
	{"T520-BCH 10G", "Chelsio T520-BCH 10G [FCoE]"},
	{"T540-BCH 10G", "Chelsio T540-BCH 10G [FCoE]"},
	{"T540-CH 10G", "Chelsio T540-CH 10G [FCoE]"},
	{"T520-SO 10G", "Chelsio T520-SO 10G [FCoE]"},
	{"T520-CX4 10G", "Chelsio T520-CX4 10G [FCoE]"},
	{"T520-BT 10G", "Chelsio T520-BT 10G [FCoE]"},
	{"T504-BT 1G", "Chelsio T504-BT 1G [FCoE]"},
	{"B520-SR 10G", "Chelsio B520-SR 10G [FCoE]"},
	{"B504-BT 1G", "Chelsio B504-BT 1G [FCoE]"},
	{"T580-CR 10G", "Chelsio T580-CR 10G [FCoE]"},
	{"T540-LP-CR 10G", "Chelsio T540-LP-CR 10G [FCoE]"},
	{"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"},
	{"T580-LP-CR 40G", "Chelsio T580-LP-CR 40G [FCoE]"},
	{"T520-LL-CR 10G", "Chelsio T520-LL-CR 10G [FCoE]"},
	{"T560-CR 40G", "Chelsio T560-CR 40G [FCoE]"},
	{"T580-CR 40G", "Chelsio T580-CR 40G [FCoE]"},
	{"T580-SO 40G", "Chelsio T580-SO 40G [FCoE]"},
	{"T502-BT 1G", "Chelsio T502-BT 1G [FCoE]"}
};

static void csio_mgmtm_cleanup(struct csio_mgmtm *);
static void csio_hw_mbm_cleanup(struct csio_hw *);

/* State machine forward declarations */
static void csio_hws_uninit(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_configuring(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_initializing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_ready(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_quiescing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_quiesced(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_resetting(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_removing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_pcierr(struct csio_hw *, enum csio_hw_ev);

static void csio_hw_initialize(struct csio_hw *hw);
static void csio_evtq_stop(struct csio_hw *hw);
static void csio_evtq_start(struct csio_hw *hw);

int csio_is_hw_ready(struct csio_hw *hw)
{
	return csio_match_state(hw, csio_hws_ready);
}

int csio_is_hw_removing(struct csio_hw *hw)
{
	return csio_match_state(hw, csio_hws_removing);
}

/*
 * csio_hw_wait_op_done_val - wait until an operation is completed
 * @hw: the HW module
 * @reg: the register to check for completion
 * @mask: a single-bit field within @reg that indicates completion
 * @polarity: the value of the field when the operation is completed
 * @attempts: number of check iterations
 * @delay: delay in usecs between iterations
 * @valp: where to store the value of the register at completion time
 *
 * Wait until an operation is completed by checking a bit in a register
 * up to @attempts times. If @valp is not NULL the value of the register
 * at the time it indicated completion is stored there. Returns 0 if the
 * operation completes and -EAGAIN otherwise.
 */
int
csio_hw_wait_op_done_val(struct csio_hw *hw, int reg, uint32_t mask,
			 int polarity, int attempts, int delay, uint32_t *valp)
{
	uint32_t val;

	while (1) {
		val = csio_rd_reg32(hw, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}

		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}
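/*
 * Illustrative sketch (not part of the driver): the typical use of
 * csio_hw_wait_op_done_val() is to poll a busy bit until it clears, as
 * csio_hw_sf1_read() does for the serial-flash interface later in this
 * file:
 *
 *	ret = csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0,
 *				       SF_ATTEMPTS, 10, NULL);
 *
 * This waits for SF_BUSY_F in SF_OP_A to read back as 0, checking up to
 * SF_ATTEMPTS times with a 10 usec delay between checks.
 */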
/*
 * csio_hw_tp_wr_bits_indirect - set/clear bits in an indirect TP register
 * @hw: the adapter
 * @addr: the indirect TP register address
 * @mask: specifies the field within the register to modify
 * @val: new value for the field
 *
 * Sets a field of an indirect TP register to the given value.
 */
void
csio_hw_tp_wr_bits_indirect(struct csio_hw *hw, unsigned int addr,
			    unsigned int mask, unsigned int val)
{
	csio_wr_reg32(hw, addr, TP_PIO_ADDR_A);
	val |= csio_rd_reg32(hw, TP_PIO_DATA_A) & ~mask;
	csio_wr_reg32(hw, val, TP_PIO_DATA_A);
}

void
csio_set_reg_field(struct csio_hw *hw, uint32_t reg, uint32_t mask,
		   uint32_t value)
{
	uint32_t val = csio_rd_reg32(hw, reg) & ~mask;

	csio_wr_reg32(hw, val | value, reg);
	/* Flush */
	csio_rd_reg32(hw, reg);
}

static int
csio_memory_write(struct csio_hw *hw, int mtype, u32 addr, u32 len, u32 *buf)
{
	return hw->chip_ops->chip_memory_rw(hw, MEMWIN_CSIOSTOR, mtype,
					    addr, len, buf, 0);
}

/*
 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
 */
#define EEPROM_MAX_RD_POLL	40
#define EEPROM_MAX_WR_POLL	6
#define EEPROM_STAT_ADDR	0x7bfc
#define VPD_BASE		0x400
#define VPD_BASE_OLD		0
#define VPD_LEN			1024
#define VPD_INFO_FLD_HDR_SIZE	3

/*
 * csio_hw_seeprom_read - read a serial EEPROM location
 * @hw: hw to read
 * @addr: EEPROM virtual address
 * @data: where to store the read data
 *
 * Read a 32-bit word from a location in serial EEPROM using the card's PCI
 * VPD capability. Note that this function must be called with a virtual
 * address.
 */
static int
csio_hw_seeprom_read(struct csio_hw *hw, uint32_t addr, uint32_t *data)
{
	uint16_t val = 0;
	int attempts = EEPROM_MAX_RD_POLL;
	uint32_t base = hw->params.pci.vpd_cap_addr;

	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	pci_write_config_word(hw->pdev, base + PCI_VPD_ADDR, (uint16_t)addr);

	do {
		udelay(10);
		pci_read_config_word(hw->pdev, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		csio_err(hw, "reading EEPROM address 0x%x failed\n", addr);
		return -EINVAL;
	}

	pci_read_config_dword(hw->pdev, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*(__le32 *)data);

	return 0;
}

/*
 * Partial EEPROM Vital Product Data structure. Includes only the ID and
 * VPD-R sections.
 */
struct t4_vpd_hdr {
	u8  id_tag;
	u8  id_len[2];
	u8  id_data[ID_LEN];
	u8  vpdr_tag;
	u8  vpdr_len[2];
};

/*
 * csio_hw_get_vpd_keyword_val - locate an information field keyword in the VPD
 * @v: pointer to the buffered VPD data structure
 * @kw: the keyword to search for
 *
 * Returns the offset of the information field keyword's value within the
 * VPD buffer, or -EINVAL if the keyword is not found.
 */
static int
csio_hw_get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
{
	int32_t i;
	int32_t offset, len;
	const uint8_t *buf = &v->id_tag;
	const uint8_t *vpdr_len = &v->vpdr_tag;

	offset = sizeof(struct t4_vpd_hdr);
	len = (uint16_t)vpdr_len[1] + ((uint16_t)vpdr_len[2] << 8);

	if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN)
		return -EINVAL;

	for (i = offset; (i + VPD_INFO_FLD_HDR_SIZE) <= (offset + len);) {
		if (memcmp(buf + i, kw, 2) == 0) {
			i += VPD_INFO_FLD_HDR_SIZE;
			return i;
		}

		i += VPD_INFO_FLD_HDR_SIZE + buf[i + 2];
	}

	return -EINVAL;
}
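/*
 * Layout assumed by csio_hw_get_vpd_keyword_val() above: each VPD-R
 * information field is a 3-byte header (VPD_INFO_FLD_HDR_SIZE) followed
 * by its data, e.g. for a serial number "PE1234" (hypothetical value):
 *
 *	bytes 0-1: keyword      'S' 'N'
 *	byte  2:   data length  6
 *	bytes 3-8: data         'P' 'E' '1' '2' '3' '4'
 *
 * which is why the scan loop advances by VPD_INFO_FLD_HDR_SIZE + buf[i+2]
 * and the returned offset points at the first data byte.
 */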
static int
csio_pci_capability(struct pci_dev *pdev, int cap, int *pos)
{
	*pos = pci_find_capability(pdev, cap);
	if (*pos)
		return 0;

	return -1;
}

/*
 * csio_hw_get_vpd_params - read VPD parameters from VPD EEPROM
 * @hw: HW module
 * @p: where to store the parameters
 *
 * Reads card parameters stored in VPD EEPROM.
 */
static int
csio_hw_get_vpd_params(struct csio_hw *hw, struct csio_vpd *p)
{
	int i, ret, ec, sn, addr;
	uint8_t *vpd, csum;
	const struct t4_vpd_hdr *v;
	/* To get around compilation warning from strstrip */
	char *s;

	if (csio_is_valid_vpd(hw))
		return 0;

	ret = csio_pci_capability(hw->pdev, PCI_CAP_ID_VPD,
				  &hw->params.pci.vpd_cap_addr);
	if (ret)
		return -EINVAL;

	vpd = kzalloc(VPD_LEN, GFP_ATOMIC);
	if (vpd == NULL)
		return -ENOMEM;

	/*
	 * Card information normally starts at VPD_BASE but early cards had
	 * it at 0.
	 */
	ret = csio_hw_seeprom_read(hw, VPD_BASE, (uint32_t *)(vpd));
	if (ret) {
		kfree(vpd);
		return ret;
	}
	addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;

	for (i = 0; i < VPD_LEN; i += 4) {
		ret = csio_hw_seeprom_read(hw, addr + i, (uint32_t *)(vpd + i));
		if (ret) {
			kfree(vpd);
			return ret;
		}
	}

	/* Reset the VPD flag! */
	hw->flags &= (~CSIO_HWF_VPD_VALID);

	v = (const struct t4_vpd_hdr *)vpd;

#define FIND_VPD_KW(var, name) do { \
	var = csio_hw_get_vpd_keyword_val(v, name); \
	if (var < 0) { \
		csio_err(hw, "missing VPD keyword " name "\n"); \
		kfree(vpd); \
		return -EINVAL; \
	} \
} while (0)

	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		csio_err(hw, "corrupted VPD EEPROM, actual csum %u\n", csum);
		kfree(vpd);
		return -EINVAL;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
#undef FIND_VPD_KW

	memcpy(p->id, v->id_data, ID_LEN);
	s = strstrip(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	s = strstrip(p->ec);
	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	s = strstrip(p->sn);

	csio_valid_vpd_copied(hw);

	kfree(vpd);
	return 0;
}
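/*
 * Illustrative sketch (not part of the driver): the SF1 helpers below are
 * chained via their @cont argument to form multi-step serial-flash
 * transactions, with @lock keeping the flash arbitrated to PL for the
 * duration of the chain. For example, csio_hw_get_flash_params() later in
 * this file reads the flash ID by issuing the command with @cont set and
 * then reading the reply:
 *
 *	ret = csio_hw_sf1_write(hw, 1, 1, 0, SF_RD_ID);
 *	if (!ret)
 *		ret = csio_hw_sf1_read(hw, 3, 0, 1, &info);
 *	csio_wr_reg32(hw, 0, SF_OP_A);		(unlock SF)
 */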
/*
 * csio_hw_sf1_read - read data from the serial flash
 * @hw: the HW module
 * @byte_cnt: number of bytes to read
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @valp: where to store the read data
 *
 * Reads up to 4 bytes of data from the serial flash. The location of
 * the read needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int
csio_hw_sf1_read(struct csio_hw *hw, uint32_t byte_cnt, int32_t cont,
		 int32_t lock, uint32_t *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (csio_rd_reg32(hw, SF_OP_A) & SF_BUSY_F)
		return -EBUSY;

	csio_wr_reg32(hw, SF_LOCK_V(lock) | SF_CONT_V(cont) |
		      BYTECNT_V(byte_cnt - 1), SF_OP_A);
	ret = csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS,
				       10, NULL);
	if (!ret)
		*valp = csio_rd_reg32(hw, SF_DATA_A);
	return ret;
}

/*
 * csio_hw_sf1_write - write data to the serial flash
 * @hw: the HW module
 * @byte_cnt: number of bytes to write
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @val: value to write
 *
 * Writes up to 4 bytes of data to the serial flash. The location of
 * the write needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int
csio_hw_sf1_write(struct csio_hw *hw, uint32_t byte_cnt, uint32_t cont,
		  int32_t lock, uint32_t val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (csio_rd_reg32(hw, SF_OP_A) & SF_BUSY_F)
		return -EBUSY;

	csio_wr_reg32(hw, val, SF_DATA_A);
	csio_wr_reg32(hw, SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) |
		      OP_V(1) | SF_LOCK_V(lock), SF_OP_A);

	return csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS,
					10, NULL);
}

/*
 * csio_hw_flash_wait_op - wait for a flash operation to complete
 * @hw: the HW module
 * @attempts: max number of polls of the status register
 * @delay: delay between polls in ms
 *
 * Wait for a flash operation to complete by polling the status register.
 */
static int
csio_hw_flash_wait_op(struct csio_hw *hw, int32_t attempts, int32_t delay)
{
	int ret;
	uint32_t status;

	while (1) {
		ret = csio_hw_sf1_write(hw, 1, 1, 1, SF_RD_STATUS);
		if (ret != 0)
			return ret;

		ret = csio_hw_sf1_read(hw, 1, 0, 1, &status);
		if (ret != 0)
			return ret;

		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}
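/*
 * Rough timing budgets used with csio_hw_flash_wait_op() elsewhere in this
 * file: page programming polls up to 8 times at 1 ms
 * (csio_hw_write_flash), while a sector erase polls up to 14 times at
 * 500 ms (csio_hw_flash_erase_sectors), i.e. budgets of roughly 8 ms and
 * 7 s respectively.
 */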
/*
 * csio_hw_read_flash - read words from serial flash
 * @hw: the HW module
 * @addr: the start address for the read
 * @nwords: how many 32-bit words to read
 * @data: where to store the read data
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Read the specified number of 32-bit words from the serial flash.
 * If @byte_oriented is set the read data is stored as a byte array
 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * natural endianness.
 */
static int
csio_hw_read_flash(struct csio_hw *hw, uint32_t addr, uint32_t nwords,
		   uint32_t *data, int32_t byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(uint32_t) > hw->params.sf_size || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	ret = csio_hw_sf1_write(hw, 4, 1, 0, addr);
	if (ret != 0)
		return ret;

	ret = csio_hw_sf1_read(hw, 1, 1, 0, data);
	if (ret != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = csio_hw_sf1_read(hw, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			csio_wr_reg32(hw, 0, SF_OP_A);	/* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = (__force __u32) htonl(*data);
	}
	return 0;
}

/*
 * csio_hw_write_flash - write up to a page of data to the serial flash
 * @hw: the hw
 * @addr: the start address to write
 * @n: length of data to write in bytes
 * @data: the data to write
 *
 * Writes up to a page of data (256 bytes) to the serial flash starting
 * at the given address. All the data must be written to the same page.
 */
static int
csio_hw_write_flash(struct csio_hw *hw, uint32_t addr,
		    uint32_t n, const uint8_t *data)
{
	int ret = -EINVAL;
	uint32_t buf[64];
	uint32_t i, c, left, val, offset = addr & 0xff;

	if (addr >= hw->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE);
	if (ret != 0)
		goto unlock;

	ret = csio_hw_sf1_write(hw, 4, 1, 1, val);
	if (ret != 0)
		goto unlock;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = csio_hw_sf1_write(hw, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}

	ret = csio_hw_flash_wait_op(hw, 8, 1);
	if (ret)
		goto unlock;

	csio_wr_reg32(hw, 0, SF_OP_A);	/* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = csio_hw_read_flash(hw, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (uint8_t *)buf + offset, n)) {
		csio_err(hw,
			 "failed to correctly write the flash page at %#x\n",
			 addr);
		return -EINVAL;
	}

	return 0;

unlock:
	csio_wr_reg32(hw, 0, SF_OP_A);	/* unlock SF */
	return ret;
}
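/*
 * Illustrative sketch (not part of the driver): because
 * csio_hw_write_flash() requires all @n bytes to fall within a single
 * SF_PAGE_SIZE page, larger buffers are written one page at a time, as
 * csio_hw_fw_dload() does below (variable names here are illustrative
 * only):
 *
 *	for (addr = start; size; size -= SF_PAGE_SIZE) {
 *		ret = csio_hw_write_flash(hw, addr, SF_PAGE_SIZE, data);
 *		if (ret)
 *			break;
 *		addr += SF_PAGE_SIZE;
 *		data += SF_PAGE_SIZE;
 *	}
 */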
/*
 * csio_hw_flash_erase_sectors - erase a range of flash sectors
 * @hw: the HW module
 * @start: the first sector to erase
 * @end: the last sector to erase
 *
 * Erases the sectors in the given inclusive range.
 */
static int
csio_hw_flash_erase_sectors(struct csio_hw *hw, int32_t start, int32_t end)
{
	int ret = 0;

	while (start <= end) {

		ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE);
		if (ret != 0)
			goto out;

		ret = csio_hw_sf1_write(hw, 4, 0, 1,
					SF_ERASE_SECTOR | (start << 8));
		if (ret != 0)
			goto out;

		ret = csio_hw_flash_wait_op(hw, 14, 500);
		if (ret != 0)
			goto out;

		start++;
	}
out:
	if (ret)
		csio_err(hw, "erase of flash sector %d failed, error %d\n",
			 start, ret);
	csio_wr_reg32(hw, 0, SF_OP_A);	/* unlock SF */
	return ret;
}

static void
csio_hw_print_fw_version(struct csio_hw *hw, char *str)
{
	csio_info(hw, "%s: %u.%u.%u.%u\n", str,
		  FW_HDR_FW_VER_MAJOR_G(hw->fwrev),
		  FW_HDR_FW_VER_MINOR_G(hw->fwrev),
		  FW_HDR_FW_VER_MICRO_G(hw->fwrev),
		  FW_HDR_FW_VER_BUILD_G(hw->fwrev));
}

/*
 * csio_hw_get_fw_version - read the firmware version
 * @hw: HW module
 * @vers: where to place the version
 *
 * Reads the FW version from flash.
 */
static int
csio_hw_get_fw_version(struct csio_hw *hw, uint32_t *vers)
{
	return csio_hw_read_flash(hw, FLASH_FW_START +
				  offsetof(struct fw_hdr, fw_ver), 1,
				  vers, 0);
}

/*
 * csio_hw_get_tp_version - read the TP microcode version
 * @hw: HW module
 * @vers: where to place the version
 *
 * Reads the TP microcode version from flash.
 */
static int
csio_hw_get_tp_version(struct csio_hw *hw, u32 *vers)
{
	return csio_hw_read_flash(hw, FLASH_FW_START +
				  offsetof(struct fw_hdr, tp_microcode_ver), 1,
				  vers, 0);
}

/*
 * csio_hw_fw_dload - download firmware.
 * @hw: HW module
 * @fw_data: firmware image to write.
 * @size: image size
 *
 * Write the supplied firmware image to the card's serial flash.
 */
static int
csio_hw_fw_dload(struct csio_hw *hw, uint8_t *fw_data, uint32_t size)
{
	uint32_t csum;
	int32_t addr;
	int ret;
	uint32_t i;
	uint8_t first_page[SF_PAGE_SIZE];
	const __be32 *p = (const __be32 *)fw_data;
	struct fw_hdr *hdr = (struct fw_hdr *)fw_data;
	uint32_t sf_sec_size;

	if ((!hw->params.sf_size) || (!hw->params.sf_nsec)) {
		csio_err(hw, "Serial Flash data invalid\n");
		return -EINVAL;
	}

	if (!size) {
		csio_err(hw, "FW image has no data\n");
		return -EINVAL;
	}

	if (size & 511) {
		csio_err(hw, "FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}

	if (ntohs(hdr->len512) * 512 != size) {
		csio_err(hw, "FW image size differs from size in FW header\n");
		return -EINVAL;
	}

	if (size > FLASH_FW_MAX_SIZE) {
		csio_err(hw, "FW image too large, max is %u bytes\n",
			 FLASH_FW_MAX_SIZE);
		return -EINVAL;
	}

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		csio_err(hw, "corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	sf_sec_size = hw->params.sf_size / hw->params.sf_nsec;
	i = DIV_ROUND_UP(size, sf_sec_size);	/* # of sectors spanned */

	csio_dbg(hw, "Erasing sectors... start:%d end:%d\n",
		 FLASH_FW_START_SEC, FLASH_FW_START_SEC + i - 1);

	ret = csio_hw_flash_erase_sectors(hw, FLASH_FW_START_SEC,
					  FLASH_FW_START_SEC + i - 1);
	if (ret) {
		csio_err(hw, "Flash Erase failed\n");
		goto out;
	}

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails. Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = csio_hw_write_flash(hw, FLASH_FW_START, SF_PAGE_SIZE, first_page);
	if (ret)
		goto out;

	csio_dbg(hw, "Writing Flash .. start:%d end:%d\n",
		 FW_IMG_START, FW_IMG_START + size);

	addr = FLASH_FW_START;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = csio_hw_write_flash(hw, addr, SF_PAGE_SIZE, fw_data);
		if (ret)
			goto out;
	}

	ret = csio_hw_write_flash(hw,
				  FLASH_FW_START +
					offsetof(struct fw_hdr, fw_ver),
				  sizeof(hdr->fw_ver),
				  (const uint8_t *)&hdr->fw_ver);

out:
	if (ret)
		csio_err(hw, "firmware download failed, error %d\n", ret);
	return ret;
}
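/*
 * Firmware image integrity check used by csio_hw_fw_dload() above: the
 * 32-bit big-endian words of a valid image sum (mod 2^32) to 0xffffffff,
 * so e.g. an image whose other words sum to 0xfffffff0 must carry
 * 0x0000000f in its checksum word:
 *
 *	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
 *		csum += ntohl(p[i]);	(must end up as 0xffffffff)
 */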
static int
csio_hw_get_flash_params(struct csio_hw *hw)
{
	int ret;
	uint32_t info = 0;

	ret = csio_hw_sf1_write(hw, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = csio_hw_sf1_read(hw, 3, 0, 1, &info);
	csio_wr_reg32(hw, 0, SF_OP_A);	/* unlock SF */
	if (ret != 0)
		return ret;

	if ((info & 0xff) != 0x20)	/* not a Numonix flash */
		return -EINVAL;
	info >>= 16;			/* log2 of size */
	if (info >= 0x14 && info < 0x18)
		hw->params.sf_nsec = 1 << (info - 16);
	else if (info == 0x18)
		hw->params.sf_nsec = 64;
	else
		return -EINVAL;
	hw->params.sf_size = 1 << info;

	return 0;
}

/*****************************************************************************/
/* HW State machine assists                                                  */
/*****************************************************************************/

static int
csio_hw_dev_ready(struct csio_hw *hw)
{
	uint32_t reg;
	int cnt = 6;
	int src_pf;

	while (((reg = csio_rd_reg32(hw, PL_WHOAMI_A)) == 0xFFFFFFFF) &&
	       (--cnt != 0))
		mdelay(100);

	if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK))
		src_pf = SOURCEPF_G(reg);
	else
		src_pf = T6_SOURCEPF_G(reg);

	if ((cnt == 0) && (((int32_t)(src_pf) < 0) ||
			   (src_pf >= CSIO_MAX_PFN))) {
		csio_err(hw, "PL_WHOAMI returned 0x%x, cnt:%d\n", reg, cnt);
		return -EIO;
	}

	hw->pfn = src_pf;

	return 0;
}
/*
 * csio_do_hello - Perform the HELLO FW Mailbox command and process response.
 * @hw: HW module
 * @state: Device state
 *
 * FW_HELLO_CMD has to be polled for completion.
 */
static int
csio_do_hello(struct csio_hw *hw, enum csio_dev_state *state)
{
	struct csio_mb *mbp;
	int rv = 0;
	enum fw_retval retval;
	uint8_t mpfn;
	char state_str[16];
	int retries = FW_CMD_HELLO_RETRIES;

	memset(state_str, 0, sizeof(state_str));

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		rv = -ENOMEM;
		CSIO_INC_STATS(hw, n_err_nomem);
		goto out;
	}

retry:
	csio_mb_hello(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn,
		      hw->pfn, CSIO_MASTER_MAY, NULL);

	rv = csio_mb_issue(hw, mbp);
	if (rv) {
		csio_err(hw, "failed to issue HELLO cmd. ret:%d.\n", rv);
		goto out_free_mb;
	}

	csio_mb_process_hello_rsp(hw, mbp, &retval, state, &mpfn);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "HELLO cmd failed with ret: %d\n", retval);
		rv = -EINVAL;
		goto out_free_mb;
	}

	/* Firmware has designated us to be master */
	if (hw->pfn == mpfn) {
		hw->flags |= CSIO_HWF_MASTER;
	} else if (*state == CSIO_DEV_STATE_UNINIT) {
		/*
		 * If we're not the Master PF then we need to wait around for
		 * the Master PF Driver to finish setting up the adapter.
		 *
		 * Note that we also do this wait if we're a non-Master-capable
		 * PF and there is no current Master PF; a Master PF may show up
		 * momentarily and we wouldn't want to fail pointlessly. (This
		 * can happen when an OS loads lots of different drivers rapidly
		 * at the same time). In this case, the Master PF returned by
		 * the firmware will be PCIE_FW_MASTER_M so the test below
		 * will work ...
		 */

		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state. If we see either of these we bail out
		 * and report the issue to the caller. If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again. Otherwise bail with a timeout error.
		 */
		for (;;) {
			uint32_t pcie_fw;

			spin_unlock_irq(&hw->lock);
			msleep(50);
			spin_lock_irq(&hw->lock);
			waiting -= 50;

			/*
			 * If neither Error nor Initialized are indicated
			 * by the firmware keep waiting till we exhaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = csio_rd_reg32(hw, PCIE_FW_A);
			if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					rv = -ETIMEDOUT;
					break;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition;
			 * report errors preferentially.
			 */
			if (state) {
				if (pcie_fw & PCIE_FW_ERR_F) {
					*state = CSIO_DEV_STATE_ERR;
					rv = -ETIMEDOUT;
				} else if (pcie_fw & PCIE_FW_INIT_F)
					*state = CSIO_DEV_STATE_INIT;
			}

			/*
			 * If we arrived before a Master PF was selected and
			 * the firmware has since designated one, grab its
			 * identity for our caller.
			 */
			if (mpfn == PCIE_FW_MASTER_M &&
			    (pcie_fw & PCIE_FW_MASTER_VLD_F))
				mpfn = PCIE_FW_MASTER_G(pcie_fw);
			break;
		}
		hw->flags &= ~CSIO_HWF_MASTER;
	}

	switch (*state) {
	case CSIO_DEV_STATE_UNINIT:
		strcpy(state_str, "Initializing");
		break;
	case CSIO_DEV_STATE_INIT:
		strcpy(state_str, "Initialized");
		break;
	case CSIO_DEV_STATE_ERR:
		strcpy(state_str, "Error");
		break;
	default:
		strcpy(state_str, "Unknown");
		break;
	}

	if (hw->pfn == mpfn)
		csio_info(hw, "PF: %d, Coming up as MASTER, HW state: %s\n",
			  hw->pfn, state_str);
	else
		csio_info(hw,
			  "PF: %d, Coming up as SLAVE, Master PF: %d, HW state: %s\n",
			  hw->pfn, mpfn, state_str);

out_free_mb:
	mempool_free(mbp, hw->mb_mempool);
out:
	return rv;
}
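/*
 * Note: csio_do_hello() is called with hw->lock held (it drops and
 * re-takes the lock around its sleeps), e.g. from csio_hw_configure()
 * later in this file:
 *
 *	rv = csio_do_hello(hw, &hw->fw_state);
 *
 * On return, CSIO_HWF_MASTER in hw->flags reflects whether this PF was
 * elected master.
 */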
/*
 * csio_do_bye - Perform the BYE FW Mailbox command and process response.
 * @hw: HW module
 *
 */
static int
csio_do_bye(struct csio_hw *hw)
{
	struct csio_mb *mbp;
	enum fw_retval retval;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	csio_mb_bye(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of BYE command failed\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	mempool_free(mbp, hw->mb_mempool);

	return 0;
}

/*
 * csio_do_reset - Perform the device reset.
 * @hw: HW module
 * @fw_rst: FW reset
 *
 * If @fw_rst is set, issues the FW RESET mailbox command; otherwise
 * performs a PIO reset. Resets the function.
 */
static int
csio_do_reset(struct csio_hw *hw, bool fw_rst)
{
	struct csio_mb *mbp;
	enum fw_retval retval;

	if (!fw_rst) {
		/* PIO reset */
		csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
		mdelay(2000);
		return 0;
	}

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
		      PIORSTMODE_F | PIORST_F, 0, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of RESET command failed.\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "RESET cmd failed with ret:0x%x.\n", retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	mempool_free(mbp, hw->mb_mempool);

	return 0;
}
static int
csio_hw_validate_caps(struct csio_hw *hw, struct csio_mb *mbp)
{
	struct fw_caps_config_cmd *rsp = (struct fw_caps_config_cmd *)mbp->mb;
	uint16_t caps;

	caps = ntohs(rsp->fcoecaps);

	if (!(caps & FW_CAPS_CONFIG_FCOE_INITIATOR)) {
		csio_err(hw, "No FCoE Initiator capability in the firmware.\n");
		return -EINVAL;
	}

	if (!(caps & FW_CAPS_CONFIG_FCOE_CTRL_OFLD)) {
		csio_err(hw, "No FCoE Control Offload capability\n");
		return -EINVAL;
	}

	return 0;
}
/*
 * csio_hw_fw_halt - issue a reset/halt to FW and put uP into RESET
 * @hw: the HW module
 * @mbox: mailbox to use for the FW RESET command (if desired)
 * @force: force uP into RESET even if FW RESET command fails
 *
 * Issues a RESET command to firmware (if desired) with a HALT indication
 * and then puts the microprocessor into RESET state. The RESET command
 * will only be issued if a legitimate mailbox is provided (mbox <=
 * PCIE_FW_MASTER_M).
 *
 * This is generally used in order for the host to safely manipulate the
 * adapter without fear of conflicting with whatever the firmware might
 * be doing. The only way out of this state is to RESTART the firmware
 * ...
 */
static int
csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force)
{
	enum fw_retval retval = 0;

	/*
	 * If a legitimate mailbox is provided, issue a RESET command
	 * with a HALT indication.
	 */
	if (mbox <= PCIE_FW_MASTER_M) {
		struct csio_mb *mbp;

		mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
		if (!mbp) {
			CSIO_INC_STATS(hw, n_err_nomem);
			return -ENOMEM;
		}

		csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
			      PIORSTMODE_F | PIORST_F, FW_RESET_CMD_HALT_F,
			      NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "Issue of RESET command failed!\n");
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		retval = csio_mb_fw_retval(mbp);
		mempool_free(mbp, hw->mb_mempool);
	}

	/*
	 * Normally we won't complete the operation if the firmware RESET
	 * command fails but if our caller insists we'll go ahead and put the
	 * uP into RESET. This can be useful if the firmware is hung or even
	 * missing ... We'll have to take the risk of putting the uP into
	 * RESET without the cooperation of firmware in that case.
	 *
	 * We also force the firmware's HALT flag to be on in case we bypassed
	 * the firmware RESET command above or we're dealing with old firmware
	 * which doesn't have the HALT capability. This will serve as a flag
	 * for the incoming firmware to know that it's coming out of a HALT
	 * rather than a RESET ... if it's new enough to understand that ...
	 */
	if (retval == 0 || force) {
		csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
		csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F,
				   PCIE_FW_HALT_F);
	}

	/*
	 * And we always return the result of the firmware RESET command
	 * even when we force the uP into RESET ...
	 */
	return retval ? -EINVAL : 0;
}

/*
 * csio_hw_fw_restart - restart the firmware by taking the uP out of RESET
 * @hw: the HW module
 * @mbox: mailbox through which the firmware RESET may be issued
 * @reset: if we want to do a RESET to restart things
 *
 * Restart firmware previously halted by csio_hw_fw_halt(). On successful
 * return the previous PF Master remains as the new PF Master and there
 * is no need to issue a new HELLO command, etc.
 *
 * We do this in two ways:
 *
 * 1. If we're dealing with newer firmware we'll simply want to take
 *    the chip's microprocessor out of RESET. This will cause the
 *    firmware to start up from its start vector. And then we'll loop
 *    until the firmware indicates it's started again (PCIE_FW.HALT
 *    reset to 0) or we timeout.
 *
 * 2. If we're dealing with older firmware then we'll need to RESET
 *    the chip since older firmware won't recognize the PCIE_FW.HALT
 *    flag and automatically RESET itself on startup.
 */
static int
csio_hw_fw_restart(struct csio_hw *hw, uint32_t mbox, int32_t reset)
{
	if (reset) {
		/*
		 * Since we're directing the RESET instead of the firmware
		 * doing it automatically, we need to clear the PCIE_FW.HALT
		 * bit.
		 */
		csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F, 0);

		/*
		 * If we've been given a valid mailbox, first try to get the
		 * firmware to do the RESET. If that works, great and we can
		 * return success. Otherwise, if we haven't been given a
		 * valid mailbox or the RESET command failed, fall back to
		 * hitting the chip with a hammer.
		 */
		if (mbox <= PCIE_FW_MASTER_M) {
			csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0);
			msleep(100);
			if (csio_do_reset(hw, true) == 0)
				return 0;
		}

		csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
		msleep(2000);
	} else {
		int ms;

		csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0);
		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
			if (!(csio_rd_reg32(hw, PCIE_FW_A) & PCIE_FW_HALT_F))
				return 0;
			msleep(100);
			ms += 100;
		}
		return -ETIMEDOUT;
	}
	return 0;
}

/*
 * csio_hw_fw_upgrade - perform all of the steps necessary to upgrade FW
 * @hw: the HW module
 * @mbox: mailbox to use for the FW RESET command (if desired)
 * @fw_data: the firmware image to write
 * @size: image size
 * @force: force upgrade even if firmware doesn't cooperate
 *
 * Perform all of the steps necessary for upgrading an adapter's
 * firmware image. Normally this requires the cooperation of the
 * existing firmware in order to halt all existing activities
 * but if an invalid mailbox token is passed in we skip that step
 * (though we'll still put the adapter microprocessor into RESET in
 * that case).
 *
 * On successful return the new firmware will have been loaded and
 * the adapter will have been fully RESET losing all previous setup
 * state. On unsuccessful return the adapter may be completely hosed ...
 * positive errno indicates that the adapter is ~probably~ intact, a
 * negative errno indicates that things are looking bad ...
 */
static int
csio_hw_fw_upgrade(struct csio_hw *hw, uint32_t mbox,
		   const u8 *fw_data, uint32_t size, int32_t force)
{
	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
	int reset, ret;

	ret = csio_hw_fw_halt(hw, mbox, force);
	if (ret != 0 && !force)
		return ret;

	ret = csio_hw_fw_dload(hw, (uint8_t *) fw_data, size);
	if (ret != 0)
		return ret;

	/*
	 * Older versions of the firmware don't understand the new
	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
	 * restart. So for newly loaded older firmware we'll have to do the
	 * RESET for it so it starts up on a clean slate. We can tell if
	 * the newly loaded firmware will handle this right by checking
	 * its header flags to see if it advertises the capability.
	 */
	reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
	return csio_hw_fw_restart(hw, mbox, reset);
}
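/*
 * Illustrative call sequence (see csio_hw_prep_fw() below): when a usable
 * firmware image is found on the filesystem, it is installed with the
 * adapter's own mailbox and without forcing:
 *
 *	ret = csio_hw_fw_upgrade(hw, hw->pfn, fw_data, fw_size, 0);
 *
 * which performs the halt -> download -> restart steps above in order.
 */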
/*
 * csio_get_device_params - Get device parameters.
 * @hw: HW module
 *
 */
static int
csio_get_device_params(struct csio_hw *hw)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_mb *mbp;
	enum fw_retval retval;
	u32 param[6];
	int i, j = 0;

	/* Initialize portids to -1 */
	for (i = 0; i < CSIO_MAX_PPORTS; i++)
		hw->pport[i].portid = -1;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get port vec information. */
	param[0] = FW_PARAM_DEV(PORTVEC);

	/* Get Core clock. */
	param[1] = FW_PARAM_DEV(CCLK);

	/* Get EQ id start and end. */
	param[2] = FW_PARAM_PFVF(EQ_START);
	param[3] = FW_PARAM_PFVF(EQ_END);

	/* Get IQ id start and end. */
	param[4] = FW_PARAM_PFVF(IQFLINT_START);
	param[5] = FW_PARAM_PFVF(IQFLINT_END);

	csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
		       ARRAY_SIZE(param), param, NULL, false, NULL);
	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	csio_mb_process_read_params_rsp(hw, mbp, &retval,
					ARRAY_SIZE(param), param);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",
			 retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	/* Cache the information. */
	hw->port_vec = param[0];
	hw->vpd.cclk = param[1];
	wrm->fw_eq_start = param[2];
	wrm->fw_iq_start = param[4];

	/* Using FW configured max iqs & eqs */
	if ((hw->flags & CSIO_HWF_USING_SOFT_PARAMS) ||
	    !csio_is_hw_master(hw)) {
		hw->cfg_niq = param[5] - param[4] + 1;
		hw->cfg_neq = param[3] - param[2] + 1;
		csio_dbg(hw, "Using fwconfig max niqs %d neqs %d\n",
			 hw->cfg_niq, hw->cfg_neq);
	}

	hw->port_vec &= csio_port_mask;

	hw->num_pports = hweight32(hw->port_vec);

	csio_dbg(hw, "Port vector: 0x%x, #ports: %d\n",
		 hw->port_vec, hw->num_pports);

	for (i = 0; i < hw->num_pports; i++) {
		while ((hw->port_vec & (1 << j)) == 0)
			j++;
		hw->pport[i].portid = j++;
		csio_dbg(hw, "Found Port:%d\n", hw->pport[i].portid);
	}
	mempool_free(mbp, hw->mb_mempool);

	return 0;
}
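/*
 * For reference, the param[] slots queried by csio_get_device_params()
 * above map as follows:
 *
 *	param[0]  PORTVEC        -> hw->port_vec
 *	param[1]  CCLK           -> hw->vpd.cclk
 *	param[2]  EQ_START       -> wrm->fw_eq_start
 *	param[3]  EQ_END
 *	param[4]  IQFLINT_START  -> wrm->fw_iq_start
 *	param[5]  IQFLINT_END
 *
 * so cfg_niq = param[5] - param[4] + 1 and
 * cfg_neq = param[3] - param[2] + 1.
 */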
/*
 * csio_config_device_caps - Get and set device capabilities.
 * @hw: HW module
 *
 */
static int
csio_config_device_caps(struct csio_hw *hw)
{
	struct csio_mb *mbp;
	enum fw_retval retval;
	int rv = -EINVAL;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get device capabilities */
	csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, 0, 0, 0, 0, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(r) failed!\n");
		goto out;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_CAPS_CONFIG_CMD(r) returned %d!\n", retval);
		goto out;
	}

	/* Validate device capabilities */
	rv = csio_hw_validate_caps(hw, mbp);
	if (rv != 0)
		goto out;

	/* Don't config device capabilities if already configured */
	if (hw->fw_state == CSIO_DEV_STATE_INIT) {
		rv = 0;
		goto out;
	}

	/* Write back desired device capabilities */
	csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, true, true,
			    false, true, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(w) failed!\n");
		goto out;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_CAPS_CONFIG_CMD(w) returned %d!\n", retval);
		goto out;
	}

	rv = 0;
out:
	mempool_free(mbp, hw->mb_mempool);
	return rv;
}
/*
 * csio_enable_ports - Bring up all available ports.
 * @hw: HW module.
 *
 */
static int
csio_enable_ports(struct csio_hw *hw)
{
	struct csio_mb *mbp;
	enum fw_retval retval;
	uint8_t portid;
	int i;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	for (i = 0; i < hw->num_pports; i++) {
		portid = hw->pport[i].portid;

		/* Read PORT information */
		csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid,
			     false, 0, 0, NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "failed to issue FW_PORT_CMD(r) port:%d\n",
				 portid);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		csio_mb_process_read_port_rsp(hw, mbp, &retval,
					      &hw->pport[i].pcap);
		if (retval != FW_SUCCESS) {
			csio_err(hw, "FW_PORT_CMD(r) port:%d failed: 0x%x\n",
				 portid, retval);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		/* Write back PORT information */
		csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid, true,
			     (PAUSE_RX | PAUSE_TX), hw->pport[i].pcap, NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "failed to issue FW_PORT_CMD(w) port:%d\n",
				 portid);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		retval = csio_mb_fw_retval(mbp);
		if (retval != FW_SUCCESS) {
			csio_err(hw, "FW_PORT_CMD(w) port:%d failed :0x%x\n",
				 portid, retval);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

	} /* For all ports */

	mempool_free(mbp, hw->mb_mempool);

	return 0;
}

/*
 * csio_get_fcoe_resinfo - Read fcoe fw resource info.
 * @hw: HW module
 * Issued with lock held.
 */
static int
csio_get_fcoe_resinfo(struct csio_hw *hw)
{
	struct csio_fcoe_res_info *res_info = &hw->fres_info;
	struct fw_fcoe_res_info_cmd *rsp;
	struct csio_mb *mbp;
	enum fw_retval retval;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get FCoE FW resource information */
	csio_fcoe_read_res_info_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "failed to issue FW_FCOE_RES_INFO_CMD\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	rsp = (struct fw_fcoe_res_info_cmd *)(mbp->mb);
	retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16));
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_FCOE_RES_INFO_CMD failed with ret x%x\n",
			 retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	res_info->e_d_tov = ntohs(rsp->e_d_tov);
	res_info->r_a_tov_seq = ntohs(rsp->r_a_tov_seq);
	res_info->r_a_tov_els = ntohs(rsp->r_a_tov_els);
	res_info->r_r_tov = ntohs(rsp->r_r_tov);
	res_info->max_xchgs = ntohl(rsp->max_xchgs);
	res_info->max_ssns = ntohl(rsp->max_ssns);
	res_info->used_xchgs = ntohl(rsp->used_xchgs);
	res_info->used_ssns = ntohl(rsp->used_ssns);
	res_info->max_fcfs = ntohl(rsp->max_fcfs);
	res_info->max_vnps = ntohl(rsp->max_vnps);
	res_info->used_fcfs = ntohl(rsp->used_fcfs);
	res_info->used_vnps = ntohl(rsp->used_vnps);

	csio_dbg(hw, "max ssns:%d max xchgs:%d\n", res_info->max_ssns,
		 res_info->max_xchgs);
	mempool_free(mbp, hw->mb_mempool);

	return 0;
}

static int
csio_hw_check_fwconfig(struct csio_hw *hw, u32 *param)
{
	struct csio_mb *mbp;
	enum fw_retval retval;
	u32 _param[1];

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/*
	 * Find out whether we're dealing with a version of
	 * the firmware which has configuration file support.
	 */
	_param[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));

	csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
		       ARRAY_SIZE(_param), _param, NULL, false, NULL);
	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	csio_mb_process_read_params_rsp(hw, mbp, &retval,
					ARRAY_SIZE(_param), _param);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",
			 retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	mempool_free(mbp, hw->mb_mempool);
	*param = _param[0];

	return 0;
}
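/*
 * The parameter returned by csio_hw_check_fwconfig() above packs the
 * config-file location into one 32-bit word: the Y field is the memory
 * type and the Z field is the address in 64 KB units. Callers decode it
 * as csio_hw_flash_config() does:
 *
 *	mtype = FW_PARAMS_PARAM_Y_G(*fw_cfg_param);
 *	maddr = FW_PARAMS_PARAM_Z_G(*fw_cfg_param) << 16;
 */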
static int
csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
{
	int ret = 0;
	const struct firmware *cf;
	struct pci_dev *pci_dev = hw->pdev;
	struct device *dev = &pci_dev->dev;
	unsigned int mtype = 0, maddr = 0;
	uint32_t *cfg_data;
	int value_to_add = 0;
	const char *fw_cfg_file;

	if (csio_is_t5(pci_dev->device & CSIO_HW_CHIP_MASK))
		fw_cfg_file = FW_CFG_NAME_T5;
	else
		fw_cfg_file = FW_CFG_NAME_T6;

	ret = request_firmware(&cf, fw_cfg_file, dev);
	if (ret < 0) {
		csio_err(hw, "could not find config file %s, err: %d\n",
			 fw_cfg_file, ret);
		return -ENOENT;
	}

	if (cf->size % 4 != 0)
		value_to_add = 4 - (cf->size % 4);

	cfg_data = kzalloc(cf->size + value_to_add, GFP_KERNEL);
	if (cfg_data == NULL) {
		ret = -ENOMEM;
		goto leave;
	}

	memcpy((void *)cfg_data, (const void *)cf->data, cf->size);
	if (csio_hw_check_fwconfig(hw, fw_cfg_param) != 0) {
		ret = -EINVAL;
		goto leave;
	}

	mtype = FW_PARAMS_PARAM_Y_G(*fw_cfg_param);
	maddr = FW_PARAMS_PARAM_Z_G(*fw_cfg_param) << 16;

	ret = csio_memory_write(hw, mtype, maddr,
				cf->size + value_to_add, cfg_data);

	if ((ret == 0) && (value_to_add != 0)) {
		union {
			u32 word;
			char buf[4];
		} last;
		size_t size = cf->size & ~0x3;
		int i;

		last.word = cfg_data[size >> 2];
		for (i = value_to_add; i < 4; i++)
			last.buf[i] = 0;
		ret = csio_memory_write(hw, mtype, maddr + size, 4, &last.word);
	}
	if (ret == 0) {
		csio_info(hw, "config file upgraded to %s\n", fw_cfg_file);
		snprintf(path, 64, "%s%s", "/lib/firmware/", fw_cfg_file);
	}

leave:
	kfree(cfg_data);
	release_firmware(cf);
	return ret;
}
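/*
 * Worked example of the padding logic in csio_hw_flash_config() above:
 * the config file is written to adapter memory in 32-bit words, so a
 * 1022-byte file gets value_to_add = 4 - (1022 % 4) = 2 pad bytes, and
 * the final partial word is zero-filled before the trailing 4-byte write.
 */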
/*
 * HW initialization: contact FW, obtain config, perform basic init.
 *
 * If the firmware we're dealing with has Configuration File support, then
 * we use that to perform all configuration -- either using the configuration
 * file stored in flash on the adapter or using a filesystem-local file
 * if available.
 *
 * If we don't have configuration file support in the firmware, then we'll
 * have to set things up the old fashioned way with hard-coded register
 * writes and firmware commands ...
 */

/*
 * Attempt to initialize the HW via a Firmware Configuration File.
 */
static int
csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
{
	struct csio_mb *mbp = NULL;
	struct fw_caps_config_cmd *caps_cmd;
	unsigned int mtype, maddr;
	int rv = -EINVAL;
	uint32_t finiver = 0, finicsum = 0, cfcsum = 0;
	char path[64];
	char *config_name = NULL;

	/*
	 * Reset device if necessary
	 */
	if (reset) {
		rv = csio_do_reset(hw, true);
		if (rv != 0)
			goto bye;
	}

	/*
	 * If we have a configuration file on the host, then use that.
	 * Otherwise, use the configuration file stored in the HW flash ...
	 */
	spin_unlock_irq(&hw->lock);
	rv = csio_hw_flash_config(hw, fw_cfg_param, path);
	spin_lock_irq(&hw->lock);
	if (rv != 0) {
		/*
		 * config file was not found. Use default
		 * config file from flash.
		 */
		config_name = "On FLASH";
		mtype = FW_MEMTYPE_CF_FLASH;
		maddr = hw->chip_ops->chip_flash_cfg_addr(hw);
	} else {
		config_name = path;
		mtype = FW_PARAMS_PARAM_Y_G(*fw_cfg_param);
		maddr = FW_PARAMS_PARAM_Z_G(*fw_cfg_param) << 16;
	}

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/*
	 * Tell the firmware to process the indicated Configuration File.
	 * If there are no errors and the caller has provided return value
	 * pointers for the [fini] section version, checksum and computed
	 * checksum, pass those back to the caller.
	 */
	caps_cmd = (struct fw_caps_config_cmd *)(mbp->mb);
	CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1);
	caps_cmd->op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_READ_F);
	caps_cmd->cfvalid_to_len16 =
		htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
		      FW_LEN16(*caps_cmd));

	if (csio_mb_issue(hw, mbp)) {
		rv = -EINVAL;
		goto bye;
	}

	rv = csio_mb_fw_retval(mbp);
	/* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
	 * Configuration File in FLASH), our last gasp effort is to use the
	 * Firmware Configuration File which is embedded in the
	 * firmware. A very few early versions of the firmware didn't
	 * have one embedded but we can ignore those.
	 */
	if (rv == ENOENT) {
		CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1);
		caps_cmd->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
					      FW_CMD_REQUEST_F |
					      FW_CMD_READ_F);
		caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd));

		if (csio_mb_issue(hw, mbp)) {
			rv = -EINVAL;
			goto bye;
		}

		rv = csio_mb_fw_retval(mbp);
		config_name = "Firmware Default";
	}
	if (rv != FW_SUCCESS)
		goto bye;

	finiver = ntohl(caps_cmd->finiver);
	finicsum = ntohl(caps_cmd->finicsum);
	cfcsum = ntohl(caps_cmd->cfcsum);

	/*
	 * And now tell the firmware to use the configuration we just loaded.
	 */
	caps_cmd->op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_WRITE_F);
	caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd));

	if (csio_mb_issue(hw, mbp)) {
		rv = -EINVAL;
		goto bye;
	}

	rv = csio_mb_fw_retval(mbp);
	if (rv != FW_SUCCESS) {
		csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", rv);
		goto bye;
	}

	if (finicsum != cfcsum) {
		csio_warn(hw,
			  "Config File checksum mismatch: csum=%#x, computed=%#x\n",
			  finicsum, cfcsum);
	}

	/* Validate device capabilities */
	rv = csio_hw_validate_caps(hw, mbp);
	if (rv != 0)
		goto bye;

	mempool_free(mbp, hw->mb_mempool);
	mbp = NULL;

	/*
	 * Note that we're operating with parameters
	 * not supplied by the driver, rather than from hard-wired
	 * initialization constants buried in the driver.
	 */
	hw->flags |= CSIO_HWF_USING_SOFT_PARAMS;

	/* device parameters */
	rv = csio_get_device_params(hw);
	if (rv != 0)
		goto bye;

	/* Configure SGE */
	csio_wr_sge_init(hw);

	/*
	 * And finally tell the firmware to initialize itself using the
	 * parameters from the Configuration File.
	 */
	/* Post event to notify completion of configuration */
	csio_post_event(&hw->sm, CSIO_HWE_INIT);

	csio_info(hw, "Successfully configured using Firmware "
		  "Configuration File %s, version %#x, computed checksum %#x\n",
		  config_name, finiver, cfcsum);
	return 0;

	/*
	 * Something bad happened. Return the error ...
	 */
bye:
	if (mbp)
		mempool_free(mbp, hw->mb_mempool);
	hw->flags &= ~CSIO_HWF_USING_SOFT_PARAMS;
	csio_warn(hw, "Configuration file error %d\n", rv);
	return rv;
}
/* Is the given firmware API compatible with the one the driver was compiled
 * with?
 */
static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
{
	/* short circuit if it's the exact same firmware version */
	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
		return 1;

#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
	    SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
		return 1;
#undef SAME_INTF

	return 0;
}
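/*
 * fw_compatible() above compares the per-interface (microcode ABI)
 * versions rather than the full fw_ver words, so for example (hypothetical
 * versions) a driver compiled against 1.15.33.0 can still drive a card
 * running 1.16.2.0 as long as every intfver_* field matches. The full
 * version word itself unpacks with the
 * FW_HDR_FW_VER_{MAJOR,MINOR,MICRO,BUILD}_G() getters used in the
 * messages below.
 */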
/* The firmware in the filesystem is usable, but should it be installed?
 * This routine explains itself in detail if it indicates the filesystem
 * firmware should be installed.
 */
static int csio_should_install_fs_fw(struct csio_hw *hw, int card_fw_usable,
				     int k, int c)
{
	const char *reason;

	if (!card_fw_usable) {
		reason = "incompatible or unusable";
		goto install;
	}

	if (k > c) {
		reason = "older than the version supported with this driver";
		goto install;
	}

	return 0;

install:
	csio_err(hw, "firmware on card (%u.%u.%u.%u) is %s, "
		 "installing firmware %u.%u.%u.%u on card.\n",
		 FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
		 FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason,
		 FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
		 FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));

	return 1;
}

static struct fw_info fw_info_array[] = {
	{
		.chip = CHELSIO_T5,
		.fs_name = FW_CFG_NAME_T5,
		.fw_mod_name = FW_FNAME_T5,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T5,
			.fw_ver = __cpu_to_be32(FW_VERSION(T5)),
			.intfver_nic = FW_INTFVER(T5, NIC),
			.intfver_vnic = FW_INTFVER(T5, VNIC),
			.intfver_ri = FW_INTFVER(T5, RI),
			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
			.intfver_fcoe = FW_INTFVER(T5, FCOE),
		},
	}, {
		.chip = CHELSIO_T6,
		.fs_name = FW_CFG_NAME_T6,
		.fw_mod_name = FW_FNAME_T6,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T6,
			.fw_ver = __cpu_to_be32(FW_VERSION(T6)),
			.intfver_nic = FW_INTFVER(T6, NIC),
			.intfver_vnic = FW_INTFVER(T6, VNIC),
			.intfver_ri = FW_INTFVER(T6, RI),
			.intfver_iscsi = FW_INTFVER(T6, ISCSI),
			.intfver_fcoe = FW_INTFVER(T6, FCOE),
		},
	}
};

static struct fw_info *find_fw_info(int chip)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
		if (fw_info_array[i].chip == chip)
			return &fw_info_array[i];
	}
	return NULL;
}
		 */
	} else if (fs_fw_usable && state == CSIO_DEV_STATE_UNINIT &&
		   csio_should_install_fs_fw(hw, card_fw_usable,
					     be32_to_cpu(fs_fw->fw_ver),
					     be32_to_cpu(card_fw->fw_ver))) {
		ret = csio_hw_fw_upgrade(hw, hw->pfn, fw_data,
					 fw_size, 0);
		if (ret != 0) {
			csio_err(hw,
				 "failed to install firmware: %d\n", ret);
			goto bye;
		}

		/* Installed successfully, update the cached header too. */
		memcpy(card_fw, fs_fw, sizeof(*card_fw));
		card_fw_usable = 1;
		*reset = 0;	/* already reset as part of load_fw */
	}

	if (!card_fw_usable) {
		uint32_t d, c, k;

		d = be32_to_cpu(drv_fw->fw_ver);
		c = be32_to_cpu(card_fw->fw_ver);
		k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;

		csio_err(hw, "Cannot find a usable firmware: "
			 "chip state %d, "
			 "driver compiled with %d.%d.%d.%d, "
			 "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
			 state,
			 FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d),
			 FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d),
			 FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
			 FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
			 FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
			 FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
		ret = -EINVAL;	/* negative errno, not a bare EINVAL */
		goto bye;
	}

	/* We're using whatever's on the card and it's known to be good. */
	hw->fwrev = be32_to_cpu(card_fw->fw_ver);
	hw->tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);

bye:
	return ret;
}

/*
 * Returns 0 on success; -EINVAL if an attempt to flash the firmware
 * failed; and -ECANCELED if flashing was not attempted because the
 * card already had the latest firmware.
 */
static int
csio_hw_flash_fw(struct csio_hw *hw, int *reset)
{
	int ret;
	const struct firmware *fw;
	struct fw_info *fw_info;
	struct fw_hdr *card_fw;
	struct pci_dev *pci_dev = hw->pdev;
	struct device *dev = &pci_dev->dev;
	const u8 *fw_data = NULL;
	unsigned int fw_size = 0;
	const char *fw_bin_file;

	/* This is the firmware whose headers the driver was compiled
	 * against
	 */
	fw_info = find_fw_info(CHELSIO_CHIP_VERSION(hw->chip_id));
	if (fw_info == NULL) {
		csio_err(hw,
			 "unable to get firmware info for chip %d.\n",
			 CHELSIO_CHIP_VERSION(hw->chip_id));
		return -EINVAL;
	}

	if (csio_is_t5(pci_dev->device & CSIO_HW_CHIP_MASK))
		fw_bin_file = FW_FNAME_T5;
	else
		fw_bin_file = FW_FNAME_T6;

	ret = request_firmware(&fw, fw_bin_file, dev);
	if (ret < 0) {
		/* report the actual request_firmware() error */
		csio_err(hw, "could not find firmware image %s, err: %d\n",
			 fw_bin_file, ret);
	} else {
		fw_data = fw->data;
		fw_size = fw->size;
	}

	/* allocate memory to read the header of the firmware on the
	 * card
	 */
	card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL);
	if (!card_fw) {
		if (fw)
			release_firmware(fw);
		return -ENOMEM;
	}

	/* upgrade FW logic */
	ret = csio_hw_prep_fw(hw, fw_info, fw_data, fw_size, card_fw,
			      hw->fw_state, reset);

	/* Cleaning up */
	if (fw != NULL)
		release_firmware(fw);
	kfree(card_fw);
	return ret;
}

static int csio_hw_check_fwver(struct csio_hw *hw)
{
	if (csio_is_t6(hw->pdev->device & CSIO_HW_CHIP_MASK) &&
	    (hw->fwrev < CSIO_MIN_T6_FW)) {
		csio_hw_print_fw_version(hw, "T6 unsupported fw");
		return -1;
	}

	return 0;
}

/*
 * csio_hw_configure - Configure HW
2084 * @hw - HW module 2085 * 2086 */ 2087 static void 2088 csio_hw_configure(struct csio_hw *hw) 2089 { 2090 int reset = 1; 2091 int rv; 2092 u32 param[1]; 2093 2094 rv = csio_hw_dev_ready(hw); 2095 if (rv != 0) { 2096 CSIO_INC_STATS(hw, n_err_fatal); 2097 csio_post_event(&hw->sm, CSIO_HWE_FATAL); 2098 goto out; 2099 } 2100 2101 /* HW version */ 2102 hw->chip_ver = (char)csio_rd_reg32(hw, PL_REV_A); 2103 2104 /* Needed for FW download */ 2105 rv = csio_hw_get_flash_params(hw); 2106 if (rv != 0) { 2107 csio_err(hw, "Failed to get serial flash params rv:%d\n", rv); 2108 csio_post_event(&hw->sm, CSIO_HWE_FATAL); 2109 goto out; 2110 } 2111 2112 /* Set PCIe completion timeout to 4 seconds */ 2113 if (pci_is_pcie(hw->pdev)) 2114 pcie_capability_clear_and_set_word(hw->pdev, PCI_EXP_DEVCTL2, 2115 PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd); 2116 2117 hw->chip_ops->chip_set_mem_win(hw, MEMWIN_CSIOSTOR); 2118 2119 rv = csio_hw_get_fw_version(hw, &hw->fwrev); 2120 if (rv != 0) 2121 goto out; 2122 2123 csio_hw_print_fw_version(hw, "Firmware revision"); 2124 2125 rv = csio_do_hello(hw, &hw->fw_state); 2126 if (rv != 0) { 2127 CSIO_INC_STATS(hw, n_err_fatal); 2128 csio_post_event(&hw->sm, CSIO_HWE_FATAL); 2129 goto out; 2130 } 2131 2132 /* Read vpd */ 2133 rv = csio_hw_get_vpd_params(hw, &hw->vpd); 2134 if (rv != 0) 2135 goto out; 2136 2137 csio_hw_get_fw_version(hw, &hw->fwrev); 2138 csio_hw_get_tp_version(hw, &hw->tp_vers); 2139 if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) { 2140 2141 /* Do firmware update */ 2142 spin_unlock_irq(&hw->lock); 2143 rv = csio_hw_flash_fw(hw, &reset); 2144 spin_lock_irq(&hw->lock); 2145 2146 if (rv != 0) 2147 goto out; 2148 2149 rv = csio_hw_check_fwver(hw); 2150 if (rv < 0) 2151 goto out; 2152 2153 /* If the firmware doesn't support Configuration Files, 2154 * return an error. 2155 */ 2156 rv = csio_hw_check_fwconfig(hw, param); 2157 if (rv != 0) { 2158 csio_info(hw, "Firmware doesn't support " 2159 "Firmware Configuration files\n"); 2160 goto out; 2161 } 2162 2163 /* The firmware provides us with a memory buffer where we can 2164 * load a Configuration File from the host if we want to 2165 * override the Configuration File in flash. 
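		 * csio_hw_use_fwconfig() below handles either source.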
		 */
		rv = csio_hw_use_fwconfig(hw, reset, param);
		if (rv == -ENOENT) {
			csio_info(hw, "Firmware Configuration File "
				  "not found, error %d\n", rv);
			goto out;
		}
		if (rv != 0) {
			csio_info(hw, "Could not initialize "
				  "adapter, error %d\n", rv);
			goto out;
		}

	} else {
		rv = csio_hw_check_fwver(hw);
		if (rv < 0)
			goto out;

		if (hw->fw_state == CSIO_DEV_STATE_INIT) {

			hw->flags |= CSIO_HWF_USING_SOFT_PARAMS;

			/* device parameters */
			rv = csio_get_device_params(hw);
			if (rv != 0)
				goto out;

			/* Get device capabilities */
			rv = csio_config_device_caps(hw);
			if (rv != 0)
				goto out;

			/* Configure SGE */
			csio_wr_sge_init(hw);

			/* Post event to notify completion of configuration */
			csio_post_event(&hw->sm, CSIO_HWE_INIT);
			goto out;
		}
	} /* not master, or FW already initialized */

out:
	return;
}

/*
 * csio_hw_initialize - Initialize HW
 * @hw - HW module
 *
 */
static void
csio_hw_initialize(struct csio_hw *hw)
{
	struct csio_mb *mbp;
	enum fw_retval retval;
	int rv;
	int i;

	if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
		mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
		if (!mbp)
			goto out;

		csio_mb_initialize(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "Issue of FW_INITIALIZE_CMD failed!\n");
			goto free_and_out;
		}

		retval = csio_mb_fw_retval(mbp);
		if (retval != FW_SUCCESS) {
			csio_err(hw, "FW_INITIALIZE_CMD returned 0x%x!\n",
				 retval);
			goto free_and_out;
		}

		mempool_free(mbp, hw->mb_mempool);
	}

	rv = csio_get_fcoe_resinfo(hw);
	if (rv != 0) {
		csio_err(hw, "Failed to read fcoe resource info: %d\n", rv);
		goto out;
	}

	spin_unlock_irq(&hw->lock);
	rv = csio_config_queues(hw);
	spin_lock_irq(&hw->lock);

	if (rv != 0) {
		csio_err(hw, "Config of queues failed!: %d\n", rv);
		goto out;
	}

	for (i = 0; i < hw->num_pports; i++)
		hw->pport[i].mod_type = FW_PORT_MOD_TYPE_NA;

	if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
		rv = csio_enable_ports(hw);
		if (rv != 0) {
			csio_err(hw, "Failed to enable ports: %d\n", rv);
			goto out;
		}
	}

	csio_post_event(&hw->sm, CSIO_HWE_INIT_DONE);
	return;

free_and_out:
	mempool_free(mbp, hw->mb_mempool);
out:
	return;
}

#define PF_INTR_MASK (PFSW_F | PFCIM_F)

/*
 * csio_hw_intr_enable - Enable HW interrupts
 * @hw: Pointer to HW module.
 *
 * Enable interrupts in HW registers.
 */
static void
csio_hw_intr_enable(struct csio_hw *hw)
{
	uint16_t vec = (uint16_t)csio_get_mb_intr_idx(csio_hw_to_mbm(hw));
	u32 pf = 0;
	uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE_A);

	if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK))
		pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
	else
		pf = T6_SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));

	/*
	 * Set aivec for MSI/MSIX. PCIE_PF_CFG.INTXType is set up
	 * by FW, so do nothing for INTX.
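	 * The vector programmed here is the mailbox interrupt index,
	 * i.e. the vector on which async notifications are delivered.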
2304 */ 2305 if (hw->intr_mode == CSIO_IM_MSIX) 2306 csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG_A), 2307 AIVEC_V(AIVEC_M), vec); 2308 else if (hw->intr_mode == CSIO_IM_MSI) 2309 csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG_A), 2310 AIVEC_V(AIVEC_M), 0); 2311 2312 csio_wr_reg32(hw, PF_INTR_MASK, MYPF_REG(PL_PF_INT_ENABLE_A)); 2313 2314 /* Turn on MB interrupts - this will internally flush PIO as well */ 2315 csio_mb_intr_enable(hw); 2316 2317 /* These are common registers - only a master can modify them */ 2318 if (csio_is_hw_master(hw)) { 2319 /* 2320 * Disable the Serial FLASH interrupt, if enabled! 2321 */ 2322 pl &= (~SF_F); 2323 csio_wr_reg32(hw, pl, PL_INT_ENABLE_A); 2324 2325 csio_wr_reg32(hw, ERR_CPL_EXCEED_IQE_SIZE_F | 2326 EGRESS_SIZE_ERR_F | ERR_INVALID_CIDX_INC_F | 2327 ERR_CPL_OPCODE_0_F | ERR_DROPPED_DB_F | 2328 ERR_DATA_CPL_ON_HIGH_QID1_F | 2329 ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F | 2330 ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F | 2331 ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F | 2332 ERR_EGR_CTXT_PRIO_F | INGRESS_SIZE_ERR_F, 2333 SGE_INT_ENABLE3_A); 2334 csio_set_reg_field(hw, PL_INT_MAP0_A, 0, 1 << pf); 2335 } 2336 2337 hw->flags |= CSIO_HWF_HW_INTR_ENABLED; 2338 2339 } 2340 2341 /* 2342 * csio_hw_intr_disable - Disable HW interrupts 2343 * @hw: Pointer to HW module. 2344 * 2345 * Turn off Mailbox and PCI_PF_CFG interrupts. 2346 */ 2347 void 2348 csio_hw_intr_disable(struct csio_hw *hw) 2349 { 2350 u32 pf = 0; 2351 2352 if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK)) 2353 pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A)); 2354 else 2355 pf = T6_SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A)); 2356 2357 if (!(hw->flags & CSIO_HWF_HW_INTR_ENABLED)) 2358 return; 2359 2360 hw->flags &= ~CSIO_HWF_HW_INTR_ENABLED; 2361 2362 csio_wr_reg32(hw, 0, MYPF_REG(PL_PF_INT_ENABLE_A)); 2363 if (csio_is_hw_master(hw)) 2364 csio_set_reg_field(hw, PL_INT_MAP0_A, 1 << pf, 0); 2365 2366 /* Turn off MB interrupts */ 2367 csio_mb_intr_disable(hw); 2368 2369 } 2370 2371 void 2372 csio_hw_fatal_err(struct csio_hw *hw) 2373 { 2374 csio_set_reg_field(hw, SGE_CONTROL_A, GLOBALENABLE_F, 0); 2375 csio_hw_intr_disable(hw); 2376 2377 /* Do not reset HW, we may need FW state for debugging */ 2378 csio_fatal(hw, "HW Fatal error encountered!\n"); 2379 } 2380 2381 /*****************************************************************************/ 2382 /* START: HW SM */ 2383 /*****************************************************************************/ 2384 /* 2385 * csio_hws_uninit - Uninit state 2386 * @hw - HW module 2387 * @evt - Event 2388 * 2389 */ 2390 static void 2391 csio_hws_uninit(struct csio_hw *hw, enum csio_hw_ev evt) 2392 { 2393 hw->prev_evt = hw->cur_evt; 2394 hw->cur_evt = evt; 2395 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2396 2397 switch (evt) { 2398 case CSIO_HWE_CFG: 2399 csio_set_state(&hw->sm, csio_hws_configuring); 2400 csio_hw_configure(hw); 2401 break; 2402 2403 default: 2404 CSIO_INC_STATS(hw, n_evt_unexp); 2405 break; 2406 } 2407 } 2408 2409 /* 2410 * csio_hws_configuring - Configuring state 2411 * @hw - HW module 2412 * @evt - Event 2413 * 2414 */ 2415 static void 2416 csio_hws_configuring(struct csio_hw *hw, enum csio_hw_ev evt) 2417 { 2418 hw->prev_evt = hw->cur_evt; 2419 hw->cur_evt = evt; 2420 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2421 2422 switch (evt) { 2423 case CSIO_HWE_INIT: 2424 csio_set_state(&hw->sm, csio_hws_initializing); 2425 csio_hw_initialize(hw); 2426 break; 2427 2428 case CSIO_HWE_INIT_DONE: 2429 csio_set_state(&hw->sm, csio_hws_ready); 2430 /* Fan out event to all 
lnode SMs */
		csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY);
		break;

	case CSIO_HWE_FATAL:
		csio_set_state(&hw->sm, csio_hws_uninit);
		break;

	case CSIO_HWE_PCI_REMOVE:
		csio_do_bye(hw);
		break;
	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}

/*
 * csio_hws_initializing - Initializing state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_initializing(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_INIT_DONE:
		csio_set_state(&hw->sm, csio_hws_ready);

		/* Fan out event to all lnode SMs */
		csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY);

		/* Enable interrupts */
		csio_hw_intr_enable(hw);
		break;

	case CSIO_HWE_FATAL:
		csio_set_state(&hw->sm, csio_hws_uninit);
		break;

	case CSIO_HWE_PCI_REMOVE:
		csio_do_bye(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}

/*
 * csio_hws_ready - Ready state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_ready(struct csio_hw *hw, enum csio_hw_ev evt)
{
	/* Remember the event */
	hw->evtflag = evt;

	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_HBA_RESET:
	case CSIO_HWE_FW_DLOAD:
	case CSIO_HWE_SUSPEND:
	case CSIO_HWE_PCI_REMOVE:
	case CSIO_HWE_PCIERR_DETECTED:
		csio_set_state(&hw->sm, csio_hws_quiescing);
		/* cleanup all outstanding cmds */
		if (evt == CSIO_HWE_HBA_RESET ||
		    evt == CSIO_HWE_PCIERR_DETECTED)
			csio_scsim_cleanup_io(csio_hw_to_scsim(hw), false);
		else
			csio_scsim_cleanup_io(csio_hw_to_scsim(hw), true);

		csio_hw_intr_disable(hw);
		csio_hw_mbm_cleanup(hw);
		csio_evtq_stop(hw);
		csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWSTOP);
		csio_evtq_flush(hw);
		csio_mgmtm_cleanup(csio_hw_to_mgmtm(hw));
		csio_post_event(&hw->sm, CSIO_HWE_QUIESCED);
		break;

	case CSIO_HWE_FATAL:
		csio_set_state(&hw->sm, csio_hws_uninit);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}

/*
 * csio_hws_quiescing - Quiescing state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_quiescing(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_QUIESCED:
		switch (hw->evtflag) {
		case CSIO_HWE_FW_DLOAD:
			csio_set_state(&hw->sm, csio_hws_resetting);
			/* Download firmware */
			/* Fall through */

		case CSIO_HWE_HBA_RESET:
			csio_set_state(&hw->sm, csio_hws_resetting);
			/* Start reset of the HBA */
			csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWRESET);
			csio_wr_destroy_queues(hw, false);
			csio_do_reset(hw, false);
			csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET_DONE);
			break;

		case CSIO_HWE_PCI_REMOVE:
			csio_set_state(&hw->sm, csio_hws_removing);
			csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREMOVE);
			csio_wr_destroy_queues(hw, true);
			/* Now send the bye command */
			csio_do_bye(hw);
			break;

		case CSIO_HWE_SUSPEND:
			csio_set_state(&hw->sm, csio_hws_quiesced);
			break;

		case CSIO_HWE_PCIERR_DETECTED:
			csio_set_state(&hw->sm, csio_hws_pcierr);
			csio_wr_destroy_queues(hw, false);
			break;

		default:
			CSIO_INC_STATS(hw, n_evt_unexp);
			break;
		}
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}

/*
 * csio_hws_quiesced - Quiesced state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_quiesced(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_RESUME:
		csio_set_state(&hw->sm, csio_hws_configuring);
		csio_hw_configure(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}

/*
 * csio_hws_resetting - HW Resetting state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_resetting(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_HBA_RESET_DONE:
		csio_evtq_start(hw);
		csio_set_state(&hw->sm, csio_hws_configuring);
		csio_hw_configure(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}

/*
 * csio_hws_removing - PCI Hotplug removing state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_removing(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_HBA_RESET:
		if (!csio_is_hw_master(hw))
			break;
		/*
		 * The BYE should have already been issued, so we can't
		 * use the mailbox interface. Hence we use the PL_RST
		 * register directly.
		 */
		csio_err(hw, "Resetting HW and waiting 2 seconds...\n");
		csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
		mdelay(2000);
		break;

	/* Should never receive any new events */
	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}

/*
 * csio_hws_pcierr - PCI Error state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_pcierr(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_PCIERR_SLOT_RESET:
		csio_evtq_start(hw);
		csio_set_state(&hw->sm, csio_hws_configuring);
		csio_hw_configure(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}

/*****************************************************************************/
/* END: HW SM */
/*****************************************************************************/

/*
 * csio_handle_intr_status - table driven interrupt handler
 * @hw: HW instance
 * @reg: the interrupt status register to process
 * @acts: table of interrupt actions
 *
 * A table driven interrupt handler that applies a set of masks to an
 * interrupt status word and performs the corresponding actions if the
 * interrupts described by the mask have occurred. The actions include
 * optionally emitting a warning or alert message. The table is terminated
 * by an entry specifying mask 0. Returns the number of fatal interrupt
 * conditions.
 */
int
csio_handle_intr_status(struct csio_hw *hw, unsigned int reg,
			const struct intr_info *acts)
{
	int fatal = 0;
	unsigned int mask = 0;
	unsigned int status = csio_rd_reg32(hw, reg);

	for ( ; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			csio_fatal(hw, "Fatal %s (0x%x)\n",
				   acts->msg, status & acts->mask);
		} else if (acts->msg)
			csio_info(hw, "%s (0x%x)\n",
				  acts->msg, status & acts->mask);
		mask |= acts->mask;
	}
	status &= mask;
	if (status)	/* clear processed interrupts */
		csio_wr_reg32(hw, status, reg);
	return fatal;
}

/*
 * TP interrupt handler.
 */
static void csio_tp_intr_handler(struct csio_hw *hw)
{
	static struct intr_info tp_intr_info[] = {
		{ 0x3fffffff, "TP parity error", -1, 1 },
		{ FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, TP_INT_CAUSE_A, tp_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * SGE interrupt handler.
 */
static void csio_sge_intr_handler(struct csio_hw *hw)
{
	uint64_t v;

	static struct intr_info sge_intr_info[] = {
		{ ERR_CPL_EXCEED_IQE_SIZE_F,
		  "SGE received CPL exceeding IQE size", -1, 1 },
		{ ERR_INVALID_CIDX_INC_F,
		  "SGE GTS CIDX increment too large", -1, 0 },
		{ ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
		{ ERR_DROPPED_DB_F, "SGE doorbell dropped", -1, 0 },
		{ ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
		{ ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
		  0 },
		{ ERR_ING_CTXT_PRIO_F,
		  "SGE too many priority ingress contexts", -1, 0 },
		{ ERR_EGR_CTXT_PRIO_F,
		  "SGE too many priority egress contexts", -1, 0 },
		{ INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
		{ EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
		{ 0, NULL, 0, 0 }
	};

	v = (uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE1_A) |
	    ((uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE2_A) << 32);
	if (v) {
		csio_fatal(hw, "SGE parity error (%#llx)\n",
			   (unsigned long long)v);
		csio_wr_reg32(hw, (uint32_t)(v & 0xFFFFFFFF),
			      SGE_INT_CAUSE1_A);
		csio_wr_reg32(hw, (uint32_t)(v >> 32), SGE_INT_CAUSE2_A);
	}

	/* Process CAUSE3 exactly once: the handler clears the bits it
	 * services, so a second call would miss them.
	 */
	v |= csio_handle_intr_status(hw, SGE_INT_CAUSE3_A, sge_intr_info);
	if (v != 0)
		csio_hw_fatal_err(hw);
}

#define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
		      OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
#define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
		      IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)

/*
 * CIM interrupt handler.
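 * Checks both the host interrupt cause and the uP access interrupt
 * cause registers; every listed condition is treated as fatal.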
2821 */ 2822 static void csio_cim_intr_handler(struct csio_hw *hw) 2823 { 2824 static struct intr_info cim_intr_info[] = { 2825 { PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 }, 2826 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 }, 2827 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 }, 2828 { MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 }, 2829 { MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 }, 2830 { TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 }, 2831 { TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 }, 2832 { 0, NULL, 0, 0 } 2833 }; 2834 static struct intr_info cim_upintr_info[] = { 2835 { RSVDSPACEINT_F, "CIM reserved space access", -1, 1 }, 2836 { ILLTRANSINT_F, "CIM illegal transaction", -1, 1 }, 2837 { ILLWRINT_F, "CIM illegal write", -1, 1 }, 2838 { ILLRDINT_F, "CIM illegal read", -1, 1 }, 2839 { ILLRDBEINT_F, "CIM illegal read BE", -1, 1 }, 2840 { ILLWRBEINT_F, "CIM illegal write BE", -1, 1 }, 2841 { SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 }, 2842 { SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 }, 2843 { BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 }, 2844 { SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 }, 2845 { SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 }, 2846 { BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 }, 2847 { SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 }, 2848 { SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 }, 2849 { BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 }, 2850 { BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 }, 2851 { SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 }, 2852 { SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 }, 2853 { BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 }, 2854 { BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 }, 2855 { SGLRDPLINT_F, "CIM single read from PL space", -1, 1 }, 2856 { SGLWRPLINT_F, "CIM single write to PL space", -1, 1 }, 2857 { BLKRDPLINT_F, "CIM block read from PL space", -1, 1 }, 2858 { BLKWRPLINT_F, "CIM block write to PL space", -1, 1 }, 2859 { REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 }, 2860 { RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 }, 2861 { TIMEOUTINT_F, "CIM PIF timeout", -1, 1 }, 2862 { TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 }, 2863 { 0, NULL, 0, 0 } 2864 }; 2865 2866 int fat; 2867 2868 fat = csio_handle_intr_status(hw, CIM_HOST_INT_CAUSE_A, 2869 cim_intr_info) + 2870 csio_handle_intr_status(hw, CIM_HOST_UPACC_INT_CAUSE_A, 2871 cim_upintr_info); 2872 if (fat) 2873 csio_hw_fatal_err(hw); 2874 } 2875 2876 /* 2877 * ULP RX interrupt handler. 2878 */ 2879 static void csio_ulprx_intr_handler(struct csio_hw *hw) 2880 { 2881 static struct intr_info ulprx_intr_info[] = { 2882 { 0x1800000, "ULPRX context error", -1, 1 }, 2883 { 0x7fffff, "ULPRX parity error", -1, 1 }, 2884 { 0, NULL, 0, 0 } 2885 }; 2886 2887 if (csio_handle_intr_status(hw, ULP_RX_INT_CAUSE_A, ulprx_intr_info)) 2888 csio_hw_fatal_err(hw); 2889 } 2890 2891 /* 2892 * ULP TX interrupt handler. 
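 * PBL out-of-bounds conditions are only logged; parity errors are
 * treated as fatal.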
2893 */ 2894 static void csio_ulptx_intr_handler(struct csio_hw *hw) 2895 { 2896 static struct intr_info ulptx_intr_info[] = { 2897 { PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1, 2898 0 }, 2899 { PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1, 2900 0 }, 2901 { PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1, 2902 0 }, 2903 { PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1, 2904 0 }, 2905 { 0xfffffff, "ULPTX parity error", -1, 1 }, 2906 { 0, NULL, 0, 0 } 2907 }; 2908 2909 if (csio_handle_intr_status(hw, ULP_TX_INT_CAUSE_A, ulptx_intr_info)) 2910 csio_hw_fatal_err(hw); 2911 } 2912 2913 /* 2914 * PM TX interrupt handler. 2915 */ 2916 static void csio_pmtx_intr_handler(struct csio_hw *hw) 2917 { 2918 static struct intr_info pmtx_intr_info[] = { 2919 { PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 }, 2920 { PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 }, 2921 { PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 }, 2922 { ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 }, 2923 { 0xffffff0, "PMTX framing error", -1, 1 }, 2924 { OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 }, 2925 { DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error", -1, 2926 1 }, 2927 { ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 }, 2928 { PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1}, 2929 { 0, NULL, 0, 0 } 2930 }; 2931 2932 if (csio_handle_intr_status(hw, PM_TX_INT_CAUSE_A, pmtx_intr_info)) 2933 csio_hw_fatal_err(hw); 2934 } 2935 2936 /* 2937 * PM RX interrupt handler. 2938 */ 2939 static void csio_pmrx_intr_handler(struct csio_hw *hw) 2940 { 2941 static struct intr_info pmrx_intr_info[] = { 2942 { ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 }, 2943 { 0x3ffff0, "PMRX framing error", -1, 1 }, 2944 { OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 }, 2945 { DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error", -1, 2946 1 }, 2947 { IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 }, 2948 { PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1}, 2949 { 0, NULL, 0, 0 } 2950 }; 2951 2952 if (csio_handle_intr_status(hw, PM_RX_INT_CAUSE_A, pmrx_intr_info)) 2953 csio_hw_fatal_err(hw); 2954 } 2955 2956 /* 2957 * CPL switch interrupt handler. 2958 */ 2959 static void csio_cplsw_intr_handler(struct csio_hw *hw) 2960 { 2961 static struct intr_info cplsw_intr_info[] = { 2962 { CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 }, 2963 { CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 }, 2964 { TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 }, 2965 { SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 }, 2966 { CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 }, 2967 { ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 }, 2968 { 0, NULL, 0, 0 } 2969 }; 2970 2971 if (csio_handle_intr_status(hw, CPL_INTR_CAUSE_A, cplsw_intr_info)) 2972 csio_hw_fatal_err(hw); 2973 } 2974 2975 /* 2976 * LE interrupt handler. 
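 * T5 and T6 lay out the LE_DB_INT_CAUSE bits differently, so the
 * cause table is chosen by chip revision below.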
2977 */ 2978 static void csio_le_intr_handler(struct csio_hw *hw) 2979 { 2980 enum chip_type chip = CHELSIO_CHIP_VERSION(hw->chip_id); 2981 2982 static struct intr_info le_intr_info[] = { 2983 { LIPMISS_F, "LE LIP miss", -1, 0 }, 2984 { LIP0_F, "LE 0 LIP error", -1, 0 }, 2985 { PARITYERR_F, "LE parity error", -1, 1 }, 2986 { UNKNOWNCMD_F, "LE unknown command", -1, 1 }, 2987 { REQQPARERR_F, "LE request queue parity error", -1, 1 }, 2988 { 0, NULL, 0, 0 } 2989 }; 2990 2991 static struct intr_info t6_le_intr_info[] = { 2992 { T6_LIPMISS_F, "LE LIP miss", -1, 0 }, 2993 { T6_LIP0_F, "LE 0 LIP error", -1, 0 }, 2994 { TCAMINTPERR_F, "LE parity error", -1, 1 }, 2995 { T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 }, 2996 { SSRAMINTPERR_F, "LE request queue parity error", -1, 1 }, 2997 { 0, NULL, 0, 0 } 2998 }; 2999 3000 if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE_A, 3001 (chip == CHELSIO_T5) ? 3002 le_intr_info : t6_le_intr_info)) 3003 csio_hw_fatal_err(hw); 3004 } 3005 3006 /* 3007 * MPS interrupt handler. 3008 */ 3009 static void csio_mps_intr_handler(struct csio_hw *hw) 3010 { 3011 static struct intr_info mps_rx_intr_info[] = { 3012 { 0xffffff, "MPS Rx parity error", -1, 1 }, 3013 { 0, NULL, 0, 0 } 3014 }; 3015 static struct intr_info mps_tx_intr_info[] = { 3016 { TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 }, 3017 { NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 }, 3018 { TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error", 3019 -1, 1 }, 3020 { TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error", 3021 -1, 1 }, 3022 { BUBBLE_F, "MPS Tx underflow", -1, 1 }, 3023 { SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 }, 3024 { FRMERR_F, "MPS Tx framing error", -1, 1 }, 3025 { 0, NULL, 0, 0 } 3026 }; 3027 static struct intr_info mps_trc_intr_info[] = { 3028 { FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 }, 3029 { PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error", 3030 -1, 1 }, 3031 { MISCPERR_F, "MPS TRC misc parity error", -1, 1 }, 3032 { 0, NULL, 0, 0 } 3033 }; 3034 static struct intr_info mps_stat_sram_intr_info[] = { 3035 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 }, 3036 { 0, NULL, 0, 0 } 3037 }; 3038 static struct intr_info mps_stat_tx_intr_info[] = { 3039 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 }, 3040 { 0, NULL, 0, 0 } 3041 }; 3042 static struct intr_info mps_stat_rx_intr_info[] = { 3043 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 }, 3044 { 0, NULL, 0, 0 } 3045 }; 3046 static struct intr_info mps_cls_intr_info[] = { 3047 { MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 }, 3048 { MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 }, 3049 { HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 }, 3050 { 0, NULL, 0, 0 } 3051 }; 3052 3053 int fat; 3054 3055 fat = csio_handle_intr_status(hw, MPS_RX_PERR_INT_CAUSE_A, 3056 mps_rx_intr_info) + 3057 csio_handle_intr_status(hw, MPS_TX_INT_CAUSE_A, 3058 mps_tx_intr_info) + 3059 csio_handle_intr_status(hw, MPS_TRC_INT_CAUSE_A, 3060 mps_trc_intr_info) + 3061 csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_SRAM_A, 3062 mps_stat_sram_intr_info) + 3063 csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A, 3064 mps_stat_tx_intr_info) + 3065 csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A, 3066 mps_stat_rx_intr_info) + 3067 csio_handle_intr_status(hw, MPS_CLS_INT_CAUSE_A, 3068 mps_cls_intr_info); 3069 3070 csio_wr_reg32(hw, 0, MPS_INT_CAUSE_A); 3071 csio_rd_reg32(hw, MPS_INT_CAUSE_A); /* flush */ 3072 if (fat) 3073 csio_hw_fatal_err(hw); 3074 } 3075 
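/*
 * Summary of the memory interrupt causes the EDC/MC handler below
 * cares about: FIFO parity errors, correctable ECC errors and
 * uncorrectable ECC errors.
 */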
3076 #define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \ 3077 ECC_UE_INT_CAUSE_F) 3078 3079 /* 3080 * EDC/MC interrupt handler. 3081 */ 3082 static void csio_mem_intr_handler(struct csio_hw *hw, int idx) 3083 { 3084 static const char name[3][5] = { "EDC0", "EDC1", "MC" }; 3085 3086 unsigned int addr, cnt_addr, v; 3087 3088 if (idx <= MEM_EDC1) { 3089 addr = EDC_REG(EDC_INT_CAUSE_A, idx); 3090 cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx); 3091 } else { 3092 addr = MC_INT_CAUSE_A; 3093 cnt_addr = MC_ECC_STATUS_A; 3094 } 3095 3096 v = csio_rd_reg32(hw, addr) & MEM_INT_MASK; 3097 if (v & PERR_INT_CAUSE_F) 3098 csio_fatal(hw, "%s FIFO parity error\n", name[idx]); 3099 if (v & ECC_CE_INT_CAUSE_F) { 3100 uint32_t cnt = ECC_CECNT_G(csio_rd_reg32(hw, cnt_addr)); 3101 3102 csio_wr_reg32(hw, ECC_CECNT_V(ECC_CECNT_M), cnt_addr); 3103 csio_warn(hw, "%u %s correctable ECC data error%s\n", 3104 cnt, name[idx], cnt > 1 ? "s" : ""); 3105 } 3106 if (v & ECC_UE_INT_CAUSE_F) 3107 csio_fatal(hw, "%s uncorrectable ECC data error\n", name[idx]); 3108 3109 csio_wr_reg32(hw, v, addr); 3110 if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F)) 3111 csio_hw_fatal_err(hw); 3112 } 3113 3114 /* 3115 * MA interrupt handler. 3116 */ 3117 static void csio_ma_intr_handler(struct csio_hw *hw) 3118 { 3119 uint32_t v, status = csio_rd_reg32(hw, MA_INT_CAUSE_A); 3120 3121 if (status & MEM_PERR_INT_CAUSE_F) 3122 csio_fatal(hw, "MA parity error, parity status %#x\n", 3123 csio_rd_reg32(hw, MA_PARITY_ERROR_STATUS_A)); 3124 if (status & MEM_WRAP_INT_CAUSE_F) { 3125 v = csio_rd_reg32(hw, MA_INT_WRAP_STATUS_A); 3126 csio_fatal(hw, 3127 "MA address wrap-around error by client %u to address %#x\n", 3128 MEM_WRAP_CLIENT_NUM_G(v), MEM_WRAP_ADDRESS_G(v) << 4); 3129 } 3130 csio_wr_reg32(hw, status, MA_INT_CAUSE_A); 3131 csio_hw_fatal_err(hw); 3132 } 3133 3134 /* 3135 * SMB interrupt handler. 3136 */ 3137 static void csio_smb_intr_handler(struct csio_hw *hw) 3138 { 3139 static struct intr_info smb_intr_info[] = { 3140 { MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 }, 3141 { MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 }, 3142 { SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 }, 3143 { 0, NULL, 0, 0 } 3144 }; 3145 3146 if (csio_handle_intr_status(hw, SMB_INT_CAUSE_A, smb_intr_info)) 3147 csio_hw_fatal_err(hw); 3148 } 3149 3150 /* 3151 * NC-SI interrupt handler. 3152 */ 3153 static void csio_ncsi_intr_handler(struct csio_hw *hw) 3154 { 3155 static struct intr_info ncsi_intr_info[] = { 3156 { CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 }, 3157 { MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 }, 3158 { TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 }, 3159 { RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 }, 3160 { 0, NULL, 0, 0 } 3161 }; 3162 3163 if (csio_handle_intr_status(hw, NCSI_INT_CAUSE_A, ncsi_intr_info)) 3164 csio_hw_fatal_err(hw); 3165 } 3166 3167 /* 3168 * XGMAC interrupt handler. 3169 */ 3170 static void csio_xgmac_intr_handler(struct csio_hw *hw, int port) 3171 { 3172 uint32_t v = csio_rd_reg32(hw, T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A)); 3173 3174 v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F; 3175 if (!v) 3176 return; 3177 3178 if (v & TXFIFO_PRTY_ERR_F) 3179 csio_fatal(hw, "XGMAC %d Tx FIFO parity error\n", port); 3180 if (v & RXFIFO_PRTY_ERR_F) 3181 csio_fatal(hw, "XGMAC %d Rx FIFO parity error\n", port); 3182 csio_wr_reg32(hw, v, T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A)); 3183 csio_hw_fatal_err(hw); 3184 } 3185 3186 /* 3187 * PL interrupt handler. 
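 * Catches top-level fatal parity errors and VFID_MAP parity errors.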
3188 */ 3189 static void csio_pl_intr_handler(struct csio_hw *hw) 3190 { 3191 static struct intr_info pl_intr_info[] = { 3192 { FATALPERR_F, "T4 fatal parity error", -1, 1 }, 3193 { PERRVFID_F, "PL VFID_MAP parity error", -1, 1 }, 3194 { 0, NULL, 0, 0 } 3195 }; 3196 3197 if (csio_handle_intr_status(hw, PL_PL_INT_CAUSE_A, pl_intr_info)) 3198 csio_hw_fatal_err(hw); 3199 } 3200 3201 /* 3202 * csio_hw_slow_intr_handler - control path interrupt handler 3203 * @hw: HW module 3204 * 3205 * Interrupt handler for non-data global interrupt events, e.g., errors. 3206 * The designation 'slow' is because it involves register reads, while 3207 * data interrupts typically don't involve any MMIOs. 3208 */ 3209 int 3210 csio_hw_slow_intr_handler(struct csio_hw *hw) 3211 { 3212 uint32_t cause = csio_rd_reg32(hw, PL_INT_CAUSE_A); 3213 3214 if (!(cause & CSIO_GLBL_INTR_MASK)) { 3215 CSIO_INC_STATS(hw, n_plint_unexp); 3216 return 0; 3217 } 3218 3219 csio_dbg(hw, "Slow interrupt! cause: 0x%x\n", cause); 3220 3221 CSIO_INC_STATS(hw, n_plint_cnt); 3222 3223 if (cause & CIM_F) 3224 csio_cim_intr_handler(hw); 3225 3226 if (cause & MPS_F) 3227 csio_mps_intr_handler(hw); 3228 3229 if (cause & NCSI_F) 3230 csio_ncsi_intr_handler(hw); 3231 3232 if (cause & PL_F) 3233 csio_pl_intr_handler(hw); 3234 3235 if (cause & SMB_F) 3236 csio_smb_intr_handler(hw); 3237 3238 if (cause & XGMAC0_F) 3239 csio_xgmac_intr_handler(hw, 0); 3240 3241 if (cause & XGMAC1_F) 3242 csio_xgmac_intr_handler(hw, 1); 3243 3244 if (cause & XGMAC_KR0_F) 3245 csio_xgmac_intr_handler(hw, 2); 3246 3247 if (cause & XGMAC_KR1_F) 3248 csio_xgmac_intr_handler(hw, 3); 3249 3250 if (cause & PCIE_F) 3251 hw->chip_ops->chip_pcie_intr_handler(hw); 3252 3253 if (cause & MC_F) 3254 csio_mem_intr_handler(hw, MEM_MC); 3255 3256 if (cause & EDC0_F) 3257 csio_mem_intr_handler(hw, MEM_EDC0); 3258 3259 if (cause & EDC1_F) 3260 csio_mem_intr_handler(hw, MEM_EDC1); 3261 3262 if (cause & LE_F) 3263 csio_le_intr_handler(hw); 3264 3265 if (cause & TP_F) 3266 csio_tp_intr_handler(hw); 3267 3268 if (cause & MA_F) 3269 csio_ma_intr_handler(hw); 3270 3271 if (cause & PM_TX_F) 3272 csio_pmtx_intr_handler(hw); 3273 3274 if (cause & PM_RX_F) 3275 csio_pmrx_intr_handler(hw); 3276 3277 if (cause & ULP_RX_F) 3278 csio_ulprx_intr_handler(hw); 3279 3280 if (cause & CPL_SWITCH_F) 3281 csio_cplsw_intr_handler(hw); 3282 3283 if (cause & SGE_F) 3284 csio_sge_intr_handler(hw); 3285 3286 if (cause & ULP_TX_F) 3287 csio_ulptx_intr_handler(hw); 3288 3289 /* Clear the interrupts just processed for which we are the master. */ 3290 csio_wr_reg32(hw, cause & CSIO_GLBL_INTR_MASK, PL_INT_CAUSE_A); 3291 csio_rd_reg32(hw, PL_INT_CAUSE_A); /* flush */ 3292 3293 return 1; 3294 } 3295 3296 /***************************************************************************** 3297 * HW <--> mailbox interfacing routines. 3298 ****************************************************************************/ 3299 /* 3300 * csio_mberr_worker - Worker thread (dpc) for mailbox/error completions 3301 * 3302 * @data: Private data pointer. 3303 * 3304 * Called from worker thread context. 
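 * Drains completed mailboxes to a local list, kicks off the next
 * queued mailbox request, and only then runs the completion
 * callbacks, so the callbacks never execute under hw->lock.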
 */
static void
csio_mberr_worker(void *data)
{
	struct csio_hw *hw = (struct csio_hw *)data;
	struct csio_mbm *mbm = &hw->mbm;
	LIST_HEAD(cbfn_q);
	struct csio_mb *mbp_next;
	int rv;

	del_timer_sync(&mbm->timer);

	spin_lock_irq(&hw->lock);
	if (list_empty(&mbm->cbfn_q)) {
		spin_unlock_irq(&hw->lock);
		return;
	}

	list_splice_tail_init(&mbm->cbfn_q, &cbfn_q);
	mbm->stats.n_cbfnq = 0;

	/* Try to start waiting mailboxes */
	if (!list_empty(&mbm->req_q)) {
		mbp_next = list_first_entry(&mbm->req_q, struct csio_mb, list);
		list_del_init(&mbp_next->list);

		rv = csio_mb_issue(hw, mbp_next);
		if (rv != 0)
			list_add_tail(&mbp_next->list, &mbm->req_q);
		else
			CSIO_DEC_STATS(mbm, n_activeq);
	}
	spin_unlock_irq(&hw->lock);

	/* Now callback completions */
	csio_mb_completions(hw, &cbfn_q);
}

/*
 * csio_hw_mb_timer - Top-level Mailbox timeout handler.
 * @t: Timer whose expiry invoked this handler.
 *
 */
static void
csio_hw_mb_timer(struct timer_list *t)
{
	struct csio_mbm *mbm = from_timer(mbm, t, timer);
	struct csio_hw *hw = mbm->hw;
	struct csio_mb *mbp = NULL;

	spin_lock_irq(&hw->lock);
	mbp = csio_mb_tmo_handler(hw);
	spin_unlock_irq(&hw->lock);

	/* Call back the function for the timed-out Mailbox */
	if (mbp)
		mbp->mb_cbfn(hw, mbp);
}

/*
 * csio_hw_mbm_cleanup - Cleanup Mailbox module.
 * @hw: HW module
 *
 * Called with lock held, should exit with lock held.
 * Cancels outstanding mailboxes (waiting, in-flight) and gathers them
 * into a local queue. Drops the lock to run the completions, then
 * re-acquires it before returning.
3374 */ 3375 static void 3376 csio_hw_mbm_cleanup(struct csio_hw *hw) 3377 { 3378 LIST_HEAD(cbfn_q); 3379 3380 csio_mb_cancel_all(hw, &cbfn_q); 3381 3382 spin_unlock_irq(&hw->lock); 3383 csio_mb_completions(hw, &cbfn_q); 3384 spin_lock_irq(&hw->lock); 3385 } 3386 3387 /***************************************************************************** 3388 * Event handling 3389 ****************************************************************************/ 3390 int 3391 csio_enqueue_evt(struct csio_hw *hw, enum csio_evt type, void *evt_msg, 3392 uint16_t len) 3393 { 3394 struct csio_evt_msg *evt_entry = NULL; 3395 3396 if (type >= CSIO_EVT_MAX) 3397 return -EINVAL; 3398 3399 if (len > CSIO_EVT_MSG_SIZE) 3400 return -EINVAL; 3401 3402 if (hw->flags & CSIO_HWF_FWEVT_STOP) 3403 return -EINVAL; 3404 3405 if (list_empty(&hw->evt_free_q)) { 3406 csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n", 3407 type, len); 3408 return -ENOMEM; 3409 } 3410 3411 evt_entry = list_first_entry(&hw->evt_free_q, 3412 struct csio_evt_msg, list); 3413 list_del_init(&evt_entry->list); 3414 3415 /* copy event msg and queue the event */ 3416 evt_entry->type = type; 3417 memcpy((void *)evt_entry->data, evt_msg, len); 3418 list_add_tail(&evt_entry->list, &hw->evt_active_q); 3419 3420 CSIO_DEC_STATS(hw, n_evt_freeq); 3421 CSIO_INC_STATS(hw, n_evt_activeq); 3422 3423 return 0; 3424 } 3425 3426 static int 3427 csio_enqueue_evt_lock(struct csio_hw *hw, enum csio_evt type, void *evt_msg, 3428 uint16_t len, bool msg_sg) 3429 { 3430 struct csio_evt_msg *evt_entry = NULL; 3431 struct csio_fl_dma_buf *fl_sg; 3432 uint32_t off = 0; 3433 unsigned long flags; 3434 int n, ret = 0; 3435 3436 if (type >= CSIO_EVT_MAX) 3437 return -EINVAL; 3438 3439 if (len > CSIO_EVT_MSG_SIZE) 3440 return -EINVAL; 3441 3442 spin_lock_irqsave(&hw->lock, flags); 3443 if (hw->flags & CSIO_HWF_FWEVT_STOP) { 3444 ret = -EINVAL; 3445 goto out; 3446 } 3447 3448 if (list_empty(&hw->evt_free_q)) { 3449 csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n", 3450 type, len); 3451 ret = -ENOMEM; 3452 goto out; 3453 } 3454 3455 evt_entry = list_first_entry(&hw->evt_free_q, 3456 struct csio_evt_msg, list); 3457 list_del_init(&evt_entry->list); 3458 3459 /* copy event msg and queue the event */ 3460 evt_entry->type = type; 3461 3462 /* If Payload in SG list*/ 3463 if (msg_sg) { 3464 fl_sg = (struct csio_fl_dma_buf *) evt_msg; 3465 for (n = 0; (n < CSIO_MAX_FLBUF_PER_IQWR && off < len); n++) { 3466 memcpy((void *)((uintptr_t)evt_entry->data + off), 3467 fl_sg->flbufs[n].vaddr, 3468 fl_sg->flbufs[n].len); 3469 off += fl_sg->flbufs[n].len; 3470 } 3471 } else 3472 memcpy((void *)evt_entry->data, evt_msg, len); 3473 3474 list_add_tail(&evt_entry->list, &hw->evt_active_q); 3475 CSIO_DEC_STATS(hw, n_evt_freeq); 3476 CSIO_INC_STATS(hw, n_evt_activeq); 3477 out: 3478 spin_unlock_irqrestore(&hw->lock, flags); 3479 return ret; 3480 } 3481 3482 static void 3483 csio_free_evt(struct csio_hw *hw, struct csio_evt_msg *evt_entry) 3484 { 3485 if (evt_entry) { 3486 spin_lock_irq(&hw->lock); 3487 list_del_init(&evt_entry->list); 3488 list_add_tail(&evt_entry->list, &hw->evt_free_q); 3489 CSIO_DEC_STATS(hw, n_evt_activeq); 3490 CSIO_INC_STATS(hw, n_evt_freeq); 3491 spin_unlock_irq(&hw->lock); 3492 } 3493 } 3494 3495 void 3496 csio_evtq_flush(struct csio_hw *hw) 3497 { 3498 uint32_t count; 3499 count = 30; 3500 while (hw->flags & CSIO_HWF_FWEVT_PENDING && count--) { 3501 spin_unlock_irq(&hw->lock); 3502 msleep(2000); 3503 spin_lock_irq(&hw->lock); 3504 } 3505 3506 
	CSIO_DB_ASSERT(!(hw->flags & CSIO_HWF_FWEVT_PENDING));
}

static void
csio_evtq_stop(struct csio_hw *hw)
{
	hw->flags |= CSIO_HWF_FWEVT_STOP;
}

static void
csio_evtq_start(struct csio_hw *hw)
{
	hw->flags &= ~CSIO_HWF_FWEVT_STOP;
}

static void
csio_evtq_cleanup(struct csio_hw *hw)
{
	struct list_head *evt_entry, *next_entry;

	/* Release outstanding events from activeq to freeq */
	if (!list_empty(&hw->evt_active_q))
		list_splice_tail_init(&hw->evt_active_q, &hw->evt_free_q);

	hw->stats.n_evt_activeq = 0;
	hw->flags &= ~CSIO_HWF_FWEVT_PENDING;

	/* Free up the event entries */
	list_for_each_safe(evt_entry, next_entry, &hw->evt_free_q) {
		kfree(evt_entry);
		CSIO_DEC_STATS(hw, n_evt_freeq);
	}

	hw->stats.n_evt_freeq = 0;
}


static void
csio_process_fwevtq_entry(struct csio_hw *hw, void *wr, uint32_t len,
			  struct csio_fl_dma_buf *flb, void *priv)
{
	__u8 op;
	void *msg = NULL;
	uint32_t msg_len = 0;
	bool msg_sg = false;

	op = ((struct rss_header *) wr)->opcode;
	if (op == CPL_FW6_PLD) {
		CSIO_INC_STATS(hw, n_cpl_fw6_pld);
		if (!flb || !flb->totlen) {
			CSIO_INC_STATS(hw, n_cpl_unexp);
			return;
		}

		msg = (void *) flb;
		msg_len = flb->totlen;
		msg_sg = true;
	} else if (op == CPL_FW6_MSG || op == CPL_FW4_MSG) {

		CSIO_INC_STATS(hw, n_cpl_fw6_msg);
		/* skip RSS header */
		msg = (void *)((uintptr_t)wr + sizeof(__be64));
		msg_len = (op == CPL_FW6_MSG) ? sizeof(struct cpl_fw6_msg) :
			   sizeof(struct cpl_fw4_msg);
	} else {
		csio_warn(hw, "unexpected CPL %#x on FW event queue\n", op);
		CSIO_INC_STATS(hw, n_cpl_unexp);
		return;
	}

	/*
	 * Enqueue event to EventQ. Event processing happens
	 * in the event worker thread context.
	 */
	if (csio_enqueue_evt_lock(hw, CSIO_EVT_FW, msg,
				  (uint16_t)msg_len, msg_sg))
		CSIO_INC_STATS(hw, n_evt_drop);
}

void
csio_evtq_worker(struct work_struct *work)
{
	struct csio_hw *hw = container_of(work, struct csio_hw, evtq_work);
	struct list_head *evt_entry, *next_entry;
	LIST_HEAD(evt_q);
	struct csio_evt_msg *evt_msg;
	struct cpl_fw6_msg *msg;
	struct csio_rnode *rn;
	int rv = 0;
	uint8_t evtq_stop = 0;

	csio_dbg(hw, "event worker thread active evts#%d\n",
		 hw->stats.n_evt_activeq);

	spin_lock_irq(&hw->lock);
	while (!list_empty(&hw->evt_active_q)) {
		list_splice_tail_init(&hw->evt_active_q, &evt_q);
		spin_unlock_irq(&hw->lock);

		list_for_each_safe(evt_entry, next_entry, &evt_q) {
			evt_msg = (struct csio_evt_msg *) evt_entry;

			/* Drop events if queue is STOPPED */
			spin_lock_irq(&hw->lock);
			if (hw->flags & CSIO_HWF_FWEVT_STOP)
				evtq_stop = 1;
			spin_unlock_irq(&hw->lock);
			if (evtq_stop) {
				CSIO_INC_STATS(hw, n_evt_drop);
				goto free_evt;
			}

			switch (evt_msg->type) {
			case CSIO_EVT_FW:
				msg = (struct cpl_fw6_msg *)(evt_msg->data);

				if ((msg->opcode == CPL_FW6_MSG ||
				     msg->opcode == CPL_FW4_MSG) &&
				    !msg->type) {
					rv = csio_mb_fwevt_handler(hw,
								   msg->data);
					if (!rv)
						break;
					/* Handle any remaining fw events */
					csio_fcoe_fwevt_handler(hw,
							msg->opcode, msg->data);
				} else if (msg->opcode == CPL_FW6_PLD) {

					csio_fcoe_fwevt_handler(hw,
							msg->opcode, msg->data);
				} else {
					csio_warn(hw,
					     "Unhandled FW msg op %x type %x\n",
						  msg->opcode, msg->type);
					CSIO_INC_STATS(hw, n_evt_drop);
				}
				break;

			case CSIO_EVT_MBX:
				csio_mberr_worker(hw);
				break;

			case CSIO_EVT_DEV_LOSS:
				memcpy(&rn, evt_msg->data, sizeof(rn));
				csio_rnode_devloss_handler(rn);
				break;

			default:
				csio_warn(hw, "Unhandled event %x on evtq\n",
					  evt_msg->type);
				CSIO_INC_STATS(hw, n_evt_unexp);
				break;
			}
free_evt:
			csio_free_evt(hw, evt_msg);
		}

		spin_lock_irq(&hw->lock);
	}
	hw->flags &= ~CSIO_HWF_FWEVT_PENDING;
	spin_unlock_irq(&hw->lock);
}

int
csio_fwevtq_handler(struct csio_hw *hw)
{
	int rv;

	if (csio_q_iqid(hw, hw->fwevt_iq_idx) == CSIO_MAX_QID) {
		CSIO_INC_STATS(hw, n_int_stray);
		return -EINVAL;
	}

	rv = csio_wr_process_iq_idx(hw, hw->fwevt_iq_idx,
				    csio_process_fwevtq_entry, NULL);
	return rv;
}

/****************************************************************************
 * Entry points
 ****************************************************************************/

/* Management module */
/*
 * csio_mgmt_req_lookup - Check whether the given IO req exists in the
 * active queue.
 * @mgmtm - mgmt module
 * @io_req - io request
 *
 * Return - 0: if the given IO req exists in the active queue.
 *          -EINVAL: if the lookup fails.
 */
int
csio_mgmt_req_lookup(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req)
{
	struct list_head *tmp;

	/* Lookup ioreq in the ACTIVEQ */
	list_for_each(tmp, &mgmtm->active_q) {
		if (io_req == (struct csio_ioreq *)tmp)
			return 0;
	}
	return -EINVAL;
}

#define	ECM_MIN_TMO	1000	/* Minimum timeout value for req */

/*
 * csio_mgmt_tmo_handler - MGMT IO timeout handler.
 * @t: Timer whose expiry invoked this handler.
 *
 * Return - none.
 */
static void
csio_mgmt_tmo_handler(struct timer_list *t)
{
	struct csio_mgmtm *mgmtm = from_timer(mgmtm, t, mgmt_timer);
	struct list_head *tmp;
	struct csio_ioreq *io_req;

	csio_dbg(mgmtm->hw, "Mgmt timer invoked!\n");

	spin_lock_irq(&mgmtm->hw->lock);

	list_for_each(tmp, &mgmtm->active_q) {
		io_req = (struct csio_ioreq *) tmp;
		io_req->tmo -= min_t(uint32_t, io_req->tmo, ECM_MIN_TMO);

		if (!io_req->tmo) {
			/* Dequeue the request from the active Q. */
			tmp = csio_list_prev(tmp);
			list_del_init(&io_req->sm.sm_list);
			if (io_req->io_cbfn) {
				/* io_req will be freed by completion handler */
				io_req->wr_status = -ETIMEDOUT;
				io_req->io_cbfn(mgmtm->hw, io_req);
			} else {
				CSIO_DB_ASSERT(0);
			}
		}
	}

	/* If the active queue is not empty, re-arm the timer */
	if (!list_empty(&mgmtm->active_q))
		mod_timer(&mgmtm->mgmt_timer,
			  jiffies + msecs_to_jiffies(ECM_MIN_TMO));
	spin_unlock_irq(&mgmtm->hw->lock);
}

static void
csio_mgmtm_cleanup(struct csio_mgmtm *mgmtm)
{
	struct csio_hw *hw = mgmtm->hw;
	struct csio_ioreq *io_req;
	struct list_head *tmp;
	uint32_t count;

	count = 30;
	/* Wait for all outstanding reqs to complete gracefully */
	while ((!list_empty(&mgmtm->active_q)) && count--) {
		spin_unlock_irq(&hw->lock);
		msleep(2000);
		spin_lock_irq(&hw->lock);
	}

	/* release outstanding reqs from the ACTIVEQ */
	list_for_each(tmp, &mgmtm->active_q) {
		io_req = (struct csio_ioreq *) tmp;
		tmp = csio_list_prev(tmp);
		list_del_init(&io_req->sm.sm_list);
		mgmtm->stats.n_active--;
		if (io_req->io_cbfn) {
			/* io_req will be freed by completion handler */
			io_req->wr_status = -ETIMEDOUT;
			io_req->io_cbfn(mgmtm->hw, io_req);
		}
	}
}

/*
 * csio_mgmtm_init - Mgmt module init entry point
 * @mgmtm - mgmt module
 * @hw - HW module
 *
 * Initialize mgmt timer, resource wait queue, active queue,
 * completion q. Allocate Egress and Ingress
 * WR queues and save off the queue index returned by the WR
 * module for future use. Allocate and save off mgmt reqs in the
 * mgmt_req_freelist for future use. Make sure their SM is initialized
 * to uninit state.
 * Returns: 0 - on success
 *          -ENOMEM - on error.
 */
static int
csio_mgmtm_init(struct csio_mgmtm *mgmtm, struct csio_hw *hw)
{
	timer_setup(&mgmtm->mgmt_timer, csio_mgmt_tmo_handler, 0);

	INIT_LIST_HEAD(&mgmtm->active_q);
	INIT_LIST_HEAD(&mgmtm->cbfn_q);

	mgmtm->hw = hw;
	/*mgmtm->iq_idx = hw->fwevt_iq_idx;*/

	return 0;
}

/*
 * csio_mgmtm_exit - MGMT module exit entry point
 * @mgmtm - mgmt module
 *
 * This function is called during MGMT module uninit.
 * Stop timers, free ioreqs allocated.
 * Returns: None
 *
 */
static void
csio_mgmtm_exit(struct csio_mgmtm *mgmtm)
{
	del_timer_sync(&mgmtm->mgmt_timer);
}


/**
 * csio_hw_start - Kicks off the HW State machine
 * @hw: Pointer to HW module.
 *
 * It is assumed that the initialization is a synchronous operation.
 * So when we return after posting the event, the HW SM should be in
 * the ready state, if there were no errors during init.
 */
int
csio_hw_start(struct csio_hw *hw)
{
	spin_lock_irq(&hw->lock);
	csio_post_event(&hw->sm, CSIO_HWE_CFG);
	spin_unlock_irq(&hw->lock);

	if (csio_is_hw_ready(hw))
		return 0;
	else if (csio_match_state(hw, csio_hws_uninit))
		return -EINVAL;
	else
		return -ENODEV;
}

int
csio_hw_stop(struct csio_hw *hw)
{
	csio_post_event(&hw->sm, CSIO_HWE_PCI_REMOVE);

	if (csio_is_hw_removing(hw))
		return 0;
	else
		return -EINVAL;
}

/* Max reset retries */
#define CSIO_MAX_RESET_RETRIES	3

/**
 * csio_hw_reset - Reset the hardware
 * @hw: HW module.
 *
 * Caller should hold lock across this function.
 */
int
csio_hw_reset(struct csio_hw *hw)
{
	if (!csio_is_hw_master(hw))
		return -EPERM;

	if (hw->rst_retries >= CSIO_MAX_RESET_RETRIES) {
		csio_dbg(hw, "Max hw reset attempts reached..\n");
		return -EINVAL;
	}

	hw->rst_retries++;
	csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET);

	if (csio_is_hw_ready(hw)) {
		hw->rst_retries = 0;
		hw->stats.n_reset_start = jiffies_to_msecs(jiffies);
		return 0;
	} else
		return -EINVAL;
}

/*
 * csio_hw_get_device_id - Caches the Adapter's vendor & device id.
 * @hw: HW module.
 */
static void
csio_hw_get_device_id(struct csio_hw *hw)
{
	/* Is the adapter device id cached already? */
	if (csio_is_dev_id_cached(hw))
		return;

	/* Get the PCI vendor & device id */
	pci_read_config_word(hw->pdev, PCI_VENDOR_ID,
			     &hw->params.pci.vendor_id);
	pci_read_config_word(hw->pdev, PCI_DEVICE_ID,
			     &hw->params.pci.device_id);

	csio_dev_id_cached(hw);
	hw->chip_id = (hw->params.pci.device_id & CSIO_HW_CHIP_MASK);

} /* csio_hw_get_device_id */

/*
 * csio_hw_set_description - Set the model, description of the hw.
 * @hw: HW module.
 * @ven_id: PCI Vendor ID
 * @dev_id: PCI Device ID
 */
static void
csio_hw_set_description(struct csio_hw *hw, uint16_t ven_id, uint16_t dev_id)
{
	uint32_t adap_type, prot_type;

	if (ven_id == CSIO_VENDOR_ID) {
		prot_type = (dev_id & CSIO_ASIC_DEVID_PROTO_MASK);
		adap_type = (dev_id & CSIO_ASIC_DEVID_TYPE_MASK);

		if (prot_type == CSIO_T5_FCOE_ASIC) {
			memcpy(hw->hw_ver,
			       csio_t5_fcoe_adapters[adap_type].model_no, 16);
			memcpy(hw->model_desc,
			       csio_t5_fcoe_adapters[adap_type].description,
			       32);
		} else {
			char tempName[32] = "Chelsio FCoE Controller";
			memcpy(hw->model_desc, tempName, 32);
		}
	}
} /* csio_hw_set_description */

/**
 * csio_hw_init - Initialize HW module.
 * @hw: Pointer to HW module.
 *
 * Initialize the members of the HW module.
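 * Sets up the uninit state machine, lock and lnode list, caches the
 * PCI ids, initializes the sub-modules (mailbox, work-request, SCSI,
 * mgmt) and pre-allocates the free event queue.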
 */
int
csio_hw_init(struct csio_hw *hw)
{
	int rv = -EINVAL;
	uint32_t i;
	uint16_t ven_id, dev_id;
	struct csio_evt_msg *evt_entry;

	INIT_LIST_HEAD(&hw->sm.sm_list);
	csio_init_state(&hw->sm, csio_hws_uninit);
	spin_lock_init(&hw->lock);
	INIT_LIST_HEAD(&hw->sln_head);

	/* Get the PCI vendor & device id */
	csio_hw_get_device_id(hw);

	strcpy(hw->name, CSIO_HW_NAME);

	/* Initialize the HW chip ops; the T5 ops serve all supported chips */
	hw->chip_ops = &t5_ops;

	/* Set the model & its description */

	ven_id = hw->params.pci.vendor_id;
	dev_id = hw->params.pci.device_id;

	csio_hw_set_description(hw, ven_id, dev_id);

	/* Initialize default log level */
	hw->params.log_level = (uint32_t) csio_dbg_level;

	csio_set_fwevt_intr_idx(hw, -1);
	csio_set_nondata_intr_idx(hw, -1);

	/* Init all the modules: Mailbox, WorkRequest and Transport */
	if (csio_mbm_init(csio_hw_to_mbm(hw), hw, csio_hw_mb_timer))
		goto err;

	rv = csio_wrm_init(csio_hw_to_wrm(hw), hw);
	if (rv)
		goto err_mbm_exit;

	rv = csio_scsim_init(csio_hw_to_scsim(hw), hw);
	if (rv)
		goto err_wrm_exit;

	rv = csio_mgmtm_init(csio_hw_to_mgmtm(hw), hw);
	if (rv)
		goto err_scsim_exit;

	/* Pre-allocate evtq and initialize them */
	INIT_LIST_HEAD(&hw->evt_active_q);
	INIT_LIST_HEAD(&hw->evt_free_q);
	for (i = 0; i < csio_evtq_sz; i++) {
		evt_entry = kzalloc(sizeof(struct csio_evt_msg), GFP_KERNEL);
		if (!evt_entry) {
			rv = -ENOMEM;
			csio_err(hw, "Failed to initialize eventq\n");
			goto err_evtq_cleanup;
		}

		list_add_tail(&evt_entry->list, &hw->evt_free_q);
		CSIO_INC_STATS(hw, n_evt_freeq);
	}

	hw->dev_num = dev_num;
	dev_num++;

	return 0;

err_evtq_cleanup:
	csio_evtq_cleanup(hw);
	csio_mgmtm_exit(csio_hw_to_mgmtm(hw));
err_scsim_exit:
	csio_scsim_exit(csio_hw_to_scsim(hw));
err_wrm_exit:
	csio_wrm_exit(csio_hw_to_wrm(hw), hw);
err_mbm_exit:
	csio_mbm_exit(csio_hw_to_mbm(hw));
err:
	return rv;
}

/**
 * csio_hw_exit - Un-initialize HW module.
 * @hw: Pointer to HW module.
 *
 */
void
csio_hw_exit(struct csio_hw *hw)
{
	csio_evtq_cleanup(hw);
	csio_mgmtm_exit(csio_hw_to_mgmtm(hw));
	csio_scsim_exit(csio_hw_to_scsim(hw));
	csio_wrm_exit(csio_hw_to_wrm(hw), hw);
	csio_mbm_exit(csio_hw_to_mbm(hw));
}