1 /* 2 * This file is part of the Chelsio FCoE driver for Linux. 3 * 4 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. 5 * 6 * This software is available to you under a choice of one of two 7 * licenses. You may choose to be licensed under the terms of the GNU 8 * General Public License (GPL) Version 2, available from the file 9 * COPYING in the main directory of this source tree, or the 10 * OpenIB.org BSD license below: 11 * 12 * Redistribution and use in source and binary forms, with or 13 * without modification, are permitted provided that the following 14 * conditions are met: 15 * 16 * - Redistributions of source code must retain the above 17 * copyright notice, this list of conditions and the following 18 * disclaimer. 19 * 20 * - Redistributions in binary form must reproduce the above 21 * copyright notice, this list of conditions and the following 22 * disclaimer in the documentation and/or other materials 23 * provided with the distribution. 24 * 25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 32 * SOFTWARE. 33 */ 34 35 #include <linux/pci.h> 36 #include <linux/pci_regs.h> 37 #include <linux/firmware.h> 38 #include <linux/stddef.h> 39 #include <linux/delay.h> 40 #include <linux/string.h> 41 #include <linux/compiler.h> 42 #include <linux/jiffies.h> 43 #include <linux/kernel.h> 44 #include <linux/log2.h> 45 46 #include "csio_hw.h" 47 #include "csio_lnode.h" 48 #include "csio_rnode.h" 49 50 int csio_dbg_level = 0xFEFF; 51 unsigned int csio_port_mask = 0xf; 52 53 /* Default FW event queue entries. 
*/ 54 static uint32_t csio_evtq_sz = CSIO_EVTQ_SIZE; 55 56 /* Default MSI param level */ 57 int csio_msi = 2; 58 59 /* FCoE function instances */ 60 static int dev_num; 61 62 /* FCoE Adapter types & its description */ 63 static const struct csio_adap_desc csio_t5_fcoe_adapters[] = { 64 {"T580-Dbg 10G", "Chelsio T580-Dbg 10G [FCoE]"}, 65 {"T520-CR 10G", "Chelsio T520-CR 10G [FCoE]"}, 66 {"T522-CR 10G/1G", "Chelsio T522-CR 10G/1G [FCoE]"}, 67 {"T540-CR 10G", "Chelsio T540-CR 10G [FCoE]"}, 68 {"T520-BCH 10G", "Chelsio T520-BCH 10G [FCoE]"}, 69 {"T540-BCH 10G", "Chelsio T540-BCH 10G [FCoE]"}, 70 {"T540-CH 10G", "Chelsio T540-CH 10G [FCoE]"}, 71 {"T520-SO 10G", "Chelsio T520-SO 10G [FCoE]"}, 72 {"T520-CX4 10G", "Chelsio T520-CX4 10G [FCoE]"}, 73 {"T520-BT 10G", "Chelsio T520-BT 10G [FCoE]"}, 74 {"T504-BT 1G", "Chelsio T504-BT 1G [FCoE]"}, 75 {"B520-SR 10G", "Chelsio B520-SR 10G [FCoE]"}, 76 {"B504-BT 1G", "Chelsio B504-BT 1G [FCoE]"}, 77 {"T580-CR 10G", "Chelsio T580-CR 10G [FCoE]"}, 78 {"T540-LP-CR 10G", "Chelsio T540-LP-CR 10G [FCoE]"}, 79 {"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"}, 80 {"T580-LP-CR 40G", "Chelsio T580-LP-CR 40G [FCoE]"}, 81 {"T520-LL-CR 10G", "Chelsio T520-LL-CR 10G [FCoE]"}, 82 {"T560-CR 40G", "Chelsio T560-CR 40G [FCoE]"}, 83 {"T580-CR 40G", "Chelsio T580-CR 40G [FCoE]"}, 84 {"T580-SO 40G", "Chelsio T580-SO 40G [FCoE]"}, 85 {"T502-BT 1G", "Chelsio T502-BT 1G [FCoE]"} 86 }; 87 88 static void csio_mgmtm_cleanup(struct csio_mgmtm *); 89 static void csio_hw_mbm_cleanup(struct csio_hw *); 90 91 /* State machine forward declarations */ 92 static void csio_hws_uninit(struct csio_hw *, enum csio_hw_ev); 93 static void csio_hws_configuring(struct csio_hw *, enum csio_hw_ev); 94 static void csio_hws_initializing(struct csio_hw *, enum csio_hw_ev); 95 static void csio_hws_ready(struct csio_hw *, enum csio_hw_ev); 96 static void csio_hws_quiescing(struct csio_hw *, enum csio_hw_ev); 97 static void csio_hws_quiesced(struct csio_hw *, enum csio_hw_ev); 98 
static void csio_hws_resetting(struct csio_hw *, enum csio_hw_ev); 99 static void csio_hws_removing(struct csio_hw *, enum csio_hw_ev); 100 static void csio_hws_pcierr(struct csio_hw *, enum csio_hw_ev); 101 102 static void csio_hw_initialize(struct csio_hw *hw); 103 static void csio_evtq_stop(struct csio_hw *hw); 104 static void csio_evtq_start(struct csio_hw *hw); 105 106 int csio_is_hw_ready(struct csio_hw *hw) 107 { 108 return csio_match_state(hw, csio_hws_ready); 109 } 110 111 int csio_is_hw_removing(struct csio_hw *hw) 112 { 113 return csio_match_state(hw, csio_hws_removing); 114 } 115 116 117 /* 118 * csio_hw_wait_op_done_val - wait until an operation is completed 119 * @hw: the HW module 120 * @reg: the register to check for completion 121 * @mask: a single-bit field within @reg that indicates completion 122 * @polarity: the value of the field when the operation is completed 123 * @attempts: number of check iterations 124 * @delay: delay in usecs between iterations 125 * @valp: where to store the value of the register at completion time 126 * 127 * Wait until an operation is completed by checking a bit in a register 128 * up to @attempts times. If @valp is not NULL the value of the register 129 * at the time it indicated completion is stored there. Returns 0 if the 130 * operation completes and -EAGAIN otherwise. 
131 */ 132 int 133 csio_hw_wait_op_done_val(struct csio_hw *hw, int reg, uint32_t mask, 134 int polarity, int attempts, int delay, uint32_t *valp) 135 { 136 uint32_t val; 137 while (1) { 138 val = csio_rd_reg32(hw, reg); 139 140 if (!!(val & mask) == polarity) { 141 if (valp) 142 *valp = val; 143 return 0; 144 } 145 146 if (--attempts == 0) 147 return -EAGAIN; 148 if (delay) 149 udelay(delay); 150 } 151 } 152 153 /* 154 * csio_hw_tp_wr_bits_indirect - set/clear bits in an indirect TP register 155 * @hw: the adapter 156 * @addr: the indirect TP register address 157 * @mask: specifies the field within the register to modify 158 * @val: new value for the field 159 * 160 * Sets a field of an indirect TP register to the given value. 161 */ 162 void 163 csio_hw_tp_wr_bits_indirect(struct csio_hw *hw, unsigned int addr, 164 unsigned int mask, unsigned int val) 165 { 166 csio_wr_reg32(hw, addr, TP_PIO_ADDR_A); 167 val |= csio_rd_reg32(hw, TP_PIO_DATA_A) & ~mask; 168 csio_wr_reg32(hw, val, TP_PIO_DATA_A); 169 } 170 171 void 172 csio_set_reg_field(struct csio_hw *hw, uint32_t reg, uint32_t mask, 173 uint32_t value) 174 { 175 uint32_t val = csio_rd_reg32(hw, reg) & ~mask; 176 177 csio_wr_reg32(hw, val | value, reg); 178 /* Flush */ 179 csio_rd_reg32(hw, reg); 180 181 } 182 183 static int 184 csio_memory_write(struct csio_hw *hw, int mtype, u32 addr, u32 len, u32 *buf) 185 { 186 return hw->chip_ops->chip_memory_rw(hw, MEMWIN_CSIOSTOR, mtype, 187 addr, len, buf, 0); 188 } 189 190 /* 191 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms. 
192 */ 193 #define EEPROM_MAX_RD_POLL 40 194 #define EEPROM_MAX_WR_POLL 6 195 #define EEPROM_STAT_ADDR 0x7bfc 196 #define VPD_BASE 0x400 197 #define VPD_BASE_OLD 0 198 #define VPD_LEN 1024 199 #define VPD_INFO_FLD_HDR_SIZE 3 200 201 /* 202 * csio_hw_seeprom_read - read a serial EEPROM location 203 * @hw: hw to read 204 * @addr: EEPROM virtual address 205 * @data: where to store the read data 206 * 207 * Read a 32-bit word from a location in serial EEPROM using the card's PCI 208 * VPD capability. Note that this function must be called with a virtual 209 * address. 210 */ 211 static int 212 csio_hw_seeprom_read(struct csio_hw *hw, uint32_t addr, uint32_t *data) 213 { 214 uint16_t val = 0; 215 int attempts = EEPROM_MAX_RD_POLL; 216 uint32_t base = hw->params.pci.vpd_cap_addr; 217 218 if (addr >= EEPROMVSIZE || (addr & 3)) 219 return -EINVAL; 220 221 pci_write_config_word(hw->pdev, base + PCI_VPD_ADDR, (uint16_t)addr); 222 223 do { 224 udelay(10); 225 pci_read_config_word(hw->pdev, base + PCI_VPD_ADDR, &val); 226 } while (!(val & PCI_VPD_ADDR_F) && --attempts); 227 228 if (!(val & PCI_VPD_ADDR_F)) { 229 csio_err(hw, "reading EEPROM address 0x%x failed\n", addr); 230 return -EINVAL; 231 } 232 233 pci_read_config_dword(hw->pdev, base + PCI_VPD_DATA, data); 234 *data = le32_to_cpu(*(__le32 *)data); 235 236 return 0; 237 } 238 239 /* 240 * Partial EEPROM Vital Product Data structure. Includes only the ID and 241 * VPD-R sections. 242 */ 243 struct t4_vpd_hdr { 244 u8 id_tag; 245 u8 id_len[2]; 246 u8 id_data[ID_LEN]; 247 u8 vpdr_tag; 248 u8 vpdr_len[2]; 249 }; 250 251 /* 252 * csio_hw_get_vpd_keyword_val - Locates an information field keyword in 253 * the VPD 254 * @v: Pointer to buffered vpd data structure 255 * @kw: The keyword to search for 256 * 257 * Returns the value of the information field keyword or 258 * -EINVAL otherwise. 
259 */ 260 static int 261 csio_hw_get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw) 262 { 263 int32_t i; 264 int32_t offset , len; 265 const uint8_t *buf = &v->id_tag; 266 const uint8_t *vpdr_len = &v->vpdr_tag; 267 offset = sizeof(struct t4_vpd_hdr); 268 len = (uint16_t)vpdr_len[1] + ((uint16_t)vpdr_len[2] << 8); 269 270 if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN) 271 return -EINVAL; 272 273 for (i = offset; (i + VPD_INFO_FLD_HDR_SIZE) <= (offset + len);) { 274 if (memcmp(buf + i , kw, 2) == 0) { 275 i += VPD_INFO_FLD_HDR_SIZE; 276 return i; 277 } 278 279 i += VPD_INFO_FLD_HDR_SIZE + buf[i+2]; 280 } 281 282 return -EINVAL; 283 } 284 285 static int 286 csio_pci_capability(struct pci_dev *pdev, int cap, int *pos) 287 { 288 *pos = pci_find_capability(pdev, cap); 289 if (*pos) 290 return 0; 291 292 return -1; 293 } 294 295 /* 296 * csio_hw_get_vpd_params - read VPD parameters from VPD EEPROM 297 * @hw: HW module 298 * @p: where to store the parameters 299 * 300 * Reads card parameters stored in VPD EEPROM. 301 */ 302 static int 303 csio_hw_get_vpd_params(struct csio_hw *hw, struct csio_vpd *p) 304 { 305 int i, ret, ec, sn, addr; 306 uint8_t *vpd, csum; 307 const struct t4_vpd_hdr *v; 308 /* To get around compilation warning from strstrip */ 309 char *s; 310 311 if (csio_is_valid_vpd(hw)) 312 return 0; 313 314 ret = csio_pci_capability(hw->pdev, PCI_CAP_ID_VPD, 315 &hw->params.pci.vpd_cap_addr); 316 if (ret) 317 return -EINVAL; 318 319 vpd = kzalloc(VPD_LEN, GFP_ATOMIC); 320 if (vpd == NULL) 321 return -ENOMEM; 322 323 /* 324 * Card information normally starts at VPD_BASE but early cards had 325 * it at 0. 326 */ 327 ret = csio_hw_seeprom_read(hw, VPD_BASE, (uint32_t *)(vpd)); 328 addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD; 329 330 for (i = 0; i < VPD_LEN; i += 4) { 331 ret = csio_hw_seeprom_read(hw, addr + i, (uint32_t *)(vpd + i)); 332 if (ret) { 333 kfree(vpd); 334 return ret; 335 } 336 } 337 338 /* Reset the VPD flag! 
*/ 339 hw->flags &= (~CSIO_HWF_VPD_VALID); 340 341 v = (const struct t4_vpd_hdr *)vpd; 342 343 #define FIND_VPD_KW(var, name) do { \ 344 var = csio_hw_get_vpd_keyword_val(v, name); \ 345 if (var < 0) { \ 346 csio_err(hw, "missing VPD keyword " name "\n"); \ 347 kfree(vpd); \ 348 return -EINVAL; \ 349 } \ 350 } while (0) 351 352 FIND_VPD_KW(i, "RV"); 353 for (csum = 0; i >= 0; i--) 354 csum += vpd[i]; 355 356 if (csum) { 357 csio_err(hw, "corrupted VPD EEPROM, actual csum %u\n", csum); 358 kfree(vpd); 359 return -EINVAL; 360 } 361 FIND_VPD_KW(ec, "EC"); 362 FIND_VPD_KW(sn, "SN"); 363 #undef FIND_VPD_KW 364 365 memcpy(p->id, v->id_data, ID_LEN); 366 s = strstrip(p->id); 367 memcpy(p->ec, vpd + ec, EC_LEN); 368 s = strstrip(p->ec); 369 i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2]; 370 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN)); 371 s = strstrip(p->sn); 372 373 csio_valid_vpd_copied(hw); 374 375 kfree(vpd); 376 return 0; 377 } 378 379 /* 380 * csio_hw_sf1_read - read data from the serial flash 381 * @hw: the HW module 382 * @byte_cnt: number of bytes to read 383 * @cont: whether another operation will be chained 384 * @lock: whether to lock SF for PL access only 385 * @valp: where to store the read data 386 * 387 * Reads up to 4 bytes of data from the serial flash. The location of 388 * the read needs to be specified prior to calling this by issuing the 389 * appropriate commands to the serial flash. 
390 */ 391 static int 392 csio_hw_sf1_read(struct csio_hw *hw, uint32_t byte_cnt, int32_t cont, 393 int32_t lock, uint32_t *valp) 394 { 395 int ret; 396 397 if (!byte_cnt || byte_cnt > 4) 398 return -EINVAL; 399 if (csio_rd_reg32(hw, SF_OP_A) & SF_BUSY_F) 400 return -EBUSY; 401 402 csio_wr_reg32(hw, SF_LOCK_V(lock) | SF_CONT_V(cont) | 403 BYTECNT_V(byte_cnt - 1), SF_OP_A); 404 ret = csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 405 10, NULL); 406 if (!ret) 407 *valp = csio_rd_reg32(hw, SF_DATA_A); 408 return ret; 409 } 410 411 /* 412 * csio_hw_sf1_write - write data to the serial flash 413 * @hw: the HW module 414 * @byte_cnt: number of bytes to write 415 * @cont: whether another operation will be chained 416 * @lock: whether to lock SF for PL access only 417 * @val: value to write 418 * 419 * Writes up to 4 bytes of data to the serial flash. The location of 420 * the write needs to be specified prior to calling this by issuing the 421 * appropriate commands to the serial flash. 422 */ 423 static int 424 csio_hw_sf1_write(struct csio_hw *hw, uint32_t byte_cnt, uint32_t cont, 425 int32_t lock, uint32_t val) 426 { 427 if (!byte_cnt || byte_cnt > 4) 428 return -EINVAL; 429 if (csio_rd_reg32(hw, SF_OP_A) & SF_BUSY_F) 430 return -EBUSY; 431 432 csio_wr_reg32(hw, val, SF_DATA_A); 433 csio_wr_reg32(hw, SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) | 434 OP_V(1) | SF_LOCK_V(lock), SF_OP_A); 435 436 return csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 437 10, NULL); 438 } 439 440 /* 441 * csio_hw_flash_wait_op - wait for a flash operation to complete 442 * @hw: the HW module 443 * @attempts: max number of polls of the status register 444 * @delay: delay between polls in ms 445 * 446 * Wait for a flash operation to complete by polling the status register. 
447 */ 448 static int 449 csio_hw_flash_wait_op(struct csio_hw *hw, int32_t attempts, int32_t delay) 450 { 451 int ret; 452 uint32_t status; 453 454 while (1) { 455 ret = csio_hw_sf1_write(hw, 1, 1, 1, SF_RD_STATUS); 456 if (ret != 0) 457 return ret; 458 459 ret = csio_hw_sf1_read(hw, 1, 0, 1, &status); 460 if (ret != 0) 461 return ret; 462 463 if (!(status & 1)) 464 return 0; 465 if (--attempts == 0) 466 return -EAGAIN; 467 if (delay) 468 msleep(delay); 469 } 470 } 471 472 /* 473 * csio_hw_read_flash - read words from serial flash 474 * @hw: the HW module 475 * @addr: the start address for the read 476 * @nwords: how many 32-bit words to read 477 * @data: where to store the read data 478 * @byte_oriented: whether to store data as bytes or as words 479 * 480 * Read the specified number of 32-bit words from the serial flash. 481 * If @byte_oriented is set the read data is stored as a byte array 482 * (i.e., big-endian), otherwise as 32-bit words in the platform's 483 * natural endianess. 
484 */ 485 static int 486 csio_hw_read_flash(struct csio_hw *hw, uint32_t addr, uint32_t nwords, 487 uint32_t *data, int32_t byte_oriented) 488 { 489 int ret; 490 491 if (addr + nwords * sizeof(uint32_t) > hw->params.sf_size || (addr & 3)) 492 return -EINVAL; 493 494 addr = swab32(addr) | SF_RD_DATA_FAST; 495 496 ret = csio_hw_sf1_write(hw, 4, 1, 0, addr); 497 if (ret != 0) 498 return ret; 499 500 ret = csio_hw_sf1_read(hw, 1, 1, 0, data); 501 if (ret != 0) 502 return ret; 503 504 for ( ; nwords; nwords--, data++) { 505 ret = csio_hw_sf1_read(hw, 4, nwords > 1, nwords == 1, data); 506 if (nwords == 1) 507 csio_wr_reg32(hw, 0, SF_OP_A); /* unlock SF */ 508 if (ret) 509 return ret; 510 if (byte_oriented) 511 *data = (__force __u32) htonl(*data); 512 } 513 return 0; 514 } 515 516 /* 517 * csio_hw_write_flash - write up to a page of data to the serial flash 518 * @hw: the hw 519 * @addr: the start address to write 520 * @n: length of data to write in bytes 521 * @data: the data to write 522 * 523 * Writes up to a page of data (256 bytes) to the serial flash starting 524 * at the given address. All the data must be written to the same page. 
525 */ 526 static int 527 csio_hw_write_flash(struct csio_hw *hw, uint32_t addr, 528 uint32_t n, const uint8_t *data) 529 { 530 int ret = -EINVAL; 531 uint32_t buf[64]; 532 uint32_t i, c, left, val, offset = addr & 0xff; 533 534 if (addr >= hw->params.sf_size || offset + n > SF_PAGE_SIZE) 535 return -EINVAL; 536 537 val = swab32(addr) | SF_PROG_PAGE; 538 539 ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE); 540 if (ret != 0) 541 goto unlock; 542 543 ret = csio_hw_sf1_write(hw, 4, 1, 1, val); 544 if (ret != 0) 545 goto unlock; 546 547 for (left = n; left; left -= c) { 548 c = min(left, 4U); 549 for (val = 0, i = 0; i < c; ++i) 550 val = (val << 8) + *data++; 551 552 ret = csio_hw_sf1_write(hw, c, c != left, 1, val); 553 if (ret) 554 goto unlock; 555 } 556 ret = csio_hw_flash_wait_op(hw, 8, 1); 557 if (ret) 558 goto unlock; 559 560 csio_wr_reg32(hw, 0, SF_OP_A); /* unlock SF */ 561 562 /* Read the page to verify the write succeeded */ 563 ret = csio_hw_read_flash(hw, addr & ~0xff, ARRAY_SIZE(buf), buf, 1); 564 if (ret) 565 return ret; 566 567 if (memcmp(data - n, (uint8_t *)buf + offset, n)) { 568 csio_err(hw, 569 "failed to correctly write the flash page at %#x\n", 570 addr); 571 return -EINVAL; 572 } 573 574 return 0; 575 576 unlock: 577 csio_wr_reg32(hw, 0, SF_OP_A); /* unlock SF */ 578 return ret; 579 } 580 581 /* 582 * csio_hw_flash_erase_sectors - erase a range of flash sectors 583 * @hw: the HW module 584 * @start: the first sector to erase 585 * @end: the last sector to erase 586 * 587 * Erases the sectors in the given inclusive range. 
588 */ 589 static int 590 csio_hw_flash_erase_sectors(struct csio_hw *hw, int32_t start, int32_t end) 591 { 592 int ret = 0; 593 594 while (start <= end) { 595 596 ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE); 597 if (ret != 0) 598 goto out; 599 600 ret = csio_hw_sf1_write(hw, 4, 0, 1, 601 SF_ERASE_SECTOR | (start << 8)); 602 if (ret != 0) 603 goto out; 604 605 ret = csio_hw_flash_wait_op(hw, 14, 500); 606 if (ret != 0) 607 goto out; 608 609 start++; 610 } 611 out: 612 if (ret) 613 csio_err(hw, "erase of flash sector %d failed, error %d\n", 614 start, ret); 615 csio_wr_reg32(hw, 0, SF_OP_A); /* unlock SF */ 616 return 0; 617 } 618 619 static void 620 csio_hw_print_fw_version(struct csio_hw *hw, char *str) 621 { 622 csio_info(hw, "%s: %u.%u.%u.%u\n", str, 623 FW_HDR_FW_VER_MAJOR_G(hw->fwrev), 624 FW_HDR_FW_VER_MINOR_G(hw->fwrev), 625 FW_HDR_FW_VER_MICRO_G(hw->fwrev), 626 FW_HDR_FW_VER_BUILD_G(hw->fwrev)); 627 } 628 629 /* 630 * csio_hw_get_fw_version - read the firmware version 631 * @hw: HW module 632 * @vers: where to place the version 633 * 634 * Reads the FW version from flash. 635 */ 636 static int 637 csio_hw_get_fw_version(struct csio_hw *hw, uint32_t *vers) 638 { 639 return csio_hw_read_flash(hw, FLASH_FW_START + 640 offsetof(struct fw_hdr, fw_ver), 1, 641 vers, 0); 642 } 643 644 /* 645 * csio_hw_get_tp_version - read the TP microcode version 646 * @hw: HW module 647 * @vers: where to place the version 648 * 649 * Reads the TP microcode version from flash. 650 */ 651 static int 652 csio_hw_get_tp_version(struct csio_hw *hw, u32 *vers) 653 { 654 return csio_hw_read_flash(hw, FLASH_FW_START + 655 offsetof(struct fw_hdr, tp_microcode_ver), 1, 656 vers, 0); 657 } 658 659 /* 660 * csio_hw_fw_dload - download firmware. 661 * @hw: HW module 662 * @fw_data: firmware image to write. 663 * @size: image size 664 * 665 * Write the supplied firmware image to the card's serial flash. 
666 */ 667 static int 668 csio_hw_fw_dload(struct csio_hw *hw, uint8_t *fw_data, uint32_t size) 669 { 670 uint32_t csum; 671 int32_t addr; 672 int ret; 673 uint32_t i; 674 uint8_t first_page[SF_PAGE_SIZE]; 675 const __be32 *p = (const __be32 *)fw_data; 676 struct fw_hdr *hdr = (struct fw_hdr *)fw_data; 677 uint32_t sf_sec_size; 678 679 if ((!hw->params.sf_size) || (!hw->params.sf_nsec)) { 680 csio_err(hw, "Serial Flash data invalid\n"); 681 return -EINVAL; 682 } 683 684 if (!size) { 685 csio_err(hw, "FW image has no data\n"); 686 return -EINVAL; 687 } 688 689 if (size & 511) { 690 csio_err(hw, "FW image size not multiple of 512 bytes\n"); 691 return -EINVAL; 692 } 693 694 if (ntohs(hdr->len512) * 512 != size) { 695 csio_err(hw, "FW image size differs from size in FW header\n"); 696 return -EINVAL; 697 } 698 699 if (size > FLASH_FW_MAX_SIZE) { 700 csio_err(hw, "FW image too large, max is %u bytes\n", 701 FLASH_FW_MAX_SIZE); 702 return -EINVAL; 703 } 704 705 for (csum = 0, i = 0; i < size / sizeof(csum); i++) 706 csum += ntohl(p[i]); 707 708 if (csum != 0xffffffff) { 709 csio_err(hw, "corrupted firmware image, checksum %#x\n", csum); 710 return -EINVAL; 711 } 712 713 sf_sec_size = hw->params.sf_size / hw->params.sf_nsec; 714 i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */ 715 716 csio_dbg(hw, "Erasing sectors... start:%d end:%d\n", 717 FLASH_FW_START_SEC, FLASH_FW_START_SEC + i - 1); 718 719 ret = csio_hw_flash_erase_sectors(hw, FLASH_FW_START_SEC, 720 FLASH_FW_START_SEC + i - 1); 721 if (ret) { 722 csio_err(hw, "Flash Erase failed\n"); 723 goto out; 724 } 725 726 /* 727 * We write the correct version at the end so the driver can see a bad 728 * version if the FW write fails. Start by writing a copy of the 729 * first page with a bad version. 
730 */ 731 memcpy(first_page, fw_data, SF_PAGE_SIZE); 732 ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff); 733 ret = csio_hw_write_flash(hw, FLASH_FW_START, SF_PAGE_SIZE, first_page); 734 if (ret) 735 goto out; 736 737 csio_dbg(hw, "Writing Flash .. start:%d end:%d\n", 738 FW_IMG_START, FW_IMG_START + size); 739 740 addr = FLASH_FW_START; 741 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) { 742 addr += SF_PAGE_SIZE; 743 fw_data += SF_PAGE_SIZE; 744 ret = csio_hw_write_flash(hw, addr, SF_PAGE_SIZE, fw_data); 745 if (ret) 746 goto out; 747 } 748 749 ret = csio_hw_write_flash(hw, 750 FLASH_FW_START + 751 offsetof(struct fw_hdr, fw_ver), 752 sizeof(hdr->fw_ver), 753 (const uint8_t *)&hdr->fw_ver); 754 755 out: 756 if (ret) 757 csio_err(hw, "firmware download failed, error %d\n", ret); 758 return ret; 759 } 760 761 static int 762 csio_hw_get_flash_params(struct csio_hw *hw) 763 { 764 /* Table for non-Numonix supported flash parts. Numonix parts are left 765 * to the preexisting code. All flash parts have 64KB sectors. 766 */ 767 static struct flash_desc { 768 u32 vendor_and_model_id; 769 u32 size_mb; 770 } supported_flash[] = { 771 { 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */ 772 }; 773 774 u32 part, manufacturer; 775 u32 density, size = 0; 776 u32 flashid = 0; 777 int ret; 778 779 ret = csio_hw_sf1_write(hw, 1, 1, 0, SF_RD_ID); 780 if (!ret) 781 ret = csio_hw_sf1_read(hw, 3, 0, 1, &flashid); 782 csio_wr_reg32(hw, 0, SF_OP_A); /* unlock SF */ 783 if (ret) 784 return ret; 785 786 /* Check to see if it's one of our non-standard supported Flash parts. 787 */ 788 for (part = 0; part < ARRAY_SIZE(supported_flash); part++) 789 if (supported_flash[part].vendor_and_model_id == flashid) { 790 hw->params.sf_size = supported_flash[part].size_mb; 791 hw->params.sf_nsec = 792 hw->params.sf_size / SF_SEC_SIZE; 793 goto found; 794 } 795 796 /* Decode Flash part size. 
The code below looks repetative with 797 * common encodings, but that's not guaranteed in the JEDEC 798 * specification for the Read JADEC ID command. The only thing that 799 * we're guaranteed by the JADEC specification is where the 800 * Manufacturer ID is in the returned result. After that each 801 * Manufacturer ~could~ encode things completely differently. 802 * Note, all Flash parts must have 64KB sectors. 803 */ 804 manufacturer = flashid & 0xff; 805 switch (manufacturer) { 806 case 0x20: { /* Micron/Numonix */ 807 /* This Density -> Size decoding table is taken from Micron 808 * Data Sheets. 809 */ 810 density = (flashid >> 16) & 0xff; 811 switch (density) { 812 case 0x14 ... 0x19: /* 1MB - 32MB */ 813 size = 1 << density; 814 break; 815 case 0x20: /* 64MB */ 816 size = 1 << 26; 817 break; 818 case 0x21: /* 128MB */ 819 size = 1 << 27; 820 break; 821 case 0x22: /* 256MB */ 822 size = 1 << 28; 823 } 824 break; 825 } 826 case 0x9d: { /* ISSI -- Integrated Silicon Solution, Inc. */ 827 /* This Density -> Size decoding table is taken from ISSI 828 * Data Sheets. 829 */ 830 density = (flashid >> 16) & 0xff; 831 switch (density) { 832 case 0x16: /* 32 MB */ 833 size = 1 << 25; 834 break; 835 case 0x17: /* 64MB */ 836 size = 1 << 26; 837 } 838 break; 839 } 840 case 0xc2: /* Macronix */ 841 case 0xef: /* Winbond */ { 842 /* This Density -> Size decoding table is taken from 843 * Macronix and Winbond Data Sheets. 844 */ 845 density = (flashid >> 16) & 0xff; 846 switch (density) { 847 case 0x17: /* 8MB */ 848 case 0x18: /* 16MB */ 849 size = 1 << density; 850 } 851 } 852 } 853 854 /* If we didn't recognize the FLASH part, that's no real issue: the 855 * Hardware/Software contract says that Hardware will _*ALWAYS*_ 856 * use a FLASH part which is at least 4MB in size and has 64KB 857 * sectors. The unrecognized FLASH part is likely to be much larger 858 * than 4MB, but that's all we really need. 
859 */ 860 if (size == 0) { 861 csio_warn(hw, "Unknown Flash Part, ID = %#x, assuming 4MB\n", 862 flashid); 863 size = 1 << 22; 864 } 865 866 /* Store decoded Flash size */ 867 hw->params.sf_size = size; 868 hw->params.sf_nsec = size / SF_SEC_SIZE; 869 870 found: 871 if (hw->params.sf_size < FLASH_MIN_SIZE) 872 csio_warn(hw, "WARNING: Flash Part ID %#x, size %#x < %#x\n", 873 flashid, hw->params.sf_size, FLASH_MIN_SIZE); 874 return 0; 875 } 876 877 /*****************************************************************************/ 878 /* HW State machine assists */ 879 /*****************************************************************************/ 880 881 static int 882 csio_hw_dev_ready(struct csio_hw *hw) 883 { 884 uint32_t reg; 885 int cnt = 6; 886 int src_pf; 887 888 while (((reg = csio_rd_reg32(hw, PL_WHOAMI_A)) == 0xFFFFFFFF) && 889 (--cnt != 0)) 890 mdelay(100); 891 892 if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK)) 893 src_pf = SOURCEPF_G(reg); 894 else 895 src_pf = T6_SOURCEPF_G(reg); 896 897 if ((cnt == 0) && (((int32_t)(src_pf) < 0) || 898 (src_pf >= CSIO_MAX_PFN))) { 899 csio_err(hw, "PL_WHOAMI returned 0x%x, cnt:%d\n", reg, cnt); 900 return -EIO; 901 } 902 903 hw->pfn = src_pf; 904 905 return 0; 906 } 907 908 /* 909 * csio_do_hello - Perform the HELLO FW Mailbox command and process response. 910 * @hw: HW module 911 * @state: Device state 912 * 913 * FW_HELLO_CMD has to be polled for completion. 
914 */ 915 static int 916 csio_do_hello(struct csio_hw *hw, enum csio_dev_state *state) 917 { 918 struct csio_mb *mbp; 919 int rv = 0; 920 enum fw_retval retval; 921 uint8_t mpfn; 922 char state_str[16]; 923 int retries = FW_CMD_HELLO_RETRIES; 924 925 memset(state_str, 0, sizeof(state_str)); 926 927 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 928 if (!mbp) { 929 rv = -ENOMEM; 930 CSIO_INC_STATS(hw, n_err_nomem); 931 goto out; 932 } 933 934 retry: 935 csio_mb_hello(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 936 hw->pfn, CSIO_MASTER_MAY, NULL); 937 938 rv = csio_mb_issue(hw, mbp); 939 if (rv) { 940 csio_err(hw, "failed to issue HELLO cmd. ret:%d.\n", rv); 941 goto out_free_mb; 942 } 943 944 csio_mb_process_hello_rsp(hw, mbp, &retval, state, &mpfn); 945 if (retval != FW_SUCCESS) { 946 csio_err(hw, "HELLO cmd failed with ret: %d\n", retval); 947 rv = -EINVAL; 948 goto out_free_mb; 949 } 950 951 /* Firmware has designated us to be master */ 952 if (hw->pfn == mpfn) { 953 hw->flags |= CSIO_HWF_MASTER; 954 } else if (*state == CSIO_DEV_STATE_UNINIT) { 955 /* 956 * If we're not the Master PF then we need to wait around for 957 * the Master PF Driver to finish setting up the adapter. 958 * 959 * Note that we also do this wait if we're a non-Master-capable 960 * PF and there is no current Master PF; a Master PF may show up 961 * momentarily and we wouldn't want to fail pointlessly. (This 962 * can happen when an OS loads lots of different drivers rapidly 963 * at the same time). In this case, the Master PF returned by 964 * the firmware will be PCIE_FW_MASTER_MASK so the test below 965 * will work ... 966 */ 967 968 int waiting = FW_CMD_HELLO_TIMEOUT; 969 970 /* 971 * Wait for the firmware to either indicate an error or 972 * initialized state. If we see either of these we bail out 973 * and report the issue to the caller. If we exhaust the 974 * "hello timeout" and we haven't exhausted our retries, try 975 * again. Otherwise bail with a timeout error. 
976 */ 977 for (;;) { 978 uint32_t pcie_fw; 979 980 spin_unlock_irq(&hw->lock); 981 msleep(50); 982 spin_lock_irq(&hw->lock); 983 waiting -= 50; 984 985 /* 986 * If neither Error nor Initialialized are indicated 987 * by the firmware keep waiting till we exaust our 988 * timeout ... and then retry if we haven't exhausted 989 * our retries ... 990 */ 991 pcie_fw = csio_rd_reg32(hw, PCIE_FW_A); 992 if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) { 993 if (waiting <= 0) { 994 if (retries-- > 0) 995 goto retry; 996 997 rv = -ETIMEDOUT; 998 break; 999 } 1000 continue; 1001 } 1002 1003 /* 1004 * We either have an Error or Initialized condition 1005 * report errors preferentially. 1006 */ 1007 if (state) { 1008 if (pcie_fw & PCIE_FW_ERR_F) { 1009 *state = CSIO_DEV_STATE_ERR; 1010 rv = -ETIMEDOUT; 1011 } else if (pcie_fw & PCIE_FW_INIT_F) 1012 *state = CSIO_DEV_STATE_INIT; 1013 } 1014 1015 /* 1016 * If we arrived before a Master PF was selected and 1017 * there's not a valid Master PF, grab its identity 1018 * for our caller. 1019 */ 1020 if (mpfn == PCIE_FW_MASTER_M && 1021 (pcie_fw & PCIE_FW_MASTER_VLD_F)) 1022 mpfn = PCIE_FW_MASTER_G(pcie_fw); 1023 break; 1024 } 1025 hw->flags &= ~CSIO_HWF_MASTER; 1026 } 1027 1028 switch (*state) { 1029 case CSIO_DEV_STATE_UNINIT: 1030 strcpy(state_str, "Initializing"); 1031 break; 1032 case CSIO_DEV_STATE_INIT: 1033 strcpy(state_str, "Initialized"); 1034 break; 1035 case CSIO_DEV_STATE_ERR: 1036 strcpy(state_str, "Error"); 1037 break; 1038 default: 1039 strcpy(state_str, "Unknown"); 1040 break; 1041 } 1042 1043 if (hw->pfn == mpfn) 1044 csio_info(hw, "PF: %d, Coming up as MASTER, HW state: %s\n", 1045 hw->pfn, state_str); 1046 else 1047 csio_info(hw, 1048 "PF: %d, Coming up as SLAVE, Master PF: %d, HW state: %s\n", 1049 hw->pfn, mpfn, state_str); 1050 1051 out_free_mb: 1052 mempool_free(mbp, hw->mb_mempool); 1053 out: 1054 return rv; 1055 } 1056 1057 /* 1058 * csio_do_bye - Perform the BYE FW Mailbox command and process response. 
1059 * @hw: HW module 1060 * 1061 */ 1062 static int 1063 csio_do_bye(struct csio_hw *hw) 1064 { 1065 struct csio_mb *mbp; 1066 enum fw_retval retval; 1067 1068 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1069 if (!mbp) { 1070 CSIO_INC_STATS(hw, n_err_nomem); 1071 return -ENOMEM; 1072 } 1073 1074 csio_mb_bye(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL); 1075 1076 if (csio_mb_issue(hw, mbp)) { 1077 csio_err(hw, "Issue of BYE command failed\n"); 1078 mempool_free(mbp, hw->mb_mempool); 1079 return -EINVAL; 1080 } 1081 1082 retval = csio_mb_fw_retval(mbp); 1083 if (retval != FW_SUCCESS) { 1084 mempool_free(mbp, hw->mb_mempool); 1085 return -EINVAL; 1086 } 1087 1088 mempool_free(mbp, hw->mb_mempool); 1089 1090 return 0; 1091 } 1092 1093 /* 1094 * csio_do_reset- Perform the device reset. 1095 * @hw: HW module 1096 * @fw_rst: FW reset 1097 * 1098 * If fw_rst is set, issues FW reset mbox cmd otherwise 1099 * does PIO reset. 1100 * Performs reset of the function. 1101 */ 1102 static int 1103 csio_do_reset(struct csio_hw *hw, bool fw_rst) 1104 { 1105 struct csio_mb *mbp; 1106 enum fw_retval retval; 1107 1108 if (!fw_rst) { 1109 /* PIO reset */ 1110 csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A); 1111 mdelay(2000); 1112 return 0; 1113 } 1114 1115 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1116 if (!mbp) { 1117 CSIO_INC_STATS(hw, n_err_nomem); 1118 return -ENOMEM; 1119 } 1120 1121 csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO, 1122 PIORSTMODE_F | PIORST_F, 0, NULL); 1123 1124 if (csio_mb_issue(hw, mbp)) { 1125 csio_err(hw, "Issue of RESET command failed.n"); 1126 mempool_free(mbp, hw->mb_mempool); 1127 return -EINVAL; 1128 } 1129 1130 retval = csio_mb_fw_retval(mbp); 1131 if (retval != FW_SUCCESS) { 1132 csio_err(hw, "RESET cmd failed with ret:0x%x.\n", retval); 1133 mempool_free(mbp, hw->mb_mempool); 1134 return -EINVAL; 1135 } 1136 1137 mempool_free(mbp, hw->mb_mempool); 1138 1139 return 0; 1140 } 1141 1142 static int 1143 csio_hw_validate_caps(struct csio_hw *hw, 
struct csio_mb *mbp) 1144 { 1145 struct fw_caps_config_cmd *rsp = (struct fw_caps_config_cmd *)mbp->mb; 1146 uint16_t caps; 1147 1148 caps = ntohs(rsp->fcoecaps); 1149 1150 if (!(caps & FW_CAPS_CONFIG_FCOE_INITIATOR)) { 1151 csio_err(hw, "No FCoE Initiator capability in the firmware.\n"); 1152 return -EINVAL; 1153 } 1154 1155 if (!(caps & FW_CAPS_CONFIG_FCOE_CTRL_OFLD)) { 1156 csio_err(hw, "No FCoE Control Offload capability\n"); 1157 return -EINVAL; 1158 } 1159 1160 return 0; 1161 } 1162 1163 /* 1164 * csio_hw_fw_halt - issue a reset/halt to FW and put uP into RESET 1165 * @hw: the HW module 1166 * @mbox: mailbox to use for the FW RESET command (if desired) 1167 * @force: force uP into RESET even if FW RESET command fails 1168 * 1169 * Issues a RESET command to firmware (if desired) with a HALT indication 1170 * and then puts the microprocessor into RESET state. The RESET command 1171 * will only be issued if a legitimate mailbox is provided (mbox <= 1172 * PCIE_FW_MASTER_MASK). 1173 * 1174 * This is generally used in order for the host to safely manipulate the 1175 * adapter without fear of conflicting with whatever the firmware might 1176 * be doing. The only way out of this state is to RESTART the firmware 1177 * ... 1178 */ 1179 static int 1180 csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force) 1181 { 1182 enum fw_retval retval = 0; 1183 1184 /* 1185 * If a legitimate mailbox is provided, issue a RESET command 1186 * with a HALT indication. 
1187 */ 1188 if (mbox <= PCIE_FW_MASTER_M) { 1189 struct csio_mb *mbp; 1190 1191 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1192 if (!mbp) { 1193 CSIO_INC_STATS(hw, n_err_nomem); 1194 return -ENOMEM; 1195 } 1196 1197 csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO, 1198 PIORSTMODE_F | PIORST_F, FW_RESET_CMD_HALT_F, 1199 NULL); 1200 1201 if (csio_mb_issue(hw, mbp)) { 1202 csio_err(hw, "Issue of RESET command failed!\n"); 1203 mempool_free(mbp, hw->mb_mempool); 1204 return -EINVAL; 1205 } 1206 1207 retval = csio_mb_fw_retval(mbp); 1208 mempool_free(mbp, hw->mb_mempool); 1209 } 1210 1211 /* 1212 * Normally we won't complete the operation if the firmware RESET 1213 * command fails but if our caller insists we'll go ahead and put the 1214 * uP into RESET. This can be useful if the firmware is hung or even 1215 * missing ... We'll have to take the risk of putting the uP into 1216 * RESET without the cooperation of firmware in that case. 1217 * 1218 * We also force the firmware's HALT flag to be on in case we bypassed 1219 * the firmware RESET command above or we're dealing with old firmware 1220 * which doesn't have the HALT capability. This will serve as a flag 1221 * for the incoming firmware to know that it's coming out of a HALT 1222 * rather than a RESET ... if it's new enough to understand that ... 1223 */ 1224 if (retval == 0 || force) { 1225 csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F); 1226 csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F, 1227 PCIE_FW_HALT_F); 1228 } 1229 1230 /* 1231 * And we always return the result of the firmware RESET command 1232 * even when we force the uP into RESET ... 1233 */ 1234 return retval ? -EINVAL : 0; 1235 } 1236 1237 /* 1238 * csio_hw_fw_restart - restart the firmware by taking the uP out of RESET 1239 * @hw: the HW module 1240 * @reset: if we want to do a RESET to restart things 1241 * 1242 * Restart firmware previously halted by csio_hw_fw_halt(). 
On successful 1243 * return the previous PF Master remains as the new PF Master and there 1244 * is no need to issue a new HELLO command, etc. 1245 * 1246 * We do this in two ways: 1247 * 1248 * 1. If we're dealing with newer firmware we'll simply want to take 1249 * the chip's microprocessor out of RESET. This will cause the 1250 * firmware to start up from its start vector. And then we'll loop 1251 * until the firmware indicates it's started again (PCIE_FW.HALT 1252 * reset to 0) or we timeout. 1253 * 1254 * 2. If we're dealing with older firmware then we'll need to RESET 1255 * the chip since older firmware won't recognize the PCIE_FW.HALT 1256 * flag and automatically RESET itself on startup. 1257 */ 1258 static int 1259 csio_hw_fw_restart(struct csio_hw *hw, uint32_t mbox, int32_t reset) 1260 { 1261 if (reset) { 1262 /* 1263 * Since we're directing the RESET instead of the firmware 1264 * doing it automatically, we need to clear the PCIE_FW.HALT 1265 * bit. 1266 */ 1267 csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F, 0); 1268 1269 /* 1270 * If we've been given a valid mailbox, first try to get the 1271 * firmware to do the RESET. If that works, great and we can 1272 * return success. Otherwise, if we haven't been given a 1273 * valid mailbox or the RESET command failed, fall back to 1274 * hitting the chip with a hammer. 
1275 */ 1276 if (mbox <= PCIE_FW_MASTER_M) { 1277 csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0); 1278 msleep(100); 1279 if (csio_do_reset(hw, true) == 0) 1280 return 0; 1281 } 1282 1283 csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A); 1284 msleep(2000); 1285 } else { 1286 int ms; 1287 1288 csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0); 1289 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) { 1290 if (!(csio_rd_reg32(hw, PCIE_FW_A) & PCIE_FW_HALT_F)) 1291 return 0; 1292 msleep(100); 1293 ms += 100; 1294 } 1295 return -ETIMEDOUT; 1296 } 1297 return 0; 1298 } 1299 1300 /* 1301 * csio_hw_fw_upgrade - perform all of the steps necessary to upgrade FW 1302 * @hw: the HW module 1303 * @mbox: mailbox to use for the FW RESET command (if desired) 1304 * @fw_data: the firmware image to write 1305 * @size: image size 1306 * @force: force upgrade even if firmware doesn't cooperate 1307 * 1308 * Perform all of the steps necessary for upgrading an adapter's 1309 * firmware image. Normally this requires the cooperation of the 1310 * existing firmware in order to halt all existing activities 1311 * but if an invalid mailbox token is passed in we skip that step 1312 * (though we'll still put the adapter microprocessor into RESET in 1313 * that case). 1314 * 1315 * On successful return the new firmware will have been loaded and 1316 * the adapter will have been fully RESET losing all previous setup 1317 * state. On unsuccessful return the adapter may be completely hosed ... 1318 * positive errno indicates that the adapter is ~probably~ intact, a 1319 * negative errno indicates that things are looking bad ... 
1320 */ 1321 static int 1322 csio_hw_fw_upgrade(struct csio_hw *hw, uint32_t mbox, 1323 const u8 *fw_data, uint32_t size, int32_t force) 1324 { 1325 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data; 1326 int reset, ret; 1327 1328 ret = csio_hw_fw_halt(hw, mbox, force); 1329 if (ret != 0 && !force) 1330 return ret; 1331 1332 ret = csio_hw_fw_dload(hw, (uint8_t *) fw_data, size); 1333 if (ret != 0) 1334 return ret; 1335 1336 /* 1337 * Older versions of the firmware don't understand the new 1338 * PCIE_FW.HALT flag and so won't know to perform a RESET when they 1339 * restart. So for newly loaded older firmware we'll have to do the 1340 * RESET for it so it starts up on a clean slate. We can tell if 1341 * the newly loaded firmware will handle this right by checking 1342 * its header flags to see if it advertises the capability. 1343 */ 1344 reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0); 1345 return csio_hw_fw_restart(hw, mbox, reset); 1346 } 1347 1348 /* 1349 * csio_get_device_params - Get device parameters. 1350 * @hw: HW module 1351 * 1352 */ 1353 static int 1354 csio_get_device_params(struct csio_hw *hw) 1355 { 1356 struct csio_wrm *wrm = csio_hw_to_wrm(hw); 1357 struct csio_mb *mbp; 1358 enum fw_retval retval; 1359 u32 param[6]; 1360 int i, j = 0; 1361 1362 /* Initialize portids to -1 */ 1363 for (i = 0; i < CSIO_MAX_PPORTS; i++) 1364 hw->pport[i].portid = -1; 1365 1366 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1367 if (!mbp) { 1368 CSIO_INC_STATS(hw, n_err_nomem); 1369 return -ENOMEM; 1370 } 1371 1372 /* Get port vec information. */ 1373 param[0] = FW_PARAM_DEV(PORTVEC); 1374 1375 /* Get Core clock. */ 1376 param[1] = FW_PARAM_DEV(CCLK); 1377 1378 /* Get EQ id start and end. */ 1379 param[2] = FW_PARAM_PFVF(EQ_START); 1380 param[3] = FW_PARAM_PFVF(EQ_END); 1381 1382 /* Get IQ id start and end. 
*/ 1383 param[4] = FW_PARAM_PFVF(IQFLINT_START); 1384 param[5] = FW_PARAM_PFVF(IQFLINT_END); 1385 1386 csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0, 1387 ARRAY_SIZE(param), param, NULL, false, NULL); 1388 if (csio_mb_issue(hw, mbp)) { 1389 csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n"); 1390 mempool_free(mbp, hw->mb_mempool); 1391 return -EINVAL; 1392 } 1393 1394 csio_mb_process_read_params_rsp(hw, mbp, &retval, 1395 ARRAY_SIZE(param), param); 1396 if (retval != FW_SUCCESS) { 1397 csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n", 1398 retval); 1399 mempool_free(mbp, hw->mb_mempool); 1400 return -EINVAL; 1401 } 1402 1403 /* cache the information. */ 1404 hw->port_vec = param[0]; 1405 hw->vpd.cclk = param[1]; 1406 wrm->fw_eq_start = param[2]; 1407 wrm->fw_iq_start = param[4]; 1408 1409 /* Using FW configured max iqs & eqs */ 1410 if ((hw->flags & CSIO_HWF_USING_SOFT_PARAMS) || 1411 !csio_is_hw_master(hw)) { 1412 hw->cfg_niq = param[5] - param[4] + 1; 1413 hw->cfg_neq = param[3] - param[2] + 1; 1414 csio_dbg(hw, "Using fwconfig max niqs %d neqs %d\n", 1415 hw->cfg_niq, hw->cfg_neq); 1416 } 1417 1418 hw->port_vec &= csio_port_mask; 1419 1420 hw->num_pports = hweight32(hw->port_vec); 1421 1422 csio_dbg(hw, "Port vector: 0x%x, #ports: %d\n", 1423 hw->port_vec, hw->num_pports); 1424 1425 for (i = 0; i < hw->num_pports; i++) { 1426 while ((hw->port_vec & (1 << j)) == 0) 1427 j++; 1428 hw->pport[i].portid = j++; 1429 csio_dbg(hw, "Found Port:%d\n", hw->pport[i].portid); 1430 } 1431 mempool_free(mbp, hw->mb_mempool); 1432 1433 return 0; 1434 } 1435 1436 1437 /* 1438 * csio_config_device_caps - Get and set device capabilities. 
1439 * @hw: HW module 1440 * 1441 */ 1442 static int 1443 csio_config_device_caps(struct csio_hw *hw) 1444 { 1445 struct csio_mb *mbp; 1446 enum fw_retval retval; 1447 int rv = -EINVAL; 1448 1449 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1450 if (!mbp) { 1451 CSIO_INC_STATS(hw, n_err_nomem); 1452 return -ENOMEM; 1453 } 1454 1455 /* Get device capabilities */ 1456 csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, 0, 0, 0, 0, NULL); 1457 1458 if (csio_mb_issue(hw, mbp)) { 1459 csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(r) failed!\n"); 1460 goto out; 1461 } 1462 1463 retval = csio_mb_fw_retval(mbp); 1464 if (retval != FW_SUCCESS) { 1465 csio_err(hw, "FW_CAPS_CONFIG_CMD(r) returned %d!\n", retval); 1466 goto out; 1467 } 1468 1469 /* Validate device capabilities */ 1470 rv = csio_hw_validate_caps(hw, mbp); 1471 if (rv != 0) 1472 goto out; 1473 1474 /* Don't config device capabilities if already configured */ 1475 if (hw->fw_state == CSIO_DEV_STATE_INIT) { 1476 rv = 0; 1477 goto out; 1478 } 1479 1480 /* Write back desired device capabilities */ 1481 csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, true, true, 1482 false, true, NULL); 1483 1484 if (csio_mb_issue(hw, mbp)) { 1485 csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(w) failed!\n"); 1486 goto out; 1487 } 1488 1489 retval = csio_mb_fw_retval(mbp); 1490 if (retval != FW_SUCCESS) { 1491 csio_err(hw, "FW_CAPS_CONFIG_CMD(w) returned %d!\n", retval); 1492 goto out; 1493 } 1494 1495 rv = 0; 1496 out: 1497 mempool_free(mbp, hw->mb_mempool); 1498 return rv; 1499 } 1500 1501 static inline enum cc_fec fwcap_to_cc_fec(fw_port_cap32_t fw_fec) 1502 { 1503 enum cc_fec cc_fec = 0; 1504 1505 if (fw_fec & FW_PORT_CAP32_FEC_RS) 1506 cc_fec |= FEC_RS; 1507 if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS) 1508 cc_fec |= FEC_BASER_RS; 1509 1510 return cc_fec; 1511 } 1512 1513 static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause) 1514 { 1515 fw_port_cap32_t fw_pause = 0; 1516 1517 if (cc_pause & PAUSE_RX) 1518 fw_pause |= 
FW_PORT_CAP32_FC_RX; 1519 if (cc_pause & PAUSE_TX) 1520 fw_pause |= FW_PORT_CAP32_FC_TX; 1521 1522 return fw_pause; 1523 } 1524 1525 static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec) 1526 { 1527 fw_port_cap32_t fw_fec = 0; 1528 1529 if (cc_fec & FEC_RS) 1530 fw_fec |= FW_PORT_CAP32_FEC_RS; 1531 if (cc_fec & FEC_BASER_RS) 1532 fw_fec |= FW_PORT_CAP32_FEC_BASER_RS; 1533 1534 return fw_fec; 1535 } 1536 1537 /** 1538 * fwcap_to_fwspeed - return highest speed in Port Capabilities 1539 * @acaps: advertised Port Capabilities 1540 * 1541 * Get the highest speed for the port from the advertised Port 1542 * Capabilities. 1543 */ 1544 fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps) 1545 { 1546 #define TEST_SPEED_RETURN(__caps_speed) \ 1547 do { \ 1548 if (acaps & FW_PORT_CAP32_SPEED_##__caps_speed) \ 1549 return FW_PORT_CAP32_SPEED_##__caps_speed; \ 1550 } while (0) 1551 1552 TEST_SPEED_RETURN(400G); 1553 TEST_SPEED_RETURN(200G); 1554 TEST_SPEED_RETURN(100G); 1555 TEST_SPEED_RETURN(50G); 1556 TEST_SPEED_RETURN(40G); 1557 TEST_SPEED_RETURN(25G); 1558 TEST_SPEED_RETURN(10G); 1559 TEST_SPEED_RETURN(1G); 1560 TEST_SPEED_RETURN(100M); 1561 1562 #undef TEST_SPEED_RETURN 1563 1564 return 0; 1565 } 1566 1567 /** 1568 * fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits 1569 * @caps16: a 16-bit Port Capabilities value 1570 * 1571 * Returns the equivalent 32-bit Port Capabilities value. 
1572 */ 1573 fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16) 1574 { 1575 fw_port_cap32_t caps32 = 0; 1576 1577 #define CAP16_TO_CAP32(__cap) \ 1578 do { \ 1579 if (caps16 & FW_PORT_CAP_##__cap) \ 1580 caps32 |= FW_PORT_CAP32_##__cap; \ 1581 } while (0) 1582 1583 CAP16_TO_CAP32(SPEED_100M); 1584 CAP16_TO_CAP32(SPEED_1G); 1585 CAP16_TO_CAP32(SPEED_25G); 1586 CAP16_TO_CAP32(SPEED_10G); 1587 CAP16_TO_CAP32(SPEED_40G); 1588 CAP16_TO_CAP32(SPEED_100G); 1589 CAP16_TO_CAP32(FC_RX); 1590 CAP16_TO_CAP32(FC_TX); 1591 CAP16_TO_CAP32(ANEG); 1592 CAP16_TO_CAP32(MDIAUTO); 1593 CAP16_TO_CAP32(MDISTRAIGHT); 1594 CAP16_TO_CAP32(FEC_RS); 1595 CAP16_TO_CAP32(FEC_BASER_RS); 1596 CAP16_TO_CAP32(802_3_PAUSE); 1597 CAP16_TO_CAP32(802_3_ASM_DIR); 1598 1599 #undef CAP16_TO_CAP32 1600 1601 return caps32; 1602 } 1603 1604 /** 1605 * lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities 1606 * @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value 1607 * 1608 * Translates old FW_PORT_ACTION_GET_PORT_INFO lstatus field into new 1609 * 32-bit Port Capabilities value. 1610 */ 1611 fw_port_cap32_t lstatus_to_fwcap(u32 lstatus) 1612 { 1613 fw_port_cap32_t linkattr = 0; 1614 1615 /* The format of the Link Status in the old 1616 * 16-bit Port Information message isn't the same as the 1617 * 16-bit Port Capabilities bitfield used everywhere else. 
1618 */ 1619 if (lstatus & FW_PORT_CMD_RXPAUSE_F) 1620 linkattr |= FW_PORT_CAP32_FC_RX; 1621 if (lstatus & FW_PORT_CMD_TXPAUSE_F) 1622 linkattr |= FW_PORT_CAP32_FC_TX; 1623 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M)) 1624 linkattr |= FW_PORT_CAP32_SPEED_100M; 1625 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G)) 1626 linkattr |= FW_PORT_CAP32_SPEED_1G; 1627 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G)) 1628 linkattr |= FW_PORT_CAP32_SPEED_10G; 1629 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G)) 1630 linkattr |= FW_PORT_CAP32_SPEED_25G; 1631 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G)) 1632 linkattr |= FW_PORT_CAP32_SPEED_40G; 1633 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G)) 1634 linkattr |= FW_PORT_CAP32_SPEED_100G; 1635 1636 return linkattr; 1637 } 1638 1639 /** 1640 * csio_init_link_config - initialize a link's SW state 1641 * @lc: pointer to structure holding the link state 1642 * @pcaps: link Port Capabilities 1643 * @acaps: link current Advertised Port Capabilities 1644 * 1645 * Initializes the SW state maintained for each link, including the link's 1646 * capabilities and default speed/flow-control/autonegotiation settings. 1647 */ 1648 static void csio_init_link_config(struct link_config *lc, fw_port_cap32_t pcaps, 1649 fw_port_cap32_t acaps) 1650 { 1651 lc->pcaps = pcaps; 1652 lc->def_acaps = acaps; 1653 lc->lpacaps = 0; 1654 lc->speed_caps = 0; 1655 lc->speed = 0; 1656 lc->requested_fc = PAUSE_RX | PAUSE_TX; 1657 lc->fc = lc->requested_fc; 1658 1659 /* 1660 * For Forward Error Control, we default to whatever the Firmware 1661 * tells us the Link is currently advertising. 1662 */ 1663 lc->requested_fec = FEC_AUTO; 1664 lc->fec = fwcap_to_cc_fec(lc->def_acaps); 1665 1666 /* If the Port is capable of Auto-Negtotiation, initialize it as 1667 * "enabled" and copy over all of the Physical Port Capabilities 1668 * to the Advertised Port Capabilities. 
Otherwise mark it as 1669 * Auto-Negotiate disabled and select the highest supported speed 1670 * for the link. Note parallel structure in t4_link_l1cfg_core() 1671 * and t4_handle_get_port_info(). 1672 */ 1673 if (lc->pcaps & FW_PORT_CAP32_ANEG) { 1674 lc->acaps = lc->pcaps & ADVERT_MASK; 1675 lc->autoneg = AUTONEG_ENABLE; 1676 lc->requested_fc |= PAUSE_AUTONEG; 1677 } else { 1678 lc->acaps = 0; 1679 lc->autoneg = AUTONEG_DISABLE; 1680 } 1681 } 1682 1683 static void csio_link_l1cfg(struct link_config *lc, uint16_t fw_caps, 1684 uint32_t *rcaps) 1685 { 1686 unsigned int fw_mdi = FW_PORT_CAP32_MDI_V(FW_PORT_CAP32_MDI_AUTO); 1687 fw_port_cap32_t fw_fc, cc_fec, fw_fec, lrcap; 1688 1689 lc->link_ok = 0; 1690 1691 /* 1692 * Convert driver coding of Pause Frame Flow Control settings into the 1693 * Firmware's API. 1694 */ 1695 fw_fc = cc_to_fwcap_pause(lc->requested_fc); 1696 1697 /* 1698 * Convert Common Code Forward Error Control settings into the 1699 * Firmware's API. If the current Requested FEC has "Automatic" 1700 * (IEEE 802.3) specified, then we use whatever the Firmware 1701 * sent us as part of it's IEEE 802.3-based interpratation of 1702 * the Transceiver Module EPROM FEC parameters. Otherwise we 1703 * use whatever is in the current Requested FEC settings. 1704 */ 1705 if (lc->requested_fec & FEC_AUTO) 1706 cc_fec = fwcap_to_cc_fec(lc->def_acaps); 1707 else 1708 cc_fec = lc->requested_fec; 1709 fw_fec = cc_to_fwcap_fec(cc_fec); 1710 1711 /* Figure out what our Requested Port Capabilities are going to be. 1712 * Note parallel structure in t4_handle_get_port_info() and 1713 * init_link_config(). 
1714 */ 1715 if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) { 1716 lrcap = (lc->pcaps & ADVERT_MASK) | fw_fc | fw_fec; 1717 lc->fc = lc->requested_fc & ~PAUSE_AUTONEG; 1718 lc->fec = cc_fec; 1719 } else if (lc->autoneg == AUTONEG_DISABLE) { 1720 lrcap = lc->speed_caps | fw_fc | fw_fec | fw_mdi; 1721 lc->fc = lc->requested_fc & ~PAUSE_AUTONEG; 1722 lc->fec = cc_fec; 1723 } else { 1724 lrcap = lc->acaps | fw_fc | fw_fec | fw_mdi; 1725 } 1726 1727 *rcaps = lrcap; 1728 } 1729 1730 /* 1731 * csio_enable_ports - Bring up all available ports. 1732 * @hw: HW module. 1733 * 1734 */ 1735 static int 1736 csio_enable_ports(struct csio_hw *hw) 1737 { 1738 struct csio_mb *mbp; 1739 u16 fw_caps = FW_CAPS_UNKNOWN; 1740 enum fw_retval retval; 1741 uint8_t portid; 1742 fw_port_cap32_t pcaps, acaps, rcaps; 1743 int i; 1744 1745 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1746 if (!mbp) { 1747 CSIO_INC_STATS(hw, n_err_nomem); 1748 return -ENOMEM; 1749 } 1750 1751 for (i = 0; i < hw->num_pports; i++) { 1752 portid = hw->pport[i].portid; 1753 1754 if (fw_caps == FW_CAPS_UNKNOWN) { 1755 u32 param, val; 1756 1757 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | 1758 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_PORT_CAPS32)); 1759 val = 1; 1760 1761 csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, 1762 hw->pfn, 0, 1, ¶m, &val, false, 1763 NULL); 1764 1765 if (csio_mb_issue(hw, mbp)) { 1766 csio_err(hw, "failed to issue FW_PARAMS_CMD(r) port:%d\n", 1767 portid); 1768 mempool_free(mbp, hw->mb_mempool); 1769 return -EINVAL; 1770 } 1771 1772 csio_mb_process_read_params_rsp(hw, mbp, &retval, 1, 1773 &val); 1774 if (retval != FW_SUCCESS) { 1775 csio_err(hw, "FW_PARAMS_CMD(r) port:%d failed: 0x%x\n", 1776 portid, retval); 1777 mempool_free(mbp, hw->mb_mempool); 1778 return -EINVAL; 1779 } 1780 1781 fw_caps = val; 1782 } 1783 1784 /* Read PORT information */ 1785 csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid, 1786 false, 0, fw_caps, NULL); 1787 1788 if (csio_mb_issue(hw, mbp)) { 1789 csio_err(hw, "failed 
to issue FW_PORT_CMD(r) port:%d\n", 1790 portid); 1791 mempool_free(mbp, hw->mb_mempool); 1792 return -EINVAL; 1793 } 1794 1795 csio_mb_process_read_port_rsp(hw, mbp, &retval, fw_caps, 1796 &pcaps, &acaps); 1797 if (retval != FW_SUCCESS) { 1798 csio_err(hw, "FW_PORT_CMD(r) port:%d failed: 0x%x\n", 1799 portid, retval); 1800 mempool_free(mbp, hw->mb_mempool); 1801 return -EINVAL; 1802 } 1803 1804 csio_init_link_config(&hw->pport[i].link_cfg, pcaps, acaps); 1805 1806 csio_link_l1cfg(&hw->pport[i].link_cfg, fw_caps, &rcaps); 1807 1808 /* Write back PORT information */ 1809 csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid, 1810 true, rcaps, fw_caps, NULL); 1811 1812 if (csio_mb_issue(hw, mbp)) { 1813 csio_err(hw, "failed to issue FW_PORT_CMD(w) port:%d\n", 1814 portid); 1815 mempool_free(mbp, hw->mb_mempool); 1816 return -EINVAL; 1817 } 1818 1819 retval = csio_mb_fw_retval(mbp); 1820 if (retval != FW_SUCCESS) { 1821 csio_err(hw, "FW_PORT_CMD(w) port:%d failed :0x%x\n", 1822 portid, retval); 1823 mempool_free(mbp, hw->mb_mempool); 1824 return -EINVAL; 1825 } 1826 1827 } /* For all ports */ 1828 1829 mempool_free(mbp, hw->mb_mempool); 1830 1831 return 0; 1832 } 1833 1834 /* 1835 * csio_get_fcoe_resinfo - Read fcoe fw resource info. 1836 * @hw: HW module 1837 * Issued with lock held. 
1838 */ 1839 static int 1840 csio_get_fcoe_resinfo(struct csio_hw *hw) 1841 { 1842 struct csio_fcoe_res_info *res_info = &hw->fres_info; 1843 struct fw_fcoe_res_info_cmd *rsp; 1844 struct csio_mb *mbp; 1845 enum fw_retval retval; 1846 1847 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1848 if (!mbp) { 1849 CSIO_INC_STATS(hw, n_err_nomem); 1850 return -ENOMEM; 1851 } 1852 1853 /* Get FCoE FW resource information */ 1854 csio_fcoe_read_res_info_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL); 1855 1856 if (csio_mb_issue(hw, mbp)) { 1857 csio_err(hw, "failed to issue FW_FCOE_RES_INFO_CMD\n"); 1858 mempool_free(mbp, hw->mb_mempool); 1859 return -EINVAL; 1860 } 1861 1862 rsp = (struct fw_fcoe_res_info_cmd *)(mbp->mb); 1863 retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16)); 1864 if (retval != FW_SUCCESS) { 1865 csio_err(hw, "FW_FCOE_RES_INFO_CMD failed with ret x%x\n", 1866 retval); 1867 mempool_free(mbp, hw->mb_mempool); 1868 return -EINVAL; 1869 } 1870 1871 res_info->e_d_tov = ntohs(rsp->e_d_tov); 1872 res_info->r_a_tov_seq = ntohs(rsp->r_a_tov_seq); 1873 res_info->r_a_tov_els = ntohs(rsp->r_a_tov_els); 1874 res_info->r_r_tov = ntohs(rsp->r_r_tov); 1875 res_info->max_xchgs = ntohl(rsp->max_xchgs); 1876 res_info->max_ssns = ntohl(rsp->max_ssns); 1877 res_info->used_xchgs = ntohl(rsp->used_xchgs); 1878 res_info->used_ssns = ntohl(rsp->used_ssns); 1879 res_info->max_fcfs = ntohl(rsp->max_fcfs); 1880 res_info->max_vnps = ntohl(rsp->max_vnps); 1881 res_info->used_fcfs = ntohl(rsp->used_fcfs); 1882 res_info->used_vnps = ntohl(rsp->used_vnps); 1883 1884 csio_dbg(hw, "max ssns:%d max xchgs:%d\n", res_info->max_ssns, 1885 res_info->max_xchgs); 1886 mempool_free(mbp, hw->mb_mempool); 1887 1888 return 0; 1889 } 1890 1891 static int 1892 csio_hw_check_fwconfig(struct csio_hw *hw, u32 *param) 1893 { 1894 struct csio_mb *mbp; 1895 enum fw_retval retval; 1896 u32 _param[1]; 1897 1898 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1899 if (!mbp) { 1900 CSIO_INC_STATS(hw, 
n_err_nomem); 1901 return -ENOMEM; 1902 } 1903 1904 /* 1905 * Find out whether we're dealing with a version of 1906 * the firmware which has configuration file support. 1907 */ 1908 _param[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | 1909 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF)); 1910 1911 csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0, 1912 ARRAY_SIZE(_param), _param, NULL, false, NULL); 1913 if (csio_mb_issue(hw, mbp)) { 1914 csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n"); 1915 mempool_free(mbp, hw->mb_mempool); 1916 return -EINVAL; 1917 } 1918 1919 csio_mb_process_read_params_rsp(hw, mbp, &retval, 1920 ARRAY_SIZE(_param), _param); 1921 if (retval != FW_SUCCESS) { 1922 csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n", 1923 retval); 1924 mempool_free(mbp, hw->mb_mempool); 1925 return -EINVAL; 1926 } 1927 1928 mempool_free(mbp, hw->mb_mempool); 1929 *param = _param[0]; 1930 1931 return 0; 1932 } 1933 1934 static int 1935 csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path) 1936 { 1937 int ret = 0; 1938 const struct firmware *cf; 1939 struct pci_dev *pci_dev = hw->pdev; 1940 struct device *dev = &pci_dev->dev; 1941 unsigned int mtype = 0, maddr = 0; 1942 uint32_t *cfg_data; 1943 int value_to_add = 0; 1944 const char *fw_cfg_file; 1945 1946 if (csio_is_t5(pci_dev->device & CSIO_HW_CHIP_MASK)) 1947 fw_cfg_file = FW_CFG_NAME_T5; 1948 else 1949 fw_cfg_file = FW_CFG_NAME_T6; 1950 1951 if (request_firmware(&cf, fw_cfg_file, dev) < 0) { 1952 csio_err(hw, "could not find config file %s, err: %d\n", 1953 fw_cfg_file, ret); 1954 return -ENOENT; 1955 } 1956 1957 if (cf->size%4 != 0) 1958 value_to_add = 4 - (cf->size % 4); 1959 1960 cfg_data = kzalloc(cf->size+value_to_add, GFP_KERNEL); 1961 if (cfg_data == NULL) { 1962 ret = -ENOMEM; 1963 goto leave; 1964 } 1965 1966 memcpy((void *)cfg_data, (const void *)cf->data, cf->size); 1967 if (csio_hw_check_fwconfig(hw, fw_cfg_param) != 0) { 1968 ret = -EINVAL; 1969 goto leave; 1970 } 
1971 1972 mtype = FW_PARAMS_PARAM_Y_G(*fw_cfg_param); 1973 maddr = FW_PARAMS_PARAM_Z_G(*fw_cfg_param) << 16; 1974 1975 ret = csio_memory_write(hw, mtype, maddr, 1976 cf->size + value_to_add, cfg_data); 1977 1978 if ((ret == 0) && (value_to_add != 0)) { 1979 union { 1980 u32 word; 1981 char buf[4]; 1982 } last; 1983 size_t size = cf->size & ~0x3; 1984 int i; 1985 1986 last.word = cfg_data[size >> 2]; 1987 for (i = value_to_add; i < 4; i++) 1988 last.buf[i] = 0; 1989 ret = csio_memory_write(hw, mtype, maddr + size, 4, &last.word); 1990 } 1991 if (ret == 0) { 1992 csio_info(hw, "config file upgraded to %s\n", fw_cfg_file); 1993 snprintf(path, 64, "%s%s", "/lib/firmware/", fw_cfg_file); 1994 } 1995 1996 leave: 1997 kfree(cfg_data); 1998 release_firmware(cf); 1999 return ret; 2000 } 2001 2002 /* 2003 * HW initialization: contact FW, obtain config, perform basic init. 2004 * 2005 * If the firmware we're dealing with has Configuration File support, then 2006 * we use that to perform all configuration -- either using the configuration 2007 * file stored in flash on the adapter or using a filesystem-local file 2008 * if available. 2009 * 2010 * If we don't have configuration file support in the firmware, then we'll 2011 * have to set things up the old fashioned way with hard-coded register 2012 * writes and firmware commands ... 2013 */ 2014 2015 /* 2016 * Attempt to initialize the HW via a Firmware Configuration File. 
2017 */ 2018 static int 2019 csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param) 2020 { 2021 struct csio_mb *mbp = NULL; 2022 struct fw_caps_config_cmd *caps_cmd; 2023 unsigned int mtype, maddr; 2024 int rv = -EINVAL; 2025 uint32_t finiver = 0, finicsum = 0, cfcsum = 0; 2026 char path[64]; 2027 char *config_name = NULL; 2028 2029 /* 2030 * Reset device if necessary 2031 */ 2032 if (reset) { 2033 rv = csio_do_reset(hw, true); 2034 if (rv != 0) 2035 goto bye; 2036 } 2037 2038 /* 2039 * If we have a configuration file in host , 2040 * then use that. Otherwise, use the configuration file stored 2041 * in the HW flash ... 2042 */ 2043 spin_unlock_irq(&hw->lock); 2044 rv = csio_hw_flash_config(hw, fw_cfg_param, path); 2045 spin_lock_irq(&hw->lock); 2046 if (rv != 0) { 2047 /* 2048 * config file was not found. Use default 2049 * config file from flash. 2050 */ 2051 config_name = "On FLASH"; 2052 mtype = FW_MEMTYPE_CF_FLASH; 2053 maddr = hw->chip_ops->chip_flash_cfg_addr(hw); 2054 } else { 2055 config_name = path; 2056 mtype = FW_PARAMS_PARAM_Y_G(*fw_cfg_param); 2057 maddr = FW_PARAMS_PARAM_Z_G(*fw_cfg_param) << 16; 2058 } 2059 2060 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 2061 if (!mbp) { 2062 CSIO_INC_STATS(hw, n_err_nomem); 2063 return -ENOMEM; 2064 } 2065 /* 2066 * Tell the firmware to process the indicated Configuration File. 2067 * If there are no errors and the caller has provided return value 2068 * pointers for the [fini] section version, checksum and computed 2069 * checksum, pass those back to the caller. 
2070 */ 2071 caps_cmd = (struct fw_caps_config_cmd *)(mbp->mb); 2072 CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1); 2073 caps_cmd->op_to_write = 2074 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | 2075 FW_CMD_REQUEST_F | 2076 FW_CMD_READ_F); 2077 caps_cmd->cfvalid_to_len16 = 2078 htonl(FW_CAPS_CONFIG_CMD_CFVALID_F | 2079 FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) | 2080 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) | 2081 FW_LEN16(*caps_cmd)); 2082 2083 if (csio_mb_issue(hw, mbp)) { 2084 rv = -EINVAL; 2085 goto bye; 2086 } 2087 2088 rv = csio_mb_fw_retval(mbp); 2089 /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware 2090 * Configuration File in FLASH), our last gasp effort is to use the 2091 * Firmware Configuration File which is embedded in the 2092 * firmware. A very few early versions of the firmware didn't 2093 * have one embedded but we can ignore those. 2094 */ 2095 if (rv == ENOENT) { 2096 CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1); 2097 caps_cmd->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | 2098 FW_CMD_REQUEST_F | 2099 FW_CMD_READ_F); 2100 caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd)); 2101 2102 if (csio_mb_issue(hw, mbp)) { 2103 rv = -EINVAL; 2104 goto bye; 2105 } 2106 2107 rv = csio_mb_fw_retval(mbp); 2108 config_name = "Firmware Default"; 2109 } 2110 if (rv != FW_SUCCESS) 2111 goto bye; 2112 2113 finiver = ntohl(caps_cmd->finiver); 2114 finicsum = ntohl(caps_cmd->finicsum); 2115 cfcsum = ntohl(caps_cmd->cfcsum); 2116 2117 /* 2118 * And now tell the firmware to use the configuration we just loaded. 
2119 */ 2120 caps_cmd->op_to_write = 2121 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | 2122 FW_CMD_REQUEST_F | 2123 FW_CMD_WRITE_F); 2124 caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd)); 2125 2126 if (csio_mb_issue(hw, mbp)) { 2127 rv = -EINVAL; 2128 goto bye; 2129 } 2130 2131 rv = csio_mb_fw_retval(mbp); 2132 if (rv != FW_SUCCESS) { 2133 csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", rv); 2134 goto bye; 2135 } 2136 2137 if (finicsum != cfcsum) { 2138 csio_warn(hw, 2139 "Config File checksum mismatch: csum=%#x, computed=%#x\n", 2140 finicsum, cfcsum); 2141 } 2142 2143 /* Validate device capabilities */ 2144 rv = csio_hw_validate_caps(hw, mbp); 2145 if (rv != 0) 2146 goto bye; 2147 2148 mempool_free(mbp, hw->mb_mempool); 2149 mbp = NULL; 2150 2151 /* 2152 * Note that we're operating with parameters 2153 * not supplied by the driver, rather than from hard-wired 2154 * initialization constants buried in the driver. 2155 */ 2156 hw->flags |= CSIO_HWF_USING_SOFT_PARAMS; 2157 2158 /* device parameters */ 2159 rv = csio_get_device_params(hw); 2160 if (rv != 0) 2161 goto bye; 2162 2163 /* Configure SGE */ 2164 csio_wr_sge_init(hw); 2165 2166 /* 2167 * And finally tell the firmware to initialize itself using the 2168 * parameters from the Configuration File. 2169 */ 2170 /* Post event to notify completion of configuration */ 2171 csio_post_event(&hw->sm, CSIO_HWE_INIT); 2172 2173 csio_info(hw, "Successfully configure using Firmware " 2174 "Configuration File %s, version %#x, computed checksum %#x\n", 2175 config_name, finiver, cfcsum); 2176 return 0; 2177 2178 /* 2179 * Something bad happened. Return the error ... 2180 */ 2181 bye: 2182 if (mbp) 2183 mempool_free(mbp, hw->mb_mempool); 2184 hw->flags &= ~CSIO_HWF_USING_SOFT_PARAMS; 2185 csio_warn(hw, "Configuration file error %d\n", rv); 2186 return rv; 2187 } 2188 2189 /* Is the given firmware API compatible with the one the driver was compiled 2190 * with? 
2191 */ 2192 static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2) 2193 { 2194 2195 /* short circuit if it's the exact same firmware version */ 2196 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver) 2197 return 1; 2198 2199 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x) 2200 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) && 2201 SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe)) 2202 return 1; 2203 #undef SAME_INTF 2204 2205 return 0; 2206 } 2207 2208 /* The firmware in the filesystem is usable, but should it be installed? 2209 * This routine explains itself in detail if it indicates the filesystem 2210 * firmware should be installed. 2211 */ 2212 static int csio_should_install_fs_fw(struct csio_hw *hw, int card_fw_usable, 2213 int k, int c) 2214 { 2215 const char *reason; 2216 2217 if (!card_fw_usable) { 2218 reason = "incompatible or unusable"; 2219 goto install; 2220 } 2221 2222 if (k > c) { 2223 reason = "older than the version supported with this driver"; 2224 goto install; 2225 } 2226 2227 return 0; 2228 2229 install: 2230 csio_err(hw, "firmware on card (%u.%u.%u.%u) is %s, " 2231 "installing firmware %u.%u.%u.%u on card.\n", 2232 FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c), 2233 FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason, 2234 FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k), 2235 FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k)); 2236 2237 return 1; 2238 } 2239 2240 static struct fw_info fw_info_array[] = { 2241 { 2242 .chip = CHELSIO_T5, 2243 .fs_name = FW_CFG_NAME_T5, 2244 .fw_mod_name = FW_FNAME_T5, 2245 .fw_hdr = { 2246 .chip = FW_HDR_CHIP_T5, 2247 .fw_ver = __cpu_to_be32(FW_VERSION(T5)), 2248 .intfver_nic = FW_INTFVER(T5, NIC), 2249 .intfver_vnic = FW_INTFVER(T5, VNIC), 2250 .intfver_ri = FW_INTFVER(T5, RI), 2251 .intfver_iscsi = FW_INTFVER(T5, ISCSI), 2252 .intfver_fcoe = FW_INTFVER(T5, FCOE), 2253 }, 2254 }, { 2255 .chip = CHELSIO_T6, 2256 .fs_name = 
FW_CFG_NAME_T6, 2257 .fw_mod_name = FW_FNAME_T6, 2258 .fw_hdr = { 2259 .chip = FW_HDR_CHIP_T6, 2260 .fw_ver = __cpu_to_be32(FW_VERSION(T6)), 2261 .intfver_nic = FW_INTFVER(T6, NIC), 2262 .intfver_vnic = FW_INTFVER(T6, VNIC), 2263 .intfver_ri = FW_INTFVER(T6, RI), 2264 .intfver_iscsi = FW_INTFVER(T6, ISCSI), 2265 .intfver_fcoe = FW_INTFVER(T6, FCOE), 2266 }, 2267 } 2268 }; 2269 2270 static struct fw_info *find_fw_info(int chip) 2271 { 2272 int i; 2273 2274 for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) { 2275 if (fw_info_array[i].chip == chip) 2276 return &fw_info_array[i]; 2277 } 2278 return NULL; 2279 } 2280 2281 static int csio_hw_prep_fw(struct csio_hw *hw, struct fw_info *fw_info, 2282 const u8 *fw_data, unsigned int fw_size, 2283 struct fw_hdr *card_fw, enum csio_dev_state state, 2284 int *reset) 2285 { 2286 int ret, card_fw_usable, fs_fw_usable; 2287 const struct fw_hdr *fs_fw; 2288 const struct fw_hdr *drv_fw; 2289 2290 drv_fw = &fw_info->fw_hdr; 2291 2292 /* Read the header of the firmware on the card */ 2293 ret = csio_hw_read_flash(hw, FLASH_FW_START, 2294 sizeof(*card_fw) / sizeof(uint32_t), 2295 (uint32_t *)card_fw, 1); 2296 if (ret == 0) { 2297 card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw); 2298 } else { 2299 csio_err(hw, 2300 "Unable to read card's firmware header: %d\n", ret); 2301 card_fw_usable = 0; 2302 } 2303 2304 if (fw_data != NULL) { 2305 fs_fw = (const void *)fw_data; 2306 fs_fw_usable = fw_compatible(drv_fw, fs_fw); 2307 } else { 2308 fs_fw = NULL; 2309 fs_fw_usable = 0; 2310 } 2311 2312 if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver && 2313 (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) { 2314 /* Common case: the firmware on the card is an exact match and 2315 * the filesystem one is an exact match too, or the filesystem 2316 * one is absent/incompatible. 
2317 */ 2318 } else if (fs_fw_usable && state == CSIO_DEV_STATE_UNINIT && 2319 csio_should_install_fs_fw(hw, card_fw_usable, 2320 be32_to_cpu(fs_fw->fw_ver), 2321 be32_to_cpu(card_fw->fw_ver))) { 2322 ret = csio_hw_fw_upgrade(hw, hw->pfn, fw_data, 2323 fw_size, 0); 2324 if (ret != 0) { 2325 csio_err(hw, 2326 "failed to install firmware: %d\n", ret); 2327 goto bye; 2328 } 2329 2330 /* Installed successfully, update the cached header too. */ 2331 memcpy(card_fw, fs_fw, sizeof(*card_fw)); 2332 card_fw_usable = 1; 2333 *reset = 0; /* already reset as part of load_fw */ 2334 } 2335 2336 if (!card_fw_usable) { 2337 uint32_t d, c, k; 2338 2339 d = be32_to_cpu(drv_fw->fw_ver); 2340 c = be32_to_cpu(card_fw->fw_ver); 2341 k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0; 2342 2343 csio_err(hw, "Cannot find a usable firmware: " 2344 "chip state %d, " 2345 "driver compiled with %d.%d.%d.%d, " 2346 "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n", 2347 state, 2348 FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d), 2349 FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d), 2350 FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c), 2351 FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), 2352 FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k), 2353 FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k)); 2354 ret = EINVAL; 2355 goto bye; 2356 } 2357 2358 /* We're using whatever's on the card and it's known to be good. 
*/ 2359 hw->fwrev = be32_to_cpu(card_fw->fw_ver); 2360 hw->tp_vers = be32_to_cpu(card_fw->tp_microcode_ver); 2361 2362 bye: 2363 return ret; 2364 } 2365 2366 /* 2367 * Returns -EINVAL if attempts to flash the firmware failed 2368 * else returns 0, 2369 * if flashing was not attempted because the card had the 2370 * latest firmware ECANCELED is returned 2371 */ 2372 static int 2373 csio_hw_flash_fw(struct csio_hw *hw, int *reset) 2374 { 2375 int ret = -ECANCELED; 2376 const struct firmware *fw; 2377 struct fw_info *fw_info; 2378 struct fw_hdr *card_fw; 2379 struct pci_dev *pci_dev = hw->pdev; 2380 struct device *dev = &pci_dev->dev ; 2381 const u8 *fw_data = NULL; 2382 unsigned int fw_size = 0; 2383 const char *fw_bin_file; 2384 2385 /* This is the firmware whose headers the driver was compiled 2386 * against 2387 */ 2388 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(hw->chip_id)); 2389 if (fw_info == NULL) { 2390 csio_err(hw, 2391 "unable to get firmware info for chip %d.\n", 2392 CHELSIO_CHIP_VERSION(hw->chip_id)); 2393 return -EINVAL; 2394 } 2395 2396 if (csio_is_t5(pci_dev->device & CSIO_HW_CHIP_MASK)) 2397 fw_bin_file = FW_FNAME_T5; 2398 else 2399 fw_bin_file = FW_FNAME_T6; 2400 2401 if (request_firmware(&fw, fw_bin_file, dev) < 0) { 2402 csio_err(hw, "could not find firmware image %s, err: %d\n", 2403 fw_bin_file, ret); 2404 } else { 2405 fw_data = fw->data; 2406 fw_size = fw->size; 2407 } 2408 2409 /* allocate memory to read the header of the firmware on the 2410 * card 2411 */ 2412 card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL); 2413 2414 /* upgrade FW logic */ 2415 ret = csio_hw_prep_fw(hw, fw_info, fw_data, fw_size, card_fw, 2416 hw->fw_state, reset); 2417 2418 /* Cleaning up */ 2419 if (fw != NULL) 2420 release_firmware(fw); 2421 kfree(card_fw); 2422 return ret; 2423 } 2424 2425 static int csio_hw_check_fwver(struct csio_hw *hw) 2426 { 2427 if (csio_is_t6(hw->pdev->device & CSIO_HW_CHIP_MASK) && 2428 (hw->fwrev < CSIO_MIN_T6_FW)) { 2429 
csio_hw_print_fw_version(hw, "T6 unsupported fw"); 2430 return -1; 2431 } 2432 2433 return 0; 2434 } 2435 2436 /* 2437 * csio_hw_configure - Configure HW 2438 * @hw - HW module 2439 * 2440 */ 2441 static void 2442 csio_hw_configure(struct csio_hw *hw) 2443 { 2444 int reset = 1; 2445 int rv; 2446 u32 param[1]; 2447 2448 rv = csio_hw_dev_ready(hw); 2449 if (rv != 0) { 2450 CSIO_INC_STATS(hw, n_err_fatal); 2451 csio_post_event(&hw->sm, CSIO_HWE_FATAL); 2452 goto out; 2453 } 2454 2455 /* HW version */ 2456 hw->chip_ver = (char)csio_rd_reg32(hw, PL_REV_A); 2457 2458 /* Needed for FW download */ 2459 rv = csio_hw_get_flash_params(hw); 2460 if (rv != 0) { 2461 csio_err(hw, "Failed to get serial flash params rv:%d\n", rv); 2462 csio_post_event(&hw->sm, CSIO_HWE_FATAL); 2463 goto out; 2464 } 2465 2466 /* Set PCIe completion timeout to 4 seconds */ 2467 if (pci_is_pcie(hw->pdev)) 2468 pcie_capability_clear_and_set_word(hw->pdev, PCI_EXP_DEVCTL2, 2469 PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd); 2470 2471 hw->chip_ops->chip_set_mem_win(hw, MEMWIN_CSIOSTOR); 2472 2473 rv = csio_hw_get_fw_version(hw, &hw->fwrev); 2474 if (rv != 0) 2475 goto out; 2476 2477 csio_hw_print_fw_version(hw, "Firmware revision"); 2478 2479 rv = csio_do_hello(hw, &hw->fw_state); 2480 if (rv != 0) { 2481 CSIO_INC_STATS(hw, n_err_fatal); 2482 csio_post_event(&hw->sm, CSIO_HWE_FATAL); 2483 goto out; 2484 } 2485 2486 /* Read vpd */ 2487 rv = csio_hw_get_vpd_params(hw, &hw->vpd); 2488 if (rv != 0) 2489 goto out; 2490 2491 csio_hw_get_fw_version(hw, &hw->fwrev); 2492 csio_hw_get_tp_version(hw, &hw->tp_vers); 2493 if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) { 2494 2495 /* Do firmware update */ 2496 spin_unlock_irq(&hw->lock); 2497 rv = csio_hw_flash_fw(hw, &reset); 2498 spin_lock_irq(&hw->lock); 2499 2500 if (rv != 0) 2501 goto out; 2502 2503 rv = csio_hw_check_fwver(hw); 2504 if (rv < 0) 2505 goto out; 2506 2507 /* If the firmware doesn't support Configuration Files, 2508 * return an error. 
2509 */ 2510 rv = csio_hw_check_fwconfig(hw, param); 2511 if (rv != 0) { 2512 csio_info(hw, "Firmware doesn't support " 2513 "Firmware Configuration files\n"); 2514 goto out; 2515 } 2516 2517 /* The firmware provides us with a memory buffer where we can 2518 * load a Configuration File from the host if we want to 2519 * override the Configuration File in flash. 2520 */ 2521 rv = csio_hw_use_fwconfig(hw, reset, param); 2522 if (rv == -ENOENT) { 2523 csio_info(hw, "Could not initialize " 2524 "adapter, error%d\n", rv); 2525 goto out; 2526 } 2527 if (rv != 0) { 2528 csio_info(hw, "Could not initialize " 2529 "adapter, error%d\n", rv); 2530 goto out; 2531 } 2532 2533 } else { 2534 rv = csio_hw_check_fwver(hw); 2535 if (rv < 0) 2536 goto out; 2537 2538 if (hw->fw_state == CSIO_DEV_STATE_INIT) { 2539 2540 hw->flags |= CSIO_HWF_USING_SOFT_PARAMS; 2541 2542 /* device parameters */ 2543 rv = csio_get_device_params(hw); 2544 if (rv != 0) 2545 goto out; 2546 2547 /* Get device capabilities */ 2548 rv = csio_config_device_caps(hw); 2549 if (rv != 0) 2550 goto out; 2551 2552 /* Configure SGE */ 2553 csio_wr_sge_init(hw); 2554 2555 /* Post event to notify completion of configuration */ 2556 csio_post_event(&hw->sm, CSIO_HWE_INIT); 2557 goto out; 2558 } 2559 } /* if not master */ 2560 2561 out: 2562 return; 2563 } 2564 2565 /* 2566 * csio_hw_initialize - Initialize HW 2567 * @hw - HW module 2568 * 2569 */ 2570 static void 2571 csio_hw_initialize(struct csio_hw *hw) 2572 { 2573 struct csio_mb *mbp; 2574 enum fw_retval retval; 2575 int rv; 2576 int i; 2577 2578 if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) { 2579 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 2580 if (!mbp) 2581 goto out; 2582 2583 csio_mb_initialize(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL); 2584 2585 if (csio_mb_issue(hw, mbp)) { 2586 csio_err(hw, "Issue of FW_INITIALIZE_CMD failed!\n"); 2587 goto free_and_out; 2588 } 2589 2590 retval = csio_mb_fw_retval(mbp); 2591 if (retval != FW_SUCCESS) { 2592 
csio_err(hw, "FW_INITIALIZE_CMD returned 0x%x!\n", 2593 retval); 2594 goto free_and_out; 2595 } 2596 2597 mempool_free(mbp, hw->mb_mempool); 2598 } 2599 2600 rv = csio_get_fcoe_resinfo(hw); 2601 if (rv != 0) { 2602 csio_err(hw, "Failed to read fcoe resource info: %d\n", rv); 2603 goto out; 2604 } 2605 2606 spin_unlock_irq(&hw->lock); 2607 rv = csio_config_queues(hw); 2608 spin_lock_irq(&hw->lock); 2609 2610 if (rv != 0) { 2611 csio_err(hw, "Config of queues failed!: %d\n", rv); 2612 goto out; 2613 } 2614 2615 for (i = 0; i < hw->num_pports; i++) 2616 hw->pport[i].mod_type = FW_PORT_MOD_TYPE_NA; 2617 2618 if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) { 2619 rv = csio_enable_ports(hw); 2620 if (rv != 0) { 2621 csio_err(hw, "Failed to enable ports: %d\n", rv); 2622 goto out; 2623 } 2624 } 2625 2626 csio_post_event(&hw->sm, CSIO_HWE_INIT_DONE); 2627 return; 2628 2629 free_and_out: 2630 mempool_free(mbp, hw->mb_mempool); 2631 out: 2632 return; 2633 } 2634 2635 #define PF_INTR_MASK (PFSW_F | PFCIM_F) 2636 2637 /* 2638 * csio_hw_intr_enable - Enable HW interrupts 2639 * @hw: Pointer to HW module. 2640 * 2641 * Enable interrupts in HW registers. 2642 */ 2643 static void 2644 csio_hw_intr_enable(struct csio_hw *hw) 2645 { 2646 uint16_t vec = (uint16_t)csio_get_mb_intr_idx(csio_hw_to_mbm(hw)); 2647 u32 pf = 0; 2648 uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE_A); 2649 2650 if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK)) 2651 pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A)); 2652 else 2653 pf = T6_SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A)); 2654 2655 /* 2656 * Set aivec for MSI/MSIX. PCIE_PF_CFG.INTXType is set up 2657 * by FW, so do nothing for INTX. 
2658 */ 2659 if (hw->intr_mode == CSIO_IM_MSIX) 2660 csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG_A), 2661 AIVEC_V(AIVEC_M), vec); 2662 else if (hw->intr_mode == CSIO_IM_MSI) 2663 csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG_A), 2664 AIVEC_V(AIVEC_M), 0); 2665 2666 csio_wr_reg32(hw, PF_INTR_MASK, MYPF_REG(PL_PF_INT_ENABLE_A)); 2667 2668 /* Turn on MB interrupts - this will internally flush PIO as well */ 2669 csio_mb_intr_enable(hw); 2670 2671 /* These are common registers - only a master can modify them */ 2672 if (csio_is_hw_master(hw)) { 2673 /* 2674 * Disable the Serial FLASH interrupt, if enabled! 2675 */ 2676 pl &= (~SF_F); 2677 csio_wr_reg32(hw, pl, PL_INT_ENABLE_A); 2678 2679 csio_wr_reg32(hw, ERR_CPL_EXCEED_IQE_SIZE_F | 2680 EGRESS_SIZE_ERR_F | ERR_INVALID_CIDX_INC_F | 2681 ERR_CPL_OPCODE_0_F | ERR_DROPPED_DB_F | 2682 ERR_DATA_CPL_ON_HIGH_QID1_F | 2683 ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F | 2684 ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F | 2685 ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F | 2686 ERR_EGR_CTXT_PRIO_F | INGRESS_SIZE_ERR_F, 2687 SGE_INT_ENABLE3_A); 2688 csio_set_reg_field(hw, PL_INT_MAP0_A, 0, 1 << pf); 2689 } 2690 2691 hw->flags |= CSIO_HWF_HW_INTR_ENABLED; 2692 2693 } 2694 2695 /* 2696 * csio_hw_intr_disable - Disable HW interrupts 2697 * @hw: Pointer to HW module. 2698 * 2699 * Turn off Mailbox and PCI_PF_CFG interrupts. 
2700 */ 2701 void 2702 csio_hw_intr_disable(struct csio_hw *hw) 2703 { 2704 u32 pf = 0; 2705 2706 if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK)) 2707 pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A)); 2708 else 2709 pf = T6_SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A)); 2710 2711 if (!(hw->flags & CSIO_HWF_HW_INTR_ENABLED)) 2712 return; 2713 2714 hw->flags &= ~CSIO_HWF_HW_INTR_ENABLED; 2715 2716 csio_wr_reg32(hw, 0, MYPF_REG(PL_PF_INT_ENABLE_A)); 2717 if (csio_is_hw_master(hw)) 2718 csio_set_reg_field(hw, PL_INT_MAP0_A, 1 << pf, 0); 2719 2720 /* Turn off MB interrupts */ 2721 csio_mb_intr_disable(hw); 2722 2723 } 2724 2725 void 2726 csio_hw_fatal_err(struct csio_hw *hw) 2727 { 2728 csio_set_reg_field(hw, SGE_CONTROL_A, GLOBALENABLE_F, 0); 2729 csio_hw_intr_disable(hw); 2730 2731 /* Do not reset HW, we may need FW state for debugging */ 2732 csio_fatal(hw, "HW Fatal error encountered!\n"); 2733 } 2734 2735 /*****************************************************************************/ 2736 /* START: HW SM */ 2737 /*****************************************************************************/ 2738 /* 2739 * csio_hws_uninit - Uninit state 2740 * @hw - HW module 2741 * @evt - Event 2742 * 2743 */ 2744 static void 2745 csio_hws_uninit(struct csio_hw *hw, enum csio_hw_ev evt) 2746 { 2747 hw->prev_evt = hw->cur_evt; 2748 hw->cur_evt = evt; 2749 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2750 2751 switch (evt) { 2752 case CSIO_HWE_CFG: 2753 csio_set_state(&hw->sm, csio_hws_configuring); 2754 csio_hw_configure(hw); 2755 break; 2756 2757 default: 2758 CSIO_INC_STATS(hw, n_evt_unexp); 2759 break; 2760 } 2761 } 2762 2763 /* 2764 * csio_hws_configuring - Configuring state 2765 * @hw - HW module 2766 * @evt - Event 2767 * 2768 */ 2769 static void 2770 csio_hws_configuring(struct csio_hw *hw, enum csio_hw_ev evt) 2771 { 2772 hw->prev_evt = hw->cur_evt; 2773 hw->cur_evt = evt; 2774 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2775 2776 switch (evt) { 2777 case CSIO_HWE_INIT: 2778 
csio_set_state(&hw->sm, csio_hws_initializing); 2779 csio_hw_initialize(hw); 2780 break; 2781 2782 case CSIO_HWE_INIT_DONE: 2783 csio_set_state(&hw->sm, csio_hws_ready); 2784 /* Fan out event to all lnode SMs */ 2785 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY); 2786 break; 2787 2788 case CSIO_HWE_FATAL: 2789 csio_set_state(&hw->sm, csio_hws_uninit); 2790 break; 2791 2792 case CSIO_HWE_PCI_REMOVE: 2793 csio_do_bye(hw); 2794 break; 2795 default: 2796 CSIO_INC_STATS(hw, n_evt_unexp); 2797 break; 2798 } 2799 } 2800 2801 /* 2802 * csio_hws_initializing - Initialiazing state 2803 * @hw - HW module 2804 * @evt - Event 2805 * 2806 */ 2807 static void 2808 csio_hws_initializing(struct csio_hw *hw, enum csio_hw_ev evt) 2809 { 2810 hw->prev_evt = hw->cur_evt; 2811 hw->cur_evt = evt; 2812 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2813 2814 switch (evt) { 2815 case CSIO_HWE_INIT_DONE: 2816 csio_set_state(&hw->sm, csio_hws_ready); 2817 2818 /* Fan out event to all lnode SMs */ 2819 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY); 2820 2821 /* Enable interrupts */ 2822 csio_hw_intr_enable(hw); 2823 break; 2824 2825 case CSIO_HWE_FATAL: 2826 csio_set_state(&hw->sm, csio_hws_uninit); 2827 break; 2828 2829 case CSIO_HWE_PCI_REMOVE: 2830 csio_do_bye(hw); 2831 break; 2832 2833 default: 2834 CSIO_INC_STATS(hw, n_evt_unexp); 2835 break; 2836 } 2837 } 2838 2839 /* 2840 * csio_hws_ready - Ready state 2841 * @hw - HW module 2842 * @evt - Event 2843 * 2844 */ 2845 static void 2846 csio_hws_ready(struct csio_hw *hw, enum csio_hw_ev evt) 2847 { 2848 /* Remember the event */ 2849 hw->evtflag = evt; 2850 2851 hw->prev_evt = hw->cur_evt; 2852 hw->cur_evt = evt; 2853 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2854 2855 switch (evt) { 2856 case CSIO_HWE_HBA_RESET: 2857 case CSIO_HWE_FW_DLOAD: 2858 case CSIO_HWE_SUSPEND: 2859 case CSIO_HWE_PCI_REMOVE: 2860 case CSIO_HWE_PCIERR_DETECTED: 2861 csio_set_state(&hw->sm, csio_hws_quiescing); 2862 /* cleanup all outstanding cmds */ 2863 if (evt == CSIO_HWE_HBA_RESET 
|| 2864 evt == CSIO_HWE_PCIERR_DETECTED) 2865 csio_scsim_cleanup_io(csio_hw_to_scsim(hw), false); 2866 else 2867 csio_scsim_cleanup_io(csio_hw_to_scsim(hw), true); 2868 2869 csio_hw_intr_disable(hw); 2870 csio_hw_mbm_cleanup(hw); 2871 csio_evtq_stop(hw); 2872 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWSTOP); 2873 csio_evtq_flush(hw); 2874 csio_mgmtm_cleanup(csio_hw_to_mgmtm(hw)); 2875 csio_post_event(&hw->sm, CSIO_HWE_QUIESCED); 2876 break; 2877 2878 case CSIO_HWE_FATAL: 2879 csio_set_state(&hw->sm, csio_hws_uninit); 2880 break; 2881 2882 default: 2883 CSIO_INC_STATS(hw, n_evt_unexp); 2884 break; 2885 } 2886 } 2887 2888 /* 2889 * csio_hws_quiescing - Quiescing state 2890 * @hw - HW module 2891 * @evt - Event 2892 * 2893 */ 2894 static void 2895 csio_hws_quiescing(struct csio_hw *hw, enum csio_hw_ev evt) 2896 { 2897 hw->prev_evt = hw->cur_evt; 2898 hw->cur_evt = evt; 2899 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2900 2901 switch (evt) { 2902 case CSIO_HWE_QUIESCED: 2903 switch (hw->evtflag) { 2904 case CSIO_HWE_FW_DLOAD: 2905 csio_set_state(&hw->sm, csio_hws_resetting); 2906 /* Download firmware */ 2907 /* Fall through */ 2908 2909 case CSIO_HWE_HBA_RESET: 2910 csio_set_state(&hw->sm, csio_hws_resetting); 2911 /* Start reset of the HBA */ 2912 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWRESET); 2913 csio_wr_destroy_queues(hw, false); 2914 csio_do_reset(hw, false); 2915 csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET_DONE); 2916 break; 2917 2918 case CSIO_HWE_PCI_REMOVE: 2919 csio_set_state(&hw->sm, csio_hws_removing); 2920 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREMOVE); 2921 csio_wr_destroy_queues(hw, true); 2922 /* Now send the bye command */ 2923 csio_do_bye(hw); 2924 break; 2925 2926 case CSIO_HWE_SUSPEND: 2927 csio_set_state(&hw->sm, csio_hws_quiesced); 2928 break; 2929 2930 case CSIO_HWE_PCIERR_DETECTED: 2931 csio_set_state(&hw->sm, csio_hws_pcierr); 2932 csio_wr_destroy_queues(hw, false); 2933 break; 2934 2935 default: 2936 CSIO_INC_STATS(hw, n_evt_unexp); 2937 break; 2938 
2939 } 2940 break; 2941 2942 default: 2943 CSIO_INC_STATS(hw, n_evt_unexp); 2944 break; 2945 } 2946 } 2947 2948 /* 2949 * csio_hws_quiesced - Quiesced state 2950 * @hw - HW module 2951 * @evt - Event 2952 * 2953 */ 2954 static void 2955 csio_hws_quiesced(struct csio_hw *hw, enum csio_hw_ev evt) 2956 { 2957 hw->prev_evt = hw->cur_evt; 2958 hw->cur_evt = evt; 2959 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2960 2961 switch (evt) { 2962 case CSIO_HWE_RESUME: 2963 csio_set_state(&hw->sm, csio_hws_configuring); 2964 csio_hw_configure(hw); 2965 break; 2966 2967 default: 2968 CSIO_INC_STATS(hw, n_evt_unexp); 2969 break; 2970 } 2971 } 2972 2973 /* 2974 * csio_hws_resetting - HW Resetting state 2975 * @hw - HW module 2976 * @evt - Event 2977 * 2978 */ 2979 static void 2980 csio_hws_resetting(struct csio_hw *hw, enum csio_hw_ev evt) 2981 { 2982 hw->prev_evt = hw->cur_evt; 2983 hw->cur_evt = evt; 2984 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2985 2986 switch (evt) { 2987 case CSIO_HWE_HBA_RESET_DONE: 2988 csio_evtq_start(hw); 2989 csio_set_state(&hw->sm, csio_hws_configuring); 2990 csio_hw_configure(hw); 2991 break; 2992 2993 default: 2994 CSIO_INC_STATS(hw, n_evt_unexp); 2995 break; 2996 } 2997 } 2998 2999 /* 3000 * csio_hws_removing - PCI Hotplug removing state 3001 * @hw - HW module 3002 * @evt - Event 3003 * 3004 */ 3005 static void 3006 csio_hws_removing(struct csio_hw *hw, enum csio_hw_ev evt) 3007 { 3008 hw->prev_evt = hw->cur_evt; 3009 hw->cur_evt = evt; 3010 CSIO_INC_STATS(hw, n_evt_sm[evt]); 3011 3012 switch (evt) { 3013 case CSIO_HWE_HBA_RESET: 3014 if (!csio_is_hw_master(hw)) 3015 break; 3016 /* 3017 * The BYE should have alerady been issued, so we cant 3018 * use the mailbox interface. Hence we use the PL_RST 3019 * register directly. 
3020 */ 3021 csio_err(hw, "Resetting HW and waiting 2 seconds...\n"); 3022 csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A); 3023 mdelay(2000); 3024 break; 3025 3026 /* Should never receive any new events */ 3027 default: 3028 CSIO_INC_STATS(hw, n_evt_unexp); 3029 break; 3030 3031 } 3032 } 3033 3034 /* 3035 * csio_hws_pcierr - PCI Error state 3036 * @hw - HW module 3037 * @evt - Event 3038 * 3039 */ 3040 static void 3041 csio_hws_pcierr(struct csio_hw *hw, enum csio_hw_ev evt) 3042 { 3043 hw->prev_evt = hw->cur_evt; 3044 hw->cur_evt = evt; 3045 CSIO_INC_STATS(hw, n_evt_sm[evt]); 3046 3047 switch (evt) { 3048 case CSIO_HWE_PCIERR_SLOT_RESET: 3049 csio_evtq_start(hw); 3050 csio_set_state(&hw->sm, csio_hws_configuring); 3051 csio_hw_configure(hw); 3052 break; 3053 3054 default: 3055 CSIO_INC_STATS(hw, n_evt_unexp); 3056 break; 3057 } 3058 } 3059 3060 /*****************************************************************************/ 3061 /* END: HW SM */ 3062 /*****************************************************************************/ 3063 3064 /* 3065 * csio_handle_intr_status - table driven interrupt handler 3066 * @hw: HW instance 3067 * @reg: the interrupt status register to process 3068 * @acts: table of interrupt actions 3069 * 3070 * A table driven interrupt handler that applies a set of masks to an 3071 * interrupt status word and performs the corresponding actions if the 3072 * interrupts described by the mask have occured. The actions include 3073 * optionally emitting a warning or alert message. The table is terminated 3074 * by an entry specifying mask 0. Returns the number of fatal interrupt 3075 * conditions. 
3076 */ 3077 int 3078 csio_handle_intr_status(struct csio_hw *hw, unsigned int reg, 3079 const struct intr_info *acts) 3080 { 3081 int fatal = 0; 3082 unsigned int mask = 0; 3083 unsigned int status = csio_rd_reg32(hw, reg); 3084 3085 for ( ; acts->mask; ++acts) { 3086 if (!(status & acts->mask)) 3087 continue; 3088 if (acts->fatal) { 3089 fatal++; 3090 csio_fatal(hw, "Fatal %s (0x%x)\n", 3091 acts->msg, status & acts->mask); 3092 } else if (acts->msg) 3093 csio_info(hw, "%s (0x%x)\n", 3094 acts->msg, status & acts->mask); 3095 mask |= acts->mask; 3096 } 3097 status &= mask; 3098 if (status) /* clear processed interrupts */ 3099 csio_wr_reg32(hw, status, reg); 3100 return fatal; 3101 } 3102 3103 /* 3104 * TP interrupt handler. 3105 */ 3106 static void csio_tp_intr_handler(struct csio_hw *hw) 3107 { 3108 static struct intr_info tp_intr_info[] = { 3109 { 0x3fffffff, "TP parity error", -1, 1 }, 3110 { FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 }, 3111 { 0, NULL, 0, 0 } 3112 }; 3113 3114 if (csio_handle_intr_status(hw, TP_INT_CAUSE_A, tp_intr_info)) 3115 csio_hw_fatal_err(hw); 3116 } 3117 3118 /* 3119 * SGE interrupt handler. 
3120 */ 3121 static void csio_sge_intr_handler(struct csio_hw *hw) 3122 { 3123 uint64_t v; 3124 3125 static struct intr_info sge_intr_info[] = { 3126 { ERR_CPL_EXCEED_IQE_SIZE_F, 3127 "SGE received CPL exceeding IQE size", -1, 1 }, 3128 { ERR_INVALID_CIDX_INC_F, 3129 "SGE GTS CIDX increment too large", -1, 0 }, 3130 { ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 }, 3131 { ERR_DROPPED_DB_F, "SGE doorbell dropped", -1, 0 }, 3132 { ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F, 3133 "SGE IQID > 1023 received CPL for FL", -1, 0 }, 3134 { ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1, 3135 0 }, 3136 { ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1, 3137 0 }, 3138 { ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1, 3139 0 }, 3140 { ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1, 3141 0 }, 3142 { ERR_ING_CTXT_PRIO_F, 3143 "SGE too many priority ingress contexts", -1, 0 }, 3144 { ERR_EGR_CTXT_PRIO_F, 3145 "SGE too many priority egress contexts", -1, 0 }, 3146 { INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 }, 3147 { EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 }, 3148 { 0, NULL, 0, 0 } 3149 }; 3150 3151 v = (uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE1_A) | 3152 ((uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE2_A) << 32); 3153 if (v) { 3154 csio_fatal(hw, "SGE parity error (%#llx)\n", 3155 (unsigned long long)v); 3156 csio_wr_reg32(hw, (uint32_t)(v & 0xFFFFFFFF), 3157 SGE_INT_CAUSE1_A); 3158 csio_wr_reg32(hw, (uint32_t)(v >> 32), SGE_INT_CAUSE2_A); 3159 } 3160 3161 v |= csio_handle_intr_status(hw, SGE_INT_CAUSE3_A, sge_intr_info); 3162 3163 if (csio_handle_intr_status(hw, SGE_INT_CAUSE3_A, sge_intr_info) || 3164 v != 0) 3165 csio_hw_fatal_err(hw); 3166 } 3167 3168 #define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\ 3169 OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F) 3170 #define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\ 3171 
IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F) 3172 3173 /* 3174 * CIM interrupt handler. 3175 */ 3176 static void csio_cim_intr_handler(struct csio_hw *hw) 3177 { 3178 static struct intr_info cim_intr_info[] = { 3179 { PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 }, 3180 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 }, 3181 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 }, 3182 { MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 }, 3183 { MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 }, 3184 { TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 }, 3185 { TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 }, 3186 { 0, NULL, 0, 0 } 3187 }; 3188 static struct intr_info cim_upintr_info[] = { 3189 { RSVDSPACEINT_F, "CIM reserved space access", -1, 1 }, 3190 { ILLTRANSINT_F, "CIM illegal transaction", -1, 1 }, 3191 { ILLWRINT_F, "CIM illegal write", -1, 1 }, 3192 { ILLRDINT_F, "CIM illegal read", -1, 1 }, 3193 { ILLRDBEINT_F, "CIM illegal read BE", -1, 1 }, 3194 { ILLWRBEINT_F, "CIM illegal write BE", -1, 1 }, 3195 { SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 }, 3196 { SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 }, 3197 { BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 }, 3198 { SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 }, 3199 { SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 }, 3200 { BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 }, 3201 { SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 }, 3202 { SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 }, 3203 { BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 }, 3204 { BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 }, 3205 { SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 }, 3206 { SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 }, 3207 { BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 }, 3208 { BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 }, 3209 { SGLRDPLINT_F, "CIM single 
read from PL space", -1, 1 },
		{ SGLWRPLINT_F, "CIM single write to PL space", -1, 1 },
		{ BLKRDPLINT_F, "CIM block read from PL space", -1, 1 },
		{ BLKWRPLINT_F, "CIM block write to PL space", -1, 1 },
		{ REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 },
		{ RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 },
		{ TIMEOUTINT_F, "CIM PIF timeout", -1, 1 },
		{ TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	int fat;

	/*
	 * Handle both CIM cause registers; a non-zero sum means at least
	 * one entry marked fatal fired, so escalate to a HW fatal error.
	 */
	fat = csio_handle_intr_status(hw, CIM_HOST_INT_CAUSE_A,
				      cim_intr_info) +
	      csio_handle_intr_status(hw, CIM_HOST_UPACC_INT_CAUSE_A,
				      cim_upintr_info);
	if (fat)
		csio_hw_fatal_err(hw);
}

/*
 * ULP RX interrupt handler.
 */
static void csio_ulprx_intr_handler(struct csio_hw *hw)
{
	static struct intr_info ulprx_intr_info[] = {
		{ 0x1800000, "ULPRX context error", -1, 1 },
		{ 0x7fffff, "ULPRX parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, ULP_RX_INT_CAUSE_A, ulprx_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * ULP TX interrupt handler.
 */
static void csio_ulptx_intr_handler(struct csio_hw *hw)
{
	static struct intr_info ulptx_intr_info[] = {
		/* PBL out-of-bounds entries are non-fatal (last field 0) */
		{ PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1,
		  0 },
		{ 0xfffffff, "ULPTX parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, ULP_TX_INT_CAUSE_A, ulptx_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * PM TX interrupt handler.
 */
static void csio_pmtx_intr_handler(struct csio_hw *hw)
{
	static struct intr_info pmtx_intr_info[] = {
		{ PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 },
		{ PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 },
		{ PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 },
		{ ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 },
		{ 0xffffff0, "PMTX framing error", -1, 1 },
		{ OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 },
		{ DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error", -1,
		  1 },
		{ ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 },
		{ PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1},
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, PM_TX_INT_CAUSE_A, pmtx_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * PM RX interrupt handler.
 */
static void csio_pmrx_intr_handler(struct csio_hw *hw)
{
	static struct intr_info pmrx_intr_info[] = {
		{ ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 },
		{ 0x3ffff0, "PMRX framing error", -1, 1 },
		{ OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 },
		{ DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error", -1,
		  1 },
		{ IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 },
		{ PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1},
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, PM_RX_INT_CAUSE_A, pmrx_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * CPL switch interrupt handler.
 */
static void csio_cplsw_intr_handler(struct csio_hw *hw)
{
	static struct intr_info cplsw_intr_info[] = {
		{ CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 },
		{ CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 },
		{ TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 },
		{ SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 },
		{ CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 },
		{ ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, CPL_INTR_CAUSE_A, cplsw_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * LE interrupt handler.
 */
static void csio_le_intr_handler(struct csio_hw *hw)
{
	enum chip_type chip = CHELSIO_CHIP_VERSION(hw->chip_id);

	static struct intr_info le_intr_info[] = {
		{ LIPMISS_F, "LE LIP miss", -1, 0 },
		{ LIP0_F, "LE 0 LIP error", -1, 0 },
		{ PARITYERR_F, "LE parity error", -1, 1 },
		{ UNKNOWNCMD_F, "LE unknown command", -1, 1 },
		{ REQQPARERR_F, "LE request queue parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	/* T6-and-later chips report the same faults via different bits */
	static struct intr_info t6_le_intr_info[] = {
		{ T6_LIPMISS_F, "LE LIP miss", -1, 0 },
		{ T6_LIP0_F, "LE 0 LIP error", -1, 0 },
		{ TCAMINTPERR_F, "LE parity error", -1, 1 },
		{ T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 },
		{ SSRAMINTPERR_F, "LE request queue parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE_A,
				    (chip == CHELSIO_T5) ?
				    le_intr_info : t6_le_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * MPS interrupt handler.
 */
static void csio_mps_intr_handler(struct csio_hw *hw)
{
	static struct intr_info mps_rx_intr_info[] = {
		{ 0xffffff, "MPS Rx parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_tx_intr_info[] = {
		{ TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
		{ NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
		{ TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
		  -1, 1 },
		{ TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
		  -1, 1 },
		{ BUBBLE_F, "MPS Tx underflow", -1, 1 },
		{ SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
		{ FRMERR_F, "MPS Tx framing error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_trc_intr_info[] = {
		{ FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 },
		{ PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error",
		  -1, 1 },
		{ MISCPERR_F, "MPS TRC misc parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_stat_sram_intr_info[] = {
		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_stat_tx_intr_info[] = {
		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_stat_rx_intr_info[] = {
		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_cls_intr_info[] = {
		{ MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 },
		{ MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 },
		{ HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	int fat;

	/* Walk every MPS cause register; non-zero sum => a fatal bit fired */
	fat = csio_handle_intr_status(hw, MPS_RX_PERR_INT_CAUSE_A,
				      mps_rx_intr_info) +
	      csio_handle_intr_status(hw, MPS_TX_INT_CAUSE_A,
				      mps_tx_intr_info) +
	      csio_handle_intr_status(hw, MPS_TRC_INT_CAUSE_A,
				      mps_trc_intr_info) +
	      csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_SRAM_A,
				      mps_stat_sram_intr_info) +
	      csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A,
				      mps_stat_tx_intr_info) +
	      csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A,
				      mps_stat_rx_intr_info) +
	      csio_handle_intr_status(hw, MPS_CLS_INT_CAUSE_A,
				      mps_cls_intr_info);

	/* Ack the top-level MPS cause and read it back to flush the write */
	csio_wr_reg32(hw, 0, MPS_INT_CAUSE_A);
	csio_rd_reg32(hw, MPS_INT_CAUSE_A);	/* flush */
	if (fat)
		csio_hw_fatal_err(hw);
}

#define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
		      ECC_UE_INT_CAUSE_F)

/*
 * EDC/MC interrupt handler.
 * @idx: memory controller index — MEM_EDC0/MEM_EDC1 select an EDC,
 *       anything larger selects the MC (also indexes name[]).
 */
static void csio_mem_intr_handler(struct csio_hw *hw, int idx)
{
	static const char name[3][5] = { "EDC0", "EDC1", "MC" };

	unsigned int addr, cnt_addr, v;

	if (idx <= MEM_EDC1) {
		addr = EDC_REG(EDC_INT_CAUSE_A, idx);
		cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
	} else {
		addr = MC_INT_CAUSE_A;
		cnt_addr = MC_ECC_STATUS_A;
	}

	v = csio_rd_reg32(hw, addr) & MEM_INT_MASK;
	if (v & PERR_INT_CAUSE_F)
		csio_fatal(hw, "%s FIFO parity error\n", name[idx]);
	if (v & ECC_CE_INT_CAUSE_F) {
		uint32_t cnt = ECC_CECNT_G(csio_rd_reg32(hw, cnt_addr));

		/* Clear the correctable-error counter after reading it */
		csio_wr_reg32(hw, ECC_CECNT_V(ECC_CECNT_M), cnt_addr);
		csio_warn(hw, "%u %s correctable ECC data error%s\n",
			  cnt, name[idx], cnt > 1 ? "s" : "");
	}
	if (v & ECC_UE_INT_CAUSE_F)
		csio_fatal(hw, "%s uncorrectable ECC data error\n", name[idx]);

	csio_wr_reg32(hw, v, addr);
	/* Parity and uncorrectable ECC errors are fatal; CE alone is not */
	if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
		csio_hw_fatal_err(hw);
}

/*
 * MA interrupt handler. Any MA interrupt is treated as fatal.
 */
static void csio_ma_intr_handler(struct csio_hw *hw)
{
	uint32_t v, status = csio_rd_reg32(hw, MA_INT_CAUSE_A);

	if (status & MEM_PERR_INT_CAUSE_F)
		csio_fatal(hw, "MA parity error, parity status %#x\n",
			   csio_rd_reg32(hw, MA_PARITY_ERROR_STATUS_A));
	if (status & MEM_WRAP_INT_CAUSE_F) {
		v = csio_rd_reg32(hw, MA_INT_WRAP_STATUS_A);
		csio_fatal(hw,
			   "MA address wrap-around error by client %u to address %#x\n",
			   MEM_WRAP_CLIENT_NUM_G(v), MEM_WRAP_ADDRESS_G(v) << 4);
	}
	csio_wr_reg32(hw, status, MA_INT_CAUSE_A);
	csio_hw_fatal_err(hw);
}

/*
 * SMB interrupt handler.
 */
static void csio_smb_intr_handler(struct csio_hw *hw)
{
	static struct intr_info smb_intr_info[] = {
		{ MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 },
		{ MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 },
		{ SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, SMB_INT_CAUSE_A, smb_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * NC-SI interrupt handler.
 */
static void csio_ncsi_intr_handler(struct csio_hw *hw)
{
	static struct intr_info ncsi_intr_info[] = {
		{ CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 },
		{ MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 },
		{ TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 },
		{ RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, NCSI_INT_CAUSE_A, ncsi_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * XGMAC interrupt handler.
 * @port: port index used to locate the per-port MAC cause register.
 */
static void csio_xgmac_intr_handler(struct csio_hw *hw, int port)
{
	uint32_t v = csio_rd_reg32(hw, T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A));

	/* Only Tx/Rx FIFO parity errors are of interest here */
	v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F;
	if (!v)
		return;

	if (v & TXFIFO_PRTY_ERR_F)
		csio_fatal(hw, "XGMAC %d Tx FIFO parity error\n", port);
	if (v & RXFIFO_PRTY_ERR_F)
		csio_fatal(hw, "XGMAC %d Rx FIFO parity error\n", port);
	csio_wr_reg32(hw, v, T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A));
	csio_hw_fatal_err(hw);
}

/*
 * PL interrupt handler.
 */
static void csio_pl_intr_handler(struct csio_hw *hw)
{
	static struct intr_info pl_intr_info[] = {
		{ FATALPERR_F, "T4 fatal parity error", -1, 1 },
		{ PERRVFID_F, "PL VFID_MAP parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, PL_PL_INT_CAUSE_A, pl_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * csio_hw_slow_intr_handler - control path interrupt handler
 * @hw: HW module
 *
 * Interrupt handler for non-data global interrupt events, e.g., errors.
 * The designation 'slow' is because it involves register reads, while
 * data interrupts typically don't involve any MMIOs.
 *
 * Returns 1 if any global interrupt cause bit was handled, 0 if the
 * interrupt was not ours (stray).
 */
int
csio_hw_slow_intr_handler(struct csio_hw *hw)
{
	uint32_t cause = csio_rd_reg32(hw, PL_INT_CAUSE_A);

	if (!(cause & CSIO_GLBL_INTR_MASK)) {
		CSIO_INC_STATS(hw, n_plint_unexp);
		return 0;
	}

	csio_dbg(hw, "Slow interrupt! cause: 0x%x\n", cause);

	CSIO_INC_STATS(hw, n_plint_cnt);

	/* Dispatch each asserted cause bit to its module handler */
	if (cause & CIM_F)
		csio_cim_intr_handler(hw);

	if (cause & MPS_F)
		csio_mps_intr_handler(hw);

	if (cause & NCSI_F)
		csio_ncsi_intr_handler(hw);

	if (cause & PL_F)
		csio_pl_intr_handler(hw);

	if (cause & SMB_F)
		csio_smb_intr_handler(hw);

	if (cause & XGMAC0_F)
		csio_xgmac_intr_handler(hw, 0);

	if (cause & XGMAC1_F)
		csio_xgmac_intr_handler(hw, 1);

	if (cause & XGMAC_KR0_F)
		csio_xgmac_intr_handler(hw, 2);

	if (cause & XGMAC_KR1_F)
		csio_xgmac_intr_handler(hw, 3);

	if (cause & PCIE_F)
		hw->chip_ops->chip_pcie_intr_handler(hw);

	if (cause & MC_F)
		csio_mem_intr_handler(hw, MEM_MC);

	if (cause & EDC0_F)
		csio_mem_intr_handler(hw, MEM_EDC0);

	if (cause & EDC1_F)
		csio_mem_intr_handler(hw, MEM_EDC1);

	if (cause & LE_F)
		csio_le_intr_handler(hw);

	if (cause & TP_F)
		csio_tp_intr_handler(hw);

	if (cause & MA_F)
		csio_ma_intr_handler(hw);

	if (cause & PM_TX_F)
		csio_pmtx_intr_handler(hw);

	if (cause & PM_RX_F)
		csio_pmrx_intr_handler(hw);

	if (cause & ULP_RX_F)
		csio_ulprx_intr_handler(hw);

	if (cause & CPL_SWITCH_F)
		csio_cplsw_intr_handler(hw);

	if (cause & SGE_F)
		csio_sge_intr_handler(hw);

	if (cause & ULP_TX_F)
		csio_ulptx_intr_handler(hw);

	/* Clear the interrupts just processed for which we are the master. */
	csio_wr_reg32(hw, cause & CSIO_GLBL_INTR_MASK, PL_INT_CAUSE_A);
	csio_rd_reg32(hw, PL_INT_CAUSE_A);	/* flush */

	return 1;
}

/*****************************************************************************
 * HW <--> mailbox interfacing routines.
3652 ****************************************************************************/ 3653 /* 3654 * csio_mberr_worker - Worker thread (dpc) for mailbox/error completions 3655 * 3656 * @data: Private data pointer. 3657 * 3658 * Called from worker thread context. 3659 */ 3660 static void 3661 csio_mberr_worker(void *data) 3662 { 3663 struct csio_hw *hw = (struct csio_hw *)data; 3664 struct csio_mbm *mbm = &hw->mbm; 3665 LIST_HEAD(cbfn_q); 3666 struct csio_mb *mbp_next; 3667 int rv; 3668 3669 del_timer_sync(&mbm->timer); 3670 3671 spin_lock_irq(&hw->lock); 3672 if (list_empty(&mbm->cbfn_q)) { 3673 spin_unlock_irq(&hw->lock); 3674 return; 3675 } 3676 3677 list_splice_tail_init(&mbm->cbfn_q, &cbfn_q); 3678 mbm->stats.n_cbfnq = 0; 3679 3680 /* Try to start waiting mailboxes */ 3681 if (!list_empty(&mbm->req_q)) { 3682 mbp_next = list_first_entry(&mbm->req_q, struct csio_mb, list); 3683 list_del_init(&mbp_next->list); 3684 3685 rv = csio_mb_issue(hw, mbp_next); 3686 if (rv != 0) 3687 list_add_tail(&mbp_next->list, &mbm->req_q); 3688 else 3689 CSIO_DEC_STATS(mbm, n_activeq); 3690 } 3691 spin_unlock_irq(&hw->lock); 3692 3693 /* Now callback completions */ 3694 csio_mb_completions(hw, &cbfn_q); 3695 } 3696 3697 /* 3698 * csio_hw_mb_timer - Top-level Mailbox timeout handler. 3699 * 3700 * @data: private data pointer 3701 * 3702 **/ 3703 static void 3704 csio_hw_mb_timer(struct timer_list *t) 3705 { 3706 struct csio_mbm *mbm = from_timer(mbm, t, timer); 3707 struct csio_hw *hw = mbm->hw; 3708 struct csio_mb *mbp = NULL; 3709 3710 spin_lock_irq(&hw->lock); 3711 mbp = csio_mb_tmo_handler(hw); 3712 spin_unlock_irq(&hw->lock); 3713 3714 /* Call back the function for the timed-out Mailbox */ 3715 if (mbp) 3716 mbp->mb_cbfn(hw, mbp); 3717 3718 } 3719 3720 /* 3721 * csio_hw_mbm_cleanup - Cleanup Mailbox module. 3722 * @hw: HW module 3723 * 3724 * Called with lock held, should exit with lock held. 
3725 * Cancels outstanding mailboxes (waiting, in-flight) and gathers them 3726 * into a local queue. Drops lock and calls the completions. Holds 3727 * lock and returns. 3728 */ 3729 static void 3730 csio_hw_mbm_cleanup(struct csio_hw *hw) 3731 { 3732 LIST_HEAD(cbfn_q); 3733 3734 csio_mb_cancel_all(hw, &cbfn_q); 3735 3736 spin_unlock_irq(&hw->lock); 3737 csio_mb_completions(hw, &cbfn_q); 3738 spin_lock_irq(&hw->lock); 3739 } 3740 3741 /***************************************************************************** 3742 * Event handling 3743 ****************************************************************************/ 3744 int 3745 csio_enqueue_evt(struct csio_hw *hw, enum csio_evt type, void *evt_msg, 3746 uint16_t len) 3747 { 3748 struct csio_evt_msg *evt_entry = NULL; 3749 3750 if (type >= CSIO_EVT_MAX) 3751 return -EINVAL; 3752 3753 if (len > CSIO_EVT_MSG_SIZE) 3754 return -EINVAL; 3755 3756 if (hw->flags & CSIO_HWF_FWEVT_STOP) 3757 return -EINVAL; 3758 3759 if (list_empty(&hw->evt_free_q)) { 3760 csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n", 3761 type, len); 3762 return -ENOMEM; 3763 } 3764 3765 evt_entry = list_first_entry(&hw->evt_free_q, 3766 struct csio_evt_msg, list); 3767 list_del_init(&evt_entry->list); 3768 3769 /* copy event msg and queue the event */ 3770 evt_entry->type = type; 3771 memcpy((void *)evt_entry->data, evt_msg, len); 3772 list_add_tail(&evt_entry->list, &hw->evt_active_q); 3773 3774 CSIO_DEC_STATS(hw, n_evt_freeq); 3775 CSIO_INC_STATS(hw, n_evt_activeq); 3776 3777 return 0; 3778 } 3779 3780 static int 3781 csio_enqueue_evt_lock(struct csio_hw *hw, enum csio_evt type, void *evt_msg, 3782 uint16_t len, bool msg_sg) 3783 { 3784 struct csio_evt_msg *evt_entry = NULL; 3785 struct csio_fl_dma_buf *fl_sg; 3786 uint32_t off = 0; 3787 unsigned long flags; 3788 int n, ret = 0; 3789 3790 if (type >= CSIO_EVT_MAX) 3791 return -EINVAL; 3792 3793 if (len > CSIO_EVT_MSG_SIZE) 3794 return -EINVAL; 3795 3796 spin_lock_irqsave(&hw->lock, 
flags); 3797 if (hw->flags & CSIO_HWF_FWEVT_STOP) { 3798 ret = -EINVAL; 3799 goto out; 3800 } 3801 3802 if (list_empty(&hw->evt_free_q)) { 3803 csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n", 3804 type, len); 3805 ret = -ENOMEM; 3806 goto out; 3807 } 3808 3809 evt_entry = list_first_entry(&hw->evt_free_q, 3810 struct csio_evt_msg, list); 3811 list_del_init(&evt_entry->list); 3812 3813 /* copy event msg and queue the event */ 3814 evt_entry->type = type; 3815 3816 /* If Payload in SG list*/ 3817 if (msg_sg) { 3818 fl_sg = (struct csio_fl_dma_buf *) evt_msg; 3819 for (n = 0; (n < CSIO_MAX_FLBUF_PER_IQWR && off < len); n++) { 3820 memcpy((void *)((uintptr_t)evt_entry->data + off), 3821 fl_sg->flbufs[n].vaddr, 3822 fl_sg->flbufs[n].len); 3823 off += fl_sg->flbufs[n].len; 3824 } 3825 } else 3826 memcpy((void *)evt_entry->data, evt_msg, len); 3827 3828 list_add_tail(&evt_entry->list, &hw->evt_active_q); 3829 CSIO_DEC_STATS(hw, n_evt_freeq); 3830 CSIO_INC_STATS(hw, n_evt_activeq); 3831 out: 3832 spin_unlock_irqrestore(&hw->lock, flags); 3833 return ret; 3834 } 3835 3836 static void 3837 csio_free_evt(struct csio_hw *hw, struct csio_evt_msg *evt_entry) 3838 { 3839 if (evt_entry) { 3840 spin_lock_irq(&hw->lock); 3841 list_del_init(&evt_entry->list); 3842 list_add_tail(&evt_entry->list, &hw->evt_free_q); 3843 CSIO_DEC_STATS(hw, n_evt_activeq); 3844 CSIO_INC_STATS(hw, n_evt_freeq); 3845 spin_unlock_irq(&hw->lock); 3846 } 3847 } 3848 3849 void 3850 csio_evtq_flush(struct csio_hw *hw) 3851 { 3852 uint32_t count; 3853 count = 30; 3854 while (hw->flags & CSIO_HWF_FWEVT_PENDING && count--) { 3855 spin_unlock_irq(&hw->lock); 3856 msleep(2000); 3857 spin_lock_irq(&hw->lock); 3858 } 3859 3860 CSIO_DB_ASSERT(!(hw->flags & CSIO_HWF_FWEVT_PENDING)); 3861 } 3862 3863 static void 3864 csio_evtq_stop(struct csio_hw *hw) 3865 { 3866 hw->flags |= CSIO_HWF_FWEVT_STOP; 3867 } 3868 3869 static void 3870 csio_evtq_start(struct csio_hw *hw) 3871 { 3872 hw->flags &= 
~CSIO_HWF_FWEVT_STOP; 3873 } 3874 3875 static void 3876 csio_evtq_cleanup(struct csio_hw *hw) 3877 { 3878 struct list_head *evt_entry, *next_entry; 3879 3880 /* Release outstanding events from activeq to freeq*/ 3881 if (!list_empty(&hw->evt_active_q)) 3882 list_splice_tail_init(&hw->evt_active_q, &hw->evt_free_q); 3883 3884 hw->stats.n_evt_activeq = 0; 3885 hw->flags &= ~CSIO_HWF_FWEVT_PENDING; 3886 3887 /* Freeup event entry */ 3888 list_for_each_safe(evt_entry, next_entry, &hw->evt_free_q) { 3889 kfree(evt_entry); 3890 CSIO_DEC_STATS(hw, n_evt_freeq); 3891 } 3892 3893 hw->stats.n_evt_freeq = 0; 3894 } 3895 3896 3897 static void 3898 csio_process_fwevtq_entry(struct csio_hw *hw, void *wr, uint32_t len, 3899 struct csio_fl_dma_buf *flb, void *priv) 3900 { 3901 __u8 op; 3902 void *msg = NULL; 3903 uint32_t msg_len = 0; 3904 bool msg_sg = 0; 3905 3906 op = ((struct rss_header *) wr)->opcode; 3907 if (op == CPL_FW6_PLD) { 3908 CSIO_INC_STATS(hw, n_cpl_fw6_pld); 3909 if (!flb || !flb->totlen) { 3910 CSIO_INC_STATS(hw, n_cpl_unexp); 3911 return; 3912 } 3913 3914 msg = (void *) flb; 3915 msg_len = flb->totlen; 3916 msg_sg = 1; 3917 } else if (op == CPL_FW6_MSG || op == CPL_FW4_MSG) { 3918 3919 CSIO_INC_STATS(hw, n_cpl_fw6_msg); 3920 /* skip RSS header */ 3921 msg = (void *)((uintptr_t)wr + sizeof(__be64)); 3922 msg_len = (op == CPL_FW6_MSG) ? sizeof(struct cpl_fw6_msg) : 3923 sizeof(struct cpl_fw4_msg); 3924 } else { 3925 csio_warn(hw, "unexpected CPL %#x on FW event queue\n", op); 3926 CSIO_INC_STATS(hw, n_cpl_unexp); 3927 return; 3928 } 3929 3930 /* 3931 * Enqueue event to EventQ. 
Events processing happens 3932 * in Event worker thread context 3933 */ 3934 if (csio_enqueue_evt_lock(hw, CSIO_EVT_FW, msg, 3935 (uint16_t)msg_len, msg_sg)) 3936 CSIO_INC_STATS(hw, n_evt_drop); 3937 } 3938 3939 void 3940 csio_evtq_worker(struct work_struct *work) 3941 { 3942 struct csio_hw *hw = container_of(work, struct csio_hw, evtq_work); 3943 struct list_head *evt_entry, *next_entry; 3944 LIST_HEAD(evt_q); 3945 struct csio_evt_msg *evt_msg; 3946 struct cpl_fw6_msg *msg; 3947 struct csio_rnode *rn; 3948 int rv = 0; 3949 uint8_t evtq_stop = 0; 3950 3951 csio_dbg(hw, "event worker thread active evts#%d\n", 3952 hw->stats.n_evt_activeq); 3953 3954 spin_lock_irq(&hw->lock); 3955 while (!list_empty(&hw->evt_active_q)) { 3956 list_splice_tail_init(&hw->evt_active_q, &evt_q); 3957 spin_unlock_irq(&hw->lock); 3958 3959 list_for_each_safe(evt_entry, next_entry, &evt_q) { 3960 evt_msg = (struct csio_evt_msg *) evt_entry; 3961 3962 /* Drop events if queue is STOPPED */ 3963 spin_lock_irq(&hw->lock); 3964 if (hw->flags & CSIO_HWF_FWEVT_STOP) 3965 evtq_stop = 1; 3966 spin_unlock_irq(&hw->lock); 3967 if (evtq_stop) { 3968 CSIO_INC_STATS(hw, n_evt_drop); 3969 goto free_evt; 3970 } 3971 3972 switch (evt_msg->type) { 3973 case CSIO_EVT_FW: 3974 msg = (struct cpl_fw6_msg *)(evt_msg->data); 3975 3976 if ((msg->opcode == CPL_FW6_MSG || 3977 msg->opcode == CPL_FW4_MSG) && 3978 !msg->type) { 3979 rv = csio_mb_fwevt_handler(hw, 3980 msg->data); 3981 if (!rv) 3982 break; 3983 /* Handle any remaining fw events */ 3984 csio_fcoe_fwevt_handler(hw, 3985 msg->opcode, msg->data); 3986 } else if (msg->opcode == CPL_FW6_PLD) { 3987 3988 csio_fcoe_fwevt_handler(hw, 3989 msg->opcode, msg->data); 3990 } else { 3991 csio_warn(hw, 3992 "Unhandled FW msg op %x type %x\n", 3993 msg->opcode, msg->type); 3994 CSIO_INC_STATS(hw, n_evt_drop); 3995 } 3996 break; 3997 3998 case CSIO_EVT_MBX: 3999 csio_mberr_worker(hw); 4000 break; 4001 4002 case CSIO_EVT_DEV_LOSS: 4003 memcpy(&rn, evt_msg->data, 
sizeof(rn)); 4004 csio_rnode_devloss_handler(rn); 4005 break; 4006 4007 default: 4008 csio_warn(hw, "Unhandled event %x on evtq\n", 4009 evt_msg->type); 4010 CSIO_INC_STATS(hw, n_evt_unexp); 4011 break; 4012 } 4013 free_evt: 4014 csio_free_evt(hw, evt_msg); 4015 } 4016 4017 spin_lock_irq(&hw->lock); 4018 } 4019 hw->flags &= ~CSIO_HWF_FWEVT_PENDING; 4020 spin_unlock_irq(&hw->lock); 4021 } 4022 4023 int 4024 csio_fwevtq_handler(struct csio_hw *hw) 4025 { 4026 int rv; 4027 4028 if (csio_q_iqid(hw, hw->fwevt_iq_idx) == CSIO_MAX_QID) { 4029 CSIO_INC_STATS(hw, n_int_stray); 4030 return -EINVAL; 4031 } 4032 4033 rv = csio_wr_process_iq_idx(hw, hw->fwevt_iq_idx, 4034 csio_process_fwevtq_entry, NULL); 4035 return rv; 4036 } 4037 4038 /**************************************************************************** 4039 * Entry points 4040 ****************************************************************************/ 4041 4042 /* Management module */ 4043 /* 4044 * csio_mgmt_req_lookup - Lookup the given IO req exist in Active Q. 4045 * mgmt - mgmt module 4046 * @io_req - io request 4047 * 4048 * Return - 0:if given IO Req exists in active Q. 4049 * -EINVAL :if lookup fails. 4050 */ 4051 int 4052 csio_mgmt_req_lookup(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req) 4053 { 4054 struct list_head *tmp; 4055 4056 /* Lookup ioreq in the ACTIVEQ */ 4057 list_for_each(tmp, &mgmtm->active_q) { 4058 if (io_req == (struct csio_ioreq *)tmp) 4059 return 0; 4060 } 4061 return -EINVAL; 4062 } 4063 4064 #define ECM_MIN_TMO 1000 /* Minimum timeout value for req */ 4065 4066 /* 4067 * csio_mgmts_tmo_handler - MGMT IO Timeout handler. 4068 * @data - Event data. 4069 * 4070 * Return - none. 
4071 */ 4072 static void 4073 csio_mgmt_tmo_handler(struct timer_list *t) 4074 { 4075 struct csio_mgmtm *mgmtm = from_timer(mgmtm, t, mgmt_timer); 4076 struct list_head *tmp; 4077 struct csio_ioreq *io_req; 4078 4079 csio_dbg(mgmtm->hw, "Mgmt timer invoked!\n"); 4080 4081 spin_lock_irq(&mgmtm->hw->lock); 4082 4083 list_for_each(tmp, &mgmtm->active_q) { 4084 io_req = (struct csio_ioreq *) tmp; 4085 io_req->tmo -= min_t(uint32_t, io_req->tmo, ECM_MIN_TMO); 4086 4087 if (!io_req->tmo) { 4088 /* Dequeue the request from retry Q. */ 4089 tmp = csio_list_prev(tmp); 4090 list_del_init(&io_req->sm.sm_list); 4091 if (io_req->io_cbfn) { 4092 /* io_req will be freed by completion handler */ 4093 io_req->wr_status = -ETIMEDOUT; 4094 io_req->io_cbfn(mgmtm->hw, io_req); 4095 } else { 4096 CSIO_DB_ASSERT(0); 4097 } 4098 } 4099 } 4100 4101 /* If retry queue is not empty, re-arm timer */ 4102 if (!list_empty(&mgmtm->active_q)) 4103 mod_timer(&mgmtm->mgmt_timer, 4104 jiffies + msecs_to_jiffies(ECM_MIN_TMO)); 4105 spin_unlock_irq(&mgmtm->hw->lock); 4106 } 4107 4108 static void 4109 csio_mgmtm_cleanup(struct csio_mgmtm *mgmtm) 4110 { 4111 struct csio_hw *hw = mgmtm->hw; 4112 struct csio_ioreq *io_req; 4113 struct list_head *tmp; 4114 uint32_t count; 4115 4116 count = 30; 4117 /* Wait for all outstanding req to complete gracefully */ 4118 while ((!list_empty(&mgmtm->active_q)) && count--) { 4119 spin_unlock_irq(&hw->lock); 4120 msleep(2000); 4121 spin_lock_irq(&hw->lock); 4122 } 4123 4124 /* release outstanding req from ACTIVEQ */ 4125 list_for_each(tmp, &mgmtm->active_q) { 4126 io_req = (struct csio_ioreq *) tmp; 4127 tmp = csio_list_prev(tmp); 4128 list_del_init(&io_req->sm.sm_list); 4129 mgmtm->stats.n_active--; 4130 if (io_req->io_cbfn) { 4131 /* io_req will be freed by completion handler */ 4132 io_req->wr_status = -ETIMEDOUT; 4133 io_req->io_cbfn(mgmtm->hw, io_req); 4134 } 4135 } 4136 } 4137 4138 /* 4139 * csio_mgmt_init - Mgmt module init entry point 4140 * @mgmtsm - mgmt module 
4141 * @hw - HW module 4142 * 4143 * Initialize mgmt timer, resource wait queue, active queue, 4144 * completion q. Allocate Egress and Ingress 4145 * WR queues and save off the queue index returned by the WR 4146 * module for future use. Allocate and save off mgmt reqs in the 4147 * mgmt_req_freelist for future use. Make sure their SM is initialized 4148 * to uninit state. 4149 * Returns: 0 - on success 4150 * -ENOMEM - on error. 4151 */ 4152 static int 4153 csio_mgmtm_init(struct csio_mgmtm *mgmtm, struct csio_hw *hw) 4154 { 4155 timer_setup(&mgmtm->mgmt_timer, csio_mgmt_tmo_handler, 0); 4156 4157 INIT_LIST_HEAD(&mgmtm->active_q); 4158 INIT_LIST_HEAD(&mgmtm->cbfn_q); 4159 4160 mgmtm->hw = hw; 4161 /*mgmtm->iq_idx = hw->fwevt_iq_idx;*/ 4162 4163 return 0; 4164 } 4165 4166 /* 4167 * csio_mgmtm_exit - MGMT module exit entry point 4168 * @mgmtsm - mgmt module 4169 * 4170 * This function called during MGMT module uninit. 4171 * Stop timers, free ioreqs allocated. 4172 * Returns: None 4173 * 4174 */ 4175 static void 4176 csio_mgmtm_exit(struct csio_mgmtm *mgmtm) 4177 { 4178 del_timer_sync(&mgmtm->mgmt_timer); 4179 } 4180 4181 4182 /** 4183 * csio_hw_start - Kicks off the HW State machine 4184 * @hw: Pointer to HW module. 4185 * 4186 * It is assumed that the initialization is a synchronous operation. 4187 * So when we return afer posting the event, the HW SM should be in 4188 * the ready state, if there were no errors during init. 
4189 */ 4190 int 4191 csio_hw_start(struct csio_hw *hw) 4192 { 4193 spin_lock_irq(&hw->lock); 4194 csio_post_event(&hw->sm, CSIO_HWE_CFG); 4195 spin_unlock_irq(&hw->lock); 4196 4197 if (csio_is_hw_ready(hw)) 4198 return 0; 4199 else if (csio_match_state(hw, csio_hws_uninit)) 4200 return -EINVAL; 4201 else 4202 return -ENODEV; 4203 } 4204 4205 int 4206 csio_hw_stop(struct csio_hw *hw) 4207 { 4208 csio_post_event(&hw->sm, CSIO_HWE_PCI_REMOVE); 4209 4210 if (csio_is_hw_removing(hw)) 4211 return 0; 4212 else 4213 return -EINVAL; 4214 } 4215 4216 /* Max reset retries */ 4217 #define CSIO_MAX_RESET_RETRIES 3 4218 4219 /** 4220 * csio_hw_reset - Reset the hardware 4221 * @hw: HW module. 4222 * 4223 * Caller should hold lock across this function. 4224 */ 4225 int 4226 csio_hw_reset(struct csio_hw *hw) 4227 { 4228 if (!csio_is_hw_master(hw)) 4229 return -EPERM; 4230 4231 if (hw->rst_retries >= CSIO_MAX_RESET_RETRIES) { 4232 csio_dbg(hw, "Max hw reset attempts reached.."); 4233 return -EINVAL; 4234 } 4235 4236 hw->rst_retries++; 4237 csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET); 4238 4239 if (csio_is_hw_ready(hw)) { 4240 hw->rst_retries = 0; 4241 hw->stats.n_reset_start = jiffies_to_msecs(jiffies); 4242 return 0; 4243 } else 4244 return -EINVAL; 4245 } 4246 4247 /* 4248 * csio_hw_get_device_id - Caches the Adapter's vendor & device id. 4249 * @hw: HW module. 4250 */ 4251 static void 4252 csio_hw_get_device_id(struct csio_hw *hw) 4253 { 4254 /* Is the adapter device id cached already ?*/ 4255 if (csio_is_dev_id_cached(hw)) 4256 return; 4257 4258 /* Get the PCI vendor & device id */ 4259 pci_read_config_word(hw->pdev, PCI_VENDOR_ID, 4260 &hw->params.pci.vendor_id); 4261 pci_read_config_word(hw->pdev, PCI_DEVICE_ID, 4262 &hw->params.pci.device_id); 4263 4264 csio_dev_id_cached(hw); 4265 hw->chip_id = (hw->params.pci.device_id & CSIO_HW_CHIP_MASK); 4266 4267 } /* csio_hw_get_device_id */ 4268 4269 /* 4270 * csio_hw_set_description - Set the model, description of the hw. 
4271 * @hw: HW module. 4272 * @ven_id: PCI Vendor ID 4273 * @dev_id: PCI Device ID 4274 */ 4275 static void 4276 csio_hw_set_description(struct csio_hw *hw, uint16_t ven_id, uint16_t dev_id) 4277 { 4278 uint32_t adap_type, prot_type; 4279 4280 if (ven_id == CSIO_VENDOR_ID) { 4281 prot_type = (dev_id & CSIO_ASIC_DEVID_PROTO_MASK); 4282 adap_type = (dev_id & CSIO_ASIC_DEVID_TYPE_MASK); 4283 4284 if (prot_type == CSIO_T5_FCOE_ASIC) { 4285 memcpy(hw->hw_ver, 4286 csio_t5_fcoe_adapters[adap_type].model_no, 16); 4287 memcpy(hw->model_desc, 4288 csio_t5_fcoe_adapters[adap_type].description, 4289 32); 4290 } else { 4291 char tempName[32] = "Chelsio FCoE Controller"; 4292 memcpy(hw->model_desc, tempName, 32); 4293 } 4294 } 4295 } /* csio_hw_set_description */ 4296 4297 /** 4298 * csio_hw_init - Initialize HW module. 4299 * @hw: Pointer to HW module. 4300 * 4301 * Initialize the members of the HW module. 4302 */ 4303 int 4304 csio_hw_init(struct csio_hw *hw) 4305 { 4306 int rv = -EINVAL; 4307 uint32_t i; 4308 uint16_t ven_id, dev_id; 4309 struct csio_evt_msg *evt_entry; 4310 4311 INIT_LIST_HEAD(&hw->sm.sm_list); 4312 csio_init_state(&hw->sm, csio_hws_uninit); 4313 spin_lock_init(&hw->lock); 4314 INIT_LIST_HEAD(&hw->sln_head); 4315 4316 /* Get the PCI vendor & device id */ 4317 csio_hw_get_device_id(hw); 4318 4319 strcpy(hw->name, CSIO_HW_NAME); 4320 4321 /* Initialize the HW chip ops T5 specific ops */ 4322 hw->chip_ops = &t5_ops; 4323 4324 /* Set the model & its description */ 4325 4326 ven_id = hw->params.pci.vendor_id; 4327 dev_id = hw->params.pci.device_id; 4328 4329 csio_hw_set_description(hw, ven_id, dev_id); 4330 4331 /* Initialize default log level */ 4332 hw->params.log_level = (uint32_t) csio_dbg_level; 4333 4334 csio_set_fwevt_intr_idx(hw, -1); 4335 csio_set_nondata_intr_idx(hw, -1); 4336 4337 /* Init all the modules: Mailbox, WorkRequest and Transport */ 4338 if (csio_mbm_init(csio_hw_to_mbm(hw), hw, csio_hw_mb_timer)) 4339 goto err; 4340 4341 rv = 
csio_wrm_init(csio_hw_to_wrm(hw), hw); 4342 if (rv) 4343 goto err_mbm_exit; 4344 4345 rv = csio_scsim_init(csio_hw_to_scsim(hw), hw); 4346 if (rv) 4347 goto err_wrm_exit; 4348 4349 rv = csio_mgmtm_init(csio_hw_to_mgmtm(hw), hw); 4350 if (rv) 4351 goto err_scsim_exit; 4352 /* Pre-allocate evtq and initialize them */ 4353 INIT_LIST_HEAD(&hw->evt_active_q); 4354 INIT_LIST_HEAD(&hw->evt_free_q); 4355 for (i = 0; i < csio_evtq_sz; i++) { 4356 4357 evt_entry = kzalloc(sizeof(struct csio_evt_msg), GFP_KERNEL); 4358 if (!evt_entry) { 4359 rv = -ENOMEM; 4360 csio_err(hw, "Failed to initialize eventq"); 4361 goto err_evtq_cleanup; 4362 } 4363 4364 list_add_tail(&evt_entry->list, &hw->evt_free_q); 4365 CSIO_INC_STATS(hw, n_evt_freeq); 4366 } 4367 4368 hw->dev_num = dev_num; 4369 dev_num++; 4370 4371 return 0; 4372 4373 err_evtq_cleanup: 4374 csio_evtq_cleanup(hw); 4375 csio_mgmtm_exit(csio_hw_to_mgmtm(hw)); 4376 err_scsim_exit: 4377 csio_scsim_exit(csio_hw_to_scsim(hw)); 4378 err_wrm_exit: 4379 csio_wrm_exit(csio_hw_to_wrm(hw), hw); 4380 err_mbm_exit: 4381 csio_mbm_exit(csio_hw_to_mbm(hw)); 4382 err: 4383 return rv; 4384 } 4385 4386 /** 4387 * csio_hw_exit - Un-initialize HW module. 4388 * @hw: Pointer to HW module. 4389 * 4390 */ 4391 void 4392 csio_hw_exit(struct csio_hw *hw) 4393 { 4394 csio_evtq_cleanup(hw); 4395 csio_mgmtm_exit(csio_hw_to_mgmtm(hw)); 4396 csio_scsim_exit(csio_hw_to_scsim(hw)); 4397 csio_wrm_exit(csio_hw_to_wrm(hw), hw); 4398 csio_mbm_exit(csio_hw_to_mbm(hw)); 4399 } 4400