/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/firmware.h>
#include <linux/stddef.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/log2.h>

#include "csio_hw.h"
#include "csio_lnode.h"
#include "csio_rnode.h"

int csio_dbg_level = 0xFEFF;
unsigned int csio_port_mask = 0xf;
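/*
 * Module-wide tunables: csio_dbg_level sets the default debug verbosity
 * (0xFEFF enables almost all message categories), and csio_port_mask is
 * ANDed with the firmware-reported port vector in csio_get_device_params(),
 * so its default of 0xf permits up to four physical ports.
 */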
/* Default FW event queue entries. */
static uint32_t csio_evtq_sz = CSIO_EVTQ_SIZE;

/* Default MSI param level */
int csio_msi = 2;

/* FCoE function instances */
static int dev_num;

/* FCoE adapter types and their descriptions */
static const struct csio_adap_desc csio_t4_fcoe_adapters[] = {
	{"T440-Dbg 10G", "Chelsio T440-Dbg 10G [FCoE]"},
	{"T420-CR 10G", "Chelsio T420-CR 10G [FCoE]"},
	{"T422-CR 10G/1G", "Chelsio T422-CR 10G/1G [FCoE]"},
	{"T440-CR 10G", "Chelsio T440-CR 10G [FCoE]"},
	{"T420-BCH 10G", "Chelsio T420-BCH 10G [FCoE]"},
	{"T440-BCH 10G", "Chelsio T440-BCH 10G [FCoE]"},
	{"T440-CH 10G", "Chelsio T440-CH 10G [FCoE]"},
	{"T420-SO 10G", "Chelsio T420-SO 10G [FCoE]"},
	{"T420-CX4 10G", "Chelsio T420-CX4 10G [FCoE]"},
	{"T420-BT 10G", "Chelsio T420-BT 10G [FCoE]"},
	{"T404-BT 1G", "Chelsio T404-BT 1G [FCoE]"},
	{"B420-SR 10G", "Chelsio B420-SR 10G [FCoE]"},
	{"B404-BT 1G", "Chelsio B404-BT 1G [FCoE]"},
	{"T480-CR 10G", "Chelsio T480-CR 10G [FCoE]"},
	{"T440-LP-CR 10G", "Chelsio T440-LP-CR 10G [FCoE]"},
	{"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"},
	{"HUAWEI T480 10G", "Chelsio HUAWEI T480 10G [FCoE]"},
	{"HUAWEI T440 10G", "Chelsio HUAWEI T440 10G [FCoE]"},
	{"HUAWEI STG 10G", "Chelsio HUAWEI STG 10G [FCoE]"},
	{"ACROMAG XAUI 10G", "Chelsio ACROMAG XAUI 10G [FCoE]"},
	{"ACROMAG SFP+ 10G", "Chelsio ACROMAG SFP+ 10G [FCoE]"},
	{"QUANTA SFP+ 10G", "Chelsio QUANTA SFP+ 10G [FCoE]"},
	{"HUAWEI 10Gbase-T", "Chelsio HUAWEI 10Gbase-T [FCoE]"},
	{"HUAWEI T4TOE 10G", "Chelsio HUAWEI T4TOE 10G [FCoE]"}
};

static const struct csio_adap_desc csio_t5_fcoe_adapters[] = {
	{"T580-Dbg 10G", "Chelsio T580-Dbg 10G [FCoE]"},
	{"T520-CR 10G", "Chelsio T520-CR 10G [FCoE]"},
	{"T522-CR 10G/1G", "Chelsio T522-CR 10G/1G [FCoE]"},
	{"T540-CR 10G", "Chelsio T540-CR 10G [FCoE]"},
	{"T520-BCH 10G", "Chelsio T520-BCH 10G [FCoE]"},
	{"T540-BCH 10G", "Chelsio T540-BCH 10G [FCoE]"},
	{"T540-CH 10G", "Chelsio T540-CH 10G [FCoE]"},
	{"T520-SO 10G", "Chelsio T520-SO 10G [FCoE]"},
	{"T520-CX4 10G", "Chelsio T520-CX4 10G [FCoE]"},
	{"T520-BT 10G", "Chelsio T520-BT 10G [FCoE]"},
	{"T504-BT 1G", "Chelsio T504-BT 1G [FCoE]"},
	{"B520-SR 10G", "Chelsio B520-SR 10G [FCoE]"},
	{"B504-BT 1G", "Chelsio B504-BT 1G [FCoE]"},
	{"T580-CR 10G", "Chelsio T580-CR 10G [FCoE]"},
	{"T540-LP-CR 10G", "Chelsio T540-LP-CR 10G [FCoE]"},
	{"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"},
	{"T580-LP-CR 40G", "Chelsio T580-LP-CR 40G [FCoE]"},
	{"T520-LL-CR 10G", "Chelsio T520-LL-CR 10G [FCoE]"},
	{"T560-CR 40G", "Chelsio T560-CR 40G [FCoE]"},
	{"T580-CR 40G", "Chelsio T580-CR 40G [FCoE]"}
};
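/*
 * The tables above pair each supported T4/T5 FCoE personality's short
 * model name with the full description string reported in kernel logs.
 * The entries appear to be kept in adapter-model order so they can be
 * indexed by a value derived from the PCI device id (an assumption based
 * on how similar Chelsio drivers look these tables up).
 */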
static void csio_mgmtm_cleanup(struct csio_mgmtm *);
static void csio_hw_mbm_cleanup(struct csio_hw *);

/* State machine forward declarations */
static void csio_hws_uninit(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_configuring(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_initializing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_ready(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_quiescing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_quiesced(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_resetting(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_removing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_pcierr(struct csio_hw *, enum csio_hw_ev);

static void csio_hw_initialize(struct csio_hw *hw);
static void csio_evtq_stop(struct csio_hw *hw);
static void csio_evtq_start(struct csio_hw *hw);

int csio_is_hw_ready(struct csio_hw *hw)
{
	return csio_match_state(hw, csio_hws_ready);
}

int csio_is_hw_removing(struct csio_hw *hw)
{
	return csio_match_state(hw, csio_hws_removing);
}

/*
 *	csio_hw_wait_op_done_val - wait until an operation is completed
 *	@hw: the HW module
 *	@reg: the register to check for completion
 *	@mask: a single-bit field within @reg that indicates completion
 *	@polarity: the value of the field when the operation is completed
 *	@attempts: number of check iterations
 *	@delay: delay in usecs between iterations
 *	@valp: where to store the value of the register at completion time
 *
 *	Wait until an operation is completed by checking a bit in a register
 *	up to @attempts times. If @valp is not NULL the value of the register
 *	at the time it indicated completion is stored there. Returns 0 if the
 *	operation completes and -EAGAIN otherwise.
 */
int
csio_hw_wait_op_done_val(struct csio_hw *hw, int reg, uint32_t mask,
			 int polarity, int attempts, int delay, uint32_t *valp)
{
	uint32_t val;

	while (1) {
		val = csio_rd_reg32(hw, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}

		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}

/*
 *	csio_hw_tp_wr_bits_indirect - set/clear bits in an indirect TP register
 *	@hw: the adapter
 *	@addr: the indirect TP register address
 *	@mask: specifies the field within the register to modify
 *	@val: new value for the field
 *
 *	Sets a field of an indirect TP register to the given value.
 */
void
csio_hw_tp_wr_bits_indirect(struct csio_hw *hw, unsigned int addr,
			    unsigned int mask, unsigned int val)
{
	csio_wr_reg32(hw, addr, TP_PIO_ADDR);
	val |= csio_rd_reg32(hw, TP_PIO_DATA) & ~mask;
	csio_wr_reg32(hw, val, TP_PIO_DATA);
}

void
csio_set_reg_field(struct csio_hw *hw, uint32_t reg, uint32_t mask,
		   uint32_t value)
{
	uint32_t val = csio_rd_reg32(hw, reg) & ~mask;

	csio_wr_reg32(hw, val | value, reg);
	/* Flush */
	csio_rd_reg32(hw, reg);
}

static int
csio_memory_write(struct csio_hw *hw, int mtype, u32 addr, u32 len, u32 *buf)
{
	return hw->chip_ops->chip_memory_rw(hw, MEMWIN_CSIOSTOR, mtype,
					    addr, len, buf, 0);
}

/*
 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
 */
#define EEPROM_MAX_RD_POLL	40
#define EEPROM_MAX_WR_POLL	6
#define EEPROM_STAT_ADDR	0x7bfc
#define VPD_BASE		0x400
#define VPD_BASE_OLD		0
#define VPD_LEN			1024
#define VPD_INFO_FLD_HDR_SIZE	3
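/*
 * With the 10 us poll interval used in csio_hw_seeprom_read() below,
 * EEPROM_MAX_RD_POLL (40) allows roughly 400 us per 32-bit read before
 * giving up, comfortably above the tens-of-us typical read latency noted
 * above.
 */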
235 */ 236 static int 237 csio_hw_seeprom_read(struct csio_hw *hw, uint32_t addr, uint32_t *data) 238 { 239 uint16_t val = 0; 240 int attempts = EEPROM_MAX_RD_POLL; 241 uint32_t base = hw->params.pci.vpd_cap_addr; 242 243 if (addr >= EEPROMVSIZE || (addr & 3)) 244 return -EINVAL; 245 246 pci_write_config_word(hw->pdev, base + PCI_VPD_ADDR, (uint16_t)addr); 247 248 do { 249 udelay(10); 250 pci_read_config_word(hw->pdev, base + PCI_VPD_ADDR, &val); 251 } while (!(val & PCI_VPD_ADDR_F) && --attempts); 252 253 if (!(val & PCI_VPD_ADDR_F)) { 254 csio_err(hw, "reading EEPROM address 0x%x failed\n", addr); 255 return -EINVAL; 256 } 257 258 pci_read_config_dword(hw->pdev, base + PCI_VPD_DATA, data); 259 *data = le32_to_cpu(*data); 260 261 return 0; 262 } 263 264 /* 265 * Partial EEPROM Vital Product Data structure. Includes only the ID and 266 * VPD-R sections. 267 */ 268 struct t4_vpd_hdr { 269 u8 id_tag; 270 u8 id_len[2]; 271 u8 id_data[ID_LEN]; 272 u8 vpdr_tag; 273 u8 vpdr_len[2]; 274 }; 275 276 /* 277 * csio_hw_get_vpd_keyword_val - Locates an information field keyword in 278 * the VPD 279 * @v: Pointer to buffered vpd data structure 280 * @kw: The keyword to search for 281 * 282 * Returns the value of the information field keyword or 283 * -EINVAL otherwise. 284 */ 285 static int 286 csio_hw_get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw) 287 { 288 int32_t i; 289 int32_t offset , len; 290 const uint8_t *buf = &v->id_tag; 291 const uint8_t *vpdr_len = &v->vpdr_tag; 292 offset = sizeof(struct t4_vpd_hdr); 293 len = (uint16_t)vpdr_len[1] + ((uint16_t)vpdr_len[2] << 8); 294 295 if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN) 296 return -EINVAL; 297 298 for (i = offset; (i + VPD_INFO_FLD_HDR_SIZE) <= (offset + len);) { 299 if (memcmp(buf + i , kw, 2) == 0) { 300 i += VPD_INFO_FLD_HDR_SIZE; 301 return i; 302 } 303 304 i += VPD_INFO_FLD_HDR_SIZE + buf[i+2]; 305 } 306 307 return -EINVAL; 308 } 309 310 static int 311 csio_pci_capability(struct pci_dev *pdev, int cap, int *pos) 312 { 313 *pos = pci_find_capability(pdev, cap); 314 if (*pos) 315 return 0; 316 317 return -1; 318 } 319 320 /* 321 * csio_hw_get_vpd_params - read VPD parameters from VPD EEPROM 322 * @hw: HW module 323 * @p: where to store the parameters 324 * 325 * Reads card parameters stored in VPD EEPROM. 326 */ 327 static int 328 csio_hw_get_vpd_params(struct csio_hw *hw, struct csio_vpd *p) 329 { 330 int i, ret, ec, sn, addr; 331 uint8_t *vpd, csum; 332 const struct t4_vpd_hdr *v; 333 /* To get around compilation warning from strstrip */ 334 char *s; 335 336 if (csio_is_valid_vpd(hw)) 337 return 0; 338 339 ret = csio_pci_capability(hw->pdev, PCI_CAP_ID_VPD, 340 &hw->params.pci.vpd_cap_addr); 341 if (ret) 342 return -EINVAL; 343 344 vpd = kzalloc(VPD_LEN, GFP_ATOMIC); 345 if (vpd == NULL) 346 return -ENOMEM; 347 348 /* 349 * Card information normally starts at VPD_BASE but early cards had 350 * it at 0. 351 */ 352 ret = csio_hw_seeprom_read(hw, VPD_BASE, (uint32_t *)(vpd)); 353 addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD; 354 355 for (i = 0; i < VPD_LEN; i += 4) { 356 ret = csio_hw_seeprom_read(hw, addr + i, (uint32_t *)(vpd + i)); 357 if (ret) { 358 kfree(vpd); 359 return ret; 360 } 361 } 362 363 /* Reset the VPD flag! 
	/* Reset the VPD flag! */
	hw->flags &= (~CSIO_HWF_VPD_VALID);

	v = (const struct t4_vpd_hdr *)vpd;

#define FIND_VPD_KW(var, name) do { \
	var = csio_hw_get_vpd_keyword_val(v, name); \
	if (var < 0) { \
		csio_err(hw, "missing VPD keyword " name "\n"); \
		kfree(vpd); \
		return -EINVAL; \
	} \
} while (0)

	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		csio_err(hw, "corrupted VPD EEPROM, actual csum %u\n", csum);
		kfree(vpd);
		return -EINVAL;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
#undef FIND_VPD_KW

	memcpy(p->id, v->id_data, ID_LEN);
	s = strstrip(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	s = strstrip(p->ec);
	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	s = strstrip(p->sn);

	csio_valid_vpd_copied(hw);

	kfree(vpd);
	return 0;
}

/*
 *	csio_hw_sf1_read - read data from the serial flash
 *	@hw: the HW module
 *	@byte_cnt: number of bytes to read
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@valp: where to store the read data
 *
 *	Reads up to 4 bytes of data from the serial flash. The location of
 *	the read needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int
csio_hw_sf1_read(struct csio_hw *hw, uint32_t byte_cnt, int32_t cont,
		 int32_t lock, uint32_t *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (csio_rd_reg32(hw, SF_OP) & SF_BUSY)
		return -EBUSY;

	cont = cont ? SF_CONT : 0;
	lock = lock ? SF_LOCK : 0;

	csio_wr_reg32(hw, lock | cont | BYTECNT(byte_cnt - 1), SF_OP);
	ret = csio_hw_wait_op_done_val(hw, SF_OP, SF_BUSY, 0, SF_ATTEMPTS,
				       10, NULL);
	if (!ret)
		*valp = csio_rd_reg32(hw, SF_DATA);

	return ret;
}

/*
 *	csio_hw_sf1_write - write data to the serial flash
 *	@hw: the HW module
 *	@byte_cnt: number of bytes to write
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@val: value to write
 *
 *	Writes up to 4 bytes of data to the serial flash. The location of
 *	the write needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int
csio_hw_sf1_write(struct csio_hw *hw, uint32_t byte_cnt, uint32_t cont,
		  int32_t lock, uint32_t val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (csio_rd_reg32(hw, SF_OP) & SF_BUSY)
		return -EBUSY;

	cont = cont ? SF_CONT : 0;
	lock = lock ? SF_LOCK : 0;

	csio_wr_reg32(hw, val, SF_DATA);
	csio_wr_reg32(hw, cont | BYTECNT(byte_cnt - 1) | OP_WR | lock, SF_OP);

	return csio_hw_wait_op_done_val(hw, SF_OP, SF_BUSY, 0, SF_ATTEMPTS,
					10, NULL);
}
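/*
 * The SF_OP/SF_DATA pair above implements a simple SPI command protocol:
 * a typical flash operation first issues SF_WR_ENABLE via
 * csio_hw_sf1_write(), then the command word (e.g. SF_PROG_PAGE or
 * SF_ERASE_SECTOR plus an address), and finally polls the flash status
 * byte until the busy bit clears; see csio_hw_flash_wait_op() below.
 */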
476 */ 477 static int 478 csio_hw_flash_wait_op(struct csio_hw *hw, int32_t attempts, int32_t delay) 479 { 480 int ret; 481 uint32_t status; 482 483 while (1) { 484 ret = csio_hw_sf1_write(hw, 1, 1, 1, SF_RD_STATUS); 485 if (ret != 0) 486 return ret; 487 488 ret = csio_hw_sf1_read(hw, 1, 0, 1, &status); 489 if (ret != 0) 490 return ret; 491 492 if (!(status & 1)) 493 return 0; 494 if (--attempts == 0) 495 return -EAGAIN; 496 if (delay) 497 msleep(delay); 498 } 499 } 500 501 /* 502 * csio_hw_read_flash - read words from serial flash 503 * @hw: the HW module 504 * @addr: the start address for the read 505 * @nwords: how many 32-bit words to read 506 * @data: where to store the read data 507 * @byte_oriented: whether to store data as bytes or as words 508 * 509 * Read the specified number of 32-bit words from the serial flash. 510 * If @byte_oriented is set the read data is stored as a byte array 511 * (i.e., big-endian), otherwise as 32-bit words in the platform's 512 * natural endianess. 513 */ 514 static int 515 csio_hw_read_flash(struct csio_hw *hw, uint32_t addr, uint32_t nwords, 516 uint32_t *data, int32_t byte_oriented) 517 { 518 int ret; 519 520 if (addr + nwords * sizeof(uint32_t) > hw->params.sf_size || (addr & 3)) 521 return -EINVAL; 522 523 addr = swab32(addr) | SF_RD_DATA_FAST; 524 525 ret = csio_hw_sf1_write(hw, 4, 1, 0, addr); 526 if (ret != 0) 527 return ret; 528 529 ret = csio_hw_sf1_read(hw, 1, 1, 0, data); 530 if (ret != 0) 531 return ret; 532 533 for ( ; nwords; nwords--, data++) { 534 ret = csio_hw_sf1_read(hw, 4, nwords > 1, nwords == 1, data); 535 if (nwords == 1) 536 csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */ 537 if (ret) 538 return ret; 539 if (byte_oriented) 540 *data = htonl(*data); 541 } 542 return 0; 543 } 544 545 /* 546 * csio_hw_write_flash - write up to a page of data to the serial flash 547 * @hw: the hw 548 * @addr: the start address to write 549 * @n: length of data to write in bytes 550 * @data: the data to write 551 * 552 * Writes up to a page of data (256 bytes) to the serial flash starting 553 * at the given address. All the data must be written to the same page. 
554 */ 555 static int 556 csio_hw_write_flash(struct csio_hw *hw, uint32_t addr, 557 uint32_t n, const uint8_t *data) 558 { 559 int ret = -EINVAL; 560 uint32_t buf[64]; 561 uint32_t i, c, left, val, offset = addr & 0xff; 562 563 if (addr >= hw->params.sf_size || offset + n > SF_PAGE_SIZE) 564 return -EINVAL; 565 566 val = swab32(addr) | SF_PROG_PAGE; 567 568 ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE); 569 if (ret != 0) 570 goto unlock; 571 572 ret = csio_hw_sf1_write(hw, 4, 1, 1, val); 573 if (ret != 0) 574 goto unlock; 575 576 for (left = n; left; left -= c) { 577 c = min(left, 4U); 578 for (val = 0, i = 0; i < c; ++i) 579 val = (val << 8) + *data++; 580 581 ret = csio_hw_sf1_write(hw, c, c != left, 1, val); 582 if (ret) 583 goto unlock; 584 } 585 ret = csio_hw_flash_wait_op(hw, 8, 1); 586 if (ret) 587 goto unlock; 588 589 csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */ 590 591 /* Read the page to verify the write succeeded */ 592 ret = csio_hw_read_flash(hw, addr & ~0xff, ARRAY_SIZE(buf), buf, 1); 593 if (ret) 594 return ret; 595 596 if (memcmp(data - n, (uint8_t *)buf + offset, n)) { 597 csio_err(hw, 598 "failed to correctly write the flash page at %#x\n", 599 addr); 600 return -EINVAL; 601 } 602 603 return 0; 604 605 unlock: 606 csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */ 607 return ret; 608 } 609 610 /* 611 * csio_hw_flash_erase_sectors - erase a range of flash sectors 612 * @hw: the HW module 613 * @start: the first sector to erase 614 * @end: the last sector to erase 615 * 616 * Erases the sectors in the given inclusive range. 617 */ 618 static int 619 csio_hw_flash_erase_sectors(struct csio_hw *hw, int32_t start, int32_t end) 620 { 621 int ret = 0; 622 623 while (start <= end) { 624 625 ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE); 626 if (ret != 0) 627 goto out; 628 629 ret = csio_hw_sf1_write(hw, 4, 0, 1, 630 SF_ERASE_SECTOR | (start << 8)); 631 if (ret != 0) 632 goto out; 633 634 ret = csio_hw_flash_wait_op(hw, 14, 500); 635 if (ret != 0) 636 goto out; 637 638 start++; 639 } 640 out: 641 if (ret) 642 csio_err(hw, "erase of flash sector %d failed, error %d\n", 643 start, ret); 644 csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */ 645 return 0; 646 } 647 648 static void 649 csio_hw_print_fw_version(struct csio_hw *hw, char *str) 650 { 651 csio_info(hw, "%s: %u.%u.%u.%u\n", str, 652 FW_HDR_FW_VER_MAJOR_G(hw->fwrev), 653 FW_HDR_FW_VER_MINOR_G(hw->fwrev), 654 FW_HDR_FW_VER_MICRO_G(hw->fwrev), 655 FW_HDR_FW_VER_BUILD_G(hw->fwrev)); 656 } 657 658 /* 659 * csio_hw_get_fw_version - read the firmware version 660 * @hw: HW module 661 * @vers: where to place the version 662 * 663 * Reads the FW version from flash. 664 */ 665 static int 666 csio_hw_get_fw_version(struct csio_hw *hw, uint32_t *vers) 667 { 668 return csio_hw_read_flash(hw, FW_IMG_START + 669 offsetof(struct fw_hdr, fw_ver), 1, 670 vers, 0); 671 } 672 673 /* 674 * csio_hw_get_tp_version - read the TP microcode version 675 * @hw: HW module 676 * @vers: where to place the version 677 * 678 * Reads the TP microcode version from flash. 679 */ 680 static int 681 csio_hw_get_tp_version(struct csio_hw *hw, u32 *vers) 682 { 683 return csio_hw_read_flash(hw, FLASH_FW_START + 684 offsetof(struct fw_hdr, tp_microcode_ver), 1, 685 vers, 0); 686 } 687 688 /* 689 * csio_hw_check_fw_version - check if the FW is compatible with 690 * this driver 691 * @hw: HW module 692 * 693 * Checks if an adapter's FW is compatible with the driver. 
/*
 *	csio_hw_check_fw_version - check if the FW is compatible with
 *	this driver
 *	@hw: HW module
 *
 *	Checks if an adapter's FW is compatible with the driver. Returns 0
 *	if the versions match exactly, or a negative error if the version
 *	could not be read or there is a major/minor version mismatch.
 */
static int
csio_hw_check_fw_version(struct csio_hw *hw)
{
	int ret, major, minor, micro;

	ret = csio_hw_get_fw_version(hw, &hw->fwrev);
	if (!ret)
		ret = csio_hw_get_tp_version(hw, &hw->tp_vers);
	if (ret)
		return ret;

	major = FW_HDR_FW_VER_MAJOR_G(hw->fwrev);
	minor = FW_HDR_FW_VER_MINOR_G(hw->fwrev);
	micro = FW_HDR_FW_VER_MICRO_G(hw->fwrev);

	if (major != FW_VERSION_MAJOR(hw)) {	/* major mismatch - fail */
		csio_err(hw, "card FW has major version %u, driver wants %u\n",
			 major, FW_VERSION_MAJOR(hw));
		return -EINVAL;
	}

	if (minor == FW_VERSION_MINOR(hw) && micro == FW_VERSION_MICRO(hw))
		return 0;	/* perfect match */

	/* Minor/micro version mismatch */
	return -EINVAL;
}

/*
 *	csio_hw_fw_dload - download firmware.
 *	@hw: HW module
 *	@fw_data: firmware image to write.
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.
 */
static int
csio_hw_fw_dload(struct csio_hw *hw, uint8_t *fw_data, uint32_t size)
{
	uint32_t csum;
	int32_t addr;
	int ret;
	uint32_t i;
	uint8_t first_page[SF_PAGE_SIZE];
	const __be32 *p = (const __be32 *)fw_data;
	struct fw_hdr *hdr = (struct fw_hdr *)fw_data;
	uint32_t sf_sec_size;

	if ((!hw->params.sf_size) || (!hw->params.sf_nsec)) {
		csio_err(hw, "Serial Flash data invalid\n");
		return -EINVAL;
	}

	if (!size) {
		csio_err(hw, "FW image has no data\n");
		return -EINVAL;
	}

	if (size & 511) {
		csio_err(hw, "FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}

	if (ntohs(hdr->len512) * 512 != size) {
		csio_err(hw, "FW image size differs from size in FW header\n");
		return -EINVAL;
	}

	if (size > FW_MAX_SIZE) {
		csio_err(hw, "FW image too large, max is %u bytes\n",
			 FW_MAX_SIZE);
		return -EINVAL;
	}

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		csio_err(hw, "corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	sf_sec_size = hw->params.sf_size / hw->params.sf_nsec;
	i = DIV_ROUND_UP(size, sf_sec_size);	/* # of sectors spanned */

	csio_dbg(hw, "Erasing sectors... start:%d end:%d\n",
		 FW_START_SEC, FW_START_SEC + i - 1);

	ret = csio_hw_flash_erase_sectors(hw, FW_START_SEC,
					  FW_START_SEC + i - 1);
	if (ret) {
		csio_err(hw, "Flash Erase failed\n");
		goto out;
	}

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails. Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = csio_hw_write_flash(hw, FW_IMG_START, SF_PAGE_SIZE, first_page);
	if (ret)
		goto out;
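	/*
	 * @size was verified above to be a multiple of 512, hence also a
	 * multiple of SF_PAGE_SIZE (256), so the page loop below lands
	 * exactly on zero.
	 */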
	csio_dbg(hw, "Writing Flash .. start:%d end:%d\n",
		 FW_IMG_START, FW_IMG_START + size);

	addr = FW_IMG_START;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = csio_hw_write_flash(hw, addr, SF_PAGE_SIZE, fw_data);
		if (ret)
			goto out;
	}

	ret = csio_hw_write_flash(hw,
				  FW_IMG_START +
					offsetof(struct fw_hdr, fw_ver),
				  sizeof(hdr->fw_ver),
				  (const uint8_t *)&hdr->fw_ver);

out:
	if (ret)
		csio_err(hw, "firmware download failed, error %d\n", ret);
	return ret;
}

static int
csio_hw_get_flash_params(struct csio_hw *hw)
{
	int ret;
	uint32_t info = 0;

	ret = csio_hw_sf1_write(hw, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = csio_hw_sf1_read(hw, 3, 0, 1, &info);
	csio_wr_reg32(hw, 0, SF_OP);	/* unlock SF */
	if (ret != 0)
		return ret;

	if ((info & 0xff) != 0x20)		/* not a Numonix flash */
		return -EINVAL;
	info >>= 16;				/* log2 of size */
	if (info >= 0x14 && info < 0x18)
		hw->params.sf_nsec = 1 << (info - 16);
	else if (info == 0x18)
		hw->params.sf_nsec = 64;
	else
		return -EINVAL;
	hw->params.sf_size = 1 << info;

	return 0;
}

/*****************************************************************************/
/* HW State machine assists                                                  */
/*****************************************************************************/

static int
csio_hw_dev_ready(struct csio_hw *hw)
{
	uint32_t reg;
	int cnt = 6;

	while (((reg = csio_rd_reg32(hw, PL_WHOAMI)) == 0xFFFFFFFF) &&
	       (--cnt != 0))
		mdelay(100);

	if ((cnt == 0) && (((int32_t)(SOURCEPF_GET(reg)) < 0) ||
			   (SOURCEPF_GET(reg) >= CSIO_MAX_PFN))) {
		csio_err(hw, "PL_WHOAMI returned 0x%x, cnt:%d\n", reg, cnt);
		return -EIO;
	}

	hw->pfn = SOURCEPF_GET(reg);

	return 0;
}
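/*
 * PL_WHOAMI reads back as all-ones while the device is still coming out
 * of reset (or has fallen off the bus), so csio_hw_dev_ready() above
 * retries for roughly half a second; the SOURCEPF field of a sane
 * readback identifies which PCI function we are running on.
 */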
/*
 * csio_do_hello - Perform the HELLO FW Mailbox command and process response.
 * @hw: HW module
 * @state: Device state
 *
 * FW_HELLO_CMD has to be polled for completion.
 */
static int
csio_do_hello(struct csio_hw *hw, enum csio_dev_state *state)
{
	struct csio_mb *mbp;
	int rv = 0;
	enum fw_retval retval;
	uint8_t mpfn;
	char state_str[16];
	int retries = FW_CMD_HELLO_RETRIES;

	memset(state_str, 0, sizeof(state_str));

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		rv = -ENOMEM;
		CSIO_INC_STATS(hw, n_err_nomem);
		goto out;
	}

retry:
	csio_mb_hello(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn,
		      hw->pfn, CSIO_MASTER_MAY, NULL);

	rv = csio_mb_issue(hw, mbp);
	if (rv) {
		csio_err(hw, "failed to issue HELLO cmd. ret:%d.\n", rv);
		goto out_free_mb;
	}

	csio_mb_process_hello_rsp(hw, mbp, &retval, state, &mpfn);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "HELLO cmd failed with ret: %d\n", retval);
		rv = -EINVAL;
		goto out_free_mb;
	}

	/* Firmware has designated us to be master */
	if (hw->pfn == mpfn) {
		hw->flags |= CSIO_HWF_MASTER;
	} else if (*state == CSIO_DEV_STATE_UNINIT) {
		/*
		 * If we're not the Master PF then we need to wait around for
		 * the Master PF Driver to finish setting up the adapter.
		 *
		 * Note that we also do this wait if we're a non-Master-capable
		 * PF and there is no current Master PF; a Master PF may show up
		 * momentarily and we wouldn't want to fail pointlessly. (This
		 * can happen when an OS loads lots of different drivers rapidly
		 * at the same time). In this case, the Master PF returned by
		 * the firmware will be PCIE_FW_MASTER_MASK so the test below
		 * will work ...
		 */

		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state. If we see either of these we bail out
		 * and report the issue to the caller. If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again. Otherwise bail with a timeout error.
		 */
		for (;;) {
			uint32_t pcie_fw;

			spin_unlock_irq(&hw->lock);
			msleep(50);
			spin_lock_irq(&hw->lock);
			waiting -= 50;

			/*
			 * If neither Error nor Initialized are indicated
			 * by the firmware keep waiting till we exhaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = csio_rd_reg32(hw, PCIE_FW);
			if (!(pcie_fw & (PCIE_FW_ERR|PCIE_FW_INIT))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					rv = -ETIMEDOUT;
					break;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition;
			 * report errors preferentially.
			 */
			if (state) {
				if (pcie_fw & PCIE_FW_ERR) {
					*state = CSIO_DEV_STATE_ERR;
					rv = -ETIMEDOUT;
				} else if (pcie_fw & PCIE_FW_INIT)
					*state = CSIO_DEV_STATE_INIT;
			}

			/*
			 * If we arrived before a Master PF was selected and
			 * there's now a valid Master PF, grab its identity
			 * for our caller.
			 */
			if (mpfn == PCIE_FW_MASTER_MASK &&
			    (pcie_fw & PCIE_FW_MASTER_VLD))
				mpfn = PCIE_FW_MASTER_GET(pcie_fw);
			break;
		}
		hw->flags &= ~CSIO_HWF_MASTER;
	}

	switch (*state) {
	case CSIO_DEV_STATE_UNINIT:
		strcpy(state_str, "Initializing");
		break;
	case CSIO_DEV_STATE_INIT:
		strcpy(state_str, "Initialized");
		break;
	case CSIO_DEV_STATE_ERR:
		strcpy(state_str, "Error");
		break;
	default:
		strcpy(state_str, "Unknown");
		break;
	}

	if (hw->pfn == mpfn)
		csio_info(hw, "PF: %d, Coming up as MASTER, HW state: %s\n",
			  hw->pfn, state_str);
	else
		csio_info(hw,
			  "PF: %d, Coming up as SLAVE, Master PF: %d, HW state: %s\n",
			  hw->pfn, mpfn, state_str);

out_free_mb:
	mempool_free(mbp, hw->mb_mempool);
out:
	return rv;
}

/*
 * csio_do_bye - Perform the BYE FW Mailbox command and process response.
 * @hw: HW module
 *
 */
static int
csio_do_bye(struct csio_hw *hw)
{
	struct csio_mb *mbp;
	enum fw_retval retval;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	csio_mb_bye(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of BYE command failed\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	mempool_free(mbp, hw->mb_mempool);

	return 0;
}
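/*
 * Together, HELLO and BYE bracket a PF's session with the firmware:
 * HELLO elects (or discovers) the Master PF and reports device state,
 * while BYE tells the firmware this function is detaching. The RESET
 * command below is the heavier hammer used between those two points.
 */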
1072 */ 1073 static int 1074 csio_do_reset(struct csio_hw *hw, bool fw_rst) 1075 { 1076 struct csio_mb *mbp; 1077 enum fw_retval retval; 1078 1079 if (!fw_rst) { 1080 /* PIO reset */ 1081 csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST); 1082 mdelay(2000); 1083 return 0; 1084 } 1085 1086 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1087 if (!mbp) { 1088 CSIO_INC_STATS(hw, n_err_nomem); 1089 return -ENOMEM; 1090 } 1091 1092 csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO, 1093 PIORSTMODE | PIORST, 0, NULL); 1094 1095 if (csio_mb_issue(hw, mbp)) { 1096 csio_err(hw, "Issue of RESET command failed.n"); 1097 mempool_free(mbp, hw->mb_mempool); 1098 return -EINVAL; 1099 } 1100 1101 retval = csio_mb_fw_retval(mbp); 1102 if (retval != FW_SUCCESS) { 1103 csio_err(hw, "RESET cmd failed with ret:0x%x.\n", retval); 1104 mempool_free(mbp, hw->mb_mempool); 1105 return -EINVAL; 1106 } 1107 1108 mempool_free(mbp, hw->mb_mempool); 1109 1110 return 0; 1111 } 1112 1113 static int 1114 csio_hw_validate_caps(struct csio_hw *hw, struct csio_mb *mbp) 1115 { 1116 struct fw_caps_config_cmd *rsp = (struct fw_caps_config_cmd *)mbp->mb; 1117 uint16_t caps; 1118 1119 caps = ntohs(rsp->fcoecaps); 1120 1121 if (!(caps & FW_CAPS_CONFIG_FCOE_INITIATOR)) { 1122 csio_err(hw, "No FCoE Initiator capability in the firmware.\n"); 1123 return -EINVAL; 1124 } 1125 1126 if (!(caps & FW_CAPS_CONFIG_FCOE_CTRL_OFLD)) { 1127 csio_err(hw, "No FCoE Control Offload capability\n"); 1128 return -EINVAL; 1129 } 1130 1131 return 0; 1132 } 1133 1134 /* 1135 * csio_hw_fw_halt - issue a reset/halt to FW and put uP into RESET 1136 * @hw: the HW module 1137 * @mbox: mailbox to use for the FW RESET command (if desired) 1138 * @force: force uP into RESET even if FW RESET command fails 1139 * 1140 * Issues a RESET command to firmware (if desired) with a HALT indication 1141 * and then puts the microprocessor into RESET state. The RESET command 1142 * will only be issued if a legitimate mailbox is provided (mbox <= 1143 * PCIE_FW_MASTER_MASK). 1144 * 1145 * This is generally used in order for the host to safely manipulate the 1146 * adapter without fear of conflicting with whatever the firmware might 1147 * be doing. The only way out of this state is to RESTART the firmware 1148 * ... 1149 */ 1150 static int 1151 csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force) 1152 { 1153 enum fw_retval retval = 0; 1154 1155 /* 1156 * If a legitimate mailbox is provided, issue a RESET command 1157 * with a HALT indication. 1158 */ 1159 if (mbox <= PCIE_FW_MASTER_MASK) { 1160 struct csio_mb *mbp; 1161 1162 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1163 if (!mbp) { 1164 CSIO_INC_STATS(hw, n_err_nomem); 1165 return -ENOMEM; 1166 } 1167 1168 csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO, 1169 PIORSTMODE | PIORST, FW_RESET_CMD_HALT_F, 1170 NULL); 1171 1172 if (csio_mb_issue(hw, mbp)) { 1173 csio_err(hw, "Issue of RESET command failed!\n"); 1174 mempool_free(mbp, hw->mb_mempool); 1175 return -EINVAL; 1176 } 1177 1178 retval = csio_mb_fw_retval(mbp); 1179 mempool_free(mbp, hw->mb_mempool); 1180 } 1181 1182 /* 1183 * Normally we won't complete the operation if the firmware RESET 1184 * command fails but if our caller insists we'll go ahead and put the 1185 * uP into RESET. This can be useful if the firmware is hung or even 1186 * missing ... We'll have to take the risk of putting the uP into 1187 * RESET without the cooperation of firmware in that case. 
1188 * 1189 * We also force the firmware's HALT flag to be on in case we bypassed 1190 * the firmware RESET command above or we're dealing with old firmware 1191 * which doesn't have the HALT capability. This will serve as a flag 1192 * for the incoming firmware to know that it's coming out of a HALT 1193 * rather than a RESET ... if it's new enough to understand that ... 1194 */ 1195 if (retval == 0 || force) { 1196 csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, UPCRST); 1197 csio_set_reg_field(hw, PCIE_FW, PCIE_FW_HALT, PCIE_FW_HALT); 1198 } 1199 1200 /* 1201 * And we always return the result of the firmware RESET command 1202 * even when we force the uP into RESET ... 1203 */ 1204 return retval ? -EINVAL : 0; 1205 } 1206 1207 /* 1208 * csio_hw_fw_restart - restart the firmware by taking the uP out of RESET 1209 * @hw: the HW module 1210 * @reset: if we want to do a RESET to restart things 1211 * 1212 * Restart firmware previously halted by csio_hw_fw_halt(). On successful 1213 * return the previous PF Master remains as the new PF Master and there 1214 * is no need to issue a new HELLO command, etc. 1215 * 1216 * We do this in two ways: 1217 * 1218 * 1. If we're dealing with newer firmware we'll simply want to take 1219 * the chip's microprocessor out of RESET. This will cause the 1220 * firmware to start up from its start vector. And then we'll loop 1221 * until the firmware indicates it's started again (PCIE_FW.HALT 1222 * reset to 0) or we timeout. 1223 * 1224 * 2. If we're dealing with older firmware then we'll need to RESET 1225 * the chip since older firmware won't recognize the PCIE_FW.HALT 1226 * flag and automatically RESET itself on startup. 1227 */ 1228 static int 1229 csio_hw_fw_restart(struct csio_hw *hw, uint32_t mbox, int32_t reset) 1230 { 1231 if (reset) { 1232 /* 1233 * Since we're directing the RESET instead of the firmware 1234 * doing it automatically, we need to clear the PCIE_FW.HALT 1235 * bit. 1236 */ 1237 csio_set_reg_field(hw, PCIE_FW, PCIE_FW_HALT, 0); 1238 1239 /* 1240 * If we've been given a valid mailbox, first try to get the 1241 * firmware to do the RESET. If that works, great and we can 1242 * return success. Otherwise, if we haven't been given a 1243 * valid mailbox or the RESET command failed, fall back to 1244 * hitting the chip with a hammer. 1245 */ 1246 if (mbox <= PCIE_FW_MASTER_MASK) { 1247 csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, 0); 1248 msleep(100); 1249 if (csio_do_reset(hw, true) == 0) 1250 return 0; 1251 } 1252 1253 csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST); 1254 msleep(2000); 1255 } else { 1256 int ms; 1257 1258 csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, 0); 1259 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) { 1260 if (!(csio_rd_reg32(hw, PCIE_FW) & PCIE_FW_HALT)) 1261 return 0; 1262 msleep(100); 1263 ms += 100; 1264 } 1265 return -ETIMEDOUT; 1266 } 1267 return 0; 1268 } 1269 1270 /* 1271 * csio_hw_fw_upgrade - perform all of the steps necessary to upgrade FW 1272 * @hw: the HW module 1273 * @mbox: mailbox to use for the FW RESET command (if desired) 1274 * @fw_data: the firmware image to write 1275 * @size: image size 1276 * @force: force upgrade even if firmware doesn't cooperate 1277 * 1278 * Perform all of the steps necessary for upgrading an adapter's 1279 * firmware image. 
 * Normally this requires the cooperation of the
 * existing firmware in order to halt all existing activities
 * but if an invalid mailbox token is passed in we skip that step
 * (though we'll still put the adapter microprocessor into RESET in
 * that case).
 *
 * On successful return the new firmware will have been loaded and
 * the adapter will have been fully RESET losing all previous setup
 * state. On unsuccessful return the adapter may be completely hosed ...
 * positive errno indicates that the adapter is ~probably~ intact, a
 * negative errno indicates that things are looking bad ...
 */
static int
csio_hw_fw_upgrade(struct csio_hw *hw, uint32_t mbox,
		   const u8 *fw_data, uint32_t size, int32_t force)
{
	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
	int reset, ret;

	ret = csio_hw_fw_halt(hw, mbox, force);
	if (ret != 0 && !force)
		return ret;

	ret = csio_hw_fw_dload(hw, (uint8_t *) fw_data, size);
	if (ret != 0)
		return ret;

	/*
	 * Older versions of the firmware don't understand the new
	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
	 * restart. So for newly loaded older firmware we'll have to do the
	 * RESET for it so it starts up on a clean slate. We can tell if
	 * the newly loaded firmware will handle this right by checking
	 * its header flags to see if it advertises the capability.
	 */
	reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
	return csio_hw_fw_restart(hw, mbox, reset);
}

/*
 * csio_hw_fw_config_file - setup an adapter via a Configuration File
 * @hw: the HW module
 * @mtype: the memory type where the Configuration File is located
 * @maddr: the memory address where the Configuration File is located
 * @finiver: return value for CF [fini] version
 * @finicsum: return value for CF [fini] checksum
 * @cfcsum: return value for CF computed checksum
 *
 * Issue a command to get the firmware to process the Configuration
 * File located at the specified mtype/maddr. If the Configuration
 * File is processed successfully and return value pointers are
 * provided, the Configuration File's "[fini]" section version and
 * checksum values will be returned along with the computed checksum.
 * It's up to the caller to decide how it wants to respond to the
 * checksums not matching, but it is recommended that a prominent
 * warning be emitted in order to help people rapidly identify changed
 * or corrupted Configuration Files.
 *
 * Also note that it's possible to modify things like "niccaps",
 * "toecaps", etc. between processing the Configuration File and telling
 * the firmware to use the new configuration. Callers which want to
 * do this will need to "hand-roll" their own CAPS_CONFIGS commands for
 * Configuration Files if they want to do this.
 */
1344 */ 1345 static int 1346 csio_hw_fw_config_file(struct csio_hw *hw, 1347 unsigned int mtype, unsigned int maddr, 1348 uint32_t *finiver, uint32_t *finicsum, uint32_t *cfcsum) 1349 { 1350 struct csio_mb *mbp; 1351 struct fw_caps_config_cmd *caps_cmd; 1352 int rv = -EINVAL; 1353 enum fw_retval ret; 1354 1355 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1356 if (!mbp) { 1357 CSIO_INC_STATS(hw, n_err_nomem); 1358 return -ENOMEM; 1359 } 1360 /* 1361 * Tell the firmware to process the indicated Configuration File. 1362 * If there are no errors and the caller has provided return value 1363 * pointers for the [fini] section version, checksum and computed 1364 * checksum, pass those back to the caller. 1365 */ 1366 caps_cmd = (struct fw_caps_config_cmd *)(mbp->mb); 1367 CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1); 1368 caps_cmd->op_to_write = 1369 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | 1370 FW_CMD_REQUEST_F | 1371 FW_CMD_READ_F); 1372 caps_cmd->cfvalid_to_len16 = 1373 htonl(FW_CAPS_CONFIG_CMD_CFVALID_F | 1374 FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) | 1375 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) | 1376 FW_LEN16(*caps_cmd)); 1377 1378 if (csio_mb_issue(hw, mbp)) { 1379 csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD failed!\n"); 1380 goto out; 1381 } 1382 1383 ret = csio_mb_fw_retval(mbp); 1384 if (ret != FW_SUCCESS) { 1385 csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", rv); 1386 goto out; 1387 } 1388 1389 if (finiver) 1390 *finiver = ntohl(caps_cmd->finiver); 1391 if (finicsum) 1392 *finicsum = ntohl(caps_cmd->finicsum); 1393 if (cfcsum) 1394 *cfcsum = ntohl(caps_cmd->cfcsum); 1395 1396 /* Validate device capabilities */ 1397 if (csio_hw_validate_caps(hw, mbp)) { 1398 rv = -ENOENT; 1399 goto out; 1400 } 1401 1402 /* 1403 * And now tell the firmware to use the configuration we just loaded. 1404 */ 1405 caps_cmd->op_to_write = 1406 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | 1407 FW_CMD_REQUEST_F | 1408 FW_CMD_WRITE_F); 1409 caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd)); 1410 1411 if (csio_mb_issue(hw, mbp)) { 1412 csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD failed!\n"); 1413 goto out; 1414 } 1415 1416 ret = csio_mb_fw_retval(mbp); 1417 if (ret != FW_SUCCESS) { 1418 csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", rv); 1419 goto out; 1420 } 1421 1422 rv = 0; 1423 out: 1424 mempool_free(mbp, hw->mb_mempool); 1425 return rv; 1426 } 1427 1428 /* 1429 * csio_get_device_params - Get device parameters. 1430 * @hw: HW module 1431 * 1432 */ 1433 static int 1434 csio_get_device_params(struct csio_hw *hw) 1435 { 1436 struct csio_wrm *wrm = csio_hw_to_wrm(hw); 1437 struct csio_mb *mbp; 1438 enum fw_retval retval; 1439 u32 param[6]; 1440 int i, j = 0; 1441 1442 /* Initialize portids to -1 */ 1443 for (i = 0; i < CSIO_MAX_PPORTS; i++) 1444 hw->pport[i].portid = -1; 1445 1446 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1447 if (!mbp) { 1448 CSIO_INC_STATS(hw, n_err_nomem); 1449 return -ENOMEM; 1450 } 1451 1452 /* Get port vec information. */ 1453 param[0] = FW_PARAM_DEV(PORTVEC); 1454 1455 /* Get Core clock. */ 1456 param[1] = FW_PARAM_DEV(CCLK); 1457 1458 /* Get EQ id start and end. */ 1459 param[2] = FW_PARAM_PFVF(EQ_START); 1460 param[3] = FW_PARAM_PFVF(EQ_END); 1461 1462 /* Get IQ id start and end. 
/*
 * csio_get_device_params - Get device parameters.
 * @hw: HW module
 *
 */
static int
csio_get_device_params(struct csio_hw *hw)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_mb *mbp;
	enum fw_retval retval;
	u32 param[6];
	int i, j = 0;

	/* Initialize portids to -1 */
	for (i = 0; i < CSIO_MAX_PPORTS; i++)
		hw->pport[i].portid = -1;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get port vec information. */
	param[0] = FW_PARAM_DEV(PORTVEC);

	/* Get Core clock. */
	param[1] = FW_PARAM_DEV(CCLK);

	/* Get EQ id start and end. */
	param[2] = FW_PARAM_PFVF(EQ_START);
	param[3] = FW_PARAM_PFVF(EQ_END);

	/* Get IQ id start and end. */
	param[4] = FW_PARAM_PFVF(IQFLINT_START);
	param[5] = FW_PARAM_PFVF(IQFLINT_END);

	csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
		       ARRAY_SIZE(param), param, NULL, false, NULL);
	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	csio_mb_process_read_params_rsp(hw, mbp, &retval,
					ARRAY_SIZE(param), param);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",
			 retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	/* cache the information. */
	hw->port_vec = param[0];
	hw->vpd.cclk = param[1];
	wrm->fw_eq_start = param[2];
	wrm->fw_iq_start = param[4];

	/* Using FW configured max iqs & eqs */
	if ((hw->flags & CSIO_HWF_USING_SOFT_PARAMS) ||
	    !csio_is_hw_master(hw)) {
		hw->cfg_niq = param[5] - param[4] + 1;
		hw->cfg_neq = param[3] - param[2] + 1;
		csio_dbg(hw, "Using fwconfig max niqs %d neqs %d\n",
			 hw->cfg_niq, hw->cfg_neq);
	}

	hw->port_vec &= csio_port_mask;

	hw->num_pports = hweight32(hw->port_vec);

	csio_dbg(hw, "Port vector: 0x%x, #ports: %d\n",
		 hw->port_vec, hw->num_pports);

	for (i = 0; i < hw->num_pports; i++) {
		while ((hw->port_vec & (1 << j)) == 0)
			j++;
		hw->pport[i].portid = j++;
		csio_dbg(hw, "Found Port:%d\n", hw->pport[i].portid);
	}
	mempool_free(mbp, hw->mb_mempool);

	return 0;
}

/*
 * csio_config_device_caps - Get and set device capabilities.
 * @hw: HW module
 *
 */
static int
csio_config_device_caps(struct csio_hw *hw)
{
	struct csio_mb *mbp;
	enum fw_retval retval;
	int rv = -EINVAL;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get device capabilities */
	csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, 0, 0, 0, 0, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(r) failed!\n");
		goto out;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_CAPS_CONFIG_CMD(r) returned %d!\n", retval);
		goto out;
	}

	/* Validate device capabilities */
	if (csio_hw_validate_caps(hw, mbp))
		goto out;

	/* Don't config device capabilities if already configured */
	if (hw->fw_state == CSIO_DEV_STATE_INIT) {
		rv = 0;
		goto out;
	}

	/* Write back desired device capabilities */
	csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, true, true,
			    false, true, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(w) failed!\n");
		goto out;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_CAPS_CONFIG_CMD(w) returned %d!\n", retval);
		goto out;
	}

	rv = 0;
out:
	mempool_free(mbp, hw->mb_mempool);
	return rv;
}
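/*
 * Capability negotiation above is a read-validate-write sequence: read
 * the firmware's current capabilities, check that FCoE initiator and
 * control-offload support are present, and write the desired set back
 * unless the firmware is already initialized (in which case the caps in
 * force are left untouched).
 */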
1583 * 1584 */ 1585 static int 1586 csio_enable_ports(struct csio_hw *hw) 1587 { 1588 struct csio_mb *mbp; 1589 enum fw_retval retval; 1590 uint8_t portid; 1591 int i; 1592 1593 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1594 if (!mbp) { 1595 CSIO_INC_STATS(hw, n_err_nomem); 1596 return -ENOMEM; 1597 } 1598 1599 for (i = 0; i < hw->num_pports; i++) { 1600 portid = hw->pport[i].portid; 1601 1602 /* Read PORT information */ 1603 csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid, 1604 false, 0, 0, NULL); 1605 1606 if (csio_mb_issue(hw, mbp)) { 1607 csio_err(hw, "failed to issue FW_PORT_CMD(r) port:%d\n", 1608 portid); 1609 mempool_free(mbp, hw->mb_mempool); 1610 return -EINVAL; 1611 } 1612 1613 csio_mb_process_read_port_rsp(hw, mbp, &retval, 1614 &hw->pport[i].pcap); 1615 if (retval != FW_SUCCESS) { 1616 csio_err(hw, "FW_PORT_CMD(r) port:%d failed: 0x%x\n", 1617 portid, retval); 1618 mempool_free(mbp, hw->mb_mempool); 1619 return -EINVAL; 1620 } 1621 1622 /* Write back PORT information */ 1623 csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid, true, 1624 (PAUSE_RX | PAUSE_TX), hw->pport[i].pcap, NULL); 1625 1626 if (csio_mb_issue(hw, mbp)) { 1627 csio_err(hw, "failed to issue FW_PORT_CMD(w) port:%d\n", 1628 portid); 1629 mempool_free(mbp, hw->mb_mempool); 1630 return -EINVAL; 1631 } 1632 1633 retval = csio_mb_fw_retval(mbp); 1634 if (retval != FW_SUCCESS) { 1635 csio_err(hw, "FW_PORT_CMD(w) port:%d failed :0x%x\n", 1636 portid, retval); 1637 mempool_free(mbp, hw->mb_mempool); 1638 return -EINVAL; 1639 } 1640 1641 } /* For all ports */ 1642 1643 mempool_free(mbp, hw->mb_mempool); 1644 1645 return 0; 1646 } 1647 1648 /* 1649 * csio_get_fcoe_resinfo - Read fcoe fw resource info. 1650 * @hw: HW module 1651 * Issued with lock held. 1652 */ 1653 static int 1654 csio_get_fcoe_resinfo(struct csio_hw *hw) 1655 { 1656 struct csio_fcoe_res_info *res_info = &hw->fres_info; 1657 struct fw_fcoe_res_info_cmd *rsp; 1658 struct csio_mb *mbp; 1659 enum fw_retval retval; 1660 1661 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1662 if (!mbp) { 1663 CSIO_INC_STATS(hw, n_err_nomem); 1664 return -ENOMEM; 1665 } 1666 1667 /* Get FCoE FW resource information */ 1668 csio_fcoe_read_res_info_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL); 1669 1670 if (csio_mb_issue(hw, mbp)) { 1671 csio_err(hw, "failed to issue FW_FCOE_RES_INFO_CMD\n"); 1672 mempool_free(mbp, hw->mb_mempool); 1673 return -EINVAL; 1674 } 1675 1676 rsp = (struct fw_fcoe_res_info_cmd *)(mbp->mb); 1677 retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16)); 1678 if (retval != FW_SUCCESS) { 1679 csio_err(hw, "FW_FCOE_RES_INFO_CMD failed with ret x%x\n", 1680 retval); 1681 mempool_free(mbp, hw->mb_mempool); 1682 return -EINVAL; 1683 } 1684 1685 res_info->e_d_tov = ntohs(rsp->e_d_tov); 1686 res_info->r_a_tov_seq = ntohs(rsp->r_a_tov_seq); 1687 res_info->r_a_tov_els = ntohs(rsp->r_a_tov_els); 1688 res_info->r_r_tov = ntohs(rsp->r_r_tov); 1689 res_info->max_xchgs = ntohl(rsp->max_xchgs); 1690 res_info->max_ssns = ntohl(rsp->max_ssns); 1691 res_info->used_xchgs = ntohl(rsp->used_xchgs); 1692 res_info->used_ssns = ntohl(rsp->used_ssns); 1693 res_info->max_fcfs = ntohl(rsp->max_fcfs); 1694 res_info->max_vnps = ntohl(rsp->max_vnps); 1695 res_info->used_fcfs = ntohl(rsp->used_fcfs); 1696 res_info->used_vnps = ntohl(rsp->used_vnps); 1697 1698 csio_dbg(hw, "max ssns:%d max xchgs:%d\n", res_info->max_ssns, 1699 res_info->max_xchgs); 1700 mempool_free(mbp, hw->mb_mempool); 1701 1702 return 0; 1703 } 1704 1705 static int 1706 csio_hw_check_fwconfig(struct csio_hw *hw, 
static int
csio_hw_check_fwconfig(struct csio_hw *hw, u32 *param)
{
	struct csio_mb *mbp;
	enum fw_retval retval;
	u32 _param[1];

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/*
	 * Find out whether we're dealing with a version of
	 * the firmware which has configuration file support.
	 */
	_param[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));

	csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
		       ARRAY_SIZE(_param), _param, NULL, false, NULL);
	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	csio_mb_process_read_params_rsp(hw, mbp, &retval,
					ARRAY_SIZE(_param), _param);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",
			 retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	mempool_free(mbp, hw->mb_mempool);
	*param = _param[0];

	return 0;
}

static int
csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
{
	int ret = 0;
	const struct firmware *cf;
	struct pci_dev *pci_dev = hw->pdev;
	struct device *dev = &pci_dev->dev;
	unsigned int mtype = 0, maddr = 0;
	uint32_t *cfg_data;
	int value_to_add = 0;

	ret = request_firmware(&cf, CSIO_CF_FNAME(hw), dev);
	if (ret < 0) {
		csio_err(hw, "could not find config file %s, err: %d\n",
			 CSIO_CF_FNAME(hw), ret);
		return -ENOENT;
	}

	if (cf->size % 4 != 0)
		value_to_add = 4 - (cf->size % 4);

	cfg_data = kzalloc(cf->size + value_to_add, GFP_KERNEL);
	if (cfg_data == NULL) {
		ret = -ENOMEM;
		goto leave;
	}

	memcpy((void *)cfg_data, (const void *)cf->data, cf->size);
	if (csio_hw_check_fwconfig(hw, fw_cfg_param) != 0) {
		ret = -EINVAL;
		goto leave;
	}

	mtype = FW_PARAMS_PARAM_Y_G(*fw_cfg_param);
	maddr = FW_PARAMS_PARAM_Z_G(*fw_cfg_param) << 16;

	ret = csio_memory_write(hw, mtype, maddr,
				cf->size + value_to_add, cfg_data);

	if ((ret == 0) && (value_to_add != 0)) {
		union {
			u32 word;
			char buf[4];
		} last;
		size_t size = cf->size & ~0x3;
		int i;

		last.word = cfg_data[size >> 2];
		for (i = 4 - value_to_add; i < 4; i++)
			last.buf[i] = 0;
		ret = csio_memory_write(hw, mtype, maddr + size, 4, &last.word);
	}
	if (ret == 0) {
		csio_info(hw, "config file upgraded to %s\n",
			  CSIO_CF_FNAME(hw));
		snprintf(path, 64, "%s%s", "/lib/firmware/", CSIO_CF_FNAME(hw));
	}

leave:
	kfree(cfg_data);
	release_firmware(cf);
	return ret;
}
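/*
 * The DEV_CF parameter decoded above packs the Configuration File's
 * location into one word: PARAM_Y holds the memory type and PARAM_Z the
 * address in 64KB units, hence the << 16 when reconstructing maddr
 * (matching the MEMADDR64K encoding used in csio_hw_fw_config_file()).
 */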
1826 */ 1827 static int 1828 csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param) 1829 { 1830 unsigned int mtype, maddr; 1831 int rv; 1832 uint32_t finiver = 0, finicsum = 0, cfcsum = 0; 1833 int using_flash; 1834 char path[64]; 1835 1836 /* 1837 * Reset device if necessary 1838 */ 1839 if (reset) { 1840 rv = csio_do_reset(hw, true); 1841 if (rv != 0) 1842 goto bye; 1843 } 1844 1845 /* 1846 * If we have a configuration file in host , 1847 * then use that. Otherwise, use the configuration file stored 1848 * in the HW flash ... 1849 */ 1850 spin_unlock_irq(&hw->lock); 1851 rv = csio_hw_flash_config(hw, fw_cfg_param, path); 1852 spin_lock_irq(&hw->lock); 1853 if (rv != 0) { 1854 if (rv == -ENOENT) { 1855 /* 1856 * config file was not found. Use default 1857 * config file from flash. 1858 */ 1859 mtype = FW_MEMTYPE_CF_FLASH; 1860 maddr = hw->chip_ops->chip_flash_cfg_addr(hw); 1861 using_flash = 1; 1862 } else { 1863 /* 1864 * we revert back to the hardwired config if 1865 * flashing failed. 1866 */ 1867 goto bye; 1868 } 1869 } else { 1870 mtype = FW_PARAMS_PARAM_Y_G(*fw_cfg_param); 1871 maddr = FW_PARAMS_PARAM_Z_G(*fw_cfg_param) << 16; 1872 using_flash = 0; 1873 } 1874 1875 hw->cfg_store = (uint8_t)mtype; 1876 1877 /* 1878 * Issue a Capability Configuration command to the firmware to get it 1879 * to parse the Configuration File. 1880 */ 1881 rv = csio_hw_fw_config_file(hw, mtype, maddr, &finiver, 1882 &finicsum, &cfcsum); 1883 if (rv != 0) 1884 goto bye; 1885 1886 hw->cfg_finiver = finiver; 1887 hw->cfg_finicsum = finicsum; 1888 hw->cfg_cfcsum = cfcsum; 1889 hw->cfg_csum_status = true; 1890 1891 if (finicsum != cfcsum) { 1892 csio_warn(hw, 1893 "Config File checksum mismatch: csum=%#x, computed=%#x\n", 1894 finicsum, cfcsum); 1895 1896 hw->cfg_csum_status = false; 1897 } 1898 1899 /* 1900 * Note that we're operating with parameters 1901 * not supplied by the driver, rather than from hard-wired 1902 * initialization constants buried in the driver. 1903 */ 1904 hw->flags |= CSIO_HWF_USING_SOFT_PARAMS; 1905 1906 /* device parameters */ 1907 rv = csio_get_device_params(hw); 1908 if (rv != 0) 1909 goto bye; 1910 1911 /* Configure SGE */ 1912 csio_wr_sge_init(hw); 1913 1914 /* 1915 * And finally tell the firmware to initialize itself using the 1916 * parameters from the Configuration File. 1917 */ 1918 /* Post event to notify completion of configuration */ 1919 csio_post_event(&hw->sm, CSIO_HWE_INIT); 1920 1921 csio_info(hw, 1922 "Firmware Configuration File %s, version %#x, computed checksum %#x\n", 1923 (using_flash ? "in device FLASH" : path), finiver, cfcsum); 1924 1925 return 0; 1926 1927 /* 1928 * Something bad happened. Return the error ... 1929 */ 1930 bye: 1931 hw->flags &= ~CSIO_HWF_USING_SOFT_PARAMS; 1932 csio_dbg(hw, "Configuration file error %d\n", rv); 1933 return rv; 1934 } 1935 1936 /* 1937 * Attempt to initialize the adapter via hard-coded, driver supplied 1938 * parameters ... 
1939 */ 1940 static int 1941 csio_hw_no_fwconfig(struct csio_hw *hw, int reset) 1942 { 1943 int rv; 1944 /* 1945 * Reset device if necessary 1946 */ 1947 if (reset) { 1948 rv = csio_do_reset(hw, true); 1949 if (rv != 0) 1950 goto out; 1951 } 1952 1953 /* Get and set device capabilities */ 1954 rv = csio_config_device_caps(hw); 1955 if (rv != 0) 1956 goto out; 1957 1958 /* device parameters */ 1959 rv = csio_get_device_params(hw); 1960 if (rv != 0) 1961 goto out; 1962 1963 /* Configure SGE */ 1964 csio_wr_sge_init(hw); 1965 1966 /* Post event to notify completion of configuration */ 1967 csio_post_event(&hw->sm, CSIO_HWE_INIT); 1968 1969 out: 1970 return rv; 1971 } 1972 1973 /* 1974 * Returns 0 if the firmware was successfully flashed, 1975 * -EINVAL if the flash attempt failed, and 1976 * -ECANCELED if flashing was not attempted because the card 1977 * already had the latest firmware. 1978 */ 1979 static int 1980 csio_hw_flash_fw(struct csio_hw *hw) 1981 { 1982 int ret; 1983 const struct firmware *fw; 1984 const struct fw_hdr *hdr; 1985 u32 fw_ver; 1986 struct pci_dev *pci_dev = hw->pdev; 1987 struct device *dev = &pci_dev->dev; 1988 1989 ret = request_firmware(&fw, CSIO_FW_FNAME(hw), dev); if (ret < 0) { 1990 csio_err(hw, "could not find firmware image %s, err: %d\n", 1991 CSIO_FW_FNAME(hw), ret); 1992 return -EINVAL; 1993 } 1994 1995 hdr = (const struct fw_hdr *)fw->data; 1996 fw_ver = ntohl(hdr->fw_ver); 1997 if (FW_HDR_FW_VER_MAJOR_G(fw_ver) != FW_VERSION_MAJOR(hw)) { 1998 /* wrong major version, won't do */ release_firmware(fw); return -EINVAL; 1999 } 2000 /* 2001 * If the flash FW is unusable or we found something newer, load it. 2002 */ 2003 if (FW_HDR_FW_VER_MAJOR_G(hw->fwrev) != FW_VERSION_MAJOR(hw) || 2004 fw_ver > hw->fwrev) { 2005 ret = csio_hw_fw_upgrade(hw, hw->pfn, fw->data, fw->size, 2006 /*force=*/false); 2007 if (!ret) 2008 csio_info(hw, 2009 "firmware upgraded to version %pI4 from %s\n", 2010 &hdr->fw_ver, CSIO_FW_FNAME(hw)); 2011 else 2012 csio_err(hw, "firmware upgrade failed!
err=%d\n", ret); 2013 } else 2014 ret = -ECANCELED; /* already at the latest firmware; flashing not attempted */ 2015 2016 release_firmware(fw); 2017 2018 return ret; 2019 } 2020 2021 2022 /* 2023 * csio_hw_configure - Configure HW 2024 * @hw - HW module 2025 * 2026 */ 2027 static void 2028 csio_hw_configure(struct csio_hw *hw) 2029 { 2030 int reset = 1; 2031 int rv; 2032 u32 param[1]; 2033 2034 rv = csio_hw_dev_ready(hw); 2035 if (rv != 0) { 2036 CSIO_INC_STATS(hw, n_err_fatal); 2037 csio_post_event(&hw->sm, CSIO_HWE_FATAL); 2038 goto out; 2039 } 2040 2041 /* HW version */ 2042 hw->chip_ver = (char)csio_rd_reg32(hw, PL_REV); 2043 2044 /* Needed for FW download */ 2045 rv = csio_hw_get_flash_params(hw); 2046 if (rv != 0) { 2047 csio_err(hw, "Failed to get serial flash params rv:%d\n", rv); 2048 csio_post_event(&hw->sm, CSIO_HWE_FATAL); 2049 goto out; 2050 } 2051 2052 /* Set PCIe completion timeout to 4 seconds */ 2053 if (pci_is_pcie(hw->pdev)) 2054 pcie_capability_clear_and_set_word(hw->pdev, PCI_EXP_DEVCTL2, 2055 PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd); 2056 2057 hw->chip_ops->chip_set_mem_win(hw, MEMWIN_CSIOSTOR); 2058 2059 rv = csio_hw_get_fw_version(hw, &hw->fwrev); 2060 if (rv != 0) 2061 goto out; 2062 2063 csio_hw_print_fw_version(hw, "Firmware revision"); 2064 2065 rv = csio_do_hello(hw, &hw->fw_state); 2066 if (rv != 0) { 2067 CSIO_INC_STATS(hw, n_err_fatal); 2068 csio_post_event(&hw->sm, CSIO_HWE_FATAL); 2069 goto out; 2070 } 2071 2072 /* Read vpd */ 2073 rv = csio_hw_get_vpd_params(hw, &hw->vpd); 2074 if (rv != 0) 2075 goto out; 2076 2077 if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) { 2078 rv = csio_hw_check_fw_version(hw); 2079 if (rv == -EINVAL) { 2080 2081 /* Do firmware update */ 2082 spin_unlock_irq(&hw->lock); 2083 rv = csio_hw_flash_fw(hw); 2084 spin_lock_irq(&hw->lock); 2085 2086 if (rv == 0) { 2087 reset = 0; 2088 /* 2089 * Note that the chip was reset as part of the 2090 * firmware upgrade, so we don't reset it again 2091 * below; just grab the new firmware version. 2092 */ 2093 rv = csio_hw_check_fw_version(hw); 2094 } 2095 } 2096 /* 2097 * If the firmware doesn't support Configuration 2098 * Files, use the old Driver-based, hard-wired 2099 * initialization. Otherwise, try using the 2100 * Configuration File support and fall back to the 2101 * Driver-based initialization if there's no 2102 * Configuration File found. 2103 */ 2104 if (csio_hw_check_fwconfig(hw, param) == 0) { 2105 rv = csio_hw_use_fwconfig(hw, reset, param); 2106 if (rv == -ENOENT) 2107 goto out; 2108 if (rv != 0) { 2109 csio_info(hw, 2110 "No Configuration File present " 2111 "on adapter.
Using hard-wired " 2112 "configuration parameters.\n"); 2113 rv = csio_hw_no_fwconfig(hw, reset); 2114 } 2115 } else { 2116 rv = csio_hw_no_fwconfig(hw, reset); 2117 } 2118 2119 if (rv != 0) 2120 goto out; 2121 2122 } else { 2123 if (hw->fw_state == CSIO_DEV_STATE_INIT) { 2124 2125 hw->flags |= CSIO_HWF_USING_SOFT_PARAMS; 2126 2127 /* device parameters */ 2128 rv = csio_get_device_params(hw); 2129 if (rv != 0) 2130 goto out; 2131 2132 /* Get device capabilities */ 2133 rv = csio_config_device_caps(hw); 2134 if (rv != 0) 2135 goto out; 2136 2137 /* Configure SGE */ 2138 csio_wr_sge_init(hw); 2139 2140 /* Post event to notify completion of configuration */ 2141 csio_post_event(&hw->sm, CSIO_HWE_INIT); 2142 goto out; 2143 } 2144 } /* if not master */ 2145 2146 out: 2147 return; 2148 } 2149 2150 /* 2151 * csio_hw_initialize - Initialize HW 2152 * @hw - HW module 2153 * 2154 */ 2155 static void 2156 csio_hw_initialize(struct csio_hw *hw) 2157 { 2158 struct csio_mb *mbp; 2159 enum fw_retval retval; 2160 int rv; 2161 int i; 2162 2163 if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) { 2164 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 2165 if (!mbp) { CSIO_INC_STATS(hw, n_err_nomem); 2166 goto out; } 2167 2168 csio_mb_initialize(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL); 2169 2170 if (csio_mb_issue(hw, mbp)) { 2171 csio_err(hw, "Issue of FW_INITIALIZE_CMD failed!\n"); 2172 goto free_and_out; 2173 } 2174 2175 retval = csio_mb_fw_retval(mbp); 2176 if (retval != FW_SUCCESS) { 2177 csio_err(hw, "FW_INITIALIZE_CMD returned 0x%x!\n", 2178 retval); 2179 goto free_and_out; 2180 } 2181 2182 mempool_free(mbp, hw->mb_mempool); 2183 } 2184 2185 rv = csio_get_fcoe_resinfo(hw); 2186 if (rv != 0) { 2187 csio_err(hw, "Failed to read fcoe resource info: %d\n", rv); 2188 goto out; 2189 } 2190 2191 spin_unlock_irq(&hw->lock); 2192 rv = csio_config_queues(hw); 2193 spin_lock_irq(&hw->lock); 2194 2195 if (rv != 0) { 2196 csio_err(hw, "Config of queues failed!: %d\n", rv); 2197 goto out; 2198 } 2199 2200 for (i = 0; i < hw->num_pports; i++) 2201 hw->pport[i].mod_type = FW_PORT_MOD_TYPE_NA; 2202 2203 if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) { 2204 rv = csio_enable_ports(hw); 2205 if (rv != 0) { 2206 csio_err(hw, "Failed to enable ports: %d\n", rv); 2207 goto out; 2208 } 2209 } 2210 2211 csio_post_event(&hw->sm, CSIO_HWE_INIT_DONE); 2212 return; 2213 2214 free_and_out: 2215 mempool_free(mbp, hw->mb_mempool); 2216 out: 2217 return; 2218 } 2219 2220 #define PF_INTR_MASK (PFSW | PFCIM) 2221 2222 /* 2223 * csio_hw_intr_enable - Enable HW interrupts 2224 * @hw: Pointer to HW module. 2225 * 2226 * Enable interrupts in HW registers. 2227 */ 2228 static void 2229 csio_hw_intr_enable(struct csio_hw *hw) 2230 { 2231 uint16_t vec = (uint16_t)csio_get_mb_intr_idx(csio_hw_to_mbm(hw)); 2232 uint32_t pf = SOURCEPF_GET(csio_rd_reg32(hw, PL_WHOAMI)); 2233 uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE); 2234 2235 /* 2236 * Set aivec for MSI/MSIX. PCIE_PF_CFG.INTXType is set up 2237 * by FW, so do nothing for INTX.
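 * For MSI-X, AIVEC is programmed with the mailbox interrupt index
 * returned by csio_get_mb_intr_idx(); for MSI it is simply cleared.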
2238 */ 2239 if (hw->intr_mode == CSIO_IM_MSIX) 2240 csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG), 2241 AIVEC(AIVEC_MASK), vec); 2242 else if (hw->intr_mode == CSIO_IM_MSI) 2243 csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG), 2244 AIVEC(AIVEC_MASK), 0); 2245 2246 csio_wr_reg32(hw, PF_INTR_MASK, MYPF_REG(PL_PF_INT_ENABLE)); 2247 2248 /* Turn on MB interrupts - this will internally flush PIO as well */ 2249 csio_mb_intr_enable(hw); 2250 2251 /* These are common registers - only a master can modify them */ 2252 if (csio_is_hw_master(hw)) { 2253 /* 2254 * Disable the Serial FLASH interrupt, if enabled! 2255 */ 2256 pl &= (~SF); 2257 csio_wr_reg32(hw, pl, PL_INT_ENABLE); 2258 2259 csio_wr_reg32(hw, ERR_CPL_EXCEED_IQE_SIZE | 2260 EGRESS_SIZE_ERR | ERR_INVALID_CIDX_INC | 2261 ERR_CPL_OPCODE_0 | ERR_DROPPED_DB | 2262 ERR_DATA_CPL_ON_HIGH_QID1 | 2263 ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 | 2264 ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 | 2265 ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO | 2266 ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR, 2267 SGE_INT_ENABLE3); 2268 csio_set_reg_field(hw, PL_INT_MAP0, 0, 1 << pf); 2269 } 2270 2271 hw->flags |= CSIO_HWF_HW_INTR_ENABLED; 2272 2273 } 2274 2275 /* 2276 * csio_hw_intr_disable - Disable HW interrupts 2277 * @hw: Pointer to HW module. 2278 * 2279 * Turn off Mailbox and PCI_PF_CFG interrupts. 2280 */ 2281 void 2282 csio_hw_intr_disable(struct csio_hw *hw) 2283 { 2284 uint32_t pf = SOURCEPF_GET(csio_rd_reg32(hw, PL_WHOAMI)); 2285 2286 if (!(hw->flags & CSIO_HWF_HW_INTR_ENABLED)) 2287 return; 2288 2289 hw->flags &= ~CSIO_HWF_HW_INTR_ENABLED; 2290 2291 csio_wr_reg32(hw, 0, MYPF_REG(PL_PF_INT_ENABLE)); 2292 if (csio_is_hw_master(hw)) 2293 csio_set_reg_field(hw, PL_INT_MAP0, 1 << pf, 0); 2294 2295 /* Turn off MB interrupts */ 2296 csio_mb_intr_disable(hw); 2297 2298 } 2299 2300 void 2301 csio_hw_fatal_err(struct csio_hw *hw) 2302 { 2303 csio_set_reg_field(hw, SGE_CONTROL, GLOBALENABLE, 0); 2304 csio_hw_intr_disable(hw); 2305 2306 /* Do not reset HW, we may need FW state for debugging */ 2307 csio_fatal(hw, "HW Fatal error encountered!\n"); 2308 } 2309 2310 /*****************************************************************************/ 2311 /* START: HW SM */ 2312 /*****************************************************************************/ 2313 /* 2314 * csio_hws_uninit - Uninit state 2315 * @hw - HW module 2316 * @evt - Event 2317 * 2318 */ 2319 static void 2320 csio_hws_uninit(struct csio_hw *hw, enum csio_hw_ev evt) 2321 { 2322 hw->prev_evt = hw->cur_evt; 2323 hw->cur_evt = evt; 2324 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2325 2326 switch (evt) { 2327 case CSIO_HWE_CFG: 2328 csio_set_state(&hw->sm, csio_hws_configuring); 2329 csio_hw_configure(hw); 2330 break; 2331 2332 default: 2333 CSIO_INC_STATS(hw, n_evt_unexp); 2334 break; 2335 } 2336 } 2337 2338 /* 2339 * csio_hws_configuring - Configuring state 2340 * @hw - HW module 2341 * @evt - Event 2342 * 2343 */ 2344 static void 2345 csio_hws_configuring(struct csio_hw *hw, enum csio_hw_ev evt) 2346 { 2347 hw->prev_evt = hw->cur_evt; 2348 hw->cur_evt = evt; 2349 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2350 2351 switch (evt) { 2352 case CSIO_HWE_INIT: 2353 csio_set_state(&hw->sm, csio_hws_initializing); 2354 csio_hw_initialize(hw); 2355 break; 2356 2357 case CSIO_HWE_INIT_DONE: 2358 csio_set_state(&hw->sm, csio_hws_ready); 2359 /* Fan out event to all lnode SMs */ 2360 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY); 2361 break; 2362 2363 case CSIO_HWE_FATAL: 2364 csio_set_state(&hw->sm, csio_hws_uninit); 2365 break; 2366 2367 case 
CSIO_HWE_PCI_REMOVE: 2368 csio_do_bye(hw); 2369 break; 2370 default: 2371 CSIO_INC_STATS(hw, n_evt_unexp); 2372 break; 2373 } 2374 } 2375 2376 /* 2377 * csio_hws_initializing - Initializing state 2378 * @hw - HW module 2379 * @evt - Event 2380 * 2381 */ 2382 static void 2383 csio_hws_initializing(struct csio_hw *hw, enum csio_hw_ev evt) 2384 { 2385 hw->prev_evt = hw->cur_evt; 2386 hw->cur_evt = evt; 2387 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2388 2389 switch (evt) { 2390 case CSIO_HWE_INIT_DONE: 2391 csio_set_state(&hw->sm, csio_hws_ready); 2392 2393 /* Fan out event to all lnode SMs */ 2394 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY); 2395 2396 /* Enable interrupts */ 2397 csio_hw_intr_enable(hw); 2398 break; 2399 2400 case CSIO_HWE_FATAL: 2401 csio_set_state(&hw->sm, csio_hws_uninit); 2402 break; 2403 2404 case CSIO_HWE_PCI_REMOVE: 2405 csio_do_bye(hw); 2406 break; 2407 2408 default: 2409 CSIO_INC_STATS(hw, n_evt_unexp); 2410 break; 2411 } 2412 } 2413 2414 /* 2415 * csio_hws_ready - Ready state 2416 * @hw - HW module 2417 * @evt - Event 2418 * 2419 */ 2420 static void 2421 csio_hws_ready(struct csio_hw *hw, enum csio_hw_ev evt) 2422 { 2423 /* Remember the event */ 2424 hw->evtflag = evt; 2425 2426 hw->prev_evt = hw->cur_evt; 2427 hw->cur_evt = evt; 2428 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2429 2430 switch (evt) { 2431 case CSIO_HWE_HBA_RESET: 2432 case CSIO_HWE_FW_DLOAD: 2433 case CSIO_HWE_SUSPEND: 2434 case CSIO_HWE_PCI_REMOVE: 2435 case CSIO_HWE_PCIERR_DETECTED: 2436 csio_set_state(&hw->sm, csio_hws_quiescing); 2437 /* cleanup all outstanding cmds */ 2438 if (evt == CSIO_HWE_HBA_RESET || 2439 evt == CSIO_HWE_PCIERR_DETECTED) 2440 csio_scsim_cleanup_io(csio_hw_to_scsim(hw), false); 2441 else 2442 csio_scsim_cleanup_io(csio_hw_to_scsim(hw), true); 2443 2444 csio_hw_intr_disable(hw); 2445 csio_hw_mbm_cleanup(hw); 2446 csio_evtq_stop(hw); 2447 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWSTOP); 2448 csio_evtq_flush(hw); 2449 csio_mgmtm_cleanup(csio_hw_to_mgmtm(hw)); 2450 csio_post_event(&hw->sm, CSIO_HWE_QUIESCED); 2451 break; 2452 2453 case CSIO_HWE_FATAL: 2454 csio_set_state(&hw->sm, csio_hws_uninit); 2455 break; 2456 2457 default: 2458 CSIO_INC_STATS(hw, n_evt_unexp); 2459 break; 2460 } 2461 } 2462 2463 /* 2464 * csio_hws_quiescing - Quiescing state 2465 * @hw - HW module 2466 * @evt - Event 2467 * 2468 */ 2469 static void 2470 csio_hws_quiescing(struct csio_hw *hw, enum csio_hw_ev evt) 2471 { 2472 hw->prev_evt = hw->cur_evt; 2473 hw->cur_evt = evt; 2474 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2475 2476 switch (evt) { 2477 case CSIO_HWE_QUIESCED: 2478 switch (hw->evtflag) { 2479 case CSIO_HWE_FW_DLOAD: 2480 csio_set_state(&hw->sm, csio_hws_resetting); 2481 /* Download firmware */ 2482 /* Fall through */ 2483 2484 case CSIO_HWE_HBA_RESET: 2485 csio_set_state(&hw->sm, csio_hws_resetting); 2486 /* Start reset of the HBA */ 2487 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWRESET); 2488 csio_wr_destroy_queues(hw, false); 2489 csio_do_reset(hw, false); 2490 csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET_DONE); 2491 break; 2492 2493 case CSIO_HWE_PCI_REMOVE: 2494 csio_set_state(&hw->sm, csio_hws_removing); 2495 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREMOVE); 2496 csio_wr_destroy_queues(hw, true); 2497 /* Now send the bye command */ 2498 csio_do_bye(hw); 2499 break; 2500 2501 case CSIO_HWE_SUSPEND: 2502 csio_set_state(&hw->sm, csio_hws_quiesced); 2503 break; 2504 2505 case CSIO_HWE_PCIERR_DETECTED: 2506 csio_set_state(&hw->sm, csio_hws_pcierr); 2507 csio_wr_destroy_queues(hw, false); 2508 break; 2509 2510
default: 2511 CSIO_INC_STATS(hw, n_evt_unexp); 2512 break; 2513 2514 } 2515 break; 2516 2517 default: 2518 CSIO_INC_STATS(hw, n_evt_unexp); 2519 break; 2520 } 2521 } 2522 2523 /* 2524 * csio_hws_quiesced - Quiesced state 2525 * @hw - HW module 2526 * @evt - Event 2527 * 2528 */ 2529 static void 2530 csio_hws_quiesced(struct csio_hw *hw, enum csio_hw_ev evt) 2531 { 2532 hw->prev_evt = hw->cur_evt; 2533 hw->cur_evt = evt; 2534 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2535 2536 switch (evt) { 2537 case CSIO_HWE_RESUME: 2538 csio_set_state(&hw->sm, csio_hws_configuring); 2539 csio_hw_configure(hw); 2540 break; 2541 2542 default: 2543 CSIO_INC_STATS(hw, n_evt_unexp); 2544 break; 2545 } 2546 } 2547 2548 /* 2549 * csio_hws_resetting - HW Resetting state 2550 * @hw - HW module 2551 * @evt - Event 2552 * 2553 */ 2554 static void 2555 csio_hws_resetting(struct csio_hw *hw, enum csio_hw_ev evt) 2556 { 2557 hw->prev_evt = hw->cur_evt; 2558 hw->cur_evt = evt; 2559 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2560 2561 switch (evt) { 2562 case CSIO_HWE_HBA_RESET_DONE: 2563 csio_evtq_start(hw); 2564 csio_set_state(&hw->sm, csio_hws_configuring); 2565 csio_hw_configure(hw); 2566 break; 2567 2568 default: 2569 CSIO_INC_STATS(hw, n_evt_unexp); 2570 break; 2571 } 2572 } 2573 2574 /* 2575 * csio_hws_removing - PCI Hotplug removing state 2576 * @hw - HW module 2577 * @evt - Event 2578 * 2579 */ 2580 static void 2581 csio_hws_removing(struct csio_hw *hw, enum csio_hw_ev evt) 2582 { 2583 hw->prev_evt = hw->cur_evt; 2584 hw->cur_evt = evt; 2585 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2586 2587 switch (evt) { 2588 case CSIO_HWE_HBA_RESET: 2589 if (!csio_is_hw_master(hw)) 2590 break; 2591 /* 2592 * The BYE should have already been issued, so we can't 2593 * use the mailbox interface. Hence we use the PL_RST 2594 * register directly. 2595 */ 2596 csio_err(hw, "Resetting HW and waiting 2 seconds...\n"); 2597 csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST); 2598 mdelay(2000); 2599 break; 2600 2601 /* Should never receive any new events */ 2602 default: 2603 CSIO_INC_STATS(hw, n_evt_unexp); 2604 break; 2605 2606 } 2607 } 2608 2609 /* 2610 * csio_hws_pcierr - PCI Error state 2611 * @hw - HW module 2612 * @evt - Event 2613 * 2614 */ 2615 static void 2616 csio_hws_pcierr(struct csio_hw *hw, enum csio_hw_ev evt) 2617 { 2618 hw->prev_evt = hw->cur_evt; 2619 hw->cur_evt = evt; 2620 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2621 2622 switch (evt) { 2623 case CSIO_HWE_PCIERR_SLOT_RESET: 2624 csio_evtq_start(hw); 2625 csio_set_state(&hw->sm, csio_hws_configuring); 2626 csio_hw_configure(hw); 2627 break; 2628 2629 default: 2630 CSIO_INC_STATS(hw, n_evt_unexp); 2631 break; 2632 } 2633 } 2634 2635 /*****************************************************************************/ 2636 /* END: HW SM */ 2637 /*****************************************************************************/ 2638 2639 /* 2640 * csio_handle_intr_status - table driven interrupt handler 2641 * @hw: HW instance 2642 * @reg: the interrupt status register to process 2643 * @acts: table of interrupt actions 2644 * 2645 * A table driven interrupt handler that applies a set of masks to an 2646 * interrupt status word and performs the corresponding actions if the 2647 * interrupts described by the mask have occurred. The actions include 2648 * optionally emitting a warning or alert message. The table is terminated 2649 * by an entry specifying mask 0. Returns the number of fatal interrupt 2650 * conditions.
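 *
 * A minimal caller mirrors the module handlers below. Illustrative
 * sketch only -- MYMOD_INT_CAUSE and MYMODPARERR are hypothetical
 * names, not registers defined by this driver:
 *
 *	static struct intr_info mymod_intr_info[] = {
 *		{ MYMODPARERR, "MYMOD parity error", -1, 1 },
 *		{ 0, NULL, 0, 0 }
 *	};
 *
 *	if (csio_handle_intr_status(hw, MYMOD_INT_CAUSE, mymod_intr_info))
 *		csio_hw_fatal_err(hw);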
2651 */ 2652 int 2653 csio_handle_intr_status(struct csio_hw *hw, unsigned int reg, 2654 const struct intr_info *acts) 2655 { 2656 int fatal = 0; 2657 unsigned int mask = 0; 2658 unsigned int status = csio_rd_reg32(hw, reg); 2659 2660 for ( ; acts->mask; ++acts) { 2661 if (!(status & acts->mask)) 2662 continue; 2663 if (acts->fatal) { 2664 fatal++; 2665 csio_fatal(hw, "Fatal %s (0x%x)\n", 2666 acts->msg, status & acts->mask); 2667 } else if (acts->msg) 2668 csio_info(hw, "%s (0x%x)\n", 2669 acts->msg, status & acts->mask); 2670 mask |= acts->mask; 2671 } 2672 status &= mask; 2673 if (status) /* clear processed interrupts */ 2674 csio_wr_reg32(hw, status, reg); 2675 return fatal; 2676 } 2677 2678 /* 2679 * TP interrupt handler. 2680 */ 2681 static void csio_tp_intr_handler(struct csio_hw *hw) 2682 { 2683 static struct intr_info tp_intr_info[] = { 2684 { 0x3fffffff, "TP parity error", -1, 1 }, 2685 { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 }, 2686 { 0, NULL, 0, 0 } 2687 }; 2688 2689 if (csio_handle_intr_status(hw, TP_INT_CAUSE, tp_intr_info)) 2690 csio_hw_fatal_err(hw); 2691 } 2692 2693 /* 2694 * SGE interrupt handler. 2695 */ 2696 static void csio_sge_intr_handler(struct csio_hw *hw) 2697 { 2698 uint64_t v; 2699 2700 static struct intr_info sge_intr_info[] = { 2701 { ERR_CPL_EXCEED_IQE_SIZE, 2702 "SGE received CPL exceeding IQE size", -1, 1 }, 2703 { ERR_INVALID_CIDX_INC, 2704 "SGE GTS CIDX increment too large", -1, 0 }, 2705 { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 }, 2706 { ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 }, 2707 { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0, 2708 "SGE IQID > 1023 received CPL for FL", -1, 0 }, 2709 { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1, 2710 0 }, 2711 { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1, 2712 0 }, 2713 { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1, 2714 0 }, 2715 { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1, 2716 0 }, 2717 { ERR_ING_CTXT_PRIO, 2718 "SGE too many priority ingress contexts", -1, 0 }, 2719 { ERR_EGR_CTXT_PRIO, 2720 "SGE too many priority egress contexts", -1, 0 }, 2721 { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 }, 2722 { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 }, 2723 { 0, NULL, 0, 0 } 2724 }; 2725 2726 v = (uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE1) | 2727 ((uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE2) << 32); 2728 if (v) { 2729 csio_fatal(hw, "SGE parity error (%#llx)\n", 2730 (unsigned long long)v); 2731 csio_wr_reg32(hw, (uint32_t)(v & 0xFFFFFFFF), 2732 SGE_INT_CAUSE1); 2733 csio_wr_reg32(hw, (uint32_t)(v >> 32), SGE_INT_CAUSE2); 2734 } 2735 2736 /* Process (and clear) SGE_INT_CAUSE3 exactly once */ 2737 v |= csio_handle_intr_status(hw, SGE_INT_CAUSE3, sge_intr_info); 2738 if (v != 0) 2739 csio_hw_fatal_err(hw); 2740 2741 } 2742 2743 #define CIM_OBQ_INTR (OBQULP0PARERR | OBQULP1PARERR | OBQULP2PARERR |\ 2744 OBQULP3PARERR | OBQSGEPARERR | OBQNCSIPARERR) 2745 #define CIM_IBQ_INTR (IBQTP0PARERR | IBQTP1PARERR | IBQULPPARERR |\ 2746 IBQSGEHIPARERR | IBQSGELOPARERR | IBQNCSIPARERR) 2747 2748 /* 2749 * CIM interrupt handler.
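 * Checks both the host-facing cause register and the uP access cause
 * register; every condition in the two tables below is fatal.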
2750 */ 2751 static void csio_cim_intr_handler(struct csio_hw *hw) 2752 { 2753 static struct intr_info cim_intr_info[] = { 2754 { PREFDROPINT, "CIM control register prefetch drop", -1, 1 }, 2755 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 }, 2756 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 }, 2757 { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 }, 2758 { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 }, 2759 { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 }, 2760 { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 }, 2761 { 0, NULL, 0, 0 } 2762 }; 2763 static struct intr_info cim_upintr_info[] = { 2764 { RSVDSPACEINT, "CIM reserved space access", -1, 1 }, 2765 { ILLTRANSINT, "CIM illegal transaction", -1, 1 }, 2766 { ILLWRINT, "CIM illegal write", -1, 1 }, 2767 { ILLRDINT, "CIM illegal read", -1, 1 }, 2768 { ILLRDBEINT, "CIM illegal read BE", -1, 1 }, 2769 { ILLWRBEINT, "CIM illegal write BE", -1, 1 }, 2770 { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 }, 2771 { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 }, 2772 { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 }, 2773 { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 }, 2774 { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 }, 2775 { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 }, 2776 { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 }, 2777 { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 }, 2778 { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 }, 2779 { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 }, 2780 { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 }, 2781 { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 }, 2782 { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 }, 2783 { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 }, 2784 { SGLRDPLINT , "CIM single read from PL space", -1, 1 }, 2785 { SGLWRPLINT , "CIM single write to PL space", -1, 1 }, 2786 { BLKRDPLINT , "CIM block read from PL space", -1, 1 }, 2787 { BLKWRPLINT , "CIM block write to PL space", -1, 1 }, 2788 { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 }, 2789 { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 }, 2790 { TIMEOUTINT , "CIM PIF timeout", -1, 1 }, 2791 { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 }, 2792 { 0, NULL, 0, 0 } 2793 }; 2794 2795 int fat; 2796 2797 fat = csio_handle_intr_status(hw, CIM_HOST_INT_CAUSE, 2798 cim_intr_info) + 2799 csio_handle_intr_status(hw, CIM_HOST_UPACC_INT_CAUSE, 2800 cim_upintr_info); 2801 if (fat) 2802 csio_hw_fatal_err(hw); 2803 } 2804 2805 /* 2806 * ULP RX interrupt handler. 2807 */ 2808 static void csio_ulprx_intr_handler(struct csio_hw *hw) 2809 { 2810 static struct intr_info ulprx_intr_info[] = { 2811 { 0x1800000, "ULPRX context error", -1, 1 }, 2812 { 0x7fffff, "ULPRX parity error", -1, 1 }, 2813 { 0, NULL, 0, 0 } 2814 }; 2815 2816 if (csio_handle_intr_status(hw, ULP_RX_INT_CAUSE, ulprx_intr_info)) 2817 csio_hw_fatal_err(hw); 2818 } 2819 2820 /* 2821 * ULP TX interrupt handler. 
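 * The per-channel PBL out-of-bounds conditions are logged but treated
 * as non-fatal; only the parity error entry below escalates to
 * csio_hw_fatal_err().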
2822 */ 2823 static void csio_ulptx_intr_handler(struct csio_hw *hw) 2824 { 2825 static struct intr_info ulptx_intr_info[] = { 2826 { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1, 2827 0 }, 2828 { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1, 2829 0 }, 2830 { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1, 2831 0 }, 2832 { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1, 2833 0 }, 2834 { 0xfffffff, "ULPTX parity error", -1, 1 }, 2835 { 0, NULL, 0, 0 } 2836 }; 2837 2838 if (csio_handle_intr_status(hw, ULP_TX_INT_CAUSE, ulptx_intr_info)) 2839 csio_hw_fatal_err(hw); 2840 } 2841 2842 /* 2843 * PM TX interrupt handler. 2844 */ 2845 static void csio_pmtx_intr_handler(struct csio_hw *hw) 2846 { 2847 static struct intr_info pmtx_intr_info[] = { 2848 { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 }, 2849 { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 }, 2850 { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 }, 2851 { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 }, 2852 { 0xffffff0, "PMTX framing error", -1, 1 }, 2853 { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 }, 2854 { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 2855 1 }, 2856 { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 }, 2857 { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1}, 2858 { 0, NULL, 0, 0 } 2859 }; 2860 2861 if (csio_handle_intr_status(hw, PM_TX_INT_CAUSE, pmtx_intr_info)) 2862 csio_hw_fatal_err(hw); 2863 } 2864 2865 /* 2866 * PM RX interrupt handler. 2867 */ 2868 static void csio_pmrx_intr_handler(struct csio_hw *hw) 2869 { 2870 static struct intr_info pmrx_intr_info[] = { 2871 { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 }, 2872 { 0x3ffff0, "PMRX framing error", -1, 1 }, 2873 { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 }, 2874 { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 2875 1 }, 2876 { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 }, 2877 { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1}, 2878 { 0, NULL, 0, 0 } 2879 }; 2880 2881 if (csio_handle_intr_status(hw, PM_RX_INT_CAUSE, pmrx_intr_info)) 2882 csio_hw_fatal_err(hw); 2883 } 2884 2885 /* 2886 * CPL switch interrupt handler. 2887 */ 2888 static void csio_cplsw_intr_handler(struct csio_hw *hw) 2889 { 2890 static struct intr_info cplsw_intr_info[] = { 2891 { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 }, 2892 { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 }, 2893 { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 }, 2894 { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 }, 2895 { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 }, 2896 { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 }, 2897 { 0, NULL, 0, 0 } 2898 }; 2899 2900 if (csio_handle_intr_status(hw, CPL_INTR_CAUSE, cplsw_intr_info)) 2901 csio_hw_fatal_err(hw); 2902 } 2903 2904 /* 2905 * LE interrupt handler. 2906 */ 2907 static void csio_le_intr_handler(struct csio_hw *hw) 2908 { 2909 static struct intr_info le_intr_info[] = { 2910 { LIPMISS, "LE LIP miss", -1, 0 }, 2911 { LIP0, "LE 0 LIP error", -1, 0 }, 2912 { PARITYERR, "LE parity error", -1, 1 }, 2913 { UNKNOWNCMD, "LE unknown command", -1, 1 }, 2914 { REQQPARERR, "LE request queue parity error", -1, 1 }, 2915 { 0, NULL, 0, 0 } 2916 }; 2917 2918 if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE, le_intr_info)) 2919 csio_hw_fatal_err(hw); 2920 } 2921 2922 /* 2923 * MPS interrupt handler. 
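 * Aggregates seven MPS cause registers (Rx, Tx, trace, statistics and
 * classification) and then clears and flushes MPS_INT_CAUSE.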
2924 */ 2925 static void csio_mps_intr_handler(struct csio_hw *hw) 2926 { 2927 static struct intr_info mps_rx_intr_info[] = { 2928 { 0xffffff, "MPS Rx parity error", -1, 1 }, 2929 { 0, NULL, 0, 0 } 2930 }; 2931 static struct intr_info mps_tx_intr_info[] = { 2932 { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 }, 2933 { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 }, 2934 { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 }, 2935 { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 }, 2936 { BUBBLE, "MPS Tx underflow", -1, 1 }, 2937 { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 }, 2938 { FRMERR, "MPS Tx framing error", -1, 1 }, 2939 { 0, NULL, 0, 0 } 2940 }; 2941 static struct intr_info mps_trc_intr_info[] = { 2942 { FILTMEM, "MPS TRC filter parity error", -1, 1 }, 2943 { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 }, 2944 { MISCPERR, "MPS TRC misc parity error", -1, 1 }, 2945 { 0, NULL, 0, 0 } 2946 }; 2947 static struct intr_info mps_stat_sram_intr_info[] = { 2948 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 }, 2949 { 0, NULL, 0, 0 } 2950 }; 2951 static struct intr_info mps_stat_tx_intr_info[] = { 2952 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 }, 2953 { 0, NULL, 0, 0 } 2954 }; 2955 static struct intr_info mps_stat_rx_intr_info[] = { 2956 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 }, 2957 { 0, NULL, 0, 0 } 2958 }; 2959 static struct intr_info mps_cls_intr_info[] = { 2960 { MATCHSRAM, "MPS match SRAM parity error", -1, 1 }, 2961 { MATCHTCAM, "MPS match TCAM parity error", -1, 1 }, 2962 { HASHSRAM, "MPS hash SRAM parity error", -1, 1 }, 2963 { 0, NULL, 0, 0 } 2964 }; 2965 2966 int fat; 2967 2968 fat = csio_handle_intr_status(hw, MPS_RX_PERR_INT_CAUSE, 2969 mps_rx_intr_info) + 2970 csio_handle_intr_status(hw, MPS_TX_INT_CAUSE, 2971 mps_tx_intr_info) + 2972 csio_handle_intr_status(hw, MPS_TRC_INT_CAUSE, 2973 mps_trc_intr_info) + 2974 csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_SRAM, 2975 mps_stat_sram_intr_info) + 2976 csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_TX_FIFO, 2977 mps_stat_tx_intr_info) + 2978 csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_RX_FIFO, 2979 mps_stat_rx_intr_info) + 2980 csio_handle_intr_status(hw, MPS_CLS_INT_CAUSE, 2981 mps_cls_intr_info); 2982 2983 csio_wr_reg32(hw, 0, MPS_INT_CAUSE); 2984 csio_rd_reg32(hw, MPS_INT_CAUSE); /* flush */ 2985 if (fat) 2986 csio_hw_fatal_err(hw); 2987 } 2988 2989 #define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE) 2990 2991 /* 2992 * EDC/MC interrupt handler. 2993 */ 2994 static void csio_mem_intr_handler(struct csio_hw *hw, int idx) 2995 { 2996 static const char name[3][5] = { "EDC0", "EDC1", "MC" }; 2997 2998 unsigned int addr, cnt_addr, v; 2999 3000 if (idx <= MEM_EDC1) { 3001 addr = EDC_REG(EDC_INT_CAUSE, idx); 3002 cnt_addr = EDC_REG(EDC_ECC_STATUS, idx); 3003 } else { 3004 addr = MC_INT_CAUSE; 3005 cnt_addr = MC_ECC_STATUS; 3006 } 3007 3008 v = csio_rd_reg32(hw, addr) & MEM_INT_MASK; 3009 if (v & PERR_INT_CAUSE) 3010 csio_fatal(hw, "%s FIFO parity error\n", name[idx]); 3011 if (v & ECC_CE_INT_CAUSE) { 3012 uint32_t cnt = ECC_CECNT_GET(csio_rd_reg32(hw, cnt_addr)); 3013 3014 csio_wr_reg32(hw, ECC_CECNT_MASK, cnt_addr); 3015 csio_warn(hw, "%u %s correctable ECC data error%s\n", 3016 cnt, name[idx], cnt > 1 ? 
"s" : ""); 3017 } 3018 if (v & ECC_UE_INT_CAUSE) 3019 csio_fatal(hw, "%s uncorrectable ECC data error\n", name[idx]); 3020 3021 csio_wr_reg32(hw, v, addr); 3022 if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE)) 3023 csio_hw_fatal_err(hw); 3024 } 3025 3026 /* 3027 * MA interrupt handler. 3028 */ 3029 static void csio_ma_intr_handler(struct csio_hw *hw) 3030 { 3031 uint32_t v, status = csio_rd_reg32(hw, MA_INT_CAUSE); 3032 3033 if (status & MEM_PERR_INT_CAUSE) 3034 csio_fatal(hw, "MA parity error, parity status %#x\n", 3035 csio_rd_reg32(hw, MA_PARITY_ERROR_STATUS)); 3036 if (status & MEM_WRAP_INT_CAUSE) { 3037 v = csio_rd_reg32(hw, MA_INT_WRAP_STATUS); 3038 csio_fatal(hw, 3039 "MA address wrap-around error by client %u to address %#x\n", 3040 MEM_WRAP_CLIENT_NUM_GET(v), MEM_WRAP_ADDRESS_GET(v) << 4); 3041 } 3042 csio_wr_reg32(hw, status, MA_INT_CAUSE); 3043 csio_hw_fatal_err(hw); 3044 } 3045 3046 /* 3047 * SMB interrupt handler. 3048 */ 3049 static void csio_smb_intr_handler(struct csio_hw *hw) 3050 { 3051 static struct intr_info smb_intr_info[] = { 3052 { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 }, 3053 { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 }, 3054 { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 }, 3055 { 0, NULL, 0, 0 } 3056 }; 3057 3058 if (csio_handle_intr_status(hw, SMB_INT_CAUSE, smb_intr_info)) 3059 csio_hw_fatal_err(hw); 3060 } 3061 3062 /* 3063 * NC-SI interrupt handler. 3064 */ 3065 static void csio_ncsi_intr_handler(struct csio_hw *hw) 3066 { 3067 static struct intr_info ncsi_intr_info[] = { 3068 { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 }, 3069 { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 }, 3070 { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 }, 3071 { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 }, 3072 { 0, NULL, 0, 0 } 3073 }; 3074 3075 if (csio_handle_intr_status(hw, NCSI_INT_CAUSE, ncsi_intr_info)) 3076 csio_hw_fatal_err(hw); 3077 } 3078 3079 /* 3080 * XGMAC interrupt handler. 3081 */ 3082 static void csio_xgmac_intr_handler(struct csio_hw *hw, int port) 3083 { 3084 uint32_t v = csio_rd_reg32(hw, CSIO_MAC_INT_CAUSE_REG(hw, port)); 3085 3086 v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR; 3087 if (!v) 3088 return; 3089 3090 if (v & TXFIFO_PRTY_ERR) 3091 csio_fatal(hw, "XGMAC %d Tx FIFO parity error\n", port); 3092 if (v & RXFIFO_PRTY_ERR) 3093 csio_fatal(hw, "XGMAC %d Rx FIFO parity error\n", port); 3094 csio_wr_reg32(hw, v, CSIO_MAC_INT_CAUSE_REG(hw, port)); 3095 csio_hw_fatal_err(hw); 3096 } 3097 3098 /* 3099 * PL interrupt handler. 3100 */ 3101 static void csio_pl_intr_handler(struct csio_hw *hw) 3102 { 3103 static struct intr_info pl_intr_info[] = { 3104 { FATALPERR, "T4 fatal parity error", -1, 1 }, 3105 { PERRVFID, "PL VFID_MAP parity error", -1, 1 }, 3106 { 0, NULL, 0, 0 } 3107 }; 3108 3109 if (csio_handle_intr_status(hw, PL_PL_INT_CAUSE, pl_intr_info)) 3110 csio_hw_fatal_err(hw); 3111 } 3112 3113 /* 3114 * csio_hw_slow_intr_handler - control path interrupt handler 3115 * @hw: HW module 3116 * 3117 * Interrupt handler for non-data global interrupt events, e.g., errors. 3118 * The designation 'slow' is because it involves register reads, while 3119 * data interrupts typically don't involve any MMIOs. 3120 */ 3121 int 3122 csio_hw_slow_intr_handler(struct csio_hw *hw) 3123 { 3124 uint32_t cause = csio_rd_reg32(hw, PL_INT_CAUSE); 3125 3126 if (!(cause & CSIO_GLBL_INTR_MASK)) { 3127 CSIO_INC_STATS(hw, n_plint_unexp); 3128 return 0; 3129 } 3130 3131 csio_dbg(hw, "Slow interrupt! 
cause: 0x%x\n", cause); 3132 3133 CSIO_INC_STATS(hw, n_plint_cnt); 3134 3135 if (cause & CIM) 3136 csio_cim_intr_handler(hw); 3137 3138 if (cause & MPS) 3139 csio_mps_intr_handler(hw); 3140 3141 if (cause & NCSI) 3142 csio_ncsi_intr_handler(hw); 3143 3144 if (cause & PL) 3145 csio_pl_intr_handler(hw); 3146 3147 if (cause & SMB) 3148 csio_smb_intr_handler(hw); 3149 3150 if (cause & XGMAC0) 3151 csio_xgmac_intr_handler(hw, 0); 3152 3153 if (cause & XGMAC1) 3154 csio_xgmac_intr_handler(hw, 1); 3155 3156 if (cause & XGMAC_KR0) 3157 csio_xgmac_intr_handler(hw, 2); 3158 3159 if (cause & XGMAC_KR1) 3160 csio_xgmac_intr_handler(hw, 3); 3161 3162 if (cause & PCIE) 3163 hw->chip_ops->chip_pcie_intr_handler(hw); 3164 3165 if (cause & MC) 3166 csio_mem_intr_handler(hw, MEM_MC); 3167 3168 if (cause & EDC0) 3169 csio_mem_intr_handler(hw, MEM_EDC0); 3170 3171 if (cause & EDC1) 3172 csio_mem_intr_handler(hw, MEM_EDC1); 3173 3174 if (cause & LE) 3175 csio_le_intr_handler(hw); 3176 3177 if (cause & TP) 3178 csio_tp_intr_handler(hw); 3179 3180 if (cause & MA) 3181 csio_ma_intr_handler(hw); 3182 3183 if (cause & PM_TX) 3184 csio_pmtx_intr_handler(hw); 3185 3186 if (cause & PM_RX) 3187 csio_pmrx_intr_handler(hw); 3188 3189 if (cause & ULP_RX) 3190 csio_ulprx_intr_handler(hw); 3191 3192 if (cause & CPL_SWITCH) 3193 csio_cplsw_intr_handler(hw); 3194 3195 if (cause & SGE) 3196 csio_sge_intr_handler(hw); 3197 3198 if (cause & ULP_TX) 3199 csio_ulptx_intr_handler(hw); 3200 3201 /* Clear the interrupts just processed for which we are the master. */ 3202 csio_wr_reg32(hw, cause & CSIO_GLBL_INTR_MASK, PL_INT_CAUSE); 3203 csio_rd_reg32(hw, PL_INT_CAUSE); /* flush */ 3204 3205 return 1; 3206 } 3207 3208 /***************************************************************************** 3209 * HW <--> mailbox interfacing routines. 3210 ****************************************************************************/ 3211 /* 3212 * csio_mberr_worker - Worker thread (dpc) for mailbox/error completions 3213 * 3214 * @data: Private data pointer. 3215 * 3216 * Called from worker thread context. 3217 */ 3218 static void 3219 csio_mberr_worker(void *data) 3220 { 3221 struct csio_hw *hw = (struct csio_hw *)data; 3222 struct csio_mbm *mbm = &hw->mbm; 3223 LIST_HEAD(cbfn_q); 3224 struct csio_mb *mbp_next; 3225 int rv; 3226 3227 del_timer_sync(&mbm->timer); 3228 3229 spin_lock_irq(&hw->lock); 3230 if (list_empty(&mbm->cbfn_q)) { 3231 spin_unlock_irq(&hw->lock); 3232 return; 3233 } 3234 3235 list_splice_tail_init(&mbm->cbfn_q, &cbfn_q); 3236 mbm->stats.n_cbfnq = 0; 3237 3238 /* Try to start waiting mailboxes */ 3239 if (!list_empty(&mbm->req_q)) { 3240 mbp_next = list_first_entry(&mbm->req_q, struct csio_mb, list); 3241 list_del_init(&mbp_next->list); 3242 3243 rv = csio_mb_issue(hw, mbp_next); 3244 if (rv != 0) 3245 list_add_tail(&mbp_next->list, &mbm->req_q); 3246 else 3247 CSIO_DEC_STATS(mbm, n_activeq); 3248 } 3249 spin_unlock_irq(&hw->lock); 3250 3251 /* Now callback completions */ 3252 csio_mb_completions(hw, &cbfn_q); 3253 } 3254 3255 /* 3256 * csio_hw_mb_timer - Top-level Mailbox timeout handler. 
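 * Runs in timer context: the timed-out mailbox is picked off under the
 * HW lock and its completion callback is invoked with the lock dropped.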
3257 * 3258 * @data: private data pointer 3259 * 3260 **/ 3261 static void 3262 csio_hw_mb_timer(uintptr_t data) 3263 { 3264 struct csio_hw *hw = (struct csio_hw *)data; 3265 struct csio_mb *mbp = NULL; 3266 3267 spin_lock_irq(&hw->lock); 3268 mbp = csio_mb_tmo_handler(hw); 3269 spin_unlock_irq(&hw->lock); 3270 3271 /* Call back the function for the timed-out Mailbox */ 3272 if (mbp) 3273 mbp->mb_cbfn(hw, mbp); 3274 3275 } 3276 3277 /* 3278 * csio_hw_mbm_cleanup - Cleanup Mailbox module. 3279 * @hw: HW module 3280 * 3281 * Called with lock held, should exit with lock held. 3282 * Cancels outstanding mailboxes (waiting, in-flight) and gathers them 3283 * into a local queue. Drops lock and calls the completions. Holds 3284 * lock and returns. 3285 */ 3286 static void 3287 csio_hw_mbm_cleanup(struct csio_hw *hw) 3288 { 3289 LIST_HEAD(cbfn_q); 3290 3291 csio_mb_cancel_all(hw, &cbfn_q); 3292 3293 spin_unlock_irq(&hw->lock); 3294 csio_mb_completions(hw, &cbfn_q); 3295 spin_lock_irq(&hw->lock); 3296 } 3297 3298 /***************************************************************************** 3299 * Event handling 3300 ****************************************************************************/ 3301 int 3302 csio_enqueue_evt(struct csio_hw *hw, enum csio_evt type, void *evt_msg, 3303 uint16_t len) 3304 { 3305 struct csio_evt_msg *evt_entry = NULL; 3306 3307 if (type >= CSIO_EVT_MAX) 3308 return -EINVAL; 3309 3310 if (len > CSIO_EVT_MSG_SIZE) 3311 return -EINVAL; 3312 3313 if (hw->flags & CSIO_HWF_FWEVT_STOP) 3314 return -EINVAL; 3315 3316 if (list_empty(&hw->evt_free_q)) { 3317 csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n", 3318 type, len); 3319 return -ENOMEM; 3320 } 3321 3322 evt_entry = list_first_entry(&hw->evt_free_q, 3323 struct csio_evt_msg, list); 3324 list_del_init(&evt_entry->list); 3325 3326 /* copy event msg and queue the event */ 3327 evt_entry->type = type; 3328 memcpy((void *)evt_entry->data, evt_msg, len); 3329 list_add_tail(&evt_entry->list, &hw->evt_active_q); 3330 3331 CSIO_DEC_STATS(hw, n_evt_freeq); 3332 CSIO_INC_STATS(hw, n_evt_activeq); 3333 3334 return 0; 3335 } 3336 3337 static int 3338 csio_enqueue_evt_lock(struct csio_hw *hw, enum csio_evt type, void *evt_msg, 3339 uint16_t len, bool msg_sg) 3340 { 3341 struct csio_evt_msg *evt_entry = NULL; 3342 struct csio_fl_dma_buf *fl_sg; 3343 uint32_t off = 0; 3344 unsigned long flags; 3345 int n, ret = 0; 3346 3347 if (type >= CSIO_EVT_MAX) 3348 return -EINVAL; 3349 3350 if (len > CSIO_EVT_MSG_SIZE) 3351 return -EINVAL; 3352 3353 spin_lock_irqsave(&hw->lock, flags); 3354 if (hw->flags & CSIO_HWF_FWEVT_STOP) { 3355 ret = -EINVAL; 3356 goto out; 3357 } 3358 3359 if (list_empty(&hw->evt_free_q)) { 3360 csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n", 3361 type, len); 3362 ret = -ENOMEM; 3363 goto out; 3364 } 3365 3366 evt_entry = list_first_entry(&hw->evt_free_q, 3367 struct csio_evt_msg, list); 3368 list_del_init(&evt_entry->list); 3369 3370 /* copy event msg and queue the event */ 3371 evt_entry->type = type; 3372 3373 /* If Payload in SG list*/ 3374 if (msg_sg) { 3375 fl_sg = (struct csio_fl_dma_buf *) evt_msg; 3376 for (n = 0; (n < CSIO_MAX_FLBUF_PER_IQWR && off < len); n++) { 3377 memcpy((void *)((uintptr_t)evt_entry->data + off), 3378 fl_sg->flbufs[n].vaddr, 3379 fl_sg->flbufs[n].len); 3380 off += fl_sg->flbufs[n].len; 3381 } 3382 } else 3383 memcpy((void *)evt_entry->data, evt_msg, len); 3384 3385 list_add_tail(&evt_entry->list, &hw->evt_active_q); 3386 CSIO_DEC_STATS(hw, n_evt_freeq); 3387 
CSIO_INC_STATS(hw, n_evt_activeq); 3388 out: 3389 spin_unlock_irqrestore(&hw->lock, flags); 3390 return ret; 3391 } 3392 3393 static void 3394 csio_free_evt(struct csio_hw *hw, struct csio_evt_msg *evt_entry) 3395 { 3396 if (evt_entry) { 3397 spin_lock_irq(&hw->lock); 3398 list_del_init(&evt_entry->list); 3399 list_add_tail(&evt_entry->list, &hw->evt_free_q); 3400 CSIO_DEC_STATS(hw, n_evt_activeq); 3401 CSIO_INC_STATS(hw, n_evt_freeq); 3402 spin_unlock_irq(&hw->lock); 3403 } 3404 } 3405 3406 void 3407 csio_evtq_flush(struct csio_hw *hw) 3408 { 3409 uint32_t count; 3410 count = 30; 3411 while (hw->flags & CSIO_HWF_FWEVT_PENDING && count--) { 3412 spin_unlock_irq(&hw->lock); 3413 msleep(2000); 3414 spin_lock_irq(&hw->lock); 3415 } 3416 3417 CSIO_DB_ASSERT(!(hw->flags & CSIO_HWF_FWEVT_PENDING)); 3418 } 3419 3420 static void 3421 csio_evtq_stop(struct csio_hw *hw) 3422 { 3423 hw->flags |= CSIO_HWF_FWEVT_STOP; 3424 } 3425 3426 static void 3427 csio_evtq_start(struct csio_hw *hw) 3428 { 3429 hw->flags &= ~CSIO_HWF_FWEVT_STOP; 3430 } 3431 3432 static void 3433 csio_evtq_cleanup(struct csio_hw *hw) 3434 { 3435 struct list_head *evt_entry, *next_entry; 3436 3437 /* Release outstanding events from activeq to freeq*/ 3438 if (!list_empty(&hw->evt_active_q)) 3439 list_splice_tail_init(&hw->evt_active_q, &hw->evt_free_q); 3440 3441 hw->stats.n_evt_activeq = 0; 3442 hw->flags &= ~CSIO_HWF_FWEVT_PENDING; 3443 3444 /* Freeup event entry */ 3445 list_for_each_safe(evt_entry, next_entry, &hw->evt_free_q) { 3446 kfree(evt_entry); 3447 CSIO_DEC_STATS(hw, n_evt_freeq); 3448 } 3449 3450 hw->stats.n_evt_freeq = 0; 3451 } 3452 3453 3454 static void 3455 csio_process_fwevtq_entry(struct csio_hw *hw, void *wr, uint32_t len, 3456 struct csio_fl_dma_buf *flb, void *priv) 3457 { 3458 __u8 op; 3459 void *msg = NULL; 3460 uint32_t msg_len = 0; 3461 bool msg_sg = 0; 3462 3463 op = ((struct rss_header *) wr)->opcode; 3464 if (op == CPL_FW6_PLD) { 3465 CSIO_INC_STATS(hw, n_cpl_fw6_pld); 3466 if (!flb || !flb->totlen) { 3467 CSIO_INC_STATS(hw, n_cpl_unexp); 3468 return; 3469 } 3470 3471 msg = (void *) flb; 3472 msg_len = flb->totlen; 3473 msg_sg = 1; 3474 } else if (op == CPL_FW6_MSG || op == CPL_FW4_MSG) { 3475 3476 CSIO_INC_STATS(hw, n_cpl_fw6_msg); 3477 /* skip RSS header */ 3478 msg = (void *)((uintptr_t)wr + sizeof(__be64)); 3479 msg_len = (op == CPL_FW6_MSG) ? sizeof(struct cpl_fw6_msg) : 3480 sizeof(struct cpl_fw4_msg); 3481 } else { 3482 csio_warn(hw, "unexpected CPL %#x on FW event queue\n", op); 3483 CSIO_INC_STATS(hw, n_cpl_unexp); 3484 return; 3485 } 3486 3487 /* 3488 * Enqueue event to EventQ. 
Events processing happens 3489 * in Event worker thread context 3490 */ 3491 if (csio_enqueue_evt_lock(hw, CSIO_EVT_FW, msg, 3492 (uint16_t)msg_len, msg_sg)) 3493 CSIO_INC_STATS(hw, n_evt_drop); 3494 } 3495 3496 void 3497 csio_evtq_worker(struct work_struct *work) 3498 { 3499 struct csio_hw *hw = container_of(work, struct csio_hw, evtq_work); 3500 struct list_head *evt_entry, *next_entry; 3501 LIST_HEAD(evt_q); 3502 struct csio_evt_msg *evt_msg; 3503 struct cpl_fw6_msg *msg; 3504 struct csio_rnode *rn; 3505 int rv = 0; 3506 uint8_t evtq_stop = 0; 3507 3508 csio_dbg(hw, "event worker thread active evts#%d\n", 3509 hw->stats.n_evt_activeq); 3510 3511 spin_lock_irq(&hw->lock); 3512 while (!list_empty(&hw->evt_active_q)) { 3513 list_splice_tail_init(&hw->evt_active_q, &evt_q); 3514 spin_unlock_irq(&hw->lock); 3515 3516 list_for_each_safe(evt_entry, next_entry, &evt_q) { 3517 evt_msg = (struct csio_evt_msg *) evt_entry; 3518 3519 /* Drop events if queue is STOPPED */ 3520 spin_lock_irq(&hw->lock); 3521 if (hw->flags & CSIO_HWF_FWEVT_STOP) 3522 evtq_stop = 1; 3523 spin_unlock_irq(&hw->lock); 3524 if (evtq_stop) { 3525 CSIO_INC_STATS(hw, n_evt_drop); 3526 goto free_evt; 3527 } 3528 3529 switch (evt_msg->type) { 3530 case CSIO_EVT_FW: 3531 msg = (struct cpl_fw6_msg *)(evt_msg->data); 3532 3533 if ((msg->opcode == CPL_FW6_MSG || 3534 msg->opcode == CPL_FW4_MSG) && 3535 !msg->type) { 3536 rv = csio_mb_fwevt_handler(hw, 3537 msg->data); 3538 if (!rv) 3539 break; 3540 /* Handle any remaining fw events */ 3541 csio_fcoe_fwevt_handler(hw, 3542 msg->opcode, msg->data); 3543 } else if (msg->opcode == CPL_FW6_PLD) { 3544 3545 csio_fcoe_fwevt_handler(hw, 3546 msg->opcode, msg->data); 3547 } else { 3548 csio_warn(hw, 3549 "Unhandled FW msg op %x type %x\n", 3550 msg->opcode, msg->type); 3551 CSIO_INC_STATS(hw, n_evt_drop); 3552 } 3553 break; 3554 3555 case CSIO_EVT_MBX: 3556 csio_mberr_worker(hw); 3557 break; 3558 3559 case CSIO_EVT_DEV_LOSS: 3560 memcpy(&rn, evt_msg->data, sizeof(rn)); 3561 csio_rnode_devloss_handler(rn); 3562 break; 3563 3564 default: 3565 csio_warn(hw, "Unhandled event %x on evtq\n", 3566 evt_msg->type); 3567 CSIO_INC_STATS(hw, n_evt_unexp); 3568 break; 3569 } 3570 free_evt: 3571 csio_free_evt(hw, evt_msg); 3572 } 3573 3574 spin_lock_irq(&hw->lock); 3575 } 3576 hw->flags &= ~CSIO_HWF_FWEVT_PENDING; 3577 spin_unlock_irq(&hw->lock); 3578 } 3579 3580 int 3581 csio_fwevtq_handler(struct csio_hw *hw) 3582 { 3583 int rv; 3584 3585 if (csio_q_iqid(hw, hw->fwevt_iq_idx) == CSIO_MAX_QID) { 3586 CSIO_INC_STATS(hw, n_int_stray); 3587 return -EINVAL; 3588 } 3589 3590 rv = csio_wr_process_iq_idx(hw, hw->fwevt_iq_idx, 3591 csio_process_fwevtq_entry, NULL); 3592 return rv; 3593 } 3594 3595 /**************************************************************************** 3596 * Entry points 3597 ****************************************************************************/ 3598 3599 /* Management module */ 3600 /* 3601 * csio_mgmt_req_lookup - Check whether the given IO req exists in the Active Q. 3602 * @mgmtm - mgmt module 3603 * @io_req - io request 3604 * 3605 * Return - 0 if the given IO req exists in the active Q; 3606 * -EINVAL if the lookup fails.
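 * The active queue is expected to be short, so a simple linear walk
 * is sufficient here.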
3607 */ 3608 int 3609 csio_mgmt_req_lookup(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req) 3610 { 3611 struct list_head *tmp; 3612 3613 /* Lookup ioreq in the ACTIVEQ */ 3614 list_for_each(tmp, &mgmtm->active_q) { 3615 if (io_req == (struct csio_ioreq *)tmp) 3616 return 0; 3617 } 3618 return -EINVAL; 3619 } 3620 3621 #define ECM_MIN_TMO 1000 /* Minimum timeout value for req */ 3622 3623 /* 3624 * csio_mgmt_tmo_handler - MGMT IO Timeout handler. 3625 * @data - Event data. 3626 * 3627 * Return - none. 3628 */ 3629 static void 3630 csio_mgmt_tmo_handler(uintptr_t data) 3631 { 3632 struct csio_mgmtm *mgmtm = (struct csio_mgmtm *) data; 3633 struct list_head *tmp; 3634 struct csio_ioreq *io_req; 3635 3636 csio_dbg(mgmtm->hw, "Mgmt timer invoked!\n"); 3637 3638 spin_lock_irq(&mgmtm->hw->lock); 3639 3640 list_for_each(tmp, &mgmtm->active_q) { 3641 io_req = (struct csio_ioreq *) tmp; 3642 io_req->tmo -= min_t(uint32_t, io_req->tmo, ECM_MIN_TMO); 3643 3644 if (!io_req->tmo) { 3645 /* Dequeue the request from the active Q. */ 3646 tmp = csio_list_prev(tmp); 3647 list_del_init(&io_req->sm.sm_list); 3648 if (io_req->io_cbfn) { 3649 /* io_req will be freed by completion handler */ 3650 io_req->wr_status = -ETIMEDOUT; 3651 io_req->io_cbfn(mgmtm->hw, io_req); 3652 } else { 3653 CSIO_DB_ASSERT(0); 3654 } 3655 } 3656 } 3657 3658 /* If the active queue is not empty, re-arm the timer */ 3659 if (!list_empty(&mgmtm->active_q)) 3660 mod_timer(&mgmtm->mgmt_timer, 3661 jiffies + msecs_to_jiffies(ECM_MIN_TMO)); 3662 spin_unlock_irq(&mgmtm->hw->lock); 3663 } 3664 3665 static void 3666 csio_mgmtm_cleanup(struct csio_mgmtm *mgmtm) 3667 { 3668 struct csio_hw *hw = mgmtm->hw; 3669 struct csio_ioreq *io_req; 3670 struct list_head *tmp; 3671 uint32_t count; 3672 3673 count = 30; 3674 /* Wait for all outstanding req to complete gracefully */ 3675 while ((!list_empty(&mgmtm->active_q)) && count--) { 3676 spin_unlock_irq(&hw->lock); 3677 msleep(2000); 3678 spin_lock_irq(&hw->lock); 3679 } 3680 3681 /* release outstanding req from ACTIVEQ */ 3682 list_for_each(tmp, &mgmtm->active_q) { 3683 io_req = (struct csio_ioreq *) tmp; 3684 tmp = csio_list_prev(tmp); 3685 list_del_init(&io_req->sm.sm_list); 3686 mgmtm->stats.n_active--; 3687 if (io_req->io_cbfn) { 3688 /* io_req will be freed by completion handler */ 3689 io_req->wr_status = -ETIMEDOUT; 3690 io_req->io_cbfn(mgmtm->hw, io_req); 3691 } 3692 } 3693 } 3694 3695 /* 3696 * csio_mgmtm_init - Mgmt module init entry point 3697 * @mgmtm - mgmt module 3698 * @hw - HW module 3699 * 3700 * Initialize mgmt timer, resource wait queue, active queue, 3701 * completion q. Allocate Egress and Ingress 3702 * WR queues and save off the queue index returned by the WR 3703 * module for future use. Allocate and save off mgmt reqs in the 3704 * mgmt_req_freelist for future use. Make sure their SM is initialized 3705 * to uninit state. 3706 * Returns: 0 - on success 3707 * -ENOMEM - on error. 3708 */ 3709 static int 3710 csio_mgmtm_init(struct csio_mgmtm *mgmtm, struct csio_hw *hw) 3711 { 3712 struct timer_list *timer = &mgmtm->mgmt_timer; 3713 3714 init_timer(timer); 3715 timer->function = csio_mgmt_tmo_handler; 3716 timer->data = (unsigned long)mgmtm; 3717 3718 INIT_LIST_HEAD(&mgmtm->active_q); 3719 INIT_LIST_HEAD(&mgmtm->cbfn_q); 3720 3721 mgmtm->hw = hw; 3722 /*mgmtm->iq_idx = hw->fwevt_iq_idx;*/ 3723 3724 return 0; 3725 } 3726 3727 /* 3728 * csio_mgmtm_exit - MGMT module exit entry point 3729 * @mgmtm - mgmt module 3730 * 3731 * This function is called during MGMT module uninit.
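 * By this point outstanding requests should already have been drained
 * by csio_mgmtm_cleanup() on the quiesce path.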
3732 * Stops the management timer. 3733 * Returns: None 3734 * 3735 */ 3736 static void 3737 csio_mgmtm_exit(struct csio_mgmtm *mgmtm) 3738 { 3739 del_timer_sync(&mgmtm->mgmt_timer); 3740 } 3741 3742 3743 /** 3744 * csio_hw_start - Kicks off the HW State machine 3745 * @hw: Pointer to HW module. 3746 * 3747 * It is assumed that the initialization is a synchronous operation. 3748 * So when we return after posting the event, the HW SM should be in 3749 * the ready state, if there were no errors during init. 3750 */ 3751 int 3752 csio_hw_start(struct csio_hw *hw) 3753 { 3754 spin_lock_irq(&hw->lock); 3755 csio_post_event(&hw->sm, CSIO_HWE_CFG); 3756 spin_unlock_irq(&hw->lock); 3757 3758 if (csio_is_hw_ready(hw)) 3759 return 0; 3760 else 3761 return -EINVAL; 3762 } 3763 3764 int 3765 csio_hw_stop(struct csio_hw *hw) 3766 { 3767 csio_post_event(&hw->sm, CSIO_HWE_PCI_REMOVE); 3768 3769 if (csio_is_hw_removing(hw)) 3770 return 0; 3771 else 3772 return -EINVAL; 3773 } 3774 3775 /* Max reset retries */ 3776 #define CSIO_MAX_RESET_RETRIES 3 3777 3778 /** 3779 * csio_hw_reset - Reset the hardware 3780 * @hw: HW module. 3781 * 3782 * Caller should hold lock across this function. 3783 */ 3784 int 3785 csio_hw_reset(struct csio_hw *hw) 3786 { 3787 if (!csio_is_hw_master(hw)) 3788 return -EPERM; 3789 3790 if (hw->rst_retries >= CSIO_MAX_RESET_RETRIES) { 3791 csio_dbg(hw, "Max hw reset attempts reached.."); 3792 return -EINVAL; 3793 } 3794 3795 hw->rst_retries++; 3796 csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET); 3797 3798 if (csio_is_hw_ready(hw)) { 3799 hw->rst_retries = 0; 3800 hw->stats.n_reset_start = jiffies_to_msecs(jiffies); 3801 return 0; 3802 } else 3803 return -EINVAL; 3804 } 3805 3806 /* 3807 * csio_hw_get_device_id - Caches the Adapter's vendor & device id. 3808 * @hw: HW module. 3809 */ 3810 static void 3811 csio_hw_get_device_id(struct csio_hw *hw) 3812 { 3813 /* Is the adapter device id cached already? */ 3814 if (csio_is_dev_id_cached(hw)) 3815 return; 3816 3817 /* Get the PCI vendor & device id */ 3818 pci_read_config_word(hw->pdev, PCI_VENDOR_ID, 3819 &hw->params.pci.vendor_id); 3820 pci_read_config_word(hw->pdev, PCI_DEVICE_ID, 3821 &hw->params.pci.device_id); 3822 3823 csio_dev_id_cached(hw); 3824 hw->chip_id = (hw->params.pci.device_id & CSIO_HW_CHIP_MASK); 3825 3826 } /* csio_hw_get_device_id */ 3827 3828 /* 3829 * csio_hw_set_description - Set the model, description of the hw. 3830 * @hw: HW module. 3831 * @ven_id: PCI Vendor ID 3832 * @dev_id: PCI Device ID 3833 */ 3834 static void 3835 csio_hw_set_description(struct csio_hw *hw, uint16_t ven_id, uint16_t dev_id) 3836 { 3837 uint32_t adap_type, prot_type; 3838 3839 if (ven_id == CSIO_VENDOR_ID) { 3840 prot_type = (dev_id & CSIO_ASIC_DEVID_PROTO_MASK); 3841 adap_type = (dev_id & CSIO_ASIC_DEVID_TYPE_MASK); 3842 3843 if (prot_type == CSIO_T4_FCOE_ASIC) { 3844 memcpy(hw->hw_ver, 3845 csio_t4_fcoe_adapters[adap_type].model_no, 16); 3846 memcpy(hw->model_desc, 3847 csio_t4_fcoe_adapters[adap_type].description, 3848 32); 3849 } else if (prot_type == CSIO_T5_FCOE_ASIC) { 3850 memcpy(hw->hw_ver, 3851 csio_t5_fcoe_adapters[adap_type].model_no, 16); 3852 memcpy(hw->model_desc, 3853 csio_t5_fcoe_adapters[adap_type].description, 3854 32); 3855 } else { 3856 char tempName[32] = "Chelsio FCoE Controller"; 3857 memcpy(hw->model_desc, tempName, 32); 3858 } 3859 } 3860 } /* csio_hw_set_description */ 3861 3862 /** 3863 * csio_hw_init - Initialize HW module. 3864 * @hw: Pointer to HW module.
3865 * 3866 * Initialize the members of the HW module. 3867 */ 3868 int 3869 csio_hw_init(struct csio_hw *hw) 3870 { 3871 int rv = -EINVAL; 3872 uint32_t i; 3873 uint16_t ven_id, dev_id; 3874 struct csio_evt_msg *evt_entry; 3875 3876 INIT_LIST_HEAD(&hw->sm.sm_list); 3877 csio_init_state(&hw->sm, csio_hws_uninit); 3878 spin_lock_init(&hw->lock); 3879 INIT_LIST_HEAD(&hw->sln_head); 3880 3881 /* Get the PCI vendor & device id */ 3882 csio_hw_get_device_id(hw); 3883 3884 strcpy(hw->name, CSIO_HW_NAME); 3885 3886 /* Initialize the HW chip ops with T4/T5 specific ops */ 3887 hw->chip_ops = csio_is_t4(hw->chip_id) ? &t4_ops : &t5_ops; 3888 3889 /* Set the model & its description */ 3890 3891 ven_id = hw->params.pci.vendor_id; 3892 dev_id = hw->params.pci.device_id; 3893 3894 csio_hw_set_description(hw, ven_id, dev_id); 3895 3896 /* Initialize default log level */ 3897 hw->params.log_level = (uint32_t) csio_dbg_level; 3898 3899 csio_set_fwevt_intr_idx(hw, -1); 3900 csio_set_nondata_intr_idx(hw, -1); 3901 3902 /* Init all the modules: Mailbox, WorkRequest and Transport */ 3903 if (csio_mbm_init(csio_hw_to_mbm(hw), hw, csio_hw_mb_timer)) 3904 goto err; 3905 3906 rv = csio_wrm_init(csio_hw_to_wrm(hw), hw); 3907 if (rv) 3908 goto err_mbm_exit; 3909 3910 rv = csio_scsim_init(csio_hw_to_scsim(hw), hw); 3911 if (rv) 3912 goto err_wrm_exit; 3913 3914 rv = csio_mgmtm_init(csio_hw_to_mgmtm(hw), hw); 3915 if (rv) 3916 goto err_scsim_exit; 3917 /* Pre-allocate evtq and initialize them */ 3918 INIT_LIST_HEAD(&hw->evt_active_q); 3919 INIT_LIST_HEAD(&hw->evt_free_q); 3920 for (i = 0; i < csio_evtq_sz; i++) { 3921 3922 evt_entry = kzalloc(sizeof(struct csio_evt_msg), GFP_KERNEL); 3923 if (!evt_entry) { 3924 csio_err(hw, "Failed to initialize eventq"); 3925 goto err_evtq_cleanup; 3926 } 3927 3928 list_add_tail(&evt_entry->list, &hw->evt_free_q); 3929 CSIO_INC_STATS(hw, n_evt_freeq); 3930 } 3931 3932 hw->dev_num = dev_num; 3933 dev_num++; 3934 3935 return 0; 3936 3937 err_evtq_cleanup: 3938 csio_evtq_cleanup(hw); 3939 csio_mgmtm_exit(csio_hw_to_mgmtm(hw)); 3940 err_scsim_exit: 3941 csio_scsim_exit(csio_hw_to_scsim(hw)); 3942 err_wrm_exit: 3943 csio_wrm_exit(csio_hw_to_wrm(hw), hw); 3944 err_mbm_exit: 3945 csio_mbm_exit(csio_hw_to_mbm(hw)); 3946 err: 3947 return rv; 3948 } 3949 3950 /** 3951 * csio_hw_exit - Un-initialize HW module. 3952 * @hw: Pointer to HW module. 3953 * 3954 */ 3955 void 3956 csio_hw_exit(struct csio_hw *hw) 3957 { 3958 csio_evtq_cleanup(hw); 3959 csio_mgmtm_exit(csio_hw_to_mgmtm(hw)); 3960 csio_scsim_exit(csio_hw_to_scsim(hw)); 3961 csio_wrm_exit(csio_hw_to_wrm(hw), hw); 3962 csio_mbm_exit(csio_hw_to_mbm(hw)); 3963 } 3964