1 /* 2 * This file is part of the Chelsio FCoE driver for Linux. 3 * 4 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. 5 * 6 * This software is available to you under a choice of one of two 7 * licenses. You may choose to be licensed under the terms of the GNU 8 * General Public License (GPL) Version 2, available from the file 9 * COPYING in the main directory of this source tree, or the 10 * OpenIB.org BSD license below: 11 * 12 * Redistribution and use in source and binary forms, with or 13 * without modification, are permitted provided that the following 14 * conditions are met: 15 * 16 * - Redistributions of source code must retain the above 17 * copyright notice, this list of conditions and the following 18 * disclaimer. 19 * 20 * - Redistributions in binary form must reproduce the above 21 * copyright notice, this list of conditions and the following 22 * disclaimer in the documentation and/or other materials 23 * provided with the distribution. 24 * 25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 32 * SOFTWARE. 33 */ 34 35 #include <linux/pci.h> 36 #include <linux/pci_regs.h> 37 #include <linux/firmware.h> 38 #include <linux/stddef.h> 39 #include <linux/delay.h> 40 #include <linux/string.h> 41 #include <linux/compiler.h> 42 #include <linux/jiffies.h> 43 #include <linux/kernel.h> 44 #include <linux/log2.h> 45 46 #include "csio_hw.h" 47 #include "csio_lnode.h" 48 #include "csio_rnode.h" 49 50 int csio_force_master; 51 int csio_dbg_level = 0xFEFF; 52 unsigned int csio_port_mask = 0xf; 53 54 /* Default FW event queue entries. */ 55 static uint32_t csio_evtq_sz = CSIO_EVTQ_SIZE; 56 57 /* Default MSI param level */ 58 int csio_msi = 2; 59 60 /* FCoE function instances */ 61 static int dev_num; 62 63 /* FCoE Adapter types & its description */ 64 static const struct csio_adap_desc csio_t4_fcoe_adapters[] = { 65 {"T440-Dbg 10G", "Chelsio T440-Dbg 10G [FCoE]"}, 66 {"T420-CR 10G", "Chelsio T420-CR 10G [FCoE]"}, 67 {"T422-CR 10G/1G", "Chelsio T422-CR 10G/1G [FCoE]"}, 68 {"T440-CR 10G", "Chelsio T440-CR 10G [FCoE]"}, 69 {"T420-BCH 10G", "Chelsio T420-BCH 10G [FCoE]"}, 70 {"T440-BCH 10G", "Chelsio T440-BCH 10G [FCoE]"}, 71 {"T440-CH 10G", "Chelsio T440-CH 10G [FCoE]"}, 72 {"T420-SO 10G", "Chelsio T420-SO 10G [FCoE]"}, 73 {"T420-CX4 10G", "Chelsio T420-CX4 10G [FCoE]"}, 74 {"T420-BT 10G", "Chelsio T420-BT 10G [FCoE]"}, 75 {"T404-BT 1G", "Chelsio T404-BT 1G [FCoE]"}, 76 {"B420-SR 10G", "Chelsio B420-SR 10G [FCoE]"}, 77 {"B404-BT 1G", "Chelsio B404-BT 1G [FCoE]"}, 78 {"T480-CR 10G", "Chelsio T480-CR 10G [FCoE]"}, 79 {"T440-LP-CR 10G", "Chelsio T440-LP-CR 10G [FCoE]"}, 80 {"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"}, 81 {"HUAWEI T480 10G", "Chelsio HUAWEI T480 10G [FCoE]"}, 82 {"HUAWEI T440 10G", "Chelsio HUAWEI T440 10G [FCoE]"}, 83 {"HUAWEI STG 10G", "Chelsio HUAWEI STG 10G [FCoE]"}, 84 {"ACROMAG XAUI 10G", "Chelsio ACROMAG XAUI 10G [FCoE]"}, 85 {"ACROMAG SFP+ 10G", "Chelsio ACROMAG SFP+ 10G [FCoE]"}, 86 {"QUANTA SFP+ 10G", "Chelsio QUANTA SFP+ 10G [FCoE]"}, 87 {"HUAWEI 10Gbase-T", "Chelsio HUAWEI 10Gbase-T [FCoE]"}, 88 {"HUAWEI T4TOE 10G", "Chelsio HUAWEI T4TOE 10G [FCoE]"} 89 }; 90 91 static const struct csio_adap_desc csio_t5_fcoe_adapters[] = { 92 {"T580-Dbg 10G", "Chelsio T580-Dbg 10G [FCoE]"}, 93 {"T520-CR 10G", "Chelsio T520-CR 10G [FCoE]"}, 94 {"T522-CR 10G/1G", "Chelsio T452-CR 10G/1G [FCoE]"}, 95 {"T540-CR 10G", "Chelsio T540-CR 10G [FCoE]"}, 96 {"T520-BCH 10G", "Chelsio T520-BCH 10G [FCoE]"}, 97 {"T540-BCH 10G", "Chelsio T540-BCH 10G [FCoE]"}, 98 {"T540-CH 10G", "Chelsio T540-CH 10G [FCoE]"}, 99 {"T520-SO 10G", "Chelsio T520-SO 10G [FCoE]"}, 100 {"T520-CX4 10G", "Chelsio T520-CX4 10G [FCoE]"}, 101 {"T520-BT 10G", "Chelsio T520-BT 10G [FCoE]"}, 102 {"T504-BT 1G", "Chelsio T504-BT 1G [FCoE]"}, 103 {"B520-SR 10G", "Chelsio B520-SR 10G [FCoE]"}, 104 {"B504-BT 1G", "Chelsio B504-BT 1G [FCoE]"}, 105 {"T580-CR 10G", "Chelsio T580-CR 10G [FCoE]"}, 106 {"T540-LP-CR 10G", "Chelsio T540-LP-CR 10G [FCoE]"}, 107 {"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"}, 108 {"T580-LP-CR 40G", "Chelsio T580-LP-CR 40G [FCoE]"}, 109 {"T520-LL-CR 10G", "Chelsio T520-LL-CR 10G [FCoE]"}, 110 {"T560-CR 40G", "Chelsio T560-CR 40G [FCoE]"}, 111 {"T580-CR 40G", "Chelsio T580-CR 40G [FCoE]"} 112 }; 113 114 static void csio_mgmtm_cleanup(struct csio_mgmtm *); 115 static void csio_hw_mbm_cleanup(struct csio_hw *); 116 117 /* State machine forward declarations */ 118 static void csio_hws_uninit(struct csio_hw *, enum csio_hw_ev); 119 static void csio_hws_configuring(struct csio_hw *, enum csio_hw_ev); 120 static void csio_hws_initializing(struct csio_hw *, enum csio_hw_ev); 121 static void csio_hws_ready(struct csio_hw *, enum csio_hw_ev); 122 static void csio_hws_quiescing(struct csio_hw *, enum csio_hw_ev); 123 static void csio_hws_quiesced(struct csio_hw *, enum csio_hw_ev); 124 static void csio_hws_resetting(struct csio_hw *, enum csio_hw_ev); 125 static void csio_hws_removing(struct csio_hw *, enum csio_hw_ev); 126 static void csio_hws_pcierr(struct csio_hw *, enum csio_hw_ev); 127 128 static void csio_hw_initialize(struct csio_hw *hw); 129 static void csio_evtq_stop(struct csio_hw *hw); 130 static void csio_evtq_start(struct csio_hw *hw); 131 132 int csio_is_hw_ready(struct csio_hw *hw) 133 { 134 return csio_match_state(hw, csio_hws_ready); 135 } 136 137 int csio_is_hw_removing(struct csio_hw *hw) 138 { 139 return csio_match_state(hw, csio_hws_removing); 140 } 141 142 143 /* 144 * csio_hw_wait_op_done_val - wait until an operation is completed 145 * @hw: the HW module 146 * @reg: the register to check for completion 147 * @mask: a single-bit field within @reg that indicates completion 148 * @polarity: the value of the field when the operation is completed 149 * @attempts: number of check iterations 150 * @delay: delay in usecs between iterations 151 * @valp: where to store the value of the register at completion time 152 * 153 * Wait until an operation is completed by checking a bit in a register 154 * up to @attempts times. If @valp is not NULL the value of the register 155 * at the time it indicated completion is stored there. Returns 0 if the 156 * operation completes and -EAGAIN otherwise. 157 */ 158 int 159 csio_hw_wait_op_done_val(struct csio_hw *hw, int reg, uint32_t mask, 160 int polarity, int attempts, int delay, uint32_t *valp) 161 { 162 uint32_t val; 163 while (1) { 164 val = csio_rd_reg32(hw, reg); 165 166 if (!!(val & mask) == polarity) { 167 if (valp) 168 *valp = val; 169 return 0; 170 } 171 172 if (--attempts == 0) 173 return -EAGAIN; 174 if (delay) 175 udelay(delay); 176 } 177 } 178 179 /* 180 * csio_hw_tp_wr_bits_indirect - set/clear bits in an indirect TP register 181 * @hw: the adapter 182 * @addr: the indirect TP register address 183 * @mask: specifies the field within the register to modify 184 * @val: new value for the field 185 * 186 * Sets a field of an indirect TP register to the given value. 187 */ 188 void 189 csio_hw_tp_wr_bits_indirect(struct csio_hw *hw, unsigned int addr, 190 unsigned int mask, unsigned int val) 191 { 192 csio_wr_reg32(hw, addr, TP_PIO_ADDR); 193 val |= csio_rd_reg32(hw, TP_PIO_DATA) & ~mask; 194 csio_wr_reg32(hw, val, TP_PIO_DATA); 195 } 196 197 void 198 csio_set_reg_field(struct csio_hw *hw, uint32_t reg, uint32_t mask, 199 uint32_t value) 200 { 201 uint32_t val = csio_rd_reg32(hw, reg) & ~mask; 202 203 csio_wr_reg32(hw, val | value, reg); 204 /* Flush */ 205 csio_rd_reg32(hw, reg); 206 207 } 208 209 static int 210 csio_memory_write(struct csio_hw *hw, int mtype, u32 addr, u32 len, u32 *buf) 211 { 212 return hw->chip_ops->chip_memory_rw(hw, MEMWIN_CSIOSTOR, mtype, 213 addr, len, buf, 0); 214 } 215 216 /* 217 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms. 218 */ 219 #define EEPROM_MAX_RD_POLL 40 220 #define EEPROM_MAX_WR_POLL 6 221 #define EEPROM_STAT_ADDR 0x7bfc 222 #define VPD_BASE 0x400 223 #define VPD_BASE_OLD 0 224 #define VPD_LEN 1024 225 #define VPD_INFO_FLD_HDR_SIZE 3 226 227 /* 228 * csio_hw_seeprom_read - read a serial EEPROM location 229 * @hw: hw to read 230 * @addr: EEPROM virtual address 231 * @data: where to store the read data 232 * 233 * Read a 32-bit word from a location in serial EEPROM using the card's PCI 234 * VPD capability. Note that this function must be called with a virtual 235 * address. 236 */ 237 static int 238 csio_hw_seeprom_read(struct csio_hw *hw, uint32_t addr, uint32_t *data) 239 { 240 uint16_t val = 0; 241 int attempts = EEPROM_MAX_RD_POLL; 242 uint32_t base = hw->params.pci.vpd_cap_addr; 243 244 if (addr >= EEPROMVSIZE || (addr & 3)) 245 return -EINVAL; 246 247 pci_write_config_word(hw->pdev, base + PCI_VPD_ADDR, (uint16_t)addr); 248 249 do { 250 udelay(10); 251 pci_read_config_word(hw->pdev, base + PCI_VPD_ADDR, &val); 252 } while (!(val & PCI_VPD_ADDR_F) && --attempts); 253 254 if (!(val & PCI_VPD_ADDR_F)) { 255 csio_err(hw, "reading EEPROM address 0x%x failed\n", addr); 256 return -EINVAL; 257 } 258 259 pci_read_config_dword(hw->pdev, base + PCI_VPD_DATA, data); 260 *data = le32_to_cpu(*data); 261 262 return 0; 263 } 264 265 /* 266 * Partial EEPROM Vital Product Data structure. Includes only the ID and 267 * VPD-R sections. 268 */ 269 struct t4_vpd_hdr { 270 u8 id_tag; 271 u8 id_len[2]; 272 u8 id_data[ID_LEN]; 273 u8 vpdr_tag; 274 u8 vpdr_len[2]; 275 }; 276 277 /* 278 * csio_hw_get_vpd_keyword_val - Locates an information field keyword in 279 * the VPD 280 * @v: Pointer to buffered vpd data structure 281 * @kw: The keyword to search for 282 * 283 * Returns the value of the information field keyword or 284 * -EINVAL otherwise. 285 */ 286 static int 287 csio_hw_get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw) 288 { 289 int32_t i; 290 int32_t offset , len; 291 const uint8_t *buf = &v->id_tag; 292 const uint8_t *vpdr_len = &v->vpdr_tag; 293 offset = sizeof(struct t4_vpd_hdr); 294 len = (uint16_t)vpdr_len[1] + ((uint16_t)vpdr_len[2] << 8); 295 296 if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN) 297 return -EINVAL; 298 299 for (i = offset; (i + VPD_INFO_FLD_HDR_SIZE) <= (offset + len);) { 300 if (memcmp(buf + i , kw, 2) == 0) { 301 i += VPD_INFO_FLD_HDR_SIZE; 302 return i; 303 } 304 305 i += VPD_INFO_FLD_HDR_SIZE + buf[i+2]; 306 } 307 308 return -EINVAL; 309 } 310 311 static int 312 csio_pci_capability(struct pci_dev *pdev, int cap, int *pos) 313 { 314 *pos = pci_find_capability(pdev, cap); 315 if (*pos) 316 return 0; 317 318 return -1; 319 } 320 321 /* 322 * csio_hw_get_vpd_params - read VPD parameters from VPD EEPROM 323 * @hw: HW module 324 * @p: where to store the parameters 325 * 326 * Reads card parameters stored in VPD EEPROM. 327 */ 328 static int 329 csio_hw_get_vpd_params(struct csio_hw *hw, struct csio_vpd *p) 330 { 331 int i, ret, ec, sn, addr; 332 uint8_t *vpd, csum; 333 const struct t4_vpd_hdr *v; 334 /* To get around compilation warning from strstrip */ 335 char *s; 336 337 if (csio_is_valid_vpd(hw)) 338 return 0; 339 340 ret = csio_pci_capability(hw->pdev, PCI_CAP_ID_VPD, 341 &hw->params.pci.vpd_cap_addr); 342 if (ret) 343 return -EINVAL; 344 345 vpd = kzalloc(VPD_LEN, GFP_ATOMIC); 346 if (vpd == NULL) 347 return -ENOMEM; 348 349 /* 350 * Card information normally starts at VPD_BASE but early cards had 351 * it at 0. 352 */ 353 ret = csio_hw_seeprom_read(hw, VPD_BASE, (uint32_t *)(vpd)); 354 addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD; 355 356 for (i = 0; i < VPD_LEN; i += 4) { 357 ret = csio_hw_seeprom_read(hw, addr + i, (uint32_t *)(vpd + i)); 358 if (ret) { 359 kfree(vpd); 360 return ret; 361 } 362 } 363 364 /* Reset the VPD flag! */ 365 hw->flags &= (~CSIO_HWF_VPD_VALID); 366 367 v = (const struct t4_vpd_hdr *)vpd; 368 369 #define FIND_VPD_KW(var, name) do { \ 370 var = csio_hw_get_vpd_keyword_val(v, name); \ 371 if (var < 0) { \ 372 csio_err(hw, "missing VPD keyword " name "\n"); \ 373 kfree(vpd); \ 374 return -EINVAL; \ 375 } \ 376 } while (0) 377 378 FIND_VPD_KW(i, "RV"); 379 for (csum = 0; i >= 0; i--) 380 csum += vpd[i]; 381 382 if (csum) { 383 csio_err(hw, "corrupted VPD EEPROM, actual csum %u\n", csum); 384 kfree(vpd); 385 return -EINVAL; 386 } 387 FIND_VPD_KW(ec, "EC"); 388 FIND_VPD_KW(sn, "SN"); 389 #undef FIND_VPD_KW 390 391 memcpy(p->id, v->id_data, ID_LEN); 392 s = strstrip(p->id); 393 memcpy(p->ec, vpd + ec, EC_LEN); 394 s = strstrip(p->ec); 395 i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2]; 396 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN)); 397 s = strstrip(p->sn); 398 399 csio_valid_vpd_copied(hw); 400 401 kfree(vpd); 402 return 0; 403 } 404 405 /* 406 * csio_hw_sf1_read - read data from the serial flash 407 * @hw: the HW module 408 * @byte_cnt: number of bytes to read 409 * @cont: whether another operation will be chained 410 * @lock: whether to lock SF for PL access only 411 * @valp: where to store the read data 412 * 413 * Reads up to 4 bytes of data from the serial flash. The location of 414 * the read needs to be specified prior to calling this by issuing the 415 * appropriate commands to the serial flash. 416 */ 417 static int 418 csio_hw_sf1_read(struct csio_hw *hw, uint32_t byte_cnt, int32_t cont, 419 int32_t lock, uint32_t *valp) 420 { 421 int ret; 422 423 if (!byte_cnt || byte_cnt > 4) 424 return -EINVAL; 425 if (csio_rd_reg32(hw, SF_OP) & SF_BUSY) 426 return -EBUSY; 427 428 cont = cont ? SF_CONT : 0; 429 lock = lock ? SF_LOCK : 0; 430 431 csio_wr_reg32(hw, lock | cont | BYTECNT(byte_cnt - 1), SF_OP); 432 ret = csio_hw_wait_op_done_val(hw, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 433 10, NULL); 434 if (!ret) 435 *valp = csio_rd_reg32(hw, SF_DATA); 436 return ret; 437 } 438 439 /* 440 * csio_hw_sf1_write - write data to the serial flash 441 * @hw: the HW module 442 * @byte_cnt: number of bytes to write 443 * @cont: whether another operation will be chained 444 * @lock: whether to lock SF for PL access only 445 * @val: value to write 446 * 447 * Writes up to 4 bytes of data to the serial flash. The location of 448 * the write needs to be specified prior to calling this by issuing the 449 * appropriate commands to the serial flash. 450 */ 451 static int 452 csio_hw_sf1_write(struct csio_hw *hw, uint32_t byte_cnt, uint32_t cont, 453 int32_t lock, uint32_t val) 454 { 455 if (!byte_cnt || byte_cnt > 4) 456 return -EINVAL; 457 if (csio_rd_reg32(hw, SF_OP) & SF_BUSY) 458 return -EBUSY; 459 460 cont = cont ? SF_CONT : 0; 461 lock = lock ? SF_LOCK : 0; 462 463 csio_wr_reg32(hw, val, SF_DATA); 464 csio_wr_reg32(hw, cont | BYTECNT(byte_cnt - 1) | OP_WR | lock, SF_OP); 465 466 return csio_hw_wait_op_done_val(hw, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 467 10, NULL); 468 } 469 470 /* 471 * csio_hw_flash_wait_op - wait for a flash operation to complete 472 * @hw: the HW module 473 * @attempts: max number of polls of the status register 474 * @delay: delay between polls in ms 475 * 476 * Wait for a flash operation to complete by polling the status register. 477 */ 478 static int 479 csio_hw_flash_wait_op(struct csio_hw *hw, int32_t attempts, int32_t delay) 480 { 481 int ret; 482 uint32_t status; 483 484 while (1) { 485 ret = csio_hw_sf1_write(hw, 1, 1, 1, SF_RD_STATUS); 486 if (ret != 0) 487 return ret; 488 489 ret = csio_hw_sf1_read(hw, 1, 0, 1, &status); 490 if (ret != 0) 491 return ret; 492 493 if (!(status & 1)) 494 return 0; 495 if (--attempts == 0) 496 return -EAGAIN; 497 if (delay) 498 msleep(delay); 499 } 500 } 501 502 /* 503 * csio_hw_read_flash - read words from serial flash 504 * @hw: the HW module 505 * @addr: the start address for the read 506 * @nwords: how many 32-bit words to read 507 * @data: where to store the read data 508 * @byte_oriented: whether to store data as bytes or as words 509 * 510 * Read the specified number of 32-bit words from the serial flash. 511 * If @byte_oriented is set the read data is stored as a byte array 512 * (i.e., big-endian), otherwise as 32-bit words in the platform's 513 * natural endianess. 514 */ 515 static int 516 csio_hw_read_flash(struct csio_hw *hw, uint32_t addr, uint32_t nwords, 517 uint32_t *data, int32_t byte_oriented) 518 { 519 int ret; 520 521 if (addr + nwords * sizeof(uint32_t) > hw->params.sf_size || (addr & 3)) 522 return -EINVAL; 523 524 addr = swab32(addr) | SF_RD_DATA_FAST; 525 526 ret = csio_hw_sf1_write(hw, 4, 1, 0, addr); 527 if (ret != 0) 528 return ret; 529 530 ret = csio_hw_sf1_read(hw, 1, 1, 0, data); 531 if (ret != 0) 532 return ret; 533 534 for ( ; nwords; nwords--, data++) { 535 ret = csio_hw_sf1_read(hw, 4, nwords > 1, nwords == 1, data); 536 if (nwords == 1) 537 csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */ 538 if (ret) 539 return ret; 540 if (byte_oriented) 541 *data = htonl(*data); 542 } 543 return 0; 544 } 545 546 /* 547 * csio_hw_write_flash - write up to a page of data to the serial flash 548 * @hw: the hw 549 * @addr: the start address to write 550 * @n: length of data to write in bytes 551 * @data: the data to write 552 * 553 * Writes up to a page of data (256 bytes) to the serial flash starting 554 * at the given address. All the data must be written to the same page. 555 */ 556 static int 557 csio_hw_write_flash(struct csio_hw *hw, uint32_t addr, 558 uint32_t n, const uint8_t *data) 559 { 560 int ret = -EINVAL; 561 uint32_t buf[64]; 562 uint32_t i, c, left, val, offset = addr & 0xff; 563 564 if (addr >= hw->params.sf_size || offset + n > SF_PAGE_SIZE) 565 return -EINVAL; 566 567 val = swab32(addr) | SF_PROG_PAGE; 568 569 ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE); 570 if (ret != 0) 571 goto unlock; 572 573 ret = csio_hw_sf1_write(hw, 4, 1, 1, val); 574 if (ret != 0) 575 goto unlock; 576 577 for (left = n; left; left -= c) { 578 c = min(left, 4U); 579 for (val = 0, i = 0; i < c; ++i) 580 val = (val << 8) + *data++; 581 582 ret = csio_hw_sf1_write(hw, c, c != left, 1, val); 583 if (ret) 584 goto unlock; 585 } 586 ret = csio_hw_flash_wait_op(hw, 8, 1); 587 if (ret) 588 goto unlock; 589 590 csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */ 591 592 /* Read the page to verify the write succeeded */ 593 ret = csio_hw_read_flash(hw, addr & ~0xff, ARRAY_SIZE(buf), buf, 1); 594 if (ret) 595 return ret; 596 597 if (memcmp(data - n, (uint8_t *)buf + offset, n)) { 598 csio_err(hw, 599 "failed to correctly write the flash page at %#x\n", 600 addr); 601 return -EINVAL; 602 } 603 604 return 0; 605 606 unlock: 607 csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */ 608 return ret; 609 } 610 611 /* 612 * csio_hw_flash_erase_sectors - erase a range of flash sectors 613 * @hw: the HW module 614 * @start: the first sector to erase 615 * @end: the last sector to erase 616 * 617 * Erases the sectors in the given inclusive range. 618 */ 619 static int 620 csio_hw_flash_erase_sectors(struct csio_hw *hw, int32_t start, int32_t end) 621 { 622 int ret = 0; 623 624 while (start <= end) { 625 626 ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE); 627 if (ret != 0) 628 goto out; 629 630 ret = csio_hw_sf1_write(hw, 4, 0, 1, 631 SF_ERASE_SECTOR | (start << 8)); 632 if (ret != 0) 633 goto out; 634 635 ret = csio_hw_flash_wait_op(hw, 14, 500); 636 if (ret != 0) 637 goto out; 638 639 start++; 640 } 641 out: 642 if (ret) 643 csio_err(hw, "erase of flash sector %d failed, error %d\n", 644 start, ret); 645 csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */ 646 return 0; 647 } 648 649 static void 650 csio_hw_print_fw_version(struct csio_hw *hw, char *str) 651 { 652 csio_info(hw, "%s: %u.%u.%u.%u\n", str, 653 FW_HDR_FW_VER_MAJOR_GET(hw->fwrev), 654 FW_HDR_FW_VER_MINOR_GET(hw->fwrev), 655 FW_HDR_FW_VER_MICRO_GET(hw->fwrev), 656 FW_HDR_FW_VER_BUILD_GET(hw->fwrev)); 657 } 658 659 /* 660 * csio_hw_get_fw_version - read the firmware version 661 * @hw: HW module 662 * @vers: where to place the version 663 * 664 * Reads the FW version from flash. 665 */ 666 static int 667 csio_hw_get_fw_version(struct csio_hw *hw, uint32_t *vers) 668 { 669 return csio_hw_read_flash(hw, FW_IMG_START + 670 offsetof(struct fw_hdr, fw_ver), 1, 671 vers, 0); 672 } 673 674 /* 675 * csio_hw_get_tp_version - read the TP microcode version 676 * @hw: HW module 677 * @vers: where to place the version 678 * 679 * Reads the TP microcode version from flash. 680 */ 681 static int 682 csio_hw_get_tp_version(struct csio_hw *hw, u32 *vers) 683 { 684 return csio_hw_read_flash(hw, FLASH_FW_START + 685 offsetof(struct fw_hdr, tp_microcode_ver), 1, 686 vers, 0); 687 } 688 689 /* 690 * csio_hw_check_fw_version - check if the FW is compatible with 691 * this driver 692 * @hw: HW module 693 * 694 * Checks if an adapter's FW is compatible with the driver. Returns 0 695 * if there's exact match, a negative error if the version could not be 696 * read or there's a major/minor version mismatch/minor. 697 */ 698 static int 699 csio_hw_check_fw_version(struct csio_hw *hw) 700 { 701 int ret, major, minor, micro; 702 703 ret = csio_hw_get_fw_version(hw, &hw->fwrev); 704 if (!ret) 705 ret = csio_hw_get_tp_version(hw, &hw->tp_vers); 706 if (ret) 707 return ret; 708 709 major = FW_HDR_FW_VER_MAJOR_GET(hw->fwrev); 710 minor = FW_HDR_FW_VER_MINOR_GET(hw->fwrev); 711 micro = FW_HDR_FW_VER_MICRO_GET(hw->fwrev); 712 713 if (major != FW_VERSION_MAJOR(hw)) { /* major mismatch - fail */ 714 csio_err(hw, "card FW has major version %u, driver wants %u\n", 715 major, FW_VERSION_MAJOR(hw)); 716 return -EINVAL; 717 } 718 719 if (minor == FW_VERSION_MINOR(hw) && micro == FW_VERSION_MICRO(hw)) 720 return 0; /* perfect match */ 721 722 /* Minor/micro version mismatch */ 723 return -EINVAL; 724 } 725 726 /* 727 * csio_hw_fw_dload - download firmware. 728 * @hw: HW module 729 * @fw_data: firmware image to write. 730 * @size: image size 731 * 732 * Write the supplied firmware image to the card's serial flash. 733 */ 734 static int 735 csio_hw_fw_dload(struct csio_hw *hw, uint8_t *fw_data, uint32_t size) 736 { 737 uint32_t csum; 738 int32_t addr; 739 int ret; 740 uint32_t i; 741 uint8_t first_page[SF_PAGE_SIZE]; 742 const __be32 *p = (const __be32 *)fw_data; 743 struct fw_hdr *hdr = (struct fw_hdr *)fw_data; 744 uint32_t sf_sec_size; 745 746 if ((!hw->params.sf_size) || (!hw->params.sf_nsec)) { 747 csio_err(hw, "Serial Flash data invalid\n"); 748 return -EINVAL; 749 } 750 751 if (!size) { 752 csio_err(hw, "FW image has no data\n"); 753 return -EINVAL; 754 } 755 756 if (size & 511) { 757 csio_err(hw, "FW image size not multiple of 512 bytes\n"); 758 return -EINVAL; 759 } 760 761 if (ntohs(hdr->len512) * 512 != size) { 762 csio_err(hw, "FW image size differs from size in FW header\n"); 763 return -EINVAL; 764 } 765 766 if (size > FW_MAX_SIZE) { 767 csio_err(hw, "FW image too large, max is %u bytes\n", 768 FW_MAX_SIZE); 769 return -EINVAL; 770 } 771 772 for (csum = 0, i = 0; i < size / sizeof(csum); i++) 773 csum += ntohl(p[i]); 774 775 if (csum != 0xffffffff) { 776 csio_err(hw, "corrupted firmware image, checksum %#x\n", csum); 777 return -EINVAL; 778 } 779 780 sf_sec_size = hw->params.sf_size / hw->params.sf_nsec; 781 i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */ 782 783 csio_dbg(hw, "Erasing sectors... start:%d end:%d\n", 784 FW_START_SEC, FW_START_SEC + i - 1); 785 786 ret = csio_hw_flash_erase_sectors(hw, FW_START_SEC, 787 FW_START_SEC + i - 1); 788 if (ret) { 789 csio_err(hw, "Flash Erase failed\n"); 790 goto out; 791 } 792 793 /* 794 * We write the correct version at the end so the driver can see a bad 795 * version if the FW write fails. Start by writing a copy of the 796 * first page with a bad version. 797 */ 798 memcpy(first_page, fw_data, SF_PAGE_SIZE); 799 ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff); 800 ret = csio_hw_write_flash(hw, FW_IMG_START, SF_PAGE_SIZE, first_page); 801 if (ret) 802 goto out; 803 804 csio_dbg(hw, "Writing Flash .. start:%d end:%d\n", 805 FW_IMG_START, FW_IMG_START + size); 806 807 addr = FW_IMG_START; 808 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) { 809 addr += SF_PAGE_SIZE; 810 fw_data += SF_PAGE_SIZE; 811 ret = csio_hw_write_flash(hw, addr, SF_PAGE_SIZE, fw_data); 812 if (ret) 813 goto out; 814 } 815 816 ret = csio_hw_write_flash(hw, 817 FW_IMG_START + 818 offsetof(struct fw_hdr, fw_ver), 819 sizeof(hdr->fw_ver), 820 (const uint8_t *)&hdr->fw_ver); 821 822 out: 823 if (ret) 824 csio_err(hw, "firmware download failed, error %d\n", ret); 825 return ret; 826 } 827 828 static int 829 csio_hw_get_flash_params(struct csio_hw *hw) 830 { 831 int ret; 832 uint32_t info = 0; 833 834 ret = csio_hw_sf1_write(hw, 1, 1, 0, SF_RD_ID); 835 if (!ret) 836 ret = csio_hw_sf1_read(hw, 3, 0, 1, &info); 837 csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */ 838 if (ret != 0) 839 return ret; 840 841 if ((info & 0xff) != 0x20) /* not a Numonix flash */ 842 return -EINVAL; 843 info >>= 16; /* log2 of size */ 844 if (info >= 0x14 && info < 0x18) 845 hw->params.sf_nsec = 1 << (info - 16); 846 else if (info == 0x18) 847 hw->params.sf_nsec = 64; 848 else 849 return -EINVAL; 850 hw->params.sf_size = 1 << info; 851 852 return 0; 853 } 854 855 static void 856 csio_set_pcie_completion_timeout(struct csio_hw *hw, u8 range) 857 { 858 uint16_t val; 859 int pcie_cap; 860 861 if (!csio_pci_capability(hw->pdev, PCI_CAP_ID_EXP, &pcie_cap)) { 862 pci_read_config_word(hw->pdev, 863 pcie_cap + PCI_EXP_DEVCTL2, &val); 864 val &= 0xfff0; 865 val |= range ; 866 pci_write_config_word(hw->pdev, 867 pcie_cap + PCI_EXP_DEVCTL2, val); 868 } 869 } 870 871 /*****************************************************************************/ 872 /* HW State machine assists */ 873 /*****************************************************************************/ 874 875 static int 876 csio_hw_dev_ready(struct csio_hw *hw) 877 { 878 uint32_t reg; 879 int cnt = 6; 880 881 while (((reg = csio_rd_reg32(hw, PL_WHOAMI)) == 0xFFFFFFFF) && 882 (--cnt != 0)) 883 mdelay(100); 884 885 if ((cnt == 0) && (((int32_t)(SOURCEPF_GET(reg)) < 0) || 886 (SOURCEPF_GET(reg) >= CSIO_MAX_PFN))) { 887 csio_err(hw, "PL_WHOAMI returned 0x%x, cnt:%d\n", reg, cnt); 888 return -EIO; 889 } 890 891 hw->pfn = SOURCEPF_GET(reg); 892 893 return 0; 894 } 895 896 /* 897 * csio_do_hello - Perform the HELLO FW Mailbox command and process response. 898 * @hw: HW module 899 * @state: Device state 900 * 901 * FW_HELLO_CMD has to be polled for completion. 902 */ 903 static int 904 csio_do_hello(struct csio_hw *hw, enum csio_dev_state *state) 905 { 906 struct csio_mb *mbp; 907 int rv = 0; 908 enum csio_dev_master master; 909 enum fw_retval retval; 910 uint8_t mpfn; 911 char state_str[16]; 912 int retries = FW_CMD_HELLO_RETRIES; 913 914 memset(state_str, 0, sizeof(state_str)); 915 916 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 917 if (!mbp) { 918 rv = -ENOMEM; 919 CSIO_INC_STATS(hw, n_err_nomem); 920 goto out; 921 } 922 923 master = csio_force_master ? CSIO_MASTER_MUST : CSIO_MASTER_MAY; 924 925 retry: 926 csio_mb_hello(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 927 hw->pfn, master, NULL); 928 929 rv = csio_mb_issue(hw, mbp); 930 if (rv) { 931 csio_err(hw, "failed to issue HELLO cmd. ret:%d.\n", rv); 932 goto out_free_mb; 933 } 934 935 csio_mb_process_hello_rsp(hw, mbp, &retval, state, &mpfn); 936 if (retval != FW_SUCCESS) { 937 csio_err(hw, "HELLO cmd failed with ret: %d\n", retval); 938 rv = -EINVAL; 939 goto out_free_mb; 940 } 941 942 /* Firmware has designated us to be master */ 943 if (hw->pfn == mpfn) { 944 hw->flags |= CSIO_HWF_MASTER; 945 } else if (*state == CSIO_DEV_STATE_UNINIT) { 946 /* 947 * If we're not the Master PF then we need to wait around for 948 * the Master PF Driver to finish setting up the adapter. 949 * 950 * Note that we also do this wait if we're a non-Master-capable 951 * PF and there is no current Master PF; a Master PF may show up 952 * momentarily and we wouldn't want to fail pointlessly. (This 953 * can happen when an OS loads lots of different drivers rapidly 954 * at the same time). In this case, the Master PF returned by 955 * the firmware will be PCIE_FW_MASTER_MASK so the test below 956 * will work ... 957 */ 958 959 int waiting = FW_CMD_HELLO_TIMEOUT; 960 961 /* 962 * Wait for the firmware to either indicate an error or 963 * initialized state. If we see either of these we bail out 964 * and report the issue to the caller. If we exhaust the 965 * "hello timeout" and we haven't exhausted our retries, try 966 * again. Otherwise bail with a timeout error. 967 */ 968 for (;;) { 969 uint32_t pcie_fw; 970 971 spin_unlock_irq(&hw->lock); 972 msleep(50); 973 spin_lock_irq(&hw->lock); 974 waiting -= 50; 975 976 /* 977 * If neither Error nor Initialialized are indicated 978 * by the firmware keep waiting till we exaust our 979 * timeout ... and then retry if we haven't exhausted 980 * our retries ... 981 */ 982 pcie_fw = csio_rd_reg32(hw, PCIE_FW); 983 if (!(pcie_fw & (PCIE_FW_ERR|PCIE_FW_INIT))) { 984 if (waiting <= 0) { 985 if (retries-- > 0) 986 goto retry; 987 988 rv = -ETIMEDOUT; 989 break; 990 } 991 continue; 992 } 993 994 /* 995 * We either have an Error or Initialized condition 996 * report errors preferentially. 997 */ 998 if (state) { 999 if (pcie_fw & PCIE_FW_ERR) { 1000 *state = CSIO_DEV_STATE_ERR; 1001 rv = -ETIMEDOUT; 1002 } else if (pcie_fw & PCIE_FW_INIT) 1003 *state = CSIO_DEV_STATE_INIT; 1004 } 1005 1006 /* 1007 * If we arrived before a Master PF was selected and 1008 * there's not a valid Master PF, grab its identity 1009 * for our caller. 1010 */ 1011 if (mpfn == PCIE_FW_MASTER_MASK && 1012 (pcie_fw & PCIE_FW_MASTER_VLD)) 1013 mpfn = PCIE_FW_MASTER_GET(pcie_fw); 1014 break; 1015 } 1016 hw->flags &= ~CSIO_HWF_MASTER; 1017 } 1018 1019 switch (*state) { 1020 case CSIO_DEV_STATE_UNINIT: 1021 strcpy(state_str, "Initializing"); 1022 break; 1023 case CSIO_DEV_STATE_INIT: 1024 strcpy(state_str, "Initialized"); 1025 break; 1026 case CSIO_DEV_STATE_ERR: 1027 strcpy(state_str, "Error"); 1028 break; 1029 default: 1030 strcpy(state_str, "Unknown"); 1031 break; 1032 } 1033 1034 if (hw->pfn == mpfn) 1035 csio_info(hw, "PF: %d, Coming up as MASTER, HW state: %s\n", 1036 hw->pfn, state_str); 1037 else 1038 csio_info(hw, 1039 "PF: %d, Coming up as SLAVE, Master PF: %d, HW state: %s\n", 1040 hw->pfn, mpfn, state_str); 1041 1042 out_free_mb: 1043 mempool_free(mbp, hw->mb_mempool); 1044 out: 1045 return rv; 1046 } 1047 1048 /* 1049 * csio_do_bye - Perform the BYE FW Mailbox command and process response. 1050 * @hw: HW module 1051 * 1052 */ 1053 static int 1054 csio_do_bye(struct csio_hw *hw) 1055 { 1056 struct csio_mb *mbp; 1057 enum fw_retval retval; 1058 1059 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1060 if (!mbp) { 1061 CSIO_INC_STATS(hw, n_err_nomem); 1062 return -ENOMEM; 1063 } 1064 1065 csio_mb_bye(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL); 1066 1067 if (csio_mb_issue(hw, mbp)) { 1068 csio_err(hw, "Issue of BYE command failed\n"); 1069 mempool_free(mbp, hw->mb_mempool); 1070 return -EINVAL; 1071 } 1072 1073 retval = csio_mb_fw_retval(mbp); 1074 if (retval != FW_SUCCESS) { 1075 mempool_free(mbp, hw->mb_mempool); 1076 return -EINVAL; 1077 } 1078 1079 mempool_free(mbp, hw->mb_mempool); 1080 1081 return 0; 1082 } 1083 1084 /* 1085 * csio_do_reset- Perform the device reset. 1086 * @hw: HW module 1087 * @fw_rst: FW reset 1088 * 1089 * If fw_rst is set, issues FW reset mbox cmd otherwise 1090 * does PIO reset. 1091 * Performs reset of the function. 1092 */ 1093 static int 1094 csio_do_reset(struct csio_hw *hw, bool fw_rst) 1095 { 1096 struct csio_mb *mbp; 1097 enum fw_retval retval; 1098 1099 if (!fw_rst) { 1100 /* PIO reset */ 1101 csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST); 1102 mdelay(2000); 1103 return 0; 1104 } 1105 1106 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1107 if (!mbp) { 1108 CSIO_INC_STATS(hw, n_err_nomem); 1109 return -ENOMEM; 1110 } 1111 1112 csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO, 1113 PIORSTMODE | PIORST, 0, NULL); 1114 1115 if (csio_mb_issue(hw, mbp)) { 1116 csio_err(hw, "Issue of RESET command failed.n"); 1117 mempool_free(mbp, hw->mb_mempool); 1118 return -EINVAL; 1119 } 1120 1121 retval = csio_mb_fw_retval(mbp); 1122 if (retval != FW_SUCCESS) { 1123 csio_err(hw, "RESET cmd failed with ret:0x%x.\n", retval); 1124 mempool_free(mbp, hw->mb_mempool); 1125 return -EINVAL; 1126 } 1127 1128 mempool_free(mbp, hw->mb_mempool); 1129 1130 return 0; 1131 } 1132 1133 static int 1134 csio_hw_validate_caps(struct csio_hw *hw, struct csio_mb *mbp) 1135 { 1136 struct fw_caps_config_cmd *rsp = (struct fw_caps_config_cmd *)mbp->mb; 1137 uint16_t caps; 1138 1139 caps = ntohs(rsp->fcoecaps); 1140 1141 if (!(caps & FW_CAPS_CONFIG_FCOE_INITIATOR)) { 1142 csio_err(hw, "No FCoE Initiator capability in the firmware.\n"); 1143 return -EINVAL; 1144 } 1145 1146 if (!(caps & FW_CAPS_CONFIG_FCOE_CTRL_OFLD)) { 1147 csio_err(hw, "No FCoE Control Offload capability\n"); 1148 return -EINVAL; 1149 } 1150 1151 return 0; 1152 } 1153 1154 /* 1155 * csio_hw_fw_halt - issue a reset/halt to FW and put uP into RESET 1156 * @hw: the HW module 1157 * @mbox: mailbox to use for the FW RESET command (if desired) 1158 * @force: force uP into RESET even if FW RESET command fails 1159 * 1160 * Issues a RESET command to firmware (if desired) with a HALT indication 1161 * and then puts the microprocessor into RESET state. The RESET command 1162 * will only be issued if a legitimate mailbox is provided (mbox <= 1163 * PCIE_FW_MASTER_MASK). 1164 * 1165 * This is generally used in order for the host to safely manipulate the 1166 * adapter without fear of conflicting with whatever the firmware might 1167 * be doing. The only way out of this state is to RESTART the firmware 1168 * ... 1169 */ 1170 static int 1171 csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force) 1172 { 1173 enum fw_retval retval = 0; 1174 1175 /* 1176 * If a legitimate mailbox is provided, issue a RESET command 1177 * with a HALT indication. 1178 */ 1179 if (mbox <= PCIE_FW_MASTER_MASK) { 1180 struct csio_mb *mbp; 1181 1182 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1183 if (!mbp) { 1184 CSIO_INC_STATS(hw, n_err_nomem); 1185 return -ENOMEM; 1186 } 1187 1188 csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO, 1189 PIORSTMODE | PIORST, FW_RESET_CMD_HALT(1), 1190 NULL); 1191 1192 if (csio_mb_issue(hw, mbp)) { 1193 csio_err(hw, "Issue of RESET command failed!\n"); 1194 mempool_free(mbp, hw->mb_mempool); 1195 return -EINVAL; 1196 } 1197 1198 retval = csio_mb_fw_retval(mbp); 1199 mempool_free(mbp, hw->mb_mempool); 1200 } 1201 1202 /* 1203 * Normally we won't complete the operation if the firmware RESET 1204 * command fails but if our caller insists we'll go ahead and put the 1205 * uP into RESET. This can be useful if the firmware is hung or even 1206 * missing ... We'll have to take the risk of putting the uP into 1207 * RESET without the cooperation of firmware in that case. 1208 * 1209 * We also force the firmware's HALT flag to be on in case we bypassed 1210 * the firmware RESET command above or we're dealing with old firmware 1211 * which doesn't have the HALT capability. This will serve as a flag 1212 * for the incoming firmware to know that it's coming out of a HALT 1213 * rather than a RESET ... if it's new enough to understand that ... 1214 */ 1215 if (retval == 0 || force) { 1216 csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, UPCRST); 1217 csio_set_reg_field(hw, PCIE_FW, PCIE_FW_HALT, PCIE_FW_HALT); 1218 } 1219 1220 /* 1221 * And we always return the result of the firmware RESET command 1222 * even when we force the uP into RESET ... 1223 */ 1224 return retval ? -EINVAL : 0; 1225 } 1226 1227 /* 1228 * csio_hw_fw_restart - restart the firmware by taking the uP out of RESET 1229 * @hw: the HW module 1230 * @reset: if we want to do a RESET to restart things 1231 * 1232 * Restart firmware previously halted by csio_hw_fw_halt(). On successful 1233 * return the previous PF Master remains as the new PF Master and there 1234 * is no need to issue a new HELLO command, etc. 1235 * 1236 * We do this in two ways: 1237 * 1238 * 1. If we're dealing with newer firmware we'll simply want to take 1239 * the chip's microprocessor out of RESET. This will cause the 1240 * firmware to start up from its start vector. And then we'll loop 1241 * until the firmware indicates it's started again (PCIE_FW.HALT 1242 * reset to 0) or we timeout. 1243 * 1244 * 2. If we're dealing with older firmware then we'll need to RESET 1245 * the chip since older firmware won't recognize the PCIE_FW.HALT 1246 * flag and automatically RESET itself on startup. 1247 */ 1248 static int 1249 csio_hw_fw_restart(struct csio_hw *hw, uint32_t mbox, int32_t reset) 1250 { 1251 if (reset) { 1252 /* 1253 * Since we're directing the RESET instead of the firmware 1254 * doing it automatically, we need to clear the PCIE_FW.HALT 1255 * bit. 1256 */ 1257 csio_set_reg_field(hw, PCIE_FW, PCIE_FW_HALT, 0); 1258 1259 /* 1260 * If we've been given a valid mailbox, first try to get the 1261 * firmware to do the RESET. If that works, great and we can 1262 * return success. Otherwise, if we haven't been given a 1263 * valid mailbox or the RESET command failed, fall back to 1264 * hitting the chip with a hammer. 1265 */ 1266 if (mbox <= PCIE_FW_MASTER_MASK) { 1267 csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, 0); 1268 msleep(100); 1269 if (csio_do_reset(hw, true) == 0) 1270 return 0; 1271 } 1272 1273 csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST); 1274 msleep(2000); 1275 } else { 1276 int ms; 1277 1278 csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, 0); 1279 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) { 1280 if (!(csio_rd_reg32(hw, PCIE_FW) & PCIE_FW_HALT)) 1281 return 0; 1282 msleep(100); 1283 ms += 100; 1284 } 1285 return -ETIMEDOUT; 1286 } 1287 return 0; 1288 } 1289 1290 /* 1291 * csio_hw_fw_upgrade - perform all of the steps necessary to upgrade FW 1292 * @hw: the HW module 1293 * @mbox: mailbox to use for the FW RESET command (if desired) 1294 * @fw_data: the firmware image to write 1295 * @size: image size 1296 * @force: force upgrade even if firmware doesn't cooperate 1297 * 1298 * Perform all of the steps necessary for upgrading an adapter's 1299 * firmware image. Normally this requires the cooperation of the 1300 * existing firmware in order to halt all existing activities 1301 * but if an invalid mailbox token is passed in we skip that step 1302 * (though we'll still put the adapter microprocessor into RESET in 1303 * that case). 1304 * 1305 * On successful return the new firmware will have been loaded and 1306 * the adapter will have been fully RESET losing all previous setup 1307 * state. On unsuccessful return the adapter may be completely hosed ... 1308 * positive errno indicates that the adapter is ~probably~ intact, a 1309 * negative errno indicates that things are looking bad ... 1310 */ 1311 static int 1312 csio_hw_fw_upgrade(struct csio_hw *hw, uint32_t mbox, 1313 const u8 *fw_data, uint32_t size, int32_t force) 1314 { 1315 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data; 1316 int reset, ret; 1317 1318 ret = csio_hw_fw_halt(hw, mbox, force); 1319 if (ret != 0 && !force) 1320 return ret; 1321 1322 ret = csio_hw_fw_dload(hw, (uint8_t *) fw_data, size); 1323 if (ret != 0) 1324 return ret; 1325 1326 /* 1327 * Older versions of the firmware don't understand the new 1328 * PCIE_FW.HALT flag and so won't know to perform a RESET when they 1329 * restart. So for newly loaded older firmware we'll have to do the 1330 * RESET for it so it starts up on a clean slate. We can tell if 1331 * the newly loaded firmware will handle this right by checking 1332 * its header flags to see if it advertises the capability. 1333 */ 1334 reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0); 1335 return csio_hw_fw_restart(hw, mbox, reset); 1336 } 1337 1338 1339 /* 1340 * csio_hw_fw_config_file - setup an adapter via a Configuration File 1341 * @hw: the HW module 1342 * @mbox: mailbox to use for the FW command 1343 * @mtype: the memory type where the Configuration File is located 1344 * @maddr: the memory address where the Configuration File is located 1345 * @finiver: return value for CF [fini] version 1346 * @finicsum: return value for CF [fini] checksum 1347 * @cfcsum: return value for CF computed checksum 1348 * 1349 * Issue a command to get the firmware to process the Configuration 1350 * File located at the specified mtype/maddress. If the Configuration 1351 * File is processed successfully and return value pointers are 1352 * provided, the Configuration File "[fini] section version and 1353 * checksum values will be returned along with the computed checksum. 1354 * It's up to the caller to decide how it wants to respond to the 1355 * checksums not matching but it recommended that a prominant warning 1356 * be emitted in order to help people rapidly identify changed or 1357 * corrupted Configuration Files. 1358 * 1359 * Also note that it's possible to modify things like "niccaps", 1360 * "toecaps",etc. between processing the Configuration File and telling 1361 * the firmware to use the new configuration. Callers which want to 1362 * do this will need to "hand-roll" their own CAPS_CONFIGS commands for 1363 * Configuration Files if they want to do this. 1364 */ 1365 static int 1366 csio_hw_fw_config_file(struct csio_hw *hw, 1367 unsigned int mtype, unsigned int maddr, 1368 uint32_t *finiver, uint32_t *finicsum, uint32_t *cfcsum) 1369 { 1370 struct csio_mb *mbp; 1371 struct fw_caps_config_cmd *caps_cmd; 1372 int rv = -EINVAL; 1373 enum fw_retval ret; 1374 1375 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1376 if (!mbp) { 1377 CSIO_INC_STATS(hw, n_err_nomem); 1378 return -ENOMEM; 1379 } 1380 /* 1381 * Tell the firmware to process the indicated Configuration File. 1382 * If there are no errors and the caller has provided return value 1383 * pointers for the [fini] section version, checksum and computed 1384 * checksum, pass those back to the caller. 1385 */ 1386 caps_cmd = (struct fw_caps_config_cmd *)(mbp->mb); 1387 CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1); 1388 caps_cmd->op_to_write = 1389 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 1390 FW_CMD_REQUEST | 1391 FW_CMD_READ); 1392 caps_cmd->cfvalid_to_len16 = 1393 htonl(FW_CAPS_CONFIG_CMD_CFVALID | 1394 FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) | 1395 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) | 1396 FW_LEN16(*caps_cmd)); 1397 1398 if (csio_mb_issue(hw, mbp)) { 1399 csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD failed!\n"); 1400 goto out; 1401 } 1402 1403 ret = csio_mb_fw_retval(mbp); 1404 if (ret != FW_SUCCESS) { 1405 csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", rv); 1406 goto out; 1407 } 1408 1409 if (finiver) 1410 *finiver = ntohl(caps_cmd->finiver); 1411 if (finicsum) 1412 *finicsum = ntohl(caps_cmd->finicsum); 1413 if (cfcsum) 1414 *cfcsum = ntohl(caps_cmd->cfcsum); 1415 1416 /* Validate device capabilities */ 1417 if (csio_hw_validate_caps(hw, mbp)) { 1418 rv = -ENOENT; 1419 goto out; 1420 } 1421 1422 /* 1423 * And now tell the firmware to use the configuration we just loaded. 1424 */ 1425 caps_cmd->op_to_write = 1426 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 1427 FW_CMD_REQUEST | 1428 FW_CMD_WRITE); 1429 caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd)); 1430 1431 if (csio_mb_issue(hw, mbp)) { 1432 csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD failed!\n"); 1433 goto out; 1434 } 1435 1436 ret = csio_mb_fw_retval(mbp); 1437 if (ret != FW_SUCCESS) { 1438 csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", rv); 1439 goto out; 1440 } 1441 1442 rv = 0; 1443 out: 1444 mempool_free(mbp, hw->mb_mempool); 1445 return rv; 1446 } 1447 1448 /* 1449 * csio_get_device_params - Get device parameters. 1450 * @hw: HW module 1451 * 1452 */ 1453 static int 1454 csio_get_device_params(struct csio_hw *hw) 1455 { 1456 struct csio_wrm *wrm = csio_hw_to_wrm(hw); 1457 struct csio_mb *mbp; 1458 enum fw_retval retval; 1459 u32 param[6]; 1460 int i, j = 0; 1461 1462 /* Initialize portids to -1 */ 1463 for (i = 0; i < CSIO_MAX_PPORTS; i++) 1464 hw->pport[i].portid = -1; 1465 1466 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1467 if (!mbp) { 1468 CSIO_INC_STATS(hw, n_err_nomem); 1469 return -ENOMEM; 1470 } 1471 1472 /* Get port vec information. */ 1473 param[0] = FW_PARAM_DEV(PORTVEC); 1474 1475 /* Get Core clock. */ 1476 param[1] = FW_PARAM_DEV(CCLK); 1477 1478 /* Get EQ id start and end. */ 1479 param[2] = FW_PARAM_PFVF(EQ_START); 1480 param[3] = FW_PARAM_PFVF(EQ_END); 1481 1482 /* Get IQ id start and end. */ 1483 param[4] = FW_PARAM_PFVF(IQFLINT_START); 1484 param[5] = FW_PARAM_PFVF(IQFLINT_END); 1485 1486 csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0, 1487 ARRAY_SIZE(param), param, NULL, false, NULL); 1488 if (csio_mb_issue(hw, mbp)) { 1489 csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n"); 1490 mempool_free(mbp, hw->mb_mempool); 1491 return -EINVAL; 1492 } 1493 1494 csio_mb_process_read_params_rsp(hw, mbp, &retval, 1495 ARRAY_SIZE(param), param); 1496 if (retval != FW_SUCCESS) { 1497 csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n", 1498 retval); 1499 mempool_free(mbp, hw->mb_mempool); 1500 return -EINVAL; 1501 } 1502 1503 /* cache the information. */ 1504 hw->port_vec = param[0]; 1505 hw->vpd.cclk = param[1]; 1506 wrm->fw_eq_start = param[2]; 1507 wrm->fw_iq_start = param[4]; 1508 1509 /* Using FW configured max iqs & eqs */ 1510 if ((hw->flags & CSIO_HWF_USING_SOFT_PARAMS) || 1511 !csio_is_hw_master(hw)) { 1512 hw->cfg_niq = param[5] - param[4] + 1; 1513 hw->cfg_neq = param[3] - param[2] + 1; 1514 csio_dbg(hw, "Using fwconfig max niqs %d neqs %d\n", 1515 hw->cfg_niq, hw->cfg_neq); 1516 } 1517 1518 hw->port_vec &= csio_port_mask; 1519 1520 hw->num_pports = hweight32(hw->port_vec); 1521 1522 csio_dbg(hw, "Port vector: 0x%x, #ports: %d\n", 1523 hw->port_vec, hw->num_pports); 1524 1525 for (i = 0; i < hw->num_pports; i++) { 1526 while ((hw->port_vec & (1 << j)) == 0) 1527 j++; 1528 hw->pport[i].portid = j++; 1529 csio_dbg(hw, "Found Port:%d\n", hw->pport[i].portid); 1530 } 1531 mempool_free(mbp, hw->mb_mempool); 1532 1533 return 0; 1534 } 1535 1536 1537 /* 1538 * csio_config_device_caps - Get and set device capabilities. 1539 * @hw: HW module 1540 * 1541 */ 1542 static int 1543 csio_config_device_caps(struct csio_hw *hw) 1544 { 1545 struct csio_mb *mbp; 1546 enum fw_retval retval; 1547 int rv = -EINVAL; 1548 1549 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1550 if (!mbp) { 1551 CSIO_INC_STATS(hw, n_err_nomem); 1552 return -ENOMEM; 1553 } 1554 1555 /* Get device capabilities */ 1556 csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, 0, 0, 0, 0, NULL); 1557 1558 if (csio_mb_issue(hw, mbp)) { 1559 csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(r) failed!\n"); 1560 goto out; 1561 } 1562 1563 retval = csio_mb_fw_retval(mbp); 1564 if (retval != FW_SUCCESS) { 1565 csio_err(hw, "FW_CAPS_CONFIG_CMD(r) returned %d!\n", retval); 1566 goto out; 1567 } 1568 1569 /* Validate device capabilities */ 1570 if (csio_hw_validate_caps(hw, mbp)) 1571 goto out; 1572 1573 /* Don't config device capabilities if already configured */ 1574 if (hw->fw_state == CSIO_DEV_STATE_INIT) { 1575 rv = 0; 1576 goto out; 1577 } 1578 1579 /* Write back desired device capabilities */ 1580 csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, true, true, 1581 false, true, NULL); 1582 1583 if (csio_mb_issue(hw, mbp)) { 1584 csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(w) failed!\n"); 1585 goto out; 1586 } 1587 1588 retval = csio_mb_fw_retval(mbp); 1589 if (retval != FW_SUCCESS) { 1590 csio_err(hw, "FW_CAPS_CONFIG_CMD(w) returned %d!\n", retval); 1591 goto out; 1592 } 1593 1594 rv = 0; 1595 out: 1596 mempool_free(mbp, hw->mb_mempool); 1597 return rv; 1598 } 1599 1600 static int 1601 csio_config_global_rss(struct csio_hw *hw) 1602 { 1603 struct csio_mb *mbp; 1604 enum fw_retval retval; 1605 1606 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1607 if (!mbp) { 1608 CSIO_INC_STATS(hw, n_err_nomem); 1609 return -ENOMEM; 1610 } 1611 1612 csio_rss_glb_config(hw, mbp, CSIO_MB_DEFAULT_TMO, 1613 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL, 1614 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN | 1615 FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ | 1616 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP, 1617 NULL); 1618 1619 if (csio_mb_issue(hw, mbp)) { 1620 csio_err(hw, "Issue of FW_RSS_GLB_CONFIG_CMD failed!\n"); 1621 mempool_free(mbp, hw->mb_mempool); 1622 return -EINVAL; 1623 } 1624 1625 retval = csio_mb_fw_retval(mbp); 1626 if (retval != FW_SUCCESS) { 1627 csio_err(hw, "FW_RSS_GLB_CONFIG_CMD returned 0x%x!\n", retval); 1628 mempool_free(mbp, hw->mb_mempool); 1629 return -EINVAL; 1630 } 1631 1632 mempool_free(mbp, hw->mb_mempool); 1633 1634 return 0; 1635 } 1636 1637 /* 1638 * csio_config_pfvf - Configure Physical/Virtual functions settings. 1639 * @hw: HW module 1640 * 1641 */ 1642 static int 1643 csio_config_pfvf(struct csio_hw *hw) 1644 { 1645 struct csio_mb *mbp; 1646 enum fw_retval retval; 1647 1648 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1649 if (!mbp) { 1650 CSIO_INC_STATS(hw, n_err_nomem); 1651 return -ENOMEM; 1652 } 1653 1654 /* 1655 * For now, allow all PFs to access to all ports using a pmask 1656 * value of 0xF (M_FW_PFVF_CMD_PMASK). Once we have VFs, we will 1657 * need to provide access based on some rule. 1658 */ 1659 csio_mb_pfvf(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0, CSIO_NEQ, 1660 CSIO_NETH_CTRL, CSIO_NIQ_FLINT, 0, 0, CSIO_NVI, CSIO_CMASK, 1661 CSIO_PMASK, CSIO_NEXACTF, CSIO_R_CAPS, CSIO_WX_CAPS, NULL); 1662 1663 if (csio_mb_issue(hw, mbp)) { 1664 csio_err(hw, "Issue of FW_PFVF_CMD failed!\n"); 1665 mempool_free(mbp, hw->mb_mempool); 1666 return -EINVAL; 1667 } 1668 1669 retval = csio_mb_fw_retval(mbp); 1670 if (retval != FW_SUCCESS) { 1671 csio_err(hw, "FW_PFVF_CMD returned 0x%x!\n", retval); 1672 mempool_free(mbp, hw->mb_mempool); 1673 return -EINVAL; 1674 } 1675 1676 mempool_free(mbp, hw->mb_mempool); 1677 1678 return 0; 1679 } 1680 1681 /* 1682 * csio_enable_ports - Bring up all available ports. 1683 * @hw: HW module. 1684 * 1685 */ 1686 static int 1687 csio_enable_ports(struct csio_hw *hw) 1688 { 1689 struct csio_mb *mbp; 1690 enum fw_retval retval; 1691 uint8_t portid; 1692 int i; 1693 1694 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1695 if (!mbp) { 1696 CSIO_INC_STATS(hw, n_err_nomem); 1697 return -ENOMEM; 1698 } 1699 1700 for (i = 0; i < hw->num_pports; i++) { 1701 portid = hw->pport[i].portid; 1702 1703 /* Read PORT information */ 1704 csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid, 1705 false, 0, 0, NULL); 1706 1707 if (csio_mb_issue(hw, mbp)) { 1708 csio_err(hw, "failed to issue FW_PORT_CMD(r) port:%d\n", 1709 portid); 1710 mempool_free(mbp, hw->mb_mempool); 1711 return -EINVAL; 1712 } 1713 1714 csio_mb_process_read_port_rsp(hw, mbp, &retval, 1715 &hw->pport[i].pcap); 1716 if (retval != FW_SUCCESS) { 1717 csio_err(hw, "FW_PORT_CMD(r) port:%d failed: 0x%x\n", 1718 portid, retval); 1719 mempool_free(mbp, hw->mb_mempool); 1720 return -EINVAL; 1721 } 1722 1723 /* Write back PORT information */ 1724 csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid, true, 1725 (PAUSE_RX | PAUSE_TX), hw->pport[i].pcap, NULL); 1726 1727 if (csio_mb_issue(hw, mbp)) { 1728 csio_err(hw, "failed to issue FW_PORT_CMD(w) port:%d\n", 1729 portid); 1730 mempool_free(mbp, hw->mb_mempool); 1731 return -EINVAL; 1732 } 1733 1734 retval = csio_mb_fw_retval(mbp); 1735 if (retval != FW_SUCCESS) { 1736 csio_err(hw, "FW_PORT_CMD(w) port:%d failed :0x%x\n", 1737 portid, retval); 1738 mempool_free(mbp, hw->mb_mempool); 1739 return -EINVAL; 1740 } 1741 1742 } /* For all ports */ 1743 1744 mempool_free(mbp, hw->mb_mempool); 1745 1746 return 0; 1747 } 1748 1749 /* 1750 * csio_get_fcoe_resinfo - Read fcoe fw resource info. 1751 * @hw: HW module 1752 * Issued with lock held. 1753 */ 1754 static int 1755 csio_get_fcoe_resinfo(struct csio_hw *hw) 1756 { 1757 struct csio_fcoe_res_info *res_info = &hw->fres_info; 1758 struct fw_fcoe_res_info_cmd *rsp; 1759 struct csio_mb *mbp; 1760 enum fw_retval retval; 1761 1762 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1763 if (!mbp) { 1764 CSIO_INC_STATS(hw, n_err_nomem); 1765 return -ENOMEM; 1766 } 1767 1768 /* Get FCoE FW resource information */ 1769 csio_fcoe_read_res_info_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL); 1770 1771 if (csio_mb_issue(hw, mbp)) { 1772 csio_err(hw, "failed to issue FW_FCOE_RES_INFO_CMD\n"); 1773 mempool_free(mbp, hw->mb_mempool); 1774 return -EINVAL; 1775 } 1776 1777 rsp = (struct fw_fcoe_res_info_cmd *)(mbp->mb); 1778 retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16)); 1779 if (retval != FW_SUCCESS) { 1780 csio_err(hw, "FW_FCOE_RES_INFO_CMD failed with ret x%x\n", 1781 retval); 1782 mempool_free(mbp, hw->mb_mempool); 1783 return -EINVAL; 1784 } 1785 1786 res_info->e_d_tov = ntohs(rsp->e_d_tov); 1787 res_info->r_a_tov_seq = ntohs(rsp->r_a_tov_seq); 1788 res_info->r_a_tov_els = ntohs(rsp->r_a_tov_els); 1789 res_info->r_r_tov = ntohs(rsp->r_r_tov); 1790 res_info->max_xchgs = ntohl(rsp->max_xchgs); 1791 res_info->max_ssns = ntohl(rsp->max_ssns); 1792 res_info->used_xchgs = ntohl(rsp->used_xchgs); 1793 res_info->used_ssns = ntohl(rsp->used_ssns); 1794 res_info->max_fcfs = ntohl(rsp->max_fcfs); 1795 res_info->max_vnps = ntohl(rsp->max_vnps); 1796 res_info->used_fcfs = ntohl(rsp->used_fcfs); 1797 res_info->used_vnps = ntohl(rsp->used_vnps); 1798 1799 csio_dbg(hw, "max ssns:%d max xchgs:%d\n", res_info->max_ssns, 1800 res_info->max_xchgs); 1801 mempool_free(mbp, hw->mb_mempool); 1802 1803 return 0; 1804 } 1805 1806 static int 1807 csio_hw_check_fwconfig(struct csio_hw *hw, u32 *param) 1808 { 1809 struct csio_mb *mbp; 1810 enum fw_retval retval; 1811 u32 _param[1]; 1812 1813 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1814 if (!mbp) { 1815 CSIO_INC_STATS(hw, n_err_nomem); 1816 return -ENOMEM; 1817 } 1818 1819 /* 1820 * Find out whether we're dealing with a version of 1821 * the firmware which has configuration file support. 1822 */ 1823 _param[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 1824 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF)); 1825 1826 csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0, 1827 ARRAY_SIZE(_param), _param, NULL, false, NULL); 1828 if (csio_mb_issue(hw, mbp)) { 1829 csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n"); 1830 mempool_free(mbp, hw->mb_mempool); 1831 return -EINVAL; 1832 } 1833 1834 csio_mb_process_read_params_rsp(hw, mbp, &retval, 1835 ARRAY_SIZE(_param), _param); 1836 if (retval != FW_SUCCESS) { 1837 csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n", 1838 retval); 1839 mempool_free(mbp, hw->mb_mempool); 1840 return -EINVAL; 1841 } 1842 1843 mempool_free(mbp, hw->mb_mempool); 1844 *param = _param[0]; 1845 1846 return 0; 1847 } 1848 1849 static int 1850 csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path) 1851 { 1852 int ret = 0; 1853 const struct firmware *cf; 1854 struct pci_dev *pci_dev = hw->pdev; 1855 struct device *dev = &pci_dev->dev; 1856 unsigned int mtype = 0, maddr = 0; 1857 uint32_t *cfg_data; 1858 int value_to_add = 0; 1859 1860 if (request_firmware(&cf, CSIO_CF_FNAME(hw), dev) < 0) { 1861 csio_err(hw, "could not find config file %s, err: %d\n", 1862 CSIO_CF_FNAME(hw), ret); 1863 return -ENOENT; 1864 } 1865 1866 if (cf->size%4 != 0) 1867 value_to_add = 4 - (cf->size % 4); 1868 1869 cfg_data = kzalloc(cf->size+value_to_add, GFP_KERNEL); 1870 if (cfg_data == NULL) { 1871 ret = -ENOMEM; 1872 goto leave; 1873 } 1874 1875 memcpy((void *)cfg_data, (const void *)cf->data, cf->size); 1876 if (csio_hw_check_fwconfig(hw, fw_cfg_param) != 0) { 1877 ret = -EINVAL; 1878 goto leave; 1879 } 1880 1881 mtype = FW_PARAMS_PARAM_Y_GET(*fw_cfg_param); 1882 maddr = FW_PARAMS_PARAM_Z_GET(*fw_cfg_param) << 16; 1883 1884 ret = csio_memory_write(hw, mtype, maddr, 1885 cf->size + value_to_add, cfg_data); 1886 1887 if ((ret == 0) && (value_to_add != 0)) { 1888 union { 1889 u32 word; 1890 char buf[4]; 1891 } last; 1892 size_t size = cf->size & ~0x3; 1893 int i; 1894 1895 last.word = cfg_data[size >> 2]; 1896 for (i = value_to_add; i < 4; i++) 1897 last.buf[i] = 0; 1898 ret = csio_memory_write(hw, mtype, maddr + size, 4, &last.word); 1899 } 1900 if (ret == 0) { 1901 csio_info(hw, "config file upgraded to %s\n", 1902 CSIO_CF_FNAME(hw)); 1903 snprintf(path, 64, "%s%s", "/lib/firmware/", CSIO_CF_FNAME(hw)); 1904 } 1905 1906 leave: 1907 kfree(cfg_data); 1908 release_firmware(cf); 1909 return ret; 1910 } 1911 1912 /* 1913 * HW initialization: contact FW, obtain config, perform basic init. 1914 * 1915 * If the firmware we're dealing with has Configuration File support, then 1916 * we use that to perform all configuration -- either using the configuration 1917 * file stored in flash on the adapter or using a filesystem-local file 1918 * if available. 1919 * 1920 * If we don't have configuration file support in the firmware, then we'll 1921 * have to set things up the old fashioned way with hard-coded register 1922 * writes and firmware commands ... 1923 */ 1924 1925 /* 1926 * Attempt to initialize the HW via a Firmware Configuration File. 1927 */ 1928 static int 1929 csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param) 1930 { 1931 unsigned int mtype, maddr; 1932 int rv; 1933 uint32_t finiver = 0, finicsum = 0, cfcsum = 0; 1934 int using_flash; 1935 char path[64]; 1936 1937 /* 1938 * Reset device if necessary 1939 */ 1940 if (reset) { 1941 rv = csio_do_reset(hw, true); 1942 if (rv != 0) 1943 goto bye; 1944 } 1945 1946 /* 1947 * If we have a configuration file in host , 1948 * then use that. Otherwise, use the configuration file stored 1949 * in the HW flash ... 1950 */ 1951 spin_unlock_irq(&hw->lock); 1952 rv = csio_hw_flash_config(hw, fw_cfg_param, path); 1953 spin_lock_irq(&hw->lock); 1954 if (rv != 0) { 1955 if (rv == -ENOENT) { 1956 /* 1957 * config file was not found. Use default 1958 * config file from flash. 1959 */ 1960 mtype = FW_MEMTYPE_CF_FLASH; 1961 maddr = hw->chip_ops->chip_flash_cfg_addr(hw); 1962 using_flash = 1; 1963 } else { 1964 /* 1965 * we revert back to the hardwired config if 1966 * flashing failed. 1967 */ 1968 goto bye; 1969 } 1970 } else { 1971 mtype = FW_PARAMS_PARAM_Y_GET(*fw_cfg_param); 1972 maddr = FW_PARAMS_PARAM_Z_GET(*fw_cfg_param) << 16; 1973 using_flash = 0; 1974 } 1975 1976 hw->cfg_store = (uint8_t)mtype; 1977 1978 /* 1979 * Issue a Capability Configuration command to the firmware to get it 1980 * to parse the Configuration File. 1981 */ 1982 rv = csio_hw_fw_config_file(hw, mtype, maddr, &finiver, 1983 &finicsum, &cfcsum); 1984 if (rv != 0) 1985 goto bye; 1986 1987 hw->cfg_finiver = finiver; 1988 hw->cfg_finicsum = finicsum; 1989 hw->cfg_cfcsum = cfcsum; 1990 hw->cfg_csum_status = true; 1991 1992 if (finicsum != cfcsum) { 1993 csio_warn(hw, 1994 "Config File checksum mismatch: csum=%#x, computed=%#x\n", 1995 finicsum, cfcsum); 1996 1997 hw->cfg_csum_status = false; 1998 } 1999 2000 /* 2001 * Note that we're operating with parameters 2002 * not supplied by the driver, rather than from hard-wired 2003 * initialization constants buried in the driver. 2004 */ 2005 hw->flags |= CSIO_HWF_USING_SOFT_PARAMS; 2006 2007 /* device parameters */ 2008 rv = csio_get_device_params(hw); 2009 if (rv != 0) 2010 goto bye; 2011 2012 /* Configure SGE */ 2013 csio_wr_sge_init(hw); 2014 2015 /* 2016 * And finally tell the firmware to initialize itself using the 2017 * parameters from the Configuration File. 2018 */ 2019 /* Post event to notify completion of configuration */ 2020 csio_post_event(&hw->sm, CSIO_HWE_INIT); 2021 2022 csio_info(hw, 2023 "Firmware Configuration File %s, version %#x, computed checksum %#x\n", 2024 (using_flash ? "in device FLASH" : path), finiver, cfcsum); 2025 2026 return 0; 2027 2028 /* 2029 * Something bad happened. Return the error ... 2030 */ 2031 bye: 2032 hw->flags &= ~CSIO_HWF_USING_SOFT_PARAMS; 2033 csio_dbg(hw, "Configuration file error %d\n", rv); 2034 return rv; 2035 } 2036 2037 /* 2038 * Attempt to initialize the adapter via hard-coded, driver supplied 2039 * parameters ... 2040 */ 2041 static int 2042 csio_hw_no_fwconfig(struct csio_hw *hw, int reset) 2043 { 2044 int rv; 2045 /* 2046 * Reset device if necessary 2047 */ 2048 if (reset) { 2049 rv = csio_do_reset(hw, true); 2050 if (rv != 0) 2051 goto out; 2052 } 2053 2054 /* Get and set device capabilities */ 2055 rv = csio_config_device_caps(hw); 2056 if (rv != 0) 2057 goto out; 2058 2059 /* Config Global RSS command */ 2060 rv = csio_config_global_rss(hw); 2061 if (rv != 0) 2062 goto out; 2063 2064 /* Configure PF/VF capabilities of device */ 2065 rv = csio_config_pfvf(hw); 2066 if (rv != 0) 2067 goto out; 2068 2069 /* device parameters */ 2070 rv = csio_get_device_params(hw); 2071 if (rv != 0) 2072 goto out; 2073 2074 /* Configure SGE */ 2075 csio_wr_sge_init(hw); 2076 2077 /* Post event to notify completion of configuration */ 2078 csio_post_event(&hw->sm, CSIO_HWE_INIT); 2079 2080 out: 2081 return rv; 2082 } 2083 2084 /* 2085 * Returns -EINVAL if attempts to flash the firmware failed 2086 * else returns 0, 2087 * if flashing was not attempted because the card had the 2088 * latest firmware ECANCELED is returned 2089 */ 2090 static int 2091 csio_hw_flash_fw(struct csio_hw *hw) 2092 { 2093 int ret = -ECANCELED; 2094 const struct firmware *fw; 2095 const struct fw_hdr *hdr; 2096 u32 fw_ver; 2097 struct pci_dev *pci_dev = hw->pdev; 2098 struct device *dev = &pci_dev->dev ; 2099 2100 if (request_firmware(&fw, CSIO_FW_FNAME(hw), dev) < 0) { 2101 csio_err(hw, "could not find firmware image %s, err: %d\n", 2102 CSIO_FW_FNAME(hw), ret); 2103 return -EINVAL; 2104 } 2105 2106 hdr = (const struct fw_hdr *)fw->data; 2107 fw_ver = ntohl(hdr->fw_ver); 2108 if (FW_HDR_FW_VER_MAJOR_GET(fw_ver) != FW_VERSION_MAJOR(hw)) 2109 return -EINVAL; /* wrong major version, won't do */ 2110 2111 /* 2112 * If the flash FW is unusable or we found something newer, load it. 2113 */ 2114 if (FW_HDR_FW_VER_MAJOR_GET(hw->fwrev) != FW_VERSION_MAJOR(hw) || 2115 fw_ver > hw->fwrev) { 2116 ret = csio_hw_fw_upgrade(hw, hw->pfn, fw->data, fw->size, 2117 /*force=*/false); 2118 if (!ret) 2119 csio_info(hw, 2120 "firmware upgraded to version %pI4 from %s\n", 2121 &hdr->fw_ver, CSIO_FW_FNAME(hw)); 2122 else 2123 csio_err(hw, "firmware upgrade failed! err=%d\n", ret); 2124 } else 2125 ret = -EINVAL; 2126 2127 release_firmware(fw); 2128 2129 return ret; 2130 } 2131 2132 2133 /* 2134 * csio_hw_configure - Configure HW 2135 * @hw - HW module 2136 * 2137 */ 2138 static void 2139 csio_hw_configure(struct csio_hw *hw) 2140 { 2141 int reset = 1; 2142 int rv; 2143 u32 param[1]; 2144 2145 rv = csio_hw_dev_ready(hw); 2146 if (rv != 0) { 2147 CSIO_INC_STATS(hw, n_err_fatal); 2148 csio_post_event(&hw->sm, CSIO_HWE_FATAL); 2149 goto out; 2150 } 2151 2152 /* HW version */ 2153 hw->chip_ver = (char)csio_rd_reg32(hw, PL_REV); 2154 2155 /* Needed for FW download */ 2156 rv = csio_hw_get_flash_params(hw); 2157 if (rv != 0) { 2158 csio_err(hw, "Failed to get serial flash params rv:%d\n", rv); 2159 csio_post_event(&hw->sm, CSIO_HWE_FATAL); 2160 goto out; 2161 } 2162 2163 /* Set pci completion timeout value to 4 seconds. */ 2164 csio_set_pcie_completion_timeout(hw, 0xd); 2165 2166 hw->chip_ops->chip_set_mem_win(hw, MEMWIN_CSIOSTOR); 2167 2168 rv = csio_hw_get_fw_version(hw, &hw->fwrev); 2169 if (rv != 0) 2170 goto out; 2171 2172 csio_hw_print_fw_version(hw, "Firmware revision"); 2173 2174 rv = csio_do_hello(hw, &hw->fw_state); 2175 if (rv != 0) { 2176 CSIO_INC_STATS(hw, n_err_fatal); 2177 csio_post_event(&hw->sm, CSIO_HWE_FATAL); 2178 goto out; 2179 } 2180 2181 /* Read vpd */ 2182 rv = csio_hw_get_vpd_params(hw, &hw->vpd); 2183 if (rv != 0) 2184 goto out; 2185 2186 if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) { 2187 rv = csio_hw_check_fw_version(hw); 2188 if (rv == -EINVAL) { 2189 2190 /* Do firmware update */ 2191 spin_unlock_irq(&hw->lock); 2192 rv = csio_hw_flash_fw(hw); 2193 spin_lock_irq(&hw->lock); 2194 2195 if (rv == 0) { 2196 reset = 0; 2197 /* 2198 * Note that the chip was reset as part of the 2199 * firmware upgrade so we don't reset it again 2200 * below and grab the new firmware version. 2201 */ 2202 rv = csio_hw_check_fw_version(hw); 2203 } 2204 } 2205 /* 2206 * If the firmware doesn't support Configuration 2207 * Files, use the old Driver-based, hard-wired 2208 * initialization. Otherwise, try using the 2209 * Configuration File support and fall back to the 2210 * Driver-based initialization if there's no 2211 * Configuration File found. 2212 */ 2213 if (csio_hw_check_fwconfig(hw, param) == 0) { 2214 rv = csio_hw_use_fwconfig(hw, reset, param); 2215 if (rv == -ENOENT) 2216 goto out; 2217 if (rv != 0) { 2218 csio_info(hw, 2219 "No Configuration File present " 2220 "on adapter. Using hard-wired " 2221 "configuration parameters.\n"); 2222 rv = csio_hw_no_fwconfig(hw, reset); 2223 } 2224 } else { 2225 rv = csio_hw_no_fwconfig(hw, reset); 2226 } 2227 2228 if (rv != 0) 2229 goto out; 2230 2231 } else { 2232 if (hw->fw_state == CSIO_DEV_STATE_INIT) { 2233 2234 hw->flags |= CSIO_HWF_USING_SOFT_PARAMS; 2235 2236 /* device parameters */ 2237 rv = csio_get_device_params(hw); 2238 if (rv != 0) 2239 goto out; 2240 2241 /* Get device capabilities */ 2242 rv = csio_config_device_caps(hw); 2243 if (rv != 0) 2244 goto out; 2245 2246 /* Configure SGE */ 2247 csio_wr_sge_init(hw); 2248 2249 /* Post event to notify completion of configuration */ 2250 csio_post_event(&hw->sm, CSIO_HWE_INIT); 2251 goto out; 2252 } 2253 } /* if not master */ 2254 2255 out: 2256 return; 2257 } 2258 2259 /* 2260 * csio_hw_initialize - Initialize HW 2261 * @hw - HW module 2262 * 2263 */ 2264 static void 2265 csio_hw_initialize(struct csio_hw *hw) 2266 { 2267 struct csio_mb *mbp; 2268 enum fw_retval retval; 2269 int rv; 2270 int i; 2271 2272 if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) { 2273 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 2274 if (!mbp) 2275 goto out; 2276 2277 csio_mb_initialize(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL); 2278 2279 if (csio_mb_issue(hw, mbp)) { 2280 csio_err(hw, "Issue of FW_INITIALIZE_CMD failed!\n"); 2281 goto free_and_out; 2282 } 2283 2284 retval = csio_mb_fw_retval(mbp); 2285 if (retval != FW_SUCCESS) { 2286 csio_err(hw, "FW_INITIALIZE_CMD returned 0x%x!\n", 2287 retval); 2288 goto free_and_out; 2289 } 2290 2291 mempool_free(mbp, hw->mb_mempool); 2292 } 2293 2294 rv = csio_get_fcoe_resinfo(hw); 2295 if (rv != 0) { 2296 csio_err(hw, "Failed to read fcoe resource info: %d\n", rv); 2297 goto out; 2298 } 2299 2300 spin_unlock_irq(&hw->lock); 2301 rv = csio_config_queues(hw); 2302 spin_lock_irq(&hw->lock); 2303 2304 if (rv != 0) { 2305 csio_err(hw, "Config of queues failed!: %d\n", rv); 2306 goto out; 2307 } 2308 2309 for (i = 0; i < hw->num_pports; i++) 2310 hw->pport[i].mod_type = FW_PORT_MOD_TYPE_NA; 2311 2312 if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) { 2313 rv = csio_enable_ports(hw); 2314 if (rv != 0) { 2315 csio_err(hw, "Failed to enable ports: %d\n", rv); 2316 goto out; 2317 } 2318 } 2319 2320 csio_post_event(&hw->sm, CSIO_HWE_INIT_DONE); 2321 return; 2322 2323 free_and_out: 2324 mempool_free(mbp, hw->mb_mempool); 2325 out: 2326 return; 2327 } 2328 2329 #define PF_INTR_MASK (PFSW | PFCIM) 2330 2331 /* 2332 * csio_hw_intr_enable - Enable HW interrupts 2333 * @hw: Pointer to HW module. 2334 * 2335 * Enable interrupts in HW registers. 2336 */ 2337 static void 2338 csio_hw_intr_enable(struct csio_hw *hw) 2339 { 2340 uint16_t vec = (uint16_t)csio_get_mb_intr_idx(csio_hw_to_mbm(hw)); 2341 uint32_t pf = SOURCEPF_GET(csio_rd_reg32(hw, PL_WHOAMI)); 2342 uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE); 2343 2344 /* 2345 * Set aivec for MSI/MSIX. PCIE_PF_CFG.INTXType is set up 2346 * by FW, so do nothing for INTX. 2347 */ 2348 if (hw->intr_mode == CSIO_IM_MSIX) 2349 csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG), 2350 AIVEC(AIVEC_MASK), vec); 2351 else if (hw->intr_mode == CSIO_IM_MSI) 2352 csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG), 2353 AIVEC(AIVEC_MASK), 0); 2354 2355 csio_wr_reg32(hw, PF_INTR_MASK, MYPF_REG(PL_PF_INT_ENABLE)); 2356 2357 /* Turn on MB interrupts - this will internally flush PIO as well */ 2358 csio_mb_intr_enable(hw); 2359 2360 /* These are common registers - only a master can modify them */ 2361 if (csio_is_hw_master(hw)) { 2362 /* 2363 * Disable the Serial FLASH interrupt, if enabled! 2364 */ 2365 pl &= (~SF); 2366 csio_wr_reg32(hw, pl, PL_INT_ENABLE); 2367 2368 csio_wr_reg32(hw, ERR_CPL_EXCEED_IQE_SIZE | 2369 EGRESS_SIZE_ERR | ERR_INVALID_CIDX_INC | 2370 ERR_CPL_OPCODE_0 | ERR_DROPPED_DB | 2371 ERR_DATA_CPL_ON_HIGH_QID1 | 2372 ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 | 2373 ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 | 2374 ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO | 2375 ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR, 2376 SGE_INT_ENABLE3); 2377 csio_set_reg_field(hw, PL_INT_MAP0, 0, 1 << pf); 2378 } 2379 2380 hw->flags |= CSIO_HWF_HW_INTR_ENABLED; 2381 2382 } 2383 2384 /* 2385 * csio_hw_intr_disable - Disable HW interrupts 2386 * @hw: Pointer to HW module. 2387 * 2388 * Turn off Mailbox and PCI_PF_CFG interrupts. 2389 */ 2390 void 2391 csio_hw_intr_disable(struct csio_hw *hw) 2392 { 2393 uint32_t pf = SOURCEPF_GET(csio_rd_reg32(hw, PL_WHOAMI)); 2394 2395 if (!(hw->flags & CSIO_HWF_HW_INTR_ENABLED)) 2396 return; 2397 2398 hw->flags &= ~CSIO_HWF_HW_INTR_ENABLED; 2399 2400 csio_wr_reg32(hw, 0, MYPF_REG(PL_PF_INT_ENABLE)); 2401 if (csio_is_hw_master(hw)) 2402 csio_set_reg_field(hw, PL_INT_MAP0, 1 << pf, 0); 2403 2404 /* Turn off MB interrupts */ 2405 csio_mb_intr_disable(hw); 2406 2407 } 2408 2409 void 2410 csio_hw_fatal_err(struct csio_hw *hw) 2411 { 2412 csio_set_reg_field(hw, SGE_CONTROL, GLOBALENABLE, 0); 2413 csio_hw_intr_disable(hw); 2414 2415 /* Do not reset HW, we may need FW state for debugging */ 2416 csio_fatal(hw, "HW Fatal error encountered!\n"); 2417 } 2418 2419 /*****************************************************************************/ 2420 /* START: HW SM */ 2421 /*****************************************************************************/ 2422 /* 2423 * csio_hws_uninit - Uninit state 2424 * @hw - HW module 2425 * @evt - Event 2426 * 2427 */ 2428 static void 2429 csio_hws_uninit(struct csio_hw *hw, enum csio_hw_ev evt) 2430 { 2431 hw->prev_evt = hw->cur_evt; 2432 hw->cur_evt = evt; 2433 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2434 2435 switch (evt) { 2436 case CSIO_HWE_CFG: 2437 csio_set_state(&hw->sm, csio_hws_configuring); 2438 csio_hw_configure(hw); 2439 break; 2440 2441 default: 2442 CSIO_INC_STATS(hw, n_evt_unexp); 2443 break; 2444 } 2445 } 2446 2447 /* 2448 * csio_hws_configuring - Configuring state 2449 * @hw - HW module 2450 * @evt - Event 2451 * 2452 */ 2453 static void 2454 csio_hws_configuring(struct csio_hw *hw, enum csio_hw_ev evt) 2455 { 2456 hw->prev_evt = hw->cur_evt; 2457 hw->cur_evt = evt; 2458 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2459 2460 switch (evt) { 2461 case CSIO_HWE_INIT: 2462 csio_set_state(&hw->sm, csio_hws_initializing); 2463 csio_hw_initialize(hw); 2464 break; 2465 2466 case CSIO_HWE_INIT_DONE: 2467 csio_set_state(&hw->sm, csio_hws_ready); 2468 /* Fan out event to all lnode SMs */ 2469 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY); 2470 break; 2471 2472 case CSIO_HWE_FATAL: 2473 csio_set_state(&hw->sm, csio_hws_uninit); 2474 break; 2475 2476 case CSIO_HWE_PCI_REMOVE: 2477 csio_do_bye(hw); 2478 break; 2479 default: 2480 CSIO_INC_STATS(hw, n_evt_unexp); 2481 break; 2482 } 2483 } 2484 2485 /* 2486 * csio_hws_initializing - Initialiazing state 2487 * @hw - HW module 2488 * @evt - Event 2489 * 2490 */ 2491 static void 2492 csio_hws_initializing(struct csio_hw *hw, enum csio_hw_ev evt) 2493 { 2494 hw->prev_evt = hw->cur_evt; 2495 hw->cur_evt = evt; 2496 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2497 2498 switch (evt) { 2499 case CSIO_HWE_INIT_DONE: 2500 csio_set_state(&hw->sm, csio_hws_ready); 2501 2502 /* Fan out event to all lnode SMs */ 2503 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY); 2504 2505 /* Enable interrupts */ 2506 csio_hw_intr_enable(hw); 2507 break; 2508 2509 case CSIO_HWE_FATAL: 2510 csio_set_state(&hw->sm, csio_hws_uninit); 2511 break; 2512 2513 case CSIO_HWE_PCI_REMOVE: 2514 csio_do_bye(hw); 2515 break; 2516 2517 default: 2518 CSIO_INC_STATS(hw, n_evt_unexp); 2519 break; 2520 } 2521 } 2522 2523 /* 2524 * csio_hws_ready - Ready state 2525 * @hw - HW module 2526 * @evt - Event 2527 * 2528 */ 2529 static void 2530 csio_hws_ready(struct csio_hw *hw, enum csio_hw_ev evt) 2531 { 2532 /* Remember the event */ 2533 hw->evtflag = evt; 2534 2535 hw->prev_evt = hw->cur_evt; 2536 hw->cur_evt = evt; 2537 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2538 2539 switch (evt) { 2540 case CSIO_HWE_HBA_RESET: 2541 case CSIO_HWE_FW_DLOAD: 2542 case CSIO_HWE_SUSPEND: 2543 case CSIO_HWE_PCI_REMOVE: 2544 case CSIO_HWE_PCIERR_DETECTED: 2545 csio_set_state(&hw->sm, csio_hws_quiescing); 2546 /* cleanup all outstanding cmds */ 2547 if (evt == CSIO_HWE_HBA_RESET || 2548 evt == CSIO_HWE_PCIERR_DETECTED) 2549 csio_scsim_cleanup_io(csio_hw_to_scsim(hw), false); 2550 else 2551 csio_scsim_cleanup_io(csio_hw_to_scsim(hw), true); 2552 2553 csio_hw_intr_disable(hw); 2554 csio_hw_mbm_cleanup(hw); 2555 csio_evtq_stop(hw); 2556 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWSTOP); 2557 csio_evtq_flush(hw); 2558 csio_mgmtm_cleanup(csio_hw_to_mgmtm(hw)); 2559 csio_post_event(&hw->sm, CSIO_HWE_QUIESCED); 2560 break; 2561 2562 case CSIO_HWE_FATAL: 2563 csio_set_state(&hw->sm, csio_hws_uninit); 2564 break; 2565 2566 default: 2567 CSIO_INC_STATS(hw, n_evt_unexp); 2568 break; 2569 } 2570 } 2571 2572 /* 2573 * csio_hws_quiescing - Quiescing state 2574 * @hw - HW module 2575 * @evt - Event 2576 * 2577 */ 2578 static void 2579 csio_hws_quiescing(struct csio_hw *hw, enum csio_hw_ev evt) 2580 { 2581 hw->prev_evt = hw->cur_evt; 2582 hw->cur_evt = evt; 2583 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2584 2585 switch (evt) { 2586 case CSIO_HWE_QUIESCED: 2587 switch (hw->evtflag) { 2588 case CSIO_HWE_FW_DLOAD: 2589 csio_set_state(&hw->sm, csio_hws_resetting); 2590 /* Download firmware */ 2591 /* Fall through */ 2592 2593 case CSIO_HWE_HBA_RESET: 2594 csio_set_state(&hw->sm, csio_hws_resetting); 2595 /* Start reset of the HBA */ 2596 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWRESET); 2597 csio_wr_destroy_queues(hw, false); 2598 csio_do_reset(hw, false); 2599 csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET_DONE); 2600 break; 2601 2602 case CSIO_HWE_PCI_REMOVE: 2603 csio_set_state(&hw->sm, csio_hws_removing); 2604 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREMOVE); 2605 csio_wr_destroy_queues(hw, true); 2606 /* Now send the bye command */ 2607 csio_do_bye(hw); 2608 break; 2609 2610 case CSIO_HWE_SUSPEND: 2611 csio_set_state(&hw->sm, csio_hws_quiesced); 2612 break; 2613 2614 case CSIO_HWE_PCIERR_DETECTED: 2615 csio_set_state(&hw->sm, csio_hws_pcierr); 2616 csio_wr_destroy_queues(hw, false); 2617 break; 2618 2619 default: 2620 CSIO_INC_STATS(hw, n_evt_unexp); 2621 break; 2622 2623 } 2624 break; 2625 2626 default: 2627 CSIO_INC_STATS(hw, n_evt_unexp); 2628 break; 2629 } 2630 } 2631 2632 /* 2633 * csio_hws_quiesced - Quiesced state 2634 * @hw - HW module 2635 * @evt - Event 2636 * 2637 */ 2638 static void 2639 csio_hws_quiesced(struct csio_hw *hw, enum csio_hw_ev evt) 2640 { 2641 hw->prev_evt = hw->cur_evt; 2642 hw->cur_evt = evt; 2643 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2644 2645 switch (evt) { 2646 case CSIO_HWE_RESUME: 2647 csio_set_state(&hw->sm, csio_hws_configuring); 2648 csio_hw_configure(hw); 2649 break; 2650 2651 default: 2652 CSIO_INC_STATS(hw, n_evt_unexp); 2653 break; 2654 } 2655 } 2656 2657 /* 2658 * csio_hws_resetting - HW Resetting state 2659 * @hw - HW module 2660 * @evt - Event 2661 * 2662 */ 2663 static void 2664 csio_hws_resetting(struct csio_hw *hw, enum csio_hw_ev evt) 2665 { 2666 hw->prev_evt = hw->cur_evt; 2667 hw->cur_evt = evt; 2668 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2669 2670 switch (evt) { 2671 case CSIO_HWE_HBA_RESET_DONE: 2672 csio_evtq_start(hw); 2673 csio_set_state(&hw->sm, csio_hws_configuring); 2674 csio_hw_configure(hw); 2675 break; 2676 2677 default: 2678 CSIO_INC_STATS(hw, n_evt_unexp); 2679 break; 2680 } 2681 } 2682 2683 /* 2684 * csio_hws_removing - PCI Hotplug removing state 2685 * @hw - HW module 2686 * @evt - Event 2687 * 2688 */ 2689 static void 2690 csio_hws_removing(struct csio_hw *hw, enum csio_hw_ev evt) 2691 { 2692 hw->prev_evt = hw->cur_evt; 2693 hw->cur_evt = evt; 2694 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2695 2696 switch (evt) { 2697 case CSIO_HWE_HBA_RESET: 2698 if (!csio_is_hw_master(hw)) 2699 break; 2700 /* 2701 * The BYE should have alerady been issued, so we cant 2702 * use the mailbox interface. Hence we use the PL_RST 2703 * register directly. 2704 */ 2705 csio_err(hw, "Resetting HW and waiting 2 seconds...\n"); 2706 csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST); 2707 mdelay(2000); 2708 break; 2709 2710 /* Should never receive any new events */ 2711 default: 2712 CSIO_INC_STATS(hw, n_evt_unexp); 2713 break; 2714 2715 } 2716 } 2717 2718 /* 2719 * csio_hws_pcierr - PCI Error state 2720 * @hw - HW module 2721 * @evt - Event 2722 * 2723 */ 2724 static void 2725 csio_hws_pcierr(struct csio_hw *hw, enum csio_hw_ev evt) 2726 { 2727 hw->prev_evt = hw->cur_evt; 2728 hw->cur_evt = evt; 2729 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2730 2731 switch (evt) { 2732 case CSIO_HWE_PCIERR_SLOT_RESET: 2733 csio_evtq_start(hw); 2734 csio_set_state(&hw->sm, csio_hws_configuring); 2735 csio_hw_configure(hw); 2736 break; 2737 2738 default: 2739 CSIO_INC_STATS(hw, n_evt_unexp); 2740 break; 2741 } 2742 } 2743 2744 /*****************************************************************************/ 2745 /* END: HW SM */ 2746 /*****************************************************************************/ 2747 2748 /* 2749 * csio_handle_intr_status - table driven interrupt handler 2750 * @hw: HW instance 2751 * @reg: the interrupt status register to process 2752 * @acts: table of interrupt actions 2753 * 2754 * A table driven interrupt handler that applies a set of masks to an 2755 * interrupt status word and performs the corresponding actions if the 2756 * interrupts described by the mask have occured. The actions include 2757 * optionally emitting a warning or alert message. The table is terminated 2758 * by an entry specifying mask 0. Returns the number of fatal interrupt 2759 * conditions. 2760 */ 2761 int 2762 csio_handle_intr_status(struct csio_hw *hw, unsigned int reg, 2763 const struct intr_info *acts) 2764 { 2765 int fatal = 0; 2766 unsigned int mask = 0; 2767 unsigned int status = csio_rd_reg32(hw, reg); 2768 2769 for ( ; acts->mask; ++acts) { 2770 if (!(status & acts->mask)) 2771 continue; 2772 if (acts->fatal) { 2773 fatal++; 2774 csio_fatal(hw, "Fatal %s (0x%x)\n", 2775 acts->msg, status & acts->mask); 2776 } else if (acts->msg) 2777 csio_info(hw, "%s (0x%x)\n", 2778 acts->msg, status & acts->mask); 2779 mask |= acts->mask; 2780 } 2781 status &= mask; 2782 if (status) /* clear processed interrupts */ 2783 csio_wr_reg32(hw, status, reg); 2784 return fatal; 2785 } 2786 2787 /* 2788 * TP interrupt handler. 2789 */ 2790 static void csio_tp_intr_handler(struct csio_hw *hw) 2791 { 2792 static struct intr_info tp_intr_info[] = { 2793 { 0x3fffffff, "TP parity error", -1, 1 }, 2794 { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 }, 2795 { 0, NULL, 0, 0 } 2796 }; 2797 2798 if (csio_handle_intr_status(hw, TP_INT_CAUSE, tp_intr_info)) 2799 csio_hw_fatal_err(hw); 2800 } 2801 2802 /* 2803 * SGE interrupt handler. 2804 */ 2805 static void csio_sge_intr_handler(struct csio_hw *hw) 2806 { 2807 uint64_t v; 2808 2809 static struct intr_info sge_intr_info[] = { 2810 { ERR_CPL_EXCEED_IQE_SIZE, 2811 "SGE received CPL exceeding IQE size", -1, 1 }, 2812 { ERR_INVALID_CIDX_INC, 2813 "SGE GTS CIDX increment too large", -1, 0 }, 2814 { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 }, 2815 { ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 }, 2816 { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0, 2817 "SGE IQID > 1023 received CPL for FL", -1, 0 }, 2818 { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1, 2819 0 }, 2820 { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1, 2821 0 }, 2822 { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1, 2823 0 }, 2824 { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1, 2825 0 }, 2826 { ERR_ING_CTXT_PRIO, 2827 "SGE too many priority ingress contexts", -1, 0 }, 2828 { ERR_EGR_CTXT_PRIO, 2829 "SGE too many priority egress contexts", -1, 0 }, 2830 { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 }, 2831 { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 }, 2832 { 0, NULL, 0, 0 } 2833 }; 2834 2835 v = (uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE1) | 2836 ((uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE2) << 32); 2837 if (v) { 2838 csio_fatal(hw, "SGE parity error (%#llx)\n", 2839 (unsigned long long)v); 2840 csio_wr_reg32(hw, (uint32_t)(v & 0xFFFFFFFF), 2841 SGE_INT_CAUSE1); 2842 csio_wr_reg32(hw, (uint32_t)(v >> 32), SGE_INT_CAUSE2); 2843 } 2844 2845 v |= csio_handle_intr_status(hw, SGE_INT_CAUSE3, sge_intr_info); 2846 2847 if (csio_handle_intr_status(hw, SGE_INT_CAUSE3, sge_intr_info) || 2848 v != 0) 2849 csio_hw_fatal_err(hw); 2850 } 2851 2852 #define CIM_OBQ_INTR (OBQULP0PARERR | OBQULP1PARERR | OBQULP2PARERR |\ 2853 OBQULP3PARERR | OBQSGEPARERR | OBQNCSIPARERR) 2854 #define CIM_IBQ_INTR (IBQTP0PARERR | IBQTP1PARERR | IBQULPPARERR |\ 2855 IBQSGEHIPARERR | IBQSGELOPARERR | IBQNCSIPARERR) 2856 2857 /* 2858 * CIM interrupt handler. 2859 */ 2860 static void csio_cim_intr_handler(struct csio_hw *hw) 2861 { 2862 static struct intr_info cim_intr_info[] = { 2863 { PREFDROPINT, "CIM control register prefetch drop", -1, 1 }, 2864 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 }, 2865 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 }, 2866 { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 }, 2867 { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 }, 2868 { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 }, 2869 { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 }, 2870 { 0, NULL, 0, 0 } 2871 }; 2872 static struct intr_info cim_upintr_info[] = { 2873 { RSVDSPACEINT, "CIM reserved space access", -1, 1 }, 2874 { ILLTRANSINT, "CIM illegal transaction", -1, 1 }, 2875 { ILLWRINT, "CIM illegal write", -1, 1 }, 2876 { ILLRDINT, "CIM illegal read", -1, 1 }, 2877 { ILLRDBEINT, "CIM illegal read BE", -1, 1 }, 2878 { ILLWRBEINT, "CIM illegal write BE", -1, 1 }, 2879 { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 }, 2880 { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 }, 2881 { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 }, 2882 { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 }, 2883 { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 }, 2884 { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 }, 2885 { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 }, 2886 { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 }, 2887 { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 }, 2888 { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 }, 2889 { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 }, 2890 { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 }, 2891 { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 }, 2892 { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 }, 2893 { SGLRDPLINT , "CIM single read from PL space", -1, 1 }, 2894 { SGLWRPLINT , "CIM single write to PL space", -1, 1 }, 2895 { BLKRDPLINT , "CIM block read from PL space", -1, 1 }, 2896 { BLKWRPLINT , "CIM block write to PL space", -1, 1 }, 2897 { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 }, 2898 { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 }, 2899 { TIMEOUTINT , "CIM PIF timeout", -1, 1 }, 2900 { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 }, 2901 { 0, NULL, 0, 0 } 2902 }; 2903 2904 int fat; 2905 2906 fat = csio_handle_intr_status(hw, CIM_HOST_INT_CAUSE, 2907 cim_intr_info) + 2908 csio_handle_intr_status(hw, CIM_HOST_UPACC_INT_CAUSE, 2909 cim_upintr_info); 2910 if (fat) 2911 csio_hw_fatal_err(hw); 2912 } 2913 2914 /* 2915 * ULP RX interrupt handler. 2916 */ 2917 static void csio_ulprx_intr_handler(struct csio_hw *hw) 2918 { 2919 static struct intr_info ulprx_intr_info[] = { 2920 { 0x1800000, "ULPRX context error", -1, 1 }, 2921 { 0x7fffff, "ULPRX parity error", -1, 1 }, 2922 { 0, NULL, 0, 0 } 2923 }; 2924 2925 if (csio_handle_intr_status(hw, ULP_RX_INT_CAUSE, ulprx_intr_info)) 2926 csio_hw_fatal_err(hw); 2927 } 2928 2929 /* 2930 * ULP TX interrupt handler. 2931 */ 2932 static void csio_ulptx_intr_handler(struct csio_hw *hw) 2933 { 2934 static struct intr_info ulptx_intr_info[] = { 2935 { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1, 2936 0 }, 2937 { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1, 2938 0 }, 2939 { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1, 2940 0 }, 2941 { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1, 2942 0 }, 2943 { 0xfffffff, "ULPTX parity error", -1, 1 }, 2944 { 0, NULL, 0, 0 } 2945 }; 2946 2947 if (csio_handle_intr_status(hw, ULP_TX_INT_CAUSE, ulptx_intr_info)) 2948 csio_hw_fatal_err(hw); 2949 } 2950 2951 /* 2952 * PM TX interrupt handler. 2953 */ 2954 static void csio_pmtx_intr_handler(struct csio_hw *hw) 2955 { 2956 static struct intr_info pmtx_intr_info[] = { 2957 { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 }, 2958 { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 }, 2959 { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 }, 2960 { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 }, 2961 { 0xffffff0, "PMTX framing error", -1, 1 }, 2962 { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 }, 2963 { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 2964 1 }, 2965 { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 }, 2966 { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1}, 2967 { 0, NULL, 0, 0 } 2968 }; 2969 2970 if (csio_handle_intr_status(hw, PM_TX_INT_CAUSE, pmtx_intr_info)) 2971 csio_hw_fatal_err(hw); 2972 } 2973 2974 /* 2975 * PM RX interrupt handler. 2976 */ 2977 static void csio_pmrx_intr_handler(struct csio_hw *hw) 2978 { 2979 static struct intr_info pmrx_intr_info[] = { 2980 { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 }, 2981 { 0x3ffff0, "PMRX framing error", -1, 1 }, 2982 { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 }, 2983 { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 2984 1 }, 2985 { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 }, 2986 { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1}, 2987 { 0, NULL, 0, 0 } 2988 }; 2989 2990 if (csio_handle_intr_status(hw, PM_RX_INT_CAUSE, pmrx_intr_info)) 2991 csio_hw_fatal_err(hw); 2992 } 2993 2994 /* 2995 * CPL switch interrupt handler. 2996 */ 2997 static void csio_cplsw_intr_handler(struct csio_hw *hw) 2998 { 2999 static struct intr_info cplsw_intr_info[] = { 3000 { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 }, 3001 { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 }, 3002 { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 }, 3003 { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 }, 3004 { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 }, 3005 { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 }, 3006 { 0, NULL, 0, 0 } 3007 }; 3008 3009 if (csio_handle_intr_status(hw, CPL_INTR_CAUSE, cplsw_intr_info)) 3010 csio_hw_fatal_err(hw); 3011 } 3012 3013 /* 3014 * LE interrupt handler. 3015 */ 3016 static void csio_le_intr_handler(struct csio_hw *hw) 3017 { 3018 static struct intr_info le_intr_info[] = { 3019 { LIPMISS, "LE LIP miss", -1, 0 }, 3020 { LIP0, "LE 0 LIP error", -1, 0 }, 3021 { PARITYERR, "LE parity error", -1, 1 }, 3022 { UNKNOWNCMD, "LE unknown command", -1, 1 }, 3023 { REQQPARERR, "LE request queue parity error", -1, 1 }, 3024 { 0, NULL, 0, 0 } 3025 }; 3026 3027 if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE, le_intr_info)) 3028 csio_hw_fatal_err(hw); 3029 } 3030 3031 /* 3032 * MPS interrupt handler. 3033 */ 3034 static void csio_mps_intr_handler(struct csio_hw *hw) 3035 { 3036 static struct intr_info mps_rx_intr_info[] = { 3037 { 0xffffff, "MPS Rx parity error", -1, 1 }, 3038 { 0, NULL, 0, 0 } 3039 }; 3040 static struct intr_info mps_tx_intr_info[] = { 3041 { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 }, 3042 { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 }, 3043 { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 }, 3044 { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 }, 3045 { BUBBLE, "MPS Tx underflow", -1, 1 }, 3046 { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 }, 3047 { FRMERR, "MPS Tx framing error", -1, 1 }, 3048 { 0, NULL, 0, 0 } 3049 }; 3050 static struct intr_info mps_trc_intr_info[] = { 3051 { FILTMEM, "MPS TRC filter parity error", -1, 1 }, 3052 { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 }, 3053 { MISCPERR, "MPS TRC misc parity error", -1, 1 }, 3054 { 0, NULL, 0, 0 } 3055 }; 3056 static struct intr_info mps_stat_sram_intr_info[] = { 3057 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 }, 3058 { 0, NULL, 0, 0 } 3059 }; 3060 static struct intr_info mps_stat_tx_intr_info[] = { 3061 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 }, 3062 { 0, NULL, 0, 0 } 3063 }; 3064 static struct intr_info mps_stat_rx_intr_info[] = { 3065 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 }, 3066 { 0, NULL, 0, 0 } 3067 }; 3068 static struct intr_info mps_cls_intr_info[] = { 3069 { MATCHSRAM, "MPS match SRAM parity error", -1, 1 }, 3070 { MATCHTCAM, "MPS match TCAM parity error", -1, 1 }, 3071 { HASHSRAM, "MPS hash SRAM parity error", -1, 1 }, 3072 { 0, NULL, 0, 0 } 3073 }; 3074 3075 int fat; 3076 3077 fat = csio_handle_intr_status(hw, MPS_RX_PERR_INT_CAUSE, 3078 mps_rx_intr_info) + 3079 csio_handle_intr_status(hw, MPS_TX_INT_CAUSE, 3080 mps_tx_intr_info) + 3081 csio_handle_intr_status(hw, MPS_TRC_INT_CAUSE, 3082 mps_trc_intr_info) + 3083 csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_SRAM, 3084 mps_stat_sram_intr_info) + 3085 csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_TX_FIFO, 3086 mps_stat_tx_intr_info) + 3087 csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_RX_FIFO, 3088 mps_stat_rx_intr_info) + 3089 csio_handle_intr_status(hw, MPS_CLS_INT_CAUSE, 3090 mps_cls_intr_info); 3091 3092 csio_wr_reg32(hw, 0, MPS_INT_CAUSE); 3093 csio_rd_reg32(hw, MPS_INT_CAUSE); /* flush */ 3094 if (fat) 3095 csio_hw_fatal_err(hw); 3096 } 3097 3098 #define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE) 3099 3100 /* 3101 * EDC/MC interrupt handler. 3102 */ 3103 static void csio_mem_intr_handler(struct csio_hw *hw, int idx) 3104 { 3105 static const char name[3][5] = { "EDC0", "EDC1", "MC" }; 3106 3107 unsigned int addr, cnt_addr, v; 3108 3109 if (idx <= MEM_EDC1) { 3110 addr = EDC_REG(EDC_INT_CAUSE, idx); 3111 cnt_addr = EDC_REG(EDC_ECC_STATUS, idx); 3112 } else { 3113 addr = MC_INT_CAUSE; 3114 cnt_addr = MC_ECC_STATUS; 3115 } 3116 3117 v = csio_rd_reg32(hw, addr) & MEM_INT_MASK; 3118 if (v & PERR_INT_CAUSE) 3119 csio_fatal(hw, "%s FIFO parity error\n", name[idx]); 3120 if (v & ECC_CE_INT_CAUSE) { 3121 uint32_t cnt = ECC_CECNT_GET(csio_rd_reg32(hw, cnt_addr)); 3122 3123 csio_wr_reg32(hw, ECC_CECNT_MASK, cnt_addr); 3124 csio_warn(hw, "%u %s correctable ECC data error%s\n", 3125 cnt, name[idx], cnt > 1 ? "s" : ""); 3126 } 3127 if (v & ECC_UE_INT_CAUSE) 3128 csio_fatal(hw, "%s uncorrectable ECC data error\n", name[idx]); 3129 3130 csio_wr_reg32(hw, v, addr); 3131 if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE)) 3132 csio_hw_fatal_err(hw); 3133 } 3134 3135 /* 3136 * MA interrupt handler. 3137 */ 3138 static void csio_ma_intr_handler(struct csio_hw *hw) 3139 { 3140 uint32_t v, status = csio_rd_reg32(hw, MA_INT_CAUSE); 3141 3142 if (status & MEM_PERR_INT_CAUSE) 3143 csio_fatal(hw, "MA parity error, parity status %#x\n", 3144 csio_rd_reg32(hw, MA_PARITY_ERROR_STATUS)); 3145 if (status & MEM_WRAP_INT_CAUSE) { 3146 v = csio_rd_reg32(hw, MA_INT_WRAP_STATUS); 3147 csio_fatal(hw, 3148 "MA address wrap-around error by client %u to address %#x\n", 3149 MEM_WRAP_CLIENT_NUM_GET(v), MEM_WRAP_ADDRESS_GET(v) << 4); 3150 } 3151 csio_wr_reg32(hw, status, MA_INT_CAUSE); 3152 csio_hw_fatal_err(hw); 3153 } 3154 3155 /* 3156 * SMB interrupt handler. 3157 */ 3158 static void csio_smb_intr_handler(struct csio_hw *hw) 3159 { 3160 static struct intr_info smb_intr_info[] = { 3161 { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 }, 3162 { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 }, 3163 { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 }, 3164 { 0, NULL, 0, 0 } 3165 }; 3166 3167 if (csio_handle_intr_status(hw, SMB_INT_CAUSE, smb_intr_info)) 3168 csio_hw_fatal_err(hw); 3169 } 3170 3171 /* 3172 * NC-SI interrupt handler. 3173 */ 3174 static void csio_ncsi_intr_handler(struct csio_hw *hw) 3175 { 3176 static struct intr_info ncsi_intr_info[] = { 3177 { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 }, 3178 { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 }, 3179 { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 }, 3180 { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 }, 3181 { 0, NULL, 0, 0 } 3182 }; 3183 3184 if (csio_handle_intr_status(hw, NCSI_INT_CAUSE, ncsi_intr_info)) 3185 csio_hw_fatal_err(hw); 3186 } 3187 3188 /* 3189 * XGMAC interrupt handler. 3190 */ 3191 static void csio_xgmac_intr_handler(struct csio_hw *hw, int port) 3192 { 3193 uint32_t v = csio_rd_reg32(hw, CSIO_MAC_INT_CAUSE_REG(hw, port)); 3194 3195 v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR; 3196 if (!v) 3197 return; 3198 3199 if (v & TXFIFO_PRTY_ERR) 3200 csio_fatal(hw, "XGMAC %d Tx FIFO parity error\n", port); 3201 if (v & RXFIFO_PRTY_ERR) 3202 csio_fatal(hw, "XGMAC %d Rx FIFO parity error\n", port); 3203 csio_wr_reg32(hw, v, CSIO_MAC_INT_CAUSE_REG(hw, port)); 3204 csio_hw_fatal_err(hw); 3205 } 3206 3207 /* 3208 * PL interrupt handler. 3209 */ 3210 static void csio_pl_intr_handler(struct csio_hw *hw) 3211 { 3212 static struct intr_info pl_intr_info[] = { 3213 { FATALPERR, "T4 fatal parity error", -1, 1 }, 3214 { PERRVFID, "PL VFID_MAP parity error", -1, 1 }, 3215 { 0, NULL, 0, 0 } 3216 }; 3217 3218 if (csio_handle_intr_status(hw, PL_PL_INT_CAUSE, pl_intr_info)) 3219 csio_hw_fatal_err(hw); 3220 } 3221 3222 /* 3223 * csio_hw_slow_intr_handler - control path interrupt handler 3224 * @hw: HW module 3225 * 3226 * Interrupt handler for non-data global interrupt events, e.g., errors. 3227 * The designation 'slow' is because it involves register reads, while 3228 * data interrupts typically don't involve any MMIOs. 3229 */ 3230 int 3231 csio_hw_slow_intr_handler(struct csio_hw *hw) 3232 { 3233 uint32_t cause = csio_rd_reg32(hw, PL_INT_CAUSE); 3234 3235 if (!(cause & CSIO_GLBL_INTR_MASK)) { 3236 CSIO_INC_STATS(hw, n_plint_unexp); 3237 return 0; 3238 } 3239 3240 csio_dbg(hw, "Slow interrupt! cause: 0x%x\n", cause); 3241 3242 CSIO_INC_STATS(hw, n_plint_cnt); 3243 3244 if (cause & CIM) 3245 csio_cim_intr_handler(hw); 3246 3247 if (cause & MPS) 3248 csio_mps_intr_handler(hw); 3249 3250 if (cause & NCSI) 3251 csio_ncsi_intr_handler(hw); 3252 3253 if (cause & PL) 3254 csio_pl_intr_handler(hw); 3255 3256 if (cause & SMB) 3257 csio_smb_intr_handler(hw); 3258 3259 if (cause & XGMAC0) 3260 csio_xgmac_intr_handler(hw, 0); 3261 3262 if (cause & XGMAC1) 3263 csio_xgmac_intr_handler(hw, 1); 3264 3265 if (cause & XGMAC_KR0) 3266 csio_xgmac_intr_handler(hw, 2); 3267 3268 if (cause & XGMAC_KR1) 3269 csio_xgmac_intr_handler(hw, 3); 3270 3271 if (cause & PCIE) 3272 hw->chip_ops->chip_pcie_intr_handler(hw); 3273 3274 if (cause & MC) 3275 csio_mem_intr_handler(hw, MEM_MC); 3276 3277 if (cause & EDC0) 3278 csio_mem_intr_handler(hw, MEM_EDC0); 3279 3280 if (cause & EDC1) 3281 csio_mem_intr_handler(hw, MEM_EDC1); 3282 3283 if (cause & LE) 3284 csio_le_intr_handler(hw); 3285 3286 if (cause & TP) 3287 csio_tp_intr_handler(hw); 3288 3289 if (cause & MA) 3290 csio_ma_intr_handler(hw); 3291 3292 if (cause & PM_TX) 3293 csio_pmtx_intr_handler(hw); 3294 3295 if (cause & PM_RX) 3296 csio_pmrx_intr_handler(hw); 3297 3298 if (cause & ULP_RX) 3299 csio_ulprx_intr_handler(hw); 3300 3301 if (cause & CPL_SWITCH) 3302 csio_cplsw_intr_handler(hw); 3303 3304 if (cause & SGE) 3305 csio_sge_intr_handler(hw); 3306 3307 if (cause & ULP_TX) 3308 csio_ulptx_intr_handler(hw); 3309 3310 /* Clear the interrupts just processed for which we are the master. */ 3311 csio_wr_reg32(hw, cause & CSIO_GLBL_INTR_MASK, PL_INT_CAUSE); 3312 csio_rd_reg32(hw, PL_INT_CAUSE); /* flush */ 3313 3314 return 1; 3315 } 3316 3317 /***************************************************************************** 3318 * HW <--> mailbox interfacing routines. 3319 ****************************************************************************/ 3320 /* 3321 * csio_mberr_worker - Worker thread (dpc) for mailbox/error completions 3322 * 3323 * @data: Private data pointer. 3324 * 3325 * Called from worker thread context. 3326 */ 3327 static void 3328 csio_mberr_worker(void *data) 3329 { 3330 struct csio_hw *hw = (struct csio_hw *)data; 3331 struct csio_mbm *mbm = &hw->mbm; 3332 LIST_HEAD(cbfn_q); 3333 struct csio_mb *mbp_next; 3334 int rv; 3335 3336 del_timer_sync(&mbm->timer); 3337 3338 spin_lock_irq(&hw->lock); 3339 if (list_empty(&mbm->cbfn_q)) { 3340 spin_unlock_irq(&hw->lock); 3341 return; 3342 } 3343 3344 list_splice_tail_init(&mbm->cbfn_q, &cbfn_q); 3345 mbm->stats.n_cbfnq = 0; 3346 3347 /* Try to start waiting mailboxes */ 3348 if (!list_empty(&mbm->req_q)) { 3349 mbp_next = list_first_entry(&mbm->req_q, struct csio_mb, list); 3350 list_del_init(&mbp_next->list); 3351 3352 rv = csio_mb_issue(hw, mbp_next); 3353 if (rv != 0) 3354 list_add_tail(&mbp_next->list, &mbm->req_q); 3355 else 3356 CSIO_DEC_STATS(mbm, n_activeq); 3357 } 3358 spin_unlock_irq(&hw->lock); 3359 3360 /* Now callback completions */ 3361 csio_mb_completions(hw, &cbfn_q); 3362 } 3363 3364 /* 3365 * csio_hw_mb_timer - Top-level Mailbox timeout handler. 3366 * 3367 * @data: private data pointer 3368 * 3369 **/ 3370 static void 3371 csio_hw_mb_timer(uintptr_t data) 3372 { 3373 struct csio_hw *hw = (struct csio_hw *)data; 3374 struct csio_mb *mbp = NULL; 3375 3376 spin_lock_irq(&hw->lock); 3377 mbp = csio_mb_tmo_handler(hw); 3378 spin_unlock_irq(&hw->lock); 3379 3380 /* Call back the function for the timed-out Mailbox */ 3381 if (mbp) 3382 mbp->mb_cbfn(hw, mbp); 3383 3384 } 3385 3386 /* 3387 * csio_hw_mbm_cleanup - Cleanup Mailbox module. 3388 * @hw: HW module 3389 * 3390 * Called with lock held, should exit with lock held. 3391 * Cancels outstanding mailboxes (waiting, in-flight) and gathers them 3392 * into a local queue. Drops lock and calls the completions. Holds 3393 * lock and returns. 3394 */ 3395 static void 3396 csio_hw_mbm_cleanup(struct csio_hw *hw) 3397 { 3398 LIST_HEAD(cbfn_q); 3399 3400 csio_mb_cancel_all(hw, &cbfn_q); 3401 3402 spin_unlock_irq(&hw->lock); 3403 csio_mb_completions(hw, &cbfn_q); 3404 spin_lock_irq(&hw->lock); 3405 } 3406 3407 /***************************************************************************** 3408 * Event handling 3409 ****************************************************************************/ 3410 int 3411 csio_enqueue_evt(struct csio_hw *hw, enum csio_evt type, void *evt_msg, 3412 uint16_t len) 3413 { 3414 struct csio_evt_msg *evt_entry = NULL; 3415 3416 if (type >= CSIO_EVT_MAX) 3417 return -EINVAL; 3418 3419 if (len > CSIO_EVT_MSG_SIZE) 3420 return -EINVAL; 3421 3422 if (hw->flags & CSIO_HWF_FWEVT_STOP) 3423 return -EINVAL; 3424 3425 if (list_empty(&hw->evt_free_q)) { 3426 csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n", 3427 type, len); 3428 return -ENOMEM; 3429 } 3430 3431 evt_entry = list_first_entry(&hw->evt_free_q, 3432 struct csio_evt_msg, list); 3433 list_del_init(&evt_entry->list); 3434 3435 /* copy event msg and queue the event */ 3436 evt_entry->type = type; 3437 memcpy((void *)evt_entry->data, evt_msg, len); 3438 list_add_tail(&evt_entry->list, &hw->evt_active_q); 3439 3440 CSIO_DEC_STATS(hw, n_evt_freeq); 3441 CSIO_INC_STATS(hw, n_evt_activeq); 3442 3443 return 0; 3444 } 3445 3446 static int 3447 csio_enqueue_evt_lock(struct csio_hw *hw, enum csio_evt type, void *evt_msg, 3448 uint16_t len, bool msg_sg) 3449 { 3450 struct csio_evt_msg *evt_entry = NULL; 3451 struct csio_fl_dma_buf *fl_sg; 3452 uint32_t off = 0; 3453 unsigned long flags; 3454 int n, ret = 0; 3455 3456 if (type >= CSIO_EVT_MAX) 3457 return -EINVAL; 3458 3459 if (len > CSIO_EVT_MSG_SIZE) 3460 return -EINVAL; 3461 3462 spin_lock_irqsave(&hw->lock, flags); 3463 if (hw->flags & CSIO_HWF_FWEVT_STOP) { 3464 ret = -EINVAL; 3465 goto out; 3466 } 3467 3468 if (list_empty(&hw->evt_free_q)) { 3469 csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n", 3470 type, len); 3471 ret = -ENOMEM; 3472 goto out; 3473 } 3474 3475 evt_entry = list_first_entry(&hw->evt_free_q, 3476 struct csio_evt_msg, list); 3477 list_del_init(&evt_entry->list); 3478 3479 /* copy event msg and queue the event */ 3480 evt_entry->type = type; 3481 3482 /* If Payload in SG list*/ 3483 if (msg_sg) { 3484 fl_sg = (struct csio_fl_dma_buf *) evt_msg; 3485 for (n = 0; (n < CSIO_MAX_FLBUF_PER_IQWR && off < len); n++) { 3486 memcpy((void *)((uintptr_t)evt_entry->data + off), 3487 fl_sg->flbufs[n].vaddr, 3488 fl_sg->flbufs[n].len); 3489 off += fl_sg->flbufs[n].len; 3490 } 3491 } else 3492 memcpy((void *)evt_entry->data, evt_msg, len); 3493 3494 list_add_tail(&evt_entry->list, &hw->evt_active_q); 3495 CSIO_DEC_STATS(hw, n_evt_freeq); 3496 CSIO_INC_STATS(hw, n_evt_activeq); 3497 out: 3498 spin_unlock_irqrestore(&hw->lock, flags); 3499 return ret; 3500 } 3501 3502 static void 3503 csio_free_evt(struct csio_hw *hw, struct csio_evt_msg *evt_entry) 3504 { 3505 if (evt_entry) { 3506 spin_lock_irq(&hw->lock); 3507 list_del_init(&evt_entry->list); 3508 list_add_tail(&evt_entry->list, &hw->evt_free_q); 3509 CSIO_DEC_STATS(hw, n_evt_activeq); 3510 CSIO_INC_STATS(hw, n_evt_freeq); 3511 spin_unlock_irq(&hw->lock); 3512 } 3513 } 3514 3515 void 3516 csio_evtq_flush(struct csio_hw *hw) 3517 { 3518 uint32_t count; 3519 count = 30; 3520 while (hw->flags & CSIO_HWF_FWEVT_PENDING && count--) { 3521 spin_unlock_irq(&hw->lock); 3522 msleep(2000); 3523 spin_lock_irq(&hw->lock); 3524 } 3525 3526 CSIO_DB_ASSERT(!(hw->flags & CSIO_HWF_FWEVT_PENDING)); 3527 } 3528 3529 static void 3530 csio_evtq_stop(struct csio_hw *hw) 3531 { 3532 hw->flags |= CSIO_HWF_FWEVT_STOP; 3533 } 3534 3535 static void 3536 csio_evtq_start(struct csio_hw *hw) 3537 { 3538 hw->flags &= ~CSIO_HWF_FWEVT_STOP; 3539 } 3540 3541 static void 3542 csio_evtq_cleanup(struct csio_hw *hw) 3543 { 3544 struct list_head *evt_entry, *next_entry; 3545 3546 /* Release outstanding events from activeq to freeq*/ 3547 if (!list_empty(&hw->evt_active_q)) 3548 list_splice_tail_init(&hw->evt_active_q, &hw->evt_free_q); 3549 3550 hw->stats.n_evt_activeq = 0; 3551 hw->flags &= ~CSIO_HWF_FWEVT_PENDING; 3552 3553 /* Freeup event entry */ 3554 list_for_each_safe(evt_entry, next_entry, &hw->evt_free_q) { 3555 kfree(evt_entry); 3556 CSIO_DEC_STATS(hw, n_evt_freeq); 3557 } 3558 3559 hw->stats.n_evt_freeq = 0; 3560 } 3561 3562 3563 static void 3564 csio_process_fwevtq_entry(struct csio_hw *hw, void *wr, uint32_t len, 3565 struct csio_fl_dma_buf *flb, void *priv) 3566 { 3567 __u8 op; 3568 void *msg = NULL; 3569 uint32_t msg_len = 0; 3570 bool msg_sg = 0; 3571 3572 op = ((struct rss_header *) wr)->opcode; 3573 if (op == CPL_FW6_PLD) { 3574 CSIO_INC_STATS(hw, n_cpl_fw6_pld); 3575 if (!flb || !flb->totlen) { 3576 CSIO_INC_STATS(hw, n_cpl_unexp); 3577 return; 3578 } 3579 3580 msg = (void *) flb; 3581 msg_len = flb->totlen; 3582 msg_sg = 1; 3583 } else if (op == CPL_FW6_MSG || op == CPL_FW4_MSG) { 3584 3585 CSIO_INC_STATS(hw, n_cpl_fw6_msg); 3586 /* skip RSS header */ 3587 msg = (void *)((uintptr_t)wr + sizeof(__be64)); 3588 msg_len = (op == CPL_FW6_MSG) ? sizeof(struct cpl_fw6_msg) : 3589 sizeof(struct cpl_fw4_msg); 3590 } else { 3591 csio_warn(hw, "unexpected CPL %#x on FW event queue\n", op); 3592 CSIO_INC_STATS(hw, n_cpl_unexp); 3593 return; 3594 } 3595 3596 /* 3597 * Enqueue event to EventQ. Events processing happens 3598 * in Event worker thread context 3599 */ 3600 if (csio_enqueue_evt_lock(hw, CSIO_EVT_FW, msg, 3601 (uint16_t)msg_len, msg_sg)) 3602 CSIO_INC_STATS(hw, n_evt_drop); 3603 } 3604 3605 void 3606 csio_evtq_worker(struct work_struct *work) 3607 { 3608 struct csio_hw *hw = container_of(work, struct csio_hw, evtq_work); 3609 struct list_head *evt_entry, *next_entry; 3610 LIST_HEAD(evt_q); 3611 struct csio_evt_msg *evt_msg; 3612 struct cpl_fw6_msg *msg; 3613 struct csio_rnode *rn; 3614 int rv = 0; 3615 uint8_t evtq_stop = 0; 3616 3617 csio_dbg(hw, "event worker thread active evts#%d\n", 3618 hw->stats.n_evt_activeq); 3619 3620 spin_lock_irq(&hw->lock); 3621 while (!list_empty(&hw->evt_active_q)) { 3622 list_splice_tail_init(&hw->evt_active_q, &evt_q); 3623 spin_unlock_irq(&hw->lock); 3624 3625 list_for_each_safe(evt_entry, next_entry, &evt_q) { 3626 evt_msg = (struct csio_evt_msg *) evt_entry; 3627 3628 /* Drop events if queue is STOPPED */ 3629 spin_lock_irq(&hw->lock); 3630 if (hw->flags & CSIO_HWF_FWEVT_STOP) 3631 evtq_stop = 1; 3632 spin_unlock_irq(&hw->lock); 3633 if (evtq_stop) { 3634 CSIO_INC_STATS(hw, n_evt_drop); 3635 goto free_evt; 3636 } 3637 3638 switch (evt_msg->type) { 3639 case CSIO_EVT_FW: 3640 msg = (struct cpl_fw6_msg *)(evt_msg->data); 3641 3642 if ((msg->opcode == CPL_FW6_MSG || 3643 msg->opcode == CPL_FW4_MSG) && 3644 !msg->type) { 3645 rv = csio_mb_fwevt_handler(hw, 3646 msg->data); 3647 if (!rv) 3648 break; 3649 /* Handle any remaining fw events */ 3650 csio_fcoe_fwevt_handler(hw, 3651 msg->opcode, msg->data); 3652 } else if (msg->opcode == CPL_FW6_PLD) { 3653 3654 csio_fcoe_fwevt_handler(hw, 3655 msg->opcode, msg->data); 3656 } else { 3657 csio_warn(hw, 3658 "Unhandled FW msg op %x type %x\n", 3659 msg->opcode, msg->type); 3660 CSIO_INC_STATS(hw, n_evt_drop); 3661 } 3662 break; 3663 3664 case CSIO_EVT_MBX: 3665 csio_mberr_worker(hw); 3666 break; 3667 3668 case CSIO_EVT_DEV_LOSS: 3669 memcpy(&rn, evt_msg->data, sizeof(rn)); 3670 csio_rnode_devloss_handler(rn); 3671 break; 3672 3673 default: 3674 csio_warn(hw, "Unhandled event %x on evtq\n", 3675 evt_msg->type); 3676 CSIO_INC_STATS(hw, n_evt_unexp); 3677 break; 3678 } 3679 free_evt: 3680 csio_free_evt(hw, evt_msg); 3681 } 3682 3683 spin_lock_irq(&hw->lock); 3684 } 3685 hw->flags &= ~CSIO_HWF_FWEVT_PENDING; 3686 spin_unlock_irq(&hw->lock); 3687 } 3688 3689 int 3690 csio_fwevtq_handler(struct csio_hw *hw) 3691 { 3692 int rv; 3693 3694 if (csio_q_iqid(hw, hw->fwevt_iq_idx) == CSIO_MAX_QID) { 3695 CSIO_INC_STATS(hw, n_int_stray); 3696 return -EINVAL; 3697 } 3698 3699 rv = csio_wr_process_iq_idx(hw, hw->fwevt_iq_idx, 3700 csio_process_fwevtq_entry, NULL); 3701 return rv; 3702 } 3703 3704 /**************************************************************************** 3705 * Entry points 3706 ****************************************************************************/ 3707 3708 /* Management module */ 3709 /* 3710 * csio_mgmt_req_lookup - Lookup the given IO req exist in Active Q. 3711 * mgmt - mgmt module 3712 * @io_req - io request 3713 * 3714 * Return - 0:if given IO Req exists in active Q. 3715 * -EINVAL :if lookup fails. 3716 */ 3717 int 3718 csio_mgmt_req_lookup(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req) 3719 { 3720 struct list_head *tmp; 3721 3722 /* Lookup ioreq in the ACTIVEQ */ 3723 list_for_each(tmp, &mgmtm->active_q) { 3724 if (io_req == (struct csio_ioreq *)tmp) 3725 return 0; 3726 } 3727 return -EINVAL; 3728 } 3729 3730 #define ECM_MIN_TMO 1000 /* Minimum timeout value for req */ 3731 3732 /* 3733 * csio_mgmts_tmo_handler - MGMT IO Timeout handler. 3734 * @data - Event data. 3735 * 3736 * Return - none. 3737 */ 3738 static void 3739 csio_mgmt_tmo_handler(uintptr_t data) 3740 { 3741 struct csio_mgmtm *mgmtm = (struct csio_mgmtm *) data; 3742 struct list_head *tmp; 3743 struct csio_ioreq *io_req; 3744 3745 csio_dbg(mgmtm->hw, "Mgmt timer invoked!\n"); 3746 3747 spin_lock_irq(&mgmtm->hw->lock); 3748 3749 list_for_each(tmp, &mgmtm->active_q) { 3750 io_req = (struct csio_ioreq *) tmp; 3751 io_req->tmo -= min_t(uint32_t, io_req->tmo, ECM_MIN_TMO); 3752 3753 if (!io_req->tmo) { 3754 /* Dequeue the request from retry Q. */ 3755 tmp = csio_list_prev(tmp); 3756 list_del_init(&io_req->sm.sm_list); 3757 if (io_req->io_cbfn) { 3758 /* io_req will be freed by completion handler */ 3759 io_req->wr_status = -ETIMEDOUT; 3760 io_req->io_cbfn(mgmtm->hw, io_req); 3761 } else { 3762 CSIO_DB_ASSERT(0); 3763 } 3764 } 3765 } 3766 3767 /* If retry queue is not empty, re-arm timer */ 3768 if (!list_empty(&mgmtm->active_q)) 3769 mod_timer(&mgmtm->mgmt_timer, 3770 jiffies + msecs_to_jiffies(ECM_MIN_TMO)); 3771 spin_unlock_irq(&mgmtm->hw->lock); 3772 } 3773 3774 static void 3775 csio_mgmtm_cleanup(struct csio_mgmtm *mgmtm) 3776 { 3777 struct csio_hw *hw = mgmtm->hw; 3778 struct csio_ioreq *io_req; 3779 struct list_head *tmp; 3780 uint32_t count; 3781 3782 count = 30; 3783 /* Wait for all outstanding req to complete gracefully */ 3784 while ((!list_empty(&mgmtm->active_q)) && count--) { 3785 spin_unlock_irq(&hw->lock); 3786 msleep(2000); 3787 spin_lock_irq(&hw->lock); 3788 } 3789 3790 /* release outstanding req from ACTIVEQ */ 3791 list_for_each(tmp, &mgmtm->active_q) { 3792 io_req = (struct csio_ioreq *) tmp; 3793 tmp = csio_list_prev(tmp); 3794 list_del_init(&io_req->sm.sm_list); 3795 mgmtm->stats.n_active--; 3796 if (io_req->io_cbfn) { 3797 /* io_req will be freed by completion handler */ 3798 io_req->wr_status = -ETIMEDOUT; 3799 io_req->io_cbfn(mgmtm->hw, io_req); 3800 } 3801 } 3802 } 3803 3804 /* 3805 * csio_mgmt_init - Mgmt module init entry point 3806 * @mgmtsm - mgmt module 3807 * @hw - HW module 3808 * 3809 * Initialize mgmt timer, resource wait queue, active queue, 3810 * completion q. Allocate Egress and Ingress 3811 * WR queues and save off the queue index returned by the WR 3812 * module for future use. Allocate and save off mgmt reqs in the 3813 * mgmt_req_freelist for future use. Make sure their SM is initialized 3814 * to uninit state. 3815 * Returns: 0 - on success 3816 * -ENOMEM - on error. 3817 */ 3818 static int 3819 csio_mgmtm_init(struct csio_mgmtm *mgmtm, struct csio_hw *hw) 3820 { 3821 struct timer_list *timer = &mgmtm->mgmt_timer; 3822 3823 init_timer(timer); 3824 timer->function = csio_mgmt_tmo_handler; 3825 timer->data = (unsigned long)mgmtm; 3826 3827 INIT_LIST_HEAD(&mgmtm->active_q); 3828 INIT_LIST_HEAD(&mgmtm->cbfn_q); 3829 3830 mgmtm->hw = hw; 3831 /*mgmtm->iq_idx = hw->fwevt_iq_idx;*/ 3832 3833 return 0; 3834 } 3835 3836 /* 3837 * csio_mgmtm_exit - MGMT module exit entry point 3838 * @mgmtsm - mgmt module 3839 * 3840 * This function called during MGMT module uninit. 3841 * Stop timers, free ioreqs allocated. 3842 * Returns: None 3843 * 3844 */ 3845 static void 3846 csio_mgmtm_exit(struct csio_mgmtm *mgmtm) 3847 { 3848 del_timer_sync(&mgmtm->mgmt_timer); 3849 } 3850 3851 3852 /** 3853 * csio_hw_start - Kicks off the HW State machine 3854 * @hw: Pointer to HW module. 3855 * 3856 * It is assumed that the initialization is a synchronous operation. 3857 * So when we return afer posting the event, the HW SM should be in 3858 * the ready state, if there were no errors during init. 3859 */ 3860 int 3861 csio_hw_start(struct csio_hw *hw) 3862 { 3863 spin_lock_irq(&hw->lock); 3864 csio_post_event(&hw->sm, CSIO_HWE_CFG); 3865 spin_unlock_irq(&hw->lock); 3866 3867 if (csio_is_hw_ready(hw)) 3868 return 0; 3869 else 3870 return -EINVAL; 3871 } 3872 3873 int 3874 csio_hw_stop(struct csio_hw *hw) 3875 { 3876 csio_post_event(&hw->sm, CSIO_HWE_PCI_REMOVE); 3877 3878 if (csio_is_hw_removing(hw)) 3879 return 0; 3880 else 3881 return -EINVAL; 3882 } 3883 3884 /* Max reset retries */ 3885 #define CSIO_MAX_RESET_RETRIES 3 3886 3887 /** 3888 * csio_hw_reset - Reset the hardware 3889 * @hw: HW module. 3890 * 3891 * Caller should hold lock across this function. 3892 */ 3893 int 3894 csio_hw_reset(struct csio_hw *hw) 3895 { 3896 if (!csio_is_hw_master(hw)) 3897 return -EPERM; 3898 3899 if (hw->rst_retries >= CSIO_MAX_RESET_RETRIES) { 3900 csio_dbg(hw, "Max hw reset attempts reached.."); 3901 return -EINVAL; 3902 } 3903 3904 hw->rst_retries++; 3905 csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET); 3906 3907 if (csio_is_hw_ready(hw)) { 3908 hw->rst_retries = 0; 3909 hw->stats.n_reset_start = jiffies_to_msecs(jiffies); 3910 return 0; 3911 } else 3912 return -EINVAL; 3913 } 3914 3915 /* 3916 * csio_hw_get_device_id - Caches the Adapter's vendor & device id. 3917 * @hw: HW module. 3918 */ 3919 static void 3920 csio_hw_get_device_id(struct csio_hw *hw) 3921 { 3922 /* Is the adapter device id cached already ?*/ 3923 if (csio_is_dev_id_cached(hw)) 3924 return; 3925 3926 /* Get the PCI vendor & device id */ 3927 pci_read_config_word(hw->pdev, PCI_VENDOR_ID, 3928 &hw->params.pci.vendor_id); 3929 pci_read_config_word(hw->pdev, PCI_DEVICE_ID, 3930 &hw->params.pci.device_id); 3931 3932 csio_dev_id_cached(hw); 3933 hw->chip_id = (hw->params.pci.device_id & CSIO_HW_CHIP_MASK); 3934 3935 } /* csio_hw_get_device_id */ 3936 3937 /* 3938 * csio_hw_set_description - Set the model, description of the hw. 3939 * @hw: HW module. 3940 * @ven_id: PCI Vendor ID 3941 * @dev_id: PCI Device ID 3942 */ 3943 static void 3944 csio_hw_set_description(struct csio_hw *hw, uint16_t ven_id, uint16_t dev_id) 3945 { 3946 uint32_t adap_type, prot_type; 3947 3948 if (ven_id == CSIO_VENDOR_ID) { 3949 prot_type = (dev_id & CSIO_ASIC_DEVID_PROTO_MASK); 3950 adap_type = (dev_id & CSIO_ASIC_DEVID_TYPE_MASK); 3951 3952 if (prot_type == CSIO_T4_FCOE_ASIC) { 3953 memcpy(hw->hw_ver, 3954 csio_t4_fcoe_adapters[adap_type].model_no, 16); 3955 memcpy(hw->model_desc, 3956 csio_t4_fcoe_adapters[adap_type].description, 3957 32); 3958 } else if (prot_type == CSIO_T5_FCOE_ASIC) { 3959 memcpy(hw->hw_ver, 3960 csio_t5_fcoe_adapters[adap_type].model_no, 16); 3961 memcpy(hw->model_desc, 3962 csio_t5_fcoe_adapters[adap_type].description, 3963 32); 3964 } else { 3965 char tempName[32] = "Chelsio FCoE Controller"; 3966 memcpy(hw->model_desc, tempName, 32); 3967 } 3968 } 3969 } /* csio_hw_set_description */ 3970 3971 /** 3972 * csio_hw_init - Initialize HW module. 3973 * @hw: Pointer to HW module. 3974 * 3975 * Initialize the members of the HW module. 3976 */ 3977 int 3978 csio_hw_init(struct csio_hw *hw) 3979 { 3980 int rv = -EINVAL; 3981 uint32_t i; 3982 uint16_t ven_id, dev_id; 3983 struct csio_evt_msg *evt_entry; 3984 3985 INIT_LIST_HEAD(&hw->sm.sm_list); 3986 csio_init_state(&hw->sm, csio_hws_uninit); 3987 spin_lock_init(&hw->lock); 3988 INIT_LIST_HEAD(&hw->sln_head); 3989 3990 /* Get the PCI vendor & device id */ 3991 csio_hw_get_device_id(hw); 3992 3993 strcpy(hw->name, CSIO_HW_NAME); 3994 3995 /* Initialize the HW chip ops with T4/T5 specific ops */ 3996 hw->chip_ops = csio_is_t4(hw->chip_id) ? &t4_ops : &t5_ops; 3997 3998 /* Set the model & its description */ 3999 4000 ven_id = hw->params.pci.vendor_id; 4001 dev_id = hw->params.pci.device_id; 4002 4003 csio_hw_set_description(hw, ven_id, dev_id); 4004 4005 /* Initialize default log level */ 4006 hw->params.log_level = (uint32_t) csio_dbg_level; 4007 4008 csio_set_fwevt_intr_idx(hw, -1); 4009 csio_set_nondata_intr_idx(hw, -1); 4010 4011 /* Init all the modules: Mailbox, WorkRequest and Transport */ 4012 if (csio_mbm_init(csio_hw_to_mbm(hw), hw, csio_hw_mb_timer)) 4013 goto err; 4014 4015 rv = csio_wrm_init(csio_hw_to_wrm(hw), hw); 4016 if (rv) 4017 goto err_mbm_exit; 4018 4019 rv = csio_scsim_init(csio_hw_to_scsim(hw), hw); 4020 if (rv) 4021 goto err_wrm_exit; 4022 4023 rv = csio_mgmtm_init(csio_hw_to_mgmtm(hw), hw); 4024 if (rv) 4025 goto err_scsim_exit; 4026 /* Pre-allocate evtq and initialize them */ 4027 INIT_LIST_HEAD(&hw->evt_active_q); 4028 INIT_LIST_HEAD(&hw->evt_free_q); 4029 for (i = 0; i < csio_evtq_sz; i++) { 4030 4031 evt_entry = kzalloc(sizeof(struct csio_evt_msg), GFP_KERNEL); 4032 if (!evt_entry) { 4033 csio_err(hw, "Failed to initialize eventq"); 4034 goto err_evtq_cleanup; 4035 } 4036 4037 list_add_tail(&evt_entry->list, &hw->evt_free_q); 4038 CSIO_INC_STATS(hw, n_evt_freeq); 4039 } 4040 4041 hw->dev_num = dev_num; 4042 dev_num++; 4043 4044 return 0; 4045 4046 err_evtq_cleanup: 4047 csio_evtq_cleanup(hw); 4048 csio_mgmtm_exit(csio_hw_to_mgmtm(hw)); 4049 err_scsim_exit: 4050 csio_scsim_exit(csio_hw_to_scsim(hw)); 4051 err_wrm_exit: 4052 csio_wrm_exit(csio_hw_to_wrm(hw), hw); 4053 err_mbm_exit: 4054 csio_mbm_exit(csio_hw_to_mbm(hw)); 4055 err: 4056 return rv; 4057 } 4058 4059 /** 4060 * csio_hw_exit - Un-initialize HW module. 4061 * @hw: Pointer to HW module. 4062 * 4063 */ 4064 void 4065 csio_hw_exit(struct csio_hw *hw) 4066 { 4067 csio_evtq_cleanup(hw); 4068 csio_mgmtm_exit(csio_hw_to_mgmtm(hw)); 4069 csio_scsim_exit(csio_hw_to_scsim(hw)); 4070 csio_wrm_exit(csio_hw_to_wrm(hw), hw); 4071 csio_mbm_exit(csio_hw_to_mbm(hw)); 4072 } 4073