1 /* 2 * This file is part of the Chelsio FCoE driver for Linux. 3 * 4 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. 5 * 6 * This software is available to you under a choice of one of two 7 * licenses. You may choose to be licensed under the terms of the GNU 8 * General Public License (GPL) Version 2, available from the file 9 * COPYING in the main directory of this source tree, or the 10 * OpenIB.org BSD license below: 11 * 12 * Redistribution and use in source and binary forms, with or 13 * without modification, are permitted provided that the following 14 * conditions are met: 15 * 16 * - Redistributions of source code must retain the above 17 * copyright notice, this list of conditions and the following 18 * disclaimer. 19 * 20 * - Redistributions in binary form must reproduce the above 21 * copyright notice, this list of conditions and the following 22 * disclaimer in the documentation and/or other materials 23 * provided with the distribution. 24 * 25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 32 * SOFTWARE. 33 */ 34 35 #include <linux/pci.h> 36 #include <linux/pci_regs.h> 37 #include <linux/firmware.h> 38 #include <linux/stddef.h> 39 #include <linux/delay.h> 40 #include <linux/string.h> 41 #include <linux/compiler.h> 42 #include <linux/jiffies.h> 43 #include <linux/kernel.h> 44 #include <linux/log2.h> 45 46 #include "csio_hw.h" 47 #include "csio_lnode.h" 48 #include "csio_rnode.h" 49 50 int csio_force_master; 51 int csio_dbg_level = 0xFEFF; 52 unsigned int csio_port_mask = 0xf; 53 54 /* Default FW event queue entries. */ 55 static uint32_t csio_evtq_sz = CSIO_EVTQ_SIZE; 56 57 /* Default MSI param level */ 58 int csio_msi = 2; 59 60 /* FCoE function instances */ 61 static int dev_num; 62 63 /* FCoE Adapter types & its description */ 64 static const struct csio_adap_desc csio_fcoe_adapters[] = { 65 {"T440-Dbg 10G", "Chelsio T440-Dbg 10G [FCoE]"}, 66 {"T420-CR 10G", "Chelsio T420-CR 10G [FCoE]"}, 67 {"T422-CR 10G/1G", "Chelsio T422-CR 10G/1G [FCoE]"}, 68 {"T440-CR 10G", "Chelsio T440-CR 10G [FCoE]"}, 69 {"T420-BCH 10G", "Chelsio T420-BCH 10G [FCoE]"}, 70 {"T440-BCH 10G", "Chelsio T440-BCH 10G [FCoE]"}, 71 {"T440-CH 10G", "Chelsio T440-CH 10G [FCoE]"}, 72 {"T420-SO 10G", "Chelsio T420-SO 10G [FCoE]"}, 73 {"T420-CX4 10G", "Chelsio T420-CX4 10G [FCoE]"}, 74 {"T420-BT 10G", "Chelsio T420-BT 10G [FCoE]"}, 75 {"T404-BT 1G", "Chelsio T404-BT 1G [FCoE]"}, 76 {"B420-SR 10G", "Chelsio B420-SR 10G [FCoE]"}, 77 {"B404-BT 1G", "Chelsio B404-BT 1G [FCoE]"}, 78 {"T480-CR 10G", "Chelsio T480-CR 10G [FCoE]"}, 79 {"T440-LP-CR 10G", "Chelsio T440-LP-CR 10G [FCoE]"}, 80 {"T4 FPGA", "Chelsio T4 FPGA [FCoE]"} 81 }; 82 83 static void csio_mgmtm_cleanup(struct csio_mgmtm *); 84 static void csio_hw_mbm_cleanup(struct csio_hw *); 85 86 /* State machine forward declarations */ 87 static void csio_hws_uninit(struct csio_hw *, enum csio_hw_ev); 88 static void csio_hws_configuring(struct csio_hw *, enum csio_hw_ev); 89 static void csio_hws_initializing(struct csio_hw *, enum csio_hw_ev); 90 static void csio_hws_ready(struct csio_hw *, enum csio_hw_ev); 91 static void csio_hws_quiescing(struct csio_hw *, enum csio_hw_ev); 92 static void csio_hws_quiesced(struct csio_hw *, enum csio_hw_ev); 93 static void csio_hws_resetting(struct csio_hw *, enum csio_hw_ev); 94 static void csio_hws_removing(struct csio_hw *, enum csio_hw_ev); 95 static void csio_hws_pcierr(struct csio_hw *, enum csio_hw_ev); 96 97 static void csio_hw_initialize(struct csio_hw *hw); 98 static void csio_evtq_stop(struct csio_hw *hw); 99 static void csio_evtq_start(struct csio_hw *hw); 100 101 int csio_is_hw_ready(struct csio_hw *hw) 102 { 103 return csio_match_state(hw, csio_hws_ready); 104 } 105 106 int csio_is_hw_removing(struct csio_hw *hw) 107 { 108 return csio_match_state(hw, csio_hws_removing); 109 } 110 111 112 /* 113 * csio_hw_wait_op_done_val - wait until an operation is completed 114 * @hw: the HW module 115 * @reg: the register to check for completion 116 * @mask: a single-bit field within @reg that indicates completion 117 * @polarity: the value of the field when the operation is completed 118 * @attempts: number of check iterations 119 * @delay: delay in usecs between iterations 120 * @valp: where to store the value of the register at completion time 121 * 122 * Wait until an operation is completed by checking a bit in a register 123 * up to @attempts times. If @valp is not NULL the value of the register 124 * at the time it indicated completion is stored there. Returns 0 if the 125 * operation completes and -EAGAIN otherwise. 126 */ 127 static int 128 csio_hw_wait_op_done_val(struct csio_hw *hw, int reg, uint32_t mask, 129 int polarity, int attempts, int delay, uint32_t *valp) 130 { 131 uint32_t val; 132 while (1) { 133 val = csio_rd_reg32(hw, reg); 134 135 if (!!(val & mask) == polarity) { 136 if (valp) 137 *valp = val; 138 return 0; 139 } 140 141 if (--attempts == 0) 142 return -EAGAIN; 143 if (delay) 144 udelay(delay); 145 } 146 } 147 148 void 149 csio_set_reg_field(struct csio_hw *hw, uint32_t reg, uint32_t mask, 150 uint32_t value) 151 { 152 uint32_t val = csio_rd_reg32(hw, reg) & ~mask; 153 154 csio_wr_reg32(hw, val | value, reg); 155 /* Flush */ 156 csio_rd_reg32(hw, reg); 157 158 } 159 160 /* 161 * csio_hw_mc_read - read from MC through backdoor accesses 162 * @hw: the hw module 163 * @addr: address of first byte requested 164 * @data: 64 bytes of data containing the requested address 165 * @ecc: where to store the corresponding 64-bit ECC word 166 * 167 * Read 64 bytes of data from MC starting at a 64-byte-aligned address 168 * that covers the requested address @addr. If @parity is not %NULL it 169 * is assigned the 64-bit ECC word for the read data. 170 */ 171 int 172 csio_hw_mc_read(struct csio_hw *hw, uint32_t addr, __be32 *data, 173 uint64_t *ecc) 174 { 175 int i; 176 177 if (csio_rd_reg32(hw, MC_BIST_CMD) & START_BIST) 178 return -EBUSY; 179 csio_wr_reg32(hw, addr & ~0x3fU, MC_BIST_CMD_ADDR); 180 csio_wr_reg32(hw, 64, MC_BIST_CMD_LEN); 181 csio_wr_reg32(hw, 0xc, MC_BIST_DATA_PATTERN); 182 csio_wr_reg32(hw, BIST_OPCODE(1) | START_BIST | BIST_CMD_GAP(1), 183 MC_BIST_CMD); 184 i = csio_hw_wait_op_done_val(hw, MC_BIST_CMD, START_BIST, 185 0, 10, 1, NULL); 186 if (i) 187 return i; 188 189 #define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i) 190 191 for (i = 15; i >= 0; i--) 192 *data++ = htonl(csio_rd_reg32(hw, MC_DATA(i))); 193 if (ecc) 194 *ecc = csio_rd_reg64(hw, MC_DATA(16)); 195 #undef MC_DATA 196 return 0; 197 } 198 199 /* 200 * csio_hw_edc_read - read from EDC through backdoor accesses 201 * @hw: the hw module 202 * @idx: which EDC to access 203 * @addr: address of first byte requested 204 * @data: 64 bytes of data containing the requested address 205 * @ecc: where to store the corresponding 64-bit ECC word 206 * 207 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address 208 * that covers the requested address @addr. If @parity is not %NULL it 209 * is assigned the 64-bit ECC word for the read data. 210 */ 211 int 212 csio_hw_edc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data, 213 uint64_t *ecc) 214 { 215 int i; 216 217 idx *= EDC_STRIDE; 218 if (csio_rd_reg32(hw, EDC_BIST_CMD + idx) & START_BIST) 219 return -EBUSY; 220 csio_wr_reg32(hw, addr & ~0x3fU, EDC_BIST_CMD_ADDR + idx); 221 csio_wr_reg32(hw, 64, EDC_BIST_CMD_LEN + idx); 222 csio_wr_reg32(hw, 0xc, EDC_BIST_DATA_PATTERN + idx); 223 csio_wr_reg32(hw, BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST, 224 EDC_BIST_CMD + idx); 225 i = csio_hw_wait_op_done_val(hw, EDC_BIST_CMD + idx, START_BIST, 226 0, 10, 1, NULL); 227 if (i) 228 return i; 229 230 #define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx) 231 232 for (i = 15; i >= 0; i--) 233 *data++ = htonl(csio_rd_reg32(hw, EDC_DATA(i))); 234 if (ecc) 235 *ecc = csio_rd_reg64(hw, EDC_DATA(16)); 236 #undef EDC_DATA 237 return 0; 238 } 239 240 /* 241 * csio_mem_win_rw - read/write memory through PCIE memory window 242 * @hw: the adapter 243 * @addr: address of first byte requested 244 * @data: MEMWIN0_APERTURE bytes of data containing the requested address 245 * @dir: direction of transfer 1 => read, 0 => write 246 * 247 * Read/write MEMWIN0_APERTURE bytes of data from MC starting at a 248 * MEMWIN0_APERTURE-byte-aligned address that covers the requested 249 * address @addr. 250 */ 251 static int 252 csio_mem_win_rw(struct csio_hw *hw, u32 addr, u32 *data, int dir) 253 { 254 int i; 255 256 /* 257 * Setup offset into PCIE memory window. Address must be a 258 * MEMWIN0_APERTURE-byte-aligned address. (Read back MA register to 259 * ensure that changes propagate before we attempt to use the new 260 * values.) 261 */ 262 csio_wr_reg32(hw, addr & ~(MEMWIN0_APERTURE - 1), 263 PCIE_MEM_ACCESS_OFFSET); 264 csio_rd_reg32(hw, PCIE_MEM_ACCESS_OFFSET); 265 266 /* Collecting data 4 bytes at a time upto MEMWIN0_APERTURE */ 267 for (i = 0; i < MEMWIN0_APERTURE; i = i + sizeof(__be32)) { 268 if (dir) 269 *data++ = csio_rd_reg32(hw, (MEMWIN0_BASE + i)); 270 else 271 csio_wr_reg32(hw, *data++, (MEMWIN0_BASE + i)); 272 } 273 274 return 0; 275 } 276 277 /* 278 * csio_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window 279 * @hw: the csio_hw 280 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC 281 * @addr: address within indicated memory type 282 * @len: amount of memory to transfer 283 * @buf: host memory buffer 284 * @dir: direction of transfer 1 => read, 0 => write 285 * 286 * Reads/writes an [almost] arbitrary memory region in the firmware: the 287 * firmware memory address, length and host buffer must be aligned on 288 * 32-bit boudaries. The memory is transferred as a raw byte sequence 289 * from/to the firmware's memory. If this memory contains data 290 * structures which contain multi-byte integers, it's the callers 291 * responsibility to perform appropriate byte order conversions. 292 */ 293 static int 294 csio_memory_rw(struct csio_hw *hw, int mtype, u32 addr, u32 len, 295 uint32_t *buf, int dir) 296 { 297 uint32_t pos, start, end, offset, memoffset; 298 int ret; 299 uint32_t *data; 300 301 /* 302 * Argument sanity checks ... 303 */ 304 if ((addr & 0x3) || (len & 0x3)) 305 return -EINVAL; 306 307 data = kzalloc(MEMWIN0_APERTURE, GFP_KERNEL); 308 if (!data) 309 return -ENOMEM; 310 311 /* Offset into the region of memory which is being accessed 312 * MEM_EDC0 = 0 313 * MEM_EDC1 = 1 314 * MEM_MC = 2 315 */ 316 memoffset = (mtype * (5 * 1024 * 1024)); 317 318 /* Determine the PCIE_MEM_ACCESS_OFFSET */ 319 addr = addr + memoffset; 320 321 /* 322 * The underlaying EDC/MC read routines read MEMWIN0_APERTURE bytes 323 * at a time so we need to round down the start and round up the end. 324 * We'll start copying out of the first line at (addr - start) a word 325 * at a time. 326 */ 327 start = addr & ~(MEMWIN0_APERTURE-1); 328 end = (addr + len + MEMWIN0_APERTURE-1) & ~(MEMWIN0_APERTURE-1); 329 offset = (addr - start)/sizeof(__be32); 330 331 for (pos = start; pos < end; pos += MEMWIN0_APERTURE, offset = 0) { 332 /* 333 * If we're writing, copy the data from the caller's memory 334 * buffer 335 */ 336 if (!dir) { 337 /* 338 * If we're doing a partial write, then we need to do 339 * a read-modify-write ... 340 */ 341 if (offset || len < MEMWIN0_APERTURE) { 342 ret = csio_mem_win_rw(hw, pos, data, 1); 343 if (ret) { 344 kfree(data); 345 return ret; 346 } 347 } 348 while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) && 349 len > 0) { 350 data[offset++] = *buf++; 351 len -= sizeof(__be32); 352 } 353 } 354 355 /* 356 * Transfer a block of memory and bail if there's an error. 357 */ 358 ret = csio_mem_win_rw(hw, pos, data, dir); 359 if (ret) { 360 kfree(data); 361 return ret; 362 } 363 364 /* 365 * If we're reading, copy the data into the caller's memory 366 * buffer. 367 */ 368 if (dir) 369 while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) && 370 len > 0) { 371 *buf++ = data[offset++]; 372 len -= sizeof(__be32); 373 } 374 } 375 376 kfree(data); 377 378 return 0; 379 } 380 381 static int 382 csio_memory_write(struct csio_hw *hw, int mtype, u32 addr, u32 len, u32 *buf) 383 { 384 return csio_memory_rw(hw, mtype, addr, len, buf, 0); 385 } 386 387 /* 388 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms. 389 */ 390 #define EEPROM_MAX_RD_POLL 40 391 #define EEPROM_MAX_WR_POLL 6 392 #define EEPROM_STAT_ADDR 0x7bfc 393 #define VPD_BASE 0x400 394 #define VPD_BASE_OLD 0 395 #define VPD_LEN 512 396 #define VPD_INFO_FLD_HDR_SIZE 3 397 398 /* 399 * csio_hw_seeprom_read - read a serial EEPROM location 400 * @hw: hw to read 401 * @addr: EEPROM virtual address 402 * @data: where to store the read data 403 * 404 * Read a 32-bit word from a location in serial EEPROM using the card's PCI 405 * VPD capability. Note that this function must be called with a virtual 406 * address. 407 */ 408 static int 409 csio_hw_seeprom_read(struct csio_hw *hw, uint32_t addr, uint32_t *data) 410 { 411 uint16_t val = 0; 412 int attempts = EEPROM_MAX_RD_POLL; 413 uint32_t base = hw->params.pci.vpd_cap_addr; 414 415 if (addr >= EEPROMVSIZE || (addr & 3)) 416 return -EINVAL; 417 418 pci_write_config_word(hw->pdev, base + PCI_VPD_ADDR, (uint16_t)addr); 419 420 do { 421 udelay(10); 422 pci_read_config_word(hw->pdev, base + PCI_VPD_ADDR, &val); 423 } while (!(val & PCI_VPD_ADDR_F) && --attempts); 424 425 if (!(val & PCI_VPD_ADDR_F)) { 426 csio_err(hw, "reading EEPROM address 0x%x failed\n", addr); 427 return -EINVAL; 428 } 429 430 pci_read_config_dword(hw->pdev, base + PCI_VPD_DATA, data); 431 *data = le32_to_cpu(*data); 432 433 return 0; 434 } 435 436 /* 437 * Partial EEPROM Vital Product Data structure. Includes only the ID and 438 * VPD-R sections. 439 */ 440 struct t4_vpd_hdr { 441 u8 id_tag; 442 u8 id_len[2]; 443 u8 id_data[ID_LEN]; 444 u8 vpdr_tag; 445 u8 vpdr_len[2]; 446 }; 447 448 /* 449 * csio_hw_get_vpd_keyword_val - Locates an information field keyword in 450 * the VPD 451 * @v: Pointer to buffered vpd data structure 452 * @kw: The keyword to search for 453 * 454 * Returns the value of the information field keyword or 455 * -EINVAL otherwise. 456 */ 457 static int 458 csio_hw_get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw) 459 { 460 int32_t i; 461 int32_t offset , len; 462 const uint8_t *buf = &v->id_tag; 463 const uint8_t *vpdr_len = &v->vpdr_tag; 464 offset = sizeof(struct t4_vpd_hdr); 465 len = (uint16_t)vpdr_len[1] + ((uint16_t)vpdr_len[2] << 8); 466 467 if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN) 468 return -EINVAL; 469 470 for (i = offset; (i + VPD_INFO_FLD_HDR_SIZE) <= (offset + len);) { 471 if (memcmp(buf + i , kw, 2) == 0) { 472 i += VPD_INFO_FLD_HDR_SIZE; 473 return i; 474 } 475 476 i += VPD_INFO_FLD_HDR_SIZE + buf[i+2]; 477 } 478 479 return -EINVAL; 480 } 481 482 static int 483 csio_pci_capability(struct pci_dev *pdev, int cap, int *pos) 484 { 485 *pos = pci_find_capability(pdev, cap); 486 if (*pos) 487 return 0; 488 489 return -1; 490 } 491 492 /* 493 * csio_hw_get_vpd_params - read VPD parameters from VPD EEPROM 494 * @hw: HW module 495 * @p: where to store the parameters 496 * 497 * Reads card parameters stored in VPD EEPROM. 498 */ 499 static int 500 csio_hw_get_vpd_params(struct csio_hw *hw, struct csio_vpd *p) 501 { 502 int i, ret, ec, sn, addr; 503 uint8_t *vpd, csum; 504 const struct t4_vpd_hdr *v; 505 /* To get around compilation warning from strstrip */ 506 char *s; 507 508 if (csio_is_valid_vpd(hw)) 509 return 0; 510 511 ret = csio_pci_capability(hw->pdev, PCI_CAP_ID_VPD, 512 &hw->params.pci.vpd_cap_addr); 513 if (ret) 514 return -EINVAL; 515 516 vpd = kzalloc(VPD_LEN, GFP_ATOMIC); 517 if (vpd == NULL) 518 return -ENOMEM; 519 520 /* 521 * Card information normally starts at VPD_BASE but early cards had 522 * it at 0. 523 */ 524 ret = csio_hw_seeprom_read(hw, VPD_BASE, (uint32_t *)(vpd)); 525 addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD; 526 527 for (i = 0; i < VPD_LEN; i += 4) { 528 ret = csio_hw_seeprom_read(hw, addr + i, (uint32_t *)(vpd + i)); 529 if (ret) { 530 kfree(vpd); 531 return ret; 532 } 533 } 534 535 /* Reset the VPD flag! */ 536 hw->flags &= (~CSIO_HWF_VPD_VALID); 537 538 v = (const struct t4_vpd_hdr *)vpd; 539 540 #define FIND_VPD_KW(var, name) do { \ 541 var = csio_hw_get_vpd_keyword_val(v, name); \ 542 if (var < 0) { \ 543 csio_err(hw, "missing VPD keyword " name "\n"); \ 544 kfree(vpd); \ 545 return -EINVAL; \ 546 } \ 547 } while (0) 548 549 FIND_VPD_KW(i, "RV"); 550 for (csum = 0; i >= 0; i--) 551 csum += vpd[i]; 552 553 if (csum) { 554 csio_err(hw, "corrupted VPD EEPROM, actual csum %u\n", csum); 555 kfree(vpd); 556 return -EINVAL; 557 } 558 FIND_VPD_KW(ec, "EC"); 559 FIND_VPD_KW(sn, "SN"); 560 #undef FIND_VPD_KW 561 562 memcpy(p->id, v->id_data, ID_LEN); 563 s = strstrip(p->id); 564 memcpy(p->ec, vpd + ec, EC_LEN); 565 s = strstrip(p->ec); 566 i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2]; 567 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN)); 568 s = strstrip(p->sn); 569 570 csio_valid_vpd_copied(hw); 571 572 kfree(vpd); 573 return 0; 574 } 575 576 /* 577 * csio_hw_sf1_read - read data from the serial flash 578 * @hw: the HW module 579 * @byte_cnt: number of bytes to read 580 * @cont: whether another operation will be chained 581 * @lock: whether to lock SF for PL access only 582 * @valp: where to store the read data 583 * 584 * Reads up to 4 bytes of data from the serial flash. The location of 585 * the read needs to be specified prior to calling this by issuing the 586 * appropriate commands to the serial flash. 587 */ 588 static int 589 csio_hw_sf1_read(struct csio_hw *hw, uint32_t byte_cnt, int32_t cont, 590 int32_t lock, uint32_t *valp) 591 { 592 int ret; 593 594 if (!byte_cnt || byte_cnt > 4) 595 return -EINVAL; 596 if (csio_rd_reg32(hw, SF_OP) & SF_BUSY) 597 return -EBUSY; 598 599 cont = cont ? SF_CONT : 0; 600 lock = lock ? SF_LOCK : 0; 601 602 csio_wr_reg32(hw, lock | cont | BYTECNT(byte_cnt - 1), SF_OP); 603 ret = csio_hw_wait_op_done_val(hw, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 604 10, NULL); 605 if (!ret) 606 *valp = csio_rd_reg32(hw, SF_DATA); 607 return ret; 608 } 609 610 /* 611 * csio_hw_sf1_write - write data to the serial flash 612 * @hw: the HW module 613 * @byte_cnt: number of bytes to write 614 * @cont: whether another operation will be chained 615 * @lock: whether to lock SF for PL access only 616 * @val: value to write 617 * 618 * Writes up to 4 bytes of data to the serial flash. The location of 619 * the write needs to be specified prior to calling this by issuing the 620 * appropriate commands to the serial flash. 621 */ 622 static int 623 csio_hw_sf1_write(struct csio_hw *hw, uint32_t byte_cnt, uint32_t cont, 624 int32_t lock, uint32_t val) 625 { 626 if (!byte_cnt || byte_cnt > 4) 627 return -EINVAL; 628 if (csio_rd_reg32(hw, SF_OP) & SF_BUSY) 629 return -EBUSY; 630 631 cont = cont ? SF_CONT : 0; 632 lock = lock ? SF_LOCK : 0; 633 634 csio_wr_reg32(hw, val, SF_DATA); 635 csio_wr_reg32(hw, cont | BYTECNT(byte_cnt - 1) | OP_WR | lock, SF_OP); 636 637 return csio_hw_wait_op_done_val(hw, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 638 10, NULL); 639 } 640 641 /* 642 * csio_hw_flash_wait_op - wait for a flash operation to complete 643 * @hw: the HW module 644 * @attempts: max number of polls of the status register 645 * @delay: delay between polls in ms 646 * 647 * Wait for a flash operation to complete by polling the status register. 648 */ 649 static int 650 csio_hw_flash_wait_op(struct csio_hw *hw, int32_t attempts, int32_t delay) 651 { 652 int ret; 653 uint32_t status; 654 655 while (1) { 656 ret = csio_hw_sf1_write(hw, 1, 1, 1, SF_RD_STATUS); 657 if (ret != 0) 658 return ret; 659 660 ret = csio_hw_sf1_read(hw, 1, 0, 1, &status); 661 if (ret != 0) 662 return ret; 663 664 if (!(status & 1)) 665 return 0; 666 if (--attempts == 0) 667 return -EAGAIN; 668 if (delay) 669 msleep(delay); 670 } 671 } 672 673 /* 674 * csio_hw_read_flash - read words from serial flash 675 * @hw: the HW module 676 * @addr: the start address for the read 677 * @nwords: how many 32-bit words to read 678 * @data: where to store the read data 679 * @byte_oriented: whether to store data as bytes or as words 680 * 681 * Read the specified number of 32-bit words from the serial flash. 682 * If @byte_oriented is set the read data is stored as a byte array 683 * (i.e., big-endian), otherwise as 32-bit words in the platform's 684 * natural endianess. 685 */ 686 static int 687 csio_hw_read_flash(struct csio_hw *hw, uint32_t addr, uint32_t nwords, 688 uint32_t *data, int32_t byte_oriented) 689 { 690 int ret; 691 692 if (addr + nwords * sizeof(uint32_t) > hw->params.sf_size || (addr & 3)) 693 return -EINVAL; 694 695 addr = swab32(addr) | SF_RD_DATA_FAST; 696 697 ret = csio_hw_sf1_write(hw, 4, 1, 0, addr); 698 if (ret != 0) 699 return ret; 700 701 ret = csio_hw_sf1_read(hw, 1, 1, 0, data); 702 if (ret != 0) 703 return ret; 704 705 for ( ; nwords; nwords--, data++) { 706 ret = csio_hw_sf1_read(hw, 4, nwords > 1, nwords == 1, data); 707 if (nwords == 1) 708 csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */ 709 if (ret) 710 return ret; 711 if (byte_oriented) 712 *data = htonl(*data); 713 } 714 return 0; 715 } 716 717 /* 718 * csio_hw_write_flash - write up to a page of data to the serial flash 719 * @hw: the hw 720 * @addr: the start address to write 721 * @n: length of data to write in bytes 722 * @data: the data to write 723 * 724 * Writes up to a page of data (256 bytes) to the serial flash starting 725 * at the given address. All the data must be written to the same page. 726 */ 727 static int 728 csio_hw_write_flash(struct csio_hw *hw, uint32_t addr, 729 uint32_t n, const uint8_t *data) 730 { 731 int ret = -EINVAL; 732 uint32_t buf[64]; 733 uint32_t i, c, left, val, offset = addr & 0xff; 734 735 if (addr >= hw->params.sf_size || offset + n > SF_PAGE_SIZE) 736 return -EINVAL; 737 738 val = swab32(addr) | SF_PROG_PAGE; 739 740 ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE); 741 if (ret != 0) 742 goto unlock; 743 744 ret = csio_hw_sf1_write(hw, 4, 1, 1, val); 745 if (ret != 0) 746 goto unlock; 747 748 for (left = n; left; left -= c) { 749 c = min(left, 4U); 750 for (val = 0, i = 0; i < c; ++i) 751 val = (val << 8) + *data++; 752 753 ret = csio_hw_sf1_write(hw, c, c != left, 1, val); 754 if (ret) 755 goto unlock; 756 } 757 ret = csio_hw_flash_wait_op(hw, 8, 1); 758 if (ret) 759 goto unlock; 760 761 csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */ 762 763 /* Read the page to verify the write succeeded */ 764 ret = csio_hw_read_flash(hw, addr & ~0xff, ARRAY_SIZE(buf), buf, 1); 765 if (ret) 766 return ret; 767 768 if (memcmp(data - n, (uint8_t *)buf + offset, n)) { 769 csio_err(hw, 770 "failed to correctly write the flash page at %#x\n", 771 addr); 772 return -EINVAL; 773 } 774 775 return 0; 776 777 unlock: 778 csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */ 779 return ret; 780 } 781 782 /* 783 * csio_hw_flash_erase_sectors - erase a range of flash sectors 784 * @hw: the HW module 785 * @start: the first sector to erase 786 * @end: the last sector to erase 787 * 788 * Erases the sectors in the given inclusive range. 789 */ 790 static int 791 csio_hw_flash_erase_sectors(struct csio_hw *hw, int32_t start, int32_t end) 792 { 793 int ret = 0; 794 795 while (start <= end) { 796 797 ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE); 798 if (ret != 0) 799 goto out; 800 801 ret = csio_hw_sf1_write(hw, 4, 0, 1, 802 SF_ERASE_SECTOR | (start << 8)); 803 if (ret != 0) 804 goto out; 805 806 ret = csio_hw_flash_wait_op(hw, 14, 500); 807 if (ret != 0) 808 goto out; 809 810 start++; 811 } 812 out: 813 if (ret) 814 csio_err(hw, "erase of flash sector %d failed, error %d\n", 815 start, ret); 816 csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */ 817 return 0; 818 } 819 820 /* 821 * csio_hw_flash_cfg_addr - return the address of the flash 822 * configuration file 823 * @hw: the HW module 824 * 825 * Return the address within the flash where the Firmware Configuration 826 * File is stored. 827 */ 828 static unsigned int 829 csio_hw_flash_cfg_addr(struct csio_hw *hw) 830 { 831 if (hw->params.sf_size == 0x100000) 832 return FPGA_FLASH_CFG_OFFSET; 833 else 834 return FLASH_CFG_OFFSET; 835 } 836 837 static void 838 csio_hw_print_fw_version(struct csio_hw *hw, char *str) 839 { 840 csio_info(hw, "%s: %u.%u.%u.%u\n", str, 841 FW_HDR_FW_VER_MAJOR_GET(hw->fwrev), 842 FW_HDR_FW_VER_MINOR_GET(hw->fwrev), 843 FW_HDR_FW_VER_MICRO_GET(hw->fwrev), 844 FW_HDR_FW_VER_BUILD_GET(hw->fwrev)); 845 } 846 847 /* 848 * csio_hw_get_fw_version - read the firmware version 849 * @hw: HW module 850 * @vers: where to place the version 851 * 852 * Reads the FW version from flash. 853 */ 854 static int 855 csio_hw_get_fw_version(struct csio_hw *hw, uint32_t *vers) 856 { 857 return csio_hw_read_flash(hw, FW_IMG_START + 858 offsetof(struct fw_hdr, fw_ver), 1, 859 vers, 0); 860 } 861 862 /* 863 * csio_hw_get_tp_version - read the TP microcode version 864 * @hw: HW module 865 * @vers: where to place the version 866 * 867 * Reads the TP microcode version from flash. 868 */ 869 static int 870 csio_hw_get_tp_version(struct csio_hw *hw, u32 *vers) 871 { 872 return csio_hw_read_flash(hw, FLASH_FW_START + 873 offsetof(struct fw_hdr, tp_microcode_ver), 1, 874 vers, 0); 875 } 876 877 /* 878 * csio_hw_check_fw_version - check if the FW is compatible with 879 * this driver 880 * @hw: HW module 881 * 882 * Checks if an adapter's FW is compatible with the driver. Returns 0 883 * if there's exact match, a negative error if the version could not be 884 * read or there's a major/minor version mismatch/minor. 885 */ 886 static int 887 csio_hw_check_fw_version(struct csio_hw *hw) 888 { 889 int ret, major, minor, micro; 890 891 ret = csio_hw_get_fw_version(hw, &hw->fwrev); 892 if (!ret) 893 ret = csio_hw_get_tp_version(hw, &hw->tp_vers); 894 if (ret) 895 return ret; 896 897 major = FW_HDR_FW_VER_MAJOR_GET(hw->fwrev); 898 minor = FW_HDR_FW_VER_MINOR_GET(hw->fwrev); 899 micro = FW_HDR_FW_VER_MICRO_GET(hw->fwrev); 900 901 if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */ 902 csio_err(hw, "card FW has major version %u, driver wants %u\n", 903 major, FW_VERSION_MAJOR); 904 return -EINVAL; 905 } 906 907 if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO) 908 return 0; /* perfect match */ 909 910 /* Minor/micro version mismatch */ 911 return -EINVAL; 912 } 913 914 /* 915 * csio_hw_fw_dload - download firmware. 916 * @hw: HW module 917 * @fw_data: firmware image to write. 918 * @size: image size 919 * 920 * Write the supplied firmware image to the card's serial flash. 921 */ 922 static int 923 csio_hw_fw_dload(struct csio_hw *hw, uint8_t *fw_data, uint32_t size) 924 { 925 uint32_t csum; 926 int32_t addr; 927 int ret; 928 uint32_t i; 929 uint8_t first_page[SF_PAGE_SIZE]; 930 const __be32 *p = (const __be32 *)fw_data; 931 struct fw_hdr *hdr = (struct fw_hdr *)fw_data; 932 uint32_t sf_sec_size; 933 934 if ((!hw->params.sf_size) || (!hw->params.sf_nsec)) { 935 csio_err(hw, "Serial Flash data invalid\n"); 936 return -EINVAL; 937 } 938 939 if (!size) { 940 csio_err(hw, "FW image has no data\n"); 941 return -EINVAL; 942 } 943 944 if (size & 511) { 945 csio_err(hw, "FW image size not multiple of 512 bytes\n"); 946 return -EINVAL; 947 } 948 949 if (ntohs(hdr->len512) * 512 != size) { 950 csio_err(hw, "FW image size differs from size in FW header\n"); 951 return -EINVAL; 952 } 953 954 if (size > FW_MAX_SIZE) { 955 csio_err(hw, "FW image too large, max is %u bytes\n", 956 FW_MAX_SIZE); 957 return -EINVAL; 958 } 959 960 for (csum = 0, i = 0; i < size / sizeof(csum); i++) 961 csum += ntohl(p[i]); 962 963 if (csum != 0xffffffff) { 964 csio_err(hw, "corrupted firmware image, checksum %#x\n", csum); 965 return -EINVAL; 966 } 967 968 sf_sec_size = hw->params.sf_size / hw->params.sf_nsec; 969 i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */ 970 971 csio_dbg(hw, "Erasing sectors... start:%d end:%d\n", 972 FW_START_SEC, FW_START_SEC + i - 1); 973 974 ret = csio_hw_flash_erase_sectors(hw, FW_START_SEC, 975 FW_START_SEC + i - 1); 976 if (ret) { 977 csio_err(hw, "Flash Erase failed\n"); 978 goto out; 979 } 980 981 /* 982 * We write the correct version at the end so the driver can see a bad 983 * version if the FW write fails. Start by writing a copy of the 984 * first page with a bad version. 985 */ 986 memcpy(first_page, fw_data, SF_PAGE_SIZE); 987 ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff); 988 ret = csio_hw_write_flash(hw, FW_IMG_START, SF_PAGE_SIZE, first_page); 989 if (ret) 990 goto out; 991 992 csio_dbg(hw, "Writing Flash .. start:%d end:%d\n", 993 FW_IMG_START, FW_IMG_START + size); 994 995 addr = FW_IMG_START; 996 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) { 997 addr += SF_PAGE_SIZE; 998 fw_data += SF_PAGE_SIZE; 999 ret = csio_hw_write_flash(hw, addr, SF_PAGE_SIZE, fw_data); 1000 if (ret) 1001 goto out; 1002 } 1003 1004 ret = csio_hw_write_flash(hw, 1005 FW_IMG_START + 1006 offsetof(struct fw_hdr, fw_ver), 1007 sizeof(hdr->fw_ver), 1008 (const uint8_t *)&hdr->fw_ver); 1009 1010 out: 1011 if (ret) 1012 csio_err(hw, "firmware download failed, error %d\n", ret); 1013 return ret; 1014 } 1015 1016 static int 1017 csio_hw_get_flash_params(struct csio_hw *hw) 1018 { 1019 int ret; 1020 uint32_t info = 0; 1021 1022 ret = csio_hw_sf1_write(hw, 1, 1, 0, SF_RD_ID); 1023 if (!ret) 1024 ret = csio_hw_sf1_read(hw, 3, 0, 1, &info); 1025 csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */ 1026 if (ret != 0) 1027 return ret; 1028 1029 if ((info & 0xff) != 0x20) /* not a Numonix flash */ 1030 return -EINVAL; 1031 info >>= 16; /* log2 of size */ 1032 if (info >= 0x14 && info < 0x18) 1033 hw->params.sf_nsec = 1 << (info - 16); 1034 else if (info == 0x18) 1035 hw->params.sf_nsec = 64; 1036 else 1037 return -EINVAL; 1038 hw->params.sf_size = 1 << info; 1039 1040 return 0; 1041 } 1042 1043 static void 1044 csio_set_pcie_completion_timeout(struct csio_hw *hw, u8 range) 1045 { 1046 uint16_t val; 1047 uint32_t pcie_cap; 1048 1049 if (!csio_pci_capability(hw->pdev, PCI_CAP_ID_EXP, &pcie_cap)) { 1050 pci_read_config_word(hw->pdev, 1051 pcie_cap + PCI_EXP_DEVCTL2, &val); 1052 val &= 0xfff0; 1053 val |= range ; 1054 pci_write_config_word(hw->pdev, 1055 pcie_cap + PCI_EXP_DEVCTL2, val); 1056 } 1057 } 1058 1059 1060 /* 1061 * Return the specified PCI-E Configuration Space register from our Physical 1062 * Function. We try first via a Firmware LDST Command since we prefer to let 1063 * the firmware own all of these registers, but if that fails we go for it 1064 * directly ourselves. 1065 */ 1066 static uint32_t 1067 csio_read_pcie_cfg4(struct csio_hw *hw, int reg) 1068 { 1069 u32 val = 0; 1070 struct csio_mb *mbp; 1071 int rv; 1072 struct fw_ldst_cmd *ldst_cmd; 1073 1074 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1075 if (!mbp) { 1076 CSIO_INC_STATS(hw, n_err_nomem); 1077 pci_read_config_dword(hw->pdev, reg, &val); 1078 return val; 1079 } 1080 1081 csio_mb_ldst(hw, mbp, CSIO_MB_DEFAULT_TMO, reg); 1082 1083 rv = csio_mb_issue(hw, mbp); 1084 1085 /* 1086 * If the LDST Command suucceeded, exctract the returned register 1087 * value. Otherwise read it directly ourself. 1088 */ 1089 if (rv == 0) { 1090 ldst_cmd = (struct fw_ldst_cmd *)(mbp->mb); 1091 val = ntohl(ldst_cmd->u.pcie.data[0]); 1092 } else 1093 pci_read_config_dword(hw->pdev, reg, &val); 1094 1095 mempool_free(mbp, hw->mb_mempool); 1096 1097 return val; 1098 } /* csio_read_pcie_cfg4 */ 1099 1100 static int 1101 csio_hw_set_mem_win(struct csio_hw *hw) 1102 { 1103 u32 bar0; 1104 1105 /* 1106 * Truncation intentional: we only read the bottom 32-bits of the 1107 * 64-bit BAR0/BAR1 ... We use the hardware backdoor mechanism to 1108 * read BAR0 instead of using pci_resource_start() because we could be 1109 * operating from within a Virtual Machine which is trapping our 1110 * accesses to our Configuration Space and we need to set up the PCI-E 1111 * Memory Window decoders with the actual addresses which will be 1112 * coming across the PCI-E link. 1113 */ 1114 bar0 = csio_read_pcie_cfg4(hw, PCI_BASE_ADDRESS_0); 1115 bar0 &= PCI_BASE_ADDRESS_MEM_MASK; 1116 1117 /* 1118 * Set up memory window for accessing adapter memory ranges. (Read 1119 * back MA register to ensure that changes propagate before we attempt 1120 * to use the new values.) 1121 */ 1122 csio_wr_reg32(hw, (bar0 + MEMWIN0_BASE) | BIR(0) | 1123 WINDOW(ilog2(MEMWIN0_APERTURE) - 10), 1124 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0)); 1125 csio_wr_reg32(hw, (bar0 + MEMWIN1_BASE) | BIR(0) | 1126 WINDOW(ilog2(MEMWIN1_APERTURE) - 10), 1127 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1)); 1128 csio_wr_reg32(hw, (bar0 + MEMWIN2_BASE) | BIR(0) | 1129 WINDOW(ilog2(MEMWIN2_APERTURE) - 10), 1130 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2)); 1131 csio_rd_reg32(hw, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2)); 1132 return 0; 1133 } /* csio_hw_set_mem_win */ 1134 1135 1136 1137 /*****************************************************************************/ 1138 /* HW State machine assists */ 1139 /*****************************************************************************/ 1140 1141 static int 1142 csio_hw_dev_ready(struct csio_hw *hw) 1143 { 1144 uint32_t reg; 1145 int cnt = 6; 1146 1147 while (((reg = csio_rd_reg32(hw, PL_WHOAMI)) == 0xFFFFFFFF) && 1148 (--cnt != 0)) 1149 mdelay(100); 1150 1151 if ((cnt == 0) && (((int32_t)(SOURCEPF_GET(reg)) < 0) || 1152 (SOURCEPF_GET(reg) >= CSIO_MAX_PFN))) { 1153 csio_err(hw, "PL_WHOAMI returned 0x%x, cnt:%d\n", reg, cnt); 1154 return -EIO; 1155 } 1156 1157 hw->pfn = SOURCEPF_GET(reg); 1158 1159 return 0; 1160 } 1161 1162 /* 1163 * csio_do_hello - Perform the HELLO FW Mailbox command and process response. 1164 * @hw: HW module 1165 * @state: Device state 1166 * 1167 * FW_HELLO_CMD has to be polled for completion. 1168 */ 1169 static int 1170 csio_do_hello(struct csio_hw *hw, enum csio_dev_state *state) 1171 { 1172 struct csio_mb *mbp; 1173 int rv = 0; 1174 enum csio_dev_master master; 1175 enum fw_retval retval; 1176 uint8_t mpfn; 1177 char state_str[16]; 1178 int retries = FW_CMD_HELLO_RETRIES; 1179 1180 memset(state_str, 0, sizeof(state_str)); 1181 1182 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1183 if (!mbp) { 1184 rv = -ENOMEM; 1185 CSIO_INC_STATS(hw, n_err_nomem); 1186 goto out; 1187 } 1188 1189 master = csio_force_master ? CSIO_MASTER_MUST : CSIO_MASTER_MAY; 1190 1191 retry: 1192 csio_mb_hello(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 1193 hw->pfn, master, NULL); 1194 1195 rv = csio_mb_issue(hw, mbp); 1196 if (rv) { 1197 csio_err(hw, "failed to issue HELLO cmd. ret:%d.\n", rv); 1198 goto out_free_mb; 1199 } 1200 1201 csio_mb_process_hello_rsp(hw, mbp, &retval, state, &mpfn); 1202 if (retval != FW_SUCCESS) { 1203 csio_err(hw, "HELLO cmd failed with ret: %d\n", retval); 1204 rv = -EINVAL; 1205 goto out_free_mb; 1206 } 1207 1208 /* Firmware has designated us to be master */ 1209 if (hw->pfn == mpfn) { 1210 hw->flags |= CSIO_HWF_MASTER; 1211 } else if (*state == CSIO_DEV_STATE_UNINIT) { 1212 /* 1213 * If we're not the Master PF then we need to wait around for 1214 * the Master PF Driver to finish setting up the adapter. 1215 * 1216 * Note that we also do this wait if we're a non-Master-capable 1217 * PF and there is no current Master PF; a Master PF may show up 1218 * momentarily and we wouldn't want to fail pointlessly. (This 1219 * can happen when an OS loads lots of different drivers rapidly 1220 * at the same time). In this case, the Master PF returned by 1221 * the firmware will be PCIE_FW_MASTER_MASK so the test below 1222 * will work ... 1223 */ 1224 1225 int waiting = FW_CMD_HELLO_TIMEOUT; 1226 1227 /* 1228 * Wait for the firmware to either indicate an error or 1229 * initialized state. If we see either of these we bail out 1230 * and report the issue to the caller. If we exhaust the 1231 * "hello timeout" and we haven't exhausted our retries, try 1232 * again. Otherwise bail with a timeout error. 1233 */ 1234 for (;;) { 1235 uint32_t pcie_fw; 1236 1237 msleep(50); 1238 waiting -= 50; 1239 1240 /* 1241 * If neither Error nor Initialialized are indicated 1242 * by the firmware keep waiting till we exaust our 1243 * timeout ... and then retry if we haven't exhausted 1244 * our retries ... 1245 */ 1246 pcie_fw = csio_rd_reg32(hw, PCIE_FW); 1247 if (!(pcie_fw & (PCIE_FW_ERR|PCIE_FW_INIT))) { 1248 if (waiting <= 0) { 1249 if (retries-- > 0) 1250 goto retry; 1251 1252 rv = -ETIMEDOUT; 1253 break; 1254 } 1255 continue; 1256 } 1257 1258 /* 1259 * We either have an Error or Initialized condition 1260 * report errors preferentially. 1261 */ 1262 if (state) { 1263 if (pcie_fw & PCIE_FW_ERR) { 1264 *state = CSIO_DEV_STATE_ERR; 1265 rv = -ETIMEDOUT; 1266 } else if (pcie_fw & PCIE_FW_INIT) 1267 *state = CSIO_DEV_STATE_INIT; 1268 } 1269 1270 /* 1271 * If we arrived before a Master PF was selected and 1272 * there's not a valid Master PF, grab its identity 1273 * for our caller. 1274 */ 1275 if (mpfn == PCIE_FW_MASTER_MASK && 1276 (pcie_fw & PCIE_FW_MASTER_VLD)) 1277 mpfn = PCIE_FW_MASTER_GET(pcie_fw); 1278 break; 1279 } 1280 hw->flags &= ~CSIO_HWF_MASTER; 1281 } 1282 1283 switch (*state) { 1284 case CSIO_DEV_STATE_UNINIT: 1285 strcpy(state_str, "Initializing"); 1286 break; 1287 case CSIO_DEV_STATE_INIT: 1288 strcpy(state_str, "Initialized"); 1289 break; 1290 case CSIO_DEV_STATE_ERR: 1291 strcpy(state_str, "Error"); 1292 break; 1293 default: 1294 strcpy(state_str, "Unknown"); 1295 break; 1296 } 1297 1298 if (hw->pfn == mpfn) 1299 csio_info(hw, "PF: %d, Coming up as MASTER, HW state: %s\n", 1300 hw->pfn, state_str); 1301 else 1302 csio_info(hw, 1303 "PF: %d, Coming up as SLAVE, Master PF: %d, HW state: %s\n", 1304 hw->pfn, mpfn, state_str); 1305 1306 out_free_mb: 1307 mempool_free(mbp, hw->mb_mempool); 1308 out: 1309 return rv; 1310 } 1311 1312 /* 1313 * csio_do_bye - Perform the BYE FW Mailbox command and process response. 1314 * @hw: HW module 1315 * 1316 */ 1317 static int 1318 csio_do_bye(struct csio_hw *hw) 1319 { 1320 struct csio_mb *mbp; 1321 enum fw_retval retval; 1322 1323 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1324 if (!mbp) { 1325 CSIO_INC_STATS(hw, n_err_nomem); 1326 return -ENOMEM; 1327 } 1328 1329 csio_mb_bye(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL); 1330 1331 if (csio_mb_issue(hw, mbp)) { 1332 csio_err(hw, "Issue of BYE command failed\n"); 1333 mempool_free(mbp, hw->mb_mempool); 1334 return -EINVAL; 1335 } 1336 1337 retval = csio_mb_fw_retval(mbp); 1338 if (retval != FW_SUCCESS) { 1339 mempool_free(mbp, hw->mb_mempool); 1340 return -EINVAL; 1341 } 1342 1343 mempool_free(mbp, hw->mb_mempool); 1344 1345 return 0; 1346 } 1347 1348 /* 1349 * csio_do_reset- Perform the device reset. 1350 * @hw: HW module 1351 * @fw_rst: FW reset 1352 * 1353 * If fw_rst is set, issues FW reset mbox cmd otherwise 1354 * does PIO reset. 1355 * Performs reset of the function. 1356 */ 1357 static int 1358 csio_do_reset(struct csio_hw *hw, bool fw_rst) 1359 { 1360 struct csio_mb *mbp; 1361 enum fw_retval retval; 1362 1363 if (!fw_rst) { 1364 /* PIO reset */ 1365 csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST); 1366 mdelay(2000); 1367 return 0; 1368 } 1369 1370 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1371 if (!mbp) { 1372 CSIO_INC_STATS(hw, n_err_nomem); 1373 return -ENOMEM; 1374 } 1375 1376 csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO, 1377 PIORSTMODE | PIORST, 0, NULL); 1378 1379 if (csio_mb_issue(hw, mbp)) { 1380 csio_err(hw, "Issue of RESET command failed.n"); 1381 mempool_free(mbp, hw->mb_mempool); 1382 return -EINVAL; 1383 } 1384 1385 retval = csio_mb_fw_retval(mbp); 1386 if (retval != FW_SUCCESS) { 1387 csio_err(hw, "RESET cmd failed with ret:0x%x.\n", retval); 1388 mempool_free(mbp, hw->mb_mempool); 1389 return -EINVAL; 1390 } 1391 1392 mempool_free(mbp, hw->mb_mempool); 1393 1394 return 0; 1395 } 1396 1397 static int 1398 csio_hw_validate_caps(struct csio_hw *hw, struct csio_mb *mbp) 1399 { 1400 struct fw_caps_config_cmd *rsp = (struct fw_caps_config_cmd *)mbp->mb; 1401 uint16_t caps; 1402 1403 caps = ntohs(rsp->fcoecaps); 1404 1405 if (!(caps & FW_CAPS_CONFIG_FCOE_INITIATOR)) { 1406 csio_err(hw, "No FCoE Initiator capability in the firmware.\n"); 1407 return -EINVAL; 1408 } 1409 1410 if (!(caps & FW_CAPS_CONFIG_FCOE_CTRL_OFLD)) { 1411 csio_err(hw, "No FCoE Control Offload capability\n"); 1412 return -EINVAL; 1413 } 1414 1415 return 0; 1416 } 1417 1418 /* 1419 * csio_hw_fw_halt - issue a reset/halt to FW and put uP into RESET 1420 * @hw: the HW module 1421 * @mbox: mailbox to use for the FW RESET command (if desired) 1422 * @force: force uP into RESET even if FW RESET command fails 1423 * 1424 * Issues a RESET command to firmware (if desired) with a HALT indication 1425 * and then puts the microprocessor into RESET state. The RESET command 1426 * will only be issued if a legitimate mailbox is provided (mbox <= 1427 * PCIE_FW_MASTER_MASK). 1428 * 1429 * This is generally used in order for the host to safely manipulate the 1430 * adapter without fear of conflicting with whatever the firmware might 1431 * be doing. The only way out of this state is to RESTART the firmware 1432 * ... 1433 */ 1434 static int 1435 csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force) 1436 { 1437 enum fw_retval retval = 0; 1438 1439 /* 1440 * If a legitimate mailbox is provided, issue a RESET command 1441 * with a HALT indication. 1442 */ 1443 if (mbox <= PCIE_FW_MASTER_MASK) { 1444 struct csio_mb *mbp; 1445 1446 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1447 if (!mbp) { 1448 CSIO_INC_STATS(hw, n_err_nomem); 1449 return -ENOMEM; 1450 } 1451 1452 csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO, 1453 PIORSTMODE | PIORST, FW_RESET_CMD_HALT(1), 1454 NULL); 1455 1456 if (csio_mb_issue(hw, mbp)) { 1457 csio_err(hw, "Issue of RESET command failed!\n"); 1458 mempool_free(mbp, hw->mb_mempool); 1459 return -EINVAL; 1460 } 1461 1462 retval = csio_mb_fw_retval(mbp); 1463 mempool_free(mbp, hw->mb_mempool); 1464 } 1465 1466 /* 1467 * Normally we won't complete the operation if the firmware RESET 1468 * command fails but if our caller insists we'll go ahead and put the 1469 * uP into RESET. This can be useful if the firmware is hung or even 1470 * missing ... We'll have to take the risk of putting the uP into 1471 * RESET without the cooperation of firmware in that case. 1472 * 1473 * We also force the firmware's HALT flag to be on in case we bypassed 1474 * the firmware RESET command above or we're dealing with old firmware 1475 * which doesn't have the HALT capability. This will serve as a flag 1476 * for the incoming firmware to know that it's coming out of a HALT 1477 * rather than a RESET ... if it's new enough to understand that ... 1478 */ 1479 if (retval == 0 || force) { 1480 csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, UPCRST); 1481 csio_set_reg_field(hw, PCIE_FW, PCIE_FW_HALT, PCIE_FW_HALT); 1482 } 1483 1484 /* 1485 * And we always return the result of the firmware RESET command 1486 * even when we force the uP into RESET ... 1487 */ 1488 return retval ? -EINVAL : 0; 1489 } 1490 1491 /* 1492 * csio_hw_fw_restart - restart the firmware by taking the uP out of RESET 1493 * @hw: the HW module 1494 * @reset: if we want to do a RESET to restart things 1495 * 1496 * Restart firmware previously halted by csio_hw_fw_halt(). On successful 1497 * return the previous PF Master remains as the new PF Master and there 1498 * is no need to issue a new HELLO command, etc. 1499 * 1500 * We do this in two ways: 1501 * 1502 * 1. If we're dealing with newer firmware we'll simply want to take 1503 * the chip's microprocessor out of RESET. This will cause the 1504 * firmware to start up from its start vector. And then we'll loop 1505 * until the firmware indicates it's started again (PCIE_FW.HALT 1506 * reset to 0) or we timeout. 1507 * 1508 * 2. If we're dealing with older firmware then we'll need to RESET 1509 * the chip since older firmware won't recognize the PCIE_FW.HALT 1510 * flag and automatically RESET itself on startup. 1511 */ 1512 static int 1513 csio_hw_fw_restart(struct csio_hw *hw, uint32_t mbox, int32_t reset) 1514 { 1515 if (reset) { 1516 /* 1517 * Since we're directing the RESET instead of the firmware 1518 * doing it automatically, we need to clear the PCIE_FW.HALT 1519 * bit. 1520 */ 1521 csio_set_reg_field(hw, PCIE_FW, PCIE_FW_HALT, 0); 1522 1523 /* 1524 * If we've been given a valid mailbox, first try to get the 1525 * firmware to do the RESET. If that works, great and we can 1526 * return success. Otherwise, if we haven't been given a 1527 * valid mailbox or the RESET command failed, fall back to 1528 * hitting the chip with a hammer. 1529 */ 1530 if (mbox <= PCIE_FW_MASTER_MASK) { 1531 csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, 0); 1532 msleep(100); 1533 if (csio_do_reset(hw, true) == 0) 1534 return 0; 1535 } 1536 1537 csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST); 1538 msleep(2000); 1539 } else { 1540 int ms; 1541 1542 csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, 0); 1543 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) { 1544 if (!(csio_rd_reg32(hw, PCIE_FW) & PCIE_FW_HALT)) 1545 return 0; 1546 msleep(100); 1547 ms += 100; 1548 } 1549 return -ETIMEDOUT; 1550 } 1551 return 0; 1552 } 1553 1554 /* 1555 * csio_hw_fw_upgrade - perform all of the steps necessary to upgrade FW 1556 * @hw: the HW module 1557 * @mbox: mailbox to use for the FW RESET command (if desired) 1558 * @fw_data: the firmware image to write 1559 * @size: image size 1560 * @force: force upgrade even if firmware doesn't cooperate 1561 * 1562 * Perform all of the steps necessary for upgrading an adapter's 1563 * firmware image. Normally this requires the cooperation of the 1564 * existing firmware in order to halt all existing activities 1565 * but if an invalid mailbox token is passed in we skip that step 1566 * (though we'll still put the adapter microprocessor into RESET in 1567 * that case). 1568 * 1569 * On successful return the new firmware will have been loaded and 1570 * the adapter will have been fully RESET losing all previous setup 1571 * state. On unsuccessful return the adapter may be completely hosed ... 1572 * positive errno indicates that the adapter is ~probably~ intact, a 1573 * negative errno indicates that things are looking bad ... 1574 */ 1575 static int 1576 csio_hw_fw_upgrade(struct csio_hw *hw, uint32_t mbox, 1577 const u8 *fw_data, uint32_t size, int32_t force) 1578 { 1579 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data; 1580 int reset, ret; 1581 1582 ret = csio_hw_fw_halt(hw, mbox, force); 1583 if (ret != 0 && !force) 1584 return ret; 1585 1586 ret = csio_hw_fw_dload(hw, (uint8_t *) fw_data, size); 1587 if (ret != 0) 1588 return ret; 1589 1590 /* 1591 * Older versions of the firmware don't understand the new 1592 * PCIE_FW.HALT flag and so won't know to perform a RESET when they 1593 * restart. So for newly loaded older firmware we'll have to do the 1594 * RESET for it so it starts up on a clean slate. We can tell if 1595 * the newly loaded firmware will handle this right by checking 1596 * its header flags to see if it advertises the capability. 1597 */ 1598 reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0); 1599 return csio_hw_fw_restart(hw, mbox, reset); 1600 } 1601 1602 1603 /* 1604 * csio_hw_fw_config_file - setup an adapter via a Configuration File 1605 * @hw: the HW module 1606 * @mbox: mailbox to use for the FW command 1607 * @mtype: the memory type where the Configuration File is located 1608 * @maddr: the memory address where the Configuration File is located 1609 * @finiver: return value for CF [fini] version 1610 * @finicsum: return value for CF [fini] checksum 1611 * @cfcsum: return value for CF computed checksum 1612 * 1613 * Issue a command to get the firmware to process the Configuration 1614 * File located at the specified mtype/maddress. If the Configuration 1615 * File is processed successfully and return value pointers are 1616 * provided, the Configuration File "[fini] section version and 1617 * checksum values will be returned along with the computed checksum. 1618 * It's up to the caller to decide how it wants to respond to the 1619 * checksums not matching but it recommended that a prominant warning 1620 * be emitted in order to help people rapidly identify changed or 1621 * corrupted Configuration Files. 1622 * 1623 * Also note that it's possible to modify things like "niccaps", 1624 * "toecaps",etc. between processing the Configuration File and telling 1625 * the firmware to use the new configuration. Callers which want to 1626 * do this will need to "hand-roll" their own CAPS_CONFIGS commands for 1627 * Configuration Files if they want to do this. 1628 */ 1629 static int 1630 csio_hw_fw_config_file(struct csio_hw *hw, 1631 unsigned int mtype, unsigned int maddr, 1632 uint32_t *finiver, uint32_t *finicsum, uint32_t *cfcsum) 1633 { 1634 struct csio_mb *mbp; 1635 struct fw_caps_config_cmd *caps_cmd; 1636 int rv = -EINVAL; 1637 enum fw_retval ret; 1638 1639 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1640 if (!mbp) { 1641 CSIO_INC_STATS(hw, n_err_nomem); 1642 return -ENOMEM; 1643 } 1644 /* 1645 * Tell the firmware to process the indicated Configuration File. 1646 * If there are no errors and the caller has provided return value 1647 * pointers for the [fini] section version, checksum and computed 1648 * checksum, pass those back to the caller. 1649 */ 1650 caps_cmd = (struct fw_caps_config_cmd *)(mbp->mb); 1651 CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1); 1652 caps_cmd->op_to_write = 1653 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 1654 FW_CMD_REQUEST | 1655 FW_CMD_READ); 1656 caps_cmd->cfvalid_to_len16 = 1657 htonl(FW_CAPS_CONFIG_CMD_CFVALID | 1658 FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) | 1659 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) | 1660 FW_LEN16(*caps_cmd)); 1661 1662 if (csio_mb_issue(hw, mbp)) { 1663 csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD failed!\n"); 1664 goto out; 1665 } 1666 1667 ret = csio_mb_fw_retval(mbp); 1668 if (ret != FW_SUCCESS) { 1669 csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", rv); 1670 goto out; 1671 } 1672 1673 if (finiver) 1674 *finiver = ntohl(caps_cmd->finiver); 1675 if (finicsum) 1676 *finicsum = ntohl(caps_cmd->finicsum); 1677 if (cfcsum) 1678 *cfcsum = ntohl(caps_cmd->cfcsum); 1679 1680 /* Validate device capabilities */ 1681 if (csio_hw_validate_caps(hw, mbp)) { 1682 rv = -ENOENT; 1683 goto out; 1684 } 1685 1686 /* 1687 * And now tell the firmware to use the configuration we just loaded. 1688 */ 1689 caps_cmd->op_to_write = 1690 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 1691 FW_CMD_REQUEST | 1692 FW_CMD_WRITE); 1693 caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd)); 1694 1695 if (csio_mb_issue(hw, mbp)) { 1696 csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD failed!\n"); 1697 goto out; 1698 } 1699 1700 ret = csio_mb_fw_retval(mbp); 1701 if (ret != FW_SUCCESS) { 1702 csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", rv); 1703 goto out; 1704 } 1705 1706 rv = 0; 1707 out: 1708 mempool_free(mbp, hw->mb_mempool); 1709 return rv; 1710 } 1711 1712 /* 1713 * csio_get_device_params - Get device parameters. 1714 * @hw: HW module 1715 * 1716 */ 1717 static int 1718 csio_get_device_params(struct csio_hw *hw) 1719 { 1720 struct csio_wrm *wrm = csio_hw_to_wrm(hw); 1721 struct csio_mb *mbp; 1722 enum fw_retval retval; 1723 u32 param[6]; 1724 int i, j = 0; 1725 1726 /* Initialize portids to -1 */ 1727 for (i = 0; i < CSIO_MAX_PPORTS; i++) 1728 hw->pport[i].portid = -1; 1729 1730 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1731 if (!mbp) { 1732 CSIO_INC_STATS(hw, n_err_nomem); 1733 return -ENOMEM; 1734 } 1735 1736 /* Get port vec information. */ 1737 param[0] = FW_PARAM_DEV(PORTVEC); 1738 1739 /* Get Core clock. */ 1740 param[1] = FW_PARAM_DEV(CCLK); 1741 1742 /* Get EQ id start and end. */ 1743 param[2] = FW_PARAM_PFVF(EQ_START); 1744 param[3] = FW_PARAM_PFVF(EQ_END); 1745 1746 /* Get IQ id start and end. */ 1747 param[4] = FW_PARAM_PFVF(IQFLINT_START); 1748 param[5] = FW_PARAM_PFVF(IQFLINT_END); 1749 1750 csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0, 1751 ARRAY_SIZE(param), param, NULL, false, NULL); 1752 if (csio_mb_issue(hw, mbp)) { 1753 csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n"); 1754 mempool_free(mbp, hw->mb_mempool); 1755 return -EINVAL; 1756 } 1757 1758 csio_mb_process_read_params_rsp(hw, mbp, &retval, 1759 ARRAY_SIZE(param), param); 1760 if (retval != FW_SUCCESS) { 1761 csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n", 1762 retval); 1763 mempool_free(mbp, hw->mb_mempool); 1764 return -EINVAL; 1765 } 1766 1767 /* cache the information. */ 1768 hw->port_vec = param[0]; 1769 hw->vpd.cclk = param[1]; 1770 wrm->fw_eq_start = param[2]; 1771 wrm->fw_iq_start = param[4]; 1772 1773 /* Using FW configured max iqs & eqs */ 1774 if ((hw->flags & CSIO_HWF_USING_SOFT_PARAMS) || 1775 !csio_is_hw_master(hw)) { 1776 hw->cfg_niq = param[5] - param[4] + 1; 1777 hw->cfg_neq = param[3] - param[2] + 1; 1778 csio_dbg(hw, "Using fwconfig max niqs %d neqs %d\n", 1779 hw->cfg_niq, hw->cfg_neq); 1780 } 1781 1782 hw->port_vec &= csio_port_mask; 1783 1784 hw->num_pports = hweight32(hw->port_vec); 1785 1786 csio_dbg(hw, "Port vector: 0x%x, #ports: %d\n", 1787 hw->port_vec, hw->num_pports); 1788 1789 for (i = 0; i < hw->num_pports; i++) { 1790 while ((hw->port_vec & (1 << j)) == 0) 1791 j++; 1792 hw->pport[i].portid = j++; 1793 csio_dbg(hw, "Found Port:%d\n", hw->pport[i].portid); 1794 } 1795 mempool_free(mbp, hw->mb_mempool); 1796 1797 return 0; 1798 } 1799 1800 1801 /* 1802 * csio_config_device_caps - Get and set device capabilities. 1803 * @hw: HW module 1804 * 1805 */ 1806 static int 1807 csio_config_device_caps(struct csio_hw *hw) 1808 { 1809 struct csio_mb *mbp; 1810 enum fw_retval retval; 1811 int rv = -EINVAL; 1812 1813 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1814 if (!mbp) { 1815 CSIO_INC_STATS(hw, n_err_nomem); 1816 return -ENOMEM; 1817 } 1818 1819 /* Get device capabilities */ 1820 csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, 0, 0, 0, 0, NULL); 1821 1822 if (csio_mb_issue(hw, mbp)) { 1823 csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(r) failed!\n"); 1824 goto out; 1825 } 1826 1827 retval = csio_mb_fw_retval(mbp); 1828 if (retval != FW_SUCCESS) { 1829 csio_err(hw, "FW_CAPS_CONFIG_CMD(r) returned %d!\n", retval); 1830 goto out; 1831 } 1832 1833 /* Validate device capabilities */ 1834 if (csio_hw_validate_caps(hw, mbp)) 1835 goto out; 1836 1837 /* Don't config device capabilities if already configured */ 1838 if (hw->fw_state == CSIO_DEV_STATE_INIT) { 1839 rv = 0; 1840 goto out; 1841 } 1842 1843 /* Write back desired device capabilities */ 1844 csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, true, true, 1845 false, true, NULL); 1846 1847 if (csio_mb_issue(hw, mbp)) { 1848 csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(w) failed!\n"); 1849 goto out; 1850 } 1851 1852 retval = csio_mb_fw_retval(mbp); 1853 if (retval != FW_SUCCESS) { 1854 csio_err(hw, "FW_CAPS_CONFIG_CMD(w) returned %d!\n", retval); 1855 goto out; 1856 } 1857 1858 rv = 0; 1859 out: 1860 mempool_free(mbp, hw->mb_mempool); 1861 return rv; 1862 } 1863 1864 static int 1865 csio_config_global_rss(struct csio_hw *hw) 1866 { 1867 struct csio_mb *mbp; 1868 enum fw_retval retval; 1869 1870 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1871 if (!mbp) { 1872 CSIO_INC_STATS(hw, n_err_nomem); 1873 return -ENOMEM; 1874 } 1875 1876 csio_rss_glb_config(hw, mbp, CSIO_MB_DEFAULT_TMO, 1877 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL, 1878 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN | 1879 FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ | 1880 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP, 1881 NULL); 1882 1883 if (csio_mb_issue(hw, mbp)) { 1884 csio_err(hw, "Issue of FW_RSS_GLB_CONFIG_CMD failed!\n"); 1885 mempool_free(mbp, hw->mb_mempool); 1886 return -EINVAL; 1887 } 1888 1889 retval = csio_mb_fw_retval(mbp); 1890 if (retval != FW_SUCCESS) { 1891 csio_err(hw, "FW_RSS_GLB_CONFIG_CMD returned 0x%x!\n", retval); 1892 mempool_free(mbp, hw->mb_mempool); 1893 return -EINVAL; 1894 } 1895 1896 mempool_free(mbp, hw->mb_mempool); 1897 1898 return 0; 1899 } 1900 1901 /* 1902 * csio_config_pfvf - Configure Physical/Virtual functions settings. 1903 * @hw: HW module 1904 * 1905 */ 1906 static int 1907 csio_config_pfvf(struct csio_hw *hw) 1908 { 1909 struct csio_mb *mbp; 1910 enum fw_retval retval; 1911 1912 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1913 if (!mbp) { 1914 CSIO_INC_STATS(hw, n_err_nomem); 1915 return -ENOMEM; 1916 } 1917 1918 /* 1919 * For now, allow all PFs to access to all ports using a pmask 1920 * value of 0xF (M_FW_PFVF_CMD_PMASK). Once we have VFs, we will 1921 * need to provide access based on some rule. 1922 */ 1923 csio_mb_pfvf(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0, CSIO_NEQ, 1924 CSIO_NETH_CTRL, CSIO_NIQ_FLINT, 0, 0, CSIO_NVI, CSIO_CMASK, 1925 CSIO_PMASK, CSIO_NEXACTF, CSIO_R_CAPS, CSIO_WX_CAPS, NULL); 1926 1927 if (csio_mb_issue(hw, mbp)) { 1928 csio_err(hw, "Issue of FW_PFVF_CMD failed!\n"); 1929 mempool_free(mbp, hw->mb_mempool); 1930 return -EINVAL; 1931 } 1932 1933 retval = csio_mb_fw_retval(mbp); 1934 if (retval != FW_SUCCESS) { 1935 csio_err(hw, "FW_PFVF_CMD returned 0x%x!\n", retval); 1936 mempool_free(mbp, hw->mb_mempool); 1937 return -EINVAL; 1938 } 1939 1940 mempool_free(mbp, hw->mb_mempool); 1941 1942 return 0; 1943 } 1944 1945 /* 1946 * csio_enable_ports - Bring up all available ports. 1947 * @hw: HW module. 1948 * 1949 */ 1950 static int 1951 csio_enable_ports(struct csio_hw *hw) 1952 { 1953 struct csio_mb *mbp; 1954 enum fw_retval retval; 1955 uint8_t portid; 1956 int i; 1957 1958 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1959 if (!mbp) { 1960 CSIO_INC_STATS(hw, n_err_nomem); 1961 return -ENOMEM; 1962 } 1963 1964 for (i = 0; i < hw->num_pports; i++) { 1965 portid = hw->pport[i].portid; 1966 1967 /* Read PORT information */ 1968 csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid, 1969 false, 0, 0, NULL); 1970 1971 if (csio_mb_issue(hw, mbp)) { 1972 csio_err(hw, "failed to issue FW_PORT_CMD(r) port:%d\n", 1973 portid); 1974 mempool_free(mbp, hw->mb_mempool); 1975 return -EINVAL; 1976 } 1977 1978 csio_mb_process_read_port_rsp(hw, mbp, &retval, 1979 &hw->pport[i].pcap); 1980 if (retval != FW_SUCCESS) { 1981 csio_err(hw, "FW_PORT_CMD(r) port:%d failed: 0x%x\n", 1982 portid, retval); 1983 mempool_free(mbp, hw->mb_mempool); 1984 return -EINVAL; 1985 } 1986 1987 /* Write back PORT information */ 1988 csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid, true, 1989 (PAUSE_RX | PAUSE_TX), hw->pport[i].pcap, NULL); 1990 1991 if (csio_mb_issue(hw, mbp)) { 1992 csio_err(hw, "failed to issue FW_PORT_CMD(w) port:%d\n", 1993 portid); 1994 mempool_free(mbp, hw->mb_mempool); 1995 return -EINVAL; 1996 } 1997 1998 retval = csio_mb_fw_retval(mbp); 1999 if (retval != FW_SUCCESS) { 2000 csio_err(hw, "FW_PORT_CMD(w) port:%d failed :0x%x\n", 2001 portid, retval); 2002 mempool_free(mbp, hw->mb_mempool); 2003 return -EINVAL; 2004 } 2005 2006 } /* For all ports */ 2007 2008 mempool_free(mbp, hw->mb_mempool); 2009 2010 return 0; 2011 } 2012 2013 /* 2014 * csio_get_fcoe_resinfo - Read fcoe fw resource info. 2015 * @hw: HW module 2016 * Issued with lock held. 2017 */ 2018 static int 2019 csio_get_fcoe_resinfo(struct csio_hw *hw) 2020 { 2021 struct csio_fcoe_res_info *res_info = &hw->fres_info; 2022 struct fw_fcoe_res_info_cmd *rsp; 2023 struct csio_mb *mbp; 2024 enum fw_retval retval; 2025 2026 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 2027 if (!mbp) { 2028 CSIO_INC_STATS(hw, n_err_nomem); 2029 return -ENOMEM; 2030 } 2031 2032 /* Get FCoE FW resource information */ 2033 csio_fcoe_read_res_info_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL); 2034 2035 if (csio_mb_issue(hw, mbp)) { 2036 csio_err(hw, "failed to issue FW_FCOE_RES_INFO_CMD\n"); 2037 mempool_free(mbp, hw->mb_mempool); 2038 return -EINVAL; 2039 } 2040 2041 rsp = (struct fw_fcoe_res_info_cmd *)(mbp->mb); 2042 retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16)); 2043 if (retval != FW_SUCCESS) { 2044 csio_err(hw, "FW_FCOE_RES_INFO_CMD failed with ret x%x\n", 2045 retval); 2046 mempool_free(mbp, hw->mb_mempool); 2047 return -EINVAL; 2048 } 2049 2050 res_info->e_d_tov = ntohs(rsp->e_d_tov); 2051 res_info->r_a_tov_seq = ntohs(rsp->r_a_tov_seq); 2052 res_info->r_a_tov_els = ntohs(rsp->r_a_tov_els); 2053 res_info->r_r_tov = ntohs(rsp->r_r_tov); 2054 res_info->max_xchgs = ntohl(rsp->max_xchgs); 2055 res_info->max_ssns = ntohl(rsp->max_ssns); 2056 res_info->used_xchgs = ntohl(rsp->used_xchgs); 2057 res_info->used_ssns = ntohl(rsp->used_ssns); 2058 res_info->max_fcfs = ntohl(rsp->max_fcfs); 2059 res_info->max_vnps = ntohl(rsp->max_vnps); 2060 res_info->used_fcfs = ntohl(rsp->used_fcfs); 2061 res_info->used_vnps = ntohl(rsp->used_vnps); 2062 2063 csio_dbg(hw, "max ssns:%d max xchgs:%d\n", res_info->max_ssns, 2064 res_info->max_xchgs); 2065 mempool_free(mbp, hw->mb_mempool); 2066 2067 return 0; 2068 } 2069 2070 static int 2071 csio_hw_check_fwconfig(struct csio_hw *hw, u32 *param) 2072 { 2073 struct csio_mb *mbp; 2074 enum fw_retval retval; 2075 u32 _param[1]; 2076 2077 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 2078 if (!mbp) { 2079 CSIO_INC_STATS(hw, n_err_nomem); 2080 return -ENOMEM; 2081 } 2082 2083 /* 2084 * Find out whether we're dealing with a version of 2085 * the firmware which has configuration file support. 2086 */ 2087 _param[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 2088 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF)); 2089 2090 csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0, 2091 ARRAY_SIZE(_param), _param, NULL, false, NULL); 2092 if (csio_mb_issue(hw, mbp)) { 2093 csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n"); 2094 mempool_free(mbp, hw->mb_mempool); 2095 return -EINVAL; 2096 } 2097 2098 csio_mb_process_read_params_rsp(hw, mbp, &retval, 2099 ARRAY_SIZE(_param), _param); 2100 if (retval != FW_SUCCESS) { 2101 csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n", 2102 retval); 2103 mempool_free(mbp, hw->mb_mempool); 2104 return -EINVAL; 2105 } 2106 2107 mempool_free(mbp, hw->mb_mempool); 2108 *param = _param[0]; 2109 2110 return 0; 2111 } 2112 2113 static int 2114 csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path) 2115 { 2116 int ret = 0; 2117 const struct firmware *cf; 2118 struct pci_dev *pci_dev = hw->pdev; 2119 struct device *dev = &pci_dev->dev; 2120 unsigned int mtype = 0, maddr = 0; 2121 uint32_t *cfg_data; 2122 int value_to_add = 0; 2123 2124 if (request_firmware(&cf, CSIO_CF_FNAME, dev) < 0) { 2125 csio_err(hw, "could not find config file " CSIO_CF_FNAME 2126 ",err: %d\n", ret); 2127 return -ENOENT; 2128 } 2129 2130 if (cf->size%4 != 0) 2131 value_to_add = 4 - (cf->size % 4); 2132 2133 cfg_data = kzalloc(cf->size+value_to_add, GFP_KERNEL); 2134 if (cfg_data == NULL) 2135 return -ENOMEM; 2136 2137 memcpy((void *)cfg_data, (const void *)cf->data, cf->size); 2138 2139 if (csio_hw_check_fwconfig(hw, fw_cfg_param) != 0) 2140 return -EINVAL; 2141 2142 mtype = FW_PARAMS_PARAM_Y_GET(*fw_cfg_param); 2143 maddr = FW_PARAMS_PARAM_Z_GET(*fw_cfg_param) << 16; 2144 2145 ret = csio_memory_write(hw, mtype, maddr, 2146 cf->size + value_to_add, cfg_data); 2147 if (ret == 0) { 2148 csio_info(hw, "config file upgraded to " CSIO_CF_FNAME "\n"); 2149 strncpy(path, "/lib/firmware/" CSIO_CF_FNAME, 64); 2150 } 2151 2152 kfree(cfg_data); 2153 release_firmware(cf); 2154 2155 return ret; 2156 } 2157 2158 /* 2159 * HW initialization: contact FW, obtain config, perform basic init. 2160 * 2161 * If the firmware we're dealing with has Configuration File support, then 2162 * we use that to perform all configuration -- either using the configuration 2163 * file stored in flash on the adapter or using a filesystem-local file 2164 * if available. 2165 * 2166 * If we don't have configuration file support in the firmware, then we'll 2167 * have to set things up the old fashioned way with hard-coded register 2168 * writes and firmware commands ... 2169 */ 2170 2171 /* 2172 * Attempt to initialize the HW via a Firmware Configuration File. 2173 */ 2174 static int 2175 csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param) 2176 { 2177 unsigned int mtype, maddr; 2178 int rv; 2179 uint32_t finiver, finicsum, cfcsum; 2180 int using_flash; 2181 char path[64]; 2182 2183 /* 2184 * Reset device if necessary 2185 */ 2186 if (reset) { 2187 rv = csio_do_reset(hw, true); 2188 if (rv != 0) 2189 goto bye; 2190 } 2191 2192 /* 2193 * If we have a configuration file in host , 2194 * then use that. Otherwise, use the configuration file stored 2195 * in the HW flash ... 2196 */ 2197 spin_unlock_irq(&hw->lock); 2198 rv = csio_hw_flash_config(hw, fw_cfg_param, path); 2199 spin_lock_irq(&hw->lock); 2200 if (rv != 0) { 2201 if (rv == -ENOENT) { 2202 /* 2203 * config file was not found. Use default 2204 * config file from flash. 2205 */ 2206 mtype = FW_MEMTYPE_CF_FLASH; 2207 maddr = csio_hw_flash_cfg_addr(hw); 2208 using_flash = 1; 2209 } else { 2210 /* 2211 * we revert back to the hardwired config if 2212 * flashing failed. 2213 */ 2214 goto bye; 2215 } 2216 } else { 2217 mtype = FW_PARAMS_PARAM_Y_GET(*fw_cfg_param); 2218 maddr = FW_PARAMS_PARAM_Z_GET(*fw_cfg_param) << 16; 2219 using_flash = 0; 2220 } 2221 2222 hw->cfg_store = (uint8_t)mtype; 2223 2224 /* 2225 * Issue a Capability Configuration command to the firmware to get it 2226 * to parse the Configuration File. 2227 */ 2228 rv = csio_hw_fw_config_file(hw, mtype, maddr, &finiver, 2229 &finicsum, &cfcsum); 2230 if (rv != 0) 2231 goto bye; 2232 2233 hw->cfg_finiver = finiver; 2234 hw->cfg_finicsum = finicsum; 2235 hw->cfg_cfcsum = cfcsum; 2236 hw->cfg_csum_status = true; 2237 2238 if (finicsum != cfcsum) { 2239 csio_warn(hw, 2240 "Config File checksum mismatch: csum=%#x, computed=%#x\n", 2241 finicsum, cfcsum); 2242 2243 hw->cfg_csum_status = false; 2244 } 2245 2246 /* 2247 * Note that we're operating with parameters 2248 * not supplied by the driver, rather than from hard-wired 2249 * initialization constants buried in the driver. 2250 */ 2251 hw->flags |= CSIO_HWF_USING_SOFT_PARAMS; 2252 2253 /* device parameters */ 2254 rv = csio_get_device_params(hw); 2255 if (rv != 0) 2256 goto bye; 2257 2258 /* Configure SGE */ 2259 csio_wr_sge_init(hw); 2260 2261 /* 2262 * And finally tell the firmware to initialize itself using the 2263 * parameters from the Configuration File. 2264 */ 2265 /* Post event to notify completion of configuration */ 2266 csio_post_event(&hw->sm, CSIO_HWE_INIT); 2267 2268 csio_info(hw, 2269 "Firmware Configuration File %s, version %#x, computed checksum %#x\n", 2270 (using_flash ? "in device FLASH" : path), finiver, cfcsum); 2271 2272 return 0; 2273 2274 /* 2275 * Something bad happened. Return the error ... 2276 */ 2277 bye: 2278 hw->flags &= ~CSIO_HWF_USING_SOFT_PARAMS; 2279 csio_dbg(hw, "Configuration file error %d\n", rv); 2280 return rv; 2281 } 2282 2283 /* 2284 * Attempt to initialize the adapter via hard-coded, driver supplied 2285 * parameters ... 2286 */ 2287 static int 2288 csio_hw_no_fwconfig(struct csio_hw *hw, int reset) 2289 { 2290 int rv; 2291 /* 2292 * Reset device if necessary 2293 */ 2294 if (reset) { 2295 rv = csio_do_reset(hw, true); 2296 if (rv != 0) 2297 goto out; 2298 } 2299 2300 /* Get and set device capabilities */ 2301 rv = csio_config_device_caps(hw); 2302 if (rv != 0) 2303 goto out; 2304 2305 /* Config Global RSS command */ 2306 rv = csio_config_global_rss(hw); 2307 if (rv != 0) 2308 goto out; 2309 2310 /* Configure PF/VF capabilities of device */ 2311 rv = csio_config_pfvf(hw); 2312 if (rv != 0) 2313 goto out; 2314 2315 /* device parameters */ 2316 rv = csio_get_device_params(hw); 2317 if (rv != 0) 2318 goto out; 2319 2320 /* Configure SGE */ 2321 csio_wr_sge_init(hw); 2322 2323 /* Post event to notify completion of configuration */ 2324 csio_post_event(&hw->sm, CSIO_HWE_INIT); 2325 2326 out: 2327 return rv; 2328 } 2329 2330 /* 2331 * Returns -EINVAL if attempts to flash the firmware failed 2332 * else returns 0, 2333 * if flashing was not attempted because the card had the 2334 * latest firmware ECANCELED is returned 2335 */ 2336 static int 2337 csio_hw_flash_fw(struct csio_hw *hw) 2338 { 2339 int ret = -ECANCELED; 2340 const struct firmware *fw; 2341 const struct fw_hdr *hdr; 2342 u32 fw_ver; 2343 struct pci_dev *pci_dev = hw->pdev; 2344 struct device *dev = &pci_dev->dev ; 2345 2346 if (request_firmware(&fw, CSIO_FW_FNAME, dev) < 0) { 2347 csio_err(hw, "could not find firmware image " CSIO_FW_FNAME 2348 ",err: %d\n", ret); 2349 return -EINVAL; 2350 } 2351 2352 hdr = (const struct fw_hdr *)fw->data; 2353 fw_ver = ntohl(hdr->fw_ver); 2354 if (FW_HDR_FW_VER_MAJOR_GET(fw_ver) != FW_VERSION_MAJOR) 2355 return -EINVAL; /* wrong major version, won't do */ 2356 2357 /* 2358 * If the flash FW is unusable or we found something newer, load it. 2359 */ 2360 if (FW_HDR_FW_VER_MAJOR_GET(hw->fwrev) != FW_VERSION_MAJOR || 2361 fw_ver > hw->fwrev) { 2362 ret = csio_hw_fw_upgrade(hw, hw->pfn, fw->data, fw->size, 2363 /*force=*/false); 2364 if (!ret) 2365 csio_info(hw, "firmware upgraded to version %pI4 from " 2366 CSIO_FW_FNAME "\n", &hdr->fw_ver); 2367 else 2368 csio_err(hw, "firmware upgrade failed! err=%d\n", ret); 2369 } 2370 2371 release_firmware(fw); 2372 2373 return ret; 2374 } 2375 2376 2377 /* 2378 * csio_hw_configure - Configure HW 2379 * @hw - HW module 2380 * 2381 */ 2382 static void 2383 csio_hw_configure(struct csio_hw *hw) 2384 { 2385 int reset = 1; 2386 int rv; 2387 u32 param[1]; 2388 2389 rv = csio_hw_dev_ready(hw); 2390 if (rv != 0) { 2391 CSIO_INC_STATS(hw, n_err_fatal); 2392 csio_post_event(&hw->sm, CSIO_HWE_FATAL); 2393 goto out; 2394 } 2395 2396 /* HW version */ 2397 hw->chip_ver = (char)csio_rd_reg32(hw, PL_REV); 2398 2399 /* Needed for FW download */ 2400 rv = csio_hw_get_flash_params(hw); 2401 if (rv != 0) { 2402 csio_err(hw, "Failed to get serial flash params rv:%d\n", rv); 2403 csio_post_event(&hw->sm, CSIO_HWE_FATAL); 2404 goto out; 2405 } 2406 2407 /* Set pci completion timeout value to 4 seconds. */ 2408 csio_set_pcie_completion_timeout(hw, 0xd); 2409 2410 csio_hw_set_mem_win(hw); 2411 2412 rv = csio_hw_get_fw_version(hw, &hw->fwrev); 2413 if (rv != 0) 2414 goto out; 2415 2416 csio_hw_print_fw_version(hw, "Firmware revision"); 2417 2418 rv = csio_do_hello(hw, &hw->fw_state); 2419 if (rv != 0) { 2420 CSIO_INC_STATS(hw, n_err_fatal); 2421 csio_post_event(&hw->sm, CSIO_HWE_FATAL); 2422 goto out; 2423 } 2424 2425 /* Read vpd */ 2426 rv = csio_hw_get_vpd_params(hw, &hw->vpd); 2427 if (rv != 0) 2428 goto out; 2429 2430 if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) { 2431 rv = csio_hw_check_fw_version(hw); 2432 if (rv == -EINVAL) { 2433 2434 /* Do firmware update */ 2435 spin_unlock_irq(&hw->lock); 2436 rv = csio_hw_flash_fw(hw); 2437 spin_lock_irq(&hw->lock); 2438 2439 if (rv == 0) { 2440 reset = 0; 2441 /* 2442 * Note that the chip was reset as part of the 2443 * firmware upgrade so we don't reset it again 2444 * below and grab the new firmware version. 2445 */ 2446 rv = csio_hw_check_fw_version(hw); 2447 } 2448 } 2449 /* 2450 * If the firmware doesn't support Configuration 2451 * Files, use the old Driver-based, hard-wired 2452 * initialization. Otherwise, try using the 2453 * Configuration File support and fall back to the 2454 * Driver-based initialization if there's no 2455 * Configuration File found. 2456 */ 2457 if (csio_hw_check_fwconfig(hw, param) == 0) { 2458 rv = csio_hw_use_fwconfig(hw, reset, param); 2459 if (rv == -ENOENT) 2460 goto out; 2461 if (rv != 0) { 2462 csio_info(hw, 2463 "No Configuration File present " 2464 "on adapter. Using hard-wired " 2465 "configuration parameters.\n"); 2466 rv = csio_hw_no_fwconfig(hw, reset); 2467 } 2468 } else { 2469 rv = csio_hw_no_fwconfig(hw, reset); 2470 } 2471 2472 if (rv != 0) 2473 goto out; 2474 2475 } else { 2476 if (hw->fw_state == CSIO_DEV_STATE_INIT) { 2477 2478 /* device parameters */ 2479 rv = csio_get_device_params(hw); 2480 if (rv != 0) 2481 goto out; 2482 2483 /* Get device capabilities */ 2484 rv = csio_config_device_caps(hw); 2485 if (rv != 0) 2486 goto out; 2487 2488 /* Configure SGE */ 2489 csio_wr_sge_init(hw); 2490 2491 /* Post event to notify completion of configuration */ 2492 csio_post_event(&hw->sm, CSIO_HWE_INIT); 2493 goto out; 2494 } 2495 } /* if not master */ 2496 2497 out: 2498 return; 2499 } 2500 2501 /* 2502 * csio_hw_initialize - Initialize HW 2503 * @hw - HW module 2504 * 2505 */ 2506 static void 2507 csio_hw_initialize(struct csio_hw *hw) 2508 { 2509 struct csio_mb *mbp; 2510 enum fw_retval retval; 2511 int rv; 2512 int i; 2513 2514 if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) { 2515 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 2516 if (!mbp) 2517 goto out; 2518 2519 csio_mb_initialize(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL); 2520 2521 if (csio_mb_issue(hw, mbp)) { 2522 csio_err(hw, "Issue of FW_INITIALIZE_CMD failed!\n"); 2523 goto free_and_out; 2524 } 2525 2526 retval = csio_mb_fw_retval(mbp); 2527 if (retval != FW_SUCCESS) { 2528 csio_err(hw, "FW_INITIALIZE_CMD returned 0x%x!\n", 2529 retval); 2530 goto free_and_out; 2531 } 2532 2533 mempool_free(mbp, hw->mb_mempool); 2534 } 2535 2536 rv = csio_get_fcoe_resinfo(hw); 2537 if (rv != 0) { 2538 csio_err(hw, "Failed to read fcoe resource info: %d\n", rv); 2539 goto out; 2540 } 2541 2542 spin_unlock_irq(&hw->lock); 2543 rv = csio_config_queues(hw); 2544 spin_lock_irq(&hw->lock); 2545 2546 if (rv != 0) { 2547 csio_err(hw, "Config of queues failed!: %d\n", rv); 2548 goto out; 2549 } 2550 2551 for (i = 0; i < hw->num_pports; i++) 2552 hw->pport[i].mod_type = FW_PORT_MOD_TYPE_NA; 2553 2554 if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) { 2555 rv = csio_enable_ports(hw); 2556 if (rv != 0) { 2557 csio_err(hw, "Failed to enable ports: %d\n", rv); 2558 goto out; 2559 } 2560 } 2561 2562 csio_post_event(&hw->sm, CSIO_HWE_INIT_DONE); 2563 return; 2564 2565 free_and_out: 2566 mempool_free(mbp, hw->mb_mempool); 2567 out: 2568 return; 2569 } 2570 2571 #define PF_INTR_MASK (PFSW | PFCIM) 2572 2573 /* 2574 * csio_hw_intr_enable - Enable HW interrupts 2575 * @hw: Pointer to HW module. 2576 * 2577 * Enable interrupts in HW registers. 2578 */ 2579 static void 2580 csio_hw_intr_enable(struct csio_hw *hw) 2581 { 2582 uint16_t vec = (uint16_t)csio_get_mb_intr_idx(csio_hw_to_mbm(hw)); 2583 uint32_t pf = SOURCEPF_GET(csio_rd_reg32(hw, PL_WHOAMI)); 2584 uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE); 2585 2586 /* 2587 * Set aivec for MSI/MSIX. PCIE_PF_CFG.INTXType is set up 2588 * by FW, so do nothing for INTX. 2589 */ 2590 if (hw->intr_mode == CSIO_IM_MSIX) 2591 csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG), 2592 AIVEC(AIVEC_MASK), vec); 2593 else if (hw->intr_mode == CSIO_IM_MSI) 2594 csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG), 2595 AIVEC(AIVEC_MASK), 0); 2596 2597 csio_wr_reg32(hw, PF_INTR_MASK, MYPF_REG(PL_PF_INT_ENABLE)); 2598 2599 /* Turn on MB interrupts - this will internally flush PIO as well */ 2600 csio_mb_intr_enable(hw); 2601 2602 /* These are common registers - only a master can modify them */ 2603 if (csio_is_hw_master(hw)) { 2604 /* 2605 * Disable the Serial FLASH interrupt, if enabled! 2606 */ 2607 pl &= (~SF); 2608 csio_wr_reg32(hw, pl, PL_INT_ENABLE); 2609 2610 csio_wr_reg32(hw, ERR_CPL_EXCEED_IQE_SIZE | 2611 EGRESS_SIZE_ERR | ERR_INVALID_CIDX_INC | 2612 ERR_CPL_OPCODE_0 | ERR_DROPPED_DB | 2613 ERR_DATA_CPL_ON_HIGH_QID1 | 2614 ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 | 2615 ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 | 2616 ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO | 2617 ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR, 2618 SGE_INT_ENABLE3); 2619 csio_set_reg_field(hw, PL_INT_MAP0, 0, 1 << pf); 2620 } 2621 2622 hw->flags |= CSIO_HWF_HW_INTR_ENABLED; 2623 2624 } 2625 2626 /* 2627 * csio_hw_intr_disable - Disable HW interrupts 2628 * @hw: Pointer to HW module. 2629 * 2630 * Turn off Mailbox and PCI_PF_CFG interrupts. 2631 */ 2632 void 2633 csio_hw_intr_disable(struct csio_hw *hw) 2634 { 2635 uint32_t pf = SOURCEPF_GET(csio_rd_reg32(hw, PL_WHOAMI)); 2636 2637 if (!(hw->flags & CSIO_HWF_HW_INTR_ENABLED)) 2638 return; 2639 2640 hw->flags &= ~CSIO_HWF_HW_INTR_ENABLED; 2641 2642 csio_wr_reg32(hw, 0, MYPF_REG(PL_PF_INT_ENABLE)); 2643 if (csio_is_hw_master(hw)) 2644 csio_set_reg_field(hw, PL_INT_MAP0, 1 << pf, 0); 2645 2646 /* Turn off MB interrupts */ 2647 csio_mb_intr_disable(hw); 2648 2649 } 2650 2651 static void 2652 csio_hw_fatal_err(struct csio_hw *hw) 2653 { 2654 csio_set_reg_field(hw, SGE_CONTROL, GLOBALENABLE, 0); 2655 csio_hw_intr_disable(hw); 2656 2657 /* Do not reset HW, we may need FW state for debugging */ 2658 csio_fatal(hw, "HW Fatal error encountered!\n"); 2659 } 2660 2661 /*****************************************************************************/ 2662 /* START: HW SM */ 2663 /*****************************************************************************/ 2664 /* 2665 * csio_hws_uninit - Uninit state 2666 * @hw - HW module 2667 * @evt - Event 2668 * 2669 */ 2670 static void 2671 csio_hws_uninit(struct csio_hw *hw, enum csio_hw_ev evt) 2672 { 2673 hw->prev_evt = hw->cur_evt; 2674 hw->cur_evt = evt; 2675 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2676 2677 switch (evt) { 2678 case CSIO_HWE_CFG: 2679 csio_set_state(&hw->sm, csio_hws_configuring); 2680 csio_hw_configure(hw); 2681 break; 2682 2683 default: 2684 CSIO_INC_STATS(hw, n_evt_unexp); 2685 break; 2686 } 2687 } 2688 2689 /* 2690 * csio_hws_configuring - Configuring state 2691 * @hw - HW module 2692 * @evt - Event 2693 * 2694 */ 2695 static void 2696 csio_hws_configuring(struct csio_hw *hw, enum csio_hw_ev evt) 2697 { 2698 hw->prev_evt = hw->cur_evt; 2699 hw->cur_evt = evt; 2700 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2701 2702 switch (evt) { 2703 case CSIO_HWE_INIT: 2704 csio_set_state(&hw->sm, csio_hws_initializing); 2705 csio_hw_initialize(hw); 2706 break; 2707 2708 case CSIO_HWE_INIT_DONE: 2709 csio_set_state(&hw->sm, csio_hws_ready); 2710 /* Fan out event to all lnode SMs */ 2711 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY); 2712 break; 2713 2714 case CSIO_HWE_FATAL: 2715 csio_set_state(&hw->sm, csio_hws_uninit); 2716 break; 2717 2718 case CSIO_HWE_PCI_REMOVE: 2719 csio_do_bye(hw); 2720 break; 2721 default: 2722 CSIO_INC_STATS(hw, n_evt_unexp); 2723 break; 2724 } 2725 } 2726 2727 /* 2728 * csio_hws_initializing - Initialiazing state 2729 * @hw - HW module 2730 * @evt - Event 2731 * 2732 */ 2733 static void 2734 csio_hws_initializing(struct csio_hw *hw, enum csio_hw_ev evt) 2735 { 2736 hw->prev_evt = hw->cur_evt; 2737 hw->cur_evt = evt; 2738 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2739 2740 switch (evt) { 2741 case CSIO_HWE_INIT_DONE: 2742 csio_set_state(&hw->sm, csio_hws_ready); 2743 2744 /* Fan out event to all lnode SMs */ 2745 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY); 2746 2747 /* Enable interrupts */ 2748 csio_hw_intr_enable(hw); 2749 break; 2750 2751 case CSIO_HWE_FATAL: 2752 csio_set_state(&hw->sm, csio_hws_uninit); 2753 break; 2754 2755 case CSIO_HWE_PCI_REMOVE: 2756 csio_do_bye(hw); 2757 break; 2758 2759 default: 2760 CSIO_INC_STATS(hw, n_evt_unexp); 2761 break; 2762 } 2763 } 2764 2765 /* 2766 * csio_hws_ready - Ready state 2767 * @hw - HW module 2768 * @evt - Event 2769 * 2770 */ 2771 static void 2772 csio_hws_ready(struct csio_hw *hw, enum csio_hw_ev evt) 2773 { 2774 /* Remember the event */ 2775 hw->evtflag = evt; 2776 2777 hw->prev_evt = hw->cur_evt; 2778 hw->cur_evt = evt; 2779 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2780 2781 switch (evt) { 2782 case CSIO_HWE_HBA_RESET: 2783 case CSIO_HWE_FW_DLOAD: 2784 case CSIO_HWE_SUSPEND: 2785 case CSIO_HWE_PCI_REMOVE: 2786 case CSIO_HWE_PCIERR_DETECTED: 2787 csio_set_state(&hw->sm, csio_hws_quiescing); 2788 /* cleanup all outstanding cmds */ 2789 if (evt == CSIO_HWE_HBA_RESET || 2790 evt == CSIO_HWE_PCIERR_DETECTED) 2791 csio_scsim_cleanup_io(csio_hw_to_scsim(hw), false); 2792 else 2793 csio_scsim_cleanup_io(csio_hw_to_scsim(hw), true); 2794 2795 csio_hw_intr_disable(hw); 2796 csio_hw_mbm_cleanup(hw); 2797 csio_evtq_stop(hw); 2798 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWSTOP); 2799 csio_evtq_flush(hw); 2800 csio_mgmtm_cleanup(csio_hw_to_mgmtm(hw)); 2801 csio_post_event(&hw->sm, CSIO_HWE_QUIESCED); 2802 break; 2803 2804 case CSIO_HWE_FATAL: 2805 csio_set_state(&hw->sm, csio_hws_uninit); 2806 break; 2807 2808 default: 2809 CSIO_INC_STATS(hw, n_evt_unexp); 2810 break; 2811 } 2812 } 2813 2814 /* 2815 * csio_hws_quiescing - Quiescing state 2816 * @hw - HW module 2817 * @evt - Event 2818 * 2819 */ 2820 static void 2821 csio_hws_quiescing(struct csio_hw *hw, enum csio_hw_ev evt) 2822 { 2823 hw->prev_evt = hw->cur_evt; 2824 hw->cur_evt = evt; 2825 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2826 2827 switch (evt) { 2828 case CSIO_HWE_QUIESCED: 2829 switch (hw->evtflag) { 2830 case CSIO_HWE_FW_DLOAD: 2831 csio_set_state(&hw->sm, csio_hws_resetting); 2832 /* Download firmware */ 2833 /* Fall through */ 2834 2835 case CSIO_HWE_HBA_RESET: 2836 csio_set_state(&hw->sm, csio_hws_resetting); 2837 /* Start reset of the HBA */ 2838 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWRESET); 2839 csio_wr_destroy_queues(hw, false); 2840 csio_do_reset(hw, false); 2841 csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET_DONE); 2842 break; 2843 2844 case CSIO_HWE_PCI_REMOVE: 2845 csio_set_state(&hw->sm, csio_hws_removing); 2846 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREMOVE); 2847 csio_wr_destroy_queues(hw, true); 2848 /* Now send the bye command */ 2849 csio_do_bye(hw); 2850 break; 2851 2852 case CSIO_HWE_SUSPEND: 2853 csio_set_state(&hw->sm, csio_hws_quiesced); 2854 break; 2855 2856 case CSIO_HWE_PCIERR_DETECTED: 2857 csio_set_state(&hw->sm, csio_hws_pcierr); 2858 csio_wr_destroy_queues(hw, false); 2859 break; 2860 2861 default: 2862 CSIO_INC_STATS(hw, n_evt_unexp); 2863 break; 2864 2865 } 2866 break; 2867 2868 default: 2869 CSIO_INC_STATS(hw, n_evt_unexp); 2870 break; 2871 } 2872 } 2873 2874 /* 2875 * csio_hws_quiesced - Quiesced state 2876 * @hw - HW module 2877 * @evt - Event 2878 * 2879 */ 2880 static void 2881 csio_hws_quiesced(struct csio_hw *hw, enum csio_hw_ev evt) 2882 { 2883 hw->prev_evt = hw->cur_evt; 2884 hw->cur_evt = evt; 2885 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2886 2887 switch (evt) { 2888 case CSIO_HWE_RESUME: 2889 csio_set_state(&hw->sm, csio_hws_configuring); 2890 csio_hw_configure(hw); 2891 break; 2892 2893 default: 2894 CSIO_INC_STATS(hw, n_evt_unexp); 2895 break; 2896 } 2897 } 2898 2899 /* 2900 * csio_hws_resetting - HW Resetting state 2901 * @hw - HW module 2902 * @evt - Event 2903 * 2904 */ 2905 static void 2906 csio_hws_resetting(struct csio_hw *hw, enum csio_hw_ev evt) 2907 { 2908 hw->prev_evt = hw->cur_evt; 2909 hw->cur_evt = evt; 2910 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2911 2912 switch (evt) { 2913 case CSIO_HWE_HBA_RESET_DONE: 2914 csio_evtq_start(hw); 2915 csio_set_state(&hw->sm, csio_hws_configuring); 2916 csio_hw_configure(hw); 2917 break; 2918 2919 default: 2920 CSIO_INC_STATS(hw, n_evt_unexp); 2921 break; 2922 } 2923 } 2924 2925 /* 2926 * csio_hws_removing - PCI Hotplug removing state 2927 * @hw - HW module 2928 * @evt - Event 2929 * 2930 */ 2931 static void 2932 csio_hws_removing(struct csio_hw *hw, enum csio_hw_ev evt) 2933 { 2934 hw->prev_evt = hw->cur_evt; 2935 hw->cur_evt = evt; 2936 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2937 2938 switch (evt) { 2939 case CSIO_HWE_HBA_RESET: 2940 if (!csio_is_hw_master(hw)) 2941 break; 2942 /* 2943 * The BYE should have alerady been issued, so we cant 2944 * use the mailbox interface. Hence we use the PL_RST 2945 * register directly. 2946 */ 2947 csio_err(hw, "Resetting HW and waiting 2 seconds...\n"); 2948 csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST); 2949 mdelay(2000); 2950 break; 2951 2952 /* Should never receive any new events */ 2953 default: 2954 CSIO_INC_STATS(hw, n_evt_unexp); 2955 break; 2956 2957 } 2958 } 2959 2960 /* 2961 * csio_hws_pcierr - PCI Error state 2962 * @hw - HW module 2963 * @evt - Event 2964 * 2965 */ 2966 static void 2967 csio_hws_pcierr(struct csio_hw *hw, enum csio_hw_ev evt) 2968 { 2969 hw->prev_evt = hw->cur_evt; 2970 hw->cur_evt = evt; 2971 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2972 2973 switch (evt) { 2974 case CSIO_HWE_PCIERR_SLOT_RESET: 2975 csio_evtq_start(hw); 2976 csio_set_state(&hw->sm, csio_hws_configuring); 2977 csio_hw_configure(hw); 2978 break; 2979 2980 default: 2981 CSIO_INC_STATS(hw, n_evt_unexp); 2982 break; 2983 } 2984 } 2985 2986 /*****************************************************************************/ 2987 /* END: HW SM */ 2988 /*****************************************************************************/ 2989 2990 /* Slow path handlers */ 2991 struct intr_info { 2992 unsigned int mask; /* bits to check in interrupt status */ 2993 const char *msg; /* message to print or NULL */ 2994 short stat_idx; /* stat counter to increment or -1 */ 2995 unsigned short fatal; /* whether the condition reported is fatal */ 2996 }; 2997 2998 /* 2999 * csio_handle_intr_status - table driven interrupt handler 3000 * @hw: HW instance 3001 * @reg: the interrupt status register to process 3002 * @acts: table of interrupt actions 3003 * 3004 * A table driven interrupt handler that applies a set of masks to an 3005 * interrupt status word and performs the corresponding actions if the 3006 * interrupts described by the mask have occured. The actions include 3007 * optionally emitting a warning or alert message. The table is terminated 3008 * by an entry specifying mask 0. Returns the number of fatal interrupt 3009 * conditions. 3010 */ 3011 static int 3012 csio_handle_intr_status(struct csio_hw *hw, unsigned int reg, 3013 const struct intr_info *acts) 3014 { 3015 int fatal = 0; 3016 unsigned int mask = 0; 3017 unsigned int status = csio_rd_reg32(hw, reg); 3018 3019 for ( ; acts->mask; ++acts) { 3020 if (!(status & acts->mask)) 3021 continue; 3022 if (acts->fatal) { 3023 fatal++; 3024 csio_fatal(hw, "Fatal %s (0x%x)\n", 3025 acts->msg, status & acts->mask); 3026 } else if (acts->msg) 3027 csio_info(hw, "%s (0x%x)\n", 3028 acts->msg, status & acts->mask); 3029 mask |= acts->mask; 3030 } 3031 status &= mask; 3032 if (status) /* clear processed interrupts */ 3033 csio_wr_reg32(hw, status, reg); 3034 return fatal; 3035 } 3036 3037 /* 3038 * Interrupt handler for the PCIE module. 3039 */ 3040 static void 3041 csio_pcie_intr_handler(struct csio_hw *hw) 3042 { 3043 static struct intr_info sysbus_intr_info[] = { 3044 { RNPP, "RXNP array parity error", -1, 1 }, 3045 { RPCP, "RXPC array parity error", -1, 1 }, 3046 { RCIP, "RXCIF array parity error", -1, 1 }, 3047 { RCCP, "Rx completions control array parity error", -1, 1 }, 3048 { RFTP, "RXFT array parity error", -1, 1 }, 3049 { 0, NULL, 0, 0 } 3050 }; 3051 static struct intr_info pcie_port_intr_info[] = { 3052 { TPCP, "TXPC array parity error", -1, 1 }, 3053 { TNPP, "TXNP array parity error", -1, 1 }, 3054 { TFTP, "TXFT array parity error", -1, 1 }, 3055 { TCAP, "TXCA array parity error", -1, 1 }, 3056 { TCIP, "TXCIF array parity error", -1, 1 }, 3057 { RCAP, "RXCA array parity error", -1, 1 }, 3058 { OTDD, "outbound request TLP discarded", -1, 1 }, 3059 { RDPE, "Rx data parity error", -1, 1 }, 3060 { TDUE, "Tx uncorrectable data error", -1, 1 }, 3061 { 0, NULL, 0, 0 } 3062 }; 3063 static struct intr_info pcie_intr_info[] = { 3064 { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 }, 3065 { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 }, 3066 { MSIDATAPERR, "MSI data parity error", -1, 1 }, 3067 { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 }, 3068 { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 }, 3069 { MSIXDATAPERR, "MSI-X data parity error", -1, 1 }, 3070 { MSIXDIPERR, "MSI-X DI parity error", -1, 1 }, 3071 { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 }, 3072 { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 }, 3073 { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 }, 3074 { CCNTPERR, "PCI CMD channel count parity error", -1, 1 }, 3075 { CREQPERR, "PCI CMD channel request parity error", -1, 1 }, 3076 { CRSPPERR, "PCI CMD channel response parity error", -1, 1 }, 3077 { DCNTPERR, "PCI DMA channel count parity error", -1, 1 }, 3078 { DREQPERR, "PCI DMA channel request parity error", -1, 1 }, 3079 { DRSPPERR, "PCI DMA channel response parity error", -1, 1 }, 3080 { HCNTPERR, "PCI HMA channel count parity error", -1, 1 }, 3081 { HREQPERR, "PCI HMA channel request parity error", -1, 1 }, 3082 { HRSPPERR, "PCI HMA channel response parity error", -1, 1 }, 3083 { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 }, 3084 { FIDPERR, "PCI FID parity error", -1, 1 }, 3085 { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 }, 3086 { MATAGPERR, "PCI MA tag parity error", -1, 1 }, 3087 { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 }, 3088 { RXCPLPERR, "PCI Rx completion parity error", -1, 1 }, 3089 { RXWRPERR, "PCI Rx write parity error", -1, 1 }, 3090 { RPLPERR, "PCI replay buffer parity error", -1, 1 }, 3091 { PCIESINT, "PCI core secondary fault", -1, 1 }, 3092 { PCIEPINT, "PCI core primary fault", -1, 1 }, 3093 { UNXSPLCPLERR, "PCI unexpected split completion error", -1, 3094 0 }, 3095 { 0, NULL, 0, 0 } 3096 }; 3097 3098 int fat; 3099 3100 fat = csio_handle_intr_status(hw, 3101 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, 3102 sysbus_intr_info) + 3103 csio_handle_intr_status(hw, 3104 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, 3105 pcie_port_intr_info) + 3106 csio_handle_intr_status(hw, PCIE_INT_CAUSE, pcie_intr_info); 3107 if (fat) 3108 csio_hw_fatal_err(hw); 3109 } 3110 3111 /* 3112 * TP interrupt handler. 3113 */ 3114 static void csio_tp_intr_handler(struct csio_hw *hw) 3115 { 3116 static struct intr_info tp_intr_info[] = { 3117 { 0x3fffffff, "TP parity error", -1, 1 }, 3118 { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 }, 3119 { 0, NULL, 0, 0 } 3120 }; 3121 3122 if (csio_handle_intr_status(hw, TP_INT_CAUSE, tp_intr_info)) 3123 csio_hw_fatal_err(hw); 3124 } 3125 3126 /* 3127 * SGE interrupt handler. 3128 */ 3129 static void csio_sge_intr_handler(struct csio_hw *hw) 3130 { 3131 uint64_t v; 3132 3133 static struct intr_info sge_intr_info[] = { 3134 { ERR_CPL_EXCEED_IQE_SIZE, 3135 "SGE received CPL exceeding IQE size", -1, 1 }, 3136 { ERR_INVALID_CIDX_INC, 3137 "SGE GTS CIDX increment too large", -1, 0 }, 3138 { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 }, 3139 { ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 }, 3140 { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0, 3141 "SGE IQID > 1023 received CPL for FL", -1, 0 }, 3142 { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1, 3143 0 }, 3144 { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1, 3145 0 }, 3146 { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1, 3147 0 }, 3148 { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1, 3149 0 }, 3150 { ERR_ING_CTXT_PRIO, 3151 "SGE too many priority ingress contexts", -1, 0 }, 3152 { ERR_EGR_CTXT_PRIO, 3153 "SGE too many priority egress contexts", -1, 0 }, 3154 { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 }, 3155 { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 }, 3156 { 0, NULL, 0, 0 } 3157 }; 3158 3159 v = (uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE1) | 3160 ((uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE2) << 32); 3161 if (v) { 3162 csio_fatal(hw, "SGE parity error (%#llx)\n", 3163 (unsigned long long)v); 3164 csio_wr_reg32(hw, (uint32_t)(v & 0xFFFFFFFF), 3165 SGE_INT_CAUSE1); 3166 csio_wr_reg32(hw, (uint32_t)(v >> 32), SGE_INT_CAUSE2); 3167 } 3168 3169 v |= csio_handle_intr_status(hw, SGE_INT_CAUSE3, sge_intr_info); 3170 3171 if (csio_handle_intr_status(hw, SGE_INT_CAUSE3, sge_intr_info) || 3172 v != 0) 3173 csio_hw_fatal_err(hw); 3174 } 3175 3176 #define CIM_OBQ_INTR (OBQULP0PARERR | OBQULP1PARERR | OBQULP2PARERR |\ 3177 OBQULP3PARERR | OBQSGEPARERR | OBQNCSIPARERR) 3178 #define CIM_IBQ_INTR (IBQTP0PARERR | IBQTP1PARERR | IBQULPPARERR |\ 3179 IBQSGEHIPARERR | IBQSGELOPARERR | IBQNCSIPARERR) 3180 3181 /* 3182 * CIM interrupt handler. 3183 */ 3184 static void csio_cim_intr_handler(struct csio_hw *hw) 3185 { 3186 static struct intr_info cim_intr_info[] = { 3187 { PREFDROPINT, "CIM control register prefetch drop", -1, 1 }, 3188 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 }, 3189 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 }, 3190 { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 }, 3191 { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 }, 3192 { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 }, 3193 { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 }, 3194 { 0, NULL, 0, 0 } 3195 }; 3196 static struct intr_info cim_upintr_info[] = { 3197 { RSVDSPACEINT, "CIM reserved space access", -1, 1 }, 3198 { ILLTRANSINT, "CIM illegal transaction", -1, 1 }, 3199 { ILLWRINT, "CIM illegal write", -1, 1 }, 3200 { ILLRDINT, "CIM illegal read", -1, 1 }, 3201 { ILLRDBEINT, "CIM illegal read BE", -1, 1 }, 3202 { ILLWRBEINT, "CIM illegal write BE", -1, 1 }, 3203 { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 }, 3204 { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 }, 3205 { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 }, 3206 { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 }, 3207 { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 }, 3208 { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 }, 3209 { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 }, 3210 { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 }, 3211 { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 }, 3212 { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 }, 3213 { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 }, 3214 { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 }, 3215 { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 }, 3216 { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 }, 3217 { SGLRDPLINT , "CIM single read from PL space", -1, 1 }, 3218 { SGLWRPLINT , "CIM single write to PL space", -1, 1 }, 3219 { BLKRDPLINT , "CIM block read from PL space", -1, 1 }, 3220 { BLKWRPLINT , "CIM block write to PL space", -1, 1 }, 3221 { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 }, 3222 { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 }, 3223 { TIMEOUTINT , "CIM PIF timeout", -1, 1 }, 3224 { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 }, 3225 { 0, NULL, 0, 0 } 3226 }; 3227 3228 int fat; 3229 3230 fat = csio_handle_intr_status(hw, CIM_HOST_INT_CAUSE, 3231 cim_intr_info) + 3232 csio_handle_intr_status(hw, CIM_HOST_UPACC_INT_CAUSE, 3233 cim_upintr_info); 3234 if (fat) 3235 csio_hw_fatal_err(hw); 3236 } 3237 3238 /* 3239 * ULP RX interrupt handler. 3240 */ 3241 static void csio_ulprx_intr_handler(struct csio_hw *hw) 3242 { 3243 static struct intr_info ulprx_intr_info[] = { 3244 { 0x1800000, "ULPRX context error", -1, 1 }, 3245 { 0x7fffff, "ULPRX parity error", -1, 1 }, 3246 { 0, NULL, 0, 0 } 3247 }; 3248 3249 if (csio_handle_intr_status(hw, ULP_RX_INT_CAUSE, ulprx_intr_info)) 3250 csio_hw_fatal_err(hw); 3251 } 3252 3253 /* 3254 * ULP TX interrupt handler. 3255 */ 3256 static void csio_ulptx_intr_handler(struct csio_hw *hw) 3257 { 3258 static struct intr_info ulptx_intr_info[] = { 3259 { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1, 3260 0 }, 3261 { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1, 3262 0 }, 3263 { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1, 3264 0 }, 3265 { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1, 3266 0 }, 3267 { 0xfffffff, "ULPTX parity error", -1, 1 }, 3268 { 0, NULL, 0, 0 } 3269 }; 3270 3271 if (csio_handle_intr_status(hw, ULP_TX_INT_CAUSE, ulptx_intr_info)) 3272 csio_hw_fatal_err(hw); 3273 } 3274 3275 /* 3276 * PM TX interrupt handler. 3277 */ 3278 static void csio_pmtx_intr_handler(struct csio_hw *hw) 3279 { 3280 static struct intr_info pmtx_intr_info[] = { 3281 { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 }, 3282 { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 }, 3283 { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 }, 3284 { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 }, 3285 { 0xffffff0, "PMTX framing error", -1, 1 }, 3286 { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 }, 3287 { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 3288 1 }, 3289 { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 }, 3290 { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1}, 3291 { 0, NULL, 0, 0 } 3292 }; 3293 3294 if (csio_handle_intr_status(hw, PM_TX_INT_CAUSE, pmtx_intr_info)) 3295 csio_hw_fatal_err(hw); 3296 } 3297 3298 /* 3299 * PM RX interrupt handler. 3300 */ 3301 static void csio_pmrx_intr_handler(struct csio_hw *hw) 3302 { 3303 static struct intr_info pmrx_intr_info[] = { 3304 { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 }, 3305 { 0x3ffff0, "PMRX framing error", -1, 1 }, 3306 { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 }, 3307 { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 3308 1 }, 3309 { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 }, 3310 { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1}, 3311 { 0, NULL, 0, 0 } 3312 }; 3313 3314 if (csio_handle_intr_status(hw, PM_RX_INT_CAUSE, pmrx_intr_info)) 3315 csio_hw_fatal_err(hw); 3316 } 3317 3318 /* 3319 * CPL switch interrupt handler. 3320 */ 3321 static void csio_cplsw_intr_handler(struct csio_hw *hw) 3322 { 3323 static struct intr_info cplsw_intr_info[] = { 3324 { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 }, 3325 { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 }, 3326 { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 }, 3327 { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 }, 3328 { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 }, 3329 { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 }, 3330 { 0, NULL, 0, 0 } 3331 }; 3332 3333 if (csio_handle_intr_status(hw, CPL_INTR_CAUSE, cplsw_intr_info)) 3334 csio_hw_fatal_err(hw); 3335 } 3336 3337 /* 3338 * LE interrupt handler. 3339 */ 3340 static void csio_le_intr_handler(struct csio_hw *hw) 3341 { 3342 static struct intr_info le_intr_info[] = { 3343 { LIPMISS, "LE LIP miss", -1, 0 }, 3344 { LIP0, "LE 0 LIP error", -1, 0 }, 3345 { PARITYERR, "LE parity error", -1, 1 }, 3346 { UNKNOWNCMD, "LE unknown command", -1, 1 }, 3347 { REQQPARERR, "LE request queue parity error", -1, 1 }, 3348 { 0, NULL, 0, 0 } 3349 }; 3350 3351 if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE, le_intr_info)) 3352 csio_hw_fatal_err(hw); 3353 } 3354 3355 /* 3356 * MPS interrupt handler. 3357 */ 3358 static void csio_mps_intr_handler(struct csio_hw *hw) 3359 { 3360 static struct intr_info mps_rx_intr_info[] = { 3361 { 0xffffff, "MPS Rx parity error", -1, 1 }, 3362 { 0, NULL, 0, 0 } 3363 }; 3364 static struct intr_info mps_tx_intr_info[] = { 3365 { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 }, 3366 { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 }, 3367 { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 }, 3368 { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 }, 3369 { BUBBLE, "MPS Tx underflow", -1, 1 }, 3370 { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 }, 3371 { FRMERR, "MPS Tx framing error", -1, 1 }, 3372 { 0, NULL, 0, 0 } 3373 }; 3374 static struct intr_info mps_trc_intr_info[] = { 3375 { FILTMEM, "MPS TRC filter parity error", -1, 1 }, 3376 { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 }, 3377 { MISCPERR, "MPS TRC misc parity error", -1, 1 }, 3378 { 0, NULL, 0, 0 } 3379 }; 3380 static struct intr_info mps_stat_sram_intr_info[] = { 3381 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 }, 3382 { 0, NULL, 0, 0 } 3383 }; 3384 static struct intr_info mps_stat_tx_intr_info[] = { 3385 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 }, 3386 { 0, NULL, 0, 0 } 3387 }; 3388 static struct intr_info mps_stat_rx_intr_info[] = { 3389 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 }, 3390 { 0, NULL, 0, 0 } 3391 }; 3392 static struct intr_info mps_cls_intr_info[] = { 3393 { MATCHSRAM, "MPS match SRAM parity error", -1, 1 }, 3394 { MATCHTCAM, "MPS match TCAM parity error", -1, 1 }, 3395 { HASHSRAM, "MPS hash SRAM parity error", -1, 1 }, 3396 { 0, NULL, 0, 0 } 3397 }; 3398 3399 int fat; 3400 3401 fat = csio_handle_intr_status(hw, MPS_RX_PERR_INT_CAUSE, 3402 mps_rx_intr_info) + 3403 csio_handle_intr_status(hw, MPS_TX_INT_CAUSE, 3404 mps_tx_intr_info) + 3405 csio_handle_intr_status(hw, MPS_TRC_INT_CAUSE, 3406 mps_trc_intr_info) + 3407 csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_SRAM, 3408 mps_stat_sram_intr_info) + 3409 csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_TX_FIFO, 3410 mps_stat_tx_intr_info) + 3411 csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_RX_FIFO, 3412 mps_stat_rx_intr_info) + 3413 csio_handle_intr_status(hw, MPS_CLS_INT_CAUSE, 3414 mps_cls_intr_info); 3415 3416 csio_wr_reg32(hw, 0, MPS_INT_CAUSE); 3417 csio_rd_reg32(hw, MPS_INT_CAUSE); /* flush */ 3418 if (fat) 3419 csio_hw_fatal_err(hw); 3420 } 3421 3422 #define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE) 3423 3424 /* 3425 * EDC/MC interrupt handler. 3426 */ 3427 static void csio_mem_intr_handler(struct csio_hw *hw, int idx) 3428 { 3429 static const char name[3][5] = { "EDC0", "EDC1", "MC" }; 3430 3431 unsigned int addr, cnt_addr, v; 3432 3433 if (idx <= MEM_EDC1) { 3434 addr = EDC_REG(EDC_INT_CAUSE, idx); 3435 cnt_addr = EDC_REG(EDC_ECC_STATUS, idx); 3436 } else { 3437 addr = MC_INT_CAUSE; 3438 cnt_addr = MC_ECC_STATUS; 3439 } 3440 3441 v = csio_rd_reg32(hw, addr) & MEM_INT_MASK; 3442 if (v & PERR_INT_CAUSE) 3443 csio_fatal(hw, "%s FIFO parity error\n", name[idx]); 3444 if (v & ECC_CE_INT_CAUSE) { 3445 uint32_t cnt = ECC_CECNT_GET(csio_rd_reg32(hw, cnt_addr)); 3446 3447 csio_wr_reg32(hw, ECC_CECNT_MASK, cnt_addr); 3448 csio_warn(hw, "%u %s correctable ECC data error%s\n", 3449 cnt, name[idx], cnt > 1 ? "s" : ""); 3450 } 3451 if (v & ECC_UE_INT_CAUSE) 3452 csio_fatal(hw, "%s uncorrectable ECC data error\n", name[idx]); 3453 3454 csio_wr_reg32(hw, v, addr); 3455 if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE)) 3456 csio_hw_fatal_err(hw); 3457 } 3458 3459 /* 3460 * MA interrupt handler. 3461 */ 3462 static void csio_ma_intr_handler(struct csio_hw *hw) 3463 { 3464 uint32_t v, status = csio_rd_reg32(hw, MA_INT_CAUSE); 3465 3466 if (status & MEM_PERR_INT_CAUSE) 3467 csio_fatal(hw, "MA parity error, parity status %#x\n", 3468 csio_rd_reg32(hw, MA_PARITY_ERROR_STATUS)); 3469 if (status & MEM_WRAP_INT_CAUSE) { 3470 v = csio_rd_reg32(hw, MA_INT_WRAP_STATUS); 3471 csio_fatal(hw, 3472 "MA address wrap-around error by client %u to address %#x\n", 3473 MEM_WRAP_CLIENT_NUM_GET(v), MEM_WRAP_ADDRESS_GET(v) << 4); 3474 } 3475 csio_wr_reg32(hw, status, MA_INT_CAUSE); 3476 csio_hw_fatal_err(hw); 3477 } 3478 3479 /* 3480 * SMB interrupt handler. 3481 */ 3482 static void csio_smb_intr_handler(struct csio_hw *hw) 3483 { 3484 static struct intr_info smb_intr_info[] = { 3485 { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 }, 3486 { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 }, 3487 { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 }, 3488 { 0, NULL, 0, 0 } 3489 }; 3490 3491 if (csio_handle_intr_status(hw, SMB_INT_CAUSE, smb_intr_info)) 3492 csio_hw_fatal_err(hw); 3493 } 3494 3495 /* 3496 * NC-SI interrupt handler. 3497 */ 3498 static void csio_ncsi_intr_handler(struct csio_hw *hw) 3499 { 3500 static struct intr_info ncsi_intr_info[] = { 3501 { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 }, 3502 { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 }, 3503 { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 }, 3504 { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 }, 3505 { 0, NULL, 0, 0 } 3506 }; 3507 3508 if (csio_handle_intr_status(hw, NCSI_INT_CAUSE, ncsi_intr_info)) 3509 csio_hw_fatal_err(hw); 3510 } 3511 3512 /* 3513 * XGMAC interrupt handler. 3514 */ 3515 static void csio_xgmac_intr_handler(struct csio_hw *hw, int port) 3516 { 3517 uint32_t v = csio_rd_reg32(hw, PORT_REG(port, XGMAC_PORT_INT_CAUSE)); 3518 3519 v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR; 3520 if (!v) 3521 return; 3522 3523 if (v & TXFIFO_PRTY_ERR) 3524 csio_fatal(hw, "XGMAC %d Tx FIFO parity error\n", port); 3525 if (v & RXFIFO_PRTY_ERR) 3526 csio_fatal(hw, "XGMAC %d Rx FIFO parity error\n", port); 3527 csio_wr_reg32(hw, v, PORT_REG(port, XGMAC_PORT_INT_CAUSE)); 3528 csio_hw_fatal_err(hw); 3529 } 3530 3531 /* 3532 * PL interrupt handler. 3533 */ 3534 static void csio_pl_intr_handler(struct csio_hw *hw) 3535 { 3536 static struct intr_info pl_intr_info[] = { 3537 { FATALPERR, "T4 fatal parity error", -1, 1 }, 3538 { PERRVFID, "PL VFID_MAP parity error", -1, 1 }, 3539 { 0, NULL, 0, 0 } 3540 }; 3541 3542 if (csio_handle_intr_status(hw, PL_PL_INT_CAUSE, pl_intr_info)) 3543 csio_hw_fatal_err(hw); 3544 } 3545 3546 /* 3547 * csio_hw_slow_intr_handler - control path interrupt handler 3548 * @hw: HW module 3549 * 3550 * Interrupt handler for non-data global interrupt events, e.g., errors. 3551 * The designation 'slow' is because it involves register reads, while 3552 * data interrupts typically don't involve any MMIOs. 3553 */ 3554 int 3555 csio_hw_slow_intr_handler(struct csio_hw *hw) 3556 { 3557 uint32_t cause = csio_rd_reg32(hw, PL_INT_CAUSE); 3558 3559 if (!(cause & CSIO_GLBL_INTR_MASK)) { 3560 CSIO_INC_STATS(hw, n_plint_unexp); 3561 return 0; 3562 } 3563 3564 csio_dbg(hw, "Slow interrupt! cause: 0x%x\n", cause); 3565 3566 CSIO_INC_STATS(hw, n_plint_cnt); 3567 3568 if (cause & CIM) 3569 csio_cim_intr_handler(hw); 3570 3571 if (cause & MPS) 3572 csio_mps_intr_handler(hw); 3573 3574 if (cause & NCSI) 3575 csio_ncsi_intr_handler(hw); 3576 3577 if (cause & PL) 3578 csio_pl_intr_handler(hw); 3579 3580 if (cause & SMB) 3581 csio_smb_intr_handler(hw); 3582 3583 if (cause & XGMAC0) 3584 csio_xgmac_intr_handler(hw, 0); 3585 3586 if (cause & XGMAC1) 3587 csio_xgmac_intr_handler(hw, 1); 3588 3589 if (cause & XGMAC_KR0) 3590 csio_xgmac_intr_handler(hw, 2); 3591 3592 if (cause & XGMAC_KR1) 3593 csio_xgmac_intr_handler(hw, 3); 3594 3595 if (cause & PCIE) 3596 csio_pcie_intr_handler(hw); 3597 3598 if (cause & MC) 3599 csio_mem_intr_handler(hw, MEM_MC); 3600 3601 if (cause & EDC0) 3602 csio_mem_intr_handler(hw, MEM_EDC0); 3603 3604 if (cause & EDC1) 3605 csio_mem_intr_handler(hw, MEM_EDC1); 3606 3607 if (cause & LE) 3608 csio_le_intr_handler(hw); 3609 3610 if (cause & TP) 3611 csio_tp_intr_handler(hw); 3612 3613 if (cause & MA) 3614 csio_ma_intr_handler(hw); 3615 3616 if (cause & PM_TX) 3617 csio_pmtx_intr_handler(hw); 3618 3619 if (cause & PM_RX) 3620 csio_pmrx_intr_handler(hw); 3621 3622 if (cause & ULP_RX) 3623 csio_ulprx_intr_handler(hw); 3624 3625 if (cause & CPL_SWITCH) 3626 csio_cplsw_intr_handler(hw); 3627 3628 if (cause & SGE) 3629 csio_sge_intr_handler(hw); 3630 3631 if (cause & ULP_TX) 3632 csio_ulptx_intr_handler(hw); 3633 3634 /* Clear the interrupts just processed for which we are the master. */ 3635 csio_wr_reg32(hw, cause & CSIO_GLBL_INTR_MASK, PL_INT_CAUSE); 3636 csio_rd_reg32(hw, PL_INT_CAUSE); /* flush */ 3637 3638 return 1; 3639 } 3640 3641 /***************************************************************************** 3642 * HW <--> mailbox interfacing routines. 3643 ****************************************************************************/ 3644 /* 3645 * csio_mberr_worker - Worker thread (dpc) for mailbox/error completions 3646 * 3647 * @data: Private data pointer. 3648 * 3649 * Called from worker thread context. 3650 */ 3651 static void 3652 csio_mberr_worker(void *data) 3653 { 3654 struct csio_hw *hw = (struct csio_hw *)data; 3655 struct csio_mbm *mbm = &hw->mbm; 3656 LIST_HEAD(cbfn_q); 3657 struct csio_mb *mbp_next; 3658 int rv; 3659 3660 del_timer_sync(&mbm->timer); 3661 3662 spin_lock_irq(&hw->lock); 3663 if (list_empty(&mbm->cbfn_q)) { 3664 spin_unlock_irq(&hw->lock); 3665 return; 3666 } 3667 3668 list_splice_tail_init(&mbm->cbfn_q, &cbfn_q); 3669 mbm->stats.n_cbfnq = 0; 3670 3671 /* Try to start waiting mailboxes */ 3672 if (!list_empty(&mbm->req_q)) { 3673 mbp_next = list_first_entry(&mbm->req_q, struct csio_mb, list); 3674 list_del_init(&mbp_next->list); 3675 3676 rv = csio_mb_issue(hw, mbp_next); 3677 if (rv != 0) 3678 list_add_tail(&mbp_next->list, &mbm->req_q); 3679 else 3680 CSIO_DEC_STATS(mbm, n_activeq); 3681 } 3682 spin_unlock_irq(&hw->lock); 3683 3684 /* Now callback completions */ 3685 csio_mb_completions(hw, &cbfn_q); 3686 } 3687 3688 /* 3689 * csio_hw_mb_timer - Top-level Mailbox timeout handler. 3690 * 3691 * @data: private data pointer 3692 * 3693 **/ 3694 static void 3695 csio_hw_mb_timer(uintptr_t data) 3696 { 3697 struct csio_hw *hw = (struct csio_hw *)data; 3698 struct csio_mb *mbp = NULL; 3699 3700 spin_lock_irq(&hw->lock); 3701 mbp = csio_mb_tmo_handler(hw); 3702 spin_unlock_irq(&hw->lock); 3703 3704 /* Call back the function for the timed-out Mailbox */ 3705 if (mbp) 3706 mbp->mb_cbfn(hw, mbp); 3707 3708 } 3709 3710 /* 3711 * csio_hw_mbm_cleanup - Cleanup Mailbox module. 3712 * @hw: HW module 3713 * 3714 * Called with lock held, should exit with lock held. 3715 * Cancels outstanding mailboxes (waiting, in-flight) and gathers them 3716 * into a local queue. Drops lock and calls the completions. Holds 3717 * lock and returns. 3718 */ 3719 static void 3720 csio_hw_mbm_cleanup(struct csio_hw *hw) 3721 { 3722 LIST_HEAD(cbfn_q); 3723 3724 csio_mb_cancel_all(hw, &cbfn_q); 3725 3726 spin_unlock_irq(&hw->lock); 3727 csio_mb_completions(hw, &cbfn_q); 3728 spin_lock_irq(&hw->lock); 3729 } 3730 3731 /***************************************************************************** 3732 * Event handling 3733 ****************************************************************************/ 3734 int 3735 csio_enqueue_evt(struct csio_hw *hw, enum csio_evt type, void *evt_msg, 3736 uint16_t len) 3737 { 3738 struct csio_evt_msg *evt_entry = NULL; 3739 3740 if (type >= CSIO_EVT_MAX) 3741 return -EINVAL; 3742 3743 if (len > CSIO_EVT_MSG_SIZE) 3744 return -EINVAL; 3745 3746 if (hw->flags & CSIO_HWF_FWEVT_STOP) 3747 return -EINVAL; 3748 3749 if (list_empty(&hw->evt_free_q)) { 3750 csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n", 3751 type, len); 3752 return -ENOMEM; 3753 } 3754 3755 evt_entry = list_first_entry(&hw->evt_free_q, 3756 struct csio_evt_msg, list); 3757 list_del_init(&evt_entry->list); 3758 3759 /* copy event msg and queue the event */ 3760 evt_entry->type = type; 3761 memcpy((void *)evt_entry->data, evt_msg, len); 3762 list_add_tail(&evt_entry->list, &hw->evt_active_q); 3763 3764 CSIO_DEC_STATS(hw, n_evt_freeq); 3765 CSIO_INC_STATS(hw, n_evt_activeq); 3766 3767 return 0; 3768 } 3769 3770 static int 3771 csio_enqueue_evt_lock(struct csio_hw *hw, enum csio_evt type, void *evt_msg, 3772 uint16_t len, bool msg_sg) 3773 { 3774 struct csio_evt_msg *evt_entry = NULL; 3775 struct csio_fl_dma_buf *fl_sg; 3776 uint32_t off = 0; 3777 unsigned long flags; 3778 int n, ret = 0; 3779 3780 if (type >= CSIO_EVT_MAX) 3781 return -EINVAL; 3782 3783 if (len > CSIO_EVT_MSG_SIZE) 3784 return -EINVAL; 3785 3786 spin_lock_irqsave(&hw->lock, flags); 3787 if (hw->flags & CSIO_HWF_FWEVT_STOP) { 3788 ret = -EINVAL; 3789 goto out; 3790 } 3791 3792 if (list_empty(&hw->evt_free_q)) { 3793 csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n", 3794 type, len); 3795 ret = -ENOMEM; 3796 goto out; 3797 } 3798 3799 evt_entry = list_first_entry(&hw->evt_free_q, 3800 struct csio_evt_msg, list); 3801 list_del_init(&evt_entry->list); 3802 3803 /* copy event msg and queue the event */ 3804 evt_entry->type = type; 3805 3806 /* If Payload in SG list*/ 3807 if (msg_sg) { 3808 fl_sg = (struct csio_fl_dma_buf *) evt_msg; 3809 for (n = 0; (n < CSIO_MAX_FLBUF_PER_IQWR && off < len); n++) { 3810 memcpy((void *)((uintptr_t)evt_entry->data + off), 3811 fl_sg->flbufs[n].vaddr, 3812 fl_sg->flbufs[n].len); 3813 off += fl_sg->flbufs[n].len; 3814 } 3815 } else 3816 memcpy((void *)evt_entry->data, evt_msg, len); 3817 3818 list_add_tail(&evt_entry->list, &hw->evt_active_q); 3819 CSIO_DEC_STATS(hw, n_evt_freeq); 3820 CSIO_INC_STATS(hw, n_evt_activeq); 3821 out: 3822 spin_unlock_irqrestore(&hw->lock, flags); 3823 return ret; 3824 } 3825 3826 static void 3827 csio_free_evt(struct csio_hw *hw, struct csio_evt_msg *evt_entry) 3828 { 3829 if (evt_entry) { 3830 spin_lock_irq(&hw->lock); 3831 list_del_init(&evt_entry->list); 3832 list_add_tail(&evt_entry->list, &hw->evt_free_q); 3833 CSIO_DEC_STATS(hw, n_evt_activeq); 3834 CSIO_INC_STATS(hw, n_evt_freeq); 3835 spin_unlock_irq(&hw->lock); 3836 } 3837 } 3838 3839 void 3840 csio_evtq_flush(struct csio_hw *hw) 3841 { 3842 uint32_t count; 3843 count = 30; 3844 while (hw->flags & CSIO_HWF_FWEVT_PENDING && count--) { 3845 spin_unlock_irq(&hw->lock); 3846 msleep(2000); 3847 spin_lock_irq(&hw->lock); 3848 } 3849 3850 CSIO_DB_ASSERT(!(hw->flags & CSIO_HWF_FWEVT_PENDING)); 3851 } 3852 3853 static void 3854 csio_evtq_stop(struct csio_hw *hw) 3855 { 3856 hw->flags |= CSIO_HWF_FWEVT_STOP; 3857 } 3858 3859 static void 3860 csio_evtq_start(struct csio_hw *hw) 3861 { 3862 hw->flags &= ~CSIO_HWF_FWEVT_STOP; 3863 } 3864 3865 static void 3866 csio_evtq_cleanup(struct csio_hw *hw) 3867 { 3868 struct list_head *evt_entry, *next_entry; 3869 3870 /* Release outstanding events from activeq to freeq*/ 3871 if (!list_empty(&hw->evt_active_q)) 3872 list_splice_tail_init(&hw->evt_active_q, &hw->evt_free_q); 3873 3874 hw->stats.n_evt_activeq = 0; 3875 hw->flags &= ~CSIO_HWF_FWEVT_PENDING; 3876 3877 /* Freeup event entry */ 3878 list_for_each_safe(evt_entry, next_entry, &hw->evt_free_q) { 3879 kfree(evt_entry); 3880 CSIO_DEC_STATS(hw, n_evt_freeq); 3881 } 3882 3883 hw->stats.n_evt_freeq = 0; 3884 } 3885 3886 3887 static void 3888 csio_process_fwevtq_entry(struct csio_hw *hw, void *wr, uint32_t len, 3889 struct csio_fl_dma_buf *flb, void *priv) 3890 { 3891 __u8 op; 3892 __be64 *data; 3893 void *msg = NULL; 3894 uint32_t msg_len = 0; 3895 bool msg_sg = 0; 3896 3897 op = ((struct rss_header *) wr)->opcode; 3898 if (op == CPL_FW6_PLD) { 3899 CSIO_INC_STATS(hw, n_cpl_fw6_pld); 3900 if (!flb || !flb->totlen) { 3901 CSIO_INC_STATS(hw, n_cpl_unexp); 3902 return; 3903 } 3904 3905 msg = (void *) flb; 3906 msg_len = flb->totlen; 3907 msg_sg = 1; 3908 3909 data = (__be64 *) msg; 3910 } else if (op == CPL_FW6_MSG || op == CPL_FW4_MSG) { 3911 3912 CSIO_INC_STATS(hw, n_cpl_fw6_msg); 3913 /* skip RSS header */ 3914 msg = (void *)((uintptr_t)wr + sizeof(__be64)); 3915 msg_len = (op == CPL_FW6_MSG) ? sizeof(struct cpl_fw6_msg) : 3916 sizeof(struct cpl_fw4_msg); 3917 3918 data = (__be64 *) msg; 3919 } else { 3920 csio_warn(hw, "unexpected CPL %#x on FW event queue\n", op); 3921 CSIO_INC_STATS(hw, n_cpl_unexp); 3922 return; 3923 } 3924 3925 /* 3926 * Enqueue event to EventQ. Events processing happens 3927 * in Event worker thread context 3928 */ 3929 if (csio_enqueue_evt_lock(hw, CSIO_EVT_FW, msg, 3930 (uint16_t)msg_len, msg_sg)) 3931 CSIO_INC_STATS(hw, n_evt_drop); 3932 } 3933 3934 void 3935 csio_evtq_worker(struct work_struct *work) 3936 { 3937 struct csio_hw *hw = container_of(work, struct csio_hw, evtq_work); 3938 struct list_head *evt_entry, *next_entry; 3939 LIST_HEAD(evt_q); 3940 struct csio_evt_msg *evt_msg; 3941 struct cpl_fw6_msg *msg; 3942 struct csio_rnode *rn; 3943 int rv = 0; 3944 uint8_t evtq_stop = 0; 3945 3946 csio_dbg(hw, "event worker thread active evts#%d\n", 3947 hw->stats.n_evt_activeq); 3948 3949 spin_lock_irq(&hw->lock); 3950 while (!list_empty(&hw->evt_active_q)) { 3951 list_splice_tail_init(&hw->evt_active_q, &evt_q); 3952 spin_unlock_irq(&hw->lock); 3953 3954 list_for_each_safe(evt_entry, next_entry, &evt_q) { 3955 evt_msg = (struct csio_evt_msg *) evt_entry; 3956 3957 /* Drop events if queue is STOPPED */ 3958 spin_lock_irq(&hw->lock); 3959 if (hw->flags & CSIO_HWF_FWEVT_STOP) 3960 evtq_stop = 1; 3961 spin_unlock_irq(&hw->lock); 3962 if (evtq_stop) { 3963 CSIO_INC_STATS(hw, n_evt_drop); 3964 goto free_evt; 3965 } 3966 3967 switch (evt_msg->type) { 3968 case CSIO_EVT_FW: 3969 msg = (struct cpl_fw6_msg *)(evt_msg->data); 3970 3971 if ((msg->opcode == CPL_FW6_MSG || 3972 msg->opcode == CPL_FW4_MSG) && 3973 !msg->type) { 3974 rv = csio_mb_fwevt_handler(hw, 3975 msg->data); 3976 if (!rv) 3977 break; 3978 /* Handle any remaining fw events */ 3979 csio_fcoe_fwevt_handler(hw, 3980 msg->opcode, msg->data); 3981 } else if (msg->opcode == CPL_FW6_PLD) { 3982 3983 csio_fcoe_fwevt_handler(hw, 3984 msg->opcode, msg->data); 3985 } else { 3986 csio_warn(hw, 3987 "Unhandled FW msg op %x type %x\n", 3988 msg->opcode, msg->type); 3989 CSIO_INC_STATS(hw, n_evt_drop); 3990 } 3991 break; 3992 3993 case CSIO_EVT_MBX: 3994 csio_mberr_worker(hw); 3995 break; 3996 3997 case CSIO_EVT_DEV_LOSS: 3998 memcpy(&rn, evt_msg->data, sizeof(rn)); 3999 csio_rnode_devloss_handler(rn); 4000 break; 4001 4002 default: 4003 csio_warn(hw, "Unhandled event %x on evtq\n", 4004 evt_msg->type); 4005 CSIO_INC_STATS(hw, n_evt_unexp); 4006 break; 4007 } 4008 free_evt: 4009 csio_free_evt(hw, evt_msg); 4010 } 4011 4012 spin_lock_irq(&hw->lock); 4013 } 4014 hw->flags &= ~CSIO_HWF_FWEVT_PENDING; 4015 spin_unlock_irq(&hw->lock); 4016 } 4017 4018 int 4019 csio_fwevtq_handler(struct csio_hw *hw) 4020 { 4021 int rv; 4022 4023 if (csio_q_iqid(hw, hw->fwevt_iq_idx) == CSIO_MAX_QID) { 4024 CSIO_INC_STATS(hw, n_int_stray); 4025 return -EINVAL; 4026 } 4027 4028 rv = csio_wr_process_iq_idx(hw, hw->fwevt_iq_idx, 4029 csio_process_fwevtq_entry, NULL); 4030 return rv; 4031 } 4032 4033 /**************************************************************************** 4034 * Entry points 4035 ****************************************************************************/ 4036 4037 /* Management module */ 4038 /* 4039 * csio_mgmt_req_lookup - Lookup the given IO req exist in Active Q. 4040 * mgmt - mgmt module 4041 * @io_req - io request 4042 * 4043 * Return - 0:if given IO Req exists in active Q. 4044 * -EINVAL :if lookup fails. 4045 */ 4046 int 4047 csio_mgmt_req_lookup(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req) 4048 { 4049 struct list_head *tmp; 4050 4051 /* Lookup ioreq in the ACTIVEQ */ 4052 list_for_each(tmp, &mgmtm->active_q) { 4053 if (io_req == (struct csio_ioreq *)tmp) 4054 return 0; 4055 } 4056 return -EINVAL; 4057 } 4058 4059 #define ECM_MIN_TMO 1000 /* Minimum timeout value for req */ 4060 4061 /* 4062 * csio_mgmts_tmo_handler - MGMT IO Timeout handler. 4063 * @data - Event data. 4064 * 4065 * Return - none. 4066 */ 4067 static void 4068 csio_mgmt_tmo_handler(uintptr_t data) 4069 { 4070 struct csio_mgmtm *mgmtm = (struct csio_mgmtm *) data; 4071 struct list_head *tmp; 4072 struct csio_ioreq *io_req; 4073 4074 csio_dbg(mgmtm->hw, "Mgmt timer invoked!\n"); 4075 4076 spin_lock_irq(&mgmtm->hw->lock); 4077 4078 list_for_each(tmp, &mgmtm->active_q) { 4079 io_req = (struct csio_ioreq *) tmp; 4080 io_req->tmo -= min_t(uint32_t, io_req->tmo, ECM_MIN_TMO); 4081 4082 if (!io_req->tmo) { 4083 /* Dequeue the request from retry Q. */ 4084 tmp = csio_list_prev(tmp); 4085 list_del_init(&io_req->sm.sm_list); 4086 if (io_req->io_cbfn) { 4087 /* io_req will be freed by completion handler */ 4088 io_req->wr_status = -ETIMEDOUT; 4089 io_req->io_cbfn(mgmtm->hw, io_req); 4090 } else { 4091 CSIO_DB_ASSERT(0); 4092 } 4093 } 4094 } 4095 4096 /* If retry queue is not empty, re-arm timer */ 4097 if (!list_empty(&mgmtm->active_q)) 4098 mod_timer(&mgmtm->mgmt_timer, 4099 jiffies + msecs_to_jiffies(ECM_MIN_TMO)); 4100 spin_unlock_irq(&mgmtm->hw->lock); 4101 } 4102 4103 static void 4104 csio_mgmtm_cleanup(struct csio_mgmtm *mgmtm) 4105 { 4106 struct csio_hw *hw = mgmtm->hw; 4107 struct csio_ioreq *io_req; 4108 struct list_head *tmp; 4109 uint32_t count; 4110 4111 count = 30; 4112 /* Wait for all outstanding req to complete gracefully */ 4113 while ((!list_empty(&mgmtm->active_q)) && count--) { 4114 spin_unlock_irq(&hw->lock); 4115 msleep(2000); 4116 spin_lock_irq(&hw->lock); 4117 } 4118 4119 /* release outstanding req from ACTIVEQ */ 4120 list_for_each(tmp, &mgmtm->active_q) { 4121 io_req = (struct csio_ioreq *) tmp; 4122 tmp = csio_list_prev(tmp); 4123 list_del_init(&io_req->sm.sm_list); 4124 mgmtm->stats.n_active--; 4125 if (io_req->io_cbfn) { 4126 /* io_req will be freed by completion handler */ 4127 io_req->wr_status = -ETIMEDOUT; 4128 io_req->io_cbfn(mgmtm->hw, io_req); 4129 } 4130 } 4131 } 4132 4133 /* 4134 * csio_mgmt_init - Mgmt module init entry point 4135 * @mgmtsm - mgmt module 4136 * @hw - HW module 4137 * 4138 * Initialize mgmt timer, resource wait queue, active queue, 4139 * completion q. Allocate Egress and Ingress 4140 * WR queues and save off the queue index returned by the WR 4141 * module for future use. Allocate and save off mgmt reqs in the 4142 * mgmt_req_freelist for future use. Make sure their SM is initialized 4143 * to uninit state. 4144 * Returns: 0 - on success 4145 * -ENOMEM - on error. 4146 */ 4147 static int 4148 csio_mgmtm_init(struct csio_mgmtm *mgmtm, struct csio_hw *hw) 4149 { 4150 struct timer_list *timer = &mgmtm->mgmt_timer; 4151 4152 init_timer(timer); 4153 timer->function = csio_mgmt_tmo_handler; 4154 timer->data = (unsigned long)mgmtm; 4155 4156 INIT_LIST_HEAD(&mgmtm->active_q); 4157 INIT_LIST_HEAD(&mgmtm->cbfn_q); 4158 4159 mgmtm->hw = hw; 4160 /*mgmtm->iq_idx = hw->fwevt_iq_idx;*/ 4161 4162 return 0; 4163 } 4164 4165 /* 4166 * csio_mgmtm_exit - MGMT module exit entry point 4167 * @mgmtsm - mgmt module 4168 * 4169 * This function called during MGMT module uninit. 4170 * Stop timers, free ioreqs allocated. 4171 * Returns: None 4172 * 4173 */ 4174 static void 4175 csio_mgmtm_exit(struct csio_mgmtm *mgmtm) 4176 { 4177 del_timer_sync(&mgmtm->mgmt_timer); 4178 } 4179 4180 4181 /** 4182 * csio_hw_start - Kicks off the HW State machine 4183 * @hw: Pointer to HW module. 4184 * 4185 * It is assumed that the initialization is a synchronous operation. 4186 * So when we return afer posting the event, the HW SM should be in 4187 * the ready state, if there were no errors during init. 4188 */ 4189 int 4190 csio_hw_start(struct csio_hw *hw) 4191 { 4192 spin_lock_irq(&hw->lock); 4193 csio_post_event(&hw->sm, CSIO_HWE_CFG); 4194 spin_unlock_irq(&hw->lock); 4195 4196 if (csio_is_hw_ready(hw)) 4197 return 0; 4198 else 4199 return -EINVAL; 4200 } 4201 4202 int 4203 csio_hw_stop(struct csio_hw *hw) 4204 { 4205 csio_post_event(&hw->sm, CSIO_HWE_PCI_REMOVE); 4206 4207 if (csio_is_hw_removing(hw)) 4208 return 0; 4209 else 4210 return -EINVAL; 4211 } 4212 4213 /* Max reset retries */ 4214 #define CSIO_MAX_RESET_RETRIES 3 4215 4216 /** 4217 * csio_hw_reset - Reset the hardware 4218 * @hw: HW module. 4219 * 4220 * Caller should hold lock across this function. 4221 */ 4222 int 4223 csio_hw_reset(struct csio_hw *hw) 4224 { 4225 if (!csio_is_hw_master(hw)) 4226 return -EPERM; 4227 4228 if (hw->rst_retries >= CSIO_MAX_RESET_RETRIES) { 4229 csio_dbg(hw, "Max hw reset attempts reached.."); 4230 return -EINVAL; 4231 } 4232 4233 hw->rst_retries++; 4234 csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET); 4235 4236 if (csio_is_hw_ready(hw)) { 4237 hw->rst_retries = 0; 4238 hw->stats.n_reset_start = jiffies_to_msecs(jiffies); 4239 return 0; 4240 } else 4241 return -EINVAL; 4242 } 4243 4244 /* 4245 * csio_hw_get_device_id - Caches the Adapter's vendor & device id. 4246 * @hw: HW module. 4247 */ 4248 static void 4249 csio_hw_get_device_id(struct csio_hw *hw) 4250 { 4251 /* Is the adapter device id cached already ?*/ 4252 if (csio_is_dev_id_cached(hw)) 4253 return; 4254 4255 /* Get the PCI vendor & device id */ 4256 pci_read_config_word(hw->pdev, PCI_VENDOR_ID, 4257 &hw->params.pci.vendor_id); 4258 pci_read_config_word(hw->pdev, PCI_DEVICE_ID, 4259 &hw->params.pci.device_id); 4260 4261 csio_dev_id_cached(hw); 4262 4263 } /* csio_hw_get_device_id */ 4264 4265 /* 4266 * csio_hw_set_description - Set the model, description of the hw. 4267 * @hw: HW module. 4268 * @ven_id: PCI Vendor ID 4269 * @dev_id: PCI Device ID 4270 */ 4271 static void 4272 csio_hw_set_description(struct csio_hw *hw, uint16_t ven_id, uint16_t dev_id) 4273 { 4274 uint32_t adap_type, prot_type; 4275 4276 if (ven_id == CSIO_VENDOR_ID) { 4277 prot_type = (dev_id & CSIO_ASIC_DEVID_PROTO_MASK); 4278 adap_type = (dev_id & CSIO_ASIC_DEVID_TYPE_MASK); 4279 4280 if (prot_type == CSIO_FPGA) { 4281 memcpy(hw->model_desc, 4282 csio_fcoe_adapters[13].description, 32); 4283 } else if (prot_type == CSIO_T4_FCOE_ASIC) { 4284 memcpy(hw->hw_ver, 4285 csio_fcoe_adapters[adap_type].model_no, 16); 4286 memcpy(hw->model_desc, 4287 csio_fcoe_adapters[adap_type].description, 32); 4288 } else { 4289 char tempName[32] = "Chelsio FCoE Controller"; 4290 memcpy(hw->model_desc, tempName, 32); 4291 4292 CSIO_DB_ASSERT(0); 4293 } 4294 } 4295 } /* csio_hw_set_description */ 4296 4297 /** 4298 * csio_hw_init - Initialize HW module. 4299 * @hw: Pointer to HW module. 4300 * 4301 * Initialize the members of the HW module. 4302 */ 4303 int 4304 csio_hw_init(struct csio_hw *hw) 4305 { 4306 int rv = -EINVAL; 4307 uint32_t i; 4308 uint16_t ven_id, dev_id; 4309 struct csio_evt_msg *evt_entry; 4310 4311 INIT_LIST_HEAD(&hw->sm.sm_list); 4312 csio_init_state(&hw->sm, csio_hws_uninit); 4313 spin_lock_init(&hw->lock); 4314 INIT_LIST_HEAD(&hw->sln_head); 4315 4316 /* Get the PCI vendor & device id */ 4317 csio_hw_get_device_id(hw); 4318 4319 strcpy(hw->name, CSIO_HW_NAME); 4320 4321 /* Set the model & its description */ 4322 4323 ven_id = hw->params.pci.vendor_id; 4324 dev_id = hw->params.pci.device_id; 4325 4326 csio_hw_set_description(hw, ven_id, dev_id); 4327 4328 /* Initialize default log level */ 4329 hw->params.log_level = (uint32_t) csio_dbg_level; 4330 4331 csio_set_fwevt_intr_idx(hw, -1); 4332 csio_set_nondata_intr_idx(hw, -1); 4333 4334 /* Init all the modules: Mailbox, WorkRequest and Transport */ 4335 if (csio_mbm_init(csio_hw_to_mbm(hw), hw, csio_hw_mb_timer)) 4336 goto err; 4337 4338 rv = csio_wrm_init(csio_hw_to_wrm(hw), hw); 4339 if (rv) 4340 goto err_mbm_exit; 4341 4342 rv = csio_scsim_init(csio_hw_to_scsim(hw), hw); 4343 if (rv) 4344 goto err_wrm_exit; 4345 4346 rv = csio_mgmtm_init(csio_hw_to_mgmtm(hw), hw); 4347 if (rv) 4348 goto err_scsim_exit; 4349 /* Pre-allocate evtq and initialize them */ 4350 INIT_LIST_HEAD(&hw->evt_active_q); 4351 INIT_LIST_HEAD(&hw->evt_free_q); 4352 for (i = 0; i < csio_evtq_sz; i++) { 4353 4354 evt_entry = kzalloc(sizeof(struct csio_evt_msg), GFP_KERNEL); 4355 if (!evt_entry) { 4356 csio_err(hw, "Failed to initialize eventq"); 4357 goto err_evtq_cleanup; 4358 } 4359 4360 list_add_tail(&evt_entry->list, &hw->evt_free_q); 4361 CSIO_INC_STATS(hw, n_evt_freeq); 4362 } 4363 4364 hw->dev_num = dev_num; 4365 dev_num++; 4366 4367 return 0; 4368 4369 err_evtq_cleanup: 4370 csio_evtq_cleanup(hw); 4371 csio_mgmtm_exit(csio_hw_to_mgmtm(hw)); 4372 err_scsim_exit: 4373 csio_scsim_exit(csio_hw_to_scsim(hw)); 4374 err_wrm_exit: 4375 csio_wrm_exit(csio_hw_to_wrm(hw), hw); 4376 err_mbm_exit: 4377 csio_mbm_exit(csio_hw_to_mbm(hw)); 4378 err: 4379 return rv; 4380 } 4381 4382 /** 4383 * csio_hw_exit - Un-initialize HW module. 4384 * @hw: Pointer to HW module. 4385 * 4386 */ 4387 void 4388 csio_hw_exit(struct csio_hw *hw) 4389 { 4390 csio_evtq_cleanup(hw); 4391 csio_mgmtm_exit(csio_hw_to_mgmtm(hw)); 4392 csio_scsim_exit(csio_hw_to_scsim(hw)); 4393 csio_wrm_exit(csio_hw_to_wrm(hw), hw); 4394 csio_mbm_exit(csio_hw_to_mbm(hw)); 4395 } 4396