/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */

#include <linux/vmalloc.h>
#include <linux/delay.h>

#include "qla_def.h"
#include "qla_gbl.h"

#define TIMEOUT_100_MS 100

static const uint32_t qla8044_reg_tbl[] = {
	QLA8044_PEG_HALT_STATUS1,
	QLA8044_PEG_HALT_STATUS2,
	QLA8044_PEG_ALIVE_COUNTER,
	QLA8044_CRB_DRV_ACTIVE,
	QLA8044_CRB_DEV_STATE,
	QLA8044_CRB_DRV_STATE,
	QLA8044_CRB_DRV_SCRATCH,
	QLA8044_CRB_DEV_PART_INFO1,
	QLA8044_CRB_IDC_VER_MAJOR,
	QLA8044_FW_VER_MAJOR,
	QLA8044_FW_VER_MINOR,
	QLA8044_FW_VER_SUB,
	QLA8044_CMDPEG_STATE,
	QLA8044_ASIC_TEMP,
};

/* 8044 Flash Read/Write functions */
uint32_t
qla8044_rd_reg(struct qla_hw_data *ha, ulong addr)
{
	return readl((void __iomem *)(ha->nx_pcibase + addr));
}

void
qla8044_wr_reg(struct qla_hw_data *ha, ulong addr, uint32_t val)
{
	writel(val, (void __iomem *)(ha->nx_pcibase + addr));
}

int
qla8044_rd_direct(struct scsi_qla_host *vha,
	const uint32_t crb_reg)
{
	struct qla_hw_data *ha = vha->hw;

	if (crb_reg < CRB_REG_INDEX_MAX)
		return qla8044_rd_reg(ha, qla8044_reg_tbl[crb_reg]);
	else
		return QLA_FUNCTION_FAILED;
}

void
qla8044_wr_direct(struct scsi_qla_host *vha,
	const uint32_t crb_reg,
	const uint32_t value)
{
	struct qla_hw_data *ha = vha->hw;

	if (crb_reg < CRB_REG_INDEX_MAX)
		qla8044_wr_reg(ha, qla8044_reg_tbl[crb_reg], value);
}

static int
qla8044_set_win_base(scsi_qla_host_t *vha, uint32_t addr)
{
	uint32_t val;
	int ret_val = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;

	qla8044_wr_reg(ha, QLA8044_CRB_WIN_FUNC(ha->portnum), addr);
	val = qla8044_rd_reg(ha, QLA8044_CRB_WIN_FUNC(ha->portnum));

	if (val != addr) {
		ql_log(ql_log_warn, vha, 0xb087,
		    "%s: Failed to set register window : "
		    "addr written 0x%x, read 0x%x!\n",
		    __func__, addr, val);
		ret_val = QLA_FUNCTION_FAILED;
	}
	return ret_val;
}

static int
qla8044_rd_reg_indirect(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
{
	int ret_val = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;

	ret_val = qla8044_set_win_base(vha, addr);
	if (!ret_val)
		*data = qla8044_rd_reg(ha, QLA8044_WILDCARD);
	else
		ql_log(ql_log_warn, vha, 0xb088,
		    "%s: failed read of addr 0x%x!\n", __func__, addr);
	return ret_val;
}

static int
qla8044_wr_reg_indirect(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
{
	int ret_val = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;

	ret_val = qla8044_set_win_base(vha, addr);
	if (!ret_val)
		qla8044_wr_reg(ha, QLA8044_WILDCARD, data);
	else
		ql_log(ql_log_warn, vha, 0xb089,
		    "%s: failed wrt to addr 0x%x, data 0x%x\n",
		    __func__, addr, data);
	return ret_val;
}

/*
 * qla8044_read_write_crb_reg - Read from raddr and write value to waddr.
 *
 * @ha : Pointer to adapter structure
 * @raddr : CRB address to read from
 * @waddr : CRB address to write to
 *
 */
static void
qla8044_read_write_crb_reg(struct scsi_qla_host *vha,
	uint32_t raddr, uint32_t waddr)
{
	uint32_t value;

	qla8044_rd_reg_indirect(vha, raddr, &value);
	qla8044_wr_reg_indirect(vha, waddr, value);
}

static int
qla8044_poll_wait_for_ready(struct scsi_qla_host *vha, uint32_t addr1,
	uint32_t mask)
{
	unsigned long timeout;
	uint32_t temp;

	/* jiffies after 100ms */
	timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS);
	do {
		qla8044_rd_reg_indirect(vha, addr1, &temp);
		if ((temp & mask) != 0)
			break;
		if (time_after_eq(jiffies, timeout)) {
			ql_log(ql_log_warn, vha, 0xb151,
			    "Error in processing rdmdio entry\n");
			return -1;
		}
	} while (1);

	return 0;
}

static uint32_t
qla8044_ipmdio_rd_reg(struct scsi_qla_host *vha,
	uint32_t addr1, uint32_t addr3, uint32_t mask, uint32_t addr)
{
	uint32_t temp;
	int ret = 0;

	ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
	if (ret == -1)
		return -1;

	temp = (0x40000000 | addr);
	qla8044_wr_reg_indirect(vha, addr1, temp);

	ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
	if (ret == -1)
		return 0;

	qla8044_rd_reg_indirect(vha, addr3, &ret);

	return ret;
}


static int
qla8044_poll_wait_ipmdio_bus_idle(struct scsi_qla_host *vha,
	uint32_t addr1, uint32_t addr2, uint32_t addr3, uint32_t mask)
{
	unsigned long timeout;
	uint32_t temp;

	/* jiffies after 100 msecs */
	timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS);
	do {
		temp = qla8044_ipmdio_rd_reg(vha, addr1, addr3, mask, addr2);
		if ((temp & 0x1) != 1)
			break;
		if (time_after_eq(jiffies, timeout)) {
			ql_log(ql_log_warn, vha, 0xb152,
			    "Error in processing mdiobus idle\n");
			return -1;
		}
	} while (1);

	return 0;
}

static int
qla8044_ipmdio_wr_reg(struct scsi_qla_host *vha, uint32_t addr1,
	uint32_t addr3, uint32_t mask, uint32_t addr, uint32_t value)
{
	int ret = 0;

	ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
	if (ret == -1)
		return -1;

	qla8044_wr_reg_indirect(vha, addr3, value);
	qla8044_wr_reg_indirect(vha, addr1, addr);

	ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
	if (ret == -1)
		return -1;

	return 0;
}
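
/*
 * Indirect CRB access pattern used by the helpers above:
 * qla8044_set_win_base() programs the per-function window register
 * (QLA8044_CRB_WIN_FUNC(portnum)) with the target address and reads it back
 * to confirm the mapping, after which the data itself moves through
 * QLA8044_WILDCARD.  Illustrative caller sketch (SOME_BIT is a placeholder,
 * error handling elided):
 *
 *	uint32_t val;
 *
 *	if (qla8044_rd_reg_indirect(vha, addr, &val) == QLA_SUCCESS)
 *		qla8044_wr_reg_indirect(vha, addr, val | SOME_BIT);
 */
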
/*
 * qla8044_rmw_crb_reg - Read value from raddr, AND with test_mask,
 * Shift Left,Right/OR/XOR with values RMW header and write value to waddr.
 *
 * @vha : Pointer to adapter structure
 * @raddr : CRB address to read from
 * @waddr : CRB address to write to
 * @p_rmw_hdr : header with shift/or/xor values.
 *
 */
static void
qla8044_rmw_crb_reg(struct scsi_qla_host *vha,
	uint32_t raddr, uint32_t waddr, struct qla8044_rmw *p_rmw_hdr)
{
	uint32_t value;

	if (p_rmw_hdr->index_a)
		value = vha->reset_tmplt.array[p_rmw_hdr->index_a];
	else
		qla8044_rd_reg_indirect(vha, raddr, &value);
	value &= p_rmw_hdr->test_mask;
	value <<= p_rmw_hdr->shl;
	value >>= p_rmw_hdr->shr;
	value |= p_rmw_hdr->or_value;
	value ^= p_rmw_hdr->xor_value;
	qla8044_wr_reg_indirect(vha, waddr, value);
	return;
}

static inline void
qla8044_set_qsnt_ready(struct scsi_qla_host *vha)
{
	uint32_t qsnt_state;
	struct qla_hw_data *ha = vha->hw;

	qsnt_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
	qsnt_state |= (1 << ha->portnum);
	qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, qsnt_state);
	ql_log(ql_log_info, vha, 0xb08e, "%s(%ld): qsnt_state: 0x%08x\n",
	    __func__, vha->host_no, qsnt_state);
}

void
qla8044_clear_qsnt_ready(struct scsi_qla_host *vha)
{
	uint32_t qsnt_state;
	struct qla_hw_data *ha = vha->hw;

	qsnt_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
	qsnt_state &= ~(1 << ha->portnum);
	qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, qsnt_state);
	ql_log(ql_log_info, vha, 0xb08f, "%s(%ld): qsnt_state: 0x%08x\n",
	    __func__, vha->host_no, qsnt_state);
}

/**
 *
 * qla8044_lock_recovery - Recovers the idc_lock.
 * @ha : Pointer to adapter structure
 *
 * Lock Recovery Register
 * 5-2	Lock recovery owner: Function ID of driver doing lock recovery,
 *	valid if bits 1..0 are set by driver doing lock recovery.
 * 1-0	1 - Driver intends to force unlock the IDC lock.
 *	2 - Driver is moving forward to unlock the IDC lock. Driver clears
 *	    this field after force unlocking the IDC lock.
 *
 * Lock Recovery process
 * a. Read the IDC_LOCK_RECOVERY register. If the value in bits 1..0 is
 *    greater than 0, then wait for the other driver to unlock otherwise
 *    move to the next step.
 * b. Indicate intent to force-unlock by writing 1h to the IDC_LOCK_RECOVERY
 *    register bits 1..0 and also set the function# in bits 5..2.
 * c. Read the IDC_LOCK_RECOVERY register again after a delay of 200ms.
 *    Wait for the other driver to perform lock recovery if the function
 *    number in bits 5..2 has changed, otherwise move to the next step.
 * d. Write a value of 2h to the IDC_LOCK_RECOVERY register bits 1..0
 *    leaving your function# in bits 5..2.
 * e. Force unlock using the DRIVER_UNLOCK register and immediately clear
 *    the IDC_LOCK_RECOVERY bits 5..0 by writing 0.
 **/
static int
qla8044_lock_recovery(struct scsi_qla_host *vha)
{
	uint32_t lock = 0, lockid;
	struct qla_hw_data *ha = vha->hw;

	lockid = qla8044_rd_reg(ha, QLA8044_DRV_LOCKRECOVERY);

	/* Check for other Recovery in progress, go wait */
	if ((lockid & IDC_LOCK_RECOVERY_STATE_MASK) != 0)
		return QLA_FUNCTION_FAILED;

	/* Intent to Recover */
	qla8044_wr_reg(ha, QLA8044_DRV_LOCKRECOVERY,
	    (ha->portnum << IDC_LOCK_RECOVERY_STATE_SHIFT_BITS) |
	    INTENT_TO_RECOVER);
	msleep(200);

	/* Check Intent to Recover is advertised */
	lockid = qla8044_rd_reg(ha, QLA8044_DRV_LOCKRECOVERY);
	if ((lockid & IDC_LOCK_RECOVERY_OWNER_MASK) !=
	    (ha->portnum << IDC_LOCK_RECOVERY_STATE_SHIFT_BITS))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_p3p, vha, 0xb08b,
	    "%s:%d: IDC Lock recovery initiated\n", __func__, ha->portnum);

	/* Proceed to Recover */
	qla8044_wr_reg(ha, QLA8044_DRV_LOCKRECOVERY,
	    (ha->portnum << IDC_LOCK_RECOVERY_STATE_SHIFT_BITS) |
	    PROCEED_TO_RECOVER);

	/* Force Unlock() */
	qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, 0xFF);
	qla8044_rd_reg(ha, QLA8044_DRV_UNLOCK);

	/* Clear bits 0-5 in IDC_RECOVERY register */
	qla8044_wr_reg(ha, QLA8044_DRV_LOCKRECOVERY, 0);

	/* Get lock() */
	lock = qla8044_rd_reg(ha, QLA8044_DRV_LOCK);
	if (lock) {
		lockid = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
		lockid = ((lockid + (1 << 8)) & ~0xFF) | ha->portnum;
		qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, lockid);
		return QLA_SUCCESS;
	} else
		return QLA_FUNCTION_FAILED;
}

int
qla8044_idc_lock(struct qla_hw_data *ha)
{
	uint32_t ret_val = QLA_SUCCESS, timeout = 0, status = 0;
	uint32_t lock_id, lock_cnt, func_num, tmo_owner = 0, first_owner = 0;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	while (status == 0) {
		/* acquire semaphore5 from PCI HW block */
		status = qla8044_rd_reg(ha, QLA8044_DRV_LOCK);

		if (status) {
			/* Increment Counter (8-31) and update func_num (0-7)
			 * on getting a successful lock */
			lock_id = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
			lock_id = ((lock_id + (1 << 8)) & ~0xFF) | ha->portnum;
			qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, lock_id);
			break;
		}

		if (timeout == 0)
			first_owner = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);

		if (++timeout >=
		    (QLA8044_DRV_LOCK_TIMEOUT / QLA8044_DRV_LOCK_MSLEEP)) {
			tmo_owner = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
			func_num = tmo_owner & 0xFF;
			lock_cnt = tmo_owner >> 8;
			ql_log(ql_log_warn, vha, 0xb114,
			    "%s: Lock by func %d failed after 2s, lock held "
			    "by func %d, lock count %d, first_owner %d\n",
			    __func__, ha->portnum, func_num, lock_cnt,
			    (first_owner & 0xFF));
			if (first_owner != tmo_owner) {
				/* Some other driver got lock, OR same driver
				 * got lock again (counter value changed),
				 * when we were waiting for lock.
				 * Retry for another 2 sec */
				ql_dbg(ql_dbg_p3p, vha, 0xb115,
				    "%s: %d: IDC lock failed\n",
				    __func__, ha->portnum);
				timeout = 0;
			} else {
				/* Same driver holding lock > 2sec.
				 * Force Recovery */
				if (qla8044_lock_recovery(vha) == QLA_SUCCESS) {
					/* Recovered and got lock */
					ret_val = QLA_SUCCESS;
					ql_dbg(ql_dbg_p3p, vha, 0xb116,
					    "%s: IDC lock Recovery by %d "
					    "successful...\n", __func__,
					    ha->portnum);
				}
				/* Recovery Failed, some other function
				 * has the lock, wait for 2secs
				 * and retry
				 */
				ql_dbg(ql_dbg_p3p, vha, 0xb08a,
				    "%s: IDC lock Recovery by %d "
				    "failed, Retrying timeout\n", __func__,
				    ha->portnum);
				timeout = 0;
			}
		}
		msleep(QLA8044_DRV_LOCK_MSLEEP);
	}
	return ret_val;
}
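
/*
 * DRV_LOCK_ID layout used by qla8044_lock_recovery() and qla8044_idc_lock()
 * above: bits 7..0 hold the owning function number, bits 31..8 an
 * acquisition counter.  For example, if the register reads 0x00000502
 * (count 5, function 2) and function 3 acquires the lock,
 * ((0x00000502 + (1 << 8)) & ~0xFF) | 3 == 0x00000603 is written back
 * (count 6, owner 3).
 */
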
void
qla8044_idc_unlock(struct qla_hw_data *ha)
{
	int id;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	id = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);

	if ((id & 0xFF) != ha->portnum) {
		ql_log(ql_log_warn, vha, 0xb118,
		    "%s: IDC Unlock by %d failed, lock owner is %d!\n",
		    __func__, ha->portnum, (id & 0xFF));
		return;
	}

	/* Keep lock counter value, update the ha->func_num to 0xFF */
	qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, (id | 0xFF));
	qla8044_rd_reg(ha, QLA8044_DRV_UNLOCK);
}

/* 8044 Flash Lock/Unlock functions */
static int
qla8044_flash_lock(scsi_qla_host_t *vha)
{
	int lock_owner;
	int timeout = 0;
	uint32_t lock_status = 0;
	int ret_val = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;

	while (lock_status == 0) {
		lock_status = qla8044_rd_reg(ha, QLA8044_FLASH_LOCK);
		if (lock_status)
			break;

		if (++timeout >= QLA8044_FLASH_LOCK_TIMEOUT / 20) {
			lock_owner = qla8044_rd_reg(ha,
			    QLA8044_FLASH_LOCK_ID);
			ql_log(ql_log_warn, vha, 0xb113,
			    "%s: Simultaneous flash access by following ports, active port = %d: accessing port = %d",
			    __func__, ha->portnum, lock_owner);
			ret_val = QLA_FUNCTION_FAILED;
			break;
		}
		msleep(20);
	}
	qla8044_wr_reg(ha, QLA8044_FLASH_LOCK_ID, ha->portnum);
	return ret_val;
}

static void
qla8044_flash_unlock(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	/* Reading FLASH_UNLOCK register unlocks the Flash */
	qla8044_wr_reg(ha, QLA8044_FLASH_LOCK_ID, 0xFF);
	qla8044_rd_reg(ha, QLA8044_FLASH_UNLOCK);
}


static
void qla8044_flash_lock_recovery(struct scsi_qla_host *vha)
{

	if (qla8044_flash_lock(vha)) {
		/* Someone else is holding the lock. */
		ql_log(ql_log_warn, vha, 0xb120, "Resetting flash_lock\n");
	}

	/*
	 * Either we got the lock, or someone
	 * else died while holding it.
	 * In either case, unlock.
	 */
	qla8044_flash_unlock(vha);
}

/*
 * Address and length are byte address
 */
static int
qla8044_read_flash_data(scsi_qla_host_t *vha, uint8_t *p_data,
	uint32_t flash_addr, int u32_word_count)
{
	int i, ret_val = QLA_SUCCESS;
	uint32_t u32_word;

	if (qla8044_flash_lock(vha) != QLA_SUCCESS) {
		ret_val = QLA_FUNCTION_FAILED;
		goto exit_lock_error;
	}

	if (flash_addr & 0x03) {
		ql_log(ql_log_warn, vha, 0xb117,
		    "%s: Illegal addr = 0x%x\n", __func__, flash_addr);
		ret_val = QLA_FUNCTION_FAILED;
		goto exit_flash_read;
	}

	for (i = 0; i < u32_word_count; i++) {
		if (qla8044_wr_reg_indirect(vha, QLA8044_FLASH_DIRECT_WINDOW,
		    (flash_addr & 0xFFFF0000))) {
			ql_log(ql_log_warn, vha, 0xb119,
			    "%s: failed to write addr 0x%x to "
			    "FLASH_DIRECT_WINDOW!\n",
			    __func__, flash_addr);
			ret_val = QLA_FUNCTION_FAILED;
			goto exit_flash_read;
		}

		ret_val = qla8044_rd_reg_indirect(vha,
		    QLA8044_FLASH_DIRECT_DATA(flash_addr), &u32_word);
		if (ret_val != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0xb08c,
			    "%s: failed to read addr 0x%x!\n",
			    __func__, flash_addr);
			goto exit_flash_read;
		}

		*(uint32_t *)p_data = u32_word;
		p_data = p_data + 4;
		flash_addr = flash_addr + 4;
	}

exit_flash_read:
	qla8044_flash_unlock(vha);

exit_lock_error:
	return ret_val;
}

/*
 * Address and length are byte address
 */
uint8_t *
qla8044_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
	uint32_t offset, uint32_t length)
{
	scsi_block_requests(vha->host);
	if (qla8044_read_flash_data(vha, (uint8_t *)buf, offset, length / 4)
	    != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0xb08d,
		    "%s: Failed to read from flash\n", __func__);
	}
	scsi_unblock_requests(vha->host);
	return buf;
}

static inline int
qla8044_need_reset(struct scsi_qla_host *vha)
{
	uint32_t drv_state, drv_active;
	int rval;
	struct qla_hw_data *ha = vha->hw;

	drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
	drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);

	rval = drv_state & (1 << ha->portnum);

	if (ha->flags.eeh_busy && drv_active)
		rval = 1;
	return rval;
}

/*
 * qla8044_write_list - Write the value (p_entry->arg2) to address specified
 * by p_entry->arg1 for all entries in header with delay of p_hdr->delay
 * between entries.
 *
 * @vha : Pointer to adapter structure
 * @p_hdr : reset_entry header for WRITE_LIST opcode.
 *
 */
static void
qla8044_write_list(struct scsi_qla_host *vha,
	struct qla8044_reset_entry_hdr *p_hdr)
{
	struct qla8044_entry *p_entry;
	uint32_t i;

	p_entry = (struct qla8044_entry *)((char *)p_hdr +
	    sizeof(struct qla8044_reset_entry_hdr));

	for (i = 0; i < p_hdr->count; i++, p_entry++) {
		qla8044_wr_reg_indirect(vha, p_entry->arg1, p_entry->arg2);
		if (p_hdr->delay)
			udelay((uint32_t)(p_hdr->delay));
	}
}

/*
 * qla8044_read_write_list - Read from address specified by p_entry->arg1,
 * write value read to address specified by p_entry->arg2, for all entries in
 * header with delay of p_hdr->delay between entries.
 *
 * @vha : Pointer to adapter structure
 * @p_hdr : reset_entry header for READ_WRITE_LIST opcode.
 *
 */
static void
qla8044_read_write_list(struct scsi_qla_host *vha,
	struct qla8044_reset_entry_hdr *p_hdr)
{
	struct qla8044_entry *p_entry;
	uint32_t i;

	p_entry = (struct qla8044_entry *)((char *)p_hdr +
	    sizeof(struct qla8044_reset_entry_hdr));

	for (i = 0; i < p_hdr->count; i++, p_entry++) {
		qla8044_read_write_crb_reg(vha, p_entry->arg1, p_entry->arg2);
		if (p_hdr->delay)
			udelay((uint32_t)(p_hdr->delay));
	}
}
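
/*
 * Reset-template sub-sequence layout assumed by the list handlers above and
 * below: a struct qla8044_reset_entry_hdr (command, size, entry count,
 * inter-entry delay), optionally followed by an opcode-specific header such
 * as struct qla8044_poll or struct qla8044_rmw, followed by p_hdr->count
 * fixed-size entries.  Hence the handlers locate their entries with pointer
 * arithmetic of the form:
 *
 *	p_entry = (struct qla8044_entry *)((char *)p_hdr +
 *	    sizeof(struct qla8044_reset_entry_hdr));
 */
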
/*
 * qla8044_poll_reg - Poll the given CRB addr for duration msecs till
 * value read ANDed with test_mask is equal to test_result.
 *
 * @ha : Pointer to adapter structure
 * @addr : CRB register address
 * @duration : Poll for total of "duration" msecs
 * @test_mask : Mask value read with "test_mask"
 * @test_result : Compare (value&test_mask) with test_result.
 *
 * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
 */
static int
qla8044_poll_reg(struct scsi_qla_host *vha, uint32_t addr,
	int duration, uint32_t test_mask, uint32_t test_result)
{
	uint32_t value;
	int timeout_error;
	uint8_t retries;
	int ret_val = QLA_SUCCESS;

	ret_val = qla8044_rd_reg_indirect(vha, addr, &value);
	if (ret_val == QLA_FUNCTION_FAILED) {
		timeout_error = 1;
		goto exit_poll_reg;
	}

	/* poll every 1/10 of the total duration */
	retries = duration / 10;

	do {
		if ((value & test_mask) != test_result) {
			timeout_error = 1;
			msleep(duration / 10);
			ret_val = qla8044_rd_reg_indirect(vha, addr, &value);
			if (ret_val == QLA_FUNCTION_FAILED) {
				timeout_error = 1;
				goto exit_poll_reg;
			}
		} else {
			timeout_error = 0;
			break;
		}
	} while (retries--);

exit_poll_reg:
	if (timeout_error) {
		vha->reset_tmplt.seq_error++;
		ql_log(ql_log_fatal, vha, 0xb090,
		    "%s: Poll Failed: 0x%08x 0x%08x 0x%08x\n",
		    __func__, value, test_mask, test_result);
	}

	return timeout_error;
}

/*
 * qla8044_poll_list - For all entries in the POLL_LIST header, poll read CRB
 * register specified by p_entry->arg1 and compare (value AND test_mask) with
 * test_result to validate it. Wait for p_hdr->delay between processing
 * entries.
 *
 * @ha : Pointer to adapter structure
 * @p_hdr : reset_entry header for POLL_LIST opcode.
 *
 */
static void
qla8044_poll_list(struct scsi_qla_host *vha,
	struct qla8044_reset_entry_hdr *p_hdr)
{
	long delay;
	struct qla8044_entry *p_entry;
	struct qla8044_poll *p_poll;
	uint32_t i;
	uint32_t value;

	p_poll = (struct qla8044_poll *)
		((char *)p_hdr + sizeof(struct qla8044_reset_entry_hdr));

	/* Entries start after 8 byte qla8044_poll, poll header contains
	 * the test_mask, test_value.
	 */
	p_entry = (struct qla8044_entry *)((char *)p_poll +
	    sizeof(struct qla8044_poll));

	delay = (long)p_hdr->delay;

	if (!delay) {
		for (i = 0; i < p_hdr->count; i++, p_entry++)
			qla8044_poll_reg(vha, p_entry->arg1,
			    delay, p_poll->test_mask, p_poll->test_value);
	} else {
		for (i = 0; i < p_hdr->count; i++, p_entry++) {
			if (delay) {
				if (qla8044_poll_reg(vha,
				    p_entry->arg1, delay,
				    p_poll->test_mask,
				    p_poll->test_value)) {
					/* If
					 * (data_read&test_mask != test_value)
					 * read TIMEOUT_ADDR (arg1) and
					 * ADDR (arg2) registers
					 */
					qla8044_rd_reg_indirect(vha,
					    p_entry->arg1, &value);
					qla8044_rd_reg_indirect(vha,
					    p_entry->arg2, &value);
				}
			}
		}
	}
}

/*
 * qla8044_poll_write_list - Write dr_value, ar_value to dr_addr/ar_addr,
 * read ar_addr, if (value& test_mask != test_mask) re-read till timeout
 * expires.
 *
 * @vha : Pointer to adapter structure
 * @p_hdr : reset entry header for POLL_WRITE_LIST opcode.
 *
 */
static void
qla8044_poll_write_list(struct scsi_qla_host *vha,
	struct qla8044_reset_entry_hdr *p_hdr)
{
	long delay;
	struct qla8044_quad_entry *p_entry;
	struct qla8044_poll *p_poll;
	uint32_t i;

	p_poll = (struct qla8044_poll *)((char *)p_hdr +
	    sizeof(struct qla8044_reset_entry_hdr));

	p_entry = (struct qla8044_quad_entry *)((char *)p_poll +
	    sizeof(struct qla8044_poll));

	delay = (long)p_hdr->delay;

	for (i = 0; i < p_hdr->count; i++, p_entry++) {
		qla8044_wr_reg_indirect(vha,
		    p_entry->dr_addr, p_entry->dr_value);
		qla8044_wr_reg_indirect(vha,
		    p_entry->ar_addr, p_entry->ar_value);
		if (delay) {
			if (qla8044_poll_reg(vha,
			    p_entry->ar_addr, delay,
			    p_poll->test_mask,
			    p_poll->test_value)) {
				ql_dbg(ql_dbg_p3p, vha, 0xb091,
				    "%s: Timeout Error: poll list, ",
				    __func__);
				ql_dbg(ql_dbg_p3p, vha, 0xb092,
				    "item_num %d, entry_num %d\n", i,
				    vha->reset_tmplt.seq_index);
			}
		}
	}
}

/*
 * qla8044_read_modify_write - Read value from p_entry->arg1, modify the
 * value, write value to p_entry->arg2. Process entries with p_hdr->delay
 * between entries.
 *
 * @vha : Pointer to adapter structure
 * @p_hdr : header with shift/or/xor values.
 *
 */
static void
qla8044_read_modify_write(struct scsi_qla_host *vha,
	struct qla8044_reset_entry_hdr *p_hdr)
{
	struct qla8044_entry *p_entry;
	struct qla8044_rmw *p_rmw_hdr;
	uint32_t i;

	p_rmw_hdr = (struct qla8044_rmw *)((char *)p_hdr +
	    sizeof(struct qla8044_reset_entry_hdr));

	p_entry = (struct qla8044_entry *)((char *)p_rmw_hdr +
	    sizeof(struct qla8044_rmw));

	for (i = 0; i < p_hdr->count; i++, p_entry++) {
		qla8044_rmw_crb_reg(vha, p_entry->arg1,
		    p_entry->arg2, p_rmw_hdr);
		if (p_hdr->delay)
			udelay((uint32_t)(p_hdr->delay));
	}
}
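
/*
 * Worked example of the RMW transform applied per entry by
 * qla8044_rmw_crb_reg(): with test_mask 0x0000FF00, shl 0, shr 8,
 * or_value 0x10 and xor_value 0, a value of 0x00012300 read from arg1 is
 * masked to 0x2300, shifted to 0x23, OR-ed to 0x33 and written to arg2.
 */
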
/*
 * qla8044_pause - Wait for p_hdr->delay msecs, called between processing
 * two entries of a sequence.
 *
 * @vha : Pointer to adapter structure
 * @p_hdr : Common reset entry header.
 *
 */
static
void qla8044_pause(struct scsi_qla_host *vha,
	struct qla8044_reset_entry_hdr *p_hdr)
{
	if (p_hdr->delay)
		mdelay((uint32_t)((long)p_hdr->delay));
}

/*
 * qla8044_template_end - Indicates end of reset sequence processing.
 *
 * @vha : Pointer to adapter structure
 * @p_hdr : Common reset entry header.
 *
 */
static void
qla8044_template_end(struct scsi_qla_host *vha,
	struct qla8044_reset_entry_hdr *p_hdr)
{
	vha->reset_tmplt.template_end = 1;

	if (vha->reset_tmplt.seq_error == 0) {
		ql_dbg(ql_dbg_p3p, vha, 0xb093,
		    "%s: Reset sequence completed SUCCESSFULLY.\n", __func__);
	} else {
		ql_log(ql_log_fatal, vha, 0xb094,
		    "%s: Reset sequence completed with some timeout "
		    "errors.\n", __func__);
	}
}

/*
 * qla8044_poll_read_list - Write ar_value to ar_addr register, read ar_addr,
 * if (value & test_mask != test_value) re-read till timeout value expires,
 * read dr_addr register and assign to reset_tmplt.array.
 *
 * @vha : Pointer to adapter structure
 * @p_hdr : Common reset entry header.
 *
 */
static void
qla8044_poll_read_list(struct scsi_qla_host *vha,
	struct qla8044_reset_entry_hdr *p_hdr)
{
	long delay;
	int index;
	struct qla8044_quad_entry *p_entry;
	struct qla8044_poll *p_poll;
	uint32_t i;
	uint32_t value;

	p_poll = (struct qla8044_poll *)
		((char *)p_hdr + sizeof(struct qla8044_reset_entry_hdr));

	p_entry = (struct qla8044_quad_entry *)
		((char *)p_poll + sizeof(struct qla8044_poll));

	delay = (long)p_hdr->delay;

	for (i = 0; i < p_hdr->count; i++, p_entry++) {
		qla8044_wr_reg_indirect(vha, p_entry->ar_addr,
		    p_entry->ar_value);
		if (delay) {
			if (qla8044_poll_reg(vha, p_entry->ar_addr, delay,
			    p_poll->test_mask, p_poll->test_value)) {
				ql_dbg(ql_dbg_p3p, vha, 0xb095,
				    "%s: Timeout Error: poll list, ",
				    __func__);
				ql_dbg(ql_dbg_p3p, vha, 0xb096,
				    "Item_num %d, entry_num %d\n", i,
				    vha->reset_tmplt.seq_index);
			} else {
				index = vha->reset_tmplt.array_index;
				qla8044_rd_reg_indirect(vha,
				    p_entry->dr_addr, &value);
				vha->reset_tmplt.array[index++] = value;
				if (index == QLA8044_MAX_RESET_SEQ_ENTRIES)
					vha->reset_tmplt.array_index = 1;
			}
		}
	}
}

/*
 * qla8044_process_reset_template - Process all entries in reset template
 * till entry with SEQ_END opcode, which indicates end of the reset template
 * processing. Each entry has a Reset Entry header, entry opcode/command, with
 * size of the entry, number of entries in sub-sequence and delay in microsecs
 * or timeout in millisecs.
 *
 * @ha : Pointer to adapter structure
 * @p_buff : Common reset entry header.
 *
 */
static void
qla8044_process_reset_template(struct scsi_qla_host *vha,
	char *p_buff)
{
	int index, entries;
	struct qla8044_reset_entry_hdr *p_hdr;
	char *p_entry = p_buff;

	vha->reset_tmplt.seq_end = 0;
	vha->reset_tmplt.template_end = 0;
	entries = vha->reset_tmplt.hdr->entries;
	index = vha->reset_tmplt.seq_index;

	for (; (!vha->reset_tmplt.seq_end) && (index < entries); index++) {
		p_hdr = (struct qla8044_reset_entry_hdr *)p_entry;
		switch (p_hdr->cmd) {
		case OPCODE_NOP:
			break;
		case OPCODE_WRITE_LIST:
			qla8044_write_list(vha, p_hdr);
			break;
		case OPCODE_READ_WRITE_LIST:
			qla8044_read_write_list(vha, p_hdr);
			break;
		case OPCODE_POLL_LIST:
			qla8044_poll_list(vha, p_hdr);
			break;
		case OPCODE_POLL_WRITE_LIST:
			qla8044_poll_write_list(vha, p_hdr);
			break;
		case OPCODE_READ_MODIFY_WRITE:
			qla8044_read_modify_write(vha, p_hdr);
			break;
		case OPCODE_SEQ_PAUSE:
			qla8044_pause(vha, p_hdr);
			break;
		case OPCODE_SEQ_END:
			vha->reset_tmplt.seq_end = 1;
			break;
		case OPCODE_TMPL_END:
			qla8044_template_end(vha, p_hdr);
			break;
		case OPCODE_POLL_READ_LIST:
			qla8044_poll_read_list(vha, p_hdr);
			break;
		default:
			ql_log(ql_log_fatal, vha, 0xb097,
			    "%s: Unknown command ==> 0x%04x on "
			    "entry = %d\n", __func__, p_hdr->cmd, index);
			break;
		}
		/*
		 * Set pointer to next entry in the sequence.
		 */
		p_entry += p_hdr->size;
	}
	vha->reset_tmplt.seq_index = index;
}

static void
qla8044_process_init_seq(struct scsi_qla_host *vha)
{
	qla8044_process_reset_template(vha,
	    vha->reset_tmplt.init_offset);
	if (vha->reset_tmplt.seq_end != 1)
		ql_log(ql_log_fatal, vha, 0xb098,
		    "%s: Abrupt INIT Sub-Sequence end.\n", __func__);
}

static void
qla8044_process_stop_seq(struct scsi_qla_host *vha)
{
	vha->reset_tmplt.seq_index = 0;
	qla8044_process_reset_template(vha, vha->reset_tmplt.stop_offset);
	if (vha->reset_tmplt.seq_end != 1)
		ql_log(ql_log_fatal, vha, 0xb099,
		    "%s: Abrupt STOP Sub-Sequence end.\n", __func__);
}

static void
qla8044_process_start_seq(struct scsi_qla_host *vha)
{
	qla8044_process_reset_template(vha, vha->reset_tmplt.start_offset);
	if (vha->reset_tmplt.template_end != 1)
		ql_log(ql_log_fatal, vha, 0xb09a,
		    "%s: Abrupt START Sub-Sequence end.\n", __func__);
}

static int
qla8044_lockless_flash_read_u32(struct scsi_qla_host *vha,
	uint32_t flash_addr, uint8_t *p_data, int u32_word_count)
{
	uint32_t i;
	uint32_t u32_word;
	uint32_t flash_offset;
	uint32_t addr = flash_addr;
	int ret_val = QLA_SUCCESS;

	flash_offset = addr & (QLA8044_FLASH_SECTOR_SIZE - 1);

	if (addr & 0x3) {
		ql_log(ql_log_fatal, vha, 0xb09b, "%s: Illegal addr = 0x%x\n",
		    __func__, addr);
		ret_val = QLA_FUNCTION_FAILED;
		goto exit_lockless_read;
	}

	ret_val = qla8044_wr_reg_indirect(vha,
	    QLA8044_FLASH_DIRECT_WINDOW, (addr));

	if (ret_val != QLA_SUCCESS) {
		ql_log(ql_log_fatal, vha, 0xb09c,
		    "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
		    __func__, addr);
		goto exit_lockless_read;
	}

	/* Check if data is spread across multiple sectors */
	if ((flash_offset + (u32_word_count * sizeof(uint32_t))) >
	    (QLA8044_FLASH_SECTOR_SIZE - 1)) {
		/* Multi sector read */
		for (i = 0; i < u32_word_count; i++) {
			ret_val = qla8044_rd_reg_indirect(vha,
			    QLA8044_FLASH_DIRECT_DATA(addr), &u32_word);
			if (ret_val != QLA_SUCCESS) {
				ql_log(ql_log_fatal, vha, 0xb09d,
				    "%s: failed to read addr 0x%x!\n",
				    __func__, addr);
				goto exit_lockless_read;
			}
			*(uint32_t *)p_data = u32_word;
			p_data = p_data + 4;
			addr = addr + 4;
			flash_offset = flash_offset + 4;
			if (flash_offset > (QLA8044_FLASH_SECTOR_SIZE - 1)) {
				/* This write is needed once for each sector */
				ret_val = qla8044_wr_reg_indirect(vha,
				    QLA8044_FLASH_DIRECT_WINDOW, (addr));
				if (ret_val != QLA_SUCCESS) {
					ql_log(ql_log_fatal, vha, 0xb09f,
					    "%s: failed to write addr "
					    "0x%x to FLASH_DIRECT_WINDOW!\n",
					    __func__, addr);
					goto exit_lockless_read;
				}
				flash_offset = 0;
			}
		}
	} else {
		/* Single sector read */
		for (i = 0; i < u32_word_count; i++) {
			ret_val = qla8044_rd_reg_indirect(vha,
			    QLA8044_FLASH_DIRECT_DATA(addr), &u32_word);
			if (ret_val != QLA_SUCCESS) {
				ql_log(ql_log_fatal, vha, 0xb0a0,
				    "%s: failed to read addr 0x%x!\n",
				    __func__, addr);
				goto exit_lockless_read;
			}
			*(uint32_t *)p_data = u32_word;
			p_data = p_data + 4;
			addr = addr + 4;
		}
	}

exit_lockless_read:
	return ret_val;
}
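
/*
 * Note on qla8044_lockless_flash_read_u32() above: FLASH_DIRECT_WINDOW maps
 * one QLA8044_FLASH_SECTOR_SIZE window at a time, so flash_offset tracks the
 * byte offset within the current sector and the window register is rewritten
 * whenever the running address crosses into the next sector.
 */
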
/*
 * qla8044_ms_mem_write_128b - Writes data to MS/off-chip memory
 *
 * @vha : Pointer to adapter structure
 * @addr : MS memory address to write to
 * @data : Data to be written
 * @count : word_count to be written
 *
 * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
 */
static int
qla8044_ms_mem_write_128b(struct scsi_qla_host *vha,
	uint64_t addr, uint32_t *data, uint32_t count)
{
	int i, j, ret_val = QLA_SUCCESS;
	uint32_t agt_ctrl;
	unsigned long flags;
	struct qla_hw_data *ha = vha->hw;

	/* Only 128-bit aligned access */
	if (addr & 0xF) {
		ret_val = QLA_FUNCTION_FAILED;
		goto exit_ms_mem_write;
	}
	write_lock_irqsave(&ha->hw_lock, flags);

	/* Write address */
	ret_val = qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_ADDR_HI, 0);
	if (ret_val == QLA_FUNCTION_FAILED) {
		ql_log(ql_log_fatal, vha, 0xb0a1,
		    "%s: write to AGT_ADDR_HI failed!\n", __func__);
		goto exit_ms_mem_write_unlock;
	}

	for (i = 0; i < count; i++, addr += 16) {
		if (!((addr_in_range(addr, QLA8044_ADDR_QDR_NET,
		    QLA8044_ADDR_QDR_NET_MAX)) ||
		    (addr_in_range(addr, QLA8044_ADDR_DDR_NET,
			QLA8044_ADDR_DDR_NET_MAX)))) {
			ret_val = QLA_FUNCTION_FAILED;
			goto exit_ms_mem_write_unlock;
		}

		ret_val = qla8044_wr_reg_indirect(vha,
		    MD_MIU_TEST_AGT_ADDR_LO, addr);

		/* Write data */
		ret_val += qla8044_wr_reg_indirect(vha,
		    MD_MIU_TEST_AGT_WRDATA_LO, *data++);
		ret_val += qla8044_wr_reg_indirect(vha,
		    MD_MIU_TEST_AGT_WRDATA_HI, *data++);
		ret_val += qla8044_wr_reg_indirect(vha,
		    MD_MIU_TEST_AGT_WRDATA_ULO, *data++);
		ret_val += qla8044_wr_reg_indirect(vha,
		    MD_MIU_TEST_AGT_WRDATA_UHI, *data++);
		if (ret_val == QLA_FUNCTION_FAILED) {
			ql_log(ql_log_fatal, vha, 0xb0a2,
			    "%s: write to AGT_WRDATA failed!\n", __func__);
			goto exit_ms_mem_write_unlock;
		}

		/* Check write status */
		ret_val = qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL,
		    MIU_TA_CTL_WRITE_ENABLE);
		ret_val += qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL,
		    MIU_TA_CTL_WRITE_START);
		if (ret_val == QLA_FUNCTION_FAILED) {
			ql_log(ql_log_fatal, vha, 0xb0a3,
			    "%s: write to AGT_CTRL failed!\n", __func__);
			goto exit_ms_mem_write_unlock;
		}

		for (j = 0; j < MAX_CTL_CHECK; j++) {
			ret_val = qla8044_rd_reg_indirect(vha,
			    MD_MIU_TEST_AGT_CTRL, &agt_ctrl);
			if (ret_val == QLA_FUNCTION_FAILED) {
				ql_log(ql_log_fatal, vha, 0xb0a4,
				    "%s: failed to read "
				    "MD_MIU_TEST_AGT_CTRL!\n", __func__);
				goto exit_ms_mem_write_unlock;
			}
			if ((agt_ctrl & MIU_TA_CTL_BUSY) == 0)
				break;
		}

		/* Status check failed */
		if (j >= MAX_CTL_CHECK) {
			ql_log(ql_log_fatal, vha, 0xb0a5,
			    "%s: MS memory write failed!\n", __func__);
			ret_val = QLA_FUNCTION_FAILED;
			goto exit_ms_mem_write_unlock;
		}
	}

exit_ms_mem_write_unlock:
	write_unlock_irqrestore(&ha->hw_lock, flags);

exit_ms_mem_write:
	return ret_val;
}

static int
qla8044_copy_bootloader(struct scsi_qla_host *vha)
{
	uint8_t *p_cache;
	uint32_t src, count, size;
	uint64_t dest;
	int ret_val = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;

	src = QLA8044_BOOTLOADER_FLASH_ADDR;
	dest = qla8044_rd_reg(ha, QLA8044_BOOTLOADER_ADDR);
	size = qla8044_rd_reg(ha, QLA8044_BOOTLOADER_SIZE);

	/* 128 bit alignment check */
	if (size & 0xF)
		size = (size + 16) & ~0xF;

	/* 16 byte count */
	count = size / 16;

	p_cache = vmalloc(size);
	if (p_cache == NULL) {
		ql_log(ql_log_fatal, vha, 0xb0a6,
		    "%s: Failed to allocate memory for "
		    "boot loader cache\n", __func__);
		ret_val = QLA_FUNCTION_FAILED;
		goto exit_copy_bootloader;
	}

	ret_val = qla8044_lockless_flash_read_u32(vha, src,
	    p_cache, size / sizeof(uint32_t));
	if (ret_val == QLA_FUNCTION_FAILED) {
		ql_log(ql_log_fatal, vha, 0xb0a7,
		    "%s: Error reading F/W from flash!!!\n", __func__);
		goto exit_copy_error;
	}
	ql_dbg(ql_dbg_p3p, vha, 0xb0a8, "%s: Read F/W from flash!\n",
	    __func__);

	/* 128 bit/16 byte write to MS memory */
	ret_val = qla8044_ms_mem_write_128b(vha, dest,
	    (uint32_t *)p_cache, count);
	if (ret_val == QLA_FUNCTION_FAILED) {
		ql_log(ql_log_fatal, vha, 0xb0a9,
		    "%s: Error writing F/W to MS !!!\n", __func__);
		goto exit_copy_error;
	}
	ql_dbg(ql_dbg_p3p, vha, 0xb0aa,
	    "%s: Wrote F/W (size %d) to MS !!!\n",
	    __func__, size);

exit_copy_error:
	vfree(p_cache);

exit_copy_bootloader:
	return ret_val;
}
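
/*
 * The bootloader size is rounded up to a 16-byte multiple above because
 * qla8044_ms_mem_write_128b() accepts only 128-bit aligned bursts; for
 * example, a reported size of 0x3F29 becomes (0x3F29 + 16) & ~0xF = 0x3F30,
 * i.e. 0x3F3 sixteen-byte writes.
 */
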
static int
qla8044_restart(struct scsi_qla_host *vha)
{
	int ret_val = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;

	qla8044_process_stop_seq(vha);

	/* Collect minidump */
	if (ql2xmdenable)
		qla8044_get_minidump(vha);
	else
		ql_log(ql_log_fatal, vha, 0xb14c,
		    "Minidump disabled.\n");

	qla8044_process_init_seq(vha);

	if (qla8044_copy_bootloader(vha)) {
		ql_log(ql_log_fatal, vha, 0xb0ab,
		    "%s: Copy bootloader, firmware restart failed!\n",
		    __func__);
		ret_val = QLA_FUNCTION_FAILED;
		goto exit_restart;
	}

	/*
	 * Loads F/W from flash
	 */
	qla8044_wr_reg(ha, QLA8044_FW_IMAGE_VALID, QLA8044_BOOT_FROM_FLASH);

	qla8044_process_start_seq(vha);

exit_restart:
	return ret_val;
}

/*
 * qla8044_check_cmd_peg_status - Check peg status to see if Peg is
 * initialized.
 *
 * @ha : Pointer to adapter structure
 *
 * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
 */
static int
qla8044_check_cmd_peg_status(struct scsi_qla_host *vha)
{
	uint32_t val, ret_val = QLA_FUNCTION_FAILED;
	int retries = CRB_CMDPEG_CHECK_RETRY_COUNT;
	struct qla_hw_data *ha = vha->hw;

	do {
		val = qla8044_rd_reg(ha, QLA8044_CMDPEG_STATE);
		if (val == PHAN_INITIALIZE_COMPLETE) {
			ql_dbg(ql_dbg_p3p, vha, 0xb0ac,
			    "%s: Command Peg initialization "
			    "complete! state=0x%x\n", __func__, val);
			ret_val = QLA_SUCCESS;
			break;
		}
		msleep(CRB_CMDPEG_CHECK_DELAY);
	} while (--retries);

	return ret_val;
}

static int
qla8044_start_firmware(struct scsi_qla_host *vha)
{
	int ret_val = QLA_SUCCESS;

	if (qla8044_restart(vha)) {
		ql_log(ql_log_fatal, vha, 0xb0ad,
		    "%s: Restart Error!!!, Need Reset!!!\n", __func__);
		ret_val = QLA_FUNCTION_FAILED;
		goto exit_start_fw;
	} else
		ql_dbg(ql_dbg_p3p, vha, 0xb0af,
		    "%s: Restart done!\n", __func__);

	ret_val = qla8044_check_cmd_peg_status(vha);
	if (ret_val) {
		ql_log(ql_log_fatal, vha, 0xb0b0,
		    "%s: Peg not initialized!\n", __func__);
		ret_val = QLA_FUNCTION_FAILED;
	}

exit_start_fw:
	return ret_val;
}

void
qla8044_clear_drv_active(struct qla_hw_data *ha)
{
	uint32_t drv_active;
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);

	drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
	drv_active &= ~(1 << (ha->portnum));

	ql_log(ql_log_info, vha, 0xb0b1,
	    "%s(%ld): drv_active: 0x%08x\n",
	    __func__, vha->host_no, drv_active);

	qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, drv_active);
}

/*
 * qla8044_device_bootstrap - Initialize device, set DEV_READY, start fw
 * @ha: pointer to adapter structure
 *
 * Note: IDC lock must be held upon entry
 **/
static int
qla8044_device_bootstrap(struct scsi_qla_host *vha)
{
	int rval = QLA_FUNCTION_FAILED;
	int i;
	uint32_t old_count = 0, count = 0;
	int need_reset = 0;
	uint32_t idc_ctrl;
	struct qla_hw_data *ha = vha->hw;

	need_reset = qla8044_need_reset(vha);

	if (!need_reset) {
		old_count = qla8044_rd_direct(vha,
		    QLA8044_PEG_ALIVE_COUNTER_INDEX);

		for (i = 0; i < 10; i++) {
			msleep(200);

			count = qla8044_rd_direct(vha,
			    QLA8044_PEG_ALIVE_COUNTER_INDEX);
			if (count != old_count) {
				rval = QLA_SUCCESS;
				goto dev_ready;
			}
		}
		qla8044_flash_lock_recovery(vha);
	} else {
		/* We are trying to perform a recovery here. */
		if (ha->flags.isp82xx_fw_hung)
			qla8044_flash_lock_recovery(vha);
	}

	/* set to DEV_INITIALIZING */
	ql_log(ql_log_info, vha, 0xb0b2,
	    "%s: HW State: INITIALIZING\n", __func__);
	qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
	    QLA8XXX_DEV_INITIALIZING);

	qla8044_idc_unlock(ha);
	rval = qla8044_start_firmware(vha);
	qla8044_idc_lock(ha);

	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_info, vha, 0xb0b3,
		    "%s: HW State: FAILED\n", __func__);
		qla8044_clear_drv_active(ha);
		qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
		    QLA8XXX_DEV_FAILED);
		return rval;
	}

	/* For ISP8044, if IDC_CTRL GRACEFUL_RESET_BIT1 is set, reset it after
	 * device goes to INIT state. */
	idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
	if (idc_ctrl & GRACEFUL_RESET_BIT1) {
		qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
		    (idc_ctrl & ~GRACEFUL_RESET_BIT1));
		ha->fw_dumped = 0;
	}

dev_ready:
	ql_log(ql_log_info, vha, 0xb0b4,
	    "%s: HW State: READY\n", __func__);
	qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX, QLA8XXX_DEV_READY);

	return rval;
}

/*-------------------------Reset Sequence Functions-----------------------*/
static void
qla8044_dump_reset_seq_hdr(struct scsi_qla_host *vha)
{
	u8 *phdr;

	if (!vha->reset_tmplt.buff) {
		ql_log(ql_log_fatal, vha, 0xb0b5,
		    "%s: Error Invalid reset_seq_template\n", __func__);
		return;
	}

	phdr = vha->reset_tmplt.buff;
	ql_dbg(ql_dbg_p3p, vha, 0xb0b6,
	    "Reset Template :\n\t0x%X 0x%X 0x%X 0x%X "
	    "0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n"
	    "\t0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n\n",
	    *phdr, *(phdr+1), *(phdr+2), *(phdr+3), *(phdr+4),
	    *(phdr+5), *(phdr+6), *(phdr+7), *(phdr+8),
	    *(phdr+9), *(phdr+10), *(phdr+11), *(phdr+12),
	    *(phdr+13), *(phdr+14), *(phdr+15));
}

/*
 * qla8044_reset_seq_checksum_test - Validate Reset Sequence template.
 *
 * @ha : Pointer to adapter structure
 *
 * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
 */
static int
qla8044_reset_seq_checksum_test(struct scsi_qla_host *vha)
{
	uint32_t sum = 0;
	uint16_t *buff = (uint16_t *)vha->reset_tmplt.buff;
	int u16_count = vha->reset_tmplt.hdr->size / sizeof(uint16_t);

	while (u16_count-- > 0)
		sum += *buff++;

	while (sum >> 16)
		sum = (sum & 0xFFFF) + (sum >> 16);

	/* checksum of 0 indicates a valid template */
	if (~sum) {
		return QLA_SUCCESS;
	} else {
		ql_log(ql_log_fatal, vha, 0xb0b7,
		    "%s: Reset seq checksum failed\n", __func__);
		return QLA_FUNCTION_FAILED;
	}
}
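
/*
 * The checksum above folds the running 32-bit sum into 16 bits
 * end-around-carry style: for example, a raw sum of 0x0001FFFE folds as
 * 0xFFFE + 0x1 = 0xFFFF before the final test.
 */
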
/*
 * qla8044_read_reset_template - Read Reset Template from Flash, validate
 * the template and store offsets of stop/start/init offsets in
 * ha->reset_tmplt.
 *
 * @ha : Pointer to adapter structure
 */
void
qla8044_read_reset_template(struct scsi_qla_host *vha)
{
	uint8_t *p_buff;
	uint32_t addr, tmplt_hdr_def_size, tmplt_hdr_size;

	vha->reset_tmplt.seq_error = 0;
	vha->reset_tmplt.buff = vmalloc(QLA8044_RESTART_TEMPLATE_SIZE);
	if (vha->reset_tmplt.buff == NULL) {
		ql_log(ql_log_fatal, vha, 0xb0b8,
		    "%s: Failed to allocate reset template resources\n",
		    __func__);
		goto exit_read_reset_template;
	}

	p_buff = vha->reset_tmplt.buff;
	addr = QLA8044_RESET_TEMPLATE_ADDR;

	tmplt_hdr_def_size =
	    sizeof(struct qla8044_reset_template_hdr) / sizeof(uint32_t);

	ql_dbg(ql_dbg_p3p, vha, 0xb0b9,
	    "%s: Read template hdr size %d from Flash\n",
	    __func__, tmplt_hdr_def_size);

	/* Copy template header from flash */
	if (qla8044_read_flash_data(vha, p_buff, addr, tmplt_hdr_def_size)) {
		ql_log(ql_log_fatal, vha, 0xb0ba,
		    "%s: Failed to read reset template\n", __func__);
		goto exit_read_template_error;
	}

	vha->reset_tmplt.hdr =
	    (struct qla8044_reset_template_hdr *) vha->reset_tmplt.buff;

	/* Validate the template header size and signature */
	tmplt_hdr_size = vha->reset_tmplt.hdr->hdr_size / sizeof(uint32_t);
	if ((tmplt_hdr_size != tmplt_hdr_def_size) ||
	    (vha->reset_tmplt.hdr->signature != RESET_TMPLT_HDR_SIGNATURE)) {
		ql_log(ql_log_fatal, vha, 0xb0bb,
		    "%s: Template Header size invalid %d "
		    "tmplt_hdr_def_size %d!!!\n", __func__,
		    tmplt_hdr_size, tmplt_hdr_def_size);
		goto exit_read_template_error;
	}

	addr = QLA8044_RESET_TEMPLATE_ADDR + vha->reset_tmplt.hdr->hdr_size;
	p_buff = vha->reset_tmplt.buff + vha->reset_tmplt.hdr->hdr_size;
	tmplt_hdr_def_size = (vha->reset_tmplt.hdr->size -
	    vha->reset_tmplt.hdr->hdr_size) / sizeof(uint32_t);

	ql_dbg(ql_dbg_p3p, vha, 0xb0bc,
	    "%s: Read rest of the template size %d\n",
	    __func__, vha->reset_tmplt.hdr->size);

	/* Copy rest of the template */
	if (qla8044_read_flash_data(vha, p_buff, addr, tmplt_hdr_def_size)) {
		ql_log(ql_log_fatal, vha, 0xb0bd,
		    "%s: Failed to read reset template\n", __func__);
		goto exit_read_template_error;
	}

	/* Integrity check */
	if (qla8044_reset_seq_checksum_test(vha)) {
		ql_log(ql_log_fatal, vha, 0xb0be,
		    "%s: Reset Seq checksum failed!\n", __func__);
		goto exit_read_template_error;
	}

	ql_dbg(ql_dbg_p3p, vha, 0xb0bf,
	    "%s: Reset Seq checksum passed! Get stop, "
	    "start and init seq offsets\n", __func__);

	/* Get STOP, START, INIT sequence offsets */
	vha->reset_tmplt.init_offset = vha->reset_tmplt.buff +
	    vha->reset_tmplt.hdr->init_seq_offset;

	vha->reset_tmplt.start_offset = vha->reset_tmplt.buff +
	    vha->reset_tmplt.hdr->start_seq_offset;

	vha->reset_tmplt.stop_offset = vha->reset_tmplt.buff +
	    vha->reset_tmplt.hdr->hdr_size;

	qla8044_dump_reset_seq_hdr(vha);

	goto exit_read_reset_template;

exit_read_template_error:
	vfree(vha->reset_tmplt.buff);

exit_read_reset_template:
	return;
}

void
qla8044_set_idc_dontreset(struct scsi_qla_host *vha)
{
	uint32_t idc_ctrl;
	struct qla_hw_data *ha = vha->hw;

	idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
	idc_ctrl |= DONTRESET_BIT0;
	ql_dbg(ql_dbg_p3p, vha, 0xb0c0,
	    "%s: idc_ctrl = %d\n", __func__, idc_ctrl);
	qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, idc_ctrl);
}

static inline void
qla8044_set_rst_ready(struct scsi_qla_host *vha)
{
	uint32_t drv_state;
	struct qla_hw_data *ha = vha->hw;

	drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);

	/* For ISP8044, drv_active register has 1 bit per function,
	 * shift 1 by func_num to set a bit for the function.*/
	drv_state |= (1 << ha->portnum);

	ql_log(ql_log_info, vha, 0xb0c1,
	    "%s(%ld): drv_state: 0x%08x\n",
	    __func__, vha->host_no, drv_state);
	qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, drv_state);
}
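
/*
 * Both DRV_STATE and DRV_ACTIVE carry one bit per function on ISP8044, so
 * function N contributes (1 << N); with functions 0 and 2 loaded, for
 * instance, drv_active reads 0x5 and the reset handshake below waits until
 * drv_state shows the same bits set.
 */
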
/**
 * qla8044_need_reset_handler - Code to start reset sequence
 * @ha: pointer to adapter structure
 *
 * Note: IDC lock must be held upon entry
 **/
static void
qla8044_need_reset_handler(struct scsi_qla_host *vha)
{
	uint32_t dev_state = 0, drv_state, drv_active;
	unsigned long reset_timeout;
	struct qla_hw_data *ha = vha->hw;

	ql_log(ql_log_fatal, vha, 0xb0c2,
	    "%s: Performing ISP error recovery\n", __func__);

	if (vha->flags.online) {
		qla8044_idc_unlock(ha);
		qla2x00_abort_isp_cleanup(vha);
		ha->isp_ops->get_flash_version(vha, vha->req->ring);
		ha->isp_ops->nvram_config(vha);
		qla8044_idc_lock(ha);
	}

	dev_state = qla8044_rd_direct(vha,
	    QLA8044_CRB_DEV_STATE_INDEX);
	drv_state = qla8044_rd_direct(vha,
	    QLA8044_CRB_DRV_STATE_INDEX);
	drv_active = qla8044_rd_direct(vha,
	    QLA8044_CRB_DRV_ACTIVE_INDEX);

	ql_log(ql_log_info, vha, 0xb0c5,
	    "%s(%ld): drv_state = 0x%x, drv_active = 0x%x dev_state = 0x%x\n",
	    __func__, vha->host_no, drv_state, drv_active, dev_state);

	qla8044_set_rst_ready(vha);

	/* wait for 10 seconds for reset ack from all functions */
	reset_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);

	do {
		if (time_after_eq(jiffies, reset_timeout)) {
			ql_log(ql_log_info, vha, 0xb0c4,
			    "%s: Function %d: Reset Ack Timeout!, drv_state: 0x%08x, drv_active: 0x%08x\n",
			    __func__, ha->portnum, drv_state, drv_active);
			break;
		}

		qla8044_idc_unlock(ha);
		msleep(1000);
		qla8044_idc_lock(ha);

		dev_state = qla8044_rd_direct(vha,
		    QLA8044_CRB_DEV_STATE_INDEX);
		drv_state = qla8044_rd_direct(vha,
		    QLA8044_CRB_DRV_STATE_INDEX);
		drv_active = qla8044_rd_direct(vha,
		    QLA8044_CRB_DRV_ACTIVE_INDEX);
	} while (((drv_state & drv_active) != drv_active) &&
	    (dev_state == QLA8XXX_DEV_NEED_RESET));

	/* Remove IDC participation of functions not acknowledging */
	if (drv_state != drv_active) {
		ql_log(ql_log_info, vha, 0xb0c7,
		    "%s(%ld): Function %d turning off drv_active of non-acking function 0x%x\n",
		    __func__, vha->host_no, ha->portnum,
		    (drv_active ^ drv_state));
		drv_active = drv_active & drv_state;
		qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX,
		    drv_active);
	} else {
		/*
		 * Reset owner should execute reset recovery,
		 * if all functions acknowledged
		 */
		if ((ha->flags.nic_core_reset_owner) &&
		    (dev_state == QLA8XXX_DEV_NEED_RESET)) {
			ha->flags.nic_core_reset_owner = 0;
			qla8044_device_bootstrap(vha);
			return;
		}
	}

	/* Exit if non active function */
	if (!(drv_active & (1 << ha->portnum))) {
		ha->flags.nic_core_reset_owner = 0;
		return;
	}

	/*
	 * Execute Reset Recovery if Reset Owner or Function 7
	 * is the only active function
	 */
	if (ha->flags.nic_core_reset_owner ||
	    ((drv_state & drv_active) == QLA8044_FUN7_ACTIVE_INDEX)) {
		ha->flags.nic_core_reset_owner = 0;
		qla8044_device_bootstrap(vha);
	}
}

static void
qla8044_set_drv_active(struct scsi_qla_host *vha)
{
	uint32_t drv_active;
	struct qla_hw_data *ha = vha->hw;

	drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);

	/* For ISP8044, drv_active register has 1 bit per function,
	 * shift 1 by func_num to set a bit for the function.*/
	drv_active |= (1 << ha->portnum);

	ql_log(ql_log_info, vha, 0xb0c8,
	    "%s(%ld): drv_active: 0x%08x\n",
	    __func__, vha->host_no, drv_active);
	qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, drv_active);
}

static int
qla8044_check_drv_active(struct scsi_qla_host *vha)
{
	uint32_t drv_active;
	struct qla_hw_data *ha = vha->hw;

	drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
	if (drv_active & (1 << ha->portnum))
		return QLA_SUCCESS;
	else
		return QLA_TEST_FAILED;
}

static void
qla8044_clear_idc_dontreset(struct scsi_qla_host *vha)
{
	uint32_t idc_ctrl;
	struct qla_hw_data *ha = vha->hw;

	idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
	idc_ctrl &= ~DONTRESET_BIT0;
	ql_log(ql_log_info, vha, 0xb0c9,
	    "%s: idc_ctrl = %d\n", __func__, idc_ctrl);
	qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, idc_ctrl);
}

static int
qla8044_set_idc_ver(struct scsi_qla_host *vha)
{
	int idc_ver;
	uint32_t drv_active;
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;

	drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
	if (drv_active == (1 << ha->portnum)) {
		idc_ver = qla8044_rd_direct(vha,
		    QLA8044_CRB_DRV_IDC_VERSION_INDEX);
		idc_ver &= (~0xFF);
		idc_ver |= QLA8044_IDC_VER_MAJ_VALUE;
		qla8044_wr_direct(vha, QLA8044_CRB_DRV_IDC_VERSION_INDEX,
		    idc_ver);
		ql_log(ql_log_info, vha, 0xb0ca,
		    "%s: IDC version updated to %d\n",
		    __func__, idc_ver);
	} else {
		idc_ver = qla8044_rd_direct(vha,
		    QLA8044_CRB_DRV_IDC_VERSION_INDEX);
		idc_ver &= 0xFF;
		if (QLA8044_IDC_VER_MAJ_VALUE != idc_ver) {
			ql_log(ql_log_info, vha, 0xb0cb,
			    "%s: qla2xxx driver IDC version %d "
			    "is not compatible with IDC version %d "
			    "of other drivers!\n",
			    __func__, QLA8044_IDC_VER_MAJ_VALUE,
			    idc_ver);
			rval = QLA_FUNCTION_FAILED;
			goto exit_set_idc_ver;
		}
	}

	/* Update IDC_MINOR_VERSION */
	idc_ver = qla8044_rd_reg(ha, QLA8044_CRB_IDC_VER_MINOR);
	idc_ver &= ~(0x03 << (ha->portnum * 2));
	idc_ver |= (QLA8044_IDC_VER_MIN_VALUE << (ha->portnum * 2));
	qla8044_wr_reg(ha, QLA8044_CRB_IDC_VER_MINOR, idc_ver);

exit_set_idc_ver:
	return rval;
}

static int
qla8044_update_idc_reg(struct scsi_qla_host *vha)
{
	uint32_t drv_active;
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;

	if (vha->flags.init_done)
		goto exit_update_idc_reg;

	qla8044_idc_lock(ha);
	qla8044_set_drv_active(vha);

	drv_active = qla8044_rd_direct(vha,
	    QLA8044_CRB_DRV_ACTIVE_INDEX);

	/* If we are the first driver to load and
	 * ql2xdontresethba is not set, clear IDC_CTRL BIT0. */
	if ((drv_active == (1 << ha->portnum)) && !ql2xdontresethba)
		qla8044_clear_idc_dontreset(vha);

	rval = qla8044_set_idc_ver(vha);
	if (rval == QLA_FUNCTION_FAILED)
		qla8044_clear_drv_active(ha);
	qla8044_idc_unlock(ha);

exit_update_idc_reg:
	return rval;
}

/**
 * qla8044_need_qsnt_handler - Code to start qsnt
 * @ha: pointer to adapter structure
 **/
static void
qla8044_need_qsnt_handler(struct scsi_qla_host *vha)
{
	unsigned long qsnt_timeout;
	uint32_t drv_state, drv_active, dev_state;
	struct qla_hw_data *ha = vha->hw;

	if (vha->flags.online)
		qla2x00_quiesce_io(vha);
	else
		return;

	qla8044_set_qsnt_ready(vha);

	/* Wait for 30 secs for all functions to ack qsnt mode */
	qsnt_timeout = jiffies + (QSNT_ACK_TOV * HZ);
	drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
	drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);

	/* Shift drv_active by 1 to match drv_state. As quiescent ready bit
	 * position is at bit 1 and drv active is at bit 0 */
	drv_active = drv_active << 1;

	while (drv_state != drv_active) {
		if (time_after_eq(jiffies, qsnt_timeout)) {
			/* Other functions did not ack, changing state to
			 * DEV_READY
			 */
			clear_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
			qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
			    QLA8XXX_DEV_READY);
			qla8044_clear_qsnt_ready(vha);
			ql_log(ql_log_info, vha, 0xb0cc,
			    "Timeout waiting for quiescent ack!!!\n");
			return;
		}
		qla8044_idc_unlock(ha);
		msleep(1000);
		qla8044_idc_lock(ha);

		drv_state = qla8044_rd_direct(vha,
		    QLA8044_CRB_DRV_STATE_INDEX);
		drv_active = qla8044_rd_direct(vha,
		    QLA8044_CRB_DRV_ACTIVE_INDEX);
		drv_active = drv_active << 1;
	}

	/* All functions have Acked. Set quiescent state */
	dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);

	if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT) {
		qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
		    QLA8XXX_DEV_QUIESCENT);
		ql_log(ql_log_info, vha, 0xb0cd,
		    "%s: HW State: QUIESCENT\n", __func__);
	}
}
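
/*
 * Worked example for the quiescent handshake above: with drv_active 0x3
 * (functions 0 and 1 loaded) the loop waits until drv_state reads
 * drv_active << 1 == 0x6, i.e. until every active function has set its
 * quiescent-ready bit as described in the comment preceding the shift.
 */
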
/*
 * qla8044_device_state_handler - Adapter state machine
 * @ha: pointer to host adapter structure.
 *
 * Note: IDC lock must be UNLOCKED upon entry
 **/
int
qla8044_device_state_handler(struct scsi_qla_host *vha)
{
	uint32_t dev_state;
	int rval = QLA_SUCCESS;
	unsigned long dev_init_timeout;
	struct qla_hw_data *ha = vha->hw;

	rval = qla8044_update_idc_reg(vha);
	if (rval == QLA_FUNCTION_FAILED)
		goto exit_error;

	dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
	ql_dbg(ql_dbg_p3p, vha, 0xb0ce,
	    "Device state is 0x%x = %s\n",
	    dev_state, dev_state < MAX_STATES ?
	    qdev_state(dev_state) : "Unknown");

	/* wait for 30 seconds for device to go ready */
	dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ);

	qla8044_idc_lock(ha);

	while (1) {
		if (time_after_eq(jiffies, dev_init_timeout)) {
			if (qla8044_check_drv_active(vha) == QLA_SUCCESS) {
				ql_log(ql_log_warn, vha, 0xb0cf,
				    "%s: Device Init Failed 0x%x = %s\n",
				    QLA2XXX_DRIVER_NAME, dev_state,
				    dev_state < MAX_STATES ?
				    qdev_state(dev_state) : "Unknown");
				qla8044_wr_direct(vha,
				    QLA8044_CRB_DEV_STATE_INDEX,
				    QLA8XXX_DEV_FAILED);
			}
		}

		dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
		ql_log(ql_log_info, vha, 0xb0d0,
		    "Device state is 0x%x = %s\n",
		    dev_state, dev_state < MAX_STATES ?
		    qdev_state(dev_state) : "Unknown");

		/* NOTE: Make sure idc unlocked upon exit of switch statement */
		switch (dev_state) {
		case QLA8XXX_DEV_READY:
			ha->flags.nic_core_reset_owner = 0;
			goto exit;
		case QLA8XXX_DEV_COLD:
			rval = qla8044_device_bootstrap(vha);
			break;
		case QLA8XXX_DEV_INITIALIZING:
			qla8044_idc_unlock(ha);
			msleep(1000);
			qla8044_idc_lock(ha);
			break;
		case QLA8XXX_DEV_NEED_RESET:
			/* For ISP8044, if NEED_RESET is set by any driver,
			 * it should be honored, irrespective of IDC_CTRL
			 * DONTRESET_BIT0 */
			qla8044_need_reset_handler(vha);
			break;
		case QLA8XXX_DEV_NEED_QUIESCENT:
			/* idc locked/unlocked in handler */
			qla8044_need_qsnt_handler(vha);

			/* Reset the init timeout after qsnt handler */
			dev_init_timeout = jiffies +
			    (ha->fcoe_reset_timeout * HZ);
			break;
		case QLA8XXX_DEV_QUIESCENT:
			ql_log(ql_log_info, vha, 0xb0d1,
			    "HW State: QUIESCENT\n");

			qla8044_idc_unlock(ha);
			msleep(1000);
			qla8044_idc_lock(ha);

			/* Reset the init timeout after qsnt handler */
			dev_init_timeout = jiffies +
			    (ha->fcoe_reset_timeout * HZ);
			break;
		case QLA8XXX_DEV_FAILED:
			ha->flags.nic_core_reset_owner = 0;
			qla8044_idc_unlock(ha);
			qla8xxx_dev_failed_handler(vha);
			rval = QLA_FUNCTION_FAILED;
			qla8044_idc_lock(ha);
			goto exit;
		default:
			qla8044_idc_unlock(ha);
			qla8xxx_dev_failed_handler(vha);
			rval = QLA_FUNCTION_FAILED;
			qla8044_idc_lock(ha);
			goto exit;
		}
	}
exit:
	qla8044_idc_unlock(ha);

exit_error:
	return rval;
}

/**
 * qla8044_check_temp - Check the ISP8044 temperature.
 * @ha: adapter block pointer.
 *
 * Note: The caller should not hold the idc lock.
 **/
static int
qla8044_check_temp(struct scsi_qla_host *vha)
{
	uint32_t temp, temp_state, temp_val;
	int status = QLA_SUCCESS;

	temp = qla8044_rd_direct(vha, QLA8044_CRB_TEMP_STATE_INDEX);
	temp_state = qla82xx_get_temp_state(temp);
	temp_val = qla82xx_get_temp_val(temp);

	if (temp_state == QLA82XX_TEMP_PANIC) {
		ql_log(ql_log_warn, vha, 0xb0d2,
		    "Device temperature %d degrees C"
		    " exceeds maximum allowed. Hardware has been shut"
		    " down\n", temp_val);
		status = QLA_FUNCTION_FAILED;
		return status;
	} else if (temp_state == QLA82XX_TEMP_WARN) {
		ql_log(ql_log_warn, vha, 0xb0d3,
		    "Device temperature %d"
		    " degrees C exceeds operating range."
		    " Immediate action needed.\n", temp_val);
	}
	return 0;
}

int qla8044_read_temperature(scsi_qla_host_t *vha)
{
	uint32_t temp;

	temp = qla8044_rd_direct(vha, QLA8044_CRB_TEMP_STATE_INDEX);
	return qla82xx_get_temp_val(temp);
}

/**
 * qla8044_check_fw_alive  - Check firmware health
 * @ha: Pointer to host adapter structure.
 *
 * Context: Interrupt
 **/
int
qla8044_check_fw_alive(struct scsi_qla_host *vha)
{
	uint32_t fw_heartbeat_counter;
	uint32_t halt_status1, halt_status2;
	int status = QLA_SUCCESS;

	fw_heartbeat_counter = qla8044_rd_direct(vha,
	    QLA8044_PEG_ALIVE_COUNTER_INDEX);

	/* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
	if (fw_heartbeat_counter == 0xffffffff) {
		ql_dbg(ql_dbg_p3p, vha, 0xb0d4,
		    "scsi%ld: %s: Device in frozen "
		    "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n",
		    vha->host_no, __func__);
		return status;
	}

	if (vha->fw_heartbeat_counter == fw_heartbeat_counter) {
		vha->seconds_since_last_heartbeat++;
		/* FW not alive after 2 seconds */
		if (vha->seconds_since_last_heartbeat == 2) {
			vha->seconds_since_last_heartbeat = 0;
			halt_status1 = qla8044_rd_direct(vha,
			    QLA8044_PEG_HALT_STATUS1_INDEX);
			halt_status2 = qla8044_rd_direct(vha,
			    QLA8044_PEG_HALT_STATUS2_INDEX);

			ql_log(ql_log_info, vha, 0xb0d5,
			    "scsi(%ld): %s, ISP8044 "
			    "Dumping hw/fw registers:\n"
			    " PEG_HALT_STATUS1: 0x%x, "
			    "PEG_HALT_STATUS2: 0x%x,\n",
			    vha->host_no, __func__, halt_status1,
			    halt_status2);
			status = QLA_FUNCTION_FAILED;
		}
	} else
		vha->seconds_since_last_heartbeat = 0;

	vha->fw_heartbeat_counter = fw_heartbeat_counter;
	return status;
}

void
qla8044_watchdog(struct scsi_qla_host *vha)
{
	uint32_t dev_state, halt_status;
	int halt_status_unrecoverable = 0;
	struct qla_hw_data *ha = vha->hw;

	/* don't poll if reset is going on or FW hang in quiescent state */
	if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
	    test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags))) {
		dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);

		if (qla8044_check_fw_alive(vha)) {
			ha->flags.isp82xx_fw_hung = 1;
			ql_log(ql_log_warn, vha, 0xb10a,
			    "Firmware hung.\n");
			qla82xx_clear_pending_mbx(vha);
		}

		if (qla8044_check_temp(vha)) {
			set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
			ha->flags.isp82xx_fw_hung = 1;
			qla2xxx_wake_dpc(vha);
		} else if (dev_state == QLA8XXX_DEV_NEED_RESET &&
		    !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
			ql_log(ql_log_info, vha, 0xb0d6,
"%s: HW State: NEED RESET!\n", 2150 __func__); 2151 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2152 qla2xxx_wake_dpc(vha); 2153 } else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT && 2154 !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) { 2155 ql_log(ql_log_info, vha, 0xb0d7, 2156 "%s: HW State: NEED QUIES detected!\n", 2157 __func__); 2158 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); 2159 qla2xxx_wake_dpc(vha); 2160 } else { 2161 /* Check firmware health */ 2162 if (ha->flags.isp82xx_fw_hung) { 2163 halt_status = qla8044_rd_direct(vha, 2164 QLA8044_PEG_HALT_STATUS1_INDEX); 2165 if (halt_status & 2166 QLA8044_HALT_STATUS_FW_RESET) { 2167 ql_log(ql_log_fatal, vha, 2168 0xb0d8, "%s: Firmware " 2169 "error detected device " 2170 "is being reset\n", 2171 __func__); 2172 } else if (halt_status & 2173 QLA8044_HALT_STATUS_UNRECOVERABLE) { 2174 halt_status_unrecoverable = 1; 2175 } 2176 2177 /* Since we cannot change dev_state in interrupt 2178 * context, set appropriate DPC flag then wakeup 2179 * DPC */ 2180 if (halt_status_unrecoverable) { 2181 set_bit(ISP_UNRECOVERABLE, 2182 &vha->dpc_flags); 2183 } else { 2184 if (dev_state == 2185 QLA8XXX_DEV_QUIESCENT) { 2186 set_bit(FCOE_CTX_RESET_NEEDED, 2187 &vha->dpc_flags); 2188 ql_log(ql_log_info, vha, 0xb0d9, 2189 "%s: FW CONTEXT Reset " 2190 "needed!\n", __func__); 2191 } else { 2192 ql_log(ql_log_info, vha, 2193 0xb0da, "%s: " 2194 "detect abort needed\n", 2195 __func__); 2196 set_bit(ISP_ABORT_NEEDED, 2197 &vha->dpc_flags); 2198 } 2199 } 2200 qla2xxx_wake_dpc(vha); 2201 } 2202 } 2203 2204 } 2205 } 2206 2207 static int 2208 qla8044_minidump_process_control(struct scsi_qla_host *vha, 2209 struct qla8044_minidump_entry_hdr *entry_hdr) 2210 { 2211 struct qla8044_minidump_entry_crb *crb_entry; 2212 uint32_t read_value, opcode, poll_time, addr, index; 2213 uint32_t crb_addr, rval = QLA_SUCCESS; 2214 unsigned long wtime; 2215 struct qla8044_minidump_template_hdr *tmplt_hdr; 2216 int i; 2217 struct qla_hw_data *ha = vha->hw; 2218 2219 ql_dbg(ql_dbg_p3p, vha, 0xb0dd, "Entering fn: %s\n", __func__); 2220 tmplt_hdr = (struct qla8044_minidump_template_hdr *) 2221 ha->md_tmplt_hdr; 2222 crb_entry = (struct qla8044_minidump_entry_crb *)entry_hdr; 2223 2224 crb_addr = crb_entry->addr; 2225 for (i = 0; i < crb_entry->op_count; i++) { 2226 opcode = crb_entry->crb_ctrl.opcode; 2227 2228 if (opcode & QLA82XX_DBG_OPCODE_WR) { 2229 qla8044_wr_reg_indirect(vha, crb_addr, 2230 crb_entry->value_1); 2231 opcode &= ~QLA82XX_DBG_OPCODE_WR; 2232 } 2233 2234 if (opcode & QLA82XX_DBG_OPCODE_RW) { 2235 qla8044_rd_reg_indirect(vha, crb_addr, &read_value); 2236 qla8044_wr_reg_indirect(vha, crb_addr, read_value); 2237 opcode &= ~QLA82XX_DBG_OPCODE_RW; 2238 } 2239 2240 if (opcode & QLA82XX_DBG_OPCODE_AND) { 2241 qla8044_rd_reg_indirect(vha, crb_addr, &read_value); 2242 read_value &= crb_entry->value_2; 2243 opcode &= ~QLA82XX_DBG_OPCODE_AND; 2244 if (opcode & QLA82XX_DBG_OPCODE_OR) { 2245 read_value |= crb_entry->value_3; 2246 opcode &= ~QLA82XX_DBG_OPCODE_OR; 2247 } 2248 qla8044_wr_reg_indirect(vha, crb_addr, read_value); 2249 } 2250 if (opcode & QLA82XX_DBG_OPCODE_OR) { 2251 qla8044_rd_reg_indirect(vha, crb_addr, &read_value); 2252 read_value |= crb_entry->value_3; 2253 qla8044_wr_reg_indirect(vha, crb_addr, read_value); 2254 opcode &= ~QLA82XX_DBG_OPCODE_OR; 2255 } 2256 if (opcode & QLA82XX_DBG_OPCODE_POLL) { 2257 poll_time = crb_entry->crb_strd.poll_timeout; 2258 wtime = jiffies + poll_time; 2259 qla8044_rd_reg_indirect(vha, crb_addr, &read_value); 2260 2261 do { 2262 if 
((read_value & crb_entry->value_2) == 2263 crb_entry->value_1) { 2264 break; 2265 } else if (time_after_eq(jiffies, wtime)) { 2266 /* capturing dump failed */ 2267 rval = QLA_FUNCTION_FAILED; 2268 break; 2269 } else { 2270 qla8044_rd_reg_indirect(vha, 2271 crb_addr, &read_value); 2272 } 2273 } while (1); 2274 opcode &= ~QLA82XX_DBG_OPCODE_POLL; 2275 } 2276 2277 if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) { 2278 if (crb_entry->crb_strd.state_index_a) { 2279 index = crb_entry->crb_strd.state_index_a; 2280 addr = tmplt_hdr->saved_state_array[index]; 2281 } else { 2282 addr = crb_addr; 2283 } 2284 2285 qla8044_rd_reg_indirect(vha, addr, &read_value); 2286 index = crb_entry->crb_ctrl.state_index_v; 2287 tmplt_hdr->saved_state_array[index] = read_value; 2288 opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE; 2289 } 2290 2291 if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) { 2292 if (crb_entry->crb_strd.state_index_a) { 2293 index = crb_entry->crb_strd.state_index_a; 2294 addr = tmplt_hdr->saved_state_array[index]; 2295 } else { 2296 addr = crb_addr; 2297 } 2298 2299 if (crb_entry->crb_ctrl.state_index_v) { 2300 index = crb_entry->crb_ctrl.state_index_v; 2301 read_value = 2302 tmplt_hdr->saved_state_array[index]; 2303 } else { 2304 read_value = crb_entry->value_1; 2305 } 2306 2307 qla8044_wr_reg_indirect(vha, addr, read_value); 2308 opcode &= ~QLA82XX_DBG_OPCODE_WRSTATE; 2309 } 2310 2311 if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) { 2312 index = crb_entry->crb_ctrl.state_index_v; 2313 read_value = tmplt_hdr->saved_state_array[index]; 2314 read_value <<= crb_entry->crb_ctrl.shl; 2315 read_value >>= crb_entry->crb_ctrl.shr; 2316 if (crb_entry->value_2) 2317 read_value &= crb_entry->value_2; 2318 read_value |= crb_entry->value_3; 2319 read_value += crb_entry->value_1; 2320 tmplt_hdr->saved_state_array[index] = read_value; 2321 opcode &= ~QLA82XX_DBG_OPCODE_MDSTATE; 2322 } 2323 crb_addr += crb_entry->crb_strd.addr_stride; 2324 } 2325 return rval; 2326 } 2327 2328 static void 2329 qla8044_minidump_process_rdcrb(struct scsi_qla_host *vha, 2330 struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) 2331 { 2332 uint32_t r_addr, r_stride, loop_cnt, i, r_value; 2333 struct qla8044_minidump_entry_crb *crb_hdr; 2334 uint32_t *data_ptr = *d_ptr; 2335 2336 ql_dbg(ql_dbg_p3p, vha, 0xb0de, "Entering fn: %s\n", __func__); 2337 crb_hdr = (struct qla8044_minidump_entry_crb *)entry_hdr; 2338 r_addr = crb_hdr->addr; 2339 r_stride = crb_hdr->crb_strd.addr_stride; 2340 loop_cnt = crb_hdr->op_count; 2341 2342 for (i = 0; i < loop_cnt; i++) { 2343 qla8044_rd_reg_indirect(vha, r_addr, &r_value); 2344 *data_ptr++ = r_addr; 2345 *data_ptr++ = r_value; 2346 r_addr += r_stride; 2347 } 2348 *d_ptr = data_ptr; 2349 } 2350 2351 static int 2352 qla8044_minidump_process_rdmem(struct scsi_qla_host *vha, 2353 struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) 2354 { 2355 uint32_t r_addr, r_value, r_data; 2356 uint32_t i, j, loop_cnt; 2357 struct qla8044_minidump_entry_rdmem *m_hdr; 2358 unsigned long flags; 2359 uint32_t *data_ptr = *d_ptr; 2360 struct qla_hw_data *ha = vha->hw; 2361 2362 ql_dbg(ql_dbg_p3p, vha, 0xb0df, "Entering fn: %s\n", __func__); 2363 m_hdr = (struct qla8044_minidump_entry_rdmem *)entry_hdr; 2364 r_addr = m_hdr->read_addr; 2365 loop_cnt = m_hdr->read_data_size/16; 2366 2367 ql_dbg(ql_dbg_p3p, vha, 0xb0f0, 2368 "[%s]: Read addr: 0x%x, read_data_size: 0x%x\n", 2369 __func__, r_addr, m_hdr->read_data_size); 2370 2371 if (r_addr & 0xf) { 2372 ql_dbg(ql_dbg_p3p, vha, 0xb0f1, 2373 "[%s]: Read addr 0x%x not 16 bytes 
aligned\n", 2374 __func__, r_addr); 2375 return QLA_FUNCTION_FAILED; 2376 } 2377 2378 if (m_hdr->read_data_size % 16) { 2379 ql_dbg(ql_dbg_p3p, vha, 0xb0f2, 2380 "[%s]: Read data[0x%x] not multiple of 16 bytes\n", 2381 __func__, m_hdr->read_data_size); 2382 return QLA_FUNCTION_FAILED; 2383 } 2384 2385 ql_dbg(ql_dbg_p3p, vha, 0xb0f3, 2386 "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n", 2387 __func__, r_addr, m_hdr->read_data_size, loop_cnt); 2388 2389 write_lock_irqsave(&ha->hw_lock, flags); 2390 for (i = 0; i < loop_cnt; i++) { 2391 qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_ADDR_LO, r_addr); 2392 r_value = 0; 2393 qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_ADDR_HI, r_value); 2394 r_value = MIU_TA_CTL_ENABLE; 2395 qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL, r_value); 2396 r_value = MIU_TA_CTL_START_ENABLE; 2397 qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL, r_value); 2398 2399 for (j = 0; j < MAX_CTL_CHECK; j++) { 2400 qla8044_rd_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL, 2401 &r_value); 2402 if ((r_value & MIU_TA_CTL_BUSY) == 0) 2403 break; 2404 } 2405 2406 if (j >= MAX_CTL_CHECK) { 2407 write_unlock_irqrestore(&ha->hw_lock, flags); 2408 return QLA_SUCCESS; 2409 } 2410 2411 for (j = 0; j < 4; j++) { 2412 qla8044_rd_reg_indirect(vha, MD_MIU_TEST_AGT_RDDATA[j], 2413 &r_data); 2414 *data_ptr++ = r_data; 2415 } 2416 2417 r_addr += 16; 2418 } 2419 write_unlock_irqrestore(&ha->hw_lock, flags); 2420 2421 ql_dbg(ql_dbg_p3p, vha, 0xb0f4, 2422 "Leaving fn: %s datacount: 0x%x\n", 2423 __func__, (loop_cnt * 16)); 2424 2425 *d_ptr = data_ptr; 2426 return QLA_SUCCESS; 2427 } 2428 2429 /* ISP83xx flash read for _RDROM _BOARD */ 2430 static uint32_t 2431 qla8044_minidump_process_rdrom(struct scsi_qla_host *vha, 2432 struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) 2433 { 2434 uint32_t fl_addr, u32_count, rval; 2435 struct qla8044_minidump_entry_rdrom *rom_hdr; 2436 uint32_t *data_ptr = *d_ptr; 2437 2438 rom_hdr = (struct qla8044_minidump_entry_rdrom *)entry_hdr; 2439 fl_addr = rom_hdr->read_addr; 2440 u32_count = (rom_hdr->read_data_size)/sizeof(uint32_t); 2441 2442 ql_dbg(ql_dbg_p3p, vha, 0xb0f5, "[%s]: fl_addr: 0x%x, count: 0x%x\n", 2443 __func__, fl_addr, u32_count); 2444 2445 rval = qla8044_lockless_flash_read_u32(vha, fl_addr, 2446 (u8 *)(data_ptr), u32_count); 2447 2448 if (rval != QLA_SUCCESS) { 2449 ql_log(ql_log_fatal, vha, 0xb0f6, 2450 "%s: Flash Read Error,Count=%d\n", __func__, u32_count); 2451 return QLA_FUNCTION_FAILED; 2452 } else { 2453 data_ptr += u32_count; 2454 *d_ptr = data_ptr; 2455 return QLA_SUCCESS; 2456 } 2457 } 2458 2459 static void 2460 qla8044_mark_entry_skipped(struct scsi_qla_host *vha, 2461 struct qla8044_minidump_entry_hdr *entry_hdr, int index) 2462 { 2463 entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG; 2464 2465 ql_log(ql_log_info, vha, 0xb0f7, 2466 "scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n", 2467 vha->host_no, index, entry_hdr->entry_type, 2468 entry_hdr->d_ctrl.entry_capture_mask); 2469 } 2470 2471 static int 2472 qla8044_minidump_process_l2tag(struct scsi_qla_host *vha, 2473 struct qla8044_minidump_entry_hdr *entry_hdr, 2474 uint32_t **d_ptr) 2475 { 2476 uint32_t addr, r_addr, c_addr, t_r_addr; 2477 uint32_t i, k, loop_count, t_value, r_cnt, r_value; 2478 unsigned long p_wait, w_time, p_mask; 2479 uint32_t c_value_w, c_value_r; 2480 struct qla8044_minidump_entry_cache *cache_hdr; 2481 int rval = QLA_FUNCTION_FAILED; 2482 uint32_t *data_ptr = *d_ptr; 2483 2484 ql_dbg(ql_dbg_p3p, vha, 0xb0f8, 
"Entering fn: %s\n", __func__); 2485 cache_hdr = (struct qla8044_minidump_entry_cache *)entry_hdr; 2486 2487 loop_count = cache_hdr->op_count; 2488 r_addr = cache_hdr->read_addr; 2489 c_addr = cache_hdr->control_addr; 2490 c_value_w = cache_hdr->cache_ctrl.write_value; 2491 2492 t_r_addr = cache_hdr->tag_reg_addr; 2493 t_value = cache_hdr->addr_ctrl.init_tag_value; 2494 r_cnt = cache_hdr->read_ctrl.read_addr_cnt; 2495 p_wait = cache_hdr->cache_ctrl.poll_wait; 2496 p_mask = cache_hdr->cache_ctrl.poll_mask; 2497 2498 for (i = 0; i < loop_count; i++) { 2499 qla8044_wr_reg_indirect(vha, t_r_addr, t_value); 2500 if (c_value_w) 2501 qla8044_wr_reg_indirect(vha, c_addr, c_value_w); 2502 2503 if (p_mask) { 2504 w_time = jiffies + p_wait; 2505 do { 2506 qla8044_rd_reg_indirect(vha, c_addr, 2507 &c_value_r); 2508 if ((c_value_r & p_mask) == 0) { 2509 break; 2510 } else if (time_after_eq(jiffies, w_time)) { 2511 /* capturing dump failed */ 2512 return rval; 2513 } 2514 } while (1); 2515 } 2516 2517 addr = r_addr; 2518 for (k = 0; k < r_cnt; k++) { 2519 qla8044_rd_reg_indirect(vha, addr, &r_value); 2520 *data_ptr++ = r_value; 2521 addr += cache_hdr->read_ctrl.read_addr_stride; 2522 } 2523 t_value += cache_hdr->addr_ctrl.tag_value_stride; 2524 } 2525 *d_ptr = data_ptr; 2526 return QLA_SUCCESS; 2527 } 2528 2529 static void 2530 qla8044_minidump_process_l1cache(struct scsi_qla_host *vha, 2531 struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) 2532 { 2533 uint32_t addr, r_addr, c_addr, t_r_addr; 2534 uint32_t i, k, loop_count, t_value, r_cnt, r_value; 2535 uint32_t c_value_w; 2536 struct qla8044_minidump_entry_cache *cache_hdr; 2537 uint32_t *data_ptr = *d_ptr; 2538 2539 cache_hdr = (struct qla8044_minidump_entry_cache *)entry_hdr; 2540 loop_count = cache_hdr->op_count; 2541 r_addr = cache_hdr->read_addr; 2542 c_addr = cache_hdr->control_addr; 2543 c_value_w = cache_hdr->cache_ctrl.write_value; 2544 2545 t_r_addr = cache_hdr->tag_reg_addr; 2546 t_value = cache_hdr->addr_ctrl.init_tag_value; 2547 r_cnt = cache_hdr->read_ctrl.read_addr_cnt; 2548 2549 for (i = 0; i < loop_count; i++) { 2550 qla8044_wr_reg_indirect(vha, t_r_addr, t_value); 2551 qla8044_wr_reg_indirect(vha, c_addr, c_value_w); 2552 addr = r_addr; 2553 for (k = 0; k < r_cnt; k++) { 2554 qla8044_rd_reg_indirect(vha, addr, &r_value); 2555 *data_ptr++ = r_value; 2556 addr += cache_hdr->read_ctrl.read_addr_stride; 2557 } 2558 t_value += cache_hdr->addr_ctrl.tag_value_stride; 2559 } 2560 *d_ptr = data_ptr; 2561 } 2562 2563 static void 2564 qla8044_minidump_process_rdocm(struct scsi_qla_host *vha, 2565 struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) 2566 { 2567 uint32_t r_addr, r_stride, loop_cnt, i, r_value; 2568 struct qla8044_minidump_entry_rdocm *ocm_hdr; 2569 uint32_t *data_ptr = *d_ptr; 2570 struct qla_hw_data *ha = vha->hw; 2571 2572 ql_dbg(ql_dbg_p3p, vha, 0xb0f9, "Entering fn: %s\n", __func__); 2573 2574 ocm_hdr = (struct qla8044_minidump_entry_rdocm *)entry_hdr; 2575 r_addr = ocm_hdr->read_addr; 2576 r_stride = ocm_hdr->read_addr_stride; 2577 loop_cnt = ocm_hdr->op_count; 2578 2579 ql_dbg(ql_dbg_p3p, vha, 0xb0fa, 2580 "[%s]: r_addr: 0x%x, r_stride: 0x%x, loop_cnt: 0x%x\n", 2581 __func__, r_addr, r_stride, loop_cnt); 2582 2583 for (i = 0; i < loop_cnt; i++) { 2584 r_value = readl((void __iomem *)(r_addr + ha->nx_pcibase)); 2585 *data_ptr++ = r_value; 2586 r_addr += r_stride; 2587 } 2588 ql_dbg(ql_dbg_p3p, vha, 0xb0fb, "Leaving fn: %s datacount: 0x%lx\n", 2589 __func__, (long unsigned int) (loop_cnt * 
sizeof(uint32_t))); 2590 2591 *d_ptr = data_ptr; 2592 } 2593 2594 static void 2595 qla8044_minidump_process_rdmux(struct scsi_qla_host *vha, 2596 struct qla8044_minidump_entry_hdr *entry_hdr, 2597 uint32_t **d_ptr) 2598 { 2599 uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value; 2600 struct qla8044_minidump_entry_mux *mux_hdr; 2601 uint32_t *data_ptr = *d_ptr; 2602 2603 ql_dbg(ql_dbg_p3p, vha, 0xb0fc, "Entering fn: %s\n", __func__); 2604 2605 mux_hdr = (struct qla8044_minidump_entry_mux *)entry_hdr; 2606 r_addr = mux_hdr->read_addr; 2607 s_addr = mux_hdr->select_addr; 2608 s_stride = mux_hdr->select_value_stride; 2609 s_value = mux_hdr->select_value; 2610 loop_cnt = mux_hdr->op_count; 2611 2612 for (i = 0; i < loop_cnt; i++) { 2613 qla8044_wr_reg_indirect(vha, s_addr, s_value); 2614 qla8044_rd_reg_indirect(vha, r_addr, &r_value); 2615 *data_ptr++ = s_value; 2616 *data_ptr++ = r_value; 2617 s_value += s_stride; 2618 } 2619 *d_ptr = data_ptr; 2620 } 2621 2622 static void 2623 qla8044_minidump_process_queue(struct scsi_qla_host *vha, 2624 struct qla8044_minidump_entry_hdr *entry_hdr, 2625 uint32_t **d_ptr) 2626 { 2627 uint32_t s_addr, r_addr; 2628 uint32_t r_stride, r_value, r_cnt, qid = 0; 2629 uint32_t i, k, loop_cnt; 2630 struct qla8044_minidump_entry_queue *q_hdr; 2631 uint32_t *data_ptr = *d_ptr; 2632 2633 ql_dbg(ql_dbg_p3p, vha, 0xb0fd, "Entering fn: %s\n", __func__); 2634 q_hdr = (struct qla8044_minidump_entry_queue *)entry_hdr; 2635 s_addr = q_hdr->select_addr; 2636 r_cnt = q_hdr->rd_strd.read_addr_cnt; 2637 r_stride = q_hdr->rd_strd.read_addr_stride; 2638 loop_cnt = q_hdr->op_count; 2639 2640 for (i = 0; i < loop_cnt; i++) { 2641 qla8044_wr_reg_indirect(vha, s_addr, qid); 2642 r_addr = q_hdr->read_addr; 2643 for (k = 0; k < r_cnt; k++) { 2644 qla8044_rd_reg_indirect(vha, r_addr, &r_value); 2645 *data_ptr++ = r_value; 2646 r_addr += r_stride; 2647 } 2648 qid += q_hdr->q_strd.queue_id_stride; 2649 } 2650 *d_ptr = data_ptr; 2651 } 2652 2653 /* ISP83xx functions to process new minidump entries... 
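 * (handlers for the POLLRD, RDMUX2, POLLRDMWR, RDDFE, RDMDIO and POLLWR
 * entry types follow below).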
*/ 2654 static uint32_t 2655 qla8044_minidump_process_pollrd(struct scsi_qla_host *vha, 2656 struct qla8044_minidump_entry_hdr *entry_hdr, 2657 uint32_t **d_ptr) 2658 { 2659 uint32_t r_addr, s_addr, s_value, r_value, poll_wait, poll_mask; 2660 uint16_t s_stride, i; 2661 struct qla8044_minidump_entry_pollrd *pollrd_hdr; 2662 uint32_t *data_ptr = *d_ptr; 2663 2664 pollrd_hdr = (struct qla8044_minidump_entry_pollrd *) entry_hdr; 2665 s_addr = pollrd_hdr->select_addr; 2666 r_addr = pollrd_hdr->read_addr; 2667 s_value = pollrd_hdr->select_value; 2668 s_stride = pollrd_hdr->select_value_stride; 2669 2670 poll_wait = pollrd_hdr->poll_wait; 2671 poll_mask = pollrd_hdr->poll_mask; 2672 2673 for (i = 0; i < pollrd_hdr->op_count; i++) { 2674 qla8044_wr_reg_indirect(vha, s_addr, s_value); 2675 poll_wait = pollrd_hdr->poll_wait; 2676 while (1) { 2677 qla8044_rd_reg_indirect(vha, s_addr, &r_value); 2678 if ((r_value & poll_mask) != 0) { 2679 break; 2680 } else { 2681 usleep_range(1000, 1100); 2682 if (--poll_wait == 0) { 2683 ql_log(ql_log_fatal, vha, 0xb0fe, 2684 "%s: TIMEOUT\n", __func__); 2685 goto error; 2686 } 2687 } 2688 } 2689 qla8044_rd_reg_indirect(vha, r_addr, &r_value); 2690 *data_ptr++ = s_value; 2691 *data_ptr++ = r_value; 2692 2693 s_value += s_stride; 2694 } 2695 *d_ptr = data_ptr; 2696 return QLA_SUCCESS; 2697 2698 error: 2699 return QLA_FUNCTION_FAILED; 2700 } 2701 2702 static void 2703 qla8044_minidump_process_rdmux2(struct scsi_qla_host *vha, 2704 struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) 2705 { 2706 uint32_t sel_val1, sel_val2, t_sel_val, data, i; 2707 uint32_t sel_addr1, sel_addr2, sel_val_mask, read_addr; 2708 struct qla8044_minidump_entry_rdmux2 *rdmux2_hdr; 2709 uint32_t *data_ptr = *d_ptr; 2710 2711 rdmux2_hdr = (struct qla8044_minidump_entry_rdmux2 *) entry_hdr; 2712 sel_val1 = rdmux2_hdr->select_value_1; 2713 sel_val2 = rdmux2_hdr->select_value_2; 2714 sel_addr1 = rdmux2_hdr->select_addr_1; 2715 sel_addr2 = rdmux2_hdr->select_addr_2; 2716 sel_val_mask = rdmux2_hdr->select_value_mask; 2717 read_addr = rdmux2_hdr->read_addr; 2718 2719 for (i = 0; i < rdmux2_hdr->op_count; i++) { 2720 qla8044_wr_reg_indirect(vha, sel_addr1, sel_val1); 2721 t_sel_val = sel_val1 & sel_val_mask; 2722 *data_ptr++ = t_sel_val; 2723 2724 qla8044_wr_reg_indirect(vha, sel_addr2, t_sel_val); 2725 qla8044_rd_reg_indirect(vha, read_addr, &data); 2726 2727 *data_ptr++ = data; 2728 2729 qla8044_wr_reg_indirect(vha, sel_addr1, sel_val2); 2730 t_sel_val = sel_val2 & sel_val_mask; 2731 *data_ptr++ = t_sel_val; 2732 2733 qla8044_wr_reg_indirect(vha, sel_addr2, t_sel_val); 2734 qla8044_rd_reg_indirect(vha, read_addr, &data); 2735 2736 *data_ptr++ = data; 2737 2738 sel_val1 += rdmux2_hdr->select_value_stride; 2739 sel_val2 += rdmux2_hdr->select_value_stride; 2740 } 2741 2742 *d_ptr = data_ptr; 2743 } 2744 2745 static uint32_t 2746 qla8044_minidump_process_pollrdmwr(struct scsi_qla_host *vha, 2747 struct qla8044_minidump_entry_hdr *entry_hdr, 2748 uint32_t **d_ptr) 2749 { 2750 uint32_t poll_wait, poll_mask, r_value, data; 2751 uint32_t addr_1, addr_2, value_1, value_2; 2752 struct qla8044_minidump_entry_pollrdmwr *poll_hdr; 2753 uint32_t *data_ptr = *d_ptr; 2754 2755 poll_hdr = (struct qla8044_minidump_entry_pollrdmwr *) entry_hdr; 2756 addr_1 = poll_hdr->addr_1; 2757 addr_2 = poll_hdr->addr_2; 2758 value_1 = poll_hdr->value_1; 2759 value_2 = poll_hdr->value_2; 2760 poll_mask = poll_hdr->poll_mask; 2761 2762 qla8044_wr_reg_indirect(vha, addr_1, value_1); 2763 2764 poll_wait = 
poll_hdr->poll_wait; 2765 while (1) { 2766 qla8044_rd_reg_indirect(vha, addr_1, &r_value); 2767 2768 if ((r_value & poll_mask) != 0) { 2769 break; 2770 } else { 2771 usleep_range(1000, 1100); 2772 if (--poll_wait == 0) { 2773 ql_log(ql_log_fatal, vha, 0xb0ff, 2774 "%s: TIMEOUT\n", __func__); 2775 goto error; 2776 } 2777 } 2778 } 2779 2780 qla8044_rd_reg_indirect(vha, addr_2, &data); 2781 data &= poll_hdr->modify_mask; 2782 qla8044_wr_reg_indirect(vha, addr_2, data); 2783 qla8044_wr_reg_indirect(vha, addr_1, value_2); 2784 2785 poll_wait = poll_hdr->poll_wait; 2786 while (1) { 2787 qla8044_rd_reg_indirect(vha, addr_1, &r_value); 2788 2789 if ((r_value & poll_mask) != 0) { 2790 break; 2791 } else { 2792 usleep_range(1000, 1100); 2793 if (--poll_wait == 0) { 2794 ql_log(ql_log_fatal, vha, 0xb100, 2795 "%s: TIMEOUT2\n", __func__); 2796 goto error; 2797 } 2798 } 2799 } 2800 2801 *data_ptr++ = addr_2; 2802 *data_ptr++ = data; 2803 2804 *d_ptr = data_ptr; 2805 2806 return QLA_SUCCESS; 2807 2808 error: 2809 return QLA_FUNCTION_FAILED; 2810 } 2811 2812 #define ISP8044_PEX_DMA_ENGINE_INDEX 8 2813 #define ISP8044_PEX_DMA_BASE_ADDRESS 0x77320000 2814 #define ISP8044_PEX_DMA_NUM_OFFSET 0x10000 2815 #define ISP8044_PEX_DMA_CMD_ADDR_LOW 0x0 2816 #define ISP8044_PEX_DMA_CMD_ADDR_HIGH 0x04 2817 #define ISP8044_PEX_DMA_CMD_STS_AND_CNTRL 0x08 2818 2819 #define ISP8044_PEX_DMA_READ_SIZE (16 * 1024) 2820 #define ISP8044_PEX_DMA_MAX_WAIT (100 * 100) /* Max wait of 100 msecs */ 2821 2822 static int 2823 qla8044_check_dma_engine_state(struct scsi_qla_host *vha) 2824 { 2825 struct qla_hw_data *ha = vha->hw; 2826 int rval = QLA_SUCCESS; 2827 uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0; 2828 uint64_t dma_base_addr = 0; 2829 struct qla8044_minidump_template_hdr *tmplt_hdr = NULL; 2830 2831 tmplt_hdr = ha->md_tmplt_hdr; 2832 dma_eng_num = 2833 tmplt_hdr->saved_state_array[ISP8044_PEX_DMA_ENGINE_INDEX]; 2834 dma_base_addr = ISP8044_PEX_DMA_BASE_ADDRESS + 2835 (dma_eng_num * ISP8044_PEX_DMA_NUM_OFFSET); 2836 2837 /* Read the pex-dma's command-status-and-control register. */ 2838 rval = qla8044_rd_reg_indirect(vha, 2839 (dma_base_addr + ISP8044_PEX_DMA_CMD_STS_AND_CNTRL), 2840 &cmd_sts_and_cntrl); 2841 if (rval) 2842 return QLA_FUNCTION_FAILED; 2843 2844 /* Check if requested pex-dma engine is available. */ 2845 if (cmd_sts_and_cntrl & BIT_31) 2846 return QLA_SUCCESS; 2847 2848 return QLA_FUNCTION_FAILED; 2849 } 2850 2851 static int 2852 qla8044_start_pex_dma(struct scsi_qla_host *vha, 2853 struct qla8044_minidump_entry_rdmem_pex_dma *m_hdr) 2854 { 2855 struct qla_hw_data *ha = vha->hw; 2856 int rval = QLA_SUCCESS, wait = 0; 2857 uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0; 2858 uint64_t dma_base_addr = 0; 2859 struct qla8044_minidump_template_hdr *tmplt_hdr = NULL; 2860 2861 tmplt_hdr = ha->md_tmplt_hdr; 2862 dma_eng_num = 2863 tmplt_hdr->saved_state_array[ISP8044_PEX_DMA_ENGINE_INDEX]; 2864 dma_base_addr = ISP8044_PEX_DMA_BASE_ADDRESS + 2865 (dma_eng_num * ISP8044_PEX_DMA_NUM_OFFSET); 2866 2867 rval = qla8044_wr_reg_indirect(vha, 2868 dma_base_addr + ISP8044_PEX_DMA_CMD_ADDR_LOW, 2869 m_hdr->desc_card_addr); 2870 if (rval) 2871 goto error_exit; 2872 2873 rval = qla8044_wr_reg_indirect(vha, 2874 dma_base_addr + ISP8044_PEX_DMA_CMD_ADDR_HIGH, 0); 2875 if (rval) 2876 goto error_exit; 2877 2878 rval = qla8044_wr_reg_indirect(vha, 2879 dma_base_addr + ISP8044_PEX_DMA_CMD_STS_AND_CNTRL, 2880 m_hdr->start_dma_cmd); 2881 if (rval) 2882 goto error_exit; 2883 2884 /* Wait for dma operation to complete. 
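	 * BIT_1 of the command-status-and-control register clears when the
	 * engine has finished.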
*/ 2885 for (wait = 0; wait < ISP8044_PEX_DMA_MAX_WAIT; wait++) { 2886 rval = qla8044_rd_reg_indirect(vha, 2887 (dma_base_addr + ISP8044_PEX_DMA_CMD_STS_AND_CNTRL), 2888 &cmd_sts_and_cntrl); 2889 if (rval) 2890 goto error_exit; 2891 2892 if ((cmd_sts_and_cntrl & BIT_1) == 0) 2893 break; 2894 2895 udelay(10); 2896 } 2897 2898 /* Wait a max of 100 ms, otherwise fallback to rdmem entry read */ 2899 if (wait >= ISP8044_PEX_DMA_MAX_WAIT) { 2900 rval = QLA_FUNCTION_FAILED; 2901 goto error_exit; 2902 } 2903 2904 error_exit: 2905 return rval; 2906 } 2907 2908 static int 2909 qla8044_minidump_pex_dma_read(struct scsi_qla_host *vha, 2910 struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) 2911 { 2912 struct qla_hw_data *ha = vha->hw; 2913 int rval = QLA_SUCCESS; 2914 struct qla8044_minidump_entry_rdmem_pex_dma *m_hdr = NULL; 2915 uint32_t chunk_size, read_size; 2916 uint8_t *data_ptr = (uint8_t *)*d_ptr; 2917 void *rdmem_buffer = NULL; 2918 dma_addr_t rdmem_dma; 2919 struct qla8044_pex_dma_descriptor dma_desc; 2920 2921 rval = qla8044_check_dma_engine_state(vha); 2922 if (rval != QLA_SUCCESS) { 2923 ql_dbg(ql_dbg_p3p, vha, 0xb147, 2924 "DMA engine not available. Fallback to rdmem-read.\n"); 2925 return QLA_FUNCTION_FAILED; 2926 } 2927 2928 m_hdr = (void *)entry_hdr; 2929 2930 rdmem_buffer = dma_alloc_coherent(&ha->pdev->dev, 2931 ISP8044_PEX_DMA_READ_SIZE, &rdmem_dma, GFP_KERNEL); 2932 if (!rdmem_buffer) { 2933 ql_dbg(ql_dbg_p3p, vha, 0xb148, 2934 "Unable to allocate rdmem dma buffer\n"); 2935 return QLA_FUNCTION_FAILED; 2936 } 2937 2938 /* Prepare pex-dma descriptor to be written to MS memory. */ 2939 /* dma-desc-cmd layout: 2940 * 0-3: dma-desc-cmd 0-3 2941 * 4-7: pcid function number 2942 * 8-15: dma-desc-cmd 8-15 2943 * dma_bus_addr: dma buffer address 2944 * cmd.read_data_size: amount of data-chunk to be read. 2945 */ 2946 dma_desc.cmd.dma_desc_cmd = (m_hdr->dma_desc_cmd & 0xff0f); 2947 dma_desc.cmd.dma_desc_cmd |= 2948 ((PCI_FUNC(ha->pdev->devfn) & 0xf) << 0x4); 2949 2950 dma_desc.dma_bus_addr = rdmem_dma; 2951 dma_desc.cmd.read_data_size = chunk_size = ISP8044_PEX_DMA_READ_SIZE; 2952 read_size = 0; 2953 2954 /* 2955 * Perform rdmem operation using pex-dma. 2956 * Prepare dma in chunks of ISP8044_PEX_DMA_READ_SIZE. 2957 */ 2958 while (read_size < m_hdr->read_data_size) { 2959 if (m_hdr->read_data_size - read_size < 2960 ISP8044_PEX_DMA_READ_SIZE) { 2961 chunk_size = (m_hdr->read_data_size - read_size); 2962 dma_desc.cmd.read_data_size = chunk_size; 2963 } 2964 2965 dma_desc.src_addr = m_hdr->read_addr + read_size; 2966 2967 /* Prepare: Write pex-dma descriptor to MS memory. */ 2968 rval = qla8044_ms_mem_write_128b(vha, 2969 m_hdr->desc_card_addr, (void *)&dma_desc, 2970 (sizeof(struct qla8044_pex_dma_descriptor)/16)); 2971 if (rval) { 2972 ql_log(ql_log_warn, vha, 0xb14a, 2973 "%s: Error writing rdmem-dma-init to MS !!!\n", 2974 __func__); 2975 goto error_exit; 2976 } 2977 ql_dbg(ql_dbg_p3p, vha, 0xb14b, 2978 "%s: Dma-descriptor: Instruct for rdmem dma " 2979 "(chunk_size 0x%x).\n", __func__, chunk_size); 2980 2981 /* Execute: Start pex-dma operation. 
*/ 2982 rval = qla8044_start_pex_dma(vha, m_hdr); 2983 if (rval) 2984 goto error_exit; 2985 2986 memcpy(data_ptr, rdmem_buffer, chunk_size); 2987 data_ptr += chunk_size; 2988 read_size += chunk_size; 2989 } 2990 2991 *d_ptr = (void *)data_ptr; 2992 2993 error_exit: 2994 if (rdmem_buffer) 2995 dma_free_coherent(&ha->pdev->dev, ISP8044_PEX_DMA_READ_SIZE, 2996 rdmem_buffer, rdmem_dma); 2997 2998 return rval; 2999 } 3000 3001 static uint32_t 3002 qla8044_minidump_process_rddfe(struct scsi_qla_host *vha, 3003 struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) 3004 { 3005 int loop_cnt; 3006 uint32_t addr1, addr2, value, data, temp, wrVal; 3007 uint8_t stride, stride2; 3008 uint16_t count; 3009 uint32_t poll, mask, modify_mask; 3010 uint32_t wait_count = 0; 3011 3012 uint32_t *data_ptr = *d_ptr; 3013 3014 struct qla8044_minidump_entry_rddfe *rddfe; 3015 rddfe = (struct qla8044_minidump_entry_rddfe *) entry_hdr; 3016 3017 addr1 = rddfe->addr_1; 3018 value = rddfe->value; 3019 stride = rddfe->stride; 3020 stride2 = rddfe->stride2; 3021 count = rddfe->count; 3022 3023 poll = rddfe->poll; 3024 mask = rddfe->mask; 3025 modify_mask = rddfe->modify_mask; 3026 3027 addr2 = addr1 + stride; 3028 3029 for (loop_cnt = 0x0; loop_cnt < count; loop_cnt++) { 3030 qla8044_wr_reg_indirect(vha, addr1, (0x40000000 | value)); 3031 3032 wait_count = 0; 3033 while (wait_count < poll) { 3034 qla8044_rd_reg_indirect(vha, addr1, &temp); 3035 if ((temp & mask) != 0) 3036 break; 3037 wait_count++; 3038 } 3039 3040 if (wait_count == poll) { 3041 ql_log(ql_log_warn, vha, 0xb153, 3042 "%s: TIMEOUT\n", __func__); 3043 goto error; 3044 } else { 3045 qla8044_rd_reg_indirect(vha, addr2, &temp); 3046 temp = temp & modify_mask; 3047 temp = (temp | ((loop_cnt << 16) | loop_cnt)); 3048 wrVal = ((temp << 16) | temp); 3049 3050 qla8044_wr_reg_indirect(vha, addr2, wrVal); 3051 qla8044_wr_reg_indirect(vha, addr1, value); 3052 3053 wait_count = 0; 3054 while (wait_count < poll) { 3055 qla8044_rd_reg_indirect(vha, addr1, &temp); 3056 if ((temp & mask) != 0) 3057 break; 3058 wait_count++; 3059 } 3060 if (wait_count == poll) { 3061 ql_log(ql_log_warn, vha, 0xb154, 3062 "%s: TIMEOUT\n", __func__); 3063 goto error; 3064 } 3065 3066 qla8044_wr_reg_indirect(vha, addr1, 3067 ((0x40000000 | value) + stride2)); 3068 wait_count = 0; 3069 while (wait_count < poll) { 3070 qla8044_rd_reg_indirect(vha, addr1, &temp); 3071 if ((temp & mask) != 0) 3072 break; 3073 wait_count++; 3074 } 3075 3076 if (wait_count == poll) { 3077 ql_log(ql_log_warn, vha, 0xb155, 3078 "%s: TIMEOUT\n", __func__); 3079 goto error; 3080 } 3081 3082 qla8044_rd_reg_indirect(vha, addr2, &data); 3083 3084 *data_ptr++ = wrVal; 3085 *data_ptr++ = data; 3086 } 3087 3088 } 3089 3090 *d_ptr = data_ptr; 3091 return QLA_SUCCESS; 3092 3093 error: 3094 return -1; 3095 3096 } 3097 3098 static uint32_t 3099 qla8044_minidump_process_rdmdio(struct scsi_qla_host *vha, 3100 struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) 3101 { 3102 int ret = 0; 3103 uint32_t addr1, addr2, value1, value2, data, selVal; 3104 uint8_t stride1, stride2; 3105 uint32_t addr3, addr4, addr5, addr6, addr7; 3106 uint16_t count, loop_cnt; 3107 uint32_t mask; 3108 uint32_t *data_ptr = *d_ptr; 3109 3110 struct qla8044_minidump_entry_rdmdio *rdmdio; 3111 3112 rdmdio = (struct qla8044_minidump_entry_rdmdio *) entry_hdr; 3113 3114 addr1 = rdmdio->addr_1; 3115 addr2 = rdmdio->addr_2; 3116 value1 = rdmdio->value_1; 3117 stride1 = rdmdio->stride_1; 3118 stride2 = rdmdio->stride_2; 3119 count = 
rdmdio->count; 3120 3121 mask = rdmdio->mask; 3122 value2 = rdmdio->value_2; 3123 3124 addr3 = addr1 + stride1; 3125 3126 for (loop_cnt = 0; loop_cnt < count; loop_cnt++) { 3127 ret = qla8044_poll_wait_ipmdio_bus_idle(vha, addr1, addr2, 3128 addr3, mask); 3129 if (ret == -1) 3130 goto error; 3131 3132 addr4 = addr2 - stride1; 3133 ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask, addr4, 3134 value2); 3135 if (ret == -1) 3136 goto error; 3137 3138 addr5 = addr2 - (2 * stride1); 3139 ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask, addr5, 3140 value1); 3141 if (ret == -1) 3142 goto error; 3143 3144 addr6 = addr2 - (3 * stride1); 3145 ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask, 3146 addr6, 0x2); 3147 if (ret == -1) 3148 goto error; 3149 3150 ret = qla8044_poll_wait_ipmdio_bus_idle(vha, addr1, addr2, 3151 addr3, mask); 3152 if (ret == -1) 3153 goto error; 3154 3155 addr7 = addr2 - (4 * stride1); 3156 data = qla8044_ipmdio_rd_reg(vha, addr1, addr3, mask, addr7); 3157 if (data == -1) 3158 goto error; 3159 3160 selVal = (value2 << 18) | (value1 << 2) | 2; 3161 3162 stride2 = rdmdio->stride_2; 3163 *data_ptr++ = selVal; 3164 *data_ptr++ = data; 3165 3166 value1 = value1 + stride2; 3167 *d_ptr = data_ptr; 3168 } 3169 3170 return 0; 3171 3172 error: 3173 return -1; 3174 } 3175 3176 static uint32_t qla8044_minidump_process_pollwr(struct scsi_qla_host *vha, 3177 struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) 3178 { 3179 uint32_t addr1, addr2, value1, value2, poll, r_value; 3180 uint32_t wait_count = 0; 3181 struct qla8044_minidump_entry_pollwr *pollwr_hdr; 3182 3183 pollwr_hdr = (struct qla8044_minidump_entry_pollwr *)entry_hdr; 3184 addr1 = pollwr_hdr->addr_1; 3185 addr2 = pollwr_hdr->addr_2; 3186 value1 = pollwr_hdr->value_1; 3187 value2 = pollwr_hdr->value_2; 3188 3189 poll = pollwr_hdr->poll; 3190 3191 while (wait_count < poll) { 3192 qla8044_rd_reg_indirect(vha, addr1, &r_value); 3193 3194 if ((r_value & poll) != 0) 3195 break; 3196 wait_count++; 3197 } 3198 3199 if (wait_count == poll) { 3200 ql_log(ql_log_warn, vha, 0xb156, "%s: TIMEOUT\n", __func__); 3201 goto error; 3202 } 3203 3204 qla8044_wr_reg_indirect(vha, addr2, value2); 3205 qla8044_wr_reg_indirect(vha, addr1, value1); 3206 3207 wait_count = 0; 3208 while (wait_count < poll) { 3209 qla8044_rd_reg_indirect(vha, addr1, &r_value); 3210 3211 if ((r_value & poll) != 0) 3212 break; 3213 wait_count++; 3214 } 3215 3216 return QLA_SUCCESS; 3217 3218 error: 3219 return -1; 3220 } 3221 3222 /* 3223 * 3224 * qla8044_collect_md_data - Retrieve firmware minidump data. 
3225 * @ha: pointer to adapter structure 3226 **/ 3227 int 3228 qla8044_collect_md_data(struct scsi_qla_host *vha) 3229 { 3230 int num_entry_hdr = 0; 3231 struct qla8044_minidump_entry_hdr *entry_hdr; 3232 struct qla8044_minidump_template_hdr *tmplt_hdr; 3233 uint32_t *data_ptr; 3234 uint32_t data_collected = 0, f_capture_mask; 3235 int i, rval = QLA_FUNCTION_FAILED; 3236 uint64_t now; 3237 uint32_t timestamp, idc_control; 3238 struct qla_hw_data *ha = vha->hw; 3239 3240 if (!ha->md_dump) { 3241 ql_log(ql_log_info, vha, 0xb101, 3242 "%s(%ld) No buffer to dump\n", 3243 __func__, vha->host_no); 3244 return rval; 3245 } 3246 3247 if (ha->fw_dumped) { 3248 ql_log(ql_log_warn, vha, 0xb10d, 3249 "Firmware has been previously dumped (%p) " 3250 "-- ignoring request.\n", ha->fw_dump); 3251 goto md_failed; 3252 } 3253 3254 ha->fw_dumped = 0; 3255 3256 if (!ha->md_tmplt_hdr || !ha->md_dump) { 3257 ql_log(ql_log_warn, vha, 0xb10e, 3258 "Memory not allocated for minidump capture\n"); 3259 goto md_failed; 3260 } 3261 3262 qla8044_idc_lock(ha); 3263 idc_control = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL); 3264 if (idc_control & GRACEFUL_RESET_BIT1) { 3265 ql_log(ql_log_warn, vha, 0xb112, 3266 "Forced reset from application, " 3267 "ignore minidump capture\n"); 3268 qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, 3269 (idc_control & ~GRACEFUL_RESET_BIT1)); 3270 qla8044_idc_unlock(ha); 3271 3272 goto md_failed; 3273 } 3274 qla8044_idc_unlock(ha); 3275 3276 if (qla82xx_validate_template_chksum(vha)) { 3277 ql_log(ql_log_info, vha, 0xb109, 3278 "Template checksum validation error\n"); 3279 goto md_failed; 3280 } 3281 3282 tmplt_hdr = (struct qla8044_minidump_template_hdr *) 3283 ha->md_tmplt_hdr; 3284 data_ptr = (uint32_t *)((uint8_t *)ha->md_dump); 3285 num_entry_hdr = tmplt_hdr->num_of_entries; 3286 3287 ql_dbg(ql_dbg_p3p, vha, 0xb11a, 3288 "Capture Mask obtained: 0x%x\n", tmplt_hdr->capture_debug_level); 3289 3290 f_capture_mask = tmplt_hdr->capture_debug_level & 0xFF; 3291 3292 /* Validate whether required debug level is set */ 3293 if ((f_capture_mask & 0x3) != 0x3) { 3294 ql_log(ql_log_warn, vha, 0xb10f, 3295 "Minimum required capture mask[0x%x] level not set\n", 3296 f_capture_mask); 3297 3298 } 3299 tmplt_hdr->driver_capture_mask = ql2xmdcapmask; 3300 ql_log(ql_log_info, vha, 0xb102, 3301 "[%s]: starting data ptr: %p\n", 3302 __func__, data_ptr); 3303 ql_log(ql_log_info, vha, 0xb10b, 3304 "[%s]: no of entry headers in Template: 0x%x\n", 3305 __func__, num_entry_hdr); 3306 ql_log(ql_log_info, vha, 0xb10c, 3307 "[%s]: Total_data_size 0x%x, %d obtained\n", 3308 __func__, ha->md_dump_size, ha->md_dump_size); 3309 3310 /* Update current timestamp before taking dump */ 3311 now = get_jiffies_64(); 3312 timestamp = (u32)(jiffies_to_msecs(now) / 1000); 3313 tmplt_hdr->driver_timestamp = timestamp; 3314 3315 entry_hdr = (struct qla8044_minidump_entry_hdr *) 3316 (((uint8_t *)ha->md_tmplt_hdr) + tmplt_hdr->first_entry_offset); 3317 tmplt_hdr->saved_state_array[QLA8044_SS_OCM_WNDREG_INDEX] = 3318 tmplt_hdr->ocm_window_reg[ha->portnum]; 3319 3320 /* Walk through the entry headers - validate/perform required action */ 3321 for (i = 0; i < num_entry_hdr; i++) { 3322 if (data_collected > ha->md_dump_size) { 3323 ql_log(ql_log_info, vha, 0xb103, 3324 "Data collected: [0x%x], " 3325 "Total Dump size: [0x%x]\n", 3326 data_collected, ha->md_dump_size); 3327 return rval; 3328 } 3329 3330 if (!(entry_hdr->d_ctrl.entry_capture_mask & 3331 ql2xmdcapmask)) { 3332 entry_hdr->d_ctrl.driver_flags |= 3333 QLA82XX_DBG_SKIPPED_FLAG; 
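			/* Entry not selected by the driver capture mask
			 * (ql2xmdcapmask); skip it without capturing data. */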
3334 goto skip_nxt_entry; 3335 } 3336 3337 ql_dbg(ql_dbg_p3p, vha, 0xb104, 3338 "Data collected: [0x%x], Dump size left:[0x%x]\n", 3339 data_collected, 3340 (ha->md_dump_size - data_collected)); 3341 3342 /* Decode the entry type and take required action to capture 3343 * debug data 3344 */ 3345 switch (entry_hdr->entry_type) { 3346 case QLA82XX_RDEND: 3347 qla8044_mark_entry_skipped(vha, entry_hdr, i); 3348 break; 3349 case QLA82XX_CNTRL: 3350 rval = qla8044_minidump_process_control(vha, 3351 entry_hdr); 3352 if (rval != QLA_SUCCESS) { 3353 qla8044_mark_entry_skipped(vha, entry_hdr, i); 3354 goto md_failed; 3355 } 3356 break; 3357 case QLA82XX_RDCRB: 3358 qla8044_minidump_process_rdcrb(vha, 3359 entry_hdr, &data_ptr); 3360 break; 3361 case QLA82XX_RDMEM: 3362 rval = qla8044_minidump_pex_dma_read(vha, 3363 entry_hdr, &data_ptr); 3364 if (rval != QLA_SUCCESS) { 3365 rval = qla8044_minidump_process_rdmem(vha, 3366 entry_hdr, &data_ptr); 3367 if (rval != QLA_SUCCESS) { 3368 qla8044_mark_entry_skipped(vha, 3369 entry_hdr, i); 3370 goto md_failed; 3371 } 3372 } 3373 break; 3374 case QLA82XX_BOARD: 3375 case QLA82XX_RDROM: 3376 rval = qla8044_minidump_process_rdrom(vha, 3377 entry_hdr, &data_ptr); 3378 if (rval != QLA_SUCCESS) { 3379 qla8044_mark_entry_skipped(vha, 3380 entry_hdr, i); 3381 } 3382 break; 3383 case QLA82XX_L2DTG: 3384 case QLA82XX_L2ITG: 3385 case QLA82XX_L2DAT: 3386 case QLA82XX_L2INS: 3387 rval = qla8044_minidump_process_l2tag(vha, 3388 entry_hdr, &data_ptr); 3389 if (rval != QLA_SUCCESS) { 3390 qla8044_mark_entry_skipped(vha, entry_hdr, i); 3391 goto md_failed; 3392 } 3393 break; 3394 case QLA8044_L1DTG: 3395 case QLA8044_L1ITG: 3396 case QLA82XX_L1DAT: 3397 case QLA82XX_L1INS: 3398 qla8044_minidump_process_l1cache(vha, 3399 entry_hdr, &data_ptr); 3400 break; 3401 case QLA82XX_RDOCM: 3402 qla8044_minidump_process_rdocm(vha, 3403 entry_hdr, &data_ptr); 3404 break; 3405 case QLA82XX_RDMUX: 3406 qla8044_minidump_process_rdmux(vha, 3407 entry_hdr, &data_ptr); 3408 break; 3409 case QLA82XX_QUEUE: 3410 qla8044_minidump_process_queue(vha, 3411 entry_hdr, &data_ptr); 3412 break; 3413 case QLA8044_POLLRD: 3414 rval = qla8044_minidump_process_pollrd(vha, 3415 entry_hdr, &data_ptr); 3416 if (rval != QLA_SUCCESS) 3417 qla8044_mark_entry_skipped(vha, entry_hdr, i); 3418 break; 3419 case QLA8044_RDMUX2: 3420 qla8044_minidump_process_rdmux2(vha, 3421 entry_hdr, &data_ptr); 3422 break; 3423 case QLA8044_POLLRDMWR: 3424 rval = qla8044_minidump_process_pollrdmwr(vha, 3425 entry_hdr, &data_ptr); 3426 if (rval != QLA_SUCCESS) 3427 qla8044_mark_entry_skipped(vha, entry_hdr, i); 3428 break; 3429 case QLA8044_RDDFE: 3430 rval = qla8044_minidump_process_rddfe(vha, entry_hdr, 3431 &data_ptr); 3432 if (rval != QLA_SUCCESS) 3433 qla8044_mark_entry_skipped(vha, entry_hdr, i); 3434 break; 3435 case QLA8044_RDMDIO: 3436 rval = qla8044_minidump_process_rdmdio(vha, entry_hdr, 3437 &data_ptr); 3438 if (rval != QLA_SUCCESS) 3439 qla8044_mark_entry_skipped(vha, entry_hdr, i); 3440 break; 3441 case QLA8044_POLLWR: 3442 rval = qla8044_minidump_process_pollwr(vha, entry_hdr, 3443 &data_ptr); 3444 if (rval != QLA_SUCCESS) 3445 qla8044_mark_entry_skipped(vha, entry_hdr, i); 3446 break; 3447 case QLA82XX_RDNOP: 3448 default: 3449 qla8044_mark_entry_skipped(vha, entry_hdr, i); 3450 break; 3451 } 3452 3453 data_collected = (uint8_t *)data_ptr - 3454 (uint8_t *)((uint8_t *)ha->md_dump); 3455 skip_nxt_entry: 3456 /* 3457 * next entry in the template 3458 */ 3459 entry_hdr = (struct qla8044_minidump_entry_hdr *) 3460 
(((uint8_t *)entry_hdr) + entry_hdr->entry_size); 3461 } 3462 3463 if (data_collected != ha->md_dump_size) { 3464 ql_log(ql_log_info, vha, 0xb105, 3465 "Dump data mismatch: Data collected: " 3466 "[0x%x], total_data_size:[0x%x]\n", 3467 data_collected, ha->md_dump_size); 3468 rval = QLA_FUNCTION_FAILED; 3469 goto md_failed; 3470 } 3471 3472 ql_log(ql_log_info, vha, 0xb110, 3473 "Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n", 3474 vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump); 3475 ha->fw_dumped = 1; 3476 qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP); 3477 3478 3479 ql_log(ql_log_info, vha, 0xb106, 3480 "Leaving fn: %s Last entry: 0x%x\n", 3481 __func__, i); 3482 md_failed: 3483 return rval; 3484 } 3485 3486 void 3487 qla8044_get_minidump(struct scsi_qla_host *vha) 3488 { 3489 struct qla_hw_data *ha = vha->hw; 3490 3491 if (!qla8044_collect_md_data(vha)) { 3492 ha->fw_dumped = 1; 3493 ha->prev_minidump_failed = 0; 3494 } else { 3495 ql_log(ql_log_fatal, vha, 0xb0db, 3496 "%s: Unable to collect minidump\n", 3497 __func__); 3498 ha->prev_minidump_failed = 1; 3499 } 3500 } 3501 3502 static int 3503 qla8044_poll_flash_status_reg(struct scsi_qla_host *vha) 3504 { 3505 uint32_t flash_status; 3506 int retries = QLA8044_FLASH_READ_RETRY_COUNT; 3507 int ret_val = QLA_SUCCESS; 3508 3509 while (retries--) { 3510 ret_val = qla8044_rd_reg_indirect(vha, QLA8044_FLASH_STATUS, 3511 &flash_status); 3512 if (ret_val) { 3513 ql_log(ql_log_warn, vha, 0xb13c, 3514 "%s: Failed to read FLASH_STATUS reg.\n", 3515 __func__); 3516 break; 3517 } 3518 if ((flash_status & QLA8044_FLASH_STATUS_READY) == 3519 QLA8044_FLASH_STATUS_READY) 3520 break; 3521 msleep(QLA8044_FLASH_STATUS_REG_POLL_DELAY); 3522 } 3523 3524 if (!retries) 3525 ret_val = QLA_FUNCTION_FAILED; 3526 3527 return ret_val; 3528 } 3529 3530 static int 3531 qla8044_write_flash_status_reg(struct scsi_qla_host *vha, 3532 uint32_t data) 3533 { 3534 int ret_val = QLA_SUCCESS; 3535 uint32_t cmd; 3536 3537 cmd = vha->hw->fdt_wrt_sts_reg_cmd; 3538 3539 ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR, 3540 QLA8044_FLASH_STATUS_WRITE_DEF_SIG | cmd); 3541 if (ret_val) { 3542 ql_log(ql_log_warn, vha, 0xb125, 3543 "%s: Failed to write to FLASH_ADDR.\n", __func__); 3544 goto exit_func; 3545 } 3546 3547 ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, data); 3548 if (ret_val) { 3549 ql_log(ql_log_warn, vha, 0xb126, 3550 "%s: Failed to write to FLASH_WRDATA.\n", __func__); 3551 goto exit_func; 3552 } 3553 3554 ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL, 3555 QLA8044_FLASH_SECOND_ERASE_MS_VAL); 3556 if (ret_val) { 3557 ql_log(ql_log_warn, vha, 0xb127, 3558 "%s: Failed to write to FLASH_CONTROL.\n", __func__); 3559 goto exit_func; 3560 } 3561 3562 ret_val = qla8044_poll_flash_status_reg(vha); 3563 if (ret_val) 3564 ql_log(ql_log_warn, vha, 0xb128, 3565 "%s: Error polling flash status reg.\n", __func__); 3566 3567 exit_func: 3568 return ret_val; 3569 } 3570 3571 /* 3572 * This function assumes that the flash lock is held. 3573 */ 3574 static int 3575 qla8044_unprotect_flash(scsi_qla_host_t *vha) 3576 { 3577 int ret_val; 3578 struct qla_hw_data *ha = vha->hw; 3579 3580 ret_val = qla8044_write_flash_status_reg(vha, ha->fdt_wrt_enable); 3581 if (ret_val) 3582 ql_log(ql_log_warn, vha, 0xb139, 3583 "%s: Write flash status failed.\n", __func__); 3584 3585 return ret_val; 3586 } 3587 3588 /* 3589 * This function assumes that the flash lock is held. 
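 * It re-enables write protection by writing fdt_wrt_disable to the flash
 * status register.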
3590 */ 3591 static int 3592 qla8044_protect_flash(scsi_qla_host_t *vha) 3593 { 3594 int ret_val; 3595 struct qla_hw_data *ha = vha->hw; 3596 3597 ret_val = qla8044_write_flash_status_reg(vha, ha->fdt_wrt_disable); 3598 if (ret_val) 3599 ql_log(ql_log_warn, vha, 0xb13b, 3600 "%s: Write flash status failed.\n", __func__); 3601 3602 return ret_val; 3603 } 3604 3605 3606 static int 3607 qla8044_erase_flash_sector(struct scsi_qla_host *vha, 3608 uint32_t sector_start_addr) 3609 { 3610 uint32_t reversed_addr; 3611 int ret_val = QLA_SUCCESS; 3612 3613 ret_val = qla8044_poll_flash_status_reg(vha); 3614 if (ret_val) { 3615 ql_log(ql_log_warn, vha, 0xb12e, 3616 "%s: Poll flash status after erase failed..\n", __func__); 3617 } 3618 3619 reversed_addr = (((sector_start_addr & 0xFF) << 16) | 3620 (sector_start_addr & 0xFF00) | 3621 ((sector_start_addr & 0xFF0000) >> 16)); 3622 3623 ret_val = qla8044_wr_reg_indirect(vha, 3624 QLA8044_FLASH_WRDATA, reversed_addr); 3625 if (ret_val) { 3626 ql_log(ql_log_warn, vha, 0xb12f, 3627 "%s: Failed to write to FLASH_WRDATA.\n", __func__); 3628 } 3629 ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR, 3630 QLA8044_FLASH_ERASE_SIG | vha->hw->fdt_erase_cmd); 3631 if (ret_val) { 3632 ql_log(ql_log_warn, vha, 0xb130, 3633 "%s: Failed to write to FLASH_ADDR.\n", __func__); 3634 } 3635 ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL, 3636 QLA8044_FLASH_LAST_ERASE_MS_VAL); 3637 if (ret_val) { 3638 ql_log(ql_log_warn, vha, 0xb131, 3639 "%s: Failed write to FLASH_CONTROL.\n", __func__); 3640 } 3641 ret_val = qla8044_poll_flash_status_reg(vha); 3642 if (ret_val) { 3643 ql_log(ql_log_warn, vha, 0xb132, 3644 "%s: Poll flash status failed.\n", __func__); 3645 } 3646 3647 3648 return ret_val; 3649 } 3650 3651 /* 3652 * qla8044_flash_write_u32 - Write data to flash 3653 * 3654 * @ha : Pointer to adapter structure 3655 * addr : Flash address to write to 3656 * p_data : Data to be written 3657 * 3658 * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED 3659 * 3660 * NOTE: Lock should be held on entry 3661 */ 3662 static int 3663 qla8044_flash_write_u32(struct scsi_qla_host *vha, uint32_t addr, 3664 uint32_t *p_data) 3665 { 3666 int ret_val = QLA_SUCCESS; 3667 3668 ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR, 3669 0x00800000 | (addr >> 2)); 3670 if (ret_val) { 3671 ql_log(ql_log_warn, vha, 0xb134, 3672 "%s: Failed write to FLASH_ADDR.\n", __func__); 3673 goto exit_func; 3674 } 3675 ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, *p_data); 3676 if (ret_val) { 3677 ql_log(ql_log_warn, vha, 0xb135, 3678 "%s: Failed write to FLASH_WRDATA.\n", __func__); 3679 goto exit_func; 3680 } 3681 ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL, 0x3D); 3682 if (ret_val) { 3683 ql_log(ql_log_warn, vha, 0xb136, 3684 "%s: Failed write to FLASH_CONTROL.\n", __func__); 3685 goto exit_func; 3686 } 3687 ret_val = qla8044_poll_flash_status_reg(vha); 3688 if (ret_val) { 3689 ql_log(ql_log_warn, vha, 0xb137, 3690 "%s: Poll flash status failed.\n", __func__); 3691 } 3692 3693 exit_func: 3694 return ret_val; 3695 } 3696 3697 static int 3698 qla8044_write_flash_buffer_mode(scsi_qla_host_t *vha, uint32_t *dwptr, 3699 uint32_t faddr, uint32_t dwords) 3700 { 3701 int ret = QLA_FUNCTION_FAILED; 3702 uint32_t spi_val; 3703 3704 if (dwords < QLA8044_MIN_OPTROM_BURST_DWORDS || 3705 dwords > QLA8044_MAX_OPTROM_BURST_DWORDS) { 3706 ql_dbg(ql_dbg_user, vha, 0xb123, 3707 "Got unsupported dwords = 0x%x.\n", 3708 dwords); 3709 return QLA_FUNCTION_FAILED; 3710 } 3711 
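	/* Set the SPI control bit and program the initial flash address
	 * before streaming the data words in buffer mode. */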
	qla8044_rd_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL, &spi_val);
	qla8044_wr_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL,
	    spi_val | QLA8044_FLASH_SPI_CTL);
	qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
	    QLA8044_FLASH_FIRST_TEMP_VAL);

	/* First DWORD write to FLASH_WRDATA */
	ret = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA,
	    *dwptr++);
	qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
	    QLA8044_FLASH_FIRST_MS_PATTERN);

	ret = qla8044_poll_flash_status_reg(vha);
	if (ret) {
		ql_log(ql_log_warn, vha, 0xb124,
		    "%s: Failed.\n", __func__);
		goto exit_func;
	}

	dwords--;

	qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
	    QLA8044_FLASH_SECOND_TEMP_VAL);

	/* Second to N-1 DWORD writes */
	while (dwords != 1) {
		qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, *dwptr++);
		qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
		    QLA8044_FLASH_SECOND_MS_PATTERN);
		ret = qla8044_poll_flash_status_reg(vha);
		if (ret) {
			ql_log(ql_log_warn, vha, 0xb129,
			    "%s: Failed.\n", __func__);
			goto exit_func;
		}
		dwords--;
	}

	qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
	    QLA8044_FLASH_FIRST_TEMP_VAL | (faddr >> 2));

	/* Last DWORD write */
	qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, *dwptr++);
	qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
	    QLA8044_FLASH_LAST_MS_PATTERN);
	ret = qla8044_poll_flash_status_reg(vha);
	if (ret) {
		ql_log(ql_log_warn, vha, 0xb12a,
		    "%s: Failed.\n", __func__);
		goto exit_func;
	}
	qla8044_rd_reg_indirect(vha, QLA8044_FLASH_SPI_STATUS, &spi_val);

	if ((spi_val & QLA8044_FLASH_SPI_CTL) == QLA8044_FLASH_SPI_CTL) {
		ql_log(ql_log_warn, vha, 0xb12b,
		    "%s: Failed.\n", __func__);
		spi_val = 0;
		/* Operation failed, clear error bit. */
		qla8044_rd_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL,
		    &spi_val);
		qla8044_wr_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL,
		    spi_val | QLA8044_FLASH_SPI_CTL);
	}
exit_func:
	return ret;
}

static int
qla8044_write_flash_dword_mode(scsi_qla_host_t *vha, uint32_t *dwptr,
    uint32_t faddr, uint32_t dwords)
{
	int ret = QLA_FUNCTION_FAILED;
	uint32_t liter;

	for (liter = 0; liter < dwords; liter++, faddr += 4, dwptr++) {
		ret = qla8044_flash_write_u32(vha, faddr, dwptr);
		if (ret) {
			ql_dbg(ql_dbg_p3p, vha, 0xb141,
			    "%s: flash address=%x data=%x.\n", __func__,
			    faddr, *dwptr);
			break;
		}
	}

	return ret;
}

int
qla8044_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
    uint32_t offset, uint32_t length)
{
	int rval = QLA_FUNCTION_FAILED, i, burst_iter_count;
	int dword_count, erase_sec_count;
	uint32_t erase_offset;
	uint8_t *p_cache, *p_src;

	erase_offset = offset;

	p_cache = kcalloc(length, sizeof(uint8_t), GFP_KERNEL);
	if (!p_cache)
		return QLA_FUNCTION_FAILED;

	memcpy(p_cache, buf, length);
	p_src = p_cache;
	dword_count = length / sizeof(uint32_t);
	/* Since the offset and length are sector aligned, dword_count is
	 * always a multiple of QLA8044_MAX_OPTROM_BURST_DWORDS (64)
	 */
	burst_iter_count = dword_count / QLA8044_MAX_OPTROM_BURST_DWORDS;
	erase_sec_count = length / QLA8044_SECTOR_SIZE;

	/* Suspend HBA.
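	 * Block upper-layer SCSI requests while the flash is erased and
	 * rewritten under the flash lock.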
	 */
	scsi_block_requests(vha->host);
	/* Lock and enable write for whole operation. */
	qla8044_flash_lock(vha);
	qla8044_unprotect_flash(vha);

	/* Erasing the sectors */
	for (i = 0; i < erase_sec_count; i++) {
		rval = qla8044_erase_flash_sector(vha, erase_offset);
		ql_dbg(ql_dbg_user, vha, 0xb138,
		    "Done erase of sector=0x%x.\n",
		    erase_offset);
		if (rval) {
			ql_log(ql_log_warn, vha, 0xb121,
			    "Failed to erase the sector having address: "
			    "0x%x.\n", erase_offset);
			goto out;
		}
		erase_offset += QLA8044_SECTOR_SIZE;
	}
	ql_dbg(ql_dbg_user, vha, 0xb13f,
	    "Got write for addr = 0x%x length=0x%x.\n",
	    offset, length);

	for (i = 0; i < burst_iter_count; i++) {

		/* Go with write. */
		rval = qla8044_write_flash_buffer_mode(vha, (uint32_t *)p_src,
		    offset, QLA8044_MAX_OPTROM_BURST_DWORDS);
		if (rval) {
			/* Buffer mode failed; fall back to dword mode */
			ql_log(ql_log_warn, vha, 0xb122,
			    "Failed to write flash in buffer mode, "
			    "Reverting to slow-write.\n");
			rval = qla8044_write_flash_dword_mode(vha,
			    (uint32_t *)p_src, offset,
			    QLA8044_MAX_OPTROM_BURST_DWORDS);
		}
		p_src += sizeof(uint32_t) * QLA8044_MAX_OPTROM_BURST_DWORDS;
		offset += sizeof(uint32_t) * QLA8044_MAX_OPTROM_BURST_DWORDS;
	}
	ql_dbg(ql_dbg_user, vha, 0xb133,
	    "Done writing.\n");

out:
	qla8044_protect_flash(vha);
	qla8044_flash_unlock(vha);
	scsi_unblock_requests(vha->host);
	kfree(p_cache);

	return rval;
}

#define LEG_INT_PTR_B31		(1 << 31)
#define LEG_INT_PTR_B30		(1 << 30)
#define PF_BITS_MASK		(0xF << 16)
/**
 * qla8044_intr_handler() - Process interrupts for the ISP8044
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla8044_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_82xx __iomem *reg;
	int status = 0;
	unsigned long flags;
	unsigned long iter;
	uint32_t stat;
	uint16_t mb[4];
	uint32_t leg_int_ptr = 0, pf_bit;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0xb143,
		    "%s(): NULL response queue pointer\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	vha = pci_get_drvdata(ha->pdev);

	if (unlikely(pci_channel_offline(ha->pdev)))
		return IRQ_HANDLED;

	leg_int_ptr = qla8044_rd_reg(ha, LEG_INTR_PTR_OFFSET);

	/* Legacy interrupt is valid if bit 31 of leg_int_ptr is set */
	if (!(leg_int_ptr & (LEG_INT_PTR_B31))) {
		ql_dbg(ql_dbg_p3p, vha, 0xb144,
		    "%s: Legacy Interrupt Bit 31 not set, "
		    "spurious interrupt!\n", __func__);
		return IRQ_NONE;
	}

	pf_bit = ha->portnum << 16;
	/* Validate the PCIE function ID set in leg_int_ptr bits [19..16] */
	if ((leg_int_ptr & (PF_BITS_MASK)) != pf_bit) {
		ql_dbg(ql_dbg_p3p, vha, 0xb145,
		    "%s: Incorrect function ID 0x%x in "
		    "legacy interrupt register, "
		    "ha->pf_bit = 0x%x\n", __func__,
		    (leg_int_ptr & (PF_BITS_MASK)), pf_bit);
		return IRQ_NONE;
	}

	/* To de-assert the legacy interrupt, write 0 to the Legacy Interrupt
	 * Trigger Control register and poll until bit 30 of the Legacy
	 * Interrupt Pointer register is 0.
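	 * Stop polling once the PF bits no longer identify this function.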
	qla8044_wr_reg(ha, LEG_INTR_TRIG_OFFSET, 0);
	do {
		leg_int_ptr = qla8044_rd_reg(ha, LEG_INTR_PTR_OFFSET);
		if ((leg_int_ptr & (PF_BITS_MASK)) != pf_bit)
			break;
	} while (leg_int_ptr & (LEG_INT_PTR_B30));

	reg = &ha->iobase->isp82;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (iter = 1; iter--; ) {

		if (RD_REG_DWORD(&reg->host_int)) {
			stat = RD_REG_DWORD(&reg->host_status);
			if ((stat & HSRX_RISC_INT) == 0)
				break;

			switch (stat & 0xff) {
			case 0x1:
			case 0x2:
			case 0x10:
			case 0x11:
				qla82xx_mbx_completion(vha, MSW(stat));
				status |= MBX_INTERRUPT;
				break;
			case 0x12:
				mb[0] = MSW(stat);
				mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
				mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
				mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
				qla2x00_async_event(vha, rsp, mb);
				break;
			case 0x13:
				qla24xx_process_response_queue(vha, rsp);
				break;
			default:
				ql_dbg(ql_dbg_p3p, vha, 0xb146,
				    "Unrecognized interrupt type "
				    "(%d).\n", stat & 0xff);
				break;
			}
		}
		WRT_REG_DWORD(&reg->host_int, 0);
	}

	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

static int
qla8044_idc_dontreset(struct qla_hw_data *ha)
{
	uint32_t idc_ctrl;

	idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
	return idc_ctrl & DONTRESET_BIT0;
}

static void
qla8044_clear_rst_ready(scsi_qla_host_t *vha)
{
	uint32_t drv_state;

	drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);

	/*
	 * For ISP8044, the drv_state register has one bit per function,
	 * so shift 1 by the function number to clear this function's bit.
	 * For ISP82xx, drv_state has four bits per function.
	 */
	drv_state &= ~(1 << vha->hw->portnum);

	ql_dbg(ql_dbg_p3p, vha, 0xb13d,
	    "drv_state: 0x%08x\n", drv_state);
	qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, drv_state);
}

int
qla8044_abort_isp(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t dev_state;
	struct qla_hw_data *ha = vha->hw;

	qla8044_idc_lock(ha);
	dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);

	if (ql2xdontresethba)
		qla8044_set_idc_dontreset(vha);

	/* If device_state is NEED_RESET, go ahead with the reset,
	 * irrespective of ql2xdontresethba. This allows a non-reset-owner
	 * to force a reset: the non-reset-owner sets IDC_CTRL BIT0 to
	 * prevent the reset owner from doing a reset and then forces a
	 * reset by setting device_state to NEED_RESET.
	 */
	if (dev_state == QLA8XXX_DEV_READY) {
		/* If IDC_CTRL DONTRESETHBA_BIT0 is set, don't do reset
		 * recovery.
		 */
		if (qla8044_idc_dontreset(ha) == DONTRESET_BIT0) {
			ql_dbg(ql_dbg_p3p, vha, 0xb13e,
			    "Reset recovery disabled\n");
			rval = QLA_FUNCTION_FAILED;
			goto exit_isp_reset;
		}

		ql_dbg(ql_dbg_p3p, vha, 0xb140,
		    "HW State: NEED RESET\n");
		qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
		    QLA8XXX_DEV_NEED_RESET);
	}

	/* For ISP8044, the reset owner is NIC, iSCSI or FCoE, based on
	 * priority and on which drivers are present. Unlike ISP82XX, the
	 * function setting NEED_RESET may not be the reset owner.
	 */
	qla83xx_reset_ownership(vha);

	qla8044_idc_unlock(ha);
	rval = qla8044_device_state_handler(vha);
	qla8044_idc_lock(ha);
	qla8044_clear_rst_ready(vha);

exit_isp_reset:
	qla8044_idc_unlock(ha);
	if (rval == QLA_SUCCESS) {
		ha->flags.isp82xx_fw_hung = 0;
		ha->flags.nic_core_reset_hdlr_active = 0;
		rval = qla82xx_restart_isp(vha);
	}

	return rval;
}

void
qla8044_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
{
	struct qla_hw_data *ha = vha->hw;

	if (!ha->allow_cna_fw_dump)
		return;

	scsi_block_requests(vha->host);
	ha->flags.isp82xx_no_md_cap = 1;
	qla8044_idc_lock(ha);
	qla82xx_set_reset_owner(vha);
	qla8044_idc_unlock(ha);
	qla2x00_wait_for_chip_reset(vha);
	scsi_unblock_requests(vha->host);
}
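
/*
 * Illustrative sketch only (not part of the driver): shows the layout of
 * the legacy interrupt pointer register that qla8044_intr_handler() above
 * validates, using nothing beyond the LEG_INT_PTR_B31, LEG_INT_PTR_B30 and
 * PF_BITS_MASK masks defined in this file. Bit 31 marks a valid legacy
 * interrupt, bit 30 remains set until the interrupt is de-asserted, and
 * bits [19..16] carry the PCI function number. The helper name is
 * hypothetical and exists purely to document the check.
 */
static inline bool
qla8044_leg_intr_is_ours(uint32_t leg_int_ptr, uint32_t portnum)
{
	/* Bit 31 clear: not a valid legacy interrupt at all. */
	if (!(leg_int_ptr & LEG_INT_PTR_B31))
		return false;

	/* Bits [19..16] identify the PCI function that owns the interrupt. */
	if ((leg_int_ptr & PF_BITS_MASK) != (portnum << 16))
		return false;

	return true;
}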
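
/*
 * Illustrative sketch only (not part of the driver): makes explicit the
 * alignment assumption noted in qla8044_write_optrom_data() above. If the
 * offset and length are both QLA8044_SECTOR_SIZE aligned, then the dword
 * count (length / 4) is a whole multiple of QLA8044_MAX_OPTROM_BURST_DWORDS
 * (64), so the burst loop never leaves a partial burst. The helper name is
 * hypothetical and assumes the sector size is itself a multiple of
 * 64 dwords, as it is for the values used in this file.
 */
static inline bool
qla8044_optrom_region_is_aligned(uint32_t offset, uint32_t length)
{
	/* Both offset and length must be sector aligned. */
	if ((offset % QLA8044_SECTOR_SIZE) || (length % QLA8044_SECTOR_SIZE))
		return false;

	/* Sector-aligned length implies a whole number of 64-dword bursts. */
	return ((length / sizeof(uint32_t)) %
	    QLA8044_MAX_OPTROM_BURST_DWORDS) == 0;
}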