/*
 * QLogic iSCSI HBA Driver
 * Copyright (c) 2003-2013 QLogic Corporation
 *
 * See LICENSE.qla4xxx for copyright and licensing details.
 */

#include <linux/ratelimit.h>

#include "ql4_def.h"
#include "ql4_version.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"

uint32_t qla4_83xx_rd_reg(struct scsi_qla_host *ha, ulong addr)
{
	return readl((void __iomem *)(ha->nx_pcibase + addr));
}

void qla4_83xx_wr_reg(struct scsi_qla_host *ha, ulong addr, uint32_t val)
{
	writel(val, (void __iomem *)(ha->nx_pcibase + addr));
}

static int qla4_83xx_set_win_base(struct scsi_qla_host *ha, uint32_t addr)
{
	uint32_t val;
	int ret_val = QLA_SUCCESS;

	qla4_83xx_wr_reg(ha, QLA83XX_CRB_WIN_FUNC(ha->func_num), addr);
	val = qla4_83xx_rd_reg(ha, QLA83XX_CRB_WIN_FUNC(ha->func_num));
	if (val != addr) {
		ql4_printk(KERN_ERR, ha, "%s: Failed to set register window : addr written 0x%x, read 0x%x!\n",
			   __func__, addr, val);
		ret_val = QLA_ERROR;
	}

	return ret_val;
}

int qla4_83xx_rd_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
			      uint32_t *data)
{
	int ret_val;

	ret_val = qla4_83xx_set_win_base(ha, addr);

	if (ret_val == QLA_SUCCESS)
		*data = qla4_83xx_rd_reg(ha, QLA83XX_WILDCARD);
	else
		ql4_printk(KERN_ERR, ha, "%s: failed read of addr 0x%x!\n",
			   __func__, addr);

	return ret_val;
}

int qla4_83xx_wr_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
			      uint32_t data)
{
	int ret_val;

	ret_val = qla4_83xx_set_win_base(ha, addr);

	if (ret_val == QLA_SUCCESS)
		qla4_83xx_wr_reg(ha, QLA83XX_WILDCARD, data);
	else
		ql4_printk(KERN_ERR, ha, "%s: failed wrt to addr 0x%x, data 0x%x\n",
			   __func__, addr, data);

	return ret_val;
}

static int qla4_83xx_flash_lock(struct scsi_qla_host *ha)
{
	int lock_owner;
	int timeout = 0;
	uint32_t lock_status = 0;
	int ret_val = QLA_SUCCESS;

	while (lock_status == 0) {
		lock_status = qla4_83xx_rd_reg(ha, QLA83XX_FLASH_LOCK);
		if (lock_status)
			break;

		if (++timeout >= QLA83XX_FLASH_LOCK_TIMEOUT / 20) {
			lock_owner = qla4_83xx_rd_reg(ha,
						      QLA83XX_FLASH_LOCK_ID);
			ql4_printk(KERN_ERR, ha, "%s: flash lock by func %d failed, held by func %d\n",
				   __func__, ha->func_num, lock_owner);
			ret_val = QLA_ERROR;
			break;
		}
		msleep(20);
	}

	qla4_83xx_wr_reg(ha, QLA83XX_FLASH_LOCK_ID, ha->func_num);
	return ret_val;
}

static void qla4_83xx_flash_unlock(struct scsi_qla_host *ha)
{
	/* Reading FLASH_UNLOCK register unlocks the Flash */
	qla4_83xx_wr_reg(ha, QLA83XX_FLASH_LOCK_ID, 0xFF);
	qla4_83xx_rd_reg(ha, QLA83XX_FLASH_UNLOCK);
}

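/**
 * qla4_83xx_flash_read_u32 - Read data from flash via the direct window
 * @ha: Pointer to adapter structure
 * @flash_addr: Flash address to read from (must be 4-byte aligned)
 * @p_data: Buffer that receives the data
 * @u32_word_count: Number of 32-bit words to read
 *
 * Takes the flash semaphore, programs QLA83XX_FLASH_DIRECT_WINDOW to the
 * 64KB region containing each word read, and releases the semaphore on
 * exit.
 *
 * Return: QLA_SUCCESS on success, QLA_ERROR on failure.
 **/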
int qla4_83xx_flash_read_u32(struct scsi_qla_host *ha, uint32_t flash_addr,
			     uint8_t *p_data, int u32_word_count)
{
	int i;
	uint32_t u32_word;
	uint32_t addr = flash_addr;
	int ret_val = QLA_SUCCESS;

	ret_val = qla4_83xx_flash_lock(ha);
	if (ret_val == QLA_ERROR)
		goto exit_lock_error;

	if (addr & 0x03) {
		ql4_printk(KERN_ERR, ha, "%s: Illegal addr = 0x%x\n",
			   __func__, addr);
		ret_val = QLA_ERROR;
		goto exit_flash_read;
	}

	for (i = 0; i < u32_word_count; i++) {
		ret_val = qla4_83xx_wr_reg_indirect(ha,
						    QLA83XX_FLASH_DIRECT_WINDOW,
						    (addr & 0xFFFF0000));
		if (ret_val == QLA_ERROR) {
			ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
				   __func__, addr);
			goto exit_flash_read;
		}

		ret_val = qla4_83xx_rd_reg_indirect(ha,
						    QLA83XX_FLASH_DIRECT_DATA(addr),
						    &u32_word);
		if (ret_val == QLA_ERROR) {
			ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n",
				   __func__, addr);
			goto exit_flash_read;
		}

		*(__le32 *)p_data = le32_to_cpu(u32_word);
		p_data = p_data + 4;
		addr = addr + 4;
	}

exit_flash_read:
	qla4_83xx_flash_unlock(ha);

exit_lock_error:
	return ret_val;
}

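/**
 * qla4_83xx_lockless_flash_read_u32 - Read flash without the flash semaphore
 * @ha: Pointer to adapter structure
 * @flash_addr: Flash address to read from (must be 4-byte aligned)
 * @p_data: Buffer that receives the data
 * @u32_word_count: Number of 32-bit words to read
 *
 * Same direct-window read as qla4_83xx_flash_read_u32(), except the flash
 * semaphore is never taken and the direct window is re-programmed only
 * when the read crosses a sector boundary. This is what the firmware
 * restart path (see qla4_83xx_copy_bootloader()) uses.
 **/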
int qla4_83xx_lockless_flash_read_u32(struct scsi_qla_host *ha,
				      uint32_t flash_addr, uint8_t *p_data,
				      int u32_word_count)
{
	uint32_t i;
	uint32_t u32_word;
	uint32_t flash_offset;
	uint32_t addr = flash_addr;
	int ret_val = QLA_SUCCESS;

	flash_offset = addr & (QLA83XX_FLASH_SECTOR_SIZE - 1);

	if (addr & 0x3) {
		ql4_printk(KERN_ERR, ha, "%s: Illegal addr = 0x%x\n",
			   __func__, addr);
		ret_val = QLA_ERROR;
		goto exit_lockless_read;
	}

	ret_val = qla4_83xx_wr_reg_indirect(ha, QLA83XX_FLASH_DIRECT_WINDOW,
					    addr);
	if (ret_val == QLA_ERROR) {
		ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
			   __func__, addr);
		goto exit_lockless_read;
	}

	/* Check if data is spread across multiple sectors */
	if ((flash_offset + (u32_word_count * sizeof(uint32_t))) >
	    (QLA83XX_FLASH_SECTOR_SIZE - 1)) {

		/* Multi sector read */
		for (i = 0; i < u32_word_count; i++) {
			ret_val = qla4_83xx_rd_reg_indirect(ha,
					QLA83XX_FLASH_DIRECT_DATA(addr),
					&u32_word);
			if (ret_val == QLA_ERROR) {
				ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n",
					   __func__, addr);
				goto exit_lockless_read;
			}

			*(__le32 *)p_data = le32_to_cpu(u32_word);
			p_data = p_data + 4;
			addr = addr + 4;
			flash_offset = flash_offset + 4;

			if (flash_offset > (QLA83XX_FLASH_SECTOR_SIZE - 1)) {
				/* This write is needed once for each sector */
				ret_val = qla4_83xx_wr_reg_indirect(ha,
						QLA83XX_FLASH_DIRECT_WINDOW,
						addr);
				if (ret_val == QLA_ERROR) {
					ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
						   __func__, addr);
					goto exit_lockless_read;
				}
				flash_offset = 0;
			}
		}
	} else {
		/* Single sector read */
		for (i = 0; i < u32_word_count; i++) {
			ret_val = qla4_83xx_rd_reg_indirect(ha,
					QLA83XX_FLASH_DIRECT_DATA(addr),
					&u32_word);
			if (ret_val == QLA_ERROR) {
				ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n",
					   __func__, addr);
				goto exit_lockless_read;
			}

			*(__le32 *)p_data = le32_to_cpu(u32_word);
			p_data = p_data + 4;
			addr = addr + 4;
		}
	}

exit_lockless_read:
	return ret_val;
}

void qla4_83xx_rom_lock_recovery(struct scsi_qla_host *ha)
{
	if (qla4_83xx_flash_lock(ha))
		ql4_printk(KERN_INFO, ha, "%s: Resetting rom lock\n", __func__);

	/*
	 * Either we got the lock or someone else is holding it; since we
	 * are resetting anyway, forcefully unlock.
	 */
	qla4_83xx_flash_unlock(ha);
}

/**
 * qla4_83xx_ms_mem_write_128b - Writes data to MS/off-chip memory
 * @ha: Pointer to adapter structure
 * @addr: MS/off-chip memory address to write to (must be 128-bit aligned)
 * @data: Data to be written
 * @count: Number of 128-bit chunks to be written
 *
 * Return: On success return QLA_SUCCESS
 *         On error return QLA_ERROR
 **/
int qla4_83xx_ms_mem_write_128b(struct scsi_qla_host *ha, uint64_t addr,
				uint32_t *data, uint32_t count)
{
	int i, j;
	uint32_t agt_ctrl;
	unsigned long flags;
	int ret_val = QLA_SUCCESS;

	/* Only 128-bit aligned access */
	if (addr & 0xF) {
		ret_val = QLA_ERROR;
		goto exit_ms_mem_write;
	}

	write_lock_irqsave(&ha->hw_lock, flags);

	/* Write address */
	ret_val = qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_HI, 0);
	if (ret_val == QLA_ERROR) {
		ql4_printk(KERN_ERR, ha, "%s: write to AGT_ADDR_HI failed\n",
			   __func__);
		goto exit_ms_mem_write_unlock;
	}

	for (i = 0; i < count; i++, addr += 16) {
		if (!((QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET,
					     QLA8XXX_ADDR_QDR_NET_MAX)) ||
		      (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET,
					     QLA8XXX_ADDR_DDR_NET_MAX)))) {
			ret_val = QLA_ERROR;
			goto exit_ms_mem_write_unlock;
		}

		ret_val = qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_LO,
						    addr);
		/* Write data */
		ret_val |= qla4_83xx_wr_reg_indirect(ha,
						     MD_MIU_TEST_AGT_WRDATA_LO,
						     *data++);
		ret_val |= qla4_83xx_wr_reg_indirect(ha,
						     MD_MIU_TEST_AGT_WRDATA_HI,
						     *data++);
		ret_val |= qla4_83xx_wr_reg_indirect(ha,
						     MD_MIU_TEST_AGT_WRDATA_ULO,
						     *data++);
		ret_val |= qla4_83xx_wr_reg_indirect(ha,
						     MD_MIU_TEST_AGT_WRDATA_UHI,
						     *data++);
		if (ret_val == QLA_ERROR) {
			ql4_printk(KERN_ERR, ha, "%s: write to AGT_WRDATA failed\n",
				   __func__);
			goto exit_ms_mem_write_unlock;
		}

		/* Check write status */
		ret_val = qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL,
						    MIU_TA_CTL_WRITE_ENABLE);
		ret_val |= qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL,
						     MIU_TA_CTL_WRITE_START);
		if (ret_val == QLA_ERROR) {
			ql4_printk(KERN_ERR, ha, "%s: write to AGT_CTRL failed\n",
				   __func__);
			goto exit_ms_mem_write_unlock;
		}

		for (j = 0; j < MAX_CTL_CHECK; j++) {
			ret_val = qla4_83xx_rd_reg_indirect(ha,
							    MD_MIU_TEST_AGT_CTRL,
							    &agt_ctrl);
			if (ret_val == QLA_ERROR) {
				ql4_printk(KERN_ERR, ha, "%s: failed to read MD_MIU_TEST_AGT_CTRL\n",
					   __func__);
				goto exit_ms_mem_write_unlock;
			}
			if ((agt_ctrl & MIU_TA_CTL_BUSY) == 0)
				break;
		}

		/* Status check failed */
		if (j >= MAX_CTL_CHECK) {
			printk_ratelimited(KERN_ERR "%s: MS memory write failed!\n",
					   __func__);
			ret_val = QLA_ERROR;
			goto exit_ms_mem_write_unlock;
		}
	}

exit_ms_mem_write_unlock:
	write_unlock_irqrestore(&ha->hw_lock, flags);

exit_ms_mem_write:
	return ret_val;
}

#define INTENT_TO_RECOVER	0x01
#define PROCEED_TO_RECOVER	0x02

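/*
 * qla4_83xx_lock_recovery - Recover a stuck IDC driver lock.
 *
 * If no other function is already recovering, advertise intent to recover
 * (our function number plus INTENT_TO_RECOVER), wait 200ms and check that
 * our intent is still the one advertised.  Only then proceed to recover:
 * force the unlock, clear the recovery register and attempt to take the
 * lock for this function.
 */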
static int qla4_83xx_lock_recovery(struct scsi_qla_host *ha)
{
	uint32_t lock = 0, lockid;
	int ret_val = QLA_ERROR;

	lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY);

	/* Check for other Recovery in progress, go wait */
	if ((lockid & 0x3) != 0)
		goto exit_lock_recovery;

	/* Intent to Recover */
	ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY,
				   (ha->func_num << 2) | INTENT_TO_RECOVER);

	msleep(200);

	/* Check Intent to Recover is advertised */
	lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY);
	if ((lockid & 0x3C) != (ha->func_num << 2))
		goto exit_lock_recovery;

	ql4_printk(KERN_INFO, ha, "%s: IDC Lock recovery initiated for func %d\n",
		   __func__, ha->func_num);

	/* Proceed to Recover */
	ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY,
				   (ha->func_num << 2) | PROCEED_TO_RECOVER);

	/* Force Unlock */
	ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCK_ID, 0xFF);
	ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_UNLOCK);

	/* Clear bits 0-5 in IDC_RECOVERY register */
	ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY, 0);

	/* Get lock */
	lock = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCK);
	if (lock) {
		lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCK_ID);
		lockid = ((lockid + (1 << 8)) & ~0xFF) | ha->func_num;
		ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCK_ID, lockid);
		ret_val = QLA_SUCCESS;
	}

exit_lock_recovery:
	return ret_val;
}

#define QLA83XX_DRV_LOCK_MSLEEP	200

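/**
 * qla4_83xx_drv_lock - Acquire the IDC driver lock
 * @ha: Pointer to adapter structure
 *
 * DRV_LOCK_ID encodes the holder's function number in bits 0-7 and a grab
 * counter in bits 8-31.  If the lock cannot be taken within
 * QLA83XX_DRV_LOCK_TIMEOUT and the owner has not changed in that window,
 * the holder is assumed dead and lock recovery is forced; if the owner
 * did change, the wait is simply restarted.
 **/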
int qla4_83xx_drv_lock(struct scsi_qla_host *ha)
{
	int timeout = 0;
	uint32_t status = 0;
	int ret_val = QLA_SUCCESS;
	uint32_t first_owner = 0;
	uint32_t tmo_owner = 0;
	uint32_t lock_id;
	uint32_t func_num;
	uint32_t lock_cnt;

	while (status == 0) {
		status = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK);
		if (status) {
			/* Increment Counter (8-31) and update func_num (0-7)
			 * on getting a successful lock */
			lock_id = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID);
			lock_id = ((lock_id + (1 << 8)) & ~0xFF) | ha->func_num;
			qla4_83xx_wr_reg(ha, QLA83XX_DRV_LOCK_ID, lock_id);
			break;
		}

		if (timeout == 0)
			/* Save counter + ID of function holding the lock for
			 * first failure */
			first_owner = ha->isp_ops->rd_reg_direct(ha,
							QLA83XX_DRV_LOCK_ID);

		if (++timeout >=
		    (QLA83XX_DRV_LOCK_TIMEOUT / QLA83XX_DRV_LOCK_MSLEEP)) {
			tmo_owner = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID);
			func_num = tmo_owner & 0xFF;
			lock_cnt = tmo_owner >> 8;
			ql4_printk(KERN_INFO, ha, "%s: Lock by func %d failed after 2s, lock held by func %d, lock count %d, first_owner %d\n",
				   __func__, ha->func_num, func_num, lock_cnt,
				   (first_owner & 0xFF));

			if (first_owner != tmo_owner) {
				/* Some other driver got the lock, or the same
				 * driver got it again (counter value changed)
				 * while we were waiting for the lock.
				 * Retry for another 2 sec */
				ql4_printk(KERN_INFO, ha, "%s: IDC lock failed for func %d\n",
					   __func__, ha->func_num);
				timeout = 0;
			} else {
				/* Same driver holding lock > 2sec.
				 * Force Recovery */
				ret_val = qla4_83xx_lock_recovery(ha);
				if (ret_val == QLA_SUCCESS) {
					/* Recovered and got lock */
					ql4_printk(KERN_INFO, ha, "%s: IDC lock Recovery by %d successful\n",
						   __func__, ha->func_num);
					break;
				}
				/* Recovery Failed, some other function
				 * has the lock, wait for 2secs and retry */
				ql4_printk(KERN_INFO, ha, "%s: IDC lock Recovery by %d failed, retrying timeout\n",
					   __func__, ha->func_num);
				timeout = 0;
			}
		}
		msleep(QLA83XX_DRV_LOCK_MSLEEP);
	}

	return ret_val;
}

void qla4_83xx_drv_unlock(struct scsi_qla_host *ha)
{
	int id;

	id = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID);

	if ((id & 0xFF) != ha->func_num) {
		ql4_printk(KERN_ERR, ha, "%s: IDC Unlock by %d failed, lock owner is %d\n",
			   __func__, ha->func_num, (id & 0xFF));
		return;
	}

	/* Keep lock counter value, update the ha->func_num to 0xFF */
	qla4_83xx_wr_reg(ha, QLA83XX_DRV_LOCK_ID, (id | 0xFF));
	qla4_83xx_rd_reg(ha, QLA83XX_DRV_UNLOCK);
}

void qla4_83xx_set_idc_dontreset(struct scsi_qla_host *ha)
{
	uint32_t idc_ctrl;

	idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
	idc_ctrl |= DONTRESET_BIT0;
	qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, idc_ctrl);
	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: idc_ctrl = %d\n", __func__,
			  idc_ctrl));
}

void qla4_83xx_clear_idc_dontreset(struct scsi_qla_host *ha)
{
	uint32_t idc_ctrl;

	idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
	idc_ctrl &= ~DONTRESET_BIT0;
	qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, idc_ctrl);
	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: idc_ctrl = %d\n", __func__,
			  idc_ctrl));
}

int qla4_83xx_idc_dontreset(struct scsi_qla_host *ha)
{
	uint32_t idc_ctrl;

	idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
	return idc_ctrl & DONTRESET_BIT0;
}

/*-------------------------IDC State Machine ---------------------*/

enum {
	UNKNOWN_CLASS = 0,
	NIC_CLASS,
	FCOE_CLASS,
	ISCSI_CLASS
};

struct device_info {
	int func_num;
	int device_type;
	int port_num;
};

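/**
 * qla4_83xx_can_perform_reset - Check if this function should own the reset
 * @ha: Pointer to adapter structure
 *
 * Walks the 16 function nibbles in the dev_partition registers and checks
 * drv_active to see which drivers are loaded.  Reset ownership follows
 * class priority (NIC first, then iSCSI, then FCoE), so this iSCSI
 * function may reset only when no NIC driver is active and it is the
 * lowest-numbered active iSCSI function.
 *
 * Return: 1 if this function can perform the reset, 0 otherwise.
 **/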
int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha)
{
	uint32_t drv_active;
	uint32_t dev_part, dev_part1, dev_part2;
	int i;
	struct device_info device_map[16];
	int func_nibble;
	int nibble;
	int nic_present = 0;
	int iscsi_present = 0;
	int iscsi_func_low = 0;

	/* Use the dev_partition register to determine the PCI function number
	 * and then check drv_active register to see which driver is loaded */
	dev_part1 = qla4_83xx_rd_reg(ha,
				     ha->reg_tbl[QLA8XXX_CRB_DEV_PART_INFO]);
	dev_part2 = qla4_83xx_rd_reg(ha, QLA83XX_CRB_DEV_PART_INFO2);
	drv_active = qla4_83xx_rd_reg(ha, ha->reg_tbl[QLA8XXX_CRB_DRV_ACTIVE]);

	/* Each function has 4 bits in dev_partition Info register,
	 * Lower 2 bits - device type, Upper 2 bits - physical port number */
	dev_part = dev_part1;
	for (i = nibble = 0; i <= 15; i++, nibble++) {
		func_nibble = dev_part & (0xF << (nibble * 4));
		func_nibble >>= (nibble * 4);
		device_map[i].func_num = i;
		device_map[i].device_type = func_nibble & 0x3;
		device_map[i].port_num = func_nibble & 0xC;

		if (device_map[i].device_type == NIC_CLASS) {
			if (drv_active & (1 << device_map[i].func_num)) {
				nic_present++;
				break;
			}
		} else if (device_map[i].device_type == ISCSI_CLASS) {
			if (drv_active & (1 << device_map[i].func_num)) {
				if (!iscsi_present ||
				    (iscsi_present &&
				     (iscsi_func_low > device_map[i].func_num)))
					iscsi_func_low = device_map[i].func_num;

				iscsi_present++;
			}
		}

		/* For function_num[8..15] get info from dev_part2 register */
		if (nibble == 7) {
			nibble = 0;
			dev_part = dev_part2;
		}
	}

	/* NIC, iSCSI and FCoE are the reset owners in that priority order:
	 * NIC takes precedence over iSCSI and FCoE, and iSCSI over FCoE,
	 * based on which drivers are present. */
	if (!nic_present && (ha->func_num == iscsi_func_low)) {
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "%s: can reset - NIC not present and lower iSCSI function is %d\n",
				  __func__, ha->func_num));
		return 1;
	}

	return 0;
}

/**
 * qla4_83xx_need_reset_handler - Code to start reset sequence
 * @ha: pointer to adapter structure
 *
 * Note: IDC lock must be held upon entry
 **/
void qla4_83xx_need_reset_handler(struct scsi_qla_host *ha)
{
	uint32_t dev_state, drv_state, drv_active;
	unsigned long reset_timeout, dev_init_timeout;

	ql4_printk(KERN_INFO, ha, "%s: Performing ISP error recovery\n",
		   __func__);

	if (!test_bit(AF_8XXX_RST_OWNER, &ha->flags)) {
		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: reset acknowledged\n",
				  __func__));
		qla4_8xxx_set_rst_ready(ha);

		/* Non-reset owners ACK Reset and wait for device INIT state
		 * as part of Reset Recovery by Reset Owner */
		dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);

		do {
			if (time_after_eq(jiffies, dev_init_timeout)) {
				ql4_printk(KERN_INFO, ha, "%s: Non Reset owner dev init timeout\n",
					   __func__);
				break;
			}

			ha->isp_ops->idc_unlock(ha);
			msleep(1000);
			ha->isp_ops->idc_lock(ha);

			dev_state = qla4_8xxx_rd_direct(ha,
							QLA8XXX_CRB_DEV_STATE);
		} while (dev_state == QLA8XXX_DEV_NEED_RESET);
	} else {
		qla4_8xxx_set_rst_ready(ha);
		reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
		drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
		drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);

		ql4_printk(KERN_INFO, ha, "%s: drv_state = 0x%x, drv_active = 0x%x\n",
			   __func__, drv_state, drv_active);

		while (drv_state != drv_active) {
			if (time_after_eq(jiffies, reset_timeout)) {
				ql4_printk(KERN_INFO, ha, "%s: %s: RESET TIMEOUT! drv_state: 0x%08x, drv_active: 0x%08x\n",
					   __func__, DRIVER_NAME, drv_state,
					   drv_active);
				break;
			}

			ha->isp_ops->idc_unlock(ha);
			msleep(1000);
			ha->isp_ops->idc_lock(ha);

			drv_state = qla4_8xxx_rd_direct(ha,
							QLA8XXX_CRB_DRV_STATE);
			drv_active = qla4_8xxx_rd_direct(ha,
							 QLA8XXX_CRB_DRV_ACTIVE);
		}

		if (drv_state != drv_active) {
			ql4_printk(KERN_INFO, ha, "%s: Reset_owner turning off drv_active of non-acking function 0x%x\n",
				   __func__, (drv_active ^ drv_state));
			drv_active = drv_active & drv_state;
			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_ACTIVE,
					    drv_active);
		}

		clear_bit(AF_8XXX_RST_OWNER, &ha->flags);
		/* Start Reset Recovery */
		qla4_8xxx_device_bootstrap(ha);
	}
}

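/**
 * qla4_83xx_get_idc_param - Read IDC timeouts from flash
 * @ha: Pointer to adapter structure
 *
 * Reads the dev-init and reset timeouts from QLA83XX_IDC_PARAM_ADDR in
 * flash; if the read fails, falls back to the ROM_DEV_INIT_TIMEOUT and
 * ROM_DRV_RESET_ACK_TIMEOUT defaults.
 **/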
void qla4_83xx_get_idc_param(struct scsi_qla_host *ha)
{
	uint32_t idc_params, ret_val;

	ret_val = qla4_83xx_flash_read_u32(ha, QLA83XX_IDC_PARAM_ADDR,
					   (uint8_t *)&idc_params, 1);
	if (ret_val == QLA_SUCCESS) {
		ha->nx_dev_init_timeout = idc_params & 0xFFFF;
		ha->nx_reset_timeout = (idc_params >> 16) & 0xFFFF;
	} else {
		ha->nx_dev_init_timeout = ROM_DEV_INIT_TIMEOUT;
		ha->nx_reset_timeout = ROM_DRV_RESET_ACK_TIMEOUT;
	}

	DEBUG2(ql4_printk(KERN_DEBUG, ha,
			  "%s: ha->nx_dev_init_timeout = %d, ha->nx_reset_timeout = %d\n",
			  __func__, ha->nx_dev_init_timeout,
			  ha->nx_reset_timeout));
}

/*-------------------------Reset Sequence Functions-----------------------*/

static void qla4_83xx_dump_reset_seq_hdr(struct scsi_qla_host *ha)
{
	uint8_t *phdr;

	if (!ha->reset_tmplt.buff) {
		ql4_printk(KERN_ERR, ha, "%s: Error: Invalid reset_seq_template\n",
			   __func__);
		return;
	}

	phdr = ha->reset_tmplt.buff;

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "Reset Template: 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n",
			  *phdr, *(phdr+1), *(phdr+2), *(phdr+3), *(phdr+4),
			  *(phdr+5), *(phdr+6), *(phdr+7), *(phdr+8),
			  *(phdr+9), *(phdr+10), *(phdr+11), *(phdr+12),
			  *(phdr+13), *(phdr+14), *(phdr+15)));
}

static int qla4_83xx_copy_bootloader(struct scsi_qla_host *ha)
{
	uint8_t *p_cache;
	uint32_t src, count, size;
	uint64_t dest;
	int ret_val = QLA_SUCCESS;

	src = QLA83XX_BOOTLOADER_FLASH_ADDR;
	dest = qla4_83xx_rd_reg(ha, QLA83XX_BOOTLOADER_ADDR);
	size = qla4_83xx_rd_reg(ha, QLA83XX_BOOTLOADER_SIZE);

	/* 128 bit alignment check */
	if (size & 0xF)
		size = (size + 16) & ~0xF;

	/* 16 byte count */
	count = size / 16;

	p_cache = vmalloc(size);
	if (p_cache == NULL) {
		ql4_printk(KERN_ERR, ha, "%s: Failed to allocate memory for boot loader cache\n",
			   __func__);
		ret_val = QLA_ERROR;
		goto exit_copy_bootloader;
	}

	ret_val = qla4_83xx_lockless_flash_read_u32(ha, src, p_cache,
						    size / sizeof(uint32_t));
	if (ret_val == QLA_ERROR) {
		ql4_printk(KERN_ERR, ha, "%s: Error reading firmware from flash\n",
			   __func__);
		goto exit_copy_error;
	}
	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Read firmware from flash\n",
			  __func__));

	/* 128 bit/16 byte write to MS memory */
	ret_val = qla4_83xx_ms_mem_write_128b(ha, dest, (uint32_t *)p_cache,
					      count);
	if (ret_val == QLA_ERROR) {
		ql4_printk(KERN_ERR, ha, "%s: Error writing firmware to MS\n",
			   __func__);
		goto exit_copy_error;
	}

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Wrote firmware size %d to MS\n",
			  __func__, size));

exit_copy_error:
	vfree(p_cache);

exit_copy_bootloader:
	return ret_val;
}

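/**
 * qla4_83xx_check_cmd_peg_status - Wait for Command Peg initialization
 * @ha: Pointer to adapter structure
 *
 * Polls QLA83XX_CMDPEG_STATE up to CRB_CMDPEG_CHECK_RETRY_COUNT times,
 * sleeping CRB_CMDPEG_CHECK_DELAY ms between reads, until the firmware
 * reports PHAN_INITIALIZE_COMPLETE.
 **/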
static int qla4_83xx_check_cmd_peg_status(struct scsi_qla_host *ha)
{
	uint32_t val, ret_val = QLA_ERROR;
	int retries = CRB_CMDPEG_CHECK_RETRY_COUNT;

	do {
		val = qla4_83xx_rd_reg(ha, QLA83XX_CMDPEG_STATE);
		if (val == PHAN_INITIALIZE_COMPLETE) {
			DEBUG2(ql4_printk(KERN_INFO, ha,
					  "%s: Command Peg initialization complete. State=0x%x\n",
					  __func__, val));
			ret_val = QLA_SUCCESS;
			break;
		}
		msleep(CRB_CMDPEG_CHECK_DELAY);
	} while (--retries);

	return ret_val;
}

/**
 * qla4_83xx_poll_reg - Poll the given CRB addr for duration msecs till
 * value read ANDed with test_mask is equal to test_result.
 *
 * @ha : Pointer to adapter structure
 * @addr : CRB register address
 * @duration : Poll for total of "duration" msecs
 * @test_mask : Mask the value read with "test_mask"
 * @test_result : Compare (value & test_mask) with test_result.
 **/
static int qla4_83xx_poll_reg(struct scsi_qla_host *ha, uint32_t addr,
			      int duration, uint32_t test_mask,
			      uint32_t test_result)
{
	uint32_t value;
	uint8_t retries;
	int ret_val = QLA_SUCCESS;

	ret_val = qla4_83xx_rd_reg_indirect(ha, addr, &value);
	if (ret_val == QLA_ERROR)
		goto exit_poll_reg;

	retries = duration / 10;
	do {
		if ((value & test_mask) != test_result) {
			msleep(duration / 10);
			ret_val = qla4_83xx_rd_reg_indirect(ha, addr, &value);
			if (ret_val == QLA_ERROR)
				goto exit_poll_reg;

			ret_val = QLA_ERROR;
		} else {
			ret_val = QLA_SUCCESS;
			break;
		}
	} while (retries--);

exit_poll_reg:
	if (ret_val == QLA_ERROR) {
		ha->reset_tmplt.seq_error++;
		ql4_printk(KERN_ERR, ha, "%s: Poll Failed: 0x%08x 0x%08x 0x%08x\n",
			   __func__, value, test_mask, test_result);
	}

	return ret_val;
}

static int qla4_83xx_reset_seq_checksum_test(struct scsi_qla_host *ha)
{
	uint32_t sum = 0;
	uint16_t *buff = (uint16_t *)ha->reset_tmplt.buff;
	int u16_count = ha->reset_tmplt.hdr->size / sizeof(uint16_t);
	int ret_val;

	while (u16_count-- > 0)
		sum += *buff++;

	while (sum >> 16)
		sum = (sum & 0xFFFF) + (sum >> 16);

	/* checksum of 0 indicates a valid template */
	if (!sum) {
		ret_val = QLA_SUCCESS;
	} else {
		ql4_printk(KERN_ERR, ha, "%s: Reset seq checksum failed\n",
			   __func__);
		ret_val = QLA_ERROR;
	}

	return ret_val;
}

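/*
 * Example of the 16-bit end-around-carry fold used above, with made-up
 * values: summing the words 0xFFFF and 0x0003 gives sum = 0x10002, and one
 * fold turns that into (0x0002 + 0x1) = 0x0003.  A template whose words
 * (including the stored checksum field) fold to 0 is accepted.
 */
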
/**
 * qla4_83xx_read_reset_template - Read Reset Template from Flash
 * @ha: Pointer to adapter structure
 **/
void qla4_83xx_read_reset_template(struct scsi_qla_host *ha)
{
	uint8_t *p_buff;
	uint32_t addr, tmplt_hdr_def_size, tmplt_hdr_size;
	uint32_t ret_val;

	ha->reset_tmplt.seq_error = 0;
	ha->reset_tmplt.buff = vmalloc(QLA83XX_RESTART_TEMPLATE_SIZE);
	if (ha->reset_tmplt.buff == NULL) {
		ql4_printk(KERN_ERR, ha, "%s: Failed to allocate reset template resources\n",
			   __func__);
		goto exit_read_reset_template;
	}

	p_buff = ha->reset_tmplt.buff;
	addr = QLA83XX_RESET_TEMPLATE_ADDR;

	tmplt_hdr_def_size = sizeof(struct qla4_83xx_reset_template_hdr) /
			     sizeof(uint32_t);

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: Read template hdr size %d from Flash\n",
			  __func__, tmplt_hdr_def_size));

	/* Copy template header from flash */
	ret_val = qla4_83xx_flash_read_u32(ha, addr, p_buff,
					   tmplt_hdr_def_size);
	if (ret_val != QLA_SUCCESS) {
		ql4_printk(KERN_ERR, ha, "%s: Failed to read reset template\n",
			   __func__);
		goto exit_read_template_error;
	}

	ha->reset_tmplt.hdr =
		(struct qla4_83xx_reset_template_hdr *)ha->reset_tmplt.buff;

	/* Validate the template header size and signature */
	tmplt_hdr_size = ha->reset_tmplt.hdr->hdr_size / sizeof(uint32_t);
	if ((tmplt_hdr_size != tmplt_hdr_def_size) ||
	    (ha->reset_tmplt.hdr->signature != RESET_TMPLT_HDR_SIGNATURE)) {
		ql4_printk(KERN_ERR, ha, "%s: Template Header size %d is invalid, tmplt_hdr_def_size %d\n",
			   __func__, tmplt_hdr_size, tmplt_hdr_def_size);
		goto exit_read_template_error;
	}

	addr = QLA83XX_RESET_TEMPLATE_ADDR + ha->reset_tmplt.hdr->hdr_size;
	p_buff = ha->reset_tmplt.buff + ha->reset_tmplt.hdr->hdr_size;
	tmplt_hdr_def_size = (ha->reset_tmplt.hdr->size -
			      ha->reset_tmplt.hdr->hdr_size) / sizeof(uint32_t);

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: Read rest of the template size %d\n",
			  __func__, ha->reset_tmplt.hdr->size));

	/* Copy rest of the template */
	ret_val = qla4_83xx_flash_read_u32(ha, addr, p_buff,
					   tmplt_hdr_def_size);
	if (ret_val != QLA_SUCCESS) {
		ql4_printk(KERN_ERR, ha, "%s: Failed to read reset template\n",
			   __func__);
		goto exit_read_template_error;
	}

	/* Integrity check */
	if (qla4_83xx_reset_seq_checksum_test(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: Reset Seq checksum failed!\n",
			   __func__);
		goto exit_read_template_error;
	}
	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: Reset Seq checksum passed, Get stop, start and init seq offsets\n",
			  __func__));

	/* Get STOP, START, INIT sequence offsets */
	ha->reset_tmplt.init_offset = ha->reset_tmplt.buff +
				      ha->reset_tmplt.hdr->init_seq_offset;
	ha->reset_tmplt.start_offset = ha->reset_tmplt.buff +
				       ha->reset_tmplt.hdr->start_seq_offset;
	ha->reset_tmplt.stop_offset = ha->reset_tmplt.buff +
				      ha->reset_tmplt.hdr->hdr_size;
	qla4_83xx_dump_reset_seq_hdr(ha);

	goto exit_read_reset_template;

exit_read_template_error:
	vfree(ha->reset_tmplt.buff);

exit_read_reset_template:
	return;
}

/**
 * qla4_83xx_read_write_crb_reg - Read from raddr and write value to waddr.
 *
 * @ha : Pointer to adapter structure
 * @raddr : CRB address to read from
 * @waddr : CRB address to write to
 **/
static void qla4_83xx_read_write_crb_reg(struct scsi_qla_host *ha,
					 uint32_t raddr, uint32_t waddr)
{
	uint32_t value;

	qla4_83xx_rd_reg_indirect(ha, raddr, &value);
	qla4_83xx_wr_reg_indirect(ha, waddr, value);
}

/**
 * qla4_83xx_rmw_crb_reg - Read-Modify-Write CRB register
 *
 * This function reads the value from raddr (or from the saved-value array
 * if index_a is set), ANDs it with test_mask, shifts it left then right,
 * ORs and XORs it with the values from the RMW header, and writes the
 * result to waddr.
 *
 * @ha : Pointer to adapter structure
 * @raddr : CRB address to read from
 * @waddr : CRB address to write to
 * @p_rmw_hdr : header with shift/or/xor values.
 **/
static void qla4_83xx_rmw_crb_reg(struct scsi_qla_host *ha, uint32_t raddr,
				  uint32_t waddr,
				  struct qla4_83xx_rmw *p_rmw_hdr)
{
	uint32_t value;

	if (p_rmw_hdr->index_a)
		value = ha->reset_tmplt.array[p_rmw_hdr->index_a];
	else
		qla4_83xx_rd_reg_indirect(ha, raddr, &value);

	value &= p_rmw_hdr->test_mask;
	value <<= p_rmw_hdr->shl;
	value >>= p_rmw_hdr->shr;
	value |= p_rmw_hdr->or_value;
	value ^= p_rmw_hdr->xor_value;

	qla4_83xx_wr_reg_indirect(ha, waddr, value);

	return;
}

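/*
 * Worked example for qla4_83xx_rmw_crb_reg() with made-up header values:
 * test_mask = 0x0000FF00, shl = 0, shr = 8, or_value = 0x1, xor_value = 0.
 * A register value of 0xAABBCCDD is masked to 0x0000CC00, shifted down to
 * 0xCC, OR-ed to 0xCD and written to waddr.
 */
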
static void qla4_83xx_write_list(struct scsi_qla_host *ha,
				 struct qla4_83xx_reset_entry_hdr *p_hdr)
{
	struct qla4_83xx_entry *p_entry;
	uint32_t i;

	p_entry = (struct qla4_83xx_entry *)
		  ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));

	for (i = 0; i < p_hdr->count; i++, p_entry++) {
		qla4_83xx_wr_reg_indirect(ha, p_entry->arg1, p_entry->arg2);
		if (p_hdr->delay)
			udelay((uint32_t)(p_hdr->delay));
	}
}

static void qla4_83xx_read_write_list(struct scsi_qla_host *ha,
				      struct qla4_83xx_reset_entry_hdr *p_hdr)
{
	struct qla4_83xx_entry *p_entry;
	uint32_t i;

	p_entry = (struct qla4_83xx_entry *)
		  ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));

	for (i = 0; i < p_hdr->count; i++, p_entry++) {
		qla4_83xx_read_write_crb_reg(ha, p_entry->arg1, p_entry->arg2);
		if (p_hdr->delay)
			udelay((uint32_t)(p_hdr->delay));
	}
}

static void qla4_83xx_poll_list(struct scsi_qla_host *ha,
				struct qla4_83xx_reset_entry_hdr *p_hdr)
{
	long delay;
	struct qla4_83xx_entry *p_entry;
	struct qla4_83xx_poll *p_poll;
	uint32_t i;
	uint32_t value;

	p_poll = (struct qla4_83xx_poll *)
		 ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));

	/* Entries start after the 8 byte qla4_83xx_poll header, which
	 * contains the test_mask and test_value. */
	p_entry = (struct qla4_83xx_entry *)((char *)p_poll +
					     sizeof(struct qla4_83xx_poll));

	delay = (long)p_hdr->delay;
	if (!delay) {
		for (i = 0; i < p_hdr->count; i++, p_entry++) {
			qla4_83xx_poll_reg(ha, p_entry->arg1, delay,
					   p_poll->test_mask,
					   p_poll->test_value);
		}
	} else {
		for (i = 0; i < p_hdr->count; i++, p_entry++) {
			if (qla4_83xx_poll_reg(ha, p_entry->arg1, delay,
					       p_poll->test_mask,
					       p_poll->test_value)) {
				qla4_83xx_rd_reg_indirect(ha, p_entry->arg1,
							  &value);
				qla4_83xx_rd_reg_indirect(ha, p_entry->arg2,
							  &value);
			}
		}
	}
}

static void qla4_83xx_poll_write_list(struct scsi_qla_host *ha,
				      struct qla4_83xx_reset_entry_hdr *p_hdr)
{
	long delay;
	struct qla4_83xx_quad_entry *p_entry;
	struct qla4_83xx_poll *p_poll;
	uint32_t i;

	p_poll = (struct qla4_83xx_poll *)
		 ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
	p_entry = (struct qla4_83xx_quad_entry *)
		  ((char *)p_poll + sizeof(struct qla4_83xx_poll));
	delay = (long)p_hdr->delay;

	for (i = 0; i < p_hdr->count; i++, p_entry++) {
		qla4_83xx_wr_reg_indirect(ha, p_entry->dr_addr,
					  p_entry->dr_value);
		qla4_83xx_wr_reg_indirect(ha, p_entry->ar_addr,
					  p_entry->ar_value);
		if (delay) {
			if (qla4_83xx_poll_reg(ha, p_entry->ar_addr, delay,
					       p_poll->test_mask,
					       p_poll->test_value)) {
				DEBUG2(ql4_printk(KERN_INFO, ha,
						  "%s: Timeout Error: poll list, item_num %d, entry_num %d\n",
						  __func__, i,
						  ha->reset_tmplt.seq_index));
			}
		}
	}
}

static void qla4_83xx_read_modify_write(struct scsi_qla_host *ha,
					struct qla4_83xx_reset_entry_hdr *p_hdr)
{
	struct qla4_83xx_entry *p_entry;
	struct qla4_83xx_rmw *p_rmw_hdr;
	uint32_t i;

	p_rmw_hdr = (struct qla4_83xx_rmw *)
		    ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
	p_entry = (struct qla4_83xx_entry *)
		  ((char *)p_rmw_hdr + sizeof(struct qla4_83xx_rmw));

	for (i = 0; i < p_hdr->count; i++, p_entry++) {
		qla4_83xx_rmw_crb_reg(ha, p_entry->arg1, p_entry->arg2,
				      p_rmw_hdr);
		if (p_hdr->delay)
			udelay((uint32_t)(p_hdr->delay));
	}
}

static void qla4_83xx_pause(struct scsi_qla_host *ha,
			    struct qla4_83xx_reset_entry_hdr *p_hdr)
{
	if (p_hdr->delay)
		mdelay((uint32_t)((long)p_hdr->delay));
}

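/**
 * qla4_83xx_poll_read_list - Write, poll, then capture register reads
 * @ha: Pointer to adapter structure
 * @p_hdr: Reset entry header for the POLL_READ_LIST opcode
 *
 * For each quad entry, writes ar_value to ar_addr, polls until the poll
 * condition is met, and stores the value read from dr_addr into
 * ha->reset_tmplt.array, where later RMW entries can reference it via
 * index_a.  The write index wraps to 1 at QLA83XX_MAX_RESET_SEQ_ENTRIES,
 * keeping index 0 free as the "read from register" sentinel used by
 * qla4_83xx_rmw_crb_reg().
 **/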
static void qla4_83xx_poll_read_list(struct scsi_qla_host *ha,
				     struct qla4_83xx_reset_entry_hdr *p_hdr)
{
	long delay;
	int index;
	struct qla4_83xx_quad_entry *p_entry;
	struct qla4_83xx_poll *p_poll;
	uint32_t i;
	uint32_t value;

	p_poll = (struct qla4_83xx_poll *)
		 ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
	p_entry = (struct qla4_83xx_quad_entry *)
		  ((char *)p_poll + sizeof(struct qla4_83xx_poll));
	delay = (long)p_hdr->delay;

	for (i = 0; i < p_hdr->count; i++, p_entry++) {
		qla4_83xx_wr_reg_indirect(ha, p_entry->ar_addr,
					  p_entry->ar_value);
		if (delay) {
			if (qla4_83xx_poll_reg(ha, p_entry->ar_addr, delay,
					       p_poll->test_mask,
					       p_poll->test_value)) {
				DEBUG2(ql4_printk(KERN_INFO, ha,
						  "%s: Timeout Error: poll list, Item_num %d, entry_num %d\n",
						  __func__, i,
						  ha->reset_tmplt.seq_index));
			} else {
				index = ha->reset_tmplt.array_index;
				qla4_83xx_rd_reg_indirect(ha, p_entry->dr_addr,
							  &value);
				ha->reset_tmplt.array[index++] = value;

				if (index == QLA83XX_MAX_RESET_SEQ_ENTRIES)
					ha->reset_tmplt.array_index = 1;
			}
		}
	}
}

static void qla4_83xx_seq_end(struct scsi_qla_host *ha,
			      struct qla4_83xx_reset_entry_hdr *p_hdr)
{
	ha->reset_tmplt.seq_end = 1;
}

static void qla4_83xx_template_end(struct scsi_qla_host *ha,
				   struct qla4_83xx_reset_entry_hdr *p_hdr)
{
	ha->reset_tmplt.template_end = 1;

	if (ha->reset_tmplt.seq_error == 0) {
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "%s: Reset sequence completed SUCCESSFULLY.\n",
				  __func__));
	} else {
		ql4_printk(KERN_ERR, ha, "%s: Reset sequence completed with some timeout errors.\n",
			   __func__);
	}
}

/**
 * qla4_83xx_process_reset_template - Process reset template.
 *
 * Process all entries in the reset template until an entry with the
 * SEQ_END opcode, which indicates the end of reset template processing.
 * Each entry has a reset entry header carrying the opcode/command, the
 * size of the entry, the number of entries in the sub-sequence, and a
 * delay in microseconds or a timeout in milliseconds.
 *
 * @ha : Pointer to adapter structure
 * @p_buff : Common reset entry header.
 **/
static void qla4_83xx_process_reset_template(struct scsi_qla_host *ha,
					     char *p_buff)
{
	int index, entries;
	struct qla4_83xx_reset_entry_hdr *p_hdr;
	char *p_entry = p_buff;

	ha->reset_tmplt.seq_end = 0;
	ha->reset_tmplt.template_end = 0;
	entries = ha->reset_tmplt.hdr->entries;
	index = ha->reset_tmplt.seq_index;

	for (; (!ha->reset_tmplt.seq_end) && (index < entries); index++) {
		p_hdr = (struct qla4_83xx_reset_entry_hdr *)p_entry;
		switch (p_hdr->cmd) {
		case OPCODE_NOP:
			break;
		case OPCODE_WRITE_LIST:
			qla4_83xx_write_list(ha, p_hdr);
			break;
		case OPCODE_READ_WRITE_LIST:
			qla4_83xx_read_write_list(ha, p_hdr);
			break;
		case OPCODE_POLL_LIST:
			qla4_83xx_poll_list(ha, p_hdr);
			break;
		case OPCODE_POLL_WRITE_LIST:
			qla4_83xx_poll_write_list(ha, p_hdr);
			break;
		case OPCODE_READ_MODIFY_WRITE:
			qla4_83xx_read_modify_write(ha, p_hdr);
			break;
		case OPCODE_SEQ_PAUSE:
			qla4_83xx_pause(ha, p_hdr);
			break;
		case OPCODE_SEQ_END:
			qla4_83xx_seq_end(ha, p_hdr);
			break;
		case OPCODE_TMPL_END:
			qla4_83xx_template_end(ha, p_hdr);
			break;
		case OPCODE_POLL_READ_LIST:
			qla4_83xx_poll_read_list(ha, p_hdr);
			break;
		default:
			ql4_printk(KERN_ERR, ha, "%s: Unknown command ==> 0x%04x on entry = %d\n",
				   __func__, p_hdr->cmd, index);
			break;
		}

		/* Set pointer to next entry in the sequence. */
		p_entry += p_hdr->size;
	}

	ha->reset_tmplt.seq_index = index;
}

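/*
 * The STOP and INIT sub-sequences below are expected to terminate with a
 * SEQ_END entry (seq_end flag), while START runs through to the TMPL_END
 * entry (template_end flag); each helper warns if its sequence ended
 * abruptly.
 */
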
static void qla4_83xx_process_stop_seq(struct scsi_qla_host *ha)
{
	ha->reset_tmplt.seq_index = 0;
	qla4_83xx_process_reset_template(ha, ha->reset_tmplt.stop_offset);

	if (ha->reset_tmplt.seq_end != 1)
		ql4_printk(KERN_ERR, ha, "%s: Abrupt STOP Sub-Sequence end.\n",
			   __func__);
}

static void qla4_83xx_process_start_seq(struct scsi_qla_host *ha)
{
	qla4_83xx_process_reset_template(ha, ha->reset_tmplt.start_offset);

	if (ha->reset_tmplt.template_end != 1)
		ql4_printk(KERN_ERR, ha, "%s: Abrupt START Sub-Sequence end.\n",
			   __func__);
}

static void qla4_83xx_process_init_seq(struct scsi_qla_host *ha)
{
	qla4_83xx_process_reset_template(ha, ha->reset_tmplt.init_offset);

	if (ha->reset_tmplt.seq_end != 1)
		ql4_printk(KERN_ERR, ha, "%s: Abrupt INIT Sub-Sequence end.\n",
			   __func__);
}

static int qla4_83xx_restart(struct scsi_qla_host *ha)
{
	int ret_val = QLA_SUCCESS;

	qla4_83xx_process_stop_seq(ha);

	/* Collect minidump */
	if (!test_and_clear_bit(AF_83XX_NO_FW_DUMP, &ha->flags))
		qla4_8xxx_get_minidump(ha);

	qla4_83xx_process_init_seq(ha);

	if (qla4_83xx_copy_bootloader(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: Copy bootloader, firmware restart failed!\n",
			   __func__);
		ret_val = QLA_ERROR;
		goto exit_restart;
	}

	qla4_83xx_wr_reg(ha, QLA83XX_FW_IMAGE_VALID, QLA83XX_BOOT_FROM_FLASH);
	qla4_83xx_process_start_seq(ha);

exit_restart:
	return ret_val;
}

int qla4_83xx_start_firmware(struct scsi_qla_host *ha)
{
	int ret_val = QLA_SUCCESS;

	ret_val = qla4_83xx_restart(ha);
	if (ret_val == QLA_ERROR) {
		ql4_printk(KERN_ERR, ha, "%s: Restart error\n", __func__);
		goto exit_start_fw;
	} else {
		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Restart done\n",
				  __func__));
	}

	ret_val = qla4_83xx_check_cmd_peg_status(ha);
	if (ret_val == QLA_ERROR)
		ql4_printk(KERN_ERR, ha, "%s: Peg not initialized\n",
			   __func__);

exit_start_fw:
	return ret_val;
}

/*----------------------Interrupt Related functions ---------------------*/

static void qla4_83xx_disable_iocb_intrs(struct scsi_qla_host *ha)
{
	if (test_and_clear_bit(AF_83XX_IOCB_INTR_ON, &ha->flags))
		qla4_8xxx_intr_disable(ha);
}

static void qla4_83xx_disable_mbox_intrs(struct scsi_qla_host *ha)
{
	uint32_t mb_int, ret;

	if (test_and_clear_bit(AF_83XX_MBOX_INTR_ON, &ha->flags)) {
		ret = readl(&ha->qla4_83xx_reg->mbox_int);
		mb_int = ret & ~INT_ENABLE_FW_MB;
		writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
		writel(1, &ha->qla4_83xx_reg->leg_int_mask);
	}
}

void qla4_83xx_disable_intrs(struct scsi_qla_host *ha)
{
	qla4_83xx_disable_mbox_intrs(ha);
	qla4_83xx_disable_iocb_intrs(ha);
}

static void qla4_83xx_enable_iocb_intrs(struct scsi_qla_host *ha)
{
	if (!test_bit(AF_83XX_IOCB_INTR_ON, &ha->flags)) {
		qla4_8xxx_intr_enable(ha);
		set_bit(AF_83XX_IOCB_INTR_ON, &ha->flags);
	}
}

void qla4_83xx_enable_mbox_intrs(struct scsi_qla_host *ha)
{
	uint32_t mb_int;

	if (!test_bit(AF_83XX_MBOX_INTR_ON, &ha->flags)) {
		mb_int = INT_ENABLE_FW_MB;
		writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
		writel(0, &ha->qla4_83xx_reg->leg_int_mask);
		set_bit(AF_83XX_MBOX_INTR_ON, &ha->flags);
	}
}

void qla4_83xx_enable_intrs(struct scsi_qla_host *ha)
{
	qla4_83xx_enable_mbox_intrs(ha);
	qla4_83xx_enable_iocb_intrs(ha);
}

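/**
 * qla4_83xx_queue_mbox_cmd - Post a mailbox command to the firmware
 * @ha: Pointer to adapter structure
 * @mbx_cmd: Array of mailbox register values to load
 * @incount: Number of mailbox registers to load
 *
 * Mailbox 0 is written last, then HINT_MBX_INT_PENDING is set in the host
 * interrupt register to signal the firmware that a command is pending.
 **/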
void qla4_83xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
			      int incount)
{
	int i;

	/* Load all mailbox registers, except mailbox 0. */
	for (i = 1; i < incount; i++)
		writel(mbx_cmd[i], &ha->qla4_83xx_reg->mailbox_in[i]);

	writel(mbx_cmd[0], &ha->qla4_83xx_reg->mailbox_in[0]);

	/* Set Host Interrupt register to 1, to tell the firmware that
	 * a mailbox command is pending. The firmware, after reading the
	 * mailbox command, clears the host interrupt register */
	writel(HINT_MBX_INT_PENDING, &ha->qla4_83xx_reg->host_intr);
}

void qla4_83xx_process_mbox_intr(struct scsi_qla_host *ha, int outcount)
{
	int intr_status;

	intr_status = readl(&ha->qla4_83xx_reg->risc_intr);
	if (intr_status) {
		ha->mbox_status_count = outcount;
		ha->isp_ops->interrupt_service_routine(ha, intr_status);
	}
}

/**
 * qla4_83xx_isp_reset - Resets ISP and aborts all outstanding commands.
 * @ha: pointer to host adapter structure.
 **/
int qla4_83xx_isp_reset(struct scsi_qla_host *ha)
{
	int rval;
	uint32_t dev_state;

	ha->isp_ops->idc_lock(ha);
	dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);

	if (ql4xdontresethba)
		qla4_83xx_set_idc_dontreset(ha);

	if (dev_state == QLA8XXX_DEV_READY) {
		/* If IDC_CTRL DONTRESETHBA_BIT0 is set, don't do reset
		 * recovery */
		if (qla4_83xx_idc_dontreset(ha) == DONTRESET_BIT0) {
			ql4_printk(KERN_ERR, ha, "%s: Reset recovery disabled\n",
				   __func__);
			rval = QLA_ERROR;
			goto exit_isp_reset;
		}

		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET\n",
				  __func__));
		qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
				    QLA8XXX_DEV_NEED_RESET);

	} else {
		/* If device_state is NEED_RESET, go ahead with the reset,
		 * irrespective of ql4xdontresethba. This is to allow a
		 * non-reset-owner to force a reset. The non-reset-owner sets
		 * IDC_CTRL BIT0 to prevent the reset-owner from doing a reset
		 * and then forces a reset by setting device_state to
		 * NEED_RESET. */
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "%s: HW state already set to NEED_RESET\n",
				  __func__));
	}

	/* For ISP8324 and ISP8042, the reset owner is NIC, iSCSI or FCoE
	 * based on priority and which drivers are present. Unlike ISP8022,
	 * the function setting NEED_RESET may not be the reset owner. */
	if (qla4_83xx_can_perform_reset(ha))
		set_bit(AF_8XXX_RST_OWNER, &ha->flags);

	ha->isp_ops->idc_unlock(ha);
	rval = qla4_8xxx_device_state_handler(ha);

	ha->isp_ops->idc_lock(ha);
	qla4_8xxx_clear_rst_ready(ha);
exit_isp_reset:
	ha->isp_ops->idc_unlock(ha);

	if (rval == QLA_SUCCESS)
		clear_bit(AF_FW_RECOVERY, &ha->flags);

	return rval;
}

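/*
 * qla4_83xx_dump_pause_control_regs - Dump (via DEBUG2) the SRE-Shim
 * control register, the per-port Rx buffer pause thresholds, the traffic
 * class max cell registers, the Rx traffic class statistics and the IFB
 * pause thresholds.
 */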
static void qla4_83xx_dump_pause_control_regs(struct scsi_qla_host *ha)
{
	u32 val = 0, val1 = 0;
	int i, status = QLA_SUCCESS;

	status = qla4_83xx_rd_reg_indirect(ha, QLA83XX_SRE_SHIM_CONTROL, &val);
	DEBUG2(ql4_printk(KERN_INFO, ha, "SRE-Shim Ctrl:0x%x\n", val));

	/* Port 0 Rx Buffer Pause Threshold Registers. */
	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "Port 0 Rx Buffer Pause Threshold Registers[TC7..TC0]:"));
	for (i = 0; i < 8; i++) {
		status = qla4_83xx_rd_reg_indirect(ha,
				QLA83XX_PORT0_RXB_PAUSE_THRS + (i * 0x4), &val);
		DEBUG2(pr_info("0x%x ", val));
	}

	DEBUG2(pr_info("\n"));

	/* Port 1 Rx Buffer Pause Threshold Registers. */
	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "Port 1 Rx Buffer Pause Threshold Registers[TC7..TC0]:"));
	for (i = 0; i < 8; i++) {
		status = qla4_83xx_rd_reg_indirect(ha,
				QLA83XX_PORT1_RXB_PAUSE_THRS + (i * 0x4), &val);
		DEBUG2(pr_info("0x%x ", val));
	}

	DEBUG2(pr_info("\n"));

	/* Port 0 RxB Traffic Class Max Cell Registers. */
	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "Port 0 RxB Traffic Class Max Cell Registers[3..0]:"));
	for (i = 0; i < 4; i++) {
		status = qla4_83xx_rd_reg_indirect(ha,
				QLA83XX_PORT0_RXB_TC_MAX_CELL + (i * 0x4), &val);
		DEBUG2(pr_info("0x%x ", val));
	}

	DEBUG2(pr_info("\n"));

	/* Port 1 RxB Traffic Class Max Cell Registers. */
	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "Port 1 RxB Traffic Class Max Cell Registers[3..0]:"));
	for (i = 0; i < 4; i++) {
		status = qla4_83xx_rd_reg_indirect(ha,
				QLA83XX_PORT1_RXB_TC_MAX_CELL + (i * 0x4), &val);
		DEBUG2(pr_info("0x%x ", val));
	}

	DEBUG2(pr_info("\n"));

	/* Port 0 RxB Rx Traffic Class Stats. */
	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "Port 0 RxB Rx Traffic Class Stats [TC7..TC0]"));
	for (i = 7; i >= 0; i--) {
		status = qla4_83xx_rd_reg_indirect(ha,
						   QLA83XX_PORT0_RXB_TC_STATS,
						   &val);
		val &= ~(0x7 << 29);	/* Reset bits 29 to 31 */
		qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT0_RXB_TC_STATS,
					  (val | (i << 29)));
		status = qla4_83xx_rd_reg_indirect(ha,
						   QLA83XX_PORT0_RXB_TC_STATS,
						   &val);
		DEBUG2(pr_info("0x%x ", val));
	}

	DEBUG2(pr_info("\n"));

	/* Port 1 RxB Rx Traffic Class Stats. */
	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "Port 1 RxB Rx Traffic Class Stats [TC7..TC0]"));
	for (i = 7; i >= 0; i--) {
		status = qla4_83xx_rd_reg_indirect(ha,
						   QLA83XX_PORT1_RXB_TC_STATS,
						   &val);
		val &= ~(0x7 << 29);	/* Reset bits 29 to 31 */
		qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT1_RXB_TC_STATS,
					  (val | (i << 29)));
		status = qla4_83xx_rd_reg_indirect(ha,
						   QLA83XX_PORT1_RXB_TC_STATS,
						   &val);
		DEBUG2(pr_info("0x%x ", val));
	}

	DEBUG2(pr_info("\n"));

	status = qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT2_IFB_PAUSE_THRS,
					   &val);
	status = qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT3_IFB_PAUSE_THRS,
					   &val1);

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "IFB-Pause Thresholds: Port 2:0x%x, Port 3:0x%x\n",
			  val, val1));
}

static void __qla4_83xx_disable_pause(struct scsi_qla_host *ha)
{
	int i;

	/* set SRE-Shim Control Register */
	qla4_83xx_wr_reg_indirect(ha, QLA83XX_SRE_SHIM_CONTROL,
				  QLA83XX_SET_PAUSE_VAL);

	for (i = 0; i < 8; i++) {
		/* Port 0 Rx Buffer Pause Threshold Registers. */
		qla4_83xx_wr_reg_indirect(ha,
				QLA83XX_PORT0_RXB_PAUSE_THRS + (i * 0x4),
				QLA83XX_SET_PAUSE_VAL);
		/* Port 1 Rx Buffer Pause Threshold Registers. */
		qla4_83xx_wr_reg_indirect(ha,
				QLA83XX_PORT1_RXB_PAUSE_THRS + (i * 0x4),
				QLA83XX_SET_PAUSE_VAL);
	}

	for (i = 0; i < 4; i++) {
		/* Port 0 RxB Traffic Class Max Cell Registers. */
		qla4_83xx_wr_reg_indirect(ha,
				QLA83XX_PORT0_RXB_TC_MAX_CELL + (i * 0x4),
				QLA83XX_SET_TC_MAX_CELL_VAL);
		/* Port 1 RxB Traffic Class Max Cell Registers. */
		qla4_83xx_wr_reg_indirect(ha,
				QLA83XX_PORT1_RXB_TC_MAX_CELL + (i * 0x4),
				QLA83XX_SET_TC_MAX_CELL_VAL);
	}

	qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT2_IFB_PAUSE_THRS,
				  QLA83XX_SET_PAUSE_VAL);
	qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT3_IFB_PAUSE_THRS,
				  QLA83XX_SET_PAUSE_VAL);

	ql4_printk(KERN_INFO, ha, "Disabled pause frames successfully.\n");
}

/**
 * qla4_83xx_eport_init - Initialize EPort.
 * @ha: Pointer to host adapter structure.
 *
 * If the EPort hardware is in reset when pause frames are disabled,
 * serious hardware wedging can occur. To prevent this, perform EPort init
 * every time before disabling pause frames.
 **/
static void qla4_83xx_eport_init(struct scsi_qla_host *ha)
{
	/* Clear the 8 registers */
	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_REG, 0x0);
	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT0, 0x0);
	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT1, 0x0);
	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT2, 0x0);
	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT3, 0x0);
	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_SRE_SHIM, 0x0);
	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_EPG_SHIM, 0x0);
	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_ETHER_PCS, 0x0);

	/* Write any value to Reset Control register */
	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_CONTROL, 0xFF);

	ql4_printk(KERN_INFO, ha, "EPORT is out of reset.\n");
}

void qla4_83xx_disable_pause(struct scsi_qla_host *ha)
{
	ha->isp_ops->idc_lock(ha);
	/* Before disabling pause frames, ensure that eport is not in reset */
	qla4_83xx_eport_init(ha);
	qla4_83xx_dump_pause_control_regs(ha);
	__qla4_83xx_disable_pause(ha);
	ha->isp_ops->idc_unlock(ha);
}