// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 */

/*
 * Table for showing the current message id in use for particular level
 * Change this table for addition of log/debug messages.
 * ----------------------------------------------------------------------
 * |             Level            |   Last Value Used  |     Holes      |
 * ----------------------------------------------------------------------
 * | Module Init and Probe        |       0x0199       |                |
 * | Mailbox commands             |       0x1206       | 0x11a5-0x11ff  |
 * | Device Discovery             |       0x2134       | 0x210e-0x2116  |
 * |                              |                    | 0x211a         |
 * |                              |                    | 0x211c-0x2128  |
 * |                              |                    | 0x212c-0x2134  |
 * | Queue Command and IO tracing |       0x3074       | 0x300b         |
 * |                              |                    | 0x3027-0x3028  |
 * |                              |                    | 0x303d-0x3041  |
 * |                              |                    | 0x302d,0x3033  |
 * |                              |                    | 0x3036,0x3038  |
 * |                              |                    | 0x303a         |
 * | DPC Thread                   |       0x4023       | 0x4002,0x4013  |
 * | Async Events                 |       0x509c       |                |
 * | Timer Routines               |       0x6012       |                |
 * | User Space Interactions      |       0x70e3       | 0x7018,0x702e  |
 * |                              |                    | 0x7020,0x7024  |
 * |                              |                    | 0x7039,0x7045  |
 * |                              |                    | 0x7073-0x7075  |
 * |                              |                    | 0x70a5-0x70a6  |
 * |                              |                    | 0x70a8,0x70ab  |
 * |                              |                    | 0x70ad-0x70ae  |
 * |                              |                    | 0x70d0-0x70d6  |
 * |                              |                    | 0x70d7-0x70db  |
 * | Task Management              |       0x8042       | 0x8000         |
 * |                              |                    | 0x8019         |
 * |                              |                    | 0x8025,0x8026  |
 * |                              |                    | 0x8031,0x8032  |
 * |                              |                    | 0x8039,0x803c  |
 * | AER/EEH                      |       0x9011       |                |
 * | Virtual Port                 |       0xa007       |                |
 * | ISP82XX Specific             |       0xb157       | 0xb002,0xb024  |
 * |                              |                    | 0xb09e,0xb0ae  |
 * |                              |                    | 0xb0c3,0xb0c6  |
 * |                              |                    | 0xb0e0-0xb0ef  |
 * |                              |                    | 0xb085,0xb0dc  |
 * |                              |                    | 0xb107,0xb108  |
 * |                              |                    | 0xb111,0xb11e  |
 * |                              |                    | 0xb12c,0xb12d  |
 * |                              |                    | 0xb13a,0xb142  |
 * |                              |                    | 0xb13c-0xb140  |
 * |                              |                    | 0xb149         |
 * | MultiQ                       |       0xc010       |                |
 * | Misc                         |       0xd303       | 0xd031-0xd0ff  |
 * |                              |                    | 0xd101-0xd1fe  |
 * |                              |                    | 0xd214-0xd2fe  |
 * | Target Mode                  |       0xe081       |                |
 * | Target Mode Management       |       0xf09b       | 0xf002         |
 * |                              |                    | 0xf046-0xf049  |
 * | Target Mode Task Management  |       0x1000d      |                |
 * ----------------------------------------------------------------------
 */

#include "qla_def.h"

#include <linux/delay.h>
#define CREATE_TRACE_POINTS
#include <trace/events/qla.h>

static uint32_t ql_dbg_offset = 0x800;

static inline void
qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump *fw_dump)
{
	fw_dump->fw_major_version = htonl(ha->fw_major_version);
	fw_dump->fw_minor_version = htonl(ha->fw_minor_version);
	fw_dump->fw_subminor_version = htonl(ha->fw_subminor_version);
	fw_dump->fw_attributes = htonl(ha->fw_attributes);

	fw_dump->vendor = htonl(ha->pdev->vendor);
	fw_dump->device = htonl(ha->pdev->device);
	fw_dump->subsystem_vendor = htonl(ha->pdev->subsystem_vendor);
	fw_dump->subsystem_device = htonl(ha->pdev->subsystem_device);
}

static inline void *
qla2xxx_copy_queues(struct qla_hw_data *ha, void *ptr)
{
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];
	/* Request queue. */
	memcpy(ptr, req->ring, req->length *
	    sizeof(request_t));

	/* Response queue. */
	ptr += req->length * sizeof(request_t);
	memcpy(ptr, rsp->ring, rsp->length *
	    sizeof(response_t));

	return ptr + (rsp->length * sizeof(response_t));
}

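/*
 * The RISC RAM dump helpers below reuse the GID list DMA buffer
 * (ha->gid_list / ha->gid_list_dma) as a bounce buffer and poll for
 * mailbox completion in udelay(5) steps with an overall budget of
 * 6,000,000 iterations, i.e. roughly a 30 second timeout.
 */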
int
qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
	uint32_t ram_dwords, void **nxt)
{
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	dma_addr_t dump_dma = ha->gid_list_dma;
	uint32_t *chunk = (uint32_t *)ha->gid_list;
	uint32_t dwords = qla2x00_gid_list_size(ha) / 4;
	uint32_t stat;
	ulong i, j, timer = 6000000;
	int rval = QLA_FUNCTION_FAILED;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

	if (qla_pci_disconnected(vha, reg))
		return rval;

	for (i = 0; i < ram_dwords; i += dwords, addr += dwords) {
		if (i + dwords > ram_dwords)
			dwords = ram_dwords - i;

		wrt_reg_word(&reg->mailbox0, MBC_LOAD_DUMP_MPI_RAM);
		wrt_reg_word(&reg->mailbox1, LSW(addr));
		wrt_reg_word(&reg->mailbox8, MSW(addr));

		wrt_reg_word(&reg->mailbox2, MSW(LSD(dump_dma)));
		wrt_reg_word(&reg->mailbox3, LSW(LSD(dump_dma)));
		wrt_reg_word(&reg->mailbox6, MSW(MSD(dump_dma)));
		wrt_reg_word(&reg->mailbox7, LSW(MSD(dump_dma)));

		wrt_reg_word(&reg->mailbox4, MSW(dwords));
		wrt_reg_word(&reg->mailbox5, LSW(dwords));

		wrt_reg_word(&reg->mailbox9, 0);
		wrt_reg_dword(&reg->hccr, HCCRX_SET_HOST_INT);

		ha->flags.mbox_int = 0;
		while (timer--) {
			udelay(5);

			if (qla_pci_disconnected(vha, reg))
				return rval;

			stat = rd_reg_dword(&reg->host_status);
			/* Check for pending interrupts. */
			if (!(stat & HSRX_RISC_INT))
				continue;

			stat &= 0xff;
			if (stat != 0x1 && stat != 0x2 &&
			    stat != 0x10 && stat != 0x11) {

				/* Clear this intr; it wasn't a mailbox intr */
				wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
				rd_reg_dword(&reg->hccr);
				continue;
			}

			set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
			rval = rd_reg_word(&reg->mailbox0) & MBS_MASK;
			wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
			rd_reg_dword(&reg->hccr);
			break;
		}
		ha->flags.mbox_int = 1;
		*nxt = ram + i;

		if (!test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
			/* no interrupt, timed out */
			return rval;
		}
		if (rval) {
			/* error completion status */
			return rval;
		}
		for (j = 0; j < dwords; j++) {
			ram[i + j] =
			    (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ?
			    chunk[j] : swab32(chunk[j]);
		}
	}

	*nxt = ram + i;
	return QLA_SUCCESS;
}

int
qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, __be32 *ram,
	uint32_t ram_dwords, void **nxt)
{
	int rval = QLA_FUNCTION_FAILED;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	dma_addr_t dump_dma = ha->gid_list_dma;
	uint32_t *chunk = (uint32_t *)ha->gid_list;
	uint32_t dwords = qla2x00_gid_list_size(ha) / 4;
	uint32_t stat;
	ulong i, j, timer = 6000000;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

	if (qla_pci_disconnected(vha, reg))
		return rval;

	for (i = 0; i < ram_dwords; i += dwords, addr += dwords) {
		if (i + dwords > ram_dwords)
			dwords = ram_dwords - i;

		wrt_reg_word(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
		wrt_reg_word(&reg->mailbox1, LSW(addr));
		wrt_reg_word(&reg->mailbox8, MSW(addr));
		wrt_reg_word(&reg->mailbox10, 0);

		wrt_reg_word(&reg->mailbox2, MSW(LSD(dump_dma)));
		wrt_reg_word(&reg->mailbox3, LSW(LSD(dump_dma)));
		wrt_reg_word(&reg->mailbox6, MSW(MSD(dump_dma)));
		wrt_reg_word(&reg->mailbox7, LSW(MSD(dump_dma)));

		wrt_reg_word(&reg->mailbox4, MSW(dwords));
		wrt_reg_word(&reg->mailbox5, LSW(dwords));
		wrt_reg_dword(&reg->hccr, HCCRX_SET_HOST_INT);

		ha->flags.mbox_int = 0;
		while (timer--) {
			udelay(5);
			if (qla_pci_disconnected(vha, reg))
				return rval;

			stat = rd_reg_dword(&reg->host_status);
			/* Check for pending interrupts. */
			if (!(stat & HSRX_RISC_INT))
				continue;

			stat &= 0xff;
			if (stat != 0x1 && stat != 0x2 &&
			    stat != 0x10 && stat != 0x11) {
				wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
				rd_reg_dword(&reg->hccr);
				continue;
			}

			set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
			rval = rd_reg_word(&reg->mailbox0) & MBS_MASK;
			wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
			rd_reg_dword(&reg->hccr);
			break;
		}
		ha->flags.mbox_int = 1;
		*nxt = ram + i;

		if (!test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
			/* no interrupt, timed out */
			return rval;
		}
		if (rval) {
			/* error completion status */
			return rval;
		}
		for (j = 0; j < dwords; j++) {
			ram[i + j] = (__force __be32)
			    ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) ?
			     chunk[j] : swab32(chunk[j]));
		}
	}

	*nxt = ram + i;
	return QLA_SUCCESS;
}

static int
qla24xx_dump_memory(struct qla_hw_data *ha, __be32 *code_ram,
	uint32_t cram_size, void **nxt)
{
	int rval;

	/* Code RAM. */
	rval = qla24xx_dump_ram(ha, 0x20000, code_ram, cram_size / 4, nxt);
	if (rval != QLA_SUCCESS)
		return rval;

	set_bit(RISC_SRAM_DUMP_CMPL, &ha->fw_dump_cap_flags);

	/* External Memory. */
	rval = qla24xx_dump_ram(ha, 0x100000, *nxt,
	    ha->fw_memory_size - 0x100000 + 1, nxt);
	if (rval == QLA_SUCCESS)
		set_bit(RISC_EXT_MEM_DUMP_CMPL, &ha->fw_dump_cap_flags);

	return rval;
}

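/*
 * qla24xx_read_window() selects the register window named by 'iobase',
 * copies 'count' dwords from that window into the dump buffer in
 * big-endian order, and returns the advanced buffer pointer.
 */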
static __be32 *
qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
	uint32_t count, __be32 *buf)
{
	__le32 __iomem *dmp_reg;

	wrt_reg_dword(&reg->iobase_addr, iobase);
	dmp_reg = &reg->iobase_window;
	for ( ; count--; dmp_reg++)
		*buf++ = htonl(rd_reg_dword(dmp_reg));

	return buf;
}

void
qla24xx_pause_risc(struct device_reg_24xx __iomem *reg, struct qla_hw_data *ha)
{
	wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_PAUSE);

	/* 100 usec delay is sufficient for hardware to pause RISC */
	udelay(100);
	if (rd_reg_dword(&reg->host_status) & HSRX_RISC_PAUSED)
		set_bit(RISC_PAUSE_CMPL, &ha->fw_dump_cap_flags);
}

int
qla24xx_soft_reset(struct qla_hw_data *ha)
{
	int rval = QLA_SUCCESS;
	uint32_t cnt;
	uint16_t wd;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	/*
	 * Reset RISC. The delay is dependent on system architecture.
	 * Driver can proceed with the reset sequence after waiting
	 * for a timeout period.
	 */
	wrt_reg_dword(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	for (cnt = 0; cnt < 30000; cnt++) {
		if ((rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
			break;

		udelay(10);
	}
	if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
		set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);

	wrt_reg_dword(&reg->ctrl_status,
	    CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);

	udelay(100);

	/* Wait for soft-reset to complete. */
	for (cnt = 0; cnt < 30000; cnt++) {
		if ((rd_reg_dword(&reg->ctrl_status) &
		    CSRX_ISP_SOFT_RESET) == 0)
			break;

		udelay(10);
	}
	if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
		set_bit(ISP_RESET_CMPL, &ha->fw_dump_cap_flags);

	wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_RESET);
	rd_reg_dword(&reg->hccr);		/* PCI Posting. */

	for (cnt = 10000; rd_reg_word(&reg->mailbox0) != 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt)
			udelay(10);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS)
		set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);

	return rval;
}

static int
qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, __be16 *ram,
	uint32_t ram_words, void **nxt)
{
	int rval;
	uint32_t cnt, stat, timer, words, idx;
	uint16_t mb0;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	dma_addr_t dump_dma = ha->gid_list_dma;
	__le16 *dump = (__force __le16 *)ha->gid_list;

	rval = QLA_SUCCESS;
	mb0 = 0;

	WRT_MAILBOX_REG(ha, reg, 0, MBC_DUMP_RISC_RAM_EXTENDED);
	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

	words = qla2x00_gid_list_size(ha) / 2;
	for (cnt = 0; cnt < ram_words && rval == QLA_SUCCESS;
	    cnt += words, addr += words) {
		if (cnt + words > ram_words)
			words = ram_words - cnt;

		WRT_MAILBOX_REG(ha, reg, 1, LSW(addr));
		WRT_MAILBOX_REG(ha, reg, 8, MSW(addr));

		WRT_MAILBOX_REG(ha, reg, 2, MSW(dump_dma));
		WRT_MAILBOX_REG(ha, reg, 3, LSW(dump_dma));
		WRT_MAILBOX_REG(ha, reg, 6, MSW(MSD(dump_dma)));
		WRT_MAILBOX_REG(ha, reg, 7, LSW(MSD(dump_dma)));

		WRT_MAILBOX_REG(ha, reg, 4, words);
		wrt_reg_word(&reg->hccr, HCCR_SET_HOST_INT);

		for (timer = 6000000; timer; timer--) {
			/* Check for pending interrupts. */
			stat = rd_reg_dword(&reg->u.isp2300.host_status);
			if (stat & HSR_RISC_INT) {
				stat &= 0xff;

				if (stat == 0x1 || stat == 0x2) {
					set_bit(MBX_INTERRUPT,
					    &ha->mbx_cmd_flags);

					mb0 = RD_MAILBOX_REG(ha, reg, 0);

					/* Release mailbox registers. */
					wrt_reg_word(&reg->semaphore, 0);
					wrt_reg_word(&reg->hccr,
					    HCCR_CLR_RISC_INT);
					rd_reg_word(&reg->hccr);
					break;
				} else if (stat == 0x10 || stat == 0x11) {
					set_bit(MBX_INTERRUPT,
					    &ha->mbx_cmd_flags);

					mb0 = RD_MAILBOX_REG(ha, reg, 0);

					wrt_reg_word(&reg->hccr,
					    HCCR_CLR_RISC_INT);
					rd_reg_word(&reg->hccr);
					break;
				}

				/* clear this intr; it wasn't a mailbox intr */
				wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
				rd_reg_word(&reg->hccr);
			}
			udelay(5);
		}

		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
			rval = mb0 & MBS_MASK;
			for (idx = 0; idx < words; idx++)
				ram[cnt + idx] =
				    cpu_to_be16(le16_to_cpu(dump[idx]));
		} else {
			rval = QLA_FUNCTION_FAILED;
		}
	}

	*nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
	return rval;
}

static inline void
qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count,
	__be16 *buf)
{
	__le16 __iomem *dmp_reg = &reg->u.isp2300.fb_cmd;

	for ( ; count--; dmp_reg++)
		*buf++ = htons(rd_reg_word(dmp_reg));
}

static inline void *
qla24xx_copy_eft(struct qla_hw_data *ha, void *ptr)
{
	if (!ha->eft)
		return ptr;

	memcpy(ptr, ha->eft, ntohl(ha->fw_dump->eft_size));
	return ptr + ntohl(ha->fw_dump->eft_size);
}

static inline void *
qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
{
	uint32_t cnt;
	__be32 *iter_reg;
	struct qla2xxx_fce_chain *fcec = ptr;

	if (!ha->fce)
		return ptr;

	*last_chain = &fcec->type;
	fcec->type = htonl(DUMP_CHAIN_FCE);
	fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) +
	    fce_calc_size(ha->fce_bufs));
	fcec->size = htonl(fce_calc_size(ha->fce_bufs));
	fcec->addr_l = htonl(LSD(ha->fce_dma));
	fcec->addr_h = htonl(MSD(ha->fce_dma));

	iter_reg = fcec->eregs;
	for (cnt = 0; cnt < 8; cnt++)
		*iter_reg++ = htonl(ha->fce_mb[cnt]);

	memcpy(iter_reg, ha->fce, ntohl(fcec->size));

	return (char *)iter_reg + ntohl(fcec->size);
}

static inline void *
qla25xx_copy_exlogin(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
{
	struct qla2xxx_offld_chain *c = ptr;

	if (!ha->exlogin_buf)
		return ptr;

	*last_chain = &c->type;

	c->type = cpu_to_be32(DUMP_CHAIN_EXLOGIN);
	c->chain_size = cpu_to_be32(sizeof(struct qla2xxx_offld_chain) +
	    ha->exlogin_size);
	c->size = cpu_to_be32(ha->exlogin_size);
	c->addr = cpu_to_be64(ha->exlogin_buf_dma);

	ptr += sizeof(struct qla2xxx_offld_chain);
	memcpy(ptr, ha->exlogin_buf, ha->exlogin_size);

	return (char *)ptr + be32_to_cpu(c->size);
}

static inline void *
qla81xx_copy_exchoffld(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
{
	struct qla2xxx_offld_chain *c = ptr;

	if (!ha->exchoffld_buf)
		return ptr;

	*last_chain = &c->type;

	c->type = cpu_to_be32(DUMP_CHAIN_EXCHG);
	c->chain_size = cpu_to_be32(sizeof(struct qla2xxx_offld_chain) +
	    ha->exchoffld_size);
	c->size = cpu_to_be32(ha->exchoffld_size);
	c->addr = cpu_to_be64(ha->exchoffld_buf_dma);

	ptr += sizeof(struct qla2xxx_offld_chain);
	memcpy(ptr, ha->exchoffld_buf, ha->exchoffld_size);

	return (char *)ptr + be32_to_cpu(c->size);
}

static inline void *
qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
	__be32 **last_chain)
{
	struct qla2xxx_mqueue_chain *q;
	struct qla2xxx_mqueue_header *qh;
	uint32_t num_queues;
	int que;
	struct {
		int length;
		void *ring;
	} aq, *aqp;

	if (!ha->tgt.atio_ring)
		return ptr;

	num_queues = 1;
	aqp = &aq;
	aqp->length = ha->tgt.atio_q_length;
	aqp->ring = ha->tgt.atio_ring;

	for (que = 0; que < num_queues; que++) {
		/* aqp = ha->atio_q_map[que]; */
		q = ptr;
		*last_chain = &q->type;
		q->type = htonl(DUMP_CHAIN_QUEUE);
		q->chain_size = htonl(
		    sizeof(struct qla2xxx_mqueue_chain) +
		    sizeof(struct qla2xxx_mqueue_header) +
		    (aqp->length * sizeof(request_t)));
		ptr += sizeof(struct qla2xxx_mqueue_chain);

		/* Add header. */
		qh = ptr;
		qh->queue = htonl(TYPE_ATIO_QUEUE);
		qh->number = htonl(que);
		qh->size = htonl(aqp->length * sizeof(request_t));
		ptr += sizeof(struct qla2xxx_mqueue_header);

		/* Add data. */
		memcpy(ptr, aqp->ring, aqp->length * sizeof(request_t));

		ptr += aqp->length * sizeof(request_t);
	}

	return ptr;
}

static inline void *
qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
{
	struct qla2xxx_mqueue_chain *q;
	struct qla2xxx_mqueue_header *qh;
	struct req_que *req;
	struct rsp_que *rsp;
	int que;

	if (!ha->mqenable)
		return ptr;

	/* Request queues */
	for (que = 1; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req)
			break;

		/* Add chain. */
		q = ptr;
		*last_chain = &q->type;
		q->type = htonl(DUMP_CHAIN_QUEUE);
		q->chain_size = htonl(
		    sizeof(struct qla2xxx_mqueue_chain) +
		    sizeof(struct qla2xxx_mqueue_header) +
		    (req->length * sizeof(request_t)));
		ptr += sizeof(struct qla2xxx_mqueue_chain);

		/* Add header. */
		qh = ptr;
		qh->queue = htonl(TYPE_REQUEST_QUEUE);
		qh->number = htonl(que);
		qh->size = htonl(req->length * sizeof(request_t));
		ptr += sizeof(struct qla2xxx_mqueue_header);

		/* Add data. */
		memcpy(ptr, req->ring, req->length * sizeof(request_t));
		ptr += req->length * sizeof(request_t);
	}

	/* Response queues */
	for (que = 1; que < ha->max_rsp_queues; que++) {
		rsp = ha->rsp_q_map[que];
		if (!rsp)
			break;

		/* Add chain. */
		q = ptr;
		*last_chain = &q->type;
		q->type = htonl(DUMP_CHAIN_QUEUE);
		q->chain_size = htonl(
		    sizeof(struct qla2xxx_mqueue_chain) +
		    sizeof(struct qla2xxx_mqueue_header) +
		    (rsp->length * sizeof(response_t)));
		ptr += sizeof(struct qla2xxx_mqueue_chain);

		/* Add header. */
		qh = ptr;
		qh->queue = htonl(TYPE_RESPONSE_QUEUE);
		qh->number = htonl(que);
		qh->size = htonl(rsp->length * sizeof(response_t));
		ptr += sizeof(struct qla2xxx_mqueue_header);

		/* Add data. */
		memcpy(ptr, rsp->ring, rsp->length * sizeof(response_t));
		ptr += rsp->length * sizeof(response_t);
	}

	return ptr;
}

static inline void *
qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
{
	uint32_t cnt, que_idx;
	uint8_t que_cnt;
	struct qla2xxx_mq_chain *mq = ptr;
	device_reg_t *reg;

	if (!ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
	    IS_QLA28XX(ha))
		return ptr;

	mq = ptr;
	*last_chain = &mq->type;
	mq->type = htonl(DUMP_CHAIN_MQ);
	mq->chain_size = htonl(sizeof(struct qla2xxx_mq_chain));

	que_cnt = ha->max_req_queues > ha->max_rsp_queues ?
	    ha->max_req_queues : ha->max_rsp_queues;
	mq->count = htonl(que_cnt);
	for (cnt = 0; cnt < que_cnt; cnt++) {
		reg = ISP_QUE_REG(ha, cnt);
		que_idx = cnt * 4;
		mq->qregs[que_idx] =
		    htonl(rd_reg_dword(&reg->isp25mq.req_q_in));
		mq->qregs[que_idx+1] =
		    htonl(rd_reg_dword(&reg->isp25mq.req_q_out));
		mq->qregs[que_idx+2] =
		    htonl(rd_reg_dword(&reg->isp25mq.rsp_q_in));
		mq->qregs[que_idx+3] =
		    htonl(rd_reg_dword(&reg->isp25mq.rsp_q_out));
	}

	return ptr + sizeof(struct qla2xxx_mq_chain);
}

void
qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
{
	struct qla_hw_data *ha = vha->hw;

	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0xd000,
		    "Failed to dump firmware (%x), dump status flags (0x%lx).\n",
		    rval, ha->fw_dump_cap_flags);
		ha->fw_dumped = false;
	} else {
		ql_log(ql_log_info, vha, 0xd001,
		    "Firmware dump saved to temp buffer (%ld/%p), dump status flags (0x%lx).\n",
		    vha->host_no, ha->fw_dump, ha->fw_dump_cap_flags);
		ha->fw_dumped = true;
		qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
	}
}

void qla2xxx_dump_fw(scsi_qla_host_t *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	vha->hw->isp_ops->fw_dump(vha);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
}

/**
 * qla2300_fw_dump() - Dumps binary data from the 2300 firmware.
 * @vha: HA context
 */
void
qla2300_fw_dump(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	__le16 __iomem *dmp_reg;
	struct qla2300_fw_dump *fw;
	void *nxt;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

	lockdep_assert_held(&ha->hardware_lock);

	if (!ha->fw_dump) {
		ql_log(ql_log_warn, vha, 0xd002,
		    "No buffer available for dump.\n");
		return;
	}

	if (ha->fw_dumped) {
		ql_log(ql_log_warn, vha, 0xd003,
		    "Firmware has been previously dumped (%p) "
		    "-- ignoring request.\n",
		    ha->fw_dump);
		return;
	}
	fw = &ha->fw_dump->isp.isp23;
	qla2xxx_prep_dump(ha, ha->fw_dump);

	rval = QLA_SUCCESS;
	fw->hccr = htons(rd_reg_word(&reg->hccr));

	/* Pause RISC. */
	wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
	if (IS_QLA2300(ha)) {
		for (cnt = 30000;
		    (rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
		    rval == QLA_SUCCESS; cnt--) {
			if (cnt)
				udelay(100);
			else
				rval = QLA_FUNCTION_TIMEOUT;
		}
	} else {
		rd_reg_word(&reg->hccr);	/* PCI Posting. */
		udelay(10);
	}

	if (rval == QLA_SUCCESS) {
		dmp_reg = &reg->flash_address;
		for (cnt = 0; cnt < ARRAY_SIZE(fw->pbiu_reg); cnt++, dmp_reg++)
			fw->pbiu_reg[cnt] = htons(rd_reg_word(dmp_reg));

		dmp_reg = &reg->u.isp2300.req_q_in;
		for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_host_reg);
		    cnt++, dmp_reg++)
			fw->risc_host_reg[cnt] = htons(rd_reg_word(dmp_reg));

		dmp_reg = &reg->u.isp2300.mailbox0;
		for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg);
		    cnt++, dmp_reg++)
			fw->mailbox_reg[cnt] = htons(rd_reg_word(dmp_reg));

		wrt_reg_word(&reg->ctrl_status, 0x40);
		qla2xxx_read_window(reg, 32, fw->resp_dma_reg);

		wrt_reg_word(&reg->ctrl_status, 0x50);
		qla2xxx_read_window(reg, 48, fw->dma_reg);

		wrt_reg_word(&reg->ctrl_status, 0x00);
		dmp_reg = &reg->risc_hw;
		for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_hdw_reg);
		    cnt++, dmp_reg++)
			fw->risc_hdw_reg[cnt] = htons(rd_reg_word(dmp_reg));

		wrt_reg_word(&reg->pcr, 0x2000);
		qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);

		wrt_reg_word(&reg->pcr, 0x2200);
		qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);

		wrt_reg_word(&reg->pcr, 0x2400);
		qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);

		wrt_reg_word(&reg->pcr, 0x2600);
		qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);

		wrt_reg_word(&reg->pcr, 0x2800);
		qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);

		wrt_reg_word(&reg->pcr, 0x2A00);
		qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);

		wrt_reg_word(&reg->pcr, 0x2C00);
		qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);

		wrt_reg_word(&reg->pcr, 0x2E00);
		qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);

		wrt_reg_word(&reg->ctrl_status, 0x10);
		qla2xxx_read_window(reg, 64, fw->frame_buf_hdw_reg);

		wrt_reg_word(&reg->ctrl_status, 0x20);
		qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);

		wrt_reg_word(&reg->ctrl_status, 0x30);
		qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);

		/* Reset RISC. */
		wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
		for (cnt = 0; cnt < 30000; cnt++) {
			if ((rd_reg_word(&reg->ctrl_status) &
			    CSR_ISP_SOFT_RESET) == 0)
				break;

			udelay(10);
		}
	}

	if (!IS_QLA2300(ha)) {
		for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
		    rval == QLA_SUCCESS; cnt--) {
			if (cnt)
				udelay(100);
			else
				rval = QLA_FUNCTION_TIMEOUT;
		}
	}

	/* Get RISC SRAM. */
	if (rval == QLA_SUCCESS)
		rval = qla2xxx_dump_ram(ha, 0x800, fw->risc_ram,
		    ARRAY_SIZE(fw->risc_ram), &nxt);

	/* Get stack SRAM. */
	if (rval == QLA_SUCCESS)
		rval = qla2xxx_dump_ram(ha, 0x10000, fw->stack_ram,
		    ARRAY_SIZE(fw->stack_ram), &nxt);

	/* Get data SRAM. */
	if (rval == QLA_SUCCESS)
		rval = qla2xxx_dump_ram(ha, 0x11000, fw->data_ram,
		    ha->fw_memory_size - 0x11000 + 1, &nxt);

	if (rval == QLA_SUCCESS)
		qla2xxx_copy_queues(ha, nxt);

	qla2xxx_dump_post_process(base_vha, rval);
}

/**
 * qla2100_fw_dump() - Dumps binary data from the 2100/2200 firmware.
 * @vha: HA context
 */
void
qla2100_fw_dump(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t cnt, timer;
	uint16_t risc_address = 0;
	uint16_t mb0 = 0, mb2 = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	__le16 __iomem *dmp_reg;
	struct qla2100_fw_dump *fw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

	lockdep_assert_held(&ha->hardware_lock);

	if (!ha->fw_dump) {
		ql_log(ql_log_warn, vha, 0xd004,
		    "No buffer available for dump.\n");
		return;
	}

	if (ha->fw_dumped) {
		ql_log(ql_log_warn, vha, 0xd005,
		    "Firmware has been previously dumped (%p) "
		    "-- ignoring request.\n",
		    ha->fw_dump);
		return;
	}
	fw = &ha->fw_dump->isp.isp21;
	qla2xxx_prep_dump(ha, ha->fw_dump);

	rval = QLA_SUCCESS;
	fw->hccr = htons(rd_reg_word(&reg->hccr));

	/* Pause RISC. */
	wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
	for (cnt = 30000; (rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt)
			udelay(100);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS) {
		dmp_reg = &reg->flash_address;
		for (cnt = 0; cnt < ARRAY_SIZE(fw->pbiu_reg); cnt++, dmp_reg++)
			fw->pbiu_reg[cnt] = htons(rd_reg_word(dmp_reg));

		dmp_reg = &reg->u.isp2100.mailbox0;
		for (cnt = 0; cnt < ha->mbx_count; cnt++, dmp_reg++) {
			if (cnt == 8)
				dmp_reg = &reg->u_end.isp2200.mailbox8;

			fw->mailbox_reg[cnt] = htons(rd_reg_word(dmp_reg));
		}

		dmp_reg = &reg->u.isp2100.unused_2[0];
		for (cnt = 0; cnt < ARRAY_SIZE(fw->dma_reg); cnt++, dmp_reg++)
			fw->dma_reg[cnt] = htons(rd_reg_word(dmp_reg));

		wrt_reg_word(&reg->ctrl_status, 0x00);
		dmp_reg = &reg->risc_hw;
		for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_hdw_reg); cnt++, dmp_reg++)
			fw->risc_hdw_reg[cnt] = htons(rd_reg_word(dmp_reg));

		wrt_reg_word(&reg->pcr, 0x2000);
		qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);

		wrt_reg_word(&reg->pcr, 0x2100);
		qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);

		wrt_reg_word(&reg->pcr, 0x2200);
		qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);

		wrt_reg_word(&reg->pcr, 0x2300);
		qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);

		wrt_reg_word(&reg->pcr, 0x2400);
		qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);

		wrt_reg_word(&reg->pcr, 0x2500);
		qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);

		wrt_reg_word(&reg->pcr, 0x2600);
		qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);

		wrt_reg_word(&reg->pcr, 0x2700);
		qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);

		wrt_reg_word(&reg->ctrl_status, 0x10);
		qla2xxx_read_window(reg, 16, fw->frame_buf_hdw_reg);

		wrt_reg_word(&reg->ctrl_status, 0x20);
		qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);

		wrt_reg_word(&reg->ctrl_status, 0x30);
		qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);

		/* Reset the ISP. */
		wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
	}

	for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt)
			udelay(100);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}

	/* Pause RISC. */
	if (rval == QLA_SUCCESS && (IS_QLA2200(ha) || (IS_QLA2100(ha) &&
	    (rd_reg_word(&reg->mctr) & (BIT_1 | BIT_0)) != 0))) {

		wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
		for (cnt = 30000;
		    (rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
		    rval == QLA_SUCCESS; cnt--) {
			if (cnt)
				udelay(100);
			else
				rval = QLA_FUNCTION_TIMEOUT;
		}
		if (rval == QLA_SUCCESS) {
			/* Set memory configuration and timing. */
			if (IS_QLA2100(ha))
				wrt_reg_word(&reg->mctr, 0xf1);
			else
				wrt_reg_word(&reg->mctr, 0xf2);
			rd_reg_word(&reg->mctr);	/* PCI Posting. */

			/* Release RISC. */
			wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
		}
	}

	if (rval == QLA_SUCCESS) {
		/* Get RISC SRAM. */
		risc_address = 0x1000;
		WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_WORD);
		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
	}
	for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_ram) && rval == QLA_SUCCESS;
	    cnt++, risc_address++) {
		WRT_MAILBOX_REG(ha, reg, 1, risc_address);
		wrt_reg_word(&reg->hccr, HCCR_SET_HOST_INT);

		for (timer = 6000000; timer != 0; timer--) {
			/* Check for pending interrupts. */
			if (rd_reg_word(&reg->istatus) & ISR_RISC_INT) {
				if (rd_reg_word(&reg->semaphore) & BIT_0) {
					set_bit(MBX_INTERRUPT,
					    &ha->mbx_cmd_flags);

					mb0 = RD_MAILBOX_REG(ha, reg, 0);
					mb2 = RD_MAILBOX_REG(ha, reg, 2);

					wrt_reg_word(&reg->semaphore, 0);
					wrt_reg_word(&reg->hccr,
					    HCCR_CLR_RISC_INT);
					rd_reg_word(&reg->hccr);
					break;
				}
				wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
				rd_reg_word(&reg->hccr);
			}
			udelay(5);
		}

		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
			rval = mb0 & MBS_MASK;
			fw->risc_ram[cnt] = htons(mb2);
		} else {
			rval = QLA_FUNCTION_FAILED;
		}
	}

	if (rval == QLA_SUCCESS)
		qla2xxx_copy_queues(ha, &fw->queue_dump[0]);

	qla2xxx_dump_post_process(base_vha, rval);
}

void
qla24xx_fw_dump(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	__le32 __iomem *dmp_reg;
	__be32 *iter_reg;
	__le16 __iomem *mbx_reg;
	struct qla24xx_fw_dump *fw;
	void *nxt;
	void *nxt_chain;
	__be32 *last_chain = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

	lockdep_assert_held(&ha->hardware_lock);

	if (IS_P3P_TYPE(ha))
		return;

	ha->fw_dump_cap_flags = 0;

	if (!ha->fw_dump) {
		ql_log(ql_log_warn, vha, 0xd006,
		    "No buffer available for dump.\n");
		return;
	}

	if (ha->fw_dumped) {
		ql_log(ql_log_warn, vha, 0xd007,
		    "Firmware has been previously dumped (%p) "
		    "-- ignoring request.\n",
		    ha->fw_dump);
		return;
	}
	QLA_FW_STOPPED(ha);
	fw = &ha->fw_dump->isp.isp24;
	qla2xxx_prep_dump(ha, ha->fw_dump);

	fw->host_status = htonl(rd_reg_dword(&reg->host_status));

	/*
	 * Pause RISC. No need to track timeout, as resetting the chip
	 * is the right approach in case of pause timeout.
	 */
	qla24xx_pause_risc(reg, ha);

	/* Host interface registers. */
	dmp_reg = &reg->flash_addr;
	for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++)
		fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg));

	/* Disable interrupts. */
	wrt_reg_dword(&reg->ictrl, 0);
	rd_reg_dword(&reg->ictrl);

	/* Shadow registers. */
	wrt_reg_dword(&reg->iobase_addr, 0x0F70);
	rd_reg_dword(&reg->iobase_addr);
	wrt_reg_dword(&reg->iobase_select, 0xB0000000);
	fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0100000);
	fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0200000);
	fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0300000);
	fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0400000);
	fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0500000);
	fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0600000);
	fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata));

	/* Mailbox registers. */
	mbx_reg = &reg->mailbox0;
	for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++)
		fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg));

	/* Transfer sequence registers. */
	iter_reg = fw->xseq_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
	qla24xx_read_window(reg, 0xBF70, 16, iter_reg);

	qla24xx_read_window(reg, 0xBFE0, 16, fw->xseq_0_reg);
	qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);

	/* Receive sequence registers. */
	iter_reg = fw->rseq_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
	qla24xx_read_window(reg, 0xFF70, 16, iter_reg);

	qla24xx_read_window(reg, 0xFFD0, 16, fw->rseq_0_reg);
	qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
	qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);

	/* Command DMA registers. */
	qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);

	/* Queues. */
	iter_reg = fw->req0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
		*iter_reg++ = htonl(rd_reg_dword(dmp_reg));

	iter_reg = fw->resp0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
		*iter_reg++ = htonl(rd_reg_dword(dmp_reg));

	iter_reg = fw->req1_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
		*iter_reg++ = htonl(rd_reg_dword(dmp_reg));

	/* Transmit DMA registers. */
	iter_reg = fw->xmt0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
	qla24xx_read_window(reg, 0x7610, 16, iter_reg);

	iter_reg = fw->xmt1_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
	qla24xx_read_window(reg, 0x7630, 16, iter_reg);

	iter_reg = fw->xmt2_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
	qla24xx_read_window(reg, 0x7650, 16, iter_reg);

	iter_reg = fw->xmt3_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
	qla24xx_read_window(reg, 0x7670, 16, iter_reg);

	iter_reg = fw->xmt4_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
	qla24xx_read_window(reg, 0x7690, 16, iter_reg);

	qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);

	/* Receive DMA registers. */
	iter_reg = fw->rcvt0_data_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
	qla24xx_read_window(reg, 0x7710, 16, iter_reg);

	iter_reg = fw->rcvt1_data_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
	qla24xx_read_window(reg, 0x7730, 16, iter_reg);

	/* RISC registers. */
	iter_reg = fw->risc_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
	qla24xx_read_window(reg, 0x0F70, 16, iter_reg);

	/* Local memory controller registers. */
	iter_reg = fw->lmc_reg;
	iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
	qla24xx_read_window(reg, 0x3060, 16, iter_reg);

	/* Fibre Protocol Module registers. */
	iter_reg = fw->fpm_hdw_reg;
	iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
	qla24xx_read_window(reg, 0x40B0, 16, iter_reg);

	/* Frame Buffer registers. */
	iter_reg = fw->fb_hdw_reg;
	iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
	qla24xx_read_window(reg, 0x61B0, 16, iter_reg);

	rval = qla24xx_soft_reset(ha);
	if (rval != QLA_SUCCESS)
		goto qla24xx_fw_dump_failed_0;

	rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
	    &nxt);
	if (rval != QLA_SUCCESS)
		goto qla24xx_fw_dump_failed_0;

	nxt = qla2xxx_copy_queues(ha, nxt);

	qla24xx_copy_eft(ha, nxt);

	nxt_chain = (void *)ha->fw_dump + ha->chain_offset;
	nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
	if (last_chain) {
		ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
		*last_chain |= htonl(DUMP_CHAIN_LAST);
	}

	/* Adjust valid length. */
	ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);

qla24xx_fw_dump_failed_0:
	qla2xxx_dump_post_process(base_vha, rval);
}

void
qla25xx_fw_dump(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	__le32 __iomem *dmp_reg;
	__be32 *iter_reg;
	__le16 __iomem *mbx_reg;
	struct qla25xx_fw_dump *fw;
	void *nxt, *nxt_chain;
	__be32 *last_chain = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

	lockdep_assert_held(&ha->hardware_lock);

	ha->fw_dump_cap_flags = 0;

	if (!ha->fw_dump) {
		ql_log(ql_log_warn, vha, 0xd008,
		    "No buffer available for dump.\n");
		return;
	}

	if (ha->fw_dumped) {
		ql_log(ql_log_warn, vha, 0xd009,
		    "Firmware has been previously dumped (%p) "
		    "-- ignoring request.\n",
		    ha->fw_dump);
		return;
	}
	QLA_FW_STOPPED(ha);
	fw = &ha->fw_dump->isp.isp25;
	qla2xxx_prep_dump(ha, ha->fw_dump);
	ha->fw_dump->version = htonl(2);

	fw->host_status = htonl(rd_reg_dword(&reg->host_status));

	/*
	 * Pause RISC. No need to track timeout, as resetting the chip
	 * is the right approach in case of pause timeout.
	 */
	qla24xx_pause_risc(reg, ha);

	/* Host/Risc registers. */
	iter_reg = fw->host_risc_reg;
	iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
	qla24xx_read_window(reg, 0x7010, 16, iter_reg);

	/* PCIe registers. */
	wrt_reg_dword(&reg->iobase_addr, 0x7C00);
	rd_reg_dword(&reg->iobase_addr);
	wrt_reg_dword(&reg->iobase_window, 0x01);
	dmp_reg = &reg->iobase_c4;
	fw->pcie_regs[0] = htonl(rd_reg_dword(dmp_reg));
	dmp_reg++;
	fw->pcie_regs[1] = htonl(rd_reg_dword(dmp_reg));
	dmp_reg++;
	fw->pcie_regs[2] = htonl(rd_reg_dword(dmp_reg));
	fw->pcie_regs[3] = htonl(rd_reg_dword(&reg->iobase_window));

	wrt_reg_dword(&reg->iobase_window, 0x00);
	rd_reg_dword(&reg->iobase_window);

	/* Host interface registers. */
	dmp_reg = &reg->flash_addr;
	for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++)
		fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg));

	/* Disable interrupts. */
	wrt_reg_dword(&reg->ictrl, 0);
	rd_reg_dword(&reg->ictrl);

	/* Shadow registers. */
	wrt_reg_dword(&reg->iobase_addr, 0x0F70);
	rd_reg_dword(&reg->iobase_addr);
	wrt_reg_dword(&reg->iobase_select, 0xB0000000);
	fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0100000);
	fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0200000);
	fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0300000);
	fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0400000);
	fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0500000);
	fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0600000);
	fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0700000);
	fw->shadow_reg[7] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0800000);
	fw->shadow_reg[8] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0900000);
	fw->shadow_reg[9] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0A00000);
	fw->shadow_reg[10] = htonl(rd_reg_dword(&reg->iobase_sdata));

	/* RISC I/O register. */
	wrt_reg_dword(&reg->iobase_addr, 0x0010);
	fw->risc_io_reg = htonl(rd_reg_dword(&reg->iobase_window));

	/* Mailbox registers. */
	mbx_reg = &reg->mailbox0;
	for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++)
		fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg));

	/* Transfer sequence registers. */
	iter_reg = fw->xseq_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
	qla24xx_read_window(reg, 0xBF70, 16, iter_reg);

	iter_reg = fw->xseq_0_reg;
	iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
	qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);

	qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);

	/* Receive sequence registers. */
	iter_reg = fw->rseq_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
	qla24xx_read_window(reg, 0xFF70, 16, iter_reg);

	iter_reg = fw->rseq_0_reg;
	iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
	qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);

	qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
	qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);

	/* Auxiliary sequence registers. */
	iter_reg = fw->aseq_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
	qla24xx_read_window(reg, 0xB070, 16, iter_reg);

	iter_reg = fw->aseq_0_reg;
	iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
	qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);

	qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
	qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);

	/* Command DMA registers. */
	qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);

	/* Queues. */
	iter_reg = fw->req0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
		*iter_reg++ = htonl(rd_reg_dword(dmp_reg));

	iter_reg = fw->resp0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
		*iter_reg++ = htonl(rd_reg_dword(dmp_reg));

	iter_reg = fw->req1_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
		*iter_reg++ = htonl(rd_reg_dword(dmp_reg));

	/* Transmit DMA registers. */
	iter_reg = fw->xmt0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
	qla24xx_read_window(reg, 0x7610, 16, iter_reg);

	iter_reg = fw->xmt1_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
	qla24xx_read_window(reg, 0x7630, 16, iter_reg);

	iter_reg = fw->xmt2_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
	qla24xx_read_window(reg, 0x7650, 16, iter_reg);

	iter_reg = fw->xmt3_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
	qla24xx_read_window(reg, 0x7670, 16, iter_reg);

	iter_reg = fw->xmt4_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
	qla24xx_read_window(reg, 0x7690, 16, iter_reg);

	qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);

	/* Receive DMA registers. */
	iter_reg = fw->rcvt0_data_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
	qla24xx_read_window(reg, 0x7710, 16, iter_reg);

	iter_reg = fw->rcvt1_data_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
	qla24xx_read_window(reg, 0x7730, 16, iter_reg);

	/* RISC registers. */
	iter_reg = fw->risc_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
	qla24xx_read_window(reg, 0x0F70, 16, iter_reg);

	/* Local memory controller registers. */
	iter_reg = fw->lmc_reg;
	iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
	qla24xx_read_window(reg, 0x3070, 16, iter_reg);

	/* Fibre Protocol Module registers. */
	iter_reg = fw->fpm_hdw_reg;
	iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
	qla24xx_read_window(reg, 0x40B0, 16, iter_reg);

	/* Frame Buffer registers. */
	iter_reg = fw->fb_hdw_reg;
	iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
	qla24xx_read_window(reg, 0x6F00, 16, iter_reg);

	/* Multi queue registers */
	nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
	    &last_chain);

	rval = qla24xx_soft_reset(ha);
	if (rval != QLA_SUCCESS)
		goto qla25xx_fw_dump_failed_0;

	rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
	    &nxt);
	if (rval != QLA_SUCCESS)
		goto qla25xx_fw_dump_failed_0;

	nxt = qla2xxx_copy_queues(ha, nxt);

	qla24xx_copy_eft(ha, nxt);

	/* Chain entries -- started with MQ. */
	nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
	nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
	nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
	nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain);
	if (last_chain) {
		ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
		*last_chain |= htonl(DUMP_CHAIN_LAST);
	}

	/* Adjust valid length. */
	ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);

qla25xx_fw_dump_failed_0:
	qla2xxx_dump_post_process(base_vha, rval);
}

void
qla81xx_fw_dump(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	__le32 __iomem *dmp_reg;
	__be32 *iter_reg;
	__le16 __iomem *mbx_reg;
	struct qla81xx_fw_dump *fw;
	void *nxt, *nxt_chain;
	__be32 *last_chain = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

	lockdep_assert_held(&ha->hardware_lock);

	ha->fw_dump_cap_flags = 0;

	if (!ha->fw_dump) {
		ql_log(ql_log_warn, vha, 0xd00a,
		    "No buffer available for dump.\n");
		return;
	}

	if (ha->fw_dumped) {
		ql_log(ql_log_warn, vha, 0xd00b,
		    "Firmware has been previously dumped (%p) "
		    "-- ignoring request.\n",
		    ha->fw_dump);
		return;
	}
	fw = &ha->fw_dump->isp.isp81;
	qla2xxx_prep_dump(ha, ha->fw_dump);

	fw->host_status = htonl(rd_reg_dword(&reg->host_status));

	/*
	 * Pause RISC. No need to track timeout, as resetting the chip
	 * is the right approach in case of pause timeout.
	 */
	qla24xx_pause_risc(reg, ha);

	/* Host/Risc registers. */
	iter_reg = fw->host_risc_reg;
	iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
	qla24xx_read_window(reg, 0x7010, 16, iter_reg);

	/* PCIe registers. */
	wrt_reg_dword(&reg->iobase_addr, 0x7C00);
	rd_reg_dword(&reg->iobase_addr);
	wrt_reg_dword(&reg->iobase_window, 0x01);
	dmp_reg = &reg->iobase_c4;
	fw->pcie_regs[0] = htonl(rd_reg_dword(dmp_reg));
	dmp_reg++;
	fw->pcie_regs[1] = htonl(rd_reg_dword(dmp_reg));
	dmp_reg++;
	fw->pcie_regs[2] = htonl(rd_reg_dword(dmp_reg));
	fw->pcie_regs[3] = htonl(rd_reg_dword(&reg->iobase_window));

	wrt_reg_dword(&reg->iobase_window, 0x00);
	rd_reg_dword(&reg->iobase_window);

	/* Host interface registers. */
	dmp_reg = &reg->flash_addr;
	for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++)
		fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg));

	/* Disable interrupts. */
	wrt_reg_dword(&reg->ictrl, 0);
	rd_reg_dword(&reg->ictrl);

	/* Shadow registers. */
	wrt_reg_dword(&reg->iobase_addr, 0x0F70);
	rd_reg_dword(&reg->iobase_addr);
	wrt_reg_dword(&reg->iobase_select, 0xB0000000);
	fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0100000);
	fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0200000);
	fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0300000);
	fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0400000);
	fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0500000);
	fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0600000);
	fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0700000);
	fw->shadow_reg[7] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0800000);
	fw->shadow_reg[8] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0900000);
	fw->shadow_reg[9] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0A00000);
	fw->shadow_reg[10] = htonl(rd_reg_dword(&reg->iobase_sdata));

	/* RISC I/O register. */
	wrt_reg_dword(&reg->iobase_addr, 0x0010);
	fw->risc_io_reg = htonl(rd_reg_dword(&reg->iobase_window));

	/* Mailbox registers. */
	mbx_reg = &reg->mailbox0;
	for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++)
		fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg));

	/* Transfer sequence registers. */
	iter_reg = fw->xseq_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
	qla24xx_read_window(reg, 0xBF70, 16, iter_reg);

	iter_reg = fw->xseq_0_reg;
	iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
	qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);

	qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);

	/* Receive sequence registers. */
*/ 1774 iter_reg = fw->rseq_gp_reg; 1775 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg); 1776 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg); 1777 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg); 1778 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg); 1779 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg); 1780 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg); 1781 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg); 1782 qla24xx_read_window(reg, 0xFF70, 16, iter_reg); 1783 1784 iter_reg = fw->rseq_0_reg; 1785 iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg); 1786 qla24xx_read_window(reg, 0xFFD0, 16, iter_reg); 1787 1788 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg); 1789 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg); 1790 1791 /* Auxiliary sequence registers. */ 1792 iter_reg = fw->aseq_gp_reg; 1793 iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg); 1794 iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg); 1795 iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg); 1796 iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg); 1797 iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg); 1798 iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg); 1799 iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg); 1800 qla24xx_read_window(reg, 0xB070, 16, iter_reg); 1801 1802 iter_reg = fw->aseq_0_reg; 1803 iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg); 1804 qla24xx_read_window(reg, 0xB0D0, 16, iter_reg); 1805 1806 qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg); 1807 qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg); 1808 1809 /* Command DMA registers. */ 1810 qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg); 1811 1812 /* Queues. */ 1813 iter_reg = fw->req0_dma_reg; 1814 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg); 1815 dmp_reg = ®->iobase_q; 1816 for (cnt = 0; cnt < 7; cnt++, dmp_reg++) 1817 *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); 1818 1819 iter_reg = fw->resp0_dma_reg; 1820 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg); 1821 dmp_reg = ®->iobase_q; 1822 for (cnt = 0; cnt < 7; cnt++, dmp_reg++) 1823 *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); 1824 1825 iter_reg = fw->req1_dma_reg; 1826 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg); 1827 dmp_reg = ®->iobase_q; 1828 for (cnt = 0; cnt < 7; cnt++, dmp_reg++) 1829 *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); 1830 1831 /* Transmit DMA registers. */ 1832 iter_reg = fw->xmt0_dma_reg; 1833 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg); 1834 qla24xx_read_window(reg, 0x7610, 16, iter_reg); 1835 1836 iter_reg = fw->xmt1_dma_reg; 1837 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg); 1838 qla24xx_read_window(reg, 0x7630, 16, iter_reg); 1839 1840 iter_reg = fw->xmt2_dma_reg; 1841 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg); 1842 qla24xx_read_window(reg, 0x7650, 16, iter_reg); 1843 1844 iter_reg = fw->xmt3_dma_reg; 1845 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg); 1846 qla24xx_read_window(reg, 0x7670, 16, iter_reg); 1847 1848 iter_reg = fw->xmt4_dma_reg; 1849 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg); 1850 qla24xx_read_window(reg, 0x7690, 16, iter_reg); 1851 1852 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg); 1853 1854 /* Receive DMA registers. 
*/ 1855 iter_reg = fw->rcvt0_data_dma_reg; 1856 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg); 1857 qla24xx_read_window(reg, 0x7710, 16, iter_reg); 1858 1859 iter_reg = fw->rcvt1_data_dma_reg; 1860 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg); 1861 qla24xx_read_window(reg, 0x7730, 16, iter_reg); 1862 1863 /* RISC registers. */ 1864 iter_reg = fw->risc_gp_reg; 1865 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg); 1866 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg); 1867 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg); 1868 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg); 1869 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg); 1870 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg); 1871 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg); 1872 qla24xx_read_window(reg, 0x0F70, 16, iter_reg); 1873 1874 /* Local memory controller registers. */ 1875 iter_reg = fw->lmc_reg; 1876 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg); 1877 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg); 1878 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg); 1879 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg); 1880 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg); 1881 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg); 1882 iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg); 1883 qla24xx_read_window(reg, 0x3070, 16, iter_reg); 1884 1885 /* Fibre Protocol Module registers. */ 1886 iter_reg = fw->fpm_hdw_reg; 1887 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg); 1888 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg); 1889 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg); 1890 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg); 1891 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg); 1892 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg); 1893 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg); 1894 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg); 1895 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg); 1896 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg); 1897 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg); 1898 iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg); 1899 iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg); 1900 qla24xx_read_window(reg, 0x40D0, 16, iter_reg); 1901 1902 /* Frame Buffer registers. 
*/ 1903 iter_reg = fw->fb_hdw_reg; 1904 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg); 1905 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg); 1906 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg); 1907 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg); 1908 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg); 1909 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg); 1910 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg); 1911 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg); 1912 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg); 1913 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg); 1914 iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg); 1915 iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg); 1916 qla24xx_read_window(reg, 0x6F00, 16, iter_reg); 1917 1918 /* Multi queue registers */ 1919 nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset, 1920 &last_chain); 1921 1922 rval = qla24xx_soft_reset(ha); 1923 if (rval != QLA_SUCCESS) 1924 goto qla81xx_fw_dump_failed_0; 1925 1926 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram), 1927 &nxt); 1928 if (rval != QLA_SUCCESS) 1929 goto qla81xx_fw_dump_failed_0; 1930 1931 nxt = qla2xxx_copy_queues(ha, nxt); 1932 1933 qla24xx_copy_eft(ha, nxt); 1934 1935 /* Chain entries -- started with MQ. */ 1936 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain); 1937 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain); 1938 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain); 1939 nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain); 1940 nxt_chain = qla81xx_copy_exchoffld(ha, nxt_chain, &last_chain); 1941 if (last_chain) { 1942 ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT); 1943 *last_chain |= htonl(DUMP_CHAIN_LAST); 1944 } 1945 1946 /* Adjust valid length. */ 1947 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump); 1948 1949 qla81xx_fw_dump_failed_0: 1950 qla2xxx_dump_post_process(base_vha, rval); 1951 } 1952 1953 void 1954 qla83xx_fw_dump(scsi_qla_host_t *vha) 1955 { 1956 int rval; 1957 uint32_t cnt; 1958 struct qla_hw_data *ha = vha->hw; 1959 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1960 __le32 __iomem *dmp_reg; 1961 __be32 *iter_reg; 1962 __le16 __iomem *mbx_reg; 1963 struct qla83xx_fw_dump *fw; 1964 void *nxt, *nxt_chain; 1965 __be32 *last_chain = NULL; 1966 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 1967 1968 lockdep_assert_held(&ha->hardware_lock); 1969 1970 ha->fw_dump_cap_flags = 0; 1971 1972 if (!ha->fw_dump) { 1973 ql_log(ql_log_warn, vha, 0xd00c, 1974 "No buffer available for dump!!!\n"); 1975 return; 1976 } 1977 1978 if (ha->fw_dumped) { 1979 ql_log(ql_log_warn, vha, 0xd00d, 1980 "Firmware has been previously dumped (%p) -- ignoring " 1981 "request...\n", ha->fw_dump); 1982 return; 1983 } 1984 QLA_FW_STOPPED(ha); 1985 fw = &ha->fw_dump->isp.isp83; 1986 qla2xxx_prep_dump(ha, ha->fw_dump); 1987 1988 fw->host_status = htonl(rd_reg_dword(®->host_status)); 1989 1990 /* 1991 * Pause RISC. 
No need to track timeout, as resetting the chip
1992 * is the right approach in case of a pause timeout
1993 */
1994 qla24xx_pause_risc(reg, ha);
1995
1996 wrt_reg_dword(&reg->iobase_addr, 0x6000);
1997 dmp_reg = &reg->iobase_window;
1998 rd_reg_dword(dmp_reg);
1999 wrt_reg_dword(dmp_reg, 0);
2000
2001 dmp_reg = &reg->unused_4_1[0];
2002 rd_reg_dword(dmp_reg);
2003 wrt_reg_dword(dmp_reg, 0);
2004
2005 wrt_reg_dword(&reg->iobase_addr, 0x6010);
2006 dmp_reg = &reg->unused_4_1[2];
2007 rd_reg_dword(dmp_reg);
2008 wrt_reg_dword(dmp_reg, 0);
2009
2010 /* Select PCR and disable ECC checking and correction. */
2011 wrt_reg_dword(&reg->iobase_addr, 0x0F70);
2012 rd_reg_dword(&reg->iobase_addr);
2013 wrt_reg_dword(&reg->iobase_select, 0x60000000); /* write to F0h = PCR */
2014
2015 /* Host/Risc registers. */
2016 iter_reg = fw->host_risc_reg;
2017 iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
2018 iter_reg = qla24xx_read_window(reg, 0x7010, 16, iter_reg);
2019 qla24xx_read_window(reg, 0x7040, 16, iter_reg);
2020
2021 /* PCIe registers. */
2022 wrt_reg_dword(&reg->iobase_addr, 0x7C00);
2023 rd_reg_dword(&reg->iobase_addr);
2024 wrt_reg_dword(&reg->iobase_window, 0x01);
2025 dmp_reg = &reg->iobase_c4;
2026 fw->pcie_regs[0] = htonl(rd_reg_dword(dmp_reg));
2027 dmp_reg++;
2028 fw->pcie_regs[1] = htonl(rd_reg_dword(dmp_reg));
2029 dmp_reg++;
2030 fw->pcie_regs[2] = htonl(rd_reg_dword(dmp_reg));
2031 fw->pcie_regs[3] = htonl(rd_reg_dword(&reg->iobase_window));
2032
2033 wrt_reg_dword(&reg->iobase_window, 0x00);
2034 rd_reg_dword(&reg->iobase_window);
2035
2036 /* Host interface registers. */
2037 dmp_reg = &reg->flash_addr;
2038 for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++)
2039 fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg));
2040
2041 /* Disable interrupts. */
2042 wrt_reg_dword(&reg->ictrl, 0);
2043 rd_reg_dword(&reg->ictrl);
2044
2045 /* Shadow registers. */
2046 wrt_reg_dword(&reg->iobase_addr, 0x0F70);
2047 rd_reg_dword(&reg->iobase_addr);
2048 wrt_reg_dword(&reg->iobase_select, 0xB0000000);
2049 fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata));
2050
2051 wrt_reg_dword(&reg->iobase_select, 0xB0100000);
2052 fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata));
2053
2054 wrt_reg_dword(&reg->iobase_select, 0xB0200000);
2055 fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata));
2056
2057 wrt_reg_dword(&reg->iobase_select, 0xB0300000);
2058 fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata));
2059
2060 wrt_reg_dword(&reg->iobase_select, 0xB0400000);
2061 fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata));
2062
2063 wrt_reg_dword(&reg->iobase_select, 0xB0500000);
2064 fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata));
2065
2066 wrt_reg_dword(&reg->iobase_select, 0xB0600000);
2067 fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata));
2068
2069 wrt_reg_dword(&reg->iobase_select, 0xB0700000);
2070 fw->shadow_reg[7] = htonl(rd_reg_dword(&reg->iobase_sdata));
2071
2072 wrt_reg_dword(&reg->iobase_select, 0xB0800000);
2073 fw->shadow_reg[8] = htonl(rd_reg_dword(&reg->iobase_sdata));
2074
2075 wrt_reg_dword(&reg->iobase_select, 0xB0900000);
2076 fw->shadow_reg[9] = htonl(rd_reg_dword(&reg->iobase_sdata));
2077
2078 wrt_reg_dword(&reg->iobase_select, 0xB0A00000);
2079 fw->shadow_reg[10] = htonl(rd_reg_dword(&reg->iobase_sdata));
2080
2081 /* RISC I/O register. */
2082 wrt_reg_dword(&reg->iobase_addr, 0x0010);
2083 fw->risc_io_reg = htonl(rd_reg_dword(&reg->iobase_window));
2084
2085 /* Mailbox registers.
*/ 2086 mbx_reg = ®->mailbox0; 2087 for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++) 2088 fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg)); 2089 2090 /* Transfer sequence registers. */ 2091 iter_reg = fw->xseq_gp_reg; 2092 iter_reg = qla24xx_read_window(reg, 0xBE00, 16, iter_reg); 2093 iter_reg = qla24xx_read_window(reg, 0xBE10, 16, iter_reg); 2094 iter_reg = qla24xx_read_window(reg, 0xBE20, 16, iter_reg); 2095 iter_reg = qla24xx_read_window(reg, 0xBE30, 16, iter_reg); 2096 iter_reg = qla24xx_read_window(reg, 0xBE40, 16, iter_reg); 2097 iter_reg = qla24xx_read_window(reg, 0xBE50, 16, iter_reg); 2098 iter_reg = qla24xx_read_window(reg, 0xBE60, 16, iter_reg); 2099 iter_reg = qla24xx_read_window(reg, 0xBE70, 16, iter_reg); 2100 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg); 2101 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg); 2102 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg); 2103 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg); 2104 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg); 2105 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg); 2106 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg); 2107 qla24xx_read_window(reg, 0xBF70, 16, iter_reg); 2108 2109 iter_reg = fw->xseq_0_reg; 2110 iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg); 2111 iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg); 2112 qla24xx_read_window(reg, 0xBFE0, 16, iter_reg); 2113 2114 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg); 2115 2116 qla24xx_read_window(reg, 0xBEF0, 16, fw->xseq_2_reg); 2117 2118 /* Receive sequence registers. */ 2119 iter_reg = fw->rseq_gp_reg; 2120 iter_reg = qla24xx_read_window(reg, 0xFE00, 16, iter_reg); 2121 iter_reg = qla24xx_read_window(reg, 0xFE10, 16, iter_reg); 2122 iter_reg = qla24xx_read_window(reg, 0xFE20, 16, iter_reg); 2123 iter_reg = qla24xx_read_window(reg, 0xFE30, 16, iter_reg); 2124 iter_reg = qla24xx_read_window(reg, 0xFE40, 16, iter_reg); 2125 iter_reg = qla24xx_read_window(reg, 0xFE50, 16, iter_reg); 2126 iter_reg = qla24xx_read_window(reg, 0xFE60, 16, iter_reg); 2127 iter_reg = qla24xx_read_window(reg, 0xFE70, 16, iter_reg); 2128 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg); 2129 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg); 2130 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg); 2131 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg); 2132 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg); 2133 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg); 2134 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg); 2135 qla24xx_read_window(reg, 0xFF70, 16, iter_reg); 2136 2137 iter_reg = fw->rseq_0_reg; 2138 iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg); 2139 qla24xx_read_window(reg, 0xFFD0, 16, iter_reg); 2140 2141 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg); 2142 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg); 2143 qla24xx_read_window(reg, 0xFEF0, 16, fw->rseq_3_reg); 2144 2145 /* Auxiliary sequence registers. 
*/ 2146 iter_reg = fw->aseq_gp_reg; 2147 iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg); 2148 iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg); 2149 iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg); 2150 iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg); 2151 iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg); 2152 iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg); 2153 iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg); 2154 iter_reg = qla24xx_read_window(reg, 0xB070, 16, iter_reg); 2155 iter_reg = qla24xx_read_window(reg, 0xB100, 16, iter_reg); 2156 iter_reg = qla24xx_read_window(reg, 0xB110, 16, iter_reg); 2157 iter_reg = qla24xx_read_window(reg, 0xB120, 16, iter_reg); 2158 iter_reg = qla24xx_read_window(reg, 0xB130, 16, iter_reg); 2159 iter_reg = qla24xx_read_window(reg, 0xB140, 16, iter_reg); 2160 iter_reg = qla24xx_read_window(reg, 0xB150, 16, iter_reg); 2161 iter_reg = qla24xx_read_window(reg, 0xB160, 16, iter_reg); 2162 qla24xx_read_window(reg, 0xB170, 16, iter_reg); 2163 2164 iter_reg = fw->aseq_0_reg; 2165 iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg); 2166 qla24xx_read_window(reg, 0xB0D0, 16, iter_reg); 2167 2168 qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg); 2169 qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg); 2170 qla24xx_read_window(reg, 0xB1F0, 16, fw->aseq_3_reg); 2171 2172 /* Command DMA registers. */ 2173 iter_reg = fw->cmd_dma_reg; 2174 iter_reg = qla24xx_read_window(reg, 0x7100, 16, iter_reg); 2175 iter_reg = qla24xx_read_window(reg, 0x7120, 16, iter_reg); 2176 iter_reg = qla24xx_read_window(reg, 0x7130, 16, iter_reg); 2177 qla24xx_read_window(reg, 0x71F0, 16, iter_reg); 2178 2179 /* Queues. */ 2180 iter_reg = fw->req0_dma_reg; 2181 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg); 2182 dmp_reg = ®->iobase_q; 2183 for (cnt = 0; cnt < 7; cnt++, dmp_reg++) 2184 *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); 2185 2186 iter_reg = fw->resp0_dma_reg; 2187 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg); 2188 dmp_reg = ®->iobase_q; 2189 for (cnt = 0; cnt < 7; cnt++, dmp_reg++) 2190 *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); 2191 2192 iter_reg = fw->req1_dma_reg; 2193 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg); 2194 dmp_reg = ®->iobase_q; 2195 for (cnt = 0; cnt < 7; cnt++, dmp_reg++) 2196 *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); 2197 2198 /* Transmit DMA registers. */ 2199 iter_reg = fw->xmt0_dma_reg; 2200 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg); 2201 qla24xx_read_window(reg, 0x7610, 16, iter_reg); 2202 2203 iter_reg = fw->xmt1_dma_reg; 2204 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg); 2205 qla24xx_read_window(reg, 0x7630, 16, iter_reg); 2206 2207 iter_reg = fw->xmt2_dma_reg; 2208 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg); 2209 qla24xx_read_window(reg, 0x7650, 16, iter_reg); 2210 2211 iter_reg = fw->xmt3_dma_reg; 2212 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg); 2213 qla24xx_read_window(reg, 0x7670, 16, iter_reg); 2214 2215 iter_reg = fw->xmt4_dma_reg; 2216 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg); 2217 qla24xx_read_window(reg, 0x7690, 16, iter_reg); 2218 2219 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg); 2220 2221 /* Receive DMA registers. 
*/ 2222 iter_reg = fw->rcvt0_data_dma_reg; 2223 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg); 2224 qla24xx_read_window(reg, 0x7710, 16, iter_reg); 2225 2226 iter_reg = fw->rcvt1_data_dma_reg; 2227 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg); 2228 qla24xx_read_window(reg, 0x7730, 16, iter_reg); 2229 2230 /* RISC registers. */ 2231 iter_reg = fw->risc_gp_reg; 2232 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg); 2233 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg); 2234 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg); 2235 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg); 2236 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg); 2237 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg); 2238 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg); 2239 qla24xx_read_window(reg, 0x0F70, 16, iter_reg); 2240 2241 /* Local memory controller registers. */ 2242 iter_reg = fw->lmc_reg; 2243 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg); 2244 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg); 2245 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg); 2246 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg); 2247 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg); 2248 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg); 2249 iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg); 2250 qla24xx_read_window(reg, 0x3070, 16, iter_reg); 2251 2252 /* Fibre Protocol Module registers. */ 2253 iter_reg = fw->fpm_hdw_reg; 2254 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg); 2255 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg); 2256 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg); 2257 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg); 2258 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg); 2259 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg); 2260 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg); 2261 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg); 2262 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg); 2263 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg); 2264 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg); 2265 iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg); 2266 iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg); 2267 iter_reg = qla24xx_read_window(reg, 0x40D0, 16, iter_reg); 2268 iter_reg = qla24xx_read_window(reg, 0x40E0, 16, iter_reg); 2269 qla24xx_read_window(reg, 0x40F0, 16, iter_reg); 2270 2271 /* RQ0 Array registers. 
*/ 2272 iter_reg = fw->rq0_array_reg; 2273 iter_reg = qla24xx_read_window(reg, 0x5C00, 16, iter_reg); 2274 iter_reg = qla24xx_read_window(reg, 0x5C10, 16, iter_reg); 2275 iter_reg = qla24xx_read_window(reg, 0x5C20, 16, iter_reg); 2276 iter_reg = qla24xx_read_window(reg, 0x5C30, 16, iter_reg); 2277 iter_reg = qla24xx_read_window(reg, 0x5C40, 16, iter_reg); 2278 iter_reg = qla24xx_read_window(reg, 0x5C50, 16, iter_reg); 2279 iter_reg = qla24xx_read_window(reg, 0x5C60, 16, iter_reg); 2280 iter_reg = qla24xx_read_window(reg, 0x5C70, 16, iter_reg); 2281 iter_reg = qla24xx_read_window(reg, 0x5C80, 16, iter_reg); 2282 iter_reg = qla24xx_read_window(reg, 0x5C90, 16, iter_reg); 2283 iter_reg = qla24xx_read_window(reg, 0x5CA0, 16, iter_reg); 2284 iter_reg = qla24xx_read_window(reg, 0x5CB0, 16, iter_reg); 2285 iter_reg = qla24xx_read_window(reg, 0x5CC0, 16, iter_reg); 2286 iter_reg = qla24xx_read_window(reg, 0x5CD0, 16, iter_reg); 2287 iter_reg = qla24xx_read_window(reg, 0x5CE0, 16, iter_reg); 2288 qla24xx_read_window(reg, 0x5CF0, 16, iter_reg); 2289 2290 /* RQ1 Array registers. */ 2291 iter_reg = fw->rq1_array_reg; 2292 iter_reg = qla24xx_read_window(reg, 0x5D00, 16, iter_reg); 2293 iter_reg = qla24xx_read_window(reg, 0x5D10, 16, iter_reg); 2294 iter_reg = qla24xx_read_window(reg, 0x5D20, 16, iter_reg); 2295 iter_reg = qla24xx_read_window(reg, 0x5D30, 16, iter_reg); 2296 iter_reg = qla24xx_read_window(reg, 0x5D40, 16, iter_reg); 2297 iter_reg = qla24xx_read_window(reg, 0x5D50, 16, iter_reg); 2298 iter_reg = qla24xx_read_window(reg, 0x5D60, 16, iter_reg); 2299 iter_reg = qla24xx_read_window(reg, 0x5D70, 16, iter_reg); 2300 iter_reg = qla24xx_read_window(reg, 0x5D80, 16, iter_reg); 2301 iter_reg = qla24xx_read_window(reg, 0x5D90, 16, iter_reg); 2302 iter_reg = qla24xx_read_window(reg, 0x5DA0, 16, iter_reg); 2303 iter_reg = qla24xx_read_window(reg, 0x5DB0, 16, iter_reg); 2304 iter_reg = qla24xx_read_window(reg, 0x5DC0, 16, iter_reg); 2305 iter_reg = qla24xx_read_window(reg, 0x5DD0, 16, iter_reg); 2306 iter_reg = qla24xx_read_window(reg, 0x5DE0, 16, iter_reg); 2307 qla24xx_read_window(reg, 0x5DF0, 16, iter_reg); 2308 2309 /* RP0 Array registers. */ 2310 iter_reg = fw->rp0_array_reg; 2311 iter_reg = qla24xx_read_window(reg, 0x5E00, 16, iter_reg); 2312 iter_reg = qla24xx_read_window(reg, 0x5E10, 16, iter_reg); 2313 iter_reg = qla24xx_read_window(reg, 0x5E20, 16, iter_reg); 2314 iter_reg = qla24xx_read_window(reg, 0x5E30, 16, iter_reg); 2315 iter_reg = qla24xx_read_window(reg, 0x5E40, 16, iter_reg); 2316 iter_reg = qla24xx_read_window(reg, 0x5E50, 16, iter_reg); 2317 iter_reg = qla24xx_read_window(reg, 0x5E60, 16, iter_reg); 2318 iter_reg = qla24xx_read_window(reg, 0x5E70, 16, iter_reg); 2319 iter_reg = qla24xx_read_window(reg, 0x5E80, 16, iter_reg); 2320 iter_reg = qla24xx_read_window(reg, 0x5E90, 16, iter_reg); 2321 iter_reg = qla24xx_read_window(reg, 0x5EA0, 16, iter_reg); 2322 iter_reg = qla24xx_read_window(reg, 0x5EB0, 16, iter_reg); 2323 iter_reg = qla24xx_read_window(reg, 0x5EC0, 16, iter_reg); 2324 iter_reg = qla24xx_read_window(reg, 0x5ED0, 16, iter_reg); 2325 iter_reg = qla24xx_read_window(reg, 0x5EE0, 16, iter_reg); 2326 qla24xx_read_window(reg, 0x5EF0, 16, iter_reg); 2327 2328 /* RP1 Array registers. 
*/ 2329 iter_reg = fw->rp1_array_reg; 2330 iter_reg = qla24xx_read_window(reg, 0x5F00, 16, iter_reg); 2331 iter_reg = qla24xx_read_window(reg, 0x5F10, 16, iter_reg); 2332 iter_reg = qla24xx_read_window(reg, 0x5F20, 16, iter_reg); 2333 iter_reg = qla24xx_read_window(reg, 0x5F30, 16, iter_reg); 2334 iter_reg = qla24xx_read_window(reg, 0x5F40, 16, iter_reg); 2335 iter_reg = qla24xx_read_window(reg, 0x5F50, 16, iter_reg); 2336 iter_reg = qla24xx_read_window(reg, 0x5F60, 16, iter_reg); 2337 iter_reg = qla24xx_read_window(reg, 0x5F70, 16, iter_reg); 2338 iter_reg = qla24xx_read_window(reg, 0x5F80, 16, iter_reg); 2339 iter_reg = qla24xx_read_window(reg, 0x5F90, 16, iter_reg); 2340 iter_reg = qla24xx_read_window(reg, 0x5FA0, 16, iter_reg); 2341 iter_reg = qla24xx_read_window(reg, 0x5FB0, 16, iter_reg); 2342 iter_reg = qla24xx_read_window(reg, 0x5FC0, 16, iter_reg); 2343 iter_reg = qla24xx_read_window(reg, 0x5FD0, 16, iter_reg); 2344 iter_reg = qla24xx_read_window(reg, 0x5FE0, 16, iter_reg); 2345 qla24xx_read_window(reg, 0x5FF0, 16, iter_reg); 2346 2347 iter_reg = fw->at0_array_reg; 2348 iter_reg = qla24xx_read_window(reg, 0x7080, 16, iter_reg); 2349 iter_reg = qla24xx_read_window(reg, 0x7090, 16, iter_reg); 2350 iter_reg = qla24xx_read_window(reg, 0x70A0, 16, iter_reg); 2351 iter_reg = qla24xx_read_window(reg, 0x70B0, 16, iter_reg); 2352 iter_reg = qla24xx_read_window(reg, 0x70C0, 16, iter_reg); 2353 iter_reg = qla24xx_read_window(reg, 0x70D0, 16, iter_reg); 2354 iter_reg = qla24xx_read_window(reg, 0x70E0, 16, iter_reg); 2355 qla24xx_read_window(reg, 0x70F0, 16, iter_reg); 2356 2357 /* I/O Queue Control registers. */ 2358 qla24xx_read_window(reg, 0x7800, 16, fw->queue_control_reg); 2359 2360 /* Frame Buffer registers. */ 2361 iter_reg = fw->fb_hdw_reg; 2362 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg); 2363 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg); 2364 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg); 2365 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg); 2366 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg); 2367 iter_reg = qla24xx_read_window(reg, 0x6060, 16, iter_reg); 2368 iter_reg = qla24xx_read_window(reg, 0x6070, 16, iter_reg); 2369 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg); 2370 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg); 2371 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg); 2372 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg); 2373 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg); 2374 iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg); 2375 iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg); 2376 iter_reg = qla24xx_read_window(reg, 0x6530, 16, iter_reg); 2377 iter_reg = qla24xx_read_window(reg, 0x6540, 16, iter_reg); 2378 iter_reg = qla24xx_read_window(reg, 0x6550, 16, iter_reg); 2379 iter_reg = qla24xx_read_window(reg, 0x6560, 16, iter_reg); 2380 iter_reg = qla24xx_read_window(reg, 0x6570, 16, iter_reg); 2381 iter_reg = qla24xx_read_window(reg, 0x6580, 16, iter_reg); 2382 iter_reg = qla24xx_read_window(reg, 0x6590, 16, iter_reg); 2383 iter_reg = qla24xx_read_window(reg, 0x65A0, 16, iter_reg); 2384 iter_reg = qla24xx_read_window(reg, 0x65B0, 16, iter_reg); 2385 iter_reg = qla24xx_read_window(reg, 0x65C0, 16, iter_reg); 2386 iter_reg = qla24xx_read_window(reg, 0x65D0, 16, iter_reg); 2387 iter_reg = qla24xx_read_window(reg, 0x65E0, 16, iter_reg); 2388 qla24xx_read_window(reg, 0x6F00, 16, iter_reg); 2389 2390 /* Multi queue registers */ 2391 nxt_chain = 
qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset, 2392 &last_chain); 2393 2394 rval = qla24xx_soft_reset(ha); 2395 if (rval != QLA_SUCCESS) { 2396 ql_log(ql_log_warn, vha, 0xd00e, 2397 "SOFT RESET FAILED, forcing continuation of dump!!!\n"); 2398 rval = QLA_SUCCESS; 2399 2400 ql_log(ql_log_warn, vha, 0xd00f, "try a bigger hammer!!!\n"); 2401 2402 wrt_reg_dword(®->hccr, HCCRX_SET_RISC_RESET); 2403 rd_reg_dword(®->hccr); 2404 2405 wrt_reg_dword(®->hccr, HCCRX_REL_RISC_PAUSE); 2406 rd_reg_dword(®->hccr); 2407 2408 wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_RESET); 2409 rd_reg_dword(®->hccr); 2410 2411 for (cnt = 30000; cnt && (rd_reg_word(®->mailbox0)); cnt--) 2412 udelay(5); 2413 2414 if (!cnt) { 2415 nxt = fw->code_ram; 2416 nxt += sizeof(fw->code_ram); 2417 nxt += (ha->fw_memory_size - 0x100000 + 1); 2418 goto copy_queue; 2419 } else { 2420 set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags); 2421 ql_log(ql_log_warn, vha, 0xd010, 2422 "bigger hammer success?\n"); 2423 } 2424 } 2425 2426 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram), 2427 &nxt); 2428 if (rval != QLA_SUCCESS) 2429 goto qla83xx_fw_dump_failed_0; 2430 2431 copy_queue: 2432 nxt = qla2xxx_copy_queues(ha, nxt); 2433 2434 qla24xx_copy_eft(ha, nxt); 2435 2436 /* Chain entries -- started with MQ. */ 2437 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain); 2438 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain); 2439 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain); 2440 nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain); 2441 nxt_chain = qla81xx_copy_exchoffld(ha, nxt_chain, &last_chain); 2442 if (last_chain) { 2443 ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT); 2444 *last_chain |= htonl(DUMP_CHAIN_LAST); 2445 } 2446 2447 /* Adjust valid length. */ 2448 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump); 2449 2450 qla83xx_fw_dump_failed_0: 2451 qla2xxx_dump_post_process(base_vha, rval); 2452 } 2453 2454 /****************************************************************************/ 2455 /* Driver Debug Functions. */ 2456 /****************************************************************************/ 2457 2458 /* Write the debug message prefix into @pbuf. */ 2459 static void ql_dbg_prefix(char *pbuf, int pbuf_size, 2460 const scsi_qla_host_t *vha, uint msg_id) 2461 { 2462 if (vha) { 2463 const struct pci_dev *pdev = vha->hw->pdev; 2464 2465 /* <module-name> [<dev-name>]-<msg-id>:<host>: */ 2466 snprintf(pbuf, pbuf_size, "%s [%s]-%04x:%lu: ", QL_MSGHDR, 2467 dev_name(&(pdev->dev)), msg_id, vha->host_no); 2468 } else { 2469 /* <module-name> [<dev-name>]-<msg-id>: : */ 2470 snprintf(pbuf, pbuf_size, "%s [%s]-%04x: : ", QL_MSGHDR, 2471 "0000:00:00.0", msg_id); 2472 } 2473 } 2474 2475 /* 2476 * This function is for formatting and logging debug information. 2477 * It is to be used when vha is available. It formats the message 2478 * and logs it to the messages file. 2479 * parameters: 2480 * level: The level of the debug messages to be printed. 2481 * If ql2xextended_error_logging value is correctly set, 2482 * this message will appear in the messages file. 2483 * vha: Pointer to the scsi_qla_host_t. 2484 * id: This is a unique identifier for the level. It identifies the 2485 * part of the code from where the message originated. 2486 * msg: The message to be displayed. 2487 */ 2488 void 2489 ql_dbg(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...) 
2490 { 2491 va_list va; 2492 struct va_format vaf; 2493 char pbuf[64]; 2494 2495 va_start(va, fmt); 2496 2497 vaf.fmt = fmt; 2498 vaf.va = &va; 2499 2500 ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), vha, id); 2501 2502 if (!ql_mask_match(level)) 2503 trace_ql_dbg_log(pbuf, &vaf); 2504 else 2505 pr_warn("%s%pV", pbuf, &vaf); 2506 2507 va_end(va); 2508 2509 } 2510 2511 /* 2512 * This function is for formatting and logging debug information. 2513 * It is to be used when vha is not available and pci is available, 2514 * i.e., before host allocation. It formats the message and logs it 2515 * to the messages file. 2516 * parameters: 2517 * level: The level of the debug messages to be printed. 2518 * If ql2xextended_error_logging value is correctly set, 2519 * this message will appear in the messages file. 2520 * pdev: Pointer to the struct pci_dev. 2521 * id: This is a unique id for the level. It identifies the part 2522 * of the code from where the message originated. 2523 * msg: The message to be displayed. 2524 */ 2525 void 2526 ql_dbg_pci(uint level, struct pci_dev *pdev, uint id, const char *fmt, ...) 2527 { 2528 va_list va; 2529 struct va_format vaf; 2530 char pbuf[128]; 2531 2532 if (pdev == NULL) 2533 return; 2534 if (!ql_mask_match(level)) 2535 return; 2536 2537 va_start(va, fmt); 2538 2539 vaf.fmt = fmt; 2540 vaf.va = &va; 2541 2542 ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL, id + ql_dbg_offset); 2543 pr_warn("%s%pV", pbuf, &vaf); 2544 2545 va_end(va); 2546 } 2547 2548 /* 2549 * This function is for formatting and logging log messages. 2550 * It is to be used when vha is available. It formats the message 2551 * and logs it to the messages file. All the messages will be logged 2552 * irrespective of value of ql2xextended_error_logging. 2553 * parameters: 2554 * level: The level of the log messages to be printed in the 2555 * messages file. 2556 * vha: Pointer to the scsi_qla_host_t 2557 * id: This is a unique id for the level. It identifies the 2558 * part of the code from where the message originated. 2559 * msg: The message to be displayed. 2560 */ 2561 void 2562 ql_log(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...) 2563 { 2564 va_list va; 2565 struct va_format vaf; 2566 char pbuf[128]; 2567 2568 if (level > ql_errlev) 2569 return; 2570 2571 ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), vha, id); 2572 2573 va_start(va, fmt); 2574 2575 vaf.fmt = fmt; 2576 vaf.va = &va; 2577 2578 switch (level) { 2579 case ql_log_fatal: /* FATAL LOG */ 2580 pr_crit("%s%pV", pbuf, &vaf); 2581 break; 2582 case ql_log_warn: 2583 pr_err("%s%pV", pbuf, &vaf); 2584 break; 2585 case ql_log_info: 2586 pr_warn("%s%pV", pbuf, &vaf); 2587 break; 2588 default: 2589 pr_info("%s%pV", pbuf, &vaf); 2590 break; 2591 } 2592 2593 va_end(va); 2594 } 2595 2596 /* 2597 * This function is for formatting and logging log messages. 2598 * It is to be used when vha is not available and pci is available, 2599 * i.e., before host allocation. It formats the message and logs 2600 * it to the messages file. All the messages are logged irrespective 2601 * of the value of ql2xextended_error_logging. 2602 * parameters: 2603 * level: The level of the log messages to be printed in the 2604 * messages file. 2605 * pdev: Pointer to the struct pci_dev. 2606 * id: This is a unique id for the level. It identifies the 2607 * part of the code from where the message originated. 2608 * msg: The message to be displayed. 2609 */ 2610 void 2611 ql_log_pci(uint level, struct pci_dev *pdev, uint id, const char *fmt, ...) 
2612 { 2613 va_list va; 2614 struct va_format vaf; 2615 char pbuf[128]; 2616 2617 if (pdev == NULL) 2618 return; 2619 if (level > ql_errlev) 2620 return; 2621 2622 ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL, id); 2623 2624 va_start(va, fmt); 2625 2626 vaf.fmt = fmt; 2627 vaf.va = &va; 2628 2629 switch (level) { 2630 case ql_log_fatal: /* FATAL LOG */ 2631 pr_crit("%s%pV", pbuf, &vaf); 2632 break; 2633 case ql_log_warn: 2634 pr_err("%s%pV", pbuf, &vaf); 2635 break; 2636 case ql_log_info: 2637 pr_warn("%s%pV", pbuf, &vaf); 2638 break; 2639 default: 2640 pr_info("%s%pV", pbuf, &vaf); 2641 break; 2642 } 2643 2644 va_end(va); 2645 } 2646 2647 void 2648 ql_dump_regs(uint level, scsi_qla_host_t *vha, uint id) 2649 { 2650 int i; 2651 struct qla_hw_data *ha = vha->hw; 2652 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 2653 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; 2654 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; 2655 __le16 __iomem *mbx_reg; 2656 2657 if (!ql_mask_match(level)) 2658 return; 2659 2660 if (IS_P3P_TYPE(ha)) 2661 mbx_reg = ®82->mailbox_in[0]; 2662 else if (IS_FWI2_CAPABLE(ha)) 2663 mbx_reg = ®24->mailbox0; 2664 else 2665 mbx_reg = MAILBOX_REG(ha, reg, 0); 2666 2667 ql_dbg(level, vha, id, "Mailbox registers:\n"); 2668 for (i = 0; i < 6; i++, mbx_reg++) 2669 ql_dbg(level, vha, id, 2670 "mbox[%d] %#04x\n", i, rd_reg_word(mbx_reg)); 2671 } 2672 2673 void 2674 ql_dump_buffer(uint level, scsi_qla_host_t *vha, uint id, const void *buf, 2675 uint size) 2676 { 2677 uint cnt; 2678 2679 if (!ql_mask_match(level)) 2680 return; 2681 2682 ql_dbg(level, vha, id, 2683 "%-+5d 0 1 2 3 4 5 6 7 8 9 A B C D E F\n", size); 2684 ql_dbg(level, vha, id, 2685 "----- -----------------------------------------------\n"); 2686 for (cnt = 0; cnt < size; cnt += 16) { 2687 ql_dbg(level, vha, id, "%04x: ", cnt); 2688 print_hex_dump(KERN_CONT, "", DUMP_PREFIX_NONE, 16, 1, 2689 buf + cnt, min(16U, size - cnt), false); 2690 } 2691 } 2692 2693 /* 2694 * This function is for formatting and logging log messages. 2695 * It is to be used when vha is available. It formats the message 2696 * and logs it to the messages file. All the messages will be logged 2697 * irrespective of value of ql2xextended_error_logging. 2698 * parameters: 2699 * level: The level of the log messages to be printed in the 2700 * messages file. 2701 * vha: Pointer to the scsi_qla_host_t 2702 * id: This is a unique id for the level. It identifies the 2703 * part of the code from where the message originated. 2704 * msg: The message to be displayed. 2705 */ 2706 void 2707 ql_log_qp(uint32_t level, struct qla_qpair *qpair, int32_t id, 2708 const char *fmt, ...) 2709 { 2710 va_list va; 2711 struct va_format vaf; 2712 char pbuf[128]; 2713 2714 if (level > ql_errlev) 2715 return; 2716 2717 ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), qpair ? qpair->vha : NULL, id); 2718 2719 va_start(va, fmt); 2720 2721 vaf.fmt = fmt; 2722 vaf.va = &va; 2723 2724 switch (level) { 2725 case ql_log_fatal: /* FATAL LOG */ 2726 pr_crit("%s%pV", pbuf, &vaf); 2727 break; 2728 case ql_log_warn: 2729 pr_err("%s%pV", pbuf, &vaf); 2730 break; 2731 case ql_log_info: 2732 pr_warn("%s%pV", pbuf, &vaf); 2733 break; 2734 default: 2735 pr_info("%s%pV", pbuf, &vaf); 2736 break; 2737 } 2738 2739 va_end(va); 2740 } 2741 2742 /* 2743 * This function is for formatting and logging debug information. 2744 * It is to be used when vha is available. It formats the message 2745 * and logs it to the messages file. 
2746 * parameters:
2747 * level: The level of the debug messages to be printed.
2748 * If ql2xextended_error_logging value is correctly set,
2749 * this message will appear in the messages file.
2750 * qpair: Pointer to the qla_qpair; its vha, when present, is used in the message prefix.
2751 * id: This is a unique identifier for the level. It identifies the
2752 * part of the code from where the message originated.
2753 * msg: The message to be displayed.
2754 */
2755 void
2756 ql_dbg_qp(uint32_t level, struct qla_qpair *qpair, int32_t id,
2757 const char *fmt, ...)
2758 {
2759 va_list va;
2760 struct va_format vaf;
2761 char pbuf[128];
2762
2763 if (!ql_mask_match(level))
2764 return;
2765
2766 va_start(va, fmt);
2767
2768 vaf.fmt = fmt;
2769 vaf.va = &va;
2770
2771 ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), qpair ? qpair->vha : NULL,
2772 id + ql_dbg_offset);
2773 pr_warn("%s%pV", pbuf, &vaf);
2774
2775 va_end(va);
2776
2777 }
2778
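/*
 * Example usage of the logging interfaces above (illustrative sketch only;
 * it is not called anywhere in the driver). The message ids and the local
 * variables shown are placeholders: real call sites must pick unused ids
 * tracked in the allocation table at the top of this file.
 *
 *	ql_dbg(ql_dbg_io, vha, 0x3999,
 *	    "Queuing command, handle=%x.\n", handle);
 *
 *	ql_log(ql_log_warn, vha, 0x7999,
 *	    "Unexpected mailbox completion, mb0=%x.\n", mb0);
 *
 *	ql_dump_buffer(ql_dbg_verbose, vha, 0x3998, pkt, sizeof(*pkt));
 *
 * ql_dbg() output reaches the console only when the level matches the
 * ql2xextended_error_logging mask; otherwise it is routed to the
 * ql_dbg_log trace event. ql_log() output is gated solely by ql_errlev.
 */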