// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */

/*
 * Table showing the current message id in use for each log/debug level.
 * Update this table when adding new log/debug messages.
 * ----------------------------------------------------------------------
 * |             Level            |   Last Value Used  |      Holes     |
 * ----------------------------------------------------------------------
 * | Module Init and Probe        |       0x0199       |                |
 * | Mailbox commands             |       0x1206       | 0x11a5-0x11ff  |
 * | Device Discovery             |       0x2134       | 0x210e-0x2116  |
 * |                              |                    | 0x211a         |
 * |                              |                    | 0x211c-0x2128  |
 * |                              |                    | 0x212c-0x2134  |
 * | Queue Command and IO tracing |       0x3074       | 0x300b         |
 * |                              |                    | 0x3027-0x3028  |
 * |                              |                    | 0x303d-0x3041  |
 * |                              |                    | 0x302d,0x3033  |
 * |                              |                    | 0x3036,0x3038  |
 * |                              |                    | 0x303a         |
 * | DPC Thread                   |       0x4023       | 0x4002,0x4013  |
 * | Async Events                 |       0x509c       |                |
 * | Timer Routines               |       0x6012       |                |
 * | User Space Interactions      |       0x70e3       | 0x7018,0x702e  |
 * |                              |                    | 0x7020,0x7024  |
 * |                              |                    | 0x7039,0x7045  |
 * |                              |                    | 0x7073-0x7075  |
 * |                              |                    | 0x70a5-0x70a6  |
 * |                              |                    | 0x70a8,0x70ab  |
 * |                              |                    | 0x70ad-0x70ae  |
 * |                              |                    | 0x70d0-0x70d6  |
 * |                              |                    | 0x70d7-0x70db  |
 * | Task Management              |       0x8042       | 0x8000         |
 * |                              |                    | 0x8019         |
 * |                              |                    | 0x8025,0x8026  |
 * |                              |                    | 0x8031,0x8032  |
 * |                              |                    | 0x8039,0x803c  |
 * | AER/EEH                      |       0x9011       |                |
 * | Virtual Port                 |       0xa007       |                |
 * | ISP82XX Specific             |       0xb157       | 0xb002,0xb024  |
 * |                              |                    | 0xb09e,0xb0ae  |
 * |                              |                    | 0xb0c3,0xb0c6  |
 * |                              |                    | 0xb0e0-0xb0ef  |
 * |                              |                    | 0xb085,0xb0dc  |
 * |                              |                    | 0xb107,0xb108  |
 * |                              |                    | 0xb111,0xb11e  |
 * |                              |                    | 0xb12c,0xb12d  |
 * |                              |                    | 0xb13a,0xb142  |
 * |                              |                    | 0xb13c-0xb140  |
 * |                              |                    | 0xb149         |
 * | MultiQ                       |       0xc010       |                |
 * | Misc                         |       0xd303       | 0xd031-0xd0ff  |
 * |                              |                    | 0xd101-0xd1fe  |
 * |                              |                    | 0xd214-0xd2fe  |
 * | Target Mode                  |       0xe081       |                |
 * | Target Mode Management       |       0xf09b       | 0xf002         |
 * |                              |                    | 0xf046-0xf049  |
 * | Target Mode Task Management  |       0x1000d      |                |
 * ----------------------------------------------------------------------
 */
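
/*
 * Illustrative example (not taken from the table above): a new Device
 * Discovery message would take an unused 0x2xxx id, for instance
 *
 *	ql_dbg(ql_dbg_disc, vha, 0x2135, "example message, state=%x.\n", x);
 *
 * and the "Last Value Used" column would then be updated accordingly.
 */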

#include "qla_def.h"

#include <linux/delay.h>
#define CREATE_TRACE_POINTS
#include <trace/events/qla.h>

static uint32_t ql_dbg_offset = 0x800;

static inline void
qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump *fw_dump)
{
	fw_dump->fw_major_version = htonl(ha->fw_major_version);
	fw_dump->fw_minor_version = htonl(ha->fw_minor_version);
	fw_dump->fw_subminor_version = htonl(ha->fw_subminor_version);
	fw_dump->fw_attributes = htonl(ha->fw_attributes);

	fw_dump->vendor = htonl(ha->pdev->vendor);
	fw_dump->device = htonl(ha->pdev->device);
	fw_dump->subsystem_vendor = htonl(ha->pdev->subsystem_vendor);
	fw_dump->subsystem_device = htonl(ha->pdev->subsystem_device);
}

static inline void *
qla2xxx_copy_queues(struct qla_hw_data *ha, void *ptr)
{
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Request queue. */
	memcpy(ptr, req->ring, req->length *
	    sizeof(request_t));

	/* Response queue. */
	ptr += req->length * sizeof(request_t);
	memcpy(ptr, rsp->ring, rsp->length *
	    sizeof(response_t));

	return ptr + (rsp->length * sizeof(response_t));
}

int
qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
    uint32_t ram_dwords, void **nxt)
{
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	dma_addr_t dump_dma = ha->gid_list_dma;
	uint32_t *chunk = (uint32_t *)ha->gid_list;
	uint32_t dwords = qla2x00_gid_list_size(ha) / 4;
	uint32_t stat;
	ulong i, j, timer = 6000000;
	int rval = QLA_FUNCTION_FAILED;

	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
	for (i = 0; i < ram_dwords; i += dwords, addr += dwords) {
		if (i + dwords > ram_dwords)
			dwords = ram_dwords - i;

		wrt_reg_word(&reg->mailbox0, MBC_LOAD_DUMP_MPI_RAM);
		wrt_reg_word(&reg->mailbox1, LSW(addr));
		wrt_reg_word(&reg->mailbox8, MSW(addr));

		wrt_reg_word(&reg->mailbox2, MSW(LSD(dump_dma)));
		wrt_reg_word(&reg->mailbox3, LSW(LSD(dump_dma)));
		wrt_reg_word(&reg->mailbox6, MSW(MSD(dump_dma)));
		wrt_reg_word(&reg->mailbox7, LSW(MSD(dump_dma)));

		wrt_reg_word(&reg->mailbox4, MSW(dwords));
		wrt_reg_word(&reg->mailbox5, LSW(dwords));

		wrt_reg_word(&reg->mailbox9, 0);
		wrt_reg_dword(&reg->hccr, HCCRX_SET_HOST_INT);

		ha->flags.mbox_int = 0;
		while (timer--) {
			udelay(5);

			stat = rd_reg_dword(&reg->host_status);
			/* Check for pending interrupts. */
			if (!(stat & HSRX_RISC_INT))
				continue;

			stat &= 0xff;
			if (stat != 0x1 && stat != 0x2 &&
			    stat != 0x10 && stat != 0x11) {

				/* Clear this intr; it wasn't a mailbox intr */
				wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
				rd_reg_dword(&reg->hccr);
				continue;
			}

			set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
			rval = rd_reg_word(&reg->mailbox0) & MBS_MASK;
			wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
			rd_reg_dword(&reg->hccr);
			break;
		}
		ha->flags.mbox_int = 1;
		*nxt = ram + i;

		if (!test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
			/* no interrupt, timed out */
			return rval;
		}
		if (rval) {
			/* error completion status */
			return rval;
		}
		for (j = 0; j < dwords; j++) {
			ram[i + j] = (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ?
			    chunk[j] : swab32(chunk[j]);
		}
	}

	*nxt = ram + i;
	return QLA_SUCCESS;
}

int
qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, __be32 *ram,
    uint32_t ram_dwords, void **nxt)
{
	int rval = QLA_FUNCTION_FAILED;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	dma_addr_t dump_dma = ha->gid_list_dma;
	uint32_t *chunk = (uint32_t *)ha->gid_list;
	uint32_t dwords = qla2x00_gid_list_size(ha) / 4;
	uint32_t stat;
	ulong i, j, timer = 6000000;

	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

	for (i = 0; i < ram_dwords; i += dwords, addr += dwords) {
		if (i + dwords > ram_dwords)
			dwords = ram_dwords - i;

		wrt_reg_word(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
		wrt_reg_word(&reg->mailbox1, LSW(addr));
		wrt_reg_word(&reg->mailbox8, MSW(addr));
		wrt_reg_word(&reg->mailbox10, 0);

		wrt_reg_word(&reg->mailbox2, MSW(LSD(dump_dma)));
		wrt_reg_word(&reg->mailbox3, LSW(LSD(dump_dma)));
		wrt_reg_word(&reg->mailbox6, MSW(MSD(dump_dma)));
		wrt_reg_word(&reg->mailbox7, LSW(MSD(dump_dma)));

		wrt_reg_word(&reg->mailbox4, MSW(dwords));
		wrt_reg_word(&reg->mailbox5, LSW(dwords));
		wrt_reg_dword(&reg->hccr, HCCRX_SET_HOST_INT);

		ha->flags.mbox_int = 0;
		while (timer--) {
			udelay(5);
			stat = rd_reg_dword(&reg->host_status);

			/* Check for pending interrupts. */
			if (!(stat & HSRX_RISC_INT))
				continue;

			stat &= 0xff;
			if (stat != 0x1 && stat != 0x2 &&
			    stat != 0x10 && stat != 0x11) {
				wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
				rd_reg_dword(&reg->hccr);
				continue;
			}

			set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
			rval = rd_reg_word(&reg->mailbox0) & MBS_MASK;
			wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
			rd_reg_dword(&reg->hccr);
			break;
		}
		ha->flags.mbox_int = 1;
		*nxt = ram + i;

		if (!test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
			/* no interrupt, timed out */
			return rval;
		}
		if (rval) {
			/* error completion status */
			return rval;
		}
		for (j = 0; j < dwords; j++) {
			ram[i + j] = (__force __be32)
			    ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) ?
			     chunk[j] : swab32(chunk[j]));
		}
	}

	*nxt = ram + i;
	return QLA_SUCCESS;
}

static int
qla24xx_dump_memory(struct qla_hw_data *ha, __be32 *code_ram,
    uint32_t cram_size, void **nxt)
{
	int rval;

	/* Code RAM. */
	rval = qla24xx_dump_ram(ha, 0x20000, code_ram, cram_size / 4, nxt);
	if (rval != QLA_SUCCESS)
		return rval;

	set_bit(RISC_SRAM_DUMP_CMPL, &ha->fw_dump_cap_flags);

	/* External Memory. */
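	/*
	 * Note (assumption from the surrounding code, not the firmware spec):
	 * fw_memory_size holds the last valid dword address of external
	 * memory, so the "+ 1" below makes the range starting at 0x100000
	 * inclusive of that final dword.
	 */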
	rval = qla24xx_dump_ram(ha, 0x100000, *nxt,
	    ha->fw_memory_size - 0x100000 + 1, nxt);
	if (rval == QLA_SUCCESS)
		set_bit(RISC_EXT_MEM_DUMP_CMPL, &ha->fw_dump_cap_flags);

	return rval;
}

static __be32 *
qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
    uint32_t count, __be32 *buf)
{
	__le32 __iomem *dmp_reg;

	wrt_reg_dword(&reg->iobase_addr, iobase);
	dmp_reg = &reg->iobase_window;
	for ( ; count--; dmp_reg++)
		*buf++ = htonl(rd_reg_dword(dmp_reg));

	return buf;
}

void
qla24xx_pause_risc(struct device_reg_24xx __iomem *reg, struct qla_hw_data *ha)
{
	wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_PAUSE);

	/* A 100 usec delay is sufficient for the hardware to pause the RISC */
	udelay(100);
	if (rd_reg_dword(&reg->host_status) & HSRX_RISC_PAUSED)
		set_bit(RISC_PAUSE_CMPL, &ha->fw_dump_cap_flags);
}

int
qla24xx_soft_reset(struct qla_hw_data *ha)
{
	int rval = QLA_SUCCESS;
	uint32_t cnt;
	uint16_t wd;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	/*
	 * Reset RISC. The delay is dependent on system architecture.
	 * Driver can proceed with the reset sequence after waiting
	 * for a timeout period.
	 */
	wrt_reg_dword(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	for (cnt = 0; cnt < 30000; cnt++) {
		if ((rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
			break;

		udelay(10);
	}
	if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
		set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);

	wrt_reg_dword(&reg->ctrl_status,
	    CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);

	udelay(100);

	/* Wait for soft-reset to complete. */
	for (cnt = 0; cnt < 30000; cnt++) {
		if ((rd_reg_dword(&reg->ctrl_status) &
		    CSRX_ISP_SOFT_RESET) == 0)
			break;

		udelay(10);
	}
	if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
		set_bit(ISP_RESET_CMPL, &ha->fw_dump_cap_flags);

	wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_RESET);
	rd_reg_dword(&reg->hccr);		/* PCI Posting. */
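
	/* Wait for the RISC to come ready after reset (mailbox0 returns to zero). */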
	for (cnt = 10000; rd_reg_word(&reg->mailbox0) != 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt)
			udelay(10);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS)
		set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);

	return rval;
}

static int
qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, __be16 *ram,
    uint32_t ram_words, void **nxt)
{
	int rval;
	uint32_t cnt, stat, timer, words, idx;
	uint16_t mb0;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	dma_addr_t dump_dma = ha->gid_list_dma;
	__le16 *dump = (__force __le16 *)ha->gid_list;

	rval = QLA_SUCCESS;
	mb0 = 0;

	WRT_MAILBOX_REG(ha, reg, 0, MBC_DUMP_RISC_RAM_EXTENDED);
	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

	words = qla2x00_gid_list_size(ha) / 2;
	for (cnt = 0; cnt < ram_words && rval == QLA_SUCCESS;
	    cnt += words, addr += words) {
		if (cnt + words > ram_words)
			words = ram_words - cnt;

		WRT_MAILBOX_REG(ha, reg, 1, LSW(addr));
		WRT_MAILBOX_REG(ha, reg, 8, MSW(addr));

		WRT_MAILBOX_REG(ha, reg, 2, MSW(dump_dma));
		WRT_MAILBOX_REG(ha, reg, 3, LSW(dump_dma));
		WRT_MAILBOX_REG(ha, reg, 6, MSW(MSD(dump_dma)));
		WRT_MAILBOX_REG(ha, reg, 7, LSW(MSD(dump_dma)));

		WRT_MAILBOX_REG(ha, reg, 4, words);
		wrt_reg_word(&reg->hccr, HCCR_SET_HOST_INT);

		for (timer = 6000000; timer; timer--) {
			/* Check for pending interrupts. */
			stat = rd_reg_dword(&reg->u.isp2300.host_status);
			if (stat & HSR_RISC_INT) {
				stat &= 0xff;

				if (stat == 0x1 || stat == 0x2) {
					set_bit(MBX_INTERRUPT,
					    &ha->mbx_cmd_flags);

					mb0 = RD_MAILBOX_REG(ha, reg, 0);

					/* Release mailbox registers. */
					wrt_reg_word(&reg->semaphore, 0);
					wrt_reg_word(&reg->hccr,
					    HCCR_CLR_RISC_INT);
					rd_reg_word(&reg->hccr);
					break;
				} else if (stat == 0x10 || stat == 0x11) {
					set_bit(MBX_INTERRUPT,
					    &ha->mbx_cmd_flags);

					mb0 = RD_MAILBOX_REG(ha, reg, 0);

					wrt_reg_word(&reg->hccr,
					    HCCR_CLR_RISC_INT);
					rd_reg_word(&reg->hccr);
					break;
				}

				/* clear this intr; it wasn't a mailbox intr */
				wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
				rd_reg_word(&reg->hccr);
			}
			udelay(5);
		}

		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
			rval = mb0 & MBS_MASK;
			for (idx = 0; idx < words; idx++)
				ram[cnt + idx] =
				    cpu_to_be16(le16_to_cpu(dump[idx]));
		} else {
			rval = QLA_FUNCTION_FAILED;
		}
	}

	*nxt = rval == QLA_SUCCESS ?
	    &ram[cnt] : NULL;
	return rval;
}

static inline void
qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count,
    __be16 *buf)
{
	__le16 __iomem *dmp_reg = &reg->u.isp2300.fb_cmd;

	for ( ; count--; dmp_reg++)
		*buf++ = htons(rd_reg_word(dmp_reg));
}

static inline void *
qla24xx_copy_eft(struct qla_hw_data *ha, void *ptr)
{
	if (!ha->eft)
		return ptr;

	memcpy(ptr, ha->eft, ntohl(ha->fw_dump->eft_size));
	return ptr + ntohl(ha->fw_dump->eft_size);
}

static inline void *
qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
{
	uint32_t cnt;
	__be32 *iter_reg;
	struct qla2xxx_fce_chain *fcec = ptr;

	if (!ha->fce)
		return ptr;

	*last_chain = &fcec->type;
	fcec->type = htonl(DUMP_CHAIN_FCE);
	fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) +
	    fce_calc_size(ha->fce_bufs));
	fcec->size = htonl(fce_calc_size(ha->fce_bufs));
	fcec->addr_l = htonl(LSD(ha->fce_dma));
	fcec->addr_h = htonl(MSD(ha->fce_dma));

	iter_reg = fcec->eregs;
	for (cnt = 0; cnt < 8; cnt++)
		*iter_reg++ = htonl(ha->fce_mb[cnt]);

	memcpy(iter_reg, ha->fce, ntohl(fcec->size));

	return (char *)iter_reg + ntohl(fcec->size);
}

static inline void *
qla25xx_copy_exlogin(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
{
	struct qla2xxx_offld_chain *c = ptr;

	if (!ha->exlogin_buf)
		return ptr;

	*last_chain = &c->type;

	c->type = cpu_to_be32(DUMP_CHAIN_EXLOGIN);
	c->chain_size = cpu_to_be32(sizeof(struct qla2xxx_offld_chain) +
	    ha->exlogin_size);
	c->size = cpu_to_be32(ha->exlogin_size);
	c->addr = cpu_to_be64(ha->exlogin_buf_dma);

	ptr += sizeof(struct qla2xxx_offld_chain);
	memcpy(ptr, ha->exlogin_buf, ha->exlogin_size);

	return (char *)ptr + be32_to_cpu(c->size);
}

static inline void *
qla81xx_copy_exchoffld(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
{
	struct qla2xxx_offld_chain *c = ptr;

	if (!ha->exchoffld_buf)
		return ptr;

	*last_chain = &c->type;

	c->type = cpu_to_be32(DUMP_CHAIN_EXCHG);
	c->chain_size = cpu_to_be32(sizeof(struct qla2xxx_offld_chain) +
	    ha->exchoffld_size);
	c->size = cpu_to_be32(ha->exchoffld_size);
	c->addr = cpu_to_be64(ha->exchoffld_buf_dma);

	ptr += sizeof(struct qla2xxx_offld_chain);
	memcpy(ptr, ha->exchoffld_buf, ha->exchoffld_size);

	return (char *)ptr + be32_to_cpu(c->size);
}

static inline void *
qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
    __be32 **last_chain)
{
	struct qla2xxx_mqueue_chain *q;
	struct qla2xxx_mqueue_header *qh;
	uint32_t num_queues;
	int que;
	struct {
		int length;
		void *ring;
	} aq, *aqp;

	if (!ha->tgt.atio_ring)
		return ptr;

	num_queues = 1;
	aqp = &aq;
	aqp->length = ha->tgt.atio_q_length;
	aqp->ring = ha->tgt.atio_ring;

	for (que = 0; que < num_queues; que++) {
		/* aqp = ha->atio_q_map[que]; */
		q = ptr;
		*last_chain = &q->type;
		q->type = htonl(DUMP_CHAIN_QUEUE);
		q->chain_size = htonl(
			sizeof(struct qla2xxx_mqueue_chain) +
			sizeof(struct qla2xxx_mqueue_header) +
			(aqp->length * sizeof(request_t)));
		ptr += sizeof(struct qla2xxx_mqueue_chain);

		/* Add header. */
		qh = ptr;
		qh->queue = htonl(TYPE_ATIO_QUEUE);
		qh->number = htonl(que);
		qh->size = htonl(aqp->length * sizeof(request_t));
		ptr += sizeof(struct qla2xxx_mqueue_header);

		/* Add data. */
		memcpy(ptr, aqp->ring, aqp->length * sizeof(request_t));

		ptr += aqp->length * sizeof(request_t);
	}

	return ptr;
}

static inline void *
qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
{
	struct qla2xxx_mqueue_chain *q;
	struct qla2xxx_mqueue_header *qh;
	struct req_que *req;
	struct rsp_que *rsp;
	int que;

	if (!ha->mqenable)
		return ptr;

	/* Request queues */
	for (que = 1; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req)
			break;

		/* Add chain. */
		q = ptr;
		*last_chain = &q->type;
		q->type = htonl(DUMP_CHAIN_QUEUE);
		q->chain_size = htonl(
			sizeof(struct qla2xxx_mqueue_chain) +
			sizeof(struct qla2xxx_mqueue_header) +
			(req->length * sizeof(request_t)));
		ptr += sizeof(struct qla2xxx_mqueue_chain);

		/* Add header. */
		qh = ptr;
		qh->queue = htonl(TYPE_REQUEST_QUEUE);
		qh->number = htonl(que);
		qh->size = htonl(req->length * sizeof(request_t));
		ptr += sizeof(struct qla2xxx_mqueue_header);

		/* Add data. */
		memcpy(ptr, req->ring, req->length * sizeof(request_t));
		ptr += req->length * sizeof(request_t);
	}

	/* Response queues */
	for (que = 1; que < ha->max_rsp_queues; que++) {
		rsp = ha->rsp_q_map[que];
		if (!rsp)
			break;

		/* Add chain. */
		q = ptr;
		*last_chain = &q->type;
		q->type = htonl(DUMP_CHAIN_QUEUE);
		q->chain_size = htonl(
			sizeof(struct qla2xxx_mqueue_chain) +
			sizeof(struct qla2xxx_mqueue_header) +
			(rsp->length * sizeof(response_t)));
		ptr += sizeof(struct qla2xxx_mqueue_chain);

		/* Add header. */
		qh = ptr;
		qh->queue = htonl(TYPE_RESPONSE_QUEUE);
		qh->number = htonl(que);
		qh->size = htonl(rsp->length * sizeof(response_t));
		ptr += sizeof(struct qla2xxx_mqueue_header);

		/* Add data. */
		memcpy(ptr, rsp->ring, rsp->length * sizeof(response_t));
		ptr += rsp->length * sizeof(response_t);
	}

	return ptr;
}

static inline void *
qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
{
	uint32_t cnt, que_idx;
	uint8_t que_cnt;
	struct qla2xxx_mq_chain *mq = ptr;
	device_reg_t *reg;

	if (!ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
	    IS_QLA28XX(ha))
		return ptr;

	mq = ptr;
	*last_chain = &mq->type;
	mq->type = htonl(DUMP_CHAIN_MQ);
	mq->chain_size = htonl(sizeof(struct qla2xxx_mq_chain));

	que_cnt = ha->max_req_queues > ha->max_rsp_queues ?
	    ha->max_req_queues : ha->max_rsp_queues;
	mq->count = htonl(que_cnt);
	for (cnt = 0; cnt < que_cnt; cnt++) {
		reg = ISP_QUE_REG(ha, cnt);
		que_idx = cnt * 4;
		mq->qregs[que_idx] =
		    htonl(rd_reg_dword(&reg->isp25mq.req_q_in));
		mq->qregs[que_idx+1] =
		    htonl(rd_reg_dword(&reg->isp25mq.req_q_out));
		mq->qregs[que_idx+2] =
		    htonl(rd_reg_dword(&reg->isp25mq.rsp_q_in));
		mq->qregs[que_idx+3] =
		    htonl(rd_reg_dword(&reg->isp25mq.rsp_q_out));
	}

	return ptr + sizeof(struct qla2xxx_mq_chain);
}

void
qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
{
	struct qla_hw_data *ha = vha->hw;

	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0xd000,
		    "Failed to dump firmware (%x), dump status flags (0x%lx).\n",
		    rval, ha->fw_dump_cap_flags);
		ha->fw_dumped = false;
	} else {
		ql_log(ql_log_info, vha, 0xd001,
		    "Firmware dump saved to temp buffer (%ld/%p), dump status flags (0x%lx).\n",
		    vha->host_no, ha->fw_dump, ha->fw_dump_cap_flags);
		ha->fw_dumped = true;
		qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
	}
}

void qla2xxx_dump_fw(scsi_qla_host_t *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	vha->hw->isp_ops->fw_dump(vha);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
}

/**
 * qla2300_fw_dump() - Dumps binary data from the 2300 firmware.
 * @vha: HA context
 */
void
qla2300_fw_dump(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	__le16 __iomem *dmp_reg;
	struct qla2300_fw_dump *fw;
	void *nxt;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

	lockdep_assert_held(&ha->hardware_lock);

	if (!ha->fw_dump) {
		ql_log(ql_log_warn, vha, 0xd002,
		    "No buffer available for dump.\n");
		return;
	}

	if (ha->fw_dumped) {
		ql_log(ql_log_warn, vha, 0xd003,
		    "Firmware has been previously dumped (%p) "
		    "-- ignoring request.\n",
		    ha->fw_dump);
		return;
	}
	fw = &ha->fw_dump->isp.isp23;
	qla2xxx_prep_dump(ha, ha->fw_dump);

	rval = QLA_SUCCESS;
	fw->hccr = htons(rd_reg_word(&reg->hccr));

	/* Pause RISC. */
	wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
	if (IS_QLA2300(ha)) {
		for (cnt = 30000;
		    (rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
		    rval == QLA_SUCCESS; cnt--) {
			if (cnt)
				udelay(100);
			else
				rval = QLA_FUNCTION_TIMEOUT;
		}
	} else {
		rd_reg_word(&reg->hccr);	/* PCI Posting. */
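		/* Other ISP23xx parts: no pause handshake is polled; just settle. */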
		udelay(10);
	}

	if (rval == QLA_SUCCESS) {
		dmp_reg = &reg->flash_address;
		for (cnt = 0; cnt < ARRAY_SIZE(fw->pbiu_reg); cnt++, dmp_reg++)
			fw->pbiu_reg[cnt] = htons(rd_reg_word(dmp_reg));

		dmp_reg = &reg->u.isp2300.req_q_in;
		for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_host_reg);
		    cnt++, dmp_reg++)
			fw->risc_host_reg[cnt] = htons(rd_reg_word(dmp_reg));

		dmp_reg = &reg->u.isp2300.mailbox0;
		for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg);
		    cnt++, dmp_reg++)
			fw->mailbox_reg[cnt] = htons(rd_reg_word(dmp_reg));

		wrt_reg_word(&reg->ctrl_status, 0x40);
		qla2xxx_read_window(reg, 32, fw->resp_dma_reg);

		wrt_reg_word(&reg->ctrl_status, 0x50);
		qla2xxx_read_window(reg, 48, fw->dma_reg);

		wrt_reg_word(&reg->ctrl_status, 0x00);
		dmp_reg = &reg->risc_hw;
		for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_hdw_reg);
		    cnt++, dmp_reg++)
			fw->risc_hdw_reg[cnt] = htons(rd_reg_word(dmp_reg));

		wrt_reg_word(&reg->pcr, 0x2000);
		qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);

		wrt_reg_word(&reg->pcr, 0x2200);
		qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);

		wrt_reg_word(&reg->pcr, 0x2400);
		qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);

		wrt_reg_word(&reg->pcr, 0x2600);
		qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);

		wrt_reg_word(&reg->pcr, 0x2800);
		qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);

		wrt_reg_word(&reg->pcr, 0x2A00);
		qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);

		wrt_reg_word(&reg->pcr, 0x2C00);
		qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);

		wrt_reg_word(&reg->pcr, 0x2E00);
		qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);

		wrt_reg_word(&reg->ctrl_status, 0x10);
		qla2xxx_read_window(reg, 64, fw->frame_buf_hdw_reg);

		wrt_reg_word(&reg->ctrl_status, 0x20);
		qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);

		wrt_reg_word(&reg->ctrl_status, 0x30);
		qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);

		/* Reset RISC. */
		wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
		for (cnt = 0; cnt < 30000; cnt++) {
			if ((rd_reg_word(&reg->ctrl_status) &
			    CSR_ISP_SOFT_RESET) == 0)
				break;

			udelay(10);
		}
	}

	if (!IS_QLA2300(ha)) {
		for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
		    rval == QLA_SUCCESS; cnt--) {
			if (cnt)
				udelay(100);
			else
				rval = QLA_FUNCTION_TIMEOUT;
		}
	}

	/* Get RISC SRAM. */
	if (rval == QLA_SUCCESS)
		rval = qla2xxx_dump_ram(ha, 0x800, fw->risc_ram,
		    ARRAY_SIZE(fw->risc_ram), &nxt);

	/* Get stack SRAM. */
	if (rval == QLA_SUCCESS)
		rval = qla2xxx_dump_ram(ha, 0x10000, fw->stack_ram,
		    ARRAY_SIZE(fw->stack_ram), &nxt);

	/* Get data SRAM. */
	if (rval == QLA_SUCCESS)
		rval = qla2xxx_dump_ram(ha, 0x11000, fw->data_ram,
		    ha->fw_memory_size - 0x11000 + 1, &nxt);

	if (rval == QLA_SUCCESS)
		qla2xxx_copy_queues(ha, nxt);

	qla2xxx_dump_post_process(base_vha, rval);
}

/**
 * qla2100_fw_dump() - Dumps binary data from the 2100/2200 firmware.
 * @vha: HA context
 */
void
qla2100_fw_dump(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t cnt, timer;
	uint16_t risc_address = 0;
	uint16_t mb0 = 0, mb2 = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	__le16 __iomem *dmp_reg;
	struct qla2100_fw_dump *fw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

	lockdep_assert_held(&ha->hardware_lock);

	if (!ha->fw_dump) {
		ql_log(ql_log_warn, vha, 0xd004,
		    "No buffer available for dump.\n");
		return;
	}

	if (ha->fw_dumped) {
		ql_log(ql_log_warn, vha, 0xd005,
		    "Firmware has been previously dumped (%p) "
		    "-- ignoring request.\n",
		    ha->fw_dump);
		return;
	}
	fw = &ha->fw_dump->isp.isp21;
	qla2xxx_prep_dump(ha, ha->fw_dump);

	rval = QLA_SUCCESS;
	fw->hccr = htons(rd_reg_word(&reg->hccr));

	/* Pause RISC. */
	wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
	for (cnt = 30000; (rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt)
			udelay(100);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS) {
		dmp_reg = &reg->flash_address;
		for (cnt = 0; cnt < ARRAY_SIZE(fw->pbiu_reg); cnt++, dmp_reg++)
			fw->pbiu_reg[cnt] = htons(rd_reg_word(dmp_reg));

		dmp_reg = &reg->u.isp2100.mailbox0;
		for (cnt = 0; cnt < ha->mbx_count; cnt++, dmp_reg++) {
			if (cnt == 8)
				dmp_reg = &reg->u_end.isp2200.mailbox8;

			fw->mailbox_reg[cnt] = htons(rd_reg_word(dmp_reg));
		}

		dmp_reg = &reg->u.isp2100.unused_2[0];
		for (cnt = 0; cnt < ARRAY_SIZE(fw->dma_reg); cnt++, dmp_reg++)
			fw->dma_reg[cnt] = htons(rd_reg_word(dmp_reg));

		wrt_reg_word(&reg->ctrl_status, 0x00);
		dmp_reg = &reg->risc_hw;
		for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_hdw_reg); cnt++, dmp_reg++)
			fw->risc_hdw_reg[cnt] = htons(rd_reg_word(dmp_reg));

		wrt_reg_word(&reg->pcr, 0x2000);
		qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);

		wrt_reg_word(&reg->pcr, 0x2100);
		qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);

		wrt_reg_word(&reg->pcr, 0x2200);
		qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);

		wrt_reg_word(&reg->pcr, 0x2300);
		qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);

		wrt_reg_word(&reg->pcr, 0x2400);
		qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);

		wrt_reg_word(&reg->pcr, 0x2500);
		qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);

		wrt_reg_word(&reg->pcr, 0x2600);
		qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);

		wrt_reg_word(&reg->pcr, 0x2700);
		qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);

		wrt_reg_word(&reg->ctrl_status, 0x10);
		qla2xxx_read_window(reg, 16, fw->frame_buf_hdw_reg);

		wrt_reg_word(&reg->ctrl_status, 0x20);
		qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);

		wrt_reg_word(&reg->ctrl_status, 0x30);
		qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);

		/* Reset the ISP. */
		wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
	}

	for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt)
			udelay(100);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}

	/* Pause RISC. */
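	/*
	 * Re-pause so the memory configuration/timing register (mctr) can be
	 * programmed before SRAM is read out (applies to ISP2200, or ISP2100
	 * with mctr bits 0/1 set).
	 */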
	if (rval == QLA_SUCCESS && (IS_QLA2200(ha) || (IS_QLA2100(ha) &&
	    (rd_reg_word(&reg->mctr) & (BIT_1 | BIT_0)) != 0))) {

		wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
		for (cnt = 30000;
		    (rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
		    rval == QLA_SUCCESS; cnt--) {
			if (cnt)
				udelay(100);
			else
				rval = QLA_FUNCTION_TIMEOUT;
		}
		if (rval == QLA_SUCCESS) {
			/* Set memory configuration and timing. */
			if (IS_QLA2100(ha))
				wrt_reg_word(&reg->mctr, 0xf1);
			else
				wrt_reg_word(&reg->mctr, 0xf2);
			rd_reg_word(&reg->mctr);	/* PCI Posting. */

			/* Release RISC. */
			wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
		}
	}

	if (rval == QLA_SUCCESS) {
		/* Get RISC SRAM. */
		risc_address = 0x1000;
		WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_WORD);
		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
	}
	for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_ram) && rval == QLA_SUCCESS;
	    cnt++, risc_address++) {
		WRT_MAILBOX_REG(ha, reg, 1, risc_address);
		wrt_reg_word(&reg->hccr, HCCR_SET_HOST_INT);

		for (timer = 6000000; timer != 0; timer--) {
			/* Check for pending interrupts. */
			if (rd_reg_word(&reg->istatus) & ISR_RISC_INT) {
				if (rd_reg_word(&reg->semaphore) & BIT_0) {
					set_bit(MBX_INTERRUPT,
					    &ha->mbx_cmd_flags);

					mb0 = RD_MAILBOX_REG(ha, reg, 0);
					mb2 = RD_MAILBOX_REG(ha, reg, 2);

					wrt_reg_word(&reg->semaphore, 0);
					wrt_reg_word(&reg->hccr,
					    HCCR_CLR_RISC_INT);
					rd_reg_word(&reg->hccr);
					break;
				}
				wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
				rd_reg_word(&reg->hccr);
			}
			udelay(5);
		}

		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
			rval = mb0 & MBS_MASK;
			fw->risc_ram[cnt] = htons(mb2);
		} else {
			rval = QLA_FUNCTION_FAILED;
		}
	}

	if (rval == QLA_SUCCESS)
		qla2xxx_copy_queues(ha, &fw->queue_dump[0]);

	qla2xxx_dump_post_process(base_vha, rval);
}

/**
 * qla24xx_fw_dump() - Dumps binary data from the 24xx firmware.
 * @vha: HA context
 */
void
qla24xx_fw_dump(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	__le32 __iomem *dmp_reg;
	__be32 *iter_reg;
	__le16 __iomem *mbx_reg;
	struct qla24xx_fw_dump *fw;
	void *nxt;
	void *nxt_chain;
	__be32 *last_chain = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

	lockdep_assert_held(&ha->hardware_lock);

	if (IS_P3P_TYPE(ha))
		return;

	ha->fw_dump_cap_flags = 0;

	if (!ha->fw_dump) {
		ql_log(ql_log_warn, vha, 0xd006,
		    "No buffer available for dump.\n");
		return;
	}

	if (ha->fw_dumped) {
		ql_log(ql_log_warn, vha, 0xd007,
		    "Firmware has been previously dumped (%p) "
		    "-- ignoring request.\n",
		    ha->fw_dump);
		return;
	}
	QLA_FW_STOPPED(ha);
	fw = &ha->fw_dump->isp.isp24;
	qla2xxx_prep_dump(ha, ha->fw_dump);

	fw->host_status = htonl(rd_reg_dword(&reg->host_status));

	/*
	 * Pause RISC. No need to track timeout, as resetting the chip
	 * is the right approach in case of pause timeout.
	 */
	qla24xx_pause_risc(reg, ha);

	/* Host interface registers. */
	dmp_reg = &reg->flash_addr;
	for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++)
		fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg));

	/* Disable interrupts. */
	wrt_reg_dword(&reg->ictrl, 0);
	rd_reg_dword(&reg->ictrl);

	/* Shadow registers. */
	wrt_reg_dword(&reg->iobase_addr, 0x0F70);
	rd_reg_dword(&reg->iobase_addr);
	wrt_reg_dword(&reg->iobase_select, 0xB0000000);
	fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0100000);
	fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0200000);
	fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0300000);
	fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0400000);
	fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0500000);
	fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0600000);
	fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata));

	/* Mailbox registers. */
	mbx_reg = &reg->mailbox0;
	for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++)
		fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg));

	/* Transfer sequence registers. */
	iter_reg = fw->xseq_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
	qla24xx_read_window(reg, 0xBF70, 16, iter_reg);

	qla24xx_read_window(reg, 0xBFE0, 16, fw->xseq_0_reg);
	qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);

	/* Receive sequence registers. */
	iter_reg = fw->rseq_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
	qla24xx_read_window(reg, 0xFF70, 16, iter_reg);

	qla24xx_read_window(reg, 0xFFD0, 16, fw->rseq_0_reg);
	qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
	qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);

	/* Command DMA registers. */
	qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);

	/* Queues. */
	iter_reg = fw->req0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
		*iter_reg++ = htonl(rd_reg_dword(dmp_reg));

	iter_reg = fw->resp0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
		*iter_reg++ = htonl(rd_reg_dword(dmp_reg));

	iter_reg = fw->req1_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
		*iter_reg++ = htonl(rd_reg_dword(dmp_reg));

	/* Transmit DMA registers. */
	iter_reg = fw->xmt0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
	qla24xx_read_window(reg, 0x7610, 16, iter_reg);

	iter_reg = fw->xmt1_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
	qla24xx_read_window(reg, 0x7630, 16, iter_reg);

	iter_reg = fw->xmt2_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
	qla24xx_read_window(reg, 0x7650, 16, iter_reg);

	iter_reg = fw->xmt3_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
	qla24xx_read_window(reg, 0x7670, 16, iter_reg);

	iter_reg = fw->xmt4_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
	qla24xx_read_window(reg, 0x7690, 16, iter_reg);

	qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);

	/* Receive DMA registers. */
	iter_reg = fw->rcvt0_data_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
	qla24xx_read_window(reg, 0x7710, 16, iter_reg);

	iter_reg = fw->rcvt1_data_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
	qla24xx_read_window(reg, 0x7730, 16, iter_reg);

	/* RISC registers. */
	iter_reg = fw->risc_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
	qla24xx_read_window(reg, 0x0F70, 16, iter_reg);

	/* Local memory controller registers. */
	iter_reg = fw->lmc_reg;
	iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
	qla24xx_read_window(reg, 0x3060, 16, iter_reg);

	/* Fibre Protocol Module registers. */
	iter_reg = fw->fpm_hdw_reg;
	iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
	qla24xx_read_window(reg, 0x40B0, 16, iter_reg);

	/* Frame Buffer registers. */
	iter_reg = fw->fb_hdw_reg;
	iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
	qla24xx_read_window(reg, 0x61B0, 16, iter_reg);

	rval = qla24xx_soft_reset(ha);
	if (rval != QLA_SUCCESS)
		goto qla24xx_fw_dump_failed_0;

	rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
	    &nxt);
	if (rval != QLA_SUCCESS)
		goto qla24xx_fw_dump_failed_0;

	nxt = qla2xxx_copy_queues(ha, nxt);

	qla24xx_copy_eft(ha, nxt);

	nxt_chain = (void *)ha->fw_dump + ha->chain_offset;
	nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
	if (last_chain) {
		ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
		*last_chain |= htonl(DUMP_CHAIN_LAST);
	}

	/* Adjust valid length. */
	ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);

qla24xx_fw_dump_failed_0:
	qla2xxx_dump_post_process(base_vha, rval);
}

/**
 * qla25xx_fw_dump() - Dumps binary data from the 25xx firmware.
 * @vha: HA context
 */
void
qla25xx_fw_dump(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	__le32 __iomem *dmp_reg;
	__be32 *iter_reg;
	__le16 __iomem *mbx_reg;
	struct qla25xx_fw_dump *fw;
	void *nxt, *nxt_chain;
	__be32 *last_chain = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

	lockdep_assert_held(&ha->hardware_lock);

	ha->fw_dump_cap_flags = 0;

	if (!ha->fw_dump) {
		ql_log(ql_log_warn, vha, 0xd008,
		    "No buffer available for dump.\n");
		return;
	}

	if (ha->fw_dumped) {
		ql_log(ql_log_warn, vha, 0xd009,
		    "Firmware has been previously dumped (%p) "
		    "-- ignoring request.\n",
		    ha->fw_dump);
		return;
	}
	QLA_FW_STOPPED(ha);
	fw = &ha->fw_dump->isp.isp25;
	qla2xxx_prep_dump(ha, ha->fw_dump);
	ha->fw_dump->version = htonl(2);

	fw->host_status = htonl(rd_reg_dword(&reg->host_status));

	/*
	 * Pause RISC. No need to track timeout, as resetting the chip
	 * is the right approach in case of pause timeout.
	 */
	qla24xx_pause_risc(reg, ha);

	/* Host/Risc registers. */
	iter_reg = fw->host_risc_reg;
	iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
	qla24xx_read_window(reg, 0x7010, 16, iter_reg);

	/* PCIe registers. */
	wrt_reg_dword(&reg->iobase_addr, 0x7C00);
	rd_reg_dword(&reg->iobase_addr);
	wrt_reg_dword(&reg->iobase_window, 0x01);
	dmp_reg = &reg->iobase_c4;
	fw->pcie_regs[0] = htonl(rd_reg_dword(dmp_reg));
	dmp_reg++;
	fw->pcie_regs[1] = htonl(rd_reg_dword(dmp_reg));
	dmp_reg++;
	fw->pcie_regs[2] = htonl(rd_reg_dword(dmp_reg));
	fw->pcie_regs[3] = htonl(rd_reg_dword(&reg->iobase_window));

	wrt_reg_dword(&reg->iobase_window, 0x00);
	rd_reg_dword(&reg->iobase_window);

	/* Host interface registers. */
	dmp_reg = &reg->flash_addr;
	for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++)
		fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg));

	/* Disable interrupts. */
	wrt_reg_dword(&reg->ictrl, 0);
	rd_reg_dword(&reg->ictrl);

	/* Shadow registers. */
	wrt_reg_dword(&reg->iobase_addr, 0x0F70);
	rd_reg_dword(&reg->iobase_addr);
	wrt_reg_dword(&reg->iobase_select, 0xB0000000);
	fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0100000);
	fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0200000);
	fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0300000);
	fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0400000);
	fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0500000);
	fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0600000);
	fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0700000);
	fw->shadow_reg[7] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0800000);
	fw->shadow_reg[8] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0900000);
	fw->shadow_reg[9] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0A00000);
	fw->shadow_reg[10] = htonl(rd_reg_dword(&reg->iobase_sdata));

	/* RISC I/O register. */
	wrt_reg_dword(&reg->iobase_addr, 0x0010);
	fw->risc_io_reg = htonl(rd_reg_dword(&reg->iobase_window));

	/* Mailbox registers. */
	mbx_reg = &reg->mailbox0;
	for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++)
		fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg));

	/* Transfer sequence registers. */
	iter_reg = fw->xseq_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
	qla24xx_read_window(reg, 0xBF70, 16, iter_reg);

	iter_reg = fw->xseq_0_reg;
	iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
	qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);

	qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);

	/* Receive sequence registers. */
	iter_reg = fw->rseq_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
	qla24xx_read_window(reg, 0xFF70, 16, iter_reg);

	iter_reg = fw->rseq_0_reg;
	iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
	qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);

	qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
	qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);

	/* Auxiliary sequence registers. */
	iter_reg = fw->aseq_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
	qla24xx_read_window(reg, 0xB070, 16, iter_reg);

	iter_reg = fw->aseq_0_reg;
	iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
	qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);

	qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
	qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);

	/* Command DMA registers. */
	qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);

	/* Queues. */
	iter_reg = fw->req0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
		*iter_reg++ = htonl(rd_reg_dword(dmp_reg));

	iter_reg = fw->resp0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
		*iter_reg++ = htonl(rd_reg_dword(dmp_reg));

	iter_reg = fw->req1_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
		*iter_reg++ = htonl(rd_reg_dword(dmp_reg));

	/* Transmit DMA registers. */
	iter_reg = fw->xmt0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
	qla24xx_read_window(reg, 0x7610, 16, iter_reg);

	iter_reg = fw->xmt1_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
	qla24xx_read_window(reg, 0x7630, 16, iter_reg);

	iter_reg = fw->xmt2_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
	qla24xx_read_window(reg, 0x7650, 16, iter_reg);

	iter_reg = fw->xmt3_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
	qla24xx_read_window(reg, 0x7670, 16, iter_reg);

	iter_reg = fw->xmt4_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
	qla24xx_read_window(reg, 0x7690, 16, iter_reg);

	qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);

	/* Receive DMA registers. */
	iter_reg = fw->rcvt0_data_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
	qla24xx_read_window(reg, 0x7710, 16, iter_reg);

	iter_reg = fw->rcvt1_data_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
	qla24xx_read_window(reg, 0x7730, 16, iter_reg);

	/* RISC registers. */
	iter_reg = fw->risc_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
	qla24xx_read_window(reg, 0x0F70, 16, iter_reg);

	/* Local memory controller registers. */
	iter_reg = fw->lmc_reg;
	iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
	qla24xx_read_window(reg, 0x3070, 16, iter_reg);

	/* Fibre Protocol Module registers. */
	iter_reg = fw->fpm_hdw_reg;
	iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
	qla24xx_read_window(reg, 0x40B0, 16, iter_reg);

	/* Frame Buffer registers. */
	iter_reg = fw->fb_hdw_reg;
	iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
	qla24xx_read_window(reg, 0x6F00, 16, iter_reg);

	/* Multi queue registers */
	nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
	    &last_chain);

	rval = qla24xx_soft_reset(ha);
	if (rval != QLA_SUCCESS)
		goto qla25xx_fw_dump_failed_0;

	rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
	    &nxt);
	if (rval != QLA_SUCCESS)
		goto qla25xx_fw_dump_failed_0;

	nxt = qla2xxx_copy_queues(ha, nxt);

	qla24xx_copy_eft(ha, nxt);

	/* Chain entries -- started with MQ. */
	nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
	nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
	nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
	nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain);
	if (last_chain) {
		ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
		*last_chain |= htonl(DUMP_CHAIN_LAST);
	}

	/* Adjust valid length. */
	ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);

qla25xx_fw_dump_failed_0:
	qla2xxx_dump_post_process(base_vha, rval);
}

/**
 * qla81xx_fw_dump() - Dumps binary data from the 81xx firmware.
 * @vha: HA context
 */
void
qla81xx_fw_dump(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	__le32 __iomem *dmp_reg;
	__be32 *iter_reg;
	__le16 __iomem *mbx_reg;
	struct qla81xx_fw_dump *fw;
	void *nxt, *nxt_chain;
	__be32 *last_chain = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

	lockdep_assert_held(&ha->hardware_lock);

	ha->fw_dump_cap_flags = 0;

	if (!ha->fw_dump) {
		ql_log(ql_log_warn, vha, 0xd00a,
		    "No buffer available for dump.\n");
		return;
	}

	if (ha->fw_dumped) {
		ql_log(ql_log_warn, vha, 0xd00b,
		    "Firmware has been previously dumped (%p) "
		    "-- ignoring request.\n",
		    ha->fw_dump);
		return;
	}
	fw = &ha->fw_dump->isp.isp81;
	qla2xxx_prep_dump(ha, ha->fw_dump);

	fw->host_status = htonl(rd_reg_dword(&reg->host_status));

	/*
	 * Pause RISC. No need to track timeout, as resetting the chip
	 * is the right approach in case of pause timeout.
	 */
	qla24xx_pause_risc(reg, ha);

	/* Host/Risc registers. */
	iter_reg = fw->host_risc_reg;
	iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
	qla24xx_read_window(reg, 0x7010, 16, iter_reg);

	/* PCIe registers. */
	wrt_reg_dword(&reg->iobase_addr, 0x7C00);
	rd_reg_dword(&reg->iobase_addr);
	wrt_reg_dword(&reg->iobase_window, 0x01);
	dmp_reg = &reg->iobase_c4;
	fw->pcie_regs[0] = htonl(rd_reg_dword(dmp_reg));
	dmp_reg++;
	fw->pcie_regs[1] = htonl(rd_reg_dword(dmp_reg));
	dmp_reg++;
	fw->pcie_regs[2] = htonl(rd_reg_dword(dmp_reg));
	fw->pcie_regs[3] = htonl(rd_reg_dword(&reg->iobase_window));

	wrt_reg_dword(&reg->iobase_window, 0x00);
	rd_reg_dword(&reg->iobase_window);

	/* Host interface registers. */
	dmp_reg = &reg->flash_addr;
	for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++)
		fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg));

	/* Disable interrupts. */
	wrt_reg_dword(&reg->ictrl, 0);
	rd_reg_dword(&reg->ictrl);

	/* Shadow registers. */
	wrt_reg_dword(&reg->iobase_addr, 0x0F70);
	rd_reg_dword(&reg->iobase_addr);
	wrt_reg_dword(&reg->iobase_select, 0xB0000000);
	fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0100000);
	fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0200000);
	fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0300000);
	fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0400000);
	fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0500000);
	fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0600000);
	fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0700000);
	fw->shadow_reg[7] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0800000);
	fw->shadow_reg[8] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0900000);
	fw->shadow_reg[9] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0A00000);
	fw->shadow_reg[10] = htonl(rd_reg_dword(&reg->iobase_sdata));

	/* RISC I/O register. */
	wrt_reg_dword(&reg->iobase_addr, 0x0010);
	fw->risc_io_reg = htonl(rd_reg_dword(&reg->iobase_window));

	/* Mailbox registers. */
	mbx_reg = &reg->mailbox0;
	for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++)
		fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg));

	/* Transfer sequence registers. */
	iter_reg = fw->xseq_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
	qla24xx_read_window(reg, 0xBF70, 16, iter_reg);

	iter_reg = fw->xseq_0_reg;
	iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
	qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);

	qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);

	/* Receive sequence registers. */
*/ 1760 iter_reg = fw->rseq_gp_reg; 1761 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg); 1762 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg); 1763 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg); 1764 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg); 1765 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg); 1766 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg); 1767 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg); 1768 qla24xx_read_window(reg, 0xFF70, 16, iter_reg); 1769 1770 iter_reg = fw->rseq_0_reg; 1771 iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg); 1772 qla24xx_read_window(reg, 0xFFD0, 16, iter_reg); 1773 1774 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg); 1775 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg); 1776 1777 /* Auxiliary sequence registers. */ 1778 iter_reg = fw->aseq_gp_reg; 1779 iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg); 1780 iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg); 1781 iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg); 1782 iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg); 1783 iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg); 1784 iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg); 1785 iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg); 1786 qla24xx_read_window(reg, 0xB070, 16, iter_reg); 1787 1788 iter_reg = fw->aseq_0_reg; 1789 iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg); 1790 qla24xx_read_window(reg, 0xB0D0, 16, iter_reg); 1791 1792 qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg); 1793 qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg); 1794 1795 /* Command DMA registers. */ 1796 qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg); 1797 1798 /* Queues. */ 1799 iter_reg = fw->req0_dma_reg; 1800 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg); 1801 dmp_reg = ®->iobase_q; 1802 for (cnt = 0; cnt < 7; cnt++, dmp_reg++) 1803 *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); 1804 1805 iter_reg = fw->resp0_dma_reg; 1806 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg); 1807 dmp_reg = ®->iobase_q; 1808 for (cnt = 0; cnt < 7; cnt++, dmp_reg++) 1809 *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); 1810 1811 iter_reg = fw->req1_dma_reg; 1812 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg); 1813 dmp_reg = ®->iobase_q; 1814 for (cnt = 0; cnt < 7; cnt++, dmp_reg++) 1815 *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); 1816 1817 /* Transmit DMA registers. */ 1818 iter_reg = fw->xmt0_dma_reg; 1819 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg); 1820 qla24xx_read_window(reg, 0x7610, 16, iter_reg); 1821 1822 iter_reg = fw->xmt1_dma_reg; 1823 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg); 1824 qla24xx_read_window(reg, 0x7630, 16, iter_reg); 1825 1826 iter_reg = fw->xmt2_dma_reg; 1827 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg); 1828 qla24xx_read_window(reg, 0x7650, 16, iter_reg); 1829 1830 iter_reg = fw->xmt3_dma_reg; 1831 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg); 1832 qla24xx_read_window(reg, 0x7670, 16, iter_reg); 1833 1834 iter_reg = fw->xmt4_dma_reg; 1835 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg); 1836 qla24xx_read_window(reg, 0x7690, 16, iter_reg); 1837 1838 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg); 1839 1840 /* Receive DMA registers. 
*/ 1841 iter_reg = fw->rcvt0_data_dma_reg; 1842 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg); 1843 qla24xx_read_window(reg, 0x7710, 16, iter_reg); 1844 1845 iter_reg = fw->rcvt1_data_dma_reg; 1846 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg); 1847 qla24xx_read_window(reg, 0x7730, 16, iter_reg); 1848 1849 /* RISC registers. */ 1850 iter_reg = fw->risc_gp_reg; 1851 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg); 1852 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg); 1853 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg); 1854 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg); 1855 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg); 1856 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg); 1857 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg); 1858 qla24xx_read_window(reg, 0x0F70, 16, iter_reg); 1859 1860 /* Local memory controller registers. */ 1861 iter_reg = fw->lmc_reg; 1862 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg); 1863 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg); 1864 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg); 1865 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg); 1866 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg); 1867 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg); 1868 iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg); 1869 qla24xx_read_window(reg, 0x3070, 16, iter_reg); 1870 1871 /* Fibre Protocol Module registers. */ 1872 iter_reg = fw->fpm_hdw_reg; 1873 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg); 1874 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg); 1875 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg); 1876 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg); 1877 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg); 1878 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg); 1879 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg); 1880 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg); 1881 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg); 1882 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg); 1883 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg); 1884 iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg); 1885 iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg); 1886 qla24xx_read_window(reg, 0x40D0, 16, iter_reg); 1887 1888 /* Frame Buffer registers. 
*/ 1889 iter_reg = fw->fb_hdw_reg; 1890 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg); 1891 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg); 1892 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg); 1893 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg); 1894 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg); 1895 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg); 1896 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg); 1897 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg); 1898 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg); 1899 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg); 1900 iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg); 1901 iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg); 1902 qla24xx_read_window(reg, 0x6F00, 16, iter_reg); 1903 1904 /* Multi queue registers */ 1905 nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset, 1906 &last_chain); 1907 1908 rval = qla24xx_soft_reset(ha); 1909 if (rval != QLA_SUCCESS) 1910 goto qla81xx_fw_dump_failed_0; 1911 1912 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram), 1913 &nxt); 1914 if (rval != QLA_SUCCESS) 1915 goto qla81xx_fw_dump_failed_0; 1916 1917 nxt = qla2xxx_copy_queues(ha, nxt); 1918 1919 qla24xx_copy_eft(ha, nxt); 1920 1921 /* Chain entries -- started with MQ. */ 1922 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain); 1923 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain); 1924 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain); 1925 nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain); 1926 nxt_chain = qla81xx_copy_exchoffld(ha, nxt_chain, &last_chain); 1927 if (last_chain) { 1928 ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT); 1929 *last_chain |= htonl(DUMP_CHAIN_LAST); 1930 } 1931 1932 /* Adjust valid length. */ 1933 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump); 1934 1935 qla81xx_fw_dump_failed_0: 1936 qla2xxx_dump_post_process(base_vha, rval); 1937 } 1938 1939 void 1940 qla83xx_fw_dump(scsi_qla_host_t *vha) 1941 { 1942 int rval; 1943 uint32_t cnt; 1944 struct qla_hw_data *ha = vha->hw; 1945 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1946 __le32 __iomem *dmp_reg; 1947 __be32 *iter_reg; 1948 __le16 __iomem *mbx_reg; 1949 struct qla83xx_fw_dump *fw; 1950 void *nxt, *nxt_chain; 1951 __be32 *last_chain = NULL; 1952 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 1953 1954 lockdep_assert_held(&ha->hardware_lock); 1955 1956 ha->fw_dump_cap_flags = 0; 1957 1958 if (!ha->fw_dump) { 1959 ql_log(ql_log_warn, vha, 0xd00c, 1960 "No buffer available for dump!!!\n"); 1961 return; 1962 } 1963 1964 if (ha->fw_dumped) { 1965 ql_log(ql_log_warn, vha, 0xd00d, 1966 "Firmware has been previously dumped (%p) -- ignoring " 1967 "request...\n", ha->fw_dump); 1968 return; 1969 } 1970 QLA_FW_STOPPED(ha); 1971 fw = &ha->fw_dump->isp.isp83; 1972 qla2xxx_prep_dump(ha, ha->fw_dump); 1973 1974 fw->host_status = htonl(rd_reg_dword(®->host_status)); 1975 1976 /* 1977 * Pause RISC. 
No need to track timeout, as resetting the chip 1978 * is the right approach in case of a pause timeout. 1979 */ 1980 qla24xx_pause_risc(reg, ha); 1981 1982 wrt_reg_dword(&reg->iobase_addr, 0x6000); 1983 dmp_reg = &reg->iobase_window; 1984 rd_reg_dword(dmp_reg); 1985 wrt_reg_dword(dmp_reg, 0); 1986 1987 dmp_reg = &reg->unused_4_1[0]; 1988 rd_reg_dword(dmp_reg); 1989 wrt_reg_dword(dmp_reg, 0); 1990 1991 wrt_reg_dword(&reg->iobase_addr, 0x6010); 1992 dmp_reg = &reg->unused_4_1[2]; 1993 rd_reg_dword(dmp_reg); 1994 wrt_reg_dword(dmp_reg, 0); 1995 1996 /* select PCR and disable ecc checking and correction */ 1997 wrt_reg_dword(&reg->iobase_addr, 0x0F70); 1998 rd_reg_dword(&reg->iobase_addr); 1999 wrt_reg_dword(&reg->iobase_select, 0x60000000); /* write to F0h = PCR */ 2000 2001 /* Host/Risc registers. */ 2002 iter_reg = fw->host_risc_reg; 2003 iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg); 2004 iter_reg = qla24xx_read_window(reg, 0x7010, 16, iter_reg); 2005 qla24xx_read_window(reg, 0x7040, 16, iter_reg); 2006 2007 /* PCIe registers. */ 2008 wrt_reg_dword(&reg->iobase_addr, 0x7C00); 2009 rd_reg_dword(&reg->iobase_addr); 2010 wrt_reg_dword(&reg->iobase_window, 0x01); 2011 dmp_reg = &reg->iobase_c4; 2012 fw->pcie_regs[0] = htonl(rd_reg_dword(dmp_reg)); 2013 dmp_reg++; 2014 fw->pcie_regs[1] = htonl(rd_reg_dword(dmp_reg)); 2015 dmp_reg++; 2016 fw->pcie_regs[2] = htonl(rd_reg_dword(dmp_reg)); 2017 fw->pcie_regs[3] = htonl(rd_reg_dword(&reg->iobase_window)); 2018 2019 wrt_reg_dword(&reg->iobase_window, 0x00); 2020 rd_reg_dword(&reg->iobase_window); 2021 2022 /* Host interface registers. */ 2023 dmp_reg = &reg->flash_addr; 2024 for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++) 2025 fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg)); 2026 2027 /* Disable interrupts. */ 2028 wrt_reg_dword(&reg->ictrl, 0); 2029 rd_reg_dword(&reg->ictrl); 2030 2031 /* Shadow registers. */ 2032 wrt_reg_dword(&reg->iobase_addr, 0x0F70); 2033 rd_reg_dword(&reg->iobase_addr); 2034 wrt_reg_dword(&reg->iobase_select, 0xB0000000); 2035 fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata)); 2036 2037 wrt_reg_dword(&reg->iobase_select, 0xB0100000); 2038 fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata)); 2039 2040 wrt_reg_dword(&reg->iobase_select, 0xB0200000); 2041 fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata)); 2042 2043 wrt_reg_dword(&reg->iobase_select, 0xB0300000); 2044 fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata)); 2045 2046 wrt_reg_dword(&reg->iobase_select, 0xB0400000); 2047 fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata)); 2048 2049 wrt_reg_dword(&reg->iobase_select, 0xB0500000); 2050 fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata)); 2051 2052 wrt_reg_dword(&reg->iobase_select, 0xB0600000); 2053 fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata)); 2054 2055 wrt_reg_dword(&reg->iobase_select, 0xB0700000); 2056 fw->shadow_reg[7] = htonl(rd_reg_dword(&reg->iobase_sdata)); 2057 2058 wrt_reg_dword(&reg->iobase_select, 0xB0800000); 2059 fw->shadow_reg[8] = htonl(rd_reg_dword(&reg->iobase_sdata)); 2060 2061 wrt_reg_dword(&reg->iobase_select, 0xB0900000); 2062 fw->shadow_reg[9] = htonl(rd_reg_dword(&reg->iobase_sdata)); 2063 2064 wrt_reg_dword(&reg->iobase_select, 0xB0A00000); 2065 fw->shadow_reg[10] = htonl(rd_reg_dword(&reg->iobase_sdata)); 2066 2067 /* RISC I/O register. */ 2068 wrt_reg_dword(&reg->iobase_addr, 0x0010); 2069 fw->risc_io_reg = htonl(rd_reg_dword(&reg->iobase_window)); 2070 2071 /* Mailbox registers.
*/ 2072 mbx_reg = ®->mailbox0; 2073 for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++) 2074 fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg)); 2075 2076 /* Transfer sequence registers. */ 2077 iter_reg = fw->xseq_gp_reg; 2078 iter_reg = qla24xx_read_window(reg, 0xBE00, 16, iter_reg); 2079 iter_reg = qla24xx_read_window(reg, 0xBE10, 16, iter_reg); 2080 iter_reg = qla24xx_read_window(reg, 0xBE20, 16, iter_reg); 2081 iter_reg = qla24xx_read_window(reg, 0xBE30, 16, iter_reg); 2082 iter_reg = qla24xx_read_window(reg, 0xBE40, 16, iter_reg); 2083 iter_reg = qla24xx_read_window(reg, 0xBE50, 16, iter_reg); 2084 iter_reg = qla24xx_read_window(reg, 0xBE60, 16, iter_reg); 2085 iter_reg = qla24xx_read_window(reg, 0xBE70, 16, iter_reg); 2086 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg); 2087 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg); 2088 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg); 2089 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg); 2090 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg); 2091 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg); 2092 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg); 2093 qla24xx_read_window(reg, 0xBF70, 16, iter_reg); 2094 2095 iter_reg = fw->xseq_0_reg; 2096 iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg); 2097 iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg); 2098 qla24xx_read_window(reg, 0xBFE0, 16, iter_reg); 2099 2100 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg); 2101 2102 qla24xx_read_window(reg, 0xBEF0, 16, fw->xseq_2_reg); 2103 2104 /* Receive sequence registers. */ 2105 iter_reg = fw->rseq_gp_reg; 2106 iter_reg = qla24xx_read_window(reg, 0xFE00, 16, iter_reg); 2107 iter_reg = qla24xx_read_window(reg, 0xFE10, 16, iter_reg); 2108 iter_reg = qla24xx_read_window(reg, 0xFE20, 16, iter_reg); 2109 iter_reg = qla24xx_read_window(reg, 0xFE30, 16, iter_reg); 2110 iter_reg = qla24xx_read_window(reg, 0xFE40, 16, iter_reg); 2111 iter_reg = qla24xx_read_window(reg, 0xFE50, 16, iter_reg); 2112 iter_reg = qla24xx_read_window(reg, 0xFE60, 16, iter_reg); 2113 iter_reg = qla24xx_read_window(reg, 0xFE70, 16, iter_reg); 2114 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg); 2115 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg); 2116 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg); 2117 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg); 2118 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg); 2119 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg); 2120 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg); 2121 qla24xx_read_window(reg, 0xFF70, 16, iter_reg); 2122 2123 iter_reg = fw->rseq_0_reg; 2124 iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg); 2125 qla24xx_read_window(reg, 0xFFD0, 16, iter_reg); 2126 2127 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg); 2128 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg); 2129 qla24xx_read_window(reg, 0xFEF0, 16, fw->rseq_3_reg); 2130 2131 /* Auxiliary sequence registers. 
*/ 2132 iter_reg = fw->aseq_gp_reg; 2133 iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg); 2134 iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg); 2135 iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg); 2136 iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg); 2137 iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg); 2138 iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg); 2139 iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg); 2140 iter_reg = qla24xx_read_window(reg, 0xB070, 16, iter_reg); 2141 iter_reg = qla24xx_read_window(reg, 0xB100, 16, iter_reg); 2142 iter_reg = qla24xx_read_window(reg, 0xB110, 16, iter_reg); 2143 iter_reg = qla24xx_read_window(reg, 0xB120, 16, iter_reg); 2144 iter_reg = qla24xx_read_window(reg, 0xB130, 16, iter_reg); 2145 iter_reg = qla24xx_read_window(reg, 0xB140, 16, iter_reg); 2146 iter_reg = qla24xx_read_window(reg, 0xB150, 16, iter_reg); 2147 iter_reg = qla24xx_read_window(reg, 0xB160, 16, iter_reg); 2148 qla24xx_read_window(reg, 0xB170, 16, iter_reg); 2149 2150 iter_reg = fw->aseq_0_reg; 2151 iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg); 2152 qla24xx_read_window(reg, 0xB0D0, 16, iter_reg); 2153 2154 qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg); 2155 qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg); 2156 qla24xx_read_window(reg, 0xB1F0, 16, fw->aseq_3_reg); 2157 2158 /* Command DMA registers. */ 2159 iter_reg = fw->cmd_dma_reg; 2160 iter_reg = qla24xx_read_window(reg, 0x7100, 16, iter_reg); 2161 iter_reg = qla24xx_read_window(reg, 0x7120, 16, iter_reg); 2162 iter_reg = qla24xx_read_window(reg, 0x7130, 16, iter_reg); 2163 qla24xx_read_window(reg, 0x71F0, 16, iter_reg); 2164 2165 /* Queues. */ 2166 iter_reg = fw->req0_dma_reg; 2167 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg); 2168 dmp_reg = ®->iobase_q; 2169 for (cnt = 0; cnt < 7; cnt++, dmp_reg++) 2170 *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); 2171 2172 iter_reg = fw->resp0_dma_reg; 2173 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg); 2174 dmp_reg = ®->iobase_q; 2175 for (cnt = 0; cnt < 7; cnt++, dmp_reg++) 2176 *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); 2177 2178 iter_reg = fw->req1_dma_reg; 2179 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg); 2180 dmp_reg = ®->iobase_q; 2181 for (cnt = 0; cnt < 7; cnt++, dmp_reg++) 2182 *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); 2183 2184 /* Transmit DMA registers. */ 2185 iter_reg = fw->xmt0_dma_reg; 2186 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg); 2187 qla24xx_read_window(reg, 0x7610, 16, iter_reg); 2188 2189 iter_reg = fw->xmt1_dma_reg; 2190 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg); 2191 qla24xx_read_window(reg, 0x7630, 16, iter_reg); 2192 2193 iter_reg = fw->xmt2_dma_reg; 2194 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg); 2195 qla24xx_read_window(reg, 0x7650, 16, iter_reg); 2196 2197 iter_reg = fw->xmt3_dma_reg; 2198 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg); 2199 qla24xx_read_window(reg, 0x7670, 16, iter_reg); 2200 2201 iter_reg = fw->xmt4_dma_reg; 2202 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg); 2203 qla24xx_read_window(reg, 0x7690, 16, iter_reg); 2204 2205 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg); 2206 2207 /* Receive DMA registers. 
*/ 2208 iter_reg = fw->rcvt0_data_dma_reg; 2209 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg); 2210 qla24xx_read_window(reg, 0x7710, 16, iter_reg); 2211 2212 iter_reg = fw->rcvt1_data_dma_reg; 2213 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg); 2214 qla24xx_read_window(reg, 0x7730, 16, iter_reg); 2215 2216 /* RISC registers. */ 2217 iter_reg = fw->risc_gp_reg; 2218 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg); 2219 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg); 2220 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg); 2221 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg); 2222 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg); 2223 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg); 2224 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg); 2225 qla24xx_read_window(reg, 0x0F70, 16, iter_reg); 2226 2227 /* Local memory controller registers. */ 2228 iter_reg = fw->lmc_reg; 2229 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg); 2230 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg); 2231 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg); 2232 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg); 2233 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg); 2234 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg); 2235 iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg); 2236 qla24xx_read_window(reg, 0x3070, 16, iter_reg); 2237 2238 /* Fibre Protocol Module registers. */ 2239 iter_reg = fw->fpm_hdw_reg; 2240 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg); 2241 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg); 2242 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg); 2243 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg); 2244 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg); 2245 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg); 2246 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg); 2247 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg); 2248 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg); 2249 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg); 2250 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg); 2251 iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg); 2252 iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg); 2253 iter_reg = qla24xx_read_window(reg, 0x40D0, 16, iter_reg); 2254 iter_reg = qla24xx_read_window(reg, 0x40E0, 16, iter_reg); 2255 qla24xx_read_window(reg, 0x40F0, 16, iter_reg); 2256 2257 /* RQ0 Array registers. 
*/ 2258 iter_reg = fw->rq0_array_reg; 2259 iter_reg = qla24xx_read_window(reg, 0x5C00, 16, iter_reg); 2260 iter_reg = qla24xx_read_window(reg, 0x5C10, 16, iter_reg); 2261 iter_reg = qla24xx_read_window(reg, 0x5C20, 16, iter_reg); 2262 iter_reg = qla24xx_read_window(reg, 0x5C30, 16, iter_reg); 2263 iter_reg = qla24xx_read_window(reg, 0x5C40, 16, iter_reg); 2264 iter_reg = qla24xx_read_window(reg, 0x5C50, 16, iter_reg); 2265 iter_reg = qla24xx_read_window(reg, 0x5C60, 16, iter_reg); 2266 iter_reg = qla24xx_read_window(reg, 0x5C70, 16, iter_reg); 2267 iter_reg = qla24xx_read_window(reg, 0x5C80, 16, iter_reg); 2268 iter_reg = qla24xx_read_window(reg, 0x5C90, 16, iter_reg); 2269 iter_reg = qla24xx_read_window(reg, 0x5CA0, 16, iter_reg); 2270 iter_reg = qla24xx_read_window(reg, 0x5CB0, 16, iter_reg); 2271 iter_reg = qla24xx_read_window(reg, 0x5CC0, 16, iter_reg); 2272 iter_reg = qla24xx_read_window(reg, 0x5CD0, 16, iter_reg); 2273 iter_reg = qla24xx_read_window(reg, 0x5CE0, 16, iter_reg); 2274 qla24xx_read_window(reg, 0x5CF0, 16, iter_reg); 2275 2276 /* RQ1 Array registers. */ 2277 iter_reg = fw->rq1_array_reg; 2278 iter_reg = qla24xx_read_window(reg, 0x5D00, 16, iter_reg); 2279 iter_reg = qla24xx_read_window(reg, 0x5D10, 16, iter_reg); 2280 iter_reg = qla24xx_read_window(reg, 0x5D20, 16, iter_reg); 2281 iter_reg = qla24xx_read_window(reg, 0x5D30, 16, iter_reg); 2282 iter_reg = qla24xx_read_window(reg, 0x5D40, 16, iter_reg); 2283 iter_reg = qla24xx_read_window(reg, 0x5D50, 16, iter_reg); 2284 iter_reg = qla24xx_read_window(reg, 0x5D60, 16, iter_reg); 2285 iter_reg = qla24xx_read_window(reg, 0x5D70, 16, iter_reg); 2286 iter_reg = qla24xx_read_window(reg, 0x5D80, 16, iter_reg); 2287 iter_reg = qla24xx_read_window(reg, 0x5D90, 16, iter_reg); 2288 iter_reg = qla24xx_read_window(reg, 0x5DA0, 16, iter_reg); 2289 iter_reg = qla24xx_read_window(reg, 0x5DB0, 16, iter_reg); 2290 iter_reg = qla24xx_read_window(reg, 0x5DC0, 16, iter_reg); 2291 iter_reg = qla24xx_read_window(reg, 0x5DD0, 16, iter_reg); 2292 iter_reg = qla24xx_read_window(reg, 0x5DE0, 16, iter_reg); 2293 qla24xx_read_window(reg, 0x5DF0, 16, iter_reg); 2294 2295 /* RP0 Array registers. */ 2296 iter_reg = fw->rp0_array_reg; 2297 iter_reg = qla24xx_read_window(reg, 0x5E00, 16, iter_reg); 2298 iter_reg = qla24xx_read_window(reg, 0x5E10, 16, iter_reg); 2299 iter_reg = qla24xx_read_window(reg, 0x5E20, 16, iter_reg); 2300 iter_reg = qla24xx_read_window(reg, 0x5E30, 16, iter_reg); 2301 iter_reg = qla24xx_read_window(reg, 0x5E40, 16, iter_reg); 2302 iter_reg = qla24xx_read_window(reg, 0x5E50, 16, iter_reg); 2303 iter_reg = qla24xx_read_window(reg, 0x5E60, 16, iter_reg); 2304 iter_reg = qla24xx_read_window(reg, 0x5E70, 16, iter_reg); 2305 iter_reg = qla24xx_read_window(reg, 0x5E80, 16, iter_reg); 2306 iter_reg = qla24xx_read_window(reg, 0x5E90, 16, iter_reg); 2307 iter_reg = qla24xx_read_window(reg, 0x5EA0, 16, iter_reg); 2308 iter_reg = qla24xx_read_window(reg, 0x5EB0, 16, iter_reg); 2309 iter_reg = qla24xx_read_window(reg, 0x5EC0, 16, iter_reg); 2310 iter_reg = qla24xx_read_window(reg, 0x5ED0, 16, iter_reg); 2311 iter_reg = qla24xx_read_window(reg, 0x5EE0, 16, iter_reg); 2312 qla24xx_read_window(reg, 0x5EF0, 16, iter_reg); 2313 2314 /* RP1 Array registers. 
*/ 2315 iter_reg = fw->rp1_array_reg; 2316 iter_reg = qla24xx_read_window(reg, 0x5F00, 16, iter_reg); 2317 iter_reg = qla24xx_read_window(reg, 0x5F10, 16, iter_reg); 2318 iter_reg = qla24xx_read_window(reg, 0x5F20, 16, iter_reg); 2319 iter_reg = qla24xx_read_window(reg, 0x5F30, 16, iter_reg); 2320 iter_reg = qla24xx_read_window(reg, 0x5F40, 16, iter_reg); 2321 iter_reg = qla24xx_read_window(reg, 0x5F50, 16, iter_reg); 2322 iter_reg = qla24xx_read_window(reg, 0x5F60, 16, iter_reg); 2323 iter_reg = qla24xx_read_window(reg, 0x5F70, 16, iter_reg); 2324 iter_reg = qla24xx_read_window(reg, 0x5F80, 16, iter_reg); 2325 iter_reg = qla24xx_read_window(reg, 0x5F90, 16, iter_reg); 2326 iter_reg = qla24xx_read_window(reg, 0x5FA0, 16, iter_reg); 2327 iter_reg = qla24xx_read_window(reg, 0x5FB0, 16, iter_reg); 2328 iter_reg = qla24xx_read_window(reg, 0x5FC0, 16, iter_reg); 2329 iter_reg = qla24xx_read_window(reg, 0x5FD0, 16, iter_reg); 2330 iter_reg = qla24xx_read_window(reg, 0x5FE0, 16, iter_reg); 2331 qla24xx_read_window(reg, 0x5FF0, 16, iter_reg); 2332 2333 iter_reg = fw->at0_array_reg; 2334 iter_reg = qla24xx_read_window(reg, 0x7080, 16, iter_reg); 2335 iter_reg = qla24xx_read_window(reg, 0x7090, 16, iter_reg); 2336 iter_reg = qla24xx_read_window(reg, 0x70A0, 16, iter_reg); 2337 iter_reg = qla24xx_read_window(reg, 0x70B0, 16, iter_reg); 2338 iter_reg = qla24xx_read_window(reg, 0x70C0, 16, iter_reg); 2339 iter_reg = qla24xx_read_window(reg, 0x70D0, 16, iter_reg); 2340 iter_reg = qla24xx_read_window(reg, 0x70E0, 16, iter_reg); 2341 qla24xx_read_window(reg, 0x70F0, 16, iter_reg); 2342 2343 /* I/O Queue Control registers. */ 2344 qla24xx_read_window(reg, 0x7800, 16, fw->queue_control_reg); 2345 2346 /* Frame Buffer registers. */ 2347 iter_reg = fw->fb_hdw_reg; 2348 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg); 2349 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg); 2350 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg); 2351 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg); 2352 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg); 2353 iter_reg = qla24xx_read_window(reg, 0x6060, 16, iter_reg); 2354 iter_reg = qla24xx_read_window(reg, 0x6070, 16, iter_reg); 2355 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg); 2356 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg); 2357 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg); 2358 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg); 2359 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg); 2360 iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg); 2361 iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg); 2362 iter_reg = qla24xx_read_window(reg, 0x6530, 16, iter_reg); 2363 iter_reg = qla24xx_read_window(reg, 0x6540, 16, iter_reg); 2364 iter_reg = qla24xx_read_window(reg, 0x6550, 16, iter_reg); 2365 iter_reg = qla24xx_read_window(reg, 0x6560, 16, iter_reg); 2366 iter_reg = qla24xx_read_window(reg, 0x6570, 16, iter_reg); 2367 iter_reg = qla24xx_read_window(reg, 0x6580, 16, iter_reg); 2368 iter_reg = qla24xx_read_window(reg, 0x6590, 16, iter_reg); 2369 iter_reg = qla24xx_read_window(reg, 0x65A0, 16, iter_reg); 2370 iter_reg = qla24xx_read_window(reg, 0x65B0, 16, iter_reg); 2371 iter_reg = qla24xx_read_window(reg, 0x65C0, 16, iter_reg); 2372 iter_reg = qla24xx_read_window(reg, 0x65D0, 16, iter_reg); 2373 iter_reg = qla24xx_read_window(reg, 0x65E0, 16, iter_reg); 2374 qla24xx_read_window(reg, 0x6F00, 16, iter_reg); 2375 2376 /* Multi queue registers */ 2377 nxt_chain = 
qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset, 2378 &last_chain); 2379 2380 rval = qla24xx_soft_reset(ha); 2381 if (rval != QLA_SUCCESS) { 2382 ql_log(ql_log_warn, vha, 0xd00e, 2383 "SOFT RESET FAILED, forcing continuation of dump!!!\n"); 2384 rval = QLA_SUCCESS; 2385 2386 ql_log(ql_log_warn, vha, 0xd00f, "try a bigger hammer!!!\n"); 2387 2388 wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_RESET); 2389 rd_reg_dword(&reg->hccr); 2390 2391 wrt_reg_dword(&reg->hccr, HCCRX_REL_RISC_PAUSE); 2392 rd_reg_dword(&reg->hccr); 2393 2394 wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_RESET); 2395 rd_reg_dword(&reg->hccr); 2396 2397 for (cnt = 30000; cnt && (rd_reg_word(&reg->mailbox0)); cnt--) 2398 udelay(5); 2399 2400 if (!cnt) { 2401 nxt = fw->code_ram; 2402 nxt += sizeof(fw->code_ram); 2403 nxt += (ha->fw_memory_size - 0x100000 + 1); 2404 goto copy_queue; 2405 } else { 2406 set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags); 2407 ql_log(ql_log_warn, vha, 0xd010, 2408 "bigger hammer success?\n"); 2409 } 2410 } 2411 2412 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram), 2413 &nxt); 2414 if (rval != QLA_SUCCESS) 2415 goto qla83xx_fw_dump_failed_0; 2416 2417 copy_queue: 2418 nxt = qla2xxx_copy_queues(ha, nxt); 2419 2420 qla24xx_copy_eft(ha, nxt); 2421 2422 /* Chain entries -- started with MQ. */ 2423 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain); 2424 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain); 2425 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain); 2426 nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain); 2427 nxt_chain = qla81xx_copy_exchoffld(ha, nxt_chain, &last_chain); 2428 if (last_chain) { 2429 ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT); 2430 *last_chain |= htonl(DUMP_CHAIN_LAST); 2431 } 2432 2433 /* Adjust valid length. */ 2434 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump); 2435 2436 qla83xx_fw_dump_failed_0: 2437 qla2xxx_dump_post_process(base_vha, rval); 2438 } 2439 2440 /****************************************************************************/ 2441 /* Driver Debug Functions. */ 2442 /****************************************************************************/ 2443 2444 /* Write the debug message prefix into @pbuf. */ 2445 static void ql_dbg_prefix(char *pbuf, int pbuf_size, 2446 const scsi_qla_host_t *vha, uint msg_id) 2447 { 2448 if (vha) { 2449 const struct pci_dev *pdev = vha->hw->pdev; 2450 2451 /* <module-name> [<dev-name>]-<msg-id>:<host>: */ 2452 snprintf(pbuf, pbuf_size, "%s [%s]-%04x:%lu: ", QL_MSGHDR, 2453 dev_name(&(pdev->dev)), msg_id, vha->host_no); 2454 } else { 2455 /* <module-name> [<dev-name>]-<msg-id>: : */ 2456 snprintf(pbuf, pbuf_size, "%s [%s]-%04x: : ", QL_MSGHDR, 2457 "0000:00:00.0", msg_id); 2458 } 2459 } 2460 2461 /* 2462 * This function is for formatting and logging debug information. 2463 * It is to be used when vha is available. It formats the message 2464 * and logs it to the messages file. 2465 * parameters: 2466 * level: The level of the debug messages to be printed. 2467 * If ql2xextended_error_logging value is correctly set, 2468 * this message will appear in the messages file. 2469 * vha: Pointer to the scsi_qla_host_t. 2470 * id: This is a unique identifier for the level. It identifies the 2471 * part of the code from where the message originated. 2472 * msg: The message to be displayed. 2473 */ 2474 void 2475 ql_dbg(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...)
2476 { 2477 va_list va; 2478 struct va_format vaf; 2479 char pbuf[64]; 2480 2481 va_start(va, fmt); 2482 2483 vaf.fmt = fmt; 2484 vaf.va = &va; 2485 2486 ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), vha, id); 2487 2488 if (!ql_mask_match(level)) 2489 trace_ql_dbg_log(pbuf, &vaf); 2490 else 2491 pr_warn("%s%pV", pbuf, &vaf); 2492 2493 va_end(va); 2494 2495 } 2496 2497 /* 2498 * This function is for formatting and logging debug information. 2499 * It is to be used when vha is not available and pci is available, 2500 * i.e., before host allocation. It formats the message and logs it 2501 * to the messages file. 2502 * parameters: 2503 * level: The level of the debug messages to be printed. 2504 * If ql2xextended_error_logging value is correctly set, 2505 * this message will appear in the messages file. 2506 * pdev: Pointer to the struct pci_dev. 2507 * id: This is a unique id for the level. It identifies the part 2508 * of the code from where the message originated. 2509 * msg: The message to be displayed. 2510 */ 2511 void 2512 ql_dbg_pci(uint level, struct pci_dev *pdev, uint id, const char *fmt, ...) 2513 { 2514 va_list va; 2515 struct va_format vaf; 2516 char pbuf[128]; 2517 2518 if (pdev == NULL) 2519 return; 2520 if (!ql_mask_match(level)) 2521 return; 2522 2523 va_start(va, fmt); 2524 2525 vaf.fmt = fmt; 2526 vaf.va = &va; 2527 2528 ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL, id + ql_dbg_offset); 2529 pr_warn("%s%pV", pbuf, &vaf); 2530 2531 va_end(va); 2532 } 2533 2534 /* 2535 * This function is for formatting and logging log messages. 2536 * It is to be used when vha is available. It formats the message 2537 * and logs it to the messages file. All the messages will be logged 2538 * irrespective of value of ql2xextended_error_logging. 2539 * parameters: 2540 * level: The level of the log messages to be printed in the 2541 * messages file. 2542 * vha: Pointer to the scsi_qla_host_t 2543 * id: This is a unique id for the level. It identifies the 2544 * part of the code from where the message originated. 2545 * msg: The message to be displayed. 2546 */ 2547 void 2548 ql_log(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...) 2549 { 2550 va_list va; 2551 struct va_format vaf; 2552 char pbuf[128]; 2553 2554 if (level > ql_errlev) 2555 return; 2556 2557 ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), vha, id); 2558 2559 va_start(va, fmt); 2560 2561 vaf.fmt = fmt; 2562 vaf.va = &va; 2563 2564 switch (level) { 2565 case ql_log_fatal: /* FATAL LOG */ 2566 pr_crit("%s%pV", pbuf, &vaf); 2567 break; 2568 case ql_log_warn: 2569 pr_err("%s%pV", pbuf, &vaf); 2570 break; 2571 case ql_log_info: 2572 pr_warn("%s%pV", pbuf, &vaf); 2573 break; 2574 default: 2575 pr_info("%s%pV", pbuf, &vaf); 2576 break; 2577 } 2578 2579 va_end(va); 2580 } 2581 2582 /* 2583 * This function is for formatting and logging log messages. 2584 * It is to be used when vha is not available and pci is available, 2585 * i.e., before host allocation. It formats the message and logs 2586 * it to the messages file. All the messages are logged irrespective 2587 * of the value of ql2xextended_error_logging. 2588 * parameters: 2589 * level: The level of the log messages to be printed in the 2590 * messages file. 2591 * pdev: Pointer to the struct pci_dev. 2592 * id: This is a unique id for the level. It identifies the 2593 * part of the code from where the message originated. 2594 * msg: The message to be displayed. 2595 */ 2596 void 2597 ql_log_pci(uint level, struct pci_dev *pdev, uint id, const char *fmt, ...) 
2598 { 2599 va_list va; 2600 struct va_format vaf; 2601 char pbuf[128]; 2602 2603 if (pdev == NULL) 2604 return; 2605 if (level > ql_errlev) 2606 return; 2607 2608 ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL, id); 2609 2610 va_start(va, fmt); 2611 2612 vaf.fmt = fmt; 2613 vaf.va = &va; 2614 2615 switch (level) { 2616 case ql_log_fatal: /* FATAL LOG */ 2617 pr_crit("%s%pV", pbuf, &vaf); 2618 break; 2619 case ql_log_warn: 2620 pr_err("%s%pV", pbuf, &vaf); 2621 break; 2622 case ql_log_info: 2623 pr_warn("%s%pV", pbuf, &vaf); 2624 break; 2625 default: 2626 pr_info("%s%pV", pbuf, &vaf); 2627 break; 2628 } 2629 2630 va_end(va); 2631 } 2632 2633 void 2634 ql_dump_regs(uint level, scsi_qla_host_t *vha, uint id) 2635 { 2636 int i; 2637 struct qla_hw_data *ha = vha->hw; 2638 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 2639 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; 2640 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; 2641 __le16 __iomem *mbx_reg; 2642 2643 if (!ql_mask_match(level)) 2644 return; 2645 2646 if (IS_P3P_TYPE(ha)) 2647 mbx_reg = ®82->mailbox_in[0]; 2648 else if (IS_FWI2_CAPABLE(ha)) 2649 mbx_reg = ®24->mailbox0; 2650 else 2651 mbx_reg = MAILBOX_REG(ha, reg, 0); 2652 2653 ql_dbg(level, vha, id, "Mailbox registers:\n"); 2654 for (i = 0; i < 6; i++, mbx_reg++) 2655 ql_dbg(level, vha, id, 2656 "mbox[%d] %#04x\n", i, rd_reg_word(mbx_reg)); 2657 } 2658 2659 void 2660 ql_dump_buffer(uint level, scsi_qla_host_t *vha, uint id, const void *buf, 2661 uint size) 2662 { 2663 uint cnt; 2664 2665 if (!ql_mask_match(level)) 2666 return; 2667 2668 ql_dbg(level, vha, id, 2669 "%-+5d 0 1 2 3 4 5 6 7 8 9 A B C D E F\n", size); 2670 ql_dbg(level, vha, id, 2671 "----- -----------------------------------------------\n"); 2672 for (cnt = 0; cnt < size; cnt += 16) { 2673 ql_dbg(level, vha, id, "%04x: ", cnt); 2674 print_hex_dump(KERN_CONT, "", DUMP_PREFIX_NONE, 16, 1, 2675 buf + cnt, min(16U, size - cnt), false); 2676 } 2677 } 2678 2679 /* 2680 * This function is for formatting and logging log messages. 2681 * It is to be used when vha is available. It formats the message 2682 * and logs it to the messages file. All the messages will be logged 2683 * irrespective of value of ql2xextended_error_logging. 2684 * parameters: 2685 * level: The level of the log messages to be printed in the 2686 * messages file. 2687 * vha: Pointer to the scsi_qla_host_t 2688 * id: This is a unique id for the level. It identifies the 2689 * part of the code from where the message originated. 2690 * msg: The message to be displayed. 2691 */ 2692 void 2693 ql_log_qp(uint32_t level, struct qla_qpair *qpair, int32_t id, 2694 const char *fmt, ...) 2695 { 2696 va_list va; 2697 struct va_format vaf; 2698 char pbuf[128]; 2699 2700 if (level > ql_errlev) 2701 return; 2702 2703 ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), qpair ? qpair->vha : NULL, id); 2704 2705 va_start(va, fmt); 2706 2707 vaf.fmt = fmt; 2708 vaf.va = &va; 2709 2710 switch (level) { 2711 case ql_log_fatal: /* FATAL LOG */ 2712 pr_crit("%s%pV", pbuf, &vaf); 2713 break; 2714 case ql_log_warn: 2715 pr_err("%s%pV", pbuf, &vaf); 2716 break; 2717 case ql_log_info: 2718 pr_warn("%s%pV", pbuf, &vaf); 2719 break; 2720 default: 2721 pr_info("%s%pV", pbuf, &vaf); 2722 break; 2723 } 2724 2725 va_end(va); 2726 } 2727 2728 /* 2729 * This function is for formatting and logging debug information. 2730 * It is to be used when vha is available. It formats the message 2731 * and logs it to the messages file. 
2732 * parameters: 2733 * level: The level of the debug messages to be printed. 2734 * If ql2xextended_error_logging value is correctly set, 2735 * this message will appear in the messages file. 2736 * qpair: Pointer to the qla_qpair; its vha (if any) is used for the prefix. 2737 * id: This is a unique identifier for the level. It identifies the 2738 * part of the code from where the message originated. 2739 * msg: The message to be displayed. 2740 */ 2741 void 2742 ql_dbg_qp(uint32_t level, struct qla_qpair *qpair, int32_t id, 2743 const char *fmt, ...) 2744 { 2745 va_list va; 2746 struct va_format vaf; 2747 char pbuf[128]; 2748 2749 if (!ql_mask_match(level)) 2750 return; 2751 2752 va_start(va, fmt); 2753 2754 vaf.fmt = fmt; 2755 vaf.va = &va; 2756 2757 ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), qpair ? qpair->vha : NULL, 2758 id + ql_dbg_offset); 2759 pr_warn("%s%pV", pbuf, &vaf); 2760 2761 va_end(va); 2762 2763 } 2764
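/*
 * Usage sketch for the logging helpers above (illustrative only, not part of
 * the driver and never called).  It assumes a valid scsi_qla_host_t and a
 * level bit/level defined in qla_dbg.h (ql_dbg_init, ql_log_warn); the
 * message ids 0x0123 and 0x0124 are hypothetical placeholders and do not
 * correspond to entries in the message-id table at the top of this file.
 */
static void __maybe_unused ql_dbg_usage_example(scsi_qla_host_t *vha)
{
	/*
	 * Debug message: printed by ql_dbg() only when the ql_dbg_init bit
	 * is set in ql2xextended_error_logging; otherwise it is routed to
	 * the ql_dbg_log trace event.
	 */
	ql_dbg(ql_dbg_init, vha, 0x0123,
	    "Example debug message for host %lu.\n", vha->host_no);

	/* Log message: emitted whenever the level is at or below ql_errlev. */
	ql_log(ql_log_warn, vha, 0x0124,
	    "Example warning message.\n");
}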