/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */

/*
 * Table for showing the current message id in use for particular level
 * Change this table for addition of log/debug messages.
 * ----------------------------------------------------------------------
 * |             Level             | Last Value Used |      Holes      |
 * ----------------------------------------------------------------------
 * | Module Init and Probe         |     0x017d      | 0x004b,0x0141   |
 * |                               |                 | 0x0144,0x0146   |
 * |                               |                 | 0x015b-0x0160   |
 * |                               |                 | 0x016e-0x0170   |
 * | Mailbox commands              |     0x118d      | 0x1018-0x1019   |
 * |                               |                 | 0x10ca          |
 * |                               |                 | 0x1115-0x1116   |
 * |                               |                 | 0x111a-0x111b   |
 * |                               |                 | 0x1155-0x1158   |
 * | Device Discovery              |     0x2095      | 0x2020-0x2022,  |
 * |                               |                 | 0x2011-0x2012,  |
 * |                               |                 | 0x2016          |
 * | Queue Command and IO tracing  |     0x3059      | 0x3006-0x300b   |
 * |                               |                 | 0x3027-0x3028   |
 * |                               |                 | 0x303d-0x3041   |
 * |                               |                 | 0x302d,0x3033   |
 * |                               |                 | 0x3036,0x3038   |
 * |                               |                 | 0x303a          |
 * | DPC Thread                    |     0x4023      | 0x4002,0x4013   |
 * | Async Events                  |     0x5087      | 0x502b-0x502f   |
 * |                               |                 | 0x5047,0x5052   |
 * |                               |                 | 0x5084,0x5075   |
 * |                               |                 | 0x503d,0x5044   |
 * |                               |                 | 0x507b          |
 * | Timer Routines                |     0x6012      |                 |
 * | User Space Interactions       |     0x70e2      | 0x7018,0x702e   |
 * |                               |                 | 0x7020,0x7024   |
 * |                               |                 | 0x7039,0x7045   |
 * |                               |                 | 0x7073-0x7075   |
 * |                               |                 | 0x70a5-0x70a6   |
 * |                               |                 | 0x70a8,0x70ab   |
 * |                               |                 | 0x70ad-0x70ae   |
 * |                               |                 | 0x70d7-0x70db   |
 * |                               |                 | 0x70de-0x70df   |
 * | Task Management               |     0x803d      | 0x8000,0x800b   |
 * |                               |                 | 0x8019          |
 * |                               |                 | 0x8025,0x8026   |
 * |                               |                 | 0x8031,0x8032   |
 * |                               |                 | 0x8039,0x803c   |
 * | AER/EEH                       |     0x9011      |                 |
 * | Virtual Port                  |     0xa007      |                 |
 * | ISP82XX Specific              |     0xb157      | 0xb002,0xb024   |
 * |                               |                 | 0xb09e,0xb0ae   |
 * |                               |                 | 0xb0c3,0xb0c6   |
 * |                               |                 | 0xb0e0-0xb0ef   |
 * |                               |                 | 0xb085,0xb0dc   |
 * |                               |                 | 0xb107,0xb108   |
 * |                               |                 | 0xb111,0xb11e   |
 * |                               |                 | 0xb12c,0xb12d   |
 * |                               |                 | 0xb13a,0xb142   |
 * |                               |                 | 0xb13c-0xb140   |
 * |                               |                 | 0xb149          |
 * | MultiQ                        |     0xc00c      |                 |
 * | Misc                          |     0xd212      | 0xd017-0xd019   |
 * |                               |                 | 0xd020          |
 * |                               |                 | 0xd030-0xd0ff   |
 * |                               |                 | 0xd101-0xd1fe   |
 * |                               |                 | 0xd213-0xd2fe   |
 * | Target Mode                   |     0xe078      |                 |
 * | Target Mode Management        |     0xf072      | 0xf002-0xf003   |
 * |                               |                 | 0xf046-0xf049   |
 * | Target Mode Task Management   |     0x1000b     |                 |
 * ----------------------------------------------------------------------
 */

#include "qla_def.h"

#include <linux/delay.h>

static uint32_t ql_dbg_offset = 0x800;
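
/*
 * Illustrative sketch only (not part of the driver): a new debug or log
 * call site picks an unused message id for its component from the table
 * above -- an id greater than that row's "Last Value Used", or one listed
 * in its Holes column -- and the table is then updated. The id and message
 * below are hypothetical:
 *
 *	ql_dbg(ql_dbg_init, vha, 0x017e,
 *	    "Example probe-time debug message.\n");
 *
 * ql_log() draws from the same id space; see the 0xd0xx "Misc" ids used by
 * the dump routines in this file.
 */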

static inline void
qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump *fw_dump)
{
	fw_dump->fw_major_version = htonl(ha->fw_major_version);
	fw_dump->fw_minor_version = htonl(ha->fw_minor_version);
	fw_dump->fw_subminor_version = htonl(ha->fw_subminor_version);
	fw_dump->fw_attributes = htonl(ha->fw_attributes);

	fw_dump->vendor = htonl(ha->pdev->vendor);
	fw_dump->device = htonl(ha->pdev->device);
	fw_dump->subsystem_vendor = htonl(ha->pdev->subsystem_vendor);
	fw_dump->subsystem_device = htonl(ha->pdev->subsystem_device);
}

static inline void *
qla2xxx_copy_queues(struct qla_hw_data *ha, void *ptr)
{
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Request queue. */
	memcpy(ptr, req->ring, req->length * sizeof(request_t));

	/* Response queue. */
	ptr += req->length * sizeof(request_t);
	memcpy(ptr, rsp->ring, rsp->length * sizeof(response_t));

	return ptr + (rsp->length * sizeof(response_t));
}

int
qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
	uint32_t ram_dwords, void **nxt)
{
	int rval;
	uint32_t cnt, stat, timer, dwords, idx;
	uint16_t mb0, mb1;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	dma_addr_t dump_dma = ha->gid_list_dma;
	uint32_t *dump = (uint32_t *)ha->gid_list;

	rval = QLA_SUCCESS;
	mb0 = 0;

	WRT_REG_WORD(&reg->mailbox0, MBC_LOAD_DUMP_MPI_RAM);
	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

	dwords = qla2x00_gid_list_size(ha) / 4;
	for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
	    cnt += dwords, addr += dwords) {
		if (cnt + dwords > ram_dwords)
			dwords = ram_dwords - cnt;

		WRT_REG_WORD(&reg->mailbox1, LSW(addr));
		WRT_REG_WORD(&reg->mailbox8, MSW(addr));

		WRT_REG_WORD(&reg->mailbox2, MSW(dump_dma));
		WRT_REG_WORD(&reg->mailbox3, LSW(dump_dma));
		WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
		WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));

		WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
		WRT_REG_WORD(&reg->mailbox5, LSW(dwords));

		WRT_REG_WORD(&reg->mailbox9, 0);
		WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);

		ha->flags.mbox_int = 0;
		for (timer = 6000000; timer; timer--) {
			/* Check for pending interrupts. */
			stat = RD_REG_DWORD(&reg->host_status);
			if (stat & HSRX_RISC_INT) {
				stat &= 0xff;

				if (stat == 0x1 || stat == 0x2 ||
				    stat == 0x10 || stat == 0x11) {
					set_bit(MBX_INTERRUPT,
					    &ha->mbx_cmd_flags);

					mb0 = RD_REG_WORD(&reg->mailbox0);
					mb1 = RD_REG_WORD(&reg->mailbox1);

					WRT_REG_DWORD(&reg->hccr,
					    HCCRX_CLR_RISC_INT);
					RD_REG_DWORD(&reg->hccr);
					break;
				}

				/* Clear this intr; it wasn't a mailbox intr */
				WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
				RD_REG_DWORD(&reg->hccr);
			}
			udelay(5);
		}
		ha->flags.mbox_int = 1;

		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
			rval = mb0 & MBS_MASK;
			for (idx = 0; idx < dwords; idx++)
				ram[cnt + idx] = IS_QLA27XX(ha) ?
				    le32_to_cpu(dump[idx]) : swab32(dump[idx]);
		} else {
			rval = QLA_FUNCTION_FAILED;
		}
	}

	*nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
	return rval;
}
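
/*
 * A brief note on the mailbox programming pattern above (a sketch, not a
 * separate code path): the firmware RAM address and the host DMA address of
 * the bounce buffer (the reused gid_list buffer) are passed 16 bits at a
 * time through the mailbox registers, e.g. for a 64-bit dma_addr_t:
 *
 *	WRT_REG_WORD(&reg->mailbox2, MSW(dump_dma));		bits 31..16
 *	WRT_REG_WORD(&reg->mailbox3, LSW(dump_dma));		bits 15..0
 *	WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));	bits 63..48
 *	WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));	bits 47..32
 *
 * Each iteration transfers at most qla2x00_gid_list_size(ha) / 4 dwords,
 * and the chunk is then byte-swapped into the caller's buffer.
 */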

int
qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
    uint32_t ram_dwords, void **nxt)
{
	int rval;
	uint32_t cnt, stat, timer, dwords, idx;
	uint16_t mb0;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	dma_addr_t dump_dma = ha->gid_list_dma;
	uint32_t *dump = (uint32_t *)ha->gid_list;

	rval = QLA_SUCCESS;
	mb0 = 0;

	WRT_REG_WORD(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

	dwords = qla2x00_gid_list_size(ha) / 4;
	for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
	    cnt += dwords, addr += dwords) {
		if (cnt + dwords > ram_dwords)
			dwords = ram_dwords - cnt;

		WRT_REG_WORD(&reg->mailbox1, LSW(addr));
		WRT_REG_WORD(&reg->mailbox8, MSW(addr));

		WRT_REG_WORD(&reg->mailbox2, MSW(dump_dma));
		WRT_REG_WORD(&reg->mailbox3, LSW(dump_dma));
		WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
		WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));

		WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
		WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
		WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);

		ha->flags.mbox_int = 0;
		for (timer = 6000000; timer; timer--) {
			/* Check for pending interrupts. */
			stat = RD_REG_DWORD(&reg->host_status);
			if (stat & HSRX_RISC_INT) {
				stat &= 0xff;

				if (stat == 0x1 || stat == 0x2 ||
				    stat == 0x10 || stat == 0x11) {
					set_bit(MBX_INTERRUPT,
					    &ha->mbx_cmd_flags);

					mb0 = RD_REG_WORD(&reg->mailbox0);

					WRT_REG_DWORD(&reg->hccr,
					    HCCRX_CLR_RISC_INT);
					RD_REG_DWORD(&reg->hccr);
					break;
				}

				/* Clear this intr; it wasn't a mailbox intr */
				WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
				RD_REG_DWORD(&reg->hccr);
			}
			udelay(5);
		}
		ha->flags.mbox_int = 1;

		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
			rval = mb0 & MBS_MASK;
			for (idx = 0; idx < dwords; idx++)
				ram[cnt + idx] = IS_QLA27XX(ha) ?
				    le32_to_cpu(dump[idx]) : swab32(dump[idx]);
		} else {
			rval = QLA_FUNCTION_FAILED;
		}
	}

	*nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
	return rval;
}

static int
qla24xx_dump_memory(struct qla_hw_data *ha, uint32_t *code_ram,
    uint32_t cram_size, void **nxt)
{
	int rval;

	/* Code RAM. */
	rval = qla24xx_dump_ram(ha, 0x20000, code_ram, cram_size / 4, nxt);
	if (rval != QLA_SUCCESS)
		return rval;

	set_bit(RISC_SRAM_DUMP_CMPL, &ha->fw_dump_cap_flags);

	/* External Memory. */
	rval = qla24xx_dump_ram(ha, 0x100000, *nxt,
	    ha->fw_memory_size - 0x100000 + 1, nxt);
	if (rval == QLA_SUCCESS)
		set_bit(RISC_EXT_MEM_DUMP_CMPL, &ha->fw_dump_cap_flags);

	return rval;
}
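
/*
 * For reference, qla24xx_fw_dump() below consumes this helper roughly as
 * follows (condensed from the actual call site later in this file):
 *
 *	rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
 *	    &nxt);
 *	if (rval == QLA_SUCCESS)
 *		nxt = qla2xxx_copy_queues(ha, nxt);
 *
 * On success *nxt is advanced past the data just written, so the code RAM,
 * external memory, queue rings and EFT regions end up packed back to back
 * in the pre-allocated ha->fw_dump buffer.
 */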

static uint32_t *
qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
	uint32_t count, uint32_t *buf)
{
	uint32_t __iomem *dmp_reg;

	WRT_REG_DWORD(&reg->iobase_addr, iobase);
	dmp_reg = &reg->iobase_window;
	while (count--)
		*buf++ = htonl(RD_REG_DWORD(dmp_reg++));

	return buf;
}

void
qla24xx_pause_risc(struct device_reg_24xx __iomem *reg, struct qla_hw_data *ha)
{
	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);

	/* A 100 usec delay is sufficient for the hardware to pause the RISC. */
	udelay(100);
	if (RD_REG_DWORD(&reg->host_status) & HSRX_RISC_PAUSED)
		set_bit(RISC_PAUSE_CMPL, &ha->fw_dump_cap_flags);
}

int
qla24xx_soft_reset(struct qla_hw_data *ha)
{
	int rval = QLA_SUCCESS;
	uint32_t cnt;
	uint16_t wd;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	/*
	 * Reset RISC. The delay is dependent on system architecture.
	 * The driver can proceed with the reset sequence after waiting
	 * for a timeout period.
	 */
	WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	for (cnt = 0; cnt < 30000; cnt++) {
		if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
			break;

		udelay(10);
	}
	if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
		set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);

	WRT_REG_DWORD(&reg->ctrl_status,
	    CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);

	udelay(100);

	/* Wait for soft-reset to complete. */
	for (cnt = 0; cnt < 30000; cnt++) {
		if ((RD_REG_DWORD(&reg->ctrl_status) &
		    CSRX_ISP_SOFT_RESET) == 0)
			break;

		udelay(10);
	}
	if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
		set_bit(ISP_RESET_CMPL, &ha->fw_dump_cap_flags);

	WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
	RD_REG_DWORD(&reg->hccr);		/* PCI Posting. */

	for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt)
			udelay(10);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS)
		set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);

	return rval;
}
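
/*
 * The register-dump sections of the qla24xx/25xx/81xx/83xx fw_dump routines
 * below are built almost entirely from qla24xx_read_window(): the caller
 * selects a register bank by its I/O base and copies 16 dwords at a time
 * into the dump buffer, advancing the destination pointer, e.g.:
 *
 *	iter_reg = fw->xseq_gp_reg;
 *	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
 *	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
 *
 * (Excerpt of the transfer-sequence register capture further down in this
 * file.)
 */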

static int
qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
    uint32_t ram_words, void **nxt)
{
	int rval;
	uint32_t cnt, stat, timer, words, idx;
	uint16_t mb0;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	dma_addr_t dump_dma = ha->gid_list_dma;
	uint16_t *dump = (uint16_t *)ha->gid_list;

	rval = QLA_SUCCESS;
	mb0 = 0;

	WRT_MAILBOX_REG(ha, reg, 0, MBC_DUMP_RISC_RAM_EXTENDED);
	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

	words = qla2x00_gid_list_size(ha) / 2;
	for (cnt = 0; cnt < ram_words && rval == QLA_SUCCESS;
	    cnt += words, addr += words) {
		if (cnt + words > ram_words)
			words = ram_words - cnt;

		WRT_MAILBOX_REG(ha, reg, 1, LSW(addr));
		WRT_MAILBOX_REG(ha, reg, 8, MSW(addr));

		WRT_MAILBOX_REG(ha, reg, 2, MSW(dump_dma));
		WRT_MAILBOX_REG(ha, reg, 3, LSW(dump_dma));
		WRT_MAILBOX_REG(ha, reg, 6, MSW(MSD(dump_dma)));
		WRT_MAILBOX_REG(ha, reg, 7, LSW(MSD(dump_dma)));

		WRT_MAILBOX_REG(ha, reg, 4, words);
		WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);

		for (timer = 6000000; timer; timer--) {
			/* Check for pending interrupts. */
			stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
			if (stat & HSR_RISC_INT) {
				stat &= 0xff;

				if (stat == 0x1 || stat == 0x2) {
					set_bit(MBX_INTERRUPT,
					    &ha->mbx_cmd_flags);

					mb0 = RD_MAILBOX_REG(ha, reg, 0);

					/* Release mailbox registers. */
					WRT_REG_WORD(&reg->semaphore, 0);
					WRT_REG_WORD(&reg->hccr,
					    HCCR_CLR_RISC_INT);
					RD_REG_WORD(&reg->hccr);
					break;
				} else if (stat == 0x10 || stat == 0x11) {
					set_bit(MBX_INTERRUPT,
					    &ha->mbx_cmd_flags);

					mb0 = RD_MAILBOX_REG(ha, reg, 0);

					WRT_REG_WORD(&reg->hccr,
					    HCCR_CLR_RISC_INT);
					RD_REG_WORD(&reg->hccr);
					break;
				}

				/* clear this intr; it wasn't a mailbox intr */
				WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
				RD_REG_WORD(&reg->hccr);
			}
			udelay(5);
		}

		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
			rval = mb0 & MBS_MASK;
			for (idx = 0; idx < words; idx++)
				ram[cnt + idx] = swab16(dump[idx]);
		} else {
			rval = QLA_FUNCTION_FAILED;
		}
	}

	*nxt = rval == QLA_SUCCESS ?
&ram[cnt]: NULL; 457 return rval; 458 } 459 460 static inline void 461 qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count, 462 uint16_t *buf) 463 { 464 uint16_t __iomem *dmp_reg = ®->u.isp2300.fb_cmd; 465 466 while (count--) 467 *buf++ = htons(RD_REG_WORD(dmp_reg++)); 468 } 469 470 static inline void * 471 qla24xx_copy_eft(struct qla_hw_data *ha, void *ptr) 472 { 473 if (!ha->eft) 474 return ptr; 475 476 memcpy(ptr, ha->eft, ntohl(ha->fw_dump->eft_size)); 477 return ptr + ntohl(ha->fw_dump->eft_size); 478 } 479 480 static inline void * 481 qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) 482 { 483 uint32_t cnt; 484 uint32_t *iter_reg; 485 struct qla2xxx_fce_chain *fcec = ptr; 486 487 if (!ha->fce) 488 return ptr; 489 490 *last_chain = &fcec->type; 491 fcec->type = __constant_htonl(DUMP_CHAIN_FCE); 492 fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) + 493 fce_calc_size(ha->fce_bufs)); 494 fcec->size = htonl(fce_calc_size(ha->fce_bufs)); 495 fcec->addr_l = htonl(LSD(ha->fce_dma)); 496 fcec->addr_h = htonl(MSD(ha->fce_dma)); 497 498 iter_reg = fcec->eregs; 499 for (cnt = 0; cnt < 8; cnt++) 500 *iter_reg++ = htonl(ha->fce_mb[cnt]); 501 502 memcpy(iter_reg, ha->fce, ntohl(fcec->size)); 503 504 return (char *)iter_reg + ntohl(fcec->size); 505 } 506 507 static inline void * 508 qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr, 509 uint32_t **last_chain) 510 { 511 struct qla2xxx_mqueue_chain *q; 512 struct qla2xxx_mqueue_header *qh; 513 uint32_t num_queues; 514 int que; 515 struct { 516 int length; 517 void *ring; 518 } aq, *aqp; 519 520 if (!ha->tgt.atio_ring) 521 return ptr; 522 523 num_queues = 1; 524 aqp = &aq; 525 aqp->length = ha->tgt.atio_q_length; 526 aqp->ring = ha->tgt.atio_ring; 527 528 for (que = 0; que < num_queues; que++) { 529 /* aqp = ha->atio_q_map[que]; */ 530 q = ptr; 531 *last_chain = &q->type; 532 q->type = __constant_htonl(DUMP_CHAIN_QUEUE); 533 q->chain_size = htonl( 534 sizeof(struct qla2xxx_mqueue_chain) + 535 sizeof(struct qla2xxx_mqueue_header) + 536 (aqp->length * sizeof(request_t))); 537 ptr += sizeof(struct qla2xxx_mqueue_chain); 538 539 /* Add header. */ 540 qh = ptr; 541 qh->queue = __constant_htonl(TYPE_ATIO_QUEUE); 542 qh->number = htonl(que); 543 qh->size = htonl(aqp->length * sizeof(request_t)); 544 ptr += sizeof(struct qla2xxx_mqueue_header); 545 546 /* Add data. */ 547 memcpy(ptr, aqp->ring, aqp->length * sizeof(request_t)); 548 549 ptr += aqp->length * sizeof(request_t); 550 } 551 552 return ptr; 553 } 554 555 static inline void * 556 qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) 557 { 558 struct qla2xxx_mqueue_chain *q; 559 struct qla2xxx_mqueue_header *qh; 560 struct req_que *req; 561 struct rsp_que *rsp; 562 int que; 563 564 if (!ha->mqenable) 565 return ptr; 566 567 /* Request queues */ 568 for (que = 1; que < ha->max_req_queues; que++) { 569 req = ha->req_q_map[que]; 570 if (!req) 571 break; 572 573 /* Add chain. */ 574 q = ptr; 575 *last_chain = &q->type; 576 q->type = __constant_htonl(DUMP_CHAIN_QUEUE); 577 q->chain_size = htonl( 578 sizeof(struct qla2xxx_mqueue_chain) + 579 sizeof(struct qla2xxx_mqueue_header) + 580 (req->length * sizeof(request_t))); 581 ptr += sizeof(struct qla2xxx_mqueue_chain); 582 583 /* Add header. */ 584 qh = ptr; 585 qh->queue = __constant_htonl(TYPE_REQUEST_QUEUE); 586 qh->number = htonl(que); 587 qh->size = htonl(req->length * sizeof(request_t)); 588 ptr += sizeof(struct qla2xxx_mqueue_header); 589 590 /* Add data. 
*/ 591 memcpy(ptr, req->ring, req->length * sizeof(request_t)); 592 ptr += req->length * sizeof(request_t); 593 } 594 595 /* Response queues */ 596 for (que = 1; que < ha->max_rsp_queues; que++) { 597 rsp = ha->rsp_q_map[que]; 598 if (!rsp) 599 break; 600 601 /* Add chain. */ 602 q = ptr; 603 *last_chain = &q->type; 604 q->type = __constant_htonl(DUMP_CHAIN_QUEUE); 605 q->chain_size = htonl( 606 sizeof(struct qla2xxx_mqueue_chain) + 607 sizeof(struct qla2xxx_mqueue_header) + 608 (rsp->length * sizeof(response_t))); 609 ptr += sizeof(struct qla2xxx_mqueue_chain); 610 611 /* Add header. */ 612 qh = ptr; 613 qh->queue = __constant_htonl(TYPE_RESPONSE_QUEUE); 614 qh->number = htonl(que); 615 qh->size = htonl(rsp->length * sizeof(response_t)); 616 ptr += sizeof(struct qla2xxx_mqueue_header); 617 618 /* Add data. */ 619 memcpy(ptr, rsp->ring, rsp->length * sizeof(response_t)); 620 ptr += rsp->length * sizeof(response_t); 621 } 622 623 return ptr; 624 } 625 626 static inline void * 627 qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) 628 { 629 uint32_t cnt, que_idx; 630 uint8_t que_cnt; 631 struct qla2xxx_mq_chain *mq = ptr; 632 device_reg_t __iomem *reg; 633 634 if (!ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) 635 return ptr; 636 637 mq = ptr; 638 *last_chain = &mq->type; 639 mq->type = __constant_htonl(DUMP_CHAIN_MQ); 640 mq->chain_size = __constant_htonl(sizeof(struct qla2xxx_mq_chain)); 641 642 que_cnt = ha->max_req_queues > ha->max_rsp_queues ? 643 ha->max_req_queues : ha->max_rsp_queues; 644 mq->count = htonl(que_cnt); 645 for (cnt = 0; cnt < que_cnt; cnt++) { 646 reg = ISP_QUE_REG(ha, cnt); 647 que_idx = cnt * 4; 648 mq->qregs[que_idx] = 649 htonl(RD_REG_DWORD(®->isp25mq.req_q_in)); 650 mq->qregs[que_idx+1] = 651 htonl(RD_REG_DWORD(®->isp25mq.req_q_out)); 652 mq->qregs[que_idx+2] = 653 htonl(RD_REG_DWORD(®->isp25mq.rsp_q_in)); 654 mq->qregs[que_idx+3] = 655 htonl(RD_REG_DWORD(®->isp25mq.rsp_q_out)); 656 } 657 658 return ptr + sizeof(struct qla2xxx_mq_chain); 659 } 660 661 void 662 qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval) 663 { 664 struct qla_hw_data *ha = vha->hw; 665 666 if (rval != QLA_SUCCESS) { 667 ql_log(ql_log_warn, vha, 0xd000, 668 "Failed to dump firmware (%x), dump status flags (0x%lx).\n", 669 rval, ha->fw_dump_cap_flags); 670 ha->fw_dumped = 0; 671 } else { 672 ql_log(ql_log_info, vha, 0xd001, 673 "Firmware dump saved to temp buffer (%ld/%p), dump status flags (0x%lx).\n", 674 vha->host_no, ha->fw_dump, ha->fw_dump_cap_flags); 675 ha->fw_dumped = 1; 676 qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP); 677 } 678 } 679 680 /** 681 * qla2300_fw_dump() - Dumps binary data from the 2300 firmware. 
682 * @ha: HA context 683 * @hardware_locked: Called with the hardware_lock 684 */ 685 void 686 qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked) 687 { 688 int rval; 689 uint32_t cnt; 690 struct qla_hw_data *ha = vha->hw; 691 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 692 uint16_t __iomem *dmp_reg; 693 unsigned long flags; 694 struct qla2300_fw_dump *fw; 695 void *nxt; 696 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 697 698 flags = 0; 699 700 if (!hardware_locked) 701 spin_lock_irqsave(&ha->hardware_lock, flags); 702 703 if (!ha->fw_dump) { 704 ql_log(ql_log_warn, vha, 0xd002, 705 "No buffer available for dump.\n"); 706 goto qla2300_fw_dump_failed; 707 } 708 709 if (ha->fw_dumped) { 710 ql_log(ql_log_warn, vha, 0xd003, 711 "Firmware has been previously dumped (%p) " 712 "-- ignoring request.\n", 713 ha->fw_dump); 714 goto qla2300_fw_dump_failed; 715 } 716 fw = &ha->fw_dump->isp.isp23; 717 qla2xxx_prep_dump(ha, ha->fw_dump); 718 719 rval = QLA_SUCCESS; 720 fw->hccr = htons(RD_REG_WORD(®->hccr)); 721 722 /* Pause RISC. */ 723 WRT_REG_WORD(®->hccr, HCCR_PAUSE_RISC); 724 if (IS_QLA2300(ha)) { 725 for (cnt = 30000; 726 (RD_REG_WORD(®->hccr) & HCCR_RISC_PAUSE) == 0 && 727 rval == QLA_SUCCESS; cnt--) { 728 if (cnt) 729 udelay(100); 730 else 731 rval = QLA_FUNCTION_TIMEOUT; 732 } 733 } else { 734 RD_REG_WORD(®->hccr); /* PCI Posting. */ 735 udelay(10); 736 } 737 738 if (rval == QLA_SUCCESS) { 739 dmp_reg = ®->flash_address; 740 for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++) 741 fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg++)); 742 743 dmp_reg = ®->u.isp2300.req_q_in; 744 for (cnt = 0; cnt < sizeof(fw->risc_host_reg) / 2; cnt++) 745 fw->risc_host_reg[cnt] = htons(RD_REG_WORD(dmp_reg++)); 746 747 dmp_reg = ®->u.isp2300.mailbox0; 748 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++) 749 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg++)); 750 751 WRT_REG_WORD(®->ctrl_status, 0x40); 752 qla2xxx_read_window(reg, 32, fw->resp_dma_reg); 753 754 WRT_REG_WORD(®->ctrl_status, 0x50); 755 qla2xxx_read_window(reg, 48, fw->dma_reg); 756 757 WRT_REG_WORD(®->ctrl_status, 0x00); 758 dmp_reg = ®->risc_hw; 759 for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++) 760 fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg++)); 761 762 WRT_REG_WORD(®->pcr, 0x2000); 763 qla2xxx_read_window(reg, 16, fw->risc_gp0_reg); 764 765 WRT_REG_WORD(®->pcr, 0x2200); 766 qla2xxx_read_window(reg, 16, fw->risc_gp1_reg); 767 768 WRT_REG_WORD(®->pcr, 0x2400); 769 qla2xxx_read_window(reg, 16, fw->risc_gp2_reg); 770 771 WRT_REG_WORD(®->pcr, 0x2600); 772 qla2xxx_read_window(reg, 16, fw->risc_gp3_reg); 773 774 WRT_REG_WORD(®->pcr, 0x2800); 775 qla2xxx_read_window(reg, 16, fw->risc_gp4_reg); 776 777 WRT_REG_WORD(®->pcr, 0x2A00); 778 qla2xxx_read_window(reg, 16, fw->risc_gp5_reg); 779 780 WRT_REG_WORD(®->pcr, 0x2C00); 781 qla2xxx_read_window(reg, 16, fw->risc_gp6_reg); 782 783 WRT_REG_WORD(®->pcr, 0x2E00); 784 qla2xxx_read_window(reg, 16, fw->risc_gp7_reg); 785 786 WRT_REG_WORD(®->ctrl_status, 0x10); 787 qla2xxx_read_window(reg, 64, fw->frame_buf_hdw_reg); 788 789 WRT_REG_WORD(®->ctrl_status, 0x20); 790 qla2xxx_read_window(reg, 64, fw->fpm_b0_reg); 791 792 WRT_REG_WORD(®->ctrl_status, 0x30); 793 qla2xxx_read_window(reg, 64, fw->fpm_b1_reg); 794 795 /* Reset RISC. 
*/ 796 WRT_REG_WORD(®->ctrl_status, CSR_ISP_SOFT_RESET); 797 for (cnt = 0; cnt < 30000; cnt++) { 798 if ((RD_REG_WORD(®->ctrl_status) & 799 CSR_ISP_SOFT_RESET) == 0) 800 break; 801 802 udelay(10); 803 } 804 } 805 806 if (!IS_QLA2300(ha)) { 807 for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 && 808 rval == QLA_SUCCESS; cnt--) { 809 if (cnt) 810 udelay(100); 811 else 812 rval = QLA_FUNCTION_TIMEOUT; 813 } 814 } 815 816 /* Get RISC SRAM. */ 817 if (rval == QLA_SUCCESS) 818 rval = qla2xxx_dump_ram(ha, 0x800, fw->risc_ram, 819 sizeof(fw->risc_ram) / 2, &nxt); 820 821 /* Get stack SRAM. */ 822 if (rval == QLA_SUCCESS) 823 rval = qla2xxx_dump_ram(ha, 0x10000, fw->stack_ram, 824 sizeof(fw->stack_ram) / 2, &nxt); 825 826 /* Get data SRAM. */ 827 if (rval == QLA_SUCCESS) 828 rval = qla2xxx_dump_ram(ha, 0x11000, fw->data_ram, 829 ha->fw_memory_size - 0x11000 + 1, &nxt); 830 831 if (rval == QLA_SUCCESS) 832 qla2xxx_copy_queues(ha, nxt); 833 834 qla2xxx_dump_post_process(base_vha, rval); 835 836 qla2300_fw_dump_failed: 837 if (!hardware_locked) 838 spin_unlock_irqrestore(&ha->hardware_lock, flags); 839 } 840 841 /** 842 * qla2100_fw_dump() - Dumps binary data from the 2100/2200 firmware. 843 * @ha: HA context 844 * @hardware_locked: Called with the hardware_lock 845 */ 846 void 847 qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked) 848 { 849 int rval; 850 uint32_t cnt, timer; 851 uint16_t risc_address; 852 uint16_t mb0, mb2; 853 struct qla_hw_data *ha = vha->hw; 854 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 855 uint16_t __iomem *dmp_reg; 856 unsigned long flags; 857 struct qla2100_fw_dump *fw; 858 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 859 860 risc_address = 0; 861 mb0 = mb2 = 0; 862 flags = 0; 863 864 if (!hardware_locked) 865 spin_lock_irqsave(&ha->hardware_lock, flags); 866 867 if (!ha->fw_dump) { 868 ql_log(ql_log_warn, vha, 0xd004, 869 "No buffer available for dump.\n"); 870 goto qla2100_fw_dump_failed; 871 } 872 873 if (ha->fw_dumped) { 874 ql_log(ql_log_warn, vha, 0xd005, 875 "Firmware has been previously dumped (%p) " 876 "-- ignoring request.\n", 877 ha->fw_dump); 878 goto qla2100_fw_dump_failed; 879 } 880 fw = &ha->fw_dump->isp.isp21; 881 qla2xxx_prep_dump(ha, ha->fw_dump); 882 883 rval = QLA_SUCCESS; 884 fw->hccr = htons(RD_REG_WORD(®->hccr)); 885 886 /* Pause RISC. 
*/ 887 WRT_REG_WORD(®->hccr, HCCR_PAUSE_RISC); 888 for (cnt = 30000; (RD_REG_WORD(®->hccr) & HCCR_RISC_PAUSE) == 0 && 889 rval == QLA_SUCCESS; cnt--) { 890 if (cnt) 891 udelay(100); 892 else 893 rval = QLA_FUNCTION_TIMEOUT; 894 } 895 if (rval == QLA_SUCCESS) { 896 dmp_reg = ®->flash_address; 897 for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++) 898 fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg++)); 899 900 dmp_reg = ®->u.isp2100.mailbox0; 901 for (cnt = 0; cnt < ha->mbx_count; cnt++) { 902 if (cnt == 8) 903 dmp_reg = ®->u_end.isp2200.mailbox8; 904 905 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg++)); 906 } 907 908 dmp_reg = ®->u.isp2100.unused_2[0]; 909 for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++) 910 fw->dma_reg[cnt] = htons(RD_REG_WORD(dmp_reg++)); 911 912 WRT_REG_WORD(®->ctrl_status, 0x00); 913 dmp_reg = ®->risc_hw; 914 for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++) 915 fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg++)); 916 917 WRT_REG_WORD(®->pcr, 0x2000); 918 qla2xxx_read_window(reg, 16, fw->risc_gp0_reg); 919 920 WRT_REG_WORD(®->pcr, 0x2100); 921 qla2xxx_read_window(reg, 16, fw->risc_gp1_reg); 922 923 WRT_REG_WORD(®->pcr, 0x2200); 924 qla2xxx_read_window(reg, 16, fw->risc_gp2_reg); 925 926 WRT_REG_WORD(®->pcr, 0x2300); 927 qla2xxx_read_window(reg, 16, fw->risc_gp3_reg); 928 929 WRT_REG_WORD(®->pcr, 0x2400); 930 qla2xxx_read_window(reg, 16, fw->risc_gp4_reg); 931 932 WRT_REG_WORD(®->pcr, 0x2500); 933 qla2xxx_read_window(reg, 16, fw->risc_gp5_reg); 934 935 WRT_REG_WORD(®->pcr, 0x2600); 936 qla2xxx_read_window(reg, 16, fw->risc_gp6_reg); 937 938 WRT_REG_WORD(®->pcr, 0x2700); 939 qla2xxx_read_window(reg, 16, fw->risc_gp7_reg); 940 941 WRT_REG_WORD(®->ctrl_status, 0x10); 942 qla2xxx_read_window(reg, 16, fw->frame_buf_hdw_reg); 943 944 WRT_REG_WORD(®->ctrl_status, 0x20); 945 qla2xxx_read_window(reg, 64, fw->fpm_b0_reg); 946 947 WRT_REG_WORD(®->ctrl_status, 0x30); 948 qla2xxx_read_window(reg, 64, fw->fpm_b1_reg); 949 950 /* Reset the ISP. */ 951 WRT_REG_WORD(®->ctrl_status, CSR_ISP_SOFT_RESET); 952 } 953 954 for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 && 955 rval == QLA_SUCCESS; cnt--) { 956 if (cnt) 957 udelay(100); 958 else 959 rval = QLA_FUNCTION_TIMEOUT; 960 } 961 962 /* Pause RISC. */ 963 if (rval == QLA_SUCCESS && (IS_QLA2200(ha) || (IS_QLA2100(ha) && 964 (RD_REG_WORD(®->mctr) & (BIT_1 | BIT_0)) != 0))) { 965 966 WRT_REG_WORD(®->hccr, HCCR_PAUSE_RISC); 967 for (cnt = 30000; 968 (RD_REG_WORD(®->hccr) & HCCR_RISC_PAUSE) == 0 && 969 rval == QLA_SUCCESS; cnt--) { 970 if (cnt) 971 udelay(100); 972 else 973 rval = QLA_FUNCTION_TIMEOUT; 974 } 975 if (rval == QLA_SUCCESS) { 976 /* Set memory configuration and timing. */ 977 if (IS_QLA2100(ha)) 978 WRT_REG_WORD(®->mctr, 0xf1); 979 else 980 WRT_REG_WORD(®->mctr, 0xf2); 981 RD_REG_WORD(®->mctr); /* PCI Posting. */ 982 983 /* Release RISC. */ 984 WRT_REG_WORD(®->hccr, HCCR_RELEASE_RISC); 985 } 986 } 987 988 if (rval == QLA_SUCCESS) { 989 /* Get RISC SRAM. */ 990 risc_address = 0x1000; 991 WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_WORD); 992 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 993 } 994 for (cnt = 0; cnt < sizeof(fw->risc_ram) / 2 && rval == QLA_SUCCESS; 995 cnt++, risc_address++) { 996 WRT_MAILBOX_REG(ha, reg, 1, risc_address); 997 WRT_REG_WORD(®->hccr, HCCR_SET_HOST_INT); 998 999 for (timer = 6000000; timer != 0; timer--) { 1000 /* Check for pending interrupts. 
*/ 1001 if (RD_REG_WORD(®->istatus) & ISR_RISC_INT) { 1002 if (RD_REG_WORD(®->semaphore) & BIT_0) { 1003 set_bit(MBX_INTERRUPT, 1004 &ha->mbx_cmd_flags); 1005 1006 mb0 = RD_MAILBOX_REG(ha, reg, 0); 1007 mb2 = RD_MAILBOX_REG(ha, reg, 2); 1008 1009 WRT_REG_WORD(®->semaphore, 0); 1010 WRT_REG_WORD(®->hccr, 1011 HCCR_CLR_RISC_INT); 1012 RD_REG_WORD(®->hccr); 1013 break; 1014 } 1015 WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); 1016 RD_REG_WORD(®->hccr); 1017 } 1018 udelay(5); 1019 } 1020 1021 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) { 1022 rval = mb0 & MBS_MASK; 1023 fw->risc_ram[cnt] = htons(mb2); 1024 } else { 1025 rval = QLA_FUNCTION_FAILED; 1026 } 1027 } 1028 1029 if (rval == QLA_SUCCESS) 1030 qla2xxx_copy_queues(ha, &fw->risc_ram[cnt]); 1031 1032 qla2xxx_dump_post_process(base_vha, rval); 1033 1034 qla2100_fw_dump_failed: 1035 if (!hardware_locked) 1036 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1037 } 1038 1039 void 1040 qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) 1041 { 1042 int rval; 1043 uint32_t cnt; 1044 uint32_t risc_address; 1045 struct qla_hw_data *ha = vha->hw; 1046 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1047 uint32_t __iomem *dmp_reg; 1048 uint32_t *iter_reg; 1049 uint16_t __iomem *mbx_reg; 1050 unsigned long flags; 1051 struct qla24xx_fw_dump *fw; 1052 uint32_t ext_mem_cnt; 1053 void *nxt; 1054 void *nxt_chain; 1055 uint32_t *last_chain = NULL; 1056 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 1057 1058 if (IS_P3P_TYPE(ha)) 1059 return; 1060 1061 risc_address = ext_mem_cnt = 0; 1062 flags = 0; 1063 ha->fw_dump_cap_flags = 0; 1064 1065 if (!hardware_locked) 1066 spin_lock_irqsave(&ha->hardware_lock, flags); 1067 1068 if (!ha->fw_dump) { 1069 ql_log(ql_log_warn, vha, 0xd006, 1070 "No buffer available for dump.\n"); 1071 goto qla24xx_fw_dump_failed; 1072 } 1073 1074 if (ha->fw_dumped) { 1075 ql_log(ql_log_warn, vha, 0xd007, 1076 "Firmware has been previously dumped (%p) " 1077 "-- ignoring request.\n", 1078 ha->fw_dump); 1079 goto qla24xx_fw_dump_failed; 1080 } 1081 fw = &ha->fw_dump->isp.isp24; 1082 qla2xxx_prep_dump(ha, ha->fw_dump); 1083 1084 fw->host_status = htonl(RD_REG_DWORD(®->host_status)); 1085 1086 /* 1087 * Pause RISC. No need to track timeout, as resetting the chip 1088 * is the right approach incase of pause timeout 1089 */ 1090 qla24xx_pause_risc(reg, ha); 1091 1092 /* Host interface registers. */ 1093 dmp_reg = ®->flash_addr; 1094 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++) 1095 fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++)); 1096 1097 /* Disable interrupts. */ 1098 WRT_REG_DWORD(®->ictrl, 0); 1099 RD_REG_DWORD(®->ictrl); 1100 1101 /* Shadow registers. 
*/ 1102 WRT_REG_DWORD(®->iobase_addr, 0x0F70); 1103 RD_REG_DWORD(®->iobase_addr); 1104 WRT_REG_DWORD(®->iobase_select, 0xB0000000); 1105 fw->shadow_reg[0] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1106 1107 WRT_REG_DWORD(®->iobase_select, 0xB0100000); 1108 fw->shadow_reg[1] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1109 1110 WRT_REG_DWORD(®->iobase_select, 0xB0200000); 1111 fw->shadow_reg[2] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1112 1113 WRT_REG_DWORD(®->iobase_select, 0xB0300000); 1114 fw->shadow_reg[3] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1115 1116 WRT_REG_DWORD(®->iobase_select, 0xB0400000); 1117 fw->shadow_reg[4] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1118 1119 WRT_REG_DWORD(®->iobase_select, 0xB0500000); 1120 fw->shadow_reg[5] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1121 1122 WRT_REG_DWORD(®->iobase_select, 0xB0600000); 1123 fw->shadow_reg[6] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1124 1125 /* Mailbox registers. */ 1126 mbx_reg = ®->mailbox0; 1127 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++) 1128 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++)); 1129 1130 /* Transfer sequence registers. */ 1131 iter_reg = fw->xseq_gp_reg; 1132 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg); 1133 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg); 1134 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg); 1135 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg); 1136 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg); 1137 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg); 1138 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg); 1139 qla24xx_read_window(reg, 0xBF70, 16, iter_reg); 1140 1141 qla24xx_read_window(reg, 0xBFE0, 16, fw->xseq_0_reg); 1142 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg); 1143 1144 /* Receive sequence registers. */ 1145 iter_reg = fw->rseq_gp_reg; 1146 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg); 1147 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg); 1148 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg); 1149 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg); 1150 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg); 1151 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg); 1152 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg); 1153 qla24xx_read_window(reg, 0xFF70, 16, iter_reg); 1154 1155 qla24xx_read_window(reg, 0xFFD0, 16, fw->rseq_0_reg); 1156 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg); 1157 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg); 1158 1159 /* Command DMA registers. */ 1160 qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg); 1161 1162 /* Queues. */ 1163 iter_reg = fw->req0_dma_reg; 1164 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg); 1165 dmp_reg = ®->iobase_q; 1166 for (cnt = 0; cnt < 7; cnt++) 1167 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1168 1169 iter_reg = fw->resp0_dma_reg; 1170 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg); 1171 dmp_reg = ®->iobase_q; 1172 for (cnt = 0; cnt < 7; cnt++) 1173 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1174 1175 iter_reg = fw->req1_dma_reg; 1176 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg); 1177 dmp_reg = ®->iobase_q; 1178 for (cnt = 0; cnt < 7; cnt++) 1179 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1180 1181 /* Transmit DMA registers. 
*/ 1182 iter_reg = fw->xmt0_dma_reg; 1183 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg); 1184 qla24xx_read_window(reg, 0x7610, 16, iter_reg); 1185 1186 iter_reg = fw->xmt1_dma_reg; 1187 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg); 1188 qla24xx_read_window(reg, 0x7630, 16, iter_reg); 1189 1190 iter_reg = fw->xmt2_dma_reg; 1191 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg); 1192 qla24xx_read_window(reg, 0x7650, 16, iter_reg); 1193 1194 iter_reg = fw->xmt3_dma_reg; 1195 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg); 1196 qla24xx_read_window(reg, 0x7670, 16, iter_reg); 1197 1198 iter_reg = fw->xmt4_dma_reg; 1199 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg); 1200 qla24xx_read_window(reg, 0x7690, 16, iter_reg); 1201 1202 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg); 1203 1204 /* Receive DMA registers. */ 1205 iter_reg = fw->rcvt0_data_dma_reg; 1206 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg); 1207 qla24xx_read_window(reg, 0x7710, 16, iter_reg); 1208 1209 iter_reg = fw->rcvt1_data_dma_reg; 1210 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg); 1211 qla24xx_read_window(reg, 0x7730, 16, iter_reg); 1212 1213 /* RISC registers. */ 1214 iter_reg = fw->risc_gp_reg; 1215 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg); 1216 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg); 1217 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg); 1218 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg); 1219 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg); 1220 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg); 1221 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg); 1222 qla24xx_read_window(reg, 0x0F70, 16, iter_reg); 1223 1224 /* Local memory controller registers. */ 1225 iter_reg = fw->lmc_reg; 1226 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg); 1227 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg); 1228 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg); 1229 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg); 1230 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg); 1231 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg); 1232 qla24xx_read_window(reg, 0x3060, 16, iter_reg); 1233 1234 /* Fibre Protocol Module registers. */ 1235 iter_reg = fw->fpm_hdw_reg; 1236 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg); 1237 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg); 1238 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg); 1239 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg); 1240 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg); 1241 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg); 1242 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg); 1243 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg); 1244 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg); 1245 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg); 1246 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg); 1247 qla24xx_read_window(reg, 0x40B0, 16, iter_reg); 1248 1249 /* Frame Buffer registers. 
*/ 1250 iter_reg = fw->fb_hdw_reg; 1251 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg); 1252 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg); 1253 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg); 1254 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg); 1255 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg); 1256 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg); 1257 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg); 1258 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg); 1259 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg); 1260 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg); 1261 qla24xx_read_window(reg, 0x61B0, 16, iter_reg); 1262 1263 rval = qla24xx_soft_reset(ha); 1264 if (rval != QLA_SUCCESS) 1265 goto qla24xx_fw_dump_failed_0; 1266 1267 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram), 1268 &nxt); 1269 if (rval != QLA_SUCCESS) 1270 goto qla24xx_fw_dump_failed_0; 1271 1272 nxt = qla2xxx_copy_queues(ha, nxt); 1273 1274 qla24xx_copy_eft(ha, nxt); 1275 1276 nxt_chain = (void *)ha->fw_dump + ha->chain_offset; 1277 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain); 1278 if (last_chain) { 1279 ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT); 1280 *last_chain |= __constant_htonl(DUMP_CHAIN_LAST); 1281 } 1282 1283 /* Adjust valid length. */ 1284 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump); 1285 1286 qla24xx_fw_dump_failed_0: 1287 qla2xxx_dump_post_process(base_vha, rval); 1288 1289 qla24xx_fw_dump_failed: 1290 if (!hardware_locked) 1291 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1292 } 1293 1294 void 1295 qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) 1296 { 1297 int rval; 1298 uint32_t cnt; 1299 uint32_t risc_address; 1300 struct qla_hw_data *ha = vha->hw; 1301 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1302 uint32_t __iomem *dmp_reg; 1303 uint32_t *iter_reg; 1304 uint16_t __iomem *mbx_reg; 1305 unsigned long flags; 1306 struct qla25xx_fw_dump *fw; 1307 uint32_t ext_mem_cnt; 1308 void *nxt, *nxt_chain; 1309 uint32_t *last_chain = NULL; 1310 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 1311 1312 risc_address = ext_mem_cnt = 0; 1313 flags = 0; 1314 ha->fw_dump_cap_flags = 0; 1315 1316 if (!hardware_locked) 1317 spin_lock_irqsave(&ha->hardware_lock, flags); 1318 1319 if (!ha->fw_dump) { 1320 ql_log(ql_log_warn, vha, 0xd008, 1321 "No buffer available for dump.\n"); 1322 goto qla25xx_fw_dump_failed; 1323 } 1324 1325 if (ha->fw_dumped) { 1326 ql_log(ql_log_warn, vha, 0xd009, 1327 "Firmware has been previously dumped (%p) " 1328 "-- ignoring request.\n", 1329 ha->fw_dump); 1330 goto qla25xx_fw_dump_failed; 1331 } 1332 fw = &ha->fw_dump->isp.isp25; 1333 qla2xxx_prep_dump(ha, ha->fw_dump); 1334 ha->fw_dump->version = __constant_htonl(2); 1335 1336 fw->host_status = htonl(RD_REG_DWORD(®->host_status)); 1337 1338 /* 1339 * Pause RISC. No need to track timeout, as resetting the chip 1340 * is the right approach incase of pause timeout 1341 */ 1342 qla24xx_pause_risc(reg, ha); 1343 1344 /* Host/Risc registers. */ 1345 iter_reg = fw->host_risc_reg; 1346 iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg); 1347 qla24xx_read_window(reg, 0x7010, 16, iter_reg); 1348 1349 /* PCIe registers. 
*/ 1350 WRT_REG_DWORD(®->iobase_addr, 0x7C00); 1351 RD_REG_DWORD(®->iobase_addr); 1352 WRT_REG_DWORD(®->iobase_window, 0x01); 1353 dmp_reg = ®->iobase_c4; 1354 fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++)); 1355 fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++)); 1356 fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg)); 1357 fw->pcie_regs[3] = htonl(RD_REG_DWORD(®->iobase_window)); 1358 1359 WRT_REG_DWORD(®->iobase_window, 0x00); 1360 RD_REG_DWORD(®->iobase_window); 1361 1362 /* Host interface registers. */ 1363 dmp_reg = ®->flash_addr; 1364 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++) 1365 fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++)); 1366 1367 /* Disable interrupts. */ 1368 WRT_REG_DWORD(®->ictrl, 0); 1369 RD_REG_DWORD(®->ictrl); 1370 1371 /* Shadow registers. */ 1372 WRT_REG_DWORD(®->iobase_addr, 0x0F70); 1373 RD_REG_DWORD(®->iobase_addr); 1374 WRT_REG_DWORD(®->iobase_select, 0xB0000000); 1375 fw->shadow_reg[0] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1376 1377 WRT_REG_DWORD(®->iobase_select, 0xB0100000); 1378 fw->shadow_reg[1] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1379 1380 WRT_REG_DWORD(®->iobase_select, 0xB0200000); 1381 fw->shadow_reg[2] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1382 1383 WRT_REG_DWORD(®->iobase_select, 0xB0300000); 1384 fw->shadow_reg[3] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1385 1386 WRT_REG_DWORD(®->iobase_select, 0xB0400000); 1387 fw->shadow_reg[4] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1388 1389 WRT_REG_DWORD(®->iobase_select, 0xB0500000); 1390 fw->shadow_reg[5] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1391 1392 WRT_REG_DWORD(®->iobase_select, 0xB0600000); 1393 fw->shadow_reg[6] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1394 1395 WRT_REG_DWORD(®->iobase_select, 0xB0700000); 1396 fw->shadow_reg[7] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1397 1398 WRT_REG_DWORD(®->iobase_select, 0xB0800000); 1399 fw->shadow_reg[8] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1400 1401 WRT_REG_DWORD(®->iobase_select, 0xB0900000); 1402 fw->shadow_reg[9] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1403 1404 WRT_REG_DWORD(®->iobase_select, 0xB0A00000); 1405 fw->shadow_reg[10] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1406 1407 /* RISC I/O register. */ 1408 WRT_REG_DWORD(®->iobase_addr, 0x0010); 1409 fw->risc_io_reg = htonl(RD_REG_DWORD(®->iobase_window)); 1410 1411 /* Mailbox registers. */ 1412 mbx_reg = ®->mailbox0; 1413 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++) 1414 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++)); 1415 1416 /* Transfer sequence registers. */ 1417 iter_reg = fw->xseq_gp_reg; 1418 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg); 1419 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg); 1420 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg); 1421 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg); 1422 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg); 1423 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg); 1424 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg); 1425 qla24xx_read_window(reg, 0xBF70, 16, iter_reg); 1426 1427 iter_reg = fw->xseq_0_reg; 1428 iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg); 1429 iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg); 1430 qla24xx_read_window(reg, 0xBFE0, 16, iter_reg); 1431 1432 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg); 1433 1434 /* Receive sequence registers. 
*/ 1435 iter_reg = fw->rseq_gp_reg; 1436 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg); 1437 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg); 1438 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg); 1439 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg); 1440 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg); 1441 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg); 1442 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg); 1443 qla24xx_read_window(reg, 0xFF70, 16, iter_reg); 1444 1445 iter_reg = fw->rseq_0_reg; 1446 iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg); 1447 qla24xx_read_window(reg, 0xFFD0, 16, iter_reg); 1448 1449 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg); 1450 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg); 1451 1452 /* Auxiliary sequence registers. */ 1453 iter_reg = fw->aseq_gp_reg; 1454 iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg); 1455 iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg); 1456 iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg); 1457 iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg); 1458 iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg); 1459 iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg); 1460 iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg); 1461 qla24xx_read_window(reg, 0xB070, 16, iter_reg); 1462 1463 iter_reg = fw->aseq_0_reg; 1464 iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg); 1465 qla24xx_read_window(reg, 0xB0D0, 16, iter_reg); 1466 1467 qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg); 1468 qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg); 1469 1470 /* Command DMA registers. */ 1471 qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg); 1472 1473 /* Queues. */ 1474 iter_reg = fw->req0_dma_reg; 1475 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg); 1476 dmp_reg = ®->iobase_q; 1477 for (cnt = 0; cnt < 7; cnt++) 1478 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1479 1480 iter_reg = fw->resp0_dma_reg; 1481 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg); 1482 dmp_reg = ®->iobase_q; 1483 for (cnt = 0; cnt < 7; cnt++) 1484 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1485 1486 iter_reg = fw->req1_dma_reg; 1487 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg); 1488 dmp_reg = ®->iobase_q; 1489 for (cnt = 0; cnt < 7; cnt++) 1490 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1491 1492 /* Transmit DMA registers. */ 1493 iter_reg = fw->xmt0_dma_reg; 1494 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg); 1495 qla24xx_read_window(reg, 0x7610, 16, iter_reg); 1496 1497 iter_reg = fw->xmt1_dma_reg; 1498 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg); 1499 qla24xx_read_window(reg, 0x7630, 16, iter_reg); 1500 1501 iter_reg = fw->xmt2_dma_reg; 1502 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg); 1503 qla24xx_read_window(reg, 0x7650, 16, iter_reg); 1504 1505 iter_reg = fw->xmt3_dma_reg; 1506 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg); 1507 qla24xx_read_window(reg, 0x7670, 16, iter_reg); 1508 1509 iter_reg = fw->xmt4_dma_reg; 1510 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg); 1511 qla24xx_read_window(reg, 0x7690, 16, iter_reg); 1512 1513 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg); 1514 1515 /* Receive DMA registers. 
*/ 1516 iter_reg = fw->rcvt0_data_dma_reg; 1517 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg); 1518 qla24xx_read_window(reg, 0x7710, 16, iter_reg); 1519 1520 iter_reg = fw->rcvt1_data_dma_reg; 1521 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg); 1522 qla24xx_read_window(reg, 0x7730, 16, iter_reg); 1523 1524 /* RISC registers. */ 1525 iter_reg = fw->risc_gp_reg; 1526 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg); 1527 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg); 1528 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg); 1529 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg); 1530 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg); 1531 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg); 1532 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg); 1533 qla24xx_read_window(reg, 0x0F70, 16, iter_reg); 1534 1535 /* Local memory controller registers. */ 1536 iter_reg = fw->lmc_reg; 1537 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg); 1538 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg); 1539 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg); 1540 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg); 1541 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg); 1542 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg); 1543 iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg); 1544 qla24xx_read_window(reg, 0x3070, 16, iter_reg); 1545 1546 /* Fibre Protocol Module registers. */ 1547 iter_reg = fw->fpm_hdw_reg; 1548 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg); 1549 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg); 1550 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg); 1551 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg); 1552 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg); 1553 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg); 1554 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg); 1555 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg); 1556 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg); 1557 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg); 1558 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg); 1559 qla24xx_read_window(reg, 0x40B0, 16, iter_reg); 1560 1561 /* Frame Buffer registers. 
*/ 1562 iter_reg = fw->fb_hdw_reg; 1563 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg); 1564 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg); 1565 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg); 1566 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg); 1567 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg); 1568 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg); 1569 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg); 1570 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg); 1571 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg); 1572 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg); 1573 iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg); 1574 qla24xx_read_window(reg, 0x6F00, 16, iter_reg); 1575 1576 /* Multi queue registers */ 1577 nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset, 1578 &last_chain); 1579 1580 rval = qla24xx_soft_reset(ha); 1581 if (rval != QLA_SUCCESS) 1582 goto qla25xx_fw_dump_failed_0; 1583 1584 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram), 1585 &nxt); 1586 if (rval != QLA_SUCCESS) 1587 goto qla25xx_fw_dump_failed_0; 1588 1589 nxt = qla2xxx_copy_queues(ha, nxt); 1590 1591 qla24xx_copy_eft(ha, nxt); 1592 1593 /* Chain entries -- started with MQ. */ 1594 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain); 1595 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain); 1596 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain); 1597 if (last_chain) { 1598 ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT); 1599 *last_chain |= __constant_htonl(DUMP_CHAIN_LAST); 1600 } 1601 1602 /* Adjust valid length. */ 1603 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump); 1604 1605 qla25xx_fw_dump_failed_0: 1606 qla2xxx_dump_post_process(base_vha, rval); 1607 1608 qla25xx_fw_dump_failed: 1609 if (!hardware_locked) 1610 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1611 } 1612 1613 void 1614 qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) 1615 { 1616 int rval; 1617 uint32_t cnt; 1618 uint32_t risc_address; 1619 struct qla_hw_data *ha = vha->hw; 1620 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1621 uint32_t __iomem *dmp_reg; 1622 uint32_t *iter_reg; 1623 uint16_t __iomem *mbx_reg; 1624 unsigned long flags; 1625 struct qla81xx_fw_dump *fw; 1626 uint32_t ext_mem_cnt; 1627 void *nxt, *nxt_chain; 1628 uint32_t *last_chain = NULL; 1629 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 1630 1631 risc_address = ext_mem_cnt = 0; 1632 flags = 0; 1633 ha->fw_dump_cap_flags = 0; 1634 1635 if (!hardware_locked) 1636 spin_lock_irqsave(&ha->hardware_lock, flags); 1637 1638 if (!ha->fw_dump) { 1639 ql_log(ql_log_warn, vha, 0xd00a, 1640 "No buffer available for dump.\n"); 1641 goto qla81xx_fw_dump_failed; 1642 } 1643 1644 if (ha->fw_dumped) { 1645 ql_log(ql_log_warn, vha, 0xd00b, 1646 "Firmware has been previously dumped (%p) " 1647 "-- ignoring request.\n", 1648 ha->fw_dump); 1649 goto qla81xx_fw_dump_failed; 1650 } 1651 fw = &ha->fw_dump->isp.isp81; 1652 qla2xxx_prep_dump(ha, ha->fw_dump); 1653 1654 fw->host_status = htonl(RD_REG_DWORD(®->host_status)); 1655 1656 /* 1657 * Pause RISC. No need to track timeout, as resetting the chip 1658 * is the right approach incase of pause timeout 1659 */ 1660 qla24xx_pause_risc(reg, ha); 1661 1662 /* Host/Risc registers. 
*/ 1663 iter_reg = fw->host_risc_reg; 1664 iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg); 1665 qla24xx_read_window(reg, 0x7010, 16, iter_reg); 1666 1667 /* PCIe registers. */ 1668 WRT_REG_DWORD(®->iobase_addr, 0x7C00); 1669 RD_REG_DWORD(®->iobase_addr); 1670 WRT_REG_DWORD(®->iobase_window, 0x01); 1671 dmp_reg = ®->iobase_c4; 1672 fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++)); 1673 fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++)); 1674 fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg)); 1675 fw->pcie_regs[3] = htonl(RD_REG_DWORD(®->iobase_window)); 1676 1677 WRT_REG_DWORD(®->iobase_window, 0x00); 1678 RD_REG_DWORD(®->iobase_window); 1679 1680 /* Host interface registers. */ 1681 dmp_reg = ®->flash_addr; 1682 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++) 1683 fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++)); 1684 1685 /* Disable interrupts. */ 1686 WRT_REG_DWORD(®->ictrl, 0); 1687 RD_REG_DWORD(®->ictrl); 1688 1689 /* Shadow registers. */ 1690 WRT_REG_DWORD(®->iobase_addr, 0x0F70); 1691 RD_REG_DWORD(®->iobase_addr); 1692 WRT_REG_DWORD(®->iobase_select, 0xB0000000); 1693 fw->shadow_reg[0] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1694 1695 WRT_REG_DWORD(®->iobase_select, 0xB0100000); 1696 fw->shadow_reg[1] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1697 1698 WRT_REG_DWORD(®->iobase_select, 0xB0200000); 1699 fw->shadow_reg[2] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1700 1701 WRT_REG_DWORD(®->iobase_select, 0xB0300000); 1702 fw->shadow_reg[3] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1703 1704 WRT_REG_DWORD(®->iobase_select, 0xB0400000); 1705 fw->shadow_reg[4] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1706 1707 WRT_REG_DWORD(®->iobase_select, 0xB0500000); 1708 fw->shadow_reg[5] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1709 1710 WRT_REG_DWORD(®->iobase_select, 0xB0600000); 1711 fw->shadow_reg[6] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1712 1713 WRT_REG_DWORD(®->iobase_select, 0xB0700000); 1714 fw->shadow_reg[7] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1715 1716 WRT_REG_DWORD(®->iobase_select, 0xB0800000); 1717 fw->shadow_reg[8] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1718 1719 WRT_REG_DWORD(®->iobase_select, 0xB0900000); 1720 fw->shadow_reg[9] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1721 1722 WRT_REG_DWORD(®->iobase_select, 0xB0A00000); 1723 fw->shadow_reg[10] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1724 1725 /* RISC I/O register. */ 1726 WRT_REG_DWORD(®->iobase_addr, 0x0010); 1727 fw->risc_io_reg = htonl(RD_REG_DWORD(®->iobase_window)); 1728 1729 /* Mailbox registers. */ 1730 mbx_reg = ®->mailbox0; 1731 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++) 1732 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++)); 1733 1734 /* Transfer sequence registers. 
*/ 1735 iter_reg = fw->xseq_gp_reg; 1736 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg); 1737 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg); 1738 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg); 1739 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg); 1740 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg); 1741 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg); 1742 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg); 1743 qla24xx_read_window(reg, 0xBF70, 16, iter_reg); 1744 1745 iter_reg = fw->xseq_0_reg; 1746 iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg); 1747 iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg); 1748 qla24xx_read_window(reg, 0xBFE0, 16, iter_reg); 1749 1750 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg); 1751 1752 /* Receive sequence registers. */ 1753 iter_reg = fw->rseq_gp_reg; 1754 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg); 1755 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg); 1756 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg); 1757 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg); 1758 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg); 1759 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg); 1760 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg); 1761 qla24xx_read_window(reg, 0xFF70, 16, iter_reg); 1762 1763 iter_reg = fw->rseq_0_reg; 1764 iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg); 1765 qla24xx_read_window(reg, 0xFFD0, 16, iter_reg); 1766 1767 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg); 1768 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg); 1769 1770 /* Auxiliary sequence registers. */ 1771 iter_reg = fw->aseq_gp_reg; 1772 iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg); 1773 iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg); 1774 iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg); 1775 iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg); 1776 iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg); 1777 iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg); 1778 iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg); 1779 qla24xx_read_window(reg, 0xB070, 16, iter_reg); 1780 1781 iter_reg = fw->aseq_0_reg; 1782 iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg); 1783 qla24xx_read_window(reg, 0xB0D0, 16, iter_reg); 1784 1785 qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg); 1786 qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg); 1787 1788 /* Command DMA registers. */ 1789 qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg); 1790 1791 /* Queues. */ 1792 iter_reg = fw->req0_dma_reg; 1793 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg); 1794 dmp_reg = ®->iobase_q; 1795 for (cnt = 0; cnt < 7; cnt++) 1796 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1797 1798 iter_reg = fw->resp0_dma_reg; 1799 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg); 1800 dmp_reg = ®->iobase_q; 1801 for (cnt = 0; cnt < 7; cnt++) 1802 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1803 1804 iter_reg = fw->req1_dma_reg; 1805 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg); 1806 dmp_reg = ®->iobase_q; 1807 for (cnt = 0; cnt < 7; cnt++) 1808 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1809 1810 /* Transmit DMA registers. 
	/* Transmit DMA registers. */
	iter_reg = fw->xmt0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
	qla24xx_read_window(reg, 0x7610, 16, iter_reg);

	iter_reg = fw->xmt1_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
	qla24xx_read_window(reg, 0x7630, 16, iter_reg);

	iter_reg = fw->xmt2_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
	qla24xx_read_window(reg, 0x7650, 16, iter_reg);

	iter_reg = fw->xmt3_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
	qla24xx_read_window(reg, 0x7670, 16, iter_reg);

	iter_reg = fw->xmt4_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
	qla24xx_read_window(reg, 0x7690, 16, iter_reg);

	qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);

	/* Receive DMA registers. */
	iter_reg = fw->rcvt0_data_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
	qla24xx_read_window(reg, 0x7710, 16, iter_reg);

	iter_reg = fw->rcvt1_data_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
	qla24xx_read_window(reg, 0x7730, 16, iter_reg);

	/* RISC registers. */
	iter_reg = fw->risc_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
	qla24xx_read_window(reg, 0x0F70, 16, iter_reg);

	/* Local memory controller registers. */
	iter_reg = fw->lmc_reg;
	iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
	qla24xx_read_window(reg, 0x3070, 16, iter_reg);

	/* Fibre Protocol Module registers. */
	iter_reg = fw->fpm_hdw_reg;
	iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg);
	qla24xx_read_window(reg, 0x40D0, 16, iter_reg);

	/* Frame Buffer registers. */
	iter_reg = fw->fb_hdw_reg;
	iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg);
	qla24xx_read_window(reg, 0x6F00, 16, iter_reg);

	/* Multi queue registers */
	nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
	    &last_chain);

	rval = qla24xx_soft_reset(ha);
	if (rval != QLA_SUCCESS)
		goto qla81xx_fw_dump_failed_0;

	rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
	    &nxt);
	if (rval != QLA_SUCCESS)
		goto qla81xx_fw_dump_failed_0;

	nxt = qla2xxx_copy_queues(ha, nxt);

	qla24xx_copy_eft(ha, nxt);

	/* Chain entries -- started with MQ. */
	nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
	nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
	nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
	if (last_chain) {
		ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
		*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
	}

	/* Adjust valid length. */
	ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);

qla81xx_fw_dump_failed_0:
	qla2xxx_dump_post_process(base_vha, rval);

qla81xx_fw_dump_failed:
	if (!hardware_locked)
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

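/*
 * These dump routines are normally reached through the per-ISP ops table
 * rather than called directly.  A sketch of a typical call site follows;
 * the surrounding context is assumed for illustration, not taken from
 * this file:
 *
 *	if (ha->isp_ops->fw_dump)
 *		ha->isp_ops->fw_dump(vha, 0);
 *
 * Passing hardware_locked == 1 tells the routine that the caller already
 * holds ha->hardware_lock (for example when dumping from interrupt
 * context), so it must not take or release the lock itself.
 */
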
void
qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
{
	int rval;
	uint32_t cnt, reg_data;
	uint32_t risc_address;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	uint32_t __iomem *dmp_reg;
	uint32_t *iter_reg;
	uint16_t __iomem *mbx_reg;
	unsigned long flags;
	struct qla83xx_fw_dump *fw;
	uint32_t ext_mem_cnt;
	void *nxt, *nxt_chain;
	uint32_t *last_chain = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

	risc_address = ext_mem_cnt = 0;
	flags = 0;
	ha->fw_dump_cap_flags = 0;

	if (!hardware_locked)
		spin_lock_irqsave(&ha->hardware_lock, flags);

	if (!ha->fw_dump) {
		ql_log(ql_log_warn, vha, 0xd00c,
		    "No buffer available for dump!!!\n");
		goto qla83xx_fw_dump_failed;
	}

	if (ha->fw_dumped) {
		ql_log(ql_log_warn, vha, 0xd00d,
		    "Firmware has been previously dumped (%p) -- ignoring "
		    "request...\n", ha->fw_dump);
		goto qla83xx_fw_dump_failed;
	}
	fw = &ha->fw_dump->isp.isp83;
	qla2xxx_prep_dump(ha, ha->fw_dump);

	fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));

	/*
	 * Pause RISC. No need to track timeout, as resetting the chip
	 * is the right approach in case of a pause timeout.
	 */
	qla24xx_pause_risc(reg, ha);

	WRT_REG_DWORD(&reg->iobase_addr, 0x6000);
	dmp_reg = &reg->iobase_window;
	reg_data = RD_REG_DWORD(dmp_reg);
	WRT_REG_DWORD(dmp_reg, 0);

	dmp_reg = &reg->unused_4_1[0];
	reg_data = RD_REG_DWORD(dmp_reg);
	WRT_REG_DWORD(dmp_reg, 0);

	WRT_REG_DWORD(&reg->iobase_addr, 0x6010);
	dmp_reg = &reg->unused_4_1[2];
	reg_data = RD_REG_DWORD(dmp_reg);
	WRT_REG_DWORD(dmp_reg, 0);

	/* select PCR and disable ecc checking and correction */
	WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_select, 0x60000000);	/* write to F0h = PCR */

	/* Host/Risc registers. */
	iter_reg = fw->host_risc_reg;
	iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x7010, 16, iter_reg);
	qla24xx_read_window(reg, 0x7040, 16, iter_reg);

	/* PCIe registers. */
	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_window, 0x01);
	dmp_reg = &reg->iobase_c4;
	fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++));
	fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++));
	fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
	fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));

	WRT_REG_DWORD(&reg->iobase_window, 0x00);
	RD_REG_DWORD(&reg->iobase_window);

	/* Host interface registers. */
	dmp_reg = &reg->flash_addr;
	for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
		fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));

	/* Disable interrupts. */
	WRT_REG_DWORD(&reg->ictrl, 0);
	RD_REG_DWORD(&reg->ictrl);

	/* Shadow registers. */
	WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
	fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
	fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
	fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
	fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
	fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
	fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
	fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
	fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
	fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
	fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
	fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	/* RISC I/O register. */
	WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
	fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));

	/* Mailbox registers. */
	mbx_reg = &reg->mailbox0;
	for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
		fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));

	/* Transfer sequence registers. */
	iter_reg = fw->xseq_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0xBE00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBE10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBE20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBE30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBE40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBE50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBE60, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBE70, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
	qla24xx_read_window(reg, 0xBF70, 16, iter_reg);

	iter_reg = fw->xseq_0_reg;
	iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
	qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);

	qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);

	qla24xx_read_window(reg, 0xBEF0, 16, fw->xseq_2_reg);

	/* Receive sequence registers. */
	iter_reg = fw->rseq_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0xFE00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFE10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFE20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFE30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFE40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFE50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFE60, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFE70, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
	qla24xx_read_window(reg, 0xFF70, 16, iter_reg);

	iter_reg = fw->rseq_0_reg;
	iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
	qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);

	qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
	qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
	qla24xx_read_window(reg, 0xFEF0, 16, fw->rseq_3_reg);

	/* Auxiliary sequence registers. */
	iter_reg = fw->aseq_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB070, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB100, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB110, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB120, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB130, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB140, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB150, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB160, 16, iter_reg);
	qla24xx_read_window(reg, 0xB170, 16, iter_reg);

	iter_reg = fw->aseq_0_reg;
	iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
	qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);

	qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
	qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
	qla24xx_read_window(reg, 0xB1F0, 16, fw->aseq_3_reg);

	/* Command DMA registers. */
	iter_reg = fw->cmd_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7100, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x7120, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x7130, 16, iter_reg);
	qla24xx_read_window(reg, 0x71F0, 16, iter_reg);

	/* Queues. */
	iter_reg = fw->req0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++)
		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));

	iter_reg = fw->resp0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++)
		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));

	iter_reg = fw->req1_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++)
		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));

	/* Transmit DMA registers. */
	iter_reg = fw->xmt0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
	qla24xx_read_window(reg, 0x7610, 16, iter_reg);

	iter_reg = fw->xmt1_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
	qla24xx_read_window(reg, 0x7630, 16, iter_reg);

	iter_reg = fw->xmt2_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
	qla24xx_read_window(reg, 0x7650, 16, iter_reg);

	iter_reg = fw->xmt3_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
	qla24xx_read_window(reg, 0x7670, 16, iter_reg);

	iter_reg = fw->xmt4_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
	qla24xx_read_window(reg, 0x7690, 16, iter_reg);

	qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);

	/* Receive DMA registers. */
	iter_reg = fw->rcvt0_data_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
	qla24xx_read_window(reg, 0x7710, 16, iter_reg);

	iter_reg = fw->rcvt1_data_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
	qla24xx_read_window(reg, 0x7730, 16, iter_reg);

	/* RISC registers. */
	iter_reg = fw->risc_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
	qla24xx_read_window(reg, 0x0F70, 16, iter_reg);

	/* Local memory controller registers. */
	iter_reg = fw->lmc_reg;
	iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
	qla24xx_read_window(reg, 0x3070, 16, iter_reg);

	/* Fibre Protocol Module registers. */
	iter_reg = fw->fpm_hdw_reg;
	iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x40D0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x40E0, 16, iter_reg);
	qla24xx_read_window(reg, 0x40F0, 16, iter_reg);

	/* RQ0 Array registers. */
	iter_reg = fw->rq0_array_reg;
	iter_reg = qla24xx_read_window(reg, 0x5C00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5C10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5C20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5C30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5C40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5C50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5C60, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5C70, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5C80, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5C90, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5CA0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5CB0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5CC0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5CD0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5CE0, 16, iter_reg);
	qla24xx_read_window(reg, 0x5CF0, 16, iter_reg);

	/* RQ1 Array registers. */
	iter_reg = fw->rq1_array_reg;
	iter_reg = qla24xx_read_window(reg, 0x5D00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5D10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5D20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5D30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5D40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5D50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5D60, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5D70, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5D80, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5D90, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5DA0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5DB0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5DC0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5DD0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5DE0, 16, iter_reg);
	qla24xx_read_window(reg, 0x5DF0, 16, iter_reg);

	/* RP0 Array registers. */
	iter_reg = fw->rp0_array_reg;
	iter_reg = qla24xx_read_window(reg, 0x5E00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5E10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5E20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5E30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5E40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5E50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5E60, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5E70, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5E80, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5E90, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5EA0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5EB0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5EC0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5ED0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5EE0, 16, iter_reg);
	qla24xx_read_window(reg, 0x5EF0, 16, iter_reg);

	/* RP1 Array registers. */
	iter_reg = fw->rp1_array_reg;
	iter_reg = qla24xx_read_window(reg, 0x5F00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5F10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5F20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5F30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5F40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5F50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5F60, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5F70, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5F80, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5F90, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5FA0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5FB0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5FC0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5FD0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x5FE0, 16, iter_reg);
	qla24xx_read_window(reg, 0x5FF0, 16, iter_reg);

	iter_reg = fw->at0_array_reg;
	iter_reg = qla24xx_read_window(reg, 0x7080, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x7090, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x70A0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x70B0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x70C0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x70D0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x70E0, 16, iter_reg);
	qla24xx_read_window(reg, 0x70F0, 16, iter_reg);

	/* I/O Queue Control registers. */
	qla24xx_read_window(reg, 0x7800, 16, fw->queue_control_reg);

	/* Frame Buffer registers. */
	iter_reg = fw->fb_hdw_reg;
	iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6060, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6070, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6530, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6540, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6550, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6560, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6570, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6580, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6590, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x65A0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x65B0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x65C0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x65D0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x65E0, 16, iter_reg);
	qla24xx_read_window(reg, 0x6F00, 16, iter_reg);

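	/*
	 * Everything from here on is appended after the fixed-size register
	 * snapshot: optional "chain" blocks (MQ, FCE, extra request/response
	 * queues, ATIO queues) are copied one after another, and each copy
	 * helper records the location of the chain header it wrote through
	 * last_chain so the final block can be tagged.  The tagging step,
	 * mirroring the pattern used further below, is roughly:
	 *
	 *	if (last_chain) {
	 *		ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
	 *		*last_chain |= htonl(DUMP_CHAIN_LAST);
	 *	}
	 */
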
	/* Multi queue registers */
	nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
	    &last_chain);

	rval = qla24xx_soft_reset(ha);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0xd00e,
		    "SOFT RESET FAILED, forcing continuation of dump!!!\n");
		rval = QLA_SUCCESS;

		ql_log(ql_log_warn, vha, 0xd00f, "try a bigger hammer!!!\n");

		WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
		RD_REG_DWORD(&reg->hccr);

		WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
		RD_REG_DWORD(&reg->hccr);

		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
		RD_REG_DWORD(&reg->hccr);

		for (cnt = 30000; cnt && (RD_REG_WORD(&reg->mailbox0)); cnt--)
			udelay(5);

		if (!cnt) {
			nxt = fw->code_ram;
			nxt += sizeof(fw->code_ram);
			nxt += (ha->fw_memory_size - 0x100000 + 1);
			goto copy_queue;
		} else {
			set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
			ql_log(ql_log_warn, vha, 0xd010,
			    "bigger hammer success?\n");
		}
	}

	rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
	    &nxt);
	if (rval != QLA_SUCCESS)
		goto qla83xx_fw_dump_failed_0;

copy_queue:
	nxt = qla2xxx_copy_queues(ha, nxt);

	qla24xx_copy_eft(ha, nxt);

	/* Chain entries -- started with MQ. */
	nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
	nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
	nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
	if (last_chain) {
		ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
		*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
	}

	/* Adjust valid length. */
	ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);

qla83xx_fw_dump_failed_0:
	qla2xxx_dump_post_process(base_vha, rval);

qla83xx_fw_dump_failed:
	if (!hardware_locked)
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

/****************************************************************************/
/*                        Driver Debug Functions.                           */
/****************************************************************************/

static inline int
ql_mask_match(uint32_t level)
{
	if (ql2xextended_error_logging == 1)
		ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
	return (level & ql2xextended_error_logging) == level;
}

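/*
 * ql_mask_match() treats 'level' as a bitmask and only passes a message
 * through when every bit of the requested level is enabled in the
 * ql2xextended_error_logging module parameter (a value of 1 is shorthand
 * for the default mask).  A sketch of the effect, using example values
 * that are illustrative rather than taken from the driver headers:
 *
 *	ql2xextended_error_logging = 0x40000000;
 *	ql_mask_match(0x40000000);	returns true: all requested bits set
 *	ql_mask_match(0x60000000);	returns false: 0x20000000 not set
 */
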
/*
 * This function is for formatting and logging debug information.
 * It is to be used when vha is available. It formats the message
 * and logs it to the messages file.
 * parameters:
 *    level: The level of the debug messages to be printed.
 *           If the ql2xextended_error_logging module parameter has the
 *           corresponding bits set, this message will appear in the
 *           messages file.
 *    vha:   Pointer to the scsi_qla_host_t.
 *    id:    This is a unique identifier for the level. It identifies the
 *           part of the code from where the message originated.
 *    fmt:   The printf-style format string for the message, followed by
 *           its arguments.
 */
void
ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
{
	va_list va;
	struct va_format vaf;

	if (!ql_mask_match(level))
		return;

	va_start(va, fmt);

	vaf.fmt = fmt;
	vaf.va = &va;

	if (vha != NULL) {
		const struct pci_dev *pdev = vha->hw->pdev;
		/* <module-name> <pci-name> <msg-id>:<host> Message */
		pr_warn("%s [%s]-%04x:%ld: %pV",
		    QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset,
		    vha->host_no, &vaf);
	} else {
		pr_warn("%s [%s]-%04x: : %pV",
		    QL_MSGHDR, "0000:00:00.0", id + ql_dbg_offset, &vaf);
	}

	va_end(va);
}

/*
 * This function is for formatting and logging debug information.
 * It is to be used when vha is not available and pci is available,
 * i.e., before host allocation. It formats the message and logs it
 * to the messages file.
 * parameters:
 *    level: The level of the debug messages to be printed.
 *           If the ql2xextended_error_logging module parameter has the
 *           corresponding bits set, this message will appear in the
 *           messages file.
 *    pdev:  Pointer to the struct pci_dev.
 *    id:    This is a unique id for the level. It identifies the part
 *           of the code from where the message originated.
 *    fmt:   The printf-style format string for the message, followed by
 *           its arguments.
 */
void
ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
    const char *fmt, ...)
{
	va_list va;
	struct va_format vaf;

	if (pdev == NULL)
		return;
	if (!ql_mask_match(level))
		return;

	va_start(va, fmt);

	vaf.fmt = fmt;
	vaf.va = &va;

	/* <module-name> <dev-name>:<msg-id> Message */
	pr_warn("%s [%s]-%04x: : %pV",
	    QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset, &vaf);

	va_end(va);
}

/*
 * This function is for formatting and logging log messages.
 * It is to be used when vha is available. It formats the message
 * and logs it to the messages file. All the messages will be logged
 * irrespective of the value of ql2xextended_error_logging.
 * parameters:
 *    level: The level of the log messages to be printed in the
 *           messages file.
 *    vha:   Pointer to the scsi_qla_host_t.
 *    id:    This is a unique id for the level. It identifies the
 *           part of the code from where the message originated.
 *    fmt:   The printf-style format string for the message, followed by
 *           its arguments.
 */
void
ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
{
	va_list va;
	struct va_format vaf;
	char pbuf[128];

	if (level > ql_errlev)
		return;

	if (vha != NULL) {
		const struct pci_dev *pdev = vha->hw->pdev;
		/* <module-name> <msg-id>:<host> Message */
		snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x:%ld: ",
		    QL_MSGHDR, dev_name(&(pdev->dev)), id, vha->host_no);
	} else {
		snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
		    QL_MSGHDR, "0000:00:00.0", id);
	}
	pbuf[sizeof(pbuf) - 1] = 0;

	va_start(va, fmt);

	vaf.fmt = fmt;
	vaf.va = &va;

	switch (level) {
	case ql_log_fatal: /* FATAL LOG */
		pr_crit("%s%pV", pbuf, &vaf);
		break;
	case ql_log_warn:
		pr_err("%s%pV", pbuf, &vaf);
		break;
	case ql_log_info:
		pr_warn("%s%pV", pbuf, &vaf);
		break;
	default:
		pr_info("%s%pV", pbuf, &vaf);
		break;
	}

	va_end(va);
}

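/*
 * Typical call sites pair one of the ql_dbg* helpers (mask-gated debug
 * output) with one of the ql_log* helpers (always-on log output, filtered
 * only by ql_errlev).  A sketch with purely illustrative message ids:
 *
 *	ql_dbg(ql_dbg_io, vha, 0x3000, "cmd=%p completed.\n", cmd);
 *	ql_log(ql_log_warn, vha, 0x7000, "unexpected status 0x%x.\n", stat);
 *
 * Note that ql_dbg()/ql_dbg_pci() add ql_dbg_offset to the id they print,
 * while ql_log()/ql_log_pci() print the id unchanged.
 */
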
/*
 * This function is for formatting and logging log messages.
 * It is to be used when vha is not available and pci is available,
 * i.e., before host allocation. It formats the message and logs
 * it to the messages file. All the messages are logged irrespective
 * of the value of ql2xextended_error_logging.
 * parameters:
 *    level: The level of the log messages to be printed in the
 *           messages file.
 *    pdev:  Pointer to the struct pci_dev.
 *    id:    This is a unique id for the level. It identifies the
 *           part of the code from where the message originated.
 *    fmt:   The printf-style format string for the message, followed by
 *           its arguments.
 */
void
ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
    const char *fmt, ...)
{
	va_list va;
	struct va_format vaf;
	char pbuf[128];

	if (pdev == NULL)
		return;
	if (level > ql_errlev)
		return;

	/* <module-name> <dev-name>:<msg-id> Message */
	snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
	    QL_MSGHDR, dev_name(&(pdev->dev)), id);
	pbuf[sizeof(pbuf) - 1] = 0;

	va_start(va, fmt);

	vaf.fmt = fmt;
	vaf.va = &va;

	switch (level) {
	case ql_log_fatal: /* FATAL LOG */
		pr_crit("%s%pV", pbuf, &vaf);
		break;
	case ql_log_warn:
		pr_err("%s%pV", pbuf, &vaf);
		break;
	case ql_log_info:
		pr_warn("%s%pV", pbuf, &vaf);
		break;
	default:
		pr_info("%s%pV", pbuf, &vaf);
		break;
	}

	va_end(va);
}

void
ql_dump_regs(uint32_t level, scsi_qla_host_t *vha, int32_t id)
{
	int i;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
	uint16_t __iomem *mbx_reg;

	if (!ql_mask_match(level))
		return;

	if (IS_P3P_TYPE(ha))
		mbx_reg = &reg82->mailbox_in[0];
	else if (IS_FWI2_CAPABLE(ha))
		mbx_reg = &reg24->mailbox0;
	else
		mbx_reg = MAILBOX_REG(ha, reg, 0);

	ql_dbg(level, vha, id, "Mailbox registers:\n");
	for (i = 0; i < 6; i++)
		ql_dbg(level, vha, id,
		    "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++));
}

void
ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id,
    uint8_t *b, uint32_t size)
{
	uint32_t cnt;
	uint8_t c;

	if (!ql_mask_match(level))
		return;

	ql_dbg(level, vha, id, " 0 1 2 3 4 5 6 7 8 "
	    "9 Ah Bh Ch Dh Eh Fh\n");
	ql_dbg(level, vha, id, "----------------------------------"
	    "----------------------------\n");

	ql_dbg(level, vha, id, " ");
	for (cnt = 0; cnt < size; ) {
		c = *b++;
		printk("%02x", (uint32_t) c);
		cnt++;
		if (!(cnt % 16))
			printk("\n");
		else
			printk(" ");
	}
	if (cnt % 16)
		ql_dbg(level, vha, id, "\n");
}

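/*
 * Both helpers above honour the same level mask as ql_dbg(), so they are
 * inexpensive to leave in place at quiet debug levels.  An illustrative
 * (assumed, not taken from the driver) use when tracing a SCSI command:
 *
 *	ql_dump_regs(ql_dbg_mbx, vha, 0x1100);
 *	ql_dump_buffer(ql_dbg_io, vha, 0x3050, cmd->cmnd, cmd->cmd_len);
 */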