/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */

/*
 * Table showing the current message IDs in use for each logging level.
 * Update this table when adding log/debug messages.
 * ----------------------------------------------------------------------
 * |             Level            |   Last Value Used  |     Holes      |
 * ----------------------------------------------------------------------
 * | Module Init and Probe        |       0x0193       | 0x0146         |
 * |                              |                    | 0x015b-0x0160  |
 * |                              |                    | 0x016e         |
 * | Mailbox commands             |       0x1206       | 0x11a2-0x11ff  |
 * | Device Discovery             |       0x2134       | 0x210e-0x2116  |
 * |                              |                    | 0x211a         |
 * |                              |                    | 0x211c-0x2128  |
 * |                              |                    | 0x212a-0x2134  |
 * | Queue Command and IO tracing |       0x3074       | 0x300b         |
 * |                              |                    | 0x3027-0x3028  |
 * |                              |                    | 0x303d-0x3041  |
 * |                              |                    | 0x302d,0x3033  |
 * |                              |                    | 0x3036,0x3038  |
 * |                              |                    | 0x303a         |
 * | DPC Thread                   |       0x4023       | 0x4002,0x4013  |
 * | Async Events                 |       0x5090       | 0x502b-0x502f  |
 * |                              |                    | 0x5047         |
 * |                              |                    | 0x5084,0x5075  |
 * |                              |                    | 0x503d,0x5044  |
 * |                              |                    | 0x505f         |
 * | Timer Routines               |       0x6012       |                |
 * | User Space Interactions      |       0x70e3       | 0x7018,0x702e  |
 * |                              |                    | 0x7020,0x7024  |
 * |                              |                    | 0x7039,0x7045  |
 * |                              |                    | 0x7073-0x7075  |
 * |                              |                    | 0x70a5-0x70a6  |
 * |                              |                    | 0x70a8,0x70ab  |
 * |                              |                    | 0x70ad-0x70ae  |
 * |                              |                    | 0x70d0-0x70d6  |
 * |                              |                    | 0x70d7-0x70db  |
 * | Task Management              |       0x8042       | 0x8000         |
 * |                              |                    | 0x8019         |
 * |                              |                    | 0x8025,0x8026  |
 * |                              |                    | 0x8031,0x8032  |
 * |                              |                    | 0x8039,0x803c  |
 * | AER/EEH                      |       0x9011       |                |
 * | Virtual Port                 |       0xa007       |                |
 * | ISP82XX Specific             |       0xb157       | 0xb002,0xb024  |
 * |                              |                    | 0xb09e,0xb0ae  |
 * |                              |                    | 0xb0c3,0xb0c6  |
 * |                              |                    | 0xb0e0-0xb0ef  |
 * |                              |                    | 0xb085,0xb0dc  |
 * |                              |                    | 0xb107,0xb108  |
 * |                              |                    | 0xb111,0xb11e  |
 * |                              |                    | 0xb12c,0xb12d  |
 * |                              |                    | 0xb13a,0xb142  |
 * |                              |                    | 0xb13c-0xb140  |
 * |                              |                    | 0xb149         |
 * | MultiQ                       |       0xc010       |                |
 * | Misc                         |       0xd303       | 0xd031-0xd0ff  |
 * |                              |                    | 0xd101-0xd1fe  |
 * |                              |                    | 0xd214-0xd2fe  |
 * | Target Mode                  |       0xe081       |                |
 * | Target Mode Management       |       0xf09b       | 0xf002         |
 * |                              |                    | 0xf046-0xf049  |
 * | Target Mode Task Management  |      0x1000d       |                |
 * ----------------------------------------------------------------------
 */

#include "qla_def.h"

#include <linux/delay.h>

static uint32_t ql_dbg_offset = 0x800;

static inline void
qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump *fw_dump)
{
	fw_dump->fw_major_version = htonl(ha->fw_major_version);
	fw_dump->fw_minor_version = htonl(ha->fw_minor_version);
	fw_dump->fw_subminor_version = htonl(ha->fw_subminor_version);
	fw_dump->fw_attributes = htonl(ha->fw_attributes);

	fw_dump->vendor = htonl(ha->pdev->vendor);
	fw_dump->device = htonl(ha->pdev->device);
	fw_dump->subsystem_vendor = htonl(ha->pdev->subsystem_vendor);
	fw_dump->subsystem_device = htonl(ha->pdev->subsystem_device);
}

static inline void *
qla2xxx_copy_queues(struct qla_hw_data *ha, void *ptr)
{
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];
	/* Request queue. */
	memcpy(ptr, req->ring, req->length * sizeof(request_t));

	/* Response queue. */
	ptr += req->length * sizeof(request_t);
	memcpy(ptr, rsp->ring, rsp->length * sizeof(response_t));

	return ptr + (rsp->length * sizeof(response_t));
}
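
/*
 * Descriptive comment added for clarity; wording is inferred from the code
 * below rather than taken from original documentation.
 *
 * qla27xx_dump_mpi_ram() - Read a block of MPI RAM into the dump buffer.
 * @ha: HW context
 * @addr: starting MPI RAM address, in dwords
 * @ram: destination buffer
 * @ram_dwords: number of dwords to read
 * @nxt: updated to track how far @ram has been filled
 *
 * The transfer is performed in gid_list-sized DMA chunks using the
 * MBC_LOAD_DUMP_MPI_RAM mailbox command. Returns QLA_SUCCESS, or a
 * failure/timeout status if a mailbox command does not complete.
 */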
int
qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
	uint32_t ram_dwords, void **nxt)
{
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	dma_addr_t dump_dma = ha->gid_list_dma;
	uint32_t *chunk = (void *)ha->gid_list;
	uint32_t dwords = qla2x00_gid_list_size(ha) / 4;
	uint32_t stat;
	ulong i, j, timer = 6000000;
	int rval = QLA_FUNCTION_FAILED;

	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
	for (i = 0; i < ram_dwords; i += dwords, addr += dwords) {
		if (i + dwords > ram_dwords)
			dwords = ram_dwords - i;

		WRT_REG_WORD(&reg->mailbox0, MBC_LOAD_DUMP_MPI_RAM);
		WRT_REG_WORD(&reg->mailbox1, LSW(addr));
		WRT_REG_WORD(&reg->mailbox8, MSW(addr));

		WRT_REG_WORD(&reg->mailbox2, MSW(LSD(dump_dma)));
		WRT_REG_WORD(&reg->mailbox3, LSW(LSD(dump_dma)));
		WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
		WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));

		WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
		WRT_REG_WORD(&reg->mailbox5, LSW(dwords));

		WRT_REG_WORD(&reg->mailbox9, 0);
		WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);

		ha->flags.mbox_int = 0;
		while (timer--) {
			udelay(5);

			stat = RD_REG_DWORD(&reg->host_status);
			/* Check for pending interrupts. */
			if (!(stat & HSRX_RISC_INT))
				continue;

			stat &= 0xff;
			if (stat != 0x1 && stat != 0x2 &&
			    stat != 0x10 && stat != 0x11) {

				/* Clear this intr; it wasn't a mailbox intr */
				WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
				RD_REG_DWORD(&reg->hccr);
				continue;
			}

			set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
			rval = RD_REG_WORD(&reg->mailbox0) & MBS_MASK;
			WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
			RD_REG_DWORD(&reg->hccr);
			break;
		}
		ha->flags.mbox_int = 1;
		*nxt = ram + i;

		if (!test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
			/* no interrupt, timed out */
			return rval;
		}
		if (rval) {
			/* error completion status */
			return rval;
		}
		for (j = 0; j < dwords; j++) {
			ram[i + j] = (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ?
			    chunk[j] : swab32(chunk[j]);
		}
	}

	*nxt = ram + i;
	return QLA_SUCCESS;
}
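
/*
 * Descriptive comment added for clarity; wording is inferred from the code
 * below rather than taken from original documentation.
 *
 * qla24xx_dump_ram() - Read a block of RISC RAM into the dump buffer.
 * @ha: HW context
 * @addr: starting RISC RAM address, in dwords
 * @ram: destination buffer
 * @ram_dwords: number of dwords to read
 * @nxt: updated to track how far @ram has been filled
 *
 * Uses the MBC_DUMP_RISC_RAM_EXTENDED mailbox command in gid_list-sized
 * DMA chunks. Returns QLA_SUCCESS, or a failure/timeout status if a
 * mailbox command does not complete.
 */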
int
qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
	uint32_t ram_dwords, void **nxt)
{
	int rval = QLA_FUNCTION_FAILED;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	dma_addr_t dump_dma = ha->gid_list_dma;
	uint32_t *chunk = (void *)ha->gid_list;
	uint32_t dwords = qla2x00_gid_list_size(ha) / 4;
	uint32_t stat;
	ulong i, j, timer = 6000000;

	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

	for (i = 0; i < ram_dwords; i += dwords, addr += dwords) {
		if (i + dwords > ram_dwords)
			dwords = ram_dwords - i;

		WRT_REG_WORD(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
		WRT_REG_WORD(&reg->mailbox1, LSW(addr));
		WRT_REG_WORD(&reg->mailbox8, MSW(addr));

		WRT_REG_WORD(&reg->mailbox2, MSW(LSD(dump_dma)));
		WRT_REG_WORD(&reg->mailbox3, LSW(LSD(dump_dma)));
		WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
		WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));

		WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
		WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
		WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);

		ha->flags.mbox_int = 0;
		while (timer--) {
			udelay(5);
			stat = RD_REG_DWORD(&reg->host_status);

			/* Check for pending interrupts. */
			if (!(stat & HSRX_RISC_INT))
				continue;

			stat &= 0xff;
			if (stat != 0x1 && stat != 0x2 &&
			    stat != 0x10 && stat != 0x11) {
				WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
				RD_REG_DWORD(&reg->hccr);
				continue;
			}

			set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
			rval = RD_REG_WORD(&reg->mailbox0) & MBS_MASK;
			WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
			RD_REG_DWORD(&reg->hccr);
			break;
		}
		ha->flags.mbox_int = 1;
		*nxt = ram + i;

		if (!test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
			/* no interrupt, timed out */
			return rval;
		}
		if (rval) {
			/* error completion status */
			return rval;
		}
		for (j = 0; j < dwords; j++) {
			ram[i + j] = (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ?
			    chunk[j] : swab32(chunk[j]);
		}
	}

	*nxt = ram + i;
	return QLA_SUCCESS;
}

static int
qla24xx_dump_memory(struct qla_hw_data *ha, uint32_t *code_ram,
    uint32_t cram_size, void **nxt)
{
	int rval;

	/* Code RAM. */
	rval = qla24xx_dump_ram(ha, 0x20000, code_ram, cram_size / 4, nxt);
	if (rval != QLA_SUCCESS)
		return rval;

	set_bit(RISC_SRAM_DUMP_CMPL, &ha->fw_dump_cap_flags);

	/* External Memory. */
	rval = qla24xx_dump_ram(ha, 0x100000, *nxt,
	    ha->fw_memory_size - 0x100000 + 1, nxt);
	if (rval == QLA_SUCCESS)
		set_bit(RISC_EXT_MEM_DUMP_CMPL, &ha->fw_dump_cap_flags);

	return rval;
}

static uint32_t *
qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
	uint32_t count, uint32_t *buf)
{
	uint32_t __iomem *dmp_reg;

	WRT_REG_DWORD(&reg->iobase_addr, iobase);
	dmp_reg = &reg->iobase_window;
	for ( ; count--; dmp_reg++)
		*buf++ = htonl(RD_REG_DWORD(dmp_reg));

	return buf;
}

void
qla24xx_pause_risc(struct device_reg_24xx __iomem *reg, struct qla_hw_data *ha)
{
	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);

	/* A 100 usec delay is sufficient for the hardware to pause the RISC. */
	udelay(100);
	if (RD_REG_DWORD(&reg->host_status) & HSRX_RISC_PAUSED)
		set_bit(RISC_PAUSE_CMPL, &ha->fw_dump_cap_flags);
}

int
qla24xx_soft_reset(struct qla_hw_data *ha)
{
	int rval = QLA_SUCCESS;
	uint32_t cnt;
	uint16_t wd;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	/*
	 * Reset RISC. The delay is dependent on system architecture.
	 * Driver can proceed with the reset sequence after waiting
	 * for a timeout period.
	 */
	WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	for (cnt = 0; cnt < 30000; cnt++) {
		if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
			break;

		udelay(10);
	}
	if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
		set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);

	WRT_REG_DWORD(&reg->ctrl_status,
	    CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);

	udelay(100);

	/* Wait for soft-reset to complete. */
	for (cnt = 0; cnt < 30000; cnt++) {
		if ((RD_REG_DWORD(&reg->ctrl_status) &
		    CSRX_ISP_SOFT_RESET) == 0)
			break;

		udelay(10);
	}
	if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
		set_bit(ISP_RESET_CMPL, &ha->fw_dump_cap_flags);

	WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
	RD_REG_DWORD(&reg->hccr);		/* PCI Posting. */

	for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt)
			udelay(10);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS)
		set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);

	return rval;
}

static int
qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
    uint32_t ram_words, void **nxt)
{
	int rval;
	uint32_t cnt, stat, timer, words, idx;
	uint16_t mb0;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	dma_addr_t dump_dma = ha->gid_list_dma;
	uint16_t *dump = (uint16_t *)ha->gid_list;

	rval = QLA_SUCCESS;
	mb0 = 0;

	WRT_MAILBOX_REG(ha, reg, 0, MBC_DUMP_RISC_RAM_EXTENDED);
	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

	words = qla2x00_gid_list_size(ha) / 2;
	for (cnt = 0; cnt < ram_words && rval == QLA_SUCCESS;
	    cnt += words, addr += words) {
		if (cnt + words > ram_words)
			words = ram_words - cnt;

		WRT_MAILBOX_REG(ha, reg, 1, LSW(addr));
		WRT_MAILBOX_REG(ha, reg, 8, MSW(addr));

		WRT_MAILBOX_REG(ha, reg, 2, MSW(dump_dma));
		WRT_MAILBOX_REG(ha, reg, 3, LSW(dump_dma));
		WRT_MAILBOX_REG(ha, reg, 6, MSW(MSD(dump_dma)));
		WRT_MAILBOX_REG(ha, reg, 7, LSW(MSD(dump_dma)));

		WRT_MAILBOX_REG(ha, reg, 4, words);
		WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);

		for (timer = 6000000; timer; timer--) {
			/* Check for pending interrupts. */
			stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
			if (stat & HSR_RISC_INT) {
				stat &= 0xff;

				if (stat == 0x1 || stat == 0x2) {
					set_bit(MBX_INTERRUPT,
					    &ha->mbx_cmd_flags);

					mb0 = RD_MAILBOX_REG(ha, reg, 0);

					/* Release mailbox registers. */
					WRT_REG_WORD(&reg->semaphore, 0);
					WRT_REG_WORD(&reg->hccr,
					    HCCR_CLR_RISC_INT);
					RD_REG_WORD(&reg->hccr);
					break;
				} else if (stat == 0x10 || stat == 0x11) {
					set_bit(MBX_INTERRUPT,
					    &ha->mbx_cmd_flags);

					mb0 = RD_MAILBOX_REG(ha, reg, 0);

					WRT_REG_WORD(&reg->hccr,
					    HCCR_CLR_RISC_INT);
					RD_REG_WORD(&reg->hccr);
					break;
				}

				/* clear this intr; it wasn't a mailbox intr */
				WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
				RD_REG_WORD(&reg->hccr);
			}
			udelay(5);
		}

		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
			rval = mb0 & MBS_MASK;
			for (idx = 0; idx < words; idx++)
				ram[cnt + idx] = swab16(dump[idx]);
		} else {
			rval = QLA_FUNCTION_FAILED;
		}
	}

	*nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
	return rval;
}

static inline void
qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count,
	uint16_t *buf)
{
	uint16_t __iomem *dmp_reg = &reg->u.isp2300.fb_cmd;

	for ( ; count--; dmp_reg++)
		*buf++ = htons(RD_REG_WORD(dmp_reg));
}

static inline void *
qla24xx_copy_eft(struct qla_hw_data *ha, void *ptr)
{
	if (!ha->eft)
		return ptr;

	memcpy(ptr, ha->eft, ntohl(ha->fw_dump->eft_size));
	return ptr + ntohl(ha->fw_dump->eft_size);
}

static inline void *
qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
{
	uint32_t cnt;
	uint32_t *iter_reg;
	struct qla2xxx_fce_chain *fcec = ptr;

	if (!ha->fce)
		return ptr;

	*last_chain = &fcec->type;
	fcec->type = htonl(DUMP_CHAIN_FCE);
	fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) +
	    fce_calc_size(ha->fce_bufs));
	fcec->size = htonl(fce_calc_size(ha->fce_bufs));
	fcec->addr_l = htonl(LSD(ha->fce_dma));
	fcec->addr_h = htonl(MSD(ha->fce_dma));

	iter_reg = fcec->eregs;
	for (cnt = 0; cnt < 8; cnt++)
		*iter_reg++ = htonl(ha->fce_mb[cnt]);

	memcpy(iter_reg, ha->fce, ntohl(fcec->size));

	return (char *)iter_reg + ntohl(fcec->size);
}

static inline void *
qla25xx_copy_exlogin(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
{
	struct qla2xxx_offld_chain *c = ptr;

	if (!ha->exlogin_buf)
		return ptr;

	*last_chain = &c->type;

	c->type = cpu_to_be32(DUMP_CHAIN_EXLOGIN);
	c->chain_size = cpu_to_be32(sizeof(struct qla2xxx_offld_chain) +
	    ha->exlogin_size);
	c->size = cpu_to_be32(ha->exlogin_size);
	c->addr = cpu_to_be64(ha->exlogin_buf_dma);

	ptr += sizeof(struct qla2xxx_offld_chain);
	memcpy(ptr, ha->exlogin_buf, ha->exlogin_size);

	return (char *)ptr + cpu_to_be32(c->size);
}

static inline void *
qla81xx_copy_exchoffld(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
{
	struct qla2xxx_offld_chain *c = ptr;

	if (!ha->exchoffld_buf)
		return ptr;

	*last_chain = &c->type;

	c->type = cpu_to_be32(DUMP_CHAIN_EXCHG);
	c->chain_size = cpu_to_be32(sizeof(struct qla2xxx_offld_chain) +
	    ha->exchoffld_size);
	c->size = cpu_to_be32(ha->exchoffld_size);
	c->addr = cpu_to_be64(ha->exchoffld_buf_dma);

	ptr += sizeof(struct qla2xxx_offld_chain);
	memcpy(ptr, ha->exchoffld_buf, ha->exchoffld_size);

	return (char *)ptr + cpu_to_be32(c->size);
}

static inline void *
qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
	uint32_t **last_chain)
{
	struct qla2xxx_mqueue_chain *q;
	struct qla2xxx_mqueue_header *qh;
	uint32_t num_queues;
	int que;
	struct {
		int length;
		void *ring;
	} aq, *aqp;

	if (!ha->tgt.atio_ring)
		return ptr;

	num_queues = 1;
	aqp = &aq;
	aqp->length = ha->tgt.atio_q_length;
	aqp->ring = ha->tgt.atio_ring;

	for (que = 0; que < num_queues; que++) {
		/* aqp = ha->atio_q_map[que]; */
		q = ptr;
		*last_chain = &q->type;
		q->type = htonl(DUMP_CHAIN_QUEUE);
		q->chain_size = htonl(
		    sizeof(struct qla2xxx_mqueue_chain) +
		    sizeof(struct qla2xxx_mqueue_header) +
		    (aqp->length * sizeof(request_t)));
		ptr += sizeof(struct qla2xxx_mqueue_chain);

		/* Add header. */
		qh = ptr;
		qh->queue = htonl(TYPE_ATIO_QUEUE);
		qh->number = htonl(que);
		qh->size = htonl(aqp->length * sizeof(request_t));
		ptr += sizeof(struct qla2xxx_mqueue_header);

		/* Add data. */
		memcpy(ptr, aqp->ring, aqp->length * sizeof(request_t));

		ptr += aqp->length * sizeof(request_t);
	}

	return ptr;
}

static inline void *
qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
{
	struct qla2xxx_mqueue_chain *q;
	struct qla2xxx_mqueue_header *qh;
	struct req_que *req;
	struct rsp_que *rsp;
	int que;

	if (!ha->mqenable)
		return ptr;

	/* Request queues */
	for (que = 1; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req)
			break;

		/* Add chain. */
		q = ptr;
		*last_chain = &q->type;
		q->type = htonl(DUMP_CHAIN_QUEUE);
		q->chain_size = htonl(
		    sizeof(struct qla2xxx_mqueue_chain) +
		    sizeof(struct qla2xxx_mqueue_header) +
		    (req->length * sizeof(request_t)));
		ptr += sizeof(struct qla2xxx_mqueue_chain);

		/* Add header. */
		qh = ptr;
		qh->queue = htonl(TYPE_REQUEST_QUEUE);
		qh->number = htonl(que);
		qh->size = htonl(req->length * sizeof(request_t));
		ptr += sizeof(struct qla2xxx_mqueue_header);

		/* Add data. */
		memcpy(ptr, req->ring, req->length * sizeof(request_t));
		ptr += req->length * sizeof(request_t);
	}

	/* Response queues */
	for (que = 1; que < ha->max_rsp_queues; que++) {
		rsp = ha->rsp_q_map[que];
		if (!rsp)
			break;

		/* Add chain. */
		q = ptr;
		*last_chain = &q->type;
		q->type = htonl(DUMP_CHAIN_QUEUE);
		q->chain_size = htonl(
		    sizeof(struct qla2xxx_mqueue_chain) +
		    sizeof(struct qla2xxx_mqueue_header) +
		    (rsp->length * sizeof(response_t)));
		ptr += sizeof(struct qla2xxx_mqueue_chain);

		/* Add header. */
		qh = ptr;
		qh->queue = htonl(TYPE_RESPONSE_QUEUE);
		qh->number = htonl(que);
		qh->size = htonl(rsp->length * sizeof(response_t));
		ptr += sizeof(struct qla2xxx_mqueue_header);

		/* Add data. */
		memcpy(ptr, rsp->ring, rsp->length * sizeof(response_t));
		ptr += rsp->length * sizeof(response_t);
	}

	return ptr;
}

static inline void *
qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
{
	uint32_t cnt, que_idx;
	uint8_t que_cnt;
	struct qla2xxx_mq_chain *mq = ptr;
	device_reg_t *reg;

	if (!ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
	    IS_QLA28XX(ha))
		return ptr;

	mq = ptr;
	*last_chain = &mq->type;
	mq->type = htonl(DUMP_CHAIN_MQ);
	mq->chain_size = htonl(sizeof(struct qla2xxx_mq_chain));

	que_cnt = ha->max_req_queues > ha->max_rsp_queues ?
	    ha->max_req_queues : ha->max_rsp_queues;
	mq->count = htonl(que_cnt);
	for (cnt = 0; cnt < que_cnt; cnt++) {
		reg = ISP_QUE_REG(ha, cnt);
		que_idx = cnt * 4;
		mq->qregs[que_idx] =
		    htonl(RD_REG_DWORD(&reg->isp25mq.req_q_in));
		mq->qregs[que_idx+1] =
		    htonl(RD_REG_DWORD(&reg->isp25mq.req_q_out));
		mq->qregs[que_idx+2] =
		    htonl(RD_REG_DWORD(&reg->isp25mq.rsp_q_in));
		mq->qregs[que_idx+3] =
		    htonl(RD_REG_DWORD(&reg->isp25mq.rsp_q_out));
	}

	return ptr + sizeof(struct qla2xxx_mq_chain);
}

void
qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
{
	struct qla_hw_data *ha = vha->hw;

	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0xd000,
		    "Failed to dump firmware (%x), dump status flags (0x%lx).\n",
		    rval, ha->fw_dump_cap_flags);
		ha->fw_dumped = 0;
	} else {
		ql_log(ql_log_info, vha, 0xd001,
		    "Firmware dump saved to temp buffer (%ld/%p), dump status flags (0x%lx).\n",
		    vha->host_no, ha->fw_dump, ha->fw_dump_cap_flags);
		ha->fw_dumped = 1;
		qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
	}
}

/**
 * qla2300_fw_dump() - Dumps binary data from the 2300 firmware.
 * @vha: HA context
 * @hardware_locked: Called with the hardware_lock
 */
void
qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
{
	int rval;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	uint16_t __iomem *dmp_reg;
	unsigned long flags;
	struct qla2300_fw_dump *fw;
	void *nxt;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

	flags = 0;

#ifndef __CHECKER__
	if (!hardware_locked)
		spin_lock_irqsave(&ha->hardware_lock, flags);
#endif

	if (!ha->fw_dump) {
		ql_log(ql_log_warn, vha, 0xd002,
		    "No buffer available for dump.\n");
		goto qla2300_fw_dump_failed;
	}

	if (ha->fw_dumped) {
		ql_log(ql_log_warn, vha, 0xd003,
		    "Firmware has been previously dumped (%p) "
		    "-- ignoring request.\n",
		    ha->fw_dump);
		goto qla2300_fw_dump_failed;
	}
	fw = &ha->fw_dump->isp.isp23;
	qla2xxx_prep_dump(ha, ha->fw_dump);

	rval = QLA_SUCCESS;
	fw->hccr = htons(RD_REG_WORD(&reg->hccr));

	/* Pause RISC. */
	WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
	if (IS_QLA2300(ha)) {
		for (cnt = 30000;
		    (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
		    rval == QLA_SUCCESS; cnt--) {
			if (cnt)
				udelay(100);
			else
				rval = QLA_FUNCTION_TIMEOUT;
		}
	} else {
		RD_REG_WORD(&reg->hccr);	/* PCI Posting. */
		udelay(10);
	}

	if (rval == QLA_SUCCESS) {
		dmp_reg = &reg->flash_address;
		for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++, dmp_reg++)
			fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg));

		dmp_reg = &reg->u.isp2300.req_q_in;
		for (cnt = 0; cnt < sizeof(fw->risc_host_reg) / 2;
		    cnt++, dmp_reg++)
			fw->risc_host_reg[cnt] = htons(RD_REG_WORD(dmp_reg));

		dmp_reg = &reg->u.isp2300.mailbox0;
		for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2;
		    cnt++, dmp_reg++)
			fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg));

		WRT_REG_WORD(&reg->ctrl_status, 0x40);
		qla2xxx_read_window(reg, 32, fw->resp_dma_reg);

		WRT_REG_WORD(&reg->ctrl_status, 0x50);
		qla2xxx_read_window(reg, 48, fw->dma_reg);

		WRT_REG_WORD(&reg->ctrl_status, 0x00);
		dmp_reg = &reg->risc_hw;
		for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2;
		    cnt++, dmp_reg++)
			fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg));

		WRT_REG_WORD(&reg->pcr, 0x2000);
		qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);

		WRT_REG_WORD(&reg->pcr, 0x2200);
		qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);

		WRT_REG_WORD(&reg->pcr, 0x2400);
		qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);

		WRT_REG_WORD(&reg->pcr, 0x2600);
		qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);

		WRT_REG_WORD(&reg->pcr, 0x2800);
		qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);

		WRT_REG_WORD(&reg->pcr, 0x2A00);
		qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);

		WRT_REG_WORD(&reg->pcr, 0x2C00);
		qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);

		WRT_REG_WORD(&reg->pcr, 0x2E00);
		qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);

		WRT_REG_WORD(&reg->ctrl_status, 0x10);
		qla2xxx_read_window(reg, 64, fw->frame_buf_hdw_reg);

		WRT_REG_WORD(&reg->ctrl_status, 0x20);
		qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);

		WRT_REG_WORD(&reg->ctrl_status, 0x30);
		qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);

		/* Reset RISC. */
		WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
		for (cnt = 0; cnt < 30000; cnt++) {
			if ((RD_REG_WORD(&reg->ctrl_status) &
			    CSR_ISP_SOFT_RESET) == 0)
				break;

			udelay(10);
		}
	}

	if (!IS_QLA2300(ha)) {
		for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
		    rval == QLA_SUCCESS; cnt--) {
			if (cnt)
				udelay(100);
			else
				rval = QLA_FUNCTION_TIMEOUT;
		}
	}

	/* Get RISC SRAM. */
	if (rval == QLA_SUCCESS)
		rval = qla2xxx_dump_ram(ha, 0x800, fw->risc_ram,
		    sizeof(fw->risc_ram) / 2, &nxt);

	/* Get stack SRAM. */
	if (rval == QLA_SUCCESS)
		rval = qla2xxx_dump_ram(ha, 0x10000, fw->stack_ram,
		    sizeof(fw->stack_ram) / 2, &nxt);

	/* Get data SRAM. */
	if (rval == QLA_SUCCESS)
		rval = qla2xxx_dump_ram(ha, 0x11000, fw->data_ram,
		    ha->fw_memory_size - 0x11000 + 1, &nxt);

	if (rval == QLA_SUCCESS)
		qla2xxx_copy_queues(ha, nxt);

	qla2xxx_dump_post_process(base_vha, rval);

qla2300_fw_dump_failed:
#ifndef __CHECKER__
	if (!hardware_locked)
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
#else
	;
#endif
}

/**
 * qla2100_fw_dump() - Dumps binary data from the 2100/2200 firmware.
 * @vha: HA context
 * @hardware_locked: Called with the hardware_lock
 */
void
qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
{
	int rval;
	uint32_t cnt, timer;
	uint16_t risc_address;
	uint16_t mb0, mb2;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	uint16_t __iomem *dmp_reg;
	unsigned long flags;
	struct qla2100_fw_dump *fw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

	risc_address = 0;
	mb0 = mb2 = 0;
	flags = 0;

#ifndef __CHECKER__
	if (!hardware_locked)
		spin_lock_irqsave(&ha->hardware_lock, flags);
#endif

	if (!ha->fw_dump) {
		ql_log(ql_log_warn, vha, 0xd004,
		    "No buffer available for dump.\n");
		goto qla2100_fw_dump_failed;
	}

	if (ha->fw_dumped) {
		ql_log(ql_log_warn, vha, 0xd005,
		    "Firmware has been previously dumped (%p) "
		    "-- ignoring request.\n",
		    ha->fw_dump);
		goto qla2100_fw_dump_failed;
	}
	fw = &ha->fw_dump->isp.isp21;
	qla2xxx_prep_dump(ha, ha->fw_dump);

	rval = QLA_SUCCESS;
	fw->hccr = htons(RD_REG_WORD(&reg->hccr));

	/* Pause RISC. */
	WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
	for (cnt = 30000; (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt)
			udelay(100);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS) {
		dmp_reg = &reg->flash_address;
		for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++, dmp_reg++)
			fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg));

		dmp_reg = &reg->u.isp2100.mailbox0;
		for (cnt = 0; cnt < ha->mbx_count; cnt++, dmp_reg++) {
			if (cnt == 8)
				dmp_reg = &reg->u_end.isp2200.mailbox8;

			fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
		}

		dmp_reg = &reg->u.isp2100.unused_2[0];
		for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++, dmp_reg++)
			fw->dma_reg[cnt] = htons(RD_REG_WORD(dmp_reg));

		WRT_REG_WORD(&reg->ctrl_status, 0x00);
		dmp_reg = &reg->risc_hw;
		for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++, dmp_reg++)
			fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg));

		WRT_REG_WORD(&reg->pcr, 0x2000);
		qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);

		WRT_REG_WORD(&reg->pcr, 0x2100);
		qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);

		WRT_REG_WORD(&reg->pcr, 0x2200);
		qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);

		WRT_REG_WORD(&reg->pcr, 0x2300);
		qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);

		WRT_REG_WORD(&reg->pcr, 0x2400);
		qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);

		WRT_REG_WORD(&reg->pcr, 0x2500);
		qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);

		WRT_REG_WORD(&reg->pcr, 0x2600);
		qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);

		WRT_REG_WORD(&reg->pcr, 0x2700);
		qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);

		WRT_REG_WORD(&reg->ctrl_status, 0x10);
		qla2xxx_read_window(reg, 16, fw->frame_buf_hdw_reg);

		WRT_REG_WORD(&reg->ctrl_status, 0x20);
		qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);

		WRT_REG_WORD(&reg->ctrl_status, 0x30);
		qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);

		/* Reset the ISP. */
		WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
	}

	for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt)
			udelay(100);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}

	/* Pause RISC. */
	if (rval == QLA_SUCCESS && (IS_QLA2200(ha) || (IS_QLA2100(ha) &&
	    (RD_REG_WORD(&reg->mctr) & (BIT_1 | BIT_0)) != 0))) {

		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
		for (cnt = 30000;
		    (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
		    rval == QLA_SUCCESS; cnt--) {
			if (cnt)
				udelay(100);
			else
				rval = QLA_FUNCTION_TIMEOUT;
		}
		if (rval == QLA_SUCCESS) {
			/* Set memory configuration and timing. */
			if (IS_QLA2100(ha))
				WRT_REG_WORD(&reg->mctr, 0xf1);
			else
				WRT_REG_WORD(&reg->mctr, 0xf2);
			RD_REG_WORD(&reg->mctr);	/* PCI Posting. */

			/* Release RISC. */
			WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
		}
	}

	if (rval == QLA_SUCCESS) {
		/* Get RISC SRAM. */
		risc_address = 0x1000;
		WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_WORD);
		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
	}
	for (cnt = 0; cnt < sizeof(fw->risc_ram) / 2 && rval == QLA_SUCCESS;
	    cnt++, risc_address++) {
		WRT_MAILBOX_REG(ha, reg, 1, risc_address);
		WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);

		for (timer = 6000000; timer != 0; timer--) {
			/* Check for pending interrupts. */
			if (RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) {
				if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
					set_bit(MBX_INTERRUPT,
					    &ha->mbx_cmd_flags);

					mb0 = RD_MAILBOX_REG(ha, reg, 0);
					mb2 = RD_MAILBOX_REG(ha, reg, 2);

					WRT_REG_WORD(&reg->semaphore, 0);
					WRT_REG_WORD(&reg->hccr,
					    HCCR_CLR_RISC_INT);
					RD_REG_WORD(&reg->hccr);
					break;
				}
				WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
				RD_REG_WORD(&reg->hccr);
			}
			udelay(5);
		}

		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
			rval = mb0 & MBS_MASK;
			fw->risc_ram[cnt] = htons(mb2);
		} else {
			rval = QLA_FUNCTION_FAILED;
		}
	}

	if (rval == QLA_SUCCESS)
		qla2xxx_copy_queues(ha, &fw->risc_ram[cnt]);

	qla2xxx_dump_post_process(base_vha, rval);

qla2100_fw_dump_failed:
#ifndef __CHECKER__
	if (!hardware_locked)
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
#else
	;
#endif
}
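
/*
 * Descriptive comment added for clarity, mirroring the kernel-doc style of
 * qla2300_fw_dump()/qla2100_fw_dump() above; wording inferred from the code.
 *
 * qla24xx_fw_dump() - Dumps binary data from the 24xx firmware.
 * @vha: HA context
 * @hardware_locked: Called with the hardware_lock
 */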
void
qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
{
	int rval;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	uint32_t __iomem *dmp_reg;
	uint32_t *iter_reg;
	uint16_t __iomem *mbx_reg;
	unsigned long flags;
	struct qla24xx_fw_dump *fw;
	void *nxt;
	void *nxt_chain;
	uint32_t *last_chain = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

	if (IS_P3P_TYPE(ha))
		return;

	flags = 0;
	ha->fw_dump_cap_flags = 0;

#ifndef __CHECKER__
	if (!hardware_locked)
		spin_lock_irqsave(&ha->hardware_lock, flags);
#endif

	if (!ha->fw_dump) {
		ql_log(ql_log_warn, vha, 0xd006,
		    "No buffer available for dump.\n");
		goto qla24xx_fw_dump_failed;
	}

	if (ha->fw_dumped) {
		ql_log(ql_log_warn, vha, 0xd007,
		    "Firmware has been previously dumped (%p) "
		    "-- ignoring request.\n",
		    ha->fw_dump);
		goto qla24xx_fw_dump_failed;
	}
	QLA_FW_STOPPED(ha);
	fw = &ha->fw_dump->isp.isp24;
	qla2xxx_prep_dump(ha, ha->fw_dump);

	fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));

	/*
	 * Pause RISC. No need to track timeout, as resetting the chip
	 * is the right approach in case of a pause timeout.
	 */
	qla24xx_pause_risc(reg, ha);

	/* Host interface registers. */
	dmp_reg = &reg->flash_addr;
	for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
		fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));

	/* Disable interrupts. */
	WRT_REG_DWORD(&reg->ictrl, 0);
	RD_REG_DWORD(&reg->ictrl);

	/* Shadow registers. */
	WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
	fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
	fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
	fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
	fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
	fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
	fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
	fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	/* Mailbox registers. */
	mbx_reg = &reg->mailbox0;
	for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
		fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));

	/* Transfer sequence registers. */
	iter_reg = fw->xseq_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
	qla24xx_read_window(reg, 0xBF70, 16, iter_reg);

	qla24xx_read_window(reg, 0xBFE0, 16, fw->xseq_0_reg);
	qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);

	/* Receive sequence registers. */
	iter_reg = fw->rseq_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
	qla24xx_read_window(reg, 0xFF70, 16, iter_reg);

	qla24xx_read_window(reg, 0xFFD0, 16, fw->rseq_0_reg);
	qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
	qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);

	/* Command DMA registers. */
	qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);

	/* Queues. */
	iter_reg = fw->req0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));

	iter_reg = fw->resp0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));

	iter_reg = fw->req1_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));

	/* Transmit DMA registers. */
	iter_reg = fw->xmt0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
	qla24xx_read_window(reg, 0x7610, 16, iter_reg);

	iter_reg = fw->xmt1_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
	qla24xx_read_window(reg, 0x7630, 16, iter_reg);

	iter_reg = fw->xmt2_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
	qla24xx_read_window(reg, 0x7650, 16, iter_reg);

	iter_reg = fw->xmt3_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
	qla24xx_read_window(reg, 0x7670, 16, iter_reg);

	iter_reg = fw->xmt4_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
	qla24xx_read_window(reg, 0x7690, 16, iter_reg);

	qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);

	/* Receive DMA registers. */
	iter_reg = fw->rcvt0_data_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
	qla24xx_read_window(reg, 0x7710, 16, iter_reg);

	iter_reg = fw->rcvt1_data_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
	qla24xx_read_window(reg, 0x7730, 16, iter_reg);

	/* RISC registers. */
	iter_reg = fw->risc_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
	qla24xx_read_window(reg, 0x0F70, 16, iter_reg);

	/* Local memory controller registers. */
	iter_reg = fw->lmc_reg;
	iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
	qla24xx_read_window(reg, 0x3060, 16, iter_reg);

	/* Fibre Protocol Module registers. */
	iter_reg = fw->fpm_hdw_reg;
	iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
	qla24xx_read_window(reg, 0x40B0, 16, iter_reg);

	/* Frame Buffer registers. */
	iter_reg = fw->fb_hdw_reg;
	iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
	qla24xx_read_window(reg, 0x61B0, 16, iter_reg);

	rval = qla24xx_soft_reset(ha);
	if (rval != QLA_SUCCESS)
		goto qla24xx_fw_dump_failed_0;

	rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
	    &nxt);
	if (rval != QLA_SUCCESS)
		goto qla24xx_fw_dump_failed_0;

	nxt = qla2xxx_copy_queues(ha, nxt);

	qla24xx_copy_eft(ha, nxt);

	nxt_chain = (void *)ha->fw_dump + ha->chain_offset;
	nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
	if (last_chain) {
		ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
		*last_chain |= htonl(DUMP_CHAIN_LAST);
	}

	/* Adjust valid length. */
	ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);

qla24xx_fw_dump_failed_0:
	qla2xxx_dump_post_process(base_vha, rval);

qla24xx_fw_dump_failed:
#ifndef __CHECKER__
	if (!hardware_locked)
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
#else
	;
#endif
}
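
/*
 * Descriptive comment added for clarity, mirroring the kernel-doc style of
 * the earlier dump routines; wording inferred from the code.
 *
 * qla25xx_fw_dump() - Dumps binary data from the 25xx firmware.
 * @vha: HA context
 * @hardware_locked: Called with the hardware_lock
 */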
void
qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
{
	int rval;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	uint32_t __iomem *dmp_reg;
	uint32_t *iter_reg;
	uint16_t __iomem *mbx_reg;
	unsigned long flags;
	struct qla25xx_fw_dump *fw;
	void *nxt, *nxt_chain;
	uint32_t *last_chain = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

	flags = 0;
	ha->fw_dump_cap_flags = 0;

#ifndef __CHECKER__
	if (!hardware_locked)
		spin_lock_irqsave(&ha->hardware_lock, flags);
#endif

	if (!ha->fw_dump) {
		ql_log(ql_log_warn, vha, 0xd008,
		    "No buffer available for dump.\n");
		goto qla25xx_fw_dump_failed;
	}

	if (ha->fw_dumped) {
		ql_log(ql_log_warn, vha, 0xd009,
		    "Firmware has been previously dumped (%p) "
		    "-- ignoring request.\n",
		    ha->fw_dump);
		goto qla25xx_fw_dump_failed;
	}
	QLA_FW_STOPPED(ha);
	fw = &ha->fw_dump->isp.isp25;
	qla2xxx_prep_dump(ha, ha->fw_dump);
	ha->fw_dump->version = htonl(2);

	fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));

	/*
	 * Pause RISC. No need to track timeout, as resetting the chip
	 * is the right approach in case of a pause timeout.
	 */
	qla24xx_pause_risc(reg, ha);

	/* Host/Risc registers. */
	iter_reg = fw->host_risc_reg;
	iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
	qla24xx_read_window(reg, 0x7010, 16, iter_reg);

	/* PCIe registers. */
	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_window, 0x01);
	dmp_reg = &reg->iobase_c4;
	fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
	dmp_reg++;
	fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
	dmp_reg++;
	fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
	fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));

	WRT_REG_DWORD(&reg->iobase_window, 0x00);
	RD_REG_DWORD(&reg->iobase_window);

	/* Host interface registers. */
	dmp_reg = &reg->flash_addr;
	for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
		fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));

	/* Disable interrupts. */
	WRT_REG_DWORD(&reg->ictrl, 0);
	RD_REG_DWORD(&reg->ictrl);

	/* Shadow registers. */
	WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
	fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
	fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
	fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
	fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
	fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
	fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
	fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
	fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
	fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
	fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
	fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	/* RISC I/O register. */
	WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
	fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));

	/* Mailbox registers. */
	mbx_reg = &reg->mailbox0;
	for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
		fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));

	/* Transfer sequence registers. */
	iter_reg = fw->xseq_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
	qla24xx_read_window(reg, 0xBF70, 16, iter_reg);

	iter_reg = fw->xseq_0_reg;
	iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
	qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);

	qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);

	/* Receive sequence registers. */
	iter_reg = fw->rseq_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
	qla24xx_read_window(reg, 0xFF70, 16, iter_reg);

	iter_reg = fw->rseq_0_reg;
	iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
	qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);

	qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
	qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);

	/* Auxiliary sequence registers. */
	iter_reg = fw->aseq_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
	qla24xx_read_window(reg, 0xB070, 16, iter_reg);

	iter_reg = fw->aseq_0_reg;
	iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
	qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);

	qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
	qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);

	/* Command DMA registers. */
	qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);

	/* Queues. */
	iter_reg = fw->req0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));

	iter_reg = fw->resp0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));

	iter_reg = fw->req1_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));

	/* Transmit DMA registers. */
	iter_reg = fw->xmt0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
	qla24xx_read_window(reg, 0x7610, 16, iter_reg);

	iter_reg = fw->xmt1_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
	qla24xx_read_window(reg, 0x7630, 16, iter_reg);

	iter_reg = fw->xmt2_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
	qla24xx_read_window(reg, 0x7650, 16, iter_reg);

	iter_reg = fw->xmt3_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
	qla24xx_read_window(reg, 0x7670, 16, iter_reg);

	iter_reg = fw->xmt4_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
	qla24xx_read_window(reg, 0x7690, 16, iter_reg);

	qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);

	/* Receive DMA registers. */
	iter_reg = fw->rcvt0_data_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
	qla24xx_read_window(reg, 0x7710, 16, iter_reg);

	iter_reg = fw->rcvt1_data_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
	qla24xx_read_window(reg, 0x7730, 16, iter_reg);

	/* RISC registers. */
	iter_reg = fw->risc_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
	qla24xx_read_window(reg, 0x0F70, 16, iter_reg);

	/* Local memory controller registers. */
	iter_reg = fw->lmc_reg;
	iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
	qla24xx_read_window(reg, 0x3070, 16, iter_reg);

	/* Fibre Protocol Module registers. */
	iter_reg = fw->fpm_hdw_reg;
	iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
	qla24xx_read_window(reg, 0x40B0, 16, iter_reg);

	/* Frame Buffer registers. */
	iter_reg = fw->fb_hdw_reg;
	iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
	qla24xx_read_window(reg, 0x6F00, 16, iter_reg);

	/* Multi queue registers */
	nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
	    &last_chain);

	rval = qla24xx_soft_reset(ha);
	if (rval != QLA_SUCCESS)
		goto qla25xx_fw_dump_failed_0;

	rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
	    &nxt);
	if (rval != QLA_SUCCESS)
		goto qla25xx_fw_dump_failed_0;

	nxt = qla2xxx_copy_queues(ha, nxt);

	qla24xx_copy_eft(ha, nxt);

	/* Chain entries -- started with MQ. */
	nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
	nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
	nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
	nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain);
	if (last_chain) {
		ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
		*last_chain |= htonl(DUMP_CHAIN_LAST);
	}

	/* Adjust valid length. */
	ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);

qla25xx_fw_dump_failed_0:
	qla2xxx_dump_post_process(base_vha, rval);

qla25xx_fw_dump_failed:
#ifndef __CHECKER__
	if (!hardware_locked)
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
#else
	;
#endif
}
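
/*
 * Descriptive comment added for clarity, mirroring the kernel-doc style of
 * the earlier dump routines; wording inferred from the code.
 *
 * qla81xx_fw_dump() - Dumps binary data from the 81xx firmware.
 * @vha: HA context
 * @hardware_locked: Called with the hardware_lock
 */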
void
qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
{
	int rval;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	uint32_t __iomem *dmp_reg;
	uint32_t *iter_reg;
	uint16_t __iomem *mbx_reg;
	unsigned long flags;
	struct qla81xx_fw_dump *fw;
	void *nxt, *nxt_chain;
	uint32_t *last_chain = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

	flags = 0;
	ha->fw_dump_cap_flags = 0;

#ifndef __CHECKER__
	if (!hardware_locked)
		spin_lock_irqsave(&ha->hardware_lock, flags);
#endif

	if (!ha->fw_dump) {
		ql_log(ql_log_warn, vha, 0xd00a,
		    "No buffer available for dump.\n");
		goto qla81xx_fw_dump_failed;
	}

	if (ha->fw_dumped) {
		ql_log(ql_log_warn, vha, 0xd00b,
		    "Firmware has been previously dumped (%p) "
		    "-- ignoring request.\n",
		    ha->fw_dump);
		goto qla81xx_fw_dump_failed;
	}
	fw = &ha->fw_dump->isp.isp81;
	qla2xxx_prep_dump(ha, ha->fw_dump);

	fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));

	/*
	 * Pause RISC. No need to track timeout, as resetting the chip
	 * is the right approach in case of a pause timeout.
	 */
	qla24xx_pause_risc(reg, ha);

	/* Host/Risc registers. */
	iter_reg = fw->host_risc_reg;
	iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
	qla24xx_read_window(reg, 0x7010, 16, iter_reg);

	/* PCIe registers. */
	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_window, 0x01);
	dmp_reg = &reg->iobase_c4;
	fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
	dmp_reg++;
	fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
	dmp_reg++;
	fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
	fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));

	WRT_REG_DWORD(&reg->iobase_window, 0x00);
	RD_REG_DWORD(&reg->iobase_window);

	/* Host interface registers. */
	dmp_reg = &reg->flash_addr;
	for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
		fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));

	/* Disable interrupts. */
	WRT_REG_DWORD(&reg->ictrl, 0);
	RD_REG_DWORD(&reg->ictrl);

	/* Shadow registers. */
	WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
	fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
	fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
	fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
	fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
	fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
	fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
	fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
	fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
	fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
	fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
	fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	/* RISC I/O register. */
	WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
	fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));

	/* Mailbox registers. */
	mbx_reg = &reg->mailbox0;
	for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
		fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));

	/* Transfer sequence registers. */
	iter_reg = fw->xseq_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
	qla24xx_read_window(reg, 0xBF70, 16, iter_reg);

	iter_reg = fw->xseq_0_reg;
	iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
	qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);

	qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);

	/* Receive sequence registers. */
	iter_reg = fw->rseq_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
	qla24xx_read_window(reg, 0xFF70, 16, iter_reg);

	iter_reg = fw->rseq_0_reg;
	iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
	qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);

	qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
	qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);

	/* Auxiliary sequence registers. */
*/ 1835 iter_reg = fw->aseq_gp_reg; 1836 iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg); 1837 iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg); 1838 iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg); 1839 iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg); 1840 iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg); 1841 iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg); 1842 iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg); 1843 qla24xx_read_window(reg, 0xB070, 16, iter_reg); 1844 1845 iter_reg = fw->aseq_0_reg; 1846 iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg); 1847 qla24xx_read_window(reg, 0xB0D0, 16, iter_reg); 1848 1849 qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg); 1850 qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg); 1851 1852 /* Command DMA registers. */ 1853 qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg); 1854 1855 /* Queues. */ 1856 iter_reg = fw->req0_dma_reg; 1857 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg); 1858 dmp_reg = ®->iobase_q; 1859 for (cnt = 0; cnt < 7; cnt++, dmp_reg++) 1860 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg)); 1861 1862 iter_reg = fw->resp0_dma_reg; 1863 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg); 1864 dmp_reg = ®->iobase_q; 1865 for (cnt = 0; cnt < 7; cnt++, dmp_reg++) 1866 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg)); 1867 1868 iter_reg = fw->req1_dma_reg; 1869 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg); 1870 dmp_reg = ®->iobase_q; 1871 for (cnt = 0; cnt < 7; cnt++, dmp_reg++) 1872 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg)); 1873 1874 /* Transmit DMA registers. */ 1875 iter_reg = fw->xmt0_dma_reg; 1876 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg); 1877 qla24xx_read_window(reg, 0x7610, 16, iter_reg); 1878 1879 iter_reg = fw->xmt1_dma_reg; 1880 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg); 1881 qla24xx_read_window(reg, 0x7630, 16, iter_reg); 1882 1883 iter_reg = fw->xmt2_dma_reg; 1884 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg); 1885 qla24xx_read_window(reg, 0x7650, 16, iter_reg); 1886 1887 iter_reg = fw->xmt3_dma_reg; 1888 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg); 1889 qla24xx_read_window(reg, 0x7670, 16, iter_reg); 1890 1891 iter_reg = fw->xmt4_dma_reg; 1892 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg); 1893 qla24xx_read_window(reg, 0x7690, 16, iter_reg); 1894 1895 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg); 1896 1897 /* Receive DMA registers. */ 1898 iter_reg = fw->rcvt0_data_dma_reg; 1899 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg); 1900 qla24xx_read_window(reg, 0x7710, 16, iter_reg); 1901 1902 iter_reg = fw->rcvt1_data_dma_reg; 1903 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg); 1904 qla24xx_read_window(reg, 0x7730, 16, iter_reg); 1905 1906 /* RISC registers. */ 1907 iter_reg = fw->risc_gp_reg; 1908 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg); 1909 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg); 1910 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg); 1911 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg); 1912 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg); 1913 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg); 1914 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg); 1915 qla24xx_read_window(reg, 0x0F70, 16, iter_reg); 1916 1917 /* Local memory controller registers. 
*/ 1918 iter_reg = fw->lmc_reg; 1919 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg); 1920 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg); 1921 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg); 1922 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg); 1923 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg); 1924 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg); 1925 iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg); 1926 qla24xx_read_window(reg, 0x3070, 16, iter_reg); 1927 1928 /* Fibre Protocol Module registers. */ 1929 iter_reg = fw->fpm_hdw_reg; 1930 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg); 1931 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg); 1932 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg); 1933 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg); 1934 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg); 1935 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg); 1936 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg); 1937 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg); 1938 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg); 1939 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg); 1940 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg); 1941 iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg); 1942 iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg); 1943 qla24xx_read_window(reg, 0x40D0, 16, iter_reg); 1944 1945 /* Frame Buffer registers. */ 1946 iter_reg = fw->fb_hdw_reg; 1947 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg); 1948 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg); 1949 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg); 1950 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg); 1951 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg); 1952 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg); 1953 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg); 1954 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg); 1955 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg); 1956 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg); 1957 iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg); 1958 iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg); 1959 qla24xx_read_window(reg, 0x6F00, 16, iter_reg); 1960 1961 /* Multi queue registers */ 1962 nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset, 1963 &last_chain); 1964 1965 rval = qla24xx_soft_reset(ha); 1966 if (rval != QLA_SUCCESS) 1967 goto qla81xx_fw_dump_failed_0; 1968 1969 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram), 1970 &nxt); 1971 if (rval != QLA_SUCCESS) 1972 goto qla81xx_fw_dump_failed_0; 1973 1974 nxt = qla2xxx_copy_queues(ha, nxt); 1975 1976 qla24xx_copy_eft(ha, nxt); 1977 1978 /* Chain entries -- started with MQ. */ 1979 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain); 1980 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain); 1981 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain); 1982 nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain); 1983 nxt_chain = qla81xx_copy_exchoffld(ha, nxt_chain, &last_chain); 1984 if (last_chain) { 1985 ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT); 1986 *last_chain |= htonl(DUMP_CHAIN_LAST); 1987 } 1988 1989 /* Adjust valid length. 
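	 * fw_dump_len is trimmed to the number of bytes actually captured,
	 * i.e. up to the end of the last chain entry written above.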
*/ 1990 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump); 1991 1992 qla81xx_fw_dump_failed_0: 1993 qla2xxx_dump_post_process(base_vha, rval); 1994 1995 qla81xx_fw_dump_failed: 1996 #ifndef __CHECKER__ 1997 if (!hardware_locked) 1998 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1999 #else 2000 ; 2001 #endif 2002 } 2003 2004 void 2005 qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) 2006 { 2007 int rval; 2008 uint32_t cnt; 2009 struct qla_hw_data *ha = vha->hw; 2010 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 2011 uint32_t __iomem *dmp_reg; 2012 uint32_t *iter_reg; 2013 uint16_t __iomem *mbx_reg; 2014 unsigned long flags; 2015 struct qla83xx_fw_dump *fw; 2016 void *nxt, *nxt_chain; 2017 uint32_t *last_chain = NULL; 2018 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 2019 2020 flags = 0; 2021 ha->fw_dump_cap_flags = 0; 2022 2023 #ifndef __CHECKER__ 2024 if (!hardware_locked) 2025 spin_lock_irqsave(&ha->hardware_lock, flags); 2026 #endif 2027 2028 if (!ha->fw_dump) { 2029 ql_log(ql_log_warn, vha, 0xd00c, 2030 "No buffer available for dump!!!\n"); 2031 goto qla83xx_fw_dump_failed; 2032 } 2033 2034 if (ha->fw_dumped) { 2035 ql_log(ql_log_warn, vha, 0xd00d, 2036 "Firmware has been previously dumped (%p) -- ignoring " 2037 "request...\n", ha->fw_dump); 2038 goto qla83xx_fw_dump_failed; 2039 } 2040 QLA_FW_STOPPED(ha); 2041 fw = &ha->fw_dump->isp.isp83; 2042 qla2xxx_prep_dump(ha, ha->fw_dump); 2043 2044 fw->host_status = htonl(RD_REG_DWORD(®->host_status)); 2045 2046 /* 2047 * Pause RISC. No need to track timeout, as resetting the chip 2048 * is the right approach incase of pause timeout 2049 */ 2050 qla24xx_pause_risc(reg, ha); 2051 2052 WRT_REG_DWORD(®->iobase_addr, 0x6000); 2053 dmp_reg = ®->iobase_window; 2054 RD_REG_DWORD(dmp_reg); 2055 WRT_REG_DWORD(dmp_reg, 0); 2056 2057 dmp_reg = ®->unused_4_1[0]; 2058 RD_REG_DWORD(dmp_reg); 2059 WRT_REG_DWORD(dmp_reg, 0); 2060 2061 WRT_REG_DWORD(®->iobase_addr, 0x6010); 2062 dmp_reg = ®->unused_4_1[2]; 2063 RD_REG_DWORD(dmp_reg); 2064 WRT_REG_DWORD(dmp_reg, 0); 2065 2066 /* select PCR and disable ecc checking and correction */ 2067 WRT_REG_DWORD(®->iobase_addr, 0x0F70); 2068 RD_REG_DWORD(®->iobase_addr); 2069 WRT_REG_DWORD(®->iobase_select, 0x60000000); /* write to F0h = PCR */ 2070 2071 /* Host/Risc registers. */ 2072 iter_reg = fw->host_risc_reg; 2073 iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg); 2074 iter_reg = qla24xx_read_window(reg, 0x7010, 16, iter_reg); 2075 qla24xx_read_window(reg, 0x7040, 16, iter_reg); 2076 2077 /* PCIe registers. */ 2078 WRT_REG_DWORD(®->iobase_addr, 0x7C00); 2079 RD_REG_DWORD(®->iobase_addr); 2080 WRT_REG_DWORD(®->iobase_window, 0x01); 2081 dmp_reg = ®->iobase_c4; 2082 fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg)); 2083 dmp_reg++; 2084 fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg)); 2085 dmp_reg++; 2086 fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg)); 2087 fw->pcie_regs[3] = htonl(RD_REG_DWORD(®->iobase_window)); 2088 2089 WRT_REG_DWORD(®->iobase_window, 0x00); 2090 RD_REG_DWORD(®->iobase_window); 2091 2092 /* Host interface registers. */ 2093 dmp_reg = ®->flash_addr; 2094 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++) 2095 fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg)); 2096 2097 /* Disable interrupts. */ 2098 WRT_REG_DWORD(®->ictrl, 0); 2099 RD_REG_DWORD(®->ictrl); 2100 2101 /* Shadow registers. 
*/ 2102 WRT_REG_DWORD(®->iobase_addr, 0x0F70); 2103 RD_REG_DWORD(®->iobase_addr); 2104 WRT_REG_DWORD(®->iobase_select, 0xB0000000); 2105 fw->shadow_reg[0] = htonl(RD_REG_DWORD(®->iobase_sdata)); 2106 2107 WRT_REG_DWORD(®->iobase_select, 0xB0100000); 2108 fw->shadow_reg[1] = htonl(RD_REG_DWORD(®->iobase_sdata)); 2109 2110 WRT_REG_DWORD(®->iobase_select, 0xB0200000); 2111 fw->shadow_reg[2] = htonl(RD_REG_DWORD(®->iobase_sdata)); 2112 2113 WRT_REG_DWORD(®->iobase_select, 0xB0300000); 2114 fw->shadow_reg[3] = htonl(RD_REG_DWORD(®->iobase_sdata)); 2115 2116 WRT_REG_DWORD(®->iobase_select, 0xB0400000); 2117 fw->shadow_reg[4] = htonl(RD_REG_DWORD(®->iobase_sdata)); 2118 2119 WRT_REG_DWORD(®->iobase_select, 0xB0500000); 2120 fw->shadow_reg[5] = htonl(RD_REG_DWORD(®->iobase_sdata)); 2121 2122 WRT_REG_DWORD(®->iobase_select, 0xB0600000); 2123 fw->shadow_reg[6] = htonl(RD_REG_DWORD(®->iobase_sdata)); 2124 2125 WRT_REG_DWORD(®->iobase_select, 0xB0700000); 2126 fw->shadow_reg[7] = htonl(RD_REG_DWORD(®->iobase_sdata)); 2127 2128 WRT_REG_DWORD(®->iobase_select, 0xB0800000); 2129 fw->shadow_reg[8] = htonl(RD_REG_DWORD(®->iobase_sdata)); 2130 2131 WRT_REG_DWORD(®->iobase_select, 0xB0900000); 2132 fw->shadow_reg[9] = htonl(RD_REG_DWORD(®->iobase_sdata)); 2133 2134 WRT_REG_DWORD(®->iobase_select, 0xB0A00000); 2135 fw->shadow_reg[10] = htonl(RD_REG_DWORD(®->iobase_sdata)); 2136 2137 /* RISC I/O register. */ 2138 WRT_REG_DWORD(®->iobase_addr, 0x0010); 2139 fw->risc_io_reg = htonl(RD_REG_DWORD(®->iobase_window)); 2140 2141 /* Mailbox registers. */ 2142 mbx_reg = ®->mailbox0; 2143 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++) 2144 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg)); 2145 2146 /* Transfer sequence registers. */ 2147 iter_reg = fw->xseq_gp_reg; 2148 iter_reg = qla24xx_read_window(reg, 0xBE00, 16, iter_reg); 2149 iter_reg = qla24xx_read_window(reg, 0xBE10, 16, iter_reg); 2150 iter_reg = qla24xx_read_window(reg, 0xBE20, 16, iter_reg); 2151 iter_reg = qla24xx_read_window(reg, 0xBE30, 16, iter_reg); 2152 iter_reg = qla24xx_read_window(reg, 0xBE40, 16, iter_reg); 2153 iter_reg = qla24xx_read_window(reg, 0xBE50, 16, iter_reg); 2154 iter_reg = qla24xx_read_window(reg, 0xBE60, 16, iter_reg); 2155 iter_reg = qla24xx_read_window(reg, 0xBE70, 16, iter_reg); 2156 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg); 2157 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg); 2158 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg); 2159 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg); 2160 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg); 2161 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg); 2162 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg); 2163 qla24xx_read_window(reg, 0xBF70, 16, iter_reg); 2164 2165 iter_reg = fw->xseq_0_reg; 2166 iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg); 2167 iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg); 2168 qla24xx_read_window(reg, 0xBFE0, 16, iter_reg); 2169 2170 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg); 2171 2172 qla24xx_read_window(reg, 0xBEF0, 16, fw->xseq_2_reg); 2173 2174 /* Receive sequence registers. 
*/ 2175 iter_reg = fw->rseq_gp_reg; 2176 iter_reg = qla24xx_read_window(reg, 0xFE00, 16, iter_reg); 2177 iter_reg = qla24xx_read_window(reg, 0xFE10, 16, iter_reg); 2178 iter_reg = qla24xx_read_window(reg, 0xFE20, 16, iter_reg); 2179 iter_reg = qla24xx_read_window(reg, 0xFE30, 16, iter_reg); 2180 iter_reg = qla24xx_read_window(reg, 0xFE40, 16, iter_reg); 2181 iter_reg = qla24xx_read_window(reg, 0xFE50, 16, iter_reg); 2182 iter_reg = qla24xx_read_window(reg, 0xFE60, 16, iter_reg); 2183 iter_reg = qla24xx_read_window(reg, 0xFE70, 16, iter_reg); 2184 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg); 2185 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg); 2186 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg); 2187 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg); 2188 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg); 2189 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg); 2190 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg); 2191 qla24xx_read_window(reg, 0xFF70, 16, iter_reg); 2192 2193 iter_reg = fw->rseq_0_reg; 2194 iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg); 2195 qla24xx_read_window(reg, 0xFFD0, 16, iter_reg); 2196 2197 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg); 2198 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg); 2199 qla24xx_read_window(reg, 0xFEF0, 16, fw->rseq_3_reg); 2200 2201 /* Auxiliary sequence registers. */ 2202 iter_reg = fw->aseq_gp_reg; 2203 iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg); 2204 iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg); 2205 iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg); 2206 iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg); 2207 iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg); 2208 iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg); 2209 iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg); 2210 iter_reg = qla24xx_read_window(reg, 0xB070, 16, iter_reg); 2211 iter_reg = qla24xx_read_window(reg, 0xB100, 16, iter_reg); 2212 iter_reg = qla24xx_read_window(reg, 0xB110, 16, iter_reg); 2213 iter_reg = qla24xx_read_window(reg, 0xB120, 16, iter_reg); 2214 iter_reg = qla24xx_read_window(reg, 0xB130, 16, iter_reg); 2215 iter_reg = qla24xx_read_window(reg, 0xB140, 16, iter_reg); 2216 iter_reg = qla24xx_read_window(reg, 0xB150, 16, iter_reg); 2217 iter_reg = qla24xx_read_window(reg, 0xB160, 16, iter_reg); 2218 qla24xx_read_window(reg, 0xB170, 16, iter_reg); 2219 2220 iter_reg = fw->aseq_0_reg; 2221 iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg); 2222 qla24xx_read_window(reg, 0xB0D0, 16, iter_reg); 2223 2224 qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg); 2225 qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg); 2226 qla24xx_read_window(reg, 0xB1F0, 16, fw->aseq_3_reg); 2227 2228 /* Command DMA registers. */ 2229 iter_reg = fw->cmd_dma_reg; 2230 iter_reg = qla24xx_read_window(reg, 0x7100, 16, iter_reg); 2231 iter_reg = qla24xx_read_window(reg, 0x7120, 16, iter_reg); 2232 iter_reg = qla24xx_read_window(reg, 0x7130, 16, iter_reg); 2233 qla24xx_read_window(reg, 0x71F0, 16, iter_reg); 2234 2235 /* Queues. 
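	 * For each of the req0/rsp0/req1 queues the 8-dword register window
	 * (0x7200/0x7300/0x7400) is captured first, followed by seven more
	 * values read back through iobase_q.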
*/ 2236 iter_reg = fw->req0_dma_reg; 2237 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg); 2238 dmp_reg = ®->iobase_q; 2239 for (cnt = 0; cnt < 7; cnt++, dmp_reg++) 2240 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg)); 2241 2242 iter_reg = fw->resp0_dma_reg; 2243 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg); 2244 dmp_reg = ®->iobase_q; 2245 for (cnt = 0; cnt < 7; cnt++, dmp_reg++) 2246 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg)); 2247 2248 iter_reg = fw->req1_dma_reg; 2249 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg); 2250 dmp_reg = ®->iobase_q; 2251 for (cnt = 0; cnt < 7; cnt++, dmp_reg++) 2252 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg)); 2253 2254 /* Transmit DMA registers. */ 2255 iter_reg = fw->xmt0_dma_reg; 2256 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg); 2257 qla24xx_read_window(reg, 0x7610, 16, iter_reg); 2258 2259 iter_reg = fw->xmt1_dma_reg; 2260 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg); 2261 qla24xx_read_window(reg, 0x7630, 16, iter_reg); 2262 2263 iter_reg = fw->xmt2_dma_reg; 2264 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg); 2265 qla24xx_read_window(reg, 0x7650, 16, iter_reg); 2266 2267 iter_reg = fw->xmt3_dma_reg; 2268 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg); 2269 qla24xx_read_window(reg, 0x7670, 16, iter_reg); 2270 2271 iter_reg = fw->xmt4_dma_reg; 2272 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg); 2273 qla24xx_read_window(reg, 0x7690, 16, iter_reg); 2274 2275 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg); 2276 2277 /* Receive DMA registers. */ 2278 iter_reg = fw->rcvt0_data_dma_reg; 2279 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg); 2280 qla24xx_read_window(reg, 0x7710, 16, iter_reg); 2281 2282 iter_reg = fw->rcvt1_data_dma_reg; 2283 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg); 2284 qla24xx_read_window(reg, 0x7730, 16, iter_reg); 2285 2286 /* RISC registers. */ 2287 iter_reg = fw->risc_gp_reg; 2288 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg); 2289 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg); 2290 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg); 2291 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg); 2292 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg); 2293 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg); 2294 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg); 2295 qla24xx_read_window(reg, 0x0F70, 16, iter_reg); 2296 2297 /* Local memory controller registers. */ 2298 iter_reg = fw->lmc_reg; 2299 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg); 2300 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg); 2301 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg); 2302 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg); 2303 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg); 2304 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg); 2305 iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg); 2306 qla24xx_read_window(reg, 0x3070, 16, iter_reg); 2307 2308 /* Fibre Protocol Module registers. 
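	 * ISP83xx exposes two additional FPM windows (0x40E0/0x40F0) beyond
	 * the fourteen captured for ISP81xx above.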
*/ 2309 iter_reg = fw->fpm_hdw_reg; 2310 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg); 2311 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg); 2312 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg); 2313 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg); 2314 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg); 2315 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg); 2316 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg); 2317 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg); 2318 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg); 2319 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg); 2320 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg); 2321 iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg); 2322 iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg); 2323 iter_reg = qla24xx_read_window(reg, 0x40D0, 16, iter_reg); 2324 iter_reg = qla24xx_read_window(reg, 0x40E0, 16, iter_reg); 2325 qla24xx_read_window(reg, 0x40F0, 16, iter_reg); 2326 2327 /* RQ0 Array registers. */ 2328 iter_reg = fw->rq0_array_reg; 2329 iter_reg = qla24xx_read_window(reg, 0x5C00, 16, iter_reg); 2330 iter_reg = qla24xx_read_window(reg, 0x5C10, 16, iter_reg); 2331 iter_reg = qla24xx_read_window(reg, 0x5C20, 16, iter_reg); 2332 iter_reg = qla24xx_read_window(reg, 0x5C30, 16, iter_reg); 2333 iter_reg = qla24xx_read_window(reg, 0x5C40, 16, iter_reg); 2334 iter_reg = qla24xx_read_window(reg, 0x5C50, 16, iter_reg); 2335 iter_reg = qla24xx_read_window(reg, 0x5C60, 16, iter_reg); 2336 iter_reg = qla24xx_read_window(reg, 0x5C70, 16, iter_reg); 2337 iter_reg = qla24xx_read_window(reg, 0x5C80, 16, iter_reg); 2338 iter_reg = qla24xx_read_window(reg, 0x5C90, 16, iter_reg); 2339 iter_reg = qla24xx_read_window(reg, 0x5CA0, 16, iter_reg); 2340 iter_reg = qla24xx_read_window(reg, 0x5CB0, 16, iter_reg); 2341 iter_reg = qla24xx_read_window(reg, 0x5CC0, 16, iter_reg); 2342 iter_reg = qla24xx_read_window(reg, 0x5CD0, 16, iter_reg); 2343 iter_reg = qla24xx_read_window(reg, 0x5CE0, 16, iter_reg); 2344 qla24xx_read_window(reg, 0x5CF0, 16, iter_reg); 2345 2346 /* RQ1 Array registers. */ 2347 iter_reg = fw->rq1_array_reg; 2348 iter_reg = qla24xx_read_window(reg, 0x5D00, 16, iter_reg); 2349 iter_reg = qla24xx_read_window(reg, 0x5D10, 16, iter_reg); 2350 iter_reg = qla24xx_read_window(reg, 0x5D20, 16, iter_reg); 2351 iter_reg = qla24xx_read_window(reg, 0x5D30, 16, iter_reg); 2352 iter_reg = qla24xx_read_window(reg, 0x5D40, 16, iter_reg); 2353 iter_reg = qla24xx_read_window(reg, 0x5D50, 16, iter_reg); 2354 iter_reg = qla24xx_read_window(reg, 0x5D60, 16, iter_reg); 2355 iter_reg = qla24xx_read_window(reg, 0x5D70, 16, iter_reg); 2356 iter_reg = qla24xx_read_window(reg, 0x5D80, 16, iter_reg); 2357 iter_reg = qla24xx_read_window(reg, 0x5D90, 16, iter_reg); 2358 iter_reg = qla24xx_read_window(reg, 0x5DA0, 16, iter_reg); 2359 iter_reg = qla24xx_read_window(reg, 0x5DB0, 16, iter_reg); 2360 iter_reg = qla24xx_read_window(reg, 0x5DC0, 16, iter_reg); 2361 iter_reg = qla24xx_read_window(reg, 0x5DD0, 16, iter_reg); 2362 iter_reg = qla24xx_read_window(reg, 0x5DE0, 16, iter_reg); 2363 qla24xx_read_window(reg, 0x5DF0, 16, iter_reg); 2364 2365 /* RP0 Array registers. 
*/ 2366 iter_reg = fw->rp0_array_reg; 2367 iter_reg = qla24xx_read_window(reg, 0x5E00, 16, iter_reg); 2368 iter_reg = qla24xx_read_window(reg, 0x5E10, 16, iter_reg); 2369 iter_reg = qla24xx_read_window(reg, 0x5E20, 16, iter_reg); 2370 iter_reg = qla24xx_read_window(reg, 0x5E30, 16, iter_reg); 2371 iter_reg = qla24xx_read_window(reg, 0x5E40, 16, iter_reg); 2372 iter_reg = qla24xx_read_window(reg, 0x5E50, 16, iter_reg); 2373 iter_reg = qla24xx_read_window(reg, 0x5E60, 16, iter_reg); 2374 iter_reg = qla24xx_read_window(reg, 0x5E70, 16, iter_reg); 2375 iter_reg = qla24xx_read_window(reg, 0x5E80, 16, iter_reg); 2376 iter_reg = qla24xx_read_window(reg, 0x5E90, 16, iter_reg); 2377 iter_reg = qla24xx_read_window(reg, 0x5EA0, 16, iter_reg); 2378 iter_reg = qla24xx_read_window(reg, 0x5EB0, 16, iter_reg); 2379 iter_reg = qla24xx_read_window(reg, 0x5EC0, 16, iter_reg); 2380 iter_reg = qla24xx_read_window(reg, 0x5ED0, 16, iter_reg); 2381 iter_reg = qla24xx_read_window(reg, 0x5EE0, 16, iter_reg); 2382 qla24xx_read_window(reg, 0x5EF0, 16, iter_reg); 2383 2384 /* RP1 Array registers. */ 2385 iter_reg = fw->rp1_array_reg; 2386 iter_reg = qla24xx_read_window(reg, 0x5F00, 16, iter_reg); 2387 iter_reg = qla24xx_read_window(reg, 0x5F10, 16, iter_reg); 2388 iter_reg = qla24xx_read_window(reg, 0x5F20, 16, iter_reg); 2389 iter_reg = qla24xx_read_window(reg, 0x5F30, 16, iter_reg); 2390 iter_reg = qla24xx_read_window(reg, 0x5F40, 16, iter_reg); 2391 iter_reg = qla24xx_read_window(reg, 0x5F50, 16, iter_reg); 2392 iter_reg = qla24xx_read_window(reg, 0x5F60, 16, iter_reg); 2393 iter_reg = qla24xx_read_window(reg, 0x5F70, 16, iter_reg); 2394 iter_reg = qla24xx_read_window(reg, 0x5F80, 16, iter_reg); 2395 iter_reg = qla24xx_read_window(reg, 0x5F90, 16, iter_reg); 2396 iter_reg = qla24xx_read_window(reg, 0x5FA0, 16, iter_reg); 2397 iter_reg = qla24xx_read_window(reg, 0x5FB0, 16, iter_reg); 2398 iter_reg = qla24xx_read_window(reg, 0x5FC0, 16, iter_reg); 2399 iter_reg = qla24xx_read_window(reg, 0x5FD0, 16, iter_reg); 2400 iter_reg = qla24xx_read_window(reg, 0x5FE0, 16, iter_reg); 2401 qla24xx_read_window(reg, 0x5FF0, 16, iter_reg); 2402 2403 iter_reg = fw->at0_array_reg; 2404 iter_reg = qla24xx_read_window(reg, 0x7080, 16, iter_reg); 2405 iter_reg = qla24xx_read_window(reg, 0x7090, 16, iter_reg); 2406 iter_reg = qla24xx_read_window(reg, 0x70A0, 16, iter_reg); 2407 iter_reg = qla24xx_read_window(reg, 0x70B0, 16, iter_reg); 2408 iter_reg = qla24xx_read_window(reg, 0x70C0, 16, iter_reg); 2409 iter_reg = qla24xx_read_window(reg, 0x70D0, 16, iter_reg); 2410 iter_reg = qla24xx_read_window(reg, 0x70E0, 16, iter_reg); 2411 qla24xx_read_window(reg, 0x70F0, 16, iter_reg); 2412 2413 /* I/O Queue Control registers. */ 2414 qla24xx_read_window(reg, 0x7800, 16, fw->queue_control_reg); 2415 2416 /* Frame Buffer registers. 
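	 * The ISP83xx frame-buffer dump also covers the 0x6530-0x65E0
	 * windows that are not captured for ISP81xx.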
*/ 2417 iter_reg = fw->fb_hdw_reg; 2418 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg); 2419 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg); 2420 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg); 2421 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg); 2422 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg); 2423 iter_reg = qla24xx_read_window(reg, 0x6060, 16, iter_reg); 2424 iter_reg = qla24xx_read_window(reg, 0x6070, 16, iter_reg); 2425 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg); 2426 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg); 2427 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg); 2428 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg); 2429 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg); 2430 iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg); 2431 iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg); 2432 iter_reg = qla24xx_read_window(reg, 0x6530, 16, iter_reg); 2433 iter_reg = qla24xx_read_window(reg, 0x6540, 16, iter_reg); 2434 iter_reg = qla24xx_read_window(reg, 0x6550, 16, iter_reg); 2435 iter_reg = qla24xx_read_window(reg, 0x6560, 16, iter_reg); 2436 iter_reg = qla24xx_read_window(reg, 0x6570, 16, iter_reg); 2437 iter_reg = qla24xx_read_window(reg, 0x6580, 16, iter_reg); 2438 iter_reg = qla24xx_read_window(reg, 0x6590, 16, iter_reg); 2439 iter_reg = qla24xx_read_window(reg, 0x65A0, 16, iter_reg); 2440 iter_reg = qla24xx_read_window(reg, 0x65B0, 16, iter_reg); 2441 iter_reg = qla24xx_read_window(reg, 0x65C0, 16, iter_reg); 2442 iter_reg = qla24xx_read_window(reg, 0x65D0, 16, iter_reg); 2443 iter_reg = qla24xx_read_window(reg, 0x65E0, 16, iter_reg); 2444 qla24xx_read_window(reg, 0x6F00, 16, iter_reg); 2445 2446 /* Multi queue registers */ 2447 nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset, 2448 &last_chain); 2449 2450 rval = qla24xx_soft_reset(ha); 2451 if (rval != QLA_SUCCESS) { 2452 ql_log(ql_log_warn, vha, 0xd00e, 2453 "SOFT RESET FAILED, forcing continuation of dump!!!\n"); 2454 rval = QLA_SUCCESS; 2455 2456 ql_log(ql_log_warn, vha, 0xd00f, "try a bigger hammer!!!\n"); 2457 2458 WRT_REG_DWORD(®->hccr, HCCRX_SET_RISC_RESET); 2459 RD_REG_DWORD(®->hccr); 2460 2461 WRT_REG_DWORD(®->hccr, HCCRX_REL_RISC_PAUSE); 2462 RD_REG_DWORD(®->hccr); 2463 2464 WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_RESET); 2465 RD_REG_DWORD(®->hccr); 2466 2467 for (cnt = 30000; cnt && (RD_REG_WORD(®->mailbox0)); cnt--) 2468 udelay(5); 2469 2470 if (!cnt) { 2471 nxt = fw->code_ram; 2472 nxt += sizeof(fw->code_ram); 2473 nxt += (ha->fw_memory_size - 0x100000 + 1); 2474 goto copy_queue; 2475 } else { 2476 set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags); 2477 ql_log(ql_log_warn, vha, 0xd010, 2478 "bigger hammer success?\n"); 2479 } 2480 } 2481 2482 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram), 2483 &nxt); 2484 if (rval != QLA_SUCCESS) 2485 goto qla83xx_fw_dump_failed_0; 2486 2487 copy_queue: 2488 nxt = qla2xxx_copy_queues(ha, nxt); 2489 2490 qla24xx_copy_eft(ha, nxt); 2491 2492 /* Chain entries -- started with MQ. 
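	 * FCE, multi-queue shadow registers, ATIO queues, extended login and
	 * exchange offload buffers are appended below as variable-length
	 * chain entries; when present, the final entry is tagged with
	 * DUMP_CHAIN_LAST.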
*/ 2493 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain); 2494 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain); 2495 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain); 2496 nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain); 2497 nxt_chain = qla81xx_copy_exchoffld(ha, nxt_chain, &last_chain); 2498 if (last_chain) { 2499 ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT); 2500 *last_chain |= htonl(DUMP_CHAIN_LAST); 2501 } 2502 2503 /* Adjust valid length. */ 2504 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump); 2505 2506 qla83xx_fw_dump_failed_0: 2507 qla2xxx_dump_post_process(base_vha, rval); 2508 2509 qla83xx_fw_dump_failed: 2510 #ifndef __CHECKER__ 2511 if (!hardware_locked) 2512 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2513 #else 2514 ; 2515 #endif 2516 } 2517 2518 /****************************************************************************/ 2519 /* Driver Debug Functions. */ 2520 /****************************************************************************/ 2521 2522 /* 2523 * This function is for formatting and logging debug information. 2524 * It is to be used when vha is available. It formats the message 2525 * and logs it to the messages file. 2526 * parameters: 2527 * level: The level of the debug messages to be printed. 2528 * If ql2xextended_error_logging value is correctly set, 2529 * this message will appear in the messages file. 2530 * vha: Pointer to the scsi_qla_host_t. 2531 * id: This is a unique identifier for the level. It identifies the 2532 * part of the code from where the message originated. 2533 * msg: The message to be displayed. 2534 */ 2535 void 2536 ql_dbg(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...) 2537 { 2538 va_list va; 2539 struct va_format vaf; 2540 2541 if (!ql_mask_match(level)) 2542 return; 2543 2544 va_start(va, fmt); 2545 2546 vaf.fmt = fmt; 2547 vaf.va = &va; 2548 2549 if (vha != NULL) { 2550 const struct pci_dev *pdev = vha->hw->pdev; 2551 /* <module-name> <pci-name> <msg-id>:<host> Message */ 2552 pr_warn("%s [%s]-%04x:%ld: %pV", 2553 QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset, 2554 vha->host_no, &vaf); 2555 } else { 2556 pr_warn("%s [%s]-%04x: : %pV", 2557 QL_MSGHDR, "0000:00:00.0", id + ql_dbg_offset, &vaf); 2558 } 2559 2560 va_end(va); 2561 2562 } 2563 2564 /* 2565 * This function is for formatting and logging debug information. 2566 * It is to be used when vha is not available and pci is available, 2567 * i.e., before host allocation. It formats the message and logs it 2568 * to the messages file. 2569 * parameters: 2570 * level: The level of the debug messages to be printed. 2571 * If ql2xextended_error_logging value is correctly set, 2572 * this message will appear in the messages file. 2573 * pdev: Pointer to the struct pci_dev. 2574 * id: This is a unique id for the level. It identifies the part 2575 * of the code from where the message originated. 2576 * msg: The message to be displayed. 2577 */ 2578 void 2579 ql_dbg_pci(uint level, struct pci_dev *pdev, uint id, const char *fmt, ...) 
2580 { 2581 va_list va; 2582 struct va_format vaf; 2583 2584 if (pdev == NULL) 2585 return; 2586 if (!ql_mask_match(level)) 2587 return; 2588 2589 va_start(va, fmt); 2590 2591 vaf.fmt = fmt; 2592 vaf.va = &va; 2593 2594 /* <module-name> <dev-name>:<msg-id> Message */ 2595 pr_warn("%s [%s]-%04x: : %pV", 2596 QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset, &vaf); 2597 2598 va_end(va); 2599 } 2600 2601 /* 2602 * This function is for formatting and logging log messages. 2603 * It is to be used when vha is available. It formats the message 2604 * and logs it to the messages file. All the messages will be logged 2605 * irrespective of value of ql2xextended_error_logging. 2606 * parameters: 2607 * level: The level of the log messages to be printed in the 2608 * messages file. 2609 * vha: Pointer to the scsi_qla_host_t 2610 * id: This is a unique id for the level. It identifies the 2611 * part of the code from where the message originated. 2612 * msg: The message to be displayed. 2613 */ 2614 void 2615 ql_log(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...) 2616 { 2617 va_list va; 2618 struct va_format vaf; 2619 char pbuf[128]; 2620 2621 if (level > ql_errlev) 2622 return; 2623 2624 if (vha != NULL) { 2625 const struct pci_dev *pdev = vha->hw->pdev; 2626 /* <module-name> <msg-id>:<host> Message */ 2627 snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x:%ld: ", 2628 QL_MSGHDR, dev_name(&(pdev->dev)), id, vha->host_no); 2629 } else { 2630 snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ", 2631 QL_MSGHDR, "0000:00:00.0", id); 2632 } 2633 pbuf[sizeof(pbuf) - 1] = 0; 2634 2635 va_start(va, fmt); 2636 2637 vaf.fmt = fmt; 2638 vaf.va = &va; 2639 2640 switch (level) { 2641 case ql_log_fatal: /* FATAL LOG */ 2642 pr_crit("%s%pV", pbuf, &vaf); 2643 break; 2644 case ql_log_warn: 2645 pr_err("%s%pV", pbuf, &vaf); 2646 break; 2647 case ql_log_info: 2648 pr_warn("%s%pV", pbuf, &vaf); 2649 break; 2650 default: 2651 pr_info("%s%pV", pbuf, &vaf); 2652 break; 2653 } 2654 2655 va_end(va); 2656 } 2657 2658 /* 2659 * This function is for formatting and logging log messages. 2660 * It is to be used when vha is not available and pci is available, 2661 * i.e., before host allocation. It formats the message and logs 2662 * it to the messages file. All the messages are logged irrespective 2663 * of the value of ql2xextended_error_logging. 2664 * parameters: 2665 * level: The level of the log messages to be printed in the 2666 * messages file. 2667 * pdev: Pointer to the struct pci_dev. 2668 * id: This is a unique id for the level. It identifies the 2669 * part of the code from where the message originated. 2670 * msg: The message to be displayed. 2671 */ 2672 void 2673 ql_log_pci(uint level, struct pci_dev *pdev, uint id, const char *fmt, ...) 
{
	va_list va;
	struct va_format vaf;
	char pbuf[128];

	if (pdev == NULL)
		return;
	if (level > ql_errlev)
		return;

	/* <module-name> <dev-name>:<msg-id> Message */
	snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
	    QL_MSGHDR, dev_name(&(pdev->dev)), id);
	pbuf[sizeof(pbuf) - 1] = 0;

	va_start(va, fmt);

	vaf.fmt = fmt;
	vaf.va = &va;

	switch (level) {
	case ql_log_fatal: /* FATAL LOG */
		pr_crit("%s%pV", pbuf, &vaf);
		break;
	case ql_log_warn:
		pr_err("%s%pV", pbuf, &vaf);
		break;
	case ql_log_info:
		pr_warn("%s%pV", pbuf, &vaf);
		break;
	default:
		pr_info("%s%pV", pbuf, &vaf);
		break;
	}

	va_end(va);
}

void
ql_dump_regs(uint level, scsi_qla_host_t *vha, uint id)
{
	int i;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
	uint16_t __iomem *mbx_reg;

	if (!ql_mask_match(level))
		return;

	if (IS_P3P_TYPE(ha))
		mbx_reg = &reg82->mailbox_in[0];
	else if (IS_FWI2_CAPABLE(ha))
		mbx_reg = &reg24->mailbox0;
	else
		mbx_reg = MAILBOX_REG(ha, reg, 0);

	ql_dbg(level, vha, id, "Mailbox registers:\n");
	for (i = 0; i < 6; i++, mbx_reg++)
		ql_dbg(level, vha, id,
		    "mbox[%d] %#04x\n", i, RD_REG_WORD(mbx_reg));
}

void
ql_dump_buffer(uint level, scsi_qla_host_t *vha, uint id, const void *buf,
	uint size)
{
	uint cnt;

	if (!ql_mask_match(level))
		return;

	ql_dbg(level, vha, id,
	    "%-+5d 0 1 2 3 4 5 6 7 8 9 A B C D E F\n", size);
	ql_dbg(level, vha, id,
	    "----- -----------------------------------------------\n");
	for (cnt = 0; cnt < size; cnt += 16) {
		ql_dbg(level, vha, id, "%04x: ", cnt);
		print_hex_dump(KERN_CONT, "", DUMP_PREFIX_NONE, 16, 1,
		    buf + cnt, min(16U, size - cnt), false);
	}
}

/*
 * This function is for formatting and logging log messages.
 * It is to be used when a queue pair (qpair) is available. It formats
 * the message and logs it to the messages file. All the messages will
 * be logged irrespective of the value of ql2xextended_error_logging.
 * parameters:
 *    level: The level of the log messages to be printed in the
 *           messages file.
 *    qpair: Pointer to the qla_qpair.
 *    id: This is a unique id for the level. It identifies the
 *        part of the code from where the message originated.
 *    msg: The message to be displayed.
 */
void
ql_log_qp(uint32_t level, struct qla_qpair *qpair, int32_t id,
    const char *fmt, ...)
{
	va_list va;
	struct va_format vaf;
	char pbuf[128];

	if (level > ql_errlev)
		return;

	if (qpair != NULL) {
		const struct pci_dev *pdev = qpair->pdev;
		/* <module-name> <msg-id>:<host> Message */
		snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: ",
		    QL_MSGHDR, dev_name(&(pdev->dev)), id);
	} else {
		snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
		    QL_MSGHDR, "0000:00:00.0", id);
	}
	pbuf[sizeof(pbuf) - 1] = 0;

	va_start(va, fmt);

	vaf.fmt = fmt;
	vaf.va = &va;

	switch (level) {
	case ql_log_fatal: /* FATAL LOG */
		pr_crit("%s%pV", pbuf, &vaf);
		break;
	case ql_log_warn:
		pr_err("%s%pV", pbuf, &vaf);
		break;
	case ql_log_info:
		pr_warn("%s%pV", pbuf, &vaf);
		break;
	default:
		pr_info("%s%pV", pbuf, &vaf);
		break;
	}

	va_end(va);
}

/*
 * This function is for formatting and logging debug information.
 * It is to be used when a queue pair (qpair) is available. It formats
 * the message and logs it to the messages file.
 * parameters:
 *    level: The level of the debug messages to be printed.
 *           If ql2xextended_error_logging value is correctly set,
 *           this message will appear in the messages file.
 *    qpair: Pointer to the qla_qpair.
 *    id: This is a unique identifier for the level. It identifies the
 *        part of the code from where the message originated.
 *    msg: The message to be displayed.
 */
void
ql_dbg_qp(uint32_t level, struct qla_qpair *qpair, int32_t id,
    const char *fmt, ...)
{
	va_list va;
	struct va_format vaf;

	if (!ql_mask_match(level))
		return;

	va_start(va, fmt);

	vaf.fmt = fmt;
	vaf.va = &va;

	if (qpair != NULL) {
		const struct pci_dev *pdev = qpair->pdev;
		/* <module-name> <pci-name> <msg-id>:<host> Message */
		pr_warn("%s [%s]-%04x: %pV",
		    QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset,
		    &vaf);
	} else {
		pr_warn("%s [%s]-%04x: : %pV",
		    QL_MSGHDR, "0000:00:00.0", id + ql_dbg_offset, &vaf);
	}

	va_end(va);
}
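
/*
 * Example usage of the logging helpers above (hypothetical call sites for
 * illustration only; the message ids shown are made up and are not
 * reserved in the id table at the top of this file):
 *
 *	ql_dbg(ql_dbg_init, vha, 0x0194,
 *	    "Request queue depth %d.\n", req->length);
 *
 *	ql_log(ql_log_warn, vha, 0x1207,
 *	    "Mailbox command timed out, cmd=%x.\n", mb_cmd);
 *
 * ql_dbg()/ql_dbg_pci()/ql_dbg_qp() output is gated by
 * ql2xextended_error_logging (via ql_mask_match()), whereas the ql_log()
 * variants are gated only by ql_errlev.
 */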