/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_tmpl.h"

/* ISP24xx-style register block of the adapter owning @vha. */
#define ISPREG(vha)	(&(vha)->hw->iobase->isp24)
/* Byte offset of the window-select (iobase_addr) register within @reg. */
#define IOBAR(reg)	offsetof(typeof(*(reg)), iobase_addr)
/* Window-select register offset for @vha's ISP24xx register block. */
#define IOBASE(vha)	IOBAR(ISPREG(vha))

/*
 * Append a 16-bit @value to the dump at *@len, little-endian.
 *
 * All insert/read helpers below share the same contract: when @buf is
 * NULL nothing is stored but *@len is still advanced, so the identical
 * template walk can be used both to size the dump and to capture it.
 */
static inline void
qla27xx_insert16(uint16_t value, void *buf, ulong *len)
{
	if (buf) {
		buf += *len;
		*(__le16 *)buf = cpu_to_le16(value);
	}
	*len += sizeof(value);
}

/* Append a 32-bit @value to the dump at *@len, little-endian. */
static inline void
qla27xx_insert32(uint32_t value, void *buf, ulong *len)
{
	if (buf) {
		buf += *len;
		*(__le32 *)buf = cpu_to_le32(value);
	}
	*len += sizeof(value);
}

/*
 * Append @size bytes from @mem to the dump at *@len.  *@len advances by
 * @size even when @mem is NULL or @size is zero-copyable only in part,
 * keeping sizing and capture passes in step.
 */
static inline void
qla27xx_insertbuf(void *mem, ulong size, void *buf, ulong *len)
{
	if (buf && mem && size) {
		buf += *len;
		memcpy(buf, mem, size);
	}
	*len += size;
}

/*
 * Read one byte from the MMIO @window and append it to the dump.
 * Each sample is stored as a full dword; during a sizing pass
 * (@buf == NULL) the register is not touched.
 */
static inline void
qla27xx_read8(void __iomem *window, void *buf, ulong *len)
{
	uint8_t value = ~0;

	if (buf) {
		value = RD_REG_BYTE(window);
	}
	qla27xx_insert32(value, buf, len);
}

/* Read one 16-bit word from @window and append it as a dword. */
static inline void
qla27xx_read16(void __iomem *window, void *buf, ulong *len)
{
	uint16_t value = ~0;

	if (buf) {
		value = RD_REG_WORD(window);
	}
	qla27xx_insert32(value, buf, len);
}

/* Read one 32-bit dword from @window and append it to the dump. */
static inline void
qla27xx_read32(void __iomem *window, void *buf, ulong *len)
{
	uint32_t value = ~0;

	if (buf) {
		value = RD_REG_DWORD(window);
	}
	qla27xx_insert32(value, buf, len);
}

/* Select the read helper matching a template register @width (1/2/other). */
static inline void (*qla27xx_read_vector(uint width))(void __iomem*, void *, ulong *)
{
	return
	    (width == 1) ? qla27xx_read8 :
	    (width == 2) ? qla27xx_read16 :
			   qla27xx_read32;
}

/* Read the 32-bit register at byte @offset into @reg and append it. */
static inline void
qla27xx_read_reg(__iomem struct device_reg_24xx *reg,
	uint offset, void *buf, ulong *len)
{
	void __iomem *window = (void __iomem *)reg + offset;

	qla27xx_read32(window, buf, len);
}

/*
 * Write @data to the register at byte @offset into @reg.  Writes are
 * suppressed during a sizing pass (@buf == NULL) so sizing never
 * disturbs hardware state.
 */
static inline void
qla27xx_write_reg(__iomem struct device_reg_24xx *reg,
	uint offset, uint32_t data, void *buf)
{
	if (buf) {
		void __iomem *window = (void __iomem *)reg + offset;

		WRT_REG_DWORD(window, data);
	}
}

/*
 * Capture @count registers of @width bytes through a banked window:
 * select the bank by writing @addr to the window-select register, then
 * read repeatedly at @offset, recording each sample prefixed by its
 * incrementing address.
 */
static inline void
qla27xx_read_window(__iomem struct device_reg_24xx *reg,
	uint32_t addr, uint offset, uint count, uint width, void *buf,
	ulong *len)
{
	void __iomem *window = (void __iomem *)reg + offset;
	void (*readn)(void __iomem*, void *, ulong *) = qla27xx_read_vector(width);

	qla27xx_write_reg(reg, IOBAR(reg), addr, buf);
	while (count--) {
		qla27xx_insert32(addr, buf, len);
		readn(window, buf, len);
		window += width;
		addr++;
	}
}

/*
 * Flag @ent as skipped in the captured template copy so post-processing
 * tools ignore it; no-op during a sizing pass.
 */
static inline void
qla27xx_skip_entry(struct qla27xx_fwdt_entry *ent, void *buf)
{
	if (buf)
		ent->hdr.driver_flags |= DRIVER_FLAG_SKIP_ENTRY;
}

/* Advance to the next template entry using the entry's own size field. */
static inline struct qla27xx_fwdt_entry *
qla27xx_next_entry(struct qla27xx_fwdt_entry *ent)
{
	return (void *)ent + le32_to_cpu(ent->hdr.size);
}

/* Entry type 0: no-op — mark skipped and continue. */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t0(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd100,
	    "%s: nop [%lx]\n", __func__, *len);
	qla27xx_skip_entry(ent, buf);

	return qla27xx_next_entry(ent);
}

/* Entry type 255: end of template — returning NULL stops the walk. */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t255(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd1ff,
	    "%s: end [%lx]\n", __func__, *len);
	qla27xx_skip_entry(ent, buf);

	/* terminate */
	return NULL;
}
/* Entry t256: read a block of I/O registers through window type 1. */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t256(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong addr = le32_to_cpu(ent->t256.base_addr);
	uint offset = ent->t256.pci_offset;
	ulong count = le16_to_cpu(ent->t256.reg_count);
	uint width = ent->t256.reg_width;

	ql_dbg(ql_dbg_misc, vha, 0xd200,
	    "%s: rdio t1 [%lx]\n", __func__, *len);
	qla27xx_read_window(ISPREG(vha), addr, offset, count, width, buf, len);

	return qla27xx_next_entry(ent);
}

/* Entry t257: write one I/O register through window type 1. */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t257(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong addr = le32_to_cpu(ent->t257.base_addr);
	uint offset = ent->t257.pci_offset;
	ulong data = le32_to_cpu(ent->t257.write_data);

	ql_dbg(ql_dbg_misc, vha, 0xd201,
	    "%s: wrio t1 [%lx]\n", __func__, *len);
	/* Select the window, then write the data register. */
	qla27xx_write_reg(ISPREG(vha), IOBASE(vha), addr, buf);
	qla27xx_write_reg(ISPREG(vha), offset, data, buf);

	return qla27xx_next_entry(ent);
}

/* Entry t258: bank-select, then read a block of registers (window type 2). */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t258(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint banksel = ent->t258.banksel_offset;
	ulong bank = le32_to_cpu(ent->t258.bank);
	ulong addr = le32_to_cpu(ent->t258.base_addr);
	uint offset = ent->t258.pci_offset;
	uint count = le16_to_cpu(ent->t258.reg_count);
	uint width = ent->t258.reg_width;

	ql_dbg(ql_dbg_misc, vha, 0xd202,
	    "%s: rdio t2 [%lx]\n", __func__, *len);
	qla27xx_write_reg(ISPREG(vha), banksel, bank, buf);
	qla27xx_read_window(ISPREG(vha), addr, offset, count, width, buf, len);

	return qla27xx_next_entry(ent);
}

/* Entry t259: bank-select, then write one register (window type 2). */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t259(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong addr = le32_to_cpu(ent->t259.base_addr);
	uint banksel = ent->t259.banksel_offset;
	ulong bank = le32_to_cpu(ent->t259.bank);
	uint offset = ent->t259.pci_offset;
	ulong data = le32_to_cpu(ent->t259.write_data);

	ql_dbg(ql_dbg_misc, vha, 0xd203,
	    "%s: wrio t2 [%lx]\n", __func__, *len);
	qla27xx_write_reg(ISPREG(vha), IOBASE(vha), addr, buf);
	qla27xx_write_reg(ISPREG(vha), banksel, bank, buf);
	qla27xx_write_reg(ISPREG(vha), offset, data, buf);

	return qla27xx_next_entry(ent);
}

/* Entry t260: capture a single PCI register (offset + value pair). */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t260(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint offset = ent->t260.pci_offset;

	ql_dbg(ql_dbg_misc, vha, 0xd204,
	    "%s: rdpci [%lx]\n", __func__, *len);
	qla27xx_insert32(offset, buf, len);
	qla27xx_read_reg(ISPREG(vha), offset, buf, len);

	return qla27xx_next_entry(ent);
}

/* Entry t261: write a single PCI register. */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t261(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint offset = ent->t261.pci_offset;
	ulong data = le32_to_cpu(ent->t261.write_data);

	ql_dbg(ql_dbg_misc, vha, 0xd205,
	    "%s: wrpci [%lx]\n", __func__, *len);
	qla27xx_write_reg(ISPREG(vha), offset, data, buf);

	return qla27xx_next_entry(ent);
}

/*
 * Entry t262: dump a firmware RAM area.  For the known area types the
 * start/end bounds come from the adapter state and are patched back into
 * the captured entry; unknown areas and unusable ranges are skipped.
 */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint area = ent->t262.ram_area;
	ulong start = le32_to_cpu(ent->t262.start_addr);
	ulong end = le32_to_cpu(ent->t262.end_addr);
	ulong dwords;

	ql_dbg(ql_dbg_misc, vha, 0xd206,
	    "%s: rdram(%x) [%lx]\n", __func__, ent->t262.ram_area, *len);

	if (area == T262_RAM_AREA_CRITICAL_RAM) {
		/* Bounds taken from the template as-is. */
		;
	} else if (area == T262_RAM_AREA_EXTERNAL_RAM) {
		end = vha->hw->fw_memory_size;
		if (buf)
			ent->t262.end_addr = cpu_to_le32(end);
	} else if (area == T262_RAM_AREA_SHARED_RAM) {
		start = vha->hw->fw_shared_ram_start;
		end = vha->hw->fw_shared_ram_end;
		if (buf) {
			ent->t262.start_addr = cpu_to_le32(start);
			ent->t262.end_addr = cpu_to_le32(end);
		}
	} else if (area == T262_RAM_AREA_DDR_RAM) {
		start = vha->hw->fw_ddr_ram_start;
		end = vha->hw->fw_ddr_ram_end;
		if (buf) {
			ent->t262.start_addr = cpu_to_le32(start);
			ent->t262.end_addr = cpu_to_le32(end);
		}
	} else if (area == T262_RAM_AREA_MISC) {
		if (buf) {
			ent->t262.start_addr = cpu_to_le32(start);
			ent->t262.end_addr = cpu_to_le32(end);
		}
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd022,
		    "%s: unknown area %x\n", __func__, area);
		qla27xx_skip_entry(ent, buf);
		goto done;
	}

	if (end < start || start == 0 || end == 0) {
		ql_dbg(ql_dbg_misc, vha, 0xd023,
		    "%s: unusable range (start=%lx end=%lx)\n",
		    __func__, start, end);
		qla27xx_skip_entry(ent, buf);
		goto done;
	}

	/* Inclusive range, counted in dwords. */
	dwords = end - start + 1;
	if (buf) {
		buf += *len;
		qla24xx_dump_ram(vha->hw, start, buf, dwords, &buf);
	}
	*len += dwords * sizeof(uint32_t);
done:
	return qla27xx_next_entry(ent);
}

/*
 * Entry t263: dump the rings of the requested queue type (request,
 * response, or ATIO).  Each captured queue is prefixed with its id and
 * length; during a sizing pass unallocated queues are sized at their
 * default entry counts.
 */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint type = ent->t263.queue_type;
	uint count = 0;
	uint i;
	uint length;

	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd207,
	    "%s: getq(%x) [%lx]\n", __func__, type, *len);
	if (type == T263_QUEUE_TYPE_REQ) {
		for (i = 0; i < vha->hw->max_req_queues; i++) {
			struct req_que *req = vha->hw->req_q_map[i];

			if (req || !buf) {
				length = req ?
				    req->length : REQUEST_ENTRY_CNT_24XX;
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(length, buf, len);
				qla27xx_insertbuf(req ? req->ring : NULL,
				    length * sizeof(*req->ring), buf, len);
				count++;
			}
		}
	} else if (type == T263_QUEUE_TYPE_RSP) {
		for (i = 0; i < vha->hw->max_rsp_queues; i++) {
			struct rsp_que *rsp = vha->hw->rsp_q_map[i];

			if (rsp || !buf) {
				length = rsp ?
				    rsp->length : RESPONSE_ENTRY_CNT_MQ;
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(length, buf, len);
				qla27xx_insertbuf(rsp ? rsp->ring : NULL,
				    length * sizeof(*rsp->ring), buf, len);
				count++;
			}
		}
	} else if (QLA_TGT_MODE_ENABLED() &&
	    ent->t263.queue_type == T263_QUEUE_TYPE_ATIO) {
		struct qla_hw_data *ha = vha->hw;
		struct atio *atr = ha->tgt.atio_ring;

		if (atr || !buf) {
			length = ha->tgt.atio_q_length;
			qla27xx_insert16(0, buf, len);
			qla27xx_insert16(length, buf, len);
			qla27xx_insertbuf(atr, length * sizeof(*atr), buf, len);
			count++;
		}
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd026,
		    "%s: unknown queue %x\n", __func__, type);
		qla27xx_skip_entry(ent, buf);
	}

	if (buf) {
		/* Record how many queues were captured; skip if none. */
		if (count)
			ent->t263.num_queues = count;
		else
			qla27xx_skip_entry(ent, buf);
	}

	return qla27xx_next_entry(ent);
}

/*
 * Entry t264: capture the FCE trace buffer together with the mailbox
 * values that enabled it; skipped when no FCE buffer was allocated.
 */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t264(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd208,
	    "%s: getfce [%lx]\n", __func__, *len);
	if (vha->hw->fce) {
		if (buf) {
			ent->t264.fce_trace_size = FCE_SIZE;
			ent->t264.write_pointer = vha->hw->fce_wr;
			ent->t264.base_pointer = vha->hw->fce_dma;
			ent->t264.fce_enable_mb0 = vha->hw->fce_mb[0];
			ent->t264.fce_enable_mb2 = vha->hw->fce_mb[2];
			ent->t264.fce_enable_mb3 = vha->hw->fce_mb[3];
			ent->t264.fce_enable_mb4 = vha->hw->fce_mb[4];
			ent->t264.fce_enable_mb5 = vha->hw->fce_mb[5];
			ent->t264.fce_enable_mb6 = vha->hw->fce_mb[6];
		}
		qla27xx_insertbuf(vha->hw->fce, FCE_SIZE, buf, len);
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd027,
		    "%s: missing fce\n", __func__);
		qla27xx_skip_entry(ent, buf);
	}

	return qla27xx_next_entry(ent);
}

/* Entry t265: pause the RISC — only during an actual capture pass. */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t265(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd209,
	    "%s: pause risc [%lx]\n", __func__, *len);
	if (buf)
		qla24xx_pause_risc(ISPREG(vha), vha->hw);

	return qla27xx_next_entry(ent);
}

/* Entry t266: soft-reset the RISC — only during an actual capture pass. */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t266(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd20a,
	    "%s: reset risc [%lx]\n", __func__, *len);
	if (buf)
		WARN_ON_ONCE(qla24xx_soft_reset(vha->hw) != QLA_SUCCESS);

	return qla27xx_next_entry(ent);
}

/* Entry t267: disable interrupts by writing the given register. */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t267(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint offset = ent->t267.pci_offset;
	ulong data = le32_to_cpu(ent->t267.data);

	ql_dbg(ql_dbg_misc, vha, 0xd20b,
	    "%s: dis intr [%lx]\n", __func__, *len);
	qla27xx_write_reg(ISPREG(vha), offset, data, buf);

	return qla27xx_next_entry(ent);
}

/*
 * Entry t268: capture a host buffer selected by buf_type (extended
 * trace, exchange-offload, or extended-login buffer).  The buffer size
 * and DMA address are patched back into the captured entry; missing
 * buffers and unimplemented mirror types are skipped.
 */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd20c,
	    "%s: gethb(%x) [%lx]\n", __func__, ent->t268.buf_type, *len);
	switch (ent->t268.buf_type) {
	case T268_BUF_TYPE_EXTD_TRACE:
		if (vha->hw->eft) {
			if (buf) {
				ent->t268.buf_size = EFT_SIZE;
				ent->t268.start_addr = vha->hw->eft_dma;
			}
			qla27xx_insertbuf(vha->hw->eft, EFT_SIZE, buf, len);
		} else {
			ql_dbg(ql_dbg_misc, vha, 0xd028,
			    "%s: missing eft\n", __func__);
			qla27xx_skip_entry(ent, buf);
		}
		break;
	case T268_BUF_TYPE_EXCH_BUFOFF:
		if (vha->hw->exchoffld_buf) {
			if (buf) {
				ent->t268.buf_size = vha->hw->exchoffld_size;
				ent->t268.start_addr =
					vha->hw->exchoffld_buf_dma;
			}
			qla27xx_insertbuf(vha->hw->exchoffld_buf,
			    vha->hw->exchoffld_size, buf, len);
		} else {
			ql_dbg(ql_dbg_misc, vha, 0xd028,
			    "%s: missing exch offld\n", __func__);
			qla27xx_skip_entry(ent, buf);
		}
		break;
	case T268_BUF_TYPE_EXTD_LOGIN:
		if (vha->hw->exlogin_buf) {
			if (buf) {
				ent->t268.buf_size = vha->hw->exlogin_size;
				ent->t268.start_addr =
					vha->hw->exlogin_buf_dma;
			}
			qla27xx_insertbuf(vha->hw->exlogin_buf,
			    vha->hw->exlogin_size, buf, len);
		} else {
			ql_dbg(ql_dbg_misc, vha, 0xd028,
			    "%s: missing ext login\n", __func__);
			qla27xx_skip_entry(ent, buf);
		}
		break;

	case T268_BUF_TYPE_REQ_MIRROR:
	case T268_BUF_TYPE_RSP_MIRROR:
		/*
		 * Mirror pointers are not implemented in the
		 * driver, instead shadow pointers are used by
		 * the driver. Skip these entries.
		 */
		qla27xx_skip_entry(ent, buf);
		break;
	default:
		ql_dbg(ql_dbg_async, vha, 0xd02b,
		    "%s: unknown buffer %x\n", __func__, ent->t268.buf_type);
		qla27xx_skip_entry(ent, buf);
		break;
	}

	return qla27xx_next_entry(ent);
}

/* Entry t269: write a fixed 5-dword scratch/marker area into the dump. */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t269(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd20d,
	    "%s: scratch [%lx]\n", __func__, *len);
	qla27xx_insert32(0xaaaaaaaa, buf, len);
	qla27xx_insert32(0xbbbbbbbb, buf, len);
	qla27xx_insert32(0xcccccccc, buf, len);
	qla27xx_insert32(0xdddddddd, buf, len);
	/* Fifth dword records the dump length including itself. */
	qla27xx_insert32(*len + sizeof(uint32_t), buf, len);
	if (buf)
		ent->t269.scratch_size = 5 * sizeof(uint32_t);

	return qla27xx_next_entry(ent);
}

/*
 * Entry t270: read remote registers via the 0x40 window: each address
 * (with bit 31 set) is written to reg 0xc0 and the data read back from
 * reg 0xc4, recorded as address/value pairs.
 */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong addr = le32_to_cpu(ent->t270.addr);
	ulong dwords = le32_to_cpu(ent->t270.count);

	ql_dbg(ql_dbg_misc, vha, 0xd20e,
	    "%s: rdremreg [%lx]\n", __func__, *len);
	qla27xx_write_reg(ISPREG(vha), IOBASE_ADDR, 0x40, buf);
	while (dwords--) {
		qla27xx_write_reg(ISPREG(vha), 0xc0, addr|0x80000000, buf);
		qla27xx_insert32(addr, buf, len);
		qla27xx_read_reg(ISPREG(vha), 0xc4, buf, len);
		addr += sizeof(uint32_t);
	}

	return qla27xx_next_entry(ent);
}

/* Entry t271: write a remote register (data to 0xc4, address to 0xc0). */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t271(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong addr = le32_to_cpu(ent->t271.addr);
	ulong data = le32_to_cpu(ent->t271.data);

	ql_dbg(ql_dbg_misc, vha, 0xd20f,
	    "%s: wrremreg [%lx]\n", __func__, *len);
	qla27xx_write_reg(ISPREG(vha), IOBASE(vha), 0x40, buf);
	qla27xx_write_reg(ISPREG(vha), 0xc4, data, buf);
	qla27xx_write_reg(ISPREG(vha), 0xc0, addr, buf);

	return qla27xx_next_entry(ent);
}

/* Entry t272: dump remote (MPI) RAM via the dedicated dump helper. */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t272(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong dwords = le32_to_cpu(ent->t272.count);
	ulong start = le32_to_cpu(ent->t272.addr);

	ql_dbg(ql_dbg_misc, vha, 0xd210,
	    "%s: rdremram [%lx]\n", __func__, *len);
	if (buf) {
		ql_dbg(ql_dbg_misc, vha, 0xd02c,
		    "%s: @%lx -> (%lx dwords)\n", __func__, start, dwords);
		buf += *len;
		qla27xx_dump_mpi_ram(vha->hw, start, buf, dwords, &buf);
	}
	*len += dwords * sizeof(uint32_t);

	return qla27xx_next_entry(ent);
}

/*
 * Entry t273: dump PCI config space as address/value pairs; a failed
 * config read leaves the all-ones placeholder value in the dump.
 */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t273(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong dwords = le32_to_cpu(ent->t273.count);
	ulong addr = le32_to_cpu(ent->t273.addr);
	uint32_t value;

	ql_dbg(ql_dbg_misc, vha, 0xd211,
	    "%s: pcicfg [%lx]\n", __func__, *len);
	while (dwords--) {
		value = ~0;
		if (pci_read_config_dword(vha->hw->pdev, addr, &value))
			ql_dbg(ql_dbg_misc, vha, 0xd02d,
			    "%s: failed pcicfg read at %lx\n", __func__, addr);
		qla27xx_insert32(addr, buf, len);
		qla27xx_insert32(value, buf, len);
		addr += sizeof(uint32_t);
	}

	return qla27xx_next_entry(ent);
}

/*
 * Entry t274: capture the shadow (in/out) pointers of the requested
 * queue type; each record is queue-id, a count of 1, and the pointer
 * value (0 when the queue or pointer is absent).
 */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong type = ent->t274.queue_type;
	uint count = 0;
	uint i;

	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd212,
	    "%s: getqsh(%lx) [%lx]\n", __func__, type, *len);
	if (type == T274_QUEUE_TYPE_REQ_SHAD) {
		for (i = 0; i < vha->hw->max_req_queues; i++) {
			struct req_que *req = vha->hw->req_q_map[i];

			if (req || !buf) {
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(1, buf, len);
				qla27xx_insert32(req && req->out_ptr ?
				    *req->out_ptr : 0, buf, len);
				count++;
			}
		}
	} else if (type == T274_QUEUE_TYPE_RSP_SHAD) {
		for (i = 0; i < vha->hw->max_rsp_queues; i++) {
			struct rsp_que *rsp = vha->hw->rsp_q_map[i];

			if (rsp || !buf) {
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(1, buf, len);
				qla27xx_insert32(rsp && rsp->in_ptr ?
				    *rsp->in_ptr : 0, buf, len);
				count++;
			}
		}
	} else if (QLA_TGT_MODE_ENABLED() &&
	    ent->t274.queue_type == T274_QUEUE_TYPE_ATIO_SHAD) {
		struct qla_hw_data *ha = vha->hw;
		struct atio *atr = ha->tgt.atio_ring_ptr;

		if (atr || !buf) {
			qla27xx_insert16(0, buf, len);
			qla27xx_insert16(1, buf, len);
			qla27xx_insert32(ha->tgt.atio_q_in ?
			    readl(ha->tgt.atio_q_in) : 0, buf, len);
			count++;
		}
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd02f,
		    "%s: unknown queue %lx\n", __func__, type);
		qla27xx_skip_entry(ent, buf);
	}

	if (buf) {
		/* Record how many shadow pointers were captured. */
		if (count)
			ent->t274.num_queues = count;
		else
			qla27xx_skip_entry(ent, buf);
	}

	return qla27xx_next_entry(ent);
}

/*
 * Entry t275: copy a buffer embedded in the template entry itself into
 * the dump, truncating the declared length if it overruns the entry.
 */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t275(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong offset = offsetof(typeof(*ent), t275.buffer);
	ulong length = le32_to_cpu(ent->t275.length);
	ulong size = le32_to_cpu(ent->hdr.size);
	void *buffer = ent->t275.buffer;

	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd213,
	    "%s: buffer(%lx) [%lx]\n", __func__, length, *len);
	if (!length) {
		ql_dbg(ql_dbg_misc, vha, 0xd020,
		    "%s: buffer zero length\n", __func__);
		qla27xx_skip_entry(ent, buf);
		goto done;
	}
	if (offset + length > size) {
		/* Clamp to the payload actually present in the entry. */
		length = size - offset;
		ql_dbg(ql_dbg_misc, vha, 0xd030,
		    "%s: buffer overflow, truncate [%lx]\n", __func__, length);
		ent->t275.length = cpu_to_le32(length);
	}

	qla27xx_insertbuf(buffer, length, buf, len);
done:
	return qla27xx_next_entry(ent);
}

/*
 * Entry t276: conditional — when the adapter's device-type nibble and
 * port number do not match cond1/cond2, the NEXT entry is skipped and
 * the template's entry count is reduced accordingly.
 */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t276(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd214,
	    "%s: cond [%lx]\n", __func__, *len);

	if (buf) {
		ulong cond1 = le32_to_cpu(ent->t276.cond1);
		ulong cond2 = le32_to_cpu(ent->t276.cond2);
		uint type = vha->hw->pdev->device >> 4 & 0xf;
		uint func = vha->hw->port_no & 0x3;

		if (type != cond1 || func != cond2) {
			struct qla27xx_fwdt_template *tmp = buf;

			tmp->count--;
			ent = qla27xx_next_entry(ent);
			qla27xx_skip_entry(ent, buf);
		}
	}

	return qla27xx_next_entry(ent);
}

/*
 * Entry t277: read a PEP register — write the command register, then
 * capture the data register, prefixed by the command value written.
 */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t277(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong cmd_addr = le32_to_cpu(ent->t277.cmd_addr);
	ulong wr_cmd_data = le32_to_cpu(ent->t277.wr_cmd_data);
	ulong data_addr = le32_to_cpu(ent->t277.data_addr);

	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd215,
	    "%s: rdpep [%lx]\n", __func__, *len);
	qla27xx_insert32(wr_cmd_data, buf, len);
	qla27xx_write_reg(ISPREG(vha), cmd_addr, wr_cmd_data, buf);
	qla27xx_read_reg(ISPREG(vha), data_addr, buf, len);

	return qla27xx_next_entry(ent);
}

/* Entry t278: write a PEP register — data first, then the command. */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t278(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong cmd_addr = le32_to_cpu(ent->t278.cmd_addr);
	ulong wr_cmd_data = le32_to_cpu(ent->t278.wr_cmd_data);
	ulong data_addr = le32_to_cpu(ent->t278.data_addr);
	ulong wr_data = le32_to_cpu(ent->t278.wr_data);

	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd216,
	    "%s: wrpep [%lx]\n", __func__, *len);
	qla27xx_write_reg(ISPREG(vha), data_addr, wr_data, buf);
	qla27xx_write_reg(ISPREG(vha), cmd_addr, wr_cmd_data, buf);

	return qla27xx_next_entry(ent);
}

/* Fallback handler for unrecognized entry types: mark skipped, continue. */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_other(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong type = le32_to_cpu(ent->hdr.type);

	ql_dbg(ql_dbg_misc, vha, 0xd2ff,
	    "%s: other %lx [%lx]\n", __func__, type, *len);
	qla27xx_skip_entry(ent, buf);

	return qla27xx_next_entry(ent);
}

/*
 * Dispatch table mapping entry type to handler.  Kept in ascending
 * type order — qla27xx_find_entry() relies on this — with a -1
 * (max-uint) sentinel so the lookup always terminates.
 */
static struct {
	uint type;
	typeof(qla27xx_fwdt_entry_other)(*call);
} qla27xx_fwdt_entry_call[] = {
	{ ENTRY_TYPE_NOP,		qla27xx_fwdt_entry_t0 },
	{ ENTRY_TYPE_TMP_END,		qla27xx_fwdt_entry_t255 },
	{ ENTRY_TYPE_RD_IOB_T1,		qla27xx_fwdt_entry_t256 },
	{ ENTRY_TYPE_WR_IOB_T1,		qla27xx_fwdt_entry_t257 },
	{ ENTRY_TYPE_RD_IOB_T2,		qla27xx_fwdt_entry_t258 },
	{ ENTRY_TYPE_WR_IOB_T2,		qla27xx_fwdt_entry_t259 },
	{ ENTRY_TYPE_RD_PCI,		qla27xx_fwdt_entry_t260 },
	{ ENTRY_TYPE_WR_PCI,		qla27xx_fwdt_entry_t261 },
	{ ENTRY_TYPE_RD_RAM,		qla27xx_fwdt_entry_t262 },
	{ ENTRY_TYPE_GET_QUEUE,		qla27xx_fwdt_entry_t263 },
	{ ENTRY_TYPE_GET_FCE,		qla27xx_fwdt_entry_t264 },
	{ ENTRY_TYPE_PSE_RISC,		qla27xx_fwdt_entry_t265 },
	{ ENTRY_TYPE_RST_RISC,		qla27xx_fwdt_entry_t266 },
	{ ENTRY_TYPE_DIS_INTR,		qla27xx_fwdt_entry_t267 },
	{ ENTRY_TYPE_GET_HBUF,		qla27xx_fwdt_entry_t268 },
	{ ENTRY_TYPE_SCRATCH,		qla27xx_fwdt_entry_t269 },
	{ ENTRY_TYPE_RDREMREG,		qla27xx_fwdt_entry_t270 },
	{ ENTRY_TYPE_WRREMREG,		qla27xx_fwdt_entry_t271 },
	{ ENTRY_TYPE_RDREMRAM,		qla27xx_fwdt_entry_t272 },
	{ ENTRY_TYPE_PCICFG,		qla27xx_fwdt_entry_t273 },
	{ ENTRY_TYPE_GET_SHADOW,	qla27xx_fwdt_entry_t274 },
	{ ENTRY_TYPE_WRITE_BUF,		qla27xx_fwdt_entry_t275 },
	{ ENTRY_TYPE_CONDITIONAL,	qla27xx_fwdt_entry_t276 },
	{ ENTRY_TYPE_RDPEPREG,		qla27xx_fwdt_entry_t277 },
	{ ENTRY_TYPE_WRPEPREG,		qla27xx_fwdt_entry_t278 },
	{ -1,				qla27xx_fwdt_entry_other }
};

/*
 * Look up the handler for an entry @type by linear scan of the sorted
 * dispatch table; unknown types fall back to qla27xx_fwdt_entry_other.
 */
static inline
typeof(qla27xx_fwdt_entry_call->call)(qla27xx_find_entry(uint type))
{
	typeof(*qla27xx_fwdt_entry_call) *list = qla27xx_fwdt_entry_call;

	while (list->type < type)
		list++;

	if (list->type == type)
		return list->call;
	return qla27xx_fwdt_entry_other;
}

/*
 * Walk every entry of template @tmp, dispatching each to its handler.
 * With @buf NULL this is a sizing pass; otherwise data is captured at
 * *@len into @buf.  The walk stops at the end entry (handler returns
 * NULL) or when the entry count is exhausted.
 */
static void
qla27xx_walk_template(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_template *tmp, void *buf, ulong *len)
{
	struct qla27xx_fwdt_entry *ent = (void *)tmp +
	    le32_to_cpu(tmp->entry_offset);
	ulong type;

	tmp->count = le32_to_cpu(tmp->entry_count);
	ql_dbg(ql_dbg_misc, vha, 0xd01a,
	    "%s: entry count %u\n", __func__, tmp->count);
	while (ent && tmp->count--) {
		type = le32_to_cpu(ent->hdr.type);
		ent = qla27xx_find_entry(type)(vha, ent, buf, len);
		if (!ent)
			break;
	}

	if (tmp->count)
		ql_dbg(ql_dbg_misc, vha, 0xd018,
		    "%s: entry count residual=+%u\n", __func__, tmp->count);

	if (ent)
		ql_dbg(ql_dbg_misc, vha, 0xd019,
		    "%s: missing end entry\n", __func__);
}

/* Stamp the captured template with the current time (in jiffies). */
static void
qla27xx_time_stamp(struct qla27xx_fwdt_template *tmp)
{
	tmp->capture_timestamp = jiffies;
}

/*
 * Record the driver version in the captured template: the version
 * string is parsed as up to six dotted byte components and packed into
 * two dwords; the third dword is a fixed marker value.
 */
static void
qla27xx_driver_info(struct qla27xx_fwdt_template *tmp)
{
	uint8_t v[] = { 0, 0, 0, 0, 0, 0 };

	WARN_ON_ONCE(sscanf(qla2x00_version_str,
	    "%hhu.%hhu.%hhu.%hhu.%hhu.%hhu",
	    v+0, v+1, v+2, v+3, v+4, v+5) != 6);

	tmp->driver_info[0] = v[3] << 24 | v[2] << 16 | v[1] << 8 | v[0];
	tmp->driver_info[1] = v[5] << 8 | v[4];
	tmp->driver_info[2] = 0x12345678;
}

/* Record the running firmware version and attribute words. */
static void
qla27xx_firmware_info(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_template *tmp)
{
	tmp->firmware_version[0] = vha->hw->fw_major_version;
	tmp->firmware_version[1] = vha->hw->fw_minor_version;
	tmp->firmware_version[2] = vha->hw->fw_subminor_version;
	tmp->firmware_version[3] =
	    vha->hw->fw_attributes_h << 16 | vha->hw->fw_attributes;
	tmp->firmware_version[4] =
	    vha->hw->fw_attributes_ext[1] << 16 | vha->hw->fw_attributes_ext[0];
}

/* Fill the captured template header with timestamp/driver/firmware info. */
static void
ql27xx_edit_template(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_template *tmp)
{
	qla27xx_time_stamp(tmp);
	qla27xx_driver_info(tmp);
	qla27xx_firmware_info(vha, tmp);
}

/*
 * Template checksum: 64-bit sum of all little-endian dwords, folded to
 * 32 bits and complemented.  A template containing its own checksum
 * field therefore sums to ~0, making verification a compare against 0.
 */
static inline uint32_t
qla27xx_template_checksum(void *p, ulong size)
{
	__le32 *buf = p;
	uint64_t sum = 0;

	size /= sizeof(*buf);

	for ( ; size--; buf++)
		sum += le32_to_cpu(*buf);

	sum = (sum & 0xffffffff) + (sum >> 32);

	return ~sum;
}

/* True when the template's self-checksum verifies. */
static inline int
qla27xx_verify_template_checksum(struct qla27xx_fwdt_template *tmp)
{
	return qla27xx_template_checksum(tmp, tmp->template_size) == 0;
}

/* True when the template declares the firmware-dump template type. */
static inline int
qla27xx_verify_template_header(struct qla27xx_fwdt_template *tmp)
{
	return le32_to_cpu(tmp->template_type) == TEMPLATE_TYPE_FWDUMP;
}

/*
 * Copy a validated template into the dump buffer, patch in capture
 * metadata, and execute it.  Returns the total number of bytes written
 * (template plus captured data), or 0 for an invalid template.
 */
static ulong
qla27xx_execute_fwdt_template(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_template *tmp, void *buf)
{
	ulong len = 0;

	if (qla27xx_fwdt_template_valid(tmp)) {
		len = tmp->template_size;
		tmp = memcpy(buf, tmp, len);
		ql27xx_edit_template(vha, tmp);
		qla27xx_walk_template(vha, tmp, buf, &len);
	}

	return len;
}

/*
 * Size the dump a template would produce by walking it with a NULL
 * buffer; returns 0 when the template is invalid.
 */
ulong
qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha, void *p)
{
	struct qla27xx_fwdt_template *tmp = p;
	ulong len = 0;

	if (qla27xx_fwdt_template_valid(tmp)) {
		len = tmp->template_size;
		qla27xx_walk_template(vha, tmp, NULL, &len);
	}

	return len;
}

/* Return the size, in bytes, declared by the template header. */
ulong
qla27xx_fwdt_template_size(void *p)
{
	struct qla27xx_fwdt_template *tmp = p;

	return tmp->template_size;
}

/* Validate a template's type and checksum; logs the failing check. */
int
qla27xx_fwdt_template_valid(void *p)
{
	struct qla27xx_fwdt_template *tmp = p;

	if (!qla27xx_verify_template_header(tmp)) {
		ql_log(ql_log_warn, NULL, 0xd01c,
		    "%s: template type %x\n", __func__,
		    le32_to_cpu(tmp->template_type));
		return false;
	}

	if (!qla27xx_verify_template_checksum(tmp)) {
		ql_log(ql_log_warn, NULL, 0xd01d,
		    "%s: failed template checksum\n", __func__);
		return false;
	}

	return true;
}

/*
 * Capture a firmware dump: execute up to two dump templates back to
 * back into the preallocated fw_dump buffer, taking the hardware lock
 * unless the caller already holds it.  A dump is taken at most once
 * (fw_dumped latches) and userspace is notified via a uevent.
 */
void
qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
{
	ulong flags = 0;

#ifndef __CHECKER__
	if (!hardware_locked)
		spin_lock_irqsave(&vha->hw->hardware_lock, flags);
#endif

	if (!vha->hw->fw_dump) {
		ql_log(ql_log_warn, vha, 0xd01e, "-> fwdump no buffer\n");
	} else if (vha->hw->fw_dumped) {
		ql_log(ql_log_warn, vha, 0xd01f,
		    "-> Firmware already dumped (%p) -- ignoring request\n",
		    vha->hw->fw_dump);
	} else {
		struct fwdt *fwdt = vha->hw->fwdt;
		uint j;
		ulong len;
		void *buf = vha->hw->fw_dump;

		for (j = 0; j < 2; j++, fwdt++, buf += len) {
			ql_log(ql_log_warn, vha, 0xd011,
			    "-> fwdt%u running...\n", j);
			if (!fwdt->template) {
				ql_log(ql_log_warn, vha, 0xd012,
				    "-> fwdt%u no template\n", j);
				break;
			}
			len = qla27xx_execute_fwdt_template(vha,
			    fwdt->template, buf);
			if (len != fwdt->dump_size) {
				/* Actual capture differed from sized estimate. */
				ql_log(ql_log_warn, vha, 0xd013,
				    "-> fwdt%u fwdump residual=%+ld\n",
				    j, fwdt->dump_size - len);
			}
		}
		vha->hw->fw_dump_len = buf - (void *)vha->hw->fw_dump;
		vha->hw->fw_dumped = 1;

		ql_log(ql_log_warn, vha, 0xd015,
		    "-> Firmware dump saved to buffer (%lu/%p) <%lx>\n",
		    vha->host_no, vha->hw->fw_dump, vha->hw->fw_dump_cap_flags);
		qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
	}

#ifndef __CHECKER__
	if (!hardware_locked)
		spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
#endif
}