/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_tmpl.h"

/* note default template is in big endian */
static const uint32_t ql27xx_fwdt_default_template[] = {
	0x63000000, 0xa4000000, 0x7c050000, 0x00000000,
	0x30000000, 0x01000000, 0x00000000, 0xc0406eb4,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x04010000, 0x14000000, 0x00000000,
	0x02000000, 0x44000000, 0x09010000, 0x10000000,
	0x00000000, 0x02000000, 0x01010000, 0x1c000000,
	0x00000000, 0x02000000, 0x00600000, 0x00000000,
	0xc0000000, 0x01010000, 0x1c000000, 0x00000000,
	0x02000000, 0x00600000, 0x00000000, 0xcc000000,
	0x01010000, 0x1c000000, 0x00000000, 0x02000000,
	0x10600000, 0x00000000, 0xd4000000, 0x01010000,
	0x1c000000, 0x00000000, 0x02000000, 0x700f0000,
	0x00000060, 0xf0000000, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x00700000, 0x041000c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x10700000, 0x041000c0, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x40700000, 0x041000c0,
	0x01010000, 0x1c000000, 0x00000000, 0x02000000,
	0x007c0000, 0x01000000, 0xc0000000, 0x00010000,
	0x18000000, 0x00000000, 0x02000000, 0x007c0000,
	0x040300c4, 0x00010000, 0x18000000, 0x00000000,
	0x02000000, 0x007c0000, 0x040100c0, 0x01010000,
	0x1c000000, 0x00000000, 0x02000000, 0x007c0000,
	0x00000000, 0xc0000000, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x007c0000, 0x04200000,
	0x0b010000, 0x18000000, 0x00000000, 0x02000000,
	0x0c000000, 0x00000000, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000000b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000010b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000020b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000030b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000040b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000050b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000060b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000070b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000080b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000090b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x0000a0b0, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x0a000000, 0x040100c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x0a000000, 0x04200080, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x00be0000, 0x041000c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x10be0000, 0x041000c0, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x20be0000, 0x041000c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x30be0000, 0x041000c0, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x00b00000, 0x041000c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x10b00000, 0x041000c0, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x20b00000, 0x041000c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x30b00000, 0x041000c0, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x00300000, 0x041000c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x10300000, 0x041000c0, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x20300000, 0x041000c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x30300000, 0x041000c0, 0x0a010000, 0x10000000,
	0x00000000, 0x02000000, 0x06010000, 0x1c000000,
	0x00000000, 0x02000000, 0x01000000, 0x00000200,
	0xff230200, 0x06010000, 0x1c000000, 0x00000000,
	0x02000000, 0x02000000, 0x00001000, 0x00000000,
	0x07010000, 0x18000000, 0x00000000, 0x02000000,
	0x00000000, 0x01000000, 0x07010000, 0x18000000,
	0x00000000, 0x02000000, 0x00000000, 0x02000000,
	0x07010000, 0x18000000, 0x00000000, 0x02000000,
	0x00000000, 0x03000000, 0x0d010000, 0x14000000,
	0x00000000, 0x02000000, 0x00000000, 0xff000000,
	0x10000000, 0x00000000, 0x00000080,
};

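/*
 * The capture helpers below support a two-pass template walk: when buf is
 * NULL only *len is advanced, so the same walk can be used to size the dump
 * (see qla27xx_fwdt_calculate_dump_size()); when buf is non-NULL the data is
 * also stored at buf + *len, converted to little-endian where applicable.
 */
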
static inline void __iomem *
qla27xx_isp_reg(struct scsi_qla_host *vha)
{
	return &vha->hw->iobase->isp24;
}

static inline void
qla27xx_insert16(uint16_t value, void *buf, ulong *len)
{
	if (buf) {
		buf += *len;
		*(__le16 *)buf = cpu_to_le16(value);
	}
	*len += sizeof(value);
}

static inline void
qla27xx_insert32(uint32_t value, void *buf, ulong *len)
{
	if (buf) {
		buf += *len;
		*(__le32 *)buf = cpu_to_le32(value);
	}
	*len += sizeof(value);
}

static inline void
qla27xx_insertbuf(void *mem, ulong size, void *buf, ulong *len)
{
	ulong cnt = size;

	if (buf && mem) {
		buf += *len;
		while (cnt >= sizeof(uint32_t)) {
			*(__le32 *)buf = cpu_to_le32p(mem);
			buf += sizeof(uint32_t);
			mem += sizeof(uint32_t);
			cnt -= sizeof(uint32_t);
		}
		if (cnt)
			memcpy(buf, mem, cnt);
	}
	*len += size;
}

static inline void
qla27xx_read8(void *window, void *buf, ulong *len)
{
	uint8_t value = ~0;

	if (buf) {
		value = RD_REG_BYTE((__iomem void *)window);
		ql_dbg(ql_dbg_misc, NULL, 0xd011,
		    "%s: -> %x\n", __func__, value);
	}
	qla27xx_insert32(value, buf, len);
}

static inline void
qla27xx_read16(void *window, void *buf, ulong *len)
{
	uint16_t value = ~0;

	if (buf) {
		value = RD_REG_WORD((__iomem void *)window);
		ql_dbg(ql_dbg_misc, NULL, 0xd012,
		    "%s: -> %x\n", __func__, value);
	}
	qla27xx_insert32(value, buf, len);
}

static inline void
qla27xx_read32(void *window, void *buf, ulong *len)
{
	uint32_t value = ~0;

	if (buf) {
		value = RD_REG_DWORD((__iomem void *)window);
		ql_dbg(ql_dbg_misc, NULL, 0xd013,
		    "%s: -> %x\n", __func__, value);
	}
	qla27xx_insert32(value, buf, len);
}

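/*
 * Registers can be captured as bytes, words, or dwords; qla27xx_read_vector()
 * maps the template's width field (1, 2, or anything else for 4) to the
 * matching read helper, and qla27xx_read_window() uses it to capture a run of
 * registers after programming the I/O window base.
 */
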
static inline void (*qla27xx_read_vector(uint width))(void *, void *, ulong *)
{
	return
	    (width == 1) ? qla27xx_read8 :
	    (width == 2) ? qla27xx_read16 :
			   qla27xx_read32;
}

static inline void
qla27xx_read_reg(__iomem struct device_reg_24xx *reg,
	uint offset, void *buf, ulong *len)
{
	void *window = (void *)reg + offset;

	if (buf) {
		ql_dbg(ql_dbg_misc, NULL, 0xd014,
		    "%s: @%x\n", __func__, offset);
	}
	qla27xx_read32(window, buf, len);
}

static inline void
qla27xx_write_reg(__iomem struct device_reg_24xx *reg,
	uint offset, uint32_t data, void *buf)
{
	/* offset is a byte offset; do not scale it by the register block size */
	__iomem void *window = (__iomem void *)reg + offset;

	if (buf) {
		ql_dbg(ql_dbg_misc, NULL, 0xd015,
		    "%s: @%x <- %x\n", __func__, offset, data);
		WRT_REG_DWORD(window, data);
	}
}

static inline void
qla27xx_read_window(__iomem struct device_reg_24xx *reg,
	uint32_t addr, uint offset, uint count, uint width, void *buf,
	ulong *len)
{
	void *window = (void *)reg + offset;
	void (*readn)(void *, void *, ulong *) = qla27xx_read_vector(width);

	if (buf) {
		ql_dbg(ql_dbg_misc, NULL, 0xd016,
		    "%s: base=%x offset=%x count=%x width=%x\n",
		    __func__, addr, offset, count, width);
	}
	qla27xx_write_reg(reg, IOBASE_ADDR, addr, buf);
	while (count--) {
		qla27xx_insert32(addr, buf, len);
		readn(window, buf, len);
		window += width;
		addr++;
	}
}

static inline void
qla27xx_skip_entry(struct qla27xx_fwdt_entry *ent, void *buf)
{
	if (buf)
		ent->hdr.driver_flags |= DRIVER_FLAG_SKIP_ENTRY;
}

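/*
 * Template entry handlers.  Each handler captures one entry type into the
 * dump buffer (or just accounts for its size when buf is NULL) and returns
 * false to continue the walk; only the end-of-template handler returns true.
 * Entries that cannot be captured are flagged with DRIVER_FLAG_SKIP_ENTRY
 * via qla27xx_skip_entry().
 */
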
static int
qla27xx_fwdt_entry_t0(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd100,
	    "%s: nop [%lx]\n", __func__, *len);
	qla27xx_skip_entry(ent, buf);

	return false;
}

static int
qla27xx_fwdt_entry_t255(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd1ff,
	    "%s: end [%lx]\n", __func__, *len);
	qla27xx_skip_entry(ent, buf);

	/* terminate */
	return true;
}

static int
qla27xx_fwdt_entry_t256(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd200,
	    "%s: rdio t1 [%lx]\n", __func__, *len);
	qla27xx_read_window(reg, ent->t256.base_addr, ent->t256.pci_offset,
	    ent->t256.reg_count, ent->t256.reg_width, buf, len);

	return false;
}

static int
qla27xx_fwdt_entry_t257(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd201,
	    "%s: wrio t1 [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, IOBASE_ADDR, ent->t257.base_addr, buf);
	qla27xx_write_reg(reg, ent->t257.pci_offset, ent->t257.write_data, buf);

	return false;
}

static int
qla27xx_fwdt_entry_t258(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd202,
	    "%s: rdio t2 [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, ent->t258.banksel_offset, ent->t258.bank, buf);
	qla27xx_read_window(reg, ent->t258.base_addr, ent->t258.pci_offset,
	    ent->t258.reg_count, ent->t258.reg_width, buf, len);

	return false;
}

static int
qla27xx_fwdt_entry_t259(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd203,
	    "%s: wrio t2 [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, IOBASE_ADDR, ent->t259.base_addr, buf);
	qla27xx_write_reg(reg, ent->t259.banksel_offset, ent->t259.bank, buf);
	qla27xx_write_reg(reg, ent->t259.pci_offset, ent->t259.write_data, buf);

	return false;
}

static int
qla27xx_fwdt_entry_t260(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd204,
	    "%s: rdpci [%lx]\n", __func__, *len);
	qla27xx_insert32(ent->t260.pci_offset, buf, len);
	qla27xx_read_reg(reg, ent->t260.pci_offset, buf, len);

	return false;
}

static int
qla27xx_fwdt_entry_t261(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd205,
	    "%s: wrpci [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, ent->t261.pci_offset, ent->t261.write_data, buf);

	return false;
}

static int
qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong dwords;
	ulong start;
	ulong end;

	ql_dbg(ql_dbg_misc, vha, 0xd206,
	    "%s: rdram(%x) [%lx]\n", __func__, ent->t262.ram_area, *len);
	start = ent->t262.start_addr;
	end = ent->t262.end_addr;

	if (ent->t262.ram_area == T262_RAM_AREA_CRITICAL_RAM) {
		;
	} else if (ent->t262.ram_area == T262_RAM_AREA_EXTERNAL_RAM) {
		end = vha->hw->fw_memory_size;
		if (buf)
			ent->t262.end_addr = end;
	} else if (ent->t262.ram_area == T262_RAM_AREA_SHARED_RAM) {
		start = vha->hw->fw_shared_ram_start;
		end = vha->hw->fw_shared_ram_end;
		if (buf) {
			ent->t262.start_addr = start;
			ent->t262.end_addr = end;
		}
	} else if (ent->t262.ram_area == T262_RAM_AREA_DDR_RAM) {
		ql_dbg(ql_dbg_misc, vha, 0xd021,
		    "%s: unsupported ddr ram\n", __func__);
		qla27xx_skip_entry(ent, buf);
		goto done;
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd022,
		    "%s: unknown area %u\n", __func__, ent->t262.ram_area);
		qla27xx_skip_entry(ent, buf);
		goto done;
	}

	if (end < start || end == 0) {
		ql_dbg(ql_dbg_misc, vha, 0xd023,
		    "%s: unusable range (start=%x end=%x)\n", __func__,
		    ent->t262.start_addr, ent->t262.end_addr);
		qla27xx_skip_entry(ent, buf);
		goto done;
	}

	dwords = end - start + 1;
	if (buf) {
		ql_dbg(ql_dbg_misc, vha, 0xd024,
		    "%s: @%lx -> (%lx dwords)\n", __func__, start, dwords);
		buf += *len;
		qla24xx_dump_ram(vha->hw, start, buf, dwords, &buf);
	}
	*len += dwords * sizeof(uint32_t);
done:
	return false;
}

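/*
 * The queue-snapshot entry emits one record per queue: a 16-bit queue id, a
 * 16-bit ring entry count, and the raw ring contents.  During the sizing
 * pass (buf == NULL) unallocated queues are still accounted for using the
 * default ring sizes so the preallocated dump buffer is large enough.
 */
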
static int
qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint count = 0;
	uint i;
	uint length;

	ql_dbg(ql_dbg_misc, vha, 0xd207,
	    "%s: getq(%x) [%lx]\n", __func__, ent->t263.queue_type, *len);
	if (ent->t263.queue_type == T263_QUEUE_TYPE_REQ) {
		for (i = 0; i < vha->hw->max_req_queues; i++) {
			struct req_que *req = vha->hw->req_q_map[i];

			if (req || !buf) {
				length = req ?
				    req->length : REQUEST_ENTRY_CNT_24XX;
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(length, buf, len);
				qla27xx_insertbuf(req ? req->ring : NULL,
				    length * sizeof(*req->ring), buf, len);
				count++;
			}
		}
	} else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) {
		for (i = 0; i < vha->hw->max_rsp_queues; i++) {
			struct rsp_que *rsp = vha->hw->rsp_q_map[i];

			if (rsp || !buf) {
				length = rsp ?
				    rsp->length : RESPONSE_ENTRY_CNT_MQ;
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(length, buf, len);
				qla27xx_insertbuf(rsp ? rsp->ring : NULL,
				    length * sizeof(*rsp->ring), buf, len);
				count++;
			}
		}
	} else if (ent->t263.queue_type == T263_QUEUE_TYPE_ATIO) {
		ql_dbg(ql_dbg_misc, vha, 0xd025,
		    "%s: unsupported atio queue\n", __func__);
		qla27xx_skip_entry(ent, buf);
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd026,
		    "%s: unknown queue %u\n", __func__, ent->t263.queue_type);
		qla27xx_skip_entry(ent, buf);
	}

	if (buf)
		ent->t263.num_queues = count;

	return false;
}

static int
qla27xx_fwdt_entry_t264(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd208,
	    "%s: getfce [%lx]\n", __func__, *len);
	if (vha->hw->fce) {
		if (buf) {
			ent->t264.fce_trace_size = FCE_SIZE;
			ent->t264.write_pointer = vha->hw->fce_wr;
			ent->t264.base_pointer = vha->hw->fce_dma;
			ent->t264.fce_enable_mb0 = vha->hw->fce_mb[0];
			ent->t264.fce_enable_mb2 = vha->hw->fce_mb[2];
			ent->t264.fce_enable_mb3 = vha->hw->fce_mb[3];
			ent->t264.fce_enable_mb4 = vha->hw->fce_mb[4];
			ent->t264.fce_enable_mb5 = vha->hw->fce_mb[5];
			ent->t264.fce_enable_mb6 = vha->hw->fce_mb[6];
		}
		qla27xx_insertbuf(vha->hw->fce, FCE_SIZE, buf, len);
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd027,
		    "%s: missing fce\n", __func__);
		qla27xx_skip_entry(ent, buf);
	}

	return false;
}

static int
qla27xx_fwdt_entry_t265(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd209,
	    "%s: pause risc [%lx]\n", __func__, *len);
	if (buf)
		qla24xx_pause_risc(reg, vha->hw);

	return false;
}

static int
qla27xx_fwdt_entry_t266(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd20a,
	    "%s: reset risc [%lx]\n", __func__, *len);
	if (buf)
		qla24xx_soft_reset(vha->hw);

	return false;
}

static int
qla27xx_fwdt_entry_t267(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd20b,
	    "%s: dis intr [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, ent->t267.pci_offset, ent->t267.data, buf);

	return false;
}

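/*
 * Host buffer capture: only the extended firmware trace buffer (EFT) is
 * supported here; exchange offload and extended login buffers are recognized
 * but skipped.
 */
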
static int
qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd20c,
	    "%s: gethb(%x) [%lx]\n", __func__, ent->t268.buf_type, *len);
	if (ent->t268.buf_type == T268_BUF_TYPE_EXTD_TRACE) {
		if (vha->hw->eft) {
			if (buf) {
				ent->t268.buf_size = EFT_SIZE;
				ent->t268.start_addr = vha->hw->eft_dma;
			}
			qla27xx_insertbuf(vha->hw->eft, EFT_SIZE, buf, len);
		} else {
			ql_dbg(ql_dbg_misc, vha, 0xd028,
			    "%s: missing eft\n", __func__);
			qla27xx_skip_entry(ent, buf);
		}
	} else if (ent->t268.buf_type == T268_BUF_TYPE_EXCH_BUFOFF) {
		ql_dbg(ql_dbg_misc, vha, 0xd029,
		    "%s: unsupported exchange offload buffer\n", __func__);
		qla27xx_skip_entry(ent, buf);
	} else if (ent->t268.buf_type == T268_BUF_TYPE_EXTD_LOGIN) {
		ql_dbg(ql_dbg_misc, vha, 0xd02a,
		    "%s: unsupported extended login buffer\n", __func__);
		qla27xx_skip_entry(ent, buf);
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd02b,
		    "%s: unknown buf %x\n", __func__, ent->t268.buf_type);
		qla27xx_skip_entry(ent, buf);
	}

	return false;
}

static int
qla27xx_fwdt_entry_t269(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd20d,
	    "%s: scratch [%lx]\n", __func__, *len);
	qla27xx_insert32(0xaaaaaaaa, buf, len);
	qla27xx_insert32(0xbbbbbbbb, buf, len);
	qla27xx_insert32(0xcccccccc, buf, len);
	qla27xx_insert32(0xdddddddd, buf, len);
	qla27xx_insert32(*len + sizeof(uint32_t), buf, len);
	if (buf)
		ent->t269.scratch_size = 5 * sizeof(uint32_t);

	return false;
}

static int
qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
	ulong dwords = ent->t270.count;
	ulong addr = ent->t270.addr;

	ql_dbg(ql_dbg_misc, vha, 0xd20e,
	    "%s: rdremreg [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf);
	while (dwords--) {
		qla27xx_write_reg(reg, 0xc0, addr|0x80000000, buf);
		qla27xx_insert32(addr, buf, len);
		qla27xx_read_reg(reg, 0xc4, buf, len);
		addr += sizeof(uint32_t);
	}

	return false;
}

static int
qla27xx_fwdt_entry_t271(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
	ulong addr = ent->t271.addr;
	ulong data = ent->t271.data;

	ql_dbg(ql_dbg_misc, vha, 0xd20f,
	    "%s: wrremreg [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf);
	qla27xx_write_reg(reg, 0xc4, data, buf);
	qla27xx_write_reg(reg, 0xc0, addr, buf);

	return false;
}

static int
qla27xx_fwdt_entry_t272(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong dwords = ent->t272.count;
	ulong start = ent->t272.addr;

	ql_dbg(ql_dbg_misc, vha, 0xd210,
	    "%s: rdremram [%lx]\n", __func__, *len);
	if (buf) {
		ql_dbg(ql_dbg_misc, vha, 0xd02c,
		    "%s: @%lx -> (%lx dwords)\n", __func__, start, dwords);
		buf += *len;
		qla27xx_dump_mpi_ram(vha->hw, start, buf, dwords, &buf);
	}
	*len += dwords * sizeof(uint32_t);

	return false;
}

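/*
 * PCI configuration-space capture: each dword is read with
 * pci_read_config_dword() and stored as an address/value pair; a failed
 * read leaves the value as ~0 and is reported but not treated as fatal.
 */
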
static int
qla27xx_fwdt_entry_t273(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong dwords = ent->t273.count;
	ulong addr = ent->t273.addr;
	uint32_t value;

	ql_dbg(ql_dbg_misc, vha, 0xd211,
	    "%s: pcicfg [%lx]\n", __func__, *len);
	while (dwords--) {
		value = ~0;
		if (pci_read_config_dword(vha->hw->pdev, addr, &value))
			ql_dbg(ql_dbg_misc, vha, 0xd02d,
			    "%s: failed pcicfg read at %lx\n", __func__, addr);
		qla27xx_insert32(addr, buf, len);
		qla27xx_insert32(value, buf, len);
		addr += sizeof(uint32_t);
	}

	return false;
}

static int
qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint count = 0;
	uint i;

	ql_dbg(ql_dbg_misc, vha, 0xd212,
	    "%s: getqsh(%x) [%lx]\n", __func__, ent->t274.queue_type, *len);
	if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) {
		for (i = 0; i < vha->hw->max_req_queues; i++) {
			struct req_que *req = vha->hw->req_q_map[i];

			if (req || !buf) {
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(1, buf, len);
				qla27xx_insert32(req && req->out_ptr ?
				    *req->out_ptr : 0, buf, len);
				count++;
			}
		}
	} else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) {
		for (i = 0; i < vha->hw->max_rsp_queues; i++) {
			struct rsp_que *rsp = vha->hw->rsp_q_map[i];

			if (rsp || !buf) {
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(1, buf, len);
				qla27xx_insert32(rsp && rsp->in_ptr ?
				    *rsp->in_ptr : 0, buf, len);
				count++;
			}
		}
	} else if (ent->t274.queue_type == T274_QUEUE_TYPE_ATIO_SHAD) {
		ql_dbg(ql_dbg_misc, vha, 0xd02e,
		    "%s: unsupported atio queue\n", __func__);
		qla27xx_skip_entry(ent, buf);
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd02f,
		    "%s: unknown queue %u\n", __func__, ent->t274.queue_type);
		qla27xx_skip_entry(ent, buf);
	}

	if (buf)
		ent->t274.num_queues = count;

	if (!count)
		qla27xx_skip_entry(ent, buf);

	return false;
}

static int
qla27xx_fwdt_entry_other(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd2ff,
	    "%s: type %x [%lx]\n", __func__, ent->hdr.entry_type, *len);
	qla27xx_skip_entry(ent, buf);

	return false;
}

struct qla27xx_fwdt_entry_call {
	int type;
	int (*call)(
	    struct scsi_qla_host *,
	    struct qla27xx_fwdt_entry *,
	    void *,
	    ulong *);
};

static struct qla27xx_fwdt_entry_call ql27xx_fwdt_entry_call_list[] = {
	{ ENTRY_TYPE_NOP , qla27xx_fwdt_entry_t0 } ,
	{ ENTRY_TYPE_TMP_END , qla27xx_fwdt_entry_t255 } ,
	{ ENTRY_TYPE_RD_IOB_T1 , qla27xx_fwdt_entry_t256 } ,
	{ ENTRY_TYPE_WR_IOB_T1 , qla27xx_fwdt_entry_t257 } ,
	{ ENTRY_TYPE_RD_IOB_T2 , qla27xx_fwdt_entry_t258 } ,
	{ ENTRY_TYPE_WR_IOB_T2 , qla27xx_fwdt_entry_t259 } ,
	{ ENTRY_TYPE_RD_PCI , qla27xx_fwdt_entry_t260 } ,
	{ ENTRY_TYPE_WR_PCI , qla27xx_fwdt_entry_t261 } ,
	{ ENTRY_TYPE_RD_RAM , qla27xx_fwdt_entry_t262 } ,
	{ ENTRY_TYPE_GET_QUEUE , qla27xx_fwdt_entry_t263 } ,
	{ ENTRY_TYPE_GET_FCE , qla27xx_fwdt_entry_t264 } ,
	{ ENTRY_TYPE_PSE_RISC , qla27xx_fwdt_entry_t265 } ,
	{ ENTRY_TYPE_RST_RISC , qla27xx_fwdt_entry_t266 } ,
	{ ENTRY_TYPE_DIS_INTR , qla27xx_fwdt_entry_t267 } ,
	{ ENTRY_TYPE_GET_HBUF , qla27xx_fwdt_entry_t268 } ,
	{ ENTRY_TYPE_SCRATCH , qla27xx_fwdt_entry_t269 } ,
	{ ENTRY_TYPE_RDREMREG , qla27xx_fwdt_entry_t270 } ,
	{ ENTRY_TYPE_WRREMREG , qla27xx_fwdt_entry_t271 } ,
	{ ENTRY_TYPE_RDREMRAM , qla27xx_fwdt_entry_t272 } ,
	{ ENTRY_TYPE_PCICFG , qla27xx_fwdt_entry_t273 } ,
	{ ENTRY_TYPE_GET_SHADOW , qla27xx_fwdt_entry_t274 } ,
	{ -1 , qla27xx_fwdt_entry_other }
};

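/*
 * Entry dispatch is a linear scan of the call list above; unknown entry
 * types fall through to the terminating { -1, qla27xx_fwdt_entry_other }
 * element, which simply marks the entry as skipped.
 */
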
static inline int (*qla27xx_find_entry(int type))
	(struct scsi_qla_host *, struct qla27xx_fwdt_entry *, void *, ulong *)
{
	struct qla27xx_fwdt_entry_call *list = ql27xx_fwdt_entry_call_list;

	while (list->type != -1 && list->type != type)
		list++;

	return list->call;
}

static inline void *
qla27xx_next_entry(void *p)
{
	struct qla27xx_fwdt_entry *ent = p;

	return p + ent->hdr.entry_size;
}

static void
qla27xx_walk_template(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_template *tmp, void *buf, ulong *len)
{
	struct qla27xx_fwdt_entry *ent = (void *)tmp + tmp->entry_offset;
	ulong count = tmp->entry_count;

	ql_dbg(ql_dbg_misc, vha, 0xd01a,
	    "%s: entry count %lx\n", __func__, count);
	while (count--) {
		if (qla27xx_find_entry(ent->hdr.entry_type)(vha, ent, buf, len))
			break;
		ent = qla27xx_next_entry(ent);
	}
	ql_dbg(ql_dbg_misc, vha, 0xd01b,
	    "%s: len=%lx\n", __func__, *len);
}

static void
qla27xx_time_stamp(struct qla27xx_fwdt_template *tmp)
{
	tmp->capture_timestamp = jiffies;
}

static void
qla27xx_driver_info(struct qla27xx_fwdt_template *tmp)
{
	uint8_t v[] = { 0, 0, 0, 0, 0, 0 };
	int rval = 0;

	rval = sscanf(qla2x00_version_str, "%hhu.%hhu.%hhu.%hhu.%hhu.%hhu",
	    v+0, v+1, v+2, v+3, v+4, v+5);

	tmp->driver_info[0] = v[3] << 24 | v[2] << 16 | v[1] << 8 | v[0];
	tmp->driver_info[1] = v[5] << 8 | v[4];
	tmp->driver_info[2] = 0x12345678;
}

static void
qla27xx_firmware_info(struct qla27xx_fwdt_template *tmp,
	struct scsi_qla_host *vha)
{
	tmp->firmware_version[0] = vha->hw->fw_major_version;
	tmp->firmware_version[1] = vha->hw->fw_minor_version;
	tmp->firmware_version[2] = vha->hw->fw_subminor_version;
	tmp->firmware_version[3] =
	    vha->hw->fw_attributes_h << 16 | vha->hw->fw_attributes;
	tmp->firmware_version[4] =
	    vha->hw->fw_attributes_ext[1] << 16 | vha->hw->fw_attributes_ext[0];
}

static void
ql27xx_edit_template(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_template *tmp)
{
	qla27xx_time_stamp(tmp);
	qla27xx_driver_info(tmp);
	qla27xx_firmware_info(tmp, vha);
}

static inline uint32_t
qla27xx_template_checksum(void *p, ulong size)
{
	uint32_t *buf = p;
	uint64_t sum = 0;

	size /= sizeof(*buf);

	while (size--)
		sum += *buf++;

	sum = (sum & 0xffffffff) + (sum >> 32);

	return ~sum;
}

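/*
 * A template is considered intact when the 32-bit folded sum of all of its
 * dwords is 0xffffffff, i.e. when qla27xx_template_checksum() over the whole
 * template returns zero.
 */
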
static inline int
qla27xx_verify_template_checksum(struct qla27xx_fwdt_template *tmp)
{
	return qla27xx_template_checksum(tmp, tmp->template_size) == 0;
}

static inline int
qla27xx_verify_template_header(struct qla27xx_fwdt_template *tmp)
{
	return tmp->template_type == TEMPLATE_TYPE_FWDUMP;
}

static void
qla27xx_execute_fwdt_template(struct scsi_qla_host *vha)
{
	struct qla27xx_fwdt_template *tmp = vha->hw->fw_dump_template;
	ulong len;

	if (qla27xx_fwdt_template_valid(tmp)) {
		len = tmp->template_size;
		tmp = memcpy(vha->hw->fw_dump, tmp, len);
		ql27xx_edit_template(vha, tmp);
		qla27xx_walk_template(vha, tmp, tmp, &len);
		vha->hw->fw_dump_len = len;
		vha->hw->fw_dumped = 1;
	}
}

ulong
qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha)
{
	struct qla27xx_fwdt_template *tmp = vha->hw->fw_dump_template;
	ulong len = 0;

	if (qla27xx_fwdt_template_valid(tmp)) {
		len = tmp->template_size;
		qla27xx_walk_template(vha, tmp, NULL, &len);
	}

	return len;
}

ulong
qla27xx_fwdt_template_size(void *p)
{
	struct qla27xx_fwdt_template *tmp = p;

	return tmp->template_size;
}

ulong
qla27xx_fwdt_template_default_size(void)
{
	return sizeof(ql27xx_fwdt_default_template);
}

const void *
qla27xx_fwdt_template_default(void)
{
	return ql27xx_fwdt_default_template;
}

int
qla27xx_fwdt_template_valid(void *p)
{
	struct qla27xx_fwdt_template *tmp = p;

	if (!qla27xx_verify_template_header(tmp)) {
		ql_log(ql_log_warn, NULL, 0xd01c,
		    "%s: template type %x\n", __func__, tmp->template_type);
		return false;
	}

	if (!qla27xx_verify_template_checksum(tmp)) {
		ql_log(ql_log_warn, NULL, 0xd01d,
		    "%s: failed template checksum\n", __func__);
		return false;
	}

	return true;
}

void
qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
{
	ulong flags = 0;

	if (!hardware_locked)
		spin_lock_irqsave(&vha->hw->hardware_lock, flags);

	if (!vha->hw->fw_dump)
		ql_log(ql_log_warn, vha, 0xd01e, "fwdump buffer missing.\n");
	else if (!vha->hw->fw_dump_template)
		ql_log(ql_log_warn, vha, 0xd01f, "fwdump template missing.\n");
	else
		qla27xx_execute_fwdt_template(vha);

	if (!hardware_locked)
		spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
}