1 /* 2 * QLogic Fibre Channel HBA Driver 3 * Copyright (c) 2003-2014 QLogic Corporation 4 * 5 * See LICENSE.qla2xxx for copyright and licensing details. 6 */ 7 #include "qla_def.h" 8 #include "qla_target.h" 9 10 #include <linux/kthread.h> 11 #include <linux/vmalloc.h> 12 #include <linux/slab.h> 13 #include <linux/delay.h> 14 15 static int qla24xx_vport_disable(struct fc_vport *, bool); 16 17 /* SYSFS attributes --------------------------------------------------------- */ 18 19 static ssize_t 20 qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj, 21 struct bin_attribute *bin_attr, 22 char *buf, loff_t off, size_t count) 23 { 24 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, 25 struct device, kobj))); 26 struct qla_hw_data *ha = vha->hw; 27 int rval = 0; 28 29 if (!(ha->fw_dump_reading || ha->mctp_dump_reading)) 30 return 0; 31 32 if (IS_P3P_TYPE(ha)) { 33 if (off < ha->md_template_size) { 34 rval = memory_read_from_buffer(buf, count, 35 &off, ha->md_tmplt_hdr, ha->md_template_size); 36 return rval; 37 } 38 off -= ha->md_template_size; 39 rval = memory_read_from_buffer(buf, count, 40 &off, ha->md_dump, ha->md_dump_size); 41 return rval; 42 } else if (ha->mctp_dumped && ha->mctp_dump_reading) 43 return memory_read_from_buffer(buf, count, &off, ha->mctp_dump, 44 MCTP_DUMP_SIZE); 45 else if (ha->fw_dump_reading) 46 return memory_read_from_buffer(buf, count, &off, ha->fw_dump, 47 ha->fw_dump_len); 48 else 49 return 0; 50 } 51 52 static ssize_t 53 qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj, 54 struct bin_attribute *bin_attr, 55 char *buf, loff_t off, size_t count) 56 { 57 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, 58 struct device, kobj))); 59 struct qla_hw_data *ha = vha->hw; 60 int reading; 61 62 if (off != 0) 63 return (0); 64 65 reading = simple_strtol(buf, NULL, 10); 66 switch (reading) { 67 case 0: 68 if (!ha->fw_dump_reading) 69 break; 70 71 ql_log(ql_log_info, vha, 0x705d, 72 "Firmware dump cleared on (%ld).\n", vha->host_no); 73 74 if (IS_P3P_TYPE(ha)) { 75 qla82xx_md_free(vha); 76 qla82xx_md_prep(vha); 77 } 78 ha->fw_dump_reading = 0; 79 ha->fw_dumped = 0; 80 break; 81 case 1: 82 if (ha->fw_dumped && !ha->fw_dump_reading) { 83 ha->fw_dump_reading = 1; 84 85 ql_log(ql_log_info, vha, 0x705e, 86 "Raw firmware dump ready for read on (%ld).\n", 87 vha->host_no); 88 } 89 break; 90 case 2: 91 qla2x00_alloc_fw_dump(vha); 92 break; 93 case 3: 94 if (IS_QLA82XX(ha)) { 95 qla82xx_idc_lock(ha); 96 qla82xx_set_reset_owner(vha); 97 qla82xx_idc_unlock(ha); 98 } else if (IS_QLA8044(ha)) { 99 qla8044_idc_lock(ha); 100 qla82xx_set_reset_owner(vha); 101 qla8044_idc_unlock(ha); 102 } else 103 qla2x00_system_error(vha); 104 break; 105 case 4: 106 if (IS_P3P_TYPE(ha)) { 107 if (ha->md_tmplt_hdr) 108 ql_dbg(ql_dbg_user, vha, 0x705b, 109 "MiniDump supported with this firmware.\n"); 110 else 111 ql_dbg(ql_dbg_user, vha, 0x709d, 112 "MiniDump not supported with this firmware.\n"); 113 } 114 break; 115 case 5: 116 if (IS_P3P_TYPE(ha)) 117 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 118 break; 119 case 6: 120 if (!ha->mctp_dump_reading) 121 break; 122 ql_log(ql_log_info, vha, 0x70c1, 123 "MCTP dump cleared on (%ld).\n", vha->host_no); 124 ha->mctp_dump_reading = 0; 125 ha->mctp_dumped = 0; 126 break; 127 case 7: 128 if (ha->mctp_dumped && !ha->mctp_dump_reading) { 129 ha->mctp_dump_reading = 1; 130 ql_log(ql_log_info, vha, 0x70c2, 131 "Raw mctp dump ready for read on (%ld).\n", 132 vha->host_no); 133 } 134 break; 135 } 136 
return count; 137 } 138 139 static struct bin_attribute sysfs_fw_dump_attr = { 140 .attr = { 141 .name = "fw_dump", 142 .mode = S_IRUSR | S_IWUSR, 143 }, 144 .size = 0, 145 .read = qla2x00_sysfs_read_fw_dump, 146 .write = qla2x00_sysfs_write_fw_dump, 147 }; 148 149 static ssize_t 150 qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj, 151 struct bin_attribute *bin_attr, 152 char *buf, loff_t off, size_t count) 153 { 154 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, 155 struct device, kobj))); 156 struct qla_hw_data *ha = vha->hw; 157 158 if (!capable(CAP_SYS_ADMIN)) 159 return 0; 160 161 if (IS_NOCACHE_VPD_TYPE(ha)) 162 ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2, 163 ha->nvram_size); 164 return memory_read_from_buffer(buf, count, &off, ha->nvram, 165 ha->nvram_size); 166 } 167 168 static ssize_t 169 qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj, 170 struct bin_attribute *bin_attr, 171 char *buf, loff_t off, size_t count) 172 { 173 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, 174 struct device, kobj))); 175 struct qla_hw_data *ha = vha->hw; 176 uint16_t cnt; 177 178 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size || 179 !ha->isp_ops->write_nvram) 180 return -EINVAL; 181 182 /* Checksum NVRAM. */ 183 if (IS_FWI2_CAPABLE(ha)) { 184 uint32_t *iter; 185 uint32_t chksum; 186 187 iter = (uint32_t *)buf; 188 chksum = 0; 189 for (cnt = 0; cnt < ((count >> 2) - 1); cnt++, iter++) 190 chksum += le32_to_cpu(*iter); 191 chksum = ~chksum + 1; 192 *iter = cpu_to_le32(chksum); 193 } else { 194 uint8_t *iter; 195 uint8_t chksum; 196 197 iter = (uint8_t *)buf; 198 chksum = 0; 199 for (cnt = 0; cnt < count - 1; cnt++) 200 chksum += *iter++; 201 chksum = ~chksum + 1; 202 *iter = chksum; 203 } 204 205 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 206 ql_log(ql_log_warn, vha, 0x705f, 207 "HBA not online, failing NVRAM update.\n"); 208 return -EAGAIN; 209 } 210 211 /* Write NVRAM. */ 212 ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->nvram_base, count); 213 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base, 214 count); 215 216 ql_dbg(ql_dbg_user, vha, 0x7060, 217 "Setting ISP_ABORT_NEEDED\n"); 218 /* NVRAM settings take effect immediately. 
*/ 219 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 220 qla2xxx_wake_dpc(vha); 221 qla2x00_wait_for_chip_reset(vha); 222 223 return count; 224 } 225 226 static struct bin_attribute sysfs_nvram_attr = { 227 .attr = { 228 .name = "nvram", 229 .mode = S_IRUSR | S_IWUSR, 230 }, 231 .size = 512, 232 .read = qla2x00_sysfs_read_nvram, 233 .write = qla2x00_sysfs_write_nvram, 234 }; 235 236 static ssize_t 237 qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj, 238 struct bin_attribute *bin_attr, 239 char *buf, loff_t off, size_t count) 240 { 241 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, 242 struct device, kobj))); 243 struct qla_hw_data *ha = vha->hw; 244 ssize_t rval = 0; 245 246 mutex_lock(&ha->optrom_mutex); 247 248 if (ha->optrom_state != QLA_SREADING) 249 goto out; 250 251 rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer, 252 ha->optrom_region_size); 253 254 out: 255 mutex_unlock(&ha->optrom_mutex); 256 257 return rval; 258 } 259 260 static ssize_t 261 qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj, 262 struct bin_attribute *bin_attr, 263 char *buf, loff_t off, size_t count) 264 { 265 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, 266 struct device, kobj))); 267 struct qla_hw_data *ha = vha->hw; 268 269 mutex_lock(&ha->optrom_mutex); 270 271 if (ha->optrom_state != QLA_SWRITING) { 272 mutex_unlock(&ha->optrom_mutex); 273 return -EINVAL; 274 } 275 if (off > ha->optrom_region_size) { 276 mutex_unlock(&ha->optrom_mutex); 277 return -ERANGE; 278 } 279 if (off + count > ha->optrom_region_size) 280 count = ha->optrom_region_size - off; 281 282 memcpy(&ha->optrom_buffer[off], buf, count); 283 mutex_unlock(&ha->optrom_mutex); 284 285 return count; 286 } 287 288 static struct bin_attribute sysfs_optrom_attr = { 289 .attr = { 290 .name = "optrom", 291 .mode = S_IRUSR | S_IWUSR, 292 }, 293 .size = 0, 294 .read = qla2x00_sysfs_read_optrom, 295 .write = qla2x00_sysfs_write_optrom, 296 }; 297 298 static ssize_t 299 qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, 300 struct bin_attribute *bin_attr, 301 char *buf, loff_t off, size_t count) 302 { 303 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, 304 struct device, kobj))); 305 struct qla_hw_data *ha = vha->hw; 306 uint32_t start = 0; 307 uint32_t size = ha->optrom_size; 308 int val, valid; 309 ssize_t rval = count; 310 311 if (off) 312 return -EINVAL; 313 314 if (unlikely(pci_channel_offline(ha->pdev))) 315 return -EAGAIN; 316 317 if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1) 318 return -EINVAL; 319 if (start > ha->optrom_size) 320 return -EINVAL; 321 if (size > ha->optrom_size - start) 322 size = ha->optrom_size - start; 323 324 mutex_lock(&ha->optrom_mutex); 325 switch (val) { 326 case 0: 327 if (ha->optrom_state != QLA_SREADING && 328 ha->optrom_state != QLA_SWRITING) { 329 rval = -EINVAL; 330 goto out; 331 } 332 ha->optrom_state = QLA_SWAITING; 333 334 ql_dbg(ql_dbg_user, vha, 0x7061, 335 "Freeing flash region allocation -- 0x%x bytes.\n", 336 ha->optrom_region_size); 337 338 vfree(ha->optrom_buffer); 339 ha->optrom_buffer = NULL; 340 break; 341 case 1: 342 if (ha->optrom_state != QLA_SWAITING) { 343 rval = -EINVAL; 344 goto out; 345 } 346 347 ha->optrom_region_start = start; 348 ha->optrom_region_size = start + size; 349 350 ha->optrom_state = QLA_SREADING; 351 ha->optrom_buffer = vmalloc(ha->optrom_region_size); 352 if (ha->optrom_buffer == NULL) { 353 ql_log(ql_log_warn, vha, 0x7062, 354 
"Unable to allocate memory for optrom retrieval " 355 "(%x).\n", ha->optrom_region_size); 356 357 ha->optrom_state = QLA_SWAITING; 358 rval = -ENOMEM; 359 goto out; 360 } 361 362 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 363 ql_log(ql_log_warn, vha, 0x7063, 364 "HBA not online, failing NVRAM update.\n"); 365 rval = -EAGAIN; 366 goto out; 367 } 368 369 ql_dbg(ql_dbg_user, vha, 0x7064, 370 "Reading flash region -- 0x%x/0x%x.\n", 371 ha->optrom_region_start, ha->optrom_region_size); 372 373 memset(ha->optrom_buffer, 0, ha->optrom_region_size); 374 ha->isp_ops->read_optrom(vha, ha->optrom_buffer, 375 ha->optrom_region_start, ha->optrom_region_size); 376 break; 377 case 2: 378 if (ha->optrom_state != QLA_SWAITING) { 379 rval = -EINVAL; 380 goto out; 381 } 382 383 /* 384 * We need to be more restrictive on which FLASH regions are 385 * allowed to be updated via user-space. Regions accessible 386 * via this method include: 387 * 388 * ISP21xx/ISP22xx/ISP23xx type boards: 389 * 390 * 0x000000 -> 0x020000 -- Boot code. 391 * 392 * ISP2322/ISP24xx type boards: 393 * 394 * 0x000000 -> 0x07ffff -- Boot code. 395 * 0x080000 -> 0x0fffff -- Firmware. 396 * 397 * ISP25xx type boards: 398 * 399 * 0x000000 -> 0x07ffff -- Boot code. 400 * 0x080000 -> 0x0fffff -- Firmware. 401 * 0x120000 -> 0x12ffff -- VPD and HBA parameters. 402 */ 403 valid = 0; 404 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0) 405 valid = 1; 406 else if (start == (ha->flt_region_boot * 4) || 407 start == (ha->flt_region_fw * 4)) 408 valid = 1; 409 else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) 410 || IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) 411 || IS_QLA27XX(ha)) 412 valid = 1; 413 if (!valid) { 414 ql_log(ql_log_warn, vha, 0x7065, 415 "Invalid start region 0x%x/0x%x.\n", start, size); 416 rval = -EINVAL; 417 goto out; 418 } 419 420 ha->optrom_region_start = start; 421 ha->optrom_region_size = start + size; 422 423 ha->optrom_state = QLA_SWRITING; 424 ha->optrom_buffer = vmalloc(ha->optrom_region_size); 425 if (ha->optrom_buffer == NULL) { 426 ql_log(ql_log_warn, vha, 0x7066, 427 "Unable to allocate memory for optrom update " 428 "(%x)\n", ha->optrom_region_size); 429 430 ha->optrom_state = QLA_SWAITING; 431 rval = -ENOMEM; 432 goto out; 433 } 434 435 ql_dbg(ql_dbg_user, vha, 0x7067, 436 "Staging flash region write -- 0x%x/0x%x.\n", 437 ha->optrom_region_start, ha->optrom_region_size); 438 439 memset(ha->optrom_buffer, 0, ha->optrom_region_size); 440 break; 441 case 3: 442 if (ha->optrom_state != QLA_SWRITING) { 443 rval = -EINVAL; 444 goto out; 445 } 446 447 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 448 ql_log(ql_log_warn, vha, 0x7068, 449 "HBA not online, failing flash update.\n"); 450 rval = -EAGAIN; 451 goto out; 452 } 453 454 ql_dbg(ql_dbg_user, vha, 0x7069, 455 "Writing flash region -- 0x%x/0x%x.\n", 456 ha->optrom_region_start, ha->optrom_region_size); 457 458 ha->isp_ops->write_optrom(vha, ha->optrom_buffer, 459 ha->optrom_region_start, ha->optrom_region_size); 460 break; 461 default: 462 rval = -EINVAL; 463 } 464 465 out: 466 mutex_unlock(&ha->optrom_mutex); 467 return rval; 468 } 469 470 static struct bin_attribute sysfs_optrom_ctl_attr = { 471 .attr = { 472 .name = "optrom_ctl", 473 .mode = S_IWUSR, 474 }, 475 .size = 0, 476 .write = qla2x00_sysfs_write_optrom_ctl, 477 }; 478 479 static ssize_t 480 qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj, 481 struct bin_attribute *bin_attr, 482 char *buf, loff_t off, size_t count) 483 { 484 struct scsi_qla_host *vha = 
shost_priv(dev_to_shost(container_of(kobj, 485 struct device, kobj))); 486 struct qla_hw_data *ha = vha->hw; 487 uint32_t faddr; 488 489 if (unlikely(pci_channel_offline(ha->pdev))) 490 return -EAGAIN; 491 492 if (!capable(CAP_SYS_ADMIN)) 493 return -EINVAL; 494 495 if (IS_NOCACHE_VPD_TYPE(ha)) { 496 faddr = ha->flt_region_vpd << 2; 497 498 if (IS_QLA27XX(ha) && 499 qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE) 500 faddr = ha->flt_region_vpd_sec << 2; 501 502 ha->isp_ops->read_optrom(vha, ha->vpd, faddr, 503 ha->vpd_size); 504 } 505 return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size); 506 } 507 508 static ssize_t 509 qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj, 510 struct bin_attribute *bin_attr, 511 char *buf, loff_t off, size_t count) 512 { 513 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, 514 struct device, kobj))); 515 struct qla_hw_data *ha = vha->hw; 516 uint8_t *tmp_data; 517 518 if (unlikely(pci_channel_offline(ha->pdev))) 519 return 0; 520 521 if (qla2x00_chip_is_down(vha)) 522 return 0; 523 524 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size || 525 !ha->isp_ops->write_nvram) 526 return 0; 527 528 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 529 ql_log(ql_log_warn, vha, 0x706a, 530 "HBA not online, failing VPD update.\n"); 531 return -EAGAIN; 532 } 533 534 /* Write NVRAM. */ 535 ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->vpd_base, count); 536 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, ha->vpd_base, count); 537 538 /* Update flash version information for 4Gb & above. */ 539 if (!IS_FWI2_CAPABLE(ha)) 540 return -EINVAL; 541 542 tmp_data = vmalloc(256); 543 if (!tmp_data) { 544 ql_log(ql_log_warn, vha, 0x706b, 545 "Unable to allocate memory for VPD information update.\n"); 546 return -ENOMEM; 547 } 548 ha->isp_ops->get_flash_version(vha, tmp_data); 549 vfree(tmp_data); 550 551 return count; 552 } 553 554 static struct bin_attribute sysfs_vpd_attr = { 555 .attr = { 556 .name = "vpd", 557 .mode = S_IRUSR | S_IWUSR, 558 }, 559 .size = 0, 560 .read = qla2x00_sysfs_read_vpd, 561 .write = qla2x00_sysfs_write_vpd, 562 }; 563 564 static ssize_t 565 qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj, 566 struct bin_attribute *bin_attr, 567 char *buf, loff_t off, size_t count) 568 { 569 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, 570 struct device, kobj))); 571 int rval; 572 573 if (!capable(CAP_SYS_ADMIN) || off != 0 || count < SFP_DEV_SIZE) 574 return 0; 575 576 if (qla2x00_chip_is_down(vha)) 577 return 0; 578 579 rval = qla2x00_read_sfp_dev(vha, buf, count); 580 if (rval) 581 return -EIO; 582 583 return count; 584 } 585 586 static struct bin_attribute sysfs_sfp_attr = { 587 .attr = { 588 .name = "sfp", 589 .mode = S_IRUSR | S_IWUSR, 590 }, 591 .size = SFP_DEV_SIZE, 592 .read = qla2x00_sysfs_read_sfp, 593 }; 594 595 static ssize_t 596 qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj, 597 struct bin_attribute *bin_attr, 598 char *buf, loff_t off, size_t count) 599 { 600 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, 601 struct device, kobj))); 602 struct qla_hw_data *ha = vha->hw; 603 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 604 int type; 605 uint32_t idc_control; 606 uint8_t *tmp_data = NULL; 607 if (off != 0) 608 return -EINVAL; 609 610 type = simple_strtol(buf, NULL, 10); 611 switch (type) { 612 case 0x2025c: 613 ql_log(ql_log_info, vha, 0x706e, 614 "Issuing ISP reset.\n"); 615 
616 scsi_block_requests(vha->host); 617 if (IS_QLA82XX(ha)) { 618 ha->flags.isp82xx_no_md_cap = 1; 619 qla82xx_idc_lock(ha); 620 qla82xx_set_reset_owner(vha); 621 qla82xx_idc_unlock(ha); 622 } else if (IS_QLA8044(ha)) { 623 qla8044_idc_lock(ha); 624 idc_control = qla8044_rd_reg(ha, 625 QLA8044_IDC_DRV_CTRL); 626 qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, 627 (idc_control | GRACEFUL_RESET_BIT1)); 628 qla82xx_set_reset_owner(vha); 629 qla8044_idc_unlock(ha); 630 } else { 631 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 632 qla2xxx_wake_dpc(vha); 633 } 634 qla2x00_wait_for_chip_reset(vha); 635 scsi_unblock_requests(vha->host); 636 break; 637 case 0x2025d: 638 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha)) 639 return -EPERM; 640 641 ql_log(ql_log_info, vha, 0x706f, 642 "Issuing MPI reset.\n"); 643 644 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { 645 uint32_t idc_control; 646 647 qla83xx_idc_lock(vha, 0); 648 __qla83xx_get_idc_control(vha, &idc_control); 649 idc_control |= QLA83XX_IDC_GRACEFUL_RESET; 650 __qla83xx_set_idc_control(vha, idc_control); 651 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, 652 QLA8XXX_DEV_NEED_RESET); 653 qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP); 654 qla83xx_idc_unlock(vha, 0); 655 break; 656 } else { 657 /* Make sure FC side is not in reset */ 658 qla2x00_wait_for_hba_online(vha); 659 660 /* Issue MPI reset */ 661 scsi_block_requests(vha->host); 662 if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS) 663 ql_log(ql_log_warn, vha, 0x7070, 664 "MPI reset failed.\n"); 665 scsi_unblock_requests(vha->host); 666 break; 667 } 668 case 0x2025e: 669 if (!IS_P3P_TYPE(ha) || vha != base_vha) { 670 ql_log(ql_log_info, vha, 0x7071, 671 "FCoE ctx reset not supported.\n"); 672 return -EPERM; 673 } 674 675 ql_log(ql_log_info, vha, 0x7072, 676 "Issuing FCoE ctx reset.\n"); 677 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 678 qla2xxx_wake_dpc(vha); 679 qla2x00_wait_for_fcoe_ctx_reset(vha); 680 break; 681 case 0x2025f: 682 if (!IS_QLA8031(ha)) 683 return -EPERM; 684 ql_log(ql_log_info, vha, 0x70bc, 685 "Disabling Reset by IDC control\n"); 686 qla83xx_idc_lock(vha, 0); 687 __qla83xx_get_idc_control(vha, &idc_control); 688 idc_control |= QLA83XX_IDC_RESET_DISABLED; 689 __qla83xx_set_idc_control(vha, idc_control); 690 qla83xx_idc_unlock(vha, 0); 691 break; 692 case 0x20260: 693 if (!IS_QLA8031(ha)) 694 return -EPERM; 695 ql_log(ql_log_info, vha, 0x70bd, 696 "Enabling Reset by IDC control\n"); 697 qla83xx_idc_lock(vha, 0); 698 __qla83xx_get_idc_control(vha, &idc_control); 699 idc_control &= ~QLA83XX_IDC_RESET_DISABLED; 700 __qla83xx_set_idc_control(vha, idc_control); 701 qla83xx_idc_unlock(vha, 0); 702 break; 703 case 0x20261: 704 ql_dbg(ql_dbg_user, vha, 0x70e0, 705 "Updating cache versions without reset "); 706 707 tmp_data = vmalloc(256); 708 if (!tmp_data) { 709 ql_log(ql_log_warn, vha, 0x70e1, 710 "Unable to allocate memory for VPD information update.\n"); 711 return -ENOMEM; 712 } 713 ha->isp_ops->get_flash_version(vha, tmp_data); 714 vfree(tmp_data); 715 break; 716 } 717 return count; 718 } 719 720 static struct bin_attribute sysfs_reset_attr = { 721 .attr = { 722 .name = "reset", 723 .mode = S_IWUSR, 724 }, 725 .size = 0, 726 .write = qla2x00_sysfs_write_reset, 727 }; 728 729 static ssize_t 730 qla2x00_issue_logo(struct file *filp, struct kobject *kobj, 731 struct bin_attribute *bin_attr, 732 char *buf, loff_t off, size_t count) 733 { 734 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, 735 struct device, kobj))); 736 int type; 737 port_id_t did; 738 739 if 
(!capable(CAP_SYS_ADMIN)) 740 return 0; 741 742 if (unlikely(pci_channel_offline(vha->hw->pdev))) 743 return 0; 744 745 if (qla2x00_chip_is_down(vha)) 746 return 0; 747 748 type = simple_strtol(buf, NULL, 10); 749 750 did.b.domain = (type & 0x00ff0000) >> 16; 751 did.b.area = (type & 0x0000ff00) >> 8; 752 did.b.al_pa = (type & 0x000000ff); 753 754 ql_log(ql_log_info, vha, 0xd04d, "portid=%02x%02x%02x done\n", 755 did.b.domain, did.b.area, did.b.al_pa); 756 757 ql_log(ql_log_info, vha, 0x70e4, "%s: %d\n", __func__, type); 758 759 qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did); 760 return count; 761 } 762 763 static struct bin_attribute sysfs_issue_logo_attr = { 764 .attr = { 765 .name = "issue_logo", 766 .mode = S_IWUSR, 767 }, 768 .size = 0, 769 .write = qla2x00_issue_logo, 770 }; 771 772 static ssize_t 773 qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj, 774 struct bin_attribute *bin_attr, 775 char *buf, loff_t off, size_t count) 776 { 777 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, 778 struct device, kobj))); 779 struct qla_hw_data *ha = vha->hw; 780 int rval; 781 uint16_t actual_size; 782 783 if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE) 784 return 0; 785 786 if (unlikely(pci_channel_offline(ha->pdev))) 787 return 0; 788 789 if (qla2x00_chip_is_down(vha)) 790 return 0; 791 792 if (ha->xgmac_data) 793 goto do_read; 794 795 ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE, 796 &ha->xgmac_data_dma, GFP_KERNEL); 797 if (!ha->xgmac_data) { 798 ql_log(ql_log_warn, vha, 0x7076, 799 "Unable to allocate memory for XGMAC read-data.\n"); 800 return 0; 801 } 802 803 do_read: 804 actual_size = 0; 805 memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE); 806 807 rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma, 808 XGMAC_DATA_SIZE, &actual_size); 809 if (rval != QLA_SUCCESS) { 810 ql_log(ql_log_warn, vha, 0x7077, 811 "Unable to read XGMAC data (%x).\n", rval); 812 count = 0; 813 } 814 815 count = actual_size > count ? 
count: actual_size; 816 memcpy(buf, ha->xgmac_data, count); 817 818 return count; 819 } 820 821 static struct bin_attribute sysfs_xgmac_stats_attr = { 822 .attr = { 823 .name = "xgmac_stats", 824 .mode = S_IRUSR, 825 }, 826 .size = 0, 827 .read = qla2x00_sysfs_read_xgmac_stats, 828 }; 829 830 static ssize_t 831 qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj, 832 struct bin_attribute *bin_attr, 833 char *buf, loff_t off, size_t count) 834 { 835 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, 836 struct device, kobj))); 837 struct qla_hw_data *ha = vha->hw; 838 int rval; 839 840 if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE) 841 return 0; 842 843 if (ha->dcbx_tlv) 844 goto do_read; 845 846 if (qla2x00_chip_is_down(vha)) 847 return 0; 848 849 ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE, 850 &ha->dcbx_tlv_dma, GFP_KERNEL); 851 if (!ha->dcbx_tlv) { 852 ql_log(ql_log_warn, vha, 0x7078, 853 "Unable to allocate memory for DCBX TLV read-data.\n"); 854 return -ENOMEM; 855 } 856 857 do_read: 858 memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE); 859 860 rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma, 861 DCBX_TLV_DATA_SIZE); 862 if (rval != QLA_SUCCESS) { 863 ql_log(ql_log_warn, vha, 0x7079, 864 "Unable to read DCBX TLV (%x).\n", rval); 865 return -EIO; 866 } 867 868 memcpy(buf, ha->dcbx_tlv, count); 869 870 return count; 871 } 872 873 static struct bin_attribute sysfs_dcbx_tlv_attr = { 874 .attr = { 875 .name = "dcbx_tlv", 876 .mode = S_IRUSR, 877 }, 878 .size = 0, 879 .read = qla2x00_sysfs_read_dcbx_tlv, 880 }; 881 882 static struct sysfs_entry { 883 char *name; 884 struct bin_attribute *attr; 885 int is4GBp_only; 886 } bin_file_entries[] = { 887 { "fw_dump", &sysfs_fw_dump_attr, }, 888 { "nvram", &sysfs_nvram_attr, }, 889 { "optrom", &sysfs_optrom_attr, }, 890 { "optrom_ctl", &sysfs_optrom_ctl_attr, }, 891 { "vpd", &sysfs_vpd_attr, 1 }, 892 { "sfp", &sysfs_sfp_attr, 1 }, 893 { "reset", &sysfs_reset_attr, }, 894 { "issue_logo", &sysfs_issue_logo_attr, }, 895 { "xgmac_stats", &sysfs_xgmac_stats_attr, 3 }, 896 { "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 }, 897 { NULL }, 898 }; 899 900 void 901 qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha) 902 { 903 struct Scsi_Host *host = vha->host; 904 struct sysfs_entry *iter; 905 int ret; 906 907 for (iter = bin_file_entries; iter->name; iter++) { 908 if (iter->is4GBp_only && !IS_FWI2_CAPABLE(vha->hw)) 909 continue; 910 if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw)) 911 continue; 912 if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw))) 913 continue; 914 915 ret = sysfs_create_bin_file(&host->shost_gendev.kobj, 916 iter->attr); 917 if (ret) 918 ql_log(ql_log_warn, vha, 0x00f3, 919 "Unable to create sysfs %s binary attribute (%d).\n", 920 iter->name, ret); 921 else 922 ql_dbg(ql_dbg_init, vha, 0x00f4, 923 "Successfully created sysfs %s binary attribute.\n", 924 iter->name); 925 } 926 } 927 928 void 929 qla2x00_free_sysfs_attr(scsi_qla_host_t *vha, bool stop_beacon) 930 { 931 struct Scsi_Host *host = vha->host; 932 struct sysfs_entry *iter; 933 struct qla_hw_data *ha = vha->hw; 934 935 for (iter = bin_file_entries; iter->name; iter++) { 936 if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha)) 937 continue; 938 if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha)) 939 continue; 940 if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw))) 941 continue; 942 if (iter->is4GBp_only == 0x27 && !IS_QLA27XX(vha->hw)) 943 continue; 944 945 sysfs_remove_bin_file(&host->shost_gendev.kobj, 946 
iter->attr); 947 } 948 949 if (stop_beacon && ha->beacon_blink_led == 1) 950 ha->isp_ops->beacon_off(vha); 951 } 952 953 /* Scsi_Host attributes. */ 954 955 static ssize_t 956 qla2x00_drvr_version_show(struct device *dev, 957 struct device_attribute *attr, char *buf) 958 { 959 return scnprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str); 960 } 961 962 static ssize_t 963 qla2x00_fw_version_show(struct device *dev, 964 struct device_attribute *attr, char *buf) 965 { 966 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 967 struct qla_hw_data *ha = vha->hw; 968 char fw_str[128]; 969 970 return scnprintf(buf, PAGE_SIZE, "%s\n", 971 ha->isp_ops->fw_version_str(vha, fw_str, sizeof(fw_str))); 972 } 973 974 static ssize_t 975 qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr, 976 char *buf) 977 { 978 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 979 struct qla_hw_data *ha = vha->hw; 980 uint32_t sn; 981 982 if (IS_QLAFX00(vha->hw)) { 983 return scnprintf(buf, PAGE_SIZE, "%s\n", 984 vha->hw->mr.serial_num); 985 } else if (IS_FWI2_CAPABLE(ha)) { 986 qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE - 1); 987 return strlen(strcat(buf, "\n")); 988 } 989 990 sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1; 991 return scnprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000, 992 sn % 100000); 993 } 994 995 static ssize_t 996 qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr, 997 char *buf) 998 { 999 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1000 return scnprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device); 1001 } 1002 1003 static ssize_t 1004 qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr, 1005 char *buf) 1006 { 1007 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1008 struct qla_hw_data *ha = vha->hw; 1009 1010 if (IS_QLAFX00(vha->hw)) 1011 return scnprintf(buf, PAGE_SIZE, "%s\n", 1012 vha->hw->mr.hw_version); 1013 1014 return scnprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n", 1015 ha->product_id[0], ha->product_id[1], ha->product_id[2], 1016 ha->product_id[3]); 1017 } 1018 1019 static ssize_t 1020 qla2x00_model_name_show(struct device *dev, struct device_attribute *attr, 1021 char *buf) 1022 { 1023 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1024 1025 return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number); 1026 } 1027 1028 static ssize_t 1029 qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr, 1030 char *buf) 1031 { 1032 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1033 return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_desc); 1034 } 1035 1036 static ssize_t 1037 qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr, 1038 char *buf) 1039 { 1040 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1041 char pci_info[30]; 1042 1043 return scnprintf(buf, PAGE_SIZE, "%s\n", 1044 vha->hw->isp_ops->pci_info_str(vha, pci_info)); 1045 } 1046 1047 static ssize_t 1048 qla2x00_link_state_show(struct device *dev, struct device_attribute *attr, 1049 char *buf) 1050 { 1051 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1052 struct qla_hw_data *ha = vha->hw; 1053 int len = 0; 1054 1055 if (atomic_read(&vha->loop_state) == LOOP_DOWN || 1056 atomic_read(&vha->loop_state) == LOOP_DEAD || 1057 vha->device_flags & DFLG_NO_CABLE) 1058 len = scnprintf(buf, PAGE_SIZE, "Link Down\n"); 1059 else if (atomic_read(&vha->loop_state) != LOOP_READY || 1060 qla2x00_chip_is_down(vha)) 1061 len = scnprintf(buf, 
PAGE_SIZE, "Unknown Link State\n"); 1062 else { 1063 len = scnprintf(buf, PAGE_SIZE, "Link Up - "); 1064 1065 switch (ha->current_topology) { 1066 case ISP_CFG_NL: 1067 len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n"); 1068 break; 1069 case ISP_CFG_FL: 1070 len += scnprintf(buf + len, PAGE_SIZE-len, "FL_Port\n"); 1071 break; 1072 case ISP_CFG_N: 1073 len += scnprintf(buf + len, PAGE_SIZE-len, 1074 "N_Port to N_Port\n"); 1075 break; 1076 case ISP_CFG_F: 1077 len += scnprintf(buf + len, PAGE_SIZE-len, "F_Port\n"); 1078 break; 1079 default: 1080 len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n"); 1081 break; 1082 } 1083 } 1084 return len; 1085 } 1086 1087 static ssize_t 1088 qla2x00_zio_show(struct device *dev, struct device_attribute *attr, 1089 char *buf) 1090 { 1091 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1092 int len = 0; 1093 1094 switch (vha->hw->zio_mode) { 1095 case QLA_ZIO_MODE_6: 1096 len += scnprintf(buf + len, PAGE_SIZE-len, "Mode 6\n"); 1097 break; 1098 case QLA_ZIO_DISABLED: 1099 len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n"); 1100 break; 1101 } 1102 return len; 1103 } 1104 1105 static ssize_t 1106 qla2x00_zio_store(struct device *dev, struct device_attribute *attr, 1107 const char *buf, size_t count) 1108 { 1109 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1110 struct qla_hw_data *ha = vha->hw; 1111 int val = 0; 1112 uint16_t zio_mode; 1113 1114 if (!IS_ZIO_SUPPORTED(ha)) 1115 return -ENOTSUPP; 1116 1117 if (sscanf(buf, "%d", &val) != 1) 1118 return -EINVAL; 1119 1120 if (val) 1121 zio_mode = QLA_ZIO_MODE_6; 1122 else 1123 zio_mode = QLA_ZIO_DISABLED; 1124 1125 /* Update per-hba values and queue a reset. */ 1126 if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) { 1127 ha->zio_mode = zio_mode; 1128 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1129 } 1130 return strlen(buf); 1131 } 1132 1133 static ssize_t 1134 qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr, 1135 char *buf) 1136 { 1137 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1138 1139 return scnprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100); 1140 } 1141 1142 static ssize_t 1143 qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr, 1144 const char *buf, size_t count) 1145 { 1146 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1147 int val = 0; 1148 uint16_t zio_timer; 1149 1150 if (sscanf(buf, "%d", &val) != 1) 1151 return -EINVAL; 1152 if (val > 25500 || val < 100) 1153 return -ERANGE; 1154 1155 zio_timer = (uint16_t)(val / 100); 1156 vha->hw->zio_timer = zio_timer; 1157 1158 return strlen(buf); 1159 } 1160 1161 static ssize_t 1162 qla2x00_beacon_show(struct device *dev, struct device_attribute *attr, 1163 char *buf) 1164 { 1165 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1166 int len = 0; 1167 1168 if (vha->hw->beacon_blink_led) 1169 len += scnprintf(buf + len, PAGE_SIZE-len, "Enabled\n"); 1170 else 1171 len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n"); 1172 return len; 1173 } 1174 1175 static ssize_t 1176 qla2x00_beacon_store(struct device *dev, struct device_attribute *attr, 1177 const char *buf, size_t count) 1178 { 1179 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1180 struct qla_hw_data *ha = vha->hw; 1181 int val = 0; 1182 int rval; 1183 1184 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 1185 return -EPERM; 1186 1187 if (qla2x00_chip_is_down(vha)) { 1188 ql_log(ql_log_warn, vha, 0x707a, 1189 "Abort ISP active -- ignoring beacon request.\n"); 1190 return 
-EBUSY; 1191 } 1192 1193 if (sscanf(buf, "%d", &val) != 1) 1194 return -EINVAL; 1195 1196 if (val) 1197 rval = ha->isp_ops->beacon_on(vha); 1198 else 1199 rval = ha->isp_ops->beacon_off(vha); 1200 1201 if (rval != QLA_SUCCESS) 1202 count = 0; 1203 1204 return count; 1205 } 1206 1207 static ssize_t 1208 qla2x00_optrom_bios_version_show(struct device *dev, 1209 struct device_attribute *attr, char *buf) 1210 { 1211 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1212 struct qla_hw_data *ha = vha->hw; 1213 return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1], 1214 ha->bios_revision[0]); 1215 } 1216 1217 static ssize_t 1218 qla2x00_optrom_efi_version_show(struct device *dev, 1219 struct device_attribute *attr, char *buf) 1220 { 1221 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1222 struct qla_hw_data *ha = vha->hw; 1223 return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1], 1224 ha->efi_revision[0]); 1225 } 1226 1227 static ssize_t 1228 qla2x00_optrom_fcode_version_show(struct device *dev, 1229 struct device_attribute *attr, char *buf) 1230 { 1231 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1232 struct qla_hw_data *ha = vha->hw; 1233 return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1], 1234 ha->fcode_revision[0]); 1235 } 1236 1237 static ssize_t 1238 qla2x00_optrom_fw_version_show(struct device *dev, 1239 struct device_attribute *attr, char *buf) 1240 { 1241 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1242 struct qla_hw_data *ha = vha->hw; 1243 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n", 1244 ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2], 1245 ha->fw_revision[3]); 1246 } 1247 1248 static ssize_t 1249 qla2x00_optrom_gold_fw_version_show(struct device *dev, 1250 struct device_attribute *attr, char *buf) 1251 { 1252 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1253 struct qla_hw_data *ha = vha->hw; 1254 1255 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA27XX(ha)) 1256 return scnprintf(buf, PAGE_SIZE, "\n"); 1257 1258 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n", 1259 ha->gold_fw_version[0], ha->gold_fw_version[1], 1260 ha->gold_fw_version[2], ha->gold_fw_version[3]); 1261 } 1262 1263 static ssize_t 1264 qla2x00_total_isp_aborts_show(struct device *dev, 1265 struct device_attribute *attr, char *buf) 1266 { 1267 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1268 return scnprintf(buf, PAGE_SIZE, "%d\n", 1269 vha->qla_stats.total_isp_aborts); 1270 } 1271 1272 static ssize_t 1273 qla24xx_84xx_fw_version_show(struct device *dev, 1274 struct device_attribute *attr, char *buf) 1275 { 1276 int rval = QLA_SUCCESS; 1277 uint16_t status[2] = {0, 0}; 1278 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1279 struct qla_hw_data *ha = vha->hw; 1280 1281 if (!IS_QLA84XX(ha)) 1282 return scnprintf(buf, PAGE_SIZE, "\n"); 1283 1284 if (ha->cs84xx->op_fw_version == 0) 1285 rval = qla84xx_verify_chip(vha, status); 1286 1287 if ((rval == QLA_SUCCESS) && (status[0] == 0)) 1288 return scnprintf(buf, PAGE_SIZE, "%u\n", 1289 (uint32_t)ha->cs84xx->op_fw_version); 1290 1291 return scnprintf(buf, PAGE_SIZE, "\n"); 1292 } 1293 1294 static ssize_t 1295 qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr, 1296 char *buf) 1297 { 1298 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1299 struct qla_hw_data *ha = vha->hw; 1300 1301 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha) && 1302 !IS_QLA27XX(ha)) 1303 return scnprintf(buf, PAGE_SIZE, 
"\n"); 1304 1305 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n", 1306 ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2], 1307 ha->mpi_capabilities); 1308 } 1309 1310 static ssize_t 1311 qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr, 1312 char *buf) 1313 { 1314 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1315 struct qla_hw_data *ha = vha->hw; 1316 1317 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) 1318 return scnprintf(buf, PAGE_SIZE, "\n"); 1319 1320 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n", 1321 ha->phy_version[0], ha->phy_version[1], ha->phy_version[2]); 1322 } 1323 1324 static ssize_t 1325 qla2x00_flash_block_size_show(struct device *dev, 1326 struct device_attribute *attr, char *buf) 1327 { 1328 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1329 struct qla_hw_data *ha = vha->hw; 1330 1331 return scnprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size); 1332 } 1333 1334 static ssize_t 1335 qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr, 1336 char *buf) 1337 { 1338 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1339 1340 if (!IS_CNA_CAPABLE(vha->hw)) 1341 return scnprintf(buf, PAGE_SIZE, "\n"); 1342 1343 return scnprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id); 1344 } 1345 1346 static ssize_t 1347 qla2x00_vn_port_mac_address_show(struct device *dev, 1348 struct device_attribute *attr, char *buf) 1349 { 1350 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1351 1352 if (!IS_CNA_CAPABLE(vha->hw)) 1353 return scnprintf(buf, PAGE_SIZE, "\n"); 1354 1355 return scnprintf(buf, PAGE_SIZE, "%pMR\n", vha->fcoe_vn_port_mac); 1356 } 1357 1358 static ssize_t 1359 qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr, 1360 char *buf) 1361 { 1362 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1363 1364 return scnprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap); 1365 } 1366 1367 static ssize_t 1368 qla2x00_thermal_temp_show(struct device *dev, 1369 struct device_attribute *attr, char *buf) 1370 { 1371 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1372 uint16_t temp = 0; 1373 1374 if (qla2x00_chip_is_down(vha)) { 1375 ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n"); 1376 goto done; 1377 } 1378 1379 if (vha->hw->flags.eeh_busy) { 1380 ql_log(ql_log_warn, vha, 0x70dd, "PCI EEH busy.\n"); 1381 goto done; 1382 } 1383 1384 if (qla2x00_get_thermal_temp(vha, &temp) == QLA_SUCCESS) 1385 return scnprintf(buf, PAGE_SIZE, "%d\n", temp); 1386 1387 done: 1388 return scnprintf(buf, PAGE_SIZE, "\n"); 1389 } 1390 1391 static ssize_t 1392 qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr, 1393 char *buf) 1394 { 1395 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1396 int rval = QLA_FUNCTION_FAILED; 1397 uint16_t state[6]; 1398 uint32_t pstate; 1399 1400 if (IS_QLAFX00(vha->hw)) { 1401 pstate = qlafx00_fw_state_show(dev, attr, buf); 1402 return scnprintf(buf, PAGE_SIZE, "0x%x\n", pstate); 1403 } 1404 1405 if (qla2x00_chip_is_down(vha)) 1406 ql_log(ql_log_warn, vha, 0x707c, 1407 "ISP reset active.\n"); 1408 else if (!vha->hw->flags.eeh_busy) 1409 rval = qla2x00_get_firmware_state(vha, state); 1410 if (rval != QLA_SUCCESS) 1411 memset(state, -1, sizeof(state)); 1412 1413 return scnprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", 1414 state[0], state[1], state[2], state[3], state[4], state[5]); 1415 } 1416 1417 static ssize_t 1418 qla2x00_diag_requests_show(struct device *dev, 1419 struct device_attribute *attr, char *buf) 
1420 { 1421 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1422 1423 if (!IS_BIDI_CAPABLE(vha->hw)) 1424 return scnprintf(buf, PAGE_SIZE, "\n"); 1425 1426 return scnprintf(buf, PAGE_SIZE, "%llu\n", vha->bidi_stats.io_count); 1427 } 1428 1429 static ssize_t 1430 qla2x00_diag_megabytes_show(struct device *dev, 1431 struct device_attribute *attr, char *buf) 1432 { 1433 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1434 1435 if (!IS_BIDI_CAPABLE(vha->hw)) 1436 return scnprintf(buf, PAGE_SIZE, "\n"); 1437 1438 return scnprintf(buf, PAGE_SIZE, "%llu\n", 1439 vha->bidi_stats.transfer_bytes >> 20); 1440 } 1441 1442 static ssize_t 1443 qla2x00_fw_dump_size_show(struct device *dev, struct device_attribute *attr, 1444 char *buf) 1445 { 1446 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1447 struct qla_hw_data *ha = vha->hw; 1448 uint32_t size; 1449 1450 if (!ha->fw_dumped) 1451 size = 0; 1452 else if (IS_P3P_TYPE(ha)) 1453 size = ha->md_template_size + ha->md_dump_size; 1454 else 1455 size = ha->fw_dump_len; 1456 1457 return scnprintf(buf, PAGE_SIZE, "%d\n", size); 1458 } 1459 1460 static ssize_t 1461 qla2x00_allow_cna_fw_dump_show(struct device *dev, 1462 struct device_attribute *attr, char *buf) 1463 { 1464 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1465 1466 if (!IS_P3P_TYPE(vha->hw)) 1467 return scnprintf(buf, PAGE_SIZE, "\n"); 1468 else 1469 return scnprintf(buf, PAGE_SIZE, "%s\n", 1470 vha->hw->allow_cna_fw_dump ? "true" : "false"); 1471 } 1472 1473 static ssize_t 1474 qla2x00_allow_cna_fw_dump_store(struct device *dev, 1475 struct device_attribute *attr, const char *buf, size_t count) 1476 { 1477 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1478 int val = 0; 1479 1480 if (!IS_P3P_TYPE(vha->hw)) 1481 return -EINVAL; 1482 1483 if (sscanf(buf, "%d", &val) != 1) 1484 return -EINVAL; 1485 1486 vha->hw->allow_cna_fw_dump = val != 0; 1487 1488 return strlen(buf); 1489 } 1490 1491 static ssize_t 1492 qla2x00_pep_version_show(struct device *dev, struct device_attribute *attr, 1493 char *buf) 1494 { 1495 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1496 struct qla_hw_data *ha = vha->hw; 1497 1498 if (!IS_QLA27XX(ha)) 1499 return scnprintf(buf, PAGE_SIZE, "\n"); 1500 1501 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n", 1502 ha->pep_version[0], ha->pep_version[1], ha->pep_version[2]); 1503 } 1504 1505 static ssize_t 1506 qla2x00_min_link_speed_show(struct device *dev, struct device_attribute *attr, 1507 char *buf) 1508 { 1509 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1510 struct qla_hw_data *ha = vha->hw; 1511 1512 if (!IS_QLA27XX(ha)) 1513 return scnprintf(buf, PAGE_SIZE, "\n"); 1514 1515 return scnprintf(buf, PAGE_SIZE, "%s\n", 1516 ha->min_link_speed == 5 ? "32Gps" : 1517 ha->min_link_speed == 4 ? "16Gps" : 1518 ha->min_link_speed == 3 ? "8Gps" : 1519 ha->min_link_speed == 2 ? "4Gps" : 1520 ha->min_link_speed != 0 ? "unknown" : ""); 1521 } 1522 1523 static ssize_t 1524 qla2x00_max_speed_sup_show(struct device *dev, struct device_attribute *attr, 1525 char *buf) 1526 { 1527 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1528 struct qla_hw_data *ha = vha->hw; 1529 1530 if (!IS_QLA27XX(ha)) 1531 return scnprintf(buf, PAGE_SIZE, "\n"); 1532 1533 return scnprintf(buf, PAGE_SIZE, "%s\n", 1534 ha->max_speed_sup ? 
"32Gps" : "16Gps"); 1535 } 1536 1537 static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL); 1538 static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL); 1539 static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL); 1540 static DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL); 1541 static DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL); 1542 static DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL); 1543 static DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL); 1544 static DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL); 1545 static DEVICE_ATTR(link_state, S_IRUGO, qla2x00_link_state_show, NULL); 1546 static DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show, qla2x00_zio_store); 1547 static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show, 1548 qla2x00_zio_timer_store); 1549 static DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show, 1550 qla2x00_beacon_store); 1551 static DEVICE_ATTR(optrom_bios_version, S_IRUGO, 1552 qla2x00_optrom_bios_version_show, NULL); 1553 static DEVICE_ATTR(optrom_efi_version, S_IRUGO, 1554 qla2x00_optrom_efi_version_show, NULL); 1555 static DEVICE_ATTR(optrom_fcode_version, S_IRUGO, 1556 qla2x00_optrom_fcode_version_show, NULL); 1557 static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show, 1558 NULL); 1559 static DEVICE_ATTR(optrom_gold_fw_version, S_IRUGO, 1560 qla2x00_optrom_gold_fw_version_show, NULL); 1561 static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show, 1562 NULL); 1563 static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show, 1564 NULL); 1565 static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL); 1566 static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL); 1567 static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show, 1568 NULL); 1569 static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL); 1570 static DEVICE_ATTR(vn_port_mac_address, S_IRUGO, 1571 qla2x00_vn_port_mac_address_show, NULL); 1572 static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL); 1573 static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL); 1574 static DEVICE_ATTR(thermal_temp, S_IRUGO, qla2x00_thermal_temp_show, NULL); 1575 static DEVICE_ATTR(diag_requests, S_IRUGO, qla2x00_diag_requests_show, NULL); 1576 static DEVICE_ATTR(diag_megabytes, S_IRUGO, qla2x00_diag_megabytes_show, NULL); 1577 static DEVICE_ATTR(fw_dump_size, S_IRUGO, qla2x00_fw_dump_size_show, NULL); 1578 static DEVICE_ATTR(allow_cna_fw_dump, S_IRUGO | S_IWUSR, 1579 qla2x00_allow_cna_fw_dump_show, 1580 qla2x00_allow_cna_fw_dump_store); 1581 static DEVICE_ATTR(pep_version, S_IRUGO, qla2x00_pep_version_show, NULL); 1582 static DEVICE_ATTR(min_link_speed, S_IRUGO, qla2x00_min_link_speed_show, NULL); 1583 static DEVICE_ATTR(max_speed_sup, S_IRUGO, qla2x00_max_speed_sup_show, NULL); 1584 1585 struct device_attribute *qla2x00_host_attrs[] = { 1586 &dev_attr_driver_version, 1587 &dev_attr_fw_version, 1588 &dev_attr_serial_num, 1589 &dev_attr_isp_name, 1590 &dev_attr_isp_id, 1591 &dev_attr_model_name, 1592 &dev_attr_model_desc, 1593 &dev_attr_pci_info, 1594 &dev_attr_link_state, 1595 &dev_attr_zio, 1596 &dev_attr_zio_timer, 1597 &dev_attr_beacon, 1598 &dev_attr_optrom_bios_version, 1599 &dev_attr_optrom_efi_version, 1600 &dev_attr_optrom_fcode_version, 1601 &dev_attr_optrom_fw_version, 1602 &dev_attr_84xx_fw_version, 1603 
&dev_attr_total_isp_aborts, 1604 &dev_attr_mpi_version, 1605 &dev_attr_phy_version, 1606 &dev_attr_flash_block_size, 1607 &dev_attr_vlan_id, 1608 &dev_attr_vn_port_mac_address, 1609 &dev_attr_fabric_param, 1610 &dev_attr_fw_state, 1611 &dev_attr_optrom_gold_fw_version, 1612 &dev_attr_thermal_temp, 1613 &dev_attr_diag_requests, 1614 &dev_attr_diag_megabytes, 1615 &dev_attr_fw_dump_size, 1616 &dev_attr_allow_cna_fw_dump, 1617 &dev_attr_pep_version, 1618 &dev_attr_min_link_speed, 1619 &dev_attr_max_speed_sup, 1620 NULL, 1621 }; 1622 1623 /* Host attributes. */ 1624 1625 static void 1626 qla2x00_get_host_port_id(struct Scsi_Host *shost) 1627 { 1628 scsi_qla_host_t *vha = shost_priv(shost); 1629 1630 fc_host_port_id(shost) = vha->d_id.b.domain << 16 | 1631 vha->d_id.b.area << 8 | vha->d_id.b.al_pa; 1632 } 1633 1634 static void 1635 qla2x00_get_host_speed(struct Scsi_Host *shost) 1636 { 1637 struct qla_hw_data *ha = ((struct scsi_qla_host *) 1638 (shost_priv(shost)))->hw; 1639 u32 speed = FC_PORTSPEED_UNKNOWN; 1640 1641 if (IS_QLAFX00(ha)) { 1642 qlafx00_get_host_speed(shost); 1643 return; 1644 } 1645 1646 switch (ha->link_data_rate) { 1647 case PORT_SPEED_1GB: 1648 speed = FC_PORTSPEED_1GBIT; 1649 break; 1650 case PORT_SPEED_2GB: 1651 speed = FC_PORTSPEED_2GBIT; 1652 break; 1653 case PORT_SPEED_4GB: 1654 speed = FC_PORTSPEED_4GBIT; 1655 break; 1656 case PORT_SPEED_8GB: 1657 speed = FC_PORTSPEED_8GBIT; 1658 break; 1659 case PORT_SPEED_10GB: 1660 speed = FC_PORTSPEED_10GBIT; 1661 break; 1662 case PORT_SPEED_16GB: 1663 speed = FC_PORTSPEED_16GBIT; 1664 break; 1665 case PORT_SPEED_32GB: 1666 speed = FC_PORTSPEED_32GBIT; 1667 break; 1668 } 1669 fc_host_speed(shost) = speed; 1670 } 1671 1672 static void 1673 qla2x00_get_host_port_type(struct Scsi_Host *shost) 1674 { 1675 scsi_qla_host_t *vha = shost_priv(shost); 1676 uint32_t port_type = FC_PORTTYPE_UNKNOWN; 1677 1678 if (vha->vp_idx) { 1679 fc_host_port_type(shost) = FC_PORTTYPE_NPIV; 1680 return; 1681 } 1682 switch (vha->hw->current_topology) { 1683 case ISP_CFG_NL: 1684 port_type = FC_PORTTYPE_LPORT; 1685 break; 1686 case ISP_CFG_FL: 1687 port_type = FC_PORTTYPE_NLPORT; 1688 break; 1689 case ISP_CFG_N: 1690 port_type = FC_PORTTYPE_PTP; 1691 break; 1692 case ISP_CFG_F: 1693 port_type = FC_PORTTYPE_NPORT; 1694 break; 1695 } 1696 fc_host_port_type(shost) = port_type; 1697 } 1698 1699 static void 1700 qla2x00_get_starget_node_name(struct scsi_target *starget) 1701 { 1702 struct Scsi_Host *host = dev_to_shost(starget->dev.parent); 1703 scsi_qla_host_t *vha = shost_priv(host); 1704 fc_port_t *fcport; 1705 u64 node_name = 0; 1706 1707 list_for_each_entry(fcport, &vha->vp_fcports, list) { 1708 if (fcport->rport && 1709 starget->id == fcport->rport->scsi_target_id) { 1710 node_name = wwn_to_u64(fcport->node_name); 1711 break; 1712 } 1713 } 1714 1715 fc_starget_node_name(starget) = node_name; 1716 } 1717 1718 static void 1719 qla2x00_get_starget_port_name(struct scsi_target *starget) 1720 { 1721 struct Scsi_Host *host = dev_to_shost(starget->dev.parent); 1722 scsi_qla_host_t *vha = shost_priv(host); 1723 fc_port_t *fcport; 1724 u64 port_name = 0; 1725 1726 list_for_each_entry(fcport, &vha->vp_fcports, list) { 1727 if (fcport->rport && 1728 starget->id == fcport->rport->scsi_target_id) { 1729 port_name = wwn_to_u64(fcport->port_name); 1730 break; 1731 } 1732 } 1733 1734 fc_starget_port_name(starget) = port_name; 1735 } 1736 1737 static void 1738 qla2x00_get_starget_port_id(struct scsi_target *starget) 1739 { 1740 struct Scsi_Host *host = 
dev_to_shost(starget->dev.parent); 1741 scsi_qla_host_t *vha = shost_priv(host); 1742 fc_port_t *fcport; 1743 uint32_t port_id = ~0U; 1744 1745 list_for_each_entry(fcport, &vha->vp_fcports, list) { 1746 if (fcport->rport && 1747 starget->id == fcport->rport->scsi_target_id) { 1748 port_id = fcport->d_id.b.domain << 16 | 1749 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa; 1750 break; 1751 } 1752 } 1753 1754 fc_starget_port_id(starget) = port_id; 1755 } 1756 1757 static void 1758 qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout) 1759 { 1760 if (timeout) 1761 rport->dev_loss_tmo = timeout; 1762 else 1763 rport->dev_loss_tmo = 1; 1764 } 1765 1766 static void 1767 qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport) 1768 { 1769 struct Scsi_Host *host = rport_to_shost(rport); 1770 fc_port_t *fcport = *(fc_port_t **)rport->dd_data; 1771 unsigned long flags; 1772 1773 if (!fcport) 1774 return; 1775 1776 /* Now that the rport has been deleted, set the fcport state to 1777 FCS_DEVICE_DEAD */ 1778 qla2x00_set_fcport_state(fcport, FCS_DEVICE_DEAD); 1779 1780 /* 1781 * Transport has effectively 'deleted' the rport, clear 1782 * all local references. 1783 */ 1784 spin_lock_irqsave(host->host_lock, flags); 1785 fcport->rport = fcport->drport = NULL; 1786 *((fc_port_t **)rport->dd_data) = NULL; 1787 spin_unlock_irqrestore(host->host_lock, flags); 1788 1789 if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags)) 1790 return; 1791 1792 if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) { 1793 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16); 1794 return; 1795 } 1796 } 1797 1798 static void 1799 qla2x00_terminate_rport_io(struct fc_rport *rport) 1800 { 1801 fc_port_t *fcport = *(fc_port_t **)rport->dd_data; 1802 1803 if (!fcport) 1804 return; 1805 1806 if (test_bit(UNLOADING, &fcport->vha->dpc_flags)) 1807 return; 1808 1809 if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags)) 1810 return; 1811 1812 if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) { 1813 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16); 1814 return; 1815 } 1816 /* 1817 * At this point all fcport's software-states are cleared. Perform any 1818 * final cleanup of firmware resources (PCBs and XCBs). 
1819 */ 1820 if (fcport->loop_id != FC_NO_LOOP_ID) { 1821 if (IS_FWI2_CAPABLE(fcport->vha->hw)) 1822 fcport->vha->hw->isp_ops->fabric_logout(fcport->vha, 1823 fcport->loop_id, fcport->d_id.b.domain, 1824 fcport->d_id.b.area, fcport->d_id.b.al_pa); 1825 else 1826 qla2x00_port_logout(fcport->vha, fcport); 1827 } 1828 } 1829 1830 static int 1831 qla2x00_issue_lip(struct Scsi_Host *shost) 1832 { 1833 scsi_qla_host_t *vha = shost_priv(shost); 1834 1835 if (IS_QLAFX00(vha->hw)) 1836 return 0; 1837 1838 qla2x00_loop_reset(vha); 1839 return 0; 1840 } 1841 1842 static struct fc_host_statistics * 1843 qla2x00_get_fc_host_stats(struct Scsi_Host *shost) 1844 { 1845 scsi_qla_host_t *vha = shost_priv(shost); 1846 struct qla_hw_data *ha = vha->hw; 1847 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 1848 int rval; 1849 struct link_statistics *stats; 1850 dma_addr_t stats_dma; 1851 struct fc_host_statistics *p = &vha->fc_host_stat; 1852 1853 memset(p, -1, sizeof(*p)); 1854 1855 if (IS_QLAFX00(vha->hw)) 1856 goto done; 1857 1858 if (test_bit(UNLOADING, &vha->dpc_flags)) 1859 goto done; 1860 1861 if (unlikely(pci_channel_offline(ha->pdev))) 1862 goto done; 1863 1864 if (qla2x00_chip_is_down(vha)) 1865 goto done; 1866 1867 stats = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*stats), 1868 &stats_dma, GFP_KERNEL); 1869 if (!stats) { 1870 ql_log(ql_log_warn, vha, 0x707d, 1871 "Failed to allocate memory for stats.\n"); 1872 goto done; 1873 } 1874 1875 rval = QLA_FUNCTION_FAILED; 1876 if (IS_FWI2_CAPABLE(ha)) { 1877 rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, 0); 1878 } else if (atomic_read(&base_vha->loop_state) == LOOP_READY && 1879 !ha->dpc_active) { 1880 /* Must be in a 'READY' state for statistics retrieval. */ 1881 rval = qla2x00_get_link_status(base_vha, base_vha->loop_id, 1882 stats, stats_dma); 1883 } 1884 1885 if (rval != QLA_SUCCESS) 1886 goto done_free; 1887 1888 p->link_failure_count = stats->link_fail_cnt; 1889 p->loss_of_sync_count = stats->loss_sync_cnt; 1890 p->loss_of_signal_count = stats->loss_sig_cnt; 1891 p->prim_seq_protocol_err_count = stats->prim_seq_err_cnt; 1892 p->invalid_tx_word_count = stats->inval_xmit_word_cnt; 1893 p->invalid_crc_count = stats->inval_crc_cnt; 1894 if (IS_FWI2_CAPABLE(ha)) { 1895 p->lip_count = stats->lip_cnt; 1896 p->tx_frames = stats->tx_frames; 1897 p->rx_frames = stats->rx_frames; 1898 p->dumped_frames = stats->discarded_frames; 1899 p->nos_count = stats->nos_rcvd; 1900 p->error_frames = 1901 stats->dropped_frames + stats->discarded_frames; 1902 p->rx_words = vha->qla_stats.input_bytes; 1903 p->tx_words = vha->qla_stats.output_bytes; 1904 } 1905 p->fcp_control_requests = vha->qla_stats.control_requests; 1906 p->fcp_input_requests = vha->qla_stats.input_requests; 1907 p->fcp_output_requests = vha->qla_stats.output_requests; 1908 p->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20; 1909 p->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20; 1910 p->seconds_since_last_reset = 1911 get_jiffies_64() - vha->qla_stats.jiffies_at_last_reset; 1912 do_div(p->seconds_since_last_reset, HZ); 1913 1914 done_free: 1915 dma_free_coherent(&ha->pdev->dev, sizeof(struct link_statistics), 1916 stats, stats_dma); 1917 done: 1918 return p; 1919 } 1920 1921 static void 1922 qla2x00_reset_host_stats(struct Scsi_Host *shost) 1923 { 1924 scsi_qla_host_t *vha = shost_priv(shost); 1925 struct qla_hw_data *ha = vha->hw; 1926 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 1927 struct link_statistics *stats; 1928 dma_addr_t stats_dma; 1929 1930 
memset(&vha->qla_stats, 0, sizeof(vha->qla_stats)); 1931 memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat)); 1932 1933 vha->qla_stats.jiffies_at_last_reset = get_jiffies_64(); 1934 1935 if (IS_FWI2_CAPABLE(ha)) { 1936 stats = dma_alloc_coherent(&ha->pdev->dev, 1937 sizeof(*stats), &stats_dma, GFP_KERNEL); 1938 if (!stats) { 1939 ql_log(ql_log_warn, vha, 0x70d7, 1940 "Failed to allocate memory for stats.\n"); 1941 return; 1942 } 1943 1944 /* reset firmware statistics */ 1945 qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0); 1946 1947 dma_free_coherent(&ha->pdev->dev, sizeof(*stats), 1948 stats, stats_dma); 1949 } 1950 } 1951 1952 static void 1953 qla2x00_get_host_symbolic_name(struct Scsi_Host *shost) 1954 { 1955 scsi_qla_host_t *vha = shost_priv(shost); 1956 1957 qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost), 1958 sizeof(fc_host_symbolic_name(shost))); 1959 } 1960 1961 static void 1962 qla2x00_set_host_system_hostname(struct Scsi_Host *shost) 1963 { 1964 scsi_qla_host_t *vha = shost_priv(shost); 1965 1966 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 1967 } 1968 1969 static void 1970 qla2x00_get_host_fabric_name(struct Scsi_Host *shost) 1971 { 1972 scsi_qla_host_t *vha = shost_priv(shost); 1973 uint8_t node_name[WWN_SIZE] = { 0xFF, 0xFF, 0xFF, 0xFF, \ 1974 0xFF, 0xFF, 0xFF, 0xFF}; 1975 u64 fabric_name = wwn_to_u64(node_name); 1976 1977 if (vha->device_flags & SWITCH_FOUND) 1978 fabric_name = wwn_to_u64(vha->fabric_node_name); 1979 1980 fc_host_fabric_name(shost) = fabric_name; 1981 } 1982 1983 static void 1984 qla2x00_get_host_port_state(struct Scsi_Host *shost) 1985 { 1986 scsi_qla_host_t *vha = shost_priv(shost); 1987 struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev); 1988 1989 if (!base_vha->flags.online) { 1990 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; 1991 return; 1992 } 1993 1994 switch (atomic_read(&base_vha->loop_state)) { 1995 case LOOP_UPDATE: 1996 fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS; 1997 break; 1998 case LOOP_DOWN: 1999 if (test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags)) 2000 fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS; 2001 else 2002 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN; 2003 break; 2004 case LOOP_DEAD: 2005 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN; 2006 break; 2007 case LOOP_READY: 2008 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; 2009 break; 2010 default: 2011 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN; 2012 break; 2013 } 2014 } 2015 2016 static int 2017 qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) 2018 { 2019 int ret = 0; 2020 uint8_t qos = 0; 2021 scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost); 2022 scsi_qla_host_t *vha = NULL; 2023 struct qla_hw_data *ha = base_vha->hw; 2024 int cnt; 2025 struct req_que *req = ha->req_q_map[0]; 2026 struct qla_qpair *qpair; 2027 2028 ret = qla24xx_vport_create_req_sanity_check(fc_vport); 2029 if (ret) { 2030 ql_log(ql_log_warn, vha, 0x707e, 2031 "Vport sanity check failed, status %x\n", ret); 2032 return (ret); 2033 } 2034 2035 vha = qla24xx_create_vhost(fc_vport); 2036 if (vha == NULL) { 2037 ql_log(ql_log_warn, vha, 0x707f, "Vport create host failed.\n"); 2038 return FC_VPORT_FAILED; 2039 } 2040 if (disable) { 2041 atomic_set(&vha->vp_state, VP_OFFLINE); 2042 fc_vport_set_state(fc_vport, FC_VPORT_DISABLED); 2043 } else 2044 atomic_set(&vha->vp_state, VP_FAILED); 2045 2046 /* ready to create vport */ 2047 ql_log(ql_log_info, vha, 0x7080, 2048 "VP entry id %d assigned.\n", vha->vp_idx); 2049 2050 /* 
	/* Initialize vport states. */
	atomic_set(&vha->loop_state, LOOP_DOWN);
	vha->vp_err_state = VP_ERR_PORTDWN;
	vha->vp_prev_err_state = VP_ERR_UNKWN;
	/* Check if physical ha port is Up */
	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
	    atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
		/* Don't retry or attempt login of this virtual port */
		ql_dbg(ql_dbg_user, vha, 0x7081,
		    "Vport loop state is not UP.\n");
		atomic_set(&vha->loop_state, LOOP_DEAD);
		if (!disable)
			fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
	}

	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
		if (ha->fw_attributes & BIT_4) {
			int prot = 0, guard;

			vha->flags.difdix_supported = 1;
			ql_dbg(ql_dbg_user, vha, 0x7082,
			    "Registered for DIF/DIX type 1 and 3 protection.\n");
			if (ql2xenabledif == 1)
				prot = SHOST_DIX_TYPE0_PROTECTION;
			scsi_host_set_prot(vha->host,
			    prot | SHOST_DIF_TYPE1_PROTECTION
			    | SHOST_DIF_TYPE2_PROTECTION
			    | SHOST_DIF_TYPE3_PROTECTION
			    | SHOST_DIX_TYPE1_PROTECTION
			    | SHOST_DIX_TYPE2_PROTECTION
			    | SHOST_DIX_TYPE3_PROTECTION);

			guard = SHOST_DIX_GUARD_CRC;

			if (IS_PI_IPGUARD_CAPABLE(ha) &&
			    (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
				guard |= SHOST_DIX_GUARD_IP;

			scsi_host_set_guard(vha->host, guard);
		} else
			vha->flags.difdix_supported = 0;
	}

	if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
	    &ha->pdev->dev)) {
		ql_dbg(ql_dbg_user, vha, 0x7083,
		    "scsi_add_host failure for VP[%d].\n", vha->vp_idx);
		goto vport_create_failed_2;
	}

	/* initialize attributes */
	fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
	fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
	fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
	fc_host_supported_classes(vha->host) =
	    fc_host_supported_classes(base_vha->host);
	fc_host_supported_speeds(vha->host) =
	    fc_host_supported_speeds(base_vha->host);

	qlt_vport_create(vha, ha);
	qla24xx_vport_disable(fc_vport, disable);

	if (!ql2xmqsupport || !ha->npiv_info)
		goto vport_queue;

	/* Create a request queue in QoS mode for the vport */
	for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
		if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
		    && memcmp(ha->npiv_info[cnt].node_name, vha->node_name,
			8) == 0) {
			qos = ha->npiv_info[cnt].q_qos;
			break;
		}
	}

	if (qos) {
		qpair = qla2xxx_create_qpair(vha, qos, vha->vp_idx, true);
		if (!qpair)
			ql_log(ql_log_warn, vha, 0x7084,
			    "Can't create qpair for VP[%d]\n",
			    vha->vp_idx);
		else {
			ql_dbg(ql_dbg_multiq, vha, 0xc001,
			    "Queue pair: %d, QoS: %d created for VP[%d].\n",
			    qpair->id, qos, vha->vp_idx);
			ql_dbg(ql_dbg_user, vha, 0x7085,
			    "Queue pair: %d, QoS: %d created for VP[%d].\n",
			    qpair->id, qos, vha->vp_idx);
			req = qpair->req;
			vha->qpair = qpair;
		}
	}

vport_queue:
	vha->req = req;
	return 0;

vport_create_failed_2:
	qla24xx_disable_vp(vha);
	qla24xx_deallocate_vp_id(vha);
	scsi_host_put(vha->host);
	return FC_VPORT_FAILED;
}

static int
qla24xx_vport_delete(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *vha = fc_vport->dd_data;
	struct qla_hw_data *ha = vha->hw;
	uint16_t id = vha->vp_idx;

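	/*
	 * Wait for any in-flight loop resync or fcport update to finish
	 * before tearing the vport down.
	 */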
	while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
	    test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
		msleep(1000);

	qla24xx_disable_vp(vha);
	qla2x00_wait_for_sess_deletion(vha);

	vha->flags.delete_progress = 1;

	qlt_remove_target(ha, vha);

	fc_remove_host(vha->host);

	scsi_remove_host(vha->host);

	/* Allow the timer to run to drain queued items when removing the vp. */
	qla24xx_deallocate_vp_id(vha);

	if (vha->timer_active) {
		qla2x00_vp_stop_timer(vha);
		ql_dbg(ql_dbg_user, vha, 0x7086,
		    "Timer for the VP[%d] has stopped.\n", vha->vp_idx);
	}

	qla2x00_free_fcports(vha);

	mutex_lock(&ha->vport_lock);
	ha->cur_vport_count--;
	clear_bit(vha->vp_idx, ha->vp_idx_map);
	mutex_unlock(&ha->vport_lock);

	dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
	    vha->gnl.ldma);

	vfree(vha->scan.l);

	if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) {
		if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS)
			ql_log(ql_log_warn, vha, 0x7087,
			    "Queue Pair delete failed.\n");
	}

	ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
	scsi_host_put(vha->host);
	return 0;
}

static int
qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
{
	scsi_qla_host_t *vha = fc_vport->dd_data;

	if (disable)
		qla24xx_disable_vp(vha);
	else
		qla24xx_enable_vp(vha);

	return 0;
}

struct fc_function_template qla2xxx_transport_functions = {

	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,
	.show_host_supported_speeds = 1,

	.get_host_port_id = qla2x00_get_host_port_id,
	.show_host_port_id = 1,
	.get_host_speed = qla2x00_get_host_speed,
	.show_host_speed = 1,
	.get_host_port_type = qla2x00_get_host_port_type,
	.show_host_port_type = 1,
	.get_host_symbolic_name = qla2x00_get_host_symbolic_name,
	.show_host_symbolic_name = 1,
	.set_host_system_hostname = qla2x00_set_host_system_hostname,
	.show_host_system_hostname = 1,
	.get_host_fabric_name = qla2x00_get_host_fabric_name,
	.show_host_fabric_name = 1,
	.get_host_port_state = qla2x00_get_host_port_state,
	.show_host_port_state = 1,

	.dd_fcrport_size = sizeof(struct fc_port *),
	.show_rport_supported_classes = 1,

	.get_starget_node_name = qla2x00_get_starget_node_name,
	.show_starget_node_name = 1,
	.get_starget_port_name = qla2x00_get_starget_port_name,
	.show_starget_port_name = 1,
	.get_starget_port_id = qla2x00_get_starget_port_id,
	.show_starget_port_id = 1,

	.set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
	.show_rport_dev_loss_tmo = 1,

	.issue_fc_host_lip = qla2x00_issue_lip,
	.dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
	.terminate_rport_io = qla2x00_terminate_rport_io,
	.get_fc_host_stats = qla2x00_get_fc_host_stats,
	.reset_fc_host_stats = qla2x00_reset_host_stats,

	.vport_create = qla24xx_vport_create,
	.vport_disable = qla24xx_vport_disable,
	.vport_delete = qla24xx_vport_delete,
	.bsg_request = qla24xx_bsg_request,
	.bsg_timeout = qla24xx_bsg_timeout,
};

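/*
 * Template for NPIV vport hosts: the same callbacks as the physical-port
 * template above, minus the vport create/disable/delete hooks and
 * supported-speeds reporting, which only apply to the physical host.
 */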
struct fc_function_template qla2xxx_transport_vport_functions = {

	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,

	.get_host_port_id = qla2x00_get_host_port_id,
	.show_host_port_id = 1,
	.get_host_speed = qla2x00_get_host_speed,
	.show_host_speed = 1,
	.get_host_port_type = qla2x00_get_host_port_type,
	.show_host_port_type = 1,
	.get_host_symbolic_name = qla2x00_get_host_symbolic_name,
	.show_host_symbolic_name = 1,
	.set_host_system_hostname = qla2x00_set_host_system_hostname,
	.show_host_system_hostname = 1,
	.get_host_fabric_name = qla2x00_get_host_fabric_name,
	.show_host_fabric_name = 1,
	.get_host_port_state = qla2x00_get_host_port_state,
	.show_host_port_state = 1,

	.dd_fcrport_size = sizeof(struct fc_port *),
	.show_rport_supported_classes = 1,

	.get_starget_node_name = qla2x00_get_starget_node_name,
	.show_starget_node_name = 1,
	.get_starget_port_name = qla2x00_get_starget_port_name,
	.show_starget_port_name = 1,
	.get_starget_port_id = qla2x00_get_starget_port_id,
	.show_starget_port_id = 1,

	.set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
	.show_rport_dev_loss_tmo = 1,

	.issue_fc_host_lip = qla2x00_issue_lip,
	.dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
	.terminate_rport_io = qla2x00_terminate_rport_io,
	.get_fc_host_stats = qla2x00_get_fc_host_stats,
	.reset_fc_host_stats = qla2x00_reset_host_stats,

	.bsg_request = qla24xx_bsg_request,
	.bsg_timeout = qla24xx_bsg_timeout,
};

void
qla2x00_init_host_attr(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	u32 speed = FC_PORTSPEED_UNKNOWN;

	fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
	fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
	fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
	fc_host_supported_classes(vha->host) = ha->base_qpair->enable_class_2 ?
	    (FC_COS_CLASS2|FC_COS_CLASS3) : FC_COS_CLASS3;
	fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
	fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;

	if (IS_CNA_CAPABLE(ha))
		speed = FC_PORTSPEED_10GBIT;
	else if (IS_QLA2031(ha))
		speed = FC_PORTSPEED_16GBIT | FC_PORTSPEED_8GBIT |
		    FC_PORTSPEED_4GBIT;
	else if (IS_QLA25XX(ha))
		speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
		    FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
	else if (IS_QLA24XX_TYPE(ha))
		speed = FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
		    FC_PORTSPEED_1GBIT;
	else if (IS_QLA23XX(ha))
		speed = FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
	else if (IS_QLAFX00(ha))
		speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
		    FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
	else if (IS_QLA27XX(ha))
		speed = FC_PORTSPEED_32GBIT | FC_PORTSPEED_16GBIT |
		    FC_PORTSPEED_8GBIT;
	else
		speed = FC_PORTSPEED_1GBIT;
	fc_host_supported_speeds(vha->host) = speed;
}
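/*
 * Usage note (a sketch, not part of this file): these templates are handed
 * to the FC transport class at module init, typically along the lines of
 *
 *	qla2xxx_transport_template =
 *	    fc_attach_transport(&qla2xxx_transport_functions);
 *	qla2xxx_transport_vport_template =
 *	    fc_attach_transport(&qla2xxx_transport_vport_functions);
 *
 * with the returned struct scsi_transport_template later assigned to each
 * Scsi_Host's transportt before the host is added. The exact call sites
 * live in the driver's module-init/probe code and are not shown here.
 */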