1 /* 2 * QLogic Fibre Channel HBA Driver 3 * Copyright (c) 2003-2014 QLogic Corporation 4 * 5 * See LICENSE.qla2xxx for copyright and licensing details. 6 */ 7 #include "qla_def.h" 8 #include "qla_target.h" 9 10 #include <linux/kthread.h> 11 #include <linux/vmalloc.h> 12 #include <linux/slab.h> 13 #include <linux/delay.h> 14 15 static int qla24xx_vport_disable(struct fc_vport *, bool); 16 17 /* SYSFS attributes --------------------------------------------------------- */ 18 19 static ssize_t 20 qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj, 21 struct bin_attribute *bin_attr, 22 char *buf, loff_t off, size_t count) 23 { 24 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, 25 struct device, kobj))); 26 struct qla_hw_data *ha = vha->hw; 27 int rval = 0; 28 29 if (!(ha->fw_dump_reading || ha->mctp_dump_reading || 30 ha->mpi_fw_dump_reading)) 31 return 0; 32 33 mutex_lock(&ha->optrom_mutex); 34 if (IS_P3P_TYPE(ha)) { 35 if (off < ha->md_template_size) { 36 rval = memory_read_from_buffer(buf, count, 37 &off, ha->md_tmplt_hdr, ha->md_template_size); 38 } else { 39 off -= ha->md_template_size; 40 rval = memory_read_from_buffer(buf, count, 41 &off, ha->md_dump, ha->md_dump_size); 42 } 43 } else if (ha->mctp_dumped && ha->mctp_dump_reading) { 44 rval = memory_read_from_buffer(buf, count, &off, ha->mctp_dump, 45 MCTP_DUMP_SIZE); 46 } else if (ha->mpi_fw_dumped && ha->mpi_fw_dump_reading) { 47 rval = memory_read_from_buffer(buf, count, &off, 48 ha->mpi_fw_dump, 49 ha->mpi_fw_dump_len); 50 } else if (ha->fw_dump_reading) { 51 rval = memory_read_from_buffer(buf, count, &off, ha->fw_dump, 52 ha->fw_dump_len); 53 } else { 54 rval = 0; 55 } 56 mutex_unlock(&ha->optrom_mutex); 57 return rval; 58 } 59 60 static ssize_t 61 qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj, 62 struct bin_attribute *bin_attr, 63 char *buf, loff_t off, size_t count) 64 { 65 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, 66 struct device, kobj))); 67 struct qla_hw_data *ha = vha->hw; 68 int reading; 69 70 if (off != 0) 71 return (0); 72 73 reading = simple_strtol(buf, NULL, 10); 74 switch (reading) { 75 case 0: 76 if (!ha->fw_dump_reading) 77 break; 78 79 ql_log(ql_log_info, vha, 0x705d, 80 "Firmware dump cleared on (%ld).\n", vha->host_no); 81 82 if (IS_P3P_TYPE(ha)) { 83 qla82xx_md_free(vha); 84 qla82xx_md_prep(vha); 85 } 86 ha->fw_dump_reading = 0; 87 ha->fw_dumped = false; 88 break; 89 case 1: 90 if (ha->fw_dumped && !ha->fw_dump_reading) { 91 ha->fw_dump_reading = 1; 92 93 ql_log(ql_log_info, vha, 0x705e, 94 "Raw firmware dump ready for read on (%ld).\n", 95 vha->host_no); 96 } 97 break; 98 case 2: 99 qla2x00_alloc_fw_dump(vha); 100 break; 101 case 3: 102 if (IS_QLA82XX(ha)) { 103 qla82xx_idc_lock(ha); 104 qla82xx_set_reset_owner(vha); 105 qla82xx_idc_unlock(ha); 106 } else if (IS_QLA8044(ha)) { 107 qla8044_idc_lock(ha); 108 qla82xx_set_reset_owner(vha); 109 qla8044_idc_unlock(ha); 110 } else { 111 qla2x00_system_error(vha); 112 } 113 break; 114 case 4: 115 if (IS_P3P_TYPE(ha)) { 116 if (ha->md_tmplt_hdr) 117 ql_dbg(ql_dbg_user, vha, 0x705b, 118 "MiniDump supported with this firmware.\n"); 119 else 120 ql_dbg(ql_dbg_user, vha, 0x709d, 121 "MiniDump not supported with this firmware.\n"); 122 } 123 break; 124 case 5: 125 if (IS_P3P_TYPE(ha)) 126 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 127 break; 128 case 6: 129 if (!ha->mctp_dump_reading) 130 break; 131 ql_log(ql_log_info, vha, 0x70c1, 132 "MCTP dump cleared on (%ld).\n", vha->host_no); 133 ha->mctp_dump_reading = 0; 134 ha->mctp_dumped = 0; 135 break; 136 case 7: 137 if (ha->mctp_dumped && !ha->mctp_dump_reading) { 138 ha->mctp_dump_reading = 1; 139 ql_log(ql_log_info, vha, 0x70c2, 140 "Raw mctp dump ready for read on (%ld).\n", 141 vha->host_no); 142 } 143 break; 144 case 8: 145 if (!ha->mpi_fw_dump_reading) 146 break; 147 ql_log(ql_log_info, vha, 0x70e7, 148 "MPI firmware dump cleared on (%ld).\n", vha->host_no); 149 ha->mpi_fw_dump_reading = 0; 150 ha->mpi_fw_dumped = 0; 151 break; 152 case 9: 153 if (ha->mpi_fw_dumped && !ha->mpi_fw_dump_reading) { 154 ha->mpi_fw_dump_reading = 1; 155 ql_log(ql_log_info, vha, 0x70e8, 156 "Raw MPI firmware dump ready for read on (%ld).\n", 157 vha->host_no); 158 } 159 break; 160 } 161 return count; 162 } 163 164 static struct bin_attribute sysfs_fw_dump_attr = { 165 .attr = { 166 .name = "fw_dump", 167 .mode = S_IRUSR | S_IWUSR, 168 }, 169 .size = 0, 170 .read = qla2x00_sysfs_read_fw_dump, 171 .write = qla2x00_sysfs_write_fw_dump, 172 }; 173 174 static ssize_t 175 qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj, 176 struct bin_attribute *bin_attr, 177 char *buf, loff_t off, size_t count) 178 { 179 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, 180 struct device, kobj))); 181 struct qla_hw_data *ha = vha->hw; 182 uint32_t faddr; 183 struct active_regions active_regions = { }; 184 185 if (!capable(CAP_SYS_ADMIN)) 186 return 0; 187 188 mutex_lock(&ha->optrom_mutex); 189 if (qla2x00_chip_is_down(vha)) { 190 mutex_unlock(&ha->optrom_mutex); 191 return -EAGAIN; 192 } 193 194 if (!IS_NOCACHE_VPD_TYPE(ha)) { 195 mutex_unlock(&ha->optrom_mutex); 196 goto skip; 197 } 198 199 faddr = ha->flt_region_nvram; 200 if (IS_QLA28XX(ha)) { 201 qla28xx_get_aux_images(vha, &active_regions); 202 if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE) 203 faddr = ha->flt_region_nvram_sec; 204 } 205 ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size); 206 207 mutex_unlock(&ha->optrom_mutex); 208 209 skip: 210 return memory_read_from_buffer(buf, count, &off, ha->nvram, 211 ha->nvram_size); 212 } 213 214 static ssize_t 215 qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj, 216 struct bin_attribute *bin_attr, 217 char *buf, loff_t off, size_t count) 218 { 219 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, 220 struct device, kobj))); 221 struct qla_hw_data *ha = vha->hw; 222 uint16_t cnt; 223 224 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size || 225 !ha->isp_ops->write_nvram) 226 return -EINVAL; 227 228 /* Checksum NVRAM. */ 229 if (IS_FWI2_CAPABLE(ha)) { 230 __le32 *iter = (__force __le32 *)buf; 231 uint32_t chksum; 232 233 chksum = 0; 234 for (cnt = 0; cnt < ((count >> 2) - 1); cnt++, iter++) 235 chksum += le32_to_cpu(*iter); 236 chksum = ~chksum + 1; 237 *iter = cpu_to_le32(chksum); 238 } else { 239 uint8_t *iter; 240 uint8_t chksum; 241 242 iter = (uint8_t *)buf; 243 chksum = 0; 244 for (cnt = 0; cnt < count - 1; cnt++) 245 chksum += *iter++; 246 chksum = ~chksum + 1; 247 *iter = chksum; 248 } 249 250 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 251 ql_log(ql_log_warn, vha, 0x705f, 252 "HBA not online, failing NVRAM update.\n"); 253 return -EAGAIN; 254 } 255 256 mutex_lock(&ha->optrom_mutex); 257 if (qla2x00_chip_is_down(vha)) { 258 mutex_unlock(&ha->optrom_mutex); 259 return -EAGAIN; 260 } 261 262 /* Write NVRAM. */ 263 ha->isp_ops->write_nvram(vha, buf, ha->nvram_base, count); 264 ha->isp_ops->read_nvram(vha, ha->nvram, ha->nvram_base, 265 count); 266 mutex_unlock(&ha->optrom_mutex); 267 268 ql_dbg(ql_dbg_user, vha, 0x7060, 269 "Setting ISP_ABORT_NEEDED\n"); 270 /* NVRAM settings take effect immediately. */ 271 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 272 qla2xxx_wake_dpc(vha); 273 qla2x00_wait_for_chip_reset(vha); 274 275 return count; 276 } 277 278 static struct bin_attribute sysfs_nvram_attr = { 279 .attr = { 280 .name = "nvram", 281 .mode = S_IRUSR | S_IWUSR, 282 }, 283 .size = 512, 284 .read = qla2x00_sysfs_read_nvram, 285 .write = qla2x00_sysfs_write_nvram, 286 }; 287 288 static ssize_t 289 qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj, 290 struct bin_attribute *bin_attr, 291 char *buf, loff_t off, size_t count) 292 { 293 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, 294 struct device, kobj))); 295 struct qla_hw_data *ha = vha->hw; 296 ssize_t rval = 0; 297 298 mutex_lock(&ha->optrom_mutex); 299 300 if (ha->optrom_state != QLA_SREADING) 301 goto out; 302 303 rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer, 304 ha->optrom_region_size); 305 306 out: 307 mutex_unlock(&ha->optrom_mutex); 308 309 return rval; 310 } 311 312 static ssize_t 313 qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj, 314 struct bin_attribute *bin_attr, 315 char *buf, loff_t off, size_t count) 316 { 317 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, 318 struct device, kobj))); 319 struct qla_hw_data *ha = vha->hw; 320 321 mutex_lock(&ha->optrom_mutex); 322 323 if (ha->optrom_state != QLA_SWRITING) { 324 mutex_unlock(&ha->optrom_mutex); 325 return -EINVAL; 326 } 327 if (off > ha->optrom_region_size) { 328 mutex_unlock(&ha->optrom_mutex); 329 return -ERANGE; 330 } 331 if (off + count > ha->optrom_region_size) 332 count = ha->optrom_region_size - off; 333 334 memcpy(&ha->optrom_buffer[off], buf, count); 335 mutex_unlock(&ha->optrom_mutex); 336 337 return count; 338 } 339 340 static struct bin_attribute sysfs_optrom_attr = { 341 .attr = { 342 .name = "optrom", 343 .mode = S_IRUSR | S_IWUSR, 344 }, 345 .size = 0, 346 .read = qla2x00_sysfs_read_optrom, 347 .write = qla2x00_sysfs_write_optrom, 348 }; 349 350 static ssize_t 351 qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, 352 struct bin_attribute *bin_attr, 353 char *buf, loff_t off, size_t count) 354 { 355 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, 356 struct device, kobj))); 357 struct qla_hw_data *ha = vha->hw; 358 uint32_t start = 0; 359 uint32_t size = ha->optrom_size; 360 int val, valid; 361 ssize_t rval = count; 362 363 if (off) 364 return -EINVAL; 365 366 if (unlikely(pci_channel_offline(ha->pdev))) 367 return -EAGAIN; 368 369 if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1) 370 return -EINVAL; 371 if (start > ha->optrom_size) 372 return -EINVAL; 373 if (size > ha->optrom_size - start) 374 size = ha->optrom_size - start; 375 376 mutex_lock(&ha->optrom_mutex); 377 if (qla2x00_chip_is_down(vha)) { 378 mutex_unlock(&ha->optrom_mutex); 379 return -EAGAIN; 380 } 381 switch (val) { 382 case 0: 383 if (ha->optrom_state != QLA_SREADING && 384 ha->optrom_state != QLA_SWRITING) { 385 rval = -EINVAL; 386 goto out; 387 } 388 ha->optrom_state = QLA_SWAITING; 389 390 ql_dbg(ql_dbg_user, vha, 0x7061, 391 "Freeing flash region allocation -- 0x%x bytes.\n", 392 ha->optrom_region_size); 393 394 vfree(ha->optrom_buffer); 395 ha->optrom_buffer = NULL; 396 break; 397 case 1: 398 if (ha->optrom_state != QLA_SWAITING) { 399 rval = -EINVAL; 400 goto out; 401 } 402 403 ha->optrom_region_start = start; 404 ha->optrom_region_size = size; 405 406 ha->optrom_state = QLA_SREADING; 407 ha->optrom_buffer = vzalloc(ha->optrom_region_size); 408 if (ha->optrom_buffer == NULL) { 409 ql_log(ql_log_warn, vha, 0x7062, 410 "Unable to allocate memory for optrom retrieval " 411 "(%x).\n", ha->optrom_region_size); 412 413 ha->optrom_state = QLA_SWAITING; 414 rval = -ENOMEM; 415 goto out; 416 } 417 418 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 419 ql_log(ql_log_warn, vha, 0x7063, 420 "HBA not online, failing NVRAM update.\n"); 421 rval = -EAGAIN; 422 goto out; 423 } 424 425 ql_dbg(ql_dbg_user, vha, 0x7064, 426 "Reading flash region -- 0x%x/0x%x.\n", 427 ha->optrom_region_start, ha->optrom_region_size); 428 429 ha->isp_ops->read_optrom(vha, ha->optrom_buffer, 430 ha->optrom_region_start, ha->optrom_region_size); 431 break; 432 case 2: 433 if (ha->optrom_state != QLA_SWAITING) { 434 rval = -EINVAL; 435 goto out; 436 } 437 438 /* 439 * We need to be more restrictive on which FLASH regions are 440 * allowed to be updated via user-space. Regions accessible 441 * via this method include: 442 * 443 * ISP21xx/ISP22xx/ISP23xx type boards: 444 * 445 * 0x000000 -> 0x020000 -- Boot code. 446 * 447 * ISP2322/ISP24xx type boards: 448 * 449 * 0x000000 -> 0x07ffff -- Boot code. 450 * 0x080000 -> 0x0fffff -- Firmware. 451 * 452 * ISP25xx type boards: 453 * 454 * 0x000000 -> 0x07ffff -- Boot code. 455 * 0x080000 -> 0x0fffff -- Firmware. 456 * 0x120000 -> 0x12ffff -- VPD and HBA parameters. 457 * 458 * > ISP25xx type boards: 459 * 460 * None -- should go through BSG. 461 */ 462 valid = 0; 463 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0) 464 valid = 1; 465 else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)) 466 valid = 1; 467 if (!valid) { 468 ql_log(ql_log_warn, vha, 0x7065, 469 "Invalid start region 0x%x/0x%x.\n", start, size); 470 rval = -EINVAL; 471 goto out; 472 } 473 474 ha->optrom_region_start = start; 475 ha->optrom_region_size = size; 476 477 ha->optrom_state = QLA_SWRITING; 478 ha->optrom_buffer = vzalloc(ha->optrom_region_size); 479 if (ha->optrom_buffer == NULL) { 480 ql_log(ql_log_warn, vha, 0x7066, 481 "Unable to allocate memory for optrom update " 482 "(%x)\n", ha->optrom_region_size); 483 484 ha->optrom_state = QLA_SWAITING; 485 rval = -ENOMEM; 486 goto out; 487 } 488 489 ql_dbg(ql_dbg_user, vha, 0x7067, 490 "Staging flash region write -- 0x%x/0x%x.\n", 491 ha->optrom_region_start, ha->optrom_region_size); 492 493 break; 494 case 3: 495 if (ha->optrom_state != QLA_SWRITING) { 496 rval = -EINVAL; 497 goto out; 498 } 499 500 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 501 ql_log(ql_log_warn, vha, 0x7068, 502 "HBA not online, failing flash update.\n"); 503 rval = -EAGAIN; 504 goto out; 505 } 506 507 ql_dbg(ql_dbg_user, vha, 0x7069, 508 "Writing flash region -- 0x%x/0x%x.\n", 509 ha->optrom_region_start, ha->optrom_region_size); 510 511 rval = ha->isp_ops->write_optrom(vha, ha->optrom_buffer, 512 ha->optrom_region_start, ha->optrom_region_size); 513 if (rval) 514 rval = -EIO; 515 break; 516 default: 517 rval = -EINVAL; 518 } 519 520 out: 521 mutex_unlock(&ha->optrom_mutex); 522 return rval; 523 } 524 525 static struct bin_attribute sysfs_optrom_ctl_attr = { 526 .attr = { 527 .name = "optrom_ctl", 528 .mode = S_IWUSR, 529 }, 530 .size = 0, 531 .write = qla2x00_sysfs_write_optrom_ctl, 532 }; 533 534 static ssize_t 535 qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj, 536 struct bin_attribute *bin_attr, 537 char *buf, loff_t off, size_t count) 538 { 539 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, 540 struct device, kobj))); 541 struct qla_hw_data *ha = vha->hw; 542 uint32_t faddr; 543 struct active_regions active_regions = { }; 544 545 if (unlikely(pci_channel_offline(ha->pdev))) 546 return -EAGAIN; 547 548 if (!capable(CAP_SYS_ADMIN)) 549 return -EINVAL; 550 551 if (IS_NOCACHE_VPD_TYPE(ha)) 552 goto skip; 553 554 faddr = ha->flt_region_vpd << 2; 555 556 if (IS_QLA28XX(ha)) { 557 qla28xx_get_aux_images(vha, &active_regions); 558 if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE) 559 faddr = ha->flt_region_vpd_sec << 2; 560 561 ql_dbg(ql_dbg_init, vha, 0x7070, 562 "Loading %s nvram image.\n", 563 active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ? 564 "primary" : "secondary"); 565 } 566 567 mutex_lock(&ha->optrom_mutex); 568 if (qla2x00_chip_is_down(vha)) { 569 mutex_unlock(&ha->optrom_mutex); 570 return -EAGAIN; 571 } 572 573 ha->isp_ops->read_optrom(vha, ha->vpd, faddr, ha->vpd_size); 574 mutex_unlock(&ha->optrom_mutex); 575 576 ha->isp_ops->read_optrom(vha, ha->vpd, faddr, ha->vpd_size); 577 skip: 578 return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size); 579 } 580 581 static ssize_t 582 qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj, 583 struct bin_attribute *bin_attr, 584 char *buf, loff_t off, size_t count) 585 { 586 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, 587 struct device, kobj))); 588 struct qla_hw_data *ha = vha->hw; 589 uint8_t *tmp_data; 590 591 if (unlikely(pci_channel_offline(ha->pdev))) 592 return 0; 593 594 if (qla2x00_chip_is_down(vha)) 595 return 0; 596 597 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size || 598 !ha->isp_ops->write_nvram) 599 return 0; 600 601 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 602 ql_log(ql_log_warn, vha, 0x706a, 603 "HBA not online, failing VPD update.\n"); 604 return -EAGAIN; 605 } 606 607 mutex_lock(&ha->optrom_mutex); 608 if (qla2x00_chip_is_down(vha)) { 609 mutex_unlock(&ha->optrom_mutex); 610 return -EAGAIN; 611 } 612 613 /* Write NVRAM. */ 614 ha->isp_ops->write_nvram(vha, buf, ha->vpd_base, count); 615 ha->isp_ops->read_nvram(vha, ha->vpd, ha->vpd_base, count); 616 617 /* Update flash version information for 4Gb & above. */ 618 if (!IS_FWI2_CAPABLE(ha)) { 619 mutex_unlock(&ha->optrom_mutex); 620 return -EINVAL; 621 } 622 623 tmp_data = vmalloc(256); 624 if (!tmp_data) { 625 mutex_unlock(&ha->optrom_mutex); 626 ql_log(ql_log_warn, vha, 0x706b, 627 "Unable to allocate memory for VPD information update.\n"); 628 return -ENOMEM; 629 } 630 ha->isp_ops->get_flash_version(vha, tmp_data); 631 vfree(tmp_data); 632 633 mutex_unlock(&ha->optrom_mutex); 634 635 return count; 636 } 637 638 static struct bin_attribute sysfs_vpd_attr = { 639 .attr = { 640 .name = "vpd", 641 .mode = S_IRUSR | S_IWUSR, 642 }, 643 .size = 0, 644 .read = qla2x00_sysfs_read_vpd, 645 .write = qla2x00_sysfs_write_vpd, 646 }; 647 648 static ssize_t 649 qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj, 650 struct bin_attribute *bin_attr, 651 char *buf, loff_t off, size_t count) 652 { 653 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, 654 struct device, kobj))); 655 int rval; 656 657 if (!capable(CAP_SYS_ADMIN) || off != 0 || count < SFP_DEV_SIZE) 658 return 0; 659 660 mutex_lock(&vha->hw->optrom_mutex); 661 if (qla2x00_chip_is_down(vha)) { 662 mutex_unlock(&vha->hw->optrom_mutex); 663 return 0; 664 } 665 666 rval = qla2x00_read_sfp_dev(vha, buf, count); 667 mutex_unlock(&vha->hw->optrom_mutex); 668 669 if (rval) 670 return -EIO; 671 672 return count; 673 } 674 675 static struct bin_attribute sysfs_sfp_attr = { 676 .attr = { 677 .name = "sfp", 678 .mode = S_IRUSR | S_IWUSR, 679 }, 680 .size = SFP_DEV_SIZE, 681 .read = qla2x00_sysfs_read_sfp, 682 }; 683 684 static ssize_t 685 qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj, 686 struct bin_attribute *bin_attr, 687 char *buf, loff_t off, size_t count) 688 { 689 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, 690 struct device, kobj))); 691 struct qla_hw_data *ha = vha->hw; 692 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 693 int type; 694 uint32_t idc_control; 695 uint8_t *tmp_data = NULL; 696 697 if (off != 0) 698 return -EINVAL; 699 700 type = simple_strtol(buf, NULL, 10); 701 switch (type) { 702 case 0x2025c: 703 ql_log(ql_log_info, vha, 0x706e, 704 "Issuing ISP reset.\n"); 705 706 scsi_block_requests(vha->host); 707 if (IS_QLA82XX(ha)) { 708 ha->flags.isp82xx_no_md_cap = 1; 709 qla82xx_idc_lock(ha); 710 qla82xx_set_reset_owner(vha); 711 qla82xx_idc_unlock(ha); 712 } else if (IS_QLA8044(ha)) { 713 qla8044_idc_lock(ha); 714 idc_control = qla8044_rd_reg(ha, 715 QLA8044_IDC_DRV_CTRL); 716 qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, 717 (idc_control | GRACEFUL_RESET_BIT1)); 718 qla82xx_set_reset_owner(vha); 719 qla8044_idc_unlock(ha); 720 } else { 721 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 722 qla2xxx_wake_dpc(vha); 723 } 724 qla2x00_wait_for_chip_reset(vha); 725 scsi_unblock_requests(vha->host); 726 break; 727 case 0x2025d: 728 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && 729 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 730 return -EPERM; 731 732 ql_log(ql_log_info, vha, 0x706f, 733 "Issuing MPI reset.\n"); 734 735 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 736 uint32_t idc_control; 737 738 qla83xx_idc_lock(vha, 0); 739 __qla83xx_get_idc_control(vha, &idc_control); 740 idc_control |= QLA83XX_IDC_GRACEFUL_RESET; 741 __qla83xx_set_idc_control(vha, idc_control); 742 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, 743 QLA8XXX_DEV_NEED_RESET); 744 qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP); 745 qla83xx_idc_unlock(vha, 0); 746 break; 747 } else if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 748 qla27xx_reset_mpi(vha); 749 } else { 750 /* Make sure FC side is not in reset */ 751 WARN_ON_ONCE(qla2x00_wait_for_hba_online(vha) != 752 QLA_SUCCESS); 753 754 /* Issue MPI reset */ 755 scsi_block_requests(vha->host); 756 if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS) 757 ql_log(ql_log_warn, vha, 0x7070, 758 "MPI reset failed.\n"); 759 scsi_unblock_requests(vha->host); 760 break; 761 } 762 break; 763 case 0x2025e: 764 if (!IS_P3P_TYPE(ha) || vha != base_vha) { 765 ql_log(ql_log_info, vha, 0x7071, 766 "FCoE ctx reset not supported.\n"); 767 return -EPERM; 768 } 769 770 ql_log(ql_log_info, vha, 0x7072, 771 "Issuing FCoE ctx reset.\n"); 772 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 773 qla2xxx_wake_dpc(vha); 774 qla2x00_wait_for_fcoe_ctx_reset(vha); 775 break; 776 case 0x2025f: 777 if (!IS_QLA8031(ha)) 778 return -EPERM; 779 ql_log(ql_log_info, vha, 0x70bc, 780 "Disabling Reset by IDC control\n"); 781 qla83xx_idc_lock(vha, 0); 782 __qla83xx_get_idc_control(vha, &idc_control); 783 idc_control |= QLA83XX_IDC_RESET_DISABLED; 784 __qla83xx_set_idc_control(vha, idc_control); 785 qla83xx_idc_unlock(vha, 0); 786 break; 787 case 0x20260: 788 if (!IS_QLA8031(ha)) 789 return -EPERM; 790 ql_log(ql_log_info, vha, 0x70bd, 791 "Enabling Reset by IDC control\n"); 792 qla83xx_idc_lock(vha, 0); 793 __qla83xx_get_idc_control(vha, &idc_control); 794 idc_control &= ~QLA83XX_IDC_RESET_DISABLED; 795 __qla83xx_set_idc_control(vha, idc_control); 796 qla83xx_idc_unlock(vha, 0); 797 break; 798 case 0x20261: 799 ql_dbg(ql_dbg_user, vha, 0x70e0, 800 "Updating cache versions without reset "); 801 802 tmp_data = vmalloc(256); 803 if (!tmp_data) { 804 ql_log(ql_log_warn, vha, 0x70e1, 805 "Unable to allocate memory for VPD information update.\n"); 806 return -ENOMEM; 807 } 808 ha->isp_ops->get_flash_version(vha, tmp_data); 809 vfree(tmp_data); 810 break; 811 } 812 return count; 813 } 814 815 static struct bin_attribute sysfs_reset_attr = { 816 .attr = { 817 .name = "reset", 818 .mode = S_IWUSR, 819 }, 820 .size = 0, 821 .write = qla2x00_sysfs_write_reset, 822 }; 823 824 static ssize_t 825 qla2x00_issue_logo(struct file *filp, struct kobject *kobj, 826 struct bin_attribute *bin_attr, 827 char *buf, loff_t off, size_t count) 828 { 829 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, 830 struct device, kobj))); 831 int type; 832 port_id_t did; 833 834 if (!capable(CAP_SYS_ADMIN)) 835 return 0; 836 837 if (unlikely(pci_channel_offline(vha->hw->pdev))) 838 return 0; 839 840 if (qla2x00_chip_is_down(vha)) 841 return 0; 842 843 type = simple_strtol(buf, NULL, 10); 844 845 did.b.domain = (type & 0x00ff0000) >> 16; 846 did.b.area = (type & 0x0000ff00) >> 8; 847 did.b.al_pa = (type & 0x000000ff); 848 849 ql_log(ql_log_info, vha, 0xd04d, "portid=%02x%02x%02x done\n", 850 did.b.domain, did.b.area, did.b.al_pa); 851 852 ql_log(ql_log_info, vha, 0x70e4, "%s: %d\n", __func__, type); 853 854 qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did); 855 return count; 856 } 857 858 static struct bin_attribute sysfs_issue_logo_attr = { 859 .attr = { 860 .name = "issue_logo", 861 .mode = S_IWUSR, 862 }, 863 .size = 0, 864 .write = qla2x00_issue_logo, 865 }; 866 867 static ssize_t 868 qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj, 869 struct bin_attribute *bin_attr, 870 char *buf, loff_t off, size_t count) 871 { 872 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, 873 struct device, kobj))); 874 struct qla_hw_data *ha = vha->hw; 875 int rval; 876 uint16_t actual_size; 877 878 if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE) 879 return 0; 880 881 if (unlikely(pci_channel_offline(ha->pdev))) 882 return 0; 883 mutex_lock(&vha->hw->optrom_mutex); 884 if (qla2x00_chip_is_down(vha)) { 885 mutex_unlock(&vha->hw->optrom_mutex); 886 return 0; 887 } 888 889 if (ha->xgmac_data) 890 goto do_read; 891 892 ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE, 893 &ha->xgmac_data_dma, GFP_KERNEL); 894 if (!ha->xgmac_data) { 895 mutex_unlock(&vha->hw->optrom_mutex); 896 ql_log(ql_log_warn, vha, 0x7076, 897 "Unable to allocate memory for XGMAC read-data.\n"); 898 return 0; 899 } 900 901 do_read: 902 actual_size = 0; 903 memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE); 904 905 rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma, 906 XGMAC_DATA_SIZE, &actual_size); 907 908 mutex_unlock(&vha->hw->optrom_mutex); 909 if (rval != QLA_SUCCESS) { 910 ql_log(ql_log_warn, vha, 0x7077, 911 "Unable to read XGMAC data (%x).\n", rval); 912 count = 0; 913 } 914 915 count = actual_size > count ? count : actual_size; 916 memcpy(buf, ha->xgmac_data, count); 917 918 return count; 919 } 920 921 static struct bin_attribute sysfs_xgmac_stats_attr = { 922 .attr = { 923 .name = "xgmac_stats", 924 .mode = S_IRUSR, 925 }, 926 .size = 0, 927 .read = qla2x00_sysfs_read_xgmac_stats, 928 }; 929 930 static ssize_t 931 qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj, 932 struct bin_attribute *bin_attr, 933 char *buf, loff_t off, size_t count) 934 { 935 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, 936 struct device, kobj))); 937 struct qla_hw_data *ha = vha->hw; 938 int rval; 939 940 if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE) 941 return 0; 942 943 if (ha->dcbx_tlv) 944 goto do_read; 945 mutex_lock(&vha->hw->optrom_mutex); 946 if (qla2x00_chip_is_down(vha)) { 947 mutex_unlock(&vha->hw->optrom_mutex); 948 return 0; 949 } 950 951 ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE, 952 &ha->dcbx_tlv_dma, GFP_KERNEL); 953 if (!ha->dcbx_tlv) { 954 mutex_unlock(&vha->hw->optrom_mutex); 955 ql_log(ql_log_warn, vha, 0x7078, 956 "Unable to allocate memory for DCBX TLV read-data.\n"); 957 return -ENOMEM; 958 } 959 960 do_read: 961 memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE); 962 963 rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma, 964 DCBX_TLV_DATA_SIZE); 965 966 mutex_unlock(&vha->hw->optrom_mutex); 967 968 if (rval != QLA_SUCCESS) { 969 ql_log(ql_log_warn, vha, 0x7079, 970 "Unable to read DCBX TLV (%x).\n", rval); 971 return -EIO; 972 } 973 974 memcpy(buf, ha->dcbx_tlv, count); 975 976 return count; 977 } 978 979 static struct bin_attribute sysfs_dcbx_tlv_attr = { 980 .attr = { 981 .name = "dcbx_tlv", 982 .mode = S_IRUSR, 983 }, 984 .size = 0, 985 .read = qla2x00_sysfs_read_dcbx_tlv, 986 }; 987 988 static struct sysfs_entry { 989 char *name; 990 struct bin_attribute *attr; 991 int type; 992 } bin_file_entries[] = { 993 { "fw_dump", &sysfs_fw_dump_attr, }, 994 { "nvram", &sysfs_nvram_attr, }, 995 { "optrom", &sysfs_optrom_attr, }, 996 { "optrom_ctl", &sysfs_optrom_ctl_attr, }, 997 { "vpd", &sysfs_vpd_attr, 1 }, 998 { "sfp", &sysfs_sfp_attr, 1 }, 999 { "reset", &sysfs_reset_attr, }, 1000 { "issue_logo", &sysfs_issue_logo_attr, }, 1001 { "xgmac_stats", &sysfs_xgmac_stats_attr, 3 }, 1002 { "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 }, 1003 { NULL }, 1004 }; 1005 1006 void 1007 qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha) 1008 { 1009 struct Scsi_Host *host = vha->host; 1010 struct sysfs_entry *iter; 1011 int ret; 1012 1013 for (iter = bin_file_entries; iter->name; iter++) { 1014 if (iter->type && !IS_FWI2_CAPABLE(vha->hw)) 1015 continue; 1016 if (iter->type == 2 && !IS_QLA25XX(vha->hw)) 1017 continue; 1018 if (iter->type == 3 && !(IS_CNA_CAPABLE(vha->hw))) 1019 continue; 1020 1021 ret = sysfs_create_bin_file(&host->shost_gendev.kobj, 1022 iter->attr); 1023 if (ret) 1024 ql_log(ql_log_warn, vha, 0x00f3, 1025 "Unable to create sysfs %s binary attribute (%d).\n", 1026 iter->name, ret); 1027 else 1028 ql_dbg(ql_dbg_init, vha, 0x00f4, 1029 "Successfully created sysfs %s binary attribute.\n", 1030 iter->name); 1031 } 1032 } 1033 1034 void 1035 qla2x00_free_sysfs_attr(scsi_qla_host_t *vha, bool stop_beacon) 1036 { 1037 struct Scsi_Host *host = vha->host; 1038 struct sysfs_entry *iter; 1039 struct qla_hw_data *ha = vha->hw; 1040 1041 for (iter = bin_file_entries; iter->name; iter++) { 1042 if (iter->type && !IS_FWI2_CAPABLE(ha)) 1043 continue; 1044 if (iter->type == 2 && !IS_QLA25XX(ha)) 1045 continue; 1046 if (iter->type == 3 && !(IS_CNA_CAPABLE(ha))) 1047 continue; 1048 if (iter->type == 0x27 && 1049 (!IS_QLA27XX(ha) || !IS_QLA28XX(ha))) 1050 continue; 1051 1052 sysfs_remove_bin_file(&host->shost_gendev.kobj, 1053 iter->attr); 1054 } 1055 1056 if (stop_beacon && ha->beacon_blink_led == 1) 1057 ha->isp_ops->beacon_off(vha); 1058 } 1059 1060 /* Scsi_Host attributes. */ 1061 1062 static ssize_t 1063 qla2x00_driver_version_show(struct device *dev, 1064 struct device_attribute *attr, char *buf) 1065 { 1066 return scnprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str); 1067 } 1068 1069 static ssize_t 1070 qla2x00_fw_version_show(struct device *dev, 1071 struct device_attribute *attr, char *buf) 1072 { 1073 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1074 struct qla_hw_data *ha = vha->hw; 1075 char fw_str[128]; 1076 1077 return scnprintf(buf, PAGE_SIZE, "%s\n", 1078 ha->isp_ops->fw_version_str(vha, fw_str, sizeof(fw_str))); 1079 } 1080 1081 static ssize_t 1082 qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr, 1083 char *buf) 1084 { 1085 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1086 struct qla_hw_data *ha = vha->hw; 1087 uint32_t sn; 1088 1089 if (IS_QLAFX00(vha->hw)) { 1090 return scnprintf(buf, PAGE_SIZE, "%s\n", 1091 vha->hw->mr.serial_num); 1092 } else if (IS_FWI2_CAPABLE(ha)) { 1093 qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE - 1); 1094 return strlen(strcat(buf, "\n")); 1095 } 1096 1097 sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1; 1098 return scnprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000, 1099 sn % 100000); 1100 } 1101 1102 static ssize_t 1103 qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr, 1104 char *buf) 1105 { 1106 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1107 1108 return scnprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device); 1109 } 1110 1111 static ssize_t 1112 qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr, 1113 char *buf) 1114 { 1115 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1116 struct qla_hw_data *ha = vha->hw; 1117 1118 if (IS_QLAFX00(vha->hw)) 1119 return scnprintf(buf, PAGE_SIZE, "%s\n", 1120 vha->hw->mr.hw_version); 1121 1122 return scnprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n", 1123 ha->product_id[0], ha->product_id[1], ha->product_id[2], 1124 ha->product_id[3]); 1125 } 1126 1127 static ssize_t 1128 qla2x00_model_name_show(struct device *dev, struct device_attribute *attr, 1129 char *buf) 1130 { 1131 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1132 1133 return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number); 1134 } 1135 1136 static ssize_t 1137 qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr, 1138 char *buf) 1139 { 1140 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1141 1142 return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_desc); 1143 } 1144 1145 static ssize_t 1146 qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr, 1147 char *buf) 1148 { 1149 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1150 char pci_info[30]; 1151 1152 return scnprintf(buf, PAGE_SIZE, "%s\n", 1153 vha->hw->isp_ops->pci_info_str(vha, pci_info, 1154 sizeof(pci_info))); 1155 } 1156 1157 static ssize_t 1158 qla2x00_link_state_show(struct device *dev, struct device_attribute *attr, 1159 char *buf) 1160 { 1161 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1162 struct qla_hw_data *ha = vha->hw; 1163 int len = 0; 1164 1165 if (atomic_read(&vha->loop_state) == LOOP_DOWN || 1166 atomic_read(&vha->loop_state) == LOOP_DEAD || 1167 vha->device_flags & DFLG_NO_CABLE) 1168 len = scnprintf(buf, PAGE_SIZE, "Link Down\n"); 1169 else if (atomic_read(&vha->loop_state) != LOOP_READY || 1170 qla2x00_chip_is_down(vha)) 1171 len = scnprintf(buf, PAGE_SIZE, "Unknown Link State\n"); 1172 else { 1173 len = scnprintf(buf, PAGE_SIZE, "Link Up - "); 1174 1175 switch (ha->current_topology) { 1176 case ISP_CFG_NL: 1177 len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n"); 1178 break; 1179 case ISP_CFG_FL: 1180 len += scnprintf(buf + len, PAGE_SIZE-len, "FL_Port\n"); 1181 break; 1182 case ISP_CFG_N: 1183 len += scnprintf(buf + len, PAGE_SIZE-len, 1184 "N_Port to N_Port\n"); 1185 break; 1186 case ISP_CFG_F: 1187 len += scnprintf(buf + len, PAGE_SIZE-len, "F_Port\n"); 1188 break; 1189 default: 1190 len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n"); 1191 break; 1192 } 1193 } 1194 return len; 1195 } 1196 1197 static ssize_t 1198 qla2x00_zio_show(struct device *dev, struct device_attribute *attr, 1199 char *buf) 1200 { 1201 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1202 int len = 0; 1203 1204 switch (vha->hw->zio_mode) { 1205 case QLA_ZIO_MODE_6: 1206 len += scnprintf(buf + len, PAGE_SIZE-len, "Mode 6\n"); 1207 break; 1208 case QLA_ZIO_DISABLED: 1209 len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n"); 1210 break; 1211 } 1212 return len; 1213 } 1214 1215 static ssize_t 1216 qla2x00_zio_store(struct device *dev, struct device_attribute *attr, 1217 const char *buf, size_t count) 1218 { 1219 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1220 struct qla_hw_data *ha = vha->hw; 1221 int val = 0; 1222 uint16_t zio_mode; 1223 1224 if (!IS_ZIO_SUPPORTED(ha)) 1225 return -ENOTSUPP; 1226 1227 if (sscanf(buf, "%d", &val) != 1) 1228 return -EINVAL; 1229 1230 if (val) 1231 zio_mode = QLA_ZIO_MODE_6; 1232 else 1233 zio_mode = QLA_ZIO_DISABLED; 1234 1235 /* Update per-hba values and queue a reset. */ 1236 if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) { 1237 ha->zio_mode = zio_mode; 1238 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1239 } 1240 return strlen(buf); 1241 } 1242 1243 static ssize_t 1244 qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr, 1245 char *buf) 1246 { 1247 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1248 1249 return scnprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100); 1250 } 1251 1252 static ssize_t 1253 qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr, 1254 const char *buf, size_t count) 1255 { 1256 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1257 int val = 0; 1258 uint16_t zio_timer; 1259 1260 if (sscanf(buf, "%d", &val) != 1) 1261 return -EINVAL; 1262 if (val > 25500 || val < 100) 1263 return -ERANGE; 1264 1265 zio_timer = (uint16_t)(val / 100); 1266 vha->hw->zio_timer = zio_timer; 1267 1268 return strlen(buf); 1269 } 1270 1271 static ssize_t 1272 qla_zio_threshold_show(struct device *dev, struct device_attribute *attr, 1273 char *buf) 1274 { 1275 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1276 1277 return scnprintf(buf, PAGE_SIZE, "%d exchanges\n", 1278 vha->hw->last_zio_threshold); 1279 } 1280 1281 static ssize_t 1282 qla_zio_threshold_store(struct device *dev, struct device_attribute *attr, 1283 const char *buf, size_t count) 1284 { 1285 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1286 int val = 0; 1287 1288 if (vha->hw->zio_mode != QLA_ZIO_MODE_6) 1289 return -EINVAL; 1290 if (sscanf(buf, "%d", &val) != 1) 1291 return -EINVAL; 1292 if (val < 0 || val > 256) 1293 return -ERANGE; 1294 1295 atomic_set(&vha->hw->zio_threshold, val); 1296 return strlen(buf); 1297 } 1298 1299 static ssize_t 1300 qla2x00_beacon_show(struct device *dev, struct device_attribute *attr, 1301 char *buf) 1302 { 1303 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1304 int len = 0; 1305 1306 if (vha->hw->beacon_blink_led) 1307 len += scnprintf(buf + len, PAGE_SIZE-len, "Enabled\n"); 1308 else 1309 len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n"); 1310 return len; 1311 } 1312 1313 static ssize_t 1314 qla2x00_beacon_store(struct device *dev, struct device_attribute *attr, 1315 const char *buf, size_t count) 1316 { 1317 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1318 struct qla_hw_data *ha = vha->hw; 1319 int val = 0; 1320 int rval; 1321 1322 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 1323 return -EPERM; 1324 1325 if (sscanf(buf, "%d", &val) != 1) 1326 return -EINVAL; 1327 1328 mutex_lock(&vha->hw->optrom_mutex); 1329 if (qla2x00_chip_is_down(vha)) { 1330 mutex_unlock(&vha->hw->optrom_mutex); 1331 ql_log(ql_log_warn, vha, 0x707a, 1332 "Abort ISP active -- ignoring beacon request.\n"); 1333 return -EBUSY; 1334 } 1335 1336 if (val) 1337 rval = ha->isp_ops->beacon_on(vha); 1338 else 1339 rval = ha->isp_ops->beacon_off(vha); 1340 1341 if (rval != QLA_SUCCESS) 1342 count = 0; 1343 1344 mutex_unlock(&vha->hw->optrom_mutex); 1345 1346 return count; 1347 } 1348 1349 static ssize_t 1350 qla2x00_beacon_config_show(struct device *dev, struct device_attribute *attr, 1351 char *buf) 1352 { 1353 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1354 struct qla_hw_data *ha = vha->hw; 1355 uint16_t led[3] = { 0 }; 1356 1357 if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 1358 return -EPERM; 1359 1360 if (ql26xx_led_config(vha, 0, led)) 1361 return scnprintf(buf, PAGE_SIZE, "\n"); 1362 1363 return scnprintf(buf, PAGE_SIZE, "%#04hx %#04hx %#04hx\n", 1364 led[0], led[1], led[2]); 1365 } 1366 1367 static ssize_t 1368 qla2x00_beacon_config_store(struct device *dev, struct device_attribute *attr, 1369 const char *buf, size_t count) 1370 { 1371 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1372 struct qla_hw_data *ha = vha->hw; 1373 uint16_t options = BIT_0; 1374 uint16_t led[3] = { 0 }; 1375 uint16_t word[4]; 1376 int n; 1377 1378 if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 1379 return -EPERM; 1380 1381 n = sscanf(buf, "%hx %hx %hx %hx", word+0, word+1, word+2, word+3); 1382 if (n == 4) { 1383 if (word[0] == 3) { 1384 options |= BIT_3|BIT_2|BIT_1; 1385 led[0] = word[1]; 1386 led[1] = word[2]; 1387 led[2] = word[3]; 1388 goto write; 1389 } 1390 return -EINVAL; 1391 } 1392 1393 if (n == 2) { 1394 /* check led index */ 1395 if (word[0] == 0) { 1396 options |= BIT_2; 1397 led[0] = word[1]; 1398 goto write; 1399 } 1400 if (word[0] == 1) { 1401 options |= BIT_3; 1402 led[1] = word[1]; 1403 goto write; 1404 } 1405 if (word[0] == 2) { 1406 options |= BIT_1; 1407 led[2] = word[1]; 1408 goto write; 1409 } 1410 return -EINVAL; 1411 } 1412 1413 return -EINVAL; 1414 1415 write: 1416 if (ql26xx_led_config(vha, options, led)) 1417 return -EFAULT; 1418 1419 return count; 1420 } 1421 1422 static ssize_t 1423 qla2x00_optrom_bios_version_show(struct device *dev, 1424 struct device_attribute *attr, char *buf) 1425 { 1426 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1427 struct qla_hw_data *ha = vha->hw; 1428 1429 return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1], 1430 ha->bios_revision[0]); 1431 } 1432 1433 static ssize_t 1434 qla2x00_optrom_efi_version_show(struct device *dev, 1435 struct device_attribute *attr, char *buf) 1436 { 1437 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1438 struct qla_hw_data *ha = vha->hw; 1439 1440 return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1], 1441 ha->efi_revision[0]); 1442 } 1443 1444 static ssize_t 1445 qla2x00_optrom_fcode_version_show(struct device *dev, 1446 struct device_attribute *attr, char *buf) 1447 { 1448 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1449 struct qla_hw_data *ha = vha->hw; 1450 1451 return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1], 1452 ha->fcode_revision[0]); 1453 } 1454 1455 static ssize_t 1456 qla2x00_optrom_fw_version_show(struct device *dev, 1457 struct device_attribute *attr, char *buf) 1458 { 1459 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1460 struct qla_hw_data *ha = vha->hw; 1461 1462 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n", 1463 ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2], 1464 ha->fw_revision[3]); 1465 } 1466 1467 static ssize_t 1468 qla2x00_optrom_gold_fw_version_show(struct device *dev, 1469 struct device_attribute *attr, char *buf) 1470 { 1471 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1472 struct qla_hw_data *ha = vha->hw; 1473 1474 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && 1475 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 1476 return scnprintf(buf, PAGE_SIZE, "\n"); 1477 1478 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n", 1479 ha->gold_fw_version[0], ha->gold_fw_version[1], 1480 ha->gold_fw_version[2], ha->gold_fw_version[3]); 1481 } 1482 1483 static ssize_t 1484 qla2x00_total_isp_aborts_show(struct device *dev, 1485 struct device_attribute *attr, char *buf) 1486 { 1487 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1488 1489 return scnprintf(buf, PAGE_SIZE, "%d\n", 1490 vha->qla_stats.total_isp_aborts); 1491 } 1492 1493 static ssize_t 1494 qla24xx_84xx_fw_version_show(struct device *dev, 1495 struct device_attribute *attr, char *buf) 1496 { 1497 int rval = QLA_SUCCESS; 1498 uint16_t status[2] = { 0 }; 1499 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1500 struct qla_hw_data *ha = vha->hw; 1501 1502 if (!IS_QLA84XX(ha)) 1503 return scnprintf(buf, PAGE_SIZE, "\n"); 1504 1505 if (!ha->cs84xx->op_fw_version) { 1506 rval = qla84xx_verify_chip(vha, status); 1507 1508 if (!rval && !status[0]) 1509 return scnprintf(buf, PAGE_SIZE, "%u\n", 1510 (uint32_t)ha->cs84xx->op_fw_version); 1511 } 1512 1513 return scnprintf(buf, PAGE_SIZE, "\n"); 1514 } 1515 1516 static ssize_t 1517 qla2x00_serdes_version_show(struct device *dev, struct device_attribute *attr, 1518 char *buf) 1519 { 1520 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1521 struct qla_hw_data *ha = vha->hw; 1522 1523 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 1524 return scnprintf(buf, PAGE_SIZE, "\n"); 1525 1526 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n", 1527 ha->serdes_version[0], ha->serdes_version[1], 1528 ha->serdes_version[2]); 1529 } 1530 1531 static ssize_t 1532 qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr, 1533 char *buf) 1534 { 1535 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1536 struct qla_hw_data *ha = vha->hw; 1537 1538 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha) && 1539 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 1540 return scnprintf(buf, PAGE_SIZE, "\n"); 1541 1542 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n", 1543 ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2], 1544 ha->mpi_capabilities); 1545 } 1546 1547 static ssize_t 1548 qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr, 1549 char *buf) 1550 { 1551 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1552 struct qla_hw_data *ha = vha->hw; 1553 1554 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) 1555 return scnprintf(buf, PAGE_SIZE, "\n"); 1556 1557 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n", 1558 ha->phy_version[0], ha->phy_version[1], ha->phy_version[2]); 1559 } 1560 1561 static ssize_t 1562 qla2x00_flash_block_size_show(struct device *dev, 1563 struct device_attribute *attr, char *buf) 1564 { 1565 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1566 struct qla_hw_data *ha = vha->hw; 1567 1568 return scnprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size); 1569 } 1570 1571 static ssize_t 1572 qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr, 1573 char *buf) 1574 { 1575 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1576 1577 if (!IS_CNA_CAPABLE(vha->hw)) 1578 return scnprintf(buf, PAGE_SIZE, "\n"); 1579 1580 return scnprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id); 1581 } 1582 1583 static ssize_t 1584 qla2x00_vn_port_mac_address_show(struct device *dev, 1585 struct device_attribute *attr, char *buf) 1586 { 1587 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1588 1589 if (!IS_CNA_CAPABLE(vha->hw)) 1590 return scnprintf(buf, PAGE_SIZE, "\n"); 1591 1592 return scnprintf(buf, PAGE_SIZE, "%pMR\n", vha->fcoe_vn_port_mac); 1593 } 1594 1595 static ssize_t 1596 qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr, 1597 char *buf) 1598 { 1599 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1600 1601 return scnprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap); 1602 } 1603 1604 static ssize_t 1605 qla2x00_thermal_temp_show(struct device *dev, 1606 struct device_attribute *attr, char *buf) 1607 { 1608 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1609 uint16_t temp = 0; 1610 int rc; 1611 1612 mutex_lock(&vha->hw->optrom_mutex); 1613 if (qla2x00_chip_is_down(vha)) { 1614 mutex_unlock(&vha->hw->optrom_mutex); 1615 ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n"); 1616 goto done; 1617 } 1618 1619 if (vha->hw->flags.eeh_busy) { 1620 mutex_unlock(&vha->hw->optrom_mutex); 1621 ql_log(ql_log_warn, vha, 0x70dd, "PCI EEH busy.\n"); 1622 goto done; 1623 } 1624 1625 rc = qla2x00_get_thermal_temp(vha, &temp); 1626 mutex_unlock(&vha->hw->optrom_mutex); 1627 if (rc == QLA_SUCCESS) 1628 return scnprintf(buf, PAGE_SIZE, "%d\n", temp); 1629 1630 done: 1631 return scnprintf(buf, PAGE_SIZE, "\n"); 1632 } 1633 1634 static ssize_t 1635 qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr, 1636 char *buf) 1637 { 1638 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1639 int rval = QLA_FUNCTION_FAILED; 1640 uint16_t state[6]; 1641 uint32_t pstate; 1642 1643 if (IS_QLAFX00(vha->hw)) { 1644 pstate = qlafx00_fw_state_show(dev, attr, buf); 1645 return scnprintf(buf, PAGE_SIZE, "0x%x\n", pstate); 1646 } 1647 1648 mutex_lock(&vha->hw->optrom_mutex); 1649 if (qla2x00_chip_is_down(vha)) { 1650 mutex_unlock(&vha->hw->optrom_mutex); 1651 ql_log(ql_log_warn, vha, 0x707c, 1652 "ISP reset active.\n"); 1653 goto out; 1654 } else if (vha->hw->flags.eeh_busy) { 1655 mutex_unlock(&vha->hw->optrom_mutex); 1656 goto out; 1657 } 1658 1659 rval = qla2x00_get_firmware_state(vha, state); 1660 mutex_unlock(&vha->hw->optrom_mutex); 1661 out: 1662 if (rval != QLA_SUCCESS) { 1663 memset(state, -1, sizeof(state)); 1664 rval = qla2x00_get_firmware_state(vha, state); 1665 } 1666 1667 return scnprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", 1668 state[0], state[1], state[2], state[3], state[4], state[5]); 1669 } 1670 1671 static ssize_t 1672 qla2x00_diag_requests_show(struct device *dev, 1673 struct device_attribute *attr, char *buf) 1674 { 1675 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1676 1677 if (!IS_BIDI_CAPABLE(vha->hw)) 1678 return scnprintf(buf, PAGE_SIZE, "\n"); 1679 1680 return scnprintf(buf, PAGE_SIZE, "%llu\n", vha->bidi_stats.io_count); 1681 } 1682 1683 static ssize_t 1684 qla2x00_diag_megabytes_show(struct device *dev, 1685 struct device_attribute *attr, char *buf) 1686 { 1687 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1688 1689 if (!IS_BIDI_CAPABLE(vha->hw)) 1690 return scnprintf(buf, PAGE_SIZE, "\n"); 1691 1692 return scnprintf(buf, PAGE_SIZE, "%llu\n", 1693 vha->bidi_stats.transfer_bytes >> 20); 1694 } 1695 1696 static ssize_t 1697 qla2x00_fw_dump_size_show(struct device *dev, struct device_attribute *attr, 1698 char *buf) 1699 { 1700 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1701 struct qla_hw_data *ha = vha->hw; 1702 uint32_t size; 1703 1704 if (!ha->fw_dumped) 1705 size = 0; 1706 else if (IS_P3P_TYPE(ha)) 1707 size = ha->md_template_size + ha->md_dump_size; 1708 else 1709 size = ha->fw_dump_len; 1710 1711 return scnprintf(buf, PAGE_SIZE, "%d\n", size); 1712 } 1713 1714 static ssize_t 1715 qla2x00_allow_cna_fw_dump_show(struct device *dev, 1716 struct device_attribute *attr, char *buf) 1717 { 1718 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1719 1720 if (!IS_P3P_TYPE(vha->hw)) 1721 return scnprintf(buf, PAGE_SIZE, "\n"); 1722 else 1723 return scnprintf(buf, PAGE_SIZE, "%s\n", 1724 vha->hw->allow_cna_fw_dump ? "true" : "false"); 1725 } 1726 1727 static ssize_t 1728 qla2x00_allow_cna_fw_dump_store(struct device *dev, 1729 struct device_attribute *attr, const char *buf, size_t count) 1730 { 1731 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1732 int val = 0; 1733 1734 if (!IS_P3P_TYPE(vha->hw)) 1735 return -EINVAL; 1736 1737 if (sscanf(buf, "%d", &val) != 1) 1738 return -EINVAL; 1739 1740 vha->hw->allow_cna_fw_dump = val != 0; 1741 1742 return strlen(buf); 1743 } 1744 1745 static ssize_t 1746 qla2x00_pep_version_show(struct device *dev, struct device_attribute *attr, 1747 char *buf) 1748 { 1749 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1750 struct qla_hw_data *ha = vha->hw; 1751 1752 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 1753 return scnprintf(buf, PAGE_SIZE, "\n"); 1754 1755 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n", 1756 ha->pep_version[0], ha->pep_version[1], ha->pep_version[2]); 1757 } 1758 1759 static ssize_t 1760 qla2x00_min_supported_speed_show(struct device *dev, 1761 struct device_attribute *attr, char *buf) 1762 { 1763 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1764 struct qla_hw_data *ha = vha->hw; 1765 1766 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 1767 return scnprintf(buf, PAGE_SIZE, "\n"); 1768 1769 return scnprintf(buf, PAGE_SIZE, "%s\n", 1770 ha->min_supported_speed == 6 ? "64Gps" : 1771 ha->min_supported_speed == 5 ? "32Gps" : 1772 ha->min_supported_speed == 4 ? "16Gps" : 1773 ha->min_supported_speed == 3 ? "8Gps" : 1774 ha->min_supported_speed == 2 ? "4Gps" : 1775 ha->min_supported_speed != 0 ? "unknown" : ""); 1776 } 1777 1778 static ssize_t 1779 qla2x00_max_supported_speed_show(struct device *dev, 1780 struct device_attribute *attr, char *buf) 1781 { 1782 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1783 struct qla_hw_data *ha = vha->hw; 1784 1785 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 1786 return scnprintf(buf, PAGE_SIZE, "\n"); 1787 1788 return scnprintf(buf, PAGE_SIZE, "%s\n", 1789 ha->max_supported_speed == 2 ? "64Gps" : 1790 ha->max_supported_speed == 1 ? "32Gps" : 1791 ha->max_supported_speed == 0 ? "16Gps" : "unknown"); 1792 } 1793 1794 static ssize_t 1795 qla2x00_port_speed_store(struct device *dev, struct device_attribute *attr, 1796 const char *buf, size_t count) 1797 { 1798 struct scsi_qla_host *vha = shost_priv(dev_to_shost(dev)); 1799 ulong type, speed; 1800 int oldspeed, rval; 1801 int mode = QLA_SET_DATA_RATE_LR; 1802 struct qla_hw_data *ha = vha->hw; 1803 1804 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) { 1805 ql_log(ql_log_warn, vha, 0x70d8, 1806 "Speed setting not supported \n"); 1807 return -EINVAL; 1808 } 1809 1810 rval = kstrtol(buf, 10, &type); 1811 if (rval) 1812 return rval; 1813 speed = type; 1814 if (type == 40 || type == 80 || type == 160 || 1815 type == 320) { 1816 ql_dbg(ql_dbg_user, vha, 0x70d9, 1817 "Setting will be affected after a loss of sync\n"); 1818 type = type/10; 1819 mode = QLA_SET_DATA_RATE_NOLR; 1820 } 1821 1822 oldspeed = ha->set_data_rate; 1823 1824 switch (type) { 1825 case 0: 1826 ha->set_data_rate = PORT_SPEED_AUTO; 1827 break; 1828 case 4: 1829 ha->set_data_rate = PORT_SPEED_4GB; 1830 break; 1831 case 8: 1832 ha->set_data_rate = PORT_SPEED_8GB; 1833 break; 1834 case 16: 1835 ha->set_data_rate = PORT_SPEED_16GB; 1836 break; 1837 case 32: 1838 ha->set_data_rate = PORT_SPEED_32GB; 1839 break; 1840 default: 1841 ql_log(ql_log_warn, vha, 0x1199, 1842 "Unrecognized speed setting:%lx. Setting Autoneg\n", 1843 speed); 1844 ha->set_data_rate = PORT_SPEED_AUTO; 1845 } 1846 1847 if (qla2x00_chip_is_down(vha) || (oldspeed == ha->set_data_rate)) 1848 return -EINVAL; 1849 1850 ql_log(ql_log_info, vha, 0x70da, 1851 "Setting speed to %lx Gbps \n", type); 1852 1853 rval = qla2x00_set_data_rate(vha, mode); 1854 if (rval != QLA_SUCCESS) 1855 return -EIO; 1856 1857 return strlen(buf); 1858 } 1859 1860 static ssize_t 1861 qla2x00_port_speed_show(struct device *dev, struct device_attribute *attr, 1862 char *buf) 1863 { 1864 struct scsi_qla_host *vha = shost_priv(dev_to_shost(dev)); 1865 struct qla_hw_data *ha = vha->hw; 1866 ssize_t rval; 1867 char *spd[7] = {"0", "0", "0", "4", "8", "16", "32"}; 1868 1869 rval = qla2x00_get_data_rate(vha); 1870 if (rval != QLA_SUCCESS) { 1871 ql_log(ql_log_warn, vha, 0x70db, 1872 "Unable to get port speed rval:%zd\n", rval); 1873 return -EINVAL; 1874 } 1875 1876 return scnprintf(buf, PAGE_SIZE, "%s\n", spd[ha->link_data_rate]); 1877 } 1878 1879 /* ----- */ 1880 1881 static ssize_t 1882 qlini_mode_show(struct device *dev, struct device_attribute *attr, char *buf) 1883 { 1884 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1885 int len = 0; 1886 1887 len += scnprintf(buf + len, PAGE_SIZE-len, 1888 "Supported options: enabled | disabled | dual | exclusive\n"); 1889 1890 /* --- */ 1891 len += scnprintf(buf + len, PAGE_SIZE-len, "Current selection: "); 1892 1893 switch (vha->qlini_mode) { 1894 case QLA2XXX_INI_MODE_EXCLUSIVE: 1895 len += scnprintf(buf + len, PAGE_SIZE-len, 1896 QLA2XXX_INI_MODE_STR_EXCLUSIVE); 1897 break; 1898 case QLA2XXX_INI_MODE_DISABLED: 1899 len += scnprintf(buf + len, PAGE_SIZE-len, 1900 QLA2XXX_INI_MODE_STR_DISABLED); 1901 break; 1902 case QLA2XXX_INI_MODE_ENABLED: 1903 len += scnprintf(buf + len, PAGE_SIZE-len, 1904 QLA2XXX_INI_MODE_STR_ENABLED); 1905 break; 1906 case QLA2XXX_INI_MODE_DUAL: 1907 len += scnprintf(buf + len, PAGE_SIZE-len, 1908 QLA2XXX_INI_MODE_STR_DUAL); 1909 break; 1910 } 1911 len += scnprintf(buf + len, PAGE_SIZE-len, "\n"); 1912 1913 return len; 1914 } 1915 1916 static char *mode_to_str[] = { 1917 "exclusive", 1918 "disabled", 1919 "enabled", 1920 "dual", 1921 }; 1922 1923 #define NEED_EXCH_OFFLOAD(_exchg) ((_exchg) > FW_DEF_EXCHANGES_CNT) 1924 static void qla_set_ini_mode(scsi_qla_host_t *vha, int op) 1925 { 1926 enum { 1927 NO_ACTION, 1928 MODE_CHANGE_ACCEPT, 1929 MODE_CHANGE_NO_ACTION, 1930 TARGET_STILL_ACTIVE, 1931 }; 1932 int action = NO_ACTION; 1933 int set_mode = 0; 1934 u8 eo_toggle = 0; /* exchange offload flipped */ 1935 1936 switch (vha->qlini_mode) { 1937 case QLA2XXX_INI_MODE_DISABLED: 1938 switch (op) { 1939 case QLA2XXX_INI_MODE_DISABLED: 1940 if (qla_tgt_mode_enabled(vha)) { 1941 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) != 1942 vha->hw->flags.exchoffld_enabled) 1943 eo_toggle = 1; 1944 if (((vha->ql2xexchoffld != 1945 vha->u_ql2xexchoffld) && 1946 NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) || 1947 eo_toggle) { 1948 /* 1949 * The number of exchange to be offload 1950 * was tweaked or offload option was 1951 * flipped 1952 */ 1953 action = MODE_CHANGE_ACCEPT; 1954 } else { 1955 action = MODE_CHANGE_NO_ACTION; 1956 } 1957 } else { 1958 action = MODE_CHANGE_NO_ACTION; 1959 } 1960 break; 1961 case QLA2XXX_INI_MODE_EXCLUSIVE: 1962 if (qla_tgt_mode_enabled(vha)) { 1963 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) != 1964 vha->hw->flags.exchoffld_enabled) 1965 eo_toggle = 1; 1966 if (((vha->ql2xexchoffld != 1967 vha->u_ql2xexchoffld) && 1968 NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) || 1969 eo_toggle) { 1970 /* 1971 * The number of exchange to be offload 1972 * was tweaked or offload option was 1973 * flipped 1974 */ 1975 action = MODE_CHANGE_ACCEPT; 1976 } else { 1977 action = MODE_CHANGE_NO_ACTION; 1978 } 1979 } else { 1980 action = MODE_CHANGE_ACCEPT; 1981 } 1982 break; 1983 case QLA2XXX_INI_MODE_DUAL: 1984 action = MODE_CHANGE_ACCEPT; 1985 /* active_mode is target only, reset it to dual */ 1986 if (qla_tgt_mode_enabled(vha)) { 1987 set_mode = 1; 1988 action = MODE_CHANGE_ACCEPT; 1989 } else { 1990 action = MODE_CHANGE_NO_ACTION; 1991 } 1992 break; 1993 1994 case QLA2XXX_INI_MODE_ENABLED: 1995 if (qla_tgt_mode_enabled(vha)) 1996 action = TARGET_STILL_ACTIVE; 1997 else { 1998 action = MODE_CHANGE_ACCEPT; 1999 set_mode = 1; 2000 } 2001 break; 2002 } 2003 break; 2004 2005 case QLA2XXX_INI_MODE_EXCLUSIVE: 2006 switch (op) { 2007 case QLA2XXX_INI_MODE_EXCLUSIVE: 2008 if (qla_tgt_mode_enabled(vha)) { 2009 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) != 2010 vha->hw->flags.exchoffld_enabled) 2011 eo_toggle = 1; 2012 if (((vha->ql2xexchoffld != 2013 vha->u_ql2xexchoffld) && 2014 NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) || 2015 eo_toggle) 2016 /* 2017 * The number of exchange to be offload 2018 * was tweaked or offload option was 2019 * flipped 2020 */ 2021 action = MODE_CHANGE_ACCEPT; 2022 else 2023 action = NO_ACTION; 2024 } else 2025 action = NO_ACTION; 2026 2027 break; 2028 2029 case QLA2XXX_INI_MODE_DISABLED: 2030 if (qla_tgt_mode_enabled(vha)) { 2031 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) != 2032 vha->hw->flags.exchoffld_enabled) 2033 eo_toggle = 1; 2034 if (((vha->ql2xexchoffld != 2035 vha->u_ql2xexchoffld) && 2036 NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) || 2037 eo_toggle) 2038 action = MODE_CHANGE_ACCEPT; 2039 else 2040 action = MODE_CHANGE_NO_ACTION; 2041 } else 2042 action = MODE_CHANGE_NO_ACTION; 2043 break; 2044 2045 case QLA2XXX_INI_MODE_DUAL: /* exclusive -> dual */ 2046 if (qla_tgt_mode_enabled(vha)) { 2047 action = MODE_CHANGE_ACCEPT; 2048 set_mode = 1; 2049 } else 2050 action = MODE_CHANGE_ACCEPT; 2051 break; 2052 2053 case QLA2XXX_INI_MODE_ENABLED: 2054 if (qla_tgt_mode_enabled(vha)) 2055 action = TARGET_STILL_ACTIVE; 2056 else { 2057 if (vha->hw->flags.fw_started) 2058 action = MODE_CHANGE_NO_ACTION; 2059 else 2060 action = MODE_CHANGE_ACCEPT; 2061 } 2062 break; 2063 } 2064 break; 2065 2066 case QLA2XXX_INI_MODE_ENABLED: 2067 switch (op) { 2068 case QLA2XXX_INI_MODE_ENABLED: 2069 if (NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg) != 2070 vha->hw->flags.exchoffld_enabled) 2071 eo_toggle = 1; 2072 if (((vha->ql2xiniexchg != vha->u_ql2xiniexchg) && 2073 NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg)) || 2074 eo_toggle) 2075 action = MODE_CHANGE_ACCEPT; 2076 else 2077 action = NO_ACTION; 2078 break; 2079 case QLA2XXX_INI_MODE_DUAL: 2080 case QLA2XXX_INI_MODE_DISABLED: 2081 action = MODE_CHANGE_ACCEPT; 2082 break; 2083 default: 2084 action = MODE_CHANGE_NO_ACTION; 2085 break; 2086 } 2087 break; 2088 2089 case QLA2XXX_INI_MODE_DUAL: 2090 switch (op) { 2091 case QLA2XXX_INI_MODE_DUAL: 2092 if (qla_tgt_mode_enabled(vha) || 2093 qla_dual_mode_enabled(vha)) { 2094 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld + 2095 vha->u_ql2xiniexchg) != 2096 vha->hw->flags.exchoffld_enabled) 2097 eo_toggle = 1; 2098 2099 if ((((vha->ql2xexchoffld + 2100 vha->ql2xiniexchg) != 2101 (vha->u_ql2xiniexchg + 2102 vha->u_ql2xexchoffld)) && 2103 NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg + 2104 vha->u_ql2xexchoffld)) || eo_toggle) 2105 action = MODE_CHANGE_ACCEPT; 2106 else 2107 action = NO_ACTION; 2108 } else { 2109 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld + 2110 vha->u_ql2xiniexchg) != 2111 vha->hw->flags.exchoffld_enabled) 2112 eo_toggle = 1; 2113 2114 if ((((vha->ql2xexchoffld + vha->ql2xiniexchg) 2115 != (vha->u_ql2xiniexchg + 2116 vha->u_ql2xexchoffld)) && 2117 NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg + 2118 vha->u_ql2xexchoffld)) || eo_toggle) 2119 action = MODE_CHANGE_NO_ACTION; 2120 else 2121 action = NO_ACTION; 2122 } 2123 break; 2124 2125 case QLA2XXX_INI_MODE_DISABLED: 2126 if (qla_tgt_mode_enabled(vha) || 2127 qla_dual_mode_enabled(vha)) { 2128 /* turning off initiator mode */ 2129 set_mode = 1; 2130 action = MODE_CHANGE_ACCEPT; 2131 } else { 2132 action = MODE_CHANGE_NO_ACTION; 2133 } 2134 break; 2135 2136 case QLA2XXX_INI_MODE_EXCLUSIVE: 2137 if (qla_tgt_mode_enabled(vha) || 2138 qla_dual_mode_enabled(vha)) { 2139 set_mode = 1; 2140 action = MODE_CHANGE_ACCEPT; 2141 } else { 2142 action = MODE_CHANGE_ACCEPT; 2143 } 2144 break; 2145 2146 case QLA2XXX_INI_MODE_ENABLED: 2147 if (qla_tgt_mode_enabled(vha) || 2148 qla_dual_mode_enabled(vha)) { 2149 action = TARGET_STILL_ACTIVE; 2150 } else { 2151 action = MODE_CHANGE_ACCEPT; 2152 } 2153 } 2154 break; 2155 } 2156 2157 switch (action) { 2158 case MODE_CHANGE_ACCEPT: 2159 ql_log(ql_log_warn, vha, 0xffff, 2160 "Mode change accepted. From %s to %s, Tgt exchg %d|%d. ini exchg %d|%d\n", 2161 mode_to_str[vha->qlini_mode], mode_to_str[op], 2162 vha->ql2xexchoffld, vha->u_ql2xexchoffld, 2163 vha->ql2xiniexchg, vha->u_ql2xiniexchg); 2164 2165 vha->qlini_mode = op; 2166 vha->ql2xexchoffld = vha->u_ql2xexchoffld; 2167 vha->ql2xiniexchg = vha->u_ql2xiniexchg; 2168 if (set_mode) 2169 qlt_set_mode(vha); 2170 vha->flags.online = 1; 2171 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2172 break; 2173 2174 case MODE_CHANGE_NO_ACTION: 2175 ql_log(ql_log_warn, vha, 0xffff, 2176 "Mode is set. No action taken. From %s to %s, Tgt exchg %d|%d. ini exchg %d|%d\n", 2177 mode_to_str[vha->qlini_mode], mode_to_str[op], 2178 vha->ql2xexchoffld, vha->u_ql2xexchoffld, 2179 vha->ql2xiniexchg, vha->u_ql2xiniexchg); 2180 vha->qlini_mode = op; 2181 vha->ql2xexchoffld = vha->u_ql2xexchoffld; 2182 vha->ql2xiniexchg = vha->u_ql2xiniexchg; 2183 break; 2184 2185 case TARGET_STILL_ACTIVE: 2186 ql_log(ql_log_warn, vha, 0xffff, 2187 "Target Mode is active. Unable to change Mode.\n"); 2188 break; 2189 2190 case NO_ACTION: 2191 default: 2192 ql_log(ql_log_warn, vha, 0xffff, 2193 "Mode unchange. No action taken. %d|%d pct %d|%d.\n", 2194 vha->qlini_mode, op, 2195 vha->ql2xexchoffld, vha->u_ql2xexchoffld); 2196 break; 2197 } 2198 } 2199 2200 static ssize_t 2201 qlini_mode_store(struct device *dev, struct device_attribute *attr, 2202 const char *buf, size_t count) 2203 { 2204 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 2205 int ini; 2206 2207 if (!buf) 2208 return -EINVAL; 2209 2210 if (strncasecmp(QLA2XXX_INI_MODE_STR_EXCLUSIVE, buf, 2211 strlen(QLA2XXX_INI_MODE_STR_EXCLUSIVE)) == 0) 2212 ini = QLA2XXX_INI_MODE_EXCLUSIVE; 2213 else if (strncasecmp(QLA2XXX_INI_MODE_STR_DISABLED, buf, 2214 strlen(QLA2XXX_INI_MODE_STR_DISABLED)) == 0) 2215 ini = QLA2XXX_INI_MODE_DISABLED; 2216 else if (strncasecmp(QLA2XXX_INI_MODE_STR_ENABLED, buf, 2217 strlen(QLA2XXX_INI_MODE_STR_ENABLED)) == 0) 2218 ini = QLA2XXX_INI_MODE_ENABLED; 2219 else if (strncasecmp(QLA2XXX_INI_MODE_STR_DUAL, buf, 2220 strlen(QLA2XXX_INI_MODE_STR_DUAL)) == 0) 2221 ini = QLA2XXX_INI_MODE_DUAL; 2222 else 2223 return -EINVAL; 2224 2225 qla_set_ini_mode(vha, ini); 2226 return strlen(buf); 2227 } 2228 2229 static ssize_t 2230 ql2xexchoffld_show(struct device *dev, struct device_attribute *attr, 2231 char *buf) 2232 { 2233 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 2234 int len = 0; 2235 2236 len += scnprintf(buf + len, PAGE_SIZE-len, 2237 "target exchange: new %d : current: %d\n\n", 2238 vha->u_ql2xexchoffld, vha->ql2xexchoffld); 2239 2240 len += scnprintf(buf + len, PAGE_SIZE-len, 2241 "Please (re)set operating mode via \"/sys/class/scsi_host/host%ld/qlini_mode\" to load new setting.\n", 2242 vha->host_no); 2243 2244 return len; 2245 } 2246 2247 static ssize_t 2248 ql2xexchoffld_store(struct device *dev, struct device_attribute *attr, 2249 const char *buf, size_t count) 2250 { 2251 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 2252 int val = 0; 2253 2254 if (sscanf(buf, "%d", &val) != 1) 2255 return -EINVAL; 2256 2257 if (val > FW_MAX_EXCHANGES_CNT) 2258 val = FW_MAX_EXCHANGES_CNT; 2259 else if (val < 0) 2260 val = 0; 2261 2262 vha->u_ql2xexchoffld = val; 2263 return strlen(buf); 2264 } 2265 2266 static ssize_t 2267 ql2xiniexchg_show(struct device *dev, struct device_attribute *attr, 2268 char *buf) 2269 { 2270 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 2271 int len = 0; 2272 2273 len += scnprintf(buf + len, PAGE_SIZE-len, 2274 "target exchange: new %d : current: %d\n\n", 2275 vha->u_ql2xiniexchg, vha->ql2xiniexchg); 2276 2277 len += scnprintf(buf + len, PAGE_SIZE-len, 2278 "Please (re)set operating mode via \"/sys/class/scsi_host/host%ld/qlini_mode\" to load new setting.\n", 2279 vha->host_no); 2280 2281 return len; 2282 } 2283 2284 static ssize_t 2285 ql2xiniexchg_store(struct device *dev, struct device_attribute *attr, 2286 const char *buf, size_t count) 2287 { 2288 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 2289 int val = 0; 2290 2291 if (sscanf(buf, "%d", &val) != 1) 2292 return -EINVAL; 2293 2294 if (val > FW_MAX_EXCHANGES_CNT) 2295 val = FW_MAX_EXCHANGES_CNT; 2296 else if (val < 0) 2297 val = 0; 2298 2299 vha->u_ql2xiniexchg = val; 2300 return strlen(buf); 2301 } 2302 2303 static ssize_t 2304 qla2x00_dif_bundle_statistics_show(struct device *dev, 2305 struct device_attribute *attr, char *buf) 2306 { 2307 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 2308 struct qla_hw_data *ha = vha->hw; 2309 2310 return scnprintf(buf, PAGE_SIZE, 2311 "cross=%llu read=%llu write=%llu kalloc=%llu dma_alloc=%llu unusable=%u\n", 2312 ha->dif_bundle_crossed_pages, ha->dif_bundle_reads, 2313 ha->dif_bundle_writes, ha->dif_bundle_kallocs, 2314 ha->dif_bundle_dma_allocs, ha->pool.unusable.count); 2315 } 2316 2317 static ssize_t 2318 qla2x00_fw_attr_show(struct device *dev, 2319 struct device_attribute *attr, char *buf) 2320 { 2321 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 2322 struct qla_hw_data *ha = vha->hw; 2323 2324 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 2325 return scnprintf(buf, PAGE_SIZE, "\n"); 2326 2327 return scnprintf(buf, PAGE_SIZE, "%llx\n", 2328 (uint64_t)ha->fw_attributes_ext[1] << 48 | 2329 (uint64_t)ha->fw_attributes_ext[0] << 32 | 2330 (uint64_t)ha->fw_attributes_h << 16 | 2331 (uint64_t)ha->fw_attributes); 2332 } 2333 2334 static ssize_t 2335 qla2x00_port_no_show(struct device *dev, struct device_attribute *attr, 2336 char *buf) 2337 { 2338 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 2339 2340 return scnprintf(buf, PAGE_SIZE, "%u\n", vha->hw->port_no); 2341 } 2342 2343 static ssize_t 2344 qla2x00_dport_diagnostics_show(struct device *dev, 2345 struct device_attribute *attr, char *buf) 2346 { 2347 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 2348 2349 if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) && 2350 !IS_QLA28XX(vha->hw)) 2351 return scnprintf(buf, PAGE_SIZE, "\n"); 2352 2353 if (!*vha->dport_data) 2354 return scnprintf(buf, PAGE_SIZE, "\n"); 2355 2356 return scnprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n", 2357 vha->dport_data[0], vha->dport_data[1], 2358 vha->dport_data[2], vha->dport_data[3]); 2359 } 2360 static DEVICE_ATTR(dport_diagnostics, 0444, 2361 qla2x00_dport_diagnostics_show, NULL); 2362 2363 static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_driver_version_show, NULL); 2364 static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL); 2365 static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL); 2366 static DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL); 2367 static DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL); 2368 static DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL); 2369 static DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL); 2370 static DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL); 2371 static DEVICE_ATTR(link_state, S_IRUGO, qla2x00_link_state_show, NULL); 2372 static DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show, qla2x00_zio_store); 2373 static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show, 2374 qla2x00_zio_timer_store); 2375 static DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show, 2376 qla2x00_beacon_store); 2377 static DEVICE_ATTR(beacon_config, 0644, qla2x00_beacon_config_show, 2378 qla2x00_beacon_config_store); 2379 static DEVICE_ATTR(optrom_bios_version, S_IRUGO, 2380 qla2x00_optrom_bios_version_show, NULL); 2381 static DEVICE_ATTR(optrom_efi_version, S_IRUGO, 2382 qla2x00_optrom_efi_version_show, NULL); 2383 static DEVICE_ATTR(optrom_fcode_version, S_IRUGO, 2384 qla2x00_optrom_fcode_version_show, NULL); 2385 static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show, 2386 NULL); 2387 static DEVICE_ATTR(optrom_gold_fw_version, S_IRUGO, 2388 qla2x00_optrom_gold_fw_version_show, NULL); 2389 static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show, 2390 NULL); 2391 static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show, 2392 NULL); 2393 static DEVICE_ATTR(serdes_version, 0444, qla2x00_serdes_version_show, NULL); 2394 static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL); 2395 static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL); 2396 static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show, 2397 NULL); 2398 static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL); 2399 static DEVICE_ATTR(vn_port_mac_address, S_IRUGO, 2400 qla2x00_vn_port_mac_address_show, NULL); 2401 static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL); 2402 static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL); 2403 static DEVICE_ATTR(thermal_temp, S_IRUGO, qla2x00_thermal_temp_show, NULL); 2404 static DEVICE_ATTR(diag_requests, S_IRUGO, qla2x00_diag_requests_show, NULL); 2405 static DEVICE_ATTR(diag_megabytes, S_IRUGO, qla2x00_diag_megabytes_show, NULL); 2406 static DEVICE_ATTR(fw_dump_size, S_IRUGO, qla2x00_fw_dump_size_show, NULL); 2407 static DEVICE_ATTR(allow_cna_fw_dump, S_IRUGO | S_IWUSR, 2408 qla2x00_allow_cna_fw_dump_show, 2409 qla2x00_allow_cna_fw_dump_store); 2410 static DEVICE_ATTR(pep_version, S_IRUGO, qla2x00_pep_version_show, NULL); 2411 static DEVICE_ATTR(min_supported_speed, 0444, 2412 qla2x00_min_supported_speed_show, NULL); 2413 static DEVICE_ATTR(max_supported_speed, 0444, 2414 qla2x00_max_supported_speed_show, NULL); 2415 static DEVICE_ATTR(zio_threshold, 0644, 2416 qla_zio_threshold_show, 2417 qla_zio_threshold_store); 2418 static DEVICE_ATTR_RW(qlini_mode); 2419 static DEVICE_ATTR_RW(ql2xexchoffld); 2420 static DEVICE_ATTR_RW(ql2xiniexchg); 2421 static DEVICE_ATTR(dif_bundle_statistics, 0444, 2422 qla2x00_dif_bundle_statistics_show, NULL); 2423 static DEVICE_ATTR(port_speed, 0644, qla2x00_port_speed_show, 2424 qla2x00_port_speed_store); 2425 static DEVICE_ATTR(port_no, 0444, qla2x00_port_no_show, NULL); 2426 static DEVICE_ATTR(fw_attr, 0444, qla2x00_fw_attr_show, NULL); 2427 2428 2429 struct device_attribute *qla2x00_host_attrs[] = { 2430 &dev_attr_driver_version, 2431 &dev_attr_fw_version, 2432 &dev_attr_serial_num, 2433 &dev_attr_isp_name, 2434 &dev_attr_isp_id, 2435 &dev_attr_model_name, 2436 &dev_attr_model_desc, 2437 &dev_attr_pci_info, 2438 &dev_attr_link_state, 2439 &dev_attr_zio, 2440 &dev_attr_zio_timer, 2441 &dev_attr_beacon, 2442 &dev_attr_beacon_config, 2443 &dev_attr_optrom_bios_version, 2444 &dev_attr_optrom_efi_version, 2445 &dev_attr_optrom_fcode_version, 2446 &dev_attr_optrom_fw_version, 2447 &dev_attr_84xx_fw_version, 2448 &dev_attr_total_isp_aborts, 2449 &dev_attr_serdes_version, 2450 &dev_attr_mpi_version, 2451 &dev_attr_phy_version, 2452 &dev_attr_flash_block_size, 2453 &dev_attr_vlan_id, 2454 &dev_attr_vn_port_mac_address, 2455 &dev_attr_fabric_param, 2456 &dev_attr_fw_state, 2457 &dev_attr_optrom_gold_fw_version, 2458 &dev_attr_thermal_temp, 2459 &dev_attr_diag_requests, 2460 &dev_attr_diag_megabytes, 2461 &dev_attr_fw_dump_size, 2462 &dev_attr_allow_cna_fw_dump, 2463 &dev_attr_pep_version, 2464 &dev_attr_min_supported_speed, 2465 &dev_attr_max_supported_speed, 2466 &dev_attr_zio_threshold, 2467 &dev_attr_dif_bundle_statistics, 2468 &dev_attr_port_speed, 2469 &dev_attr_port_no, 2470 &dev_attr_fw_attr, 2471 &dev_attr_dport_diagnostics, 2472 NULL, /* reserve for qlini_mode */ 2473 NULL, /* reserve for ql2xiniexchg */ 2474 NULL, /* reserve for ql2xexchoffld */ 2475 NULL, 2476 }; 2477 2478 void qla_insert_tgt_attrs(void) 2479 { 2480 struct device_attribute **attr; 2481 2482 /* advance to empty slot */ 2483 for (attr = &qla2x00_host_attrs[0]; *attr; ++attr) 2484 continue; 2485 2486 *attr = &dev_attr_qlini_mode; 2487 attr++; 2488 *attr = &dev_attr_ql2xiniexchg; 2489 attr++; 2490 *attr = &dev_attr_ql2xexchoffld; 2491 } 2492 2493 /* Host attributes. */ 2494 2495 static void 2496 qla2x00_get_host_port_id(struct Scsi_Host *shost) 2497 { 2498 scsi_qla_host_t *vha = shost_priv(shost); 2499 2500 fc_host_port_id(shost) = vha->d_id.b.domain << 16 | 2501 vha->d_id.b.area << 8 | vha->d_id.b.al_pa; 2502 } 2503 2504 static void 2505 qla2x00_get_host_speed(struct Scsi_Host *shost) 2506 { 2507 scsi_qla_host_t *vha = shost_priv(shost); 2508 u32 speed; 2509 2510 if (IS_QLAFX00(vha->hw)) { 2511 qlafx00_get_host_speed(shost); 2512 return; 2513 } 2514 2515 switch (vha->hw->link_data_rate) { 2516 case PORT_SPEED_1GB: 2517 speed = FC_PORTSPEED_1GBIT; 2518 break; 2519 case PORT_SPEED_2GB: 2520 speed = FC_PORTSPEED_2GBIT; 2521 break; 2522 case PORT_SPEED_4GB: 2523 speed = FC_PORTSPEED_4GBIT; 2524 break; 2525 case PORT_SPEED_8GB: 2526 speed = FC_PORTSPEED_8GBIT; 2527 break; 2528 case PORT_SPEED_10GB: 2529 speed = FC_PORTSPEED_10GBIT; 2530 break; 2531 case PORT_SPEED_16GB: 2532 speed = FC_PORTSPEED_16GBIT; 2533 break; 2534 case PORT_SPEED_32GB: 2535 speed = FC_PORTSPEED_32GBIT; 2536 break; 2537 case PORT_SPEED_64GB: 2538 speed = FC_PORTSPEED_64GBIT; 2539 break; 2540 default: 2541 speed = FC_PORTSPEED_UNKNOWN; 2542 break; 2543 } 2544 2545 fc_host_speed(shost) = speed; 2546 } 2547 2548 static void 2549 qla2x00_get_host_port_type(struct Scsi_Host *shost) 2550 { 2551 scsi_qla_host_t *vha = shost_priv(shost); 2552 uint32_t port_type; 2553 2554 if (vha->vp_idx) { 2555 fc_host_port_type(shost) = FC_PORTTYPE_NPIV; 2556 return; 2557 } 2558 switch (vha->hw->current_topology) { 2559 case ISP_CFG_NL: 2560 port_type = FC_PORTTYPE_LPORT; 2561 break; 2562 case ISP_CFG_FL: 2563 port_type = FC_PORTTYPE_NLPORT; 2564 break; 2565 case ISP_CFG_N: 2566 port_type = FC_PORTTYPE_PTP; 2567 break; 2568 case ISP_CFG_F: 2569 port_type = FC_PORTTYPE_NPORT; 2570 break; 2571 default: 2572 port_type = FC_PORTTYPE_UNKNOWN; 2573 break; 2574 } 2575 2576 fc_host_port_type(shost) = port_type; 2577 } 2578 2579 static void 2580 qla2x00_get_starget_node_name(struct scsi_target *starget) 2581 { 2582 struct Scsi_Host *host = dev_to_shost(starget->dev.parent); 2583 scsi_qla_host_t *vha = shost_priv(host); 2584 fc_port_t *fcport; 2585 u64 node_name = 0; 2586 2587 list_for_each_entry(fcport, &vha->vp_fcports, list) { 2588 if (fcport->rport && 2589 starget->id == fcport->rport->scsi_target_id) { 2590 node_name = wwn_to_u64(fcport->node_name); 2591 break; 2592 } 2593 } 2594 2595 fc_starget_node_name(starget) = node_name; 2596 } 2597 2598 static void 2599 qla2x00_get_starget_port_name(struct scsi_target *starget) 2600 { 2601 struct Scsi_Host *host = dev_to_shost(starget->dev.parent); 2602 scsi_qla_host_t *vha = shost_priv(host); 2603 fc_port_t *fcport; 2604 u64 port_name = 0; 2605 2606 list_for_each_entry(fcport, &vha->vp_fcports, list) { 2607 if (fcport->rport && 2608 starget->id == fcport->rport->scsi_target_id) { 2609 port_name = wwn_to_u64(fcport->port_name); 2610 break; 2611 } 2612 } 2613 2614 fc_starget_port_name(starget) = port_name; 2615 } 2616 2617 static void 2618 qla2x00_get_starget_port_id(struct scsi_target *starget) 2619 { 2620 struct Scsi_Host *host = dev_to_shost(starget->dev.parent); 2621 scsi_qla_host_t *vha = shost_priv(host); 2622 fc_port_t *fcport; 2623 uint32_t port_id = ~0U; 2624 2625 list_for_each_entry(fcport, &vha->vp_fcports, list) { 2626 if (fcport->rport && 2627 starget->id == fcport->rport->scsi_target_id) { 2628 port_id = fcport->d_id.b.domain << 16 | 2629 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa; 2630 break; 2631 } 2632 } 2633 2634 fc_starget_port_id(starget) = port_id; 2635 } 2636 2637 static inline void 2638 qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout) 2639 { 2640 rport->dev_loss_tmo = timeout ? timeout : 1; 2641 } 2642 2643 static void 2644 qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport) 2645 { 2646 struct Scsi_Host *host = rport_to_shost(rport); 2647 fc_port_t *fcport = *(fc_port_t **)rport->dd_data; 2648 unsigned long flags; 2649 2650 if (!fcport) 2651 return; 2652 2653 /* Now that the rport has been deleted, set the fcport state to 2654 FCS_DEVICE_DEAD */ 2655 qla2x00_set_fcport_state(fcport, FCS_DEVICE_DEAD); 2656 2657 /* 2658 * Transport has effectively 'deleted' the rport, clear 2659 * all local references. 2660 */ 2661 spin_lock_irqsave(host->host_lock, flags); 2662 fcport->rport = fcport->drport = NULL; 2663 *((fc_port_t **)rport->dd_data) = NULL; 2664 spin_unlock_irqrestore(host->host_lock, flags); 2665 2666 if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags)) 2667 return; 2668 2669 if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) { 2670 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16); 2671 return; 2672 } 2673 } 2674 2675 static void 2676 qla2x00_terminate_rport_io(struct fc_rport *rport) 2677 { 2678 fc_port_t *fcport = *(fc_port_t **)rport->dd_data; 2679 2680 if (!fcport) 2681 return; 2682 2683 if (test_bit(UNLOADING, &fcport->vha->dpc_flags)) 2684 return; 2685 2686 if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags)) 2687 return; 2688 2689 if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) { 2690 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16); 2691 return; 2692 } 2693 /* 2694 * At this point all fcport's software-states are cleared. Perform any 2695 * final cleanup of firmware resources (PCBs and XCBs). 2696 */ 2697 if (fcport->loop_id != FC_NO_LOOP_ID) { 2698 if (IS_FWI2_CAPABLE(fcport->vha->hw)) 2699 fcport->vha->hw->isp_ops->fabric_logout(fcport->vha, 2700 fcport->loop_id, fcport->d_id.b.domain, 2701 fcport->d_id.b.area, fcport->d_id.b.al_pa); 2702 else 2703 qla2x00_port_logout(fcport->vha, fcport); 2704 } 2705 } 2706 2707 static int 2708 qla2x00_issue_lip(struct Scsi_Host *shost) 2709 { 2710 scsi_qla_host_t *vha = shost_priv(shost); 2711 2712 if (IS_QLAFX00(vha->hw)) 2713 return 0; 2714 2715 qla2x00_loop_reset(vha); 2716 return 0; 2717 } 2718 2719 static struct fc_host_statistics * 2720 qla2x00_get_fc_host_stats(struct Scsi_Host *shost) 2721 { 2722 scsi_qla_host_t *vha = shost_priv(shost); 2723 struct qla_hw_data *ha = vha->hw; 2724 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 2725 int rval; 2726 struct link_statistics *stats; 2727 dma_addr_t stats_dma; 2728 struct fc_host_statistics *p = &vha->fc_host_stat; 2729 2730 memset(p, -1, sizeof(*p)); 2731 2732 if (IS_QLAFX00(vha->hw)) 2733 goto done; 2734 2735 if (test_bit(UNLOADING, &vha->dpc_flags)) 2736 goto done; 2737 2738 if (unlikely(pci_channel_offline(ha->pdev))) 2739 goto done; 2740 2741 if (qla2x00_chip_is_down(vha)) 2742 goto done; 2743 2744 stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma, 2745 GFP_KERNEL); 2746 if (!stats) { 2747 ql_log(ql_log_warn, vha, 0x707d, 2748 "Failed to allocate memory for stats.\n"); 2749 goto done; 2750 } 2751 2752 rval = QLA_FUNCTION_FAILED; 2753 if (IS_FWI2_CAPABLE(ha)) { 2754 rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, 0); 2755 } else if (atomic_read(&base_vha->loop_state) == LOOP_READY && 2756 !ha->dpc_active) { 2757 /* Must be in a 'READY' state for statistics retrieval. */ 2758 rval = qla2x00_get_link_status(base_vha, base_vha->loop_id, 2759 stats, stats_dma); 2760 } 2761 2762 if (rval != QLA_SUCCESS) 2763 goto done_free; 2764 2765 p->link_failure_count = le32_to_cpu(stats->link_fail_cnt); 2766 p->loss_of_sync_count = le32_to_cpu(stats->loss_sync_cnt); 2767 p->loss_of_signal_count = le32_to_cpu(stats->loss_sig_cnt); 2768 p->prim_seq_protocol_err_count = le32_to_cpu(stats->prim_seq_err_cnt); 2769 p->invalid_tx_word_count = le32_to_cpu(stats->inval_xmit_word_cnt); 2770 p->invalid_crc_count = le32_to_cpu(stats->inval_crc_cnt); 2771 if (IS_FWI2_CAPABLE(ha)) { 2772 p->lip_count = le32_to_cpu(stats->lip_cnt); 2773 p->tx_frames = le32_to_cpu(stats->tx_frames); 2774 p->rx_frames = le32_to_cpu(stats->rx_frames); 2775 p->dumped_frames = le32_to_cpu(stats->discarded_frames); 2776 p->nos_count = le32_to_cpu(stats->nos_rcvd); 2777 p->error_frames = 2778 le32_to_cpu(stats->dropped_frames) + 2779 le32_to_cpu(stats->discarded_frames); 2780 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 2781 p->rx_words = le64_to_cpu(stats->fpm_recv_word_cnt); 2782 p->tx_words = le64_to_cpu(stats->fpm_xmit_word_cnt); 2783 } else { 2784 p->rx_words = vha->qla_stats.input_bytes; 2785 p->tx_words = vha->qla_stats.output_bytes; 2786 } 2787 } 2788 p->fcp_control_requests = vha->qla_stats.control_requests; 2789 p->fcp_input_requests = vha->qla_stats.input_requests; 2790 p->fcp_output_requests = vha->qla_stats.output_requests; 2791 p->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20; 2792 p->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20; 2793 p->seconds_since_last_reset = 2794 get_jiffies_64() - vha->qla_stats.jiffies_at_last_reset; 2795 do_div(p->seconds_since_last_reset, HZ); 2796 2797 done_free: 2798 dma_free_coherent(&ha->pdev->dev, sizeof(struct link_statistics), 2799 stats, stats_dma); 2800 done: 2801 return p; 2802 } 2803 2804 static void 2805 qla2x00_reset_host_stats(struct Scsi_Host *shost) 2806 { 2807 scsi_qla_host_t *vha = shost_priv(shost); 2808 struct qla_hw_data *ha = vha->hw; 2809 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 2810 struct link_statistics *stats; 2811 dma_addr_t stats_dma; 2812 2813 memset(&vha->qla_stats, 0, sizeof(vha->qla_stats)); 2814 memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat)); 2815 2816 vha->qla_stats.jiffies_at_last_reset = get_jiffies_64(); 2817 2818 if (IS_FWI2_CAPABLE(ha)) { 2819 stats = dma_alloc_coherent(&ha->pdev->dev, 2820 sizeof(*stats), &stats_dma, GFP_KERNEL); 2821 if (!stats) { 2822 ql_log(ql_log_warn, vha, 0x70d7, 2823 "Failed to allocate memory for stats.\n"); 2824 return; 2825 } 2826 2827 /* reset firmware statistics */ 2828 qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0); 2829 2830 dma_free_coherent(&ha->pdev->dev, sizeof(*stats), 2831 stats, stats_dma); 2832 } 2833 } 2834 2835 static void 2836 qla2x00_get_host_symbolic_name(struct Scsi_Host *shost) 2837 { 2838 scsi_qla_host_t *vha = shost_priv(shost); 2839 2840 qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost), 2841 sizeof(fc_host_symbolic_name(shost))); 2842 } 2843 2844 static void 2845 qla2x00_set_host_system_hostname(struct Scsi_Host *shost) 2846 { 2847 scsi_qla_host_t *vha = shost_priv(shost); 2848 2849 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 2850 } 2851 2852 static void 2853 qla2x00_get_host_fabric_name(struct Scsi_Host *shost) 2854 { 2855 scsi_qla_host_t *vha = shost_priv(shost); 2856 static const uint8_t node_name[WWN_SIZE] = { 2857 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF 2858 }; 2859 u64 fabric_name = wwn_to_u64(node_name); 2860 2861 if (vha->device_flags & SWITCH_FOUND) 2862 fabric_name = wwn_to_u64(vha->fabric_node_name); 2863 2864 fc_host_fabric_name(shost) = fabric_name; 2865 } 2866 2867 static void 2868 qla2x00_get_host_port_state(struct Scsi_Host *shost) 2869 { 2870 scsi_qla_host_t *vha = shost_priv(shost); 2871 struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev); 2872 2873 if (!base_vha->flags.online) { 2874 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; 2875 return; 2876 } 2877 2878 switch (atomic_read(&base_vha->loop_state)) { 2879 case LOOP_UPDATE: 2880 fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS; 2881 break; 2882 case LOOP_DOWN: 2883 if (test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags)) 2884 fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS; 2885 else 2886 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN; 2887 break; 2888 case LOOP_DEAD: 2889 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN; 2890 break; 2891 case LOOP_READY: 2892 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; 2893 break; 2894 default: 2895 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN; 2896 break; 2897 } 2898 } 2899 2900 static int 2901 qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) 2902 { 2903 int ret = 0; 2904 uint8_t qos = 0; 2905 scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost); 2906 scsi_qla_host_t *vha = NULL; 2907 struct qla_hw_data *ha = base_vha->hw; 2908 int cnt; 2909 struct req_que *req = ha->req_q_map[0]; 2910 struct qla_qpair *qpair; 2911 2912 ret = qla24xx_vport_create_req_sanity_check(fc_vport); 2913 if (ret) { 2914 ql_log(ql_log_warn, vha, 0x707e, 2915 "Vport sanity check failed, status %x\n", ret); 2916 return (ret); 2917 } 2918 2919 vha = qla24xx_create_vhost(fc_vport); 2920 if (vha == NULL) { 2921 ql_log(ql_log_warn, vha, 0x707f, "Vport create host failed.\n"); 2922 return FC_VPORT_FAILED; 2923 } 2924 if (disable) { 2925 atomic_set(&vha->vp_state, VP_OFFLINE); 2926 fc_vport_set_state(fc_vport, FC_VPORT_DISABLED); 2927 } else 2928 atomic_set(&vha->vp_state, VP_FAILED); 2929 2930 /* ready to create vport */ 2931 ql_log(ql_log_info, vha, 0x7080, 2932 "VP entry id %d assigned.\n", vha->vp_idx); 2933 2934 /* initialized vport states */ 2935 atomic_set(&vha->loop_state, LOOP_DOWN); 2936 vha->vp_err_state = VP_ERR_PORTDWN; 2937 vha->vp_prev_err_state = VP_ERR_UNKWN; 2938 /* Check if physical ha port is Up */ 2939 if (atomic_read(&base_vha->loop_state) == LOOP_DOWN || 2940 atomic_read(&base_vha->loop_state) == LOOP_DEAD) { 2941 /* Don't retry or attempt login of this virtual port */ 2942 ql_dbg(ql_dbg_user, vha, 0x7081, 2943 "Vport loop state is not UP.\n"); 2944 atomic_set(&vha->loop_state, LOOP_DEAD); 2945 if (!disable) 2946 fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN); 2947 } 2948 2949 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { 2950 if (ha->fw_attributes & BIT_4) { 2951 int prot = 0, guard; 2952 2953 vha->flags.difdix_supported = 1; 2954 ql_dbg(ql_dbg_user, vha, 0x7082, 2955 "Registered for DIF/DIX type 1 and 3 protection.\n"); 2956 if (ql2xenabledif == 1) 2957 prot = SHOST_DIX_TYPE0_PROTECTION; 2958 scsi_host_set_prot(vha->host, 2959 prot | SHOST_DIF_TYPE1_PROTECTION 2960 | SHOST_DIF_TYPE2_PROTECTION 2961 | SHOST_DIF_TYPE3_PROTECTION 2962 | SHOST_DIX_TYPE1_PROTECTION 2963 | SHOST_DIX_TYPE2_PROTECTION 2964 | SHOST_DIX_TYPE3_PROTECTION); 2965 2966 guard = SHOST_DIX_GUARD_CRC; 2967 2968 if (IS_PI_IPGUARD_CAPABLE(ha) && 2969 (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha))) 2970 guard |= SHOST_DIX_GUARD_IP; 2971 2972 scsi_host_set_guard(vha->host, guard); 2973 } else 2974 vha->flags.difdix_supported = 0; 2975 } 2976 2977 if (scsi_add_host_with_dma(vha->host, &fc_vport->dev, 2978 &ha->pdev->dev)) { 2979 ql_dbg(ql_dbg_user, vha, 0x7083, 2980 "scsi_add_host failure for VP[%d].\n", vha->vp_idx); 2981 goto vport_create_failed_2; 2982 } 2983 2984 /* initialize attributes */ 2985 fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count; 2986 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name); 2987 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name); 2988 fc_host_supported_classes(vha->host) = 2989 fc_host_supported_classes(base_vha->host); 2990 fc_host_supported_speeds(vha->host) = 2991 fc_host_supported_speeds(base_vha->host); 2992 2993 qlt_vport_create(vha, ha); 2994 qla24xx_vport_disable(fc_vport, disable); 2995 2996 if (!ql2xmqsupport || !ha->npiv_info) 2997 goto vport_queue; 2998 2999 /* Create a request queue in QoS mode for the vport */ 3000 for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) { 3001 if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0 3002 && memcmp(ha->npiv_info[cnt].node_name, vha->node_name, 3003 8) == 0) { 3004 qos = ha->npiv_info[cnt].q_qos; 3005 break; 3006 } 3007 } 3008 3009 if (qos) { 3010 qpair = qla2xxx_create_qpair(vha, qos, vha->vp_idx, true); 3011 if (!qpair) 3012 ql_log(ql_log_warn, vha, 0x7084, 3013 "Can't create qpair for VP[%d]\n", 3014 vha->vp_idx); 3015 else { 3016 ql_dbg(ql_dbg_multiq, vha, 0xc001, 3017 "Queue pair: %d Qos: %d) created for VP[%d]\n", 3018 qpair->id, qos, vha->vp_idx); 3019 ql_dbg(ql_dbg_user, vha, 0x7085, 3020 "Queue Pair: %d Qos: %d) created for VP[%d]\n", 3021 qpair->id, qos, vha->vp_idx); 3022 req = qpair->req; 3023 vha->qpair = qpair; 3024 } 3025 } 3026 3027 vport_queue: 3028 vha->req = req; 3029 return 0; 3030 3031 vport_create_failed_2: 3032 qla24xx_disable_vp(vha); 3033 qla24xx_deallocate_vp_id(vha); 3034 scsi_host_put(vha->host); 3035 return FC_VPORT_FAILED; 3036 } 3037 3038 static int 3039 qla24xx_vport_delete(struct fc_vport *fc_vport) 3040 { 3041 scsi_qla_host_t *vha = fc_vport->dd_data; 3042 struct qla_hw_data *ha = vha->hw; 3043 uint16_t id = vha->vp_idx; 3044 3045 set_bit(VPORT_DELETE, &vha->dpc_flags); 3046 3047 while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) || 3048 test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) 3049 msleep(1000); 3050 3051 3052 qla24xx_disable_vp(vha); 3053 qla2x00_wait_for_sess_deletion(vha); 3054 3055 qla_nvme_delete(vha); 3056 vha->flags.delete_progress = 1; 3057 3058 qlt_remove_target(ha, vha); 3059 3060 fc_remove_host(vha->host); 3061 3062 scsi_remove_host(vha->host); 3063 3064 /* Allow timer to run to drain queued items, when removing vp */ 3065 qla24xx_deallocate_vp_id(vha); 3066 3067 if (vha->timer_active) { 3068 qla2x00_vp_stop_timer(vha); 3069 ql_dbg(ql_dbg_user, vha, 0x7086, 3070 "Timer for the VP[%d] has stopped\n", vha->vp_idx); 3071 } 3072 3073 qla2x00_free_fcports(vha); 3074 3075 mutex_lock(&ha->vport_lock); 3076 ha->cur_vport_count--; 3077 clear_bit(vha->vp_idx, ha->vp_idx_map); 3078 mutex_unlock(&ha->vport_lock); 3079 3080 dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l, 3081 vha->gnl.ldma); 3082 3083 vha->gnl.l = NULL; 3084 3085 vfree(vha->scan.l); 3086 3087 if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) { 3088 if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS) 3089 ql_log(ql_log_warn, vha, 0x7087, 3090 "Queue Pair delete failed.\n"); 3091 } 3092 3093 ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id); 3094 scsi_host_put(vha->host); 3095 return 0; 3096 } 3097 3098 static int 3099 qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable) 3100 { 3101 scsi_qla_host_t *vha = fc_vport->dd_data; 3102 3103 if (disable) 3104 qla24xx_disable_vp(vha); 3105 else 3106 qla24xx_enable_vp(vha); 3107 3108 return 0; 3109 } 3110 3111 struct fc_function_template qla2xxx_transport_functions = { 3112 3113 .show_host_node_name = 1, 3114 .show_host_port_name = 1, 3115 .show_host_supported_classes = 1, 3116 .show_host_supported_speeds = 1, 3117 3118 .get_host_port_id = qla2x00_get_host_port_id, 3119 .show_host_port_id = 1, 3120 .get_host_speed = qla2x00_get_host_speed, 3121 .show_host_speed = 1, 3122 .get_host_port_type = qla2x00_get_host_port_type, 3123 .show_host_port_type = 1, 3124 .get_host_symbolic_name = qla2x00_get_host_symbolic_name, 3125 .show_host_symbolic_name = 1, 3126 .set_host_system_hostname = qla2x00_set_host_system_hostname, 3127 .show_host_system_hostname = 1, 3128 .get_host_fabric_name = qla2x00_get_host_fabric_name, 3129 .show_host_fabric_name = 1, 3130 .get_host_port_state = qla2x00_get_host_port_state, 3131 .show_host_port_state = 1, 3132 3133 .dd_fcrport_size = sizeof(struct fc_port *), 3134 .show_rport_supported_classes = 1, 3135 3136 .get_starget_node_name = qla2x00_get_starget_node_name, 3137 .show_starget_node_name = 1, 3138 .get_starget_port_name = qla2x00_get_starget_port_name, 3139 .show_starget_port_name = 1, 3140 .get_starget_port_id = qla2x00_get_starget_port_id, 3141 .show_starget_port_id = 1, 3142 3143 .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo, 3144 .show_rport_dev_loss_tmo = 1, 3145 3146 .issue_fc_host_lip = qla2x00_issue_lip, 3147 .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk, 3148 .terminate_rport_io = qla2x00_terminate_rport_io, 3149 .get_fc_host_stats = qla2x00_get_fc_host_stats, 3150 .reset_fc_host_stats = qla2x00_reset_host_stats, 3151 3152 .vport_create = qla24xx_vport_create, 3153 .vport_disable = qla24xx_vport_disable, 3154 .vport_delete = qla24xx_vport_delete, 3155 .bsg_request = qla24xx_bsg_request, 3156 .bsg_timeout = qla24xx_bsg_timeout, 3157 }; 3158 3159 struct fc_function_template qla2xxx_transport_vport_functions = { 3160 3161 .show_host_node_name = 1, 3162 .show_host_port_name = 1, 3163 .show_host_supported_classes = 1, 3164 3165 .get_host_port_id = qla2x00_get_host_port_id, 3166 .show_host_port_id = 1, 3167 .get_host_speed = qla2x00_get_host_speed, 3168 .show_host_speed = 1, 3169 .get_host_port_type = qla2x00_get_host_port_type, 3170 .show_host_port_type = 1, 3171 .get_host_symbolic_name = qla2x00_get_host_symbolic_name, 3172 .show_host_symbolic_name = 1, 3173 .set_host_system_hostname = qla2x00_set_host_system_hostname, 3174 .show_host_system_hostname = 1, 3175 .get_host_fabric_name = qla2x00_get_host_fabric_name, 3176 .show_host_fabric_name = 1, 3177 .get_host_port_state = qla2x00_get_host_port_state, 3178 .show_host_port_state = 1, 3179 3180 .dd_fcrport_size = sizeof(struct fc_port *), 3181 .show_rport_supported_classes = 1, 3182 3183 .get_starget_node_name = qla2x00_get_starget_node_name, 3184 .show_starget_node_name = 1, 3185 .get_starget_port_name = qla2x00_get_starget_port_name, 3186 .show_starget_port_name = 1, 3187 .get_starget_port_id = qla2x00_get_starget_port_id, 3188 .show_starget_port_id = 1, 3189 3190 .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo, 3191 .show_rport_dev_loss_tmo = 1, 3192 3193 .issue_fc_host_lip = qla2x00_issue_lip, 3194 .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk, 3195 .terminate_rport_io = qla2x00_terminate_rport_io, 3196 .get_fc_host_stats = qla2x00_get_fc_host_stats, 3197 .reset_fc_host_stats = qla2x00_reset_host_stats, 3198 3199 .bsg_request = qla24xx_bsg_request, 3200 .bsg_timeout = qla24xx_bsg_timeout, 3201 }; 3202 3203 void 3204 qla2x00_init_host_attr(scsi_qla_host_t *vha) 3205 { 3206 struct qla_hw_data *ha = vha->hw; 3207 u32 speeds = FC_PORTSPEED_UNKNOWN; 3208 3209 fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count; 3210 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name); 3211 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name); 3212 fc_host_supported_classes(vha->host) = ha->base_qpair->enable_class_2 ? 3213 (FC_COS_CLASS2|FC_COS_CLASS3) : FC_COS_CLASS3; 3214 fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports; 3215 fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count; 3216 3217 if (IS_CNA_CAPABLE(ha)) 3218 speeds = FC_PORTSPEED_10GBIT; 3219 else if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) { 3220 if (ha->max_supported_speed == 2) { 3221 if (ha->min_supported_speed <= 6) 3222 speeds |= FC_PORTSPEED_64GBIT; 3223 } 3224 if (ha->max_supported_speed == 2 || 3225 ha->max_supported_speed == 1) { 3226 if (ha->min_supported_speed <= 5) 3227 speeds |= FC_PORTSPEED_32GBIT; 3228 } 3229 if (ha->max_supported_speed == 2 || 3230 ha->max_supported_speed == 1 || 3231 ha->max_supported_speed == 0) { 3232 if (ha->min_supported_speed <= 4) 3233 speeds |= FC_PORTSPEED_16GBIT; 3234 } 3235 if (ha->max_supported_speed == 1 || 3236 ha->max_supported_speed == 0) { 3237 if (ha->min_supported_speed <= 3) 3238 speeds |= FC_PORTSPEED_8GBIT; 3239 } 3240 if (ha->max_supported_speed == 0) { 3241 if (ha->min_supported_speed <= 2) 3242 speeds |= FC_PORTSPEED_4GBIT; 3243 } 3244 } else if (IS_QLA2031(ha)) 3245 speeds = FC_PORTSPEED_16GBIT|FC_PORTSPEED_8GBIT| 3246 FC_PORTSPEED_4GBIT; 3247 else if (IS_QLA25XX(ha) || IS_QLAFX00(ha)) 3248 speeds = FC_PORTSPEED_8GBIT|FC_PORTSPEED_4GBIT| 3249 FC_PORTSPEED_2GBIT|FC_PORTSPEED_1GBIT; 3250 else if (IS_QLA24XX_TYPE(ha)) 3251 speeds = FC_PORTSPEED_4GBIT|FC_PORTSPEED_2GBIT| 3252 FC_PORTSPEED_1GBIT; 3253 else if (IS_QLA23XX(ha)) 3254 speeds = FC_PORTSPEED_2GBIT|FC_PORTSPEED_1GBIT; 3255 else 3256 speeds = FC_PORTSPEED_1GBIT; 3257 3258 fc_host_supported_speeds(vha->host) = speeds; 3259 } 3260