/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/delay.h>

static int qla24xx_vport_disable(struct fc_vport *, bool);

/* SYSFS attributes --------------------------------------------------------- */

static ssize_t
qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
			   struct bin_attribute *bin_attr,
			   char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	if (!(ha->fw_dump_reading || ha->mctp_dump_reading))
		return 0;

	mutex_lock(&ha->optrom_mutex);
	if (IS_P3P_TYPE(ha)) {
		if (off < ha->md_template_size) {
			rval = memory_read_from_buffer(buf, count,
			    &off, ha->md_tmplt_hdr, ha->md_template_size);
		} else {
			off -= ha->md_template_size;
			rval = memory_read_from_buffer(buf, count,
			    &off, ha->md_dump, ha->md_dump_size);
		}
	} else if (ha->mctp_dumped && ha->mctp_dump_reading) {
		rval = memory_read_from_buffer(buf, count, &off, ha->mctp_dump,
		    MCTP_DUMP_SIZE);
	} else if (ha->fw_dump_reading) {
		rval = memory_read_from_buffer(buf, count, &off, ha->fw_dump,
		    ha->fw_dump_len);
	} else {
		rval = 0;
	}
	mutex_unlock(&ha->optrom_mutex);
	return rval;
}

static ssize_t
qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
			    struct bin_attribute *bin_attr,
			    char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	int reading;

	if (off != 0)
		return (0);

	reading = simple_strtol(buf, NULL, 10);
	switch (reading) {
	case 0:
		if (!ha->fw_dump_reading)
			break;

		ql_log(ql_log_info, vha, 0x705d,
		    "Firmware dump cleared on (%ld).\n", vha->host_no);

		if (IS_P3P_TYPE(ha)) {
			qla82xx_md_free(vha);
			qla82xx_md_prep(vha);
		}
		ha->fw_dump_reading = 0;
		ha->fw_dumped = 0;
		break;
	case 1:
		if (ha->fw_dumped && !ha->fw_dump_reading) {
			ha->fw_dump_reading = 1;

			ql_log(ql_log_info, vha, 0x705e,
			    "Raw firmware dump ready for read on (%ld).\n",
			    vha->host_no);
		}
		break;
	case 2:
		qla2x00_alloc_fw_dump(vha);
		break;
	case 3:
		if (IS_QLA82XX(ha)) {
			qla82xx_idc_lock(ha);
			qla82xx_set_reset_owner(vha);
			qla82xx_idc_unlock(ha);
		} else if (IS_QLA8044(ha)) {
			qla8044_idc_lock(ha);
			qla82xx_set_reset_owner(vha);
			qla8044_idc_unlock(ha);
		} else
			qla2x00_system_error(vha);
		break;
	case 4:
		if (IS_P3P_TYPE(ha)) {
			if (ha->md_tmplt_hdr)
				ql_dbg(ql_dbg_user, vha, 0x705b,
				    "MiniDump supported with this firmware.\n");
			else
				ql_dbg(ql_dbg_user, vha, 0x709d,
				    "MiniDump not supported with this firmware.\n");
		}
		break;
	case 5:
		if (IS_P3P_TYPE(ha))
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;
	case 6:
		if (!ha->mctp_dump_reading)
			break;
		ql_log(ql_log_info, vha, 0x70c1,
		    "MCTP dump cleared on (%ld).\n", vha->host_no);
		ha->mctp_dump_reading = 0;
		ha->mctp_dumped = 0;
		break;
	case 7:
		if (ha->mctp_dumped && !ha->mctp_dump_reading) {
			ha->mctp_dump_reading = 1;
			ql_log(ql_log_info, vha, 0x70c2,
			    "Raw mctp dump ready for read on (%ld).\n",
			    vha->host_no);
		}
		break;
	}
	return count;
}

static struct bin_attribute sysfs_fw_dump_attr = {
	.attr = {
		.name = "fw_dump",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = qla2x00_sysfs_read_fw_dump,
	.write = qla2x00_sysfs_write_fw_dump,
};

static ssize_t
qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
			 struct bin_attribute *bin_attr,
			 char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	uint32_t faddr;
	struct active_regions active_regions = { };

	if (!capable(CAP_SYS_ADMIN))
		return 0;

	mutex_lock(&ha->optrom_mutex);
	if (qla2x00_chip_is_down(vha)) {
		mutex_unlock(&ha->optrom_mutex);
		return -EAGAIN;
	}

	if (!IS_NOCACHE_VPD_TYPE(ha)) {
		mutex_unlock(&ha->optrom_mutex);
		goto skip;
	}

	faddr = ha->flt_region_nvram;
	if (IS_QLA28XX(ha)) {
		qla28xx_get_aux_images(vha, &active_regions);
		if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
			faddr = ha->flt_region_nvram_sec;
	}
	ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size);

	mutex_unlock(&ha->optrom_mutex);

skip:
	return memory_read_from_buffer(buf, count, &off, ha->nvram,
	    ha->nvram_size);
}

static ssize_t
qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
			  struct bin_attribute *bin_attr,
			  char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	uint16_t cnt;

	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size ||
	    !ha->isp_ops->write_nvram)
		return -EINVAL;

	/* Checksum NVRAM. */
	if (IS_FWI2_CAPABLE(ha)) {
		uint32_t *iter;
		uint32_t chksum;

		iter = (uint32_t *)buf;
		chksum = 0;
		for (cnt = 0; cnt < ((count >> 2) - 1); cnt++, iter++)
			chksum += le32_to_cpu(*iter);
		chksum = ~chksum + 1;
		*iter = cpu_to_le32(chksum);
	} else {
		uint8_t *iter;
		uint8_t chksum;

		iter = (uint8_t *)buf;
		chksum = 0;
		for (cnt = 0; cnt < count - 1; cnt++)
			chksum += *iter++;
		chksum = ~chksum + 1;
		*iter = chksum;
	}

	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x705f,
		    "HBA not online, failing NVRAM update.\n");
		return -EAGAIN;
	}

	mutex_lock(&ha->optrom_mutex);
	if (qla2x00_chip_is_down(vha)) {
		mutex_unlock(&ha->optrom_mutex);
		return -EAGAIN;
	}

	/* Write NVRAM. */
	ha->isp_ops->write_nvram(vha, buf, ha->nvram_base, count);
	ha->isp_ops->read_nvram(vha, ha->nvram, ha->nvram_base, count);
	mutex_unlock(&ha->optrom_mutex);

	ql_dbg(ql_dbg_user, vha, 0x7060,
	    "Setting ISP_ABORT_NEEDED\n");
	/* NVRAM settings take effect immediately. */
	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
	qla2x00_wait_for_chip_reset(vha);

	return count;
}

static struct bin_attribute sysfs_nvram_attr = {
	.attr = {
		.name = "nvram",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 512,
	.read = qla2x00_sysfs_read_nvram,
	.write = qla2x00_sysfs_write_nvram,
};
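/*
 * Usage summary for the two nodes above:
 *
 * "fw_dump" is driven by writing a single decimal value: 0 clears a
 * completed dump, 1 marks a captured dump as ready for reading, 2
 * (re)allocates the dump buffer, 3 forces a system/firmware error, 4
 * reports MiniDump support on P3P parts, 5 requests an ISP abort on
 * P3P parts, and 6/7 clear or expose an MCTP dump.  Reads then return
 * the raw dump image.
 *
 * "nvram" reads back the cached NVRAM image; a write must supply a
 * full ha->nvram_size image at offset 0 -- the handler recomputes the
 * trailing checksum and schedules an ISP abort so the new settings
 * take effect.
 */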
static ssize_t
qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
			  struct bin_attribute *bin_attr,
			  char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	ssize_t rval = 0;

	mutex_lock(&ha->optrom_mutex);

	if (ha->optrom_state != QLA_SREADING)
		goto out;

	rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
	    ha->optrom_region_size);

out:
	mutex_unlock(&ha->optrom_mutex);

	return rval;
}

static ssize_t
qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
			   struct bin_attribute *bin_attr,
			   char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;

	mutex_lock(&ha->optrom_mutex);

	if (ha->optrom_state != QLA_SWRITING) {
		mutex_unlock(&ha->optrom_mutex);
		return -EINVAL;
	}
	if (off > ha->optrom_region_size) {
		mutex_unlock(&ha->optrom_mutex);
		return -ERANGE;
	}
	if (off + count > ha->optrom_region_size)
		count = ha->optrom_region_size - off;

	memcpy(&ha->optrom_buffer[off], buf, count);
	mutex_unlock(&ha->optrom_mutex);

	return count;
}

static struct bin_attribute sysfs_optrom_attr = {
	.attr = {
		.name = "optrom",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = qla2x00_sysfs_read_optrom,
	.write = qla2x00_sysfs_write_optrom,
};

static ssize_t
qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *bin_attr,
			       char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	uint32_t start = 0;
	uint32_t size = ha->optrom_size;
	int val, valid;
	ssize_t rval = count;

	if (off)
		return -EINVAL;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return -EAGAIN;

	if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1)
		return -EINVAL;
	if (start > ha->optrom_size)
		return -EINVAL;
	if (size > ha->optrom_size - start)
		size = ha->optrom_size - start;

	mutex_lock(&ha->optrom_mutex);
	if (qla2x00_chip_is_down(vha)) {
		mutex_unlock(&ha->optrom_mutex);
		return -EAGAIN;
	}
	switch (val) {
	case 0:
		if (ha->optrom_state != QLA_SREADING &&
		    ha->optrom_state != QLA_SWRITING) {
			rval = -EINVAL;
			goto out;
		}
		ha->optrom_state = QLA_SWAITING;

		ql_dbg(ql_dbg_user, vha, 0x7061,
		    "Freeing flash region allocation -- 0x%x bytes.\n",
		    ha->optrom_region_size);

		vfree(ha->optrom_buffer);
		ha->optrom_buffer = NULL;
		break;
	case 1:
		if (ha->optrom_state != QLA_SWAITING) {
			rval = -EINVAL;
			goto out;
		}

		ha->optrom_region_start = start;
		ha->optrom_region_size = size;

		ha->optrom_state = QLA_SREADING;
		ha->optrom_buffer = vzalloc(ha->optrom_region_size);
		if (ha->optrom_buffer == NULL) {
			ql_log(ql_log_warn, vha, 0x7062,
			    "Unable to allocate memory for optrom retrieval "
			    "(%x).\n", ha->optrom_region_size);

			ha->optrom_state = QLA_SWAITING;
			rval = -ENOMEM;
			goto out;
		}

		if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x7063,
			    "HBA not online, failing NVRAM update.\n");
			rval = -EAGAIN;
			goto out;
		}

		ql_dbg(ql_dbg_user, vha, 0x7064,
		    "Reading flash region -- 0x%x/0x%x.\n",
		    ha->optrom_region_start, ha->optrom_region_size);

		ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
		    ha->optrom_region_start, ha->optrom_region_size);
		break;
	case 2:
		if (ha->optrom_state != QLA_SWAITING) {
			rval = -EINVAL;
			goto out;
		}

		/*
		 * We need to be more restrictive on which FLASH regions are
		 * allowed to be updated via user-space. Regions accessible
		 * via this method include:
		 *
		 * ISP21xx/ISP22xx/ISP23xx type boards:
		 *
		 * 0x000000 -> 0x020000 -- Boot code.
		 *
		 * ISP2322/ISP24xx type boards:
		 *
		 * 0x000000 -> 0x07ffff -- Boot code.
		 * 0x080000 -> 0x0fffff -- Firmware.
		 *
		 * ISP25xx type boards:
		 *
		 * 0x000000 -> 0x07ffff -- Boot code.
		 * 0x080000 -> 0x0fffff -- Firmware.
		 * 0x120000 -> 0x12ffff -- VPD and HBA parameters.
		 *
		 * > ISP25xx type boards:
		 *
		 * None -- should go through BSG.
		 */
		valid = 0;
		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
			valid = 1;
		else if (start == (ha->flt_region_boot * 4) ||
		    start == (ha->flt_region_fw * 4))
			valid = 1;
		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
			valid = 1;
		if (!valid) {
			ql_log(ql_log_warn, vha, 0x7065,
			    "Invalid start region 0x%x/0x%x.\n", start, size);
			rval = -EINVAL;
			goto out;
		}

		ha->optrom_region_start = start;
		ha->optrom_region_size = size;

		ha->optrom_state = QLA_SWRITING;
		ha->optrom_buffer = vzalloc(ha->optrom_region_size);
		if (ha->optrom_buffer == NULL) {
			ql_log(ql_log_warn, vha, 0x7066,
			    "Unable to allocate memory for optrom update "
			    "(%x)\n", ha->optrom_region_size);

			ha->optrom_state = QLA_SWAITING;
			rval = -ENOMEM;
			goto out;
		}

		ql_dbg(ql_dbg_user, vha, 0x7067,
		    "Staging flash region write -- 0x%x/0x%x.\n",
		    ha->optrom_region_start, ha->optrom_region_size);

		break;
	case 3:
		if (ha->optrom_state != QLA_SWRITING) {
			rval = -EINVAL;
			goto out;
		}

		if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x7068,
			    "HBA not online, failing flash update.\n");
			rval = -EAGAIN;
			goto out;
		}

		ql_dbg(ql_dbg_user, vha, 0x7069,
		    "Writing flash region -- 0x%x/0x%x.\n",
		    ha->optrom_region_start, ha->optrom_region_size);

		ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
		    ha->optrom_region_start, ha->optrom_region_size);
		break;
	default:
		rval = -EINVAL;
	}

out:
	mutex_unlock(&ha->optrom_mutex);
	return rval;
}

static struct bin_attribute sysfs_optrom_ctl_attr = {
	.attr = {
		.name = "optrom_ctl",
		.mode = S_IWUSR,
	},
	.size = 0,
	.write = qla2x00_sysfs_write_optrom_ctl,
};
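/*
 * The "optrom" node is only useful together with "optrom_ctl", which
 * takes a "<cmd>:<start>:<size>" string (start and size in hex):
 *
 *	1:<start>:<size>  stage a flash read; the data then appears in
 *			  "optrom"
 *	2:<start>:<size>  stage a flash write; copy the new image into
 *			  "optrom"
 *	3		  burn the staged image to flash
 *	0		  free the staging buffer
 *
 * For example, "1:0x0:0x1000" would stage a read of the first 4 KB of
 * the option ROM (the offsets here are illustrative only).
 */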
static ssize_t
qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
		       struct bin_attribute *bin_attr,
		       char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	uint32_t faddr;
	struct active_regions active_regions = { };

	if (unlikely(pci_channel_offline(ha->pdev)))
		return -EAGAIN;

	if (!capable(CAP_SYS_ADMIN))
		return -EINVAL;

	if (IS_NOCACHE_VPD_TYPE(ha))
		goto skip;

	faddr = ha->flt_region_vpd << 2;

	if (IS_QLA28XX(ha)) {
		qla28xx_get_aux_images(vha, &active_regions);
		if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
			faddr = ha->flt_region_vpd_sec << 2;

		ql_dbg(ql_dbg_init, vha, 0x7070,
		    "Loading %s nvram image.\n",
		    active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
		    "primary" : "secondary");
	}

	mutex_lock(&ha->optrom_mutex);
	if (qla2x00_chip_is_down(vha)) {
		mutex_unlock(&ha->optrom_mutex);
		return -EAGAIN;
	}

	ha->isp_ops->read_optrom(vha, ha->vpd, faddr, ha->vpd_size);
	mutex_unlock(&ha->optrom_mutex);

skip:
	return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
}

static ssize_t
qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
			struct bin_attribute *bin_attr,
			char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	uint8_t *tmp_data;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return 0;

	if (qla2x00_chip_is_down(vha))
		return 0;

	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
	    !ha->isp_ops->write_nvram)
		return 0;

	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x706a,
		    "HBA not online, failing VPD update.\n");
		return -EAGAIN;
	}

	mutex_lock(&ha->optrom_mutex);
	if (qla2x00_chip_is_down(vha)) {
		mutex_unlock(&ha->optrom_mutex);
		return -EAGAIN;
	}

	/* Write NVRAM. */
	ha->isp_ops->write_nvram(vha, buf, ha->vpd_base, count);
	ha->isp_ops->read_nvram(vha, ha->vpd, ha->vpd_base, count);

	/* Update flash version information for 4Gb & above. */
	if (!IS_FWI2_CAPABLE(ha)) {
		mutex_unlock(&ha->optrom_mutex);
		return -EINVAL;
	}

	tmp_data = vmalloc(256);
	if (!tmp_data) {
		mutex_unlock(&ha->optrom_mutex);
		ql_log(ql_log_warn, vha, 0x706b,
		    "Unable to allocate memory for VPD information update.\n");
		return -ENOMEM;
	}
	ha->isp_ops->get_flash_version(vha, tmp_data);
	vfree(tmp_data);

	mutex_unlock(&ha->optrom_mutex);

	return count;
}

static struct bin_attribute sysfs_vpd_attr = {
	.attr = {
		.name = "vpd",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = qla2x00_sysfs_read_vpd,
	.write = qla2x00_sysfs_write_vpd,
};
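/*
 * Note on "vpd": a write must supply a complete ha->vpd_size image at
 * offset 0; on FWI2-capable adapters the handler also refreshes the
 * cached flash version information afterwards.
 */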
static ssize_t
qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
		       struct bin_attribute *bin_attr,
		       char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	int rval;

	if (!capable(CAP_SYS_ADMIN) || off != 0 || count < SFP_DEV_SIZE)
		return 0;

	mutex_lock(&vha->hw->optrom_mutex);
	if (qla2x00_chip_is_down(vha)) {
		mutex_unlock(&vha->hw->optrom_mutex);
		return 0;
	}

	rval = qla2x00_read_sfp_dev(vha, buf, count);
	mutex_unlock(&vha->hw->optrom_mutex);

	if (rval)
		return -EIO;

	return count;
}

static struct bin_attribute sysfs_sfp_attr = {
	.attr = {
		.name = "sfp",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = SFP_DEV_SIZE,
	.read = qla2x00_sysfs_read_sfp,
};

static ssize_t
qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
			  struct bin_attribute *bin_attr,
			  char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	int type;
	uint32_t idc_control;
	uint8_t *tmp_data = NULL;

	if (off != 0)
		return -EINVAL;

	type = simple_strtol(buf, NULL, 10);
	switch (type) {
	case 0x2025c:
		ql_log(ql_log_info, vha, 0x706e,
		    "Issuing ISP reset.\n");

		scsi_block_requests(vha->host);
		if (IS_QLA82XX(ha)) {
			ha->flags.isp82xx_no_md_cap = 1;
			qla82xx_idc_lock(ha);
			qla82xx_set_reset_owner(vha);
			qla82xx_idc_unlock(ha);
		} else if (IS_QLA8044(ha)) {
			qla8044_idc_lock(ha);
			idc_control = qla8044_rd_reg(ha,
			    QLA8044_IDC_DRV_CTRL);
			qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
			    (idc_control | GRACEFUL_RESET_BIT1));
			qla82xx_set_reset_owner(vha);
			qla8044_idc_unlock(ha);
		} else {
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		}
		qla2x00_wait_for_chip_reset(vha);
		scsi_unblock_requests(vha->host);
		break;
	case 0x2025d:
		if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
			return -EPERM;

		ql_log(ql_log_info, vha, 0x706f,
		    "Issuing MPI reset.\n");

		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			uint32_t idc_control;

			qla83xx_idc_lock(vha, 0);
			__qla83xx_get_idc_control(vha, &idc_control);
			idc_control |= QLA83XX_IDC_GRACEFUL_RESET;
			__qla83xx_set_idc_control(vha, idc_control);
			qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
			    QLA8XXX_DEV_NEED_RESET);
			qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
			qla83xx_idc_unlock(vha, 0);
			break;
		} else {
			/* Make sure FC side is not in reset */
			WARN_ON_ONCE(qla2x00_wait_for_hba_online(vha) !=
			    QLA_SUCCESS);

			/* Issue MPI reset */
			scsi_block_requests(vha->host);
			if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
				ql_log(ql_log_warn, vha, 0x7070,
				    "MPI reset failed.\n");
			scsi_unblock_requests(vha->host);
			break;
		}
	case 0x2025e:
		if (!IS_P3P_TYPE(ha) || vha != base_vha) {
			ql_log(ql_log_info, vha, 0x7071,
			    "FCoE ctx reset not supported.\n");
			return -EPERM;
		}

		ql_log(ql_log_info, vha, 0x7072,
		    "Issuing FCoE ctx reset.\n");
		set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
		qla2x00_wait_for_fcoe_ctx_reset(vha);
		break;
	case 0x2025f:
		if (!IS_QLA8031(ha))
			return -EPERM;
		ql_log(ql_log_info, vha, 0x70bc,
		    "Disabling Reset by IDC control\n");
		qla83xx_idc_lock(vha, 0);
		__qla83xx_get_idc_control(vha, &idc_control);
		idc_control |= QLA83XX_IDC_RESET_DISABLED;
		__qla83xx_set_idc_control(vha, idc_control);
		qla83xx_idc_unlock(vha, 0);
		break;
	case 0x20260:
		if (!IS_QLA8031(ha))
			return -EPERM;
		ql_log(ql_log_info, vha, 0x70bd,
		    "Enabling Reset by IDC control\n");
		qla83xx_idc_lock(vha, 0);
		__qla83xx_get_idc_control(vha, &idc_control);
		idc_control &= ~QLA83XX_IDC_RESET_DISABLED;
		__qla83xx_set_idc_control(vha, idc_control);
		qla83xx_idc_unlock(vha, 0);
		break;
	case 0x20261:
		ql_dbg(ql_dbg_user, vha, 0x70e0,
		    "Updating cache versions without reset ");

		tmp_data = vmalloc(256);
		if (!tmp_data) {
			ql_log(ql_log_warn, vha, 0x70e1,
			    "Unable to allocate memory for VPD information update.\n");
			return -ENOMEM;
		}
		ha->isp_ops->get_flash_version(vha, tmp_data);
		vfree(tmp_data);
		break;
	}
	return count;
}

static struct bin_attribute sysfs_reset_attr = {
	.attr = {
		.name = "reset",
		.mode = S_IWUSR,
	},
	.size = 0,
	.write = qla2x00_sysfs_write_reset,
};

static ssize_t
qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
		   struct bin_attribute *bin_attr,
		   char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	int type;
	port_id_t did;

	if (!capable(CAP_SYS_ADMIN))
		return 0;

	if (unlikely(pci_channel_offline(vha->hw->pdev)))
		return 0;

	if (qla2x00_chip_is_down(vha))
		return 0;

	type = simple_strtol(buf, NULL, 10);

	did.b.domain = (type & 0x00ff0000) >> 16;
	did.b.area = (type & 0x0000ff00) >> 8;
	did.b.al_pa = (type & 0x000000ff);

	ql_log(ql_log_info, vha, 0xd04d, "portid=%02x%02x%02x done\n",
	    did.b.domain, did.b.area, did.b.al_pa);

	ql_log(ql_log_info, vha, 0x70e4, "%s: %d\n", __func__, type);

	qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did);
	return count;
}

static struct bin_attribute sysfs_issue_logo_attr = {
	.attr = {
		.name = "issue_logo",
		.mode = S_IWUSR,
	},
	.size = 0,
	.write = qla2x00_issue_logo,
};
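/*
 * The "reset" node parses the written value as a decimal number and
 * compares it against the codes above: 0x2025c issues a full ISP
 * reset, 0x2025d an MPI reset (81xx/83xx class), 0x2025e an FCoE
 * context reset (P3P base port only), 0x2025f/0x20260 disable/enable
 * reset via IDC control (8031), and 0x20261 refreshes the cached
 * flash versions without a reset.
 *
 * "issue_logo" takes a decimal value whose low 24 bits encode the
 * destination port id (domain/area/al_pa) for an explicit LOGO ELS.
 */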
static ssize_t
qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *bin_attr,
			       char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	int rval;
	uint16_t actual_size;

	if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE)
		return 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return 0;
	mutex_lock(&vha->hw->optrom_mutex);
	if (qla2x00_chip_is_down(vha)) {
		mutex_unlock(&vha->hw->optrom_mutex);
		return 0;
	}

	if (ha->xgmac_data)
		goto do_read;

	ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
	    &ha->xgmac_data_dma, GFP_KERNEL);
	if (!ha->xgmac_data) {
		mutex_unlock(&vha->hw->optrom_mutex);
		ql_log(ql_log_warn, vha, 0x7076,
		    "Unable to allocate memory for XGMAC read-data.\n");
		return 0;
	}

do_read:
	actual_size = 0;
	memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE);

	rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
	    XGMAC_DATA_SIZE, &actual_size);

	mutex_unlock(&vha->hw->optrom_mutex);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7077,
		    "Unable to read XGMAC data (%x).\n", rval);
		count = 0;
	}

	count = actual_size > count ? count : actual_size;
	memcpy(buf, ha->xgmac_data, count);

	return count;
}

static struct bin_attribute sysfs_xgmac_stats_attr = {
	.attr = {
		.name = "xgmac_stats",
		.mode = S_IRUSR,
	},
	.size = 0,
	.read = qla2x00_sysfs_read_xgmac_stats,
};

static ssize_t
qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
			    struct bin_attribute *bin_attr,
			    char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	int rval;

	if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)
		return 0;

	/* Take the mutex before the early-exit check so the do_read path
	 * always runs (and unlocks) with the lock held. */
	mutex_lock(&vha->hw->optrom_mutex);
	if (ha->dcbx_tlv)
		goto do_read;
	if (qla2x00_chip_is_down(vha)) {
		mutex_unlock(&vha->hw->optrom_mutex);
		return 0;
	}

	ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
	    &ha->dcbx_tlv_dma, GFP_KERNEL);
	if (!ha->dcbx_tlv) {
		mutex_unlock(&vha->hw->optrom_mutex);
		ql_log(ql_log_warn, vha, 0x7078,
		    "Unable to allocate memory for DCBX TLV read-data.\n");
		return -ENOMEM;
	}

do_read:
	memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE);

	rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
	    DCBX_TLV_DATA_SIZE);

	mutex_unlock(&vha->hw->optrom_mutex);

	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7079,
		    "Unable to read DCBX TLV (%x).\n", rval);
		return -EIO;
	}

	memcpy(buf, ha->dcbx_tlv, count);

	return count;
}

static struct bin_attribute sysfs_dcbx_tlv_attr = {
	.attr = {
		.name = "dcbx_tlv",
		.mode = S_IRUSR,
	},
	.size = 0,
	.read = qla2x00_sysfs_read_dcbx_tlv,
};
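/*
 * The "type" field of the table below gates attribute creation in
 * qla2x00_alloc_sysfs_attr(): any non-zero type requires an
 * FWI2-capable adapter, type 2 additionally requires a QLA25XX and
 * type 3 a CNA-capable adapter.
 */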
static struct sysfs_entry {
	char *name;
	struct bin_attribute *attr;
	int type;
} bin_file_entries[] = {
	{ "fw_dump", &sysfs_fw_dump_attr, },
	{ "nvram", &sysfs_nvram_attr, },
	{ "optrom", &sysfs_optrom_attr, },
	{ "optrom_ctl", &sysfs_optrom_ctl_attr, },
	{ "vpd", &sysfs_vpd_attr, 1 },
	{ "sfp", &sysfs_sfp_attr, 1 },
	{ "reset", &sysfs_reset_attr, },
	{ "issue_logo", &sysfs_issue_logo_attr, },
	{ "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
	{ "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
	{ NULL },
};

void
qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
{
	struct Scsi_Host *host = vha->host;
	struct sysfs_entry *iter;
	int ret;

	for (iter = bin_file_entries; iter->name; iter++) {
		if (iter->type && !IS_FWI2_CAPABLE(vha->hw))
			continue;
		if (iter->type == 2 && !IS_QLA25XX(vha->hw))
			continue;
		if (iter->type == 3 && !(IS_CNA_CAPABLE(vha->hw)))
			continue;

		ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
		    iter->attr);
		if (ret)
			ql_log(ql_log_warn, vha, 0x00f3,
			    "Unable to create sysfs %s binary attribute (%d).\n",
			    iter->name, ret);
		else
			ql_dbg(ql_dbg_init, vha, 0x00f4,
			    "Successfully created sysfs %s binary attribute.\n",
			    iter->name);
	}
}

void
qla2x00_free_sysfs_attr(scsi_qla_host_t *vha, bool stop_beacon)
{
	struct Scsi_Host *host = vha->host;
	struct sysfs_entry *iter;
	struct qla_hw_data *ha = vha->hw;

	for (iter = bin_file_entries; iter->name; iter++) {
		if (iter->type && !IS_FWI2_CAPABLE(ha))
			continue;
		if (iter->type == 2 && !IS_QLA25XX(ha))
			continue;
		if (iter->type == 3 && !(IS_CNA_CAPABLE(ha)))
			continue;
		if (iter->type == 0x27 &&
		    (!IS_QLA27XX(ha) || !IS_QLA28XX(ha)))
			continue;

		sysfs_remove_bin_file(&host->shost_gendev.kobj,
		    iter->attr);
	}

	if (stop_beacon && ha->beacon_blink_led == 1)
		ha->isp_ops->beacon_off(vha);
}

/* Scsi_Host attributes. */

static ssize_t
qla2x00_driver_version_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str);
}

static ssize_t
qla2x00_fw_version_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	char fw_str[128];

	return scnprintf(buf, PAGE_SIZE, "%s\n",
	    ha->isp_ops->fw_version_str(vha, fw_str, sizeof(fw_str)));
}

static ssize_t
qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	uint32_t sn;

	if (IS_QLAFX00(vha->hw)) {
		return scnprintf(buf, PAGE_SIZE, "%s\n",
		    vha->hw->mr.serial_num);
	} else if (IS_FWI2_CAPABLE(ha)) {
		qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE - 1);
		return strlen(strcat(buf, "\n"));
	}

	sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
	return scnprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,
	    sn % 100000);
}

static ssize_t
qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr,
		      char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

	return scnprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
}

static ssize_t
qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,
		    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;

	if (IS_QLAFX00(vha->hw))
		return scnprintf(buf, PAGE_SIZE, "%s\n",
		    vha->hw->mr.hw_version);

	return scnprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
	    ha->product_id[0], ha->product_id[1], ha->product_id[2],
	    ha->product_id[3]);
}

static ssize_t
qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

	return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
}

static ssize_t
qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr, 1113 char *buf) 1114 { 1115 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1116 1117 return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_desc); 1118 } 1119 1120 static ssize_t 1121 qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr, 1122 char *buf) 1123 { 1124 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1125 char pci_info[30]; 1126 1127 return scnprintf(buf, PAGE_SIZE, "%s\n", 1128 vha->hw->isp_ops->pci_info_str(vha, pci_info, 1129 sizeof(pci_info))); 1130 } 1131 1132 static ssize_t 1133 qla2x00_link_state_show(struct device *dev, struct device_attribute *attr, 1134 char *buf) 1135 { 1136 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1137 struct qla_hw_data *ha = vha->hw; 1138 int len = 0; 1139 1140 if (atomic_read(&vha->loop_state) == LOOP_DOWN || 1141 atomic_read(&vha->loop_state) == LOOP_DEAD || 1142 vha->device_flags & DFLG_NO_CABLE) 1143 len = scnprintf(buf, PAGE_SIZE, "Link Down\n"); 1144 else if (atomic_read(&vha->loop_state) != LOOP_READY || 1145 qla2x00_chip_is_down(vha)) 1146 len = scnprintf(buf, PAGE_SIZE, "Unknown Link State\n"); 1147 else { 1148 len = scnprintf(buf, PAGE_SIZE, "Link Up - "); 1149 1150 switch (ha->current_topology) { 1151 case ISP_CFG_NL: 1152 len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n"); 1153 break; 1154 case ISP_CFG_FL: 1155 len += scnprintf(buf + len, PAGE_SIZE-len, "FL_Port\n"); 1156 break; 1157 case ISP_CFG_N: 1158 len += scnprintf(buf + len, PAGE_SIZE-len, 1159 "N_Port to N_Port\n"); 1160 break; 1161 case ISP_CFG_F: 1162 len += scnprintf(buf + len, PAGE_SIZE-len, "F_Port\n"); 1163 break; 1164 default: 1165 len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n"); 1166 break; 1167 } 1168 } 1169 return len; 1170 } 1171 1172 static ssize_t 1173 qla2x00_zio_show(struct device *dev, struct device_attribute *attr, 1174 char *buf) 1175 { 1176 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1177 int len = 0; 1178 1179 switch (vha->hw->zio_mode) { 1180 case QLA_ZIO_MODE_6: 1181 len += scnprintf(buf + len, PAGE_SIZE-len, "Mode 6\n"); 1182 break; 1183 case QLA_ZIO_DISABLED: 1184 len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n"); 1185 break; 1186 } 1187 return len; 1188 } 1189 1190 static ssize_t 1191 qla2x00_zio_store(struct device *dev, struct device_attribute *attr, 1192 const char *buf, size_t count) 1193 { 1194 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1195 struct qla_hw_data *ha = vha->hw; 1196 int val = 0; 1197 uint16_t zio_mode; 1198 1199 if (!IS_ZIO_SUPPORTED(ha)) 1200 return -ENOTSUPP; 1201 1202 if (sscanf(buf, "%d", &val) != 1) 1203 return -EINVAL; 1204 1205 if (val) 1206 zio_mode = QLA_ZIO_MODE_6; 1207 else 1208 zio_mode = QLA_ZIO_DISABLED; 1209 1210 /* Update per-hba values and queue a reset. 
*/ 1211 if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) { 1212 ha->zio_mode = zio_mode; 1213 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1214 } 1215 return strlen(buf); 1216 } 1217 1218 static ssize_t 1219 qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr, 1220 char *buf) 1221 { 1222 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1223 1224 return scnprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100); 1225 } 1226 1227 static ssize_t 1228 qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr, 1229 const char *buf, size_t count) 1230 { 1231 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1232 int val = 0; 1233 uint16_t zio_timer; 1234 1235 if (sscanf(buf, "%d", &val) != 1) 1236 return -EINVAL; 1237 if (val > 25500 || val < 100) 1238 return -ERANGE; 1239 1240 zio_timer = (uint16_t)(val / 100); 1241 vha->hw->zio_timer = zio_timer; 1242 1243 return strlen(buf); 1244 } 1245 1246 static ssize_t 1247 qla_zio_threshold_show(struct device *dev, struct device_attribute *attr, 1248 char *buf) 1249 { 1250 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1251 1252 return scnprintf(buf, PAGE_SIZE, "%d exchanges\n", 1253 vha->hw->last_zio_threshold); 1254 } 1255 1256 static ssize_t 1257 qla_zio_threshold_store(struct device *dev, struct device_attribute *attr, 1258 const char *buf, size_t count) 1259 { 1260 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1261 int val = 0; 1262 1263 if (vha->hw->zio_mode != QLA_ZIO_MODE_6) 1264 return -EINVAL; 1265 if (sscanf(buf, "%d", &val) != 1) 1266 return -EINVAL; 1267 if (val < 0 || val > 256) 1268 return -ERANGE; 1269 1270 atomic_set(&vha->hw->zio_threshold, val); 1271 return strlen(buf); 1272 } 1273 1274 static ssize_t 1275 qla2x00_beacon_show(struct device *dev, struct device_attribute *attr, 1276 char *buf) 1277 { 1278 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1279 int len = 0; 1280 1281 if (vha->hw->beacon_blink_led) 1282 len += scnprintf(buf + len, PAGE_SIZE-len, "Enabled\n"); 1283 else 1284 len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n"); 1285 return len; 1286 } 1287 1288 static ssize_t 1289 qla2x00_beacon_store(struct device *dev, struct device_attribute *attr, 1290 const char *buf, size_t count) 1291 { 1292 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1293 struct qla_hw_data *ha = vha->hw; 1294 int val = 0; 1295 int rval; 1296 1297 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 1298 return -EPERM; 1299 1300 if (sscanf(buf, "%d", &val) != 1) 1301 return -EINVAL; 1302 1303 mutex_lock(&vha->hw->optrom_mutex); 1304 if (qla2x00_chip_is_down(vha)) { 1305 mutex_unlock(&vha->hw->optrom_mutex); 1306 ql_log(ql_log_warn, vha, 0x707a, 1307 "Abort ISP active -- ignoring beacon request.\n"); 1308 return -EBUSY; 1309 } 1310 1311 if (val) 1312 rval = ha->isp_ops->beacon_on(vha); 1313 else 1314 rval = ha->isp_ops->beacon_off(vha); 1315 1316 if (rval != QLA_SUCCESS) 1317 count = 0; 1318 1319 mutex_unlock(&vha->hw->optrom_mutex); 1320 1321 return count; 1322 } 1323 1324 static ssize_t 1325 qla2x00_optrom_bios_version_show(struct device *dev, 1326 struct device_attribute *attr, char *buf) 1327 { 1328 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1329 struct qla_hw_data *ha = vha->hw; 1330 1331 return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1], 1332 ha->bios_revision[0]); 1333 } 1334 1335 static ssize_t 1336 qla2x00_optrom_efi_version_show(struct device *dev, 1337 struct device_attribute *attr, char *buf) 1338 { 1339 
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1340 struct qla_hw_data *ha = vha->hw; 1341 1342 return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1], 1343 ha->efi_revision[0]); 1344 } 1345 1346 static ssize_t 1347 qla2x00_optrom_fcode_version_show(struct device *dev, 1348 struct device_attribute *attr, char *buf) 1349 { 1350 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1351 struct qla_hw_data *ha = vha->hw; 1352 1353 return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1], 1354 ha->fcode_revision[0]); 1355 } 1356 1357 static ssize_t 1358 qla2x00_optrom_fw_version_show(struct device *dev, 1359 struct device_attribute *attr, char *buf) 1360 { 1361 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1362 struct qla_hw_data *ha = vha->hw; 1363 1364 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n", 1365 ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2], 1366 ha->fw_revision[3]); 1367 } 1368 1369 static ssize_t 1370 qla2x00_optrom_gold_fw_version_show(struct device *dev, 1371 struct device_attribute *attr, char *buf) 1372 { 1373 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1374 struct qla_hw_data *ha = vha->hw; 1375 1376 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && 1377 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 1378 return scnprintf(buf, PAGE_SIZE, "\n"); 1379 1380 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n", 1381 ha->gold_fw_version[0], ha->gold_fw_version[1], 1382 ha->gold_fw_version[2], ha->gold_fw_version[3]); 1383 } 1384 1385 static ssize_t 1386 qla2x00_total_isp_aborts_show(struct device *dev, 1387 struct device_attribute *attr, char *buf) 1388 { 1389 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1390 1391 return scnprintf(buf, PAGE_SIZE, "%d\n", 1392 vha->qla_stats.total_isp_aborts); 1393 } 1394 1395 static ssize_t 1396 qla24xx_84xx_fw_version_show(struct device *dev, 1397 struct device_attribute *attr, char *buf) 1398 { 1399 int rval = QLA_SUCCESS; 1400 uint16_t status[2] = { 0 }; 1401 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1402 struct qla_hw_data *ha = vha->hw; 1403 1404 if (!IS_QLA84XX(ha)) 1405 return scnprintf(buf, PAGE_SIZE, "\n"); 1406 1407 if (!ha->cs84xx->op_fw_version) { 1408 rval = qla84xx_verify_chip(vha, status); 1409 1410 if (!rval && !status[0]) 1411 return scnprintf(buf, PAGE_SIZE, "%u\n", 1412 (uint32_t)ha->cs84xx->op_fw_version); 1413 } 1414 1415 return scnprintf(buf, PAGE_SIZE, "\n"); 1416 } 1417 1418 static ssize_t 1419 qla2x00_serdes_version_show(struct device *dev, struct device_attribute *attr, 1420 char *buf) 1421 { 1422 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1423 struct qla_hw_data *ha = vha->hw; 1424 1425 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 1426 return scnprintf(buf, PAGE_SIZE, "\n"); 1427 1428 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n", 1429 ha->serdes_version[0], ha->serdes_version[1], 1430 ha->serdes_version[2]); 1431 } 1432 1433 static ssize_t 1434 qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr, 1435 char *buf) 1436 { 1437 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1438 struct qla_hw_data *ha = vha->hw; 1439 1440 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha) && 1441 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 1442 return scnprintf(buf, PAGE_SIZE, "\n"); 1443 1444 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n", 1445 ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2], 1446 ha->mpi_capabilities); 1447 } 1448 1449 static ssize_t 1450 qla2x00_phy_version_show(struct device 
*dev, struct device_attribute *attr, 1451 char *buf) 1452 { 1453 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1454 struct qla_hw_data *ha = vha->hw; 1455 1456 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) 1457 return scnprintf(buf, PAGE_SIZE, "\n"); 1458 1459 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n", 1460 ha->phy_version[0], ha->phy_version[1], ha->phy_version[2]); 1461 } 1462 1463 static ssize_t 1464 qla2x00_flash_block_size_show(struct device *dev, 1465 struct device_attribute *attr, char *buf) 1466 { 1467 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1468 struct qla_hw_data *ha = vha->hw; 1469 1470 return scnprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size); 1471 } 1472 1473 static ssize_t 1474 qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr, 1475 char *buf) 1476 { 1477 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1478 1479 if (!IS_CNA_CAPABLE(vha->hw)) 1480 return scnprintf(buf, PAGE_SIZE, "\n"); 1481 1482 return scnprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id); 1483 } 1484 1485 static ssize_t 1486 qla2x00_vn_port_mac_address_show(struct device *dev, 1487 struct device_attribute *attr, char *buf) 1488 { 1489 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1490 1491 if (!IS_CNA_CAPABLE(vha->hw)) 1492 return scnprintf(buf, PAGE_SIZE, "\n"); 1493 1494 return scnprintf(buf, PAGE_SIZE, "%pMR\n", vha->fcoe_vn_port_mac); 1495 } 1496 1497 static ssize_t 1498 qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr, 1499 char *buf) 1500 { 1501 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1502 1503 return scnprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap); 1504 } 1505 1506 static ssize_t 1507 qla2x00_thermal_temp_show(struct device *dev, 1508 struct device_attribute *attr, char *buf) 1509 { 1510 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1511 uint16_t temp = 0; 1512 int rc; 1513 1514 mutex_lock(&vha->hw->optrom_mutex); 1515 if (qla2x00_chip_is_down(vha)) { 1516 mutex_unlock(&vha->hw->optrom_mutex); 1517 ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n"); 1518 goto done; 1519 } 1520 1521 if (vha->hw->flags.eeh_busy) { 1522 mutex_unlock(&vha->hw->optrom_mutex); 1523 ql_log(ql_log_warn, vha, 0x70dd, "PCI EEH busy.\n"); 1524 goto done; 1525 } 1526 1527 rc = qla2x00_get_thermal_temp(vha, &temp); 1528 mutex_unlock(&vha->hw->optrom_mutex); 1529 if (rc == QLA_SUCCESS) 1530 return scnprintf(buf, PAGE_SIZE, "%d\n", temp); 1531 1532 done: 1533 return scnprintf(buf, PAGE_SIZE, "\n"); 1534 } 1535 1536 static ssize_t 1537 qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr, 1538 char *buf) 1539 { 1540 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1541 int rval = QLA_FUNCTION_FAILED; 1542 uint16_t state[6]; 1543 uint32_t pstate; 1544 1545 if (IS_QLAFX00(vha->hw)) { 1546 pstate = qlafx00_fw_state_show(dev, attr, buf); 1547 return scnprintf(buf, PAGE_SIZE, "0x%x\n", pstate); 1548 } 1549 1550 mutex_lock(&vha->hw->optrom_mutex); 1551 if (qla2x00_chip_is_down(vha)) { 1552 mutex_unlock(&vha->hw->optrom_mutex); 1553 ql_log(ql_log_warn, vha, 0x707c, 1554 "ISP reset active.\n"); 1555 goto out; 1556 } else if (vha->hw->flags.eeh_busy) { 1557 mutex_unlock(&vha->hw->optrom_mutex); 1558 goto out; 1559 } 1560 1561 rval = qla2x00_get_firmware_state(vha, state); 1562 mutex_unlock(&vha->hw->optrom_mutex); 1563 out: 1564 if (rval != QLA_SUCCESS) { 1565 memset(state, -1, sizeof(state)); 1566 rval = qla2x00_get_firmware_state(vha, state); 1567 } 1568 1569 return scnprintf(buf, 
PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", 1570 state[0], state[1], state[2], state[3], state[4], state[5]); 1571 } 1572 1573 static ssize_t 1574 qla2x00_diag_requests_show(struct device *dev, 1575 struct device_attribute *attr, char *buf) 1576 { 1577 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1578 1579 if (!IS_BIDI_CAPABLE(vha->hw)) 1580 return scnprintf(buf, PAGE_SIZE, "\n"); 1581 1582 return scnprintf(buf, PAGE_SIZE, "%llu\n", vha->bidi_stats.io_count); 1583 } 1584 1585 static ssize_t 1586 qla2x00_diag_megabytes_show(struct device *dev, 1587 struct device_attribute *attr, char *buf) 1588 { 1589 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1590 1591 if (!IS_BIDI_CAPABLE(vha->hw)) 1592 return scnprintf(buf, PAGE_SIZE, "\n"); 1593 1594 return scnprintf(buf, PAGE_SIZE, "%llu\n", 1595 vha->bidi_stats.transfer_bytes >> 20); 1596 } 1597 1598 static ssize_t 1599 qla2x00_fw_dump_size_show(struct device *dev, struct device_attribute *attr, 1600 char *buf) 1601 { 1602 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1603 struct qla_hw_data *ha = vha->hw; 1604 uint32_t size; 1605 1606 if (!ha->fw_dumped) 1607 size = 0; 1608 else if (IS_P3P_TYPE(ha)) 1609 size = ha->md_template_size + ha->md_dump_size; 1610 else 1611 size = ha->fw_dump_len; 1612 1613 return scnprintf(buf, PAGE_SIZE, "%d\n", size); 1614 } 1615 1616 static ssize_t 1617 qla2x00_allow_cna_fw_dump_show(struct device *dev, 1618 struct device_attribute *attr, char *buf) 1619 { 1620 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1621 1622 if (!IS_P3P_TYPE(vha->hw)) 1623 return scnprintf(buf, PAGE_SIZE, "\n"); 1624 else 1625 return scnprintf(buf, PAGE_SIZE, "%s\n", 1626 vha->hw->allow_cna_fw_dump ? "true" : "false"); 1627 } 1628 1629 static ssize_t 1630 qla2x00_allow_cna_fw_dump_store(struct device *dev, 1631 struct device_attribute *attr, const char *buf, size_t count) 1632 { 1633 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1634 int val = 0; 1635 1636 if (!IS_P3P_TYPE(vha->hw)) 1637 return -EINVAL; 1638 1639 if (sscanf(buf, "%d", &val) != 1) 1640 return -EINVAL; 1641 1642 vha->hw->allow_cna_fw_dump = val != 0; 1643 1644 return strlen(buf); 1645 } 1646 1647 static ssize_t 1648 qla2x00_pep_version_show(struct device *dev, struct device_attribute *attr, 1649 char *buf) 1650 { 1651 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1652 struct qla_hw_data *ha = vha->hw; 1653 1654 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 1655 return scnprintf(buf, PAGE_SIZE, "\n"); 1656 1657 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n", 1658 ha->pep_version[0], ha->pep_version[1], ha->pep_version[2]); 1659 } 1660 1661 static ssize_t 1662 qla2x00_min_supported_speed_show(struct device *dev, 1663 struct device_attribute *attr, char *buf) 1664 { 1665 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1666 struct qla_hw_data *ha = vha->hw; 1667 1668 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 1669 return scnprintf(buf, PAGE_SIZE, "\n"); 1670 1671 return scnprintf(buf, PAGE_SIZE, "%s\n", 1672 ha->min_supported_speed == 6 ? "64Gps" : 1673 ha->min_supported_speed == 5 ? "32Gps" : 1674 ha->min_supported_speed == 4 ? "16Gps" : 1675 ha->min_supported_speed == 3 ? "8Gps" : 1676 ha->min_supported_speed == 2 ? "4Gps" : 1677 ha->min_supported_speed != 0 ? 
"unknown" : ""); 1678 } 1679 1680 static ssize_t 1681 qla2x00_max_supported_speed_show(struct device *dev, 1682 struct device_attribute *attr, char *buf) 1683 { 1684 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1685 struct qla_hw_data *ha = vha->hw; 1686 1687 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 1688 return scnprintf(buf, PAGE_SIZE, "\n"); 1689 1690 return scnprintf(buf, PAGE_SIZE, "%s\n", 1691 ha->max_supported_speed == 2 ? "64Gps" : 1692 ha->max_supported_speed == 1 ? "32Gps" : 1693 ha->max_supported_speed == 0 ? "16Gps" : "unknown"); 1694 } 1695 1696 static ssize_t 1697 qla2x00_port_speed_store(struct device *dev, struct device_attribute *attr, 1698 const char *buf, size_t count) 1699 { 1700 struct scsi_qla_host *vha = shost_priv(dev_to_shost(dev)); 1701 ulong type, speed; 1702 int oldspeed, rval; 1703 int mode = QLA_SET_DATA_RATE_LR; 1704 struct qla_hw_data *ha = vha->hw; 1705 1706 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) { 1707 ql_log(ql_log_warn, vha, 0x70d8, 1708 "Speed setting not supported \n"); 1709 return -EINVAL; 1710 } 1711 1712 rval = kstrtol(buf, 10, &type); 1713 if (rval) 1714 return rval; 1715 speed = type; 1716 if (type == 40 || type == 80 || type == 160 || 1717 type == 320) { 1718 ql_dbg(ql_dbg_user, vha, 0x70d9, 1719 "Setting will be affected after a loss of sync\n"); 1720 type = type/10; 1721 mode = QLA_SET_DATA_RATE_NOLR; 1722 } 1723 1724 oldspeed = ha->set_data_rate; 1725 1726 switch (type) { 1727 case 0: 1728 ha->set_data_rate = PORT_SPEED_AUTO; 1729 break; 1730 case 4: 1731 ha->set_data_rate = PORT_SPEED_4GB; 1732 break; 1733 case 8: 1734 ha->set_data_rate = PORT_SPEED_8GB; 1735 break; 1736 case 16: 1737 ha->set_data_rate = PORT_SPEED_16GB; 1738 break; 1739 case 32: 1740 ha->set_data_rate = PORT_SPEED_32GB; 1741 break; 1742 default: 1743 ql_log(ql_log_warn, vha, 0x1199, 1744 "Unrecognized speed setting:%lx. 
Setting Autoneg\n", 1745 speed); 1746 ha->set_data_rate = PORT_SPEED_AUTO; 1747 } 1748 1749 if (qla2x00_chip_is_down(vha) || (oldspeed == ha->set_data_rate)) 1750 return -EINVAL; 1751 1752 ql_log(ql_log_info, vha, 0x70da, 1753 "Setting speed to %lx Gbps \n", type); 1754 1755 rval = qla2x00_set_data_rate(vha, mode); 1756 if (rval != QLA_SUCCESS) 1757 return -EIO; 1758 1759 return strlen(buf); 1760 } 1761 1762 static ssize_t 1763 qla2x00_port_speed_show(struct device *dev, struct device_attribute *attr, 1764 char *buf) 1765 { 1766 struct scsi_qla_host *vha = shost_priv(dev_to_shost(dev)); 1767 struct qla_hw_data *ha = vha->hw; 1768 ssize_t rval; 1769 char *spd[7] = {"0", "0", "0", "4", "8", "16", "32"}; 1770 1771 rval = qla2x00_get_data_rate(vha); 1772 if (rval != QLA_SUCCESS) { 1773 ql_log(ql_log_warn, vha, 0x70db, 1774 "Unable to get port speed rval:%zd\n", rval); 1775 return -EINVAL; 1776 } 1777 1778 ql_log(ql_log_info, vha, 0x70d6, 1779 "port speed:%d\n", ha->link_data_rate); 1780 1781 return scnprintf(buf, PAGE_SIZE, "%s\n", spd[ha->link_data_rate]); 1782 } 1783 1784 /* ----- */ 1785 1786 static ssize_t 1787 qlini_mode_show(struct device *dev, struct device_attribute *attr, char *buf) 1788 { 1789 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1790 int len = 0; 1791 1792 len += scnprintf(buf + len, PAGE_SIZE-len, 1793 "Supported options: enabled | disabled | dual | exclusive\n"); 1794 1795 /* --- */ 1796 len += scnprintf(buf + len, PAGE_SIZE-len, "Current selection: "); 1797 1798 switch (vha->qlini_mode) { 1799 case QLA2XXX_INI_MODE_EXCLUSIVE: 1800 len += scnprintf(buf + len, PAGE_SIZE-len, 1801 QLA2XXX_INI_MODE_STR_EXCLUSIVE); 1802 break; 1803 case QLA2XXX_INI_MODE_DISABLED: 1804 len += scnprintf(buf + len, PAGE_SIZE-len, 1805 QLA2XXX_INI_MODE_STR_DISABLED); 1806 break; 1807 case QLA2XXX_INI_MODE_ENABLED: 1808 len += scnprintf(buf + len, PAGE_SIZE-len, 1809 QLA2XXX_INI_MODE_STR_ENABLED); 1810 break; 1811 case QLA2XXX_INI_MODE_DUAL: 1812 len += scnprintf(buf + len, PAGE_SIZE-len, 1813 QLA2XXX_INI_MODE_STR_DUAL); 1814 break; 1815 } 1816 len += scnprintf(buf + len, PAGE_SIZE-len, "\n"); 1817 1818 return len; 1819 } 1820 1821 static char *mode_to_str[] = { 1822 "exclusive", 1823 "disabled", 1824 "enabled", 1825 "dual", 1826 }; 1827 1828 #define NEED_EXCH_OFFLOAD(_exchg) ((_exchg) > FW_DEF_EXCHANGES_CNT) 1829 static int qla_set_ini_mode(scsi_qla_host_t *vha, int op) 1830 { 1831 int rc = 0; 1832 enum { 1833 NO_ACTION, 1834 MODE_CHANGE_ACCEPT, 1835 MODE_CHANGE_NO_ACTION, 1836 TARGET_STILL_ACTIVE, 1837 }; 1838 int action = NO_ACTION; 1839 int set_mode = 0; 1840 u8 eo_toggle = 0; /* exchange offload flipped */ 1841 1842 switch (vha->qlini_mode) { 1843 case QLA2XXX_INI_MODE_DISABLED: 1844 switch (op) { 1845 case QLA2XXX_INI_MODE_DISABLED: 1846 if (qla_tgt_mode_enabled(vha)) { 1847 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) != 1848 vha->hw->flags.exchoffld_enabled) 1849 eo_toggle = 1; 1850 if (((vha->ql2xexchoffld != 1851 vha->u_ql2xexchoffld) && 1852 NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) || 1853 eo_toggle) { 1854 /* 1855 * The number of exchange to be offload 1856 * was tweaked or offload option was 1857 * flipped 1858 */ 1859 action = MODE_CHANGE_ACCEPT; 1860 } else { 1861 action = MODE_CHANGE_NO_ACTION; 1862 } 1863 } else { 1864 action = MODE_CHANGE_NO_ACTION; 1865 } 1866 break; 1867 case QLA2XXX_INI_MODE_EXCLUSIVE: 1868 if (qla_tgt_mode_enabled(vha)) { 1869 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) != 1870 vha->hw->flags.exchoffld_enabled) 1871 eo_toggle = 1; 1872 if 
(((vha->ql2xexchoffld != 1873 vha->u_ql2xexchoffld) && 1874 NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) || 1875 eo_toggle) { 1876 /* 1877 * The number of exchange to be offload 1878 * was tweaked or offload option was 1879 * flipped 1880 */ 1881 action = MODE_CHANGE_ACCEPT; 1882 } else { 1883 action = MODE_CHANGE_NO_ACTION; 1884 } 1885 } else { 1886 action = MODE_CHANGE_ACCEPT; 1887 } 1888 break; 1889 case QLA2XXX_INI_MODE_DUAL: 1890 action = MODE_CHANGE_ACCEPT; 1891 /* active_mode is target only, reset it to dual */ 1892 if (qla_tgt_mode_enabled(vha)) { 1893 set_mode = 1; 1894 action = MODE_CHANGE_ACCEPT; 1895 } else { 1896 action = MODE_CHANGE_NO_ACTION; 1897 } 1898 break; 1899 1900 case QLA2XXX_INI_MODE_ENABLED: 1901 if (qla_tgt_mode_enabled(vha)) 1902 action = TARGET_STILL_ACTIVE; 1903 else { 1904 action = MODE_CHANGE_ACCEPT; 1905 set_mode = 1; 1906 } 1907 break; 1908 } 1909 break; 1910 1911 case QLA2XXX_INI_MODE_EXCLUSIVE: 1912 switch (op) { 1913 case QLA2XXX_INI_MODE_EXCLUSIVE: 1914 if (qla_tgt_mode_enabled(vha)) { 1915 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) != 1916 vha->hw->flags.exchoffld_enabled) 1917 eo_toggle = 1; 1918 if (((vha->ql2xexchoffld != 1919 vha->u_ql2xexchoffld) && 1920 NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) || 1921 eo_toggle) 1922 /* 1923 * The number of exchange to be offload 1924 * was tweaked or offload option was 1925 * flipped 1926 */ 1927 action = MODE_CHANGE_ACCEPT; 1928 else 1929 action = NO_ACTION; 1930 } else 1931 action = NO_ACTION; 1932 1933 break; 1934 1935 case QLA2XXX_INI_MODE_DISABLED: 1936 if (qla_tgt_mode_enabled(vha)) { 1937 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) != 1938 vha->hw->flags.exchoffld_enabled) 1939 eo_toggle = 1; 1940 if (((vha->ql2xexchoffld != 1941 vha->u_ql2xexchoffld) && 1942 NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) || 1943 eo_toggle) 1944 action = MODE_CHANGE_ACCEPT; 1945 else 1946 action = MODE_CHANGE_NO_ACTION; 1947 } else 1948 action = MODE_CHANGE_NO_ACTION; 1949 break; 1950 1951 case QLA2XXX_INI_MODE_DUAL: /* exclusive -> dual */ 1952 if (qla_tgt_mode_enabled(vha)) { 1953 action = MODE_CHANGE_ACCEPT; 1954 set_mode = 1; 1955 } else 1956 action = MODE_CHANGE_ACCEPT; 1957 break; 1958 1959 case QLA2XXX_INI_MODE_ENABLED: 1960 if (qla_tgt_mode_enabled(vha)) 1961 action = TARGET_STILL_ACTIVE; 1962 else { 1963 if (vha->hw->flags.fw_started) 1964 action = MODE_CHANGE_NO_ACTION; 1965 else 1966 action = MODE_CHANGE_ACCEPT; 1967 } 1968 break; 1969 } 1970 break; 1971 1972 case QLA2XXX_INI_MODE_ENABLED: 1973 switch (op) { 1974 case QLA2XXX_INI_MODE_ENABLED: 1975 if (NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg) != 1976 vha->hw->flags.exchoffld_enabled) 1977 eo_toggle = 1; 1978 if (((vha->ql2xiniexchg != vha->u_ql2xiniexchg) && 1979 NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg)) || 1980 eo_toggle) 1981 action = MODE_CHANGE_ACCEPT; 1982 else 1983 action = NO_ACTION; 1984 break; 1985 case QLA2XXX_INI_MODE_DUAL: 1986 case QLA2XXX_INI_MODE_DISABLED: 1987 action = MODE_CHANGE_ACCEPT; 1988 break; 1989 default: 1990 action = MODE_CHANGE_NO_ACTION; 1991 break; 1992 } 1993 break; 1994 1995 case QLA2XXX_INI_MODE_DUAL: 1996 switch (op) { 1997 case QLA2XXX_INI_MODE_DUAL: 1998 if (qla_tgt_mode_enabled(vha) || 1999 qla_dual_mode_enabled(vha)) { 2000 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld + 2001 vha->u_ql2xiniexchg) != 2002 vha->hw->flags.exchoffld_enabled) 2003 eo_toggle = 1; 2004 2005 if ((((vha->ql2xexchoffld + 2006 vha->ql2xiniexchg) != 2007 (vha->u_ql2xiniexchg + 2008 vha->u_ql2xexchoffld)) && 2009 NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg + 2010 
vha->u_ql2xexchoffld)) || eo_toggle) 2011 action = MODE_CHANGE_ACCEPT; 2012 else 2013 action = NO_ACTION; 2014 } else { 2015 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld + 2016 vha->u_ql2xiniexchg) != 2017 vha->hw->flags.exchoffld_enabled) 2018 eo_toggle = 1; 2019 2020 if ((((vha->ql2xexchoffld + vha->ql2xiniexchg) 2021 != (vha->u_ql2xiniexchg + 2022 vha->u_ql2xexchoffld)) && 2023 NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg + 2024 vha->u_ql2xexchoffld)) || eo_toggle) 2025 action = MODE_CHANGE_NO_ACTION; 2026 else 2027 action = NO_ACTION; 2028 } 2029 break; 2030 2031 case QLA2XXX_INI_MODE_DISABLED: 2032 if (qla_tgt_mode_enabled(vha) || 2033 qla_dual_mode_enabled(vha)) { 2034 /* turning off initiator mode */ 2035 set_mode = 1; 2036 action = MODE_CHANGE_ACCEPT; 2037 } else { 2038 action = MODE_CHANGE_NO_ACTION; 2039 } 2040 break; 2041 2042 case QLA2XXX_INI_MODE_EXCLUSIVE: 2043 if (qla_tgt_mode_enabled(vha) || 2044 qla_dual_mode_enabled(vha)) { 2045 set_mode = 1; 2046 action = MODE_CHANGE_ACCEPT; 2047 } else { 2048 action = MODE_CHANGE_ACCEPT; 2049 } 2050 break; 2051 2052 case QLA2XXX_INI_MODE_ENABLED: 2053 if (qla_tgt_mode_enabled(vha) || 2054 qla_dual_mode_enabled(vha)) { 2055 action = TARGET_STILL_ACTIVE; 2056 } else { 2057 action = MODE_CHANGE_ACCEPT; 2058 } 2059 } 2060 break; 2061 } 2062 2063 switch (action) { 2064 case MODE_CHANGE_ACCEPT: 2065 ql_log(ql_log_warn, vha, 0xffff, 2066 "Mode change accepted. From %s to %s, Tgt exchg %d|%d. ini exchg %d|%d\n", 2067 mode_to_str[vha->qlini_mode], mode_to_str[op], 2068 vha->ql2xexchoffld, vha->u_ql2xexchoffld, 2069 vha->ql2xiniexchg, vha->u_ql2xiniexchg); 2070 2071 vha->qlini_mode = op; 2072 vha->ql2xexchoffld = vha->u_ql2xexchoffld; 2073 vha->ql2xiniexchg = vha->u_ql2xiniexchg; 2074 if (set_mode) 2075 qlt_set_mode(vha); 2076 vha->flags.online = 1; 2077 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2078 break; 2079 2080 case MODE_CHANGE_NO_ACTION: 2081 ql_log(ql_log_warn, vha, 0xffff, 2082 "Mode is set. No action taken. From %s to %s, Tgt exchg %d|%d. ini exchg %d|%d\n", 2083 mode_to_str[vha->qlini_mode], mode_to_str[op], 2084 vha->ql2xexchoffld, vha->u_ql2xexchoffld, 2085 vha->ql2xiniexchg, vha->u_ql2xiniexchg); 2086 vha->qlini_mode = op; 2087 vha->ql2xexchoffld = vha->u_ql2xexchoffld; 2088 vha->ql2xiniexchg = vha->u_ql2xiniexchg; 2089 break; 2090 2091 case TARGET_STILL_ACTIVE: 2092 ql_log(ql_log_warn, vha, 0xffff, 2093 "Target Mode is active. Unable to change Mode.\n"); 2094 break; 2095 2096 case NO_ACTION: 2097 default: 2098 ql_log(ql_log_warn, vha, 0xffff, 2099 "Mode unchange. No action taken. 

	switch (action) {
	case MODE_CHANGE_ACCEPT:
		ql_log(ql_log_warn, vha, 0xffff,
		    "Mode change accepted. From %s to %s, Tgt exchg %d|%d. ini exchg %d|%d\n",
		    mode_to_str[vha->qlini_mode], mode_to_str[op],
		    vha->ql2xexchoffld, vha->u_ql2xexchoffld,
		    vha->ql2xiniexchg, vha->u_ql2xiniexchg);

		vha->qlini_mode = op;
		vha->ql2xexchoffld = vha->u_ql2xexchoffld;
		vha->ql2xiniexchg = vha->u_ql2xiniexchg;
		if (set_mode)
			qlt_set_mode(vha);
		vha->flags.online = 1;
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MODE_CHANGE_NO_ACTION:
		ql_log(ql_log_warn, vha, 0xffff,
		    "Mode is set. No action taken. From %s to %s, Tgt exchg %d|%d. ini exchg %d|%d\n",
		    mode_to_str[vha->qlini_mode], mode_to_str[op],
		    vha->ql2xexchoffld, vha->u_ql2xexchoffld,
		    vha->ql2xiniexchg, vha->u_ql2xiniexchg);
		vha->qlini_mode = op;
		vha->ql2xexchoffld = vha->u_ql2xexchoffld;
		vha->ql2xiniexchg = vha->u_ql2xiniexchg;
		break;

	case TARGET_STILL_ACTIVE:
		ql_log(ql_log_warn, vha, 0xffff,
		    "Target Mode is active. Unable to change Mode.\n");
		break;

	case NO_ACTION:
	default:
		ql_log(ql_log_warn, vha, 0xffff,
		    "Mode unchanged. No action taken. %d|%d pct %d|%d.\n",
		    vha->qlini_mode, op,
		    vha->ql2xexchoffld, vha->u_ql2xexchoffld);
		break;
	}

	return rc;
}

static ssize_t
qlini_mode_store(struct device *dev, struct device_attribute *attr,
	const char *buf, size_t count)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	int ini;

	if (!buf)
		return -EINVAL;

	if (strncasecmp(QLA2XXX_INI_MODE_STR_EXCLUSIVE, buf,
	    strlen(QLA2XXX_INI_MODE_STR_EXCLUSIVE)) == 0)
		ini = QLA2XXX_INI_MODE_EXCLUSIVE;
	else if (strncasecmp(QLA2XXX_INI_MODE_STR_DISABLED, buf,
	    strlen(QLA2XXX_INI_MODE_STR_DISABLED)) == 0)
		ini = QLA2XXX_INI_MODE_DISABLED;
	else if (strncasecmp(QLA2XXX_INI_MODE_STR_ENABLED, buf,
	    strlen(QLA2XXX_INI_MODE_STR_ENABLED)) == 0)
		ini = QLA2XXX_INI_MODE_ENABLED;
	else if (strncasecmp(QLA2XXX_INI_MODE_STR_DUAL, buf,
	    strlen(QLA2XXX_INI_MODE_STR_DUAL)) == 0)
		ini = QLA2XXX_INI_MODE_DUAL;
	else
		return -EINVAL;

	qla_set_ini_mode(vha, ini);
	return strlen(buf);
}

static ssize_t
ql2xexchoffld_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	int len = 0;

	len += scnprintf(buf + len, PAGE_SIZE-len,
	    "target exchange: new %d : current: %d\n\n",
	    vha->u_ql2xexchoffld, vha->ql2xexchoffld);

	len += scnprintf(buf + len, PAGE_SIZE-len,
	    "Please (re)set operating mode via \"/sys/class/scsi_host/host%ld/qlini_mode\" to load new setting.\n",
	    vha->host_no);

	return len;
}

static ssize_t
ql2xexchoffld_store(struct device *dev, struct device_attribute *attr,
	const char *buf, size_t count)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	int val = 0;

	if (sscanf(buf, "%d", &val) != 1)
		return -EINVAL;

	if (val > FW_MAX_EXCHANGES_CNT)
		val = FW_MAX_EXCHANGES_CNT;
	else if (val < 0)
		val = 0;

	vha->u_ql2xexchoffld = val;
	return strlen(buf);
}

static ssize_t
ql2xiniexchg_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	int len = 0;

	len += scnprintf(buf + len, PAGE_SIZE-len,
	    "initiator exchange: new %d : current: %d\n\n",
	    vha->u_ql2xiniexchg, vha->ql2xiniexchg);

	len += scnprintf(buf + len, PAGE_SIZE-len,
	    "Please (re)set operating mode via \"/sys/class/scsi_host/host%ld/qlini_mode\" to load new setting.\n",
	    vha->host_no);

	return len;
}

static ssize_t
ql2xiniexchg_store(struct device *dev, struct device_attribute *attr,
	const char *buf, size_t count)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	int val = 0;

	if (sscanf(buf, "%d", &val) != 1)
		return -EINVAL;

	if (val > FW_MAX_EXCHANGES_CNT)
		val = FW_MAX_EXCHANGES_CNT;
	else if (val < 0)
		val = 0;

	vha->u_ql2xiniexchg = val;
	return strlen(buf);
}
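
/*
 * The qlini_mode/ql2xexchoffld/ql2xiniexchg attributes above are staged
 * settings: the store handlers only record the requested values in
 * u_ql2xexchoffld/u_ql2xiniexchg or hand the mode string to
 * qla_set_ini_mode(), and a new exchange split only takes effect once a
 * mode is (re)written to qlini_mode.  A rough usage sketch, assuming the
 * usual lower-case strings behind the QLA2XXX_INI_MODE_STR_* macros and an
 * example host number:
 *
 *   echo 1024 > /sys/class/scsi_host/host0/ql2xexchoffld
 *   echo 1024 > /sys/class/scsi_host/host0/ql2xiniexchg
 *   echo dual > /sys/class/scsi_host/host0/qlini_mode
 *   cat /sys/class/scsi_host/host0/ql2xexchoffld
 */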

static ssize_t
qla2x00_dif_bundle_statistics_show(struct device *dev,
    struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;

	return scnprintf(buf, PAGE_SIZE,
	    "cross=%llu read=%llu write=%llu kalloc=%llu dma_alloc=%llu unusable=%u\n",
	    ha->dif_bundle_crossed_pages, ha->dif_bundle_reads,
	    ha->dif_bundle_writes, ha->dif_bundle_kallocs,
	    ha->dif_bundle_dma_allocs, ha->pool.unusable.count);
}
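
/*
 * qla2x00_fw_attr_show() below reports one 64-bit attribute word that is
 * assembled from four 16-bit firmware attribute fields on ISP27xx/28xx
 * parts: fw_attributes_ext[1] and [0] form the upper half, fw_attributes_h
 * and fw_attributes the lower half.  As a worked example, fields of 0x0001,
 * 0x0002, 0x0003 and 0x0004 would read back through sysfs as
 * 1000200030004 (hex).
 */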

static ssize_t
qla2x00_fw_attr_show(struct device *dev,
    struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return scnprintf(buf, PAGE_SIZE, "\n");

	return scnprintf(buf, PAGE_SIZE, "%llx\n",
	    (uint64_t)ha->fw_attributes_ext[1] << 48 |
	    (uint64_t)ha->fw_attributes_ext[0] << 32 |
	    (uint64_t)ha->fw_attributes_h << 16 |
	    (uint64_t)ha->fw_attributes);
}

static ssize_t
qla2x00_port_no_show(struct device *dev, struct device_attribute *attr,
    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

	return scnprintf(buf, PAGE_SIZE, "%u\n", vha->hw->port_no);
}

static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_driver_version_show, NULL);
static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
static DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL);
static DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL);
static DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL);
static DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL);
static DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL);
static DEVICE_ATTR(link_state, S_IRUGO, qla2x00_link_state_show, NULL);
static DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show, qla2x00_zio_store);
static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
    qla2x00_zio_timer_store);
static DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show,
    qla2x00_beacon_store);
static DEVICE_ATTR(optrom_bios_version, S_IRUGO,
    qla2x00_optrom_bios_version_show, NULL);
static DEVICE_ATTR(optrom_efi_version, S_IRUGO,
    qla2x00_optrom_efi_version_show, NULL);
static DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
    qla2x00_optrom_fcode_version_show, NULL);
static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
    NULL);
static DEVICE_ATTR(optrom_gold_fw_version, S_IRUGO,
    qla2x00_optrom_gold_fw_version_show, NULL);
static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show,
    NULL);
static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
    NULL);
static DEVICE_ATTR(serdes_version, 0444, qla2x00_serdes_version_show, NULL);
static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,
    NULL);
static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL);
static DEVICE_ATTR(vn_port_mac_address, S_IRUGO,
    qla2x00_vn_port_mac_address_show, NULL);
static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL);
static DEVICE_ATTR(thermal_temp, S_IRUGO, qla2x00_thermal_temp_show, NULL);
static DEVICE_ATTR(diag_requests, S_IRUGO, qla2x00_diag_requests_show, NULL);
static DEVICE_ATTR(diag_megabytes, S_IRUGO, qla2x00_diag_megabytes_show, NULL);
static DEVICE_ATTR(fw_dump_size, S_IRUGO, qla2x00_fw_dump_size_show, NULL);
static DEVICE_ATTR(allow_cna_fw_dump, S_IRUGO | S_IWUSR,
    qla2x00_allow_cna_fw_dump_show,
    qla2x00_allow_cna_fw_dump_store);
static DEVICE_ATTR(pep_version, S_IRUGO, qla2x00_pep_version_show, NULL);
static DEVICE_ATTR(min_supported_speed, 0444,
    qla2x00_min_supported_speed_show, NULL);
static DEVICE_ATTR(max_supported_speed, 0444,
    qla2x00_max_supported_speed_show, NULL);
static DEVICE_ATTR(zio_threshold, 0644,
    qla_zio_threshold_show,
    qla_zio_threshold_store);
static DEVICE_ATTR_RW(qlini_mode);
static DEVICE_ATTR_RW(ql2xexchoffld);
static DEVICE_ATTR_RW(ql2xiniexchg);
static DEVICE_ATTR(dif_bundle_statistics, 0444,
    qla2x00_dif_bundle_statistics_show, NULL);
static DEVICE_ATTR(port_speed, 0644, qla2x00_port_speed_show,
    qla2x00_port_speed_store);
static DEVICE_ATTR(port_no, 0444, qla2x00_port_no_show, NULL);
static DEVICE_ATTR(fw_attr, 0444, qla2x00_fw_attr_show, NULL);

struct device_attribute *qla2x00_host_attrs[] = {
	&dev_attr_driver_version,
	&dev_attr_fw_version,
	&dev_attr_serial_num,
	&dev_attr_isp_name,
	&dev_attr_isp_id,
	&dev_attr_model_name,
	&dev_attr_model_desc,
	&dev_attr_pci_info,
	&dev_attr_link_state,
	&dev_attr_zio,
	&dev_attr_zio_timer,
	&dev_attr_beacon,
	&dev_attr_optrom_bios_version,
	&dev_attr_optrom_efi_version,
	&dev_attr_optrom_fcode_version,
	&dev_attr_optrom_fw_version,
	&dev_attr_84xx_fw_version,
	&dev_attr_total_isp_aborts,
	&dev_attr_serdes_version,
	&dev_attr_mpi_version,
	&dev_attr_phy_version,
	&dev_attr_flash_block_size,
	&dev_attr_vlan_id,
	&dev_attr_vn_port_mac_address,
	&dev_attr_fabric_param,
	&dev_attr_fw_state,
	&dev_attr_optrom_gold_fw_version,
	&dev_attr_thermal_temp,
	&dev_attr_diag_requests,
	&dev_attr_diag_megabytes,
	&dev_attr_fw_dump_size,
	&dev_attr_allow_cna_fw_dump,
	&dev_attr_pep_version,
	&dev_attr_min_supported_speed,
	&dev_attr_max_supported_speed,
	&dev_attr_zio_threshold,
	&dev_attr_dif_bundle_statistics,
	&dev_attr_port_speed,
	&dev_attr_port_no,
	&dev_attr_fw_attr,
	NULL, /* reserve for qlini_mode */
	NULL, /* reserve for ql2xiniexchg */
	NULL, /* reserve for ql2xexchoffld */
	NULL,
};

void qla_insert_tgt_attrs(void)
{
	struct device_attribute **attr;

	/* advance to empty slot */
	for (attr = &qla2x00_host_attrs[0]; *attr; ++attr)
		continue;

	*attr = &dev_attr_qlini_mode;
	attr++;
	*attr = &dev_attr_ql2xiniexchg;
	attr++;
	*attr = &dev_attr_ql2xexchoffld;
}
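
/*
 * qla2x00_host_attrs[] above is intentionally terminated by several NULL
 * entries: the first three act as reserved slots that qla_insert_tgt_attrs()
 * fills in at runtime with the qlini_mode, ql2xiniexchg and ql2xexchoffld
 * attributes (presumably only when target mode support is configured), while
 * the final NULL keeps the array terminated for the SCSI midlayer.
 */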

/* Host attributes. */

static void
qla2x00_get_host_port_id(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);

	fc_host_port_id(shost) = vha->d_id.b.domain << 16 |
	    vha->d_id.b.area << 8 | vha->d_id.b.al_pa;
}

static void
qla2x00_get_host_speed(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);
	u32 speed;

	if (IS_QLAFX00(vha->hw)) {
		qlafx00_get_host_speed(shost);
		return;
	}

	switch (vha->hw->link_data_rate) {
	case PORT_SPEED_1GB:
		speed = FC_PORTSPEED_1GBIT;
		break;
	case PORT_SPEED_2GB:
		speed = FC_PORTSPEED_2GBIT;
		break;
	case PORT_SPEED_4GB:
		speed = FC_PORTSPEED_4GBIT;
		break;
	case PORT_SPEED_8GB:
		speed = FC_PORTSPEED_8GBIT;
		break;
	case PORT_SPEED_10GB:
		speed = FC_PORTSPEED_10GBIT;
		break;
	case PORT_SPEED_16GB:
		speed = FC_PORTSPEED_16GBIT;
		break;
	case PORT_SPEED_32GB:
		speed = FC_PORTSPEED_32GBIT;
		break;
	case PORT_SPEED_64GB:
		speed = FC_PORTSPEED_64GBIT;
		break;
	default:
		speed = FC_PORTSPEED_UNKNOWN;
		break;
	}

	fc_host_speed(shost) = speed;
}

static void
qla2x00_get_host_port_type(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);
	uint32_t port_type;

	if (vha->vp_idx) {
		fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
		return;
	}
	switch (vha->hw->current_topology) {
	case ISP_CFG_NL:
		port_type = FC_PORTTYPE_LPORT;
		break;
	case ISP_CFG_FL:
		port_type = FC_PORTTYPE_NLPORT;
		break;
	case ISP_CFG_N:
		port_type = FC_PORTTYPE_PTP;
		break;
	case ISP_CFG_F:
		port_type = FC_PORTTYPE_NPORT;
		break;
	default:
		port_type = FC_PORTTYPE_UNKNOWN;
		break;
	}

	fc_host_port_type(shost) = port_type;
}

static void
qla2x00_get_starget_node_name(struct scsi_target *starget)
{
	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
	scsi_qla_host_t *vha = shost_priv(host);
	fc_port_t *fcport;
	u64 node_name = 0;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->rport &&
		    starget->id == fcport->rport->scsi_target_id) {
			node_name = wwn_to_u64(fcport->node_name);
			break;
		}
	}

	fc_starget_node_name(starget) = node_name;
}

static void
qla2x00_get_starget_port_name(struct scsi_target *starget)
{
	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
	scsi_qla_host_t *vha = shost_priv(host);
	fc_port_t *fcport;
	u64 port_name = 0;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->rport &&
		    starget->id == fcport->rport->scsi_target_id) {
			port_name = wwn_to_u64(fcport->port_name);
			break;
		}
	}

	fc_starget_port_name(starget) = port_name;
}

static void
qla2x00_get_starget_port_id(struct scsi_target *starget)
{
	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
	scsi_qla_host_t *vha = shost_priv(host);
	fc_port_t *fcport;
	uint32_t port_id = ~0U;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->rport &&
		    starget->id == fcport->rport->scsi_target_id) {
			port_id = fcport->d_id.b.domain << 16 |
			    fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
			break;
		}
	}

	fc_starget_port_id(starget) = port_id;
}
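
/*
 * The helpers above pack the 24-bit FC address (D_ID) as
 * domain << 16 | area << 8 | al_pa, so, for example, domain 0x01, area 0x02
 * and al_pa 0x03 are reported as port ID 0x010203; ~0U is used for the port
 * ID when no matching rport is found.
 */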

static inline void
qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
{
	rport->dev_loss_tmo = timeout ? timeout : 1;
}

static void
qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
{
	struct Scsi_Host *host = rport_to_shost(rport);
	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
	unsigned long flags;

	if (!fcport)
		return;

	/*
	 * Now that the rport has been deleted, set the fcport state to
	 * FCS_DEVICE_DEAD.
	 */
	qla2x00_set_fcport_state(fcport, FCS_DEVICE_DEAD);

	/*
	 * Transport has effectively 'deleted' the rport, clear
	 * all local references.
	 */
	spin_lock_irqsave(host->host_lock, flags);
	fcport->rport = fcport->drport = NULL;
	*((fc_port_t **)rport->dd_data) = NULL;
	spin_unlock_irqrestore(host->host_lock, flags);

	if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
		return;

	if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
		qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
		return;
	}
}

static void
qla2x00_terminate_rport_io(struct fc_rport *rport)
{
	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;

	if (!fcport)
		return;

	if (test_bit(UNLOADING, &fcport->vha->dpc_flags))
		return;

	if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
		return;

	if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
		qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
		return;
	}
	/*
	 * At this point all fcport's software-states are cleared.  Perform any
	 * final cleanup of firmware resources (PCBs and XCBs).
	 */
	if (fcport->loop_id != FC_NO_LOOP_ID) {
		if (IS_FWI2_CAPABLE(fcport->vha->hw))
			fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
			    fcport->loop_id, fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa);
		else
			qla2x00_port_logout(fcport->vha, fcport);
	}
}

static int
qla2x00_issue_lip(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);

	if (IS_QLAFX00(vha->hw))
		return 0;

	qla2x00_loop_reset(vha);
	return 0;
}
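
/*
 * qla2x00_get_fc_host_stats() below fills vha->fc_host_stat from a
 * DMA-coherent struct link_statistics fetched via qla24xx_get_isp_stats()
 * or, on older parts, qla2x00_get_link_status().  The initial
 * memset(p, -1, ...) leaves any counter that cannot be retrieved at
 * all-ones so it can be told apart from a genuine zero count.
 */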

static struct fc_host_statistics *
qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	int rval;
	struct link_statistics *stats;
	dma_addr_t stats_dma;
	struct fc_host_statistics *p = &vha->fc_host_stat;

	memset(p, -1, sizeof(*p));

	if (IS_QLAFX00(vha->hw))
		goto done;

	if (test_bit(UNLOADING, &vha->dpc_flags))
		goto done;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto done;

	if (qla2x00_chip_is_down(vha))
		goto done;

	stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
	    GFP_KERNEL);
	if (!stats) {
		ql_log(ql_log_warn, vha, 0x707d,
		    "Failed to allocate memory for stats.\n");
		goto done;
	}

	rval = QLA_FUNCTION_FAILED;
	if (IS_FWI2_CAPABLE(ha)) {
		rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, 0);
	} else if (atomic_read(&base_vha->loop_state) == LOOP_READY &&
	    !ha->dpc_active) {
		/* Must be in a 'READY' state for statistics retrieval. */
		rval = qla2x00_get_link_status(base_vha, base_vha->loop_id,
		    stats, stats_dma);
	}

	if (rval != QLA_SUCCESS)
		goto done_free;

	p->link_failure_count = stats->link_fail_cnt;
	p->loss_of_sync_count = stats->loss_sync_cnt;
	p->loss_of_signal_count = stats->loss_sig_cnt;
	p->prim_seq_protocol_err_count = stats->prim_seq_err_cnt;
	p->invalid_tx_word_count = stats->inval_xmit_word_cnt;
	p->invalid_crc_count = stats->inval_crc_cnt;
	if (IS_FWI2_CAPABLE(ha)) {
		p->lip_count = stats->lip_cnt;
		p->tx_frames = stats->tx_frames;
		p->rx_frames = stats->rx_frames;
		p->dumped_frames = stats->discarded_frames;
		p->nos_count = stats->nos_rcvd;
		p->error_frames =
		    stats->dropped_frames + stats->discarded_frames;
		p->rx_words = vha->qla_stats.input_bytes;
		p->tx_words = vha->qla_stats.output_bytes;
	}
	p->fcp_control_requests = vha->qla_stats.control_requests;
	p->fcp_input_requests = vha->qla_stats.input_requests;
	p->fcp_output_requests = vha->qla_stats.output_requests;
	p->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20;
	p->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20;
	p->seconds_since_last_reset =
	    get_jiffies_64() - vha->qla_stats.jiffies_at_last_reset;
	do_div(p->seconds_since_last_reset, HZ);

done_free:
	dma_free_coherent(&ha->pdev->dev, sizeof(struct link_statistics),
	    stats, stats_dma);
done:
	return p;
}

static void
qla2x00_reset_host_stats(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct link_statistics *stats;
	dma_addr_t stats_dma;

	memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
	memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));

	vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();

	if (IS_FWI2_CAPABLE(ha)) {
		stats = dma_alloc_coherent(&ha->pdev->dev,
		    sizeof(*stats), &stats_dma, GFP_KERNEL);
		if (!stats) {
			ql_log(ql_log_warn, vha, 0x70d7,
			    "Failed to allocate memory for stats.\n");
			return;
		}

		/* reset firmware statistics */
		qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0);

		dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
		    stats, stats_dma);
	}
}

static void
qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);

	qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost),
	    sizeof(fc_host_symbolic_name(shost)));
}

static void
qla2x00_set_host_system_hostname(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);

	set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
}

static void
qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);
	static const uint8_t node_name[WWN_SIZE] = {
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
	};
	u64 fabric_name = wwn_to_u64(node_name);

	if (vha->device_flags & SWITCH_FOUND)
		fabric_name = wwn_to_u64(vha->fabric_node_name);

	fc_host_fabric_name(shost) = fabric_name;
}
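
/*
 * qla2x00_get_host_port_state() below maps the driver's internal loop state
 * onto the generic FC transport states: LOOP_READY -> ONLINE,
 * LOOP_DEAD -> LINKDOWN, LOOP_UPDATE -> DIAGNOSTICS, LOOP_DOWN -> LINKDOWN
 * (or DIAGNOSTICS while a loop resync is pending), anything else -> UNKNOWN,
 * with an OFFLINE short-circuit when the base port is not online.
 */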

static void
qla2x00_get_host_port_state(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);
	struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);

	if (!base_vha->flags.online) {
		fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
		return;
	}

	switch (atomic_read(&base_vha->loop_state)) {
	case LOOP_UPDATE:
		fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
		break;
	case LOOP_DOWN:
		if (test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags))
			fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
		else
			fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
		break;
	case LOOP_DEAD:
		fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
		break;
	case LOOP_READY:
		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
		break;
	default:
		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
		break;
	}
}

static int
qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
{
	int ret = 0;
	uint8_t qos = 0;
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	scsi_qla_host_t *vha = NULL;
	struct qla_hw_data *ha = base_vha->hw;
	int cnt;
	struct req_que *req = ha->req_q_map[0];
	struct qla_qpair *qpair;

	ret = qla24xx_vport_create_req_sanity_check(fc_vport);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x707e,
		    "Vport sanity check failed, status %x\n", ret);
		return (ret);
	}

	vha = qla24xx_create_vhost(fc_vport);
	if (vha == NULL) {
		ql_log(ql_log_warn, vha, 0x707f, "Vport create host failed.\n");
		return FC_VPORT_FAILED;
	}
	if (disable) {
		atomic_set(&vha->vp_state, VP_OFFLINE);
		fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
	} else
		atomic_set(&vha->vp_state, VP_FAILED);

	/* ready to create vport */
	ql_log(ql_log_info, vha, 0x7080,
	    "VP entry id %d assigned.\n", vha->vp_idx);

	/* initialize vport states */
	atomic_set(&vha->loop_state, LOOP_DOWN);
	vha->vp_err_state = VP_ERR_PORTDWN;
	vha->vp_prev_err_state = VP_ERR_UNKWN;
	/* Check if physical ha port is Up */
	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
	    atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
		/* Don't retry or attempt login of this virtual port */
		ql_dbg(ql_dbg_user, vha, 0x7081,
		    "Vport loop state is not UP.\n");
		atomic_set(&vha->loop_state, LOOP_DEAD);
		if (!disable)
			fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
	}

	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
		if (ha->fw_attributes & BIT_4) {
			int prot = 0, guard;

			vha->flags.difdix_supported = 1;
			ql_dbg(ql_dbg_user, vha, 0x7082,
			    "Registered for DIF/DIX type 1 and 3 protection.\n");
			if (ql2xenabledif == 1)
				prot = SHOST_DIX_TYPE0_PROTECTION;
			scsi_host_set_prot(vha->host,
			    prot | SHOST_DIF_TYPE1_PROTECTION
			    | SHOST_DIF_TYPE2_PROTECTION
			    | SHOST_DIF_TYPE3_PROTECTION
			    | SHOST_DIX_TYPE1_PROTECTION
			    | SHOST_DIX_TYPE2_PROTECTION
			    | SHOST_DIX_TYPE3_PROTECTION);

			guard = SHOST_DIX_GUARD_CRC;

			if (IS_PI_IPGUARD_CAPABLE(ha) &&
			    (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
				guard |= SHOST_DIX_GUARD_IP;

			scsi_host_set_guard(vha->host, guard);
		} else
			vha->flags.difdix_supported = 0;
	}

	if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
	    &ha->pdev->dev)) {
		ql_dbg(ql_dbg_user, vha, 0x7083,
		    "scsi_add_host failure for VP[%d].\n", vha->vp_idx);
		goto vport_create_failed_2;
	}

	/* initialize attributes */
	fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
	fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
	fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
	fc_host_supported_classes(vha->host) =
	    fc_host_supported_classes(base_vha->host);
	fc_host_supported_speeds(vha->host) =
	    fc_host_supported_speeds(base_vha->host);

	qlt_vport_create(vha, ha);
	qla24xx_vport_disable(fc_vport, disable);

	if (!ql2xmqsupport || !ha->npiv_info)
		goto vport_queue;

	/* Create a request queue in QoS mode for the vport */
	for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
		if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
		    && memcmp(ha->npiv_info[cnt].node_name, vha->node_name,
		    8) == 0) {
			qos = ha->npiv_info[cnt].q_qos;
			break;
		}
	}

	if (qos) {
		qpair = qla2xxx_create_qpair(vha, qos, vha->vp_idx, true);
		if (!qpair)
			ql_log(ql_log_warn, vha, 0x7084,
			    "Can't create qpair for VP[%d]\n",
			    vha->vp_idx);
		else {
			ql_dbg(ql_dbg_multiq, vha, 0xc001,
			    "Queue pair: %d Qos: %d created for VP[%d]\n",
			    qpair->id, qos, vha->vp_idx);
			ql_dbg(ql_dbg_user, vha, 0x7085,
			    "Queue Pair: %d Qos: %d created for VP[%d]\n",
			    qpair->id, qos, vha->vp_idx);
			req = qpair->req;
			vha->qpair = qpair;
		}
	}

vport_queue:
	vha->req = req;
	return 0;

vport_create_failed_2:
	qla24xx_disable_vp(vha);
	qla24xx_deallocate_vp_id(vha);
	scsi_host_put(vha->host);
	return FC_VPORT_FAILED;
}

static int
qla24xx_vport_delete(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *vha = fc_vport->dd_data;
	struct qla_hw_data *ha = vha->hw;
	uint16_t id = vha->vp_idx;

	while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
	    test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
		msleep(1000);

	qla_nvme_delete(vha);

	qla24xx_disable_vp(vha);
	qla2x00_wait_for_sess_deletion(vha);

	vha->flags.delete_progress = 1;

	qlt_remove_target(ha, vha);

	fc_remove_host(vha->host);

	scsi_remove_host(vha->host);

	/* Allow timer to run to drain queued items, when removing vp */
	qla24xx_deallocate_vp_id(vha);

	if (vha->timer_active) {
		qla2x00_vp_stop_timer(vha);
		ql_dbg(ql_dbg_user, vha, 0x7086,
		    "Timer for the VP[%d] has stopped\n", vha->vp_idx);
	}

	qla2x00_free_fcports(vha);

	mutex_lock(&ha->vport_lock);
	ha->cur_vport_count--;
	clear_bit(vha->vp_idx, ha->vp_idx_map);
	mutex_unlock(&ha->vport_lock);

	dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
	    vha->gnl.ldma);

	vha->gnl.l = NULL;

	vfree(vha->scan.l);

	if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) {
		if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS)
			ql_log(ql_log_warn, vha, 0x7087,
			    "Queue Pair delete failed.\n");
	}

	ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
	scsi_host_put(vha->host);
	return 0;
}

static int
qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
{
	scsi_qla_host_t *vha = fc_vport->dd_data;

	if (disable)
		qla24xx_disable_vp(vha);
	else
		qla24xx_enable_vp(vha);

	return 0;
}
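
/*
 * The vport_create/vport_delete/vport_disable callbacks above are invoked by
 * the FC transport class when NPIV virtual ports are managed from user space
 * (typically through the vport management nodes scsi_transport_fc exposes
 * under /sys/class/fc_host/hostN/); the sysfs format itself is owned by the
 * transport, not by this driver.
 */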

struct fc_function_template qla2xxx_transport_functions = {

	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,
	.show_host_supported_speeds = 1,

	.get_host_port_id = qla2x00_get_host_port_id,
	.show_host_port_id = 1,
	.get_host_speed = qla2x00_get_host_speed,
	.show_host_speed = 1,
	.get_host_port_type = qla2x00_get_host_port_type,
	.show_host_port_type = 1,
	.get_host_symbolic_name = qla2x00_get_host_symbolic_name,
	.show_host_symbolic_name = 1,
	.set_host_system_hostname = qla2x00_set_host_system_hostname,
	.show_host_system_hostname = 1,
	.get_host_fabric_name = qla2x00_get_host_fabric_name,
	.show_host_fabric_name = 1,
	.get_host_port_state = qla2x00_get_host_port_state,
	.show_host_port_state = 1,

	.dd_fcrport_size = sizeof(struct fc_port *),
	.show_rport_supported_classes = 1,

	.get_starget_node_name = qla2x00_get_starget_node_name,
	.show_starget_node_name = 1,
	.get_starget_port_name = qla2x00_get_starget_port_name,
	.show_starget_port_name = 1,
	.get_starget_port_id = qla2x00_get_starget_port_id,
	.show_starget_port_id = 1,

	.set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
	.show_rport_dev_loss_tmo = 1,

	.issue_fc_host_lip = qla2x00_issue_lip,
	.dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
	.terminate_rport_io = qla2x00_terminate_rport_io,
	.get_fc_host_stats = qla2x00_get_fc_host_stats,
	.reset_fc_host_stats = qla2x00_reset_host_stats,

	.vport_create = qla24xx_vport_create,
	.vport_disable = qla24xx_vport_disable,
	.vport_delete = qla24xx_vport_delete,
	.bsg_request = qla24xx_bsg_request,
	.bsg_timeout = qla24xx_bsg_timeout,
};

struct fc_function_template qla2xxx_transport_vport_functions = {

	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,

	.get_host_port_id = qla2x00_get_host_port_id,
	.show_host_port_id = 1,
	.get_host_speed = qla2x00_get_host_speed,
	.show_host_speed = 1,
	.get_host_port_type = qla2x00_get_host_port_type,
	.show_host_port_type = 1,
	.get_host_symbolic_name = qla2x00_get_host_symbolic_name,
	.show_host_symbolic_name = 1,
	.set_host_system_hostname = qla2x00_set_host_system_hostname,
	.show_host_system_hostname = 1,
	.get_host_fabric_name = qla2x00_get_host_fabric_name,
	.show_host_fabric_name = 1,
	.get_host_port_state = qla2x00_get_host_port_state,
	.show_host_port_state = 1,

	.dd_fcrport_size = sizeof(struct fc_port *),
	.show_rport_supported_classes = 1,

	.get_starget_node_name = qla2x00_get_starget_node_name,
	.show_starget_node_name = 1,
	.get_starget_port_name = qla2x00_get_starget_port_name,
	.show_starget_port_name = 1,
	.get_starget_port_id = qla2x00_get_starget_port_id,
	.show_starget_port_id = 1,

	.set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
	.show_rport_dev_loss_tmo = 1,

	.issue_fc_host_lip = qla2x00_issue_lip,
	.dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
	.terminate_rport_io = qla2x00_terminate_rport_io,
	.get_fc_host_stats = qla2x00_get_fc_host_stats,
	.reset_fc_host_stats = qla2x00_reset_host_stats,

	.bsg_request = qla24xx_bsg_request,
	.bsg_timeout = qla24xx_bsg_timeout,
};
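
/*
 * qla2xxx_transport_vport_functions mirrors the physical-port template above
 * but leaves out the vport_create/vport_disable/vport_delete callbacks and
 * the supported-speeds display, so NPIV management is only exposed on the
 * physical port's fc_host.
 */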

void
qla2x00_init_host_attr(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	u32 speeds = FC_PORTSPEED_UNKNOWN;

	fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
	fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
	fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
	fc_host_supported_classes(vha->host) = ha->base_qpair->enable_class_2 ?
	    (FC_COS_CLASS2|FC_COS_CLASS3) : FC_COS_CLASS3;
	fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
	fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;

	if (IS_CNA_CAPABLE(ha))
		speeds = FC_PORTSPEED_10GBIT;
	else if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) {
		if (ha->max_supported_speed == 2) {
			if (ha->min_supported_speed <= 6)
				speeds |= FC_PORTSPEED_64GBIT;
		}
		if (ha->max_supported_speed == 2 ||
		    ha->max_supported_speed == 1) {
			if (ha->min_supported_speed <= 5)
				speeds |= FC_PORTSPEED_32GBIT;
		}
		if (ha->max_supported_speed == 2 ||
		    ha->max_supported_speed == 1 ||
		    ha->max_supported_speed == 0) {
			if (ha->min_supported_speed <= 4)
				speeds |= FC_PORTSPEED_16GBIT;
		}
		if (ha->max_supported_speed == 1 ||
		    ha->max_supported_speed == 0) {
			if (ha->min_supported_speed <= 3)
				speeds |= FC_PORTSPEED_8GBIT;
		}
		if (ha->max_supported_speed == 0) {
			if (ha->min_supported_speed <= 2)
				speeds |= FC_PORTSPEED_4GBIT;
		}
	} else if (IS_QLA2031(ha))
		speeds = FC_PORTSPEED_16GBIT|FC_PORTSPEED_8GBIT|
		    FC_PORTSPEED_4GBIT;
	else if (IS_QLA25XX(ha) || IS_QLAFX00(ha))
		speeds = FC_PORTSPEED_8GBIT|FC_PORTSPEED_4GBIT|
		    FC_PORTSPEED_2GBIT|FC_PORTSPEED_1GBIT;
	else if (IS_QLA24XX_TYPE(ha))
		speeds = FC_PORTSPEED_4GBIT|FC_PORTSPEED_2GBIT|
		    FC_PORTSPEED_1GBIT;
	else if (IS_QLA23XX(ha))
		speeds = FC_PORTSPEED_2GBIT|FC_PORTSPEED_1GBIT;
	else
		speeds = FC_PORTSPEED_1GBIT;

	fc_host_supported_speeds(vha->host) = speeds;
}
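
/*
 * The max_supported_speed/min_supported_speed codes consumed above appear to
 * be small enums reported by ISP27xx/28xx firmware: max codes 0/1/2 select a
 * 16/32/64 Gb/s top speed (each with a three-step window below it), while
 * min codes 2..6 trim the bottom of that window at 4/8/16/32/64 Gb/s
 * respectively; the resulting mask is what fc_host_supported_speeds reports.
 */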