/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>

static int qla24xx_vport_disable(struct fc_vport *, bool);

/* SYSFS attributes --------------------------------------------------------- */

static ssize_t
qla2x00_sysfs_read_fw_dump(struct kobject *kobj,
			   struct bin_attribute *bin_attr,
			   char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;

	if (ha->fw_dump_reading == 0)
		return 0;

	return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
	    ha->fw_dump_len);
}

static ssize_t
qla2x00_sysfs_write_fw_dump(struct kobject *kobj,
			    struct bin_attribute *bin_attr,
			    char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	int reading;

	if (off != 0)
		return 0;

	reading = simple_strtol(buf, NULL, 10);

	/*
	 * Control values: 0 -- clear an existing dump, 1 -- expose a captured
	 * dump for reading, 2 -- (re)allocate the dump buffer, 3 -- request a
	 * firmware system error.
	 */
	switch (reading) {
	case 0:
		if (!ha->fw_dump_reading)
			break;

		qla_printk(KERN_INFO, ha,
		    "Firmware dump cleared on (%ld).\n", vha->host_no);

		ha->fw_dump_reading = 0;
		ha->fw_dumped = 0;
		break;
	case 1:
		if (ha->fw_dumped && !ha->fw_dump_reading) {
			ha->fw_dump_reading = 1;

			qla_printk(KERN_INFO, ha,
			    "Raw firmware dump ready for read on (%ld).\n",
			    vha->host_no);
		}
		break;
	case 2:
		qla2x00_alloc_fw_dump(vha);
		break;
	case 3:
		qla2x00_system_error(vha);
		break;
	}
	return count;
}

static struct bin_attribute sysfs_fw_dump_attr = {
	.attr = {
		.name = "fw_dump",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = qla2x00_sysfs_read_fw_dump,
	.write = qla2x00_sysfs_write_fw_dump,
};

static ssize_t
qla2x00_sysfs_read_nvram(struct kobject *kobj,
			 struct bin_attribute *bin_attr,
			 char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;

	if (!capable(CAP_SYS_ADMIN))
		return 0;

	if (IS_NOCACHE_VPD_TYPE(ha))
		ha->isp_ops->read_optrom(vha, ha->nvram,
		    ha->flt_region_nvram << 2, ha->nvram_size);
	return memory_read_from_buffer(buf, count, &off, ha->nvram,
	    ha->nvram_size);
}

static ssize_t
qla2x00_sysfs_write_nvram(struct kobject *kobj,
			  struct bin_attribute *bin_attr,
			  char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	uint16_t cnt;

	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size ||
	    !ha->isp_ops->write_nvram)
		return 0;

	/* Checksum NVRAM. */
	if (IS_FWI2_CAPABLE(ha)) {
		uint32_t *iter;
		uint32_t chksum;

		iter = (uint32_t *)buf;
		chksum = 0;
		for (cnt = 0; cnt < ((count >> 2) - 1); cnt++)
			chksum += le32_to_cpu(*iter++);
		chksum = ~chksum + 1;
		*iter = cpu_to_le32(chksum);
	} else {
		uint8_t *iter;
		uint8_t chksum;

		iter = (uint8_t *)buf;
		chksum = 0;
		for (cnt = 0; cnt < count - 1; cnt++)
			chksum += *iter++;
		chksum = ~chksum + 1;
		*iter = chksum;
	}

	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
		qla_printk(KERN_WARNING, ha,
		    "HBA not online, failing NVRAM update.\n");
		return -EAGAIN;
	}

	/* Write NVRAM. */
	ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->nvram_base, count);
	ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,
	    count);

	/* NVRAM settings take effect immediately. */
	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
	qla2x00_wait_for_chip_reset(vha);

	return count;
}

static struct bin_attribute sysfs_nvram_attr = {
	.attr = {
		.name = "nvram",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 512,
	.read = qla2x00_sysfs_read_nvram,
	.write = qla2x00_sysfs_write_nvram,
};

static ssize_t
qla2x00_sysfs_read_optrom(struct kobject *kobj,
			  struct bin_attribute *bin_attr,
			  char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;

	if (ha->optrom_state != QLA_SREADING)
		return 0;

	return memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
	    ha->optrom_region_size);
}

static ssize_t
qla2x00_sysfs_write_optrom(struct kobject *kobj,
			   struct bin_attribute *bin_attr,
			   char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;

	if (ha->optrom_state != QLA_SWRITING)
		return -EINVAL;
	if (off > ha->optrom_region_size)
		return -ERANGE;
	if (off + count > ha->optrom_region_size)
		count = ha->optrom_region_size - off;

	memcpy(&ha->optrom_buffer[off], buf, count);

	return count;
}

static struct bin_attribute sysfs_optrom_attr = {
	.attr = {
		.name = "optrom",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = qla2x00_sysfs_read_optrom,
	.write = qla2x00_sysfs_write_optrom,
};

static ssize_t
qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
			       struct bin_attribute *bin_attr,
			       char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;

	uint32_t start = 0;
	uint32_t size = ha->optrom_size;
	int val, valid;

	if (off)
		return 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return 0;

	if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1)
		return -EINVAL;
	if (start > ha->optrom_size)
		return -EINVAL;

	/*
	 * Control values: 0 -- free the staging buffer, 1 -- read a flash
	 * region into the buffer, 2 -- stage a region for update, 3 -- write
	 * the staged buffer to flash.
	 */
	switch (val) {
	case 0:
		if (ha->optrom_state != QLA_SREADING &&
		    ha->optrom_state != QLA_SWRITING)
			break;

		ha->optrom_state = QLA_SWAITING;

		DEBUG2(qla_printk(KERN_INFO, ha,
		    "Freeing flash region allocation -- 0x%x bytes.\n",
		    ha->optrom_region_size));

		vfree(ha->optrom_buffer);
		ha->optrom_buffer = NULL;
		break;
	case 1:
		if (ha->optrom_state != QLA_SWAITING)
			break;

		ha->optrom_region_start = start;
		ha->optrom_region_size = start + size > ha->optrom_size ?
		    ha->optrom_size - start : size;

		ha->optrom_state = QLA_SREADING;
		ha->optrom_buffer = vmalloc(ha->optrom_region_size);
		if (ha->optrom_buffer == NULL) {
			qla_printk(KERN_WARNING, ha,
			    "Unable to allocate memory for optrom retrieval "
			    "(%x).\n", ha->optrom_region_size);

			ha->optrom_state = QLA_SWAITING;
			return count;
		}

		DEBUG2(qla_printk(KERN_INFO, ha,
		    "Reading flash region -- 0x%x/0x%x.\n",
		    ha->optrom_region_start, ha->optrom_region_size));

		memset(ha->optrom_buffer, 0, ha->optrom_region_size);
		ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
		    ha->optrom_region_start, ha->optrom_region_size);
		break;
	case 2:
		if (ha->optrom_state != QLA_SWAITING)
			break;

		/*
		 * We need to be more restrictive on which FLASH regions are
		 * allowed to be updated via user-space. Regions accessible
		 * via this method include:
		 *
		 * ISP21xx/ISP22xx/ISP23xx type boards:
		 *
		 *	0x000000 -> 0x020000 -- Boot code.
		 *
		 * ISP2322/ISP24xx type boards:
		 *
		 *	0x000000 -> 0x07ffff -- Boot code.
		 *	0x080000 -> 0x0fffff -- Firmware.
		 *
		 * ISP25xx type boards:
		 *
		 *	0x000000 -> 0x07ffff -- Boot code.
		 *	0x080000 -> 0x0fffff -- Firmware.
		 *	0x120000 -> 0x12ffff -- VPD and HBA parameters.
		 */
		valid = 0;
		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
			valid = 1;
		else if (start == (ha->flt_region_boot * 4) ||
		    start == (ha->flt_region_fw * 4))
			valid = 1;
		else if (IS_QLA25XX(ha) || IS_QLA81XX(ha))
			valid = 1;
		if (!valid) {
			qla_printk(KERN_WARNING, ha,
			    "Invalid start region 0x%x/0x%x.\n", start, size);
			return -EINVAL;
		}

		ha->optrom_region_start = start;
		ha->optrom_region_size = start + size > ha->optrom_size ?
		    ha->optrom_size - start : size;

		ha->optrom_state = QLA_SWRITING;
		ha->optrom_buffer = vmalloc(ha->optrom_region_size);
		if (ha->optrom_buffer == NULL) {
			qla_printk(KERN_WARNING, ha,
			    "Unable to allocate memory for optrom update "
			    "(%x).\n", ha->optrom_region_size);

			ha->optrom_state = QLA_SWAITING;
			return count;
		}

		DEBUG2(qla_printk(KERN_INFO, ha,
		    "Staging flash region write -- 0x%x/0x%x.\n",
		    ha->optrom_region_start, ha->optrom_region_size));

		memset(ha->optrom_buffer, 0, ha->optrom_region_size);
		break;
	case 3:
		if (ha->optrom_state != QLA_SWRITING)
			break;

		if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
			qla_printk(KERN_WARNING, ha,
			    "HBA not online, failing flash update.\n");
			return -EAGAIN;
		}

		DEBUG2(qla_printk(KERN_INFO, ha,
		    "Writing flash region -- 0x%x/0x%x.\n",
		    ha->optrom_region_start, ha->optrom_region_size));

		ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
		    ha->optrom_region_start, ha->optrom_region_size);
		break;
	default:
		count = -EINVAL;
	}
	return count;
}

static struct bin_attribute sysfs_optrom_ctl_attr = {
	.attr = {
		.name = "optrom_ctl",
		.mode = S_IWUSR,
	},
	.size = 0,
	.write = qla2x00_sysfs_write_optrom_ctl,
};

static ssize_t
qla2x00_sysfs_read_vpd(struct kobject *kobj,
		       struct bin_attribute *bin_attr,
		       char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return 0;

	if (!capable(CAP_SYS_ADMIN))
		return 0;

	if (IS_NOCACHE_VPD_TYPE(ha))
		ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
		    ha->vpd_size);
	return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
}

static ssize_t
qla2x00_sysfs_write_vpd(struct kobject *kobj,
			struct bin_attribute *bin_attr,
			char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	uint8_t *tmp_data;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return 0;

	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
	    !ha->isp_ops->write_nvram)
		return 0;

	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
		qla_printk(KERN_WARNING, ha,
		    "HBA not online, failing VPD update.\n");
		return -EAGAIN;
	}

	/* Write NVRAM. */
	ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->vpd_base, count);
	ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, ha->vpd_base, count);

	/* Update flash version information for 4Gb & above. */
	if (!IS_FWI2_CAPABLE(ha))
		goto done;

	tmp_data = vmalloc(256);
	if (!tmp_data) {
		qla_printk(KERN_WARNING, ha,
		    "Unable to allocate memory for VPD information update.\n");
		goto done;
	}
	ha->isp_ops->get_flash_version(vha, tmp_data);
	vfree(tmp_data);
done:
	return count;
}

static struct bin_attribute sysfs_vpd_attr = {
	.attr = {
		.name = "vpd",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = qla2x00_sysfs_read_vpd,
	.write = qla2x00_sysfs_write_vpd,
};

static ssize_t
qla2x00_sysfs_read_sfp(struct kobject *kobj,
		       struct bin_attribute *bin_attr,
		       char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	uint16_t iter, addr, offset;
	int rval;

	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != SFP_DEV_SIZE * 2)
		return 0;

	if (ha->sfp_data)
		goto do_read;

	ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
	    &ha->sfp_data_dma);
	if (!ha->sfp_data) {
		qla_printk(KERN_WARNING, ha,
		    "Unable to allocate memory for SFP read-data.\n");
		return 0;
	}

do_read:
	memset(ha->sfp_data, 0, SFP_BLOCK_SIZE);
	addr = 0xa0;
	for (iter = 0, offset = 0; iter < (SFP_DEV_SIZE * 2) / SFP_BLOCK_SIZE;
	    iter++, offset += SFP_BLOCK_SIZE) {
		if (iter == 4) {
			/* Skip to next device address. */
			addr = 0xa2;
			offset = 0;
		}

		rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, addr, offset,
		    SFP_BLOCK_SIZE);
		if (rval != QLA_SUCCESS) {
			qla_printk(KERN_WARNING, ha,
			    "Unable to read SFP data (%x/%x/%x).\n", rval,
			    addr, offset);
			count = 0;
			break;
		}
		memcpy(buf, ha->sfp_data, SFP_BLOCK_SIZE);
		buf += SFP_BLOCK_SIZE;
	}

	return count;
}

static struct bin_attribute sysfs_sfp_attr = {
	.attr = {
		.name = "sfp",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = SFP_DEV_SIZE * 2,
	.read = qla2x00_sysfs_read_sfp,
};

static ssize_t
qla2x00_sysfs_write_reset(struct kobject *kobj,
			  struct bin_attribute *bin_attr,
			  char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	int type;

	if (off != 0)
		return 0;

	type = simple_strtol(buf, NULL, 10);
	switch (type) {
	case 0x2025c:
		qla_printk(KERN_INFO, ha,
		    "Issuing ISP reset on (%ld).\n", vha->host_no);

		scsi_block_requests(vha->host);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
		qla2x00_wait_for_chip_reset(vha);
		scsi_unblock_requests(vha->host);
		break;
	case 0x2025d:
		if (!IS_QLA81XX(ha))
			break;

		qla_printk(KERN_INFO, ha,
		    "Issuing MPI reset on (%ld).\n", vha->host_no);

		/* Make sure FC side is not in reset */
		qla2x00_wait_for_hba_online(vha);

		/* Issue MPI reset */
		scsi_block_requests(vha->host);
		if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
			qla_printk(KERN_WARNING, ha,
			    "MPI reset failed on (%ld).\n", vha->host_no);
		scsi_unblock_requests(vha->host);
		break;
	}
	return count;
}

static struct bin_attribute sysfs_reset_attr = {
	.attr = {
		.name = "reset",
		.mode = S_IWUSR,
	},
	.size = 0,
	.write = qla2x00_sysfs_write_reset,
};

static ssize_t
qla2x00_sysfs_write_edc(struct kobject *kobj,
			struct bin_attribute *bin_attr,
			char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	uint16_t dev, adr, opt, len;
	int rval;

	ha->edc_data_len = 0;

	if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8)
		return 0;

	if (!ha->edc_data) {
		ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
		    &ha->edc_data_dma);
		if (!ha->edc_data) {
			DEBUG2(qla_printk(KERN_INFO, ha,
			    "Unable to allocate memory for EDC write.\n"));
			return 0;
		}
	}

	dev = le16_to_cpup((void *)&buf[0]);
	adr = le16_to_cpup((void *)&buf[2]);
	opt = le16_to_cpup((void *)&buf[4]);
	len = le16_to_cpup((void *)&buf[6]);

	if (!(opt & BIT_0))
		if (len == 0 || len > DMA_POOL_SIZE || len > count - 8)
			return -EINVAL;

	memcpy(ha->edc_data, &buf[8], len);

	rval = qla2x00_write_edc(vha, dev, adr, ha->edc_data_dma,
	    ha->edc_data, len, opt);
	if (rval != QLA_SUCCESS) {
		DEBUG2(qla_printk(KERN_INFO, ha,
		    "Unable to write EDC (%x) %02x:%02x:%04x:%02x:%02x.\n",
		    rval, dev, adr, opt, len, *buf));
		return 0;
	}

	return count;
}

static struct bin_attribute sysfs_edc_attr = {
	.attr = {
		.name = "edc",
		.mode = S_IWUSR,
	},
	.size = 0,
	.write = qla2x00_sysfs_write_edc,
};

static ssize_t
qla2x00_sysfs_write_edc_status(struct kobject *kobj,
			       struct bin_attribute *bin_attr,
			       char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	uint16_t dev, adr, opt, len;
	int rval;

	ha->edc_data_len = 0;

	if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8)
		return 0;

	if (!ha->edc_data) {
		ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
		    &ha->edc_data_dma);
		if (!ha->edc_data) {
			DEBUG2(qla_printk(KERN_INFO, ha,
			    "Unable to allocate memory for EDC status.\n"));
			return 0;
		}
	}

	dev = le16_to_cpup((void *)&buf[0]);
	adr = le16_to_cpup((void *)&buf[2]);
	opt = le16_to_cpup((void *)&buf[4]);
	len = le16_to_cpup((void *)&buf[6]);

	if (!(opt & BIT_0))
		if (len == 0 || len > DMA_POOL_SIZE)
			return -EINVAL;

	memset(ha->edc_data, 0, len);
	rval = qla2x00_read_edc(vha, dev, adr, ha->edc_data_dma,
	    ha->edc_data, len, opt);
	if (rval != QLA_SUCCESS) {
		DEBUG2(qla_printk(KERN_INFO, ha,
		    "Unable to write EDC status (%x) %02x:%02x:%04x:%02x.\n",
		    rval, dev, adr, opt, len));
		return 0;
	}

	ha->edc_data_len = len;

	return count;
}

static ssize_t
qla2x00_sysfs_read_edc_status(struct kobject *kobj,
			      struct bin_attribute *bin_attr,
			      char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;

	if (!capable(CAP_SYS_ADMIN) || off != 0 || count == 0)
		return 0;

	if (!ha->edc_data || ha->edc_data_len == 0 || ha->edc_data_len > count)
		return -EINVAL;

	memcpy(buf, ha->edc_data, ha->edc_data_len);

	return ha->edc_data_len;
}

static struct bin_attribute sysfs_edc_status_attr = {
	.attr = {
		.name = "edc_status",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.write = qla2x00_sysfs_write_edc_status,
	.read = qla2x00_sysfs_read_edc_status,
};

static ssize_t
qla2x00_sysfs_read_xgmac_stats(struct kobject *kobj,
			       struct bin_attribute *bin_attr,
			       char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	int rval;
	uint16_t actual_size;

	if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE)
		return 0;

	if (ha->xgmac_data)
		goto do_read;

	ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
	    &ha->xgmac_data_dma, GFP_KERNEL);
	if (!ha->xgmac_data) {
		qla_printk(KERN_WARNING, ha,
		    "Unable to allocate memory for XGMAC read-data.\n");
		return 0;
	}

do_read:
	actual_size = 0;
	memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE);

	rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
	    XGMAC_DATA_SIZE, &actual_size);
	if (rval != QLA_SUCCESS) {
		qla_printk(KERN_WARNING, ha,
		    "Unable to read XGMAC data (%x).\n", rval);
		count = 0;
	}

	count = actual_size > count ? count : actual_size;
	memcpy(buf, ha->xgmac_data, count);

	return count;
}

static struct bin_attribute sysfs_xgmac_stats_attr = {
	.attr = {
		.name = "xgmac_stats",
		.mode = S_IRUSR,
	},
	.size = 0,
	.read = qla2x00_sysfs_read_xgmac_stats,
};

static ssize_t
qla2x00_sysfs_read_dcbx_tlv(struct kobject *kobj,
			    struct bin_attribute *bin_attr,
			    char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	int rval;
	uint16_t actual_size;

	if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)
		return 0;

	if (ha->dcbx_tlv)
		goto do_read;

	ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
	    &ha->dcbx_tlv_dma, GFP_KERNEL);
	if (!ha->dcbx_tlv) {
		qla_printk(KERN_WARNING, ha,
		    "Unable to allocate memory for DCBX TLV read-data.\n");
		return 0;
	}

do_read:
	actual_size = 0;
	memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE);

	rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
	    DCBX_TLV_DATA_SIZE);
	if (rval != QLA_SUCCESS) {
		qla_printk(KERN_WARNING, ha,
		    "Unable to read DCBX TLV data (%x).\n", rval);
		count = 0;
	}

	memcpy(buf, ha->dcbx_tlv, count);

	return count;
}

static struct bin_attribute sysfs_dcbx_tlv_attr = {
	.attr = {
		.name = "dcbx_tlv",
		.mode = S_IRUSR,
	},
	.size = 0,
	.read = qla2x00_sysfs_read_dcbx_tlv,
};

static struct sysfs_entry {
	char *name;
	struct bin_attribute *attr;
	int is4GBp_only;
} bin_file_entries[] = {
	{ "fw_dump", &sysfs_fw_dump_attr, },
	{ "nvram", &sysfs_nvram_attr, },
	{ "optrom", &sysfs_optrom_attr, },
	{ "optrom_ctl", &sysfs_optrom_ctl_attr, },
	{ "vpd", &sysfs_vpd_attr, 1 },
	{ "sfp", &sysfs_sfp_attr, 1 },
	{ "reset", &sysfs_reset_attr, },
	{ "edc", &sysfs_edc_attr, 2 },
	{ "edc_status", &sysfs_edc_status_attr, 2 },
	{ "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
	{ "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
	{ NULL },
};

void
qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
{
	struct Scsi_Host *host = vha->host;
	struct sysfs_entry *iter;
	int ret;

	for (iter = bin_file_entries; iter->name; iter++) {
		if (iter->is4GBp_only && !IS_FWI2_CAPABLE(vha->hw))
			continue;
		if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw))
			continue;
		if (iter->is4GBp_only == 3 && !IS_QLA81XX(vha->hw))
			continue;

		ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
		    iter->attr);
		if (ret)
			qla_printk(KERN_INFO, vha->hw,
			    "Unable to create sysfs %s binary attribute "
			    "(%d).\n", iter->name, ret);
	}
}

void
qla2x00_free_sysfs_attr(scsi_qla_host_t *vha)
{
	struct Scsi_Host *host = vha->host;
	struct sysfs_entry *iter;
	struct qla_hw_data *ha = vha->hw;

	for (iter = bin_file_entries; iter->name; iter++) {
		if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha))
			continue;
		if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha))
			continue;
		if (iter->is4GBp_only == 3 && !IS_QLA81XX(ha))
			continue;

		sysfs_remove_bin_file(&host->shost_gendev.kobj,
		    iter->attr);
	}

	if (ha->beacon_blink_led == 1)
		ha->isp_ops->beacon_off(vha);
}

/* Scsi_Host attributes. */

static ssize_t
qla2x00_drvr_version_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str);
}

static ssize_t
qla2x00_fw_version_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	char fw_str[128];

	return snprintf(buf, PAGE_SIZE, "%s\n",
	    ha->isp_ops->fw_version_str(vha, fw_str));
}

static ssize_t
qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	uint32_t sn;

	if (IS_FWI2_CAPABLE(ha)) {
		qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE);
		return snprintf(buf, PAGE_SIZE, "%s\n", buf);
	}

	sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
	return snprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,
	    sn % 100000);
}

static ssize_t
qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr,
		      char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	return snprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
}

static ssize_t
qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,
		    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	return snprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
	    ha->product_id[0], ha->product_id[1], ha->product_id[2],
	    ha->product_id[3]);
}

static ssize_t
qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	return snprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
}

static ssize_t
qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	return snprintf(buf, PAGE_SIZE, "%s\n",
	    vha->hw->model_desc ? vha->hw->model_desc : "");
}

static ssize_t
qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,
		      char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	char pci_info[30];

	return snprintf(buf, PAGE_SIZE, "%s\n",
	    vha->hw->isp_ops->pci_info_str(vha, pci_info));
}

static ssize_t
qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	int len = 0;

	if (atomic_read(&vha->loop_state) == LOOP_DOWN ||
	    atomic_read(&vha->loop_state) == LOOP_DEAD)
		len = snprintf(buf, PAGE_SIZE, "Link Down\n");
	else if (atomic_read(&vha->loop_state) != LOOP_READY ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
	    test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
		len = snprintf(buf, PAGE_SIZE, "Unknown Link State\n");
	else {
		len = snprintf(buf, PAGE_SIZE, "Link Up - ");

		switch (ha->current_topology) {
		case ISP_CFG_NL:
			len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n");
			break;
		case ISP_CFG_FL:
			len += snprintf(buf + len, PAGE_SIZE-len, "FL_Port\n");
			break;
		case ISP_CFG_N:
			len += snprintf(buf + len, PAGE_SIZE-len,
			    "N_Port to N_Port\n");
			break;
		case ISP_CFG_F:
			len += snprintf(buf + len, PAGE_SIZE-len, "F_Port\n");
			break;
		default:
			len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n");
			break;
		}
	}
	return len;
}

static ssize_t
qla2x00_zio_show(struct device *dev, struct device_attribute *attr,
		 char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	int len = 0;

	switch (vha->hw->zio_mode) {
	case QLA_ZIO_MODE_6:
		len += snprintf(buf + len, PAGE_SIZE-len, "Mode 6\n");
		break;
	case QLA_ZIO_DISABLED:
		len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
		break;
	}
	return len;
}

static ssize_t
qla2x00_zio_store(struct device *dev, struct device_attribute *attr,
		  const char *buf, size_t count)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	int val = 0;
	uint16_t zio_mode;

	if (!IS_ZIO_SUPPORTED(ha))
		return -ENOTSUPP;

	if (sscanf(buf, "%d", &val) != 1)
		return -EINVAL;

	if (val)
		zio_mode = QLA_ZIO_MODE_6;
	else
		zio_mode = QLA_ZIO_DISABLED;

	/* Update per-hba values and queue a reset. */
	if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) {
		ha->zio_mode = zio_mode;
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	}
	return strlen(buf);
}

static ssize_t
qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

	return snprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100);
}

static ssize_t
qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	int val = 0;
	uint16_t zio_timer;

	if (sscanf(buf, "%d", &val) != 1)
		return -EINVAL;
	if (val > 25500 || val < 100)
		return -ERANGE;

	zio_timer = (uint16_t)(val / 100);
	vha->hw->zio_timer = zio_timer;

	return strlen(buf);
}

static ssize_t
qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,
		    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	int len = 0;

	if (vha->hw->beacon_blink_led)
		len += snprintf(buf + len, PAGE_SIZE-len, "Enabled\n");
	else
		len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
	return len;
}

static ssize_t
qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
		     const char *buf, size_t count)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	int val = 0;
	int rval;

	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		return -EPERM;

	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
		qla_printk(KERN_WARNING, ha,
		    "Abort ISP active -- ignoring beacon request.\n");
		return -EBUSY;
	}

	if (sscanf(buf, "%d", &val) != 1)
		return -EINVAL;

	if (val)
		rval = ha->isp_ops->beacon_on(vha);
	else
		rval = ha->isp_ops->beacon_off(vha);

	if (rval != QLA_SUCCESS)
		count = 0;

	return count;
}

static ssize_t
qla2x00_optrom_bios_version_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
	    ha->bios_revision[0]);
}

static ssize_t
qla2x00_optrom_efi_version_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
	    ha->efi_revision[0]);
}

static ssize_t
qla2x00_optrom_fcode_version_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
	    ha->fcode_revision[0]);
}

static ssize_t
qla2x00_optrom_fw_version_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
	    ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
	    ha->fw_revision[3]);
}

static ssize_t
qla2x00_total_isp_aborts_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	return snprintf(buf, PAGE_SIZE, "%d\n",
	    ha->qla_stats.total_isp_aborts);
}

static ssize_t
qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha))
		return snprintf(buf, PAGE_SIZE, "\n");

	return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
	    ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2],
	    ha->mpi_capabilities);
}

static ssize_t
qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha))
		return snprintf(buf, PAGE_SIZE, "\n");

	return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
	    ha->phy_version[0], ha->phy_version[1], ha->phy_version[2]);
}

static ssize_t
qla2x00_flash_block_size_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;

	return snprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);
}

static ssize_t
qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

	if (!IS_QLA81XX(vha->hw))
		return snprintf(buf, PAGE_SIZE, "\n");

	return snprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
}

static ssize_t
qla2x00_vn_port_mac_address_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

	if (!IS_QLA81XX(vha->hw))
		return snprintf(buf, PAGE_SIZE, "\n");

	return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n",
	    vha->fcoe_vn_port_mac[5], vha->fcoe_vn_port_mac[4],
	    vha->fcoe_vn_port_mac[3], vha->fcoe_vn_port_mac[2],
	    vha->fcoe_vn_port_mac[1], vha->fcoe_vn_port_mac[0]);
}

static ssize_t
qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

	return snprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap);
}

static ssize_t
qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
		      char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	int rval = QLA_FUNCTION_FAILED;
	uint16_t state[5];

	if (!vha->hw->flags.eeh_busy)
		rval = qla2x00_get_firmware_state(vha, state);
	if (rval != QLA_SUCCESS)
		memset(state, -1, sizeof(state));

	return snprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x\n", state[0],
	    state[1], state[2], state[3], state[4]);
}

static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
static DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL);
static DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL);
static DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL);
static DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL);
static DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL);
static DEVICE_ATTR(link_state, S_IRUGO, qla2x00_link_state_show, NULL);
static DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show, qla2x00_zio_store);
static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
    qla2x00_zio_timer_store);
static DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show,
    qla2x00_beacon_store);
static DEVICE_ATTR(optrom_bios_version, S_IRUGO,
    qla2x00_optrom_bios_version_show, NULL);
static DEVICE_ATTR(optrom_efi_version, S_IRUGO,
    qla2x00_optrom_efi_version_show, NULL);
static DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
    qla2x00_optrom_fcode_version_show, NULL);
static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
    NULL);
static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
    NULL);
static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,
    NULL);
static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL);
static DEVICE_ATTR(vn_port_mac_address, S_IRUGO,
    qla2x00_vn_port_mac_address_show, NULL);
static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL);

struct device_attribute *qla2x00_host_attrs[] = {
	&dev_attr_driver_version,
	&dev_attr_fw_version,
	&dev_attr_serial_num,
	&dev_attr_isp_name,
	&dev_attr_isp_id,
	&dev_attr_model_name,
	&dev_attr_model_desc,
	&dev_attr_pci_info,
	&dev_attr_link_state,
	&dev_attr_zio,
	&dev_attr_zio_timer,
	&dev_attr_beacon,
	&dev_attr_optrom_bios_version,
	&dev_attr_optrom_efi_version,
	&dev_attr_optrom_fcode_version,
	&dev_attr_optrom_fw_version,
	&dev_attr_total_isp_aborts,
	&dev_attr_mpi_version,
	&dev_attr_phy_version,
	&dev_attr_flash_block_size,
	&dev_attr_vlan_id,
	&dev_attr_vn_port_mac_address,
	&dev_attr_fabric_param,
	&dev_attr_fw_state,
	NULL,
};

/* Host attributes. */
static void
qla2x00_get_host_port_id(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);

	fc_host_port_id(shost) = vha->d_id.b.domain << 16 |
	    vha->d_id.b.area << 8 | vha->d_id.b.al_pa;
}

static void
qla2x00_get_host_speed(struct Scsi_Host *shost)
{
	struct qla_hw_data *ha = ((struct scsi_qla_host *)
	    (shost_priv(shost)))->hw;
	u32 speed = FC_PORTSPEED_UNKNOWN;

	switch (ha->link_data_rate) {
	case PORT_SPEED_1GB:
		speed = FC_PORTSPEED_1GBIT;
		break;
	case PORT_SPEED_2GB:
		speed = FC_PORTSPEED_2GBIT;
		break;
	case PORT_SPEED_4GB:
		speed = FC_PORTSPEED_4GBIT;
		break;
	case PORT_SPEED_8GB:
		speed = FC_PORTSPEED_8GBIT;
		break;
	case PORT_SPEED_10GB:
		speed = FC_PORTSPEED_10GBIT;
		break;
	}
	fc_host_speed(shost) = speed;
}

static void
qla2x00_get_host_port_type(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);
	uint32_t port_type = FC_PORTTYPE_UNKNOWN;

	if (vha->vp_idx) {
		fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
		return;
	}
	switch (vha->hw->current_topology) {
	case ISP_CFG_NL:
		port_type = FC_PORTTYPE_LPORT;
		break;
	case ISP_CFG_FL:
		port_type = FC_PORTTYPE_NLPORT;
		break;
	case ISP_CFG_N:
		port_type = FC_PORTTYPE_PTP;
		break;
	case ISP_CFG_F:
		port_type = FC_PORTTYPE_NPORT;
		break;
	}
	fc_host_port_type(shost) = port_type;
}

static void
qla2x00_get_starget_node_name(struct scsi_target *starget)
{
	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
	scsi_qla_host_t *vha = shost_priv(host);
	fc_port_t *fcport;
	u64 node_name = 0;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->rport &&
		    starget->id == fcport->rport->scsi_target_id) {
			node_name = wwn_to_u64(fcport->node_name);
			break;
		}
	}

	fc_starget_node_name(starget) = node_name;
}

static void
qla2x00_get_starget_port_name(struct scsi_target *starget)
{
	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
	scsi_qla_host_t *vha = shost_priv(host);
	fc_port_t *fcport;
	u64 port_name = 0;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->rport &&
		    starget->id == fcport->rport->scsi_target_id) {
			port_name = wwn_to_u64(fcport->port_name);
			break;
		}
	}

	fc_starget_port_name(starget) = port_name;
}

static void
qla2x00_get_starget_port_id(struct scsi_target *starget)
{
	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
	scsi_qla_host_t *vha = shost_priv(host);
	fc_port_t *fcport;
	uint32_t port_id = ~0U;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->rport &&
		    starget->id == fcport->rport->scsi_target_id) {
			port_id = fcport->d_id.b.domain << 16 |
			    fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
			break;
		}
	}

	fc_starget_port_id(starget) = port_id;
}

static void
qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
{
	if (timeout)
		rport->dev_loss_tmo = timeout;
	else
		rport->dev_loss_tmo = 1;
}

static void
qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
{
	struct Scsi_Host *host = rport_to_shost(rport);
	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
	if (!fcport)
		return;

	if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
		return;

	if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
		qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
		return;
	}

	/*
	 * Transport has effectively 'deleted' the rport, clear
	 * all local references.
	 */
	spin_lock_irq(host->host_lock);
	fcport->rport = NULL;
	*((fc_port_t **)rport->dd_data) = NULL;
	spin_unlock_irq(host->host_lock);
}

static void
qla2x00_terminate_rport_io(struct fc_rport *rport)
{
	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;

	if (!fcport)
		return;

	if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
		return;

	if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
		qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
		return;
	}
	/*
	 * At this point all fcport's software-states are cleared. Perform any
	 * final cleanup of firmware resources (PCBs and XCBs).
	 */
	if (fcport->loop_id != FC_NO_LOOP_ID &&
	    !test_bit(UNLOADING, &fcport->vha->dpc_flags))
		fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
		    fcport->loop_id, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa);

	qla2x00_abort_fcport_cmds(fcport);
}

static int
qla2x00_issue_lip(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);

	qla2x00_loop_reset(vha);
	return 0;
}

static struct fc_host_statistics *
qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	int rval;
	struct link_statistics *stats;
	dma_addr_t stats_dma;
	struct fc_host_statistics *pfc_host_stat;

	pfc_host_stat = &ha->fc_host_stat;
	memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));

	if (test_bit(UNLOADING, &vha->dpc_flags))
		goto done;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto done;

	stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma);
	if (stats == NULL) {
		DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n",
		    __func__, base_vha->host_no));
		goto done;
	}
	memset(stats, 0, DMA_POOL_SIZE);

	rval = QLA_FUNCTION_FAILED;
	if (IS_FWI2_CAPABLE(ha)) {
		rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma);
	} else if (atomic_read(&base_vha->loop_state) == LOOP_READY &&
	    !test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) &&
	    !test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
	    !ha->dpc_active) {
		/* Must be in a 'READY' state for statistics retrieval. */
		rval = qla2x00_get_link_status(base_vha, base_vha->loop_id,
		    stats, stats_dma);
	}

	if (rval != QLA_SUCCESS)
		goto done_free;

	pfc_host_stat->link_failure_count = stats->link_fail_cnt;
	pfc_host_stat->loss_of_sync_count = stats->loss_sync_cnt;
	pfc_host_stat->loss_of_signal_count = stats->loss_sig_cnt;
	pfc_host_stat->prim_seq_protocol_err_count = stats->prim_seq_err_cnt;
	pfc_host_stat->invalid_tx_word_count = stats->inval_xmit_word_cnt;
	pfc_host_stat->invalid_crc_count = stats->inval_crc_cnt;
	if (IS_FWI2_CAPABLE(ha)) {
		pfc_host_stat->lip_count = stats->lip_cnt;
		pfc_host_stat->tx_frames = stats->tx_frames;
		pfc_host_stat->rx_frames = stats->rx_frames;
		pfc_host_stat->dumped_frames = stats->dumped_frames;
		pfc_host_stat->nos_count = stats->nos_rcvd;
	}
	pfc_host_stat->fcp_input_megabytes = ha->qla_stats.input_bytes >> 20;
	pfc_host_stat->fcp_output_megabytes = ha->qla_stats.output_bytes >> 20;

done_free:
	dma_pool_free(ha->s_dma_pool, stats, stats_dma);
done:
	return pfc_host_stat;
}

static void
qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);

	qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost));
}

static void
qla2x00_set_host_system_hostname(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);

	set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
}

static void
qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);
	u64 node_name;

	if (vha->device_flags & SWITCH_FOUND)
		node_name = wwn_to_u64(vha->fabric_node_name);
	else
		node_name = wwn_to_u64(vha->node_name);

	fc_host_fabric_name(shost) = node_name;
}

static void
qla2x00_get_host_port_state(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);
	struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);

	if (!base_vha->flags.online)
		fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
	else if (atomic_read(&base_vha->loop_state) == LOOP_TIMEOUT)
		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
	else
		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
}

static int
qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
{
	int ret = 0;
	uint8_t qos = 0;
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	scsi_qla_host_t *vha = NULL;
	struct qla_hw_data *ha = base_vha->hw;
	uint16_t options = 0;
	int cnt;
	struct req_que *req = ha->req_q_map[0];

	ret = qla24xx_vport_create_req_sanity_check(fc_vport);
	if (ret) {
		DEBUG15(printk("qla24xx_vport_create_req_sanity_check failed, "
		    "status %x\n", ret));
		return ret;
	}

	vha = qla24xx_create_vhost(fc_vport);
	if (vha == NULL) {
		DEBUG15(printk("qla24xx_create_vhost failed, vha = %p\n",
		    vha));
		return FC_VPORT_FAILED;
	}
	if (disable) {
		atomic_set(&vha->vp_state, VP_OFFLINE);
		fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
	} else
		atomic_set(&vha->vp_state, VP_FAILED);

	/* ready to create vport */
	qla_printk(KERN_INFO, vha->hw, "VP entry id %d assigned.\n",
	    vha->vp_idx);

	/* initialized vport states */
	atomic_set(&vha->loop_state, LOOP_DOWN);
	vha->vp_err_state = VP_ERR_PORTDWN;
	vha->vp_prev_err_state = VP_ERR_UNKWN;
	/* Check if physical ha port is Up */
	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
	    atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
		/* Don't retry or attempt login of this virtual port */
		DEBUG15(printk("scsi(%ld): pport loop_state is not UP.\n",
		    base_vha->host_no));
		atomic_set(&vha->loop_state, LOOP_DEAD);
		if (!disable)
			fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
	}

	if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
	    &ha->pdev->dev)) {
		DEBUG15(printk("scsi(%ld): scsi_add_host failure for VP[%d].\n",
		    vha->host_no, vha->vp_idx));
		goto vport_create_failed_2;
	}

	/* initialize attributes */
	fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
	fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
	fc_host_supported_classes(vha->host) =
	    fc_host_supported_classes(base_vha->host);
	fc_host_supported_speeds(vha->host) =
	    fc_host_supported_speeds(base_vha->host);

	qla24xx_vport_disable(fc_vport, disable);

	if (ha->flags.cpu_affinity_enabled) {
		req = ha->req_q_map[1];
		goto vport_queue;
	} else if (ql2xmaxqueues == 1 || !ha->npiv_info)
		goto vport_queue;
	/* Create a request queue in QoS mode for the vport */
	for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
		if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
		    && memcmp(ha->npiv_info[cnt].node_name, vha->node_name,
		    8) == 0) {
			qos = ha->npiv_info[cnt].q_qos;
			break;
		}
	}
	if (qos) {
		ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
		    qos);
		if (!ret)
			qla_printk(KERN_WARNING, ha,
			    "Can't create request queue for vp_idx:%d\n",
			    vha->vp_idx);
		else {
			DEBUG2(qla_printk(KERN_INFO, ha,
			    "Request Que:%d (QoS: %d) created for vp_idx:%d\n",
			    ret, qos, vha->vp_idx));
			req = ha->req_q_map[ret];
		}
	}

vport_queue:
	vha->req = req;
	return 0;

vport_create_failed_2:
	qla24xx_disable_vp(vha);
	qla24xx_deallocate_vp_id(vha);
	scsi_host_put(vha->host);
	return FC_VPORT_FAILED;
}

static int
qla24xx_vport_delete(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *vha = fc_vport->dd_data;
	fc_port_t *fcport, *tfcport;
	struct qla_hw_data *ha = vha->hw;
	uint16_t id = vha->vp_idx;

	while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
	    test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
		msleep(1000);

	qla24xx_disable_vp(vha);

	fc_remove_host(vha->host);

	scsi_remove_host(vha->host);

	list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) {
		list_del(&fcport->list);
		kfree(fcport);
		fcport = NULL;
	}

	qla24xx_deallocate_vp_id(vha);

	mutex_lock(&ha->vport_lock);
	ha->cur_vport_count--;
	clear_bit(vha->vp_idx, ha->vp_idx_map);
	mutex_unlock(&ha->vport_lock);

	if (vha->timer_active) {
		qla2x00_vp_stop_timer(vha);
		DEBUG15(printk("scsi(%ld): timer for the vport[%d] = %p "
		    "has stopped\n", vha->host_no, vha->vp_idx, vha));
	}

	if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
		if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
			qla_printk(KERN_WARNING, ha,
			    "Queue delete failed.\n");
	}

	scsi_host_put(vha->host);
	qla_printk(KERN_INFO, ha, "vport %d deleted\n", id);
	return 0;
}

static int
qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
{
	scsi_qla_host_t *vha = fc_vport->dd_data;

	if (disable)
		qla24xx_disable_vp(vha);
	else
		qla24xx_enable_vp(vha);

	return 0;
}

struct fc_function_template qla2xxx_transport_functions = {

	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,
	.show_host_supported_speeds = 1,

	.get_host_port_id = qla2x00_get_host_port_id,
	.show_host_port_id = 1,
	.get_host_speed = qla2x00_get_host_speed,
	.show_host_speed = 1,
	.get_host_port_type = qla2x00_get_host_port_type,
	.show_host_port_type = 1,
	.get_host_symbolic_name = qla2x00_get_host_symbolic_name,
	.show_host_symbolic_name = 1,
	.set_host_system_hostname = qla2x00_set_host_system_hostname,
	.show_host_system_hostname = 1,
	.get_host_fabric_name = qla2x00_get_host_fabric_name,
	.show_host_fabric_name = 1,
	.get_host_port_state = qla2x00_get_host_port_state,
	.show_host_port_state = 1,

	.dd_fcrport_size = sizeof(struct fc_port *),
	.show_rport_supported_classes = 1,

	.get_starget_node_name = qla2x00_get_starget_node_name,
	.show_starget_node_name = 1,
	.get_starget_port_name = qla2x00_get_starget_port_name,
	.show_starget_port_name = 1,
	.get_starget_port_id = qla2x00_get_starget_port_id,
	.show_starget_port_id = 1,

	.set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
	.show_rport_dev_loss_tmo = 1,

	.issue_fc_host_lip = qla2x00_issue_lip,
	.dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
	.terminate_rport_io = qla2x00_terminate_rport_io,
	.get_fc_host_stats = qla2x00_get_fc_host_stats,

	.vport_create = qla24xx_vport_create,
	.vport_disable = qla24xx_vport_disable,
	.vport_delete = qla24xx_vport_delete,
};

struct fc_function_template qla2xxx_transport_vport_functions = {

	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,

	.get_host_port_id = qla2x00_get_host_port_id,
	.show_host_port_id = 1,
	.get_host_speed = qla2x00_get_host_speed,
	.show_host_speed = 1,
	.get_host_port_type = qla2x00_get_host_port_type,
	.show_host_port_type = 1,
	.get_host_symbolic_name = qla2x00_get_host_symbolic_name,
	.show_host_symbolic_name = 1,
	.set_host_system_hostname = qla2x00_set_host_system_hostname,
	.show_host_system_hostname = 1,
	.get_host_fabric_name = qla2x00_get_host_fabric_name,
	.show_host_fabric_name = 1,
	.get_host_port_state = qla2x00_get_host_port_state,
	.show_host_port_state = 1,

	.dd_fcrport_size = sizeof(struct fc_port *),
	.show_rport_supported_classes = 1,

	.get_starget_node_name = qla2x00_get_starget_node_name,
	.show_starget_node_name = 1,
	.get_starget_port_name = qla2x00_get_starget_port_name,
	.show_starget_port_name = 1,
	.get_starget_port_id = qla2x00_get_starget_port_id,
	.show_starget_port_id = 1,

	.set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
	.show_rport_dev_loss_tmo = 1,

	.issue_fc_host_lip = qla2x00_issue_lip,
	.dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
	.terminate_rport_io = qla2x00_terminate_rport_io,
	.get_fc_host_stats = qla2x00_get_fc_host_stats,
};

void
qla2x00_init_host_attr(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	u32 speed = FC_PORTSPEED_UNKNOWN;

	fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
	fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
	fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
	fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
	fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;

	if (IS_QLA81XX(ha))
		speed = FC_PORTSPEED_10GBIT;
	else if (IS_QLA25XX(ha))
		speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
		    FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
	else if (IS_QLA24XX_TYPE(ha))
		speed = FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
		    FC_PORTSPEED_1GBIT;
	else if (IS_QLA23XX(ha))
		speed = FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
	else
		speed = FC_PORTSPEED_1GBIT;
	fc_host_supported_speeds(vha->host) = speed;
}