/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>

static int qla24xx_vport_disable(struct fc_vport *, bool);

/* SYSFS attributes --------------------------------------------------------- */

static ssize_t
qla2x00_sysfs_read_fw_dump(struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;

	if (ha->fw_dump_reading == 0)
		return 0;

	return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
	    ha->fw_dump_len);
}

static ssize_t
qla2x00_sysfs_write_fw_dump(struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	int reading;

	if (off != 0)
		return (0);

	reading = simple_strtol(buf, NULL, 10);
	switch (reading) {
	case 0:
		if (!ha->fw_dump_reading)
			break;

		qla_printk(KERN_INFO, ha,
		    "Firmware dump cleared on (%ld).\n", vha->host_no);

		ha->fw_dump_reading = 0;
		ha->fw_dumped = 0;
		break;
	case 1:
		if (ha->fw_dumped && !ha->fw_dump_reading) {
			ha->fw_dump_reading = 1;

			qla_printk(KERN_INFO, ha,
			    "Raw firmware dump ready for read on (%ld).\n",
			    vha->host_no);
		}
		break;
	case 2:
		qla2x00_alloc_fw_dump(vha);
		break;
	case 3:
		qla2x00_system_error(vha);
		break;
	}
	return (count);
}

static struct bin_attribute sysfs_fw_dump_attr = {
	.attr = {
		.name = "fw_dump",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = qla2x00_sysfs_read_fw_dump,
	.write = qla2x00_sysfs_write_fw_dump,
};

static ssize_t
qla2x00_sysfs_read_nvram(struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;

	if (!capable(CAP_SYS_ADMIN))
		return 0;

	if (IS_NOCACHE_VPD_TYPE(ha))
		ha->isp_ops->read_optrom(vha, ha->nvram,
		    ha->flt_region_nvram << 2, ha->nvram_size);
	return memory_read_from_buffer(buf, count, &off, ha->nvram,
	    ha->nvram_size);
}

static ssize_t
qla2x00_sysfs_write_nvram(struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	uint16_t cnt;

	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size ||
	    !ha->isp_ops->write_nvram)
		return 0;

	/* Checksum NVRAM. */
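	/*
	 * The NVRAM image carries a simple two's-complement checksum: the
	 * last 32-bit word (FWI2-capable ISPs) or the last byte (earlier
	 * ISPs) is rewritten so that the sum over the whole buffer comes
	 * out to zero.
	 */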
	if (IS_FWI2_CAPABLE(ha)) {
		uint32_t *iter;
		uint32_t chksum;

		iter = (uint32_t *)buf;
		chksum = 0;
		for (cnt = 0; cnt < ((count >> 2) - 1); cnt++)
			chksum += le32_to_cpu(*iter++);
		chksum = ~chksum + 1;
		*iter = cpu_to_le32(chksum);
	} else {
		uint8_t *iter;
		uint8_t chksum;

		iter = (uint8_t *)buf;
		chksum = 0;
		for (cnt = 0; cnt < count - 1; cnt++)
			chksum += *iter++;
		chksum = ~chksum + 1;
		*iter = chksum;
	}

	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
		qla_printk(KERN_WARNING, ha,
		    "HBA not online, failing NVRAM update.\n");
		return -EAGAIN;
	}

	/* Write NVRAM. */
	ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->nvram_base, count);
	ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,
	    count);

	/* NVRAM settings take effect immediately. */
	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
	qla2x00_wait_for_chip_reset(vha);

	return (count);
}

static struct bin_attribute sysfs_nvram_attr = {
	.attr = {
		.name = "nvram",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 512,
	.read = qla2x00_sysfs_read_nvram,
	.write = qla2x00_sysfs_write_nvram,
};

static ssize_t
qla2x00_sysfs_read_optrom(struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;

	if (ha->optrom_state != QLA_SREADING)
		return 0;

	return memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
	    ha->optrom_region_size);
}

static ssize_t
qla2x00_sysfs_write_optrom(struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;

	if (ha->optrom_state != QLA_SWRITING)
		return -EINVAL;
	if (off > ha->optrom_region_size)
		return -ERANGE;
	if (off + count > ha->optrom_region_size)
		count = ha->optrom_region_size - off;

	memcpy(&ha->optrom_buffer[off], buf, count);

	return count;
}

static struct bin_attribute sysfs_optrom_attr = {
	.attr = {
		.name = "optrom",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = qla2x00_sysfs_read_optrom,
	.write = qla2x00_sysfs_write_optrom,
};
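
/*
 * Control interface for the "optrom" attribute above.  Userspace writes a
 * string of the form "<cmd>:<start>:<size>" (start and size in hex, both
 * optional):
 *
 *	0 -- release the staging buffer.
 *	1 -- read the flash region [start, start + size) into the staging
 *	     buffer; the data can then be fetched by reading "optrom".
 *	2 -- allocate a staging buffer for a flash update; the new image is
 *	     then written through "optrom".
 *	3 -- burn the staged image to flash.
 */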
static ssize_t
qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;

	uint32_t start = 0;
	uint32_t size = ha->optrom_size;
	int val, valid;

	if (off)
		return 0;

	if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1)
		return -EINVAL;
	if (start > ha->optrom_size)
		return -EINVAL;

	switch (val) {
	case 0:
		if (ha->optrom_state != QLA_SREADING &&
		    ha->optrom_state != QLA_SWRITING)
			break;

		ha->optrom_state = QLA_SWAITING;

		DEBUG2(qla_printk(KERN_INFO, ha,
		    "Freeing flash region allocation -- 0x%x bytes.\n",
		    ha->optrom_region_size));

		vfree(ha->optrom_buffer);
		ha->optrom_buffer = NULL;
		break;
	case 1:
		if (ha->optrom_state != QLA_SWAITING)
			break;

		ha->optrom_region_start = start;
		ha->optrom_region_size = start + size > ha->optrom_size ?
		    ha->optrom_size - start : size;

		ha->optrom_state = QLA_SREADING;
		ha->optrom_buffer = vmalloc(ha->optrom_region_size);
		if (ha->optrom_buffer == NULL) {
			qla_printk(KERN_WARNING, ha,
			    "Unable to allocate memory for optrom retrieval "
			    "(%x).\n", ha->optrom_region_size);

			ha->optrom_state = QLA_SWAITING;
			return count;
		}

		DEBUG2(qla_printk(KERN_INFO, ha,
		    "Reading flash region -- 0x%x/0x%x.\n",
		    ha->optrom_region_start, ha->optrom_region_size));

		memset(ha->optrom_buffer, 0, ha->optrom_region_size);
		ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
		    ha->optrom_region_start, ha->optrom_region_size);
		break;
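	/*
	 * Cases 2 and 3 form the update path: case 2 validates the
	 * requested start offset and allocates a zeroed staging buffer
	 * which userspace fills through the "optrom" attribute; case 3
	 * commits that staged buffer to flash via write_optrom().
	 */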
	case 2:
		if (ha->optrom_state != QLA_SWAITING)
			break;

		/*
		 * We need to be more restrictive on which FLASH regions are
		 * allowed to be updated via user-space.  Regions accessible
		 * via this method include:
		 *
		 * ISP21xx/ISP22xx/ISP23xx type boards:
		 *
		 *	0x000000 -> 0x020000 -- Boot code.
		 *
		 * ISP2322/ISP24xx type boards:
		 *
		 *	0x000000 -> 0x07ffff -- Boot code.
		 *	0x080000 -> 0x0fffff -- Firmware.
		 *
		 * ISP25xx type boards:
		 *
		 *	0x000000 -> 0x07ffff -- Boot code.
		 *	0x080000 -> 0x0fffff -- Firmware.
		 *	0x120000 -> 0x12ffff -- VPD and HBA parameters.
		 */
		valid = 0;
		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
			valid = 1;
		else if (start == (ha->flt_region_boot * 4) ||
		    start == (ha->flt_region_fw * 4))
			valid = 1;
		else if (IS_QLA25XX(ha) || IS_QLA81XX(ha))
			valid = 1;
		if (!valid) {
			qla_printk(KERN_WARNING, ha,
			    "Invalid start region 0x%x/0x%x.\n", start, size);
			return -EINVAL;
		}

		ha->optrom_region_start = start;
		ha->optrom_region_size = start + size > ha->optrom_size ?
		    ha->optrom_size - start : size;

		ha->optrom_state = QLA_SWRITING;
		ha->optrom_buffer = vmalloc(ha->optrom_region_size);
		if (ha->optrom_buffer == NULL) {
			qla_printk(KERN_WARNING, ha,
			    "Unable to allocate memory for optrom update "
			    "(%x).\n", ha->optrom_region_size);

			ha->optrom_state = QLA_SWAITING;
			return count;
		}

		DEBUG2(qla_printk(KERN_INFO, ha,
		    "Staging flash region write -- 0x%x/0x%x.\n",
		    ha->optrom_region_start, ha->optrom_region_size));

		memset(ha->optrom_buffer, 0, ha->optrom_region_size);
		break;
	case 3:
		if (ha->optrom_state != QLA_SWRITING)
			break;

		if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
			qla_printk(KERN_WARNING, ha,
			    "HBA not online, failing flash update.\n");
			return -EAGAIN;
		}

		DEBUG2(qla_printk(KERN_INFO, ha,
		    "Writing flash region -- 0x%x/0x%x.\n",
		    ha->optrom_region_start, ha->optrom_region_size));

		ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
		    ha->optrom_region_start, ha->optrom_region_size);
		break;
	default:
		count = -EINVAL;
	}
	return count;
}

static struct bin_attribute sysfs_optrom_ctl_attr = {
	.attr = {
		.name = "optrom_ctl",
		.mode = S_IWUSR,
	},
	.size = 0,
	.write = qla2x00_sysfs_write_optrom_ctl,
};

static ssize_t
qla2x00_sysfs_read_vpd(struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;

	if (!capable(CAP_SYS_ADMIN))
		return 0;

	if (IS_NOCACHE_VPD_TYPE(ha))
		ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
		    ha->vpd_size);
	return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
}

static ssize_t
qla2x00_sysfs_write_vpd(struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	uint8_t *tmp_data;

	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
	    !ha->isp_ops->write_nvram)
		return 0;

	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
		qla_printk(KERN_WARNING, ha,
		    "HBA not online, failing VPD update.\n");
		return -EAGAIN;
	}

	/* Write NVRAM. */
	ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->vpd_base, count);
	ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, ha->vpd_base, count);

	/* Update flash version information for 4Gb & above. */
	if (!IS_FWI2_CAPABLE(ha))
		goto done;

	tmp_data = vmalloc(256);
	if (!tmp_data) {
		qla_printk(KERN_WARNING, ha,
		    "Unable to allocate memory for VPD information update.\n");
		goto done;
	}
	ha->isp_ops->get_flash_version(vha, tmp_data);
	vfree(tmp_data);
done:
	return count;
}

static struct bin_attribute sysfs_vpd_attr = {
	.attr = {
		.name = "vpd",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = qla2x00_sysfs_read_vpd,
	.write = qla2x00_sysfs_write_vpd,
};

static ssize_t
qla2x00_sysfs_read_sfp(struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	uint16_t iter, addr, offset;
	int rval;

	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != SFP_DEV_SIZE * 2)
		return 0;

	if (ha->sfp_data)
		goto do_read;

	ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
	    &ha->sfp_data_dma);
	if (!ha->sfp_data) {
		qla_printk(KERN_WARNING, ha,
		    "Unable to allocate memory for SFP read-data.\n");
		return 0;
	}

do_read:
	memset(ha->sfp_data, 0, SFP_BLOCK_SIZE);
	addr = 0xa0;
	for (iter = 0, offset = 0; iter < (SFP_DEV_SIZE * 2) / SFP_BLOCK_SIZE;
	    iter++, offset += SFP_BLOCK_SIZE) {
		if (iter == 4) {
			/* Skip to next device address. */
			addr = 0xa2;
			offset = 0;
		}

		rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, addr, offset,
		    SFP_BLOCK_SIZE);
		if (rval != QLA_SUCCESS) {
			qla_printk(KERN_WARNING, ha,
			    "Unable to read SFP data (%x/%x/%x).\n", rval,
			    addr, offset);
			count = 0;
			break;
		}
		memcpy(buf, ha->sfp_data, SFP_BLOCK_SIZE);
		buf += SFP_BLOCK_SIZE;
	}

	return count;
}

static struct bin_attribute sysfs_sfp_attr = {
	.attr = {
		.name = "sfp",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = SFP_DEV_SIZE * 2,
	.read = qla2x00_sysfs_read_sfp,
};

static ssize_t
qla2x00_sysfs_write_reset(struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	int type;

	if (off != 0)
		return 0;

	type = simple_strtol(buf, NULL, 10);
	switch (type) {
	case 0x2025c:
		qla_printk(KERN_INFO, ha,
		    "Issuing ISP reset on (%ld).\n", vha->host_no);

		scsi_block_requests(vha->host);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
		qla2x00_wait_for_chip_reset(vha);
		scsi_unblock_requests(vha->host);
		break;
	case 0x2025d:
		if (!IS_QLA81XX(ha))
			break;

		qla_printk(KERN_INFO, ha,
		    "Issuing MPI reset on (%ld).\n", vha->host_no);

		/* Make sure FC side is not in reset */
		qla2x00_wait_for_hba_online(vha);

		/* Issue MPI reset */
		scsi_block_requests(vha->host);
		if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
			qla_printk(KERN_WARNING, ha,
			    "MPI reset failed on (%ld).\n", vha->host_no);
		scsi_unblock_requests(vha->host);
		break;
	}
	return count;
}

static struct bin_attribute sysfs_reset_attr = {
	.attr = {
		.name = "reset",
		.mode = S_IWUSR,
	},
	.size = 0,
	.write = qla2x00_sysfs_write_reset,
};
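
/*
 * The "edc" attribute takes a binary request: an 8-byte header of four
 * little-endian 16-bit fields -- target device, address, option flags and
 * payload length -- followed by the payload itself, which is handed to the
 * firmware via qla2x00_write_edc().
 */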
static ssize_t
qla2x00_sysfs_write_edc(struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	uint16_t dev, adr, opt, len;
	int rval;

	ha->edc_data_len = 0;

	if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8)
		return 0;

	if (!ha->edc_data) {
		ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
		    &ha->edc_data_dma);
		if (!ha->edc_data) {
			DEBUG2(qla_printk(KERN_INFO, ha,
			    "Unable to allocate memory for EDC write.\n"));
			return 0;
		}
	}

	dev = le16_to_cpup((void *)&buf[0]);
	adr = le16_to_cpup((void *)&buf[2]);
	opt = le16_to_cpup((void *)&buf[4]);
	len = le16_to_cpup((void *)&buf[6]);

	if (!(opt & BIT_0))
		if (len == 0 || len > DMA_POOL_SIZE || len > count - 8)
			return -EINVAL;

	memcpy(ha->edc_data, &buf[8], len);

	rval = qla2x00_write_edc(vha, dev, adr, ha->edc_data_dma,
	    ha->edc_data, len, opt);
	if (rval != QLA_SUCCESS) {
		DEBUG2(qla_printk(KERN_INFO, ha,
		    "Unable to write EDC (%x) %02x:%02x:%04x:%02x:%02x.\n",
		    rval, dev, adr, opt, len, *buf));
		return 0;
	}

	return count;
}

static struct bin_attribute sysfs_edc_attr = {
	.attr = {
		.name = "edc",
		.mode = S_IWUSR,
	},
	.size = 0,
	.write = qla2x00_sysfs_write_edc,
};
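
/*
 * "edc_status" implements a two-step read: userspace first writes an
 * 8-byte request header in the same format as "edc" (device, address,
 * option flags, length), which triggers qla2x00_read_edc(); the fetched
 * data -- ha->edc_data_len bytes of it -- is then returned by a
 * subsequent read of the attribute.
 */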
static ssize_t
qla2x00_sysfs_write_edc_status(struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	uint16_t dev, adr, opt, len;
	int rval;

	ha->edc_data_len = 0;

	if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8)
		return 0;

	if (!ha->edc_data) {
		ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
		    &ha->edc_data_dma);
		if (!ha->edc_data) {
			DEBUG2(qla_printk(KERN_INFO, ha,
			    "Unable to allocate memory for EDC status.\n"));
			return 0;
		}
	}

	dev = le16_to_cpup((void *)&buf[0]);
	adr = le16_to_cpup((void *)&buf[2]);
	opt = le16_to_cpup((void *)&buf[4]);
	len = le16_to_cpup((void *)&buf[6]);

	if (!(opt & BIT_0))
		if (len == 0 || len > DMA_POOL_SIZE)
			return -EINVAL;

	memset(ha->edc_data, 0, len);
	rval = qla2x00_read_edc(vha, dev, adr, ha->edc_data_dma,
	    ha->edc_data, len, opt);
	if (rval != QLA_SUCCESS) {
		DEBUG2(qla_printk(KERN_INFO, ha,
		    "Unable to write EDC status (%x) %02x:%02x:%04x:%02x.\n",
		    rval, dev, adr, opt, len));
		return 0;
	}

	ha->edc_data_len = len;

	return count;
}

static ssize_t
qla2x00_sysfs_read_edc_status(struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;

	if (!capable(CAP_SYS_ADMIN) || off != 0 || count == 0)
		return 0;

	if (!ha->edc_data || ha->edc_data_len == 0 || ha->edc_data_len > count)
		return -EINVAL;

	memcpy(buf, ha->edc_data, ha->edc_data_len);

	return ha->edc_data_len;
}

static struct bin_attribute sysfs_edc_status_attr = {
	.attr = {
		.name = "edc_status",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.write = qla2x00_sysfs_write_edc_status,
	.read = qla2x00_sysfs_read_edc_status,
};

static ssize_t
qla2x00_sysfs_read_xgmac_stats(struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	int rval;
	uint16_t actual_size;

	if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE)
		return 0;

	if (ha->xgmac_data)
		goto do_read;

	ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
	    &ha->xgmac_data_dma, GFP_KERNEL);
	if (!ha->xgmac_data) {
		qla_printk(KERN_WARNING, ha,
		    "Unable to allocate memory for XGMAC read-data.\n");
		return 0;
	}

do_read:
	actual_size = 0;
	memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE);

	rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
	    XGMAC_DATA_SIZE, &actual_size);
	if (rval != QLA_SUCCESS) {
		qla_printk(KERN_WARNING, ha,
		    "Unable to read XGMAC data (%x).\n", rval);
		count = 0;
	}

	count = actual_size > count ? count : actual_size;
	memcpy(buf, ha->xgmac_data, count);

	return count;
}

static struct bin_attribute sysfs_xgmac_stats_attr = {
	.attr = {
		.name = "xgmac_stats",
		.mode = S_IRUSR,
	},
	.size = 0,
	.read = qla2x00_sysfs_read_xgmac_stats,
};

static ssize_t
qla2x00_sysfs_read_dcbx_tlv(struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	int rval;
	uint16_t actual_size;

	if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)
		return 0;

	if (ha->dcbx_tlv)
		goto do_read;

	ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
	    &ha->dcbx_tlv_dma, GFP_KERNEL);
	if (!ha->dcbx_tlv) {
		qla_printk(KERN_WARNING, ha,
		    "Unable to allocate memory for DCBX TLV read-data.\n");
		return 0;
	}

do_read:
	actual_size = 0;
	memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE);

	rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
	    DCBX_TLV_DATA_SIZE);
	if (rval != QLA_SUCCESS) {
		qla_printk(KERN_WARNING, ha,
		    "Unable to read DCBX TLV data (%x).\n", rval);
		count = 0;
	}

	memcpy(buf, ha->dcbx_tlv, count);

	return count;
}

static struct bin_attribute sysfs_dcbx_tlv_attr = {
	.attr = {
		.name = "dcbx_tlv",
		.mode = S_IRUSR,
	},
	.size = 0,
	.read = qla2x00_sysfs_read_dcbx_tlv,
};

static struct sysfs_entry {
	char *name;
	struct bin_attribute *attr;
	int is4GBp_only;
} bin_file_entries[] = {
	{ "fw_dump", &sysfs_fw_dump_attr, },
	{ "nvram", &sysfs_nvram_attr, },
	{ "optrom", &sysfs_optrom_attr, },
	{ "optrom_ctl", &sysfs_optrom_ctl_attr, },
	{ "vpd", &sysfs_vpd_attr, 1 },
	{ "sfp", &sysfs_sfp_attr, 1 },
	{ "reset", &sysfs_reset_attr, },
	{ "edc", &sysfs_edc_attr, 2 },
	{ "edc_status", &sysfs_edc_status_attr, 2 },
	{ "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
	{ "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
	{ NULL },
};
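
/*
 * is4GBp_only restricts an attribute to certain adapter families: any
 * non-zero value requires an FWI2-capable (4Gb or newer) ISP, 2 further
 * restricts it to ISP25xx and 3 to ISP81xx.  The checks are applied in
 * qla2x00_alloc_sysfs_attr() and qla2x00_free_sysfs_attr() below.
 */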
void
qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
{
	struct Scsi_Host *host = vha->host;
	struct sysfs_entry *iter;
	int ret;

	for (iter = bin_file_entries; iter->name; iter++) {
		if (iter->is4GBp_only && !IS_FWI2_CAPABLE(vha->hw))
			continue;
		if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw))
			continue;
		if (iter->is4GBp_only == 3 && !IS_QLA81XX(vha->hw))
			continue;

		ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
		    iter->attr);
		if (ret)
			qla_printk(KERN_INFO, vha->hw,
			    "Unable to create sysfs %s binary attribute "
			    "(%d).\n", iter->name, ret);
	}
}

void
qla2x00_free_sysfs_attr(scsi_qla_host_t *vha)
{
	struct Scsi_Host *host = vha->host;
	struct sysfs_entry *iter;
	struct qla_hw_data *ha = vha->hw;

	for (iter = bin_file_entries; iter->name; iter++) {
		if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha))
			continue;
		if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha))
			continue;
		if (iter->is4GBp_only == 3 && !IS_QLA81XX(ha))
			continue;

		sysfs_remove_bin_file(&host->shost_gendev.kobj,
		    iter->attr);
	}

	if (ha->beacon_blink_led == 1)
		ha->isp_ops->beacon_off(vha);
}

/* Scsi_Host attributes. */

static ssize_t
qla2x00_drvr_version_show(struct device *dev,
    struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str);
}

static ssize_t
qla2x00_fw_version_show(struct device *dev,
    struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	char fw_str[128];

	return snprintf(buf, PAGE_SIZE, "%s\n",
	    ha->isp_ops->fw_version_str(vha, fw_str));
}

static ssize_t
qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	uint32_t sn;

	if (IS_FWI2_CAPABLE(ha)) {
		qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE);
		return snprintf(buf, PAGE_SIZE, "%s\n", buf);
	}

	sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
	return snprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,
	    sn % 100000);
}

static ssize_t
qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr,
    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	return snprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
}

static ssize_t
qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,
    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	return snprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
	    ha->product_id[0], ha->product_id[1], ha->product_id[2],
	    ha->product_id[3]);
}

static ssize_t
qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,
    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	return snprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
}

static ssize_t
qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr,
    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	return snprintf(buf, PAGE_SIZE, "%s\n",
	    vha->hw->model_desc ? vha->hw->model_desc : "");
}

static ssize_t
qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,
    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	char pci_info[30];

	return snprintf(buf, PAGE_SIZE, "%s\n",
	    vha->hw->isp_ops->pci_info_str(vha, pci_info));
}

static ssize_t
qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	int len = 0;

	if (atomic_read(&vha->loop_state) == LOOP_DOWN ||
	    atomic_read(&vha->loop_state) == LOOP_DEAD)
		len = snprintf(buf, PAGE_SIZE, "Link Down\n");
	else if (atomic_read(&vha->loop_state) != LOOP_READY ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
	    test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
		len = snprintf(buf, PAGE_SIZE, "Unknown Link State\n");
	else {
		len = snprintf(buf, PAGE_SIZE, "Link Up - ");

		switch (ha->current_topology) {
		case ISP_CFG_NL:
			len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n");
			break;
		case ISP_CFG_FL:
			len += snprintf(buf + len, PAGE_SIZE-len, "FL_Port\n");
			break;
		case ISP_CFG_N:
			len += snprintf(buf + len, PAGE_SIZE-len,
			    "N_Port to N_Port\n");
			break;
		case ISP_CFG_F:
			len += snprintf(buf + len, PAGE_SIZE-len, "F_Port\n");
			break;
		default:
			len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n");
			break;
		}
	}
	return len;
}
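
/*
 * ZIO (zero-interrupt operation) attributes.  Writing a non-zero value to
 * "zio" selects mode 6 and schedules an ISP abort so the new setting is
 * picked up on re-initialization; "zio_timer" is kept in units of 100
 * microseconds, as reflected by the "%d us" output below.
 */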
static ssize_t
qla2x00_zio_show(struct device *dev, struct device_attribute *attr,
    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	int len = 0;

	switch (vha->hw->zio_mode) {
	case QLA_ZIO_MODE_6:
		len += snprintf(buf + len, PAGE_SIZE-len, "Mode 6\n");
		break;
	case QLA_ZIO_DISABLED:
		len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
		break;
	}
	return len;
}

static ssize_t
qla2x00_zio_store(struct device *dev, struct device_attribute *attr,
    const char *buf, size_t count)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	int val = 0;
	uint16_t zio_mode;

	if (!IS_ZIO_SUPPORTED(ha))
		return -ENOTSUPP;

	if (sscanf(buf, "%d", &val) != 1)
		return -EINVAL;

	if (val)
		zio_mode = QLA_ZIO_MODE_6;
	else
		zio_mode = QLA_ZIO_DISABLED;

	/* Update per-hba values and queue a reset. */
	if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) {
		ha->zio_mode = zio_mode;
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	}
	return strlen(buf);
}

static ssize_t
qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr,
    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

	return snprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100);
}

static ssize_t
qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
    const char *buf, size_t count)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	int val = 0;
	uint16_t zio_timer;

	if (sscanf(buf, "%d", &val) != 1)
		return -EINVAL;
	if (val > 25500 || val < 100)
		return -ERANGE;

	zio_timer = (uint16_t)(val / 100);
	vha->hw->zio_timer = zio_timer;

	return strlen(buf);
}

static ssize_t
qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,
    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	int len = 0;

	if (vha->hw->beacon_blink_led)
		len += snprintf(buf + len, PAGE_SIZE-len, "Enabled\n");
	else
		len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
	return len;
}

static ssize_t
qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
    const char *buf, size_t count)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	int val = 0;
	int rval;

	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		return -EPERM;

	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
		qla_printk(KERN_WARNING, ha,
		    "Abort ISP active -- ignoring beacon request.\n");
		return -EBUSY;
	}

	if (sscanf(buf, "%d", &val) != 1)
		return -EINVAL;

	if (val)
		rval = ha->isp_ops->beacon_on(vha);
	else
		rval = ha->isp_ops->beacon_off(vha);

	if (rval != QLA_SUCCESS)
		count = 0;

	return count;
}

static ssize_t
qla2x00_optrom_bios_version_show(struct device *dev,
    struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
	    ha->bios_revision[0]);
}

static ssize_t
qla2x00_optrom_efi_version_show(struct device *dev,
    struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
	    ha->efi_revision[0]);
}

static ssize_t
qla2x00_optrom_fcode_version_show(struct device *dev,
    struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
	    ha->fcode_revision[0]);
}

static ssize_t
qla2x00_optrom_fw_version_show(struct device *dev,
    struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
	    ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
	    ha->fw_revision[3]);
}

static ssize_t
qla2x00_total_isp_aborts_show(struct device *dev,
    struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	return snprintf(buf, PAGE_SIZE, "%d\n",
	    ha->qla_stats.total_isp_aborts);
}

static ssize_t
qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha))
		return snprintf(buf, PAGE_SIZE, "\n");

	return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
	    ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2],
	    ha->mpi_capabilities);
}

static ssize_t
qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha))
		return snprintf(buf, PAGE_SIZE, "\n");

	return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
	    ha->phy_version[0], ha->phy_version[1], ha->phy_version[2]);
}

static ssize_t
qla2x00_flash_block_size_show(struct device *dev,
    struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;

	return snprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);
}

static ssize_t
qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

	if (!IS_QLA81XX(vha->hw))
		return snprintf(buf, PAGE_SIZE, "\n");

	return snprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
}

static ssize_t
qla2x00_vn_port_mac_address_show(struct device *dev,
    struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

	if (!IS_QLA81XX(vha->hw))
		return snprintf(buf, PAGE_SIZE, "\n");

	return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n",
	    vha->fcoe_vn_port_mac[5], vha->fcoe_vn_port_mac[4],
	    vha->fcoe_vn_port_mac[3], vha->fcoe_vn_port_mac[2],
	    vha->fcoe_vn_port_mac[1], vha->fcoe_vn_port_mac[0]);
}

static ssize_t
qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr,
    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

	return snprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap);
}

static ssize_t
qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	int rval;
	uint16_t state[5];

	rval = qla2x00_get_firmware_state(vha, state);
	if (rval != QLA_SUCCESS)
		memset(state, -1, sizeof(state));

	return snprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x\n", state[0],
	    state[1], state[2], state[3], state[4]);
}

static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
static DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL);
static DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL);
static DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL);
static DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL);
static DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL);
static DEVICE_ATTR(link_state, S_IRUGO, qla2x00_link_state_show, NULL);
static DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show, qla2x00_zio_store);
static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
    qla2x00_zio_timer_store);
static DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show,
    qla2x00_beacon_store);
static DEVICE_ATTR(optrom_bios_version, S_IRUGO,
    qla2x00_optrom_bios_version_show, NULL);
static DEVICE_ATTR(optrom_efi_version, S_IRUGO,
    qla2x00_optrom_efi_version_show, NULL);
static DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
    qla2x00_optrom_fcode_version_show, NULL);
static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
    NULL);
static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
    NULL);
static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,
    NULL);
static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL);
static DEVICE_ATTR(vn_port_mac_address, S_IRUGO,
    qla2x00_vn_port_mac_address_show, NULL);
static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL);

struct device_attribute *qla2x00_host_attrs[] = {
	&dev_attr_driver_version,
	&dev_attr_fw_version,
	&dev_attr_serial_num,
	&dev_attr_isp_name,
	&dev_attr_isp_id,
	&dev_attr_model_name,
	&dev_attr_model_desc,
	&dev_attr_pci_info,
	&dev_attr_link_state,
	&dev_attr_zio,
	&dev_attr_zio_timer,
	&dev_attr_beacon,
	&dev_attr_optrom_bios_version,
	&dev_attr_optrom_efi_version,
	&dev_attr_optrom_fcode_version,
	&dev_attr_optrom_fw_version,
	&dev_attr_total_isp_aborts,
	&dev_attr_mpi_version,
	&dev_attr_phy_version,
	&dev_attr_flash_block_size,
	&dev_attr_vlan_id,
	&dev_attr_vn_port_mac_address,
	&dev_attr_fabric_param,
	&dev_attr_fw_state,
	NULL,
};

/* Host attributes. */
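
/*
 * The callbacks below populate the FC transport class templates
 * (qla2xxx_transport_functions and qla2xxx_transport_vport_functions at
 * the end of this file) that back the fc_host and fc_remote_port sysfs
 * attributes.
 */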
static void
qla2x00_get_host_port_id(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);

	fc_host_port_id(shost) = vha->d_id.b.domain << 16 |
	    vha->d_id.b.area << 8 | vha->d_id.b.al_pa;
}

static void
qla2x00_get_host_speed(struct Scsi_Host *shost)
{
	struct qla_hw_data *ha = ((struct scsi_qla_host *)
	    (shost_priv(shost)))->hw;
	u32 speed = FC_PORTSPEED_UNKNOWN;

	switch (ha->link_data_rate) {
	case PORT_SPEED_1GB:
		speed = FC_PORTSPEED_1GBIT;
		break;
	case PORT_SPEED_2GB:
		speed = FC_PORTSPEED_2GBIT;
		break;
	case PORT_SPEED_4GB:
		speed = FC_PORTSPEED_4GBIT;
		break;
	case PORT_SPEED_8GB:
		speed = FC_PORTSPEED_8GBIT;
		break;
	case PORT_SPEED_10GB:
		speed = FC_PORTSPEED_10GBIT;
		break;
	}
	fc_host_speed(shost) = speed;
}

static void
qla2x00_get_host_port_type(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);
	uint32_t port_type = FC_PORTTYPE_UNKNOWN;

	if (vha->vp_idx) {
		fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
		return;
	}
	switch (vha->hw->current_topology) {
	case ISP_CFG_NL:
		port_type = FC_PORTTYPE_LPORT;
		break;
	case ISP_CFG_FL:
		port_type = FC_PORTTYPE_NLPORT;
		break;
	case ISP_CFG_N:
		port_type = FC_PORTTYPE_PTP;
		break;
	case ISP_CFG_F:
		port_type = FC_PORTTYPE_NPORT;
		break;
	}
	fc_host_port_type(shost) = port_type;
}

static void
qla2x00_get_starget_node_name(struct scsi_target *starget)
{
	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
	scsi_qla_host_t *vha = shost_priv(host);
	fc_port_t *fcport;
	u64 node_name = 0;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->rport &&
		    starget->id == fcport->rport->scsi_target_id) {
			node_name = wwn_to_u64(fcport->node_name);
			break;
		}
	}

	fc_starget_node_name(starget) = node_name;
}

static void
qla2x00_get_starget_port_name(struct scsi_target *starget)
{
	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
	scsi_qla_host_t *vha = shost_priv(host);
	fc_port_t *fcport;
	u64 port_name = 0;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->rport &&
		    starget->id == fcport->rport->scsi_target_id) {
			port_name = wwn_to_u64(fcport->port_name);
			break;
		}
	}

	fc_starget_port_name(starget) = port_name;
}

static void
qla2x00_get_starget_port_id(struct scsi_target *starget)
{
	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
	scsi_qla_host_t *vha = shost_priv(host);
	fc_port_t *fcport;
	uint32_t port_id = ~0U;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->rport &&
		    starget->id == fcport->rport->scsi_target_id) {
			port_id = fcport->d_id.b.domain << 16 |
			    fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
			break;
		}
	}

	fc_starget_port_id(starget) = port_id;
}

static void
qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
{
	if (timeout)
		rport->dev_loss_tmo = timeout;
	else
		rport->dev_loss_tmo = 1;
}
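
/*
 * dev_loss_tmo_callbk() runs when the FC transport gives up on a remote
 * port: outstanding commands for the port are aborted and the driver's
 * fc_port <-> rport cross-references are cleared under the host lock.
 * terminate_rport_io() additionally logs the port out of the fabric
 * before aborting its commands.
 */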
static void
qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
{
	struct Scsi_Host *host = rport_to_shost(rport);
	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;

	if (!fcport)
		return;

	if (unlikely(pci_channel_offline(fcport->vha->hw->pdev)))
		qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
	else
		qla2x00_abort_fcport_cmds(fcport);

	/*
	 * Transport has effectively 'deleted' the rport, clear
	 * all local references.
	 */
	spin_lock_irq(host->host_lock);
	fcport->rport = NULL;
	*((fc_port_t **)rport->dd_data) = NULL;
	spin_unlock_irq(host->host_lock);
}

static void
qla2x00_terminate_rport_io(struct fc_rport *rport)
{
	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;

	if (!fcport)
		return;

	if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
		qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
		return;
	}
	/*
	 * At this point all fcport's software-states are cleared.  Perform any
	 * final cleanup of firmware resources (PCBs and XCBs).
	 */
	if (fcport->loop_id != FC_NO_LOOP_ID &&
	    !test_bit(UNLOADING, &fcport->vha->dpc_flags))
		fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
		    fcport->loop_id, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa);

	qla2x00_abort_fcport_cmds(fcport);
}

static int
qla2x00_issue_lip(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);

	qla2x00_loop_reset(vha);
	return 0;
}

static struct fc_host_statistics *
qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	int rval;
	struct link_statistics *stats;
	dma_addr_t stats_dma;
	struct fc_host_statistics *pfc_host_stat;

	pfc_host_stat = &ha->fc_host_stat;
	memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));

	stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma);
	if (stats == NULL) {
		DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n",
		    __func__, base_vha->host_no));
		goto done;
	}
	memset(stats, 0, DMA_POOL_SIZE);

	rval = QLA_FUNCTION_FAILED;
	if (IS_FWI2_CAPABLE(ha)) {
		rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma);
	} else if (atomic_read(&base_vha->loop_state) == LOOP_READY &&
	    !test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) &&
	    !test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
	    !ha->dpc_active) {
		/* Must be in a 'READY' state for statistics retrieval. */
		rval = qla2x00_get_link_status(base_vha, base_vha->loop_id,
		    stats, stats_dma);
	}

	if (rval != QLA_SUCCESS)
		goto done_free;

	pfc_host_stat->link_failure_count = stats->link_fail_cnt;
	pfc_host_stat->loss_of_sync_count = stats->loss_sync_cnt;
	pfc_host_stat->loss_of_signal_count = stats->loss_sig_cnt;
	pfc_host_stat->prim_seq_protocol_err_count = stats->prim_seq_err_cnt;
	pfc_host_stat->invalid_tx_word_count = stats->inval_xmit_word_cnt;
	pfc_host_stat->invalid_crc_count = stats->inval_crc_cnt;
	if (IS_FWI2_CAPABLE(ha)) {
		pfc_host_stat->lip_count = stats->lip_cnt;
		pfc_host_stat->tx_frames = stats->tx_frames;
		pfc_host_stat->rx_frames = stats->rx_frames;
		pfc_host_stat->dumped_frames = stats->dumped_frames;
		pfc_host_stat->nos_count = stats->nos_rcvd;
	}
	pfc_host_stat->fcp_input_megabytes = ha->qla_stats.input_bytes >> 20;
	pfc_host_stat->fcp_output_megabytes = ha->qla_stats.output_bytes >> 20;

done_free:
	dma_pool_free(ha->s_dma_pool, stats, stats_dma);
done:
	return pfc_host_stat;
}

static void
qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);

	qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost));
}

static void
qla2x00_set_host_system_hostname(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);

	set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
}

static void
qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);
	u64 node_name;

	if (vha->device_flags & SWITCH_FOUND)
		node_name = wwn_to_u64(vha->fabric_node_name);
	else
		node_name = wwn_to_u64(vha->node_name);

	fc_host_fabric_name(shost) = node_name;
}

static void
qla2x00_get_host_port_state(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);
	struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);

	if (!base_vha->flags.online)
		fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
	else if (atomic_read(&base_vha->loop_state) == LOOP_TIMEOUT)
		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
	else
		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
}

static int
qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
{
	int ret = 0;
	uint8_t qos = 0;
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	scsi_qla_host_t *vha = NULL;
	struct qla_hw_data *ha = base_vha->hw;
	uint16_t options = 0;
	int cnt;
	struct req_que *req = ha->req_q_map[0];

	ret = qla24xx_vport_create_req_sanity_check(fc_vport);
	if (ret) {
		DEBUG15(printk("qla24xx_vport_create_req_sanity_check failed, "
		    "status %x\n", ret));
		return (ret);
	}

	vha = qla24xx_create_vhost(fc_vport);
	if (vha == NULL) {
		DEBUG15(printk("qla24xx_create_vhost failed, vha = %p\n",
		    vha));
		return FC_VPORT_FAILED;
	}
	if (disable) {
		atomic_set(&vha->vp_state, VP_OFFLINE);
		fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
	} else
		atomic_set(&vha->vp_state, VP_FAILED);

	/* ready to create vport */
	qla_printk(KERN_INFO, vha->hw, "VP entry id %d assigned.\n",
	    vha->vp_idx);

	/* initialized vport states */
	atomic_set(&vha->loop_state, LOOP_DOWN);
	vha->vp_err_state = VP_ERR_PORTDWN;
	vha->vp_prev_err_state = VP_ERR_UNKWN;
	/* Check if physical ha port is Up */
	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
	    atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
		/* Don't retry or attempt login of this virtual port */
		DEBUG15(printk("scsi(%ld): pport loop_state is not UP.\n",
		    base_vha->host_no));
		atomic_set(&vha->loop_state, LOOP_DEAD);
		if (!disable)
			fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
	}

	if (scsi_add_host(vha->host, &fc_vport->dev)) {
		DEBUG15(printk("scsi(%ld): scsi_add_host failure for VP[%d].\n",
		    vha->host_no, vha->vp_idx));
		goto vport_create_failed_2;
	}

	/* initialize attributes */
	fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
	fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
	fc_host_supported_classes(vha->host) =
	    fc_host_supported_classes(base_vha->host);
	fc_host_supported_speeds(vha->host) =
	    fc_host_supported_speeds(base_vha->host);

	qla24xx_vport_disable(fc_vport, disable);

	if (ha->flags.cpu_affinity_enabled) {
		req = ha->req_q_map[1];
		goto vport_queue;
	} else if (ql2xmaxqueues == 1 || !ha->npiv_info)
		goto vport_queue;
	/* Create a request queue in QoS mode for the vport */
	for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
		if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
		    && memcmp(ha->npiv_info[cnt].node_name, vha->node_name,
		    8) == 0) {
			qos = ha->npiv_info[cnt].q_qos;
			break;
		}
	}
	if (qos) {
		ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
		    qos);
		if (!ret)
			qla_printk(KERN_WARNING, ha,
			    "Can't create request queue for vp_idx:%d\n",
			    vha->vp_idx);
		else {
			DEBUG2(qla_printk(KERN_INFO, ha,
			    "Request Que:%d (QoS: %d) created for vp_idx:%d\n",
			    ret, qos, vha->vp_idx));
			req = ha->req_q_map[ret];
		}
	}

vport_queue:
	vha->req = req;
	return 0;

vport_create_failed_2:
	qla24xx_disable_vp(vha);
	qla24xx_deallocate_vp_id(vha);
	scsi_host_put(vha->host);
	return FC_VPORT_FAILED;
}

static int
qla24xx_vport_delete(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *vha = fc_vport->dd_data;
	fc_port_t *fcport, *tfcport;
	struct qla_hw_data *ha = vha->hw;
	uint16_t id = vha->vp_idx;

	while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
	    test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
		msleep(1000);

	qla24xx_disable_vp(vha);

	fc_remove_host(vha->host);

	scsi_remove_host(vha->host);

	list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) {
		list_del(&fcport->list);
		kfree(fcport);
		fcport = NULL;
	}

	qla24xx_deallocate_vp_id(vha);

	mutex_lock(&ha->vport_lock);
	ha->cur_vport_count--;
	clear_bit(vha->vp_idx, ha->vp_idx_map);
	mutex_unlock(&ha->vport_lock);

	if (vha->timer_active) {
		qla2x00_vp_stop_timer(vha);
		DEBUG15(printk("scsi(%ld): timer for the vport[%d] = %p "
		    "has stopped\n",
		    vha->host_no, vha->vp_idx, vha));
	}

	if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
		if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
			qla_printk(KERN_WARNING, ha,
			    "Queue delete failed.\n");
	}

	scsi_host_put(vha->host);
	qla_printk(KERN_INFO, ha, "vport %d deleted\n", id);
	return 0;
}

static int
qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
{
	scsi_qla_host_t *vha = fc_vport->dd_data;

	if (disable)
		qla24xx_disable_vp(vha);
	else
		qla24xx_enable_vp(vha);

	return 0;
}

struct fc_function_template qla2xxx_transport_functions = {

	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,
	.show_host_supported_speeds = 1,

	.get_host_port_id = qla2x00_get_host_port_id,
	.show_host_port_id = 1,
	.get_host_speed = qla2x00_get_host_speed,
	.show_host_speed = 1,
	.get_host_port_type = qla2x00_get_host_port_type,
	.show_host_port_type = 1,
	.get_host_symbolic_name = qla2x00_get_host_symbolic_name,
	.show_host_symbolic_name = 1,
	.set_host_system_hostname = qla2x00_set_host_system_hostname,
	.show_host_system_hostname = 1,
	.get_host_fabric_name = qla2x00_get_host_fabric_name,
	.show_host_fabric_name = 1,
	.get_host_port_state = qla2x00_get_host_port_state,
	.show_host_port_state = 1,

	.dd_fcrport_size = sizeof(struct fc_port *),
	.show_rport_supported_classes = 1,

	.get_starget_node_name = qla2x00_get_starget_node_name,
	.show_starget_node_name = 1,
	.get_starget_port_name = qla2x00_get_starget_port_name,
	.show_starget_port_name = 1,
	.get_starget_port_id = qla2x00_get_starget_port_id,
	.show_starget_port_id = 1,

	.set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
	.show_rport_dev_loss_tmo = 1,

	.issue_fc_host_lip = qla2x00_issue_lip,
	.dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
	.terminate_rport_io = qla2x00_terminate_rport_io,
	.get_fc_host_stats = qla2x00_get_fc_host_stats,

	.vport_create = qla24xx_vport_create,
	.vport_disable = qla24xx_vport_disable,
	.vport_delete = qla24xx_vport_delete,
};

struct fc_function_template qla2xxx_transport_vport_functions = {

	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,

	.get_host_port_id = qla2x00_get_host_port_id,
	.show_host_port_id = 1,
	.get_host_speed = qla2x00_get_host_speed,
	.show_host_speed = 1,
	.get_host_port_type = qla2x00_get_host_port_type,
	.show_host_port_type = 1,
	.get_host_symbolic_name = qla2x00_get_host_symbolic_name,
	.show_host_symbolic_name = 1,
	.set_host_system_hostname = qla2x00_set_host_system_hostname,
	.show_host_system_hostname = 1,
	.get_host_fabric_name = qla2x00_get_host_fabric_name,
	.show_host_fabric_name = 1,
	.get_host_port_state = qla2x00_get_host_port_state,
	.show_host_port_state = 1,

	.dd_fcrport_size = sizeof(struct fc_port *),
	.show_rport_supported_classes = 1,

	.get_starget_node_name = qla2x00_get_starget_node_name,
	.show_starget_node_name = 1,
	.get_starget_port_name = qla2x00_get_starget_port_name,
	.show_starget_port_name = 1,
	.get_starget_port_id = qla2x00_get_starget_port_id,
	.show_starget_port_id = 1,

	.set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
	.show_rport_dev_loss_tmo = 1,

	.issue_fc_host_lip = qla2x00_issue_lip,
	.dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
	.terminate_rport_io = qla2x00_terminate_rport_io,
	.get_fc_host_stats = qla2x00_get_fc_host_stats,
};
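
/*
 * The vport template above mirrors qla2xxx_transport_functions but omits
 * the vport_create/vport_disable/vport_delete callbacks and the supported
 * speeds attribute, presumably because NPIV virtual ports do not
 * themselves spawn further vports.
 */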
void
qla2x00_init_host_attr(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	u32 speed = FC_PORTSPEED_UNKNOWN;

	fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
	fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
	fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
	fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
	fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;

	if (IS_QLA81XX(ha))
		speed = FC_PORTSPEED_10GBIT;
	else if (IS_QLA25XX(ha))
		speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
		    FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
	else if (IS_QLA24XX_TYPE(ha))
		speed = FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
		    FC_PORTSPEED_1GBIT;
	else if (IS_QLA23XX(ha))
		speed = FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
	else
		speed = FC_PORTSPEED_1GBIT;
	fc_host_supported_speeds(vha->host) = speed;
}