/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2013 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/delay.h>

static int qla24xx_vport_disable(struct fc_vport *, bool);

/* SYSFS attributes --------------------------------------------------------- */

static ssize_t
qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
			   struct bin_attribute *bin_attr,
			   char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	if (!(ha->fw_dump_reading || ha->mctp_dump_reading))
		return 0;

	if (IS_P3P_TYPE(ha)) {
		if (off < ha->md_template_size) {
			rval = memory_read_from_buffer(buf, count,
			    &off, ha->md_tmplt_hdr, ha->md_template_size);
			return rval;
		}
		off -= ha->md_template_size;
		rval = memory_read_from_buffer(buf, count,
		    &off, ha->md_dump, ha->md_dump_size);
		return rval;
	} else if (ha->mctp_dumped && ha->mctp_dump_reading)
		return memory_read_from_buffer(buf, count, &off, ha->mctp_dump,
		    MCTP_DUMP_SIZE);
	else if (ha->fw_dump_reading)
		return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
		    ha->fw_dump_len);
	else
		return 0;
}

static ssize_t
qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
			    struct bin_attribute *bin_attr,
			    char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	int reading;

	if (off != 0)
		return (0);

	/*
	 * Control values accepted on the "fw_dump" node (derived from the
	 * cases below):
	 *   0 - clear a previously exposed firmware dump
	 *   1 - expose a captured firmware dump for reading
	 *   2 - (re)allocate the firmware dump buffer
	 *   3 - force a chip/system error to trigger a dump
	 *   4 - report MiniDump support (P3P parts)
	 *   5 - request an ISP abort (P3P parts)
	 *   6 - clear the MCTP dump
	 *   7 - expose the MCTP dump for reading
	 */
	reading = simple_strtol(buf, NULL, 10);
	switch (reading) {
	case 0:
		if (!ha->fw_dump_reading)
			break;

		ql_log(ql_log_info, vha, 0x705d,
		    "Firmware dump cleared on (%ld).\n", vha->host_no);

		if (IS_P3P_TYPE(ha)) {
			qla82xx_md_free(vha);
			qla82xx_md_prep(vha);
		}
		ha->fw_dump_reading = 0;
		ha->fw_dumped = 0;
		break;
	case 1:
		if (ha->fw_dumped && !ha->fw_dump_reading) {
			ha->fw_dump_reading = 1;

			ql_log(ql_log_info, vha, 0x705e,
			    "Raw firmware dump ready for read on (%ld).\n",
			    vha->host_no);
		}
		break;
	case 2:
		qla2x00_alloc_fw_dump(vha);
		break;
	case 3:
		if (IS_QLA82XX(ha)) {
			qla82xx_idc_lock(ha);
			qla82xx_set_reset_owner(vha);
			qla82xx_idc_unlock(ha);
		} else if (IS_QLA8044(ha)) {
			qla8044_idc_lock(ha);
			qla82xx_set_reset_owner(vha);
			qla8044_idc_unlock(ha);
		} else
			qla2x00_system_error(vha);
		break;
	case 4:
		if (IS_P3P_TYPE(ha)) {
			if (ha->md_tmplt_hdr)
				ql_dbg(ql_dbg_user, vha, 0x705b,
				    "MiniDump supported with this firmware.\n");
			else
				ql_dbg(ql_dbg_user, vha, 0x709d,
				    "MiniDump not supported with this firmware.\n");
		}
		break;
	case 5:
		if (IS_P3P_TYPE(ha))
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;
	case 6:
		if (!ha->mctp_dump_reading)
			break;
		ql_log(ql_log_info, vha, 0x70c1,
		    "MCTP dump cleared on (%ld).\n", vha->host_no);
		ha->mctp_dump_reading = 0;
		ha->mctp_dumped = 0;
		break;
	case 7:
		if (ha->mctp_dumped && !ha->mctp_dump_reading) {
			ha->mctp_dump_reading = 1;
			ql_log(ql_log_info, vha, 0x70c2,
			    "Raw mctp dump ready for read on (%ld).\n",
			    vha->host_no);
		}
		break;
	}
	return count;
}

static struct bin_attribute sysfs_fw_dump_attr = {
	.attr = {
		.name = "fw_dump",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = qla2x00_sysfs_read_fw_dump,
	.write = qla2x00_sysfs_write_fw_dump,
};

static ssize_t
qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
			 struct bin_attribute *bin_attr,
			 char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;

	if (!capable(CAP_SYS_ADMIN))
		return 0;

	if (IS_NOCACHE_VPD_TYPE(ha))
		ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
		    ha->nvram_size);
	return memory_read_from_buffer(buf, count, &off, ha->nvram,
	    ha->nvram_size);
}

static ssize_t
qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
			  struct bin_attribute *bin_attr,
			  char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	uint16_t cnt;

	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size ||
	    !ha->isp_ops->write_nvram)
		return -EINVAL;

	/* Checksum NVRAM. */
	if (IS_FWI2_CAPABLE(ha)) {
		uint32_t *iter;
		uint32_t chksum;

		iter = (uint32_t *)buf;
		chksum = 0;
		for (cnt = 0; cnt < ((count >> 2) - 1); cnt++)
			chksum += le32_to_cpu(*iter++);
		chksum = ~chksum + 1;
		*iter = cpu_to_le32(chksum);
	} else {
		uint8_t *iter;
		uint8_t chksum;

		iter = (uint8_t *)buf;
		chksum = 0;
		for (cnt = 0; cnt < count - 1; cnt++)
			chksum += *iter++;
		chksum = ~chksum + 1;
		*iter = chksum;
	}

	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x705f,
		    "HBA not online, failing NVRAM update.\n");
		return -EAGAIN;
	}

	/* Write NVRAM. */
	ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->nvram_base, count);
	ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,
	    count);

	ql_dbg(ql_dbg_user, vha, 0x7060,
	    "Setting ISP_ABORT_NEEDED\n");

	/* NVRAM settings take effect immediately. */
	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
	qla2x00_wait_for_chip_reset(vha);

	return count;
}

static struct bin_attribute sysfs_nvram_attr = {
	.attr = {
		.name = "nvram",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 512,
	.read = qla2x00_sysfs_read_nvram,
	.write = qla2x00_sysfs_write_nvram,
};

static ssize_t
qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
			  struct bin_attribute *bin_attr,
			  char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;

	if (ha->optrom_state != QLA_SREADING)
		return 0;

	return memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
	    ha->optrom_region_size);
}

static ssize_t
qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
			   struct bin_attribute *bin_attr,
			   char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;

	if (ha->optrom_state != QLA_SWRITING)
		return -EINVAL;
	if (off > ha->optrom_region_size)
		return -ERANGE;
	if (off + count > ha->optrom_region_size)
		count = ha->optrom_region_size - off;

	memcpy(&ha->optrom_buffer[off], buf, count);

	return count;
}

static struct bin_attribute sysfs_optrom_attr = {
	.attr = {
		.name = "optrom",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = qla2x00_sysfs_read_optrom,
	.write = qla2x00_sysfs_write_optrom,
};

/*
 * The "optrom_ctl" node takes a "<val>:<start>:<size>" command string
 * (start/size are optional and hex):
 *   0 - free the staged flash region buffer
 *   1 - read a flash region into the buffer (exposed via "optrom")
 *   2 - stage a flash region for writing (data written via "optrom")
 *   3 - commit the staged data to flash
 */
static ssize_t
qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *bin_attr,
			       char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;

	uint32_t start = 0;
	uint32_t size = ha->optrom_size;
	int val, valid;

	if (off)
		return -EINVAL;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return -EAGAIN;

	if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1)
		return -EINVAL;
	if (start > ha->optrom_size)
		return -EINVAL;

	switch (val) {
	case 0:
		if (ha->optrom_state != QLA_SREADING &&
		    ha->optrom_state != QLA_SWRITING)
			return -EINVAL;

		ha->optrom_state = QLA_SWAITING;

		ql_dbg(ql_dbg_user, vha, 0x7061,
		    "Freeing flash region allocation -- 0x%x bytes.\n",
		    ha->optrom_region_size);

		vfree(ha->optrom_buffer);
		ha->optrom_buffer = NULL;
		break;
	case 1:
		if (ha->optrom_state != QLA_SWAITING)
			return -EINVAL;

		ha->optrom_region_start = start;
		ha->optrom_region_size = start + size > ha->optrom_size ?
		    ha->optrom_size - start : size;

		ha->optrom_state = QLA_SREADING;
		ha->optrom_buffer = vmalloc(ha->optrom_region_size);
		if (ha->optrom_buffer == NULL) {
			ql_log(ql_log_warn, vha, 0x7062,
			    "Unable to allocate memory for optrom retrieval "
			    "(%x).\n", ha->optrom_region_size);

			ha->optrom_state = QLA_SWAITING;
			return -ENOMEM;
		}

		if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x7063,
			    "HBA not online, failing NVRAM update.\n");
			return -EAGAIN;
		}

		ql_dbg(ql_dbg_user, vha, 0x7064,
		    "Reading flash region -- 0x%x/0x%x.\n",
		    ha->optrom_region_start, ha->optrom_region_size);

		memset(ha->optrom_buffer, 0, ha->optrom_region_size);
		ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
		    ha->optrom_region_start, ha->optrom_region_size);
		break;
	case 2:
		if (ha->optrom_state != QLA_SWAITING)
			return -EINVAL;

		/*
		 * We need to be more restrictive on which FLASH regions are
		 * allowed to be updated via user-space. Regions accessible
		 * via this method include:
		 *
		 * ISP21xx/ISP22xx/ISP23xx type boards:
		 *
		 *	0x000000 -> 0x020000 -- Boot code.
		 *
		 * ISP2322/ISP24xx type boards:
		 *
		 *	0x000000 -> 0x07ffff -- Boot code.
		 *	0x080000 -> 0x0fffff -- Firmware.
		 *
		 * ISP25xx type boards:
		 *
		 *	0x000000 -> 0x07ffff -- Boot code.
		 *	0x080000 -> 0x0fffff -- Firmware.
		 *	0x120000 -> 0x12ffff -- VPD and HBA parameters.
		 */
		valid = 0;
		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
			valid = 1;
		else if (start == (ha->flt_region_boot * 4) ||
		    start == (ha->flt_region_fw * 4))
			valid = 1;
		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)
		    || IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
			valid = 1;
		if (!valid) {
			ql_log(ql_log_warn, vha, 0x7065,
			    "Invalid start region 0x%x/0x%x.\n", start, size);
			return -EINVAL;
		}

		ha->optrom_region_start = start;
		ha->optrom_region_size = start + size > ha->optrom_size ?
		    ha->optrom_size - start : size;

		ha->optrom_state = QLA_SWRITING;
		ha->optrom_buffer = vmalloc(ha->optrom_region_size);
		if (ha->optrom_buffer == NULL) {
			ql_log(ql_log_warn, vha, 0x7066,
			    "Unable to allocate memory for optrom update "
			    "(%x)\n", ha->optrom_region_size);

			ha->optrom_state = QLA_SWAITING;
			return -ENOMEM;
		}

		ql_dbg(ql_dbg_user, vha, 0x7067,
		    "Staging flash region write -- 0x%x/0x%x.\n",
		    ha->optrom_region_start, ha->optrom_region_size);

		memset(ha->optrom_buffer, 0, ha->optrom_region_size);
		break;
	case 3:
		if (ha->optrom_state != QLA_SWRITING)
			return -EINVAL;

		if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x7068,
			    "HBA not online, failing flash update.\n");
			return -EAGAIN;
		}

		ql_dbg(ql_dbg_user, vha, 0x7069,
		    "Writing flash region -- 0x%x/0x%x.\n",
		    ha->optrom_region_start, ha->optrom_region_size);

		ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
		    ha->optrom_region_start, ha->optrom_region_size);
		break;
	default:
		return -EINVAL;
	}
	return count;
}

static struct bin_attribute sysfs_optrom_ctl_attr = {
	.attr = {
		.name = "optrom_ctl",
		.mode = S_IWUSR,
	},
	.size = 0,
	.write = qla2x00_sysfs_write_optrom_ctl,
};

static ssize_t
qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
		       struct bin_attribute *bin_attr,
		       char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return -EAGAIN;

	if (!capable(CAP_SYS_ADMIN))
		return -EINVAL;

	if (IS_NOCACHE_VPD_TYPE(ha))
		ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
		    ha->vpd_size);
	return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
}

static ssize_t
qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
			struct bin_attribute *bin_attr,
			char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	uint8_t *tmp_data;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return 0;

	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
	    !ha->isp_ops->write_nvram)
		return 0;

	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x706a,
		    "HBA not online, failing VPD update.\n");
		return -EAGAIN;
	}

	/* Write NVRAM. */
	ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->vpd_base, count);
	ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, ha->vpd_base, count);

	/* Update flash version information for 4Gb & above. */
	if (!IS_FWI2_CAPABLE(ha))
		return -EINVAL;

	tmp_data = vmalloc(256);
	if (!tmp_data) {
		ql_log(ql_log_warn, vha, 0x706b,
		    "Unable to allocate memory for VPD information update.\n");
		return -ENOMEM;
	}
	ha->isp_ops->get_flash_version(vha, tmp_data);
	vfree(tmp_data);

	return count;
}

static struct bin_attribute sysfs_vpd_attr = {
	.attr = {
		.name = "vpd",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = qla2x00_sysfs_read_vpd,
	.write = qla2x00_sysfs_write_vpd,
};

static ssize_t
qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
		       struct bin_attribute *bin_attr,
		       char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	uint16_t iter, addr, offset;
	int rval;

	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != SFP_DEV_SIZE * 2)
		return 0;

	if (ha->sfp_data)
		goto do_read;

	ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
	    &ha->sfp_data_dma);
	if (!ha->sfp_data) {
		ql_log(ql_log_warn, vha, 0x706c,
		    "Unable to allocate memory for SFP read-data.\n");
		return 0;
	}

do_read:
	memset(ha->sfp_data, 0, SFP_BLOCK_SIZE);
	addr = 0xa0;
	for (iter = 0, offset = 0; iter < (SFP_DEV_SIZE * 2) / SFP_BLOCK_SIZE;
	    iter++, offset += SFP_BLOCK_SIZE) {
		if (iter == 4) {
			/* Skip to next device address. */
			addr = 0xa2;
			offset = 0;
		}

		rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, ha->sfp_data,
		    addr, offset, SFP_BLOCK_SIZE, 0);
		if (rval != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x706d,
			    "Unable to read SFP data (%x/%x/%x).\n", rval,
			    addr, offset);

			return -EIO;
		}
		memcpy(buf, ha->sfp_data, SFP_BLOCK_SIZE);
		buf += SFP_BLOCK_SIZE;
	}

	return count;
}

static struct bin_attribute sysfs_sfp_attr = {
	.attr = {
		.name = "sfp",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = SFP_DEV_SIZE * 2,
	.read = qla2x00_sysfs_read_sfp,
};

static ssize_t
qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
			  struct bin_attribute *bin_attr,
			  char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	int type;
	uint32_t idc_control;
	uint8_t *tmp_data = NULL;

	if (off != 0)
		return -EINVAL;

	type = simple_strtol(buf, NULL, 10);
	switch (type) {
	case 0x2025c:
		ql_log(ql_log_info, vha, 0x706e,
		    "Issuing ISP reset.\n");

		scsi_block_requests(vha->host);
		if (IS_QLA82XX(ha)) {
			ha->flags.isp82xx_no_md_cap = 1;
			qla82xx_idc_lock(ha);
			qla82xx_set_reset_owner(vha);
			qla82xx_idc_unlock(ha);
		} else if (IS_QLA8044(ha)) {
			qla8044_idc_lock(ha);
			idc_control = qla8044_rd_reg(ha,
			    QLA8044_IDC_DRV_CTRL);
			qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
			    (idc_control | GRACEFUL_RESET_BIT1));
			qla82xx_set_reset_owner(vha);
			qla8044_idc_unlock(ha);
		} else {
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		}
		qla2x00_wait_for_chip_reset(vha);
		scsi_unblock_requests(vha->host);
		break;
	case 0x2025d:
		if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
			return -EPERM;

		ql_log(ql_log_info, vha, 0x706f,
		    "Issuing MPI reset.\n");

		if (IS_QLA83XX(ha)) {
			uint32_t idc_control;

			qla83xx_idc_lock(vha, 0);
			__qla83xx_get_idc_control(vha, &idc_control);
			idc_control |= QLA83XX_IDC_GRACEFUL_RESET;
			__qla83xx_set_idc_control(vha, idc_control);
			qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
			    QLA8XXX_DEV_NEED_RESET);
			qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
			qla83xx_idc_unlock(vha, 0);
			break;
		} else {
			/* Make sure FC side is not in reset */
			qla2x00_wait_for_hba_online(vha);

			/* Issue MPI reset */
			scsi_block_requests(vha->host);
			if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
				ql_log(ql_log_warn, vha, 0x7070,
				    "MPI reset failed.\n");
			scsi_unblock_requests(vha->host);
			break;
		}
	case 0x2025e:
		if (!IS_P3P_TYPE(ha) || vha != base_vha) {
			ql_log(ql_log_info, vha, 0x7071,
			    "FCoE ctx reset not supported.\n");
			return -EPERM;
		}

		ql_log(ql_log_info, vha, 0x7072,
		    "Issuing FCoE ctx reset.\n");
		set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
		qla2x00_wait_for_fcoe_ctx_reset(vha);
		break;
	case 0x2025f:
		if (!IS_QLA8031(ha))
			return -EPERM;
		ql_log(ql_log_info, vha, 0x70bc,
		    "Disabling Reset by IDC control\n");
		qla83xx_idc_lock(vha, 0);
		__qla83xx_get_idc_control(vha, &idc_control);
		idc_control |= QLA83XX_IDC_RESET_DISABLED;
		__qla83xx_set_idc_control(vha, idc_control);
		qla83xx_idc_unlock(vha, 0);
		break;
	case 0x20260:
		if (!IS_QLA8031(ha))
			return -EPERM;
		ql_log(ql_log_info, vha, 0x70bd,
		    "Enabling Reset by IDC control\n");
		qla83xx_idc_lock(vha, 0);
		__qla83xx_get_idc_control(vha, &idc_control);
		idc_control &= ~QLA83XX_IDC_RESET_DISABLED;
		__qla83xx_set_idc_control(vha, idc_control);
		qla83xx_idc_unlock(vha, 0);
		break;
	case 0x20261:
		ql_dbg(ql_dbg_user, vha, 0x70e0,
		    "Updating cache versions without reset ");

		tmp_data = vmalloc(256);
		if (!tmp_data) {
			ql_log(ql_log_warn, vha, 0x70e1,
			    "Unable to allocate memory for VPD information update.\n");
			return -ENOMEM;
		}
		ha->isp_ops->get_flash_version(vha, tmp_data);
		vfree(tmp_data);
		break;
	}
	return count;
}

static struct bin_attribute sysfs_reset_attr = {
	.attr = {
		.name = "reset",
		.mode = S_IWUSR,
	},
	.size = 0,
	.write = qla2x00_sysfs_write_reset,
};

static ssize_t
qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *bin_attr,
			       char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	int rval;
	uint16_t actual_size;

	if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE)
		return 0;

	if (ha->xgmac_data)
		goto do_read;

	ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
	    &ha->xgmac_data_dma, GFP_KERNEL);
	if (!ha->xgmac_data) {
		ql_log(ql_log_warn, vha, 0x7076,
		    "Unable to allocate memory for XGMAC read-data.\n");
		return 0;
	}

do_read:
	actual_size = 0;
	memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE);

	rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
	    XGMAC_DATA_SIZE, &actual_size);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7077,
		    "Unable to read XGMAC data (%x).\n", rval);
		count = 0;
	}

	count = actual_size > count ? count : actual_size;
	memcpy(buf, ha->xgmac_data, count);

	return count;
}

static struct bin_attribute sysfs_xgmac_stats_attr = {
	.attr = {
		.name = "xgmac_stats",
		.mode = S_IRUSR,
	},
	.size = 0,
	.read = qla2x00_sysfs_read_xgmac_stats,
};

static ssize_t
qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
			    struct bin_attribute *bin_attr,
			    char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	int rval;
	uint16_t actual_size;

	if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)
		return 0;

	if (ha->dcbx_tlv)
		goto do_read;

	ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
	    &ha->dcbx_tlv_dma, GFP_KERNEL);
	if (!ha->dcbx_tlv) {
		ql_log(ql_log_warn, vha, 0x7078,
		    "Unable to allocate memory for DCBX TLV read-data.\n");
		return -ENOMEM;
	}

do_read:
	actual_size = 0;
	memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE);

	rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
	    DCBX_TLV_DATA_SIZE);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7079,
		    "Unable to read DCBX TLV (%x).\n", rval);
		return -EIO;
	}

	memcpy(buf, ha->dcbx_tlv, count);

	return count;
}

static struct bin_attribute sysfs_dcbx_tlv_attr = {
	.attr = {
		.name = "dcbx_tlv",
		.mode = S_IRUSR,
	},
	.size = 0,
	.read = qla2x00_sysfs_read_dcbx_tlv,
};

static struct sysfs_entry {
	char *name;
	struct bin_attribute *attr;
	int is4GBp_only;
} bin_file_entries[] = {
	{ "fw_dump", &sysfs_fw_dump_attr, },
	{ "nvram", &sysfs_nvram_attr, },
	{ "optrom", &sysfs_optrom_attr, },
	{ "optrom_ctl", &sysfs_optrom_ctl_attr, },
	{ "vpd", &sysfs_vpd_attr, 1 },
	{ "sfp", &sysfs_sfp_attr, 1 },
	{ "reset", &sysfs_reset_attr, },
	{ "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
	{ "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
	{ NULL },
};

void
qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
{
	struct Scsi_Host *host = vha->host;
	struct sysfs_entry *iter;
	int ret;

	for (iter = bin_file_entries; iter->name; iter++) {
		if (iter->is4GBp_only && !IS_FWI2_CAPABLE(vha->hw))
			continue;
		if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw))
			continue;
		if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))
			continue;

		ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
		    iter->attr);
		if (ret)
			ql_log(ql_log_warn, vha, 0x00f3,
			    "Unable to create sysfs %s binary attribute (%d).\n",
			    iter->name, ret);
		else
			ql_dbg(ql_dbg_init, vha, 0x00f4,
			    "Successfully created sysfs %s binary attribute.\n",
			    iter->name);
	}
}

void
qla2x00_free_sysfs_attr(scsi_qla_host_t *vha)
{
	struct Scsi_Host *host = vha->host;
	struct sysfs_entry *iter;
	struct qla_hw_data *ha = vha->hw;

	for (iter = bin_file_entries; iter->name; iter++) {
		if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha))
			continue;
		if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha))
			continue;
		if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))
			continue;

		sysfs_remove_bin_file(&host->shost_gendev.kobj,
		    iter->attr);
	}

	if (ha->beacon_blink_led == 1)
		ha->isp_ops->beacon_off(vha);
}

/* Scsi_Host attributes. */

static ssize_t
qla2x00_drvr_version_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str);
}

static ssize_t
qla2x00_fw_version_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	char fw_str[128];

	return snprintf(buf, PAGE_SIZE, "%s\n",
	    ha->isp_ops->fw_version_str(vha, fw_str));
}

static ssize_t
qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	uint32_t sn;

	if (IS_QLAFX00(vha->hw)) {
		return snprintf(buf, PAGE_SIZE, "%s\n",
		    vha->hw->mr.serial_num);
	} else if (IS_FWI2_CAPABLE(ha)) {
		qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE);
		return snprintf(buf, PAGE_SIZE, "%s\n", buf);
	}

	sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
	return snprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,
	    sn % 100000);
}

static ssize_t
qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr,
		      char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	return snprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
}

static ssize_t
qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,
		    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;

	if (IS_QLAFX00(vha->hw))
		return snprintf(buf, PAGE_SIZE, "%s\n",
		    vha->hw->mr.hw_version);

	return snprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
	    ha->product_id[0], ha->product_id[1], ha->product_id[2],
	    ha->product_id[3]);
}

static ssize_t
qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

	if (IS_QLAFX00(vha->hw))
		return snprintf(buf, PAGE_SIZE, "%s\n",
		    vha->hw->mr.product_name);

	return snprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
}

static ssize_t
qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	return snprintf(buf, PAGE_SIZE, "%s\n",
	    vha->hw->model_desc ? vha->hw->model_desc : "");
}

static ssize_t
qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,
		      char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	char pci_info[30];

	return snprintf(buf, PAGE_SIZE, "%s\n",
	    vha->hw->isp_ops->pci_info_str(vha, pci_info));
}

static ssize_t
qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	int len = 0;

	if (atomic_read(&vha->loop_state) == LOOP_DOWN ||
	    atomic_read(&vha->loop_state) == LOOP_DEAD ||
	    vha->device_flags & DFLG_NO_CABLE)
		len = snprintf(buf, PAGE_SIZE, "Link Down\n");
	else if (atomic_read(&vha->loop_state) != LOOP_READY ||
	    qla2x00_reset_active(vha))
		len = snprintf(buf, PAGE_SIZE, "Unknown Link State\n");
	else {
		len = snprintf(buf, PAGE_SIZE, "Link Up - ");

		switch (ha->current_topology) {
		case ISP_CFG_NL:
			len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n");
			break;
		case ISP_CFG_FL:
			len += snprintf(buf + len, PAGE_SIZE-len, "FL_Port\n");
			break;
		case ISP_CFG_N:
			len += snprintf(buf + len, PAGE_SIZE-len,
			    "N_Port to N_Port\n");
			break;
		case ISP_CFG_F:
			len += snprintf(buf + len, PAGE_SIZE-len, "F_Port\n");
			break;
		default:
			len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n");
			break;
		}
	}
	return len;
}

static ssize_t
qla2x00_zio_show(struct device *dev, struct device_attribute *attr,
		 char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	int len = 0;

	switch (vha->hw->zio_mode) {
	case QLA_ZIO_MODE_6:
		len += snprintf(buf + len, PAGE_SIZE-len, "Mode 6\n");
		break;
	case QLA_ZIO_DISABLED:
		len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
		break;
	}
	return len;
}

static ssize_t
qla2x00_zio_store(struct device *dev, struct device_attribute *attr,
		  const char *buf, size_t count)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	int val = 0;
	uint16_t zio_mode;

	if (!IS_ZIO_SUPPORTED(ha))
		return -ENOTSUPP;

	if (sscanf(buf, "%d", &val) != 1)
		return -EINVAL;

	if (val)
		zio_mode = QLA_ZIO_MODE_6;
	else
		zio_mode = QLA_ZIO_DISABLED;

	/* Update per-hba values and queue a reset. */
	if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) {
		ha->zio_mode = zio_mode;
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	}
	return strlen(buf);
}

static ssize_t
qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

	return snprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100);
}

static ssize_t
qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	int val = 0;
	uint16_t zio_timer;

	if (sscanf(buf, "%d", &val) != 1)
		return -EINVAL;
	if (val > 25500 || val < 100)
		return -ERANGE;

	zio_timer = (uint16_t)(val / 100);
	vha->hw->zio_timer = zio_timer;

	return strlen(buf);
}

static ssize_t
qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,
		    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	int len = 0;

	if (vha->hw->beacon_blink_led)
		len += snprintf(buf + len, PAGE_SIZE-len, "Enabled\n");
	else
		len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
	return len;
}

static ssize_t
qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
		     const char *buf, size_t count)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	int val = 0;
	int rval;

	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		return -EPERM;

	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
		ql_log(ql_log_warn, vha, 0x707a,
		    "Abort ISP active -- ignoring beacon request.\n");
		return -EBUSY;
	}

	if (sscanf(buf, "%d", &val) != 1)
		return -EINVAL;

	if (val)
		rval = ha->isp_ops->beacon_on(vha);
	else
		rval = ha->isp_ops->beacon_off(vha);

	if (rval != QLA_SUCCESS)
		count = 0;

	return count;
}

static ssize_t
qla2x00_optrom_bios_version_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
	    ha->bios_revision[0]);
}

static ssize_t
qla2x00_optrom_efi_version_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
	    ha->efi_revision[0]);
}

static ssize_t
qla2x00_optrom_fcode_version_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
	    ha->fcode_revision[0]);
}

static ssize_t
qla2x00_optrom_fw_version_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
	    ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
	    ha->fw_revision[3]);
}

static ssize_t
qla2x00_optrom_gold_fw_version_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
		return snprintf(buf, PAGE_SIZE, "\n");

	return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n",
	    ha->gold_fw_version[0], ha->gold_fw_version[1],
	    ha->gold_fw_version[2], ha->gold_fw_version[3]);
}

static ssize_t
qla2x00_total_isp_aborts_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	return snprintf(buf, PAGE_SIZE, "%d\n",
	    vha->qla_stats.total_isp_aborts);
}

static ssize_t
qla24xx_84xx_fw_version_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	int rval = QLA_SUCCESS;
	uint16_t status[2] = {0, 0};
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA84XX(ha))
		return snprintf(buf, PAGE_SIZE, "\n");

	if (ha->cs84xx->op_fw_version == 0)
		rval = qla84xx_verify_chip(vha, status);

	if ((rval == QLA_SUCCESS) && (status[0] == 0))
		return snprintf(buf, PAGE_SIZE, "%u\n",
		    (uint32_t)ha->cs84xx->op_fw_version);

	return snprintf(buf, PAGE_SIZE, "\n");
}

static ssize_t
qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		return snprintf(buf, PAGE_SIZE, "\n");

	return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
	    ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2],
	    ha->mpi_capabilities);
}

static ssize_t
qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
		return snprintf(buf, PAGE_SIZE, "\n");

	return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
	    ha->phy_version[0], ha->phy_version[1], ha->phy_version[2]);
}

static ssize_t
qla2x00_flash_block_size_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;

	return snprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);
}

static ssize_t
qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

	if (!IS_CNA_CAPABLE(vha->hw))
		return snprintf(buf, PAGE_SIZE, "\n");

	return snprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
}

static ssize_t
qla2x00_vn_port_mac_address_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

	if (!IS_CNA_CAPABLE(vha->hw))
		return snprintf(buf, PAGE_SIZE, "\n");

	return snprintf(buf, PAGE_SIZE, "%pMR\n", vha->fcoe_vn_port_mac);
}

static ssize_t
qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

	return snprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap);
}

static ssize_t
qla2x00_thermal_temp_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	uint16_t temp = 0;

	if (qla2x00_reset_active(vha)) {
		ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n");
		goto done;
	}

	if (vha->hw->flags.eeh_busy) {
		ql_log(ql_log_warn, vha, 0x70dd, "PCI EEH busy.\n");
		goto done;
	}

	if (qla2x00_get_thermal_temp(vha, &temp) == QLA_SUCCESS)
		return snprintf(buf, PAGE_SIZE, "%d\n", temp);

done:
	return snprintf(buf, PAGE_SIZE, "\n");
}

static ssize_t
qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
		      char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	int rval = QLA_FUNCTION_FAILED;
	uint16_t state[5];
	uint32_t pstate;

	if (IS_QLAFX00(vha->hw)) {
		pstate = qlafx00_fw_state_show(dev, attr, buf);
		return snprintf(buf, PAGE_SIZE, "0x%x\n", pstate);
	}

	if (qla2x00_reset_active(vha))
		ql_log(ql_log_warn, vha, 0x707c,
		    "ISP reset active.\n");
	else if (!vha->hw->flags.eeh_busy)
		rval = qla2x00_get_firmware_state(vha, state);
	if (rval != QLA_SUCCESS)
		memset(state, -1, sizeof(state));

	return snprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x\n", state[0],
	    state[1], state[2], state[3], state[4]);
}

static ssize_t
qla2x00_diag_requests_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

	if (!IS_BIDI_CAPABLE(vha->hw))
		return snprintf(buf, PAGE_SIZE, "\n");

	return snprintf(buf, PAGE_SIZE, "%llu\n", vha->bidi_stats.io_count);
}

static ssize_t
qla2x00_diag_megabytes_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

	if (!IS_BIDI_CAPABLE(vha->hw))
		return snprintf(buf, PAGE_SIZE, "\n");

	return snprintf(buf, PAGE_SIZE, "%llu\n",
	    vha->bidi_stats.transfer_bytes >> 20);
}

static ssize_t
qla2x00_fw_dump_size_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	uint32_t size;

	if (!ha->fw_dumped)
		size = 0;
	else if (IS_QLA82XX(ha))
		size = ha->md_template_size + ha->md_dump_size;
	else
		size = ha->fw_dump_len;

	return snprintf(buf, PAGE_SIZE, "%d\n", size);
}

static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
static DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL);
static DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL);
static DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL);
static DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL);
static DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL);
static DEVICE_ATTR(link_state, S_IRUGO, qla2x00_link_state_show, NULL);
static DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show, qla2x00_zio_store);
static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
		   qla2x00_zio_timer_store);
static DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show,
		   qla2x00_beacon_store);
static DEVICE_ATTR(optrom_bios_version, S_IRUGO,
		   qla2x00_optrom_bios_version_show, NULL);
static DEVICE_ATTR(optrom_efi_version, S_IRUGO,
		   qla2x00_optrom_efi_version_show, NULL);
static DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
		   qla2x00_optrom_fcode_version_show, NULL);
static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
		   NULL);
static DEVICE_ATTR(optrom_gold_fw_version, S_IRUGO,
		   qla2x00_optrom_gold_fw_version_show, NULL);
static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show,
		   NULL);
static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
		   NULL);
static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,
		   NULL);
static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL);
static DEVICE_ATTR(vn_port_mac_address, S_IRUGO,
		   qla2x00_vn_port_mac_address_show, NULL);
static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL);
static DEVICE_ATTR(thermal_temp, S_IRUGO, qla2x00_thermal_temp_show, NULL);
static DEVICE_ATTR(diag_requests, S_IRUGO, qla2x00_diag_requests_show, NULL);
static DEVICE_ATTR(diag_megabytes, S_IRUGO, qla2x00_diag_megabytes_show, NULL);
static DEVICE_ATTR(fw_dump_size, S_IRUGO, qla2x00_fw_dump_size_show, NULL);

struct device_attribute *qla2x00_host_attrs[] = {
	&dev_attr_driver_version,
	&dev_attr_fw_version,
	&dev_attr_serial_num,
	&dev_attr_isp_name,
	&dev_attr_isp_id,
	&dev_attr_model_name,
	&dev_attr_model_desc,
	&dev_attr_pci_info,
	&dev_attr_link_state,
	&dev_attr_zio,
	&dev_attr_zio_timer,
	&dev_attr_beacon,
	&dev_attr_optrom_bios_version,
	&dev_attr_optrom_efi_version,
	&dev_attr_optrom_fcode_version,
	&dev_attr_optrom_fw_version,
	&dev_attr_84xx_fw_version,
	&dev_attr_total_isp_aborts,
	&dev_attr_mpi_version,
	&dev_attr_phy_version,
	&dev_attr_flash_block_size,
	&dev_attr_vlan_id,
	&dev_attr_vn_port_mac_address,
	&dev_attr_fabric_param,
	&dev_attr_fw_state,
	&dev_attr_optrom_gold_fw_version,
	&dev_attr_thermal_temp,
	&dev_attr_diag_requests,
	&dev_attr_diag_megabytes,
	&dev_attr_fw_dump_size,
	NULL,
};
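
/*
 * Note (illustrative sketch, assuming the usual qla2xxx layout): the
 * qla2x00_host_attrs[] table above is not registered in this file; it is
 * referenced from the driver's scsi_host_template (defined in qla_os.c),
 * roughly as:
 *
 *	static struct scsi_host_template qla2xxx_driver_template = {
 *		...
 *		.shost_attrs		= qla2x00_host_attrs,
 *	};
 *
 * The SCSI midlayer then creates the corresponding sysfs files when the
 * Scsi_Host is added.
 */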

/* Host attributes. */

static void
qla2x00_get_host_port_id(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);

	fc_host_port_id(shost) = vha->d_id.b.domain << 16 |
	    vha->d_id.b.area << 8 | vha->d_id.b.al_pa;
}

static void
qla2x00_get_host_speed(struct Scsi_Host *shost)
{
	struct qla_hw_data *ha = ((struct scsi_qla_host *)
	    (shost_priv(shost)))->hw;
	u32 speed = FC_PORTSPEED_UNKNOWN;

	if (IS_QLAFX00(ha)) {
		qlafx00_get_host_speed(shost);
		return;
	}

	switch (ha->link_data_rate) {
	case PORT_SPEED_1GB:
		speed = FC_PORTSPEED_1GBIT;
		break;
	case PORT_SPEED_2GB:
		speed = FC_PORTSPEED_2GBIT;
		break;
	case PORT_SPEED_4GB:
		speed = FC_PORTSPEED_4GBIT;
		break;
	case PORT_SPEED_8GB:
		speed = FC_PORTSPEED_8GBIT;
		break;
	case PORT_SPEED_10GB:
		speed = FC_PORTSPEED_10GBIT;
		break;
	case PORT_SPEED_16GB:
		speed = FC_PORTSPEED_16GBIT;
		break;
	}
	fc_host_speed(shost) = speed;
}

static void
qla2x00_get_host_port_type(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);
	uint32_t port_type = FC_PORTTYPE_UNKNOWN;

	if (vha->vp_idx) {
		fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
		return;
	}
	switch (vha->hw->current_topology) {
	case ISP_CFG_NL:
		port_type = FC_PORTTYPE_LPORT;
		break;
	case ISP_CFG_FL:
		port_type = FC_PORTTYPE_NLPORT;
		break;
	case ISP_CFG_N:
		port_type = FC_PORTTYPE_PTP;
		break;
	case ISP_CFG_F:
		port_type = FC_PORTTYPE_NPORT;
		break;
	}
	fc_host_port_type(shost) = port_type;
}

static void
qla2x00_get_starget_node_name(struct scsi_target *starget)
{
	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
	scsi_qla_host_t *vha = shost_priv(host);
	fc_port_t *fcport;
	u64 node_name = 0;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->rport &&
		    starget->id == fcport->rport->scsi_target_id) {
			node_name = wwn_to_u64(fcport->node_name);
			break;
		}
	}

	fc_starget_node_name(starget) = node_name;
}

static void
qla2x00_get_starget_port_name(struct scsi_target *starget)
{
	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
	scsi_qla_host_t *vha = shost_priv(host);
	fc_port_t *fcport;
	u64 port_name = 0;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->rport &&
		    starget->id == fcport->rport->scsi_target_id) {
			port_name = wwn_to_u64(fcport->port_name);
			break;
		}
	}

	fc_starget_port_name(starget) = port_name;
}

static void
qla2x00_get_starget_port_id(struct scsi_target *starget)
{
	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
	scsi_qla_host_t *vha = shost_priv(host);
	fc_port_t *fcport;
	uint32_t port_id = ~0U;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->rport &&
		    starget->id == fcport->rport->scsi_target_id) {
			port_id = fcport->d_id.b.domain << 16 |
			    fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
			break;
		}
	}

	fc_starget_port_id(starget) = port_id;
}

static void
qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
{
	if (timeout)
		rport->dev_loss_tmo = timeout;
	else
		rport->dev_loss_tmo = 1;
}

static void
qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
{
	struct Scsi_Host *host = rport_to_shost(rport);
	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
	unsigned long flags;

	if (!fcport)
		return;

	/* Now that the rport has been deleted, set the fcport state to
	   FCS_DEVICE_DEAD */
	qla2x00_set_fcport_state(fcport, FCS_DEVICE_DEAD);

	/*
	 * Transport has effectively 'deleted' the rport, clear
	 * all local references.
	 */
	spin_lock_irqsave(host->host_lock, flags);
	fcport->rport = fcport->drport = NULL;
	*((fc_port_t **)rport->dd_data) = NULL;
	spin_unlock_irqrestore(host->host_lock, flags);

	if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
		return;

	if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
		qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
		return;
	}
}

static void
qla2x00_terminate_rport_io(struct fc_rport *rport)
{
	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;

	if (!fcport)
		return;

	if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
		return;

	if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
		qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
		return;
	}
	/*
	 * At this point all fcport's software-states are cleared. Perform any
	 * final cleanup of firmware resources (PCBs and XCBs).
	 */
	if (fcport->loop_id != FC_NO_LOOP_ID) {
		if (IS_FWI2_CAPABLE(fcport->vha->hw))
			fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
			    fcport->loop_id, fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa);
		else
			qla2x00_port_logout(fcport->vha, fcport);
	}
}

static int
qla2x00_issue_lip(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);

	if (IS_QLAFX00(vha->hw))
		return 0;

	qla2x00_loop_reset(vha);
	return 0;
}

static struct fc_host_statistics *
qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	int rval;
	struct link_statistics *stats;
	dma_addr_t stats_dma;
	struct fc_host_statistics *pfc_host_stat;

	pfc_host_stat = &vha->fc_host_stat;
	memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));

	if (IS_QLAFX00(vha->hw))
		goto done;

	if (test_bit(UNLOADING, &vha->dpc_flags))
		goto done;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto done;

	if (qla2x00_reset_active(vha))
		goto done;

	stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma);
	if (stats == NULL) {
		ql_log(ql_log_warn, vha, 0x707d,
		    "Failed to allocate memory for stats.\n");
		goto done;
	}
	memset(stats, 0, DMA_POOL_SIZE);

	rval = QLA_FUNCTION_FAILED;
	if (IS_FWI2_CAPABLE(ha)) {
		rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma);
	} else if (atomic_read(&base_vha->loop_state) == LOOP_READY &&
	    !ha->dpc_active) {
		/* Must be in a 'READY' state for statistics retrieval. */
		rval = qla2x00_get_link_status(base_vha, base_vha->loop_id,
		    stats, stats_dma);
	}

	if (rval != QLA_SUCCESS)
		goto done_free;

	pfc_host_stat->link_failure_count = stats->link_fail_cnt;
	pfc_host_stat->loss_of_sync_count = stats->loss_sync_cnt;
	pfc_host_stat->loss_of_signal_count = stats->loss_sig_cnt;
	pfc_host_stat->prim_seq_protocol_err_count = stats->prim_seq_err_cnt;
	pfc_host_stat->invalid_tx_word_count = stats->inval_xmit_word_cnt;
	pfc_host_stat->invalid_crc_count = stats->inval_crc_cnt;
	if (IS_FWI2_CAPABLE(ha)) {
		pfc_host_stat->lip_count = stats->lip_cnt;
		pfc_host_stat->tx_frames = stats->tx_frames;
		pfc_host_stat->rx_frames = stats->rx_frames;
		pfc_host_stat->dumped_frames = stats->discarded_frames;
		pfc_host_stat->nos_count = stats->nos_rcvd;
		pfc_host_stat->error_frames =
			stats->dropped_frames + stats->discarded_frames;
		pfc_host_stat->rx_words = vha->qla_stats.input_bytes;
		pfc_host_stat->tx_words = vha->qla_stats.output_bytes;
	}
	pfc_host_stat->fcp_control_requests = vha->qla_stats.control_requests;
	pfc_host_stat->fcp_input_requests = vha->qla_stats.input_requests;
	pfc_host_stat->fcp_output_requests = vha->qla_stats.output_requests;
	pfc_host_stat->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20;
	pfc_host_stat->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20;
	pfc_host_stat->seconds_since_last_reset =
		get_jiffies_64() - vha->qla_stats.jiffies_at_last_reset;
	do_div(pfc_host_stat->seconds_since_last_reset, HZ);

done_free:
	dma_pool_free(ha->s_dma_pool, stats, stats_dma);
done:
	return pfc_host_stat;
}

static void
qla2x00_reset_host_stats(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);

	memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));

	vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
}

static void
qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);

	qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost));
}

static void
qla2x00_set_host_system_hostname(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);

	set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
}

static void
qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);
	uint8_t node_name[WWN_SIZE] = { 0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF, 0xFF, 0xFF};
	u64 fabric_name = wwn_to_u64(node_name);

	if (vha->device_flags & SWITCH_FOUND)
		fabric_name = wwn_to_u64(vha->fabric_node_name);

	fc_host_fabric_name(shost) = fabric_name;
}

static void
qla2x00_get_host_port_state(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);
	struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);

	if (!base_vha->flags.online) {
		fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
		return;
	}

	switch (atomic_read(&base_vha->loop_state)) {
	case LOOP_UPDATE:
		fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
		break;
	case LOOP_DOWN:
		if (test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags))
			fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
		else
			fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
		break;
	case LOOP_DEAD:
		fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
		break;
	case LOOP_READY:
		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
		break;
	default:
		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
		break;
	}
}

static int
qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
{
	int ret = 0;
	uint8_t qos = 0;
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	scsi_qla_host_t *vha = NULL;
	struct qla_hw_data *ha = base_vha->hw;
	uint16_t options = 0;
	int cnt;
	struct req_que *req = ha->req_q_map[0];

	ret = qla24xx_vport_create_req_sanity_check(fc_vport);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x707e,
		    "Vport sanity check failed, status %x\n", ret);
		return (ret);
	}

	vha = qla24xx_create_vhost(fc_vport);
	if (vha == NULL) {
		ql_log(ql_log_warn, vha, 0x707f, "Vport create host failed.\n");
		return FC_VPORT_FAILED;
	}
	if (disable) {
		atomic_set(&vha->vp_state, VP_OFFLINE);
		fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
	} else
		atomic_set(&vha->vp_state, VP_FAILED);

	/* ready to create vport */
	ql_log(ql_log_info, vha, 0x7080,
	    "VP entry id %d assigned.\n", vha->vp_idx);

	/* initialized vport states */
	atomic_set(&vha->loop_state, LOOP_DOWN);
	vha->vp_err_state = VP_ERR_PORTDWN;
	vha->vp_prev_err_state = VP_ERR_UNKWN;
	/* Check if physical ha port is Up */
	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
	    atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
		/* Don't retry or attempt login of this virtual port */
		ql_dbg(ql_dbg_user, vha, 0x7081,
		    "Vport loop state is not UP.\n");
		atomic_set(&vha->loop_state, LOOP_DEAD);
		if (!disable)
			fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
	}

	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
		if (ha->fw_attributes & BIT_4) {
			int prot = 0, guard;
			vha->flags.difdix_supported = 1;
			ql_dbg(ql_dbg_user, vha, 0x7082,
			    "Registered for DIF/DIX type 1 and 3 protection.\n");
			if (ql2xenabledif == 1)
				prot = SHOST_DIX_TYPE0_PROTECTION;
			scsi_host_set_prot(vha->host,
			    prot | SHOST_DIF_TYPE1_PROTECTION
			    | SHOST_DIF_TYPE2_PROTECTION
			    | SHOST_DIF_TYPE3_PROTECTION
			    | SHOST_DIX_TYPE1_PROTECTION
			    | SHOST_DIX_TYPE2_PROTECTION
			    | SHOST_DIX_TYPE3_PROTECTION);

			guard = SHOST_DIX_GUARD_CRC;

			if (IS_PI_IPGUARD_CAPABLE(ha) &&
			    (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
				guard |= SHOST_DIX_GUARD_IP;

			scsi_host_set_guard(vha->host, guard);
		} else
			vha->flags.difdix_supported = 0;
	}

	if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
	    &ha->pdev->dev)) {
		ql_dbg(ql_dbg_user, vha, 0x7083,
		    "scsi_add_host failure for VP[%d].\n", vha->vp_idx);
		goto vport_create_failed_2;
	}

	/* initialize attributes */
	fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
	fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
	fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
	fc_host_supported_classes(vha->host) =
		fc_host_supported_classes(base_vha->host);
	fc_host_supported_speeds(vha->host) =
		fc_host_supported_speeds(base_vha->host);

	qlt_vport_create(vha, ha);
	qla24xx_vport_disable(fc_vport, disable);

	if (ha->flags.cpu_affinity_enabled) {
		req = ha->req_q_map[1];
		ql_dbg(ql_dbg_multiq, vha, 0xc000,
		    "Request queue %p attached with "
		    "VP[%d], cpu affinity=%d\n",
		    req, vha->vp_idx, ha->flags.cpu_affinity_enabled);
		goto vport_queue;
	} else if (ql2xmaxqueues == 1 || !ha->npiv_info)
		goto vport_queue;
	/* Create a request queue in QoS mode for the vport */
	for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
		if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
		    && memcmp(ha->npiv_info[cnt].node_name, vha->node_name,
			    8) == 0) {
			qos = ha->npiv_info[cnt].q_qos;
			break;
		}
	}

	if (qos) {
		ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
		    qos);
		if (!ret)
			ql_log(ql_log_warn, vha, 0x7084,
			    "Can't create request queue for VP[%d]\n",
			    vha->vp_idx);
		else {
			ql_dbg(ql_dbg_multiq, vha, 0xc001,
			    "Request Que:%d QoS:%d created for VP[%d]\n",
			    ret, qos, vha->vp_idx);
			ql_dbg(ql_dbg_user, vha, 0x7085,
			    "Request Que:%d QoS:%d created for VP[%d]\n",
			    ret, qos, vha->vp_idx);
			req = ha->req_q_map[ret];
		}
	}

vport_queue:
	vha->req = req;
	return 0;

vport_create_failed_2:
	qla24xx_disable_vp(vha);
	qla24xx_deallocate_vp_id(vha);
	scsi_host_put(vha->host);
	return FC_VPORT_FAILED;
}

static int
qla24xx_vport_delete(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *vha = fc_vport->dd_data;
	struct qla_hw_data *ha = vha->hw;
	uint16_t id = vha->vp_idx;

	while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
	    test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
		msleep(1000);

	qla24xx_disable_vp(vha);

	vha->flags.delete_progress = 1;

	fc_remove_host(vha->host);

	scsi_remove_host(vha->host);

	/* Allow timer to run to drain queued items when removing the vport */
	qla24xx_deallocate_vp_id(vha);

	if (vha->timer_active) {
		qla2x00_vp_stop_timer(vha);
		ql_dbg(ql_dbg_user, vha, 0x7086,
		    "Timer for the VP[%d] has stopped\n", vha->vp_idx);
	}

	BUG_ON(atomic_read(&vha->vref_count));

	qla2x00_free_fcports(vha);

	mutex_lock(&ha->vport_lock);
	ha->cur_vport_count--;
	clear_bit(vha->vp_idx, ha->vp_idx_map);
	mutex_unlock(&ha->vport_lock);

	if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
		if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
			ql_log(ql_log_warn, vha, 0x7087,
			    "Queue delete failed.\n");
	}

	ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
	scsi_host_put(vha->host);
	return 0;
}

static int
qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
{
	scsi_qla_host_t *vha = fc_vport->dd_data;

	if (disable)
		qla24xx_disable_vp(vha);
	else
		qla24xx_enable_vp(vha);

	return 0;
}

/* FC transport template for the physical (base) port. */
struct fc_function_template qla2xxx_transport_functions = {

	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,
	.show_host_supported_speeds = 1,

	.get_host_port_id = qla2x00_get_host_port_id,
	.show_host_port_id = 1,
	.get_host_speed = qla2x00_get_host_speed,
	.show_host_speed = 1,
	.get_host_port_type = qla2x00_get_host_port_type,
	.show_host_port_type = 1,
	.get_host_symbolic_name = qla2x00_get_host_symbolic_name,
	.show_host_symbolic_name = 1,
	.set_host_system_hostname = qla2x00_set_host_system_hostname,
	.show_host_system_hostname = 1,
	.get_host_fabric_name = qla2x00_get_host_fabric_name,
	.show_host_fabric_name = 1,
	.get_host_port_state = qla2x00_get_host_port_state,
	.show_host_port_state = 1,

	.dd_fcrport_size = sizeof(struct fc_port *),
	.show_rport_supported_classes = 1,

	.get_starget_node_name = qla2x00_get_starget_node_name,
	.show_starget_node_name = 1,
	.get_starget_port_name = qla2x00_get_starget_port_name,
	.show_starget_port_name = 1,
	.get_starget_port_id = qla2x00_get_starget_port_id,
	.show_starget_port_id = 1,

	.set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
	.show_rport_dev_loss_tmo = 1,

	.issue_fc_host_lip = qla2x00_issue_lip,
	.dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
	.terminate_rport_io = qla2x00_terminate_rport_io,
	.get_fc_host_stats = qla2x00_get_fc_host_stats,
	.reset_fc_host_stats = qla2x00_reset_host_stats,

	.vport_create = qla24xx_vport_create,
	.vport_disable = qla24xx_vport_disable,
	.vport_delete = qla24xx_vport_delete,
	.bsg_request = qla24xx_bsg_request,
	.bsg_timeout = qla24xx_bsg_timeout,
};

/* FC transport template for NPIV virtual ports (no vport create/delete). */
struct fc_function_template qla2xxx_transport_vport_functions = {

	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,

	.get_host_port_id = qla2x00_get_host_port_id,
	.show_host_port_id = 1,
	.get_host_speed = qla2x00_get_host_speed,
	.show_host_speed = 1,
	.get_host_port_type = qla2x00_get_host_port_type,
	.show_host_port_type = 1,
	.get_host_symbolic_name = qla2x00_get_host_symbolic_name,
	.show_host_symbolic_name = 1,
	.set_host_system_hostname = qla2x00_set_host_system_hostname,
	.show_host_system_hostname = 1,
	.get_host_fabric_name = qla2x00_get_host_fabric_name,
	.show_host_fabric_name = 1,
	.get_host_port_state = qla2x00_get_host_port_state,
	.show_host_port_state = 1,

	.dd_fcrport_size = sizeof(struct fc_port *),
	.show_rport_supported_classes = 1,

	.get_starget_node_name = qla2x00_get_starget_node_name,
	.show_starget_node_name = 1,
	.get_starget_port_name = qla2x00_get_starget_port_name,
	.show_starget_port_name = 1,
	.get_starget_port_id = qla2x00_get_starget_port_id,
	.show_starget_port_id = 1,

	.set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
	.show_rport_dev_loss_tmo = 1,

	.issue_fc_host_lip = qla2x00_issue_lip,
	.dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
	.terminate_rport_io = qla2x00_terminate_rport_io,
	.get_fc_host_stats = qla2x00_get_fc_host_stats,
	.reset_fc_host_stats = qla2x00_reset_host_stats,

	.bsg_request = qla24xx_bsg_request,
	.bsg_timeout = qla24xx_bsg_timeout,
};

void
qla2x00_init_host_attr(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	u32 speed = FC_PORTSPEED_UNKNOWN;

	fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
	fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
	fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
	fc_host_supported_classes(vha->host) = ha->tgt.enable_class_2 ?
			(FC_COS_CLASS2|FC_COS_CLASS3) : FC_COS_CLASS3;
	fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
	fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;

	if (IS_CNA_CAPABLE(ha))
		speed = FC_PORTSPEED_10GBIT;
	else if (IS_QLA2031(ha))
		speed = FC_PORTSPEED_16GBIT | FC_PORTSPEED_8GBIT |
		    FC_PORTSPEED_4GBIT;
	else if (IS_QLA25XX(ha))
		speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
		    FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
	else if (IS_QLA24XX_TYPE(ha))
		speed = FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
		    FC_PORTSPEED_1GBIT;
	else if (IS_QLA23XX(ha))
		speed = FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
	else if (IS_QLAFX00(ha))
		speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
		    FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
	else
		speed = FC_PORTSPEED_1GBIT;
	fc_host_supported_speeds(vha->host) = speed;
}