// SPDX-License-Identifier: GPL-2.0
/*
 * zfcp device driver
 *
 * sysfs attributes.
 *
 * Copyright IBM Corp. 2008, 2010
 */

#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/slab.h>
#include "zfcp_ext.h"

#define ZFCP_DEV_ATTR(_feat, _name, _mode, _show, _store) \
struct device_attribute dev_attr_##_feat##_##_name = __ATTR(_name, _mode, \
							     _show, _store)
#define ZFCP_DEFINE_ATTR(_feat_def, _feat, _name, _format, _value) \
static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \
						   struct device_attribute *at,\
						   char *buf) \
{ \
	struct _feat_def *_feat = container_of(dev, struct _feat_def, dev); \
 \
	return sprintf(buf, _format, _value); \
} \
static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \
		     zfcp_sysfs_##_feat##_##_name##_show, NULL);

#define ZFCP_DEFINE_ATTR_CONST(_feat, _name, _format, _value) \
static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \
						   struct device_attribute *at,\
						   char *buf) \
{ \
	return sprintf(buf, _format, _value); \
} \
static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \
		     zfcp_sysfs_##_feat##_##_name##_show, NULL);

#define ZFCP_DEFINE_A_ATTR(_name, _format, _value) \
static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \
						 struct device_attribute *at, \
						 char *buf) \
{ \
	struct ccw_device *cdev = to_ccwdev(dev); \
	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); \
	int i; \
 \
	if (!adapter) \
		return -ENODEV; \
 \
	i = sprintf(buf, _format, _value); \
	zfcp_ccw_adapter_put(adapter); \
	return i; \
} \
static ZFCP_DEV_ATTR(adapter, _name, S_IRUGO, \
		     zfcp_sysfs_adapter_##_name##_show, NULL);
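
/*
 * For illustration only (editorial note, not part of the original file):
 * an invocation such as
 *	ZFCP_DEFINE_A_ATTR(status, "0x%08x\n", atomic_read(&adapter->status))
 * roughly expands to a read-only show routine plus its attribute, i.e.
 *
 *	static ssize_t zfcp_sysfs_adapter_status_show(struct device *dev,
 *						      struct device_attribute *at,
 *						      char *buf)
 *	{ ... look up the adapter from the ccw_device, print, drop it ... }
 *	static struct device_attribute dev_attr_adapter_status =
 *		__ATTR(status, S_IRUGO, zfcp_sysfs_adapter_status_show, NULL);
 *
 * so each macro use below contributes one read-only sysfs file.
 */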

ZFCP_DEFINE_A_ATTR(status, "0x%08x\n", atomic_read(&adapter->status));
ZFCP_DEFINE_A_ATTR(peer_wwnn, "0x%016llx\n",
		   (unsigned long long) adapter->peer_wwnn);
ZFCP_DEFINE_A_ATTR(peer_wwpn, "0x%016llx\n",
		   (unsigned long long) adapter->peer_wwpn);
ZFCP_DEFINE_A_ATTR(peer_d_id, "0x%06x\n", adapter->peer_d_id);
ZFCP_DEFINE_A_ATTR(card_version, "0x%04x\n", adapter->hydra_version);
ZFCP_DEFINE_A_ATTR(lic_version, "0x%08x\n", adapter->fsf_lic_version);
ZFCP_DEFINE_A_ATTR(hardware_version, "0x%08x\n", adapter->hardware_version);
ZFCP_DEFINE_A_ATTR(in_recovery, "%d\n", (atomic_read(&adapter->status) &
					 ZFCP_STATUS_COMMON_ERP_INUSE) != 0);

ZFCP_DEFINE_ATTR(zfcp_port, port, status, "0x%08x\n",
		 atomic_read(&port->status));
ZFCP_DEFINE_ATTR(zfcp_port, port, in_recovery, "%d\n",
		 (atomic_read(&port->status) &
		  ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
ZFCP_DEFINE_ATTR_CONST(port, access_denied, "%d\n", 0);

ZFCP_DEFINE_ATTR(zfcp_unit, unit, status, "0x%08x\n",
		 zfcp_unit_sdev_status(unit));
ZFCP_DEFINE_ATTR(zfcp_unit, unit, in_recovery, "%d\n",
		 (zfcp_unit_sdev_status(unit) &
		  ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n",
		 (zfcp_unit_sdev_status(unit) &
		  ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
ZFCP_DEFINE_ATTR_CONST(unit, access_shared, "%d\n", 0);
ZFCP_DEFINE_ATTR_CONST(unit, access_readonly, "%d\n", 0);

static ssize_t zfcp_sysfs_port_failed_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);

	if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
		return sprintf(buf, "1\n");

	return sprintf(buf, "0\n");
}

static ssize_t zfcp_sysfs_port_failed_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
	unsigned long val;

	if (kstrtoul(buf, 0, &val) || val != 0)
		return -EINVAL;

	zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_RUNNING);
	zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, "sypfai2");
	zfcp_erp_wait(port->adapter);

	return count;
}
static ZFCP_DEV_ATTR(port, failed, S_IWUSR | S_IRUGO,
		     zfcp_sysfs_port_failed_show,
		     zfcp_sysfs_port_failed_store);

static ssize_t zfcp_sysfs_unit_failed_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
	struct scsi_device *sdev;
	unsigned int status, failed = 1;

	sdev = zfcp_unit_sdev(unit);
	if (sdev) {
		status = atomic_read(&sdev_to_zfcp(sdev)->status);
		failed = status & ZFCP_STATUS_COMMON_ERP_FAILED ? 1 : 0;
		scsi_device_put(sdev);
	}

	return sprintf(buf, "%d\n", failed);
}

static ssize_t zfcp_sysfs_unit_failed_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
	unsigned long val;
	struct scsi_device *sdev;

	if (kstrtoul(buf, 0, &val) || val != 0)
		return -EINVAL;

	sdev = zfcp_unit_sdev(unit);
	if (sdev) {
		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING);
		zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
				    "syufai2");
		zfcp_erp_wait(unit->port->adapter);
	} else
		zfcp_unit_scsi_scan(unit);

	return count;
}
static ZFCP_DEV_ATTR(unit, failed, S_IWUSR | S_IRUGO,
		     zfcp_sysfs_unit_failed_show,
		     zfcp_sysfs_unit_failed_store);

static ssize_t zfcp_sysfs_adapter_failed_show(struct device *dev,
					      struct device_attribute *attr,
					      char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
	int i;

	if (!adapter)
		return -ENODEV;

	if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
		i = sprintf(buf, "1\n");
	else
		i = sprintf(buf, "0\n");

	zfcp_ccw_adapter_put(adapter);
	return i;
}

static ssize_t zfcp_sysfs_adapter_failed_store(struct device *dev,
					       struct device_attribute *attr,
					       const char *buf, size_t count)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
	unsigned long val;
	int retval = 0;

	if (!adapter)
		return -ENODEV;

	if (kstrtoul(buf, 0, &val) || val != 0) {
		retval = -EINVAL;
		goto out;
	}

	zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
				"syafai2");
	zfcp_erp_wait(adapter);
out:
	zfcp_ccw_adapter_put(adapter);
	return retval ? retval : (ssize_t) count;
}
static ZFCP_DEV_ATTR(adapter, failed, S_IWUSR | S_IRUGO,
		     zfcp_sysfs_adapter_failed_show,
		     zfcp_sysfs_adapter_failed_store);
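
/*
 * Editorial usage note on the "failed" attributes above: a read returns
 * 1 while error recovery has given up on the object and 0 otherwise;
 * writing "0" (any other value is rejected with -EINVAL) marks the
 * object running again, triggers a reopen and waits for recovery to
 * finish.  A hypothetical example from user space, with a made-up
 * adapter bus ID:
 *	echo 0 > /sys/bus/ccw/drivers/zfcp/0.0.1234/failed
 */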

static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);

	if (!adapter)
		return -ENODEV;

	/*
	 * The user's wish is our command: immediately schedule and flush a
	 * worker to conduct a synchronous port scan, that is, neither
	 * a random delay nor a rate limit is applied here.
	 */
	queue_delayed_work(adapter->work_queue, &adapter->scan_work, 0);
	flush_delayed_work(&adapter->scan_work);
	zfcp_ccw_adapter_put(adapter);

	return (ssize_t) count;
}
static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL,
		     zfcp_sysfs_port_rescan_store);
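
/*
 * Usage sketch (editorial note; path shown with a hypothetical bus ID):
 * writing any value triggers the scan and only returns once it has
 * completed, e.g.
 *	echo 1 > /sys/bus/ccw/drivers/zfcp/0.0.1234/port_rescan
 */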

DEFINE_MUTEX(zfcp_sysfs_port_units_mutex);

static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
	struct zfcp_port *port;
	u64 wwpn;
	int retval = -EINVAL;

	if (!adapter)
		return -ENODEV;

	if (kstrtoull(buf, 0, (unsigned long long *) &wwpn))
		goto out;

	port = zfcp_get_port_by_wwpn(adapter, wwpn);
	if (!port)
		goto out;
	else
		retval = 0;

	mutex_lock(&zfcp_sysfs_port_units_mutex);
	if (atomic_read(&port->units) > 0) {
		retval = -EBUSY;
		mutex_unlock(&zfcp_sysfs_port_units_mutex);
		goto out;
	}
	/* port is about to be removed, so no more unit_add */
	atomic_set(&port->units, -1);
	mutex_unlock(&zfcp_sysfs_port_units_mutex);

	write_lock_irq(&adapter->port_list_lock);
	list_del(&port->list);
	write_unlock_irq(&adapter->port_list_lock);

	put_device(&port->dev);

	zfcp_erp_port_shutdown(port, 0, "syprs_1");
	device_unregister(&port->dev);
out:
	zfcp_ccw_adapter_put(adapter);
	return retval ? retval : (ssize_t) count;
}
static ZFCP_DEV_ATTR(adapter, port_remove, S_IWUSR, NULL,
		     zfcp_sysfs_port_remove_store);

static struct attribute *zfcp_adapter_attrs[] = {
	&dev_attr_adapter_failed.attr,
	&dev_attr_adapter_in_recovery.attr,
	&dev_attr_adapter_port_remove.attr,
	&dev_attr_adapter_port_rescan.attr,
	&dev_attr_adapter_peer_wwnn.attr,
	&dev_attr_adapter_peer_wwpn.attr,
	&dev_attr_adapter_peer_d_id.attr,
	&dev_attr_adapter_card_version.attr,
	&dev_attr_adapter_lic_version.attr,
	&dev_attr_adapter_status.attr,
	&dev_attr_adapter_hardware_version.attr,
	NULL
};

struct attribute_group zfcp_sysfs_adapter_attrs = {
	.attrs = zfcp_adapter_attrs,
};

static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
	u64 fcp_lun;
	int retval;

	if (kstrtoull(buf, 0, (unsigned long long *) &fcp_lun))
		return -EINVAL;

	retval = zfcp_unit_add(port, fcp_lun);
	if (retval)
		return retval;

	return count;
}
static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store);

static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
	u64 fcp_lun;

	if (kstrtoull(buf, 0, (unsigned long long *) &fcp_lun))
		return -EINVAL;

	if (zfcp_unit_remove(port, fcp_lun))
		return -EINVAL;

	return count;
}
static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store);
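
/*
 * Usage sketch for the two port-level attributes above (editorial note;
 * paths use a hypothetical bus ID and WWPN): the written value is parsed
 * with kstrtoull(), so hexadecimal 64-bit FCP LUNs are accepted, e.g.
 *	echo 0x4010400000000000 > \
 *	  /sys/bus/ccw/drivers/zfcp/0.0.1234/0x5005076300c18154/unit_add
 */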

static struct attribute *zfcp_port_attrs[] = {
	&dev_attr_unit_add.attr,
	&dev_attr_unit_remove.attr,
	&dev_attr_port_failed.attr,
	&dev_attr_port_in_recovery.attr,
	&dev_attr_port_status.attr,
	&dev_attr_port_access_denied.attr,
	NULL
};
static struct attribute_group zfcp_port_attr_group = {
	.attrs = zfcp_port_attrs,
};
const struct attribute_group *zfcp_port_attr_groups[] = {
	&zfcp_port_attr_group,
	NULL,
};

static struct attribute *zfcp_unit_attrs[] = {
	&dev_attr_unit_failed.attr,
	&dev_attr_unit_in_recovery.attr,
	&dev_attr_unit_status.attr,
	&dev_attr_unit_access_denied.attr,
	&dev_attr_unit_access_shared.attr,
	&dev_attr_unit_access_readonly.attr,
	NULL
};
static struct attribute_group zfcp_unit_attr_group = {
	.attrs = zfcp_unit_attrs,
};
const struct attribute_group *zfcp_unit_attr_groups[] = {
	&zfcp_unit_attr_group,
	NULL,
};

#define ZFCP_DEFINE_LATENCY_ATTR(_name) \
static ssize_t \
zfcp_sysfs_unit_##_name##_latency_show(struct device *dev, \
				       struct device_attribute *attr, \
				       char *buf) { \
	struct scsi_device *sdev = to_scsi_device(dev); \
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); \
	struct zfcp_latencies *lat = &zfcp_sdev->latencies; \
	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; \
	unsigned long long fsum, fmin, fmax, csum, cmin, cmax, cc; \
 \
	spin_lock_bh(&lat->lock); \
	fsum = lat->_name.fabric.sum * adapter->timer_ticks; \
	fmin = lat->_name.fabric.min * adapter->timer_ticks; \
	fmax = lat->_name.fabric.max * adapter->timer_ticks; \
	csum = lat->_name.channel.sum * adapter->timer_ticks; \
	cmin = lat->_name.channel.min * adapter->timer_ticks; \
	cmax = lat->_name.channel.max * adapter->timer_ticks; \
	cc = lat->_name.counter; \
	spin_unlock_bh(&lat->lock); \
 \
	do_div(fsum, 1000); \
	do_div(fmin, 1000); \
	do_div(fmax, 1000); \
	do_div(csum, 1000); \
	do_div(cmin, 1000); \
	do_div(cmax, 1000); \
 \
	return sprintf(buf, "%llu %llu %llu %llu %llu %llu %llu\n", \
		       fmin, fmax, fsum, cmin, cmax, csum, cc); \
} \
static ssize_t \
zfcp_sysfs_unit_##_name##_latency_store(struct device *dev, \
					struct device_attribute *attr, \
					const char *buf, size_t count) \
{ \
	struct scsi_device *sdev = to_scsi_device(dev); \
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); \
	struct zfcp_latencies *lat = &zfcp_sdev->latencies; \
	unsigned long flags; \
 \
	spin_lock_irqsave(&lat->lock, flags); \
	lat->_name.fabric.sum = 0; \
	lat->_name.fabric.min = 0xFFFFFFFF; \
	lat->_name.fabric.max = 0; \
	lat->_name.channel.sum = 0; \
	lat->_name.channel.min = 0xFFFFFFFF; \
	lat->_name.channel.max = 0; \
	lat->_name.counter = 0; \
	spin_unlock_irqrestore(&lat->lock, flags); \
 \
	return (ssize_t) count; \
} \
static DEVICE_ATTR(_name##_latency, S_IWUSR | S_IRUGO, \
		   zfcp_sysfs_unit_##_name##_latency_show, \
		   zfcp_sysfs_unit_##_name##_latency_store);

ZFCP_DEFINE_LATENCY_ATTR(read);
ZFCP_DEFINE_LATENCY_ATTR(write);
ZFCP_DEFINE_LATENCY_ATTR(cmd);
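
/*
 * Editorial note on the *_latency attributes defined above: a read
 * returns seven space-separated values,
 *	fabric-min fabric-max fabric-sum channel-min channel-max channel-sum count
 * with the six latency values scaled by the adapter's timer_ticks and
 * divided by 1000; writing any value simply resets the collected
 * statistics, e.g.
 *	echo 0 > /sys/bus/scsi/devices/<h:c:t:l>/read_latency
 * (the <h:c:t:l> placeholder stands for the SCSI device address).
 */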

#define ZFCP_DEFINE_SCSI_ATTR(_name, _format, _value) \
static ssize_t zfcp_sysfs_scsi_##_name##_show(struct device *dev, \
					      struct device_attribute *attr, \
					      char *buf) \
{ \
	struct scsi_device *sdev = to_scsi_device(dev); \
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); \
 \
	return sprintf(buf, _format, _value); \
} \
static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_scsi_##_name##_show, NULL);

ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n",
		      dev_name(&zfcp_sdev->port->adapter->ccw_device->dev));
ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n",
		      (unsigned long long) zfcp_sdev->port->wwpn);

static ssize_t zfcp_sysfs_scsi_fcp_lun_show(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	return sprintf(buf, "0x%016llx\n", zfcp_scsi_dev_lun(sdev));
}
static DEVICE_ATTR(fcp_lun, S_IRUGO, zfcp_sysfs_scsi_fcp_lun_show, NULL);

ZFCP_DEFINE_SCSI_ATTR(zfcp_access_denied, "%d\n",
		      (atomic_read(&zfcp_sdev->status) &
		       ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);

static ssize_t zfcp_sysfs_scsi_zfcp_failed_show(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	unsigned int status = atomic_read(&sdev_to_zfcp(sdev)->status);
	unsigned int failed = status & ZFCP_STATUS_COMMON_ERP_FAILED ? 1 : 0;

	return sprintf(buf, "%d\n", failed);
}

static ssize_t zfcp_sysfs_scsi_zfcp_failed_store(struct device *dev,
						 struct device_attribute *attr,
						 const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	unsigned long val;

	if (kstrtoul(buf, 0, &val) || val != 0)
		return -EINVAL;

	zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING);
	zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
			    "syufai3");
	zfcp_erp_wait(sdev_to_zfcp(sdev)->port->adapter);

	return count;
}
static DEVICE_ATTR(zfcp_failed, S_IWUSR | S_IRUGO,
		   zfcp_sysfs_scsi_zfcp_failed_show,
		   zfcp_sysfs_scsi_zfcp_failed_store);

ZFCP_DEFINE_SCSI_ATTR(zfcp_in_recovery, "%d\n",
		      (atomic_read(&zfcp_sdev->status) &
		       ZFCP_STATUS_COMMON_ERP_INUSE) != 0);

ZFCP_DEFINE_SCSI_ATTR(zfcp_status, "0x%08x\n",
		      atomic_read(&zfcp_sdev->status));

struct device_attribute *zfcp_sysfs_sdev_attrs[] = {
	&dev_attr_fcp_lun,
	&dev_attr_wwpn,
	&dev_attr_hba_id,
	&dev_attr_read_latency,
	&dev_attr_write_latency,
	&dev_attr_cmd_latency,
	&dev_attr_zfcp_access_denied,
	&dev_attr_zfcp_failed,
	&dev_attr_zfcp_in_recovery,
	&dev_attr_zfcp_status,
	NULL
};

static ssize_t zfcp_sysfs_adapter_util_show(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct Scsi_Host *scsi_host = dev_to_shost(dev);
	struct fsf_qtcb_bottom_port *qtcb_port;
	struct zfcp_adapter *adapter;
	int retval;

	adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
	if (!(adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA))
		return -EOPNOTSUPP;

	qtcb_port = kzalloc(sizeof(struct fsf_qtcb_bottom_port), GFP_KERNEL);
	if (!qtcb_port)
		return -ENOMEM;

	retval = zfcp_fsf_exchange_port_data_sync(adapter->qdio, qtcb_port);
	if (!retval)
		retval = sprintf(buf, "%u %u %u\n", qtcb_port->cp_util,
				 qtcb_port->cb_util, qtcb_port->a_util);
	kfree(qtcb_port);
	return retval;
}
static DEVICE_ATTR(utilization, S_IRUGO, zfcp_sysfs_adapter_util_show, NULL);

static int zfcp_sysfs_adapter_ex_config(struct device *dev,
					struct fsf_statistics_info *stat_inf)
{
	struct Scsi_Host *scsi_host = dev_to_shost(dev);
	struct fsf_qtcb_bottom_config *qtcb_config;
	struct zfcp_adapter *adapter;
	int retval;

	adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
	if (!(adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA))
		return -EOPNOTSUPP;

	qtcb_config = kzalloc(sizeof(struct fsf_qtcb_bottom_config),
			      GFP_KERNEL);
	if (!qtcb_config)
		return -ENOMEM;

	retval = zfcp_fsf_exchange_config_data_sync(adapter->qdio, qtcb_config);
	if (!retval)
		*stat_inf = qtcb_config->stat_info;

	kfree(qtcb_config);
	return retval;
}
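
/*
 * Editorial note on the statistics attributes defined below: every read
 * of "requests", "megabytes" or "seconds_active" issues a synchronous
 * exchange-config FSF request via zfcp_sysfs_adapter_ex_config() and
 * then formats the selected counters from the returned
 * fsf_statistics_info, so these reads are comparatively expensive.
 */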

#define ZFCP_SHOST_ATTR(_name, _format, _arg...) \
static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \
						 struct device_attribute *attr, \
						 char *buf) \
{ \
	struct fsf_statistics_info stat_info; \
	int retval; \
 \
	retval = zfcp_sysfs_adapter_ex_config(dev, &stat_info); \
	if (retval) \
		return retval; \
 \
	return sprintf(buf, _format, ## _arg); \
} \
static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_adapter_##_name##_show, NULL);

ZFCP_SHOST_ATTR(requests, "%llu %llu %llu\n",
		(unsigned long long) stat_info.input_req,
		(unsigned long long) stat_info.output_req,
		(unsigned long long) stat_info.control_req);

ZFCP_SHOST_ATTR(megabytes, "%llu %llu\n",
		(unsigned long long) stat_info.input_mb,
		(unsigned long long) stat_info.output_mb);

ZFCP_SHOST_ATTR(seconds_active, "%llu\n",
		(unsigned long long) stat_info.seconds_act);

static ssize_t zfcp_sysfs_adapter_q_full_show(struct device *dev,
					      struct device_attribute *attr,
					      char *buf)
{
	struct Scsi_Host *scsi_host = class_to_shost(dev);
	struct zfcp_qdio *qdio =
		((struct zfcp_adapter *) scsi_host->hostdata[0])->qdio;
	u64 util;

	spin_lock_bh(&qdio->stat_lock);
	util = qdio->req_q_util;
	spin_unlock_bh(&qdio->stat_lock);

	return sprintf(buf, "%d %llu\n", atomic_read(&qdio->req_q_full),
		       (unsigned long long)util);
}
static DEVICE_ATTR(queue_full, S_IRUGO, zfcp_sysfs_adapter_q_full_show, NULL);

struct device_attribute *zfcp_sysfs_shost_attrs[] = {
	&dev_attr_utilization,
	&dev_attr_requests,
	&dev_attr_megabytes,
	&dev_attr_seconds_active,
	&dev_attr_queue_full,
	NULL
};