/*
 * zfcp device driver
 *
 * Interface to Linux SCSI midlayer.
 *
 * Copyright IBM Corp. 2002, 2016
 */

#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <scsi/fc/fc_fcp.h>
#include <scsi/scsi_eh.h>
#include <linux/atomic.h>
#include "zfcp_ext.h"
#include "zfcp_dbf.h"
#include "zfcp_fc.h"
#include "zfcp_reqlist.h"

static unsigned int default_depth = 32;
module_param_named(queue_depth, default_depth, uint, 0600);
MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices");

static bool enable_dif;
module_param_named(dif, enable_dif, bool, 0400);
MODULE_PARM_DESC(dif, "Enable DIF/DIX data integrity support");

static bool allow_lun_scan = 1;
module_param(allow_lun_scan, bool, 0600);
MODULE_PARM_DESC(allow_lun_scan, "For NPIV, scan and attach all storage LUNs");

static void zfcp_scsi_slave_destroy(struct scsi_device *sdev)
{
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);

	/* if previous slave_alloc returned early, there is nothing to do */
	if (!zfcp_sdev->port)
		return;

	zfcp_erp_lun_shutdown_wait(sdev, "scssd_1");
	put_device(&zfcp_sdev->port->dev);
}

static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
{
	if (sdp->tagged_supported)
		scsi_change_queue_depth(sdp, default_depth);
	return 0;
}

static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
{
	set_host_byte(scpnt, result);
	zfcp_dbf_scsi_fail_send(scpnt);
	scpnt->scsi_done(scpnt);
}

static
int zfcp_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scpnt)
{
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
	struct fc_rport *rport = starget_to_rport(scsi_target(scpnt->device));
	int status, scsi_result, ret;

	/* reset the status for this request */
	scpnt->result = 0;
	scpnt->host_scribble = NULL;

	scsi_result = fc_remote_port_chkready(rport);
	if (unlikely(scsi_result)) {
		scpnt->result = scsi_result;
		zfcp_dbf_scsi_fail_send(scpnt);
		scpnt->scsi_done(scpnt);
		return 0;
	}

	status = atomic_read(&zfcp_sdev->status);
	if (unlikely(status & ZFCP_STATUS_COMMON_ERP_FAILED) &&
	    !(atomic_read(&zfcp_sdev->port->status) &
	      ZFCP_STATUS_COMMON_ERP_FAILED)) {
		/* only LUN access denied, but port is good
		 * not covered by FC transport, have to fail here */
		zfcp_scsi_command_fail(scpnt, DID_ERROR);
		return 0;
	}

	if (unlikely(!(status & ZFCP_STATUS_COMMON_UNBLOCKED))) {
		/* This could be
		 * call to rport_delete pending: mimic retry from
		 * fc_remote_port_chkready until rport is BLOCKED
		 */
		zfcp_scsi_command_fail(scpnt, DID_IMM_RETRY);
		return 0;
	}

	ret = zfcp_fsf_fcp_cmnd(scpnt);
	if (unlikely(ret == -EBUSY))
		return SCSI_MLQUEUE_DEVICE_BUSY;
	else if (unlikely(ret < 0))
		return SCSI_MLQUEUE_HOST_BUSY;

	return ret;
}

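/*
 * Added descriptive comment: zfcp_scsi_slave_alloc() looks up the zfcp
 * objects backing a new SCSI device. It resolves the remote port by WWPN
 * and accepts the LUN only if it was configured as a zfcp unit, or if
 * automatic LUN scanning is allowed on an NPIV-mode adapter. On success
 * the LUN is opened through error recovery before returning.
 */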
static int zfcp_scsi_slave_alloc(struct scsi_device *sdev)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	struct zfcp_adapter *adapter =
		(struct zfcp_adapter *) sdev->host->hostdata[0];
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
	struct zfcp_port *port;
	struct zfcp_unit *unit;
	int npiv = adapter->connection_features & FSF_FEATURE_NPIV_MODE;

	port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
	if (!port)
		return -ENXIO;

	unit = zfcp_unit_find(port, zfcp_scsi_dev_lun(sdev));
	if (unit)
		put_device(&unit->dev);

	if (!unit && !(allow_lun_scan && npiv)) {
		put_device(&port->dev);
		return -ENXIO;
	}

	zfcp_sdev->port = port;
	zfcp_sdev->latencies.write.channel.min = 0xFFFFFFFF;
	zfcp_sdev->latencies.write.fabric.min = 0xFFFFFFFF;
	zfcp_sdev->latencies.read.channel.min = 0xFFFFFFFF;
	zfcp_sdev->latencies.read.fabric.min = 0xFFFFFFFF;
	zfcp_sdev->latencies.cmd.channel.min = 0xFFFFFFFF;
	zfcp_sdev->latencies.cmd.fabric.min = 0xFFFFFFFF;
	spin_lock_init(&zfcp_sdev->latencies.lock);

	zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING);
	zfcp_erp_lun_reopen(sdev, 0, "scsla_1");
	zfcp_erp_wait(port->adapter);

	return 0;
}

static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
{
	struct Scsi_Host *scsi_host = scpnt->device->host;
	struct zfcp_adapter *adapter =
		(struct zfcp_adapter *) scsi_host->hostdata[0];
	struct zfcp_fsf_req *old_req, *abrt_req;
	unsigned long flags;
	unsigned long old_reqid = (unsigned long) scpnt->host_scribble;
	int retval = SUCCESS, ret;
	int retry = 3;
	char *dbf_tag;

	/* avoid race condition between late normal completion and abort */
	write_lock_irqsave(&adapter->abort_lock, flags);

	old_req = zfcp_reqlist_find(adapter->req_list, old_reqid);
	if (!old_req) {
		write_unlock_irqrestore(&adapter->abort_lock, flags);
		zfcp_dbf_scsi_abort("abrt_or", scpnt, NULL);
		return FAILED; /* completion could be in progress */
	}
	old_req->data = NULL;

	/* don't access old fsf_req after releasing the abort_lock */
	write_unlock_irqrestore(&adapter->abort_lock, flags);

	while (retry--) {
		abrt_req = zfcp_fsf_abort_fcp_cmnd(scpnt);
		if (abrt_req)
			break;

		zfcp_erp_wait(adapter);
		ret = fc_block_scsi_eh(scpnt);
		if (ret) {
			zfcp_dbf_scsi_abort("abrt_bl", scpnt, NULL);
			return ret;
		}
		if (!(atomic_read(&adapter->status) &
		      ZFCP_STATUS_COMMON_RUNNING)) {
			zfcp_dbf_scsi_abort("abrt_ru", scpnt, NULL);
			return SUCCESS;
		}
	}
	if (!abrt_req) {
		zfcp_dbf_scsi_abort("abrt_ar", scpnt, NULL);
		return FAILED;
	}

	wait_for_completion(&abrt_req->completion);

	if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED)
		dbf_tag = "abrt_ok";
	else if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED)
		dbf_tag = "abrt_nn";
	else {
		dbf_tag = "abrt_fa";
		retval = FAILED;
	}
	zfcp_dbf_scsi_abort(dbf_tag, scpnt, abrt_req);
	zfcp_fsf_req_free(abrt_req);
	return retval;
}

struct zfcp_scsi_req_filter {
	u8 tmf_scope;
	u32 lun_handle;
	u32 port_handle;
};

static void zfcp_scsi_forget_cmnd(struct zfcp_fsf_req *old_req, void *data)
{
	struct zfcp_scsi_req_filter *filter =
		(struct zfcp_scsi_req_filter *)data;

	/* already aborted - prevent side-effects - or not a SCSI command */
	if (old_req->data == NULL || old_req->fsf_command != FSF_QTCB_FCP_CMND)
		return;

	/* (tmf_scope == FCP_TMF_TGT_RESET || tmf_scope == FCP_TMF_LUN_RESET) */
	if (old_req->qtcb->header.port_handle != filter->port_handle)
		return;

	if (filter->tmf_scope == FCP_TMF_LUN_RESET &&
	    old_req->qtcb->header.lun_handle != filter->lun_handle)
		return;

	zfcp_dbf_scsi_nullcmnd((struct scsi_cmnd *)old_req->data, old_req);
	old_req->data = NULL;
}

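/*
 * Added descriptive comment: zfcp_scsi_forget_cmnds() is called after a
 * successful LUN or target reset. Under the abort_lock it walks all pending
 * FSF requests and clears the SCSI command reference of every FCP command
 * within the scope of the task management function, so that a late
 * completion is no longer reported to the SCSI midlayer.
 */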
static void zfcp_scsi_forget_cmnds(struct zfcp_scsi_dev *zsdev, u8 tm_flags)
{
	struct zfcp_adapter *adapter = zsdev->port->adapter;
	struct zfcp_scsi_req_filter filter = {
		.tmf_scope = FCP_TMF_TGT_RESET,
		.port_handle = zsdev->port->handle,
	};
	unsigned long flags;

	if (tm_flags == FCP_TMF_LUN_RESET) {
		filter.tmf_scope = FCP_TMF_LUN_RESET;
		filter.lun_handle = zsdev->lun_handle;
	}

	/*
	 * abort_lock secures against other processings - in the abort-function
	 * and normal cmnd-handler - of (struct zfcp_fsf_req *)->data
	 */
	write_lock_irqsave(&adapter->abort_lock, flags);
	zfcp_reqlist_apply_for_all(adapter->req_list, zfcp_scsi_forget_cmnd,
				   &filter);
	write_unlock_irqrestore(&adapter->abort_lock, flags);
}

static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
{
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
	struct zfcp_fsf_req *fsf_req = NULL;
	int retval = SUCCESS, ret;
	int retry = 3;

	while (retry--) {
		fsf_req = zfcp_fsf_fcp_task_mgmt(scpnt, tm_flags);
		if (fsf_req)
			break;

		zfcp_erp_wait(adapter);
		ret = fc_block_scsi_eh(scpnt);
		if (ret)
			return ret;

		if (!(atomic_read(&adapter->status) &
		      ZFCP_STATUS_COMMON_RUNNING)) {
			zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags);
			return SUCCESS;
		}
	}
	if (!fsf_req)
		return FAILED;

	wait_for_completion(&fsf_req->completion);

	if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
		zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags);
		retval = FAILED;
	} else {
		zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags);
		zfcp_scsi_forget_cmnds(zfcp_sdev, tm_flags);
	}

	zfcp_fsf_req_free(fsf_req);
	return retval;
}

static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
{
	return zfcp_task_mgmt_function(scpnt, FCP_TMF_LUN_RESET);
}

static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt)
{
	return zfcp_task_mgmt_function(scpnt, FCP_TMF_TGT_RESET);
}

static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
{
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
	int ret;

	zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
	zfcp_erp_wait(adapter);
	ret = fc_block_scsi_eh(scpnt);
	if (ret)
		return ret;

	return SUCCESS;
}

struct scsi_transport_template *zfcp_scsi_transport_template;

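/*
 * Added descriptive comment: default SCSI host template. The sg_tablesize
 * and max_sectors initializers below are conservative starting values
 * ("GCD") and are adjusted later when the adapter's actual limits are known;
 * one such place is the DIX branch of zfcp_scsi_set_prot() in this file.
 */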
static struct scsi_host_template zfcp_scsi_host_template = {
	.module = THIS_MODULE,
	.name = "zfcp",
	.queuecommand = zfcp_scsi_queuecommand,
	.eh_abort_handler = zfcp_scsi_eh_abort_handler,
	.eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
	.eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler,
	.eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler,
	.slave_alloc = zfcp_scsi_slave_alloc,
	.slave_configure = zfcp_scsi_slave_configure,
	.slave_destroy = zfcp_scsi_slave_destroy,
	.change_queue_depth = scsi_change_queue_depth,
	.proc_name = "zfcp",
	.can_queue = 4096,
	.this_id = -1,
	.sg_tablesize = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
			  * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2),
			/* GCD, adjusted later */
	.max_sectors = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
			 * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2) * 8,
		       /* GCD, adjusted later */
	.dma_boundary = ZFCP_QDIO_SBALE_LEN - 1,
	.use_clustering = 1,
	.shost_attrs = zfcp_sysfs_shost_attrs,
	.sdev_attrs = zfcp_sysfs_sdev_attrs,
	.track_queue_depth = 1,
};

/**
 * zfcp_scsi_adapter_register - Register SCSI and FC host with SCSI midlayer
 * @adapter: The zfcp adapter to register with the SCSI midlayer
 */
int zfcp_scsi_adapter_register(struct zfcp_adapter *adapter)
{
	struct ccw_dev_id dev_id;

	if (adapter->scsi_host)
		return 0;

	ccw_device_get_id(adapter->ccw_device, &dev_id);
	/* register adapter as SCSI host with mid layer of SCSI stack */
	adapter->scsi_host = scsi_host_alloc(&zfcp_scsi_host_template,
					     sizeof (struct zfcp_adapter *));
	if (!adapter->scsi_host) {
		dev_err(&adapter->ccw_device->dev,
			"Registering the FCP device with the "
			"SCSI stack failed\n");
		return -EIO;
	}

	/* tell the SCSI stack some characteristics of this adapter */
	adapter->scsi_host->max_id = 511;
	adapter->scsi_host->max_lun = 0xFFFFFFFF;
	adapter->scsi_host->max_channel = 0;
	adapter->scsi_host->unique_id = dev_id.devno;
	adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */
	adapter->scsi_host->transportt = zfcp_scsi_transport_template;

	adapter->scsi_host->hostdata[0] = (unsigned long) adapter;

	if (scsi_add_host(adapter->scsi_host, &adapter->ccw_device->dev)) {
		scsi_host_put(adapter->scsi_host);
		return -EIO;
	}

	return 0;
}

/**
 * zfcp_scsi_adapter_unregister - Unregister SCSI and FC host from SCSI midlayer
 * @adapter: The zfcp adapter to unregister.
 */
void zfcp_scsi_adapter_unregister(struct zfcp_adapter *adapter)
{
	struct Scsi_Host *shost;
	struct zfcp_port *port;

	shost = adapter->scsi_host;
	if (!shost)
		return;

	read_lock_irq(&adapter->port_list_lock);
	list_for_each_entry(port, &adapter->port_list, list)
		port->rport = NULL;
	read_unlock_irq(&adapter->port_list_lock);

	fc_remove_host(shost);
	scsi_remove_host(shost);
	scsi_host_put(shost);
	adapter->scsi_host = NULL;
}

static struct fc_host_statistics*
zfcp_init_fc_host_stats(struct zfcp_adapter *adapter)
{
	struct fc_host_statistics *fc_stats;

	if (!adapter->fc_stats) {
		fc_stats = kmalloc(sizeof(*fc_stats), GFP_KERNEL);
		if (!fc_stats)
			return NULL;
		adapter->fc_stats = fc_stats; /* freed in adapter_release */
	}
	memset(adapter->fc_stats, 0, sizeof(*adapter->fc_stats));
	return adapter->fc_stats;
}

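/*
 * Added descriptive comment: zfcp_adjust_fc_host_stats() reports statistics
 * relative to the last user-triggered reset. The counters returned by the
 * FCP channel are cumulative, so the delta between the current values and
 * the snapshot saved in zfcp_reset_fc_host_stats() is reported instead.
 */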
static void zfcp_adjust_fc_host_stats(struct fc_host_statistics *fc_stats,
				      struct fsf_qtcb_bottom_port *data,
				      struct fsf_qtcb_bottom_port *old)
{
	fc_stats->seconds_since_last_reset =
		data->seconds_since_last_reset - old->seconds_since_last_reset;
	fc_stats->tx_frames = data->tx_frames - old->tx_frames;
	fc_stats->tx_words = data->tx_words - old->tx_words;
	fc_stats->rx_frames = data->rx_frames - old->rx_frames;
	fc_stats->rx_words = data->rx_words - old->rx_words;
	fc_stats->lip_count = data->lip - old->lip;
	fc_stats->nos_count = data->nos - old->nos;
	fc_stats->error_frames = data->error_frames - old->error_frames;
	fc_stats->dumped_frames = data->dumped_frames - old->dumped_frames;
	fc_stats->link_failure_count = data->link_failure - old->link_failure;
	fc_stats->loss_of_sync_count = data->loss_of_sync - old->loss_of_sync;
	fc_stats->loss_of_signal_count =
		data->loss_of_signal - old->loss_of_signal;
	fc_stats->prim_seq_protocol_err_count =
		data->psp_error_counts - old->psp_error_counts;
	fc_stats->invalid_tx_word_count =
		data->invalid_tx_words - old->invalid_tx_words;
	fc_stats->invalid_crc_count = data->invalid_crcs - old->invalid_crcs;
	fc_stats->fcp_input_requests =
		data->input_requests - old->input_requests;
	fc_stats->fcp_output_requests =
		data->output_requests - old->output_requests;
	fc_stats->fcp_control_requests =
		data->control_requests - old->control_requests;
	fc_stats->fcp_input_megabytes = data->input_mb - old->input_mb;
	fc_stats->fcp_output_megabytes = data->output_mb - old->output_mb;
}

static void zfcp_set_fc_host_stats(struct fc_host_statistics *fc_stats,
				   struct fsf_qtcb_bottom_port *data)
{
	fc_stats->seconds_since_last_reset = data->seconds_since_last_reset;
	fc_stats->tx_frames = data->tx_frames;
	fc_stats->tx_words = data->tx_words;
	fc_stats->rx_frames = data->rx_frames;
	fc_stats->rx_words = data->rx_words;
	fc_stats->lip_count = data->lip;
	fc_stats->nos_count = data->nos;
	fc_stats->error_frames = data->error_frames;
	fc_stats->dumped_frames = data->dumped_frames;
	fc_stats->link_failure_count = data->link_failure;
	fc_stats->loss_of_sync_count = data->loss_of_sync;
	fc_stats->loss_of_signal_count = data->loss_of_signal;
	fc_stats->prim_seq_protocol_err_count = data->psp_error_counts;
	fc_stats->invalid_tx_word_count = data->invalid_tx_words;
	fc_stats->invalid_crc_count = data->invalid_crcs;
	fc_stats->fcp_input_requests = data->input_requests;
	fc_stats->fcp_output_requests = data->output_requests;
	fc_stats->fcp_control_requests = data->control_requests;
	fc_stats->fcp_input_megabytes = data->input_mb;
	fc_stats->fcp_output_megabytes = data->output_mb;
}

static struct fc_host_statistics *zfcp_get_fc_host_stats(struct Scsi_Host *host)
{
	struct zfcp_adapter *adapter;
	struct fc_host_statistics *fc_stats;
	struct fsf_qtcb_bottom_port *data;
	int ret;

	adapter = (struct zfcp_adapter *)host->hostdata[0];
	fc_stats = zfcp_init_fc_host_stats(adapter);
	if (!fc_stats)
		return NULL;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	ret = zfcp_fsf_exchange_port_data_sync(adapter->qdio, data);
	if (ret) {
		kfree(data);
		return NULL;
	}

	if (adapter->stats_reset &&
	    ((jiffies/HZ - adapter->stats_reset) <
	     data->seconds_since_last_reset))
		zfcp_adjust_fc_host_stats(fc_stats, data,
					  adapter->stats_reset_data);
	else
		zfcp_set_fc_host_stats(fc_stats, data);

	kfree(data);
	return fc_stats;
}

static void zfcp_reset_fc_host_stats(struct Scsi_Host *shost)
{
	struct zfcp_adapter *adapter;
	struct fsf_qtcb_bottom_port *data;
	int ret;

	adapter = (struct zfcp_adapter *)shost->hostdata[0];
	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return;

	ret = zfcp_fsf_exchange_port_data_sync(adapter->qdio, data);
	if (ret)
		kfree(data);
	else {
		adapter->stats_reset = jiffies/HZ;
		kfree(adapter->stats_reset_data);
		adapter->stats_reset_data = data; /* finally freed in
						     adapter_release */
	}
}

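/*
 * Added descriptive comment: zfcp_get_host_port_state() translates the
 * zfcp-internal adapter status bits into the generic FC transport port
 * states reported through sysfs: online, linkdown, error, or unknown.
 */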
static void zfcp_get_host_port_state(struct Scsi_Host *shost)
{
	struct zfcp_adapter *adapter =
		(struct zfcp_adapter *)shost->hostdata[0];
	int status = atomic_read(&adapter->status);

	if ((status & ZFCP_STATUS_COMMON_RUNNING) &&
	    !(status & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED))
		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
	else if (status & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
		fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
	else if (status & ZFCP_STATUS_COMMON_ERP_FAILED)
		fc_host_port_state(shost) = FC_PORTSTATE_ERROR;
	else
		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
}

static void zfcp_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
{
	rport->dev_loss_tmo = timeout;
}

/**
 * zfcp_scsi_terminate_rport_io - Terminate all I/O on a rport
 * @rport: The FC rport where to terminate I/O
 *
 * Abort all pending SCSI commands for a port by closing the
 * port. Using a reopen avoids a conflict with a shutdown
 * overwriting a reopen. The "forced" ensures that a disappeared port
 * is not opened again as valid due to the cached plogi data in
 * non-NPIV mode.
 */
static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
{
	struct zfcp_port *port;
	struct Scsi_Host *shost = rport_to_shost(rport);
	struct zfcp_adapter *adapter =
		(struct zfcp_adapter *)shost->hostdata[0];

	port = zfcp_get_port_by_wwpn(adapter, rport->port_name);

	if (port) {
		zfcp_erp_port_forced_reopen(port, 0, "sctrpi1");
		put_device(&port->dev);
	}
}

static void zfcp_scsi_rport_register(struct zfcp_port *port)
{
	struct fc_rport_identifiers ids;
	struct fc_rport *rport;

	if (port->rport)
		return;

	ids.node_name = port->wwnn;
	ids.port_name = port->wwpn;
	ids.port_id = port->d_id;
	ids.roles = FC_RPORT_ROLE_FCP_TARGET;

	zfcp_dbf_rec_trig("scpaddy", port->adapter, port, NULL,
			  ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD,
			  ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD);
	rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids);
	if (!rport) {
		dev_err(&port->adapter->ccw_device->dev,
			"Registering port 0x%016Lx failed\n",
			(unsigned long long)port->wwpn);
		return;
	}

	rport->maxframe_size = port->maxframe_size;
	rport->supported_classes = port->supported_classes;
	port->rport = rport;
	port->starget_id = rport->scsi_target_id;

	zfcp_unit_queue_scsi_scan(port);
}

static void zfcp_scsi_rport_block(struct zfcp_port *port)
{
	struct fc_rport *rport = port->rport;

	if (rport) {
		zfcp_dbf_rec_trig("scpdely", port->adapter, port, NULL,
				  ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL,
				  ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL);
		fc_remote_port_delete(rport);
		port->rport = NULL;
	}
}

void zfcp_scsi_schedule_rport_register(struct zfcp_port *port)
{
	get_device(&port->dev);
	port->rport_task = RPORT_ADD;

	if (!queue_work(port->adapter->work_queue, &port->rport_work))
		put_device(&port->dev);
}

void zfcp_scsi_schedule_rport_block(struct zfcp_port *port)
{
	get_device(&port->dev);
	port->rport_task = RPORT_DEL;

	if (port->rport && queue_work(port->adapter->work_queue,
				      &port->rport_work))
		return;

	put_device(&port->dev);
}

void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *adapter)
{
	unsigned long flags;
	struct zfcp_port *port;

	read_lock_irqsave(&adapter->port_list_lock, flags);
	list_for_each_entry(port, &adapter->port_list, list)
		zfcp_scsi_schedule_rport_block(port);
	read_unlock_irqrestore(&adapter->port_list_lock, flags);
}

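/*
 * Added descriptive comment: zfcp_scsi_rport_work() is the work queue
 * handler behind zfcp_scsi_schedule_rport_register() and
 * zfcp_scsi_schedule_rport_block(). It registers or blocks the FC remote
 * port as requested and then drops the port reference taken when the work
 * item was queued.
 */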
void zfcp_scsi_rport_work(struct work_struct *work)
{
	struct zfcp_port *port = container_of(work, struct zfcp_port,
					      rport_work);

	while (port->rport_task) {
		if (port->rport_task == RPORT_ADD) {
			port->rport_task = RPORT_NONE;
			zfcp_scsi_rport_register(port);
		} else {
			port->rport_task = RPORT_NONE;
			zfcp_scsi_rport_block(port);
		}
	}

	put_device(&port->dev);
}

/**
 * zfcp_scsi_set_prot - Configure DIF/DIX support in scsi_host
 * @adapter: The adapter where to configure DIF/DIX for the SCSI host
 */
void zfcp_scsi_set_prot(struct zfcp_adapter *adapter)
{
	unsigned int mask = 0;
	unsigned int data_div;
	struct Scsi_Host *shost = adapter->scsi_host;

	data_div = atomic_read(&adapter->status) &
		   ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED;

	if (enable_dif &&
	    adapter->adapter_features & FSF_FEATURE_DIF_PROT_TYPE1)
		mask |= SHOST_DIF_TYPE1_PROTECTION;

	if (enable_dif && data_div &&
	    adapter->adapter_features & FSF_FEATURE_DIX_PROT_TCPIP) {
		mask |= SHOST_DIX_TYPE1_PROTECTION;
		scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP);
		shost->sg_prot_tablesize = adapter->qdio->max_sbale_per_req / 2;
		shost->sg_tablesize = adapter->qdio->max_sbale_per_req / 2;
		shost->max_sectors = shost->sg_tablesize * 8;
	}

	scsi_host_set_prot(shost, mask);
}

/**
 * zfcp_scsi_dif_sense_error - Report DIF/DIX error as driver sense error
 * @scmd: The SCSI command to report the error for
 * @ascq: The ASCQ to put in the sense buffer
 *
 * See the error handling in sd_done for the sense codes used here.
 * Set DID_SOFT_ERROR to retry the request, if possible.
 */
void zfcp_scsi_dif_sense_error(struct scsi_cmnd *scmd, int ascq)
{
	scsi_build_sense_buffer(1, scmd->sense_buffer,
				ILLEGAL_REQUEST, 0x10, ascq);
	set_driver_byte(scmd, DRIVER_SENSE);
	scmd->result |= SAM_STAT_CHECK_CONDITION;
	set_host_byte(scmd, DID_SOFT_ERROR);
}

struct fc_function_template zfcp_transport_functions = {
	.show_starget_port_id = 1,
	.show_starget_port_name = 1,
	.show_starget_node_name = 1,
	.show_rport_supported_classes = 1,
	.show_rport_maxframe_size = 1,
	.show_rport_dev_loss_tmo = 1,
	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_permanent_port_name = 1,
	.show_host_supported_classes = 1,
	.show_host_supported_fc4s = 1,
	.show_host_supported_speeds = 1,
	.show_host_maxframe_size = 1,
	.show_host_serial_number = 1,
	.get_fc_host_stats = zfcp_get_fc_host_stats,
	.reset_fc_host_stats = zfcp_reset_fc_host_stats,
	.set_rport_dev_loss_tmo = zfcp_set_rport_dev_loss_tmo,
	.get_host_port_state = zfcp_get_host_port_state,
	.terminate_rport_io = zfcp_scsi_terminate_rport_io,
	.show_host_port_state = 1,
	.show_host_active_fc4s = 1,
	.bsg_request = zfcp_fc_exec_bsg_job,
	.bsg_timeout = zfcp_fc_timeout_bsg_job,
	/* no functions registered for following dynamic attributes but
	   directly set by LLDD */
	.show_host_port_type = 1,
	.show_host_symbolic_name = 1,
	.show_host_speed = 1,
	.show_host_port_id = 1,
	.dd_bsg_size = sizeof(struct zfcp_fsf_ct_els),
};