1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * zfcp device driver 4 * 5 * Implementation of FSF commands. 6 * 7 * Copyright IBM Corp. 2002, 2020 8 */ 9 10 #define KMSG_COMPONENT "zfcp" 11 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 12 13 #include <linux/blktrace_api.h> 14 #include <linux/jiffies.h> 15 #include <linux/types.h> 16 #include <linux/slab.h> 17 #include <scsi/fc/fc_els.h> 18 #include "zfcp_ext.h" 19 #include "zfcp_fc.h" 20 #include "zfcp_dbf.h" 21 #include "zfcp_qdio.h" 22 #include "zfcp_reqlist.h" 23 #include "zfcp_diag.h" 24 25 /* timeout for FSF requests sent during scsi_eh: abort or FCP TMF */ 26 #define ZFCP_FSF_SCSI_ER_TIMEOUT (10*HZ) 27 /* timeout for: exchange config/port data outside ERP, or open/close WKA port */ 28 #define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ) 29 30 struct kmem_cache *zfcp_fsf_qtcb_cache; 31 32 static bool ber_stop = true; 33 module_param(ber_stop, bool, 0600); 34 MODULE_PARM_DESC(ber_stop, 35 "Shuts down FCP devices for FCP channels that report a bit-error count in excess of its threshold (default on)"); 36 37 static void zfcp_fsf_request_timeout_handler(struct timer_list *t) 38 { 39 struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer); 40 struct zfcp_adapter *adapter = fsf_req->adapter; 41 42 zfcp_qdio_siosl(adapter); 43 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 44 "fsrth_1"); 45 } 46 47 static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req, 48 unsigned long timeout) 49 { 50 fsf_req->timer.function = zfcp_fsf_request_timeout_handler; 51 fsf_req->timer.expires = jiffies + timeout; 52 add_timer(&fsf_req->timer); 53 } 54 55 static void zfcp_fsf_start_erp_timer(struct zfcp_fsf_req *fsf_req) 56 { 57 BUG_ON(!fsf_req->erp_action); 58 fsf_req->timer.function = zfcp_erp_timeout_handler; 59 fsf_req->timer.expires = jiffies + 30 * HZ; 60 add_timer(&fsf_req->timer); 61 } 62 63 /* association between FSF command and FSF QTCB type */ 64 static u32 fsf_qtcb_type[] = { 65 [FSF_QTCB_FCP_CMND] = FSF_IO_COMMAND, 66 [FSF_QTCB_ABORT_FCP_CMND] = FSF_SUPPORT_COMMAND, 67 [FSF_QTCB_OPEN_PORT_WITH_DID] = FSF_SUPPORT_COMMAND, 68 [FSF_QTCB_OPEN_LUN] = FSF_SUPPORT_COMMAND, 69 [FSF_QTCB_CLOSE_LUN] = FSF_SUPPORT_COMMAND, 70 [FSF_QTCB_CLOSE_PORT] = FSF_SUPPORT_COMMAND, 71 [FSF_QTCB_CLOSE_PHYSICAL_PORT] = FSF_SUPPORT_COMMAND, 72 [FSF_QTCB_SEND_ELS] = FSF_SUPPORT_COMMAND, 73 [FSF_QTCB_SEND_GENERIC] = FSF_SUPPORT_COMMAND, 74 [FSF_QTCB_EXCHANGE_CONFIG_DATA] = FSF_CONFIG_COMMAND, 75 [FSF_QTCB_EXCHANGE_PORT_DATA] = FSF_PORT_COMMAND, 76 [FSF_QTCB_DOWNLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND, 77 [FSF_QTCB_UPLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND 78 }; 79 80 static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req) 81 { 82 dev_err(&req->adapter->ccw_device->dev, "FCP device not " 83 "operational because of an unsupported FC class\n"); 84 zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1"); 85 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 86 } 87 88 /** 89 * zfcp_fsf_req_free - free memory used by fsf request 90 * @req: pointer to struct zfcp_fsf_req 91 */ 92 void zfcp_fsf_req_free(struct zfcp_fsf_req *req) 93 { 94 if (likely(req->pool)) { 95 if (likely(!zfcp_fsf_req_is_status_read_buffer(req))) 96 mempool_free(req->qtcb, req->adapter->pool.qtcb_pool); 97 mempool_free(req, req->pool); 98 return; 99 } 100 101 if (likely(!zfcp_fsf_req_is_status_read_buffer(req))) 102 kmem_cache_free(zfcp_fsf_qtcb_cache, req->qtcb); 103 kfree(req); 104 } 105 106 static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req) 107 { 108 unsigned long flags; 109 struct fsf_status_read_buffer *sr_buf = req->data; 110 struct zfcp_adapter *adapter = req->adapter; 111 struct zfcp_port *port; 112 int d_id = ntoh24(sr_buf->d_id); 113 114 read_lock_irqsave(&adapter->port_list_lock, flags); 115 list_for_each_entry(port, &adapter->port_list, list) 116 if (port->d_id == d_id) { 117 zfcp_erp_port_reopen(port, 0, "fssrpc1"); 118 break; 119 } 120 read_unlock_irqrestore(&adapter->port_list_lock, flags); 121 } 122 123 void zfcp_fsf_fc_host_link_down(struct zfcp_adapter *adapter) 124 { 125 struct Scsi_Host *shost = adapter->scsi_host; 126 127 adapter->hydra_version = 0; 128 adapter->peer_wwpn = 0; 129 adapter->peer_wwnn = 0; 130 adapter->peer_d_id = 0; 131 132 /* if there is no shost yet, we have nothing to zero-out */ 133 if (shost == NULL) 134 return; 135 136 fc_host_port_id(shost) = 0; 137 fc_host_fabric_name(shost) = 0; 138 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; 139 fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN; 140 snprintf(fc_host_model(shost), FC_SYMBOLIC_NAME_SIZE, "0x%04x", 0); 141 memset(fc_host_active_fc4s(shost), 0, FC_FC4_LIST_SIZE); 142 } 143 144 static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, 145 struct fsf_link_down_info *link_down) 146 { 147 struct zfcp_adapter *adapter = req->adapter; 148 149 if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED) 150 return; 151 152 atomic_or(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status); 153 154 zfcp_scsi_schedule_rports_block(adapter); 155 156 zfcp_fsf_fc_host_link_down(adapter); 157 158 if (!link_down) 159 goto out; 160 161 switch (link_down->error_code) { 162 case FSF_PSQ_LINK_NO_LIGHT: 163 dev_warn(&req->adapter->ccw_device->dev, 164 "There is no light signal from the local " 165 "fibre channel cable\n"); 166 break; 167 case FSF_PSQ_LINK_WRAP_PLUG: 168 dev_warn(&req->adapter->ccw_device->dev, 169 "There is a wrap plug instead of a fibre " 170 "channel cable\n"); 171 break; 172 case FSF_PSQ_LINK_NO_FCP: 173 dev_warn(&req->adapter->ccw_device->dev, 174 "The adjacent fibre channel node does not " 175 "support FCP\n"); 176 break; 177 case FSF_PSQ_LINK_FIRMWARE_UPDATE: 178 dev_warn(&req->adapter->ccw_device->dev, 179 "The FCP device is suspended because of a " 180 "firmware update\n"); 181 break; 182 case FSF_PSQ_LINK_INVALID_WWPN: 183 dev_warn(&req->adapter->ccw_device->dev, 184 "The FCP device detected a WWPN that is " 185 "duplicate or not valid\n"); 186 break; 187 case FSF_PSQ_LINK_NO_NPIV_SUPPORT: 188 dev_warn(&req->adapter->ccw_device->dev, 189 "The fibre channel fabric does not support NPIV\n"); 190 break; 191 case FSF_PSQ_LINK_NO_FCP_RESOURCES: 192 dev_warn(&req->adapter->ccw_device->dev, 193 "The FCP adapter cannot support more NPIV ports\n"); 194 break; 195 case FSF_PSQ_LINK_NO_FABRIC_RESOURCES: 196 dev_warn(&req->adapter->ccw_device->dev, 197 "The adjacent switch cannot support " 198 "more NPIV ports\n"); 199 break; 200 case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE: 201 dev_warn(&req->adapter->ccw_device->dev, 202 "The FCP adapter could not log in to the " 203 "fibre channel fabric\n"); 204 break; 205 case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED: 206 dev_warn(&req->adapter->ccw_device->dev, 207 "The WWPN assignment file on the FCP adapter " 208 "has been damaged\n"); 209 break; 210 case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED: 211 dev_warn(&req->adapter->ccw_device->dev, 212 "The mode table on the FCP adapter " 213 "has been damaged\n"); 214 break; 215 case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT: 216 dev_warn(&req->adapter->ccw_device->dev, 217 "All NPIV ports on the FCP adapter have " 218 "been assigned\n"); 219 break; 220 default: 221 dev_warn(&req->adapter->ccw_device->dev, 222 "The link between the FCP adapter and " 223 "the FC fabric is down\n"); 224 } 225 out: 226 zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_ERP_FAILED); 227 } 228 229 static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req) 230 { 231 struct fsf_status_read_buffer *sr_buf = req->data; 232 struct fsf_link_down_info *ldi = 233 (struct fsf_link_down_info *) &sr_buf->payload; 234 235 switch (sr_buf->status_subtype) { 236 case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK: 237 case FSF_STATUS_READ_SUB_FDISC_FAILED: 238 zfcp_fsf_link_down_info_eval(req, ldi); 239 break; 240 case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE: 241 zfcp_fsf_link_down_info_eval(req, NULL); 242 } 243 } 244 245 static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req) 246 { 247 struct zfcp_adapter *adapter = req->adapter; 248 struct fsf_status_read_buffer *sr_buf = req->data; 249 250 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) { 251 zfcp_dbf_hba_fsf_uss("fssrh_1", req); 252 mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data); 253 zfcp_fsf_req_free(req); 254 return; 255 } 256 257 zfcp_dbf_hba_fsf_uss("fssrh_4", req); 258 259 switch (sr_buf->status_type) { 260 case FSF_STATUS_READ_PORT_CLOSED: 261 zfcp_fsf_status_read_port_closed(req); 262 break; 263 case FSF_STATUS_READ_INCOMING_ELS: 264 zfcp_fc_incoming_els(req); 265 break; 266 case FSF_STATUS_READ_SENSE_DATA_AVAIL: 267 break; 268 case FSF_STATUS_READ_BIT_ERROR_THRESHOLD: 269 zfcp_dbf_hba_bit_err("fssrh_3", req); 270 if (ber_stop) { 271 dev_warn(&adapter->ccw_device->dev, 272 "All paths over this FCP device are disused because of excessive bit errors\n"); 273 zfcp_erp_adapter_shutdown(adapter, 0, "fssrh_b"); 274 } else { 275 dev_warn(&adapter->ccw_device->dev, 276 "The error threshold for checksum statistics has been exceeded\n"); 277 } 278 break; 279 case FSF_STATUS_READ_LINK_DOWN: 280 zfcp_fsf_status_read_link_down(req); 281 zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKDOWN, 0); 282 break; 283 case FSF_STATUS_READ_LINK_UP: 284 dev_info(&adapter->ccw_device->dev, 285 "The local link has been restored\n"); 286 /* All ports should be marked as ready to run again */ 287 zfcp_erp_set_adapter_status(adapter, 288 ZFCP_STATUS_COMMON_RUNNING); 289 zfcp_erp_adapter_reopen(adapter, 290 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | 291 ZFCP_STATUS_COMMON_ERP_FAILED, 292 "fssrh_2"); 293 zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKUP, 0); 294 295 break; 296 case FSF_STATUS_READ_NOTIFICATION_LOST: 297 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS) 298 zfcp_fc_conditional_port_scan(adapter); 299 break; 300 case FSF_STATUS_READ_FEATURE_UPDATE_ALERT: 301 adapter->adapter_features = sr_buf->payload.word[0]; 302 break; 303 } 304 305 mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data); 306 zfcp_fsf_req_free(req); 307 308 atomic_inc(&adapter->stat_miss); 309 queue_work(adapter->work_queue, &adapter->stat_work); 310 } 311 312 static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req) 313 { 314 switch (req->qtcb->header.fsf_status_qual.word[0]) { 315 case FSF_SQ_FCP_RSP_AVAILABLE: 316 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 317 case FSF_SQ_NO_RETRY_POSSIBLE: 318 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 319 return; 320 case FSF_SQ_COMMAND_ABORTED: 321 break; 322 case FSF_SQ_NO_RECOM: 323 dev_err(&req->adapter->ccw_device->dev, 324 "The FCP adapter reported a problem " 325 "that cannot be recovered\n"); 326 zfcp_qdio_siosl(req->adapter); 327 zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1"); 328 break; 329 } 330 /* all non-return stats set FSFREQ_ERROR*/ 331 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 332 } 333 334 static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req) 335 { 336 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) 337 return; 338 339 switch (req->qtcb->header.fsf_status) { 340 case FSF_UNKNOWN_COMMAND: 341 dev_err(&req->adapter->ccw_device->dev, 342 "The FCP adapter does not recognize the command 0x%x\n", 343 req->qtcb->header.fsf_command); 344 zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1"); 345 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 346 break; 347 case FSF_ADAPTER_STATUS_AVAILABLE: 348 zfcp_fsf_fsfstatus_qual_eval(req); 349 break; 350 } 351 } 352 353 static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req) 354 { 355 struct zfcp_adapter *adapter = req->adapter; 356 struct fsf_qtcb *qtcb = req->qtcb; 357 union fsf_prot_status_qual *psq = &qtcb->prefix.prot_status_qual; 358 359 zfcp_dbf_hba_fsf_response(req); 360 361 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) { 362 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 363 return; 364 } 365 366 switch (qtcb->prefix.prot_status) { 367 case FSF_PROT_GOOD: 368 case FSF_PROT_FSF_STATUS_PRESENTED: 369 return; 370 case FSF_PROT_QTCB_VERSION_ERROR: 371 dev_err(&adapter->ccw_device->dev, 372 "QTCB version 0x%x not supported by FCP adapter " 373 "(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION, 374 psq->word[0], psq->word[1]); 375 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1"); 376 break; 377 case FSF_PROT_ERROR_STATE: 378 case FSF_PROT_SEQ_NUMB_ERROR: 379 zfcp_erp_adapter_reopen(adapter, 0, "fspse_2"); 380 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 381 break; 382 case FSF_PROT_UNSUPP_QTCB_TYPE: 383 dev_err(&adapter->ccw_device->dev, 384 "The QTCB type is not supported by the FCP adapter\n"); 385 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3"); 386 break; 387 case FSF_PROT_HOST_CONNECTION_INITIALIZING: 388 atomic_or(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, 389 &adapter->status); 390 break; 391 case FSF_PROT_DUPLICATE_REQUEST_ID: 392 dev_err(&adapter->ccw_device->dev, 393 "0x%Lx is an ambiguous request identifier\n", 394 (unsigned long long)qtcb->bottom.support.req_handle); 395 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4"); 396 break; 397 case FSF_PROT_LINK_DOWN: 398 zfcp_fsf_link_down_info_eval(req, &psq->link_down_info); 399 /* go through reopen to flush pending requests */ 400 zfcp_erp_adapter_reopen(adapter, 0, "fspse_6"); 401 break; 402 case FSF_PROT_REEST_QUEUE: 403 /* All ports should be marked as ready to run again */ 404 zfcp_erp_set_adapter_status(adapter, 405 ZFCP_STATUS_COMMON_RUNNING); 406 zfcp_erp_adapter_reopen(adapter, 407 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | 408 ZFCP_STATUS_COMMON_ERP_FAILED, 409 "fspse_8"); 410 break; 411 default: 412 dev_err(&adapter->ccw_device->dev, 413 "0x%x is not a valid transfer protocol status\n", 414 qtcb->prefix.prot_status); 415 zfcp_qdio_siosl(adapter); 416 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9"); 417 } 418 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 419 } 420 421 /** 422 * zfcp_fsf_req_complete - process completion of a FSF request 423 * @req: The FSF request that has been completed. 424 * 425 * When a request has been completed either from the FCP adapter, 426 * or it has been dismissed due to a queue shutdown, this function 427 * is called to process the completion status and trigger further 428 * events related to the FSF request. 429 */ 430 static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req) 431 { 432 if (unlikely(zfcp_fsf_req_is_status_read_buffer(req))) { 433 zfcp_fsf_status_read_handler(req); 434 return; 435 } 436 437 del_timer_sync(&req->timer); 438 zfcp_fsf_protstatus_eval(req); 439 zfcp_fsf_fsfstatus_eval(req); 440 req->handler(req); 441 442 if (req->erp_action) 443 zfcp_erp_notify(req->erp_action, 0); 444 445 if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP)) 446 zfcp_fsf_req_free(req); 447 else 448 complete(&req->completion); 449 } 450 451 /** 452 * zfcp_fsf_req_dismiss_all - dismiss all fsf requests 453 * @adapter: pointer to struct zfcp_adapter 454 * 455 * Never ever call this without shutting down the adapter first. 456 * Otherwise the adapter would continue using and corrupting s390 storage. 457 * Included BUG_ON() call to ensure this is done. 458 * ERP is supposed to be the only user of this function. 459 */ 460 void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter) 461 { 462 struct zfcp_fsf_req *req, *tmp; 463 LIST_HEAD(remove_queue); 464 465 BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP); 466 zfcp_reqlist_move(adapter->req_list, &remove_queue); 467 468 list_for_each_entry_safe(req, tmp, &remove_queue, list) { 469 list_del(&req->list); 470 req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; 471 zfcp_fsf_req_complete(req); 472 } 473 } 474 475 #define ZFCP_FSF_PORTSPEED_1GBIT (1 << 0) 476 #define ZFCP_FSF_PORTSPEED_2GBIT (1 << 1) 477 #define ZFCP_FSF_PORTSPEED_4GBIT (1 << 2) 478 #define ZFCP_FSF_PORTSPEED_10GBIT (1 << 3) 479 #define ZFCP_FSF_PORTSPEED_8GBIT (1 << 4) 480 #define ZFCP_FSF_PORTSPEED_16GBIT (1 << 5) 481 #define ZFCP_FSF_PORTSPEED_32GBIT (1 << 6) 482 #define ZFCP_FSF_PORTSPEED_64GBIT (1 << 7) 483 #define ZFCP_FSF_PORTSPEED_128GBIT (1 << 8) 484 #define ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED (1 << 15) 485 486 u32 zfcp_fsf_convert_portspeed(u32 fsf_speed) 487 { 488 u32 fdmi_speed = 0; 489 if (fsf_speed & ZFCP_FSF_PORTSPEED_1GBIT) 490 fdmi_speed |= FC_PORTSPEED_1GBIT; 491 if (fsf_speed & ZFCP_FSF_PORTSPEED_2GBIT) 492 fdmi_speed |= FC_PORTSPEED_2GBIT; 493 if (fsf_speed & ZFCP_FSF_PORTSPEED_4GBIT) 494 fdmi_speed |= FC_PORTSPEED_4GBIT; 495 if (fsf_speed & ZFCP_FSF_PORTSPEED_10GBIT) 496 fdmi_speed |= FC_PORTSPEED_10GBIT; 497 if (fsf_speed & ZFCP_FSF_PORTSPEED_8GBIT) 498 fdmi_speed |= FC_PORTSPEED_8GBIT; 499 if (fsf_speed & ZFCP_FSF_PORTSPEED_16GBIT) 500 fdmi_speed |= FC_PORTSPEED_16GBIT; 501 if (fsf_speed & ZFCP_FSF_PORTSPEED_32GBIT) 502 fdmi_speed |= FC_PORTSPEED_32GBIT; 503 if (fsf_speed & ZFCP_FSF_PORTSPEED_64GBIT) 504 fdmi_speed |= FC_PORTSPEED_64GBIT; 505 if (fsf_speed & ZFCP_FSF_PORTSPEED_128GBIT) 506 fdmi_speed |= FC_PORTSPEED_128GBIT; 507 if (fsf_speed & ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED) 508 fdmi_speed |= FC_PORTSPEED_NOT_NEGOTIATED; 509 return fdmi_speed; 510 } 511 512 static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req) 513 { 514 struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config; 515 struct zfcp_adapter *adapter = req->adapter; 516 struct fc_els_flogi *plogi; 517 518 /* adjust pointers for missing command code */ 519 plogi = (struct fc_els_flogi *) ((u8 *)&bottom->plogi_payload 520 - sizeof(u32)); 521 522 if (req->data) 523 memcpy(req->data, bottom, sizeof(*bottom)); 524 525 adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK; 526 adapter->stat_read_buf_num = max(bottom->status_read_buf_num, 527 (u16)FSF_STATUS_READS_RECOM); 528 529 /* no error return above here, otherwise must fix call chains */ 530 /* do not evaluate invalid fields */ 531 if (req->qtcb->header.fsf_status == FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE) 532 return 0; 533 534 adapter->hydra_version = bottom->adapter_type; 535 536 switch (bottom->fc_topology) { 537 case FSF_TOPO_P2P: 538 adapter->peer_d_id = ntoh24(bottom->peer_d_id); 539 adapter->peer_wwpn = be64_to_cpu(plogi->fl_wwpn); 540 adapter->peer_wwnn = be64_to_cpu(plogi->fl_wwnn); 541 break; 542 case FSF_TOPO_FABRIC: 543 break; 544 case FSF_TOPO_AL: 545 default: 546 dev_err(&adapter->ccw_device->dev, 547 "Unknown or unsupported arbitrated loop " 548 "fibre channel topology detected\n"); 549 zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1"); 550 return -EIO; 551 } 552 553 return 0; 554 } 555 556 static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req) 557 { 558 struct zfcp_adapter *adapter = req->adapter; 559 struct zfcp_diag_header *const diag_hdr = 560 &adapter->diagnostics->config_data.header; 561 struct fsf_qtcb *qtcb = req->qtcb; 562 struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config; 563 564 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 565 return; 566 567 adapter->fsf_lic_version = bottom->lic_version; 568 adapter->adapter_features = bottom->adapter_features; 569 adapter->connection_features = bottom->connection_features; 570 adapter->peer_wwpn = 0; 571 adapter->peer_wwnn = 0; 572 adapter->peer_d_id = 0; 573 574 switch (qtcb->header.fsf_status) { 575 case FSF_GOOD: 576 /* 577 * usually we wait with an update till the cache is too old, 578 * but because we have the data available, update it anyway 579 */ 580 zfcp_diag_update_xdata(diag_hdr, bottom, false); 581 582 zfcp_scsi_shost_update_config_data(adapter, bottom, false); 583 if (zfcp_fsf_exchange_config_evaluate(req)) 584 return; 585 586 if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) { 587 dev_err(&adapter->ccw_device->dev, 588 "FCP adapter maximum QTCB size (%d bytes) " 589 "is too small\n", 590 bottom->max_qtcb_size); 591 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1"); 592 return; 593 } 594 atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK, 595 &adapter->status); 596 break; 597 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE: 598 zfcp_diag_update_xdata(diag_hdr, bottom, true); 599 req->status |= ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE; 600 601 /* avoids adapter shutdown to be able to recognize 602 * events such as LINK UP */ 603 atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK, 604 &adapter->status); 605 zfcp_fsf_link_down_info_eval(req, 606 &qtcb->header.fsf_status_qual.link_down_info); 607 608 zfcp_scsi_shost_update_config_data(adapter, bottom, true); 609 if (zfcp_fsf_exchange_config_evaluate(req)) 610 return; 611 break; 612 default: 613 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3"); 614 return; 615 } 616 617 if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) 618 adapter->hardware_version = bottom->hardware_version; 619 620 if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) { 621 dev_err(&adapter->ccw_device->dev, 622 "The FCP adapter only supports newer " 623 "control block versions\n"); 624 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4"); 625 return; 626 } 627 if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) { 628 dev_err(&adapter->ccw_device->dev, 629 "The FCP adapter only supports older " 630 "control block versions\n"); 631 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5"); 632 } 633 } 634 635 /* 636 * Mapping of FC Endpoint Security flag masks to mnemonics 637 * 638 * NOTE: Update macro ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH when making any 639 * changes. 640 */ 641 static const struct { 642 u32 mask; 643 char *name; 644 } zfcp_fsf_fc_security_mnemonics[] = { 645 { FSF_FC_SECURITY_AUTH, "Authentication" }, 646 { FSF_FC_SECURITY_ENC_FCSP2 | 647 FSF_FC_SECURITY_ENC_ERAS, "Encryption" }, 648 }; 649 650 /* maximum strlen(zfcp_fsf_fc_security_mnemonics[...].name) + 1 */ 651 #define ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH 15 652 653 /** 654 * zfcp_fsf_scnprint_fc_security() - translate FC Endpoint Security flags into 655 * mnemonics and place in a buffer 656 * @buf : the buffer to place the translated FC Endpoint Security flag(s) 657 * into 658 * @size : the size of the buffer, including the trailing null space 659 * @fc_security: one or more FC Endpoint Security flags, or zero 660 * @fmt : specifies whether a list or a single item is to be put into the 661 * buffer 662 * 663 * The Fibre Channel (FC) Endpoint Security flags are translated into mnemonics. 664 * If the FC Endpoint Security flags are zero "none" is placed into the buffer. 665 * 666 * With ZFCP_FSF_PRINT_FMT_LIST the mnemonics are placed as a list separated by 667 * a comma followed by a space into the buffer. If one or more FC Endpoint 668 * Security flags cannot be translated into a mnemonic, as they are undefined 669 * in zfcp_fsf_fc_security_mnemonics, their bitwise ORed value in hexadecimal 670 * representation is placed into the buffer. 671 * 672 * With ZFCP_FSF_PRINT_FMT_SINGLEITEM only one single mnemonic is placed into 673 * the buffer. If the FC Endpoint Security flag cannot be translated, as it is 674 * undefined in zfcp_fsf_fc_security_mnemonics, its value in hexadecimal 675 * representation is placed into the buffer. If more than one FC Endpoint 676 * Security flag was specified, their value in hexadecimal representation is 677 * placed into the buffer. The macro ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH 678 * can be used to define a buffer that is large enough to hold one mnemonic. 679 * 680 * Return: The number of characters written into buf not including the trailing 681 * '\0'. If size is == 0 the function returns 0. 682 */ 683 ssize_t zfcp_fsf_scnprint_fc_security(char *buf, size_t size, u32 fc_security, 684 enum zfcp_fsf_print_fmt fmt) 685 { 686 const char *prefix = ""; 687 ssize_t len = 0; 688 int i; 689 690 if (fc_security == 0) 691 return scnprintf(buf, size, "none"); 692 if (fmt == ZFCP_FSF_PRINT_FMT_SINGLEITEM && hweight32(fc_security) != 1) 693 return scnprintf(buf, size, "0x%08x", fc_security); 694 695 for (i = 0; i < ARRAY_SIZE(zfcp_fsf_fc_security_mnemonics); i++) { 696 if (!(fc_security & zfcp_fsf_fc_security_mnemonics[i].mask)) 697 continue; 698 699 len += scnprintf(buf + len, size - len, "%s%s", prefix, 700 zfcp_fsf_fc_security_mnemonics[i].name); 701 prefix = ", "; 702 fc_security &= ~zfcp_fsf_fc_security_mnemonics[i].mask; 703 } 704 705 if (fc_security != 0) 706 len += scnprintf(buf + len, size - len, "%s0x%08x", 707 prefix, fc_security); 708 709 return len; 710 } 711 712 static void zfcp_fsf_dbf_adapter_fc_security(struct zfcp_adapter *adapter, 713 struct zfcp_fsf_req *req) 714 { 715 if (adapter->fc_security_algorithms == 716 adapter->fc_security_algorithms_old) { 717 /* no change, no trace */ 718 return; 719 } 720 721 zfcp_dbf_hba_fsf_fces("fsfcesa", req, ZFCP_DBF_INVALID_WWPN, 722 adapter->fc_security_algorithms_old, 723 adapter->fc_security_algorithms); 724 725 adapter->fc_security_algorithms_old = adapter->fc_security_algorithms; 726 } 727 728 static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req) 729 { 730 struct zfcp_adapter *adapter = req->adapter; 731 struct fsf_qtcb_bottom_port *bottom = &req->qtcb->bottom.port; 732 733 if (req->data) 734 memcpy(req->data, bottom, sizeof(*bottom)); 735 736 if (adapter->adapter_features & FSF_FEATURE_FC_SECURITY) 737 adapter->fc_security_algorithms = 738 bottom->fc_security_algorithms; 739 else 740 adapter->fc_security_algorithms = 0; 741 zfcp_fsf_dbf_adapter_fc_security(adapter, req); 742 } 743 744 static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req) 745 { 746 struct zfcp_diag_header *const diag_hdr = 747 &req->adapter->diagnostics->port_data.header; 748 struct fsf_qtcb *qtcb = req->qtcb; 749 struct fsf_qtcb_bottom_port *bottom = &qtcb->bottom.port; 750 751 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 752 return; 753 754 switch (qtcb->header.fsf_status) { 755 case FSF_GOOD: 756 /* 757 * usually we wait with an update till the cache is too old, 758 * but because we have the data available, update it anyway 759 */ 760 zfcp_diag_update_xdata(diag_hdr, bottom, false); 761 762 zfcp_scsi_shost_update_port_data(req->adapter, bottom); 763 zfcp_fsf_exchange_port_evaluate(req); 764 break; 765 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE: 766 zfcp_diag_update_xdata(diag_hdr, bottom, true); 767 req->status |= ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE; 768 769 zfcp_fsf_link_down_info_eval(req, 770 &qtcb->header.fsf_status_qual.link_down_info); 771 772 zfcp_scsi_shost_update_port_data(req->adapter, bottom); 773 zfcp_fsf_exchange_port_evaluate(req); 774 break; 775 } 776 } 777 778 static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool) 779 { 780 struct zfcp_fsf_req *req; 781 782 if (likely(pool)) 783 req = mempool_alloc(pool, GFP_ATOMIC); 784 else 785 req = kmalloc(sizeof(*req), GFP_ATOMIC); 786 787 if (unlikely(!req)) 788 return NULL; 789 790 memset(req, 0, sizeof(*req)); 791 req->pool = pool; 792 return req; 793 } 794 795 static struct fsf_qtcb *zfcp_fsf_qtcb_alloc(mempool_t *pool) 796 { 797 struct fsf_qtcb *qtcb; 798 799 if (likely(pool)) 800 qtcb = mempool_alloc(pool, GFP_ATOMIC); 801 else 802 qtcb = kmem_cache_alloc(zfcp_fsf_qtcb_cache, GFP_ATOMIC); 803 804 if (unlikely(!qtcb)) 805 return NULL; 806 807 memset(qtcb, 0, sizeof(*qtcb)); 808 return qtcb; 809 } 810 811 static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio, 812 u32 fsf_cmd, u8 sbtype, 813 mempool_t *pool) 814 { 815 struct zfcp_adapter *adapter = qdio->adapter; 816 struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool); 817 818 if (unlikely(!req)) 819 return ERR_PTR(-ENOMEM); 820 821 if (adapter->req_no == 0) 822 adapter->req_no++; 823 824 INIT_LIST_HEAD(&req->list); 825 timer_setup(&req->timer, NULL, 0); 826 init_completion(&req->completion); 827 828 req->adapter = adapter; 829 req->req_id = adapter->req_no; 830 831 if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) { 832 if (likely(pool)) 833 req->qtcb = zfcp_fsf_qtcb_alloc( 834 adapter->pool.qtcb_pool); 835 else 836 req->qtcb = zfcp_fsf_qtcb_alloc(NULL); 837 838 if (unlikely(!req->qtcb)) { 839 zfcp_fsf_req_free(req); 840 return ERR_PTR(-ENOMEM); 841 } 842 843 req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no; 844 req->qtcb->prefix.req_id = req->req_id; 845 req->qtcb->prefix.ulp_info = 26; 846 req->qtcb->prefix.qtcb_type = fsf_qtcb_type[fsf_cmd]; 847 req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION; 848 req->qtcb->header.req_handle = req->req_id; 849 req->qtcb->header.fsf_command = fsf_cmd; 850 } 851 852 zfcp_qdio_req_init(adapter->qdio, &req->qdio_req, req->req_id, sbtype, 853 req->qtcb, sizeof(struct fsf_qtcb)); 854 855 return req; 856 } 857 858 static int zfcp_fsf_req_send(struct zfcp_fsf_req *req) 859 { 860 const bool is_srb = zfcp_fsf_req_is_status_read_buffer(req); 861 struct zfcp_adapter *adapter = req->adapter; 862 struct zfcp_qdio *qdio = adapter->qdio; 863 int req_id = req->req_id; 864 865 zfcp_reqlist_add(adapter->req_list, req); 866 867 req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free); 868 req->issued = get_tod_clock(); 869 if (zfcp_qdio_send(qdio, &req->qdio_req)) { 870 del_timer_sync(&req->timer); 871 /* lookup request again, list might have changed */ 872 zfcp_reqlist_find_rm(adapter->req_list, req_id); 873 zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1"); 874 return -EIO; 875 } 876 877 /* 878 * NOTE: DO NOT TOUCH ASYNC req PAST THIS POINT. 879 * ONLY TOUCH SYNC req AGAIN ON req->completion. 880 * 881 * The request might complete and be freed concurrently at any point 882 * now. This is not protected by the QDIO-lock (req_q_lock). So any 883 * uncontrolled access after this might result in an use-after-free bug. 884 * Only if the request doesn't have ZFCP_STATUS_FSFREQ_CLEANUP set, and 885 * when it is completed via req->completion, is it safe to use req 886 * again. 887 */ 888 889 /* Don't increase for unsolicited status */ 890 if (!is_srb) 891 adapter->fsf_req_seq_no++; 892 adapter->req_no++; 893 894 return 0; 895 } 896 897 /** 898 * zfcp_fsf_status_read - send status read request 899 * @qdio: pointer to struct zfcp_qdio 900 * Returns: 0 on success, ERROR otherwise 901 */ 902 int zfcp_fsf_status_read(struct zfcp_qdio *qdio) 903 { 904 struct zfcp_adapter *adapter = qdio->adapter; 905 struct zfcp_fsf_req *req; 906 struct fsf_status_read_buffer *sr_buf; 907 struct page *page; 908 int retval = -EIO; 909 910 spin_lock_irq(&qdio->req_q_lock); 911 if (zfcp_qdio_sbal_get(qdio)) 912 goto out; 913 914 req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS, 915 SBAL_SFLAGS0_TYPE_STATUS, 916 adapter->pool.status_read_req); 917 if (IS_ERR(req)) { 918 retval = PTR_ERR(req); 919 goto out; 920 } 921 922 page = mempool_alloc(adapter->pool.sr_data, GFP_ATOMIC); 923 if (!page) { 924 retval = -ENOMEM; 925 goto failed_buf; 926 } 927 sr_buf = page_address(page); 928 memset(sr_buf, 0, sizeof(*sr_buf)); 929 req->data = sr_buf; 930 931 zfcp_qdio_fill_next(qdio, &req->qdio_req, sr_buf, sizeof(*sr_buf)); 932 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 933 934 retval = zfcp_fsf_req_send(req); 935 if (retval) 936 goto failed_req_send; 937 /* NOTE: DO NOT TOUCH req PAST THIS POINT! */ 938 939 goto out; 940 941 failed_req_send: 942 req->data = NULL; 943 mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data); 944 failed_buf: 945 zfcp_dbf_hba_fsf_uss("fssr__1", req); 946 zfcp_fsf_req_free(req); 947 out: 948 spin_unlock_irq(&qdio->req_q_lock); 949 return retval; 950 } 951 952 static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req) 953 { 954 struct scsi_device *sdev = req->data; 955 struct zfcp_scsi_dev *zfcp_sdev; 956 union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual; 957 958 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 959 return; 960 961 zfcp_sdev = sdev_to_zfcp(sdev); 962 963 switch (req->qtcb->header.fsf_status) { 964 case FSF_PORT_HANDLE_NOT_VALID: 965 if (fsq->word[0] == fsq->word[1]) { 966 zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, 967 "fsafch1"); 968 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 969 } 970 break; 971 case FSF_LUN_HANDLE_NOT_VALID: 972 if (fsq->word[0] == fsq->word[1]) { 973 zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fsafch2"); 974 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 975 } 976 break; 977 case FSF_FCP_COMMAND_DOES_NOT_EXIST: 978 req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED; 979 break; 980 case FSF_PORT_BOXED: 981 zfcp_erp_set_port_status(zfcp_sdev->port, 982 ZFCP_STATUS_COMMON_ACCESS_BOXED); 983 zfcp_erp_port_reopen(zfcp_sdev->port, 984 ZFCP_STATUS_COMMON_ERP_FAILED, "fsafch3"); 985 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 986 break; 987 case FSF_LUN_BOXED: 988 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED); 989 zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED, 990 "fsafch4"); 991 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 992 break; 993 case FSF_ADAPTER_STATUS_AVAILABLE: 994 switch (fsq->word[0]) { 995 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 996 zfcp_fc_test_link(zfcp_sdev->port); 997 fallthrough; 998 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 999 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1000 break; 1001 } 1002 break; 1003 case FSF_GOOD: 1004 req->status |= ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED; 1005 break; 1006 } 1007 } 1008 1009 /** 1010 * zfcp_fsf_abort_fcp_cmnd - abort running SCSI command 1011 * @scmnd: The SCSI command to abort 1012 * Returns: pointer to struct zfcp_fsf_req 1013 */ 1014 1015 struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd) 1016 { 1017 struct zfcp_fsf_req *req = NULL; 1018 struct scsi_device *sdev = scmnd->device; 1019 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); 1020 struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio; 1021 unsigned long old_req_id = (unsigned long) scmnd->host_scribble; 1022 1023 spin_lock_irq(&qdio->req_q_lock); 1024 if (zfcp_qdio_sbal_get(qdio)) 1025 goto out; 1026 req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND, 1027 SBAL_SFLAGS0_TYPE_READ, 1028 qdio->adapter->pool.scsi_abort); 1029 if (IS_ERR(req)) { 1030 req = NULL; 1031 goto out; 1032 } 1033 1034 if (unlikely(!(atomic_read(&zfcp_sdev->status) & 1035 ZFCP_STATUS_COMMON_UNBLOCKED))) 1036 goto out_error_free; 1037 1038 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 1039 1040 req->data = sdev; 1041 req->handler = zfcp_fsf_abort_fcp_command_handler; 1042 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle; 1043 req->qtcb->header.port_handle = zfcp_sdev->port->handle; 1044 req->qtcb->bottom.support.req_handle = (u64) old_req_id; 1045 1046 zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT); 1047 if (!zfcp_fsf_req_send(req)) { 1048 /* NOTE: DO NOT TOUCH req, UNTIL IT COMPLETES! */ 1049 goto out; 1050 } 1051 1052 out_error_free: 1053 zfcp_fsf_req_free(req); 1054 req = NULL; 1055 out: 1056 spin_unlock_irq(&qdio->req_q_lock); 1057 return req; 1058 } 1059 1060 static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req) 1061 { 1062 struct zfcp_adapter *adapter = req->adapter; 1063 struct zfcp_fsf_ct_els *ct = req->data; 1064 struct fsf_qtcb_header *header = &req->qtcb->header; 1065 1066 ct->status = -EINVAL; 1067 1068 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1069 goto skip_fsfstatus; 1070 1071 switch (header->fsf_status) { 1072 case FSF_GOOD: 1073 ct->status = 0; 1074 zfcp_dbf_san_res("fsscth2", req); 1075 break; 1076 case FSF_SERVICE_CLASS_NOT_SUPPORTED: 1077 zfcp_fsf_class_not_supp(req); 1078 break; 1079 case FSF_ADAPTER_STATUS_AVAILABLE: 1080 switch (header->fsf_status_qual.word[0]){ 1081 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 1082 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 1083 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1084 break; 1085 } 1086 break; 1087 case FSF_PORT_BOXED: 1088 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1089 break; 1090 case FSF_PORT_HANDLE_NOT_VALID: 1091 zfcp_erp_adapter_reopen(adapter, 0, "fsscth1"); 1092 fallthrough; 1093 case FSF_GENERIC_COMMAND_REJECTED: 1094 case FSF_PAYLOAD_SIZE_MISMATCH: 1095 case FSF_REQUEST_SIZE_TOO_LARGE: 1096 case FSF_RESPONSE_SIZE_TOO_LARGE: 1097 case FSF_SBAL_MISMATCH: 1098 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1099 break; 1100 } 1101 1102 skip_fsfstatus: 1103 if (ct->handler) 1104 ct->handler(ct->handler_data); 1105 } 1106 1107 static void zfcp_fsf_setup_ct_els_unchained(struct zfcp_qdio *qdio, 1108 struct zfcp_qdio_req *q_req, 1109 struct scatterlist *sg_req, 1110 struct scatterlist *sg_resp) 1111 { 1112 zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_req), sg_req->length); 1113 zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_resp), sg_resp->length); 1114 zfcp_qdio_set_sbale_last(qdio, q_req); 1115 } 1116 1117 static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, 1118 struct scatterlist *sg_req, 1119 struct scatterlist *sg_resp) 1120 { 1121 struct zfcp_adapter *adapter = req->adapter; 1122 struct zfcp_qdio *qdio = adapter->qdio; 1123 struct fsf_qtcb *qtcb = req->qtcb; 1124 u32 feat = adapter->adapter_features; 1125 1126 if (zfcp_adapter_multi_buffer_active(adapter)) { 1127 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req)) 1128 return -EIO; 1129 qtcb->bottom.support.req_buf_length = 1130 zfcp_qdio_real_bytes(sg_req); 1131 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp)) 1132 return -EIO; 1133 qtcb->bottom.support.resp_buf_length = 1134 zfcp_qdio_real_bytes(sg_resp); 1135 1136 zfcp_qdio_set_data_div(qdio, &req->qdio_req, sg_nents(sg_req)); 1137 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 1138 zfcp_qdio_set_scount(qdio, &req->qdio_req); 1139 return 0; 1140 } 1141 1142 /* use single, unchained SBAL if it can hold the request */ 1143 if (zfcp_qdio_sg_one_sbale(sg_req) && zfcp_qdio_sg_one_sbale(sg_resp)) { 1144 zfcp_fsf_setup_ct_els_unchained(qdio, &req->qdio_req, 1145 sg_req, sg_resp); 1146 return 0; 1147 } 1148 1149 if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS)) 1150 return -EOPNOTSUPP; 1151 1152 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req)) 1153 return -EIO; 1154 1155 qtcb->bottom.support.req_buf_length = zfcp_qdio_real_bytes(sg_req); 1156 1157 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 1158 zfcp_qdio_skip_to_last_sbale(qdio, &req->qdio_req); 1159 1160 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp)) 1161 return -EIO; 1162 1163 qtcb->bottom.support.resp_buf_length = zfcp_qdio_real_bytes(sg_resp); 1164 1165 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 1166 1167 return 0; 1168 } 1169 1170 static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req, 1171 struct scatterlist *sg_req, 1172 struct scatterlist *sg_resp, 1173 unsigned int timeout) 1174 { 1175 int ret; 1176 1177 ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp); 1178 if (ret) 1179 return ret; 1180 1181 /* common settings for ct/gs and els requests */ 1182 if (timeout > 255) 1183 timeout = 255; /* max value accepted by hardware */ 1184 req->qtcb->bottom.support.service_class = FSF_CLASS_3; 1185 req->qtcb->bottom.support.timeout = timeout; 1186 zfcp_fsf_start_timer(req, (timeout + 10) * HZ); 1187 1188 return 0; 1189 } 1190 1191 /** 1192 * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS) 1193 * @wka_port: pointer to zfcp WKA port to send CT/GS to 1194 * @ct: pointer to struct zfcp_send_ct with data for request 1195 * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req 1196 * @timeout: timeout that hardware should use, and a later software timeout 1197 */ 1198 int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port, 1199 struct zfcp_fsf_ct_els *ct, mempool_t *pool, 1200 unsigned int timeout) 1201 { 1202 struct zfcp_qdio *qdio = wka_port->adapter->qdio; 1203 struct zfcp_fsf_req *req; 1204 int ret = -EIO; 1205 1206 spin_lock_irq(&qdio->req_q_lock); 1207 if (zfcp_qdio_sbal_get(qdio)) 1208 goto out; 1209 1210 req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC, 1211 SBAL_SFLAGS0_TYPE_WRITE_READ, pool); 1212 1213 if (IS_ERR(req)) { 1214 ret = PTR_ERR(req); 1215 goto out; 1216 } 1217 1218 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1219 ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, timeout); 1220 if (ret) 1221 goto failed_send; 1222 1223 req->handler = zfcp_fsf_send_ct_handler; 1224 req->qtcb->header.port_handle = wka_port->handle; 1225 ct->d_id = wka_port->d_id; 1226 req->data = ct; 1227 1228 zfcp_dbf_san_req("fssct_1", req, wka_port->d_id); 1229 1230 ret = zfcp_fsf_req_send(req); 1231 if (ret) 1232 goto failed_send; 1233 /* NOTE: DO NOT TOUCH req PAST THIS POINT! */ 1234 1235 goto out; 1236 1237 failed_send: 1238 zfcp_fsf_req_free(req); 1239 out: 1240 spin_unlock_irq(&qdio->req_q_lock); 1241 return ret; 1242 } 1243 1244 static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req) 1245 { 1246 struct zfcp_fsf_ct_els *send_els = req->data; 1247 struct fsf_qtcb_header *header = &req->qtcb->header; 1248 1249 send_els->status = -EINVAL; 1250 1251 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1252 goto skip_fsfstatus; 1253 1254 switch (header->fsf_status) { 1255 case FSF_GOOD: 1256 send_els->status = 0; 1257 zfcp_dbf_san_res("fsselh1", req); 1258 break; 1259 case FSF_SERVICE_CLASS_NOT_SUPPORTED: 1260 zfcp_fsf_class_not_supp(req); 1261 break; 1262 case FSF_ADAPTER_STATUS_AVAILABLE: 1263 switch (header->fsf_status_qual.word[0]){ 1264 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 1265 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 1266 case FSF_SQ_RETRY_IF_POSSIBLE: 1267 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1268 break; 1269 } 1270 break; 1271 case FSF_ELS_COMMAND_REJECTED: 1272 case FSF_PAYLOAD_SIZE_MISMATCH: 1273 case FSF_REQUEST_SIZE_TOO_LARGE: 1274 case FSF_RESPONSE_SIZE_TOO_LARGE: 1275 break; 1276 case FSF_SBAL_MISMATCH: 1277 /* should never occur, avoided in zfcp_fsf_send_els */ 1278 fallthrough; 1279 default: 1280 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1281 break; 1282 } 1283 skip_fsfstatus: 1284 if (send_els->handler) 1285 send_els->handler(send_els->handler_data); 1286 } 1287 1288 /** 1289 * zfcp_fsf_send_els - initiate an ELS command (FC-FS) 1290 * @adapter: pointer to zfcp adapter 1291 * @d_id: N_Port_ID to send ELS to 1292 * @els: pointer to struct zfcp_send_els with data for the command 1293 * @timeout: timeout that hardware should use, and a later software timeout 1294 */ 1295 int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id, 1296 struct zfcp_fsf_ct_els *els, unsigned int timeout) 1297 { 1298 struct zfcp_fsf_req *req; 1299 struct zfcp_qdio *qdio = adapter->qdio; 1300 int ret = -EIO; 1301 1302 spin_lock_irq(&qdio->req_q_lock); 1303 if (zfcp_qdio_sbal_get(qdio)) 1304 goto out; 1305 1306 req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS, 1307 SBAL_SFLAGS0_TYPE_WRITE_READ, NULL); 1308 1309 if (IS_ERR(req)) { 1310 ret = PTR_ERR(req); 1311 goto out; 1312 } 1313 1314 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1315 1316 if (!zfcp_adapter_multi_buffer_active(adapter)) 1317 zfcp_qdio_sbal_limit(qdio, &req->qdio_req, 2); 1318 1319 ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, timeout); 1320 1321 if (ret) 1322 goto failed_send; 1323 1324 hton24(req->qtcb->bottom.support.d_id, d_id); 1325 req->handler = zfcp_fsf_send_els_handler; 1326 els->d_id = d_id; 1327 req->data = els; 1328 1329 zfcp_dbf_san_req("fssels1", req, d_id); 1330 1331 ret = zfcp_fsf_req_send(req); 1332 if (ret) 1333 goto failed_send; 1334 /* NOTE: DO NOT TOUCH req PAST THIS POINT! */ 1335 1336 goto out; 1337 1338 failed_send: 1339 zfcp_fsf_req_free(req); 1340 out: 1341 spin_unlock_irq(&qdio->req_q_lock); 1342 return ret; 1343 } 1344 1345 int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action) 1346 { 1347 struct zfcp_fsf_req *req; 1348 struct zfcp_qdio *qdio = erp_action->adapter->qdio; 1349 int retval = -EIO; 1350 1351 spin_lock_irq(&qdio->req_q_lock); 1352 if (zfcp_qdio_sbal_get(qdio)) 1353 goto out; 1354 1355 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA, 1356 SBAL_SFLAGS0_TYPE_READ, 1357 qdio->adapter->pool.erp_req); 1358 1359 if (IS_ERR(req)) { 1360 retval = PTR_ERR(req); 1361 goto out; 1362 } 1363 1364 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1365 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 1366 1367 req->qtcb->bottom.config.feature_selection = 1368 FSF_FEATURE_NOTIFICATION_LOST | 1369 FSF_FEATURE_UPDATE_ALERT | 1370 FSF_FEATURE_REQUEST_SFP_DATA | 1371 FSF_FEATURE_FC_SECURITY; 1372 req->erp_action = erp_action; 1373 req->handler = zfcp_fsf_exchange_config_data_handler; 1374 erp_action->fsf_req_id = req->req_id; 1375 1376 zfcp_fsf_start_erp_timer(req); 1377 retval = zfcp_fsf_req_send(req); 1378 if (retval) { 1379 zfcp_fsf_req_free(req); 1380 erp_action->fsf_req_id = 0; 1381 } 1382 /* NOTE: DO NOT TOUCH req PAST THIS POINT! */ 1383 out: 1384 spin_unlock_irq(&qdio->req_q_lock); 1385 return retval; 1386 } 1387 1388 1389 /** 1390 * zfcp_fsf_exchange_config_data_sync() - Request information about FCP channel. 1391 * @qdio: pointer to the QDIO-Queue to use for sending the command. 1392 * @data: pointer to the QTCB-Bottom for storing the result of the command, 1393 * might be %NULL. 1394 * 1395 * Returns: 1396 * * 0 - Exchange Config Data was successful, @data is complete 1397 * * -EIO - Exchange Config Data was not successful, @data is invalid 1398 * * -EAGAIN - @data contains incomplete data 1399 * * -ENOMEM - Some memory allocation failed along the way 1400 */ 1401 int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio, 1402 struct fsf_qtcb_bottom_config *data) 1403 { 1404 struct zfcp_fsf_req *req = NULL; 1405 int retval = -EIO; 1406 1407 spin_lock_irq(&qdio->req_q_lock); 1408 if (zfcp_qdio_sbal_get(qdio)) 1409 goto out_unlock; 1410 1411 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA, 1412 SBAL_SFLAGS0_TYPE_READ, NULL); 1413 1414 if (IS_ERR(req)) { 1415 retval = PTR_ERR(req); 1416 goto out_unlock; 1417 } 1418 1419 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 1420 req->handler = zfcp_fsf_exchange_config_data_handler; 1421 1422 req->qtcb->bottom.config.feature_selection = 1423 FSF_FEATURE_NOTIFICATION_LOST | 1424 FSF_FEATURE_UPDATE_ALERT | 1425 FSF_FEATURE_REQUEST_SFP_DATA | 1426 FSF_FEATURE_FC_SECURITY; 1427 1428 if (data) 1429 req->data = data; 1430 1431 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); 1432 retval = zfcp_fsf_req_send(req); 1433 spin_unlock_irq(&qdio->req_q_lock); 1434 1435 if (!retval) { 1436 /* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */ 1437 wait_for_completion(&req->completion); 1438 1439 if (req->status & 1440 (ZFCP_STATUS_FSFREQ_ERROR | ZFCP_STATUS_FSFREQ_DISMISSED)) 1441 retval = -EIO; 1442 else if (req->status & ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE) 1443 retval = -EAGAIN; 1444 } 1445 1446 zfcp_fsf_req_free(req); 1447 return retval; 1448 1449 out_unlock: 1450 spin_unlock_irq(&qdio->req_q_lock); 1451 return retval; 1452 } 1453 1454 /** 1455 * zfcp_fsf_exchange_port_data - request information about local port 1456 * @erp_action: ERP action for the adapter for which port data is requested 1457 * Returns: 0 on success, error otherwise 1458 */ 1459 int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action) 1460 { 1461 struct zfcp_qdio *qdio = erp_action->adapter->qdio; 1462 struct zfcp_fsf_req *req; 1463 int retval = -EIO; 1464 1465 if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) 1466 return -EOPNOTSUPP; 1467 1468 spin_lock_irq(&qdio->req_q_lock); 1469 if (zfcp_qdio_sbal_get(qdio)) 1470 goto out; 1471 1472 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA, 1473 SBAL_SFLAGS0_TYPE_READ, 1474 qdio->adapter->pool.erp_req); 1475 1476 if (IS_ERR(req)) { 1477 retval = PTR_ERR(req); 1478 goto out; 1479 } 1480 1481 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1482 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 1483 1484 req->handler = zfcp_fsf_exchange_port_data_handler; 1485 req->erp_action = erp_action; 1486 erp_action->fsf_req_id = req->req_id; 1487 1488 zfcp_fsf_start_erp_timer(req); 1489 retval = zfcp_fsf_req_send(req); 1490 if (retval) { 1491 zfcp_fsf_req_free(req); 1492 erp_action->fsf_req_id = 0; 1493 } 1494 /* NOTE: DO NOT TOUCH req PAST THIS POINT! */ 1495 out: 1496 spin_unlock_irq(&qdio->req_q_lock); 1497 return retval; 1498 } 1499 1500 /** 1501 * zfcp_fsf_exchange_port_data_sync() - Request information about local port. 1502 * @qdio: pointer to the QDIO-Queue to use for sending the command. 1503 * @data: pointer to the QTCB-Bottom for storing the result of the command, 1504 * might be %NULL. 1505 * 1506 * Returns: 1507 * * 0 - Exchange Port Data was successful, @data is complete 1508 * * -EIO - Exchange Port Data was not successful, @data is invalid 1509 * * -EAGAIN - @data contains incomplete data 1510 * * -ENOMEM - Some memory allocation failed along the way 1511 * * -EOPNOTSUPP - This operation is not supported 1512 */ 1513 int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio, 1514 struct fsf_qtcb_bottom_port *data) 1515 { 1516 struct zfcp_fsf_req *req = NULL; 1517 int retval = -EIO; 1518 1519 if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) 1520 return -EOPNOTSUPP; 1521 1522 spin_lock_irq(&qdio->req_q_lock); 1523 if (zfcp_qdio_sbal_get(qdio)) 1524 goto out_unlock; 1525 1526 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA, 1527 SBAL_SFLAGS0_TYPE_READ, NULL); 1528 1529 if (IS_ERR(req)) { 1530 retval = PTR_ERR(req); 1531 goto out_unlock; 1532 } 1533 1534 if (data) 1535 req->data = data; 1536 1537 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 1538 1539 req->handler = zfcp_fsf_exchange_port_data_handler; 1540 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); 1541 retval = zfcp_fsf_req_send(req); 1542 spin_unlock_irq(&qdio->req_q_lock); 1543 1544 if (!retval) { 1545 /* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */ 1546 wait_for_completion(&req->completion); 1547 1548 if (req->status & 1549 (ZFCP_STATUS_FSFREQ_ERROR | ZFCP_STATUS_FSFREQ_DISMISSED)) 1550 retval = -EIO; 1551 else if (req->status & ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE) 1552 retval = -EAGAIN; 1553 } 1554 1555 zfcp_fsf_req_free(req); 1556 return retval; 1557 1558 out_unlock: 1559 spin_unlock_irq(&qdio->req_q_lock); 1560 return retval; 1561 } 1562 1563 static void zfcp_fsf_log_port_fc_security(struct zfcp_port *port, 1564 struct zfcp_fsf_req *req) 1565 { 1566 char mnemonic_old[ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH]; 1567 char mnemonic_new[ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH]; 1568 1569 if (port->connection_info == port->connection_info_old) { 1570 /* no change, no log nor trace */ 1571 return; 1572 } 1573 1574 zfcp_dbf_hba_fsf_fces("fsfcesp", req, port->wwpn, 1575 port->connection_info_old, 1576 port->connection_info); 1577 1578 zfcp_fsf_scnprint_fc_security(mnemonic_old, sizeof(mnemonic_old), 1579 port->connection_info_old, 1580 ZFCP_FSF_PRINT_FMT_SINGLEITEM); 1581 zfcp_fsf_scnprint_fc_security(mnemonic_new, sizeof(mnemonic_new), 1582 port->connection_info, 1583 ZFCP_FSF_PRINT_FMT_SINGLEITEM); 1584 1585 if (strncmp(mnemonic_old, mnemonic_new, 1586 ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH) == 0) { 1587 /* no change in string representation, no log */ 1588 goto out; 1589 } 1590 1591 if (port->connection_info_old == 0) { 1592 /* activation */ 1593 dev_info(&port->adapter->ccw_device->dev, 1594 "FC Endpoint Security of connection to remote port 0x%16llx enabled: %s\n", 1595 port->wwpn, mnemonic_new); 1596 } else if (port->connection_info == 0) { 1597 /* deactivation */ 1598 dev_warn(&port->adapter->ccw_device->dev, 1599 "FC Endpoint Security of connection to remote port 0x%16llx disabled: was %s\n", 1600 port->wwpn, mnemonic_old); 1601 } else { 1602 /* change */ 1603 dev_warn(&port->adapter->ccw_device->dev, 1604 "FC Endpoint Security of connection to remote port 0x%16llx changed: from %s to %s\n", 1605 port->wwpn, mnemonic_old, mnemonic_new); 1606 } 1607 1608 out: 1609 port->connection_info_old = port->connection_info; 1610 } 1611 1612 static void zfcp_fsf_log_security_error(const struct device *dev, u32 fsf_sqw0, 1613 u64 wwpn) 1614 { 1615 switch (fsf_sqw0) { 1616 1617 /* 1618 * Open Port command error codes 1619 */ 1620 1621 case FSF_SQ_SECURITY_REQUIRED: 1622 dev_warn_ratelimited(dev, 1623 "FC Endpoint Security error: FC security is required but not supported or configured on remote port 0x%016llx\n", 1624 wwpn); 1625 break; 1626 case FSF_SQ_SECURITY_TIMEOUT: 1627 dev_warn_ratelimited(dev, 1628 "FC Endpoint Security error: a timeout prevented opening remote port 0x%016llx\n", 1629 wwpn); 1630 break; 1631 case FSF_SQ_SECURITY_KM_UNAVAILABLE: 1632 dev_warn_ratelimited(dev, 1633 "FC Endpoint Security error: opening remote port 0x%016llx failed because local and external key manager cannot communicate\n", 1634 wwpn); 1635 break; 1636 case FSF_SQ_SECURITY_RKM_UNAVAILABLE: 1637 dev_warn_ratelimited(dev, 1638 "FC Endpoint Security error: opening remote port 0x%016llx failed because it cannot communicate with the external key manager\n", 1639 wwpn); 1640 break; 1641 case FSF_SQ_SECURITY_AUTH_FAILURE: 1642 dev_warn_ratelimited(dev, 1643 "FC Endpoint Security error: the device could not verify the identity of remote port 0x%016llx\n", 1644 wwpn); 1645 break; 1646 1647 /* 1648 * Send FCP command error codes 1649 */ 1650 1651 case FSF_SQ_SECURITY_ENC_FAILURE: 1652 dev_warn_ratelimited(dev, 1653 "FC Endpoint Security error: FC connection to remote port 0x%016llx closed because encryption broke down\n", 1654 wwpn); 1655 break; 1656 1657 /* 1658 * Unknown error codes 1659 */ 1660 1661 default: 1662 dev_warn_ratelimited(dev, 1663 "FC Endpoint Security error: the device issued an unknown error code 0x%08x related to the FC connection to remote port 0x%016llx\n", 1664 fsf_sqw0, wwpn); 1665 } 1666 } 1667 1668 static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req) 1669 { 1670 struct zfcp_adapter *adapter = req->adapter; 1671 struct zfcp_port *port = req->data; 1672 struct fsf_qtcb_header *header = &req->qtcb->header; 1673 struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support; 1674 struct fc_els_flogi *plogi; 1675 1676 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1677 goto out; 1678 1679 switch (header->fsf_status) { 1680 case FSF_PORT_ALREADY_OPEN: 1681 break; 1682 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED: 1683 dev_warn(&adapter->ccw_device->dev, 1684 "Not enough FCP adapter resources to open " 1685 "remote port 0x%016Lx\n", 1686 (unsigned long long)port->wwpn); 1687 zfcp_erp_set_port_status(port, 1688 ZFCP_STATUS_COMMON_ERP_FAILED); 1689 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1690 break; 1691 case FSF_SECURITY_ERROR: 1692 zfcp_fsf_log_security_error(&req->adapter->ccw_device->dev, 1693 header->fsf_status_qual.word[0], 1694 port->wwpn); 1695 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1696 break; 1697 case FSF_ADAPTER_STATUS_AVAILABLE: 1698 switch (header->fsf_status_qual.word[0]) { 1699 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 1700 /* no zfcp_fc_test_link() with failed open port */ 1701 fallthrough; 1702 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 1703 case FSF_SQ_NO_RETRY_POSSIBLE: 1704 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1705 break; 1706 } 1707 break; 1708 case FSF_GOOD: 1709 port->handle = header->port_handle; 1710 if (adapter->adapter_features & FSF_FEATURE_FC_SECURITY) 1711 port->connection_info = bottom->connection_info; 1712 else 1713 port->connection_info = 0; 1714 zfcp_fsf_log_port_fc_security(port, req); 1715 atomic_or(ZFCP_STATUS_COMMON_OPEN | 1716 ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); 1717 atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_BOXED, 1718 &port->status); 1719 /* check whether D_ID has changed during open */ 1720 /* 1721 * FIXME: This check is not airtight, as the FCP channel does 1722 * not monitor closures of target port connections caused on 1723 * the remote side. Thus, they might miss out on invalidating 1724 * locally cached WWPNs (and other N_Port parameters) of gone 1725 * target ports. So, our heroic attempt to make things safe 1726 * could be undermined by 'open port' response data tagged with 1727 * obsolete WWPNs. Another reason to monitor potential 1728 * connection closures ourself at least (by interpreting 1729 * incoming ELS' and unsolicited status). It just crosses my 1730 * mind that one should be able to cross-check by means of 1731 * another GID_PN straight after a port has been opened. 1732 * Alternately, an ADISC/PDISC ELS should suffice, as well. 1733 */ 1734 plogi = (struct fc_els_flogi *) bottom->els; 1735 if (bottom->els1_length >= FSF_PLOGI_MIN_LEN) 1736 zfcp_fc_plogi_evaluate(port, plogi); 1737 break; 1738 case FSF_UNKNOWN_OP_SUBTYPE: 1739 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1740 break; 1741 } 1742 1743 out: 1744 put_device(&port->dev); 1745 } 1746 1747 /** 1748 * zfcp_fsf_open_port - create and send open port request 1749 * @erp_action: pointer to struct zfcp_erp_action 1750 * Returns: 0 on success, error otherwise 1751 */ 1752 int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action) 1753 { 1754 struct zfcp_qdio *qdio = erp_action->adapter->qdio; 1755 struct zfcp_port *port = erp_action->port; 1756 struct zfcp_fsf_req *req; 1757 int retval = -EIO; 1758 1759 spin_lock_irq(&qdio->req_q_lock); 1760 if (zfcp_qdio_sbal_get(qdio)) 1761 goto out; 1762 1763 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID, 1764 SBAL_SFLAGS0_TYPE_READ, 1765 qdio->adapter->pool.erp_req); 1766 1767 if (IS_ERR(req)) { 1768 retval = PTR_ERR(req); 1769 goto out; 1770 } 1771 1772 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1773 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 1774 1775 req->handler = zfcp_fsf_open_port_handler; 1776 hton24(req->qtcb->bottom.support.d_id, port->d_id); 1777 req->data = port; 1778 req->erp_action = erp_action; 1779 erp_action->fsf_req_id = req->req_id; 1780 get_device(&port->dev); 1781 1782 zfcp_fsf_start_erp_timer(req); 1783 retval = zfcp_fsf_req_send(req); 1784 if (retval) { 1785 zfcp_fsf_req_free(req); 1786 erp_action->fsf_req_id = 0; 1787 put_device(&port->dev); 1788 } 1789 /* NOTE: DO NOT TOUCH req PAST THIS POINT! */ 1790 out: 1791 spin_unlock_irq(&qdio->req_q_lock); 1792 return retval; 1793 } 1794 1795 static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req) 1796 { 1797 struct zfcp_port *port = req->data; 1798 1799 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1800 return; 1801 1802 switch (req->qtcb->header.fsf_status) { 1803 case FSF_PORT_HANDLE_NOT_VALID: 1804 zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1"); 1805 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1806 break; 1807 case FSF_ADAPTER_STATUS_AVAILABLE: 1808 break; 1809 case FSF_GOOD: 1810 zfcp_erp_clear_port_status(port, ZFCP_STATUS_COMMON_OPEN); 1811 break; 1812 } 1813 } 1814 1815 /** 1816 * zfcp_fsf_close_port - create and send close port request 1817 * @erp_action: pointer to struct zfcp_erp_action 1818 * Returns: 0 on success, error otherwise 1819 */ 1820 int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action) 1821 { 1822 struct zfcp_qdio *qdio = erp_action->adapter->qdio; 1823 struct zfcp_fsf_req *req; 1824 int retval = -EIO; 1825 1826 spin_lock_irq(&qdio->req_q_lock); 1827 if (zfcp_qdio_sbal_get(qdio)) 1828 goto out; 1829 1830 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT, 1831 SBAL_SFLAGS0_TYPE_READ, 1832 qdio->adapter->pool.erp_req); 1833 1834 if (IS_ERR(req)) { 1835 retval = PTR_ERR(req); 1836 goto out; 1837 } 1838 1839 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1840 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 1841 1842 req->handler = zfcp_fsf_close_port_handler; 1843 req->data = erp_action->port; 1844 req->erp_action = erp_action; 1845 req->qtcb->header.port_handle = erp_action->port->handle; 1846 erp_action->fsf_req_id = req->req_id; 1847 1848 zfcp_fsf_start_erp_timer(req); 1849 retval = zfcp_fsf_req_send(req); 1850 if (retval) { 1851 zfcp_fsf_req_free(req); 1852 erp_action->fsf_req_id = 0; 1853 } 1854 /* NOTE: DO NOT TOUCH req PAST THIS POINT! */ 1855 out: 1856 spin_unlock_irq(&qdio->req_q_lock); 1857 return retval; 1858 } 1859 1860 static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req) 1861 { 1862 struct zfcp_fc_wka_port *wka_port = req->data; 1863 struct fsf_qtcb_header *header = &req->qtcb->header; 1864 1865 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) { 1866 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE; 1867 goto out; 1868 } 1869 1870 switch (header->fsf_status) { 1871 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED: 1872 dev_warn(&req->adapter->ccw_device->dev, 1873 "Opening WKA port 0x%x failed\n", wka_port->d_id); 1874 fallthrough; 1875 case FSF_ADAPTER_STATUS_AVAILABLE: 1876 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1877 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE; 1878 break; 1879 case FSF_GOOD: 1880 wka_port->handle = header->port_handle; 1881 fallthrough; 1882 case FSF_PORT_ALREADY_OPEN: 1883 wka_port->status = ZFCP_FC_WKA_PORT_ONLINE; 1884 } 1885 out: 1886 wake_up(&wka_port->completion_wq); 1887 } 1888 1889 /** 1890 * zfcp_fsf_open_wka_port - create and send open wka-port request 1891 * @wka_port: pointer to struct zfcp_fc_wka_port 1892 * Returns: 0 on success, error otherwise 1893 */ 1894 int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port) 1895 { 1896 struct zfcp_qdio *qdio = wka_port->adapter->qdio; 1897 struct zfcp_fsf_req *req; 1898 unsigned long req_id = 0; 1899 int retval = -EIO; 1900 1901 spin_lock_irq(&qdio->req_q_lock); 1902 if (zfcp_qdio_sbal_get(qdio)) 1903 goto out; 1904 1905 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID, 1906 SBAL_SFLAGS0_TYPE_READ, 1907 qdio->adapter->pool.erp_req); 1908 1909 if (IS_ERR(req)) { 1910 retval = PTR_ERR(req); 1911 goto out; 1912 } 1913 1914 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1915 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 1916 1917 req->handler = zfcp_fsf_open_wka_port_handler; 1918 hton24(req->qtcb->bottom.support.d_id, wka_port->d_id); 1919 req->data = wka_port; 1920 1921 req_id = req->req_id; 1922 1923 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); 1924 retval = zfcp_fsf_req_send(req); 1925 if (retval) 1926 zfcp_fsf_req_free(req); 1927 /* NOTE: DO NOT TOUCH req PAST THIS POINT! */ 1928 out: 1929 spin_unlock_irq(&qdio->req_q_lock); 1930 if (!retval) 1931 zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req_id); 1932 return retval; 1933 } 1934 1935 static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req) 1936 { 1937 struct zfcp_fc_wka_port *wka_port = req->data; 1938 1939 if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) { 1940 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1941 zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1"); 1942 } 1943 1944 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE; 1945 wake_up(&wka_port->completion_wq); 1946 } 1947 1948 /** 1949 * zfcp_fsf_close_wka_port - create and send close wka port request 1950 * @wka_port: WKA port to open 1951 * Returns: 0 on success, error otherwise 1952 */ 1953 int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port) 1954 { 1955 struct zfcp_qdio *qdio = wka_port->adapter->qdio; 1956 struct zfcp_fsf_req *req; 1957 unsigned long req_id = 0; 1958 int retval = -EIO; 1959 1960 spin_lock_irq(&qdio->req_q_lock); 1961 if (zfcp_qdio_sbal_get(qdio)) 1962 goto out; 1963 1964 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT, 1965 SBAL_SFLAGS0_TYPE_READ, 1966 qdio->adapter->pool.erp_req); 1967 1968 if (IS_ERR(req)) { 1969 retval = PTR_ERR(req); 1970 goto out; 1971 } 1972 1973 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1974 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 1975 1976 req->handler = zfcp_fsf_close_wka_port_handler; 1977 req->data = wka_port; 1978 req->qtcb->header.port_handle = wka_port->handle; 1979 1980 req_id = req->req_id; 1981 1982 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); 1983 retval = zfcp_fsf_req_send(req); 1984 if (retval) 1985 zfcp_fsf_req_free(req); 1986 /* NOTE: DO NOT TOUCH req PAST THIS POINT! */ 1987 out: 1988 spin_unlock_irq(&qdio->req_q_lock); 1989 if (!retval) 1990 zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req_id); 1991 return retval; 1992 } 1993 1994 static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req) 1995 { 1996 struct zfcp_port *port = req->data; 1997 struct fsf_qtcb_header *header = &req->qtcb->header; 1998 struct scsi_device *sdev; 1999 2000 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 2001 return; 2002 2003 switch (header->fsf_status) { 2004 case FSF_PORT_HANDLE_NOT_VALID: 2005 zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1"); 2006 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2007 break; 2008 case FSF_PORT_BOXED: 2009 /* can't use generic zfcp_erp_modify_port_status because 2010 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */ 2011 atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); 2012 shost_for_each_device(sdev, port->adapter->scsi_host) 2013 if (sdev_to_zfcp(sdev)->port == port) 2014 atomic_andnot(ZFCP_STATUS_COMMON_OPEN, 2015 &sdev_to_zfcp(sdev)->status); 2016 zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED); 2017 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, 2018 "fscpph2"); 2019 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2020 break; 2021 case FSF_ADAPTER_STATUS_AVAILABLE: 2022 switch (header->fsf_status_qual.word[0]) { 2023 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 2024 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 2025 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2026 break; 2027 } 2028 break; 2029 case FSF_GOOD: 2030 /* can't use generic zfcp_erp_modify_port_status because 2031 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port 2032 */ 2033 atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); 2034 shost_for_each_device(sdev, port->adapter->scsi_host) 2035 if (sdev_to_zfcp(sdev)->port == port) 2036 atomic_andnot(ZFCP_STATUS_COMMON_OPEN, 2037 &sdev_to_zfcp(sdev)->status); 2038 break; 2039 } 2040 } 2041 2042 /** 2043 * zfcp_fsf_close_physical_port - close physical port 2044 * @erp_action: pointer to struct zfcp_erp_action 2045 * Returns: 0 on success 2046 */ 2047 int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action) 2048 { 2049 struct zfcp_qdio *qdio = erp_action->adapter->qdio; 2050 struct zfcp_fsf_req *req; 2051 int retval = -EIO; 2052 2053 spin_lock_irq(&qdio->req_q_lock); 2054 if (zfcp_qdio_sbal_get(qdio)) 2055 goto out; 2056 2057 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT, 2058 SBAL_SFLAGS0_TYPE_READ, 2059 qdio->adapter->pool.erp_req); 2060 2061 if (IS_ERR(req)) { 2062 retval = PTR_ERR(req); 2063 goto out; 2064 } 2065 2066 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 2067 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 2068 2069 req->data = erp_action->port; 2070 req->qtcb->header.port_handle = erp_action->port->handle; 2071 req->erp_action = erp_action; 2072 req->handler = zfcp_fsf_close_physical_port_handler; 2073 erp_action->fsf_req_id = req->req_id; 2074 2075 zfcp_fsf_start_erp_timer(req); 2076 retval = zfcp_fsf_req_send(req); 2077 if (retval) { 2078 zfcp_fsf_req_free(req); 2079 erp_action->fsf_req_id = 0; 2080 } 2081 /* NOTE: DO NOT TOUCH req PAST THIS POINT! */ 2082 out: 2083 spin_unlock_irq(&qdio->req_q_lock); 2084 return retval; 2085 } 2086 2087 static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req) 2088 { 2089 struct zfcp_adapter *adapter = req->adapter; 2090 struct scsi_device *sdev = req->data; 2091 struct zfcp_scsi_dev *zfcp_sdev; 2092 struct fsf_qtcb_header *header = &req->qtcb->header; 2093 union fsf_status_qual *qual = &header->fsf_status_qual; 2094 2095 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 2096 return; 2097 2098 zfcp_sdev = sdev_to_zfcp(sdev); 2099 2100 atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_DENIED | 2101 ZFCP_STATUS_COMMON_ACCESS_BOXED, 2102 &zfcp_sdev->status); 2103 2104 switch (header->fsf_status) { 2105 2106 case FSF_PORT_HANDLE_NOT_VALID: 2107 zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1"); 2108 fallthrough; 2109 case FSF_LUN_ALREADY_OPEN: 2110 break; 2111 case FSF_PORT_BOXED: 2112 zfcp_erp_set_port_status(zfcp_sdev->port, 2113 ZFCP_STATUS_COMMON_ACCESS_BOXED); 2114 zfcp_erp_port_reopen(zfcp_sdev->port, 2115 ZFCP_STATUS_COMMON_ERP_FAILED, "fsouh_2"); 2116 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2117 break; 2118 case FSF_LUN_SHARING_VIOLATION: 2119 if (qual->word[0]) 2120 dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev, 2121 "LUN 0x%016Lx on port 0x%016Lx is already in " 2122 "use by CSS%d, MIF Image ID %x\n", 2123 zfcp_scsi_dev_lun(sdev), 2124 (unsigned long long)zfcp_sdev->port->wwpn, 2125 qual->fsf_queue_designator.cssid, 2126 qual->fsf_queue_designator.hla); 2127 zfcp_erp_set_lun_status(sdev, 2128 ZFCP_STATUS_COMMON_ERP_FAILED | 2129 ZFCP_STATUS_COMMON_ACCESS_DENIED); 2130 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2131 break; 2132 case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED: 2133 dev_warn(&adapter->ccw_device->dev, 2134 "No handle is available for LUN " 2135 "0x%016Lx on port 0x%016Lx\n", 2136 (unsigned long long)zfcp_scsi_dev_lun(sdev), 2137 (unsigned long long)zfcp_sdev->port->wwpn); 2138 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED); 2139 fallthrough; 2140 case FSF_INVALID_COMMAND_OPTION: 2141 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2142 break; 2143 case FSF_ADAPTER_STATUS_AVAILABLE: 2144 switch (header->fsf_status_qual.word[0]) { 2145 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 2146 zfcp_fc_test_link(zfcp_sdev->port); 2147 fallthrough; 2148 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 2149 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2150 break; 2151 } 2152 break; 2153 2154 case FSF_GOOD: 2155 zfcp_sdev->lun_handle = header->lun_handle; 2156 atomic_or(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status); 2157 break; 2158 } 2159 } 2160 2161 /** 2162 * zfcp_fsf_open_lun - open LUN 2163 * @erp_action: pointer to struct zfcp_erp_action 2164 * Returns: 0 on success, error otherwise 2165 */ 2166 int zfcp_fsf_open_lun(struct zfcp_erp_action *erp_action) 2167 { 2168 struct zfcp_adapter *adapter = erp_action->adapter; 2169 struct zfcp_qdio *qdio = adapter->qdio; 2170 struct zfcp_fsf_req *req; 2171 int retval = -EIO; 2172 2173 spin_lock_irq(&qdio->req_q_lock); 2174 if (zfcp_qdio_sbal_get(qdio)) 2175 goto out; 2176 2177 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN, 2178 SBAL_SFLAGS0_TYPE_READ, 2179 adapter->pool.erp_req); 2180 2181 if (IS_ERR(req)) { 2182 retval = PTR_ERR(req); 2183 goto out; 2184 } 2185 2186 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 2187 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 2188 2189 req->qtcb->header.port_handle = erp_action->port->handle; 2190 req->qtcb->bottom.support.fcp_lun = zfcp_scsi_dev_lun(erp_action->sdev); 2191 req->handler = zfcp_fsf_open_lun_handler; 2192 req->data = erp_action->sdev; 2193 req->erp_action = erp_action; 2194 erp_action->fsf_req_id = req->req_id; 2195 2196 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) 2197 req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING; 2198 2199 zfcp_fsf_start_erp_timer(req); 2200 retval = zfcp_fsf_req_send(req); 2201 if (retval) { 2202 zfcp_fsf_req_free(req); 2203 erp_action->fsf_req_id = 0; 2204 } 2205 /* NOTE: DO NOT TOUCH req PAST THIS POINT! */ 2206 out: 2207 spin_unlock_irq(&qdio->req_q_lock); 2208 return retval; 2209 } 2210 2211 static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req) 2212 { 2213 struct scsi_device *sdev = req->data; 2214 struct zfcp_scsi_dev *zfcp_sdev; 2215 2216 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 2217 return; 2218 2219 zfcp_sdev = sdev_to_zfcp(sdev); 2220 2221 switch (req->qtcb->header.fsf_status) { 2222 case FSF_PORT_HANDLE_NOT_VALID: 2223 zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1"); 2224 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2225 break; 2226 case FSF_LUN_HANDLE_NOT_VALID: 2227 zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fscuh_2"); 2228 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2229 break; 2230 case FSF_PORT_BOXED: 2231 zfcp_erp_set_port_status(zfcp_sdev->port, 2232 ZFCP_STATUS_COMMON_ACCESS_BOXED); 2233 zfcp_erp_port_reopen(zfcp_sdev->port, 2234 ZFCP_STATUS_COMMON_ERP_FAILED, "fscuh_3"); 2235 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2236 break; 2237 case FSF_ADAPTER_STATUS_AVAILABLE: 2238 switch (req->qtcb->header.fsf_status_qual.word[0]) { 2239 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 2240 zfcp_fc_test_link(zfcp_sdev->port); 2241 fallthrough; 2242 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 2243 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2244 break; 2245 } 2246 break; 2247 case FSF_GOOD: 2248 atomic_andnot(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status); 2249 break; 2250 } 2251 } 2252 2253 /** 2254 * zfcp_fsf_close_LUN - close LUN 2255 * @erp_action: pointer to erp_action triggering the "close LUN" 2256 * Returns: 0 on success, error otherwise 2257 */ 2258 int zfcp_fsf_close_lun(struct zfcp_erp_action *erp_action) 2259 { 2260 struct zfcp_qdio *qdio = erp_action->adapter->qdio; 2261 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(erp_action->sdev); 2262 struct zfcp_fsf_req *req; 2263 int retval = -EIO; 2264 2265 spin_lock_irq(&qdio->req_q_lock); 2266 if (zfcp_qdio_sbal_get(qdio)) 2267 goto out; 2268 2269 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN, 2270 SBAL_SFLAGS0_TYPE_READ, 2271 qdio->adapter->pool.erp_req); 2272 2273 if (IS_ERR(req)) { 2274 retval = PTR_ERR(req); 2275 goto out; 2276 } 2277 2278 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 2279 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 2280 2281 req->qtcb->header.port_handle = erp_action->port->handle; 2282 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle; 2283 req->handler = zfcp_fsf_close_lun_handler; 2284 req->data = erp_action->sdev; 2285 req->erp_action = erp_action; 2286 erp_action->fsf_req_id = req->req_id; 2287 2288 zfcp_fsf_start_erp_timer(req); 2289 retval = zfcp_fsf_req_send(req); 2290 if (retval) { 2291 zfcp_fsf_req_free(req); 2292 erp_action->fsf_req_id = 0; 2293 } 2294 /* NOTE: DO NOT TOUCH req PAST THIS POINT! */ 2295 out: 2296 spin_unlock_irq(&qdio->req_q_lock); 2297 return retval; 2298 } 2299 2300 static void zfcp_fsf_update_lat(struct zfcp_latency_record *lat_rec, u32 lat) 2301 { 2302 lat_rec->sum += lat; 2303 lat_rec->min = min(lat_rec->min, lat); 2304 lat_rec->max = max(lat_rec->max, lat); 2305 } 2306 2307 static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi) 2308 { 2309 struct fsf_qual_latency_info *lat_in; 2310 struct zfcp_latency_cont *lat = NULL; 2311 struct zfcp_scsi_dev *zfcp_sdev; 2312 struct zfcp_blk_drv_data blktrc; 2313 int ticks = req->adapter->timer_ticks; 2314 2315 lat_in = &req->qtcb->prefix.prot_status_qual.latency_info; 2316 2317 blktrc.flags = 0; 2318 blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC; 2319 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 2320 blktrc.flags |= ZFCP_BLK_REQ_ERROR; 2321 blktrc.inb_usage = 0; 2322 blktrc.outb_usage = req->qdio_req.qdio_outb_usage; 2323 2324 if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA && 2325 !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) { 2326 zfcp_sdev = sdev_to_zfcp(scsi->device); 2327 blktrc.flags |= ZFCP_BLK_LAT_VALID; 2328 blktrc.channel_lat = lat_in->channel_lat * ticks; 2329 blktrc.fabric_lat = lat_in->fabric_lat * ticks; 2330 2331 switch (req->qtcb->bottom.io.data_direction) { 2332 case FSF_DATADIR_DIF_READ_STRIP: 2333 case FSF_DATADIR_DIF_READ_CONVERT: 2334 case FSF_DATADIR_READ: 2335 lat = &zfcp_sdev->latencies.read; 2336 break; 2337 case FSF_DATADIR_DIF_WRITE_INSERT: 2338 case FSF_DATADIR_DIF_WRITE_CONVERT: 2339 case FSF_DATADIR_WRITE: 2340 lat = &zfcp_sdev->latencies.write; 2341 break; 2342 case FSF_DATADIR_CMND: 2343 lat = &zfcp_sdev->latencies.cmd; 2344 break; 2345 } 2346 2347 if (lat) { 2348 spin_lock(&zfcp_sdev->latencies.lock); 2349 zfcp_fsf_update_lat(&lat->channel, lat_in->channel_lat); 2350 zfcp_fsf_update_lat(&lat->fabric, lat_in->fabric_lat); 2351 lat->counter++; 2352 spin_unlock(&zfcp_sdev->latencies.lock); 2353 } 2354 } 2355 2356 blk_add_driver_data(scsi->request->q, scsi->request, &blktrc, 2357 sizeof(blktrc)); 2358 } 2359 2360 /** 2361 * zfcp_fsf_fcp_handler_common() - FCP response handler common to I/O and TMF. 2362 * @req: Pointer to FSF request. 2363 * @sdev: Pointer to SCSI device as request context. 2364 */ 2365 static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req, 2366 struct scsi_device *sdev) 2367 { 2368 struct zfcp_scsi_dev *zfcp_sdev; 2369 struct fsf_qtcb_header *header = &req->qtcb->header; 2370 2371 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) 2372 return; 2373 2374 zfcp_sdev = sdev_to_zfcp(sdev); 2375 2376 switch (header->fsf_status) { 2377 case FSF_HANDLE_MISMATCH: 2378 case FSF_PORT_HANDLE_NOT_VALID: 2379 zfcp_erp_adapter_reopen(req->adapter, 0, "fssfch1"); 2380 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2381 break; 2382 case FSF_FCPLUN_NOT_VALID: 2383 case FSF_LUN_HANDLE_NOT_VALID: 2384 zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fssfch2"); 2385 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2386 break; 2387 case FSF_SERVICE_CLASS_NOT_SUPPORTED: 2388 zfcp_fsf_class_not_supp(req); 2389 break; 2390 case FSF_DIRECTION_INDICATOR_NOT_VALID: 2391 dev_err(&req->adapter->ccw_device->dev, 2392 "Incorrect direction %d, LUN 0x%016Lx on port " 2393 "0x%016Lx closed\n", 2394 req->qtcb->bottom.io.data_direction, 2395 (unsigned long long)zfcp_scsi_dev_lun(sdev), 2396 (unsigned long long)zfcp_sdev->port->wwpn); 2397 zfcp_erp_adapter_shutdown(req->adapter, 0, "fssfch3"); 2398 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2399 break; 2400 case FSF_CMND_LENGTH_NOT_VALID: 2401 dev_err(&req->adapter->ccw_device->dev, 2402 "Incorrect FCP_CMND length %d, FCP device closed\n", 2403 req->qtcb->bottom.io.fcp_cmnd_length); 2404 zfcp_erp_adapter_shutdown(req->adapter, 0, "fssfch4"); 2405 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2406 break; 2407 case FSF_PORT_BOXED: 2408 zfcp_erp_set_port_status(zfcp_sdev->port, 2409 ZFCP_STATUS_COMMON_ACCESS_BOXED); 2410 zfcp_erp_port_reopen(zfcp_sdev->port, 2411 ZFCP_STATUS_COMMON_ERP_FAILED, "fssfch5"); 2412 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2413 break; 2414 case FSF_LUN_BOXED: 2415 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED); 2416 zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED, 2417 "fssfch6"); 2418 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2419 break; 2420 case FSF_ADAPTER_STATUS_AVAILABLE: 2421 if (header->fsf_status_qual.word[0] == 2422 FSF_SQ_INVOKE_LINK_TEST_PROCEDURE) 2423 zfcp_fc_test_link(zfcp_sdev->port); 2424 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2425 break; 2426 case FSF_SECURITY_ERROR: 2427 zfcp_fsf_log_security_error(&req->adapter->ccw_device->dev, 2428 header->fsf_status_qual.word[0], 2429 zfcp_sdev->port->wwpn); 2430 zfcp_erp_port_forced_reopen(zfcp_sdev->port, 0, "fssfch7"); 2431 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2432 break; 2433 } 2434 } 2435 2436 static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req) 2437 { 2438 struct scsi_cmnd *scpnt; 2439 struct fcp_resp_with_ext *fcp_rsp; 2440 unsigned long flags; 2441 2442 read_lock_irqsave(&req->adapter->abort_lock, flags); 2443 2444 scpnt = req->data; 2445 if (unlikely(!scpnt)) { 2446 read_unlock_irqrestore(&req->adapter->abort_lock, flags); 2447 return; 2448 } 2449 2450 zfcp_fsf_fcp_handler_common(req, scpnt->device); 2451 2452 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) { 2453 set_host_byte(scpnt, DID_TRANSPORT_DISRUPTED); 2454 goto skip_fsfstatus; 2455 } 2456 2457 switch (req->qtcb->header.fsf_status) { 2458 case FSF_INCONSISTENT_PROT_DATA: 2459 case FSF_INVALID_PROT_PARM: 2460 set_host_byte(scpnt, DID_ERROR); 2461 goto skip_fsfstatus; 2462 case FSF_BLOCK_GUARD_CHECK_FAILURE: 2463 zfcp_scsi_dif_sense_error(scpnt, 0x1); 2464 goto skip_fsfstatus; 2465 case FSF_APP_TAG_CHECK_FAILURE: 2466 zfcp_scsi_dif_sense_error(scpnt, 0x2); 2467 goto skip_fsfstatus; 2468 case FSF_REF_TAG_CHECK_FAILURE: 2469 zfcp_scsi_dif_sense_error(scpnt, 0x3); 2470 goto skip_fsfstatus; 2471 } 2472 BUILD_BUG_ON(sizeof(struct fcp_resp_with_ext) > FSF_FCP_RSP_SIZE); 2473 fcp_rsp = &req->qtcb->bottom.io.fcp_rsp.iu; 2474 zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt); 2475 2476 skip_fsfstatus: 2477 zfcp_fsf_req_trace(req, scpnt); 2478 zfcp_dbf_scsi_result(scpnt, req); 2479 2480 scpnt->host_scribble = NULL; 2481 (scpnt->scsi_done) (scpnt); 2482 /* 2483 * We must hold this lock until scsi_done has been called. 2484 * Otherwise we may call scsi_done after abort regarding this 2485 * command has completed. 2486 * Note: scsi_done must not block! 2487 */ 2488 read_unlock_irqrestore(&req->adapter->abort_lock, flags); 2489 } 2490 2491 static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir) 2492 { 2493 switch (scsi_get_prot_op(scsi_cmnd)) { 2494 case SCSI_PROT_NORMAL: 2495 switch (scsi_cmnd->sc_data_direction) { 2496 case DMA_NONE: 2497 *data_dir = FSF_DATADIR_CMND; 2498 break; 2499 case DMA_FROM_DEVICE: 2500 *data_dir = FSF_DATADIR_READ; 2501 break; 2502 case DMA_TO_DEVICE: 2503 *data_dir = FSF_DATADIR_WRITE; 2504 break; 2505 case DMA_BIDIRECTIONAL: 2506 return -EINVAL; 2507 } 2508 break; 2509 2510 case SCSI_PROT_READ_STRIP: 2511 *data_dir = FSF_DATADIR_DIF_READ_STRIP; 2512 break; 2513 case SCSI_PROT_WRITE_INSERT: 2514 *data_dir = FSF_DATADIR_DIF_WRITE_INSERT; 2515 break; 2516 case SCSI_PROT_READ_PASS: 2517 *data_dir = FSF_DATADIR_DIF_READ_CONVERT; 2518 break; 2519 case SCSI_PROT_WRITE_PASS: 2520 *data_dir = FSF_DATADIR_DIF_WRITE_CONVERT; 2521 break; 2522 default: 2523 return -EINVAL; 2524 } 2525 2526 return 0; 2527 } 2528 2529 /** 2530 * zfcp_fsf_fcp_cmnd - initiate an FCP command (for a SCSI command) 2531 * @scsi_cmnd: scsi command to be sent 2532 */ 2533 int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd) 2534 { 2535 struct zfcp_fsf_req *req; 2536 struct fcp_cmnd *fcp_cmnd; 2537 u8 sbtype = SBAL_SFLAGS0_TYPE_READ; 2538 int retval = -EIO; 2539 struct scsi_device *sdev = scsi_cmnd->device; 2540 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); 2541 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; 2542 struct zfcp_qdio *qdio = adapter->qdio; 2543 struct fsf_qtcb_bottom_io *io; 2544 unsigned long flags; 2545 2546 if (unlikely(!(atomic_read(&zfcp_sdev->status) & 2547 ZFCP_STATUS_COMMON_UNBLOCKED))) 2548 return -EBUSY; 2549 2550 spin_lock_irqsave(&qdio->req_q_lock, flags); 2551 if (atomic_read(&qdio->req_q_free) <= 0) { 2552 atomic_inc(&qdio->req_q_full); 2553 goto out; 2554 } 2555 2556 if (scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) 2557 sbtype = SBAL_SFLAGS0_TYPE_WRITE; 2558 2559 req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND, 2560 sbtype, adapter->pool.scsi_req); 2561 2562 if (IS_ERR(req)) { 2563 retval = PTR_ERR(req); 2564 goto out; 2565 } 2566 2567 scsi_cmnd->host_scribble = (unsigned char *) req->req_id; 2568 2569 io = &req->qtcb->bottom.io; 2570 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 2571 req->data = scsi_cmnd; 2572 req->handler = zfcp_fsf_fcp_cmnd_handler; 2573 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle; 2574 req->qtcb->header.port_handle = zfcp_sdev->port->handle; 2575 io->service_class = FSF_CLASS_3; 2576 io->fcp_cmnd_length = FCP_CMND_LEN; 2577 2578 if (scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) { 2579 io->data_block_length = scsi_cmnd->device->sector_size; 2580 io->ref_tag_value = scsi_get_lba(scsi_cmnd) & 0xFFFFFFFF; 2581 } 2582 2583 if (zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction)) 2584 goto failed_scsi_cmnd; 2585 2586 BUILD_BUG_ON(sizeof(struct fcp_cmnd) > FSF_FCP_CMND_SIZE); 2587 fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu; 2588 zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd); 2589 2590 if ((scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) && 2591 scsi_prot_sg_count(scsi_cmnd)) { 2592 zfcp_qdio_set_data_div(qdio, &req->qdio_req, 2593 scsi_prot_sg_count(scsi_cmnd)); 2594 retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, 2595 scsi_prot_sglist(scsi_cmnd)); 2596 if (retval) 2597 goto failed_scsi_cmnd; 2598 io->prot_data_length = zfcp_qdio_real_bytes( 2599 scsi_prot_sglist(scsi_cmnd)); 2600 } 2601 2602 retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, 2603 scsi_sglist(scsi_cmnd)); 2604 if (unlikely(retval)) 2605 goto failed_scsi_cmnd; 2606 2607 zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req); 2608 if (zfcp_adapter_multi_buffer_active(adapter)) 2609 zfcp_qdio_set_scount(qdio, &req->qdio_req); 2610 2611 retval = zfcp_fsf_req_send(req); 2612 if (unlikely(retval)) 2613 goto failed_scsi_cmnd; 2614 /* NOTE: DO NOT TOUCH req PAST THIS POINT! */ 2615 2616 goto out; 2617 2618 failed_scsi_cmnd: 2619 zfcp_fsf_req_free(req); 2620 scsi_cmnd->host_scribble = NULL; 2621 out: 2622 spin_unlock_irqrestore(&qdio->req_q_lock, flags); 2623 return retval; 2624 } 2625 2626 static void zfcp_fsf_fcp_task_mgmt_handler(struct zfcp_fsf_req *req) 2627 { 2628 struct scsi_device *sdev = req->data; 2629 struct fcp_resp_with_ext *fcp_rsp; 2630 struct fcp_resp_rsp_info *rsp_info; 2631 2632 zfcp_fsf_fcp_handler_common(req, sdev); 2633 2634 fcp_rsp = &req->qtcb->bottom.io.fcp_rsp.iu; 2635 rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1]; 2636 2637 if ((rsp_info->rsp_code != FCP_TMF_CMPL) || 2638 (req->status & ZFCP_STATUS_FSFREQ_ERROR)) 2639 req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED; 2640 } 2641 2642 /** 2643 * zfcp_fsf_fcp_task_mgmt() - Send SCSI task management command (TMF). 2644 * @sdev: Pointer to SCSI device to send the task management command to. 2645 * @tm_flags: Unsigned byte for task management flags. 2646 * 2647 * Return: On success pointer to struct zfcp_fsf_req, %NULL otherwise. 2648 */ 2649 struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_device *sdev, 2650 u8 tm_flags) 2651 { 2652 struct zfcp_fsf_req *req = NULL; 2653 struct fcp_cmnd *fcp_cmnd; 2654 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); 2655 struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio; 2656 2657 if (unlikely(!(atomic_read(&zfcp_sdev->status) & 2658 ZFCP_STATUS_COMMON_UNBLOCKED))) 2659 return NULL; 2660 2661 spin_lock_irq(&qdio->req_q_lock); 2662 if (zfcp_qdio_sbal_get(qdio)) 2663 goto out; 2664 2665 req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND, 2666 SBAL_SFLAGS0_TYPE_WRITE, 2667 qdio->adapter->pool.scsi_req); 2668 2669 if (IS_ERR(req)) { 2670 req = NULL; 2671 goto out; 2672 } 2673 2674 req->data = sdev; 2675 2676 req->handler = zfcp_fsf_fcp_task_mgmt_handler; 2677 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle; 2678 req->qtcb->header.port_handle = zfcp_sdev->port->handle; 2679 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND; 2680 req->qtcb->bottom.io.service_class = FSF_CLASS_3; 2681 req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN; 2682 2683 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 2684 2685 fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu; 2686 zfcp_fc_fcp_tm(fcp_cmnd, sdev, tm_flags); 2687 2688 zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT); 2689 if (!zfcp_fsf_req_send(req)) { 2690 /* NOTE: DO NOT TOUCH req, UNTIL IT COMPLETES! */ 2691 goto out; 2692 } 2693 2694 zfcp_fsf_req_free(req); 2695 req = NULL; 2696 out: 2697 spin_unlock_irq(&qdio->req_q_lock); 2698 return req; 2699 } 2700 2701 /** 2702 * zfcp_fsf_reqid_check - validate req_id contained in SBAL returned by QDIO 2703 * @qdio: pointer to struct zfcp_qdio 2704 * @sbal_idx: response queue index of SBAL to be processed 2705 */ 2706 void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx) 2707 { 2708 struct zfcp_adapter *adapter = qdio->adapter; 2709 struct qdio_buffer *sbal = qdio->res_q[sbal_idx]; 2710 struct qdio_buffer_element *sbale; 2711 struct zfcp_fsf_req *fsf_req; 2712 unsigned long req_id; 2713 int idx; 2714 2715 for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) { 2716 2717 sbale = &sbal->element[idx]; 2718 req_id = sbale->addr; 2719 fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id); 2720 2721 if (!fsf_req) { 2722 /* 2723 * Unknown request means that we have potentially memory 2724 * corruption and must stop the machine immediately. 2725 */ 2726 zfcp_qdio_siosl(adapter); 2727 panic("error: unknown req_id (%lx) on adapter %s.\n", 2728 req_id, dev_name(&adapter->ccw_device->dev)); 2729 } 2730 2731 zfcp_fsf_req_complete(fsf_req); 2732 2733 if (likely(sbale->eflags & SBAL_EFLAGS_LAST_ENTRY)) 2734 break; 2735 } 2736 } 2737