// SPDX-License-Identifier: GPL-2.0
/*
 * zfcp device driver
 *
 * Implementation of FSF commands.
 *
 * Copyright IBM Corp. 2002, 2018
 */

#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/blktrace_api.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <scsi/fc/fc_els.h>
#include "zfcp_ext.h"
#include "zfcp_fc.h"
#include "zfcp_dbf.h"
#include "zfcp_qdio.h"
#include "zfcp_reqlist.h"

/* timeout for FSF requests sent during scsi_eh: abort or FCP TMF */
#define ZFCP_FSF_SCSI_ER_TIMEOUT (10*HZ)
/* timeout for: exchange config/port data outside ERP, or open/close WKA port */
#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ)

struct kmem_cache *zfcp_fsf_qtcb_cache;

static void zfcp_fsf_request_timeout_handler(struct timer_list *t)
{
	struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer);
	struct zfcp_adapter *adapter = fsf_req->adapter;

	zfcp_qdio_siosl(adapter);
	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
				"fsrth_1");
}

static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
				 unsigned long timeout)
{
	fsf_req->timer.function = zfcp_fsf_request_timeout_handler;
	fsf_req->timer.expires = jiffies + timeout;
	add_timer(&fsf_req->timer);
}

static void zfcp_fsf_start_erp_timer(struct zfcp_fsf_req *fsf_req)
{
	BUG_ON(!fsf_req->erp_action);
	fsf_req->timer.function = zfcp_erp_timeout_handler;
	fsf_req->timer.expires = jiffies + 30 * HZ;
	add_timer(&fsf_req->timer);
}

/* association between FSF command and FSF QTCB type */
static u32 fsf_qtcb_type[] = {
	[FSF_QTCB_FCP_CMND] = FSF_IO_COMMAND,
	[FSF_QTCB_ABORT_FCP_CMND] = FSF_SUPPORT_COMMAND,
	[FSF_QTCB_OPEN_PORT_WITH_DID] = FSF_SUPPORT_COMMAND,
	[FSF_QTCB_OPEN_LUN] = FSF_SUPPORT_COMMAND,
	[FSF_QTCB_CLOSE_LUN] = FSF_SUPPORT_COMMAND,
	[FSF_QTCB_CLOSE_PORT] = FSF_SUPPORT_COMMAND,
	[FSF_QTCB_CLOSE_PHYSICAL_PORT] = FSF_SUPPORT_COMMAND,
	[FSF_QTCB_SEND_ELS] = FSF_SUPPORT_COMMAND,
	[FSF_QTCB_SEND_GENERIC] = FSF_SUPPORT_COMMAND,
	[FSF_QTCB_EXCHANGE_CONFIG_DATA] = FSF_CONFIG_COMMAND,
	[FSF_QTCB_EXCHANGE_PORT_DATA] = FSF_PORT_COMMAND,
	[FSF_QTCB_DOWNLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND,
	[FSF_QTCB_UPLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND
};

static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
{
	dev_err(&req->adapter->ccw_device->dev, "FCP device not "
		"operational because of an unsupported FC class\n");
	zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1");
	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
}

/**
 * zfcp_fsf_req_free - free memory used by fsf request
 * @req: pointer to struct zfcp_fsf_req
 */
void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
{
	if (likely(req->pool)) {
		if (likely(!zfcp_fsf_req_is_status_read_buffer(req)))
			mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);
		mempool_free(req, req->pool);
		return;
	}

	if (likely(!zfcp_fsf_req_is_status_read_buffer(req)))
		kmem_cache_free(zfcp_fsf_qtcb_cache, req->qtcb);
	kfree(req);
}

static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
{
	unsigned long flags;
	struct fsf_status_read_buffer *sr_buf = req->data;
	struct zfcp_adapter *adapter = req->adapter;
	struct zfcp_port *port;
	int d_id = ntoh24(sr_buf->d_id);

	read_lock_irqsave(&adapter->port_list_lock, flags);
	list_for_each_entry(port, &adapter->port_list, list)
		if
(port->d_id == d_id) { 110 zfcp_erp_port_reopen(port, 0, "fssrpc1"); 111 break; 112 } 113 read_unlock_irqrestore(&adapter->port_list_lock, flags); 114 } 115 116 static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, 117 struct fsf_link_down_info *link_down) 118 { 119 struct zfcp_adapter *adapter = req->adapter; 120 121 if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED) 122 return; 123 124 atomic_or(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status); 125 126 zfcp_scsi_schedule_rports_block(adapter); 127 128 if (!link_down) 129 goto out; 130 131 switch (link_down->error_code) { 132 case FSF_PSQ_LINK_NO_LIGHT: 133 dev_warn(&req->adapter->ccw_device->dev, 134 "There is no light signal from the local " 135 "fibre channel cable\n"); 136 break; 137 case FSF_PSQ_LINK_WRAP_PLUG: 138 dev_warn(&req->adapter->ccw_device->dev, 139 "There is a wrap plug instead of a fibre " 140 "channel cable\n"); 141 break; 142 case FSF_PSQ_LINK_NO_FCP: 143 dev_warn(&req->adapter->ccw_device->dev, 144 "The adjacent fibre channel node does not " 145 "support FCP\n"); 146 break; 147 case FSF_PSQ_LINK_FIRMWARE_UPDATE: 148 dev_warn(&req->adapter->ccw_device->dev, 149 "The FCP device is suspended because of a " 150 "firmware update\n"); 151 break; 152 case FSF_PSQ_LINK_INVALID_WWPN: 153 dev_warn(&req->adapter->ccw_device->dev, 154 "The FCP device detected a WWPN that is " 155 "duplicate or not valid\n"); 156 break; 157 case FSF_PSQ_LINK_NO_NPIV_SUPPORT: 158 dev_warn(&req->adapter->ccw_device->dev, 159 "The fibre channel fabric does not support NPIV\n"); 160 break; 161 case FSF_PSQ_LINK_NO_FCP_RESOURCES: 162 dev_warn(&req->adapter->ccw_device->dev, 163 "The FCP adapter cannot support more NPIV ports\n"); 164 break; 165 case FSF_PSQ_LINK_NO_FABRIC_RESOURCES: 166 dev_warn(&req->adapter->ccw_device->dev, 167 "The adjacent switch cannot support " 168 "more NPIV ports\n"); 169 break; 170 case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE: 171 dev_warn(&req->adapter->ccw_device->dev, 172 "The FCP adapter could not log in to the " 173 "fibre channel fabric\n"); 174 break; 175 case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED: 176 dev_warn(&req->adapter->ccw_device->dev, 177 "The WWPN assignment file on the FCP adapter " 178 "has been damaged\n"); 179 break; 180 case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED: 181 dev_warn(&req->adapter->ccw_device->dev, 182 "The mode table on the FCP adapter " 183 "has been damaged\n"); 184 break; 185 case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT: 186 dev_warn(&req->adapter->ccw_device->dev, 187 "All NPIV ports on the FCP adapter have " 188 "been assigned\n"); 189 break; 190 default: 191 dev_warn(&req->adapter->ccw_device->dev, 192 "The link between the FCP adapter and " 193 "the FC fabric is down\n"); 194 } 195 out: 196 zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_ERP_FAILED); 197 } 198 199 static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req) 200 { 201 struct fsf_status_read_buffer *sr_buf = req->data; 202 struct fsf_link_down_info *ldi = 203 (struct fsf_link_down_info *) &sr_buf->payload; 204 205 switch (sr_buf->status_subtype) { 206 case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK: 207 case FSF_STATUS_READ_SUB_FDISC_FAILED: 208 zfcp_fsf_link_down_info_eval(req, ldi); 209 break; 210 case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE: 211 zfcp_fsf_link_down_info_eval(req, NULL); 212 } 213 } 214 215 static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req) 216 { 217 struct zfcp_adapter *adapter = req->adapter; 218 struct fsf_status_read_buffer *sr_buf = req->data; 219 220 if 
(req->status & ZFCP_STATUS_FSFREQ_DISMISSED) { 221 zfcp_dbf_hba_fsf_uss("fssrh_1", req); 222 mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data); 223 zfcp_fsf_req_free(req); 224 return; 225 } 226 227 zfcp_dbf_hba_fsf_uss("fssrh_4", req); 228 229 switch (sr_buf->status_type) { 230 case FSF_STATUS_READ_PORT_CLOSED: 231 zfcp_fsf_status_read_port_closed(req); 232 break; 233 case FSF_STATUS_READ_INCOMING_ELS: 234 zfcp_fc_incoming_els(req); 235 break; 236 case FSF_STATUS_READ_SENSE_DATA_AVAIL: 237 break; 238 case FSF_STATUS_READ_BIT_ERROR_THRESHOLD: 239 dev_warn(&adapter->ccw_device->dev, 240 "The error threshold for checksum statistics " 241 "has been exceeded\n"); 242 zfcp_dbf_hba_bit_err("fssrh_3", req); 243 break; 244 case FSF_STATUS_READ_LINK_DOWN: 245 zfcp_fsf_status_read_link_down(req); 246 zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKDOWN, 0); 247 break; 248 case FSF_STATUS_READ_LINK_UP: 249 dev_info(&adapter->ccw_device->dev, 250 "The local link has been restored\n"); 251 /* All ports should be marked as ready to run again */ 252 zfcp_erp_set_adapter_status(adapter, 253 ZFCP_STATUS_COMMON_RUNNING); 254 zfcp_erp_adapter_reopen(adapter, 255 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | 256 ZFCP_STATUS_COMMON_ERP_FAILED, 257 "fssrh_2"); 258 zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKUP, 0); 259 260 break; 261 case FSF_STATUS_READ_NOTIFICATION_LOST: 262 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS) 263 zfcp_fc_conditional_port_scan(adapter); 264 break; 265 case FSF_STATUS_READ_FEATURE_UPDATE_ALERT: 266 adapter->adapter_features = sr_buf->payload.word[0]; 267 break; 268 } 269 270 mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data); 271 zfcp_fsf_req_free(req); 272 273 atomic_inc(&adapter->stat_miss); 274 queue_work(adapter->work_queue, &adapter->stat_work); 275 } 276 277 static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req) 278 { 279 switch (req->qtcb->header.fsf_status_qual.word[0]) { 280 case FSF_SQ_FCP_RSP_AVAILABLE: 281 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 282 case FSF_SQ_NO_RETRY_POSSIBLE: 283 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 284 return; 285 case FSF_SQ_COMMAND_ABORTED: 286 break; 287 case FSF_SQ_NO_RECOM: 288 dev_err(&req->adapter->ccw_device->dev, 289 "The FCP adapter reported a problem " 290 "that cannot be recovered\n"); 291 zfcp_qdio_siosl(req->adapter); 292 zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1"); 293 break; 294 } 295 /* all non-return stats set FSFREQ_ERROR*/ 296 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 297 } 298 299 static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req) 300 { 301 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) 302 return; 303 304 switch (req->qtcb->header.fsf_status) { 305 case FSF_UNKNOWN_COMMAND: 306 dev_err(&req->adapter->ccw_device->dev, 307 "The FCP adapter does not recognize the command 0x%x\n", 308 req->qtcb->header.fsf_command); 309 zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1"); 310 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 311 break; 312 case FSF_ADAPTER_STATUS_AVAILABLE: 313 zfcp_fsf_fsfstatus_qual_eval(req); 314 break; 315 } 316 } 317 318 static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req) 319 { 320 struct zfcp_adapter *adapter = req->adapter; 321 struct fsf_qtcb *qtcb = req->qtcb; 322 union fsf_prot_status_qual *psq = &qtcb->prefix.prot_status_qual; 323 324 zfcp_dbf_hba_fsf_response(req); 325 326 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) { 327 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 328 return; 329 } 330 331 switch (qtcb->prefix.prot_status) { 332 case 
FSF_PROT_GOOD: 333 case FSF_PROT_FSF_STATUS_PRESENTED: 334 return; 335 case FSF_PROT_QTCB_VERSION_ERROR: 336 dev_err(&adapter->ccw_device->dev, 337 "QTCB version 0x%x not supported by FCP adapter " 338 "(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION, 339 psq->word[0], psq->word[1]); 340 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1"); 341 break; 342 case FSF_PROT_ERROR_STATE: 343 case FSF_PROT_SEQ_NUMB_ERROR: 344 zfcp_erp_adapter_reopen(adapter, 0, "fspse_2"); 345 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 346 break; 347 case FSF_PROT_UNSUPP_QTCB_TYPE: 348 dev_err(&adapter->ccw_device->dev, 349 "The QTCB type is not supported by the FCP adapter\n"); 350 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3"); 351 break; 352 case FSF_PROT_HOST_CONNECTION_INITIALIZING: 353 atomic_or(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, 354 &adapter->status); 355 break; 356 case FSF_PROT_DUPLICATE_REQUEST_ID: 357 dev_err(&adapter->ccw_device->dev, 358 "0x%Lx is an ambiguous request identifier\n", 359 (unsigned long long)qtcb->bottom.support.req_handle); 360 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4"); 361 break; 362 case FSF_PROT_LINK_DOWN: 363 zfcp_fsf_link_down_info_eval(req, &psq->link_down_info); 364 /* go through reopen to flush pending requests */ 365 zfcp_erp_adapter_reopen(adapter, 0, "fspse_6"); 366 break; 367 case FSF_PROT_REEST_QUEUE: 368 /* All ports should be marked as ready to run again */ 369 zfcp_erp_set_adapter_status(adapter, 370 ZFCP_STATUS_COMMON_RUNNING); 371 zfcp_erp_adapter_reopen(adapter, 372 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | 373 ZFCP_STATUS_COMMON_ERP_FAILED, 374 "fspse_8"); 375 break; 376 default: 377 dev_err(&adapter->ccw_device->dev, 378 "0x%x is not a valid transfer protocol status\n", 379 qtcb->prefix.prot_status); 380 zfcp_qdio_siosl(adapter); 381 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9"); 382 } 383 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 384 } 385 386 /** 387 * zfcp_fsf_req_complete - process completion of a FSF request 388 * @req: The FSF request that has been completed. 389 * 390 * When a request has been completed either from the FCP adapter, 391 * or it has been dismissed due to a queue shutdown, this function 392 * is called to process the completion status and trigger further 393 * events related to the FSF request. 394 */ 395 static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req) 396 { 397 if (unlikely(zfcp_fsf_req_is_status_read_buffer(req))) { 398 zfcp_fsf_status_read_handler(req); 399 return; 400 } 401 402 del_timer(&req->timer); 403 zfcp_fsf_protstatus_eval(req); 404 zfcp_fsf_fsfstatus_eval(req); 405 req->handler(req); 406 407 if (req->erp_action) 408 zfcp_erp_notify(req->erp_action, 0); 409 410 if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP)) 411 zfcp_fsf_req_free(req); 412 else 413 complete(&req->completion); 414 } 415 416 /** 417 * zfcp_fsf_req_dismiss_all - dismiss all fsf requests 418 * @adapter: pointer to struct zfcp_adapter 419 * 420 * Never ever call this without shutting down the adapter first. 421 * Otherwise the adapter would continue using and corrupting s390 storage. 422 * Included BUG_ON() call to ensure this is done. 423 * ERP is supposed to be the only user of this function. 
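 * Each dismissed request is completed with ZFCP_STATUS_FSFREQ_DISMISSED set,
 * which the protocol status evaluation turns into ZFCP_STATUS_FSFREQ_ERROR
 * without looking at the (possibly never delivered) QTCB response.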
424 */ 425 void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter) 426 { 427 struct zfcp_fsf_req *req, *tmp; 428 LIST_HEAD(remove_queue); 429 430 BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP); 431 zfcp_reqlist_move(adapter->req_list, &remove_queue); 432 433 list_for_each_entry_safe(req, tmp, &remove_queue, list) { 434 list_del(&req->list); 435 req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; 436 zfcp_fsf_req_complete(req); 437 } 438 } 439 440 #define ZFCP_FSF_PORTSPEED_1GBIT (1 << 0) 441 #define ZFCP_FSF_PORTSPEED_2GBIT (1 << 1) 442 #define ZFCP_FSF_PORTSPEED_4GBIT (1 << 2) 443 #define ZFCP_FSF_PORTSPEED_10GBIT (1 << 3) 444 #define ZFCP_FSF_PORTSPEED_8GBIT (1 << 4) 445 #define ZFCP_FSF_PORTSPEED_16GBIT (1 << 5) 446 #define ZFCP_FSF_PORTSPEED_32GBIT (1 << 6) 447 #define ZFCP_FSF_PORTSPEED_64GBIT (1 << 7) 448 #define ZFCP_FSF_PORTSPEED_128GBIT (1 << 8) 449 #define ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED (1 << 15) 450 451 static u32 zfcp_fsf_convert_portspeed(u32 fsf_speed) 452 { 453 u32 fdmi_speed = 0; 454 if (fsf_speed & ZFCP_FSF_PORTSPEED_1GBIT) 455 fdmi_speed |= FC_PORTSPEED_1GBIT; 456 if (fsf_speed & ZFCP_FSF_PORTSPEED_2GBIT) 457 fdmi_speed |= FC_PORTSPEED_2GBIT; 458 if (fsf_speed & ZFCP_FSF_PORTSPEED_4GBIT) 459 fdmi_speed |= FC_PORTSPEED_4GBIT; 460 if (fsf_speed & ZFCP_FSF_PORTSPEED_10GBIT) 461 fdmi_speed |= FC_PORTSPEED_10GBIT; 462 if (fsf_speed & ZFCP_FSF_PORTSPEED_8GBIT) 463 fdmi_speed |= FC_PORTSPEED_8GBIT; 464 if (fsf_speed & ZFCP_FSF_PORTSPEED_16GBIT) 465 fdmi_speed |= FC_PORTSPEED_16GBIT; 466 if (fsf_speed & ZFCP_FSF_PORTSPEED_32GBIT) 467 fdmi_speed |= FC_PORTSPEED_32GBIT; 468 if (fsf_speed & ZFCP_FSF_PORTSPEED_64GBIT) 469 fdmi_speed |= FC_PORTSPEED_64GBIT; 470 if (fsf_speed & ZFCP_FSF_PORTSPEED_128GBIT) 471 fdmi_speed |= FC_PORTSPEED_128GBIT; 472 if (fsf_speed & ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED) 473 fdmi_speed |= FC_PORTSPEED_NOT_NEGOTIATED; 474 return fdmi_speed; 475 } 476 477 static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req) 478 { 479 struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config; 480 struct zfcp_adapter *adapter = req->adapter; 481 struct Scsi_Host *shost = adapter->scsi_host; 482 struct fc_els_flogi *nsp, *plogi; 483 484 /* adjust pointers for missing command code */ 485 nsp = (struct fc_els_flogi *) ((u8 *)&bottom->nport_serv_param 486 - sizeof(u32)); 487 plogi = (struct fc_els_flogi *) ((u8 *)&bottom->plogi_payload 488 - sizeof(u32)); 489 490 if (req->data) 491 memcpy(req->data, bottom, sizeof(*bottom)); 492 493 fc_host_port_name(shost) = be64_to_cpu(nsp->fl_wwpn); 494 fc_host_node_name(shost) = be64_to_cpu(nsp->fl_wwnn); 495 fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3; 496 497 adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK; 498 adapter->stat_read_buf_num = max(bottom->status_read_buf_num, 499 (u16)FSF_STATUS_READS_RECOM); 500 501 if (fc_host_permanent_port_name(shost) == -1) 502 fc_host_permanent_port_name(shost) = fc_host_port_name(shost); 503 504 zfcp_scsi_set_prot(adapter); 505 506 /* no error return above here, otherwise must fix call chains */ 507 /* do not evaluate invalid fields */ 508 if (req->qtcb->header.fsf_status == FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE) 509 return 0; 510 511 fc_host_port_id(shost) = ntoh24(bottom->s_id); 512 fc_host_speed(shost) = 513 zfcp_fsf_convert_portspeed(bottom->fc_link_speed); 514 515 adapter->hydra_version = bottom->adapter_type; 516 517 switch (bottom->fc_topology) { 518 case FSF_TOPO_P2P: 519 adapter->peer_d_id = 
ntoh24(bottom->peer_d_id); 520 adapter->peer_wwpn = be64_to_cpu(plogi->fl_wwpn); 521 adapter->peer_wwnn = be64_to_cpu(plogi->fl_wwnn); 522 fc_host_port_type(shost) = FC_PORTTYPE_PTP; 523 break; 524 case FSF_TOPO_FABRIC: 525 if (bottom->connection_features & FSF_FEATURE_NPIV_MODE) 526 fc_host_port_type(shost) = FC_PORTTYPE_NPIV; 527 else 528 fc_host_port_type(shost) = FC_PORTTYPE_NPORT; 529 break; 530 case FSF_TOPO_AL: 531 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT; 532 /* fall through */ 533 default: 534 dev_err(&adapter->ccw_device->dev, 535 "Unknown or unsupported arbitrated loop " 536 "fibre channel topology detected\n"); 537 zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1"); 538 return -EIO; 539 } 540 541 return 0; 542 } 543 544 static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req) 545 { 546 struct zfcp_adapter *adapter = req->adapter; 547 struct fsf_qtcb *qtcb = req->qtcb; 548 struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config; 549 struct Scsi_Host *shost = adapter->scsi_host; 550 551 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 552 return; 553 554 adapter->fsf_lic_version = bottom->lic_version; 555 adapter->adapter_features = bottom->adapter_features; 556 adapter->connection_features = bottom->connection_features; 557 adapter->peer_wwpn = 0; 558 adapter->peer_wwnn = 0; 559 adapter->peer_d_id = 0; 560 561 switch (qtcb->header.fsf_status) { 562 case FSF_GOOD: 563 if (zfcp_fsf_exchange_config_evaluate(req)) 564 return; 565 566 if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) { 567 dev_err(&adapter->ccw_device->dev, 568 "FCP adapter maximum QTCB size (%d bytes) " 569 "is too small\n", 570 bottom->max_qtcb_size); 571 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1"); 572 return; 573 } 574 atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK, 575 &adapter->status); 576 break; 577 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE: 578 fc_host_node_name(shost) = 0; 579 fc_host_port_name(shost) = 0; 580 fc_host_port_id(shost) = 0; 581 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; 582 fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN; 583 adapter->hydra_version = 0; 584 585 /* avoids adapter shutdown to be able to recognize 586 * events such as LINK UP */ 587 atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK, 588 &adapter->status); 589 zfcp_fsf_link_down_info_eval(req, 590 &qtcb->header.fsf_status_qual.link_down_info); 591 if (zfcp_fsf_exchange_config_evaluate(req)) 592 return; 593 break; 594 default: 595 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3"); 596 return; 597 } 598 599 if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) { 600 adapter->hardware_version = bottom->hardware_version; 601 memcpy(fc_host_serial_number(shost), bottom->serial_number, 602 min(FC_SERIAL_NUMBER_SIZE, 17)); 603 EBCASC(fc_host_serial_number(shost), 604 min(FC_SERIAL_NUMBER_SIZE, 17)); 605 } 606 607 if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) { 608 dev_err(&adapter->ccw_device->dev, 609 "The FCP adapter only supports newer " 610 "control block versions\n"); 611 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4"); 612 return; 613 } 614 if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) { 615 dev_err(&adapter->ccw_device->dev, 616 "The FCP adapter only supports older " 617 "control block versions\n"); 618 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5"); 619 } 620 } 621 622 static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req) 623 { 624 struct zfcp_adapter *adapter = req->adapter; 625 struct fsf_qtcb_bottom_port *bottom = &req->qtcb->bottom.port; 626 struct Scsi_Host 
*shost = adapter->scsi_host; 627 628 if (req->data) 629 memcpy(req->data, bottom, sizeof(*bottom)); 630 631 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) { 632 fc_host_permanent_port_name(shost) = bottom->wwpn; 633 } else 634 fc_host_permanent_port_name(shost) = fc_host_port_name(shost); 635 fc_host_maxframe_size(shost) = bottom->maximum_frame_size; 636 fc_host_supported_speeds(shost) = 637 zfcp_fsf_convert_portspeed(bottom->supported_speed); 638 memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types, 639 FC_FC4_LIST_SIZE); 640 memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types, 641 FC_FC4_LIST_SIZE); 642 } 643 644 static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req) 645 { 646 struct fsf_qtcb *qtcb = req->qtcb; 647 648 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 649 return; 650 651 switch (qtcb->header.fsf_status) { 652 case FSF_GOOD: 653 zfcp_fsf_exchange_port_evaluate(req); 654 break; 655 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE: 656 zfcp_fsf_exchange_port_evaluate(req); 657 zfcp_fsf_link_down_info_eval(req, 658 &qtcb->header.fsf_status_qual.link_down_info); 659 break; 660 } 661 } 662 663 static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool) 664 { 665 struct zfcp_fsf_req *req; 666 667 if (likely(pool)) 668 req = mempool_alloc(pool, GFP_ATOMIC); 669 else 670 req = kmalloc(sizeof(*req), GFP_ATOMIC); 671 672 if (unlikely(!req)) 673 return NULL; 674 675 memset(req, 0, sizeof(*req)); 676 req->pool = pool; 677 return req; 678 } 679 680 static struct fsf_qtcb *zfcp_fsf_qtcb_alloc(mempool_t *pool) 681 { 682 struct fsf_qtcb *qtcb; 683 684 if (likely(pool)) 685 qtcb = mempool_alloc(pool, GFP_ATOMIC); 686 else 687 qtcb = kmem_cache_alloc(zfcp_fsf_qtcb_cache, GFP_ATOMIC); 688 689 if (unlikely(!qtcb)) 690 return NULL; 691 692 memset(qtcb, 0, sizeof(*qtcb)); 693 return qtcb; 694 } 695 696 static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio, 697 u32 fsf_cmd, u8 sbtype, 698 mempool_t *pool) 699 { 700 struct zfcp_adapter *adapter = qdio->adapter; 701 struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool); 702 703 if (unlikely(!req)) 704 return ERR_PTR(-ENOMEM); 705 706 if (adapter->req_no == 0) 707 adapter->req_no++; 708 709 INIT_LIST_HEAD(&req->list); 710 timer_setup(&req->timer, NULL, 0); 711 init_completion(&req->completion); 712 713 req->adapter = adapter; 714 req->req_id = adapter->req_no; 715 716 if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) { 717 if (likely(pool)) 718 req->qtcb = zfcp_fsf_qtcb_alloc( 719 adapter->pool.qtcb_pool); 720 else 721 req->qtcb = zfcp_fsf_qtcb_alloc(NULL); 722 723 if (unlikely(!req->qtcb)) { 724 zfcp_fsf_req_free(req); 725 return ERR_PTR(-ENOMEM); 726 } 727 728 req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no; 729 req->qtcb->prefix.req_id = req->req_id; 730 req->qtcb->prefix.ulp_info = 26; 731 req->qtcb->prefix.qtcb_type = fsf_qtcb_type[fsf_cmd]; 732 req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION; 733 req->qtcb->header.req_handle = req->req_id; 734 req->qtcb->header.fsf_command = fsf_cmd; 735 } 736 737 zfcp_qdio_req_init(adapter->qdio, &req->qdio_req, req->req_id, sbtype, 738 req->qtcb, sizeof(struct fsf_qtcb)); 739 740 return req; 741 } 742 743 static int zfcp_fsf_req_send(struct zfcp_fsf_req *req) 744 { 745 const bool is_srb = zfcp_fsf_req_is_status_read_buffer(req); 746 struct zfcp_adapter *adapter = req->adapter; 747 struct zfcp_qdio *qdio = adapter->qdio; 748 int req_id = req->req_id; 749 750 zfcp_reqlist_add(adapter->req_list, req); 751 752 req->qdio_req.qdio_outb_usage = 
					atomic_read(&qdio->req_q_free);
	req->issued = get_tod_clock();
	if (zfcp_qdio_send(qdio, &req->qdio_req)) {
		del_timer(&req->timer);
		/* lookup request again, list might have changed */
		zfcp_reqlist_find_rm(adapter->req_list, req_id);
		zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1");
		return -EIO;
	}

	/*
	 * NOTE: DO NOT TOUCH ASYNC req PAST THIS POINT.
	 *	 ONLY TOUCH SYNC req AGAIN ON req->completion.
	 *
	 * The request might complete and be freed concurrently at any point
	 * now. This is not protected by the QDIO-lock (req_q_lock). So any
	 * uncontrolled access after this might result in a use-after-free bug.
	 * Only if the request doesn't have ZFCP_STATUS_FSFREQ_CLEANUP set,
	 * and when it is completed via req->completion, is it safe to use req
	 * again.
	 */

	/* Don't increase for unsolicited status */
	if (!is_srb)
		adapter->fsf_req_seq_no++;
	adapter->req_no++;

	return 0;
}

/**
 * zfcp_fsf_status_read - send status read request
 * @qdio: pointer to struct zfcp_qdio
 * Returns: 0 on success, ERROR otherwise
 */
int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
{
	struct zfcp_adapter *adapter = qdio->adapter;
	struct zfcp_fsf_req *req;
	struct fsf_status_read_buffer *sr_buf;
	struct page *page;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS,
				  SBAL_SFLAGS0_TYPE_STATUS,
				  adapter->pool.status_read_req);
	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	page = mempool_alloc(adapter->pool.sr_data, GFP_ATOMIC);
	if (!page) {
		retval = -ENOMEM;
		goto failed_buf;
	}
	sr_buf = page_address(page);
	memset(sr_buf, 0, sizeof(*sr_buf));
	req->data = sr_buf;

	zfcp_qdio_fill_next(qdio, &req->qdio_req, sr_buf, sizeof(*sr_buf));
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	retval = zfcp_fsf_req_send(req);
	if (retval)
		goto failed_req_send;
	/* NOTE: DO NOT TOUCH req PAST THIS POINT!
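	 * An unsolicited status request has no QTCB and bypasses the normal
	 * handler path: zfcp_fsf_status_read_handler() frees both the request
	 * and its data page once the adapter posts the buffer back.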
*/ 823 824 goto out; 825 826 failed_req_send: 827 req->data = NULL; 828 mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data); 829 failed_buf: 830 zfcp_dbf_hba_fsf_uss("fssr__1", req); 831 zfcp_fsf_req_free(req); 832 out: 833 spin_unlock_irq(&qdio->req_q_lock); 834 return retval; 835 } 836 837 static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req) 838 { 839 struct scsi_device *sdev = req->data; 840 struct zfcp_scsi_dev *zfcp_sdev; 841 union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual; 842 843 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 844 return; 845 846 zfcp_sdev = sdev_to_zfcp(sdev); 847 848 switch (req->qtcb->header.fsf_status) { 849 case FSF_PORT_HANDLE_NOT_VALID: 850 if (fsq->word[0] == fsq->word[1]) { 851 zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, 852 "fsafch1"); 853 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 854 } 855 break; 856 case FSF_LUN_HANDLE_NOT_VALID: 857 if (fsq->word[0] == fsq->word[1]) { 858 zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fsafch2"); 859 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 860 } 861 break; 862 case FSF_FCP_COMMAND_DOES_NOT_EXIST: 863 req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED; 864 break; 865 case FSF_PORT_BOXED: 866 zfcp_erp_set_port_status(zfcp_sdev->port, 867 ZFCP_STATUS_COMMON_ACCESS_BOXED); 868 zfcp_erp_port_reopen(zfcp_sdev->port, 869 ZFCP_STATUS_COMMON_ERP_FAILED, "fsafch3"); 870 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 871 break; 872 case FSF_LUN_BOXED: 873 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED); 874 zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED, 875 "fsafch4"); 876 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 877 break; 878 case FSF_ADAPTER_STATUS_AVAILABLE: 879 switch (fsq->word[0]) { 880 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 881 zfcp_fc_test_link(zfcp_sdev->port); 882 /* fall through */ 883 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 884 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 885 break; 886 } 887 break; 888 case FSF_GOOD: 889 req->status |= ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED; 890 break; 891 } 892 } 893 894 /** 895 * zfcp_fsf_abort_fcp_cmnd - abort running SCSI command 896 * @scmnd: The SCSI command to abort 897 * Returns: pointer to struct zfcp_fsf_req 898 */ 899 900 struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd) 901 { 902 struct zfcp_fsf_req *req = NULL; 903 struct scsi_device *sdev = scmnd->device; 904 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); 905 struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio; 906 unsigned long old_req_id = (unsigned long) scmnd->host_scribble; 907 908 spin_lock_irq(&qdio->req_q_lock); 909 if (zfcp_qdio_sbal_get(qdio)) 910 goto out; 911 req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND, 912 SBAL_SFLAGS0_TYPE_READ, 913 qdio->adapter->pool.scsi_abort); 914 if (IS_ERR(req)) { 915 req = NULL; 916 goto out; 917 } 918 919 if (unlikely(!(atomic_read(&zfcp_sdev->status) & 920 ZFCP_STATUS_COMMON_UNBLOCKED))) 921 goto out_error_free; 922 923 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 924 925 req->data = sdev; 926 req->handler = zfcp_fsf_abort_fcp_command_handler; 927 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle; 928 req->qtcb->header.port_handle = zfcp_sdev->port->handle; 929 req->qtcb->bottom.support.req_handle = (u64) old_req_id; 930 931 zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT); 932 if (!zfcp_fsf_req_send(req)) { 933 /* NOTE: DO NOT TOUCH req, UNTIL IT COMPLETES! 
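		 * ZFCP_STATUS_FSFREQ_CLEANUP is deliberately not set here: the
		 * SCSI error-handling caller is expected to wait on
		 * req->completion, evaluate the ABORTSUCCEEDED/ABORTNOTNEEDED
		 * bits and free the request itself.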
*/ 934 goto out; 935 } 936 937 out_error_free: 938 zfcp_fsf_req_free(req); 939 req = NULL; 940 out: 941 spin_unlock_irq(&qdio->req_q_lock); 942 return req; 943 } 944 945 static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req) 946 { 947 struct zfcp_adapter *adapter = req->adapter; 948 struct zfcp_fsf_ct_els *ct = req->data; 949 struct fsf_qtcb_header *header = &req->qtcb->header; 950 951 ct->status = -EINVAL; 952 953 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 954 goto skip_fsfstatus; 955 956 switch (header->fsf_status) { 957 case FSF_GOOD: 958 ct->status = 0; 959 zfcp_dbf_san_res("fsscth2", req); 960 break; 961 case FSF_SERVICE_CLASS_NOT_SUPPORTED: 962 zfcp_fsf_class_not_supp(req); 963 break; 964 case FSF_ADAPTER_STATUS_AVAILABLE: 965 switch (header->fsf_status_qual.word[0]){ 966 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 967 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 968 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 969 break; 970 } 971 break; 972 case FSF_PORT_BOXED: 973 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 974 break; 975 case FSF_PORT_HANDLE_NOT_VALID: 976 zfcp_erp_adapter_reopen(adapter, 0, "fsscth1"); 977 /* fall through */ 978 case FSF_GENERIC_COMMAND_REJECTED: 979 case FSF_PAYLOAD_SIZE_MISMATCH: 980 case FSF_REQUEST_SIZE_TOO_LARGE: 981 case FSF_RESPONSE_SIZE_TOO_LARGE: 982 case FSF_SBAL_MISMATCH: 983 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 984 break; 985 } 986 987 skip_fsfstatus: 988 if (ct->handler) 989 ct->handler(ct->handler_data); 990 } 991 992 static void zfcp_fsf_setup_ct_els_unchained(struct zfcp_qdio *qdio, 993 struct zfcp_qdio_req *q_req, 994 struct scatterlist *sg_req, 995 struct scatterlist *sg_resp) 996 { 997 zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_req), sg_req->length); 998 zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_resp), sg_resp->length); 999 zfcp_qdio_set_sbale_last(qdio, q_req); 1000 } 1001 1002 static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, 1003 struct scatterlist *sg_req, 1004 struct scatterlist *sg_resp) 1005 { 1006 struct zfcp_adapter *adapter = req->adapter; 1007 struct zfcp_qdio *qdio = adapter->qdio; 1008 struct fsf_qtcb *qtcb = req->qtcb; 1009 u32 feat = adapter->adapter_features; 1010 1011 if (zfcp_adapter_multi_buffer_active(adapter)) { 1012 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req)) 1013 return -EIO; 1014 qtcb->bottom.support.req_buf_length = 1015 zfcp_qdio_real_bytes(sg_req); 1016 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp)) 1017 return -EIO; 1018 qtcb->bottom.support.resp_buf_length = 1019 zfcp_qdio_real_bytes(sg_resp); 1020 1021 zfcp_qdio_set_data_div(qdio, &req->qdio_req, sg_nents(sg_req)); 1022 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 1023 zfcp_qdio_set_scount(qdio, &req->qdio_req); 1024 return 0; 1025 } 1026 1027 /* use single, unchained SBAL if it can hold the request */ 1028 if (zfcp_qdio_sg_one_sbale(sg_req) && zfcp_qdio_sg_one_sbale(sg_resp)) { 1029 zfcp_fsf_setup_ct_els_unchained(qdio, &req->qdio_req, 1030 sg_req, sg_resp); 1031 return 0; 1032 } 1033 1034 if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS)) 1035 return -EOPNOTSUPP; 1036 1037 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req)) 1038 return -EIO; 1039 1040 qtcb->bottom.support.req_buf_length = zfcp_qdio_real_bytes(sg_req); 1041 1042 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 1043 zfcp_qdio_skip_to_last_sbale(qdio, &req->qdio_req); 1044 1045 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp)) 1046 return -EIO; 1047 1048 qtcb->bottom.support.resp_buf_length = zfcp_qdio_real_bytes(sg_resp); 1049 1050 
zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 1051 1052 return 0; 1053 } 1054 1055 static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req, 1056 struct scatterlist *sg_req, 1057 struct scatterlist *sg_resp, 1058 unsigned int timeout) 1059 { 1060 int ret; 1061 1062 ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp); 1063 if (ret) 1064 return ret; 1065 1066 /* common settings for ct/gs and els requests */ 1067 if (timeout > 255) 1068 timeout = 255; /* max value accepted by hardware */ 1069 req->qtcb->bottom.support.service_class = FSF_CLASS_3; 1070 req->qtcb->bottom.support.timeout = timeout; 1071 zfcp_fsf_start_timer(req, (timeout + 10) * HZ); 1072 1073 return 0; 1074 } 1075 1076 /** 1077 * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS) 1078 * @wka_port: pointer to zfcp WKA port to send CT/GS to 1079 * @ct: pointer to struct zfcp_send_ct with data for request 1080 * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req 1081 * @timeout: timeout that hardware should use, and a later software timeout 1082 */ 1083 int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port, 1084 struct zfcp_fsf_ct_els *ct, mempool_t *pool, 1085 unsigned int timeout) 1086 { 1087 struct zfcp_qdio *qdio = wka_port->adapter->qdio; 1088 struct zfcp_fsf_req *req; 1089 int ret = -EIO; 1090 1091 spin_lock_irq(&qdio->req_q_lock); 1092 if (zfcp_qdio_sbal_get(qdio)) 1093 goto out; 1094 1095 req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC, 1096 SBAL_SFLAGS0_TYPE_WRITE_READ, pool); 1097 1098 if (IS_ERR(req)) { 1099 ret = PTR_ERR(req); 1100 goto out; 1101 } 1102 1103 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1104 ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, timeout); 1105 if (ret) 1106 goto failed_send; 1107 1108 req->handler = zfcp_fsf_send_ct_handler; 1109 req->qtcb->header.port_handle = wka_port->handle; 1110 ct->d_id = wka_port->d_id; 1111 req->data = ct; 1112 1113 zfcp_dbf_san_req("fssct_1", req, wka_port->d_id); 1114 1115 ret = zfcp_fsf_req_send(req); 1116 if (ret) 1117 goto failed_send; 1118 /* NOTE: DO NOT TOUCH req PAST THIS POINT! 
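	 * ZFCP_STATUS_FSFREQ_CLEANUP is set, so the request is freed as soon
	 * as it completes; the caller is only notified via ct->handler().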
*/ 1119 1120 goto out; 1121 1122 failed_send: 1123 zfcp_fsf_req_free(req); 1124 out: 1125 spin_unlock_irq(&qdio->req_q_lock); 1126 return ret; 1127 } 1128 1129 static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req) 1130 { 1131 struct zfcp_fsf_ct_els *send_els = req->data; 1132 struct fsf_qtcb_header *header = &req->qtcb->header; 1133 1134 send_els->status = -EINVAL; 1135 1136 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1137 goto skip_fsfstatus; 1138 1139 switch (header->fsf_status) { 1140 case FSF_GOOD: 1141 send_els->status = 0; 1142 zfcp_dbf_san_res("fsselh1", req); 1143 break; 1144 case FSF_SERVICE_CLASS_NOT_SUPPORTED: 1145 zfcp_fsf_class_not_supp(req); 1146 break; 1147 case FSF_ADAPTER_STATUS_AVAILABLE: 1148 switch (header->fsf_status_qual.word[0]){ 1149 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 1150 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 1151 case FSF_SQ_RETRY_IF_POSSIBLE: 1152 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1153 break; 1154 } 1155 break; 1156 case FSF_ELS_COMMAND_REJECTED: 1157 case FSF_PAYLOAD_SIZE_MISMATCH: 1158 case FSF_REQUEST_SIZE_TOO_LARGE: 1159 case FSF_RESPONSE_SIZE_TOO_LARGE: 1160 break; 1161 case FSF_SBAL_MISMATCH: 1162 /* should never occur, avoided in zfcp_fsf_send_els */ 1163 /* fall through */ 1164 default: 1165 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1166 break; 1167 } 1168 skip_fsfstatus: 1169 if (send_els->handler) 1170 send_els->handler(send_els->handler_data); 1171 } 1172 1173 /** 1174 * zfcp_fsf_send_els - initiate an ELS command (FC-FS) 1175 * @adapter: pointer to zfcp adapter 1176 * @d_id: N_Port_ID to send ELS to 1177 * @els: pointer to struct zfcp_send_els with data for the command 1178 * @timeout: timeout that hardware should use, and a later software timeout 1179 */ 1180 int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id, 1181 struct zfcp_fsf_ct_els *els, unsigned int timeout) 1182 { 1183 struct zfcp_fsf_req *req; 1184 struct zfcp_qdio *qdio = adapter->qdio; 1185 int ret = -EIO; 1186 1187 spin_lock_irq(&qdio->req_q_lock); 1188 if (zfcp_qdio_sbal_get(qdio)) 1189 goto out; 1190 1191 req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS, 1192 SBAL_SFLAGS0_TYPE_WRITE_READ, NULL); 1193 1194 if (IS_ERR(req)) { 1195 ret = PTR_ERR(req); 1196 goto out; 1197 } 1198 1199 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1200 1201 if (!zfcp_adapter_multi_buffer_active(adapter)) 1202 zfcp_qdio_sbal_limit(qdio, &req->qdio_req, 2); 1203 1204 ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, timeout); 1205 1206 if (ret) 1207 goto failed_send; 1208 1209 hton24(req->qtcb->bottom.support.d_id, d_id); 1210 req->handler = zfcp_fsf_send_els_handler; 1211 els->d_id = d_id; 1212 req->data = els; 1213 1214 zfcp_dbf_san_req("fssels1", req, d_id); 1215 1216 ret = zfcp_fsf_req_send(req); 1217 if (ret) 1218 goto failed_send; 1219 /* NOTE: DO NOT TOUCH req PAST THIS POINT! 
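	 * Unlike CT/GS, an ELS is addressed by d_id (copied into the QTCB
	 * bottom above) rather than by a WKA port handle.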
*/ 1220 1221 goto out; 1222 1223 failed_send: 1224 zfcp_fsf_req_free(req); 1225 out: 1226 spin_unlock_irq(&qdio->req_q_lock); 1227 return ret; 1228 } 1229 1230 int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action) 1231 { 1232 struct zfcp_fsf_req *req; 1233 struct zfcp_qdio *qdio = erp_action->adapter->qdio; 1234 int retval = -EIO; 1235 1236 spin_lock_irq(&qdio->req_q_lock); 1237 if (zfcp_qdio_sbal_get(qdio)) 1238 goto out; 1239 1240 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA, 1241 SBAL_SFLAGS0_TYPE_READ, 1242 qdio->adapter->pool.erp_req); 1243 1244 if (IS_ERR(req)) { 1245 retval = PTR_ERR(req); 1246 goto out; 1247 } 1248 1249 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1250 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 1251 1252 req->qtcb->bottom.config.feature_selection = 1253 FSF_FEATURE_NOTIFICATION_LOST | 1254 FSF_FEATURE_UPDATE_ALERT; 1255 req->erp_action = erp_action; 1256 req->handler = zfcp_fsf_exchange_config_data_handler; 1257 erp_action->fsf_req_id = req->req_id; 1258 1259 zfcp_fsf_start_erp_timer(req); 1260 retval = zfcp_fsf_req_send(req); 1261 if (retval) { 1262 zfcp_fsf_req_free(req); 1263 erp_action->fsf_req_id = 0; 1264 } 1265 /* NOTE: DO NOT TOUCH req PAST THIS POINT! */ 1266 out: 1267 spin_unlock_irq(&qdio->req_q_lock); 1268 return retval; 1269 } 1270 1271 int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio, 1272 struct fsf_qtcb_bottom_config *data) 1273 { 1274 struct zfcp_fsf_req *req = NULL; 1275 int retval = -EIO; 1276 1277 spin_lock_irq(&qdio->req_q_lock); 1278 if (zfcp_qdio_sbal_get(qdio)) 1279 goto out_unlock; 1280 1281 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA, 1282 SBAL_SFLAGS0_TYPE_READ, NULL); 1283 1284 if (IS_ERR(req)) { 1285 retval = PTR_ERR(req); 1286 goto out_unlock; 1287 } 1288 1289 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 1290 req->handler = zfcp_fsf_exchange_config_data_handler; 1291 1292 req->qtcb->bottom.config.feature_selection = 1293 FSF_FEATURE_NOTIFICATION_LOST | 1294 FSF_FEATURE_UPDATE_ALERT; 1295 1296 if (data) 1297 req->data = data; 1298 1299 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); 1300 retval = zfcp_fsf_req_send(req); 1301 spin_unlock_irq(&qdio->req_q_lock); 1302 if (!retval) { 1303 /* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. 
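		 * This sync variant does not set ZFCP_STATUS_FSFREQ_CLEANUP,
		 * so the request stays valid until it is freed below.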
*/ 1304 wait_for_completion(&req->completion); 1305 } 1306 1307 zfcp_fsf_req_free(req); 1308 return retval; 1309 1310 out_unlock: 1311 spin_unlock_irq(&qdio->req_q_lock); 1312 return retval; 1313 } 1314 1315 /** 1316 * zfcp_fsf_exchange_port_data - request information about local port 1317 * @erp_action: ERP action for the adapter for which port data is requested 1318 * Returns: 0 on success, error otherwise 1319 */ 1320 int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action) 1321 { 1322 struct zfcp_qdio *qdio = erp_action->adapter->qdio; 1323 struct zfcp_fsf_req *req; 1324 int retval = -EIO; 1325 1326 if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) 1327 return -EOPNOTSUPP; 1328 1329 spin_lock_irq(&qdio->req_q_lock); 1330 if (zfcp_qdio_sbal_get(qdio)) 1331 goto out; 1332 1333 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA, 1334 SBAL_SFLAGS0_TYPE_READ, 1335 qdio->adapter->pool.erp_req); 1336 1337 if (IS_ERR(req)) { 1338 retval = PTR_ERR(req); 1339 goto out; 1340 } 1341 1342 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1343 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 1344 1345 req->handler = zfcp_fsf_exchange_port_data_handler; 1346 req->erp_action = erp_action; 1347 erp_action->fsf_req_id = req->req_id; 1348 1349 zfcp_fsf_start_erp_timer(req); 1350 retval = zfcp_fsf_req_send(req); 1351 if (retval) { 1352 zfcp_fsf_req_free(req); 1353 erp_action->fsf_req_id = 0; 1354 } 1355 /* NOTE: DO NOT TOUCH req PAST THIS POINT! */ 1356 out: 1357 spin_unlock_irq(&qdio->req_q_lock); 1358 return retval; 1359 } 1360 1361 /** 1362 * zfcp_fsf_exchange_port_data_sync - request information about local port 1363 * @qdio: pointer to struct zfcp_qdio 1364 * @data: pointer to struct fsf_qtcb_bottom_port 1365 * Returns: 0 on success, error otherwise 1366 */ 1367 int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio, 1368 struct fsf_qtcb_bottom_port *data) 1369 { 1370 struct zfcp_fsf_req *req = NULL; 1371 int retval = -EIO; 1372 1373 if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) 1374 return -EOPNOTSUPP; 1375 1376 spin_lock_irq(&qdio->req_q_lock); 1377 if (zfcp_qdio_sbal_get(qdio)) 1378 goto out_unlock; 1379 1380 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA, 1381 SBAL_SFLAGS0_TYPE_READ, NULL); 1382 1383 if (IS_ERR(req)) { 1384 retval = PTR_ERR(req); 1385 goto out_unlock; 1386 } 1387 1388 if (data) 1389 req->data = data; 1390 1391 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 1392 1393 req->handler = zfcp_fsf_exchange_port_data_handler; 1394 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); 1395 retval = zfcp_fsf_req_send(req); 1396 spin_unlock_irq(&qdio->req_q_lock); 1397 1398 if (!retval) { 1399 /* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. 
		 */
		wait_for_completion(&req->completion);
	}

	zfcp_fsf_req_free(req);

	return retval;

out_unlock:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}

static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_port *port = req->data;
	struct fsf_qtcb_header *header = &req->qtcb->header;
	struct fc_els_flogi *plogi;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		goto out;

	switch (header->fsf_status) {
	case FSF_PORT_ALREADY_OPEN:
		break;
	case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
		dev_warn(&req->adapter->ccw_device->dev,
			 "Not enough FCP adapter resources to open "
			 "remote port 0x%016Lx\n",
			 (unsigned long long)port->wwpn);
		zfcp_erp_set_port_status(port,
					 ZFCP_STATUS_COMMON_ERP_FAILED);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (header->fsf_status_qual.word[0]) {
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
			/* no zfcp_fc_test_link() with failed open port */
			/* fall through */
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
		case FSF_SQ_NO_RETRY_POSSIBLE:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;
	case FSF_GOOD:
		port->handle = header->port_handle;
		atomic_or(ZFCP_STATUS_COMMON_OPEN |
			  ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
		atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_BOXED,
			      &port->status);
		/* check whether D_ID has changed during open */
		/*
		 * FIXME: This check is not airtight, as the FCP channel does
		 * not monitor closures of target port connections caused on
		 * the remote side. Thus, it might miss out on invalidating
		 * locally cached WWPNs (and other N_Port parameters) of gone
		 * target ports. So, our heroic attempt to make things safe
		 * could be undermined by 'open port' response data tagged with
		 * obsolete WWPNs. Another reason to monitor potential
		 * connection closures ourselves at least (by interpreting
		 * incoming ELS' and unsolicited status). It just crosses my
		 * mind that one should be able to cross-check by means of
		 * another GID_PN straight after a port has been opened.
		 * Alternately, an ADISC/PDISC ELS should suffice, as well.
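		 * (zfcp_fc_test_link(), used elsewhere in this file, takes
		 * the ADISC route for exactly this kind of cross-check.)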
1464 */ 1465 plogi = (struct fc_els_flogi *) req->qtcb->bottom.support.els; 1466 if (req->qtcb->bottom.support.els1_length >= 1467 FSF_PLOGI_MIN_LEN) 1468 zfcp_fc_plogi_evaluate(port, plogi); 1469 break; 1470 case FSF_UNKNOWN_OP_SUBTYPE: 1471 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1472 break; 1473 } 1474 1475 out: 1476 put_device(&port->dev); 1477 } 1478 1479 /** 1480 * zfcp_fsf_open_port - create and send open port request 1481 * @erp_action: pointer to struct zfcp_erp_action 1482 * Returns: 0 on success, error otherwise 1483 */ 1484 int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action) 1485 { 1486 struct zfcp_qdio *qdio = erp_action->adapter->qdio; 1487 struct zfcp_port *port = erp_action->port; 1488 struct zfcp_fsf_req *req; 1489 int retval = -EIO; 1490 1491 spin_lock_irq(&qdio->req_q_lock); 1492 if (zfcp_qdio_sbal_get(qdio)) 1493 goto out; 1494 1495 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID, 1496 SBAL_SFLAGS0_TYPE_READ, 1497 qdio->adapter->pool.erp_req); 1498 1499 if (IS_ERR(req)) { 1500 retval = PTR_ERR(req); 1501 goto out; 1502 } 1503 1504 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1505 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 1506 1507 req->handler = zfcp_fsf_open_port_handler; 1508 hton24(req->qtcb->bottom.support.d_id, port->d_id); 1509 req->data = port; 1510 req->erp_action = erp_action; 1511 erp_action->fsf_req_id = req->req_id; 1512 get_device(&port->dev); 1513 1514 zfcp_fsf_start_erp_timer(req); 1515 retval = zfcp_fsf_req_send(req); 1516 if (retval) { 1517 zfcp_fsf_req_free(req); 1518 erp_action->fsf_req_id = 0; 1519 put_device(&port->dev); 1520 } 1521 /* NOTE: DO NOT TOUCH req PAST THIS POINT! */ 1522 out: 1523 spin_unlock_irq(&qdio->req_q_lock); 1524 return retval; 1525 } 1526 1527 static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req) 1528 { 1529 struct zfcp_port *port = req->data; 1530 1531 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1532 return; 1533 1534 switch (req->qtcb->header.fsf_status) { 1535 case FSF_PORT_HANDLE_NOT_VALID: 1536 zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1"); 1537 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1538 break; 1539 case FSF_ADAPTER_STATUS_AVAILABLE: 1540 break; 1541 case FSF_GOOD: 1542 zfcp_erp_clear_port_status(port, ZFCP_STATUS_COMMON_OPEN); 1543 break; 1544 } 1545 } 1546 1547 /** 1548 * zfcp_fsf_close_port - create and send close port request 1549 * @erp_action: pointer to struct zfcp_erp_action 1550 * Returns: 0 on success, error otherwise 1551 */ 1552 int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action) 1553 { 1554 struct zfcp_qdio *qdio = erp_action->adapter->qdio; 1555 struct zfcp_fsf_req *req; 1556 int retval = -EIO; 1557 1558 spin_lock_irq(&qdio->req_q_lock); 1559 if (zfcp_qdio_sbal_get(qdio)) 1560 goto out; 1561 1562 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT, 1563 SBAL_SFLAGS0_TYPE_READ, 1564 qdio->adapter->pool.erp_req); 1565 1566 if (IS_ERR(req)) { 1567 retval = PTR_ERR(req); 1568 goto out; 1569 } 1570 1571 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1572 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 1573 1574 req->handler = zfcp_fsf_close_port_handler; 1575 req->data = erp_action->port; 1576 req->erp_action = erp_action; 1577 req->qtcb->header.port_handle = erp_action->port->handle; 1578 erp_action->fsf_req_id = req->req_id; 1579 1580 zfcp_fsf_start_erp_timer(req); 1581 retval = zfcp_fsf_req_send(req); 1582 if (retval) { 1583 zfcp_fsf_req_free(req); 1584 erp_action->fsf_req_id = 0; 1585 } 1586 /* NOTE: DO NOT TOUCH req PAST THIS POINT! 
*/ 1587 out: 1588 spin_unlock_irq(&qdio->req_q_lock); 1589 return retval; 1590 } 1591 1592 static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req) 1593 { 1594 struct zfcp_fc_wka_port *wka_port = req->data; 1595 struct fsf_qtcb_header *header = &req->qtcb->header; 1596 1597 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) { 1598 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE; 1599 goto out; 1600 } 1601 1602 switch (header->fsf_status) { 1603 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED: 1604 dev_warn(&req->adapter->ccw_device->dev, 1605 "Opening WKA port 0x%x failed\n", wka_port->d_id); 1606 /* fall through */ 1607 case FSF_ADAPTER_STATUS_AVAILABLE: 1608 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1609 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE; 1610 break; 1611 case FSF_GOOD: 1612 wka_port->handle = header->port_handle; 1613 /* fall through */ 1614 case FSF_PORT_ALREADY_OPEN: 1615 wka_port->status = ZFCP_FC_WKA_PORT_ONLINE; 1616 } 1617 out: 1618 wake_up(&wka_port->completion_wq); 1619 } 1620 1621 /** 1622 * zfcp_fsf_open_wka_port - create and send open wka-port request 1623 * @wka_port: pointer to struct zfcp_fc_wka_port 1624 * Returns: 0 on success, error otherwise 1625 */ 1626 int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port) 1627 { 1628 struct zfcp_qdio *qdio = wka_port->adapter->qdio; 1629 struct zfcp_fsf_req *req; 1630 unsigned long req_id = 0; 1631 int retval = -EIO; 1632 1633 spin_lock_irq(&qdio->req_q_lock); 1634 if (zfcp_qdio_sbal_get(qdio)) 1635 goto out; 1636 1637 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID, 1638 SBAL_SFLAGS0_TYPE_READ, 1639 qdio->adapter->pool.erp_req); 1640 1641 if (IS_ERR(req)) { 1642 retval = PTR_ERR(req); 1643 goto out; 1644 } 1645 1646 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1647 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 1648 1649 req->handler = zfcp_fsf_open_wka_port_handler; 1650 hton24(req->qtcb->bottom.support.d_id, wka_port->d_id); 1651 req->data = wka_port; 1652 1653 req_id = req->req_id; 1654 1655 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); 1656 retval = zfcp_fsf_req_send(req); 1657 if (retval) 1658 zfcp_fsf_req_free(req); 1659 /* NOTE: DO NOT TOUCH req PAST THIS POINT! 
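	 * req_id was saved above because the request may already be freed by
	 * the time the trace record below is written.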
	 */
out:
	spin_unlock_irq(&qdio->req_q_lock);
	if (!retval)
		zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req_id);
	return retval;
}

static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_fc_wka_port *wka_port = req->data;

	if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1");
	}

	wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
	wake_up(&wka_port->completion_wq);
}

/**
 * zfcp_fsf_close_wka_port - create and send close wka port request
 * @wka_port: WKA port to close
 * Returns: 0 on success, error otherwise
 */
int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
{
	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
	struct zfcp_fsf_req *req;
	unsigned long req_id = 0;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
				  SBAL_SFLAGS0_TYPE_READ,
				  qdio->adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->handler = zfcp_fsf_close_wka_port_handler;
	req->data = wka_port;
	req->qtcb->header.port_handle = wka_port->handle;

	req_id = req->req_id;

	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
	retval = zfcp_fsf_req_send(req);
	if (retval)
		zfcp_fsf_req_free(req);
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
	spin_unlock_irq(&qdio->req_q_lock);
	if (!retval)
		zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req_id);
	return retval;
}

static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_port *port = req->data;
	struct fsf_qtcb_header *header = &req->qtcb->header;
	struct scsi_device *sdev;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		return;

	switch (header->fsf_status) {
	case FSF_PORT_HANDLE_NOT_VALID:
		zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_PORT_BOXED:
		/* can't use generic zfcp_erp_modify_port_status because
		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
		atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
		shost_for_each_device(sdev, port->adapter->scsi_host)
			if (sdev_to_zfcp(sdev)->port == port)
				atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
					      &sdev_to_zfcp(sdev)->status);
		zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED);
		zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
				     "fscpph2");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (header->fsf_status_qual.word[0]) {
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
			/* fall through */
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;
	case FSF_GOOD:
		/* can't use generic zfcp_erp_modify_port_status because
		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
		 */
		atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
		shost_for_each_device(sdev, port->adapter->scsi_host)
			if
(sdev_to_zfcp(sdev)->port == port) 1769 atomic_andnot(ZFCP_STATUS_COMMON_OPEN, 1770 &sdev_to_zfcp(sdev)->status); 1771 break; 1772 } 1773 } 1774 1775 /** 1776 * zfcp_fsf_close_physical_port - close physical port 1777 * @erp_action: pointer to struct zfcp_erp_action 1778 * Returns: 0 on success 1779 */ 1780 int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action) 1781 { 1782 struct zfcp_qdio *qdio = erp_action->adapter->qdio; 1783 struct zfcp_fsf_req *req; 1784 int retval = -EIO; 1785 1786 spin_lock_irq(&qdio->req_q_lock); 1787 if (zfcp_qdio_sbal_get(qdio)) 1788 goto out; 1789 1790 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT, 1791 SBAL_SFLAGS0_TYPE_READ, 1792 qdio->adapter->pool.erp_req); 1793 1794 if (IS_ERR(req)) { 1795 retval = PTR_ERR(req); 1796 goto out; 1797 } 1798 1799 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1800 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 1801 1802 req->data = erp_action->port; 1803 req->qtcb->header.port_handle = erp_action->port->handle; 1804 req->erp_action = erp_action; 1805 req->handler = zfcp_fsf_close_physical_port_handler; 1806 erp_action->fsf_req_id = req->req_id; 1807 1808 zfcp_fsf_start_erp_timer(req); 1809 retval = zfcp_fsf_req_send(req); 1810 if (retval) { 1811 zfcp_fsf_req_free(req); 1812 erp_action->fsf_req_id = 0; 1813 } 1814 /* NOTE: DO NOT TOUCH req PAST THIS POINT! */ 1815 out: 1816 spin_unlock_irq(&qdio->req_q_lock); 1817 return retval; 1818 } 1819 1820 static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req) 1821 { 1822 struct zfcp_adapter *adapter = req->adapter; 1823 struct scsi_device *sdev = req->data; 1824 struct zfcp_scsi_dev *zfcp_sdev; 1825 struct fsf_qtcb_header *header = &req->qtcb->header; 1826 union fsf_status_qual *qual = &header->fsf_status_qual; 1827 1828 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1829 return; 1830 1831 zfcp_sdev = sdev_to_zfcp(sdev); 1832 1833 atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_DENIED | 1834 ZFCP_STATUS_COMMON_ACCESS_BOXED, 1835 &zfcp_sdev->status); 1836 1837 switch (header->fsf_status) { 1838 1839 case FSF_PORT_HANDLE_NOT_VALID: 1840 zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1"); 1841 /* fall through */ 1842 case FSF_LUN_ALREADY_OPEN: 1843 break; 1844 case FSF_PORT_BOXED: 1845 zfcp_erp_set_port_status(zfcp_sdev->port, 1846 ZFCP_STATUS_COMMON_ACCESS_BOXED); 1847 zfcp_erp_port_reopen(zfcp_sdev->port, 1848 ZFCP_STATUS_COMMON_ERP_FAILED, "fsouh_2"); 1849 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1850 break; 1851 case FSF_LUN_SHARING_VIOLATION: 1852 if (qual->word[0]) 1853 dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev, 1854 "LUN 0x%016Lx on port 0x%016Lx is already in " 1855 "use by CSS%d, MIF Image ID %x\n", 1856 zfcp_scsi_dev_lun(sdev), 1857 (unsigned long long)zfcp_sdev->port->wwpn, 1858 qual->fsf_queue_designator.cssid, 1859 qual->fsf_queue_designator.hla); 1860 zfcp_erp_set_lun_status(sdev, 1861 ZFCP_STATUS_COMMON_ERP_FAILED | 1862 ZFCP_STATUS_COMMON_ACCESS_DENIED); 1863 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1864 break; 1865 case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED: 1866 dev_warn(&adapter->ccw_device->dev, 1867 "No handle is available for LUN " 1868 "0x%016Lx on port 0x%016Lx\n", 1869 (unsigned long long)zfcp_scsi_dev_lun(sdev), 1870 (unsigned long long)zfcp_sdev->port->wwpn); 1871 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED); 1872 /* fall through */ 1873 case FSF_INVALID_COMMAND_OPTION: 1874 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1875 break; 1876 case FSF_ADAPTER_STATUS_AVAILABLE: 1877 switch (header->fsf_status_qual.word[0]) { 1878 
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
			zfcp_fc_test_link(zfcp_sdev->port);
			/* fall through */
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;

	case FSF_GOOD:
		zfcp_sdev->lun_handle = header->lun_handle;
		atomic_or(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
		break;
	}
}

/**
 * zfcp_fsf_open_lun - open LUN
 * @erp_action: pointer to struct zfcp_erp_action
 * Returns: 0 on success, error otherwise
 */
int zfcp_fsf_open_lun(struct zfcp_erp_action *erp_action)
{
	struct zfcp_adapter *adapter = erp_action->adapter;
	struct zfcp_qdio *qdio = adapter->qdio;
	struct zfcp_fsf_req *req;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN,
				  SBAL_SFLAGS0_TYPE_READ,
				  adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->qtcb->header.port_handle = erp_action->port->handle;
	req->qtcb->bottom.support.fcp_lun = zfcp_scsi_dev_lun(erp_action->sdev);
	req->handler = zfcp_fsf_open_lun_handler;
	req->data = erp_action->sdev;
	req->erp_action = erp_action;
	erp_action->fsf_req_id = req->req_id;

	if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE))
		req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;

	zfcp_fsf_start_erp_timer(req);
	retval = zfcp_fsf_req_send(req);
	if (retval) {
		zfcp_fsf_req_free(req);
		erp_action->fsf_req_id = 0;
	}
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}

static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
{
	struct scsi_device *sdev = req->data;
	struct zfcp_scsi_dev *zfcp_sdev;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		return;

	zfcp_sdev = sdev_to_zfcp(sdev);

	switch (req->qtcb->header.fsf_status) {
	case FSF_PORT_HANDLE_NOT_VALID:
		zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_LUN_HANDLE_NOT_VALID:
		zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fscuh_2");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_PORT_BOXED:
		zfcp_erp_set_port_status(zfcp_sdev->port,
					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
		zfcp_erp_port_reopen(zfcp_sdev->port,
				     ZFCP_STATUS_COMMON_ERP_FAILED, "fscuh_3");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (req->qtcb->header.fsf_status_qual.word[0]) {
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
			zfcp_fc_test_link(zfcp_sdev->port);
			/* fall through */
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;
	case FSF_GOOD:
		atomic_andnot(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
		break;
	}
}

/**
 * zfcp_fsf_close_lun - close LUN
 * @erp_action: pointer to erp_action triggering the "close LUN"
 * Returns: 0 on success, error otherwise
 */
int zfcp_fsf_close_lun(struct zfcp_erp_action *erp_action)
{
	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
	struct zfcp_fsf_req *req;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN,
				  SBAL_SFLAGS0_TYPE_READ,
				  qdio->adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->qtcb->header.port_handle = erp_action->port->handle;
	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
	req->handler = zfcp_fsf_close_lun_handler;
	req->data = erp_action->sdev;
	req->erp_action = erp_action;
	erp_action->fsf_req_id = req->req_id;

	zfcp_fsf_start_erp_timer(req);
	retval = zfcp_fsf_req_send(req);
	if (retval) {
		zfcp_fsf_req_free(req);
		erp_action->fsf_req_id = 0;
	}
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}
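
/*
 * With FSF_FEATURE_MEASUREMENT_DATA the channel reports per-command channel
 * and fabric latencies.  zfcp_fsf_req_trace() scales them by the adapter's
 * timer_ticks for blktrace and accumulates the raw values in the per-device
 * statistics protected by latencies.lock.
 */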

static void zfcp_fsf_update_lat(struct zfcp_latency_record *lat_rec, u32 lat)
{
	lat_rec->sum += lat;
	lat_rec->min = min(lat_rec->min, lat);
	lat_rec->max = max(lat_rec->max, lat);
}

static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
{
	struct fsf_qual_latency_info *lat_in;
	struct zfcp_latency_cont *lat = NULL;
	struct zfcp_scsi_dev *zfcp_sdev;
	struct zfcp_blk_drv_data blktrc;
	int ticks = req->adapter->timer_ticks;

	lat_in = &req->qtcb->prefix.prot_status_qual.latency_info;

	blktrc.flags = 0;
	blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC;
	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		blktrc.flags |= ZFCP_BLK_REQ_ERROR;
	blktrc.inb_usage = 0;
	blktrc.outb_usage = req->qdio_req.qdio_outb_usage;

	if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
	    !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
		zfcp_sdev = sdev_to_zfcp(scsi->device);
		blktrc.flags |= ZFCP_BLK_LAT_VALID;
		blktrc.channel_lat = lat_in->channel_lat * ticks;
		blktrc.fabric_lat = lat_in->fabric_lat * ticks;

		switch (req->qtcb->bottom.io.data_direction) {
		case FSF_DATADIR_DIF_READ_STRIP:
		case FSF_DATADIR_DIF_READ_CONVERT:
		case FSF_DATADIR_READ:
			lat = &zfcp_sdev->latencies.read;
			break;
		case FSF_DATADIR_DIF_WRITE_INSERT:
		case FSF_DATADIR_DIF_WRITE_CONVERT:
		case FSF_DATADIR_WRITE:
			lat = &zfcp_sdev->latencies.write;
			break;
		case FSF_DATADIR_CMND:
			lat = &zfcp_sdev->latencies.cmd;
			break;
		}

		if (lat) {
			spin_lock(&zfcp_sdev->latencies.lock);
			zfcp_fsf_update_lat(&lat->channel, lat_in->channel_lat);
			zfcp_fsf_update_lat(&lat->fabric, lat_in->fabric_lat);
			lat->counter++;
			spin_unlock(&zfcp_sdev->latencies.lock);
		}
	}

	blk_add_driver_data(scsi->request->q, scsi->request, &blktrc,
			    sizeof(blktrc));
}

/**
 * zfcp_fsf_fcp_handler_common() - FCP response handler common to I/O and TMF.
 * @req: Pointer to FSF request.
 * @sdev: Pointer to SCSI device as request context.
 */
static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req,
					struct scsi_device *sdev)
{
	struct zfcp_scsi_dev *zfcp_sdev;
	struct fsf_qtcb_header *header = &req->qtcb->header;

	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
		return;

	zfcp_sdev = sdev_to_zfcp(sdev);

	switch (header->fsf_status) {
	case FSF_HANDLE_MISMATCH:
	case FSF_PORT_HANDLE_NOT_VALID:
		zfcp_erp_adapter_reopen(req->adapter, 0, "fssfch1");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_FCPLUN_NOT_VALID:
	case FSF_LUN_HANDLE_NOT_VALID:
		zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fssfch2");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
		zfcp_fsf_class_not_supp(req);
		break;
	case FSF_DIRECTION_INDICATOR_NOT_VALID:
		dev_err(&req->adapter->ccw_device->dev,
			"Incorrect direction %d, LUN 0x%016Lx on port "
			"0x%016Lx closed\n",
			req->qtcb->bottom.io.data_direction,
			(unsigned long long)zfcp_scsi_dev_lun(sdev),
			(unsigned long long)zfcp_sdev->port->wwpn);
		zfcp_erp_adapter_shutdown(req->adapter, 0, "fssfch3");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_CMND_LENGTH_NOT_VALID:
		dev_err(&req->adapter->ccw_device->dev,
			"Incorrect FCP_CMND length %d, FCP device closed\n",
			req->qtcb->bottom.io.fcp_cmnd_length);
		zfcp_erp_adapter_shutdown(req->adapter, 0, "fssfch4");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_PORT_BOXED:
		zfcp_erp_set_port_status(zfcp_sdev->port,
					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
		zfcp_erp_port_reopen(zfcp_sdev->port,
				     ZFCP_STATUS_COMMON_ERP_FAILED, "fssfch5");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_LUN_BOXED:
		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
		zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
				    "fssfch6");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		if (header->fsf_status_qual.word[0] ==
		    FSF_SQ_INVOKE_LINK_TEST_PROCEDURE)
			zfcp_fc_test_link(zfcp_sdev->port);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	}
}

static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req)
{
	struct scsi_cmnd *scpnt;
	struct fcp_resp_with_ext *fcp_rsp;
	unsigned long flags;

	read_lock_irqsave(&req->adapter->abort_lock, flags);

	scpnt = req->data;
	if (unlikely(!scpnt)) {
		read_unlock_irqrestore(&req->adapter->abort_lock, flags);
		return;
	}

	zfcp_fsf_fcp_handler_common(req, scpnt->device);

	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
		set_host_byte(scpnt, DID_TRANSPORT_DISRUPTED);
		goto skip_fsfstatus;
	}

	switch (req->qtcb->header.fsf_status) {
	case FSF_INCONSISTENT_PROT_DATA:
	case FSF_INVALID_PROT_PARM:
		set_host_byte(scpnt, DID_ERROR);
		goto skip_fsfstatus;
	case FSF_BLOCK_GUARD_CHECK_FAILURE:
		zfcp_scsi_dif_sense_error(scpnt, 0x1);
		goto skip_fsfstatus;
	case FSF_APP_TAG_CHECK_FAILURE:
		zfcp_scsi_dif_sense_error(scpnt, 0x2);
		goto skip_fsfstatus;
	case FSF_REF_TAG_CHECK_FAILURE:
		zfcp_scsi_dif_sense_error(scpnt, 0x3);
		goto skip_fsfstatus;
	}
	BUILD_BUG_ON(sizeof(struct fcp_resp_with_ext) > FSF_FCP_RSP_SIZE);
	fcp_rsp = &req->qtcb->bottom.io.fcp_rsp.iu;
	zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt);

skip_fsfstatus:
	zfcp_fsf_req_trace(req, scpnt);
	zfcp_dbf_scsi_result(scpnt, req);

	scpnt->host_scribble = NULL;
	(scpnt->scsi_done) (scpnt);
	/*
	 * We must hold this lock until scsi_done has been called.
	 * Otherwise we may call scsi_done after abort regarding this
	 * command has completed.
	 * Note: scsi_done must not block!
	 */
	read_unlock_irqrestore(&req->adapter->abort_lock, flags);
}
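
/*
 * Map the protection operation and data direction of a SCSI command to the
 * FSF_DATADIR_* value expected by the channel; DMA_BIDIRECTIONAL and unknown
 * protection operations are rejected with -EINVAL.
 */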

static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir)
{
	switch (scsi_get_prot_op(scsi_cmnd)) {
	case SCSI_PROT_NORMAL:
		switch (scsi_cmnd->sc_data_direction) {
		case DMA_NONE:
			*data_dir = FSF_DATADIR_CMND;
			break;
		case DMA_FROM_DEVICE:
			*data_dir = FSF_DATADIR_READ;
			break;
		case DMA_TO_DEVICE:
			*data_dir = FSF_DATADIR_WRITE;
			break;
		case DMA_BIDIRECTIONAL:
			return -EINVAL;
		}
		break;

	case SCSI_PROT_READ_STRIP:
		*data_dir = FSF_DATADIR_DIF_READ_STRIP;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*data_dir = FSF_DATADIR_DIF_WRITE_INSERT;
		break;
	case SCSI_PROT_READ_PASS:
		*data_dir = FSF_DATADIR_DIF_READ_CONVERT;
		break;
	case SCSI_PROT_WRITE_PASS:
		*data_dir = FSF_DATADIR_DIF_WRITE_CONVERT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/**
 * zfcp_fsf_fcp_cmnd - initiate an FCP command (for a SCSI command)
 * @scsi_cmnd: scsi command to be sent
 */
int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
{
	struct zfcp_fsf_req *req;
	struct fcp_cmnd *fcp_cmnd;
	u8 sbtype = SBAL_SFLAGS0_TYPE_READ;
	int retval = -EIO;
	struct scsi_device *sdev = scsi_cmnd->device;
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
	struct zfcp_qdio *qdio = adapter->qdio;
	struct fsf_qtcb_bottom_io *io;
	unsigned long flags;

	if (unlikely(!(atomic_read(&zfcp_sdev->status) &
		       ZFCP_STATUS_COMMON_UNBLOCKED)))
		return -EBUSY;

	spin_lock_irqsave(&qdio->req_q_lock, flags);
	if (atomic_read(&qdio->req_q_free) <= 0) {
		atomic_inc(&qdio->req_q_full);
		goto out;
	}

	if (scsi_cmnd->sc_data_direction == DMA_TO_DEVICE)
		sbtype = SBAL_SFLAGS0_TYPE_WRITE;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
				  sbtype, adapter->pool.scsi_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	scsi_cmnd->host_scribble = (unsigned char *) req->req_id;

	io = &req->qtcb->bottom.io;
	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	req->data = scsi_cmnd;
	req->handler = zfcp_fsf_fcp_cmnd_handler;
	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
	req->qtcb->header.port_handle = zfcp_sdev->port->handle;
	io->service_class = FSF_CLASS_3;
	io->fcp_cmnd_length = FCP_CMND_LEN;

	if (scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) {
		io->data_block_length = scsi_cmnd->device->sector_size;
		io->ref_tag_value = scsi_get_lba(scsi_cmnd) & 0xFFFFFFFF;
	}

	if (zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction))
		goto failed_scsi_cmnd;

	BUILD_BUG_ON(sizeof(struct fcp_cmnd) > FSF_FCP_CMND_SIZE);
	fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu;
	zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd);

	if ((scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) &&
	    scsi_prot_sg_count(scsi_cmnd)) {
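		/*
		 * DIX protection data goes into its own data division of the
		 * request SBALs; report its real byte count to the channel.
		 */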
		zfcp_qdio_set_data_div(qdio, &req->qdio_req,
				       scsi_prot_sg_count(scsi_cmnd));
		retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
						 scsi_prot_sglist(scsi_cmnd));
		if (retval)
			goto failed_scsi_cmnd;
		io->prot_data_length = zfcp_qdio_real_bytes(
						scsi_prot_sglist(scsi_cmnd));
	}

	retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
					 scsi_sglist(scsi_cmnd));
	if (unlikely(retval))
		goto failed_scsi_cmnd;

	zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
	if (zfcp_adapter_multi_buffer_active(adapter))
		zfcp_qdio_set_scount(qdio, &req->qdio_req);

	retval = zfcp_fsf_req_send(req);
	if (unlikely(retval))
		goto failed_scsi_cmnd;
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */

	goto out;

failed_scsi_cmnd:
	zfcp_fsf_req_free(req);
	scsi_cmnd->host_scribble = NULL;
out:
	spin_unlock_irqrestore(&qdio->req_q_lock, flags);
	return retval;
}

static void zfcp_fsf_fcp_task_mgmt_handler(struct zfcp_fsf_req *req)
{
	struct scsi_device *sdev = req->data;
	struct fcp_resp_with_ext *fcp_rsp;
	struct fcp_resp_rsp_info *rsp_info;

	zfcp_fsf_fcp_handler_common(req, sdev);

	fcp_rsp = &req->qtcb->bottom.io.fcp_rsp.iu;
	rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];

	if ((rsp_info->rsp_code != FCP_TMF_CMPL) ||
	    (req->status & ZFCP_STATUS_FSFREQ_ERROR))
		req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
}

/**
 * zfcp_fsf_fcp_task_mgmt() - Send SCSI task management command (TMF).
 * @sdev: Pointer to SCSI device to send the task management command to.
 * @tm_flags: Unsigned byte for task management flags.
 *
 * Return: On success pointer to struct zfcp_fsf_req, %NULL otherwise.
 */
struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_device *sdev,
					    u8 tm_flags)
{
	struct zfcp_fsf_req *req = NULL;
	struct fcp_cmnd *fcp_cmnd;
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
	struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;

	if (unlikely(!(atomic_read(&zfcp_sdev->status) &
		       ZFCP_STATUS_COMMON_UNBLOCKED)))
		return NULL;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
				  SBAL_SFLAGS0_TYPE_WRITE,
				  qdio->adapter->pool.scsi_req);

	if (IS_ERR(req)) {
		req = NULL;
		goto out;
	}

	req->data = sdev;

	req->handler = zfcp_fsf_fcp_task_mgmt_handler;
	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
	req->qtcb->header.port_handle = zfcp_sdev->port->handle;
	req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
	req->qtcb->bottom.io.service_class = FSF_CLASS_3;
	req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;

	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu;
	zfcp_fc_fcp_tm(fcp_cmnd, sdev, tm_flags);

	zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT);
	if (!zfcp_fsf_req_send(req)) {
		/* NOTE: DO NOT TOUCH req, UNTIL IT COMPLETES! */
		goto out;
	}

	zfcp_fsf_req_free(req);
	req = NULL;
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return req;
}
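
/*
 * Rough sketch of how a caller is expected to drive a TMF request returned by
 * zfcp_fsf_fcp_task_mgmt() (the SCSI error handlers in zfcp_scsi.c follow this
 * pattern; details there may differ): the request is sent without
 * ZFCP_STATUS_FSFREQ_CLEANUP, so zfcp_fsf_req_complete() signals
 * req->completion instead of freeing it, and the caller must wait, check
 * ZFCP_STATUS_FSFREQ_TMFUNCFAILED and free the request itself:
 *
 *	fsf_req = zfcp_fsf_fcp_task_mgmt(sdev, FCP_TMF_LUN_RESET);
 *	if (!fsf_req)
 *		... retry or give up ...
 *	wait_for_completion(&fsf_req->completion);
 *	if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED)
 *		... the TMF itself failed ...
 *	zfcp_fsf_req_free(fsf_req);
 */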

/**
 * zfcp_fsf_reqid_check - validate req_id contained in SBAL returned by QDIO
 * @qdio: pointer to struct zfcp_qdio
 * @sbal_idx: response queue index of SBAL to be processed
 */
void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
{
	struct zfcp_adapter *adapter = qdio->adapter;
	struct qdio_buffer *sbal = qdio->res_q[sbal_idx];
	struct qdio_buffer_element *sbale;
	struct zfcp_fsf_req *fsf_req;
	unsigned long req_id;
	int idx;

	for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) {

		sbale = &sbal->element[idx];
		req_id = (unsigned long) sbale->addr;
		fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id);

		if (!fsf_req) {
			/*
			 * Unknown request means that we have potentially memory
			 * corruption and must stop the machine immediately.
			 */
			zfcp_qdio_siosl(adapter);
			panic("error: unknown req_id (%lx) on adapter %s.\n",
			      req_id, dev_name(&adapter->ccw_device->dev));
		}

		zfcp_fsf_req_complete(fsf_req);

		if (likely(sbale->eflags & SBAL_EFLAGS_LAST_ENTRY))
			break;
	}
}