1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * zfcp device driver 4 * 5 * Implementation of FSF commands. 6 * 7 * Copyright IBM Corp. 2002, 2018 8 */ 9 10 #define KMSG_COMPONENT "zfcp" 11 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 12 13 #include <linux/blktrace_api.h> 14 #include <linux/jiffies.h> 15 #include <linux/types.h> 16 #include <linux/slab.h> 17 #include <scsi/fc/fc_els.h> 18 #include "zfcp_ext.h" 19 #include "zfcp_fc.h" 20 #include "zfcp_dbf.h" 21 #include "zfcp_qdio.h" 22 #include "zfcp_reqlist.h" 23 #include "zfcp_diag.h" 24 25 /* timeout for FSF requests sent during scsi_eh: abort or FCP TMF */ 26 #define ZFCP_FSF_SCSI_ER_TIMEOUT (10*HZ) 27 /* timeout for: exchange config/port data outside ERP, or open/close WKA port */ 28 #define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ) 29 30 struct kmem_cache *zfcp_fsf_qtcb_cache; 31 32 static bool ber_stop = true; 33 module_param(ber_stop, bool, 0600); 34 MODULE_PARM_DESC(ber_stop, 35 "Shuts down FCP devices for FCP channels that report a bit-error count in excess of its threshold (default on)"); 36 37 static void zfcp_fsf_request_timeout_handler(struct timer_list *t) 38 { 39 struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer); 40 struct zfcp_adapter *adapter = fsf_req->adapter; 41 42 zfcp_qdio_siosl(adapter); 43 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 44 "fsrth_1"); 45 } 46 47 static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req, 48 unsigned long timeout) 49 { 50 fsf_req->timer.function = zfcp_fsf_request_timeout_handler; 51 fsf_req->timer.expires = jiffies + timeout; 52 add_timer(&fsf_req->timer); 53 } 54 55 static void zfcp_fsf_start_erp_timer(struct zfcp_fsf_req *fsf_req) 56 { 57 BUG_ON(!fsf_req->erp_action); 58 fsf_req->timer.function = zfcp_erp_timeout_handler; 59 fsf_req->timer.expires = jiffies + 30 * HZ; 60 add_timer(&fsf_req->timer); 61 } 62 63 /* association between FSF command and FSF QTCB type */ 64 static u32 fsf_qtcb_type[] = { 65 [FSF_QTCB_FCP_CMND] = FSF_IO_COMMAND, 66 [FSF_QTCB_ABORT_FCP_CMND] = FSF_SUPPORT_COMMAND, 67 [FSF_QTCB_OPEN_PORT_WITH_DID] = FSF_SUPPORT_COMMAND, 68 [FSF_QTCB_OPEN_LUN] = FSF_SUPPORT_COMMAND, 69 [FSF_QTCB_CLOSE_LUN] = FSF_SUPPORT_COMMAND, 70 [FSF_QTCB_CLOSE_PORT] = FSF_SUPPORT_COMMAND, 71 [FSF_QTCB_CLOSE_PHYSICAL_PORT] = FSF_SUPPORT_COMMAND, 72 [FSF_QTCB_SEND_ELS] = FSF_SUPPORT_COMMAND, 73 [FSF_QTCB_SEND_GENERIC] = FSF_SUPPORT_COMMAND, 74 [FSF_QTCB_EXCHANGE_CONFIG_DATA] = FSF_CONFIG_COMMAND, 75 [FSF_QTCB_EXCHANGE_PORT_DATA] = FSF_PORT_COMMAND, 76 [FSF_QTCB_DOWNLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND, 77 [FSF_QTCB_UPLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND 78 }; 79 80 static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req) 81 { 82 dev_err(&req->adapter->ccw_device->dev, "FCP device not " 83 "operational because of an unsupported FC class\n"); 84 zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1"); 85 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 86 } 87 88 /** 89 * zfcp_fsf_req_free - free memory used by fsf request 90 * @req: pointer to struct zfcp_fsf_req 91 */ 92 void zfcp_fsf_req_free(struct zfcp_fsf_req *req) 93 { 94 if (likely(req->pool)) { 95 if (likely(!zfcp_fsf_req_is_status_read_buffer(req))) 96 mempool_free(req->qtcb, req->adapter->pool.qtcb_pool); 97 mempool_free(req, req->pool); 98 return; 99 } 100 101 if (likely(!zfcp_fsf_req_is_status_read_buffer(req))) 102 kmem_cache_free(zfcp_fsf_qtcb_cache, req->qtcb); 103 kfree(req); 104 } 105 106 static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req) 107 { 108 unsigned long flags; 109 struct fsf_status_read_buffer *sr_buf = req->data; 110 struct zfcp_adapter *adapter = req->adapter; 111 struct zfcp_port *port; 112 int d_id = ntoh24(sr_buf->d_id); 113 114 read_lock_irqsave(&adapter->port_list_lock, flags); 115 list_for_each_entry(port, &adapter->port_list, list) 116 if (port->d_id == d_id) { 117 zfcp_erp_port_reopen(port, 0, "fssrpc1"); 118 break; 119 } 120 read_unlock_irqrestore(&adapter->port_list_lock, flags); 121 } 122 123 static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, 124 struct fsf_link_down_info *link_down) 125 { 126 struct zfcp_adapter *adapter = req->adapter; 127 128 if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED) 129 return; 130 131 atomic_or(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status); 132 133 zfcp_scsi_schedule_rports_block(adapter); 134 135 if (!link_down) 136 goto out; 137 138 switch (link_down->error_code) { 139 case FSF_PSQ_LINK_NO_LIGHT: 140 dev_warn(&req->adapter->ccw_device->dev, 141 "There is no light signal from the local " 142 "fibre channel cable\n"); 143 break; 144 case FSF_PSQ_LINK_WRAP_PLUG: 145 dev_warn(&req->adapter->ccw_device->dev, 146 "There is a wrap plug instead of a fibre " 147 "channel cable\n"); 148 break; 149 case FSF_PSQ_LINK_NO_FCP: 150 dev_warn(&req->adapter->ccw_device->dev, 151 "The adjacent fibre channel node does not " 152 "support FCP\n"); 153 break; 154 case FSF_PSQ_LINK_FIRMWARE_UPDATE: 155 dev_warn(&req->adapter->ccw_device->dev, 156 "The FCP device is suspended because of a " 157 "firmware update\n"); 158 break; 159 case FSF_PSQ_LINK_INVALID_WWPN: 160 dev_warn(&req->adapter->ccw_device->dev, 161 "The FCP device detected a WWPN that is " 162 "duplicate or not valid\n"); 163 break; 164 case FSF_PSQ_LINK_NO_NPIV_SUPPORT: 165 dev_warn(&req->adapter->ccw_device->dev, 166 "The fibre channel fabric does not support NPIV\n"); 167 break; 168 case FSF_PSQ_LINK_NO_FCP_RESOURCES: 169 dev_warn(&req->adapter->ccw_device->dev, 170 "The FCP adapter cannot support more NPIV ports\n"); 171 break; 172 case FSF_PSQ_LINK_NO_FABRIC_RESOURCES: 173 dev_warn(&req->adapter->ccw_device->dev, 174 "The adjacent switch cannot support " 175 "more NPIV ports\n"); 176 break; 177 case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE: 178 dev_warn(&req->adapter->ccw_device->dev, 179 "The FCP adapter could not log in to the " 180 "fibre channel fabric\n"); 181 break; 182 case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED: 183 dev_warn(&req->adapter->ccw_device->dev, 184 "The WWPN assignment file on the FCP adapter " 185 "has been damaged\n"); 186 break; 187 case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED: 188 dev_warn(&req->adapter->ccw_device->dev, 189 "The mode table on the FCP adapter " 190 "has been damaged\n"); 191 break; 192 case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT: 193 dev_warn(&req->adapter->ccw_device->dev, 194 "All NPIV ports on the FCP adapter have " 195 "been assigned\n"); 196 break; 197 default: 198 dev_warn(&req->adapter->ccw_device->dev, 199 "The link between the FCP adapter and " 200 "the FC fabric is down\n"); 201 } 202 out: 203 zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_ERP_FAILED); 204 } 205 206 static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req) 207 { 208 struct fsf_status_read_buffer *sr_buf = req->data; 209 struct fsf_link_down_info *ldi = 210 (struct fsf_link_down_info *) &sr_buf->payload; 211 212 switch (sr_buf->status_subtype) { 213 case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK: 214 case FSF_STATUS_READ_SUB_FDISC_FAILED: 215 zfcp_fsf_link_down_info_eval(req, ldi); 216 break; 217 case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE: 218 zfcp_fsf_link_down_info_eval(req, NULL); 219 } 220 } 221 222 static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req) 223 { 224 struct zfcp_adapter *adapter = req->adapter; 225 struct fsf_status_read_buffer *sr_buf = req->data; 226 227 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) { 228 zfcp_dbf_hba_fsf_uss("fssrh_1", req); 229 mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data); 230 zfcp_fsf_req_free(req); 231 return; 232 } 233 234 zfcp_dbf_hba_fsf_uss("fssrh_4", req); 235 236 switch (sr_buf->status_type) { 237 case FSF_STATUS_READ_PORT_CLOSED: 238 zfcp_fsf_status_read_port_closed(req); 239 break; 240 case FSF_STATUS_READ_INCOMING_ELS: 241 zfcp_fc_incoming_els(req); 242 break; 243 case FSF_STATUS_READ_SENSE_DATA_AVAIL: 244 break; 245 case FSF_STATUS_READ_BIT_ERROR_THRESHOLD: 246 zfcp_dbf_hba_bit_err("fssrh_3", req); 247 if (ber_stop) { 248 dev_warn(&adapter->ccw_device->dev, 249 "All paths over this FCP device are disused because of excessive bit errors\n"); 250 zfcp_erp_adapter_shutdown(adapter, 0, "fssrh_b"); 251 } else { 252 dev_warn(&adapter->ccw_device->dev, 253 "The error threshold for checksum statistics has been exceeded\n"); 254 } 255 break; 256 case FSF_STATUS_READ_LINK_DOWN: 257 zfcp_fsf_status_read_link_down(req); 258 zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKDOWN, 0); 259 break; 260 case FSF_STATUS_READ_LINK_UP: 261 dev_info(&adapter->ccw_device->dev, 262 "The local link has been restored\n"); 263 /* All ports should be marked as ready to run again */ 264 zfcp_erp_set_adapter_status(adapter, 265 ZFCP_STATUS_COMMON_RUNNING); 266 zfcp_erp_adapter_reopen(adapter, 267 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | 268 ZFCP_STATUS_COMMON_ERP_FAILED, 269 "fssrh_2"); 270 zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKUP, 0); 271 272 break; 273 case FSF_STATUS_READ_NOTIFICATION_LOST: 274 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS) 275 zfcp_fc_conditional_port_scan(adapter); 276 break; 277 case FSF_STATUS_READ_FEATURE_UPDATE_ALERT: 278 adapter->adapter_features = sr_buf->payload.word[0]; 279 break; 280 } 281 282 mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data); 283 zfcp_fsf_req_free(req); 284 285 atomic_inc(&adapter->stat_miss); 286 queue_work(adapter->work_queue, &adapter->stat_work); 287 } 288 289 static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req) 290 { 291 switch (req->qtcb->header.fsf_status_qual.word[0]) { 292 case FSF_SQ_FCP_RSP_AVAILABLE: 293 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 294 case FSF_SQ_NO_RETRY_POSSIBLE: 295 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 296 return; 297 case FSF_SQ_COMMAND_ABORTED: 298 break; 299 case FSF_SQ_NO_RECOM: 300 dev_err(&req->adapter->ccw_device->dev, 301 "The FCP adapter reported a problem " 302 "that cannot be recovered\n"); 303 zfcp_qdio_siosl(req->adapter); 304 zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1"); 305 break; 306 } 307 /* all non-return stats set FSFREQ_ERROR*/ 308 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 309 } 310 311 static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req) 312 { 313 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) 314 return; 315 316 switch (req->qtcb->header.fsf_status) { 317 case FSF_UNKNOWN_COMMAND: 318 dev_err(&req->adapter->ccw_device->dev, 319 "The FCP adapter does not recognize the command 0x%x\n", 320 req->qtcb->header.fsf_command); 321 zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1"); 322 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 323 break; 324 case FSF_ADAPTER_STATUS_AVAILABLE: 325 zfcp_fsf_fsfstatus_qual_eval(req); 326 break; 327 } 328 } 329 330 static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req) 331 { 332 struct zfcp_adapter *adapter = req->adapter; 333 struct fsf_qtcb *qtcb = req->qtcb; 334 union fsf_prot_status_qual *psq = &qtcb->prefix.prot_status_qual; 335 336 zfcp_dbf_hba_fsf_response(req); 337 338 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) { 339 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 340 return; 341 } 342 343 switch (qtcb->prefix.prot_status) { 344 case FSF_PROT_GOOD: 345 case FSF_PROT_FSF_STATUS_PRESENTED: 346 return; 347 case FSF_PROT_QTCB_VERSION_ERROR: 348 dev_err(&adapter->ccw_device->dev, 349 "QTCB version 0x%x not supported by FCP adapter " 350 "(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION, 351 psq->word[0], psq->word[1]); 352 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1"); 353 break; 354 case FSF_PROT_ERROR_STATE: 355 case FSF_PROT_SEQ_NUMB_ERROR: 356 zfcp_erp_adapter_reopen(adapter, 0, "fspse_2"); 357 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 358 break; 359 case FSF_PROT_UNSUPP_QTCB_TYPE: 360 dev_err(&adapter->ccw_device->dev, 361 "The QTCB type is not supported by the FCP adapter\n"); 362 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3"); 363 break; 364 case FSF_PROT_HOST_CONNECTION_INITIALIZING: 365 atomic_or(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, 366 &adapter->status); 367 break; 368 case FSF_PROT_DUPLICATE_REQUEST_ID: 369 dev_err(&adapter->ccw_device->dev, 370 "0x%Lx is an ambiguous request identifier\n", 371 (unsigned long long)qtcb->bottom.support.req_handle); 372 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4"); 373 break; 374 case FSF_PROT_LINK_DOWN: 375 zfcp_fsf_link_down_info_eval(req, &psq->link_down_info); 376 /* go through reopen to flush pending requests */ 377 zfcp_erp_adapter_reopen(adapter, 0, "fspse_6"); 378 break; 379 case FSF_PROT_REEST_QUEUE: 380 /* All ports should be marked as ready to run again */ 381 zfcp_erp_set_adapter_status(adapter, 382 ZFCP_STATUS_COMMON_RUNNING); 383 zfcp_erp_adapter_reopen(adapter, 384 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | 385 ZFCP_STATUS_COMMON_ERP_FAILED, 386 "fspse_8"); 387 break; 388 default: 389 dev_err(&adapter->ccw_device->dev, 390 "0x%x is not a valid transfer protocol status\n", 391 qtcb->prefix.prot_status); 392 zfcp_qdio_siosl(adapter); 393 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9"); 394 } 395 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 396 } 397 398 /** 399 * zfcp_fsf_req_complete - process completion of a FSF request 400 * @req: The FSF request that has been completed. 401 * 402 * When a request has been completed either from the FCP adapter, 403 * or it has been dismissed due to a queue shutdown, this function 404 * is called to process the completion status and trigger further 405 * events related to the FSF request. 406 */ 407 static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req) 408 { 409 if (unlikely(zfcp_fsf_req_is_status_read_buffer(req))) { 410 zfcp_fsf_status_read_handler(req); 411 return; 412 } 413 414 del_timer(&req->timer); 415 zfcp_fsf_protstatus_eval(req); 416 zfcp_fsf_fsfstatus_eval(req); 417 req->handler(req); 418 419 if (req->erp_action) 420 zfcp_erp_notify(req->erp_action, 0); 421 422 if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP)) 423 zfcp_fsf_req_free(req); 424 else 425 complete(&req->completion); 426 } 427 428 /** 429 * zfcp_fsf_req_dismiss_all - dismiss all fsf requests 430 * @adapter: pointer to struct zfcp_adapter 431 * 432 * Never ever call this without shutting down the adapter first. 433 * Otherwise the adapter would continue using and corrupting s390 storage. 434 * Included BUG_ON() call to ensure this is done. 435 * ERP is supposed to be the only user of this function. 436 */ 437 void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter) 438 { 439 struct zfcp_fsf_req *req, *tmp; 440 LIST_HEAD(remove_queue); 441 442 BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP); 443 zfcp_reqlist_move(adapter->req_list, &remove_queue); 444 445 list_for_each_entry_safe(req, tmp, &remove_queue, list) { 446 list_del(&req->list); 447 req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; 448 zfcp_fsf_req_complete(req); 449 } 450 } 451 452 #define ZFCP_FSF_PORTSPEED_1GBIT (1 << 0) 453 #define ZFCP_FSF_PORTSPEED_2GBIT (1 << 1) 454 #define ZFCP_FSF_PORTSPEED_4GBIT (1 << 2) 455 #define ZFCP_FSF_PORTSPEED_10GBIT (1 << 3) 456 #define ZFCP_FSF_PORTSPEED_8GBIT (1 << 4) 457 #define ZFCP_FSF_PORTSPEED_16GBIT (1 << 5) 458 #define ZFCP_FSF_PORTSPEED_32GBIT (1 << 6) 459 #define ZFCP_FSF_PORTSPEED_64GBIT (1 << 7) 460 #define ZFCP_FSF_PORTSPEED_128GBIT (1 << 8) 461 #define ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED (1 << 15) 462 463 static u32 zfcp_fsf_convert_portspeed(u32 fsf_speed) 464 { 465 u32 fdmi_speed = 0; 466 if (fsf_speed & ZFCP_FSF_PORTSPEED_1GBIT) 467 fdmi_speed |= FC_PORTSPEED_1GBIT; 468 if (fsf_speed & ZFCP_FSF_PORTSPEED_2GBIT) 469 fdmi_speed |= FC_PORTSPEED_2GBIT; 470 if (fsf_speed & ZFCP_FSF_PORTSPEED_4GBIT) 471 fdmi_speed |= FC_PORTSPEED_4GBIT; 472 if (fsf_speed & ZFCP_FSF_PORTSPEED_10GBIT) 473 fdmi_speed |= FC_PORTSPEED_10GBIT; 474 if (fsf_speed & ZFCP_FSF_PORTSPEED_8GBIT) 475 fdmi_speed |= FC_PORTSPEED_8GBIT; 476 if (fsf_speed & ZFCP_FSF_PORTSPEED_16GBIT) 477 fdmi_speed |= FC_PORTSPEED_16GBIT; 478 if (fsf_speed & ZFCP_FSF_PORTSPEED_32GBIT) 479 fdmi_speed |= FC_PORTSPEED_32GBIT; 480 if (fsf_speed & ZFCP_FSF_PORTSPEED_64GBIT) 481 fdmi_speed |= FC_PORTSPEED_64GBIT; 482 if (fsf_speed & ZFCP_FSF_PORTSPEED_128GBIT) 483 fdmi_speed |= FC_PORTSPEED_128GBIT; 484 if (fsf_speed & ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED) 485 fdmi_speed |= FC_PORTSPEED_NOT_NEGOTIATED; 486 return fdmi_speed; 487 } 488 489 static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req) 490 { 491 struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config; 492 struct zfcp_adapter *adapter = req->adapter; 493 struct Scsi_Host *shost = adapter->scsi_host; 494 struct fc_els_flogi *nsp, *plogi; 495 496 /* adjust pointers for missing command code */ 497 nsp = (struct fc_els_flogi *) ((u8 *)&bottom->nport_serv_param 498 - sizeof(u32)); 499 plogi = (struct fc_els_flogi *) ((u8 *)&bottom->plogi_payload 500 - sizeof(u32)); 501 502 if (req->data) 503 memcpy(req->data, bottom, sizeof(*bottom)); 504 505 fc_host_port_name(shost) = be64_to_cpu(nsp->fl_wwpn); 506 fc_host_node_name(shost) = be64_to_cpu(nsp->fl_wwnn); 507 fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3; 508 509 adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK; 510 adapter->stat_read_buf_num = max(bottom->status_read_buf_num, 511 (u16)FSF_STATUS_READS_RECOM); 512 513 if (fc_host_permanent_port_name(shost) == -1) 514 fc_host_permanent_port_name(shost) = fc_host_port_name(shost); 515 516 zfcp_scsi_set_prot(adapter); 517 518 /* no error return above here, otherwise must fix call chains */ 519 /* do not evaluate invalid fields */ 520 if (req->qtcb->header.fsf_status == FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE) 521 return 0; 522 523 fc_host_port_id(shost) = ntoh24(bottom->s_id); 524 fc_host_speed(shost) = 525 zfcp_fsf_convert_portspeed(bottom->fc_link_speed); 526 527 adapter->hydra_version = bottom->adapter_type; 528 529 switch (bottom->fc_topology) { 530 case FSF_TOPO_P2P: 531 adapter->peer_d_id = ntoh24(bottom->peer_d_id); 532 adapter->peer_wwpn = be64_to_cpu(plogi->fl_wwpn); 533 adapter->peer_wwnn = be64_to_cpu(plogi->fl_wwnn); 534 fc_host_port_type(shost) = FC_PORTTYPE_PTP; 535 break; 536 case FSF_TOPO_FABRIC: 537 if (bottom->connection_features & FSF_FEATURE_NPIV_MODE) 538 fc_host_port_type(shost) = FC_PORTTYPE_NPIV; 539 else 540 fc_host_port_type(shost) = FC_PORTTYPE_NPORT; 541 break; 542 case FSF_TOPO_AL: 543 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT; 544 /* fall through */ 545 default: 546 dev_err(&adapter->ccw_device->dev, 547 "Unknown or unsupported arbitrated loop " 548 "fibre channel topology detected\n"); 549 zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1"); 550 return -EIO; 551 } 552 553 return 0; 554 } 555 556 static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req) 557 { 558 struct zfcp_adapter *adapter = req->adapter; 559 struct zfcp_diag_header *const diag_hdr = 560 &adapter->diagnostics->config_data.header; 561 struct fsf_qtcb *qtcb = req->qtcb; 562 struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config; 563 struct Scsi_Host *shost = adapter->scsi_host; 564 565 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 566 return; 567 568 adapter->fsf_lic_version = bottom->lic_version; 569 adapter->adapter_features = bottom->adapter_features; 570 adapter->connection_features = bottom->connection_features; 571 adapter->peer_wwpn = 0; 572 adapter->peer_wwnn = 0; 573 adapter->peer_d_id = 0; 574 575 switch (qtcb->header.fsf_status) { 576 case FSF_GOOD: 577 /* 578 * usually we wait with an update till the cache is too old, 579 * but because we have the data available, update it anyway 580 */ 581 zfcp_diag_update_xdata(diag_hdr, bottom, false); 582 583 if (zfcp_fsf_exchange_config_evaluate(req)) 584 return; 585 586 if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) { 587 dev_err(&adapter->ccw_device->dev, 588 "FCP adapter maximum QTCB size (%d bytes) " 589 "is too small\n", 590 bottom->max_qtcb_size); 591 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1"); 592 return; 593 } 594 atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK, 595 &adapter->status); 596 break; 597 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE: 598 zfcp_diag_update_xdata(diag_hdr, bottom, true); 599 req->status |= ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE; 600 601 fc_host_node_name(shost) = 0; 602 fc_host_port_name(shost) = 0; 603 fc_host_port_id(shost) = 0; 604 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; 605 fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN; 606 adapter->hydra_version = 0; 607 608 /* avoids adapter shutdown to be able to recognize 609 * events such as LINK UP */ 610 atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK, 611 &adapter->status); 612 zfcp_fsf_link_down_info_eval(req, 613 &qtcb->header.fsf_status_qual.link_down_info); 614 if (zfcp_fsf_exchange_config_evaluate(req)) 615 return; 616 break; 617 default: 618 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3"); 619 return; 620 } 621 622 if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) { 623 adapter->hardware_version = bottom->hardware_version; 624 memcpy(fc_host_serial_number(shost), bottom->serial_number, 625 min(FC_SERIAL_NUMBER_SIZE, 17)); 626 EBCASC(fc_host_serial_number(shost), 627 min(FC_SERIAL_NUMBER_SIZE, 17)); 628 } 629 630 if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) { 631 dev_err(&adapter->ccw_device->dev, 632 "The FCP adapter only supports newer " 633 "control block versions\n"); 634 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4"); 635 return; 636 } 637 if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) { 638 dev_err(&adapter->ccw_device->dev, 639 "The FCP adapter only supports older " 640 "control block versions\n"); 641 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5"); 642 } 643 } 644 645 static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req) 646 { 647 struct zfcp_adapter *adapter = req->adapter; 648 struct fsf_qtcb_bottom_port *bottom = &req->qtcb->bottom.port; 649 struct Scsi_Host *shost = adapter->scsi_host; 650 651 if (req->data) 652 memcpy(req->data, bottom, sizeof(*bottom)); 653 654 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) { 655 fc_host_permanent_port_name(shost) = bottom->wwpn; 656 } else 657 fc_host_permanent_port_name(shost) = fc_host_port_name(shost); 658 fc_host_maxframe_size(shost) = bottom->maximum_frame_size; 659 fc_host_supported_speeds(shost) = 660 zfcp_fsf_convert_portspeed(bottom->supported_speed); 661 memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types, 662 FC_FC4_LIST_SIZE); 663 memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types, 664 FC_FC4_LIST_SIZE); 665 } 666 667 static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req) 668 { 669 struct zfcp_diag_header *const diag_hdr = 670 &req->adapter->diagnostics->port_data.header; 671 struct fsf_qtcb *qtcb = req->qtcb; 672 struct fsf_qtcb_bottom_port *bottom = &qtcb->bottom.port; 673 674 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 675 return; 676 677 switch (qtcb->header.fsf_status) { 678 case FSF_GOOD: 679 /* 680 * usually we wait with an update till the cache is too old, 681 * but because we have the data available, update it anyway 682 */ 683 zfcp_diag_update_xdata(diag_hdr, bottom, false); 684 685 zfcp_fsf_exchange_port_evaluate(req); 686 break; 687 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE: 688 zfcp_diag_update_xdata(diag_hdr, bottom, true); 689 req->status |= ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE; 690 691 zfcp_fsf_exchange_port_evaluate(req); 692 zfcp_fsf_link_down_info_eval(req, 693 &qtcb->header.fsf_status_qual.link_down_info); 694 break; 695 } 696 } 697 698 static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool) 699 { 700 struct zfcp_fsf_req *req; 701 702 if (likely(pool)) 703 req = mempool_alloc(pool, GFP_ATOMIC); 704 else 705 req = kmalloc(sizeof(*req), GFP_ATOMIC); 706 707 if (unlikely(!req)) 708 return NULL; 709 710 memset(req, 0, sizeof(*req)); 711 req->pool = pool; 712 return req; 713 } 714 715 static struct fsf_qtcb *zfcp_fsf_qtcb_alloc(mempool_t *pool) 716 { 717 struct fsf_qtcb *qtcb; 718 719 if (likely(pool)) 720 qtcb = mempool_alloc(pool, GFP_ATOMIC); 721 else 722 qtcb = kmem_cache_alloc(zfcp_fsf_qtcb_cache, GFP_ATOMIC); 723 724 if (unlikely(!qtcb)) 725 return NULL; 726 727 memset(qtcb, 0, sizeof(*qtcb)); 728 return qtcb; 729 } 730 731 static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio, 732 u32 fsf_cmd, u8 sbtype, 733 mempool_t *pool) 734 { 735 struct zfcp_adapter *adapter = qdio->adapter; 736 struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool); 737 738 if (unlikely(!req)) 739 return ERR_PTR(-ENOMEM); 740 741 if (adapter->req_no == 0) 742 adapter->req_no++; 743 744 INIT_LIST_HEAD(&req->list); 745 timer_setup(&req->timer, NULL, 0); 746 init_completion(&req->completion); 747 748 req->adapter = adapter; 749 req->req_id = adapter->req_no; 750 751 if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) { 752 if (likely(pool)) 753 req->qtcb = zfcp_fsf_qtcb_alloc( 754 adapter->pool.qtcb_pool); 755 else 756 req->qtcb = zfcp_fsf_qtcb_alloc(NULL); 757 758 if (unlikely(!req->qtcb)) { 759 zfcp_fsf_req_free(req); 760 return ERR_PTR(-ENOMEM); 761 } 762 763 req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no; 764 req->qtcb->prefix.req_id = req->req_id; 765 req->qtcb->prefix.ulp_info = 26; 766 req->qtcb->prefix.qtcb_type = fsf_qtcb_type[fsf_cmd]; 767 req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION; 768 req->qtcb->header.req_handle = req->req_id; 769 req->qtcb->header.fsf_command = fsf_cmd; 770 } 771 772 zfcp_qdio_req_init(adapter->qdio, &req->qdio_req, req->req_id, sbtype, 773 req->qtcb, sizeof(struct fsf_qtcb)); 774 775 return req; 776 } 777 778 static int zfcp_fsf_req_send(struct zfcp_fsf_req *req) 779 { 780 const bool is_srb = zfcp_fsf_req_is_status_read_buffer(req); 781 struct zfcp_adapter *adapter = req->adapter; 782 struct zfcp_qdio *qdio = adapter->qdio; 783 int req_id = req->req_id; 784 785 zfcp_reqlist_add(adapter->req_list, req); 786 787 req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free); 788 req->issued = get_tod_clock(); 789 if (zfcp_qdio_send(qdio, &req->qdio_req)) { 790 del_timer(&req->timer); 791 /* lookup request again, list might have changed */ 792 zfcp_reqlist_find_rm(adapter->req_list, req_id); 793 zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1"); 794 return -EIO; 795 } 796 797 /* 798 * NOTE: DO NOT TOUCH ASYNC req PAST THIS POINT. 799 * ONLY TOUCH SYNC req AGAIN ON req->completion. 800 * 801 * The request might complete and be freed concurrently at any point 802 * now. This is not protected by the QDIO-lock (req_q_lock). So any 803 * uncontrolled access after this might result in an use-after-free bug. 804 * Only if the request doesn't have ZFCP_STATUS_FSFREQ_CLEANUP set, and 805 * when it is completed via req->completion, is it safe to use req 806 * again. 807 */ 808 809 /* Don't increase for unsolicited status */ 810 if (!is_srb) 811 adapter->fsf_req_seq_no++; 812 adapter->req_no++; 813 814 return 0; 815 } 816 817 /** 818 * zfcp_fsf_status_read - send status read request 819 * @qdio: pointer to struct zfcp_qdio 820 * Returns: 0 on success, ERROR otherwise 821 */ 822 int zfcp_fsf_status_read(struct zfcp_qdio *qdio) 823 { 824 struct zfcp_adapter *adapter = qdio->adapter; 825 struct zfcp_fsf_req *req; 826 struct fsf_status_read_buffer *sr_buf; 827 struct page *page; 828 int retval = -EIO; 829 830 spin_lock_irq(&qdio->req_q_lock); 831 if (zfcp_qdio_sbal_get(qdio)) 832 goto out; 833 834 req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS, 835 SBAL_SFLAGS0_TYPE_STATUS, 836 adapter->pool.status_read_req); 837 if (IS_ERR(req)) { 838 retval = PTR_ERR(req); 839 goto out; 840 } 841 842 page = mempool_alloc(adapter->pool.sr_data, GFP_ATOMIC); 843 if (!page) { 844 retval = -ENOMEM; 845 goto failed_buf; 846 } 847 sr_buf = page_address(page); 848 memset(sr_buf, 0, sizeof(*sr_buf)); 849 req->data = sr_buf; 850 851 zfcp_qdio_fill_next(qdio, &req->qdio_req, sr_buf, sizeof(*sr_buf)); 852 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 853 854 retval = zfcp_fsf_req_send(req); 855 if (retval) 856 goto failed_req_send; 857 /* NOTE: DO NOT TOUCH req PAST THIS POINT! */ 858 859 goto out; 860 861 failed_req_send: 862 req->data = NULL; 863 mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data); 864 failed_buf: 865 zfcp_dbf_hba_fsf_uss("fssr__1", req); 866 zfcp_fsf_req_free(req); 867 out: 868 spin_unlock_irq(&qdio->req_q_lock); 869 return retval; 870 } 871 872 static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req) 873 { 874 struct scsi_device *sdev = req->data; 875 struct zfcp_scsi_dev *zfcp_sdev; 876 union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual; 877 878 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 879 return; 880 881 zfcp_sdev = sdev_to_zfcp(sdev); 882 883 switch (req->qtcb->header.fsf_status) { 884 case FSF_PORT_HANDLE_NOT_VALID: 885 if (fsq->word[0] == fsq->word[1]) { 886 zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, 887 "fsafch1"); 888 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 889 } 890 break; 891 case FSF_LUN_HANDLE_NOT_VALID: 892 if (fsq->word[0] == fsq->word[1]) { 893 zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fsafch2"); 894 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 895 } 896 break; 897 case FSF_FCP_COMMAND_DOES_NOT_EXIST: 898 req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED; 899 break; 900 case FSF_PORT_BOXED: 901 zfcp_erp_set_port_status(zfcp_sdev->port, 902 ZFCP_STATUS_COMMON_ACCESS_BOXED); 903 zfcp_erp_port_reopen(zfcp_sdev->port, 904 ZFCP_STATUS_COMMON_ERP_FAILED, "fsafch3"); 905 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 906 break; 907 case FSF_LUN_BOXED: 908 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED); 909 zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED, 910 "fsafch4"); 911 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 912 break; 913 case FSF_ADAPTER_STATUS_AVAILABLE: 914 switch (fsq->word[0]) { 915 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 916 zfcp_fc_test_link(zfcp_sdev->port); 917 /* fall through */ 918 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 919 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 920 break; 921 } 922 break; 923 case FSF_GOOD: 924 req->status |= ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED; 925 break; 926 } 927 } 928 929 /** 930 * zfcp_fsf_abort_fcp_cmnd - abort running SCSI command 931 * @scmnd: The SCSI command to abort 932 * Returns: pointer to struct zfcp_fsf_req 933 */ 934 935 struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd) 936 { 937 struct zfcp_fsf_req *req = NULL; 938 struct scsi_device *sdev = scmnd->device; 939 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); 940 struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio; 941 unsigned long old_req_id = (unsigned long) scmnd->host_scribble; 942 943 spin_lock_irq(&qdio->req_q_lock); 944 if (zfcp_qdio_sbal_get(qdio)) 945 goto out; 946 req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND, 947 SBAL_SFLAGS0_TYPE_READ, 948 qdio->adapter->pool.scsi_abort); 949 if (IS_ERR(req)) { 950 req = NULL; 951 goto out; 952 } 953 954 if (unlikely(!(atomic_read(&zfcp_sdev->status) & 955 ZFCP_STATUS_COMMON_UNBLOCKED))) 956 goto out_error_free; 957 958 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 959 960 req->data = sdev; 961 req->handler = zfcp_fsf_abort_fcp_command_handler; 962 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle; 963 req->qtcb->header.port_handle = zfcp_sdev->port->handle; 964 req->qtcb->bottom.support.req_handle = (u64) old_req_id; 965 966 zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT); 967 if (!zfcp_fsf_req_send(req)) { 968 /* NOTE: DO NOT TOUCH req, UNTIL IT COMPLETES! */ 969 goto out; 970 } 971 972 out_error_free: 973 zfcp_fsf_req_free(req); 974 req = NULL; 975 out: 976 spin_unlock_irq(&qdio->req_q_lock); 977 return req; 978 } 979 980 static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req) 981 { 982 struct zfcp_adapter *adapter = req->adapter; 983 struct zfcp_fsf_ct_els *ct = req->data; 984 struct fsf_qtcb_header *header = &req->qtcb->header; 985 986 ct->status = -EINVAL; 987 988 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 989 goto skip_fsfstatus; 990 991 switch (header->fsf_status) { 992 case FSF_GOOD: 993 ct->status = 0; 994 zfcp_dbf_san_res("fsscth2", req); 995 break; 996 case FSF_SERVICE_CLASS_NOT_SUPPORTED: 997 zfcp_fsf_class_not_supp(req); 998 break; 999 case FSF_ADAPTER_STATUS_AVAILABLE: 1000 switch (header->fsf_status_qual.word[0]){ 1001 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 1002 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 1003 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1004 break; 1005 } 1006 break; 1007 case FSF_PORT_BOXED: 1008 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1009 break; 1010 case FSF_PORT_HANDLE_NOT_VALID: 1011 zfcp_erp_adapter_reopen(adapter, 0, "fsscth1"); 1012 /* fall through */ 1013 case FSF_GENERIC_COMMAND_REJECTED: 1014 case FSF_PAYLOAD_SIZE_MISMATCH: 1015 case FSF_REQUEST_SIZE_TOO_LARGE: 1016 case FSF_RESPONSE_SIZE_TOO_LARGE: 1017 case FSF_SBAL_MISMATCH: 1018 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1019 break; 1020 } 1021 1022 skip_fsfstatus: 1023 if (ct->handler) 1024 ct->handler(ct->handler_data); 1025 } 1026 1027 static void zfcp_fsf_setup_ct_els_unchained(struct zfcp_qdio *qdio, 1028 struct zfcp_qdio_req *q_req, 1029 struct scatterlist *sg_req, 1030 struct scatterlist *sg_resp) 1031 { 1032 zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_req), sg_req->length); 1033 zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_resp), sg_resp->length); 1034 zfcp_qdio_set_sbale_last(qdio, q_req); 1035 } 1036 1037 static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, 1038 struct scatterlist *sg_req, 1039 struct scatterlist *sg_resp) 1040 { 1041 struct zfcp_adapter *adapter = req->adapter; 1042 struct zfcp_qdio *qdio = adapter->qdio; 1043 struct fsf_qtcb *qtcb = req->qtcb; 1044 u32 feat = adapter->adapter_features; 1045 1046 if (zfcp_adapter_multi_buffer_active(adapter)) { 1047 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req)) 1048 return -EIO; 1049 qtcb->bottom.support.req_buf_length = 1050 zfcp_qdio_real_bytes(sg_req); 1051 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp)) 1052 return -EIO; 1053 qtcb->bottom.support.resp_buf_length = 1054 zfcp_qdio_real_bytes(sg_resp); 1055 1056 zfcp_qdio_set_data_div(qdio, &req->qdio_req, sg_nents(sg_req)); 1057 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 1058 zfcp_qdio_set_scount(qdio, &req->qdio_req); 1059 return 0; 1060 } 1061 1062 /* use single, unchained SBAL if it can hold the request */ 1063 if (zfcp_qdio_sg_one_sbale(sg_req) && zfcp_qdio_sg_one_sbale(sg_resp)) { 1064 zfcp_fsf_setup_ct_els_unchained(qdio, &req->qdio_req, 1065 sg_req, sg_resp); 1066 return 0; 1067 } 1068 1069 if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS)) 1070 return -EOPNOTSUPP; 1071 1072 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req)) 1073 return -EIO; 1074 1075 qtcb->bottom.support.req_buf_length = zfcp_qdio_real_bytes(sg_req); 1076 1077 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 1078 zfcp_qdio_skip_to_last_sbale(qdio, &req->qdio_req); 1079 1080 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp)) 1081 return -EIO; 1082 1083 qtcb->bottom.support.resp_buf_length = zfcp_qdio_real_bytes(sg_resp); 1084 1085 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 1086 1087 return 0; 1088 } 1089 1090 static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req, 1091 struct scatterlist *sg_req, 1092 struct scatterlist *sg_resp, 1093 unsigned int timeout) 1094 { 1095 int ret; 1096 1097 ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp); 1098 if (ret) 1099 return ret; 1100 1101 /* common settings for ct/gs and els requests */ 1102 if (timeout > 255) 1103 timeout = 255; /* max value accepted by hardware */ 1104 req->qtcb->bottom.support.service_class = FSF_CLASS_3; 1105 req->qtcb->bottom.support.timeout = timeout; 1106 zfcp_fsf_start_timer(req, (timeout + 10) * HZ); 1107 1108 return 0; 1109 } 1110 1111 /** 1112 * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS) 1113 * @wka_port: pointer to zfcp WKA port to send CT/GS to 1114 * @ct: pointer to struct zfcp_send_ct with data for request 1115 * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req 1116 * @timeout: timeout that hardware should use, and a later software timeout 1117 */ 1118 int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port, 1119 struct zfcp_fsf_ct_els *ct, mempool_t *pool, 1120 unsigned int timeout) 1121 { 1122 struct zfcp_qdio *qdio = wka_port->adapter->qdio; 1123 struct zfcp_fsf_req *req; 1124 int ret = -EIO; 1125 1126 spin_lock_irq(&qdio->req_q_lock); 1127 if (zfcp_qdio_sbal_get(qdio)) 1128 goto out; 1129 1130 req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC, 1131 SBAL_SFLAGS0_TYPE_WRITE_READ, pool); 1132 1133 if (IS_ERR(req)) { 1134 ret = PTR_ERR(req); 1135 goto out; 1136 } 1137 1138 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1139 ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, timeout); 1140 if (ret) 1141 goto failed_send; 1142 1143 req->handler = zfcp_fsf_send_ct_handler; 1144 req->qtcb->header.port_handle = wka_port->handle; 1145 ct->d_id = wka_port->d_id; 1146 req->data = ct; 1147 1148 zfcp_dbf_san_req("fssct_1", req, wka_port->d_id); 1149 1150 ret = zfcp_fsf_req_send(req); 1151 if (ret) 1152 goto failed_send; 1153 /* NOTE: DO NOT TOUCH req PAST THIS POINT! */ 1154 1155 goto out; 1156 1157 failed_send: 1158 zfcp_fsf_req_free(req); 1159 out: 1160 spin_unlock_irq(&qdio->req_q_lock); 1161 return ret; 1162 } 1163 1164 static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req) 1165 { 1166 struct zfcp_fsf_ct_els *send_els = req->data; 1167 struct fsf_qtcb_header *header = &req->qtcb->header; 1168 1169 send_els->status = -EINVAL; 1170 1171 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1172 goto skip_fsfstatus; 1173 1174 switch (header->fsf_status) { 1175 case FSF_GOOD: 1176 send_els->status = 0; 1177 zfcp_dbf_san_res("fsselh1", req); 1178 break; 1179 case FSF_SERVICE_CLASS_NOT_SUPPORTED: 1180 zfcp_fsf_class_not_supp(req); 1181 break; 1182 case FSF_ADAPTER_STATUS_AVAILABLE: 1183 switch (header->fsf_status_qual.word[0]){ 1184 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 1185 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 1186 case FSF_SQ_RETRY_IF_POSSIBLE: 1187 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1188 break; 1189 } 1190 break; 1191 case FSF_ELS_COMMAND_REJECTED: 1192 case FSF_PAYLOAD_SIZE_MISMATCH: 1193 case FSF_REQUEST_SIZE_TOO_LARGE: 1194 case FSF_RESPONSE_SIZE_TOO_LARGE: 1195 break; 1196 case FSF_SBAL_MISMATCH: 1197 /* should never occur, avoided in zfcp_fsf_send_els */ 1198 /* fall through */ 1199 default: 1200 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1201 break; 1202 } 1203 skip_fsfstatus: 1204 if (send_els->handler) 1205 send_els->handler(send_els->handler_data); 1206 } 1207 1208 /** 1209 * zfcp_fsf_send_els - initiate an ELS command (FC-FS) 1210 * @adapter: pointer to zfcp adapter 1211 * @d_id: N_Port_ID to send ELS to 1212 * @els: pointer to struct zfcp_send_els with data for the command 1213 * @timeout: timeout that hardware should use, and a later software timeout 1214 */ 1215 int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id, 1216 struct zfcp_fsf_ct_els *els, unsigned int timeout) 1217 { 1218 struct zfcp_fsf_req *req; 1219 struct zfcp_qdio *qdio = adapter->qdio; 1220 int ret = -EIO; 1221 1222 spin_lock_irq(&qdio->req_q_lock); 1223 if (zfcp_qdio_sbal_get(qdio)) 1224 goto out; 1225 1226 req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS, 1227 SBAL_SFLAGS0_TYPE_WRITE_READ, NULL); 1228 1229 if (IS_ERR(req)) { 1230 ret = PTR_ERR(req); 1231 goto out; 1232 } 1233 1234 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1235 1236 if (!zfcp_adapter_multi_buffer_active(adapter)) 1237 zfcp_qdio_sbal_limit(qdio, &req->qdio_req, 2); 1238 1239 ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, timeout); 1240 1241 if (ret) 1242 goto failed_send; 1243 1244 hton24(req->qtcb->bottom.support.d_id, d_id); 1245 req->handler = zfcp_fsf_send_els_handler; 1246 els->d_id = d_id; 1247 req->data = els; 1248 1249 zfcp_dbf_san_req("fssels1", req, d_id); 1250 1251 ret = zfcp_fsf_req_send(req); 1252 if (ret) 1253 goto failed_send; 1254 /* NOTE: DO NOT TOUCH req PAST THIS POINT! */ 1255 1256 goto out; 1257 1258 failed_send: 1259 zfcp_fsf_req_free(req); 1260 out: 1261 spin_unlock_irq(&qdio->req_q_lock); 1262 return ret; 1263 } 1264 1265 int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action) 1266 { 1267 struct zfcp_fsf_req *req; 1268 struct zfcp_qdio *qdio = erp_action->adapter->qdio; 1269 int retval = -EIO; 1270 1271 spin_lock_irq(&qdio->req_q_lock); 1272 if (zfcp_qdio_sbal_get(qdio)) 1273 goto out; 1274 1275 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA, 1276 SBAL_SFLAGS0_TYPE_READ, 1277 qdio->adapter->pool.erp_req); 1278 1279 if (IS_ERR(req)) { 1280 retval = PTR_ERR(req); 1281 goto out; 1282 } 1283 1284 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1285 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 1286 1287 req->qtcb->bottom.config.feature_selection = 1288 FSF_FEATURE_NOTIFICATION_LOST | 1289 FSF_FEATURE_UPDATE_ALERT | 1290 FSF_FEATURE_REQUEST_SFP_DATA; 1291 req->erp_action = erp_action; 1292 req->handler = zfcp_fsf_exchange_config_data_handler; 1293 erp_action->fsf_req_id = req->req_id; 1294 1295 zfcp_fsf_start_erp_timer(req); 1296 retval = zfcp_fsf_req_send(req); 1297 if (retval) { 1298 zfcp_fsf_req_free(req); 1299 erp_action->fsf_req_id = 0; 1300 } 1301 /* NOTE: DO NOT TOUCH req PAST THIS POINT! */ 1302 out: 1303 spin_unlock_irq(&qdio->req_q_lock); 1304 return retval; 1305 } 1306 1307 1308 /** 1309 * zfcp_fsf_exchange_config_data_sync() - Request information about FCP channel. 1310 * @qdio: pointer to the QDIO-Queue to use for sending the command. 1311 * @data: pointer to the QTCB-Bottom for storing the result of the command, 1312 * might be %NULL. 1313 * 1314 * Returns: 1315 * * 0 - Exchange Config Data was successful, @data is complete 1316 * * -EIO - Exchange Config Data was not successful, @data is invalid 1317 * * -EAGAIN - @data contains incomplete data 1318 * * -ENOMEM - Some memory allocation failed along the way 1319 */ 1320 int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio, 1321 struct fsf_qtcb_bottom_config *data) 1322 { 1323 struct zfcp_fsf_req *req = NULL; 1324 int retval = -EIO; 1325 1326 spin_lock_irq(&qdio->req_q_lock); 1327 if (zfcp_qdio_sbal_get(qdio)) 1328 goto out_unlock; 1329 1330 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA, 1331 SBAL_SFLAGS0_TYPE_READ, NULL); 1332 1333 if (IS_ERR(req)) { 1334 retval = PTR_ERR(req); 1335 goto out_unlock; 1336 } 1337 1338 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 1339 req->handler = zfcp_fsf_exchange_config_data_handler; 1340 1341 req->qtcb->bottom.config.feature_selection = 1342 FSF_FEATURE_NOTIFICATION_LOST | 1343 FSF_FEATURE_UPDATE_ALERT | 1344 FSF_FEATURE_REQUEST_SFP_DATA; 1345 1346 if (data) 1347 req->data = data; 1348 1349 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); 1350 retval = zfcp_fsf_req_send(req); 1351 spin_unlock_irq(&qdio->req_q_lock); 1352 1353 if (!retval) { 1354 /* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */ 1355 wait_for_completion(&req->completion); 1356 1357 if (req->status & 1358 (ZFCP_STATUS_FSFREQ_ERROR | ZFCP_STATUS_FSFREQ_DISMISSED)) 1359 retval = -EIO; 1360 else if (req->status & ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE) 1361 retval = -EAGAIN; 1362 } 1363 1364 zfcp_fsf_req_free(req); 1365 return retval; 1366 1367 out_unlock: 1368 spin_unlock_irq(&qdio->req_q_lock); 1369 return retval; 1370 } 1371 1372 /** 1373 * zfcp_fsf_exchange_port_data - request information about local port 1374 * @erp_action: ERP action for the adapter for which port data is requested 1375 * Returns: 0 on success, error otherwise 1376 */ 1377 int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action) 1378 { 1379 struct zfcp_qdio *qdio = erp_action->adapter->qdio; 1380 struct zfcp_fsf_req *req; 1381 int retval = -EIO; 1382 1383 if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) 1384 return -EOPNOTSUPP; 1385 1386 spin_lock_irq(&qdio->req_q_lock); 1387 if (zfcp_qdio_sbal_get(qdio)) 1388 goto out; 1389 1390 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA, 1391 SBAL_SFLAGS0_TYPE_READ, 1392 qdio->adapter->pool.erp_req); 1393 1394 if (IS_ERR(req)) { 1395 retval = PTR_ERR(req); 1396 goto out; 1397 } 1398 1399 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1400 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 1401 1402 req->handler = zfcp_fsf_exchange_port_data_handler; 1403 req->erp_action = erp_action; 1404 erp_action->fsf_req_id = req->req_id; 1405 1406 zfcp_fsf_start_erp_timer(req); 1407 retval = zfcp_fsf_req_send(req); 1408 if (retval) { 1409 zfcp_fsf_req_free(req); 1410 erp_action->fsf_req_id = 0; 1411 } 1412 /* NOTE: DO NOT TOUCH req PAST THIS POINT! */ 1413 out: 1414 spin_unlock_irq(&qdio->req_q_lock); 1415 return retval; 1416 } 1417 1418 /** 1419 * zfcp_fsf_exchange_port_data_sync() - Request information about local port. 1420 * @qdio: pointer to the QDIO-Queue to use for sending the command. 1421 * @data: pointer to the QTCB-Bottom for storing the result of the command, 1422 * might be %NULL. 1423 * 1424 * Returns: 1425 * * 0 - Exchange Port Data was successful, @data is complete 1426 * * -EIO - Exchange Port Data was not successful, @data is invalid 1427 * * -EAGAIN - @data contains incomplete data 1428 * * -ENOMEM - Some memory allocation failed along the way 1429 * * -EOPNOTSUPP - This operation is not supported 1430 */ 1431 int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio, 1432 struct fsf_qtcb_bottom_port *data) 1433 { 1434 struct zfcp_fsf_req *req = NULL; 1435 int retval = -EIO; 1436 1437 if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) 1438 return -EOPNOTSUPP; 1439 1440 spin_lock_irq(&qdio->req_q_lock); 1441 if (zfcp_qdio_sbal_get(qdio)) 1442 goto out_unlock; 1443 1444 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA, 1445 SBAL_SFLAGS0_TYPE_READ, NULL); 1446 1447 if (IS_ERR(req)) { 1448 retval = PTR_ERR(req); 1449 goto out_unlock; 1450 } 1451 1452 if (data) 1453 req->data = data; 1454 1455 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 1456 1457 req->handler = zfcp_fsf_exchange_port_data_handler; 1458 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); 1459 retval = zfcp_fsf_req_send(req); 1460 spin_unlock_irq(&qdio->req_q_lock); 1461 1462 if (!retval) { 1463 /* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */ 1464 wait_for_completion(&req->completion); 1465 1466 if (req->status & 1467 (ZFCP_STATUS_FSFREQ_ERROR | ZFCP_STATUS_FSFREQ_DISMISSED)) 1468 retval = -EIO; 1469 else if (req->status & ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE) 1470 retval = -EAGAIN; 1471 } 1472 1473 zfcp_fsf_req_free(req); 1474 return retval; 1475 1476 out_unlock: 1477 spin_unlock_irq(&qdio->req_q_lock); 1478 return retval; 1479 } 1480 1481 static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req) 1482 { 1483 struct zfcp_port *port = req->data; 1484 struct fsf_qtcb_header *header = &req->qtcb->header; 1485 struct fc_els_flogi *plogi; 1486 1487 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1488 goto out; 1489 1490 switch (header->fsf_status) { 1491 case FSF_PORT_ALREADY_OPEN: 1492 break; 1493 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED: 1494 dev_warn(&req->adapter->ccw_device->dev, 1495 "Not enough FCP adapter resources to open " 1496 "remote port 0x%016Lx\n", 1497 (unsigned long long)port->wwpn); 1498 zfcp_erp_set_port_status(port, 1499 ZFCP_STATUS_COMMON_ERP_FAILED); 1500 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1501 break; 1502 case FSF_ADAPTER_STATUS_AVAILABLE: 1503 switch (header->fsf_status_qual.word[0]) { 1504 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 1505 /* no zfcp_fc_test_link() with failed open port */ 1506 /* fall through */ 1507 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 1508 case FSF_SQ_NO_RETRY_POSSIBLE: 1509 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1510 break; 1511 } 1512 break; 1513 case FSF_GOOD: 1514 port->handle = header->port_handle; 1515 atomic_or(ZFCP_STATUS_COMMON_OPEN | 1516 ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); 1517 atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_BOXED, 1518 &port->status); 1519 /* check whether D_ID has changed during open */ 1520 /* 1521 * FIXME: This check is not airtight, as the FCP channel does 1522 * not monitor closures of target port connections caused on 1523 * the remote side. Thus, they might miss out on invalidating 1524 * locally cached WWPNs (and other N_Port parameters) of gone 1525 * target ports. So, our heroic attempt to make things safe 1526 * could be undermined by 'open port' response data tagged with 1527 * obsolete WWPNs. Another reason to monitor potential 1528 * connection closures ourself at least (by interpreting 1529 * incoming ELS' and unsolicited status). It just crosses my 1530 * mind that one should be able to cross-check by means of 1531 * another GID_PN straight after a port has been opened. 1532 * Alternately, an ADISC/PDISC ELS should suffice, as well. 1533 */ 1534 plogi = (struct fc_els_flogi *) req->qtcb->bottom.support.els; 1535 if (req->qtcb->bottom.support.els1_length >= 1536 FSF_PLOGI_MIN_LEN) 1537 zfcp_fc_plogi_evaluate(port, plogi); 1538 break; 1539 case FSF_UNKNOWN_OP_SUBTYPE: 1540 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1541 break; 1542 } 1543 1544 out: 1545 put_device(&port->dev); 1546 } 1547 1548 /** 1549 * zfcp_fsf_open_port - create and send open port request 1550 * @erp_action: pointer to struct zfcp_erp_action 1551 * Returns: 0 on success, error otherwise 1552 */ 1553 int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action) 1554 { 1555 struct zfcp_qdio *qdio = erp_action->adapter->qdio; 1556 struct zfcp_port *port = erp_action->port; 1557 struct zfcp_fsf_req *req; 1558 int retval = -EIO; 1559 1560 spin_lock_irq(&qdio->req_q_lock); 1561 if (zfcp_qdio_sbal_get(qdio)) 1562 goto out; 1563 1564 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID, 1565 SBAL_SFLAGS0_TYPE_READ, 1566 qdio->adapter->pool.erp_req); 1567 1568 if (IS_ERR(req)) { 1569 retval = PTR_ERR(req); 1570 goto out; 1571 } 1572 1573 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1574 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 1575 1576 req->handler = zfcp_fsf_open_port_handler; 1577 hton24(req->qtcb->bottom.support.d_id, port->d_id); 1578 req->data = port; 1579 req->erp_action = erp_action; 1580 erp_action->fsf_req_id = req->req_id; 1581 get_device(&port->dev); 1582 1583 zfcp_fsf_start_erp_timer(req); 1584 retval = zfcp_fsf_req_send(req); 1585 if (retval) { 1586 zfcp_fsf_req_free(req); 1587 erp_action->fsf_req_id = 0; 1588 put_device(&port->dev); 1589 } 1590 /* NOTE: DO NOT TOUCH req PAST THIS POINT! */ 1591 out: 1592 spin_unlock_irq(&qdio->req_q_lock); 1593 return retval; 1594 } 1595 1596 static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req) 1597 { 1598 struct zfcp_port *port = req->data; 1599 1600 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1601 return; 1602 1603 switch (req->qtcb->header.fsf_status) { 1604 case FSF_PORT_HANDLE_NOT_VALID: 1605 zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1"); 1606 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1607 break; 1608 case FSF_ADAPTER_STATUS_AVAILABLE: 1609 break; 1610 case FSF_GOOD: 1611 zfcp_erp_clear_port_status(port, ZFCP_STATUS_COMMON_OPEN); 1612 break; 1613 } 1614 } 1615 1616 /** 1617 * zfcp_fsf_close_port - create and send close port request 1618 * @erp_action: pointer to struct zfcp_erp_action 1619 * Returns: 0 on success, error otherwise 1620 */ 1621 int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action) 1622 { 1623 struct zfcp_qdio *qdio = erp_action->adapter->qdio; 1624 struct zfcp_fsf_req *req; 1625 int retval = -EIO; 1626 1627 spin_lock_irq(&qdio->req_q_lock); 1628 if (zfcp_qdio_sbal_get(qdio)) 1629 goto out; 1630 1631 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT, 1632 SBAL_SFLAGS0_TYPE_READ, 1633 qdio->adapter->pool.erp_req); 1634 1635 if (IS_ERR(req)) { 1636 retval = PTR_ERR(req); 1637 goto out; 1638 } 1639 1640 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1641 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 1642 1643 req->handler = zfcp_fsf_close_port_handler; 1644 req->data = erp_action->port; 1645 req->erp_action = erp_action; 1646 req->qtcb->header.port_handle = erp_action->port->handle; 1647 erp_action->fsf_req_id = req->req_id; 1648 1649 zfcp_fsf_start_erp_timer(req); 1650 retval = zfcp_fsf_req_send(req); 1651 if (retval) { 1652 zfcp_fsf_req_free(req); 1653 erp_action->fsf_req_id = 0; 1654 } 1655 /* NOTE: DO NOT TOUCH req PAST THIS POINT! */ 1656 out: 1657 spin_unlock_irq(&qdio->req_q_lock); 1658 return retval; 1659 } 1660 1661 static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req) 1662 { 1663 struct zfcp_fc_wka_port *wka_port = req->data; 1664 struct fsf_qtcb_header *header = &req->qtcb->header; 1665 1666 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) { 1667 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE; 1668 goto out; 1669 } 1670 1671 switch (header->fsf_status) { 1672 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED: 1673 dev_warn(&req->adapter->ccw_device->dev, 1674 "Opening WKA port 0x%x failed\n", wka_port->d_id); 1675 /* fall through */ 1676 case FSF_ADAPTER_STATUS_AVAILABLE: 1677 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1678 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE; 1679 break; 1680 case FSF_GOOD: 1681 wka_port->handle = header->port_handle; 1682 /* fall through */ 1683 case FSF_PORT_ALREADY_OPEN: 1684 wka_port->status = ZFCP_FC_WKA_PORT_ONLINE; 1685 } 1686 out: 1687 wake_up(&wka_port->completion_wq); 1688 } 1689 1690 /** 1691 * zfcp_fsf_open_wka_port - create and send open wka-port request 1692 * @wka_port: pointer to struct zfcp_fc_wka_port 1693 * Returns: 0 on success, error otherwise 1694 */ 1695 int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port) 1696 { 1697 struct zfcp_qdio *qdio = wka_port->adapter->qdio; 1698 struct zfcp_fsf_req *req; 1699 unsigned long req_id = 0; 1700 int retval = -EIO; 1701 1702 spin_lock_irq(&qdio->req_q_lock); 1703 if (zfcp_qdio_sbal_get(qdio)) 1704 goto out; 1705 1706 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID, 1707 SBAL_SFLAGS0_TYPE_READ, 1708 qdio->adapter->pool.erp_req); 1709 1710 if (IS_ERR(req)) { 1711 retval = PTR_ERR(req); 1712 goto out; 1713 } 1714 1715 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1716 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 1717 1718 req->handler = zfcp_fsf_open_wka_port_handler; 1719 hton24(req->qtcb->bottom.support.d_id, wka_port->d_id); 1720 req->data = wka_port; 1721 1722 req_id = req->req_id; 1723 1724 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); 1725 retval = zfcp_fsf_req_send(req); 1726 if (retval) 1727 zfcp_fsf_req_free(req); 1728 /* NOTE: DO NOT TOUCH req PAST THIS POINT! */ 1729 out: 1730 spin_unlock_irq(&qdio->req_q_lock); 1731 if (!retval) 1732 zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req_id); 1733 return retval; 1734 } 1735 1736 static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req) 1737 { 1738 struct zfcp_fc_wka_port *wka_port = req->data; 1739 1740 if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) { 1741 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1742 zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1"); 1743 } 1744 1745 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE; 1746 wake_up(&wka_port->completion_wq); 1747 } 1748 1749 /** 1750 * zfcp_fsf_close_wka_port - create and send close wka port request 1751 * @wka_port: WKA port to open 1752 * Returns: 0 on success, error otherwise 1753 */ 1754 int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port) 1755 { 1756 struct zfcp_qdio *qdio = wka_port->adapter->qdio; 1757 struct zfcp_fsf_req *req; 1758 unsigned long req_id = 0; 1759 int retval = -EIO; 1760 1761 spin_lock_irq(&qdio->req_q_lock); 1762 if (zfcp_qdio_sbal_get(qdio)) 1763 goto out; 1764 1765 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT, 1766 SBAL_SFLAGS0_TYPE_READ, 1767 qdio->adapter->pool.erp_req); 1768 1769 if (IS_ERR(req)) { 1770 retval = PTR_ERR(req); 1771 goto out; 1772 } 1773 1774 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1775 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 1776 1777 req->handler = zfcp_fsf_close_wka_port_handler; 1778 req->data = wka_port; 1779 req->qtcb->header.port_handle = wka_port->handle; 1780 1781 req_id = req->req_id; 1782 1783 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); 1784 retval = zfcp_fsf_req_send(req); 1785 if (retval) 1786 zfcp_fsf_req_free(req); 1787 /* NOTE: DO NOT TOUCH req PAST THIS POINT! */ 1788 out: 1789 spin_unlock_irq(&qdio->req_q_lock); 1790 if (!retval) 1791 zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req_id); 1792 return retval; 1793 } 1794 1795 static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req) 1796 { 1797 struct zfcp_port *port = req->data; 1798 struct fsf_qtcb_header *header = &req->qtcb->header; 1799 struct scsi_device *sdev; 1800 1801 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1802 return; 1803 1804 switch (header->fsf_status) { 1805 case FSF_PORT_HANDLE_NOT_VALID: 1806 zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1"); 1807 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1808 break; 1809 case FSF_PORT_BOXED: 1810 /* can't use generic zfcp_erp_modify_port_status because 1811 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */ 1812 atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); 1813 shost_for_each_device(sdev, port->adapter->scsi_host) 1814 if (sdev_to_zfcp(sdev)->port == port) 1815 atomic_andnot(ZFCP_STATUS_COMMON_OPEN, 1816 &sdev_to_zfcp(sdev)->status); 1817 zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED); 1818 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, 1819 "fscpph2"); 1820 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1821 break; 1822 case FSF_ADAPTER_STATUS_AVAILABLE: 1823 switch (header->fsf_status_qual.word[0]) { 1824 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 1825 /* fall through */ 1826 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 1827 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1828 break; 1829 } 1830 break; 1831 case FSF_GOOD: 1832 /* can't use generic zfcp_erp_modify_port_status because 1833 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port 1834 */ 1835 atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); 1836 shost_for_each_device(sdev, port->adapter->scsi_host) 1837 if (sdev_to_zfcp(sdev)->port == port) 1838 atomic_andnot(ZFCP_STATUS_COMMON_OPEN, 1839 &sdev_to_zfcp(sdev)->status); 1840 break; 1841 } 1842 } 1843 1844 /** 1845 * zfcp_fsf_close_physical_port - close physical port 1846 * @erp_action: pointer to struct zfcp_erp_action 1847 * Returns: 0 on success 1848 */ 1849 int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action) 1850 { 1851 struct zfcp_qdio *qdio = erp_action->adapter->qdio; 1852 struct zfcp_fsf_req *req; 1853 int retval = -EIO; 1854 1855 spin_lock_irq(&qdio->req_q_lock); 1856 if (zfcp_qdio_sbal_get(qdio)) 1857 goto out; 1858 1859 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT, 1860 SBAL_SFLAGS0_TYPE_READ, 1861 qdio->adapter->pool.erp_req); 1862 1863 if (IS_ERR(req)) { 1864 retval = PTR_ERR(req); 1865 goto out; 1866 } 1867 1868 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1869 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 1870 1871 req->data = erp_action->port; 1872 req->qtcb->header.port_handle = erp_action->port->handle; 1873 req->erp_action = erp_action; 1874 req->handler = zfcp_fsf_close_physical_port_handler; 1875 erp_action->fsf_req_id = req->req_id; 1876 1877 zfcp_fsf_start_erp_timer(req); 1878 retval = zfcp_fsf_req_send(req); 1879 if (retval) { 1880 zfcp_fsf_req_free(req); 1881 erp_action->fsf_req_id = 0; 1882 } 1883 /* NOTE: DO NOT TOUCH req PAST THIS POINT! */ 1884 out: 1885 spin_unlock_irq(&qdio->req_q_lock); 1886 return retval; 1887 } 1888 1889 static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req) 1890 { 1891 struct zfcp_adapter *adapter = req->adapter; 1892 struct scsi_device *sdev = req->data; 1893 struct zfcp_scsi_dev *zfcp_sdev; 1894 struct fsf_qtcb_header *header = &req->qtcb->header; 1895 union fsf_status_qual *qual = &header->fsf_status_qual; 1896 1897 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1898 return; 1899 1900 zfcp_sdev = sdev_to_zfcp(sdev); 1901 1902 atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_DENIED | 1903 ZFCP_STATUS_COMMON_ACCESS_BOXED, 1904 &zfcp_sdev->status); 1905 1906 switch (header->fsf_status) { 1907 1908 case FSF_PORT_HANDLE_NOT_VALID: 1909 zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1"); 1910 /* fall through */ 1911 case FSF_LUN_ALREADY_OPEN: 1912 break; 1913 case FSF_PORT_BOXED: 1914 zfcp_erp_set_port_status(zfcp_sdev->port, 1915 ZFCP_STATUS_COMMON_ACCESS_BOXED); 1916 zfcp_erp_port_reopen(zfcp_sdev->port, 1917 ZFCP_STATUS_COMMON_ERP_FAILED, "fsouh_2"); 1918 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1919 break; 1920 case FSF_LUN_SHARING_VIOLATION: 1921 if (qual->word[0]) 1922 dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev, 1923 "LUN 0x%016Lx on port 0x%016Lx is already in " 1924 "use by CSS%d, MIF Image ID %x\n", 1925 zfcp_scsi_dev_lun(sdev), 1926 (unsigned long long)zfcp_sdev->port->wwpn, 1927 qual->fsf_queue_designator.cssid, 1928 qual->fsf_queue_designator.hla); 1929 zfcp_erp_set_lun_status(sdev, 1930 ZFCP_STATUS_COMMON_ERP_FAILED | 1931 ZFCP_STATUS_COMMON_ACCESS_DENIED); 1932 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1933 break; 1934 case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED: 1935 dev_warn(&adapter->ccw_device->dev, 1936 "No handle is available for LUN " 1937 "0x%016Lx on port 0x%016Lx\n", 1938 (unsigned long long)zfcp_scsi_dev_lun(sdev), 1939 (unsigned long long)zfcp_sdev->port->wwpn); 1940 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED); 1941 /* fall through */ 1942 case FSF_INVALID_COMMAND_OPTION: 1943 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1944 break; 1945 case FSF_ADAPTER_STATUS_AVAILABLE: 1946 switch (header->fsf_status_qual.word[0]) { 1947 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 1948 zfcp_fc_test_link(zfcp_sdev->port); 1949 /* fall through */ 1950 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 1951 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1952 break; 1953 } 1954 break; 1955 1956 case FSF_GOOD: 1957 zfcp_sdev->lun_handle = header->lun_handle; 1958 atomic_or(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status); 1959 break; 1960 } 1961 } 1962 1963 /** 1964 * zfcp_fsf_open_lun - open LUN 1965 * @erp_action: pointer to struct zfcp_erp_action 1966 * Returns: 0 on success, error otherwise 1967 */ 1968 int zfcp_fsf_open_lun(struct zfcp_erp_action *erp_action) 1969 { 1970 struct zfcp_adapter *adapter = erp_action->adapter; 1971 struct zfcp_qdio *qdio = adapter->qdio; 1972 struct zfcp_fsf_req *req; 1973 int retval = -EIO; 1974 1975 spin_lock_irq(&qdio->req_q_lock); 1976 if (zfcp_qdio_sbal_get(qdio)) 1977 goto out; 1978 1979 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN, 1980 SBAL_SFLAGS0_TYPE_READ, 1981 adapter->pool.erp_req); 1982 1983 if (IS_ERR(req)) { 1984 retval = PTR_ERR(req); 1985 goto out; 1986 } 1987 1988 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1989 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 1990 1991 req->qtcb->header.port_handle = erp_action->port->handle; 1992 req->qtcb->bottom.support.fcp_lun = zfcp_scsi_dev_lun(erp_action->sdev); 1993 req->handler = zfcp_fsf_open_lun_handler; 1994 req->data = erp_action->sdev; 1995 req->erp_action = erp_action; 1996 erp_action->fsf_req_id = req->req_id; 1997 1998 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) 1999 req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING; 2000 2001 zfcp_fsf_start_erp_timer(req); 2002 retval = zfcp_fsf_req_send(req); 2003 if (retval) { 2004 zfcp_fsf_req_free(req); 2005 erp_action->fsf_req_id = 0; 2006 } 2007 /* NOTE: DO NOT TOUCH req PAST THIS POINT! */ 2008 out: 2009 spin_unlock_irq(&qdio->req_q_lock); 2010 return retval; 2011 } 2012 2013 static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req) 2014 { 2015 struct scsi_device *sdev = req->data; 2016 struct zfcp_scsi_dev *zfcp_sdev; 2017 2018 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 2019 return; 2020 2021 zfcp_sdev = sdev_to_zfcp(sdev); 2022 2023 switch (req->qtcb->header.fsf_status) { 2024 case FSF_PORT_HANDLE_NOT_VALID: 2025 zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1"); 2026 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2027 break; 2028 case FSF_LUN_HANDLE_NOT_VALID: 2029 zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fscuh_2"); 2030 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2031 break; 2032 case FSF_PORT_BOXED: 2033 zfcp_erp_set_port_status(zfcp_sdev->port, 2034 ZFCP_STATUS_COMMON_ACCESS_BOXED); 2035 zfcp_erp_port_reopen(zfcp_sdev->port, 2036 ZFCP_STATUS_COMMON_ERP_FAILED, "fscuh_3"); 2037 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2038 break; 2039 case FSF_ADAPTER_STATUS_AVAILABLE: 2040 switch (req->qtcb->header.fsf_status_qual.word[0]) { 2041 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 2042 zfcp_fc_test_link(zfcp_sdev->port); 2043 /* fall through */ 2044 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 2045 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2046 break; 2047 } 2048 break; 2049 case FSF_GOOD: 2050 atomic_andnot(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status); 2051 break; 2052 } 2053 } 2054 2055 /** 2056 * zfcp_fsf_close_LUN - close LUN 2057 * @erp_action: pointer to erp_action triggering the "close LUN" 2058 * Returns: 0 on success, error otherwise 2059 */ 2060 int zfcp_fsf_close_lun(struct zfcp_erp_action *erp_action) 2061 { 2062 struct zfcp_qdio *qdio = erp_action->adapter->qdio; 2063 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(erp_action->sdev); 2064 struct zfcp_fsf_req *req; 2065 int retval = -EIO; 2066 2067 spin_lock_irq(&qdio->req_q_lock); 2068 if (zfcp_qdio_sbal_get(qdio)) 2069 goto out; 2070 2071 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN, 2072 SBAL_SFLAGS0_TYPE_READ, 2073 qdio->adapter->pool.erp_req); 2074 2075 if (IS_ERR(req)) { 2076 retval = PTR_ERR(req); 2077 goto out; 2078 } 2079 2080 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 2081 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 2082 2083 req->qtcb->header.port_handle = erp_action->port->handle; 2084 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle; 2085 req->handler = zfcp_fsf_close_lun_handler; 2086 req->data = erp_action->sdev; 2087 req->erp_action = erp_action; 2088 erp_action->fsf_req_id = req->req_id; 2089 2090 zfcp_fsf_start_erp_timer(req); 2091 retval = zfcp_fsf_req_send(req); 2092 if (retval) { 2093 zfcp_fsf_req_free(req); 2094 erp_action->fsf_req_id = 0; 2095 } 2096 /* NOTE: DO NOT TOUCH req PAST THIS POINT! */ 2097 out: 2098 spin_unlock_irq(&qdio->req_q_lock); 2099 return retval; 2100 } 2101 2102 static void zfcp_fsf_update_lat(struct zfcp_latency_record *lat_rec, u32 lat) 2103 { 2104 lat_rec->sum += lat; 2105 lat_rec->min = min(lat_rec->min, lat); 2106 lat_rec->max = max(lat_rec->max, lat); 2107 } 2108 2109 static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi) 2110 { 2111 struct fsf_qual_latency_info *lat_in; 2112 struct zfcp_latency_cont *lat = NULL; 2113 struct zfcp_scsi_dev *zfcp_sdev; 2114 struct zfcp_blk_drv_data blktrc; 2115 int ticks = req->adapter->timer_ticks; 2116 2117 lat_in = &req->qtcb->prefix.prot_status_qual.latency_info; 2118 2119 blktrc.flags = 0; 2120 blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC; 2121 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 2122 blktrc.flags |= ZFCP_BLK_REQ_ERROR; 2123 blktrc.inb_usage = 0; 2124 blktrc.outb_usage = req->qdio_req.qdio_outb_usage; 2125 2126 if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA && 2127 !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) { 2128 zfcp_sdev = sdev_to_zfcp(scsi->device); 2129 blktrc.flags |= ZFCP_BLK_LAT_VALID; 2130 blktrc.channel_lat = lat_in->channel_lat * ticks; 2131 blktrc.fabric_lat = lat_in->fabric_lat * ticks; 2132 2133 switch (req->qtcb->bottom.io.data_direction) { 2134 case FSF_DATADIR_DIF_READ_STRIP: 2135 case FSF_DATADIR_DIF_READ_CONVERT: 2136 case FSF_DATADIR_READ: 2137 lat = &zfcp_sdev->latencies.read; 2138 break; 2139 case FSF_DATADIR_DIF_WRITE_INSERT: 2140 case FSF_DATADIR_DIF_WRITE_CONVERT: 2141 case FSF_DATADIR_WRITE: 2142 lat = &zfcp_sdev->latencies.write; 2143 break; 2144 case FSF_DATADIR_CMND: 2145 lat = &zfcp_sdev->latencies.cmd; 2146 break; 2147 } 2148 2149 if (lat) { 2150 spin_lock(&zfcp_sdev->latencies.lock); 2151 zfcp_fsf_update_lat(&lat->channel, lat_in->channel_lat); 2152 zfcp_fsf_update_lat(&lat->fabric, lat_in->fabric_lat); 2153 lat->counter++; 2154 spin_unlock(&zfcp_sdev->latencies.lock); 2155 } 2156 } 2157 2158 blk_add_driver_data(scsi->request->q, scsi->request, &blktrc, 2159 sizeof(blktrc)); 2160 } 2161 2162 /** 2163 * zfcp_fsf_fcp_handler_common() - FCP response handler common to I/O and TMF. 2164 * @req: Pointer to FSF request. 2165 * @sdev: Pointer to SCSI device as request context. 2166 */ 2167 static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req, 2168 struct scsi_device *sdev) 2169 { 2170 struct zfcp_scsi_dev *zfcp_sdev; 2171 struct fsf_qtcb_header *header = &req->qtcb->header; 2172 2173 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) 2174 return; 2175 2176 zfcp_sdev = sdev_to_zfcp(sdev); 2177 2178 switch (header->fsf_status) { 2179 case FSF_HANDLE_MISMATCH: 2180 case FSF_PORT_HANDLE_NOT_VALID: 2181 zfcp_erp_adapter_reopen(req->adapter, 0, "fssfch1"); 2182 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2183 break; 2184 case FSF_FCPLUN_NOT_VALID: 2185 case FSF_LUN_HANDLE_NOT_VALID: 2186 zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fssfch2"); 2187 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2188 break; 2189 case FSF_SERVICE_CLASS_NOT_SUPPORTED: 2190 zfcp_fsf_class_not_supp(req); 2191 break; 2192 case FSF_DIRECTION_INDICATOR_NOT_VALID: 2193 dev_err(&req->adapter->ccw_device->dev, 2194 "Incorrect direction %d, LUN 0x%016Lx on port " 2195 "0x%016Lx closed\n", 2196 req->qtcb->bottom.io.data_direction, 2197 (unsigned long long)zfcp_scsi_dev_lun(sdev), 2198 (unsigned long long)zfcp_sdev->port->wwpn); 2199 zfcp_erp_adapter_shutdown(req->adapter, 0, "fssfch3"); 2200 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2201 break; 2202 case FSF_CMND_LENGTH_NOT_VALID: 2203 dev_err(&req->adapter->ccw_device->dev, 2204 "Incorrect FCP_CMND length %d, FCP device closed\n", 2205 req->qtcb->bottom.io.fcp_cmnd_length); 2206 zfcp_erp_adapter_shutdown(req->adapter, 0, "fssfch4"); 2207 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2208 break; 2209 case FSF_PORT_BOXED: 2210 zfcp_erp_set_port_status(zfcp_sdev->port, 2211 ZFCP_STATUS_COMMON_ACCESS_BOXED); 2212 zfcp_erp_port_reopen(zfcp_sdev->port, 2213 ZFCP_STATUS_COMMON_ERP_FAILED, "fssfch5"); 2214 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2215 break; 2216 case FSF_LUN_BOXED: 2217 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED); 2218 zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED, 2219 "fssfch6"); 2220 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2221 break; 2222 case FSF_ADAPTER_STATUS_AVAILABLE: 2223 if (header->fsf_status_qual.word[0] == 2224 FSF_SQ_INVOKE_LINK_TEST_PROCEDURE) 2225 zfcp_fc_test_link(zfcp_sdev->port); 2226 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2227 break; 2228 } 2229 } 2230 2231 static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req) 2232 { 2233 struct scsi_cmnd *scpnt; 2234 struct fcp_resp_with_ext *fcp_rsp; 2235 unsigned long flags; 2236 2237 read_lock_irqsave(&req->adapter->abort_lock, flags); 2238 2239 scpnt = req->data; 2240 if (unlikely(!scpnt)) { 2241 read_unlock_irqrestore(&req->adapter->abort_lock, flags); 2242 return; 2243 } 2244 2245 zfcp_fsf_fcp_handler_common(req, scpnt->device); 2246 2247 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) { 2248 set_host_byte(scpnt, DID_TRANSPORT_DISRUPTED); 2249 goto skip_fsfstatus; 2250 } 2251 2252 switch (req->qtcb->header.fsf_status) { 2253 case FSF_INCONSISTENT_PROT_DATA: 2254 case FSF_INVALID_PROT_PARM: 2255 set_host_byte(scpnt, DID_ERROR); 2256 goto skip_fsfstatus; 2257 case FSF_BLOCK_GUARD_CHECK_FAILURE: 2258 zfcp_scsi_dif_sense_error(scpnt, 0x1); 2259 goto skip_fsfstatus; 2260 case FSF_APP_TAG_CHECK_FAILURE: 2261 zfcp_scsi_dif_sense_error(scpnt, 0x2); 2262 goto skip_fsfstatus; 2263 case FSF_REF_TAG_CHECK_FAILURE: 2264 zfcp_scsi_dif_sense_error(scpnt, 0x3); 2265 goto skip_fsfstatus; 2266 } 2267 BUILD_BUG_ON(sizeof(struct fcp_resp_with_ext) > FSF_FCP_RSP_SIZE); 2268 fcp_rsp = &req->qtcb->bottom.io.fcp_rsp.iu; 2269 zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt); 2270 2271 skip_fsfstatus: 2272 zfcp_fsf_req_trace(req, scpnt); 2273 zfcp_dbf_scsi_result(scpnt, req); 2274 2275 scpnt->host_scribble = NULL; 2276 (scpnt->scsi_done) (scpnt); 2277 /* 2278 * We must hold this lock until scsi_done has been called. 2279 * Otherwise we may call scsi_done after abort regarding this 2280 * command has completed. 2281 * Note: scsi_done must not block! 2282 */ 2283 read_unlock_irqrestore(&req->adapter->abort_lock, flags); 2284 } 2285 2286 static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir) 2287 { 2288 switch (scsi_get_prot_op(scsi_cmnd)) { 2289 case SCSI_PROT_NORMAL: 2290 switch (scsi_cmnd->sc_data_direction) { 2291 case DMA_NONE: 2292 *data_dir = FSF_DATADIR_CMND; 2293 break; 2294 case DMA_FROM_DEVICE: 2295 *data_dir = FSF_DATADIR_READ; 2296 break; 2297 case DMA_TO_DEVICE: 2298 *data_dir = FSF_DATADIR_WRITE; 2299 break; 2300 case DMA_BIDIRECTIONAL: 2301 return -EINVAL; 2302 } 2303 break; 2304 2305 case SCSI_PROT_READ_STRIP: 2306 *data_dir = FSF_DATADIR_DIF_READ_STRIP; 2307 break; 2308 case SCSI_PROT_WRITE_INSERT: 2309 *data_dir = FSF_DATADIR_DIF_WRITE_INSERT; 2310 break; 2311 case SCSI_PROT_READ_PASS: 2312 *data_dir = FSF_DATADIR_DIF_READ_CONVERT; 2313 break; 2314 case SCSI_PROT_WRITE_PASS: 2315 *data_dir = FSF_DATADIR_DIF_WRITE_CONVERT; 2316 break; 2317 default: 2318 return -EINVAL; 2319 } 2320 2321 return 0; 2322 } 2323 2324 /** 2325 * zfcp_fsf_fcp_cmnd - initiate an FCP command (for a SCSI command) 2326 * @scsi_cmnd: scsi command to be sent 2327 */ 2328 int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd) 2329 { 2330 struct zfcp_fsf_req *req; 2331 struct fcp_cmnd *fcp_cmnd; 2332 u8 sbtype = SBAL_SFLAGS0_TYPE_READ; 2333 int retval = -EIO; 2334 struct scsi_device *sdev = scsi_cmnd->device; 2335 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); 2336 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; 2337 struct zfcp_qdio *qdio = adapter->qdio; 2338 struct fsf_qtcb_bottom_io *io; 2339 unsigned long flags; 2340 2341 if (unlikely(!(atomic_read(&zfcp_sdev->status) & 2342 ZFCP_STATUS_COMMON_UNBLOCKED))) 2343 return -EBUSY; 2344 2345 spin_lock_irqsave(&qdio->req_q_lock, flags); 2346 if (atomic_read(&qdio->req_q_free) <= 0) { 2347 atomic_inc(&qdio->req_q_full); 2348 goto out; 2349 } 2350 2351 if (scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) 2352 sbtype = SBAL_SFLAGS0_TYPE_WRITE; 2353 2354 req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND, 2355 sbtype, adapter->pool.scsi_req); 2356 2357 if (IS_ERR(req)) { 2358 retval = PTR_ERR(req); 2359 goto out; 2360 } 2361 2362 scsi_cmnd->host_scribble = (unsigned char *) req->req_id; 2363 2364 io = &req->qtcb->bottom.io; 2365 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 2366 req->data = scsi_cmnd; 2367 req->handler = zfcp_fsf_fcp_cmnd_handler; 2368 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle; 2369 req->qtcb->header.port_handle = zfcp_sdev->port->handle; 2370 io->service_class = FSF_CLASS_3; 2371 io->fcp_cmnd_length = FCP_CMND_LEN; 2372 2373 if (scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) { 2374 io->data_block_length = scsi_cmnd->device->sector_size; 2375 io->ref_tag_value = scsi_get_lba(scsi_cmnd) & 0xFFFFFFFF; 2376 } 2377 2378 if (zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction)) 2379 goto failed_scsi_cmnd; 2380 2381 BUILD_BUG_ON(sizeof(struct fcp_cmnd) > FSF_FCP_CMND_SIZE); 2382 fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu; 2383 zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd); 2384 2385 if ((scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) && 2386 scsi_prot_sg_count(scsi_cmnd)) { 2387 zfcp_qdio_set_data_div(qdio, &req->qdio_req, 2388 scsi_prot_sg_count(scsi_cmnd)); 2389 retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, 2390 scsi_prot_sglist(scsi_cmnd)); 2391 if (retval) 2392 goto failed_scsi_cmnd; 2393 io->prot_data_length = zfcp_qdio_real_bytes( 2394 scsi_prot_sglist(scsi_cmnd)); 2395 } 2396 2397 retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, 2398 scsi_sglist(scsi_cmnd)); 2399 if (unlikely(retval)) 2400 goto failed_scsi_cmnd; 2401 2402 zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req); 2403 if (zfcp_adapter_multi_buffer_active(adapter)) 2404 zfcp_qdio_set_scount(qdio, &req->qdio_req); 2405 2406 retval = zfcp_fsf_req_send(req); 2407 if (unlikely(retval)) 2408 goto failed_scsi_cmnd; 2409 /* NOTE: DO NOT TOUCH req PAST THIS POINT! */ 2410 2411 goto out; 2412 2413 failed_scsi_cmnd: 2414 zfcp_fsf_req_free(req); 2415 scsi_cmnd->host_scribble = NULL; 2416 out: 2417 spin_unlock_irqrestore(&qdio->req_q_lock, flags); 2418 return retval; 2419 } 2420 2421 static void zfcp_fsf_fcp_task_mgmt_handler(struct zfcp_fsf_req *req) 2422 { 2423 struct scsi_device *sdev = req->data; 2424 struct fcp_resp_with_ext *fcp_rsp; 2425 struct fcp_resp_rsp_info *rsp_info; 2426 2427 zfcp_fsf_fcp_handler_common(req, sdev); 2428 2429 fcp_rsp = &req->qtcb->bottom.io.fcp_rsp.iu; 2430 rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1]; 2431 2432 if ((rsp_info->rsp_code != FCP_TMF_CMPL) || 2433 (req->status & ZFCP_STATUS_FSFREQ_ERROR)) 2434 req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED; 2435 } 2436 2437 /** 2438 * zfcp_fsf_fcp_task_mgmt() - Send SCSI task management command (TMF). 2439 * @sdev: Pointer to SCSI device to send the task management command to. 2440 * @tm_flags: Unsigned byte for task management flags. 2441 * 2442 * Return: On success pointer to struct zfcp_fsf_req, %NULL otherwise. 2443 */ 2444 struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_device *sdev, 2445 u8 tm_flags) 2446 { 2447 struct zfcp_fsf_req *req = NULL; 2448 struct fcp_cmnd *fcp_cmnd; 2449 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); 2450 struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio; 2451 2452 if (unlikely(!(atomic_read(&zfcp_sdev->status) & 2453 ZFCP_STATUS_COMMON_UNBLOCKED))) 2454 return NULL; 2455 2456 spin_lock_irq(&qdio->req_q_lock); 2457 if (zfcp_qdio_sbal_get(qdio)) 2458 goto out; 2459 2460 req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND, 2461 SBAL_SFLAGS0_TYPE_WRITE, 2462 qdio->adapter->pool.scsi_req); 2463 2464 if (IS_ERR(req)) { 2465 req = NULL; 2466 goto out; 2467 } 2468 2469 req->data = sdev; 2470 2471 req->handler = zfcp_fsf_fcp_task_mgmt_handler; 2472 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle; 2473 req->qtcb->header.port_handle = zfcp_sdev->port->handle; 2474 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND; 2475 req->qtcb->bottom.io.service_class = FSF_CLASS_3; 2476 req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN; 2477 2478 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 2479 2480 fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu; 2481 zfcp_fc_fcp_tm(fcp_cmnd, sdev, tm_flags); 2482 2483 zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT); 2484 if (!zfcp_fsf_req_send(req)) { 2485 /* NOTE: DO NOT TOUCH req, UNTIL IT COMPLETES! */ 2486 goto out; 2487 } 2488 2489 zfcp_fsf_req_free(req); 2490 req = NULL; 2491 out: 2492 spin_unlock_irq(&qdio->req_q_lock); 2493 return req; 2494 } 2495 2496 /** 2497 * zfcp_fsf_reqid_check - validate req_id contained in SBAL returned by QDIO 2498 * @qdio: pointer to struct zfcp_qdio 2499 * @sbal_idx: response queue index of SBAL to be processed 2500 */ 2501 void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx) 2502 { 2503 struct zfcp_adapter *adapter = qdio->adapter; 2504 struct qdio_buffer *sbal = qdio->res_q[sbal_idx]; 2505 struct qdio_buffer_element *sbale; 2506 struct zfcp_fsf_req *fsf_req; 2507 unsigned long req_id; 2508 int idx; 2509 2510 for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) { 2511 2512 sbale = &sbal->element[idx]; 2513 req_id = (unsigned long) sbale->addr; 2514 fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id); 2515 2516 if (!fsf_req) { 2517 /* 2518 * Unknown request means that we have potentially memory 2519 * corruption and must stop the machine immediately. 2520 */ 2521 zfcp_qdio_siosl(adapter); 2522 panic("error: unknown req_id (%lx) on adapter %s.\n", 2523 req_id, dev_name(&adapter->ccw_device->dev)); 2524 } 2525 2526 zfcp_fsf_req_complete(fsf_req); 2527 2528 if (likely(sbale->eflags & SBAL_EFLAGS_LAST_ENTRY)) 2529 break; 2530 } 2531 } 2532