1 /* 2 * zfcp device driver 3 * 4 * Implementation of FSF commands. 5 * 6 * Copyright IBM Corporation 2002, 2008 7 */ 8 9 #include "zfcp_ext.h" 10 11 static void zfcp_fsf_request_timeout_handler(unsigned long data) 12 { 13 struct zfcp_adapter *adapter = (struct zfcp_adapter *) data; 14 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 62, 15 NULL); 16 } 17 18 static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req, 19 unsigned long timeout) 20 { 21 fsf_req->timer.function = zfcp_fsf_request_timeout_handler; 22 fsf_req->timer.data = (unsigned long) fsf_req->adapter; 23 fsf_req->timer.expires = jiffies + timeout; 24 add_timer(&fsf_req->timer); 25 } 26 27 static void zfcp_fsf_start_erp_timer(struct zfcp_fsf_req *fsf_req) 28 { 29 BUG_ON(!fsf_req->erp_action); 30 fsf_req->timer.function = zfcp_erp_timeout_handler; 31 fsf_req->timer.data = (unsigned long) fsf_req->erp_action; 32 fsf_req->timer.expires = jiffies + 30 * HZ; 33 add_timer(&fsf_req->timer); 34 } 35 36 /* association between FSF command and FSF QTCB type */ 37 static u32 fsf_qtcb_type[] = { 38 [FSF_QTCB_FCP_CMND] = FSF_IO_COMMAND, 39 [FSF_QTCB_ABORT_FCP_CMND] = FSF_SUPPORT_COMMAND, 40 [FSF_QTCB_OPEN_PORT_WITH_DID] = FSF_SUPPORT_COMMAND, 41 [FSF_QTCB_OPEN_LUN] = FSF_SUPPORT_COMMAND, 42 [FSF_QTCB_CLOSE_LUN] = FSF_SUPPORT_COMMAND, 43 [FSF_QTCB_CLOSE_PORT] = FSF_SUPPORT_COMMAND, 44 [FSF_QTCB_CLOSE_PHYSICAL_PORT] = FSF_SUPPORT_COMMAND, 45 [FSF_QTCB_SEND_ELS] = FSF_SUPPORT_COMMAND, 46 [FSF_QTCB_SEND_GENERIC] = FSF_SUPPORT_COMMAND, 47 [FSF_QTCB_EXCHANGE_CONFIG_DATA] = FSF_CONFIG_COMMAND, 48 [FSF_QTCB_EXCHANGE_PORT_DATA] = FSF_PORT_COMMAND, 49 [FSF_QTCB_DOWNLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND, 50 [FSF_QTCB_UPLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND 51 }; 52 53 static void zfcp_act_eval_err(struct zfcp_adapter *adapter, u32 table) 54 { 55 u16 subtable = table >> 16; 56 u16 rule = table & 0xffff; 57 const char *act_type[] = { "unknown", "OS", "WWPN", "DID", "LUN" }; 58 59 if (subtable && subtable < ARRAY_SIZE(act_type)) 60 dev_warn(&adapter->ccw_device->dev, 61 "Access denied according to ACT rule type %s, " 62 "rule %d\n", act_type[subtable], rule); 63 } 64 65 static void zfcp_fsf_access_denied_port(struct zfcp_fsf_req *req, 66 struct zfcp_port *port) 67 { 68 struct fsf_qtcb_header *header = &req->qtcb->header; 69 dev_warn(&req->adapter->ccw_device->dev, 70 "Access denied to port 0x%016Lx\n", 71 (unsigned long long)port->wwpn); 72 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]); 73 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]); 74 zfcp_erp_port_access_denied(port, 55, req); 75 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 76 } 77 78 static void zfcp_fsf_access_denied_unit(struct zfcp_fsf_req *req, 79 struct zfcp_unit *unit) 80 { 81 struct fsf_qtcb_header *header = &req->qtcb->header; 82 dev_warn(&req->adapter->ccw_device->dev, 83 "Access denied to unit 0x%016Lx on port 0x%016Lx\n", 84 (unsigned long long)unit->fcp_lun, 85 (unsigned long long)unit->port->wwpn); 86 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]); 87 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]); 88 zfcp_erp_unit_access_denied(unit, 59, req); 89 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 90 } 91 92 static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req) 93 { 94 dev_err(&req->adapter->ccw_device->dev, "FCP device not " 95 "operational because of an unsupported FC class\n"); 96 zfcp_erp_adapter_shutdown(req->adapter, 0, 123, req); 97 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 98 } 99 100 /** 101 * zfcp_fsf_req_free - free memory used by fsf request 102 * @fsf_req: pointer to struct zfcp_fsf_req 103 */ 104 void zfcp_fsf_req_free(struct zfcp_fsf_req *req) 105 { 106 if (likely(req->pool)) { 107 mempool_free(req, req->pool); 108 return; 109 } 110 111 if (req->qtcb) { 112 kmem_cache_free(zfcp_data.fsf_req_qtcb_cache, req); 113 return; 114 } 115 } 116 117 /** 118 * zfcp_fsf_req_dismiss_all - dismiss all fsf requests 119 * @adapter: pointer to struct zfcp_adapter 120 * 121 * Never ever call this without shutting down the adapter first. 122 * Otherwise the adapter would continue using and corrupting s390 storage. 123 * Included BUG_ON() call to ensure this is done. 124 * ERP is supposed to be the only user of this function. 125 */ 126 void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter) 127 { 128 struct zfcp_fsf_req *req, *tmp; 129 unsigned long flags; 130 LIST_HEAD(remove_queue); 131 unsigned int i; 132 133 BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP); 134 spin_lock_irqsave(&adapter->req_list_lock, flags); 135 for (i = 0; i < REQUEST_LIST_SIZE; i++) 136 list_splice_init(&adapter->req_list[i], &remove_queue); 137 spin_unlock_irqrestore(&adapter->req_list_lock, flags); 138 139 list_for_each_entry_safe(req, tmp, &remove_queue, list) { 140 list_del(&req->list); 141 req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; 142 zfcp_fsf_req_complete(req); 143 } 144 } 145 146 static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req) 147 { 148 struct fsf_status_read_buffer *sr_buf = req->data; 149 struct zfcp_adapter *adapter = req->adapter; 150 struct zfcp_port *port; 151 int d_id = sr_buf->d_id & ZFCP_DID_MASK; 152 unsigned long flags; 153 154 read_lock_irqsave(&zfcp_data.config_lock, flags); 155 list_for_each_entry(port, &adapter->port_list_head, list) 156 if (port->d_id == d_id) { 157 read_unlock_irqrestore(&zfcp_data.config_lock, flags); 158 switch (sr_buf->status_subtype) { 159 case FSF_STATUS_READ_SUB_CLOSE_PHYS_PORT: 160 zfcp_erp_port_reopen(port, 0, 101, req); 161 break; 162 case FSF_STATUS_READ_SUB_ERROR_PORT: 163 zfcp_erp_port_shutdown(port, 0, 122, req); 164 break; 165 } 166 return; 167 } 168 read_unlock_irqrestore(&zfcp_data.config_lock, flags); 169 } 170 171 static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, u8 id, 172 struct fsf_link_down_info *link_down) 173 { 174 struct zfcp_adapter *adapter = req->adapter; 175 176 if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED) 177 return; 178 179 atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status); 180 181 if (!link_down) 182 goto out; 183 184 switch (link_down->error_code) { 185 case FSF_PSQ_LINK_NO_LIGHT: 186 dev_warn(&req->adapter->ccw_device->dev, 187 "There is no light signal from the local " 188 "fibre channel cable\n"); 189 break; 190 case FSF_PSQ_LINK_WRAP_PLUG: 191 dev_warn(&req->adapter->ccw_device->dev, 192 "There is a wrap plug instead of a fibre " 193 "channel cable\n"); 194 break; 195 case FSF_PSQ_LINK_NO_FCP: 196 dev_warn(&req->adapter->ccw_device->dev, 197 "The adjacent fibre channel node does not " 198 "support FCP\n"); 199 break; 200 case FSF_PSQ_LINK_FIRMWARE_UPDATE: 201 dev_warn(&req->adapter->ccw_device->dev, 202 "The FCP device is suspended because of a " 203 "firmware update\n"); 204 break; 205 case FSF_PSQ_LINK_INVALID_WWPN: 206 dev_warn(&req->adapter->ccw_device->dev, 207 "The FCP device detected a WWPN that is " 208 "duplicate or not valid\n"); 209 break; 210 case FSF_PSQ_LINK_NO_NPIV_SUPPORT: 211 dev_warn(&req->adapter->ccw_device->dev, 212 "The fibre channel fabric does not support NPIV\n"); 213 break; 214 case FSF_PSQ_LINK_NO_FCP_RESOURCES: 215 dev_warn(&req->adapter->ccw_device->dev, 216 "The FCP adapter cannot support more NPIV ports\n"); 217 break; 218 case FSF_PSQ_LINK_NO_FABRIC_RESOURCES: 219 dev_warn(&req->adapter->ccw_device->dev, 220 "The adjacent switch cannot support " 221 "more NPIV ports\n"); 222 break; 223 case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE: 224 dev_warn(&req->adapter->ccw_device->dev, 225 "The FCP adapter could not log in to the " 226 "fibre channel fabric\n"); 227 break; 228 case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED: 229 dev_warn(&req->adapter->ccw_device->dev, 230 "The WWPN assignment file on the FCP adapter " 231 "has been damaged\n"); 232 break; 233 case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED: 234 dev_warn(&req->adapter->ccw_device->dev, 235 "The mode table on the FCP adapter " 236 "has been damaged\n"); 237 break; 238 case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT: 239 dev_warn(&req->adapter->ccw_device->dev, 240 "All NPIV ports on the FCP adapter have " 241 "been assigned\n"); 242 break; 243 default: 244 dev_warn(&req->adapter->ccw_device->dev, 245 "The link between the FCP adapter and " 246 "the FC fabric is down\n"); 247 } 248 out: 249 zfcp_erp_adapter_failed(adapter, id, req); 250 } 251 252 static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req) 253 { 254 struct fsf_status_read_buffer *sr_buf = req->data; 255 struct fsf_link_down_info *ldi = 256 (struct fsf_link_down_info *) &sr_buf->payload; 257 258 switch (sr_buf->status_subtype) { 259 case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK: 260 zfcp_fsf_link_down_info_eval(req, 38, ldi); 261 break; 262 case FSF_STATUS_READ_SUB_FDISC_FAILED: 263 zfcp_fsf_link_down_info_eval(req, 39, ldi); 264 break; 265 case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE: 266 zfcp_fsf_link_down_info_eval(req, 40, NULL); 267 }; 268 } 269 270 static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req) 271 { 272 struct zfcp_adapter *adapter = req->adapter; 273 struct fsf_status_read_buffer *sr_buf = req->data; 274 275 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) { 276 zfcp_hba_dbf_event_fsf_unsol("dism", adapter, sr_buf); 277 mempool_free(sr_buf, adapter->pool.data_status_read); 278 zfcp_fsf_req_free(req); 279 return; 280 } 281 282 zfcp_hba_dbf_event_fsf_unsol("read", adapter, sr_buf); 283 284 switch (sr_buf->status_type) { 285 case FSF_STATUS_READ_PORT_CLOSED: 286 zfcp_fsf_status_read_port_closed(req); 287 break; 288 case FSF_STATUS_READ_INCOMING_ELS: 289 zfcp_fc_incoming_els(req); 290 break; 291 case FSF_STATUS_READ_SENSE_DATA_AVAIL: 292 break; 293 case FSF_STATUS_READ_BIT_ERROR_THRESHOLD: 294 dev_warn(&adapter->ccw_device->dev, 295 "The error threshold for checksum statistics " 296 "has been exceeded\n"); 297 zfcp_hba_dbf_event_berr(adapter, req); 298 break; 299 case FSF_STATUS_READ_LINK_DOWN: 300 zfcp_fsf_status_read_link_down(req); 301 break; 302 case FSF_STATUS_READ_LINK_UP: 303 dev_info(&adapter->ccw_device->dev, 304 "The local link has been restored\n"); 305 /* All ports should be marked as ready to run again */ 306 zfcp_erp_modify_adapter_status(adapter, 30, NULL, 307 ZFCP_STATUS_COMMON_RUNNING, 308 ZFCP_SET); 309 zfcp_erp_adapter_reopen(adapter, 310 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | 311 ZFCP_STATUS_COMMON_ERP_FAILED, 312 102, req); 313 break; 314 case FSF_STATUS_READ_NOTIFICATION_LOST: 315 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED) 316 zfcp_erp_adapter_access_changed(adapter, 135, req); 317 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS) 318 schedule_work(&adapter->scan_work); 319 break; 320 case FSF_STATUS_READ_CFDC_UPDATED: 321 zfcp_erp_adapter_access_changed(adapter, 136, req); 322 break; 323 case FSF_STATUS_READ_FEATURE_UPDATE_ALERT: 324 adapter->adapter_features = sr_buf->payload.word[0]; 325 break; 326 } 327 328 mempool_free(sr_buf, adapter->pool.data_status_read); 329 zfcp_fsf_req_free(req); 330 331 atomic_inc(&adapter->stat_miss); 332 queue_work(zfcp_data.work_queue, &adapter->stat_work); 333 } 334 335 static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req) 336 { 337 switch (req->qtcb->header.fsf_status_qual.word[0]) { 338 case FSF_SQ_FCP_RSP_AVAILABLE: 339 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 340 case FSF_SQ_NO_RETRY_POSSIBLE: 341 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 342 return; 343 case FSF_SQ_COMMAND_ABORTED: 344 req->status |= ZFCP_STATUS_FSFREQ_ABORTED; 345 break; 346 case FSF_SQ_NO_RECOM: 347 dev_err(&req->adapter->ccw_device->dev, 348 "The FCP adapter reported a problem " 349 "that cannot be recovered\n"); 350 zfcp_erp_adapter_shutdown(req->adapter, 0, 121, req); 351 break; 352 } 353 /* all non-return stats set FSFREQ_ERROR*/ 354 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 355 } 356 357 static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req) 358 { 359 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) 360 return; 361 362 switch (req->qtcb->header.fsf_status) { 363 case FSF_UNKNOWN_COMMAND: 364 dev_err(&req->adapter->ccw_device->dev, 365 "The FCP adapter does not recognize the command 0x%x\n", 366 req->qtcb->header.fsf_command); 367 zfcp_erp_adapter_shutdown(req->adapter, 0, 120, req); 368 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 369 break; 370 case FSF_ADAPTER_STATUS_AVAILABLE: 371 zfcp_fsf_fsfstatus_qual_eval(req); 372 break; 373 } 374 } 375 376 static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req) 377 { 378 struct zfcp_adapter *adapter = req->adapter; 379 struct fsf_qtcb *qtcb = req->qtcb; 380 union fsf_prot_status_qual *psq = &qtcb->prefix.prot_status_qual; 381 382 zfcp_hba_dbf_event_fsf_response(req); 383 384 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) { 385 req->status |= ZFCP_STATUS_FSFREQ_ERROR | 386 ZFCP_STATUS_FSFREQ_RETRY; /* only for SCSI cmnds. */ 387 return; 388 } 389 390 switch (qtcb->prefix.prot_status) { 391 case FSF_PROT_GOOD: 392 case FSF_PROT_FSF_STATUS_PRESENTED: 393 return; 394 case FSF_PROT_QTCB_VERSION_ERROR: 395 dev_err(&adapter->ccw_device->dev, 396 "QTCB version 0x%x not supported by FCP adapter " 397 "(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION, 398 psq->word[0], psq->word[1]); 399 zfcp_erp_adapter_shutdown(adapter, 0, 117, req); 400 break; 401 case FSF_PROT_ERROR_STATE: 402 case FSF_PROT_SEQ_NUMB_ERROR: 403 zfcp_erp_adapter_reopen(adapter, 0, 98, req); 404 req->status |= ZFCP_STATUS_FSFREQ_RETRY; 405 break; 406 case FSF_PROT_UNSUPP_QTCB_TYPE: 407 dev_err(&adapter->ccw_device->dev, 408 "The QTCB type is not supported by the FCP adapter\n"); 409 zfcp_erp_adapter_shutdown(adapter, 0, 118, req); 410 break; 411 case FSF_PROT_HOST_CONNECTION_INITIALIZING: 412 atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, 413 &adapter->status); 414 break; 415 case FSF_PROT_DUPLICATE_REQUEST_ID: 416 dev_err(&adapter->ccw_device->dev, 417 "0x%Lx is an ambiguous request identifier\n", 418 (unsigned long long)qtcb->bottom.support.req_handle); 419 zfcp_erp_adapter_shutdown(adapter, 0, 78, req); 420 break; 421 case FSF_PROT_LINK_DOWN: 422 zfcp_fsf_link_down_info_eval(req, 37, &psq->link_down_info); 423 /* FIXME: reopening adapter now? better wait for link up */ 424 zfcp_erp_adapter_reopen(adapter, 0, 79, req); 425 break; 426 case FSF_PROT_REEST_QUEUE: 427 /* All ports should be marked as ready to run again */ 428 zfcp_erp_modify_adapter_status(adapter, 28, NULL, 429 ZFCP_STATUS_COMMON_RUNNING, 430 ZFCP_SET); 431 zfcp_erp_adapter_reopen(adapter, 432 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | 433 ZFCP_STATUS_COMMON_ERP_FAILED, 99, req); 434 break; 435 default: 436 dev_err(&adapter->ccw_device->dev, 437 "0x%x is not a valid transfer protocol status\n", 438 qtcb->prefix.prot_status); 439 zfcp_erp_adapter_shutdown(adapter, 0, 119, req); 440 } 441 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 442 } 443 444 /** 445 * zfcp_fsf_req_complete - process completion of a FSF request 446 * @fsf_req: The FSF request that has been completed. 447 * 448 * When a request has been completed either from the FCP adapter, 449 * or it has been dismissed due to a queue shutdown, this function 450 * is called to process the completion status and trigger further 451 * events related to the FSF request. 452 */ 453 void zfcp_fsf_req_complete(struct zfcp_fsf_req *req) 454 { 455 if (unlikely(req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) { 456 zfcp_fsf_status_read_handler(req); 457 return; 458 } 459 460 del_timer(&req->timer); 461 zfcp_fsf_protstatus_eval(req); 462 zfcp_fsf_fsfstatus_eval(req); 463 req->handler(req); 464 465 if (req->erp_action) 466 zfcp_erp_notify(req->erp_action, 0); 467 req->status |= ZFCP_STATUS_FSFREQ_COMPLETED; 468 469 if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP)) 470 zfcp_fsf_req_free(req); 471 else 472 /* notify initiator waiting for the requests completion */ 473 /* 474 * FIXME: Race! We must not access fsf_req here as it might have been 475 * cleaned up already due to the set ZFCP_STATUS_FSFREQ_COMPLETED 476 * flag. It's an improbable case. But, we have the same paranoia for 477 * the cleanup flag already. 478 * Might better be handled using complete()? 479 * (setting the flag and doing wakeup ought to be atomic 480 * with regard to checking the flag as long as waitqueue is 481 * part of the to be released structure) 482 */ 483 wake_up(&req->completion_wq); 484 } 485 486 static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req) 487 { 488 struct fsf_qtcb_bottom_config *bottom; 489 struct zfcp_adapter *adapter = req->adapter; 490 struct Scsi_Host *shost = adapter->scsi_host; 491 492 bottom = &req->qtcb->bottom.config; 493 494 if (req->data) 495 memcpy(req->data, bottom, sizeof(*bottom)); 496 497 fc_host_node_name(shost) = bottom->nport_serv_param.wwnn; 498 fc_host_port_name(shost) = bottom->nport_serv_param.wwpn; 499 fc_host_port_id(shost) = bottom->s_id & ZFCP_DID_MASK; 500 fc_host_speed(shost) = bottom->fc_link_speed; 501 fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3; 502 503 adapter->hydra_version = bottom->adapter_type; 504 adapter->timer_ticks = bottom->timer_interval; 505 506 if (fc_host_permanent_port_name(shost) == -1) 507 fc_host_permanent_port_name(shost) = fc_host_port_name(shost); 508 509 switch (bottom->fc_topology) { 510 case FSF_TOPO_P2P: 511 adapter->peer_d_id = bottom->peer_d_id & ZFCP_DID_MASK; 512 adapter->peer_wwpn = bottom->plogi_payload.wwpn; 513 adapter->peer_wwnn = bottom->plogi_payload.wwnn; 514 fc_host_port_type(shost) = FC_PORTTYPE_PTP; 515 break; 516 case FSF_TOPO_FABRIC: 517 fc_host_port_type(shost) = FC_PORTTYPE_NPORT; 518 break; 519 case FSF_TOPO_AL: 520 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT; 521 default: 522 dev_err(&adapter->ccw_device->dev, 523 "Unknown or unsupported arbitrated loop " 524 "fibre channel topology detected\n"); 525 zfcp_erp_adapter_shutdown(adapter, 0, 127, req); 526 return -EIO; 527 } 528 529 return 0; 530 } 531 532 static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req) 533 { 534 struct zfcp_adapter *adapter = req->adapter; 535 struct fsf_qtcb *qtcb = req->qtcb; 536 struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config; 537 struct Scsi_Host *shost = adapter->scsi_host; 538 539 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 540 return; 541 542 adapter->fsf_lic_version = bottom->lic_version; 543 adapter->adapter_features = bottom->adapter_features; 544 adapter->connection_features = bottom->connection_features; 545 adapter->peer_wwpn = 0; 546 adapter->peer_wwnn = 0; 547 adapter->peer_d_id = 0; 548 549 switch (qtcb->header.fsf_status) { 550 case FSF_GOOD: 551 if (zfcp_fsf_exchange_config_evaluate(req)) 552 return; 553 554 if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) { 555 dev_err(&adapter->ccw_device->dev, 556 "FCP adapter maximum QTCB size (%d bytes) " 557 "is too small\n", 558 bottom->max_qtcb_size); 559 zfcp_erp_adapter_shutdown(adapter, 0, 129, req); 560 return; 561 } 562 atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, 563 &adapter->status); 564 break; 565 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE: 566 fc_host_node_name(shost) = 0; 567 fc_host_port_name(shost) = 0; 568 fc_host_port_id(shost) = 0; 569 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; 570 fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN; 571 adapter->hydra_version = 0; 572 573 atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, 574 &adapter->status); 575 576 zfcp_fsf_link_down_info_eval(req, 42, 577 &qtcb->header.fsf_status_qual.link_down_info); 578 break; 579 default: 580 zfcp_erp_adapter_shutdown(adapter, 0, 130, req); 581 return; 582 } 583 584 if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) { 585 adapter->hardware_version = bottom->hardware_version; 586 memcpy(fc_host_serial_number(shost), bottom->serial_number, 587 min(FC_SERIAL_NUMBER_SIZE, 17)); 588 EBCASC(fc_host_serial_number(shost), 589 min(FC_SERIAL_NUMBER_SIZE, 17)); 590 } 591 592 if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) { 593 dev_err(&adapter->ccw_device->dev, 594 "The FCP adapter only supports newer " 595 "control block versions\n"); 596 zfcp_erp_adapter_shutdown(adapter, 0, 125, req); 597 return; 598 } 599 if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) { 600 dev_err(&adapter->ccw_device->dev, 601 "The FCP adapter only supports older " 602 "control block versions\n"); 603 zfcp_erp_adapter_shutdown(adapter, 0, 126, req); 604 } 605 } 606 607 static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req) 608 { 609 struct zfcp_adapter *adapter = req->adapter; 610 struct fsf_qtcb_bottom_port *bottom = &req->qtcb->bottom.port; 611 struct Scsi_Host *shost = adapter->scsi_host; 612 613 if (req->data) 614 memcpy(req->data, bottom, sizeof(*bottom)); 615 616 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) 617 fc_host_permanent_port_name(shost) = bottom->wwpn; 618 else 619 fc_host_permanent_port_name(shost) = fc_host_port_name(shost); 620 fc_host_maxframe_size(shost) = bottom->maximum_frame_size; 621 fc_host_supported_speeds(shost) = bottom->supported_speed; 622 } 623 624 static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req) 625 { 626 struct fsf_qtcb *qtcb = req->qtcb; 627 628 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 629 return; 630 631 switch (qtcb->header.fsf_status) { 632 case FSF_GOOD: 633 zfcp_fsf_exchange_port_evaluate(req); 634 break; 635 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE: 636 zfcp_fsf_exchange_port_evaluate(req); 637 zfcp_fsf_link_down_info_eval(req, 43, 638 &qtcb->header.fsf_status_qual.link_down_info); 639 break; 640 } 641 } 642 643 static int zfcp_fsf_sbal_check(struct zfcp_adapter *adapter) 644 { 645 struct zfcp_qdio_queue *req_q = &adapter->req_q; 646 647 spin_lock_bh(&adapter->req_q_lock); 648 if (atomic_read(&req_q->count)) 649 return 1; 650 spin_unlock_bh(&adapter->req_q_lock); 651 return 0; 652 } 653 654 static int zfcp_fsf_sbal_available(struct zfcp_adapter *adapter) 655 { 656 unsigned int count = atomic_read(&adapter->req_q.count); 657 if (!count) 658 atomic_inc(&adapter->qdio_outb_full); 659 return count > 0; 660 } 661 662 static int zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter) 663 { 664 long ret; 665 666 spin_unlock_bh(&adapter->req_q_lock); 667 ret = wait_event_interruptible_timeout(adapter->request_wq, 668 zfcp_fsf_sbal_check(adapter), 5 * HZ); 669 if (ret > 0) 670 return 0; 671 if (!ret) 672 atomic_inc(&adapter->qdio_outb_full); 673 674 spin_lock_bh(&adapter->req_q_lock); 675 return -EIO; 676 } 677 678 static struct zfcp_fsf_req *zfcp_fsf_alloc_noqtcb(mempool_t *pool) 679 { 680 struct zfcp_fsf_req *req; 681 req = mempool_alloc(pool, GFP_ATOMIC); 682 if (!req) 683 return NULL; 684 memset(req, 0, sizeof(*req)); 685 return req; 686 } 687 688 static struct zfcp_fsf_req *zfcp_fsf_alloc_qtcb(mempool_t *pool) 689 { 690 struct zfcp_fsf_req_qtcb *qtcb; 691 692 if (likely(pool)) 693 qtcb = mempool_alloc(pool, GFP_ATOMIC); 694 else 695 qtcb = kmem_cache_alloc(zfcp_data.fsf_req_qtcb_cache, 696 GFP_ATOMIC); 697 if (unlikely(!qtcb)) 698 return NULL; 699 700 memset(qtcb, 0, sizeof(*qtcb)); 701 qtcb->fsf_req.qtcb = &qtcb->qtcb; 702 qtcb->fsf_req.pool = pool; 703 704 return &qtcb->fsf_req; 705 } 706 707 static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter, 708 u32 fsf_cmd, int req_flags, 709 mempool_t *pool) 710 { 711 struct qdio_buffer_element *sbale; 712 713 struct zfcp_fsf_req *req; 714 struct zfcp_qdio_queue *req_q = &adapter->req_q; 715 716 if (req_flags & ZFCP_REQ_NO_QTCB) 717 req = zfcp_fsf_alloc_noqtcb(pool); 718 else 719 req = zfcp_fsf_alloc_qtcb(pool); 720 721 if (unlikely(!req)) 722 return ERR_PTR(-EIO); 723 724 if (adapter->req_no == 0) 725 adapter->req_no++; 726 727 INIT_LIST_HEAD(&req->list); 728 init_timer(&req->timer); 729 init_waitqueue_head(&req->completion_wq); 730 731 req->adapter = adapter; 732 req->fsf_command = fsf_cmd; 733 req->req_id = adapter->req_no++; 734 req->sbal_number = 1; 735 req->sbal_first = req_q->first; 736 req->sbal_last = req_q->first; 737 req->sbale_curr = 1; 738 739 sbale = zfcp_qdio_sbale_req(req); 740 sbale[0].addr = (void *) req->req_id; 741 sbale[0].flags |= SBAL_FLAGS0_COMMAND; 742 743 if (likely(req->qtcb)) { 744 req->qtcb->prefix.req_seq_no = req->adapter->fsf_req_seq_no; 745 req->qtcb->prefix.req_id = req->req_id; 746 req->qtcb->prefix.ulp_info = 26; 747 req->qtcb->prefix.qtcb_type = fsf_qtcb_type[req->fsf_command]; 748 req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION; 749 req->qtcb->header.req_handle = req->req_id; 750 req->qtcb->header.fsf_command = req->fsf_command; 751 req->seq_no = adapter->fsf_req_seq_no; 752 req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no; 753 sbale[1].addr = (void *) req->qtcb; 754 sbale[1].length = sizeof(struct fsf_qtcb); 755 } 756 757 if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) { 758 zfcp_fsf_req_free(req); 759 return ERR_PTR(-EIO); 760 } 761 762 if (likely(req_flags & ZFCP_REQ_AUTO_CLEANUP)) 763 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 764 765 return req; 766 } 767 768 static int zfcp_fsf_req_send(struct zfcp_fsf_req *req) 769 { 770 struct zfcp_adapter *adapter = req->adapter; 771 struct zfcp_qdio_queue *req_q = &adapter->req_q; 772 int idx; 773 774 /* put allocated FSF request into hash table */ 775 spin_lock(&adapter->req_list_lock); 776 idx = zfcp_reqlist_hash(req->req_id); 777 list_add_tail(&req->list, &adapter->req_list[idx]); 778 spin_unlock(&adapter->req_list_lock); 779 780 req->issued = get_clock(); 781 if (zfcp_qdio_send(req)) { 782 /* Queues are down..... */ 783 del_timer(&req->timer); 784 spin_lock(&adapter->req_list_lock); 785 zfcp_reqlist_remove(adapter, req); 786 spin_unlock(&adapter->req_list_lock); 787 /* undo changes in request queue made for this request */ 788 atomic_add(req->sbal_number, &req_q->count); 789 req_q->first -= req->sbal_number; 790 req_q->first += QDIO_MAX_BUFFERS_PER_Q; 791 req_q->first %= QDIO_MAX_BUFFERS_PER_Q; /* wrap */ 792 zfcp_erp_adapter_reopen(adapter, 0, 116, req); 793 return -EIO; 794 } 795 796 /* Don't increase for unsolicited status */ 797 if (req->qtcb) 798 adapter->fsf_req_seq_no++; 799 800 return 0; 801 } 802 803 /** 804 * zfcp_fsf_status_read - send status read request 805 * @adapter: pointer to struct zfcp_adapter 806 * @req_flags: request flags 807 * Returns: 0 on success, ERROR otherwise 808 */ 809 int zfcp_fsf_status_read(struct zfcp_adapter *adapter) 810 { 811 struct zfcp_fsf_req *req; 812 struct fsf_status_read_buffer *sr_buf; 813 struct qdio_buffer_element *sbale; 814 int retval = -EIO; 815 816 spin_lock_bh(&adapter->req_q_lock); 817 if (zfcp_fsf_req_sbal_get(adapter)) 818 goto out; 819 820 req = zfcp_fsf_req_create(adapter, FSF_QTCB_UNSOLICITED_STATUS, 821 ZFCP_REQ_NO_QTCB, 822 adapter->pool.fsf_req_status_read); 823 if (IS_ERR(req)) { 824 retval = PTR_ERR(req); 825 goto out; 826 } 827 828 sbale = zfcp_qdio_sbale_req(req); 829 sbale[0].flags |= SBAL_FLAGS0_TYPE_STATUS; 830 sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY; 831 req->sbale_curr = 2; 832 833 sr_buf = mempool_alloc(adapter->pool.data_status_read, GFP_ATOMIC); 834 if (!sr_buf) { 835 retval = -ENOMEM; 836 goto failed_buf; 837 } 838 memset(sr_buf, 0, sizeof(*sr_buf)); 839 req->data = sr_buf; 840 sbale = zfcp_qdio_sbale_curr(req); 841 sbale->addr = (void *) sr_buf; 842 sbale->length = sizeof(*sr_buf); 843 844 retval = zfcp_fsf_req_send(req); 845 if (retval) 846 goto failed_req_send; 847 848 goto out; 849 850 failed_req_send: 851 mempool_free(sr_buf, adapter->pool.data_status_read); 852 failed_buf: 853 zfcp_fsf_req_free(req); 854 zfcp_hba_dbf_event_fsf_unsol("fail", adapter, NULL); 855 out: 856 spin_unlock_bh(&adapter->req_q_lock); 857 return retval; 858 } 859 860 static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req) 861 { 862 struct zfcp_unit *unit = req->data; 863 union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual; 864 865 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 866 return; 867 868 switch (req->qtcb->header.fsf_status) { 869 case FSF_PORT_HANDLE_NOT_VALID: 870 if (fsq->word[0] == fsq->word[1]) { 871 zfcp_erp_adapter_reopen(unit->port->adapter, 0, 104, 872 req); 873 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 874 } 875 break; 876 case FSF_LUN_HANDLE_NOT_VALID: 877 if (fsq->word[0] == fsq->word[1]) { 878 zfcp_erp_port_reopen(unit->port, 0, 105, req); 879 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 880 } 881 break; 882 case FSF_FCP_COMMAND_DOES_NOT_EXIST: 883 req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED; 884 break; 885 case FSF_PORT_BOXED: 886 zfcp_erp_port_boxed(unit->port, 47, req); 887 req->status |= ZFCP_STATUS_FSFREQ_ERROR | 888 ZFCP_STATUS_FSFREQ_RETRY; 889 break; 890 case FSF_LUN_BOXED: 891 zfcp_erp_unit_boxed(unit, 48, req); 892 req->status |= ZFCP_STATUS_FSFREQ_ERROR | 893 ZFCP_STATUS_FSFREQ_RETRY; 894 break; 895 case FSF_ADAPTER_STATUS_AVAILABLE: 896 switch (fsq->word[0]) { 897 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 898 zfcp_test_link(unit->port); 899 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 900 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 901 break; 902 } 903 break; 904 case FSF_GOOD: 905 req->status |= ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED; 906 break; 907 } 908 } 909 910 /** 911 * zfcp_fsf_abort_fcp_command - abort running SCSI command 912 * @old_req_id: unsigned long 913 * @adapter: pointer to struct zfcp_adapter 914 * @unit: pointer to struct zfcp_unit 915 * @req_flags: integer specifying the request flags 916 * Returns: pointer to struct zfcp_fsf_req 917 * 918 * FIXME(design): should be watched by a timeout !!! 919 */ 920 921 struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id, 922 struct zfcp_adapter *adapter, 923 struct zfcp_unit *unit, 924 int req_flags) 925 { 926 struct qdio_buffer_element *sbale; 927 struct zfcp_fsf_req *req = NULL; 928 929 spin_lock(&adapter->req_q_lock); 930 if (!zfcp_fsf_sbal_available(adapter)) 931 goto out; 932 req = zfcp_fsf_req_create(adapter, FSF_QTCB_ABORT_FCP_CMND, 933 req_flags, adapter->pool.fsf_req_abort); 934 if (IS_ERR(req)) 935 goto out; 936 937 if (unlikely(!(atomic_read(&unit->status) & 938 ZFCP_STATUS_COMMON_UNBLOCKED))) 939 goto out_error_free; 940 941 sbale = zfcp_qdio_sbale_req(req); 942 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 943 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 944 945 req->data = unit; 946 req->handler = zfcp_fsf_abort_fcp_command_handler; 947 req->qtcb->header.lun_handle = unit->handle; 948 req->qtcb->header.port_handle = unit->port->handle; 949 req->qtcb->bottom.support.req_handle = (u64) old_req_id; 950 951 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT); 952 if (!zfcp_fsf_req_send(req)) 953 goto out; 954 955 out_error_free: 956 zfcp_fsf_req_free(req); 957 req = NULL; 958 out: 959 spin_unlock(&adapter->req_q_lock); 960 return req; 961 } 962 963 static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req) 964 { 965 struct zfcp_adapter *adapter = req->adapter; 966 struct zfcp_send_ct *send_ct = req->data; 967 struct fsf_qtcb_header *header = &req->qtcb->header; 968 969 send_ct->status = -EINVAL; 970 971 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 972 goto skip_fsfstatus; 973 974 switch (header->fsf_status) { 975 case FSF_GOOD: 976 zfcp_san_dbf_event_ct_response(req); 977 send_ct->status = 0; 978 break; 979 case FSF_SERVICE_CLASS_NOT_SUPPORTED: 980 zfcp_fsf_class_not_supp(req); 981 break; 982 case FSF_ADAPTER_STATUS_AVAILABLE: 983 switch (header->fsf_status_qual.word[0]){ 984 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 985 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 986 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 987 break; 988 } 989 break; 990 case FSF_ACCESS_DENIED: 991 break; 992 case FSF_PORT_BOXED: 993 req->status |= ZFCP_STATUS_FSFREQ_ERROR | 994 ZFCP_STATUS_FSFREQ_RETRY; 995 break; 996 case FSF_PORT_HANDLE_NOT_VALID: 997 zfcp_erp_adapter_reopen(adapter, 0, 106, req); 998 case FSF_GENERIC_COMMAND_REJECTED: 999 case FSF_PAYLOAD_SIZE_MISMATCH: 1000 case FSF_REQUEST_SIZE_TOO_LARGE: 1001 case FSF_RESPONSE_SIZE_TOO_LARGE: 1002 case FSF_SBAL_MISMATCH: 1003 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1004 break; 1005 } 1006 1007 skip_fsfstatus: 1008 if (send_ct->handler) 1009 send_ct->handler(send_ct->handler_data); 1010 } 1011 1012 static int zfcp_fsf_setup_sbals(struct zfcp_fsf_req *req, 1013 struct scatterlist *sg_req, 1014 struct scatterlist *sg_resp, int max_sbals) 1015 { 1016 int bytes; 1017 1018 bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ, 1019 sg_req, max_sbals); 1020 if (bytes <= 0) 1021 return -ENOMEM; 1022 req->qtcb->bottom.support.req_buf_length = bytes; 1023 req->sbale_curr = ZFCP_LAST_SBALE_PER_SBAL; 1024 1025 bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ, 1026 sg_resp, max_sbals); 1027 if (bytes <= 0) 1028 return -ENOMEM; 1029 req->qtcb->bottom.support.resp_buf_length = bytes; 1030 1031 return 0; 1032 } 1033 1034 /** 1035 * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS) 1036 * @ct: pointer to struct zfcp_send_ct with data for request 1037 * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req 1038 * @erp_action: if non-null the Generic Service request sent within ERP 1039 */ 1040 int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool, 1041 struct zfcp_erp_action *erp_action) 1042 { 1043 struct zfcp_wka_port *wka_port = ct->wka_port; 1044 struct zfcp_adapter *adapter = wka_port->adapter; 1045 struct zfcp_fsf_req *req; 1046 int ret = -EIO; 1047 1048 spin_lock_bh(&adapter->req_q_lock); 1049 if (zfcp_fsf_req_sbal_get(adapter)) 1050 goto out; 1051 1052 req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_GENERIC, 1053 ZFCP_REQ_AUTO_CLEANUP, pool); 1054 if (IS_ERR(req)) { 1055 ret = PTR_ERR(req); 1056 goto out; 1057 } 1058 1059 ret = zfcp_fsf_setup_sbals(req, ct->req, ct->resp, 1060 FSF_MAX_SBALS_PER_REQ); 1061 if (ret) 1062 goto failed_send; 1063 1064 req->handler = zfcp_fsf_send_ct_handler; 1065 req->qtcb->header.port_handle = wka_port->handle; 1066 req->qtcb->bottom.support.service_class = FSF_CLASS_3; 1067 req->qtcb->bottom.support.timeout = ct->timeout; 1068 req->data = ct; 1069 1070 zfcp_san_dbf_event_ct_request(req); 1071 1072 if (erp_action) { 1073 erp_action->fsf_req = req; 1074 req->erp_action = erp_action; 1075 zfcp_fsf_start_erp_timer(req); 1076 } else 1077 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); 1078 1079 ret = zfcp_fsf_req_send(req); 1080 if (ret) 1081 goto failed_send; 1082 1083 goto out; 1084 1085 failed_send: 1086 zfcp_fsf_req_free(req); 1087 if (erp_action) 1088 erp_action->fsf_req = NULL; 1089 out: 1090 spin_unlock_bh(&adapter->req_q_lock); 1091 return ret; 1092 } 1093 1094 static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req) 1095 { 1096 struct zfcp_send_els *send_els = req->data; 1097 struct zfcp_port *port = send_els->port; 1098 struct fsf_qtcb_header *header = &req->qtcb->header; 1099 1100 send_els->status = -EINVAL; 1101 1102 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1103 goto skip_fsfstatus; 1104 1105 switch (header->fsf_status) { 1106 case FSF_GOOD: 1107 zfcp_san_dbf_event_els_response(req); 1108 send_els->status = 0; 1109 break; 1110 case FSF_SERVICE_CLASS_NOT_SUPPORTED: 1111 zfcp_fsf_class_not_supp(req); 1112 break; 1113 case FSF_ADAPTER_STATUS_AVAILABLE: 1114 switch (header->fsf_status_qual.word[0]){ 1115 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 1116 if (port && (send_els->ls_code != ZFCP_LS_ADISC)) 1117 zfcp_test_link(port); 1118 /*fall through */ 1119 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 1120 case FSF_SQ_RETRY_IF_POSSIBLE: 1121 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1122 break; 1123 } 1124 break; 1125 case FSF_ELS_COMMAND_REJECTED: 1126 case FSF_PAYLOAD_SIZE_MISMATCH: 1127 case FSF_REQUEST_SIZE_TOO_LARGE: 1128 case FSF_RESPONSE_SIZE_TOO_LARGE: 1129 break; 1130 case FSF_ACCESS_DENIED: 1131 zfcp_fsf_access_denied_port(req, port); 1132 break; 1133 case FSF_SBAL_MISMATCH: 1134 /* should never occure, avoided in zfcp_fsf_send_els */ 1135 /* fall through */ 1136 default: 1137 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1138 break; 1139 } 1140 skip_fsfstatus: 1141 if (send_els->handler) 1142 send_els->handler(send_els->handler_data); 1143 } 1144 1145 /** 1146 * zfcp_fsf_send_els - initiate an ELS command (FC-FS) 1147 * @els: pointer to struct zfcp_send_els with data for the command 1148 */ 1149 int zfcp_fsf_send_els(struct zfcp_send_els *els) 1150 { 1151 struct zfcp_fsf_req *req; 1152 struct zfcp_adapter *adapter = els->adapter; 1153 struct fsf_qtcb_bottom_support *bottom; 1154 int ret = -EIO; 1155 1156 if (unlikely(!(atomic_read(&els->port->status) & 1157 ZFCP_STATUS_COMMON_UNBLOCKED))) 1158 return -EBUSY; 1159 1160 spin_lock(&adapter->req_q_lock); 1161 if (!zfcp_fsf_sbal_available(adapter)) 1162 goto out; 1163 req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_ELS, 1164 ZFCP_REQ_AUTO_CLEANUP, NULL); 1165 if (IS_ERR(req)) { 1166 ret = PTR_ERR(req); 1167 goto out; 1168 } 1169 1170 ret = zfcp_fsf_setup_sbals(req, els->req, els->resp, 2); 1171 1172 if (ret) 1173 goto failed_send; 1174 1175 bottom = &req->qtcb->bottom.support; 1176 req->handler = zfcp_fsf_send_els_handler; 1177 bottom->d_id = els->d_id; 1178 bottom->service_class = FSF_CLASS_3; 1179 bottom->timeout = 2 * R_A_TOV; 1180 req->data = els; 1181 1182 zfcp_san_dbf_event_els_request(req); 1183 1184 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); 1185 ret = zfcp_fsf_req_send(req); 1186 if (ret) 1187 goto failed_send; 1188 1189 goto out; 1190 1191 failed_send: 1192 zfcp_fsf_req_free(req); 1193 out: 1194 spin_unlock(&adapter->req_q_lock); 1195 return ret; 1196 } 1197 1198 int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action) 1199 { 1200 struct qdio_buffer_element *sbale; 1201 struct zfcp_fsf_req *req; 1202 struct zfcp_adapter *adapter = erp_action->adapter; 1203 int retval = -EIO; 1204 1205 spin_lock_bh(&adapter->req_q_lock); 1206 if (!zfcp_fsf_sbal_available(adapter)) 1207 goto out; 1208 req = zfcp_fsf_req_create(adapter, 1209 FSF_QTCB_EXCHANGE_CONFIG_DATA, 1210 ZFCP_REQ_AUTO_CLEANUP, 1211 adapter->pool.fsf_req_erp); 1212 if (IS_ERR(req)) { 1213 retval = PTR_ERR(req); 1214 goto out; 1215 } 1216 1217 sbale = zfcp_qdio_sbale_req(req); 1218 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 1219 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1220 1221 req->qtcb->bottom.config.feature_selection = 1222 FSF_FEATURE_CFDC | 1223 FSF_FEATURE_LUN_SHARING | 1224 FSF_FEATURE_NOTIFICATION_LOST | 1225 FSF_FEATURE_UPDATE_ALERT; 1226 req->erp_action = erp_action; 1227 req->handler = zfcp_fsf_exchange_config_data_handler; 1228 erp_action->fsf_req = req; 1229 1230 zfcp_fsf_start_erp_timer(req); 1231 retval = zfcp_fsf_req_send(req); 1232 if (retval) { 1233 zfcp_fsf_req_free(req); 1234 erp_action->fsf_req = NULL; 1235 } 1236 out: 1237 spin_unlock_bh(&adapter->req_q_lock); 1238 return retval; 1239 } 1240 1241 int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter, 1242 struct fsf_qtcb_bottom_config *data) 1243 { 1244 struct qdio_buffer_element *sbale; 1245 struct zfcp_fsf_req *req = NULL; 1246 int retval = -EIO; 1247 1248 spin_lock_bh(&adapter->req_q_lock); 1249 if (zfcp_fsf_req_sbal_get(adapter)) 1250 goto out; 1251 1252 req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_CONFIG_DATA, 1253 0, NULL); 1254 if (IS_ERR(req)) { 1255 retval = PTR_ERR(req); 1256 goto out; 1257 } 1258 1259 sbale = zfcp_qdio_sbale_req(req); 1260 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 1261 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1262 req->handler = zfcp_fsf_exchange_config_data_handler; 1263 1264 req->qtcb->bottom.config.feature_selection = 1265 FSF_FEATURE_CFDC | 1266 FSF_FEATURE_LUN_SHARING | 1267 FSF_FEATURE_NOTIFICATION_LOST | 1268 FSF_FEATURE_UPDATE_ALERT; 1269 1270 if (data) 1271 req->data = data; 1272 1273 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); 1274 retval = zfcp_fsf_req_send(req); 1275 out: 1276 spin_unlock_bh(&adapter->req_q_lock); 1277 if (!retval) 1278 wait_event(req->completion_wq, 1279 req->status & ZFCP_STATUS_FSFREQ_COMPLETED); 1280 1281 zfcp_fsf_req_free(req); 1282 1283 return retval; 1284 } 1285 1286 /** 1287 * zfcp_fsf_exchange_port_data - request information about local port 1288 * @erp_action: ERP action for the adapter for which port data is requested 1289 * Returns: 0 on success, error otherwise 1290 */ 1291 int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action) 1292 { 1293 struct qdio_buffer_element *sbale; 1294 struct zfcp_fsf_req *req; 1295 struct zfcp_adapter *adapter = erp_action->adapter; 1296 int retval = -EIO; 1297 1298 if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) 1299 return -EOPNOTSUPP; 1300 1301 spin_lock_bh(&adapter->req_q_lock); 1302 if (!zfcp_fsf_sbal_available(adapter)) 1303 goto out; 1304 req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, 1305 ZFCP_REQ_AUTO_CLEANUP, 1306 adapter->pool.fsf_req_erp); 1307 if (IS_ERR(req)) { 1308 retval = PTR_ERR(req); 1309 goto out; 1310 } 1311 1312 sbale = zfcp_qdio_sbale_req(req); 1313 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 1314 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1315 1316 req->handler = zfcp_fsf_exchange_port_data_handler; 1317 req->erp_action = erp_action; 1318 erp_action->fsf_req = req; 1319 1320 zfcp_fsf_start_erp_timer(req); 1321 retval = zfcp_fsf_req_send(req); 1322 if (retval) { 1323 zfcp_fsf_req_free(req); 1324 erp_action->fsf_req = NULL; 1325 } 1326 out: 1327 spin_unlock_bh(&adapter->req_q_lock); 1328 return retval; 1329 } 1330 1331 /** 1332 * zfcp_fsf_exchange_port_data_sync - request information about local port 1333 * @adapter: pointer to struct zfcp_adapter 1334 * @data: pointer to struct fsf_qtcb_bottom_port 1335 * Returns: 0 on success, error otherwise 1336 */ 1337 int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter, 1338 struct fsf_qtcb_bottom_port *data) 1339 { 1340 struct qdio_buffer_element *sbale; 1341 struct zfcp_fsf_req *req = NULL; 1342 int retval = -EIO; 1343 1344 if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) 1345 return -EOPNOTSUPP; 1346 1347 spin_lock_bh(&adapter->req_q_lock); 1348 if (!zfcp_fsf_sbal_available(adapter)) 1349 goto out; 1350 1351 req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, 0, 1352 NULL); 1353 if (IS_ERR(req)) { 1354 retval = PTR_ERR(req); 1355 goto out; 1356 } 1357 1358 if (data) 1359 req->data = data; 1360 1361 sbale = zfcp_qdio_sbale_req(req); 1362 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 1363 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1364 1365 req->handler = zfcp_fsf_exchange_port_data_handler; 1366 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); 1367 retval = zfcp_fsf_req_send(req); 1368 out: 1369 spin_unlock_bh(&adapter->req_q_lock); 1370 if (!retval) 1371 wait_event(req->completion_wq, 1372 req->status & ZFCP_STATUS_FSFREQ_COMPLETED); 1373 zfcp_fsf_req_free(req); 1374 1375 return retval; 1376 } 1377 1378 static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req) 1379 { 1380 struct zfcp_port *port = req->data; 1381 struct fsf_qtcb_header *header = &req->qtcb->header; 1382 struct fsf_plogi *plogi; 1383 1384 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1385 return; 1386 1387 switch (header->fsf_status) { 1388 case FSF_PORT_ALREADY_OPEN: 1389 break; 1390 case FSF_ACCESS_DENIED: 1391 zfcp_fsf_access_denied_port(req, port); 1392 break; 1393 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED: 1394 dev_warn(&req->adapter->ccw_device->dev, 1395 "Not enough FCP adapter resources to open " 1396 "remote port 0x%016Lx\n", 1397 (unsigned long long)port->wwpn); 1398 zfcp_erp_port_failed(port, 31, req); 1399 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1400 break; 1401 case FSF_ADAPTER_STATUS_AVAILABLE: 1402 switch (header->fsf_status_qual.word[0]) { 1403 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 1404 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 1405 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1406 break; 1407 case FSF_SQ_NO_RETRY_POSSIBLE: 1408 dev_warn(&req->adapter->ccw_device->dev, 1409 "Remote port 0x%016Lx could not be opened\n", 1410 (unsigned long long)port->wwpn); 1411 zfcp_erp_port_failed(port, 32, req); 1412 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1413 break; 1414 } 1415 break; 1416 case FSF_GOOD: 1417 port->handle = header->port_handle; 1418 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN | 1419 ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); 1420 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED | 1421 ZFCP_STATUS_COMMON_ACCESS_BOXED, 1422 &port->status); 1423 /* check whether D_ID has changed during open */ 1424 /* 1425 * FIXME: This check is not airtight, as the FCP channel does 1426 * not monitor closures of target port connections caused on 1427 * the remote side. Thus, they might miss out on invalidating 1428 * locally cached WWPNs (and other N_Port parameters) of gone 1429 * target ports. So, our heroic attempt to make things safe 1430 * could be undermined by 'open port' response data tagged with 1431 * obsolete WWPNs. Another reason to monitor potential 1432 * connection closures ourself at least (by interpreting 1433 * incoming ELS' and unsolicited status). It just crosses my 1434 * mind that one should be able to cross-check by means of 1435 * another GID_PN straight after a port has been opened. 1436 * Alternately, an ADISC/PDISC ELS should suffice, as well. 1437 */ 1438 plogi = (struct fsf_plogi *) req->qtcb->bottom.support.els; 1439 if (req->qtcb->bottom.support.els1_length >= sizeof(*plogi)) { 1440 if (plogi->serv_param.wwpn != port->wwpn) 1441 atomic_clear_mask(ZFCP_STATUS_PORT_DID_DID, 1442 &port->status); 1443 else { 1444 port->wwnn = plogi->serv_param.wwnn; 1445 zfcp_fc_plogi_evaluate(port, plogi); 1446 } 1447 } 1448 break; 1449 case FSF_UNKNOWN_OP_SUBTYPE: 1450 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1451 break; 1452 } 1453 } 1454 1455 /** 1456 * zfcp_fsf_open_port - create and send open port request 1457 * @erp_action: pointer to struct zfcp_erp_action 1458 * Returns: 0 on success, error otherwise 1459 */ 1460 int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action) 1461 { 1462 struct qdio_buffer_element *sbale; 1463 struct zfcp_adapter *adapter = erp_action->adapter; 1464 struct zfcp_fsf_req *req; 1465 int retval = -EIO; 1466 1467 spin_lock_bh(&adapter->req_q_lock); 1468 if (zfcp_fsf_req_sbal_get(adapter)) 1469 goto out; 1470 1471 req = zfcp_fsf_req_create(adapter, 1472 FSF_QTCB_OPEN_PORT_WITH_DID, 1473 ZFCP_REQ_AUTO_CLEANUP, 1474 adapter->pool.fsf_req_erp); 1475 if (IS_ERR(req)) { 1476 retval = PTR_ERR(req); 1477 goto out; 1478 } 1479 1480 sbale = zfcp_qdio_sbale_req(req); 1481 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 1482 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1483 1484 req->handler = zfcp_fsf_open_port_handler; 1485 req->qtcb->bottom.support.d_id = erp_action->port->d_id; 1486 req->data = erp_action->port; 1487 req->erp_action = erp_action; 1488 erp_action->fsf_req = req; 1489 1490 zfcp_fsf_start_erp_timer(req); 1491 retval = zfcp_fsf_req_send(req); 1492 if (retval) { 1493 zfcp_fsf_req_free(req); 1494 erp_action->fsf_req = NULL; 1495 } 1496 out: 1497 spin_unlock_bh(&adapter->req_q_lock); 1498 return retval; 1499 } 1500 1501 static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req) 1502 { 1503 struct zfcp_port *port = req->data; 1504 1505 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1506 return; 1507 1508 switch (req->qtcb->header.fsf_status) { 1509 case FSF_PORT_HANDLE_NOT_VALID: 1510 zfcp_erp_adapter_reopen(port->adapter, 0, 107, req); 1511 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1512 break; 1513 case FSF_ADAPTER_STATUS_AVAILABLE: 1514 break; 1515 case FSF_GOOD: 1516 zfcp_erp_modify_port_status(port, 33, req, 1517 ZFCP_STATUS_COMMON_OPEN, 1518 ZFCP_CLEAR); 1519 break; 1520 } 1521 } 1522 1523 /** 1524 * zfcp_fsf_close_port - create and send close port request 1525 * @erp_action: pointer to struct zfcp_erp_action 1526 * Returns: 0 on success, error otherwise 1527 */ 1528 int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action) 1529 { 1530 struct qdio_buffer_element *sbale; 1531 struct zfcp_adapter *adapter = erp_action->adapter; 1532 struct zfcp_fsf_req *req; 1533 int retval = -EIO; 1534 1535 spin_lock_bh(&adapter->req_q_lock); 1536 if (zfcp_fsf_req_sbal_get(adapter)) 1537 goto out; 1538 1539 req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PORT, 1540 ZFCP_REQ_AUTO_CLEANUP, 1541 adapter->pool.fsf_req_erp); 1542 if (IS_ERR(req)) { 1543 retval = PTR_ERR(req); 1544 goto out; 1545 } 1546 1547 sbale = zfcp_qdio_sbale_req(req); 1548 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 1549 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1550 1551 req->handler = zfcp_fsf_close_port_handler; 1552 req->data = erp_action->port; 1553 req->erp_action = erp_action; 1554 req->qtcb->header.port_handle = erp_action->port->handle; 1555 erp_action->fsf_req = req; 1556 1557 zfcp_fsf_start_erp_timer(req); 1558 retval = zfcp_fsf_req_send(req); 1559 if (retval) { 1560 zfcp_fsf_req_free(req); 1561 erp_action->fsf_req = NULL; 1562 } 1563 out: 1564 spin_unlock_bh(&adapter->req_q_lock); 1565 return retval; 1566 } 1567 1568 static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req) 1569 { 1570 struct zfcp_wka_port *wka_port = req->data; 1571 struct fsf_qtcb_header *header = &req->qtcb->header; 1572 1573 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) { 1574 wka_port->status = ZFCP_WKA_PORT_OFFLINE; 1575 goto out; 1576 } 1577 1578 switch (header->fsf_status) { 1579 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED: 1580 dev_warn(&req->adapter->ccw_device->dev, 1581 "Opening WKA port 0x%x failed\n", wka_port->d_id); 1582 case FSF_ADAPTER_STATUS_AVAILABLE: 1583 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1584 case FSF_ACCESS_DENIED: 1585 wka_port->status = ZFCP_WKA_PORT_OFFLINE; 1586 break; 1587 case FSF_PORT_ALREADY_OPEN: 1588 case FSF_GOOD: 1589 wka_port->handle = header->port_handle; 1590 wka_port->status = ZFCP_WKA_PORT_ONLINE; 1591 } 1592 out: 1593 wake_up(&wka_port->completion_wq); 1594 } 1595 1596 /** 1597 * zfcp_fsf_open_wka_port - create and send open wka-port request 1598 * @wka_port: pointer to struct zfcp_wka_port 1599 * Returns: 0 on success, error otherwise 1600 */ 1601 int zfcp_fsf_open_wka_port(struct zfcp_wka_port *wka_port) 1602 { 1603 struct qdio_buffer_element *sbale; 1604 struct zfcp_adapter *adapter = wka_port->adapter; 1605 struct zfcp_fsf_req *req; 1606 int retval = -EIO; 1607 1608 spin_lock_bh(&adapter->req_q_lock); 1609 if (zfcp_fsf_req_sbal_get(adapter)) 1610 goto out; 1611 1612 req = zfcp_fsf_req_create(adapter, 1613 FSF_QTCB_OPEN_PORT_WITH_DID, 1614 ZFCP_REQ_AUTO_CLEANUP, 1615 adapter->pool.fsf_req_erp); 1616 if (unlikely(IS_ERR(req))) { 1617 retval = PTR_ERR(req); 1618 goto out; 1619 } 1620 1621 sbale = zfcp_qdio_sbale_req(req); 1622 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 1623 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1624 1625 req->handler = zfcp_fsf_open_wka_port_handler; 1626 req->qtcb->bottom.support.d_id = wka_port->d_id; 1627 req->data = wka_port; 1628 1629 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); 1630 retval = zfcp_fsf_req_send(req); 1631 if (retval) 1632 zfcp_fsf_req_free(req); 1633 out: 1634 spin_unlock_bh(&adapter->req_q_lock); 1635 return retval; 1636 } 1637 1638 static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req) 1639 { 1640 struct zfcp_wka_port *wka_port = req->data; 1641 1642 if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) { 1643 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1644 zfcp_erp_adapter_reopen(wka_port->adapter, 0, 84, req); 1645 } 1646 1647 wka_port->status = ZFCP_WKA_PORT_OFFLINE; 1648 wake_up(&wka_port->completion_wq); 1649 } 1650 1651 /** 1652 * zfcp_fsf_close_wka_port - create and send close wka port request 1653 * @erp_action: pointer to struct zfcp_erp_action 1654 * Returns: 0 on success, error otherwise 1655 */ 1656 int zfcp_fsf_close_wka_port(struct zfcp_wka_port *wka_port) 1657 { 1658 struct qdio_buffer_element *sbale; 1659 struct zfcp_adapter *adapter = wka_port->adapter; 1660 struct zfcp_fsf_req *req; 1661 int retval = -EIO; 1662 1663 spin_lock_bh(&adapter->req_q_lock); 1664 if (zfcp_fsf_req_sbal_get(adapter)) 1665 goto out; 1666 1667 req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PORT, 1668 ZFCP_REQ_AUTO_CLEANUP, 1669 adapter->pool.fsf_req_erp); 1670 if (unlikely(IS_ERR(req))) { 1671 retval = PTR_ERR(req); 1672 goto out; 1673 } 1674 1675 sbale = zfcp_qdio_sbale_req(req); 1676 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 1677 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1678 1679 req->handler = zfcp_fsf_close_wka_port_handler; 1680 req->data = wka_port; 1681 req->qtcb->header.port_handle = wka_port->handle; 1682 1683 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); 1684 retval = zfcp_fsf_req_send(req); 1685 if (retval) 1686 zfcp_fsf_req_free(req); 1687 out: 1688 spin_unlock_bh(&adapter->req_q_lock); 1689 return retval; 1690 } 1691 1692 static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req) 1693 { 1694 struct zfcp_port *port = req->data; 1695 struct fsf_qtcb_header *header = &req->qtcb->header; 1696 struct zfcp_unit *unit; 1697 1698 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1699 goto skip_fsfstatus; 1700 1701 switch (header->fsf_status) { 1702 case FSF_PORT_HANDLE_NOT_VALID: 1703 zfcp_erp_adapter_reopen(port->adapter, 0, 108, req); 1704 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1705 break; 1706 case FSF_ACCESS_DENIED: 1707 zfcp_fsf_access_denied_port(req, port); 1708 break; 1709 case FSF_PORT_BOXED: 1710 zfcp_erp_port_boxed(port, 50, req); 1711 req->status |= ZFCP_STATUS_FSFREQ_ERROR | 1712 ZFCP_STATUS_FSFREQ_RETRY; 1713 /* can't use generic zfcp_erp_modify_port_status because 1714 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */ 1715 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); 1716 list_for_each_entry(unit, &port->unit_list_head, list) 1717 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, 1718 &unit->status); 1719 break; 1720 case FSF_ADAPTER_STATUS_AVAILABLE: 1721 switch (header->fsf_status_qual.word[0]) { 1722 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 1723 /* fall through */ 1724 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 1725 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1726 break; 1727 } 1728 break; 1729 case FSF_GOOD: 1730 /* can't use generic zfcp_erp_modify_port_status because 1731 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port 1732 */ 1733 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); 1734 list_for_each_entry(unit, &port->unit_list_head, list) 1735 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, 1736 &unit->status); 1737 break; 1738 } 1739 skip_fsfstatus: 1740 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_CLOSING, &port->status); 1741 } 1742 1743 /** 1744 * zfcp_fsf_close_physical_port - close physical port 1745 * @erp_action: pointer to struct zfcp_erp_action 1746 * Returns: 0 on success 1747 */ 1748 int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action) 1749 { 1750 struct qdio_buffer_element *sbale; 1751 struct zfcp_adapter *adapter = erp_action->adapter; 1752 struct zfcp_fsf_req *req; 1753 int retval = -EIO; 1754 1755 spin_lock_bh(&adapter->req_q_lock); 1756 if (zfcp_fsf_req_sbal_get(adapter)) 1757 goto out; 1758 1759 req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PHYSICAL_PORT, 1760 ZFCP_REQ_AUTO_CLEANUP, 1761 adapter->pool.fsf_req_erp); 1762 if (IS_ERR(req)) { 1763 retval = PTR_ERR(req); 1764 goto out; 1765 } 1766 1767 sbale = zfcp_qdio_sbale_req(req); 1768 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 1769 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1770 1771 req->data = erp_action->port; 1772 req->qtcb->header.port_handle = erp_action->port->handle; 1773 req->erp_action = erp_action; 1774 req->handler = zfcp_fsf_close_physical_port_handler; 1775 erp_action->fsf_req = req; 1776 atomic_set_mask(ZFCP_STATUS_PORT_PHYS_CLOSING, 1777 &erp_action->port->status); 1778 1779 zfcp_fsf_start_erp_timer(req); 1780 retval = zfcp_fsf_req_send(req); 1781 if (retval) { 1782 zfcp_fsf_req_free(req); 1783 erp_action->fsf_req = NULL; 1784 } 1785 out: 1786 spin_unlock_bh(&adapter->req_q_lock); 1787 return retval; 1788 } 1789 1790 static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req) 1791 { 1792 struct zfcp_adapter *adapter = req->adapter; 1793 struct zfcp_unit *unit = req->data; 1794 struct fsf_qtcb_header *header = &req->qtcb->header; 1795 struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support; 1796 struct fsf_queue_designator *queue_designator = 1797 &header->fsf_status_qual.fsf_queue_designator; 1798 int exclusive, readwrite; 1799 1800 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1801 return; 1802 1803 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED | 1804 ZFCP_STATUS_COMMON_ACCESS_BOXED | 1805 ZFCP_STATUS_UNIT_SHARED | 1806 ZFCP_STATUS_UNIT_READONLY, 1807 &unit->status); 1808 1809 switch (header->fsf_status) { 1810 1811 case FSF_PORT_HANDLE_NOT_VALID: 1812 zfcp_erp_adapter_reopen(unit->port->adapter, 0, 109, req); 1813 /* fall through */ 1814 case FSF_LUN_ALREADY_OPEN: 1815 break; 1816 case FSF_ACCESS_DENIED: 1817 zfcp_fsf_access_denied_unit(req, unit); 1818 atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status); 1819 atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status); 1820 break; 1821 case FSF_PORT_BOXED: 1822 zfcp_erp_port_boxed(unit->port, 51, req); 1823 req->status |= ZFCP_STATUS_FSFREQ_ERROR | 1824 ZFCP_STATUS_FSFREQ_RETRY; 1825 break; 1826 case FSF_LUN_SHARING_VIOLATION: 1827 if (header->fsf_status_qual.word[0]) 1828 dev_warn(&adapter->ccw_device->dev, 1829 "LUN 0x%Lx on port 0x%Lx is already in " 1830 "use by CSS%d, MIF Image ID %x\n", 1831 (unsigned long long)unit->fcp_lun, 1832 (unsigned long long)unit->port->wwpn, 1833 queue_designator->cssid, 1834 queue_designator->hla); 1835 else 1836 zfcp_act_eval_err(adapter, 1837 header->fsf_status_qual.word[2]); 1838 zfcp_erp_unit_access_denied(unit, 60, req); 1839 atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status); 1840 atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status); 1841 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1842 break; 1843 case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED: 1844 dev_warn(&adapter->ccw_device->dev, 1845 "No handle is available for LUN " 1846 "0x%016Lx on port 0x%016Lx\n", 1847 (unsigned long long)unit->fcp_lun, 1848 (unsigned long long)unit->port->wwpn); 1849 zfcp_erp_unit_failed(unit, 34, req); 1850 /* fall through */ 1851 case FSF_INVALID_COMMAND_OPTION: 1852 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1853 break; 1854 case FSF_ADAPTER_STATUS_AVAILABLE: 1855 switch (header->fsf_status_qual.word[0]) { 1856 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 1857 zfcp_test_link(unit->port); 1858 /* fall through */ 1859 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 1860 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1861 break; 1862 } 1863 break; 1864 1865 case FSF_GOOD: 1866 unit->handle = header->lun_handle; 1867 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status); 1868 1869 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE) && 1870 (adapter->adapter_features & FSF_FEATURE_LUN_SHARING) && 1871 (adapter->ccw_device->id.dev_model != ZFCP_DEVICE_MODEL_PRIV)) { 1872 exclusive = (bottom->lun_access_info & 1873 FSF_UNIT_ACCESS_EXCLUSIVE); 1874 readwrite = (bottom->lun_access_info & 1875 FSF_UNIT_ACCESS_OUTBOUND_TRANSFER); 1876 1877 if (!exclusive) 1878 atomic_set_mask(ZFCP_STATUS_UNIT_SHARED, 1879 &unit->status); 1880 1881 if (!readwrite) { 1882 atomic_set_mask(ZFCP_STATUS_UNIT_READONLY, 1883 &unit->status); 1884 dev_info(&adapter->ccw_device->dev, 1885 "SCSI device at LUN 0x%016Lx on port " 1886 "0x%016Lx opened read-only\n", 1887 (unsigned long long)unit->fcp_lun, 1888 (unsigned long long)unit->port->wwpn); 1889 } 1890 1891 if (exclusive && !readwrite) { 1892 dev_err(&adapter->ccw_device->dev, 1893 "Exclusive read-only access not " 1894 "supported (unit 0x%016Lx, " 1895 "port 0x%016Lx)\n", 1896 (unsigned long long)unit->fcp_lun, 1897 (unsigned long long)unit->port->wwpn); 1898 zfcp_erp_unit_failed(unit, 35, req); 1899 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1900 zfcp_erp_unit_shutdown(unit, 0, 80, req); 1901 } else if (!exclusive && readwrite) { 1902 dev_err(&adapter->ccw_device->dev, 1903 "Shared read-write access not " 1904 "supported (unit 0x%016Lx, port " 1905 "0x%016Lx\n)", 1906 (unsigned long long)unit->fcp_lun, 1907 (unsigned long long)unit->port->wwpn); 1908 zfcp_erp_unit_failed(unit, 36, req); 1909 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1910 zfcp_erp_unit_shutdown(unit, 0, 81, req); 1911 } 1912 } 1913 break; 1914 } 1915 } 1916 1917 /** 1918 * zfcp_fsf_open_unit - open unit 1919 * @erp_action: pointer to struct zfcp_erp_action 1920 * Returns: 0 on success, error otherwise 1921 */ 1922 int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action) 1923 { 1924 struct qdio_buffer_element *sbale; 1925 struct zfcp_adapter *adapter = erp_action->adapter; 1926 struct zfcp_fsf_req *req; 1927 int retval = -EIO; 1928 1929 spin_lock_bh(&adapter->req_q_lock); 1930 if (zfcp_fsf_req_sbal_get(adapter)) 1931 goto out; 1932 1933 req = zfcp_fsf_req_create(adapter, FSF_QTCB_OPEN_LUN, 1934 ZFCP_REQ_AUTO_CLEANUP, 1935 adapter->pool.fsf_req_erp); 1936 if (IS_ERR(req)) { 1937 retval = PTR_ERR(req); 1938 goto out; 1939 } 1940 1941 sbale = zfcp_qdio_sbale_req(req); 1942 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 1943 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1944 1945 req->qtcb->header.port_handle = erp_action->port->handle; 1946 req->qtcb->bottom.support.fcp_lun = erp_action->unit->fcp_lun; 1947 req->handler = zfcp_fsf_open_unit_handler; 1948 req->data = erp_action->unit; 1949 req->erp_action = erp_action; 1950 erp_action->fsf_req = req; 1951 1952 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) 1953 req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING; 1954 1955 zfcp_fsf_start_erp_timer(req); 1956 retval = zfcp_fsf_req_send(req); 1957 if (retval) { 1958 zfcp_fsf_req_free(req); 1959 erp_action->fsf_req = NULL; 1960 } 1961 out: 1962 spin_unlock_bh(&adapter->req_q_lock); 1963 return retval; 1964 } 1965 1966 static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req) 1967 { 1968 struct zfcp_unit *unit = req->data; 1969 1970 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1971 return; 1972 1973 switch (req->qtcb->header.fsf_status) { 1974 case FSF_PORT_HANDLE_NOT_VALID: 1975 zfcp_erp_adapter_reopen(unit->port->adapter, 0, 110, req); 1976 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1977 break; 1978 case FSF_LUN_HANDLE_NOT_VALID: 1979 zfcp_erp_port_reopen(unit->port, 0, 111, req); 1980 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1981 break; 1982 case FSF_PORT_BOXED: 1983 zfcp_erp_port_boxed(unit->port, 52, req); 1984 req->status |= ZFCP_STATUS_FSFREQ_ERROR | 1985 ZFCP_STATUS_FSFREQ_RETRY; 1986 break; 1987 case FSF_ADAPTER_STATUS_AVAILABLE: 1988 switch (req->qtcb->header.fsf_status_qual.word[0]) { 1989 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 1990 zfcp_test_link(unit->port); 1991 /* fall through */ 1992 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 1993 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1994 break; 1995 } 1996 break; 1997 case FSF_GOOD: 1998 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status); 1999 break; 2000 } 2001 } 2002 2003 /** 2004 * zfcp_fsf_close_unit - close zfcp unit 2005 * @erp_action: pointer to struct zfcp_unit 2006 * Returns: 0 on success, error otherwise 2007 */ 2008 int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action) 2009 { 2010 struct qdio_buffer_element *sbale; 2011 struct zfcp_adapter *adapter = erp_action->adapter; 2012 struct zfcp_fsf_req *req; 2013 int retval = -EIO; 2014 2015 spin_lock_bh(&adapter->req_q_lock); 2016 if (zfcp_fsf_req_sbal_get(adapter)) 2017 goto out; 2018 req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_LUN, 2019 ZFCP_REQ_AUTO_CLEANUP, 2020 adapter->pool.fsf_req_erp); 2021 if (IS_ERR(req)) { 2022 retval = PTR_ERR(req); 2023 goto out; 2024 } 2025 2026 sbale = zfcp_qdio_sbale_req(req); 2027 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 2028 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 2029 2030 req->qtcb->header.port_handle = erp_action->port->handle; 2031 req->qtcb->header.lun_handle = erp_action->unit->handle; 2032 req->handler = zfcp_fsf_close_unit_handler; 2033 req->data = erp_action->unit; 2034 req->erp_action = erp_action; 2035 erp_action->fsf_req = req; 2036 2037 zfcp_fsf_start_erp_timer(req); 2038 retval = zfcp_fsf_req_send(req); 2039 if (retval) { 2040 zfcp_fsf_req_free(req); 2041 erp_action->fsf_req = NULL; 2042 } 2043 out: 2044 spin_unlock_bh(&adapter->req_q_lock); 2045 return retval; 2046 } 2047 2048 static void zfcp_fsf_update_lat(struct fsf_latency_record *lat_rec, u32 lat) 2049 { 2050 lat_rec->sum += lat; 2051 lat_rec->min = min(lat_rec->min, lat); 2052 lat_rec->max = max(lat_rec->max, lat); 2053 } 2054 2055 static void zfcp_fsf_req_latency(struct zfcp_fsf_req *req) 2056 { 2057 struct fsf_qual_latency_info *lat_inf; 2058 struct latency_cont *lat; 2059 struct zfcp_unit *unit = req->unit; 2060 unsigned long flags; 2061 2062 lat_inf = &req->qtcb->prefix.prot_status_qual.latency_info; 2063 2064 switch (req->qtcb->bottom.io.data_direction) { 2065 case FSF_DATADIR_READ: 2066 lat = &unit->latencies.read; 2067 break; 2068 case FSF_DATADIR_WRITE: 2069 lat = &unit->latencies.write; 2070 break; 2071 case FSF_DATADIR_CMND: 2072 lat = &unit->latencies.cmd; 2073 break; 2074 default: 2075 return; 2076 } 2077 2078 spin_lock_irqsave(&unit->latencies.lock, flags); 2079 zfcp_fsf_update_lat(&lat->channel, lat_inf->channel_lat); 2080 zfcp_fsf_update_lat(&lat->fabric, lat_inf->fabric_lat); 2081 lat->counter++; 2082 spin_unlock_irqrestore(&unit->latencies.lock, flags); 2083 } 2084 2085 static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req) 2086 { 2087 struct scsi_cmnd *scpnt = req->data; 2088 struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *) 2089 &(req->qtcb->bottom.io.fcp_rsp); 2090 u32 sns_len; 2091 char *fcp_rsp_info = (unsigned char *) &fcp_rsp_iu[1]; 2092 unsigned long flags; 2093 2094 if (unlikely(!scpnt)) 2095 return; 2096 2097 read_lock_irqsave(&req->adapter->abort_lock, flags); 2098 2099 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ABORTED)) { 2100 set_host_byte(scpnt, DID_SOFT_ERROR); 2101 set_driver_byte(scpnt, SUGGEST_RETRY); 2102 goto skip_fsfstatus; 2103 } 2104 2105 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) { 2106 set_host_byte(scpnt, DID_ERROR); 2107 goto skip_fsfstatus; 2108 } 2109 2110 set_msg_byte(scpnt, COMMAND_COMPLETE); 2111 2112 scpnt->result |= fcp_rsp_iu->scsi_status; 2113 2114 if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA) 2115 zfcp_fsf_req_latency(req); 2116 2117 if (unlikely(fcp_rsp_iu->validity.bits.fcp_rsp_len_valid)) { 2118 if (fcp_rsp_info[3] == RSP_CODE_GOOD) 2119 set_host_byte(scpnt, DID_OK); 2120 else { 2121 set_host_byte(scpnt, DID_ERROR); 2122 goto skip_fsfstatus; 2123 } 2124 } 2125 2126 if (unlikely(fcp_rsp_iu->validity.bits.fcp_sns_len_valid)) { 2127 sns_len = FSF_FCP_RSP_SIZE - sizeof(struct fcp_rsp_iu) + 2128 fcp_rsp_iu->fcp_rsp_len; 2129 sns_len = min(sns_len, (u32) SCSI_SENSE_BUFFERSIZE); 2130 sns_len = min(sns_len, fcp_rsp_iu->fcp_sns_len); 2131 2132 memcpy(scpnt->sense_buffer, 2133 zfcp_get_fcp_sns_info_ptr(fcp_rsp_iu), sns_len); 2134 } 2135 2136 if (unlikely(fcp_rsp_iu->validity.bits.fcp_resid_under)) { 2137 scsi_set_resid(scpnt, fcp_rsp_iu->fcp_resid); 2138 if (scsi_bufflen(scpnt) - scsi_get_resid(scpnt) < 2139 scpnt->underflow) 2140 set_host_byte(scpnt, DID_ERROR); 2141 } 2142 skip_fsfstatus: 2143 if (scpnt->result != 0) 2144 zfcp_scsi_dbf_event_result("erro", 3, req->adapter, scpnt, req); 2145 else if (scpnt->retries > 0) 2146 zfcp_scsi_dbf_event_result("retr", 4, req->adapter, scpnt, req); 2147 else 2148 zfcp_scsi_dbf_event_result("norm", 6, req->adapter, scpnt, req); 2149 2150 scpnt->host_scribble = NULL; 2151 (scpnt->scsi_done) (scpnt); 2152 /* 2153 * We must hold this lock until scsi_done has been called. 2154 * Otherwise we may call scsi_done after abort regarding this 2155 * command has completed. 2156 * Note: scsi_done must not block! 2157 */ 2158 read_unlock_irqrestore(&req->adapter->abort_lock, flags); 2159 } 2160 2161 static void zfcp_fsf_send_fcp_ctm_handler(struct zfcp_fsf_req *req) 2162 { 2163 struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *) 2164 &(req->qtcb->bottom.io.fcp_rsp); 2165 char *fcp_rsp_info = (unsigned char *) &fcp_rsp_iu[1]; 2166 2167 if ((fcp_rsp_info[3] != RSP_CODE_GOOD) || 2168 (req->status & ZFCP_STATUS_FSFREQ_ERROR)) 2169 req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED; 2170 } 2171 2172 2173 static void zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *req) 2174 { 2175 struct zfcp_unit *unit; 2176 struct fsf_qtcb_header *header = &req->qtcb->header; 2177 2178 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)) 2179 unit = req->data; 2180 else 2181 unit = req->unit; 2182 2183 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) 2184 goto skip_fsfstatus; 2185 2186 switch (header->fsf_status) { 2187 case FSF_HANDLE_MISMATCH: 2188 case FSF_PORT_HANDLE_NOT_VALID: 2189 zfcp_erp_adapter_reopen(unit->port->adapter, 0, 112, req); 2190 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2191 break; 2192 case FSF_FCPLUN_NOT_VALID: 2193 case FSF_LUN_HANDLE_NOT_VALID: 2194 zfcp_erp_port_reopen(unit->port, 0, 113, req); 2195 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2196 break; 2197 case FSF_SERVICE_CLASS_NOT_SUPPORTED: 2198 zfcp_fsf_class_not_supp(req); 2199 break; 2200 case FSF_ACCESS_DENIED: 2201 zfcp_fsf_access_denied_unit(req, unit); 2202 break; 2203 case FSF_DIRECTION_INDICATOR_NOT_VALID: 2204 dev_err(&req->adapter->ccw_device->dev, 2205 "Incorrect direction %d, unit 0x%016Lx on port " 2206 "0x%016Lx closed\n", 2207 req->qtcb->bottom.io.data_direction, 2208 (unsigned long long)unit->fcp_lun, 2209 (unsigned long long)unit->port->wwpn); 2210 zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 133, req); 2211 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2212 break; 2213 case FSF_CMND_LENGTH_NOT_VALID: 2214 dev_err(&req->adapter->ccw_device->dev, 2215 "Incorrect CDB length %d, unit 0x%016Lx on " 2216 "port 0x%016Lx closed\n", 2217 req->qtcb->bottom.io.fcp_cmnd_length, 2218 (unsigned long long)unit->fcp_lun, 2219 (unsigned long long)unit->port->wwpn); 2220 zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 134, req); 2221 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2222 break; 2223 case FSF_PORT_BOXED: 2224 zfcp_erp_port_boxed(unit->port, 53, req); 2225 req->status |= ZFCP_STATUS_FSFREQ_ERROR | 2226 ZFCP_STATUS_FSFREQ_RETRY; 2227 break; 2228 case FSF_LUN_BOXED: 2229 zfcp_erp_unit_boxed(unit, 54, req); 2230 req->status |= ZFCP_STATUS_FSFREQ_ERROR | 2231 ZFCP_STATUS_FSFREQ_RETRY; 2232 break; 2233 case FSF_ADAPTER_STATUS_AVAILABLE: 2234 if (header->fsf_status_qual.word[0] == 2235 FSF_SQ_INVOKE_LINK_TEST_PROCEDURE) 2236 zfcp_test_link(unit->port); 2237 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2238 break; 2239 } 2240 skip_fsfstatus: 2241 if (req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT) 2242 zfcp_fsf_send_fcp_ctm_handler(req); 2243 else { 2244 zfcp_fsf_send_fcp_command_task_handler(req); 2245 req->unit = NULL; 2246 zfcp_unit_put(unit); 2247 } 2248 } 2249 2250 static void zfcp_set_fcp_dl(struct fcp_cmnd_iu *fcp_cmd, u32 fcp_dl) 2251 { 2252 u32 *fcp_dl_ptr; 2253 2254 /* 2255 * fcp_dl_addr = start address of fcp_cmnd structure + 2256 * size of fixed part + size of dynamically sized add_dcp_cdb field 2257 * SEE FCP-2 documentation 2258 */ 2259 fcp_dl_ptr = (u32 *) ((unsigned char *) &fcp_cmd[1] + 2260 (fcp_cmd->add_fcp_cdb_length << 2)); 2261 *fcp_dl_ptr = fcp_dl; 2262 } 2263 2264 /** 2265 * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command) 2266 * @adapter: adapter where scsi command is issued 2267 * @unit: unit where command is sent to 2268 * @scsi_cmnd: scsi command to be sent 2269 * @timer: timer to be started when request is initiated 2270 * @req_flags: flags for fsf_request 2271 */ 2272 int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter, 2273 struct zfcp_unit *unit, 2274 struct scsi_cmnd *scsi_cmnd, 2275 int use_timer, int req_flags) 2276 { 2277 struct zfcp_fsf_req *req; 2278 struct fcp_cmnd_iu *fcp_cmnd_iu; 2279 unsigned int sbtype; 2280 int real_bytes, retval = -EIO; 2281 2282 if (unlikely(!(atomic_read(&unit->status) & 2283 ZFCP_STATUS_COMMON_UNBLOCKED))) 2284 return -EBUSY; 2285 2286 spin_lock(&adapter->req_q_lock); 2287 if (!zfcp_fsf_sbal_available(adapter)) 2288 goto out; 2289 req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags, 2290 adapter->pool.fsf_req_scsi); 2291 if (IS_ERR(req)) { 2292 retval = PTR_ERR(req); 2293 goto out; 2294 } 2295 2296 zfcp_unit_get(unit); 2297 req->unit = unit; 2298 req->data = scsi_cmnd; 2299 req->handler = zfcp_fsf_send_fcp_command_handler; 2300 req->qtcb->header.lun_handle = unit->handle; 2301 req->qtcb->header.port_handle = unit->port->handle; 2302 req->qtcb->bottom.io.service_class = FSF_CLASS_3; 2303 2304 scsi_cmnd->host_scribble = (unsigned char *) req->req_id; 2305 2306 fcp_cmnd_iu = (struct fcp_cmnd_iu *) &(req->qtcb->bottom.io.fcp_cmnd); 2307 fcp_cmnd_iu->fcp_lun = unit->fcp_lun; 2308 /* 2309 * set depending on data direction: 2310 * data direction bits in SBALE (SB Type) 2311 * data direction bits in QTCB 2312 * data direction bits in FCP_CMND IU 2313 */ 2314 switch (scsi_cmnd->sc_data_direction) { 2315 case DMA_NONE: 2316 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND; 2317 sbtype = SBAL_FLAGS0_TYPE_READ; 2318 break; 2319 case DMA_FROM_DEVICE: 2320 req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ; 2321 sbtype = SBAL_FLAGS0_TYPE_READ; 2322 fcp_cmnd_iu->rddata = 1; 2323 break; 2324 case DMA_TO_DEVICE: 2325 req->qtcb->bottom.io.data_direction = FSF_DATADIR_WRITE; 2326 sbtype = SBAL_FLAGS0_TYPE_WRITE; 2327 fcp_cmnd_iu->wddata = 1; 2328 break; 2329 case DMA_BIDIRECTIONAL: 2330 default: 2331 retval = -EIO; 2332 goto failed_scsi_cmnd; 2333 } 2334 2335 if (likely((scsi_cmnd->device->simple_tags) || 2336 ((atomic_read(&unit->status) & ZFCP_STATUS_UNIT_READONLY) && 2337 (atomic_read(&unit->status) & ZFCP_STATUS_UNIT_SHARED)))) 2338 fcp_cmnd_iu->task_attribute = SIMPLE_Q; 2339 else 2340 fcp_cmnd_iu->task_attribute = UNTAGGED; 2341 2342 if (unlikely(scsi_cmnd->cmd_len > FCP_CDB_LENGTH)) 2343 fcp_cmnd_iu->add_fcp_cdb_length = 2344 (scsi_cmnd->cmd_len - FCP_CDB_LENGTH) >> 2; 2345 2346 memcpy(fcp_cmnd_iu->fcp_cdb, scsi_cmnd->cmnd, scsi_cmnd->cmd_len); 2347 2348 req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) + 2349 fcp_cmnd_iu->add_fcp_cdb_length + sizeof(u32); 2350 2351 real_bytes = zfcp_qdio_sbals_from_sg(req, sbtype, 2352 scsi_sglist(scsi_cmnd), 2353 FSF_MAX_SBALS_PER_REQ); 2354 if (unlikely(real_bytes < 0)) { 2355 if (req->sbal_number < FSF_MAX_SBALS_PER_REQ) 2356 retval = -EIO; 2357 else { 2358 dev_err(&adapter->ccw_device->dev, 2359 "Oversize data package, unit 0x%016Lx " 2360 "on port 0x%016Lx closed\n", 2361 (unsigned long long)unit->fcp_lun, 2362 (unsigned long long)unit->port->wwpn); 2363 zfcp_erp_unit_shutdown(unit, 0, 131, req); 2364 retval = -EINVAL; 2365 } 2366 goto failed_scsi_cmnd; 2367 } 2368 2369 zfcp_set_fcp_dl(fcp_cmnd_iu, real_bytes); 2370 2371 if (use_timer) 2372 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); 2373 2374 retval = zfcp_fsf_req_send(req); 2375 if (unlikely(retval)) 2376 goto failed_scsi_cmnd; 2377 2378 goto out; 2379 2380 failed_scsi_cmnd: 2381 zfcp_unit_put(unit); 2382 zfcp_fsf_req_free(req); 2383 scsi_cmnd->host_scribble = NULL; 2384 out: 2385 spin_unlock(&adapter->req_q_lock); 2386 return retval; 2387 } 2388 2389 /** 2390 * zfcp_fsf_send_fcp_ctm - send SCSI task management command 2391 * @adapter: pointer to struct zfcp-adapter 2392 * @unit: pointer to struct zfcp_unit 2393 * @tm_flags: unsigned byte for task management flags 2394 * @req_flags: int request flags 2395 * Returns: on success pointer to struct fsf_req, NULL otherwise 2396 */ 2397 struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_adapter *adapter, 2398 struct zfcp_unit *unit, 2399 u8 tm_flags, int req_flags) 2400 { 2401 struct qdio_buffer_element *sbale; 2402 struct zfcp_fsf_req *req = NULL; 2403 struct fcp_cmnd_iu *fcp_cmnd_iu; 2404 2405 if (unlikely(!(atomic_read(&unit->status) & 2406 ZFCP_STATUS_COMMON_UNBLOCKED))) 2407 return NULL; 2408 2409 spin_lock(&adapter->req_q_lock); 2410 if (!zfcp_fsf_sbal_available(adapter)) 2411 goto out; 2412 req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags, 2413 adapter->pool.fsf_req_scsi); 2414 if (IS_ERR(req)) 2415 goto out; 2416 2417 req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT; 2418 req->data = unit; 2419 req->handler = zfcp_fsf_send_fcp_command_handler; 2420 req->qtcb->header.lun_handle = unit->handle; 2421 req->qtcb->header.port_handle = unit->port->handle; 2422 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND; 2423 req->qtcb->bottom.io.service_class = FSF_CLASS_3; 2424 req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) + 2425 sizeof(u32); 2426 2427 sbale = zfcp_qdio_sbale_req(req); 2428 sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE; 2429 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 2430 2431 fcp_cmnd_iu = (struct fcp_cmnd_iu *) &req->qtcb->bottom.io.fcp_cmnd; 2432 fcp_cmnd_iu->fcp_lun = unit->fcp_lun; 2433 fcp_cmnd_iu->task_management_flags = tm_flags; 2434 2435 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT); 2436 if (!zfcp_fsf_req_send(req)) 2437 goto out; 2438 2439 zfcp_fsf_req_free(req); 2440 req = NULL; 2441 out: 2442 spin_unlock(&adapter->req_q_lock); 2443 return req; 2444 } 2445 2446 static void zfcp_fsf_control_file_handler(struct zfcp_fsf_req *req) 2447 { 2448 if (req->qtcb->header.fsf_status != FSF_GOOD) 2449 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2450 } 2451 2452 /** 2453 * zfcp_fsf_control_file - control file upload/download 2454 * @adapter: pointer to struct zfcp_adapter 2455 * @fsf_cfdc: pointer to struct zfcp_fsf_cfdc 2456 * Returns: on success pointer to struct zfcp_fsf_req, NULL otherwise 2457 */ 2458 struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter, 2459 struct zfcp_fsf_cfdc *fsf_cfdc) 2460 { 2461 struct qdio_buffer_element *sbale; 2462 struct zfcp_fsf_req *req = NULL; 2463 struct fsf_qtcb_bottom_support *bottom; 2464 int direction, retval = -EIO, bytes; 2465 2466 if (!(adapter->adapter_features & FSF_FEATURE_CFDC)) 2467 return ERR_PTR(-EOPNOTSUPP); 2468 2469 switch (fsf_cfdc->command) { 2470 case FSF_QTCB_DOWNLOAD_CONTROL_FILE: 2471 direction = SBAL_FLAGS0_TYPE_WRITE; 2472 break; 2473 case FSF_QTCB_UPLOAD_CONTROL_FILE: 2474 direction = SBAL_FLAGS0_TYPE_READ; 2475 break; 2476 default: 2477 return ERR_PTR(-EINVAL); 2478 } 2479 2480 spin_lock_bh(&adapter->req_q_lock); 2481 if (zfcp_fsf_req_sbal_get(adapter)) 2482 goto out; 2483 2484 req = zfcp_fsf_req_create(adapter, fsf_cfdc->command, 0, NULL); 2485 if (IS_ERR(req)) { 2486 retval = -EPERM; 2487 goto out; 2488 } 2489 2490 req->handler = zfcp_fsf_control_file_handler; 2491 2492 sbale = zfcp_qdio_sbale_req(req); 2493 sbale[0].flags |= direction; 2494 2495 bottom = &req->qtcb->bottom.support; 2496 bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE; 2497 bottom->option = fsf_cfdc->option; 2498 2499 bytes = zfcp_qdio_sbals_from_sg(req, direction, fsf_cfdc->sg, 2500 FSF_MAX_SBALS_PER_REQ); 2501 if (bytes != ZFCP_CFDC_MAX_SIZE) { 2502 retval = -ENOMEM; 2503 zfcp_fsf_req_free(req); 2504 goto out; 2505 } 2506 2507 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); 2508 retval = zfcp_fsf_req_send(req); 2509 out: 2510 spin_unlock_bh(&adapter->req_q_lock); 2511 2512 if (!retval) { 2513 wait_event(req->completion_wq, 2514 req->status & ZFCP_STATUS_FSFREQ_COMPLETED); 2515 return req; 2516 } 2517 return ERR_PTR(retval); 2518 } 2519