/*
 * QLogic FCoE Offload Driver
 * Copyright (c) 2016-2018 Cavium Inc.
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
#include "qedf.h"

/* It's assumed that the lock is held when calling this function. */
static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
	void *data, uint32_t data_len,
	void (*cb_func)(struct qedf_els_cb_arg *cb_arg),
	struct qedf_els_cb_arg *cb_arg, uint32_t timer_msec)
{
	struct qedf_ctx *qedf;
	struct fc_lport *lport;
	struct qedf_ioreq *els_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	struct e4_fcoe_task_context *task;
	int rc = 0;
	uint32_t did, sid;
	uint16_t xid;
	struct fcoe_wqe *sqe;
	unsigned long flags;
	u16 sqe_idx;

	if (!fcport) {
		QEDF_ERR(NULL, "fcport is NULL");
		rc = -EINVAL;
		goto els_err;
	}

	qedf = fcport->qedf;
	lport = qedf->lport;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending ELS\n");

	rc = fc_remote_port_chkready(fcport->rport);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: rport not ready\n", op);
		rc = -EAGAIN;
		goto els_err;
	}
	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: link is not ready\n",
			 op);
		rc = -EAGAIN;
		goto els_err;
	}

	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: fcport not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}

	els_req = qedf_alloc_cmd(fcport, QEDF_ELS);
	if (!els_req) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
			  "Failed to alloc ELS request 0x%x\n", op);
		rc = -ENOMEM;
		goto els_err;
	}

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "initiate_els els_req = "
		  "0x%p cb_arg = %p xid = %x\n", els_req, cb_arg,
		  els_req->xid);
	els_req->sc_cmd = NULL;
	els_req->cmd_type = QEDF_ELS;
	els_req->fcport = fcport;
	els_req->cb_func = cb_func;
	cb_arg->io_req = els_req;
	cb_arg->op = op;
	els_req->cb_arg = cb_arg;
	els_req->data_xfer_len = data_len;

	/* Record which cpu this request is associated with */
	els_req->cpu = smp_processor_id();

	mp_req = (struct qedf_mp_req *)&(els_req->mp_req);
	rc = qedf_init_mp_req(els_req);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "ELS MP request init failed\n");
		kref_put(&els_req->refcount, qedf_release_cmd);
		goto els_err;
	} else {
		rc = 0;
	}

	/* Fill ELS Payload */
	if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
		memcpy(mp_req->req_buf, data, data_len);
	} else {
		QEDF_ERR(&(qedf->dbg_ctx), "Invalid ELS op 0x%x\n", op);
		els_req->cb_func = NULL;
		els_req->cb_arg = NULL;
		kref_put(&els_req->refcount, qedf_release_cmd);
		rc = -EINVAL;
	}

	if (rc)
		goto els_err;

	/* Fill FC header */
	fc_hdr = &(mp_req->req_fc_hdr);

	did = fcport->rdata->ids.port_id;
	sid = fcport->sid;

	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
			 FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
			 FC_FC_SEQ_INIT, 0);

	/* Obtain exchange id */
	xid = els_req->xid;

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));

	/* Initialize task context for this IO request */
	task = qedf_get_task_mem(&qedf->tasks, xid);
	qedf_init_mp_task(els_req, task, sqe);

	/* Put timer on original I/O request */
	if (timer_msec)
		qedf_cmd_timer_set(qedf, els_req, timer_msec);

	/* Ring doorbell */
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Ringing doorbell for ELS "
		  "req\n");
	qedf_ring_doorbell(fcport);
	set_bit(QEDF_CMD_OUTSTANDING, &els_req->flags);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);
els_err:
	return rc;
}

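/*
 * Handle the firmware CQE for a completed ELS request: clear the outstanding
 * flag, cancel the ELS timer, record the response length reported in the
 * midpath CQE, and invoke the caller-supplied completion callback before
 * dropping the command reference.
 */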
void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *els_req)
{
	struct fcoe_cqe_midpath_info *mp_info;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered with xid = 0x%x"
		  " cmd_type = %d.\n", els_req->xid, els_req->cmd_type);

	clear_bit(QEDF_CMD_OUTSTANDING, &els_req->flags);

	/* Kill the ELS timer */
	cancel_delayed_work(&els_req->timeout_work);

	/* Get ELS response length from CQE */
	mp_info = &cqe->cqe_info.midpath_info;
	els_req->mp_req.resp_len = mp_info->data_placement_size;

	/* Parse ELS response */
	if ((els_req->cb_func) && (els_req->cb_arg)) {
		els_req->cb_func(els_req->cb_arg);
		els_req->cb_arg = NULL;
	}

	kref_put(&els_req->refcount, qedf_release_cmd);
}

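/*
 * Completion callback for an RRQ ELS request. Drops the reference holding the
 * aborted I/O request so it can be returned to the command pool, and releases
 * the RRQ request itself if the RRQ timed out (in that case the normal
 * release in qedf_process_els_compl never runs).
 */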
static void qedf_rrq_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *rrq_req;
	struct qedf_ctx *qedf;
	int refcount;

	rrq_req = cb_arg->io_req;
	qedf = rrq_req->fcport->qedf;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered.\n");

	orig_io_req = cb_arg->aborted_io_req;

	if (!orig_io_req)
		goto out_free;

	if (rrq_req->event != QEDF_IOREQ_EV_ELS_TMO &&
	    rrq_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
		cancel_delayed_work_sync(&orig_io_req->timeout_work);

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "rrq_compl: orig io = %p,"
		  " orig xid = 0x%x, rrq_xid = 0x%x, refcount=%d\n",
		  orig_io_req, orig_io_req->xid, rrq_req->xid, refcount);

	/*
	 * This should return the aborted io_req to the command pool. Note that
	 * we need to check the refcount in case the original request was
	 * flushed but we get a completion on this xid.
	 */
	if (orig_io_req && refcount > 0)
		kref_put(&orig_io_req->refcount, qedf_release_cmd);

out_free:
	/*
	 * Release a reference to the rrq request if we timed out as the
	 * rrq completion handler is called directly from the timeout handler
	 * and not from els_compl where the reference would have normally been
	 * released.
	 */
	if (rrq_req->event == QEDF_IOREQ_EV_ELS_TMO)
		kref_put(&rrq_req->refcount, qedf_release_cmd);
	kfree(cb_arg);
}

/* Assumes kref is already held by caller */
int qedf_send_rrq(struct qedf_ioreq *aborted_io_req)
{
	struct fc_els_rrq rrq;
	struct qedf_rport *fcport;
	struct fc_lport *lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	struct qedf_ctx *qedf;
	uint32_t sid;
	uint32_t r_a_tov;
	int rc;
	int refcount;

	if (!aborted_io_req) {
		QEDF_ERR(NULL, "abort_io_req is NULL.\n");
		return -EINVAL;
	}

	fcport = aborted_io_req->fcport;

	if (!fcport) {
		refcount = kref_read(&aborted_io_req->refcount);
		QEDF_ERR(NULL,
			 "RRQ work was queued prior to a flush xid=0x%x, refcount=%d.\n",
			 aborted_io_req->xid, refcount);
		kref_put(&aborted_io_req->refcount, qedf_release_cmd);
		return -EINVAL;
	}

	/* Check that fcport is still offloaded */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
		return -EINVAL;
	}

	if (!fcport->qedf) {
		QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
		return -EINVAL;
	}

	qedf = fcport->qedf;

	/*
	 * Sanity check that we can send an RRQ: the aborted I/O must hold
	 * exactly one remaining reference.
	 */
	refcount = kref_read(&aborted_io_req->refcount);
	if (refcount != 1) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
			  "refcount for xid=%x io_req=%p refcount=%d is not 1.\n",
			  aborted_io_req->xid, aborted_io_req, refcount);
		return -EINVAL;
	}

	lport = qedf->lport;
	sid = fcport->sid;
	r_a_tov = lport->r_a_tov;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending RRQ orig "
		  "io = %p, orig_xid = 0x%x\n", aborted_io_req,
		  aborted_io_req->xid);
	memset(&rrq, 0, sizeof(rrq));

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
			 "RRQ\n");
		rc = -ENOMEM;
		goto rrq_err;
	}

	cb_arg->aborted_io_req = aborted_io_req;

	rrq.rrq_cmd = ELS_RRQ;
	hton24(rrq.rrq_s_id, sid);
	rrq.rrq_ox_id = htons(aborted_io_req->xid);
	rrq.rrq_rx_id =
	    htons(aborted_io_req->task->tstorm_st_context.read_write.rx_id);

	rc = qedf_initiate_els(fcport, ELS_RRQ, &rrq, sizeof(rrq),
	    qedf_rrq_compl, cb_arg, r_a_tov);

rrq_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "RRQ failed - release orig io "
			 "req 0x%x\n", aborted_io_req->xid);
		kfree(cb_arg);
		kref_put(&aborted_io_req->refcount, qedf_release_cmd);
	}
	return rc;
}

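/*
 * Hand an ELS response that was received on the offloaded path back to libfc
 * as if it had arrived as a regular L2 frame: restore the OX_ID libfc
 * originally used, rebuild the FC header fields and frame attributes, then
 * pass the frame to fc_exch_recv().
 */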
static void qedf_process_l2_frame_compl(struct qedf_rport *fcport,
					struct fc_frame *fp,
					u16 l2_oxid)
{
	struct fc_lport *lport = fcport->qedf->lport;
	struct fc_frame_header *fh;
	u32 crc;

	fh = (struct fc_frame_header *)fc_frame_header_get(fp);

	/* Set the OXID we return to what libfc used */
	if (l2_oxid != FC_XID_UNKNOWN)
		fh->fh_ox_id = htons(l2_oxid);

	/* Setup header fields */
	fh->fh_r_ctl = FC_RCTL_ELS_REP;
	fh->fh_type = FC_TYPE_ELS;
	/* Last sequence, end sequence */
	fh->fh_f_ctl[0] = 0x98;
	hton24(fh->fh_d_id, lport->port_id);
	hton24(fh->fh_s_id, fcport->rdata->ids.port_id);
	fh->fh_rx_id = 0xffff;

	/* Set frame attributes */
	crc = fcoe_fc_crc(fp);
	fc_frame_init(fp);
	fr_dev(fp) = lport;
	fr_sof(fp) = FC_SOF_I3;
	fr_eof(fp) = FC_EOF_T;
	fr_crc(fp) = cpu_to_le32(~crc);

	/* Send completed request to libfc */
	fc_exch_recv(lport, fp);
}

/*
 * In instances where an ELS command times out we may need to restart the
 * rport by logging out and then logging back in.
 */
void qedf_restart_rport(struct qedf_rport *fcport)
{
	struct fc_lport *lport;
	struct fc_rport_priv *rdata;
	u32 port_id;
	unsigned long flags;

	if (!fcport)
		return;

	spin_lock_irqsave(&fcport->rport_lock, flags);
	if (test_bit(QEDF_RPORT_IN_RESET, &fcport->flags) ||
	    !test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
	    test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "fcport %p already in reset or not offloaded.\n",
			 fcport);
		spin_unlock_irqrestore(&fcport->rport_lock, flags);
		return;
	}

	/* Set that we are now in reset */
	set_bit(QEDF_RPORT_IN_RESET, &fcport->flags);
	spin_unlock_irqrestore(&fcport->rport_lock, flags);

	rdata = fcport->rdata;
	if (rdata && !kref_get_unless_zero(&rdata->kref)) {
		fcport->rdata = NULL;
		rdata = NULL;
	}

	if (rdata && rdata->rp_state == RPORT_ST_READY) {
		lport = fcport->qedf->lport;
		port_id = rdata->ids.port_id;
		QEDF_ERR(&(fcport->qedf->dbg_ctx),
			 "LOGO port_id=%x.\n", port_id);
		fc_rport_logoff(rdata);
		kref_put(&rdata->kref, fc_rport_destroy);
		mutex_lock(&lport->disc.disc_mutex);
		/* Recreate the rport and log back in */
		rdata = fc_rport_create(lport, port_id);
		if (rdata) {
			mutex_unlock(&lport->disc.disc_mutex);
			fc_rport_login(rdata);
			fcport->rdata = rdata;
		} else {
			mutex_unlock(&lport->disc.disc_mutex);
			fcport->rdata = NULL;
		}
	}
	clear_bit(QEDF_RPORT_IN_RESET, &fcport->flags);
}

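/*
 * Completion callback for ELS requests (e.g. ADISC) issued on behalf of
 * libfc. Copies the firmware response into a newly allocated fc_frame and
 * completes it back to libfc via qedf_process_l2_frame_compl(), unless the
 * request was flushed or timed out.
 */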
static void qedf_l2_els_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *els_req;
	struct qedf_rport *fcport;
	struct qedf_mp_req *mp_req;
	struct fc_frame *fp;
	struct fc_frame_header *fh, *mp_fc_hdr;
	void *resp_buf, *fc_payload;
	u32 resp_len;
	u16 l2_oxid;

	l2_oxid = cb_arg->l2_oxid;
	els_req = cb_arg->io_req;

	if (!els_req) {
		QEDF_ERR(NULL, "els_req is NULL.\n");
		goto free_arg;
	}

	/*
	 * If we are flushing the command just free the cb_arg as none of the
	 * response data will be valid.
	 */
	if (els_req->event == QEDF_IOREQ_EV_ELS_FLUSH)
		goto free_arg;

	fcport = els_req->fcport;
	mp_req = &(els_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	/*
	 * If a middle path ELS command times out, don't try to return
	 * the command but rather do any internal cleanup and let libfc
	 * time out the command and clean up its internal resources.
	 */
	if (els_req->event == QEDF_IOREQ_EV_ELS_TMO) {
		/*
		 * If ADISC times out, libfc will time out the exchange and
		 * then try to send a PLOGI, which will also time out since
		 * the session is still offloaded. Force libfc to log out the
		 * session, which will upload the connection and allow the
		 * PLOGI response to flow over the LL2 path.
		 */
		if (cb_arg->op == ELS_ADISC)
			qedf_restart_rport(fcport);
		return;
	}

	if (sizeof(struct fc_frame_header) + resp_len > QEDF_PAGE_SIZE) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "resp_len is "
			 "beyond page size.\n");
		goto free_arg;
	}

	fp = fc_frame_alloc(fcport->qedf->lport, resp_len);
	if (!fp) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx),
			 "fc_frame_alloc failure.\n");
		return;
	}

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
		  "Completing OX_ID 0x%x back to libfc.\n", l2_oxid);
	qedf_process_l2_frame_compl(fcport, fp, l2_oxid);

free_arg:
	kfree(cb_arg);
}

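/*
 * Send an ADISC for libfc on the offloaded path. The OX_ID from the frame
 * libfc built is saved in the callback argument so the response can be
 * returned to libfc on the exchange it expects.
 */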
int qedf_send_adisc(struct qedf_rport *fcport, struct fc_frame *fp)
{
	struct fc_els_adisc *adisc;
	struct fc_frame_header *fh;
	struct fc_lport *lport = fcport->qedf->lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	struct qedf_ctx *qedf;
	uint32_t r_a_tov = lport->r_a_tov;
	int rc;

	qedf = fcport->qedf;
	fh = fc_frame_header_get(fp);

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
			 "ADISC\n");
		rc = -ENOMEM;
		goto adisc_err;
	}
	cb_arg->l2_oxid = ntohs(fh->fh_ox_id);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		  "Sending ADISC ox_id=0x%x.\n", cb_arg->l2_oxid);

	adisc = fc_frame_payload_get(fp, sizeof(*adisc));

	rc = qedf_initiate_els(fcport, ELS_ADISC, adisc, sizeof(*adisc),
	    qedf_l2_els_compl, cb_arg, r_a_tov);

adisc_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "ADISC failed.\n");
		kfree(cb_arg);
	}
	return rc;
}

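/*
 * Completion callback for an SRR ELS request. On an accept the original I/O
 * is left to continue; on a reject the original I/O is aborted via ABTS. In
 * either case the reference taken on the original I/O when the SRR was sent
 * is dropped here.
 */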
static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *srr_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *mp_fc_hdr, *fh;
	struct fc_frame *fp;
	void *resp_buf, *fc_payload;
	u32 resp_len;
	struct fc_lport *lport;
	struct qedf_ctx *qedf;
	int refcount;
	u8 opcode;

	srr_req = cb_arg->io_req;
	qedf = srr_req->fcport->qedf;
	lport = qedf->lport;

	orig_io_req = cb_arg->aborted_io_req;

	if (!orig_io_req)
		goto out_free;

	clear_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);

	if (srr_req->event != QEDF_IOREQ_EV_ELS_TMO &&
	    srr_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
		cancel_delayed_work_sync(&orig_io_req->timeout_work);

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p,"
		  " orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n",
		  orig_io_req, orig_io_req->xid, srr_req->xid, refcount);

	/* If an SRR times out, simply free resources */
	if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO)
		goto out_put;

	/* Normalize response data into struct fc_frame */
	mp_req = &(srr_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	fp = fc_frame_alloc(lport, resp_len);
	if (!fp) {
		QEDF_ERR(&(qedf->dbg_ctx),
			 "fc_frame_alloc failure.\n");
		goto out_put;
	}

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	opcode = fc_frame_payload_op(fp);
	switch (opcode) {
	case ELS_LS_ACC:
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			  "SRR success.\n");
		break;
	case ELS_LS_RJT:
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
			  "SRR rejected.\n");
		qedf_initiate_abts(orig_io_req, true);
		break;
	}

	fc_frame_free(fp);
out_put:
	/* Put reference for original command since SRR completed */
	kref_put(&orig_io_req->refcount, qedf_release_cmd);
out_free:
	kfree(cb_arg);
}

static int qedf_send_srr(struct qedf_ioreq *orig_io_req, u32 offset, u8 r_ctl)
{
	struct fcp_srr srr;
	struct qedf_ctx *qedf;
	struct qedf_rport *fcport;
	struct fc_lport *lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	u32 r_a_tov;
	int rc;

	if (!orig_io_req) {
		QEDF_ERR(NULL, "orig_io_req is NULL.\n");
		return -EINVAL;
	}

	fcport = orig_io_req->fcport;

	/* Check that fcport is still offloaded */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
		return -EINVAL;
	}

	if (!fcport->qedf) {
		QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
		return -EINVAL;
	}

	/* Take reference until SRR command completion */
	kref_get(&orig_io_req->refcount);

	qedf = fcport->qedf;
	lport = qedf->lport;
	r_a_tov = lport->r_a_tov;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending SRR orig_io=%p, "
		  "orig_xid=0x%x\n", orig_io_req, orig_io_req->xid);
	memset(&srr, 0, sizeof(srr));

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
			 "SRR\n");
		rc = -ENOMEM;
		goto srr_err;
	}

	cb_arg->aborted_io_req = orig_io_req;

	srr.srr_op = ELS_SRR;
	srr.srr_ox_id = htons(orig_io_req->xid);
	srr.srr_rx_id = htons(orig_io_req->rx_id);
	srr.srr_rel_off = htonl(offset);
	srr.srr_r_ctl = r_ctl;

	rc = qedf_initiate_els(fcport, ELS_SRR, &srr, sizeof(srr),
	    qedf_srr_compl, cb_arg, r_a_tov);

srr_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "SRR failed - release orig_io_req"
			 "=0x%x\n", orig_io_req->xid);
		kfree(cb_arg);
		/* If we fail to queue SRR, send ABTS to orig_io */
		qedf_initiate_abts(orig_io_req, true);
		kref_put(&orig_io_req->refcount, qedf_release_cmd);
	} else
		/* Tell other threads that SRR is in progress */
		set_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);

	return rc;
}

static void qedf_initiate_seq_cleanup(struct qedf_ioreq *orig_io_req,
	u32 offset, u8 r_ctl)
{
	struct qedf_rport *fcport;
	unsigned long flags;
	struct qedf_els_cb_arg *cb_arg;
	struct fcoe_wqe *sqe;
	u16 sqe_idx;

	fcport = orig_io_req->fcport;

	QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
		  "Doing sequence cleanup for xid=0x%x offset=%u.\n",
		  orig_io_req->xid, offset);

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to allocate cb_arg "
			 "for sequence cleanup\n");
		return;
	}

	/* Get reference for cleanup request */
	kref_get(&orig_io_req->refcount);

	orig_io_req->cmd_type = QEDF_SEQ_CLEANUP;
	cb_arg->offset = offset;
	cb_arg->r_ctl = r_ctl;
	orig_io_req->cb_arg = cb_arg;

	qedf_cmd_timer_set(fcport->qedf, orig_io_req,
			   QEDF_CLEANUP_TIMEOUT * HZ);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));
	orig_io_req->task_params->sqe = sqe;

	init_initiator_sequence_recovery_fcoe_task(orig_io_req->task_params,
						   offset);
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);
}

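/*
 * Completion handler for a sequence cleanup task. If the cleanup completed
 * (rather than timing out), cancel the cleanup timer and follow up with an
 * SRR using the offset and R_CTL that were stashed in the callback argument.
 */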
void qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf,
	struct fcoe_cqe *cqe, struct qedf_ioreq *io_req)
{
	int rc;
	struct qedf_els_cb_arg *cb_arg;

	cb_arg = io_req->cb_arg;

	/* If we timed out just free resources */
	if (io_req->event == QEDF_IOREQ_EV_ELS_TMO || !cqe)
		goto free;

	/* Kill the timer we put on the request */
	cancel_delayed_work_sync(&io_req->timeout_work);

	rc = qedf_send_srr(io_req, cb_arg->offset, cb_arg->r_ctl);
	if (rc)
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to send SRR, I/O will "
			 "abort, xid=0x%x.\n", io_req->xid);
free:
	kfree(cb_arg);
	kref_put(&io_req->refcount, qedf_release_cmd);
}

static bool qedf_requeue_io_req(struct qedf_ioreq *orig_io_req)
{
	struct qedf_rport *fcport;
	struct qedf_ioreq *new_io_req;
	unsigned long flags;
	bool rc = false;

	fcport = orig_io_req->fcport;
	if (!fcport) {
		QEDF_ERR(NULL, "fcport is NULL.\n");
		goto out;
	}

	if (!orig_io_req->sc_cmd) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "sc_cmd is NULL for "
			 "xid=0x%x.\n", orig_io_req->xid);
		goto out;
	}

	new_io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
	if (!new_io_req) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "Could not allocate new "
			 "io_req.\n");
		goto out;
	}

	new_io_req->sc_cmd = orig_io_req->sc_cmd;

	/*
	 * This keeps the sc_cmd struct from being returned to the tape
	 * driver and being requeued twice. We do need to put a reference
	 * for the original I/O request since we will not do a SCSI completion
	 * for it.
	 */
	orig_io_req->sc_cmd = NULL;
	kref_put(&orig_io_req->refcount, qedf_release_cmd);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	/* kref for new command released in qedf_post_io_req on error */
	if (qedf_post_io_req(fcport, new_io_req)) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to post io_req\n");
		/* Return SQE to pool */
		atomic_inc(&fcport->free_sqes);
	} else {
		QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
			  "Reissued SCSI command from orig_xid=0x%x on "
			  "new_xid=0x%x.\n", orig_io_req->xid, new_io_req->xid);
		/*
		 * Abort the original I/O but do not return SCSI command as
		 * it has been reissued on another OX_ID.
		 */
		spin_unlock_irqrestore(&fcport->rport_lock, flags);
		qedf_initiate_abts(orig_io_req, false);
		goto out;
	}

	spin_unlock_irqrestore(&fcport->rport_lock, flags);
out:
	return rc;
}

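/*
 * Completion callback for a REC ELS request. An LS_RJT indicating that the
 * target has no record of the exchange causes the command to be reissued on
 * a new exchange; an LS_ACC is used to decide, based on the reported relative
 * offset and E_STAT, whether to send an SRR or a sequence cleanup task to
 * recover the exchange.
 */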
static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *rec_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *mp_fc_hdr, *fh;
	struct fc_frame *fp;
	void *resp_buf, *fc_payload;
	u32 resp_len;
	struct fc_lport *lport;
	struct qedf_ctx *qedf;
	int refcount;
	enum fc_rctl r_ctl;
	struct fc_els_ls_rjt *rjt;
	struct fc_els_rec_acc *acc;
	u8 opcode;
	u32 offset, e_stat;
	struct scsi_cmnd *sc_cmd;
	bool srr_needed = false;

	rec_req = cb_arg->io_req;
	qedf = rec_req->fcport->qedf;
	lport = qedf->lport;

	orig_io_req = cb_arg->aborted_io_req;

	if (!orig_io_req)
		goto out_free;

	if (rec_req->event != QEDF_IOREQ_EV_ELS_TMO &&
	    rec_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
		cancel_delayed_work_sync(&orig_io_req->timeout_work);

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p,"
		  " orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n",
		  orig_io_req, orig_io_req->xid, rec_req->xid, refcount);

	/* If a REC times out, free resources */
	if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO)
		goto out_put;

	/* Normalize response data into struct fc_frame */
	mp_req = &(rec_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	acc = resp_buf = mp_req->resp_buf;

	fp = fc_frame_alloc(lport, resp_len);
	if (!fp) {
		QEDF_ERR(&(qedf->dbg_ctx),
			 "fc_frame_alloc failure.\n");
		goto out_put;
	}

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	opcode = fc_frame_payload_op(fp);
	if (opcode == ELS_LS_RJT) {
		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			  "Received LS_RJT for REC: er_reason=0x%x, "
			  "er_explan=0x%x.\n", rjt->er_reason, rjt->er_explan);
		/*
		 * The following response(s) mean that we need to reissue the
		 * request on another exchange. We need to do this without
		 * informing the upper layers lest it cause an application
		 * error.
		 */
		if ((rjt->er_reason == ELS_RJT_LOGIC ||
		    rjt->er_reason == ELS_RJT_UNAB) &&
		    rjt->er_explan == ELS_EXPL_OXID_RXID) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				  "Handle CMD LOST case.\n");
			qedf_requeue_io_req(orig_io_req);
		}
	} else if (opcode == ELS_LS_ACC) {
		offset = ntohl(acc->reca_fc4value);
		e_stat = ntohl(acc->reca_e_stat);
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			  "Received LS_ACC for REC: offset=0x%x, e_stat=0x%x.\n",
			  offset, e_stat);
		if (e_stat & ESB_ST_SEQ_INIT) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				  "Target has the seq init\n");
			goto out_free_frame;
		}
		sc_cmd = orig_io_req->sc_cmd;
		if (!sc_cmd) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				  "sc_cmd is NULL for xid=0x%x.\n",
				  orig_io_req->xid);
			goto out_free_frame;
		}
		/* SCSI write case */
		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
			if (offset == orig_io_req->data_xfer_len) {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
					  "WRITE - response lost.\n");
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				srr_needed = true;
				offset = 0;
			} else {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
					  "WRITE - XFER_RDY/DATA lost.\n");
				r_ctl = FC_RCTL_DD_DATA_DESC;
				/* Use data from warning CQE instead of REC */
				offset = orig_io_req->tx_buf_off;
			}
		/* SCSI read case */
		} else {
			if (orig_io_req->rx_buf_off ==
			    orig_io_req->data_xfer_len) {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
					  "READ - response lost.\n");
				srr_needed = true;
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				offset = 0;
			} else {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
					  "READ - DATA lost.\n");
				/*
				 * For the read case we always set the offset
				 * to 0 for the sequence recovery task.
				 */
				offset = 0;
				r_ctl = FC_RCTL_DD_SOL_DATA;
			}
		}

		if (srr_needed)
			qedf_send_srr(orig_io_req, offset, r_ctl);
		else
			qedf_initiate_seq_cleanup(orig_io_req, offset, r_ctl);
	}

out_free_frame:
	fc_frame_free(fp);
out_put:
	/* Put reference for original command since REC completed */
	kref_put(&orig_io_req->refcount, qedf_release_cmd);
out_free:
	kfree(cb_arg);
}

/* Assumes kref is already held by caller */
int qedf_send_rec(struct qedf_ioreq *orig_io_req)
{
	struct fc_els_rec rec;
	struct qedf_rport *fcport;
	struct fc_lport *lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	struct qedf_ctx *qedf;
	uint32_t sid;
	uint32_t r_a_tov;
	int rc;

	if (!orig_io_req) {
		QEDF_ERR(NULL, "orig_io_req is NULL.\n");
		return -EINVAL;
	}

	fcport = orig_io_req->fcport;

	/* Check that fcport is still offloaded */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
		return -EINVAL;
	}

	if (!fcport->qedf) {
		QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
		return -EINVAL;
	}

	/* Take reference until REC command completion */
	kref_get(&orig_io_req->refcount);

	qedf = fcport->qedf;
	lport = qedf->lport;
	sid = fcport->sid;
	r_a_tov = lport->r_a_tov;

	memset(&rec, 0, sizeof(rec));

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
			 "REC\n");
		rc = -ENOMEM;
		goto rec_err;
	}

	cb_arg->aborted_io_req = orig_io_req;

	rec.rec_cmd = ELS_REC;
	hton24(rec.rec_s_id, sid);
	rec.rec_ox_id = htons(orig_io_req->xid);
	rec.rec_rx_id =
	    htons(orig_io_req->task->tstorm_st_context.read_write.rx_id);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending REC orig_io=%p, "
		  "orig_xid=0x%x rx_id=0x%x\n", orig_io_req,
		  orig_io_req->xid, rec.rec_rx_id);
	rc = qedf_initiate_els(fcport, ELS_REC, &rec, sizeof(rec),
	    qedf_rec_compl, cb_arg, r_a_tov);

rec_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "REC failed - release orig_io_req"
			 "=0x%x\n", orig_io_req->xid);
		kfree(cb_arg);
		kref_put(&orig_io_req->refcount, qedf_release_cmd);
	}
	return rc;
}