/*
 *  QLogic FCoE Offload Driver
 *  Copyright (c) 2016-2017 Cavium Inc.
 *
 *  This software is available under the terms of the GNU General Public License
 *  (GPL) Version 2, available from the file COPYING in the main directory of
 *  this source tree.
 */
#include "qedf.h"

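/*
 * This file implements the driver's ELS (Extended Link Service) path:
 * building middle-path requests (RRQ, SRR, REC, ADISC, sequence cleanup),
 * posting them to the firmware through the per-rport send queue, and
 * parsing the completions that come back through the CQ.
 */
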
/* It's assumed that the lock is held when calling this function. */
static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
	void *data, uint32_t data_len,
	void (*cb_func)(struct qedf_els_cb_arg *cb_arg),
	struct qedf_els_cb_arg *cb_arg, uint32_t timer_msec)
{
	struct qedf_ctx *qedf = fcport->qedf;
	struct fc_lport *lport = qedf->lport;
	struct qedf_ioreq *els_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	struct e4_fcoe_task_context *task;
	int rc = 0;
	uint32_t did, sid;
	uint16_t xid;
	uint32_t start_time = jiffies / HZ;
	uint32_t current_time;
	struct fcoe_wqe *sqe;
	unsigned long flags;
	u16 sqe_idx;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending ELS\n");

	rc = fc_remote_port_chkready(fcport->rport);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: rport not ready\n", op);
		rc = -EAGAIN;
		goto els_err;
	}
	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: link is not ready\n",
			  op);
		rc = -EAGAIN;
		goto els_err;
	}

	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: fcport not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}

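	/*
	 * ELS requests draw from the driver's shared command pool, so the
	 * allocation can fail transiently under load.  Retry every 20ms for
	 * up to roughly 10 seconds (measured in jiffies / HZ) before giving
	 * up with -ENOMEM.
	 */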
retry_els:
	els_req = qedf_alloc_cmd(fcport, QEDF_ELS);
	if (!els_req) {
		current_time = jiffies / HZ;
		if ((current_time - start_time) > 10) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				   "els: Failed els 0x%x\n", op);
			rc = -ENOMEM;
			goto els_err;
		}
		/*
		 * Wait 20ms between retries; the previous
		 * mdelay(20 * USEC_PER_MSEC) busy-waited for 20 seconds
		 * per attempt.
		 */
		mdelay(20);
		goto retry_els;
	}

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "initiate_els els_req = "
		   "0x%p cb_arg = %p xid = %x\n", els_req, cb_arg,
		   els_req->xid);
	els_req->sc_cmd = NULL;
	els_req->cmd_type = QEDF_ELS;
	els_req->fcport = fcport;
	els_req->cb_func = cb_func;
	cb_arg->io_req = els_req;
	cb_arg->op = op;
	els_req->cb_arg = cb_arg;
	els_req->data_xfer_len = data_len;

	/* Record which cpu this request is associated with */
	els_req->cpu = smp_processor_id();

	mp_req = (struct qedf_mp_req *)&(els_req->mp_req);
	rc = qedf_init_mp_req(els_req);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "ELS MP request init failed\n");
		kref_put(&els_req->refcount, qedf_release_cmd);
		goto els_err;
	}

	/* Fill ELS Payload */
	if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
		memcpy(mp_req->req_buf, data, data_len);
	} else {
		QEDF_ERR(&(qedf->dbg_ctx), "Invalid ELS op 0x%x\n", op);
		els_req->cb_func = NULL;
		els_req->cb_arg = NULL;
		kref_put(&els_req->refcount, qedf_release_cmd);
		rc = -EINVAL;
	}

	if (rc)
		goto els_err;

	/* Fill FC header */
	fc_hdr = &(mp_req->req_fc_hdr);

	did = fcport->rdata->ids.port_id;
	sid = fcport->sid;

	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
			   FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
			   FC_FC_SEQ_INIT, 0);

	/* Obtain exchange id */
	xid = els_req->xid;

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));

	/* Initialize task context for this IO request */
	task = qedf_get_task_mem(&qedf->tasks, xid);
	qedf_init_mp_task(els_req, task, sqe);

	/* Put timer on original I/O request */
	if (timer_msec)
		qedf_cmd_timer_set(qedf, els_req, timer_msec);

	/* Ring doorbell */
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Ringing doorbell for ELS "
		   "req\n");
	qedf_ring_doorbell(fcport);
	spin_unlock_irqrestore(&fcport->rport_lock, flags);
els_err:
	return rc;
}

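/*
 * Called from CQE processing when the firmware completes a middle-path ELS
 * request: stop the ELS timer, record the response length from the CQE and
 * hand the response to the command-specific callback.
 */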
void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *els_req)
{
	struct e4_fcoe_task_context *task_ctx;
	struct scsi_cmnd *sc_cmd;
	uint16_t xid;
	struct fcoe_cqe_midpath_info *mp_info;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered with xid = 0x%x"
		   " cmd_type = %d.\n", els_req->xid, els_req->cmd_type);

	/* Kill the ELS timer */
	cancel_delayed_work(&els_req->timeout_work);

	xid = els_req->xid;
	task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
	sc_cmd = els_req->sc_cmd;

	/* Get ELS response length from CQE */
	mp_info = &cqe->cqe_info.midpath_info;
	els_req->mp_req.resp_len = mp_info->data_placement_size;

	/* Parse ELS response */
	if ((els_req->cb_func) && (els_req->cb_arg)) {
		els_req->cb_func(els_req->cb_arg);
		els_req->cb_arg = NULL;
	}

	kref_put(&els_req->refcount, qedf_release_cmd);
}

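/*
 * Completion handler for RRQ.  Whatever the outcome, the RRQ exchange is
 * done with the aborted I/O, so drop the reference that was keeping the
 * original io_req out of the command pool.
 */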
static void qedf_rrq_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *rrq_req;
	struct qedf_ctx *qedf;
	int refcount;

	rrq_req = cb_arg->io_req;
	qedf = rrq_req->fcport->qedf;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered.\n");

	orig_io_req = cb_arg->aborted_io_req;

	if (!orig_io_req)
		goto out_free;

	if (rrq_req->event != QEDF_IOREQ_EV_ELS_TMO &&
	    rrq_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
		cancel_delayed_work_sync(&orig_io_req->timeout_work);

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "rrq_compl: orig io = %p,"
		   " orig xid = 0x%x, rrq_xid = 0x%x, refcount=%d\n",
		   orig_io_req, orig_io_req->xid, rrq_req->xid, refcount);

	/* This should return the aborted io_req to the command pool */
	kref_put(&orig_io_req->refcount, qedf_release_cmd);

out_free:
	kfree(cb_arg);
}

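/*
 * Send a Reinstate Recovery Qualifier (RRQ) ELS for an exchange that was
 * aborted with ABTS.  Per FC-LS the recovery qualifier must be released
 * before the OX_ID/RX_ID pair can be reused, so the original io_req is
 * held until qedf_rrq_compl() runs.
 */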
/* Assumes kref is already held by caller */
int qedf_send_rrq(struct qedf_ioreq *aborted_io_req)
{
	struct fc_els_rrq rrq;
	struct qedf_rport *fcport;
	struct fc_lport *lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	struct qedf_ctx *qedf;
	uint32_t sid;
	uint32_t r_a_tov;
	int rc;

	if (!aborted_io_req) {
		QEDF_ERR(NULL, "aborted_io_req is NULL.\n");
		return -EINVAL;
	}

	fcport = aborted_io_req->fcport;

	/* Check that fcport is still offloaded */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
		return -EINVAL;
	}

	if (!fcport->qedf) {
		QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
		return -EINVAL;
	}

	qedf = fcport->qedf;
	lport = qedf->lport;
	sid = fcport->sid;
	r_a_tov = lport->r_a_tov;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending RRQ orig "
		   "io = %p, orig_xid = 0x%x\n", aborted_io_req,
		   aborted_io_req->xid);
	memset(&rrq, 0, sizeof(rrq));

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
			  "RRQ\n");
		rc = -ENOMEM;
		goto rrq_err;
	}

	cb_arg->aborted_io_req = aborted_io_req;

	rrq.rrq_cmd = ELS_RRQ;
	hton24(rrq.rrq_s_id, sid);
	rrq.rrq_ox_id = htons(aborted_io_req->xid);
	rrq.rrq_rx_id =
	    htons(aborted_io_req->task->tstorm_st_context.read_write.rx_id);

	rc = qedf_initiate_els(fcport, ELS_RRQ, &rrq, sizeof(rrq),
	    qedf_rrq_compl, cb_arg, r_a_tov);

rrq_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "RRQ failed - release orig io "
			  "req 0x%x\n", aborted_io_req->xid);
		kfree(cb_arg);
		kref_put(&aborted_io_req->refcount, qedf_release_cmd);
	}
	return rc;
}

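/*
 * Build a pseudo received frame from a firmware middle-path response and
 * hand it to libfc via fc_exch_recv() so the stack sees a normal ELS reply
 * on the exchange (OX_ID) it originally assigned.
 */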
static void qedf_process_l2_frame_compl(struct qedf_rport *fcport,
					struct fc_frame *fp,
					u16 l2_oxid)
{
	struct fc_lport *lport = fcport->qedf->lport;
	struct fc_frame_header *fh;
	u32 crc;

	fh = (struct fc_frame_header *)fc_frame_header_get(fp);

	/* Set the OXID we return to what libfc used */
	if (l2_oxid != FC_XID_UNKNOWN)
		fh->fh_ox_id = htons(l2_oxid);

	/* Setup header fields */
	fh->fh_r_ctl = FC_RCTL_ELS_REP;
	fh->fh_type = FC_TYPE_ELS;
	/* Last sequence, end sequence */
	fh->fh_f_ctl[0] = 0x98;
	hton24(fh->fh_d_id, lport->port_id);
	hton24(fh->fh_s_id, fcport->rdata->ids.port_id);
	fh->fh_rx_id = 0xffff;

	/* Set frame attributes */
	crc = fcoe_fc_crc(fp);
	fc_frame_init(fp);
	fr_dev(fp) = lport;
	fr_sof(fp) = FC_SOF_I3;
	fr_eof(fp) = FC_EOF_T;
	fr_crc(fp) = cpu_to_le32(~crc);

	/* Send completed request to libfc */
	fc_exch_recv(lport, fp);
}

/*
 * In instances where an ELS command times out we may need to restart the
 * rport by logging out and then logging back in.
 */
void qedf_restart_rport(struct qedf_rport *fcport)
{
	struct fc_lport *lport;
	struct fc_rport_priv *rdata;
	u32 port_id;

	if (!fcport)
		return;

	rdata = fcport->rdata;
	if (rdata) {
		lport = fcport->qedf->lport;
		port_id = rdata->ids.port_id;
		QEDF_ERR(&(fcport->qedf->dbg_ctx),
		    "LOGO port_id=%x.\n", port_id);
		fc_rport_logoff(rdata);
		/* Recreate the rport and log back in */
		rdata = fc_rport_create(lport, port_id);
		if (rdata)
			fc_rport_login(rdata);
	}
}

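/*
 * Completion handler for ELS requests whose response must be returned to
 * libfc (e.g. ADISC).  Copies the firmware response into a freshly
 * allocated fc_frame and completes it back to libfc.
 */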
static void qedf_l2_els_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *els_req;
	struct qedf_rport *fcport;
	struct qedf_mp_req *mp_req;
	struct fc_frame *fp;
	struct fc_frame_header *fh, *mp_fc_hdr;
	void *resp_buf, *fc_payload;
	u32 resp_len;
	u16 l2_oxid;

	l2_oxid = cb_arg->l2_oxid;
	els_req = cb_arg->io_req;

	if (!els_req) {
		QEDF_ERR(NULL, "els_req is NULL.\n");
		goto free_arg;
	}

	/*
	 * If we are flushing the command just free the cb_arg as none of the
	 * response data will be valid.
	 */
	if (els_req->event == QEDF_IOREQ_EV_ELS_FLUSH)
		goto free_arg;

	fcport = els_req->fcport;
	mp_req = &(els_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	/*
	 * If a middle path ELS command times out, don't try to return
	 * the command but rather do any internal cleanup and then let libfc
	 * time out the command and clean up its internal resources.
	 */
	if (els_req->event == QEDF_IOREQ_EV_ELS_TMO) {
		/*
		 * If ADISC times out, libfc will timeout the exchange and then
		 * try to send a PLOGI which will timeout since the session is
		 * still offloaded.  Force libfc to log out the session, which
		 * will upload the offloaded connection and allow the PLOGI
		 * response to flow over the LL2 path.
		 */
		if (cb_arg->op == ELS_ADISC)
			qedf_restart_rport(fcport);
		/* Still free cb_arg; a bare return here would leak it */
		goto free_arg;
	}

	if (sizeof(struct fc_frame_header) + resp_len > QEDF_PAGE_SIZE) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "resp_len is "
		   "beyond page size.\n");
		goto free_arg;
	}

	fp = fc_frame_alloc(fcport->qedf->lport, resp_len);
	if (!fp) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx),
		    "fc_frame_alloc failure.\n");
		goto free_arg;
	}

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
	    "Completing OX_ID 0x%x back to libfc.\n", l2_oxid);
	qedf_process_l2_frame_compl(fcport, fp, l2_oxid);

free_arg:
	kfree(cb_arg);
}

int qedf_send_adisc(struct qedf_rport *fcport, struct fc_frame *fp)
{
	struct fc_els_adisc *adisc;
	struct fc_frame_header *fh;
	struct fc_lport *lport = fcport->qedf->lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	struct qedf_ctx *qedf;
	uint32_t r_a_tov = lport->r_a_tov;
	int rc;

	qedf = fcport->qedf;
	fh = fc_frame_header_get(fp);

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
			  "ADISC\n");
		rc = -ENOMEM;
		goto adisc_err;
	}
	cb_arg->l2_oxid = ntohs(fh->fh_ox_id);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
	    "Sending ADISC ox_id=0x%x.\n", cb_arg->l2_oxid);

	adisc = fc_frame_payload_get(fp, sizeof(*adisc));

	rc = qedf_initiate_els(fcport, ELS_ADISC, adisc, sizeof(*adisc),
	    qedf_l2_els_compl, cb_arg, r_a_tov);

adisc_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "ADISC failed.\n");
		kfree(cb_arg);
	}
	return rc;
}

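/*
 * Completion handler for SRR.  An accept means the target will retransmit
 * the lost sequence; a reject means sequence level recovery is not
 * possible, so the original I/O is aborted.
 */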
static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *srr_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *mp_fc_hdr, *fh;
	struct fc_frame *fp;
	void *resp_buf, *fc_payload;
	u32 resp_len;
	struct fc_lport *lport;
	struct qedf_ctx *qedf;
	int refcount;
	u8 opcode;

	srr_req = cb_arg->io_req;
	qedf = srr_req->fcport->qedf;
	lport = qedf->lport;

	orig_io_req = cb_arg->aborted_io_req;

	if (!orig_io_req)
		goto out_free;

	clear_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);

	if (srr_req->event != QEDF_IOREQ_EV_ELS_TMO &&
	    srr_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
		cancel_delayed_work_sync(&orig_io_req->timeout_work);

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p,"
		   " orig_io_xid=0x%x, srr_xid=0x%x, refcount=%d\n",
		   orig_io_req, orig_io_req->xid, srr_req->xid, refcount);

	/* If an SRR times out, simply free resources */
	if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO)
		goto out_put;

	/* Normalize response data into struct fc_frame */
	mp_req = &(srr_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	fp = fc_frame_alloc(lport, resp_len);
	if (!fp) {
		QEDF_ERR(&(qedf->dbg_ctx),
		    "fc_frame_alloc failure.\n");
		goto out_put;
	}

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	opcode = fc_frame_payload_op(fp);
	switch (opcode) {
	case ELS_LS_ACC:
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		    "SRR success.\n");
		break;
	case ELS_LS_RJT:
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
		    "SRR rejected.\n");
		qedf_initiate_abts(orig_io_req, true);
		break;
	}

	fc_frame_free(fp);
out_put:
	/* Put reference for original command since SRR completed */
	kref_put(&orig_io_req->refcount, qedf_release_cmd);
out_free:
	kfree(cb_arg);
}

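/*
 * Send a Sequence Retransmission Request (SRR), the FCP link service used
 * once REC determines that a data or status sequence was lost.  r_ctl
 * selects what the target should retransmit and offset is the relative
 * offset to resume from.
 */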
static int qedf_send_srr(struct qedf_ioreq *orig_io_req, u32 offset, u8 r_ctl)
{
	struct fcp_srr srr;
	struct qedf_ctx *qedf;
	struct qedf_rport *fcport;
	struct fc_lport *lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	u32 sid, r_a_tov;
	int rc;

	if (!orig_io_req) {
		QEDF_ERR(NULL, "orig_io_req is NULL.\n");
		return -EINVAL;
	}

	fcport = orig_io_req->fcport;

	/* Check that fcport is still offloaded */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
		return -EINVAL;
	}

	if (!fcport->qedf) {
		QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
		return -EINVAL;
	}

	/* Take reference until SRR command completion */
	kref_get(&orig_io_req->refcount);

	qedf = fcport->qedf;
	lport = qedf->lport;
	sid = fcport->sid;
	r_a_tov = lport->r_a_tov;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending SRR orig_io=%p, "
		   "orig_xid=0x%x\n", orig_io_req, orig_io_req->xid);
	memset(&srr, 0, sizeof(srr));

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
			  "SRR\n");
		rc = -ENOMEM;
		goto srr_err;
	}

	cb_arg->aborted_io_req = orig_io_req;

	srr.srr_op = ELS_SRR;
	srr.srr_ox_id = htons(orig_io_req->xid);
	srr.srr_rx_id = htons(orig_io_req->rx_id);
	srr.srr_rel_off = htonl(offset);
	srr.srr_r_ctl = r_ctl;

	rc = qedf_initiate_els(fcport, ELS_SRR, &srr, sizeof(srr),
	    qedf_srr_compl, cb_arg, r_a_tov);

srr_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "SRR failed - release orig_io_req"
			  "=0x%x\n", orig_io_req->xid);
		kfree(cb_arg);
		/* If we fail to queue SRR, send ABTS to orig_io */
		qedf_initiate_abts(orig_io_req, true);
		kref_put(&orig_io_req->refcount, qedf_release_cmd);
	} else {
		/* Tell other threads that SRR is in progress */
		set_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);
	}

	return rc;
}

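/*
 * Post a firmware sequence-recovery task for the original exchange.  The
 * SRR itself is sent from qedf_process_seq_cleanup_compl() once the
 * firmware has finished cleaning up the sequence state.
 */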
static void qedf_initiate_seq_cleanup(struct qedf_ioreq *orig_io_req,
	u32 offset, u8 r_ctl)
{
	struct qedf_rport *fcport;
	unsigned long flags;
	struct qedf_els_cb_arg *cb_arg;
	struct fcoe_wqe *sqe;
	u16 sqe_idx;

	fcport = orig_io_req->fcport;

	QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
	    "Doing sequence cleanup for xid=0x%x offset=%u.\n",
	    orig_io_req->xid, offset);

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to allocate cb_arg "
			  "for sequence cleanup\n");
		return;
	}

	/* Get reference for cleanup request */
	kref_get(&orig_io_req->refcount);

	orig_io_req->cmd_type = QEDF_SEQ_CLEANUP;
	cb_arg->offset = offset;
	cb_arg->r_ctl = r_ctl;
	orig_io_req->cb_arg = cb_arg;

	qedf_cmd_timer_set(fcport->qedf, orig_io_req,
	    QEDF_CLEANUP_TIMEOUT * HZ);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));
	orig_io_req->task_params->sqe = sqe;

	init_initiator_sequence_recovery_fcoe_task(orig_io_req->task_params,
						   offset);
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);
}

void qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf,
	struct fcoe_cqe *cqe, struct qedf_ioreq *io_req)
{
	int rc;
	struct qedf_els_cb_arg *cb_arg;

	cb_arg = io_req->cb_arg;

	/* If we timed out just free resources */
	if (io_req->event == QEDF_IOREQ_EV_ELS_TMO || !cqe)
		goto free;

	/* Kill the timer we put on the request */
	cancel_delayed_work_sync(&io_req->timeout_work);

	rc = qedf_send_srr(io_req, cb_arg->offset, cb_arg->r_ctl);
	if (rc)
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to send SRR, I/O will "
		    "abort, xid=0x%x.\n", io_req->xid);
free:
	kfree(cb_arg);
	kref_put(&io_req->refcount, qedf_release_cmd);
}

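/*
 * Reissue a SCSI command on a new exchange when REC indicates the target
 * never saw the original command (the "command lost" case of sequence
 * level error recovery).
 */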
static bool qedf_requeue_io_req(struct qedf_ioreq *orig_io_req)
{
	struct qedf_rport *fcport;
	struct qedf_ioreq *new_io_req;
	unsigned long flags;
	bool rc = false;

	fcport = orig_io_req->fcport;
	if (!fcport) {
		QEDF_ERR(NULL, "fcport is NULL.\n");
		goto out;
	}

	if (!orig_io_req->sc_cmd) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "sc_cmd is NULL for "
		    "xid=0x%x.\n", orig_io_req->xid);
		goto out;
	}

	new_io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
	if (!new_io_req) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "Could not allocate new "
		    "io_req.\n");
		goto out;
	}

	new_io_req->sc_cmd = orig_io_req->sc_cmd;

	/*
	 * This keeps the sc_cmd struct from being returned to the tape
	 * driver and being requeued twice. We do need to put a reference
	 * for the original I/O request since we will not do a SCSI completion
	 * for it.
	 */
	orig_io_req->sc_cmd = NULL;
	kref_put(&orig_io_req->refcount, qedf_release_cmd);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	/* kref for new command released in qedf_post_io_req on error */
	if (qedf_post_io_req(fcport, new_io_req)) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to post io_req\n");
		/* Return SQE to pool */
		atomic_inc(&fcport->free_sqes);
	} else {
		QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
		    "Reissued SCSI command from orig_xid=0x%x on "
		    "new_xid=0x%x.\n", orig_io_req->xid, new_io_req->xid);
		/*
		 * Abort the original I/O but do not return SCSI command as
		 * it has been reissued on another OX_ID.
		 */
		spin_unlock_irqrestore(&fcport->rport_lock, flags);
		qedf_initiate_abts(orig_io_req, false);
		/* The command was successfully reissued */
		rc = true;
		goto out;
	}

	spin_unlock_irqrestore(&fcport->rport_lock, flags);
out:
	return rc;
}

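/*
 * Completion handler for REC (Read Exchange Concise).  The accept payload
 * reports how far the exchange progressed (e_stat and the byte offset), so
 * we can decide whether to reissue the command, send an SRR, or just clean
 * up the sequence and let the exchange continue.
 */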
static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *rec_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *mp_fc_hdr, *fh;
	struct fc_frame *fp;
	void *resp_buf, *fc_payload;
	u32 resp_len;
	struct fc_lport *lport;
	struct qedf_ctx *qedf;
	int refcount;
	enum fc_rctl r_ctl;
	struct fc_els_ls_rjt *rjt;
	struct fc_els_rec_acc *acc;
	u8 opcode;
	u32 offset, e_stat;
	struct scsi_cmnd *sc_cmd;
	bool srr_needed = false;

	rec_req = cb_arg->io_req;
	qedf = rec_req->fcport->qedf;
	lport = qedf->lport;

	orig_io_req = cb_arg->aborted_io_req;

	if (!orig_io_req)
		goto out_free;

	if (rec_req->event != QEDF_IOREQ_EV_ELS_TMO &&
	    rec_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
		cancel_delayed_work_sync(&orig_io_req->timeout_work);

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p,"
		   " orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n",
		   orig_io_req, orig_io_req->xid, rec_req->xid, refcount);

	/* If a REC times out, free resources */
	if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO)
		goto out_put;

	/* Normalize response data into struct fc_frame */
	mp_req = &(rec_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	acc = resp_buf = mp_req->resp_buf;

	fp = fc_frame_alloc(lport, resp_len);
	if (!fp) {
		QEDF_ERR(&(qedf->dbg_ctx),
		    "fc_frame_alloc failure.\n");
		goto out_put;
	}

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	opcode = fc_frame_payload_op(fp);
	if (opcode == ELS_LS_RJT) {
		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		    "Received LS_RJT for REC: er_reason=0x%x, "
		    "er_explan=0x%x.\n", rjt->er_reason, rjt->er_explan);
		/*
		 * The following response(s) mean that we need to reissue the
		 * request on another exchange.  We need to do this without
		 * informing the upper layers lest it cause an application
		 * error.
		 */
		if ((rjt->er_reason == ELS_RJT_LOGIC ||
		    rjt->er_reason == ELS_RJT_UNAB) &&
		    rjt->er_explan == ELS_EXPL_OXID_RXID) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			    "Handle CMD LOST case.\n");
			qedf_requeue_io_req(orig_io_req);
		}
	} else if (opcode == ELS_LS_ACC) {
		offset = ntohl(acc->reca_fc4value);
		e_stat = ntohl(acc->reca_e_stat);
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		    "Received LS_ACC for REC: offset=0x%x, e_stat=0x%x.\n",
		    offset, e_stat);
		if (e_stat & ESB_ST_SEQ_INIT) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			    "Target has the seq init\n");
			goto out_free_frame;
		}
		sc_cmd = orig_io_req->sc_cmd;
		if (!sc_cmd) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			    "sc_cmd is NULL for xid=0x%x.\n",
			    orig_io_req->xid);
			goto out_free_frame;
		}
		/* SCSI write case */
		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
			if (offset == orig_io_req->data_xfer_len) {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "WRITE - response lost.\n");
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				srr_needed = true;
				offset = 0;
			} else {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "WRITE - XFER_RDY/DATA lost.\n");
				r_ctl = FC_RCTL_DD_DATA_DESC;
				/* Use data from warning CQE instead of REC */
				offset = orig_io_req->tx_buf_off;
			}
		/* SCSI read case */
		} else {
			if (orig_io_req->rx_buf_off ==
			    orig_io_req->data_xfer_len) {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "READ - response lost.\n");
				srr_needed = true;
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				offset = 0;
			} else {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "READ - DATA lost.\n");
				/*
				 * For read case we always set the offset to 0
				 * for sequence recovery task.
				 */
				offset = 0;
				r_ctl = FC_RCTL_DD_SOL_DATA;
			}
		}

		if (srr_needed)
			qedf_send_srr(orig_io_req, offset, r_ctl);
		else
			qedf_initiate_seq_cleanup(orig_io_req, offset, r_ctl);
	}

out_free_frame:
	fc_frame_free(fp);
out_put:
	/* Put reference for original command since REC completed */
	kref_put(&orig_io_req->refcount, qedf_release_cmd);
out_free:
	kfree(cb_arg);
}

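/*
 * Send a Read Exchange Concise (REC) ELS to query the target's view of an
 * exchange that encountered an error.  The original io_req is held until
 * qedf_rec_compl() runs.
 */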
/* Assumes kref is already held by caller */
int qedf_send_rec(struct qedf_ioreq *orig_io_req)
{
	struct fc_els_rec rec;
	struct qedf_rport *fcport;
	struct fc_lport *lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	struct qedf_ctx *qedf;
	uint32_t sid;
	uint32_t r_a_tov;
	int rc;

	if (!orig_io_req) {
		QEDF_ERR(NULL, "orig_io_req is NULL.\n");
		return -EINVAL;
	}

	fcport = orig_io_req->fcport;

	/* Check that fcport is still offloaded */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
		return -EINVAL;
	}

	if (!fcport->qedf) {
		QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
		return -EINVAL;
	}

	/* Take reference until REC command completion */
	kref_get(&orig_io_req->refcount);

	qedf = fcport->qedf;
	lport = qedf->lport;
	sid = fcport->sid;
	r_a_tov = lport->r_a_tov;

	memset(&rec, 0, sizeof(rec));

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
			  "REC\n");
		rc = -ENOMEM;
		goto rec_err;
	}

	cb_arg->aborted_io_req = orig_io_req;

	rec.rec_cmd = ELS_REC;
	hton24(rec.rec_s_id, sid);
	rec.rec_ox_id = htons(orig_io_req->xid);
	rec.rec_rx_id =
	    htons(orig_io_req->task->tstorm_st_context.read_write.rx_id);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending REC orig_io=%p, "
	   "orig_xid=0x%x rx_id=0x%x\n", orig_io_req,
	   orig_io_req->xid, rec.rec_rx_id);
	rc = qedf_initiate_els(fcport, ELS_REC, &rec, sizeof(rec),
	    qedf_rec_compl, cb_arg, r_a_tov);

rec_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "REC failed - release orig_io_req"
			  "=0x%x\n", orig_io_req->xid);
		kfree(cb_arg);
		kref_put(&orig_io_req->refcount, qedf_release_cmd);
	}
	return rc;
}
967