/*
 *  QLogic FCoE Offload Driver
 *  Copyright (c) 2016-2018 Cavium Inc.
 *
 *  This software is available under the terms of the GNU General Public License
 *  (GPL) Version 2, available from the file COPYING in the main directory of
 *  this source tree.
 */
#include "qedf.h"

/* It's assumed that the lock is held when calling this function. */
static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
	void *data, uint32_t data_len,
	void (*cb_func)(struct qedf_els_cb_arg *cb_arg),
	struct qedf_els_cb_arg *cb_arg, uint32_t timer_msec)
{
	struct qedf_ctx *qedf;
	struct fc_lport *lport;
	struct qedf_ioreq *els_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	struct e4_fcoe_task_context *task;
	int rc = 0;
	uint32_t did, sid;
	uint16_t xid;
	uint32_t start_time = jiffies / HZ;
	uint32_t current_time;
	struct fcoe_wqe *sqe;
	unsigned long flags;
	u16 sqe_idx;

	if (!fcport) {
		QEDF_ERR(NULL, "fcport is NULL.\n");
		rc = -EINVAL;
		goto els_err;
	}

	qedf = fcport->qedf;
	lport = qedf->lport;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending ELS\n");

	rc = fc_remote_port_chkready(fcport->rport);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: rport not ready\n", op);
		rc = -EAGAIN;
		goto els_err;
	}
	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: link is not ready\n",
			  op);
		rc = -EAGAIN;
		goto els_err;
	}

	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: fcport not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}

retry_els:
	els_req = qedf_alloc_cmd(fcport, QEDF_ELS);
	if (!els_req) {
		current_time = jiffies / HZ;
		if ((current_time - start_time) > 10) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				   "els: Failed els 0x%x\n", op);
			rc = -ENOMEM;
			goto els_err;
		}
		/*
		 * Brief backoff before retrying the allocation. The original
		 * mdelay(20 * USEC_PER_MSEC) busy-waited for 20 seconds,
		 * defeating the 10 second retry budget above; 20 ms is the
		 * evident intent.
		 */
		mdelay(20);
		goto retry_els;
	}

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "initiate_els els_req = "
		   "0x%p cb_arg = %p xid = %x\n", els_req, cb_arg,
		   els_req->xid);
	els_req->sc_cmd = NULL;
	els_req->cmd_type = QEDF_ELS;
	els_req->fcport = fcport;
	els_req->cb_func = cb_func;
	cb_arg->io_req = els_req;
	cb_arg->op = op;
	els_req->cb_arg = cb_arg;
	els_req->data_xfer_len = data_len;

	/* Record which cpu this request is associated with */
	els_req->cpu = smp_processor_id();

	mp_req = &els_req->mp_req;
	rc = qedf_init_mp_req(els_req);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "ELS MP request init failed\n");
		kref_put(&els_req->refcount, qedf_release_cmd);
		goto els_err;
	}

	/* Fill ELS Payload */
	if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
		memcpy(mp_req->req_buf, data, data_len);
	} else {
		QEDF_ERR(&(qedf->dbg_ctx), "Invalid ELS op 0x%x\n", op);
		els_req->cb_func = NULL;
		els_req->cb_arg = NULL;
		kref_put(&els_req->refcount, qedf_release_cmd);
		rc = -EINVAL;
	}

	if (rc)
		goto els_err;

	/* Fill FC header */
	fc_hdr = &(mp_req->req_fc_hdr);

	did = fcport->rdata->ids.port_id;
	sid = fcport->sid;

	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
			   FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
			   FC_FC_SEQ_INIT, 0);

	/* Obtain exchange id */
	xid = els_req->xid;

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));

	/* Initialize task context for this IO request */
	task = qedf_get_task_mem(&qedf->tasks, xid);
	qedf_init_mp_task(els_req, task, sqe);

	/* Arm the timeout on the ELS request */
	if (timer_msec)
		qedf_cmd_timer_set(qedf, els_req, timer_msec);

	/* Ring doorbell */
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Ringing doorbell for ELS "
		   "req\n");
	qedf_ring_doorbell(fcport);
	spin_unlock_irqrestore(&fcport->rport_lock, flags);
els_err:
	return rc;
}

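/*
 * Generic midpath ELS completion handler: pull the response length from the
 * CQE, invoke the per-command callback, and drop the reference taken when
 * the ELS request was issued.
 */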
void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *els_req)
{
	struct fcoe_cqe_midpath_info *mp_info;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered with xid = 0x%x"
		   " cmd_type = %d.\n", els_req->xid, els_req->cmd_type);

	/* Kill the ELS timer */
	cancel_delayed_work(&els_req->timeout_work);

	/* Get ELS response length from CQE */
	mp_info = &cqe->cqe_info.midpath_info;
	els_req->mp_req.resp_len = mp_info->data_placement_size;

	/* Parse ELS response */
	if ((els_req->cb_func) && (els_req->cb_arg)) {
		els_req->cb_func(els_req->cb_arg);
		els_req->cb_arg = NULL;
	}

	kref_put(&els_req->refcount, qedf_release_cmd);
}

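/*
 * Completion handler for an RRQ ELS: drops the reference held on the
 * original (aborted) I/O request so it can be returned to the command pool.
 */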
static void qedf_rrq_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *rrq_req;
	struct qedf_ctx *qedf;
	int refcount;

	rrq_req = cb_arg->io_req;
	qedf = rrq_req->fcport->qedf;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered.\n");

	orig_io_req = cb_arg->aborted_io_req;

	if (!orig_io_req)
		goto out_free;

	if (rrq_req->event != QEDF_IOREQ_EV_ELS_TMO &&
	    rrq_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
		cancel_delayed_work_sync(&orig_io_req->timeout_work);

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "rrq_compl: orig io = %p,"
		   " orig xid = 0x%x, rrq_xid = 0x%x, refcount=%d\n",
		   orig_io_req, orig_io_req->xid, rrq_req->xid, refcount);

	/* This should return the aborted io_req to the command pool */
	kref_put(&orig_io_req->refcount, qedf_release_cmd);

out_free:
	/*
	 * Release a reference to the rrq request if we timed out as the
	 * rrq completion handler is called directly from the timeout handler
	 * and not from els_compl where the reference would have normally been
	 * released.
	 */
	if (rrq_req->event == QEDF_IOREQ_EV_ELS_TMO)
		kref_put(&rrq_req->refcount, qedf_release_cmd);
	kfree(cb_arg);
}

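/*
 * Send a Reinstate Recovery Qualifier (RRQ) ELS to notify the remote port
 * that the exchange ID of an aborted I/O may be reused.
 */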
/* Assumes kref is already held by caller */
int qedf_send_rrq(struct qedf_ioreq *aborted_io_req)
{
	struct fc_els_rrq rrq;
	struct qedf_rport *fcport;
	struct fc_lport *lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	struct qedf_ctx *qedf;
	uint32_t sid;
	uint32_t r_a_tov;
	int rc;

	if (!aborted_io_req) {
		QEDF_ERR(NULL, "aborted_io_req is NULL.\n");
		return -EINVAL;
	}

	fcport = aborted_io_req->fcport;

	/*
	 * A NULL fcport would be dereferenced just below; bail out early if
	 * the command has already been detached from its rport.
	 */
	if (!fcport) {
		QEDF_ERR(NULL, "fcport is NULL.\n");
		return -EINVAL;
	}

	/* Check that fcport is still offloaded */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
		return -EINVAL;
	}

	if (!fcport->qedf) {
		QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
		return -EINVAL;
	}

	qedf = fcport->qedf;
	lport = qedf->lport;
	sid = fcport->sid;
	r_a_tov = lport->r_a_tov;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending RRQ orig "
		   "io = %p, orig_xid = 0x%x\n", aborted_io_req,
		   aborted_io_req->xid);
	memset(&rrq, 0, sizeof(rrq));

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
			  "RRQ\n");
		rc = -ENOMEM;
		goto rrq_err;
	}

	cb_arg->aborted_io_req = aborted_io_req;

	rrq.rrq_cmd = ELS_RRQ;
	hton24(rrq.rrq_s_id, sid);
	rrq.rrq_ox_id = htons(aborted_io_req->xid);
	rrq.rrq_rx_id =
	    htons(aborted_io_req->task->tstorm_st_context.read_write.rx_id);

	rc = qedf_initiate_els(fcport, ELS_RRQ, &rrq, sizeof(rrq),
	    qedf_rrq_compl, cb_arg, r_a_tov);

rrq_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "RRQ failed - release orig io "
			  "req 0x%x\n", aborted_io_req->xid);
		kfree(cb_arg);
		kref_put(&aborted_io_req->refcount, qedf_release_cmd);
	}
	return rc;
}

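/*
 * Hand a firmware-completed ELS response back to libfc as if it arrived as
 * an L2 frame: rebuild the FC header, restore the OX_ID libfc assigned, and
 * pass the frame to fc_exch_recv().
 */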
static void qedf_process_l2_frame_compl(struct qedf_rport *fcport,
					struct fc_frame *fp,
					u16 l2_oxid)
{
	struct fc_lport *lport = fcport->qedf->lport;
	struct fc_frame_header *fh;
	u32 crc;

	fh = (struct fc_frame_header *)fc_frame_header_get(fp);

	/* Set the OXID we return to what libfc used */
	if (l2_oxid != FC_XID_UNKNOWN)
		fh->fh_ox_id = htons(l2_oxid);

	/* Setup header fields */
	fh->fh_r_ctl = FC_RCTL_ELS_REP;
	fh->fh_type = FC_TYPE_ELS;
	/* F_CTL: exchange responder, last sequence, end of sequence */
	fh->fh_f_ctl[0] = 0x98;
	hton24(fh->fh_d_id, lport->port_id);
	hton24(fh->fh_s_id, fcport->rdata->ids.port_id);
	fh->fh_rx_id = 0xffff;

	/* Set frame attributes */
	crc = fcoe_fc_crc(fp);
	fc_frame_init(fp);
	fr_dev(fp) = lport;
	fr_sof(fp) = FC_SOF_I3;
	fr_eof(fp) = FC_EOF_T;
	fr_crc(fp) = cpu_to_le32(~crc);

	/* Send completed request to libfc */
	fc_exch_recv(lport, fp);
}

/*
 * In instances where an ELS command times out we may need to restart the
 * rport by logging out and then logging back in.
 */
void qedf_restart_rport(struct qedf_rport *fcport)
{
	struct fc_lport *lport;
	struct fc_rport_priv *rdata;
	u32 port_id;

	if (!fcport)
		return;

	if (test_bit(QEDF_RPORT_IN_RESET, &fcport->flags) ||
	    !test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
	    test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "fcport %p already in reset or not offloaded.\n",
		    fcport);
		return;
	}

	/* Set that we are now in reset */
	set_bit(QEDF_RPORT_IN_RESET, &fcport->flags);

	rdata = fcport->rdata;
	if (rdata) {
		lport = fcport->qedf->lport;
		port_id = rdata->ids.port_id;
		QEDF_ERR(&(fcport->qedf->dbg_ctx),
		    "LOGO port_id=%x.\n", port_id);
		fc_rport_logoff(rdata);
		/* Recreate the rport and log back in */
		rdata = fc_rport_create(lport, port_id);
		if (rdata)
			fc_rport_login(rdata);
	}
	clear_bit(QEDF_RPORT_IN_RESET, &fcport->flags);
}

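/*
 * Completion handler for ELS requests issued on behalf of libfc (such as
 * ADISC): copy the firmware response into a freshly allocated fc_frame and
 * complete it back to libfc under the original L2 OX_ID.
 */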
static void qedf_l2_els_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *els_req;
	struct qedf_rport *fcport;
	struct qedf_mp_req *mp_req;
	struct fc_frame *fp;
	struct fc_frame_header *fh, *mp_fc_hdr;
	void *resp_buf, *fc_payload;
	u32 resp_len;
	u16 l2_oxid;

	l2_oxid = cb_arg->l2_oxid;
	els_req = cb_arg->io_req;

	if (!els_req) {
		QEDF_ERR(NULL, "els_req is NULL.\n");
		goto free_arg;
	}

	/*
	 * If we are flushing the command just free the cb_arg as none of the
	 * response data will be valid.
	 */
	if (els_req->event == QEDF_IOREQ_EV_ELS_FLUSH)
		goto free_arg;

	fcport = els_req->fcport;
	mp_req = &(els_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	/*
	 * If a middle path ELS command times out, don't try to return the
	 * frame to libfc; just do any internal cleanup and let libfc time
	 * out the command and release its internal resources.
	 */
	if (els_req->event == QEDF_IOREQ_EV_ELS_TMO) {
		/*
		 * If ADISC times out, libfc will timeout the exchange and then
		 * try to send a PLOGI which will timeout since the session is
		 * still offloaded.  Force libfc to logout the session which
		 * will upload the connection and allow the PLOGI response to
		 * flow over the LL2 path.
		 */
		if (cb_arg->op == ELS_ADISC)
			qedf_restart_rport(fcport);
		return;
	}

	if (sizeof(struct fc_frame_header) + resp_len > QEDF_PAGE_SIZE) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "resp_len is "
		   "beyond page size.\n");
		goto free_arg;
	}

	fp = fc_frame_alloc(fcport->qedf->lport, resp_len);
	if (!fp) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx),
		    "fc_frame_alloc failure.\n");
		/* Free the cb_arg since no completion will run for it */
		goto free_arg;
	}

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
	    "Completing OX_ID 0x%x back to libfc.\n", l2_oxid);
	qedf_process_l2_frame_compl(fcport, fp, l2_oxid);

free_arg:
	kfree(cb_arg);
}

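/*
 * Offload the ADISC that libfc built in fp, preserving libfc's OX_ID so the
 * response can be completed back through the L2 path.
 */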
int qedf_send_adisc(struct qedf_rport *fcport, struct fc_frame *fp)
{
	struct fc_els_adisc *adisc;
	struct fc_frame_header *fh;
	struct fc_lport *lport = fcport->qedf->lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	struct qedf_ctx *qedf;
	uint32_t r_a_tov = lport->r_a_tov;
	int rc;

	qedf = fcport->qedf;
	fh = fc_frame_header_get(fp);

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
			  "ADISC\n");
		rc = -ENOMEM;
		goto adisc_err;
	}
	cb_arg->l2_oxid = ntohs(fh->fh_ox_id);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
	    "Sending ADISC ox_id=0x%x.\n", cb_arg->l2_oxid);

	adisc = fc_frame_payload_get(fp, sizeof(*adisc));

	rc = qedf_initiate_els(fcport, ELS_ADISC, adisc, sizeof(*adisc),
	    qedf_l2_els_compl, cb_arg, r_a_tov);

adisc_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "ADISC failed.\n");
		kfree(cb_arg);
	}
	return rc;
}

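/*
 * SRR completion handler: LS_ACC means the target will retransmit the lost
 * sequence and nothing more is needed; LS_RJT causes the original I/O to be
 * aborted. A timed-out SRR simply releases its resources.
 */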
static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *srr_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *mp_fc_hdr, *fh;
	struct fc_frame *fp;
	void *resp_buf, *fc_payload;
	u32 resp_len;
	struct fc_lport *lport;
	struct qedf_ctx *qedf;
	int refcount;
	u8 opcode;

	srr_req = cb_arg->io_req;
	qedf = srr_req->fcport->qedf;
	lport = qedf->lport;

	orig_io_req = cb_arg->aborted_io_req;

	if (!orig_io_req)
		goto out_free;

	clear_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);

	if (srr_req->event != QEDF_IOREQ_EV_ELS_TMO &&
	    srr_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
		cancel_delayed_work_sync(&orig_io_req->timeout_work);

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p,"
		   " orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n",
		   orig_io_req, orig_io_req->xid, srr_req->xid, refcount);

	/* If a SRR times out, simply free resources */
	if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO)
		goto out_put;

	/* Normalize response data into struct fc_frame */
	mp_req = &(srr_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	fp = fc_frame_alloc(lport, resp_len);
	if (!fp) {
		QEDF_ERR(&(qedf->dbg_ctx),
		    "fc_frame_alloc failure.\n");
		goto out_put;
	}

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	opcode = fc_frame_payload_op(fp);
	switch (opcode) {
	case ELS_LS_ACC:
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		    "SRR success.\n");
		break;
	case ELS_LS_RJT:
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
		    "SRR rejected.\n");
		qedf_initiate_abts(orig_io_req, true);
		break;
	}

	fc_frame_free(fp);
out_put:
	/* Put reference for original command since SRR completed */
	kref_put(&orig_io_req->refcount, qedf_release_cmd);
out_free:
	kfree(cb_arg);
}

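/*
 * Send a Sequence Retransmission Request (SRR) FC-4 link service asking the
 * target to retransmit data or status for the exchange, starting at the
 * given relative offset.
 */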
static int qedf_send_srr(struct qedf_ioreq *orig_io_req, u32 offset, u8 r_ctl)
{
	struct fcp_srr srr;
	struct qedf_ctx *qedf;
	struct qedf_rport *fcport;
	struct fc_lport *lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	u32 sid, r_a_tov;
	int rc;

	if (!orig_io_req) {
		QEDF_ERR(NULL, "orig_io_req is NULL.\n");
		return -EINVAL;
	}

	fcport = orig_io_req->fcport;

	/* Check that fcport is still offloaded */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
		return -EINVAL;
	}

	if (!fcport->qedf) {
		QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
		return -EINVAL;
	}

	/* Take reference until SRR command completion */
	kref_get(&orig_io_req->refcount);

	qedf = fcport->qedf;
	lport = qedf->lport;
	sid = fcport->sid;
	r_a_tov = lport->r_a_tov;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending SRR orig_io=%p, "
		   "orig_xid=0x%x\n", orig_io_req, orig_io_req->xid);
	memset(&srr, 0, sizeof(srr));

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
			  "SRR\n");
		rc = -ENOMEM;
		goto srr_err;
	}

	cb_arg->aborted_io_req = orig_io_req;

	srr.srr_op = ELS_SRR;
	srr.srr_ox_id = htons(orig_io_req->xid);
	srr.srr_rx_id = htons(orig_io_req->rx_id);
	srr.srr_rel_off = htonl(offset);
	srr.srr_r_ctl = r_ctl;

	rc = qedf_initiate_els(fcport, ELS_SRR, &srr, sizeof(srr),
	    qedf_srr_compl, cb_arg, r_a_tov);

srr_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "SRR failed - release orig_io_req"
			  "=0x%x\n", orig_io_req->xid);
		kfree(cb_arg);
		/* If we fail to queue SRR, send ABTS to orig_io */
		qedf_initiate_abts(orig_io_req, true);
		kref_put(&orig_io_req->refcount, qedf_release_cmd);
	} else {
		/* Tell other threads that SRR is in progress */
		set_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);
	}

	return rc;
}

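/*
 * Post a firmware sequence-recovery (cleanup) task for the original I/O;
 * qedf_process_seq_cleanup_compl() sends the follow-up SRR once the
 * cleanup completes.
 */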
static void qedf_initiate_seq_cleanup(struct qedf_ioreq *orig_io_req,
	u32 offset, u8 r_ctl)
{
	struct qedf_rport *fcport;
	unsigned long flags;
	struct qedf_els_cb_arg *cb_arg;
	struct fcoe_wqe *sqe;
	u16 sqe_idx;

	fcport = orig_io_req->fcport;

	QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
	    "Doing sequence cleanup for xid=0x%x offset=%u.\n",
	    orig_io_req->xid, offset);

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to allocate cb_arg "
			  "for sequence cleanup\n");
		return;
	}

	/* Get reference for cleanup request */
	kref_get(&orig_io_req->refcount);

	orig_io_req->cmd_type = QEDF_SEQ_CLEANUP;
	cb_arg->offset = offset;
	cb_arg->r_ctl = r_ctl;
	orig_io_req->cb_arg = cb_arg;

	qedf_cmd_timer_set(fcport->qedf, orig_io_req,
	    QEDF_CLEANUP_TIMEOUT * HZ);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));
	orig_io_req->task_params->sqe = sqe;

	init_initiator_sequence_recovery_fcoe_task(orig_io_req->task_params,
						   offset);
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);
}

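/*
 * Completion handler for a sequence cleanup task: send the SRR the cleanup
 * was staged for, then release the cleanup resources.
 */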
void qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf,
	struct fcoe_cqe *cqe, struct qedf_ioreq *io_req)
{
	int rc;
	struct qedf_els_cb_arg *cb_arg;

	cb_arg = io_req->cb_arg;

	/* If we timed out just free resources */
	if (io_req->event == QEDF_IOREQ_EV_ELS_TMO || !cqe)
		goto free;

	/* Kill the timer we put on the request */
	cancel_delayed_work_sync(&io_req->timeout_work);

	rc = qedf_send_srr(io_req, cb_arg->offset, cb_arg->r_ctl);
	if (rc)
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to send SRR, I/O will "
		    "abort, xid=0x%x.\n", io_req->xid);
free:
	kfree(cb_arg);
	kref_put(&io_req->refcount, qedf_release_cmd);
}

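/*
 * Reissue a SCSI command on a new exchange (the REC "command lost" case):
 * move the sc_cmd to a freshly allocated io_req, post it, and abort the
 * original exchange without completing it back to the SCSI layer.
 */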
static bool qedf_requeue_io_req(struct qedf_ioreq *orig_io_req)
{
	struct qedf_rport *fcport;
	struct qedf_ioreq *new_io_req;
	unsigned long flags;
	bool rc = false;

	fcport = orig_io_req->fcport;
	if (!fcport) {
		QEDF_ERR(NULL, "fcport is NULL.\n");
		goto out;
	}

	if (!orig_io_req->sc_cmd) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "sc_cmd is NULL for "
		    "xid=0x%x.\n", orig_io_req->xid);
		goto out;
	}

	new_io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
	if (!new_io_req) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "Could not allocate new "
		    "io_req.\n");
		goto out;
	}

	new_io_req->sc_cmd = orig_io_req->sc_cmd;

	/*
	 * This keeps the sc_cmd struct from being returned to the tape
	 * driver and being requeued twice. We do need to put a reference
	 * for the original I/O request since we will not do a SCSI completion
	 * for it.
	 */
	orig_io_req->sc_cmd = NULL;
	kref_put(&orig_io_req->refcount, qedf_release_cmd);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	/* kref for new command released in qedf_post_io_req on error */
	if (qedf_post_io_req(fcport, new_io_req)) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to post io_req\n");
		/* Return SQE to pool */
		atomic_inc(&fcport->free_sqes);
	} else {
		QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
		    "Reissued SCSI command from orig_xid=0x%x on "
		    "new_xid=0x%x.\n", orig_io_req->xid, new_io_req->xid);
		/*
		 * Abort the original I/O but do not return SCSI command as
		 * it has been reissued on another OX_ID.
		 */
		spin_unlock_irqrestore(&fcport->rport_lock, flags);
		qedf_initiate_abts(orig_io_req, false);
		goto out;
	}

	spin_unlock_irqrestore(&fcport->rport_lock, flags);
out:
	return rc;
}

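/*
 * REC completion handler. An LS_RJT with an unknown OX_ID/RX_ID means the
 * command was lost and must be reissued on a new exchange; an LS_ACC tells
 * us, via e_stat and the transfer counts, whether to recover with an SRR or
 * a firmware sequence cleanup, and at what relative offset.
 */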
static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *rec_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *mp_fc_hdr, *fh;
	struct fc_frame *fp;
	void *resp_buf, *fc_payload;
	u32 resp_len;
	struct fc_lport *lport;
	struct qedf_ctx *qedf;
	int refcount;
	enum fc_rctl r_ctl;
	struct fc_els_ls_rjt *rjt;
	struct fc_els_rec_acc *acc;
	u8 opcode;
	u32 offset, e_stat;
	struct scsi_cmnd *sc_cmd;
	bool srr_needed = false;

	rec_req = cb_arg->io_req;
	qedf = rec_req->fcport->qedf;
	lport = qedf->lport;

	orig_io_req = cb_arg->aborted_io_req;

	if (!orig_io_req)
		goto out_free;

	if (rec_req->event != QEDF_IOREQ_EV_ELS_TMO &&
	    rec_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
		cancel_delayed_work_sync(&orig_io_req->timeout_work);

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p,"
		   " orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n",
		   orig_io_req, orig_io_req->xid, rec_req->xid, refcount);

	/* If a REC times out, free resources */
	if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO)
		goto out_put;

	/* Normalize response data into struct fc_frame */
	mp_req = &(rec_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	acc = resp_buf = mp_req->resp_buf;

	fp = fc_frame_alloc(lport, resp_len);
	if (!fp) {
		QEDF_ERR(&(qedf->dbg_ctx),
		    "fc_frame_alloc failure.\n");
		goto out_put;
	}

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	opcode = fc_frame_payload_op(fp);
	if (opcode == ELS_LS_RJT) {
		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		    "Received LS_RJT for REC: er_reason=0x%x, "
		    "er_explan=0x%x.\n", rjt->er_reason, rjt->er_explan);
		/*
		 * The following response(s) mean that we need to reissue the
		 * request on another exchange.  We need to do this without
		 * informing the upper layers lest it cause an application
		 * error.
		 */
		if ((rjt->er_reason == ELS_RJT_LOGIC ||
		    rjt->er_reason == ELS_RJT_UNAB) &&
		    rjt->er_explan == ELS_EXPL_OXID_RXID) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			    "Handle CMD LOST case.\n");
			qedf_requeue_io_req(orig_io_req);
		}
	} else if (opcode == ELS_LS_ACC) {
		offset = ntohl(acc->reca_fc4value);
		e_stat = ntohl(acc->reca_e_stat);
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		    "Received LS_ACC for REC: offset=0x%x, e_stat=0x%x.\n",
		    offset, e_stat);
		if (e_stat & ESB_ST_SEQ_INIT) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			    "Target has the seq init\n");
			goto out_free_frame;
		}
		sc_cmd = orig_io_req->sc_cmd;
		if (!sc_cmd) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			    "sc_cmd is NULL for xid=0x%x.\n",
			    orig_io_req->xid);
			goto out_free_frame;
		}
		/* SCSI write case */
		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
			if (offset == orig_io_req->data_xfer_len) {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "WRITE - response lost.\n");
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				srr_needed = true;
				offset = 0;
			} else {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "WRITE - XFER_RDY/DATA lost.\n");
				r_ctl = FC_RCTL_DD_DATA_DESC;
				/* Use data from warning CQE instead of REC */
				offset = orig_io_req->tx_buf_off;
			}
		/* SCSI read case */
		} else {
			if (orig_io_req->rx_buf_off ==
			    orig_io_req->data_xfer_len) {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "READ - response lost.\n");
				srr_needed = true;
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				offset = 0;
			} else {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "READ - DATA lost.\n");
				/*
				 * For read case we always set the offset to 0
				 * for sequence recovery task.
				 */
				offset = 0;
				r_ctl = FC_RCTL_DD_SOL_DATA;
			}
		}

		if (srr_needed)
			qedf_send_srr(orig_io_req, offset, r_ctl);
		else
			qedf_initiate_seq_cleanup(orig_io_req, offset, r_ctl);
	}

out_free_frame:
	fc_frame_free(fp);
out_put:
	/* Put reference for original command since REC completed */
	kref_put(&orig_io_req->refcount, qedf_release_cmd);
out_free:
	kfree(cb_arg);
}

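/*
 * Send a Read Exchange Concise (REC) ELS to discover how much of the
 * exchange the target has completed; qedf_rec_compl() drives the recovery.
 */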
/* Assumes kref is already held by caller */
int qedf_send_rec(struct qedf_ioreq *orig_io_req)
{
	struct fc_els_rec rec;
	struct qedf_rport *fcport;
	struct fc_lport *lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	struct qedf_ctx *qedf;
	uint32_t sid;
	uint32_t r_a_tov;
	int rc;

	if (!orig_io_req) {
		QEDF_ERR(NULL, "orig_io_req is NULL.\n");
		return -EINVAL;
	}

	fcport = orig_io_req->fcport;

	/* Check that fcport is still offloaded */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
		return -EINVAL;
	}

	if (!fcport->qedf) {
		QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
		return -EINVAL;
	}

	/* Take reference until REC command completion */
	kref_get(&orig_io_req->refcount);

	qedf = fcport->qedf;
	lport = qedf->lport;
	sid = fcport->sid;
	r_a_tov = lport->r_a_tov;

	memset(&rec, 0, sizeof(rec));

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
			  "REC\n");
		rc = -ENOMEM;
		goto rec_err;
	}

	cb_arg->aborted_io_req = orig_io_req;

	rec.rec_cmd = ELS_REC;
	hton24(rec.rec_s_id, sid);
	rec.rec_ox_id = htons(orig_io_req->xid);
	rec.rec_rx_id =
	    htons(orig_io_req->task->tstorm_st_context.read_write.rx_id);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending REC orig_io=%p, "
	   "orig_xid=0x%x rx_id=0x%x\n", orig_io_req,
	   orig_io_req->xid, rec.rec_rx_id);
	rc = qedf_initiate_els(fcport, ELS_REC, &rec, sizeof(rec),
	    qedf_rec_compl, cb_arg, r_a_tov);

rec_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "REC failed - release orig_io_req"
			  "=0x%x\n", orig_io_req->xid);
		kfree(cb_arg);
		kref_put(&orig_io_req->refcount, qedf_release_cmd);
	}
	return rc;
}