xref: /openbmc/linux/drivers/target/tcm_fc/tfc_cmd.c (revision 81d67439)
/*
 * Copyright (c) 2010 Cisco Systems, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

/* XXX TBD some includes may be extraneous */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/libfc.h>
#include <scsi/fc_encode.h>

#include <target/target_core_base.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_configfs.h>
#include <target/target_core_tmr.h>
#include <target/configfs_macros.h>

#include "tcm_fc.h"

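/*
 * This file implements the FCP command path for the tcm_fc fabric module:
 * receiving FCP_CMND frames from libfc, handing commands and task
 * management requests to the target core, sending XFER_RDY and FCP_RSP
 * frames back to the initiator, and tearing down per-command state.
 */
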
/*
 * Dump cmd state for debugging.
 */
void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
{
	struct fc_exch *ep;
	struct fc_seq *sp;
	struct se_cmd *se_cmd;
	struct scatterlist *sg;
	int count;

	se_cmd = &cmd->se_cmd;
	pr_debug("%s: cmd %p state %d sess %p seq %p se_cmd %p\n",
		caller, cmd, cmd->state, cmd->sess, cmd->seq, se_cmd);
	pr_debug("%s: cmd %p cdb %p\n",
		caller, cmd, cmd->cdb);
	pr_debug("%s: cmd %p lun %u\n", caller, cmd, cmd->lun);

	pr_debug("%s: cmd %p data_nents %u len %u se_cmd_flags <0x%x>\n",
		caller, cmd, se_cmd->t_data_nents,
		se_cmd->data_length, se_cmd->se_cmd_flags);

	for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, count)
		pr_debug("%s: cmd %p sg %p page %p "
			"len 0x%x off 0x%x\n",
			caller, cmd, sg,
			sg_page(sg), sg->length, sg->offset);

	sp = cmd->seq;
	if (sp) {
		ep = fc_seq_exch(sp);
		pr_debug("%s: cmd %p sid %x did %x "
			"ox_id %x rx_id %x seq_id %x e_stat %x\n",
			caller, cmd, ep->sid, ep->did, ep->oxid, ep->rxid,
			sp->id, ep->esb_stat);
	}
	print_hex_dump(KERN_INFO, "ft_dump_cmd ", DUMP_PREFIX_NONE,
		16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0);
}

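/*
 * Queue an incoming command on the tpg's queue object and wake the
 * per-tpg processing thread (ft_thread).  This runs in the frame
 * receive path, typically in softirq context, hence the irqsave
 * locking around the queue manipulation.
 */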
static void ft_queue_cmd(struct ft_sess *sess, struct ft_cmd *cmd)
{
	struct ft_tpg *tpg = sess->tport->tpg;
	struct se_queue_obj *qobj = &tpg->qobj;
	unsigned long flags;

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	list_add_tail(&cmd->se_req.qr_list, &qobj->qobj_list);
	atomic_inc(&qobj->queue_cnt);
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

	wake_up_process(tpg->thread);
}

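/*
 * Pop the oldest queued command off the queue object, or return NULL
 * if the queue is empty.  Called only from the processing thread.
 */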
static struct ft_cmd *ft_dequeue_cmd(struct se_queue_obj *qobj)
{
	unsigned long flags;
	struct se_queue_req *qr;

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	if (list_empty(&qobj->qobj_list)) {
		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
		return NULL;
	}
	qr = list_first_entry(&qobj->qobj_list, struct se_queue_req, qr_list);
	list_del(&qr->qr_list);
	atomic_dec(&qobj->queue_cnt);
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
	return container_of(qr, struct ft_cmd, se_req);
}

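/*
 * Release all per-command state: the exchange sequence (if still
 * assigned), the held request frame, and the session reference taken
 * at lookup time, then free the command itself.
 */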
static void ft_free_cmd(struct ft_cmd *cmd)
{
	struct fc_frame *fp;
	struct fc_lport *lport;

	if (!cmd)
		return;
	fp = cmd->req_frame;
	lport = fr_dev(fp);
	if (fr_seq(fp))
		lport->tt.seq_release(fr_seq(fp));
	fc_frame_free(fp);
	ft_sess_put(cmd->sess);	/* undo get from lookup at recv */
	kfree(cmd);
}

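/*
 * Called by the target core when the se_cmd is released; frees the
 * enclosing ft_cmd.
 */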
void ft_release_cmd(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);

	ft_free_cmd(cmd);
}

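/*
 * Fabric callback used by the target core at command completion;
 * drops the command via transport_generic_free_cmd().
 */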
void ft_check_stop_free(struct se_cmd *se_cmd)
{
	transport_generic_free_cmd(se_cmd, 0, 0);
}

/*
 * Send response: FCP_RSP with SCSI status and any sense data.
 */
int ft_queue_status(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
	struct fc_frame *fp;
	struct fcp_resp_with_ext *fcp;
	struct fc_lport *lport;
	struct fc_exch *ep;
	size_t len;

	ft_dump_cmd(cmd, __func__);
	ep = fc_seq_exch(cmd->seq);
	lport = ep->lp;
	len = sizeof(*fcp) + se_cmd->scsi_sense_length;
	fp = fc_frame_alloc(lport, len);
	if (!fp) {
		/* XXX shouldn't just drop it - requeue and retry? */
		return 0;
	}
	fcp = fc_frame_payload_get(fp, len);
	memset(fcp, 0, len);
	fcp->resp.fr_status = se_cmd->scsi_status;

	len = se_cmd->scsi_sense_length;
	if (len) {
		fcp->resp.fr_flags |= FCP_SNS_LEN_VAL;
		fcp->ext.fr_sns_len = htonl(len);
		memcpy((fcp + 1), se_cmd->sense_buffer, len);
	}

	/*
	 * Test underflow and overflow with one mask.  Usually both are off.
	 * Bidirectional commands are not handled yet.
	 */
	if (se_cmd->se_cmd_flags & (SCF_OVERFLOW_BIT | SCF_UNDERFLOW_BIT)) {
		if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT)
			fcp->resp.fr_flags |= FCP_RESID_OVER;
		else
			fcp->resp.fr_flags |= FCP_RESID_UNDER;
		fcp->ext.fr_resid = cpu_to_be32(se_cmd->residual_count);
	}

	/*
	 * Send response.
	 */
	cmd->seq = lport->tt.seq_start_next(cmd->seq);
	fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP,
		       FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0);

	lport->tt.seq_send(lport, cmd->seq, fp);
	lport->tt.exch_done(cmd->seq);
	return 0;
}

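/*
 * Fabric callback: returns nonzero while write data is still expected
 * from the initiator, i.e. the received length has not yet reached
 * the expected data length.
 */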
int ft_write_pending_status(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);

	return cmd->write_data_len != se_cmd->data_length;
}

/*
 * Send TX_RDY (transfer ready).
 */
int ft_write_pending(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
	struct fc_frame *fp;
	struct fcp_txrdy *txrdy;
	struct fc_lport *lport;
	struct fc_exch *ep;
	struct fc_frame_header *fh;
	u32 f_ctl;

	ft_dump_cmd(cmd, __func__);

	ep = fc_seq_exch(cmd->seq);
	lport = ep->lp;
	fp = fc_frame_alloc(lport, sizeof(*txrdy));
	if (!fp)
		return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;

	txrdy = fc_frame_payload_get(fp, sizeof(*txrdy));
	memset(txrdy, 0, sizeof(*txrdy));
	txrdy->ft_burst_len = htonl(se_cmd->data_length);

	cmd->seq = lport->tt.seq_start_next(cmd->seq);
	fc_fill_fc_hdr(fp, FC_RCTL_DD_DATA_DESC, ep->did, ep->sid, FC_TYPE_FCP,
		       FC_FC_EX_CTX | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

	fh = fc_frame_header_get(fp);
	f_ctl = ntoh24(fh->fh_f_ctl);

	/* Set up DDP only when we are the exchange responder */
	if (f_ctl & FC_FC_EX_CTX) {
		/*
		 * The target is the exchange responder here, sending
		 * XFER_RDY to the exchange initiator.
		 */
		if ((ep->xid <= lport->lro_xid) &&
		    (fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) {
			if (se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
				/*
				 * cmd may have been broken up into multiple
				 * tasks. Link their sgs together so we can
				 * operate on them all at once.
				 */
				transport_do_task_sg_chain(se_cmd);
				cmd->sg = se_cmd->t_tasks_sg_chained;
				cmd->sg_cnt =
					se_cmd->t_tasks_sg_chained_no;
			}
			if (cmd->sg && lport->tt.ddp_target(lport, ep->xid,
							    cmd->sg,
							    cmd->sg_cnt))
				cmd->was_ddp_setup = 1;
		}
	}
	lport->tt.seq_send(lport, cmd->seq, fp);
	return 0;
}

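/*
 * The following three functions are se_cmd fabric callbacks.  The tag
 * reported to the target core is the exchange RX_ID assigned by our
 * (responder) side.
 */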
u32 ft_get_task_tag(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);

	return fc_seq_exch(cmd->seq)->rxid;
}

int ft_get_cmd_state(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);

	return cmd->state;
}

int ft_is_state_remove(struct se_cmd *se_cmd)
{
	return 0;	/* XXX TBD */
}

/*
 * FC sequence response handler for follow-on sequences (data) and aborts.
 */
static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)
{
	struct ft_cmd *cmd = arg;
	struct fc_frame_header *fh;

	if (IS_ERR(fp)) {
		/* XXX need to find cmd if queued */
		cmd->se_cmd.t_state = TRANSPORT_REMOVE;
		cmd->seq = NULL;
		transport_generic_free_cmd(&cmd->se_cmd, 0, 0);
		return;
	}

	fh = fc_frame_header_get(fp);

	switch (fh->fh_r_ctl) {
	case FC_RCTL_DD_SOL_DATA:	/* write data */
		ft_recv_write_data(cmd, fp);
		break;
	case FC_RCTL_DD_UNSOL_CTL:	/* command */
	case FC_RCTL_DD_SOL_CTL:	/* transfer ready */
	case FC_RCTL_DD_DATA_DESC:	/* transfer ready */
	default:
		pr_debug("%s: unhandled frame r_ctl %x\n",
		       __func__, fh->fh_r_ctl);
		fc_frame_free(fp);
		transport_generic_free_cmd(&cmd->se_cmd, 0, 0);
		break;
	}
}

/*
 * Send an FCP response including SCSI status and an optional FCP rsp_code.
 * The rsp_code is valid only when status is SAM_STAT_GOOD (zero).
 * This is used in error cases, such as allocation failures.
 */
static void ft_send_resp_status(struct fc_lport *lport,
				const struct fc_frame *rx_fp,
				u32 status, enum fcp_resp_rsp_codes code)
{
	struct fc_frame *fp;
	struct fc_seq *sp;
	const struct fc_frame_header *fh;
	size_t len;
	struct fcp_resp_with_ext *fcp;
	struct fcp_resp_rsp_info *info;

	fh = fc_frame_header_get(rx_fp);
	pr_debug("FCP error response: did %x oxid %x status %x code %x\n",
		  ntoh24(fh->fh_s_id), ntohs(fh->fh_ox_id), status, code);
	len = sizeof(*fcp);
	if (status == SAM_STAT_GOOD)
		len += sizeof(*info);
	fp = fc_frame_alloc(lport, len);
	if (!fp)
		return;
	fcp = fc_frame_payload_get(fp, len);
	memset(fcp, 0, len);
	fcp->resp.fr_status = status;
	if (status == SAM_STAT_GOOD) {
		fcp->ext.fr_rsp_len = htonl(sizeof(*info));
		fcp->resp.fr_flags |= FCP_RSP_LEN_VAL;
		info = (struct fcp_resp_rsp_info *)(fcp + 1);
		info->rsp_code = code;
	}

	fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_DD_CMD_STATUS, 0);
	sp = fr_seq(fp);
	if (sp)
		lport->tt.seq_send(lport, sp, fp);
	else
		lport->tt.frame_send(lport, fp);
}

/*
 * Send error or task management response.
 */
static void ft_send_resp_code(struct ft_cmd *cmd,
			      enum fcp_resp_rsp_codes code)
{
	ft_send_resp_status(cmd->sess->tport->lport,
			    cmd->req_frame, SAM_STAT_GOOD, code);
}

/*
 * Send error or task management response.
 * Always frees the cmd and associated state.
 */
static void ft_send_resp_code_and_free(struct ft_cmd *cmd,
				      enum fcp_resp_rsp_codes code)
{
	ft_send_resp_code(cmd, code);
	ft_free_cmd(cmd);
}

/*
 * Handle Task Management Request.
 */
static void ft_send_tm(struct ft_cmd *cmd)
{
	struct se_tmr_req *tmr;
	struct fcp_cmnd *fcp;
	struct ft_sess *sess;
	u8 tm_func;

	fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));

	switch (fcp->fc_tm_flags) {
	case FCP_TMF_LUN_RESET:
		tm_func = TMR_LUN_RESET;
		break;
	case FCP_TMF_TGT_RESET:
		tm_func = TMR_TARGET_WARM_RESET;
		break;
	case FCP_TMF_CLR_TASK_SET:
		tm_func = TMR_CLEAR_TASK_SET;
		break;
	case FCP_TMF_ABT_TASK_SET:
		tm_func = TMR_ABORT_TASK_SET;
		break;
	case FCP_TMF_CLR_ACA:
		tm_func = TMR_CLEAR_ACA;
		break;
	default:
		/*
		 * FCP-4 r01 indicates that having a combination of
		 * tm_flags set is invalid.
		 */
		pr_debug("invalid FCP tm_flags %x\n", fcp->fc_tm_flags);
		ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID);
		return;
	}

	pr_debug("alloc tm cmd fn %d\n", tm_func);
	tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func);
	if (!tmr) {
		pr_debug("alloc failed\n");
		ft_send_resp_code_and_free(cmd, FCP_TMF_FAILED);
		return;
	}
	cmd->se_cmd.se_tmr_req = tmr;

	switch (fcp->fc_tm_flags) {
	case FCP_TMF_LUN_RESET:
		cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
		if (transport_lookup_tmr_lun(&cmd->se_cmd, cmd->lun) < 0) {
			/*
			 * Clean up the newly allocated TMR request, since
			 * the LUN lookup failed and the request cannot be
			 * handled.
			 */
			pr_debug("Failed to get LUN for TMR func %d, "
				  "se_cmd %p, unpacked_lun %u\n",
				  tm_func, &cmd->se_cmd, cmd->lun);
			ft_dump_cmd(cmd, __func__);
			sess = cmd->sess;
			transport_send_check_condition_and_sense(&cmd->se_cmd,
				cmd->se_cmd.scsi_sense_reason, 0);
			transport_generic_free_cmd(&cmd->se_cmd, 0, 0);
			ft_sess_put(sess);
			return;
		}
		break;
	case FCP_TMF_TGT_RESET:
	case FCP_TMF_CLR_TASK_SET:
	case FCP_TMF_ABT_TASK_SET:
	case FCP_TMF_CLR_ACA:
		break;
	default:
		return;
	}
	transport_generic_handle_tmr(&cmd->se_cmd);
}

/*
 * Send status from completed task management request.
 */
int ft_queue_tm_resp(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
	struct se_tmr_req *tmr = se_cmd->se_tmr_req;
	enum fcp_resp_rsp_codes code;

	switch (tmr->response) {
	case TMR_FUNCTION_COMPLETE:
		code = FCP_TMF_CMPL;
		break;
	case TMR_LUN_DOES_NOT_EXIST:
		code = FCP_TMF_INVALID_LUN;
		break;
	case TMR_FUNCTION_REJECTED:
		code = FCP_TMF_REJECTED;
		break;
	case TMR_TASK_DOES_NOT_EXIST:
	case TMR_TASK_STILL_ALLEGIANT:
	case TMR_TASK_FAILOVER_NOT_SUPPORTED:
	case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
	case TMR_FUNCTION_AUTHORIZATION_FAILED:
	default:
		code = FCP_TMF_FAILED;
		break;
	}
	pr_debug("tmr fn %d resp %d fcp code %d\n",
		  tmr->function, tmr->response, code);
	ft_send_resp_code(cmd, code);
	return 0;
}

/*
 * Handle incoming FCP command.
 */
static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
{
	struct ft_cmd *cmd;
	struct fc_lport *lport = sess->tport->lport;

	cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
	if (!cmd)
		goto busy;
	cmd->sess = sess;
	cmd->seq = lport->tt.seq_assign(lport, fp);
	if (!cmd->seq) {
		kfree(cmd);
		goto busy;
	}
	cmd->req_frame = fp;		/* hold frame during cmd */
	ft_queue_cmd(sess, cmd);
	return;

busy:
	pr_debug("cmd or seq allocation failure - sending BUSY\n");
	ft_send_resp_status(lport, fp, SAM_STAT_BUSY, 0);
	fc_frame_free(fp);
	ft_sess_put(sess);		/* undo get from lookup */
}

/*
 * Handle incoming FCP frame.
 * Caller has verified that the frame is type FCP.
 */
void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);

	switch (fh->fh_r_ctl) {
	case FC_RCTL_DD_UNSOL_CMD:	/* command */
		ft_recv_cmd(sess, fp);
		break;
	case FC_RCTL_DD_SOL_DATA:	/* write data */
	case FC_RCTL_DD_UNSOL_CTL:
	case FC_RCTL_DD_SOL_CTL:
	case FC_RCTL_DD_DATA_DESC:	/* transfer ready */
	case FC_RCTL_ELS4_REQ:		/* SRR, perhaps */
	default:
		pr_debug("%s: unhandled frame r_ctl %x\n",
		       __func__, fh->fh_r_ctl);
		fc_frame_free(fp);
		ft_sess_put(sess);	/* undo get from lookup */
		break;
	}
}

/*
 * Pass a new command to the target core.
 */
static void ft_send_cmd(struct ft_cmd *cmd)
{
	struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame);
	struct se_cmd *se_cmd;
	struct fcp_cmnd *fcp;
	int data_dir;
	u32 data_len;
	int task_attr;
	int ret;

	fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
	if (!fcp)
		goto err;

	if (fcp->fc_flags & FCP_CFL_LEN_MASK)
		goto err;		/* not handling longer CDBs yet */

	if (fcp->fc_tm_flags) {
		task_attr = FCP_PTA_SIMPLE;
		data_dir = DMA_NONE;
		data_len = 0;
	} else {
		switch (fcp->fc_flags & (FCP_CFL_RDDATA | FCP_CFL_WRDATA)) {
		case 0:
			data_dir = DMA_NONE;
			break;
		case FCP_CFL_RDDATA:
			data_dir = DMA_FROM_DEVICE;
			break;
		case FCP_CFL_WRDATA:
			data_dir = DMA_TO_DEVICE;
			break;
		case FCP_CFL_WRDATA | FCP_CFL_RDDATA:
			goto err;	/* TBD not supported by tcm_fc yet */
		}
		/*
		 * Locate the SAM Task Attr from fc_pri_ta
		 */
		switch (fcp->fc_pri_ta & FCP_PTA_MASK) {
		case FCP_PTA_HEADQ:
			task_attr = MSG_HEAD_TAG;
			break;
		case FCP_PTA_ORDERED:
			task_attr = MSG_ORDERED_TAG;
			break;
		case FCP_PTA_ACA:
			task_attr = MSG_ACA_TAG;
			break;
		case FCP_PTA_SIMPLE: /* Fallthrough */
		default:
			task_attr = MSG_SIMPLE_TAG;
		}

		data_len = ntohl(fcp->fc_dl);
		cmd->cdb = fcp->fc_cdb;
	}

	se_cmd = &cmd->se_cmd;
	/*
	 * Initialize struct se_cmd descriptor from target_core_mod
	 * infrastructure
	 */
	transport_init_se_cmd(se_cmd, &ft_configfs->tf_ops, cmd->sess->se_sess,
			      data_len, data_dir, task_attr,
			      &cmd->ft_sense_buffer[0]);
	/*
	 * Check for FCP task management flags
	 */
	if (fcp->fc_tm_flags) {
		ft_send_tm(cmd);
		return;
	}

	fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);

	cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
	ret = transport_lookup_cmd_lun(&cmd->se_cmd, cmd->lun);
	if (ret < 0) {
		ft_dump_cmd(cmd, __func__);
		transport_send_check_condition_and_sense(&cmd->se_cmd,
			cmd->se_cmd.scsi_sense_reason, 0);
		return;
	}

	ret = transport_generic_allocate_tasks(se_cmd, cmd->cdb);

	pr_debug("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret);
	ft_dump_cmd(cmd, __func__);

	if (ret == -ENOMEM) {
		transport_send_check_condition_and_sense(se_cmd,
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
		transport_generic_free_cmd(se_cmd, 0, 0);
		return;
	}
	if (ret == -EINVAL) {
		if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
			ft_queue_status(se_cmd);
		else
			transport_send_check_condition_and_sense(se_cmd,
					se_cmd->scsi_sense_reason, 0);
		transport_generic_free_cmd(se_cmd, 0, 0);
		return;
	}
	transport_generic_handle_cdb(se_cmd);
	return;

err:
	ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID);
}

/*
 * Handle request in the command thread.
 */
static void ft_exec_req(struct ft_cmd *cmd)
{
	pr_debug("cmd state %x\n", cmd->state);
	switch (cmd->state) {
	case FC_CMD_ST_NEW:
		ft_send_cmd(cmd);
		break;
	default:
		break;
	}
}

/*
 * Processing thread.
 * Currently one thread per tpg.
 */
int ft_thread(void *arg)
{
	struct ft_tpg *tpg = arg;
	struct se_queue_obj *qobj = &tpg->qobj;
	struct ft_cmd *cmd;

	while (!kthread_should_stop()) {
		schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT);
		if (kthread_should_stop())
			goto out;

		cmd = ft_dequeue_cmd(qobj);
		if (cmd)
			ft_exec_req(cmd);
	}

out:
	return 0;
}