xref: /openbmc/linux/drivers/scsi/elx/efct/efct_lio.c (revision 0c6dfa75)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
4  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
5  */
6 
7 #include <target/target_core_base.h>
8 #include <target/target_core_fabric.h>
9 #include "efct_driver.h"
10 #include "efct_lio.h"
11 
12 /*
 * lio_wq is used to call the LIO backend during creation or deletion of
14  * sessions. This brings serialization to the session management as we create
15  * single threaded work queue.
16  */
17 static struct workqueue_struct *lio_wq;
18 
19 static int
20 efct_format_wwn(char *str, size_t len, const char *pre, u64 wwn)
21 {
22 	u8 a[8];
23 
24 	put_unaligned_be64(wwn, a);
25 	return snprintf(str, len, "%s%8phC", pre, a);
26 }
27 
static int
efct_lio_parse_wwn(const char *name, u64 *wwp, u8 npiv)
{
	/* NPIV names are 16 bare hex digits; physical names use the
	 * colon-separated "xx:xx:..." form.
	 */
	const char *fmt = npiv ?
		"%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx" :
		"%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx";
	u64 val = 0;
	int i;
	u8 b[8];

	if (sscanf(name, fmt, &b[0], &b[1], &b[2], &b[3], &b[4], &b[5],
		   &b[6], &b[7]) != 8)
		return -EINVAL;

	/* fold the bytes back into a big-endian 64-bit WWN */
	for (i = 0; i < 8; i++)
		val = (val << 8) | b[i];

	*wwp = val;
	return 0;
}
52 
static int
efct_lio_parse_npiv_wwn(const char *name, size_t size, u64 *wwpn, u64 *wwnn)
{
	unsigned int cnt = size;
	int rc;

	/* Parse "<16 hex WWPN>:<16 hex WWNN>" (optionally newline/NUL
	 * terminated) into the two 64-bit WWNs. Returns 0 or -EINVAL.
	 */
	*wwpn = *wwnn = 0;

	/* Guard the name[cnt - 1] access below: an empty string would
	 * otherwise read one byte before the buffer.
	 */
	if (!cnt)
		return -EINVAL;

	if (name[cnt - 1] == '\n' || name[cnt - 1] == 0)
		cnt--;

	/* validate we have enough characters for WWPN */
	if ((cnt != (16 + 1 + 16)) || (name[16] != ':'))
		return -EINVAL;

	rc = efct_lio_parse_wwn(&name[0], wwpn, 1);
	if (rc)
		return rc;

	rc = efct_lio_parse_wwn(&name[17], wwnn, 1);
	if (rc)
		return rc;

	return 0;
}
77 
78 static ssize_t
79 efct_lio_tpg_enable_show(struct config_item *item, char *page)
80 {
81 	struct se_portal_group *se_tpg = to_tpg(item);
82 	struct efct_lio_tpg *tpg =
83 		container_of(se_tpg, struct efct_lio_tpg, tpg);
84 
85 	return snprintf(page, PAGE_SIZE, "%d\n", tpg->enabled);
86 }
87 
88 static ssize_t
89 efct_lio_tpg_enable_store(struct config_item *item, const char *page,
90 			  size_t count)
91 {
92 	struct se_portal_group *se_tpg = to_tpg(item);
93 	struct efct_lio_tpg *tpg =
94 		container_of(se_tpg, struct efct_lio_tpg, tpg);
95 	struct efct *efct;
96 	struct efc *efc;
97 	unsigned long op;
98 
99 	if (!tpg->nport || !tpg->nport->efct) {
100 		pr_err("%s: Unable to find EFCT device\n", __func__);
101 		return -EINVAL;
102 	}
103 
104 	efct = tpg->nport->efct;
105 	efc = efct->efcport;
106 
107 	if (kstrtoul(page, 0, &op) < 0)
108 		return -EINVAL;
109 
110 	if (op == 1) {
111 		int ret;
112 
113 		tpg->enabled = true;
114 		efc_log_debug(efct, "enable portal group %d\n", tpg->tpgt);
115 
116 		ret = efct_xport_control(efct->xport, EFCT_XPORT_PORT_ONLINE);
117 		if (ret) {
118 			efct->tgt_efct.lio_nport = NULL;
119 			efc_log_debug(efct, "cannot bring port online\n");
120 			return ret;
121 		}
122 	} else if (op == 0) {
123 		efc_log_debug(efct, "disable portal group %d\n", tpg->tpgt);
124 
125 		if (efc->domain && efc->domain->nport)
126 			efct_scsi_tgt_del_nport(efc, efc->domain->nport);
127 
128 		tpg->enabled = false;
129 	} else {
130 		return -EINVAL;
131 	}
132 
133 	return count;
134 }
135 
136 static ssize_t
137 efct_lio_npiv_tpg_enable_show(struct config_item *item, char *page)
138 {
139 	struct se_portal_group *se_tpg = to_tpg(item);
140 	struct efct_lio_tpg *tpg =
141 		container_of(se_tpg, struct efct_lio_tpg, tpg);
142 
143 	return snprintf(page, PAGE_SIZE, "%d\n", tpg->enabled);
144 }
145 
static ssize_t
efct_lio_npiv_tpg_enable_store(struct config_item *item, const char *page,
			       size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct efct_lio_tpg *tpg =
		container_of(se_tpg, struct efct_lio_tpg, tpg);
	struct efct_lio_vport *lio_vport = tpg->vport;
	struct efct *efct;
	struct efc *efc;
	unsigned long op;

	/* configfs "enable" attribute for an NPIV portal group: "1" brings
	 * the virtual port up, "0" tears it down.
	 */
	if (kstrtoul(page, 0, &op) < 0)
		return -EINVAL;

	if (!lio_vport) {
		pr_err("Unable to find vport\n");
		return -EINVAL;
	}

	efct = lio_vport->efct;
	efc = efct->efcport;

	if (op == 1) {
		tpg->enabled = true;
		efc_log_debug(efct, "enable portal group %d\n", tpg->tpgt);

		if (efc->domain) {
			/* fabric already discovered: instantiate the vport
			 * right away
			 */
			int ret;

			ret = efc_nport_vport_new(efc->domain,
						  lio_vport->npiv_wwpn,
						  lio_vport->npiv_wwnn,
						  U32_MAX, false, true,
						  NULL, NULL);
			if (ret != 0) {
				efc_log_err(efct, "Failed to create Vport\n");
				return ret;
			}
			return count;
		}

		/* no domain yet: record a creation spec so the vport is
		 * brought up when the domain attaches
		 */
		if (!(efc_vport_create_spec(efc, lio_vport->npiv_wwnn,
					    lio_vport->npiv_wwpn, U32_MAX,
					    false, true, NULL, NULL)))
			return -ENOMEM;

	} else if (op == 0) {
		efc_log_debug(efct, "disable portal group %d\n", tpg->tpgt);

		tpg->enabled = false;
		/* only physical nport should exist, free lio_nport
		 * allocated in efct_lio_make_nport
		 */
		if (efc->domain) {
			efc_nport_vport_del(efct->efcport, efc->domain,
					    lio_vport->npiv_wwpn,
					    lio_vport->npiv_wwnn);
			return count;
		}
	} else {
		return -EINVAL;
	}
	return count;
}
211 
212 static char *efct_lio_get_fabric_wwn(struct se_portal_group *se_tpg)
213 {
214 	struct efct_lio_tpg *tpg =
215 		container_of(se_tpg, struct efct_lio_tpg, tpg);
216 
217 	return tpg->nport->wwpn_str;
218 }
219 
220 static char *efct_lio_get_npiv_fabric_wwn(struct se_portal_group *se_tpg)
221 {
222 	struct efct_lio_tpg *tpg =
223 		container_of(se_tpg, struct efct_lio_tpg, tpg);
224 
225 	return tpg->vport->wwpn_str;
226 }
227 
228 static u16 efct_lio_get_tag(struct se_portal_group *se_tpg)
229 {
230 	struct efct_lio_tpg *tpg =
231 		container_of(se_tpg, struct efct_lio_tpg, tpg);
232 
233 	return tpg->tpgt;
234 }
235 
236 static u16 efct_lio_get_npiv_tag(struct se_portal_group *se_tpg)
237 {
238 	struct efct_lio_tpg *tpg =
239 		container_of(se_tpg, struct efct_lio_tpg, tpg);
240 
241 	return tpg->tpgt;
242 }
243 
static int efct_lio_check_demo_mode(struct se_portal_group *se_tpg)
{
	/* Always permit demo mode (dynamic sessions) on this fabric. */
	return 1;
}
248 
static int efct_lio_check_demo_mode_cache(struct se_portal_group *se_tpg)
{
	/* Always cache dynamically generated ACLs. */
	return 1;
}
253 
254 static int efct_lio_check_demo_write_protect(struct se_portal_group *se_tpg)
255 {
256 	struct efct_lio_tpg *tpg =
257 		container_of(se_tpg, struct efct_lio_tpg, tpg);
258 
259 	return tpg->tpg_attrib.demo_mode_write_protect;
260 }
261 
262 static int
263 efct_lio_npiv_check_demo_write_protect(struct se_portal_group *se_tpg)
264 {
265 	struct efct_lio_tpg *tpg =
266 		container_of(se_tpg, struct efct_lio_tpg, tpg);
267 
268 	return tpg->tpg_attrib.demo_mode_write_protect;
269 }
270 
271 static int efct_lio_check_prod_write_protect(struct se_portal_group *se_tpg)
272 {
273 	struct efct_lio_tpg *tpg =
274 		container_of(se_tpg, struct efct_lio_tpg, tpg);
275 
276 	return tpg->tpg_attrib.prod_mode_write_protect;
277 }
278 
279 static int
280 efct_lio_npiv_check_prod_write_protect(struct se_portal_group *se_tpg)
281 {
282 	struct efct_lio_tpg *tpg =
283 		container_of(se_tpg, struct efct_lio_tpg, tpg);
284 
285 	return tpg->tpg_attrib.prod_mode_write_protect;
286 }
287 
static int efct_lio_check_stop_free(struct se_cmd *se_cmd)
{
	struct efct_scsi_tgt_io *ocp =
		container_of(se_cmd, struct efct_scsi_tgt_io, cmd);
	struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);

	/* Record the state transition, then drop the target core's kref
	 * on the command (may trigger release when it is the last put).
	 */
	efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_CHK_STOP_FREE);
	return target_put_sess_cmd(se_cmd);
}
297 
static int
efct_lio_abort_tgt_cb(struct efct_io *io,
		      enum efct_scsi_io_status scsi_status,
		      u32 flags, void *arg)
{
	/* Completion callback for efct_scsi_tgt_abort_io(): log only. */
	efct_lio_io_printf(io, "Abort done, status:%d\n", scsi_status);
	return 0;
}
306 
static void
efct_lio_aborted_task(struct se_cmd *se_cmd)
{
	struct efct_scsi_tgt_io *ocp =
		container_of(se_cmd, struct efct_scsi_tgt_io, cmd);
	struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);

	/* Target core notification that this command was aborted. */
	efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_ABORTED_TASK);

	/* response already on the wire - nothing left to terminate */
	if (ocp->rsp_sent)
		return;

	/* command has been aborted, cleanup here */
	ocp->aborting = true;
	ocp->err = EFCT_SCSI_STATUS_ABORTED;
	/* terminate the exchange */
	efct_scsi_tgt_abort_io(io, efct_lio_abort_tgt_cb, NULL);
}
325 
326 static void efct_lio_release_cmd(struct se_cmd *se_cmd)
327 {
328 	struct efct_scsi_tgt_io *ocp =
329 		container_of(se_cmd, struct efct_scsi_tgt_io, cmd);
330 	struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
331 	struct efct *efct = io->efct;
332 
333 	efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_RELEASE_CMD);
334 	efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_CMPL_CMD);
335 	efct_scsi_io_complete(io);
336 	atomic_sub_return(1, &efct->tgt_efct.ios_in_use);
337 }
338 
339 static void efct_lio_close_session(struct se_session *se_sess)
340 {
341 	struct efc_node *node = se_sess->fabric_sess_ptr;
342 
343 	pr_debug("se_sess=%p node=%p", se_sess, node);
344 
345 	if (!node) {
346 		pr_debug("node is NULL");
347 		return;
348 	}
349 
350 	efc_node_post_shutdown(node, NULL);
351 }
352 
353 static int efct_lio_get_cmd_state(struct se_cmd *cmd)
354 {
355 	struct efct_scsi_tgt_io *ocp =
356 		container_of(cmd, struct efct_scsi_tgt_io, cmd);
357 	struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
358 
359 	return io->tgt_io.state;
360 }
361 
362 static int
363 efct_lio_sg_map(struct efct_io *io)
364 {
365 	struct efct_scsi_tgt_io *ocp = &io->tgt_io;
366 	struct se_cmd *cmd = &ocp->cmd;
367 
368 	ocp->seg_map_cnt = dma_map_sg(&io->efct->pci->dev, cmd->t_data_sg,
369 				      cmd->t_data_nents, cmd->data_direction);
370 	if (ocp->seg_map_cnt == 0)
371 		return -EFAULT;
372 	return 0;
373 }
374 
static void
efct_lio_sg_unmap(struct efct_io *io)
{
	struct efct_scsi_tgt_io *ocp = &io->tgt_io;
	struct se_cmd *cmd = &ocp->cmd;

	/* Undo efct_lio_sg_map(); calling with nothing mapped is a bug. */
	if (WARN_ON(!ocp->seg_map_cnt || !cmd->t_data_sg))
		return;

	dma_unmap_sg(&io->efct->pci->dev, cmd->t_data_sg,
		     ocp->seg_map_cnt, cmd->data_direction);
	/* mark unmapped so a second unmap attempt trips the WARN above */
	ocp->seg_map_cnt = 0;
}
388 
static int
efct_lio_status_done(struct efct_io *io,
		     enum efct_scsi_io_status scsi_status,
		     u32 flags, void *arg)
{
	struct efct_scsi_tgt_io *ocp = &io->tgt_io;

	/* Completion callback for efct_scsi_send_resp(): release DMA
	 * mappings and give the command back to the target core.
	 */
	efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_RSP_DONE);
	if (scsi_status != EFCT_SCSI_STATUS_GOOD) {
		efct_lio_io_printf(io, "callback completed with error=%d\n",
				   scsi_status);
		ocp->err = scsi_status;
	}
	if (ocp->seg_map_cnt)
		efct_lio_sg_unmap(io);

	efct_lio_io_printf(io, "status=%d, err=%d flags=0x%x, dir=%d\n",
			   scsi_status, ocp->err, flags, ocp->ddir);

	/* final put: frees the se_cmd and, via release_cmd, the efct_io */
	efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_GENERIC_FREE);
	transport_generic_free_cmd(&io->tgt_io.cmd, 0);
	return 0;
}
412 
413 static int
414 efct_lio_datamove_done(struct efct_io *io, enum efct_scsi_io_status scsi_status,
415 		       u32 flags, void *arg);
416 
static int
efct_lio_write_pending(struct se_cmd *cmd)
{
	struct efct_scsi_tgt_io *ocp =
		container_of(cmd, struct efct_scsi_tgt_io, cmd);
	struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
	struct efct_scsi_sgl *sgl = io->sgl;
	struct scatterlist *sg;
	u32 flags = 0, cnt, curcnt;
	u64 length = 0;

	/* Start (or continue) receiving write data from the initiator.
	 * Large transfers are handled in batches of at most
	 * io->sgl_allocated segments; efct_lio_datamove_done() re-enters
	 * this function until the whole scatterlist is consumed.
	 */
	efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_WRITE_PENDING);
	efct_lio_io_printf(io, "trans_state=0x%x se_cmd_flags=0x%x\n",
			   cmd->transport_state, cmd->se_cmd_flags);

	/* first call for this command: DMA-map the scatterlist */
	if (ocp->seg_cnt == 0) {
		ocp->seg_cnt = cmd->t_data_nents;
		ocp->cur_seg = 0;
		if (efct_lio_sg_map(io)) {
			efct_lio_io_printf(io, "efct_lio_sg_map failed\n");
			return -EFAULT;
		}
	}
	curcnt = (ocp->seg_map_cnt - ocp->cur_seg);
	curcnt = (curcnt < io->sgl_allocated) ? curcnt : io->sgl_allocated;
	/* find current sg */
	for (cnt = 0, sg = cmd->t_data_sg; cnt < ocp->cur_seg; cnt++,
	     sg = sg_next(sg))
		;/* do nothing */

	/* translate this batch of scatterlist entries into the HW SGL */
	for (cnt = 0; cnt < curcnt; cnt++, sg = sg_next(sg)) {
		sgl[cnt].addr = sg_dma_address(sg);
		sgl[cnt].dif_addr = 0;
		sgl[cnt].len = sg_dma_len(sg);
		length += sgl[cnt].len;
		ocp->cur_seg++;
	}

	/* tell the hardware when this is the final data phase */
	if (ocp->cur_seg == ocp->seg_cnt)
		flags = EFCT_SCSI_LAST_DATAPHASE;

	return efct_scsi_recv_wr_data(io, flags, sgl, curcnt, length,
				    efct_lio_datamove_done, NULL);
}
461 
static int
efct_lio_queue_data_in(struct se_cmd *cmd)
{
	struct efct_scsi_tgt_io *ocp =
		container_of(cmd, struct efct_scsi_tgt_io, cmd);
	struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
	struct efct_scsi_sgl *sgl = io->sgl;
	struct scatterlist *sg = NULL;
	uint flags = 0, cnt = 0, curcnt = 0;
	u64 length = 0;

	/* Send read data to the initiator, batched by io->sgl_allocated;
	 * efct_lio_datamove_done() re-enters this function until
	 * cmd->data_length bytes have been queued.
	 */
	efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_QUEUE_DATA_IN);

	if (ocp->seg_cnt == 0) {
		if (cmd->data_length) {
			/* first call: DMA-map the scatterlist */
			ocp->seg_cnt = cmd->t_data_nents;
			ocp->cur_seg = 0;
			if (efct_lio_sg_map(io)) {
				efct_lio_io_printf(io,
						   "efct_lio_sg_map failed\n");
				return -EAGAIN;
			}
		} else {
			/* If command length is 0, send the response status */
			struct efct_scsi_cmd_resp rsp;

			memset(&rsp, 0, sizeof(rsp));
			efct_lio_io_printf(io,
					   "cmd : %p length 0, send status\n",
					   cmd);
			return efct_scsi_send_resp(io, 0, &rsp,
						   efct_lio_status_done, NULL);
		}
	}
	curcnt = min(ocp->seg_map_cnt - ocp->cur_seg, io->sgl_allocated);

	while (cnt < curcnt) {
		sg = &cmd->t_data_sg[ocp->cur_seg];
		sgl[cnt].addr = sg_dma_address(sg);
		sgl[cnt].dif_addr = 0;
		/* clamp the last segment so we never send past data_length */
		if (ocp->transferred_len + sg_dma_len(sg) >= cmd->data_length)
			sgl[cnt].len = cmd->data_length - ocp->transferred_len;
		else
			sgl[cnt].len = sg_dma_len(sg);

		ocp->transferred_len += sgl[cnt].len;
		length += sgl[cnt].len;
		ocp->cur_seg++;
		cnt++;
		if (ocp->transferred_len == cmd->data_length)
			break;
	}

	if (ocp->transferred_len == cmd->data_length) {
		flags = EFCT_SCSI_LAST_DATAPHASE;
		ocp->seg_cnt = ocp->cur_seg;
	}

	/* If there is residual, disable Auto Good Response */
	if (cmd->residual_count)
		flags |= EFCT_SCSI_NO_AUTO_RESPONSE;

	efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_SEND_RD_DATA);

	return efct_scsi_send_rd_data(io, flags, sgl, curcnt, length,
				    efct_lio_datamove_done, NULL);
}
529 
static void
efct_lio_send_resp(struct efct_io *io, enum efct_scsi_io_status scsi_status,
		   u32 flags)
{
	struct efct_scsi_cmd_resp rsp;
	struct efct_scsi_tgt_io *ocp = &io->tgt_io;
	struct se_cmd *cmd = &io->tgt_io.cmd;
	int rc;

	/* Complete a read command: either the hardware already sent the
	 * (auto) response, or build and send one explicitly.
	 */
	if (flags & EFCT_SCSI_IO_CMPL_RSP_SENT) {
		ocp->rsp_sent = true;
		efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_GENERIC_FREE);
		transport_generic_free_cmd(&io->tgt_io.cmd, 0);
		return;
	}

	/* send check condition if an error occurred */
	memset(&rsp, 0, sizeof(rsp));
	rsp.scsi_status = cmd->scsi_status;
	rsp.sense_data = (uint8_t *)io->tgt_io.sense_buffer;
	rsp.sense_data_length = cmd->scsi_sense_length;

	/* Check for residual underrun or overrun */
	if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT)
		rsp.residual = -cmd->residual_count;
	else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT)
		rsp.residual = cmd->residual_count;

	rc = efct_scsi_send_resp(io, 0, &rsp, efct_lio_status_done, NULL);
	efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_SEND_RSP);
	if (rc != 0) {
		/* could not queue the response: free the command now */
		efct_lio_io_printf(io, "Read done, send rsp failed %d\n", rc);
		efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_GENERIC_FREE);
		transport_generic_free_cmd(&io->tgt_io.cmd, 0);
	} else {
		ocp->rsp_sent = true;
	}
}
568 
static int
efct_lio_datamove_done(struct efct_io *io, enum efct_scsi_io_status scsi_status,
		       u32 flags, void *arg)
{
	struct efct_scsi_tgt_io *ocp = &io->tgt_io;

	/* Completion callback for a read/write data phase: either queue
	 * the next batch of segments, or finish the command.
	 */
	efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_DATA_DONE);
	if (scsi_status != EFCT_SCSI_STATUS_GOOD) {
		efct_lio_io_printf(io, "callback completed with error=%d\n",
				   scsi_status);
		ocp->err = scsi_status;
	}
	efct_lio_io_printf(io, "seg_map_cnt=%d\n", ocp->seg_map_cnt);
	if (ocp->seg_map_cnt) {
		if (ocp->err == EFCT_SCSI_STATUS_GOOD &&
		    ocp->cur_seg < ocp->seg_cnt) {
			/* transfer not finished: queue the next batch */
			int rc;

			efct_lio_io_printf(io, "continuing cmd at segm=%d\n",
					   ocp->cur_seg);
			if (ocp->ddir == DMA_TO_DEVICE)
				rc = efct_lio_write_pending(&ocp->cmd);
			else
				rc = efct_lio_queue_data_in(&ocp->cmd);
			if (!rc)
				return 0;

			ocp->err = EFCT_SCSI_STATUS_ERROR;
			efct_lio_io_printf(io, "could not continue command\n");
		}
		efct_lio_sg_unmap(io);
	}

	/* abort path handles its own completion */
	if (io->tgt_io.aborting) {
		efct_lio_io_printf(io, "IO done aborted\n");
		return 0;
	}

	if (ocp->ddir == DMA_TO_DEVICE) {
		/* write data received: hand the command to the backstore,
		 * or fail it if the data phase errored
		 */
		efct_lio_io_printf(io, "Write done, trans_state=0x%x\n",
				   io->tgt_io.cmd.transport_state);
		if (scsi_status != EFCT_SCSI_STATUS_GOOD) {
			transport_generic_request_failure(&io->tgt_io.cmd,
					TCM_CHECK_CONDITION_ABORT_CMD);
			efct_set_lio_io_state(io,
				EFCT_LIO_STATE_TGT_GENERIC_REQ_FAILURE);
		} else {
			efct_set_lio_io_state(io,
						EFCT_LIO_STATE_TGT_EXECUTE_CMD);
			target_execute_cmd(&io->tgt_io.cmd);
		}
	} else {
		efct_lio_send_resp(io, scsi_status, flags);
	}
	return 0;
}
625 
static int
efct_lio_tmf_done(struct efct_io *io, enum efct_scsi_io_status scsi_status,
		  u32 flags, void *arg)
{
	/* TMF response sent: release the se_cmd back to the target core. */
	efct_lio_tmfio_printf(io, "cmd=%p status=%d, flags=0x%x\n",
			      &io->tgt_io.cmd, scsi_status, flags);

	efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_GENERIC_FREE);
	transport_generic_free_cmd(&io->tgt_io.cmd, 0);
	return 0;
}
637 
static int
efct_lio_null_tmf_done(struct efct_io *tmfio,
		       enum efct_scsi_io_status scsi_status,
		      u32 flags, void *arg)
{
	/* TMF completion for a request that never reached the target
	 * core, so there is no se_cmd to free.
	 */
	efct_lio_tmfio_printf(tmfio, "cmd=%p status=%d, flags=0x%x\n",
			      &tmfio->tgt_io.cmd, scsi_status, flags);

	/* free struct efct_io only, no active se_cmd */
	efct_scsi_io_complete(tmfio);
	return 0;
}
650 
static int
efct_lio_queue_status(struct se_cmd *cmd)
{
	struct efct_scsi_cmd_resp rsp;
	struct efct_scsi_tgt_io *ocp =
		container_of(cmd, struct efct_scsi_tgt_io, cmd);
	struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
	int rc = 0;

	/* Build and send the SCSI status (and sense, if any) for a command
	 * with no data-in phase.
	 */
	efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_QUEUE_STATUS);
	efct_lio_io_printf(io,
		"status=0x%x trans_state=0x%x se_cmd_flags=0x%x sns_len=%d\n",
		cmd->scsi_status, cmd->transport_state, cmd->se_cmd_flags,
		cmd->scsi_sense_length);

	memset(&rsp, 0, sizeof(rsp));
	rsp.scsi_status = cmd->scsi_status;
	rsp.sense_data = (u8 *)io->tgt_io.sense_buffer;
	rsp.sense_data_length = cmd->scsi_sense_length;

	/* Check for residual underrun or overrun, mark negative value for
	 * underrun to recognize in HW
	 */
	if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT)
		rsp.residual = -cmd->residual_count;
	else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT)
		rsp.residual = cmd->residual_count;

	rc = efct_scsi_send_resp(io, 0, &rsp, efct_lio_status_done, NULL);
	efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_SEND_RSP);
	if (rc == 0)
		ocp->rsp_sent = true;
	return rc;
}
685 
static void efct_lio_queue_tm_rsp(struct se_cmd *cmd)
{
	struct efct_scsi_tgt_io *ocp =
		container_of(cmd, struct efct_scsi_tgt_io, cmd);
	struct efct_io *tmfio = container_of(ocp, struct efct_io, tgt_io);
	struct se_tmr_req *se_tmr = cmd->se_tmr_req;
	u8 rspcode;

	/* Translate the target core's TMR response code into the driver's
	 * TMF response code and send it on the wire.
	 */
	efct_lio_tmfio_printf(tmfio, "cmd=%p function=0x%x tmr->response=%d\n",
			      cmd, se_tmr->function, se_tmr->response);
	switch (se_tmr->response) {
	case TMR_FUNCTION_COMPLETE:
		rspcode = EFCT_SCSI_TMF_FUNCTION_COMPLETE;
		break;
	case TMR_TASK_DOES_NOT_EXIST:
		rspcode = EFCT_SCSI_TMF_FUNCTION_IO_NOT_FOUND;
		break;
	case TMR_LUN_DOES_NOT_EXIST:
		rspcode = EFCT_SCSI_TMF_INCORRECT_LOGICAL_UNIT_NUMBER;
		break;
	case TMR_FUNCTION_REJECTED:
	default:
		rspcode = EFCT_SCSI_TMF_FUNCTION_REJECTED;
		break;
	}
	efct_scsi_send_tmf_resp(tmfio, rspcode, NULL, efct_lio_tmf_done, NULL);
}
713 
714 static struct efct *efct_find_wwpn(u64 wwpn)
715 {
716 	struct efct *efct;
717 
718 	 /* Search for the HBA that has this WWPN */
719 	list_for_each_entry(efct, &efct_devices, list_entry) {
720 
721 		if (wwpn == efct_get_wwpn(&efct->hw))
722 			return efct;
723 	}
724 
725 	return NULL;
726 }
727 
static struct se_wwn *
efct_lio_make_nport(struct target_fabric_configfs *tf,
		    struct config_group *group, const char *name)
{
	struct efct_lio_nport *lio_nport;
	struct efct *efct;
	int ret;
	u64 wwpn;

	/* configfs nport creation: "name" is the colon-separated WWPN of
	 * the physical port; it must match an existing adapter.
	 */
	ret = efct_lio_parse_wwn(name, &wwpn, 0);
	if (ret)
		return ERR_PTR(ret);

	efct = efct_find_wwpn(wwpn);
	if (!efct) {
		pr_err("cannot find EFCT for base wwpn %s\n", name);
		return ERR_PTR(-ENXIO);
	}

	lio_nport = kzalloc(sizeof(*lio_nport), GFP_KERNEL);
	if (!lio_nport)
		return ERR_PTR(-ENOMEM);

	lio_nport->efct = efct;
	lio_nport->wwpn = wwpn;
	efct_format_wwn(lio_nport->wwpn_str, sizeof(lio_nport->wwpn_str),
			"naa.", wwpn);
	/* freed in efct_lio_drop_nport */
	efct->tgt_efct.lio_nport = lio_nport;

	return &lio_nport->nport_wwn;
}
759 
static struct se_wwn *
efct_lio_npiv_make_nport(struct target_fabric_configfs *tf,
			 struct config_group *group, const char *name)
{
	struct efct_lio_vport *lio_vport;
	struct efct *efct;
	int ret;
	u64 p_wwpn, npiv_wwpn, npiv_wwnn;
	char *p, *pbuf, tmp[128];
	struct efct_lio_vport_list_t *vport_list;
	struct fc_vport *new_fc_vport;
	struct fc_vport_identifiers vport_id;
	unsigned long flags = 0;

	/* configfs NPIV nport creation. "name" has the form
	 * "<physical wwpn>@<npiv wwpn>:<npiv wwnn>"; copy it into a local
	 * buffer because strsep() modifies the string it parses.
	 */
	snprintf(tmp, sizeof(tmp), "%s", name);
	pbuf = &tmp[0];

	p = strsep(&pbuf, "@");

	if (!p || !pbuf) {
		pr_err("Unable to find separator operator(@)\n");
		return ERR_PTR(-EINVAL);
	}

	ret = efct_lio_parse_wwn(p, &p_wwpn, 0);
	if (ret)
		return ERR_PTR(ret);

	ret = efct_lio_parse_npiv_wwn(pbuf, strlen(pbuf), &npiv_wwpn,
				      &npiv_wwnn);
	if (ret)
		return ERR_PTR(ret);

	/* the base WWPN must name an existing adapter */
	efct = efct_find_wwpn(p_wwpn);
	if (!efct) {
		pr_err("cannot find EFCT for base wwpn %s\n", name);
		return ERR_PTR(-ENXIO);
	}

	lio_vport = kzalloc(sizeof(*lio_vport), GFP_KERNEL);
	if (!lio_vport)
		return ERR_PTR(-ENOMEM);

	lio_vport->efct = efct;
	lio_vport->wwpn = p_wwpn;
	lio_vport->npiv_wwpn = npiv_wwpn;
	lio_vport->npiv_wwnn = npiv_wwnn;

	efct_format_wwn(lio_vport->wwpn_str, sizeof(lio_vport->wwpn_str),
			"naa.", npiv_wwpn);

	/* list node tracking this vport; freed in efct_lio_npiv_drop_nport */
	vport_list = kzalloc(sizeof(*vport_list), GFP_KERNEL);
	if (!vport_list) {
		kfree(lio_vport);
		return ERR_PTR(-ENOMEM);
	}

	vport_list->lio_vport = lio_vport;

	/* register the vport with the FC transport class */
	memset(&vport_id, 0, sizeof(vport_id));
	vport_id.port_name = npiv_wwpn;
	vport_id.node_name = npiv_wwnn;
	vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
	vport_id.vport_type = FC_PORTTYPE_NPIV;
	vport_id.disable = false;

	new_fc_vport = fc_vport_create(efct->shost, 0, &vport_id);
	if (!new_fc_vport) {
		efc_log_err(efct, "fc_vport_create failed\n");
		kfree(lio_vport);
		kfree(vport_list);
		return ERR_PTR(-ENOMEM);
	}

	lio_vport->fc_vport = new_fc_vport;
	spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags);
	INIT_LIST_HEAD(&vport_list->list_entry);
	list_add_tail(&vport_list->list_entry, &efct->tgt_efct.vport_list);
	spin_unlock_irqrestore(&efct->tgt_efct.efct_lio_lock, flags);

	return &lio_vport->vport_wwn;
}
842 
static void
efct_lio_drop_nport(struct se_wwn *wwn)
{
	struct efct_lio_nport *lio_nport =
		container_of(wwn, struct efct_lio_nport, nport_wwn);
	struct efct *efct = lio_nport->efct;

	/* only physical nport should exist, free lio_nport allocated
	 * in efct_lio_make_nport.
	 */
	kfree(efct->tgt_efct.lio_nport);
	efct->tgt_efct.lio_nport = NULL;
}
856 
static void
efct_lio_npiv_drop_nport(struct se_wwn *wwn)
{
	struct efct_lio_vport *lio_vport =
		container_of(wwn, struct efct_lio_vport, vport_wwn);
	struct efct_lio_vport_list_t *vport, *next_vport;
	struct efct *efct = lio_vport->efct;
	unsigned long flags = 0;

	/* tear down the FC transport vport first */
	if (lio_vport->fc_vport)
		fc_vport_terminate(lio_vport->fc_vport);

	spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags);

	/* unlink and free the tracking node and the vport itself
	 * (both allocated in efct_lio_npiv_make_nport)
	 */
	list_for_each_entry_safe(vport, next_vport, &efct->tgt_efct.vport_list,
				 list_entry) {
		if (vport->lio_vport == lio_vport) {
			list_del(&vport->list_entry);
			kfree(vport->lio_vport);
			kfree(vport);
			break;
		}
	}
	spin_unlock_irqrestore(&efct->tgt_efct.efct_lio_lock, flags);
}
882 
static struct se_portal_group *
efct_lio_make_tpg(struct se_wwn *wwn, const char *name)
{
	struct efct_lio_nport *lio_nport =
		container_of(wwn, struct efct_lio_nport, nport_wwn);
	struct efct_lio_tpg *tpg;
	struct efct *efct;
	unsigned long n;
	int ret;

	/* configfs tpg creation: "name" must be "tpgt_<n>" with n fitting
	 * in a u16.
	 */
	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	if (kstrtoul(name + 5, 10, &n) || n > USHRT_MAX)
		return ERR_PTR(-EINVAL);

	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
	if (!tpg)
		return ERR_PTR(-ENOMEM);

	tpg->nport = lio_nport;
	tpg->tpgt = n;
	/* stays disabled until the "enable" attribute is written */
	tpg->enabled = false;

	tpg->tpg_attrib.generate_node_acls = 1;
	tpg->tpg_attrib.demo_mode_write_protect = 1;
	tpg->tpg_attrib.cache_dynamic_acls = 1;
	tpg->tpg_attrib.demo_mode_login_only = 1;
	tpg->tpg_attrib.session_deletion_wait = 1;

	ret = core_tpg_register(wwn, &tpg->tpg, SCSI_PROTOCOL_FCP);
	if (ret < 0) {
		kfree(tpg);
		return NULL;
	}
	efct = lio_nport->efct;
	efct->tgt_efct.tpg = tpg;
	efc_log_debug(efct, "create portal group %d\n", tpg->tpgt);

	/* node lookup table torn down in efct_lio_drop_tpg */
	xa_init(&efct->lookup);
	return &tpg->tpg;
}
924 
static void
efct_lio_drop_tpg(struct se_portal_group *se_tpg)
{
	struct efct_lio_tpg *tpg =
		container_of(se_tpg, struct efct_lio_tpg, tpg);

	struct efct *efct = tpg->nport->efct;

	/* Undo efct_lio_make_tpg: deregister from the target core and
	 * release the lookup table and the tpg itself.
	 */
	efc_log_debug(efct, "drop portal group %d\n", tpg->tpgt);
	tpg->nport->efct->tgt_efct.tpg = NULL;
	core_tpg_deregister(se_tpg);
	xa_destroy(&efct->lookup);
	kfree(tpg);
}
939 
static struct se_portal_group *
efct_lio_npiv_make_tpg(struct se_wwn *wwn, const char *name)
{
	struct efct_lio_vport *lio_vport =
		container_of(wwn, struct efct_lio_vport, vport_wwn);
	struct efct_lio_tpg *tpg;
	struct efct *efct;
	unsigned long n;
	int ret;

	/* configfs tpg creation for an NPIV vport: only "tpgt_1" is
	 * accepted (one portal group per vport).
	 */
	efct = lio_vport->efct;
	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	if (kstrtoul(name + 5, 10, &n) || n > USHRT_MAX)
		return ERR_PTR(-EINVAL);

	if (n != 1) {
		efc_log_err(efct, "Invalid tpgt index: %ld provided\n", n);
		return ERR_PTR(-EINVAL);
	}

	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
	if (!tpg)
		return ERR_PTR(-ENOMEM);

	tpg->vport = lio_vport;
	tpg->tpgt = n;
	/* stays disabled until the "enable" attribute is written */
	tpg->enabled = false;

	tpg->tpg_attrib.generate_node_acls = 1;
	tpg->tpg_attrib.demo_mode_write_protect = 1;
	tpg->tpg_attrib.cache_dynamic_acls = 1;
	tpg->tpg_attrib.demo_mode_login_only = 1;
	tpg->tpg_attrib.session_deletion_wait = 1;

	ret = core_tpg_register(wwn, &tpg->tpg, SCSI_PROTOCOL_FCP);

	if (ret < 0) {
		kfree(tpg);
		return NULL;
	}
	lio_vport->tpg = tpg;
	efc_log_debug(efct, "create vport portal group %d\n", tpg->tpgt);

	return &tpg->tpg;
}
986 
987 static void
988 efct_lio_npiv_drop_tpg(struct se_portal_group *se_tpg)
989 {
990 	struct efct_lio_tpg *tpg =
991 		container_of(se_tpg, struct efct_lio_tpg, tpg);
992 
993 	efc_log_debug(tpg->vport->efct, "drop npiv portal group %d\n",
994 		       tpg->tpgt);
995 	core_tpg_deregister(se_tpg);
996 	kfree(tpg);
997 }
998 
static int
efct_lio_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
{
	struct efct_lio_nacl *nacl;
	u64 wwnn;

	/* Parse the initiator's colon-separated WWNN from its configfs
	 * name and cache both the raw and formatted forms in the ACL.
	 */
	if (efct_lio_parse_wwn(name, &wwnn, 0) < 0)
		return -EINVAL;

	nacl = container_of(se_nacl, struct efct_lio_nacl, se_node_acl);
	nacl->nport_wwnn = wwnn;

	efct_format_wwn(nacl->nport_name, sizeof(nacl->nport_name), "", wwnn);
	return 0;
}
1014 
1015 static int efct_lio_check_demo_mode_login_only(struct se_portal_group *stpg)
1016 {
1017 	struct efct_lio_tpg *tpg = container_of(stpg, struct efct_lio_tpg, tpg);
1018 
1019 	return tpg->tpg_attrib.demo_mode_login_only;
1020 }
1021 
1022 static int
1023 efct_lio_npiv_check_demo_mode_login_only(struct se_portal_group *stpg)
1024 {
1025 	struct efct_lio_tpg *tpg = container_of(stpg, struct efct_lio_tpg, tpg);
1026 
1027 	return tpg->tpg_attrib.demo_mode_login_only;
1028 }
1029 
static struct efct_lio_tpg *
efct_get_vport_tpg(struct efc_node *node)
{
	struct efct *efct;
	u64 wwpn = node->nport->wwpn;
	struct efct_lio_vport_list_t *vport, *next;
	struct efct_lio_vport *lio_vport = NULL;
	struct efct_lio_tpg *tpg = NULL;
	unsigned long flags = 0;

	/* Find the NPIV portal group whose vport WWPN matches the nport
	 * this node logged in on; NULL when it is the physical port.
	 */
	efct = node->efc->base;
	spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags);
	list_for_each_entry_safe(vport, next, &efct->tgt_efct.vport_list,
				 list_entry) {
		lio_vport = vport->lio_vport;
		if (wwpn && lio_vport && lio_vport->npiv_wwpn == wwpn) {
			efc_log_debug(efct, "found tpg on vport\n");
			tpg = lio_vport->tpg;
			break;
		}
	}
	spin_unlock_irqrestore(&efct->tgt_efct.efct_lio_lock, flags);
	return tpg;
}
1054 
static void
_efct_tgt_node_free(struct kref *arg)
{
	struct efct_node *tgt_node = container_of(arg, struct efct_node, ref);
	struct efc_node *node = tgt_node->node;

	/* kref release: tell libefc the initiator removal finished, then
	 * free the target-side node wrapper.
	 */
	efc_scsi_del_initiator_complete(node->efc, node);
	kfree(tgt_node);
}
1064 
static int efct_session_cb(struct se_portal_group *se_tpg,
			   struct se_session *se_sess, void *private)
{
	struct efc_node *node = private;
	struct efct_node *tgt_node;
	struct efct *efct = node->efc->base;

	/* Session-creation callback: allocate the target-side node wrapper
	 * linking the LIO session to the libefc node, and snapshot the
	 * addressing info (FC IDs, VPI, RPI) the I/O path needs.
	 */
	tgt_node = kzalloc(sizeof(*tgt_node), GFP_KERNEL);
	if (!tgt_node)
		return -ENOMEM;

	/* released via _efct_tgt_node_free on the last kref_put */
	kref_init(&tgt_node->ref);
	tgt_node->release = _efct_tgt_node_free;

	tgt_node->session = se_sess;
	node->tgt_node = tgt_node;
	tgt_node->efct = efct;

	tgt_node->node = node;

	tgt_node->node_fc_id = node->rnode.fc_id;
	tgt_node->port_fc_id = node->nport->fc_id;
	tgt_node->vpi = node->nport->indicator;
	tgt_node->rpi = node->rnode.indicator;

	spin_lock_init(&tgt_node->active_ios_lock);
	INIT_LIST_HEAD(&tgt_node->active_ios);

	return 0;
}
1095 
1096 int efct_scsi_tgt_new_device(struct efct *efct)
1097 {
1098 	u32 total_ios;
1099 
1100 	/* Get the max settings */
1101 	efct->tgt_efct.max_sge = sli_get_max_sge(&efct->hw.sli);
1102 	efct->tgt_efct.max_sgl = sli_get_max_sgl(&efct->hw.sli);
1103 
1104 	/* initialize IO watermark fields */
1105 	atomic_set(&efct->tgt_efct.ios_in_use, 0);
1106 	total_ios = efct->hw.config.n_io;
1107 	efc_log_debug(efct, "total_ios=%d\n", total_ios);
1108 	efct->tgt_efct.watermark_min =
1109 			(total_ios * EFCT_WATERMARK_LOW_PCT) / 100;
1110 	efct->tgt_efct.watermark_max =
1111 			(total_ios * EFCT_WATERMARK_HIGH_PCT) / 100;
1112 	atomic_set(&efct->tgt_efct.io_high_watermark,
1113 		   efct->tgt_efct.watermark_max);
1114 	atomic_set(&efct->tgt_efct.watermark_hit, 0);
1115 	atomic_set(&efct->tgt_efct.initiator_count, 0);
1116 
1117 	lio_wq = create_singlethread_workqueue("efct_lio_worker");
1118 	if (!lio_wq) {
1119 		efc_log_err(efct, "workqueue create failed\n");
1120 		return -EIO;
1121 	}
1122 
1123 	spin_lock_init(&efct->tgt_efct.efct_lio_lock);
1124 	INIT_LIST_HEAD(&efct->tgt_efct.vport_list);
1125 
1126 	return 0;
1127 }
1128 
/*
 * efct_scsi_tgt_del_device() - per-device LIO target teardown.
 *
 * Drains any outstanding session create/delete work from lio_wq.
 * NOTE(review): lio_wq is flushed but never destroyed here — confirm the
 * workqueue is intentionally kept alive for the module's lifetime.
 *
 * Return: always 0.
 */
int efct_scsi_tgt_del_device(struct efct *efct)
{
	flush_workqueue(lio_wq);

	return 0;
}
1135 
/*
 * efct_scsi_tgt_new_nport() - notification that a new nport was created.
 *
 * Only logs the event; the actual TPG/WWN setup is driven via configfs.
 * Return: always 0.
 */
int
efct_scsi_tgt_new_nport(struct efc *efc, struct efc_nport *nport)
{
	struct efct *efct = nport->efc->base;

	efc_log_debug(efct, "New SPORT: %s bound to %s\n", nport->display_name,
		       efct->tgt_efct.lio_nport->wwpn_str);

	return 0;
}
1146 
/*
 * efct_scsi_tgt_del_nport() - notification that an nport is being removed.
 *
 * Only logs the event; teardown is handled elsewhere via configfs.
 */
void
efct_scsi_tgt_del_nport(struct efc *efc, struct efc_nport *nport)
{
	efc_log_debug(efc, "Del SPORT: %s\n", nport->display_name);
}
1152 
1153 static void efct_lio_setup_session(struct work_struct *work)
1154 {
1155 	struct efct_lio_wq_data *wq_data =
1156 		container_of(work, struct efct_lio_wq_data, work);
1157 	struct efct *efct = wq_data->efct;
1158 	struct efc_node *node = wq_data->ptr;
1159 	char wwpn[WWN_NAME_LEN];
1160 	struct efct_lio_tpg *tpg;
1161 	struct efct_node *tgt_node;
1162 	struct se_portal_group *se_tpg;
1163 	struct se_session *se_sess;
1164 	int watermark;
1165 	int ini_count;
1166 	u64 id;
1167 
1168 	/* Check to see if it's belongs to vport,
1169 	 * if not get physical port
1170 	 */
1171 	tpg = efct_get_vport_tpg(node);
1172 	if (tpg) {
1173 		se_tpg = &tpg->tpg;
1174 	} else if (efct->tgt_efct.tpg) {
1175 		tpg = efct->tgt_efct.tpg;
1176 		se_tpg = &tpg->tpg;
1177 	} else {
1178 		efc_log_err(efct, "failed to init session\n");
1179 		return;
1180 	}
1181 
1182 	/*
1183 	 * Format the FCP Initiator port_name into colon
1184 	 * separated values to match the format by our explicit
1185 	 * ConfigFS NodeACLs.
1186 	 */
1187 	efct_format_wwn(wwpn, sizeof(wwpn), "",	efc_node_get_wwpn(node));
1188 
1189 	se_sess = target_setup_session(se_tpg, 0, 0, TARGET_PROT_NORMAL, wwpn,
1190 				       node, efct_session_cb);
1191 	if (IS_ERR(se_sess)) {
1192 		efc_log_err(efct, "failed to setup session\n");
1193 		kfree(wq_data);
1194 		efc_scsi_sess_reg_complete(node, -EIO);
1195 		return;
1196 	}
1197 
1198 	tgt_node = node->tgt_node;
1199 	id = (u64) tgt_node->port_fc_id << 32 | tgt_node->node_fc_id;
1200 
1201 	efc_log_debug(efct, "new initiator sess=%p node=%p id: %llx\n",
1202 		      se_sess, node, id);
1203 
1204 	if (xa_err(xa_store(&efct->lookup, id, tgt_node, GFP_KERNEL)))
1205 		efc_log_err(efct, "Node lookup store failed\n");
1206 
1207 	efc_scsi_sess_reg_complete(node, 0);
1208 
1209 	/* update IO watermark: increment initiator count */
1210 	ini_count = atomic_add_return(1, &efct->tgt_efct.initiator_count);
1211 	watermark = efct->tgt_efct.watermark_max -
1212 		    ini_count * EFCT_IO_WATERMARK_PER_INITIATOR;
1213 	watermark = (efct->tgt_efct.watermark_min > watermark) ?
1214 			efct->tgt_efct.watermark_min : watermark;
1215 	atomic_set(&efct->tgt_efct.io_high_watermark, watermark);
1216 
1217 	kfree(wq_data);
1218 }
1219 
1220 int efct_scsi_new_initiator(struct efc *efc, struct efc_node *node)
1221 {
1222 	struct efct *efct = node->efc->base;
1223 	struct efct_lio_wq_data *wq_data;
1224 
1225 	/*
1226 	 * Since LIO only supports initiator validation at thread level,
1227 	 * we are open minded and accept all callers.
1228 	 */
1229 	wq_data = kzalloc(sizeof(*wq_data), GFP_ATOMIC);
1230 	if (!wq_data)
1231 		return -ENOMEM;
1232 
1233 	wq_data->ptr = node;
1234 	wq_data->efct = efct;
1235 	INIT_WORK(&wq_data->work, efct_lio_setup_session);
1236 	queue_work(lio_wq, &wq_data->work);
1237 	return EFC_SCSI_CALL_ASYNC;
1238 }
1239 
1240 static void efct_lio_remove_session(struct work_struct *work)
1241 {
1242 	struct efct_lio_wq_data *wq_data =
1243 		container_of(work, struct efct_lio_wq_data, work);
1244 	struct efct *efct = wq_data->efct;
1245 	struct efc_node *node = wq_data->ptr;
1246 	struct efct_node *tgt_node;
1247 	struct se_session *se_sess;
1248 
1249 	tgt_node = node->tgt_node;
1250 	if (!tgt_node) {
1251 		/* base driver has sent back-to-back requests
1252 		 * to unreg session with no intervening
1253 		 * register
1254 		 */
1255 		efc_log_err(efct, "unreg session for NULL session\n");
1256 		efc_scsi_del_initiator_complete(node->efc, node);
1257 		return;
1258 	}
1259 
1260 	se_sess = tgt_node->session;
1261 	efc_log_debug(efct, "unreg session se_sess=%p node=%p\n",
1262 		       se_sess, node);
1263 
1264 	/* first flag all session commands to complete */
1265 	target_stop_session(se_sess);
1266 
1267 	/* now wait for session commands to complete */
1268 	target_wait_for_sess_cmds(se_sess);
1269 	target_remove_session(se_sess);
1270 	tgt_node->session = NULL;
1271 	node->tgt_node = NULL;
1272 	kref_put(&tgt_node->ref, tgt_node->release);
1273 
1274 	kfree(wq_data);
1275 }
1276 
1277 int efct_scsi_del_initiator(struct efc *efc, struct efc_node *node, int reason)
1278 {
1279 	struct efct *efct = node->efc->base;
1280 	struct efct_node *tgt_node = node->tgt_node;
1281 	struct efct_lio_wq_data *wq_data;
1282 	int watermark;
1283 	int ini_count;
1284 	u64 id;
1285 
1286 	if (reason == EFCT_SCSI_INITIATOR_MISSING)
1287 		return EFC_SCSI_CALL_COMPLETE;
1288 
1289 	if (!tgt_node) {
1290 		efc_log_err(efct, "tgt_node is NULL\n");
1291 		return -EIO;
1292 	}
1293 
1294 	wq_data = kzalloc(sizeof(*wq_data), GFP_ATOMIC);
1295 	if (!wq_data)
1296 		return -ENOMEM;
1297 
1298 	id = (u64) tgt_node->port_fc_id << 32 | tgt_node->node_fc_id;
1299 	xa_erase(&efct->lookup, id);
1300 
1301 	wq_data->ptr = node;
1302 	wq_data->efct = efct;
1303 	INIT_WORK(&wq_data->work, efct_lio_remove_session);
1304 	queue_work(lio_wq, &wq_data->work);
1305 
1306 	/*
1307 	 * update IO watermark: decrement initiator count
1308 	 */
1309 	ini_count = atomic_sub_return(1, &efct->tgt_efct.initiator_count);
1310 
1311 	watermark = efct->tgt_efct.watermark_max -
1312 		    ini_count * EFCT_IO_WATERMARK_PER_INITIATOR;
1313 	watermark = (efct->tgt_efct.watermark_min > watermark) ?
1314 			efct->tgt_efct.watermark_min : watermark;
1315 	atomic_set(&efct->tgt_efct.io_high_watermark, watermark);
1316 
1317 	return EFC_SCSI_CALL_ASYNC;
1318 }
1319 
/*
 * efct_scsi_recv_cmd() - submit a newly received FCP command to LIO.
 * @io:      the efct IO carrying the command.
 * @lun:     destination logical unit.
 * @cdb:     SCSI CDB bytes.
 * @cdb_len: length of @cdb in bytes.
 * @flags:   EFCT_SCSI_CMD_* task-attribute and data-direction bits.
 *
 * Translates the task attribute and data direction, then hands the
 * command to the target core via target_init_cmd()/target_submit_prep()/
 * target_submit().  The IO is freed locally if no session exists or
 * target_init_cmd() fails.
 */
void efct_scsi_recv_cmd(struct efct_io *io, uint64_t lun, u8 *cdb,
		       u32 cdb_len, u32 flags)
{
	struct efct_scsi_tgt_io *ocp = &io->tgt_io;
	struct se_cmd *se_cmd = &io->tgt_io.cmd;
	struct efct *efct = io->efct;
	char *ddir;
	struct efct_node *tgt_node;
	struct se_session *se_sess;
	int rc = 0;

	memset(ocp, 0, sizeof(struct efct_scsi_tgt_io));
	efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_RECV_CMD);
	atomic_add_return(1, &efct->tgt_efct.ios_in_use);

	/* set target timeout */
	io->timeout = efct->target_io_timer_sec;

	/* Map the wire task-attribute flag to the TCM tag type. */
	if (flags & EFCT_SCSI_CMD_SIMPLE)
		ocp->task_attr = TCM_SIMPLE_TAG;
	else if (flags & EFCT_SCSI_CMD_HEAD_OF_QUEUE)
		ocp->task_attr = TCM_HEAD_TAG;
	else if (flags & EFCT_SCSI_CMD_ORDERED)
		ocp->task_attr = TCM_ORDERED_TAG;
	else if (flags & EFCT_SCSI_CMD_ACA)
		ocp->task_attr = TCM_ACA_TAG;

	/* Map the direction bits to a DMA direction (ddir is log-only). */
	switch (flags & (EFCT_SCSI_CMD_DIR_IN | EFCT_SCSI_CMD_DIR_OUT)) {
	case EFCT_SCSI_CMD_DIR_IN:
		ddir = "FROM_INITIATOR";
		ocp->ddir = DMA_TO_DEVICE;
		break;
	case EFCT_SCSI_CMD_DIR_OUT:
		ddir = "TO_INITIATOR";
		ocp->ddir = DMA_FROM_DEVICE;
		break;
	case EFCT_SCSI_CMD_DIR_IN | EFCT_SCSI_CMD_DIR_OUT:
		ddir = "BIDIR";
		ocp->ddir = DMA_BIDIRECTIONAL;
		break;
	default:
		ddir = "NONE";
		ocp->ddir = DMA_NONE;
		break;
	}

	ocp->lun = lun;
	efct_lio_io_printf(io, "new cmd=0x%x ddir=%s dl=%u\n",
			   cdb[0], ddir, io->exp_xfer_len);

	/* io->node is presumably set by the dispatch path before this call
	 * — the code below dereferences it unconditionally.
	 */
	tgt_node = io->node;
	se_sess = tgt_node->session;
	if (!se_sess) {
		efc_log_err(efct, "No session found to submit IO se_cmd: %p\n",
			    &ocp->cmd);
		efct_scsi_io_free(io);
		return;
	}

	efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_SUBMIT_CMD);
	rc = target_init_cmd(se_cmd, se_sess, &io->tgt_io.sense_buffer[0],
			     ocp->lun, io->exp_xfer_len, ocp->task_attr,
			     ocp->ddir, TARGET_SCF_ACK_KREF);
	if (rc) {
		efc_log_err(efct, "failed to init cmd se_cmd: %p\n", se_cmd);
		efct_scsi_io_free(io);
		return;
	}

	/* NOTE(review): on target_submit_prep() failure the command is
	 * presumably completed/released by the target core, so no local
	 * free is done — confirm against target core semantics.
	 */
	if (target_submit_prep(se_cmd, cdb, NULL, 0, NULL, 0,
				NULL, 0, GFP_ATOMIC))
		return;

	target_submit(se_cmd);
}
1395 
/*
 * efct_scsi_recv_tmf() - submit a received task management function to LIO.
 * @tmfio:       the efct IO carrying the TMF request.
 * @lun:         target logical unit.
 * @cmd:         which TMF (EFCT_SCSI_TMF_*).
 * @io_to_abort: for ABORT TASK, the IO being aborted (else unused).
 * @flags:       unused here.
 *
 * Maps the driver TMF code to a TCM TMR function and submits it via
 * target_submit_tmr().  Unsupported TMFs are rejected with
 * EFCT_SCSI_TMF_FUNCTION_REJECTED.
 *
 * Return: always 0.
 */
int
efct_scsi_recv_tmf(struct efct_io *tmfio, u32 lun, enum efct_scsi_tmf_cmd cmd,
		   struct efct_io *io_to_abort, u32 flags)
{
	unsigned char tmr_func;
	struct efct *efct = tmfio->efct;
	struct efct_scsi_tgt_io *ocp = &tmfio->tgt_io;
	struct efct_node *tgt_node;
	struct se_session *se_sess;
	int rc;

	memset(ocp, 0, sizeof(struct efct_scsi_tgt_io));
	efct_set_lio_io_state(tmfio, EFCT_LIO_STATE_SCSI_RECV_TMF);
	atomic_add_return(1, &efct->tgt_efct.ios_in_use);
	efct_lio_tmfio_printf(tmfio, "%s: new tmf %x lun=%u\n",
			      tmfio->display_name, cmd, lun);

	/* Translate the driver TMF code to the TCM TMR function code. */
	switch (cmd) {
	case EFCT_SCSI_TMF_ABORT_TASK:
		tmr_func = TMR_ABORT_TASK;
		break;
	case EFCT_SCSI_TMF_ABORT_TASK_SET:
		tmr_func = TMR_ABORT_TASK_SET;
		break;
	case EFCT_SCSI_TMF_CLEAR_TASK_SET:
		tmr_func = TMR_CLEAR_TASK_SET;
		break;
	case EFCT_SCSI_TMF_LOGICAL_UNIT_RESET:
		tmr_func = TMR_LUN_RESET;
		break;
	case EFCT_SCSI_TMF_CLEAR_ACA:
		tmr_func = TMR_CLEAR_ACA;
		break;
	case EFCT_SCSI_TMF_TARGET_RESET:
		tmr_func = TMR_TARGET_WARM_RESET;
		break;
	case EFCT_SCSI_TMF_QUERY_ASYNCHRONOUS_EVENT:
	case EFCT_SCSI_TMF_QUERY_TASK_SET:
	default:
		/* Unsupported TMF: send FUNCTION REJECTED. */
		goto tmf_fail;
	}

	tmfio->tgt_io.tmf = tmr_func;
	tmfio->tgt_io.lun = lun;
	tmfio->tgt_io.io_to_abort = io_to_abort;

	tgt_node = tmfio->node;

	/* NOTE(review): with no session this returns 0 without sending any
	 * response or freeing tmfio — confirm the caller handles that.
	 */
	se_sess = tgt_node->session;
	if (!se_sess)
		return 0;

	rc = target_submit_tmr(&ocp->cmd, se_sess, NULL, lun, ocp, tmr_func,
			GFP_ATOMIC, tmfio->init_task_tag, TARGET_SCF_ACK_KREF);

	efct_set_lio_io_state(tmfio, EFCT_LIO_STATE_TGT_SUBMIT_TMR);
	if (rc)
		goto tmf_fail;

	return 0;

tmf_fail:
	efct_scsi_send_tmf_resp(tmfio, EFCT_SCSI_TMF_FUNCTION_REJECTED,
				NULL, efct_lio_null_tmf_done, NULL);
	return 0;
}
1462 
/* Start items for efct_lio_tpg_attrib_cit */

/*
 * DEF_EFCT_TPG_ATTRIB() - generate configfs show/store handlers for one
 * boolean TPG attribute on the physical port.  The store handler accepts
 * only 0 or 1.  (No comments inside the macro body: they interact badly
 * with the backslash line continuations.)
 */
#define DEF_EFCT_TPG_ATTRIB(name)					  \
									  \
static ssize_t efct_lio_tpg_attrib_##name##_show(			  \
		struct config_item *item, char *page)			  \
{									  \
	struct se_portal_group *se_tpg = to_tpg(item);			  \
	struct efct_lio_tpg *tpg = container_of(se_tpg,			  \
			struct efct_lio_tpg, tpg);			  \
									  \
	return sprintf(page, "%u\n", tpg->tpg_attrib.name);		  \
}									  \
									  \
static ssize_t efct_lio_tpg_attrib_##name##_store(			  \
		struct config_item *item, const char *page, size_t count) \
{									  \
	struct se_portal_group *se_tpg = to_tpg(item);			  \
	struct efct_lio_tpg *tpg = container_of(se_tpg,			  \
					struct efct_lio_tpg, tpg);	  \
	struct efct_lio_tpg_attrib *a = &tpg->tpg_attrib;		  \
	unsigned long val;						  \
	int ret;							  \
									  \
	ret = kstrtoul(page, 0, &val);					  \
	if (ret < 0) {							  \
		pr_err("kstrtoul() failed with ret: %d\n", ret);	  \
		return ret;						  \
	}								  \
									  \
	if (val != 0 && val != 1) {					  \
		pr_err("Illegal boolean value %lu\n", val);		  \
		return -EINVAL;						  \
	}								  \
									  \
	a->name = val;							  \
									  \
	return count;							  \
}									  \
CONFIGFS_ATTR(efct_lio_tpg_attrib_, name)

/* Instantiate the boolean attributes exposed under the TPG's attrib/. */
DEF_EFCT_TPG_ATTRIB(generate_node_acls);
DEF_EFCT_TPG_ATTRIB(cache_dynamic_acls);
DEF_EFCT_TPG_ATTRIB(demo_mode_write_protect);
DEF_EFCT_TPG_ATTRIB(prod_mode_write_protect);
DEF_EFCT_TPG_ATTRIB(demo_mode_login_only);
DEF_EFCT_TPG_ATTRIB(session_deletion_wait);

/* NULL-terminated attribute table wired into efct_lio_ops below. */
static struct configfs_attribute *efct_lio_tpg_attrib_attrs[] = {
	&efct_lio_tpg_attrib_attr_generate_node_acls,
	&efct_lio_tpg_attrib_attr_cache_dynamic_acls,
	&efct_lio_tpg_attrib_attr_demo_mode_write_protect,
	&efct_lio_tpg_attrib_attr_prod_mode_write_protect,
	&efct_lio_tpg_attrib_attr_demo_mode_login_only,
	&efct_lio_tpg_attrib_attr_session_deletion_wait,
	NULL,
};
1520 
/*
 * DEF_EFCT_NPIV_TPG_ATTRIB() - NPIV-port counterpart of
 * DEF_EFCT_TPG_ATTRIB(): generates configfs show/store handlers for one
 * boolean TPG attribute; the store handler accepts only 0 or 1.
 */
#define DEF_EFCT_NPIV_TPG_ATTRIB(name)					   \
									   \
static ssize_t efct_lio_npiv_tpg_attrib_##name##_show(			   \
		struct config_item *item, char *page)			   \
{									   \
	struct se_portal_group *se_tpg = to_tpg(item);			   \
	struct efct_lio_tpg *tpg = container_of(se_tpg,			   \
			struct efct_lio_tpg, tpg);			   \
									   \
	return sprintf(page, "%u\n", tpg->tpg_attrib.name);		   \
}									   \
									   \
static ssize_t efct_lio_npiv_tpg_attrib_##name##_store(			   \
		struct config_item *item, const char *page, size_t count)  \
{									   \
	struct se_portal_group *se_tpg = to_tpg(item);			   \
	struct efct_lio_tpg *tpg = container_of(se_tpg,			   \
			struct efct_lio_tpg, tpg);			   \
	struct efct_lio_tpg_attrib *a = &tpg->tpg_attrib;		   \
	unsigned long val;						   \
	int ret;							   \
									   \
	ret = kstrtoul(page, 0, &val);					   \
	if (ret < 0) {							   \
		pr_err("kstrtoul() failed with ret: %d\n", ret);	   \
		return ret;						   \
	}								   \
									   \
	if (val != 0 && val != 1) {					   \
		pr_err("Illegal boolean value %lu\n", val);		   \
		return -EINVAL;						   \
	}								   \
									   \
	a->name = val;							   \
									   \
	return count;							   \
}									   \
CONFIGFS_ATTR(efct_lio_npiv_tpg_attrib_, name)

/* Instantiate the boolean attributes for the NPIV TPG's attrib/. */
DEF_EFCT_NPIV_TPG_ATTRIB(generate_node_acls);
DEF_EFCT_NPIV_TPG_ATTRIB(cache_dynamic_acls);
DEF_EFCT_NPIV_TPG_ATTRIB(demo_mode_write_protect);
DEF_EFCT_NPIV_TPG_ATTRIB(prod_mode_write_protect);
DEF_EFCT_NPIV_TPG_ATTRIB(demo_mode_login_only);
DEF_EFCT_NPIV_TPG_ATTRIB(session_deletion_wait);

/* NULL-terminated attribute table wired into efct_lio_npiv_ops below. */
static struct configfs_attribute *efct_lio_npiv_tpg_attrib_attrs[] = {
	&efct_lio_npiv_tpg_attrib_attr_generate_node_acls,
	&efct_lio_npiv_tpg_attrib_attr_cache_dynamic_acls,
	&efct_lio_npiv_tpg_attrib_attr_demo_mode_write_protect,
	&efct_lio_npiv_tpg_attrib_attr_prod_mode_write_protect,
	&efct_lio_npiv_tpg_attrib_attr_demo_mode_login_only,
	&efct_lio_npiv_tpg_attrib_attr_session_deletion_wait,
	NULL,
};
1576 
/* Per-TPG "enable" attribute for the physical and NPIV fabrics. */
CONFIGFS_ATTR(efct_lio_tpg_, enable);
static struct configfs_attribute *efct_lio_tpg_attrs[] = {
				&efct_lio_tpg_attr_enable, NULL };
CONFIGFS_ATTR(efct_lio_npiv_tpg_, enable);
static struct configfs_attribute *efct_lio_npiv_tpg_attrs[] = {
				&efct_lio_npiv_tpg_attr_enable, NULL };
1583 
/* TCM fabric template for the physical port (the "efct" fabric). */
static const struct target_core_fabric_ops efct_lio_ops = {
	.module				= THIS_MODULE,
	.fabric_name			= "efct",
	.node_acl_size			= sizeof(struct efct_lio_nacl),
	.max_data_sg_nents		= 65535,
	.tpg_get_wwn			= efct_lio_get_fabric_wwn,
	.tpg_get_tag			= efct_lio_get_tag,
	.fabric_init_nodeacl		= efct_lio_init_nodeacl,
	.tpg_check_demo_mode		= efct_lio_check_demo_mode,
	.tpg_check_demo_mode_cache      = efct_lio_check_demo_mode_cache,
	.tpg_check_demo_mode_write_protect = efct_lio_check_demo_write_protect,
	.tpg_check_prod_mode_write_protect = efct_lio_check_prod_write_protect,
	.check_stop_free		= efct_lio_check_stop_free,
	.aborted_task			= efct_lio_aborted_task,
	.release_cmd			= efct_lio_release_cmd,
	.close_session			= efct_lio_close_session,
	.write_pending			= efct_lio_write_pending,
	.get_cmd_state			= efct_lio_get_cmd_state,
	.queue_data_in			= efct_lio_queue_data_in,
	.queue_status			= efct_lio_queue_status,
	.queue_tm_rsp			= efct_lio_queue_tm_rsp,
	.fabric_make_wwn		= efct_lio_make_nport,
	.fabric_drop_wwn		= efct_lio_drop_nport,
	.fabric_make_tpg		= efct_lio_make_tpg,
	.fabric_drop_tpg		= efct_lio_drop_tpg,
	.tpg_check_demo_mode_login_only = efct_lio_check_demo_mode_login_only,
	.tpg_check_prot_fabric_only	= NULL,
	.sess_get_initiator_sid		= NULL,
	.tfc_tpg_base_attrs		= efct_lio_tpg_attrs,
	.tfc_tpg_attrib_attrs           = efct_lio_tpg_attrib_attrs,
};
1615 
/* TCM fabric template for NPIV virtual ports (the "efct_npiv" fabric). */
static const struct target_core_fabric_ops efct_lio_npiv_ops = {
	.module				= THIS_MODULE,
	.fabric_name			= "efct_npiv",
	.node_acl_size			= sizeof(struct efct_lio_nacl),
	.max_data_sg_nents		= 65535,
	.tpg_get_wwn			= efct_lio_get_npiv_fabric_wwn,
	.tpg_get_tag			= efct_lio_get_npiv_tag,
	.fabric_init_nodeacl		= efct_lio_init_nodeacl,
	.tpg_check_demo_mode		= efct_lio_check_demo_mode,
	.tpg_check_demo_mode_cache      = efct_lio_check_demo_mode_cache,
	.tpg_check_demo_mode_write_protect =
					efct_lio_npiv_check_demo_write_protect,
	.tpg_check_prod_mode_write_protect =
					efct_lio_npiv_check_prod_write_protect,
	.check_stop_free		= efct_lio_check_stop_free,
	.aborted_task			= efct_lio_aborted_task,
	.release_cmd			= efct_lio_release_cmd,
	.close_session			= efct_lio_close_session,
	.write_pending			= efct_lio_write_pending,
	.get_cmd_state			= efct_lio_get_cmd_state,
	.queue_data_in			= efct_lio_queue_data_in,
	.queue_status			= efct_lio_queue_status,
	.queue_tm_rsp			= efct_lio_queue_tm_rsp,
	.fabric_make_wwn		= efct_lio_npiv_make_nport,
	.fabric_drop_wwn		= efct_lio_npiv_drop_nport,
	.fabric_make_tpg		= efct_lio_npiv_make_tpg,
	.fabric_drop_tpg		= efct_lio_npiv_drop_tpg,
	.tpg_check_demo_mode_login_only =
				efct_lio_npiv_check_demo_mode_login_only,
	.tpg_check_prot_fabric_only	= NULL,
	.sess_get_initiator_sid		= NULL,
	.tfc_tpg_base_attrs		= efct_lio_npiv_tpg_attrs,
	.tfc_tpg_attrib_attrs		= efct_lio_npiv_tpg_attrib_attrs,
};
1650 
1651 int efct_scsi_tgt_driver_init(void)
1652 {
1653 	int rc;
1654 
1655 	/* Register the top level struct config_item_type with TCM core */
1656 	rc = target_register_template(&efct_lio_ops);
1657 	if (rc < 0) {
1658 		pr_err("target_fabric_configfs_register failed with %d\n", rc);
1659 		return rc;
1660 	}
1661 	rc = target_register_template(&efct_lio_npiv_ops);
1662 	if (rc < 0) {
1663 		pr_err("target_fabric_configfs_register failed with %d\n", rc);
1664 		target_unregister_template(&efct_lio_ops);
1665 		return rc;
1666 	}
1667 	return 0;
1668 }
1669 
/*
 * efct_scsi_tgt_driver_exit() - unregister both fabric templates from TCM.
 * Return: always 0.
 */
int efct_scsi_tgt_driver_exit(void)
{
	target_unregister_template(&efct_lio_ops);
	target_unregister_template(&efct_lio_npiv_ops);
	return 0;
}
1676