xref: /openbmc/linux/drivers/vhost/scsi.c (revision 80d0624d)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*******************************************************************************
3  * Vhost kernel TCM fabric driver for virtio SCSI initiators
4  *
5  * (C) Copyright 2010-2013 Datera, Inc.
6  * (C) Copyright 2010-2012 IBM Corp.
7  *
8  * Authors: Nicholas A. Bellinger <nab@daterainc.com>
9  *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
10  ****************************************************************************/
11 
12 #include <linux/module.h>
13 #include <linux/moduleparam.h>
14 #include <generated/utsrelease.h>
15 #include <linux/utsname.h>
16 #include <linux/init.h>
17 #include <linux/slab.h>
18 #include <linux/kthread.h>
19 #include <linux/types.h>
20 #include <linux/string.h>
21 #include <linux/configfs.h>
22 #include <linux/ctype.h>
23 #include <linux/compat.h>
24 #include <linux/eventfd.h>
25 #include <linux/fs.h>
26 #include <linux/vmalloc.h>
27 #include <linux/miscdevice.h>
28 #include <linux/blk_types.h>
29 #include <linux/bio.h>
30 #include <asm/unaligned.h>
31 #include <scsi/scsi_common.h>
32 #include <scsi/scsi_proto.h>
33 #include <target/target_core_base.h>
34 #include <target/target_core_fabric.h>
35 #include <linux/vhost.h>
36 #include <linux/virtio_scsi.h>
37 #include <linux/llist.h>
38 #include <linux/bitmap.h>
39 
40 #include "vhost.h"
41 
42 #define VHOST_SCSI_VERSION  "v0.1"
43 #define VHOST_SCSI_NAMELEN 256
44 #define VHOST_SCSI_MAX_CDB_SIZE 32
45 #define VHOST_SCSI_PREALLOC_SGLS 2048
46 #define VHOST_SCSI_PREALLOC_UPAGES 2048
47 #define VHOST_SCSI_PREALLOC_PROT_SGLS 2048
48 
49 /* Max number of requests before requeueing the job.
50  * Using this limit prevents one virtqueue from starving others with
51  * request.
52  */
53 #define VHOST_SCSI_WEIGHT 256
54 
55 struct vhost_scsi_inflight {
56 	/* Wait for the flush operation to finish */
57 	struct completion comp;
58 	/* Refcount for the inflight reqs */
59 	struct kref kref;
60 };
61 
62 struct vhost_scsi_cmd {
63 	/* Descriptor from vhost_get_vq_desc() for virt_queue segment */
64 	int tvc_vq_desc;
65 	/* virtio-scsi initiator task attribute */
66 	int tvc_task_attr;
67 	/* virtio-scsi response incoming iovecs */
68 	int tvc_in_iovs;
69 	/* virtio-scsi initiator data direction */
70 	enum dma_data_direction tvc_data_direction;
71 	/* Expected data transfer length from virtio-scsi header */
72 	u32 tvc_exp_data_len;
73 	/* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
74 	u64 tvc_tag;
75 	/* The number of scatterlists associated with this cmd */
76 	u32 tvc_sgl_count;
77 	u32 tvc_prot_sgl_count;
78 	/* Saved unpacked SCSI LUN for vhost_scsi_target_queue_cmd() */
79 	u32 tvc_lun;
80 	u32 copied_iov:1;
81 	const void *saved_iter_addr;
82 	struct iov_iter saved_iter;
83 	/* Pointer to the SGL formatted memory from virtio-scsi */
84 	struct scatterlist *tvc_sgl;
85 	struct scatterlist *tvc_prot_sgl;
86 	struct page **tvc_upages;
87 	/* Pointer to response header iovec */
88 	struct iovec *tvc_resp_iov;
89 	/* Pointer to vhost_scsi for our device */
90 	struct vhost_scsi *tvc_vhost;
91 	/* Pointer to vhost_virtqueue for the cmd */
92 	struct vhost_virtqueue *tvc_vq;
93 	/* Pointer to vhost nexus memory */
94 	struct vhost_scsi_nexus *tvc_nexus;
95 	/* The TCM I/O descriptor that is accessed via container_of() */
96 	struct se_cmd tvc_se_cmd;
97 	/* Copy of the incoming SCSI command descriptor block (CDB) */
98 	unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE];
99 	/* Sense buffer that will be mapped into outgoing status */
100 	unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
101 	/* Completed commands list, serviced from vhost worker thread */
102 	struct llist_node tvc_completion_list;
103 	/* Used to track inflight cmd */
104 	struct vhost_scsi_inflight *inflight;
105 };
106 
107 struct vhost_scsi_nexus {
108 	/* Pointer to TCM session for I_T Nexus */
109 	struct se_session *tvn_se_sess;
110 };
111 
112 struct vhost_scsi_tpg {
113 	/* Vhost port target portal group tag for TCM */
114 	u16 tport_tpgt;
115 	/* Used to track number of TPG Port/Lun Links wrt to explict I_T Nexus shutdown */
116 	int tv_tpg_port_count;
117 	/* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
118 	int tv_tpg_vhost_count;
119 	/* Used for enabling T10-PI with legacy devices */
120 	int tv_fabric_prot_type;
121 	/* list for vhost_scsi_list */
122 	struct list_head tv_tpg_list;
123 	/* Used to protect access for tpg_nexus */
124 	struct mutex tv_tpg_mutex;
125 	/* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
126 	struct vhost_scsi_nexus *tpg_nexus;
127 	/* Pointer back to vhost_scsi_tport */
128 	struct vhost_scsi_tport *tport;
129 	/* Returned by vhost_scsi_make_tpg() */
130 	struct se_portal_group se_tpg;
131 	/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
132 	struct vhost_scsi *vhost_scsi;
133 };
134 
135 struct vhost_scsi_tport {
136 	/* SCSI protocol the tport is providing */
137 	u8 tport_proto_id;
138 	/* Binary World Wide unique Port Name for Vhost Target port */
139 	u64 tport_wwpn;
140 	/* ASCII formatted WWPN for Vhost Target port */
141 	char tport_name[VHOST_SCSI_NAMELEN];
142 	/* Returned by vhost_scsi_make_tport() */
143 	struct se_wwn tport_wwn;
144 };
145 
146 struct vhost_scsi_evt {
147 	/* event to be sent to guest */
148 	struct virtio_scsi_event event;
149 	/* event list, serviced from vhost worker thread */
150 	struct llist_node list;
151 };
152 
153 enum {
154 	VHOST_SCSI_VQ_CTL = 0,
155 	VHOST_SCSI_VQ_EVT = 1,
156 	VHOST_SCSI_VQ_IO = 2,
157 };
158 
159 /* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
160 enum {
161 	VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
162 					       (1ULL << VIRTIO_SCSI_F_T10_PI)
163 };
164 
165 #define VHOST_SCSI_MAX_TARGET	256
166 #define VHOST_SCSI_MAX_IO_VQ	1024
167 #define VHOST_SCSI_MAX_EVENT	128
168 
169 static unsigned vhost_scsi_max_io_vqs = 128;
170 module_param_named(max_io_vqs, vhost_scsi_max_io_vqs, uint, 0644);
171 MODULE_PARM_DESC(max_io_vqs, "Set the max number of IO virtqueues a vhost scsi device can support. The default is 128. The max is 1024.");
172 
173 struct vhost_scsi_virtqueue {
174 	struct vhost_virtqueue vq;
175 	struct vhost_scsi *vs;
176 	/*
177 	 * Reference counting for inflight reqs, used for flush operation. At
178 	 * each time, one reference tracks new commands submitted, while we
179 	 * wait for another one to reach 0.
180 	 */
181 	struct vhost_scsi_inflight inflights[2];
182 	/*
183 	 * Indicate current inflight in use, protected by vq->mutex.
184 	 * Writers must also take dev mutex and flush under it.
185 	 */
186 	int inflight_idx;
187 	struct vhost_scsi_cmd *scsi_cmds;
188 	struct sbitmap scsi_tags;
189 	int max_cmds;
190 
191 	struct vhost_work completion_work;
192 	struct llist_head completion_list;
193 };
194 
195 struct vhost_scsi {
196 	/* Protected by vhost_scsi->dev.mutex */
197 	struct vhost_scsi_tpg **vs_tpg;
198 	char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
199 
200 	struct vhost_dev dev;
201 	struct vhost_scsi_virtqueue *vqs;
202 	struct vhost_scsi_inflight **old_inflight;
203 
204 	struct vhost_work vs_event_work; /* evt injection work item */
205 	struct llist_head vs_event_list; /* evt injection queue */
206 
207 	bool vs_events_missed; /* any missed events, protected by vq->mutex */
208 	int vs_events_nr; /* num of pending events, protected by vq->mutex */
209 };
210 
211 struct vhost_scsi_tmf {
212 	struct vhost_work vwork;
213 	struct vhost_scsi *vhost;
214 	struct vhost_scsi_virtqueue *svq;
215 
216 	struct se_cmd se_cmd;
217 	u8 scsi_resp;
218 	struct vhost_scsi_inflight *inflight;
219 	struct iovec resp_iov;
220 	int in_iovs;
221 	int vq_desc;
222 };
223 
224 /*
225  * Context for processing request and control queue operations.
226  */
227 struct vhost_scsi_ctx {
228 	int head;
229 	unsigned int out, in;
230 	size_t req_size, rsp_size;
231 	size_t out_size, in_size;
232 	u8 *target, *lunp;
233 	void *req;
234 	struct iov_iter out_iter;
235 };
236 
237 /*
238  * Global mutex to protect vhost_scsi TPG list for vhost IOCTLs and LIO
239  * configfs management operations.
240  */
241 static DEFINE_MUTEX(vhost_scsi_mutex);
242 static LIST_HEAD(vhost_scsi_list);
243 
244 static void vhost_scsi_done_inflight(struct kref *kref)
245 {
246 	struct vhost_scsi_inflight *inflight;
247 
248 	inflight = container_of(kref, struct vhost_scsi_inflight, kref);
249 	complete(&inflight->comp);
250 }
251 
252 static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
253 				    struct vhost_scsi_inflight *old_inflight[])
254 {
255 	struct vhost_scsi_inflight *new_inflight;
256 	struct vhost_virtqueue *vq;
257 	int idx, i;
258 
259 	for (i = 0; i < vs->dev.nvqs;  i++) {
260 		vq = &vs->vqs[i].vq;
261 
262 		mutex_lock(&vq->mutex);
263 
264 		/* store old infight */
265 		idx = vs->vqs[i].inflight_idx;
266 		if (old_inflight)
267 			old_inflight[i] = &vs->vqs[i].inflights[idx];
268 
269 		/* setup new infight */
270 		vs->vqs[i].inflight_idx = idx ^ 1;
271 		new_inflight = &vs->vqs[i].inflights[idx ^ 1];
272 		kref_init(&new_inflight->kref);
273 		init_completion(&new_inflight->comp);
274 
275 		mutex_unlock(&vq->mutex);
276 	}
277 }
278 
279 static struct vhost_scsi_inflight *
280 vhost_scsi_get_inflight(struct vhost_virtqueue *vq)
281 {
282 	struct vhost_scsi_inflight *inflight;
283 	struct vhost_scsi_virtqueue *svq;
284 
285 	svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
286 	inflight = &svq->inflights[svq->inflight_idx];
287 	kref_get(&inflight->kref);
288 
289 	return inflight;
290 }
291 
292 static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)
293 {
294 	kref_put(&inflight->kref, vhost_scsi_done_inflight);
295 }
296 
297 static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
298 {
299 	return 1;
300 }
301 
302 static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
303 {
304 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
305 				struct vhost_scsi_tpg, se_tpg);
306 	struct vhost_scsi_tport *tport = tpg->tport;
307 
308 	return &tport->tport_name[0];
309 }
310 
311 static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
312 {
313 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
314 				struct vhost_scsi_tpg, se_tpg);
315 	return tpg->tport_tpgt;
316 }
317 
318 static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
319 {
320 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
321 				struct vhost_scsi_tpg, se_tpg);
322 
323 	return tpg->tv_fabric_prot_type;
324 }
325 
326 static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
327 {
328 	struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
329 				struct vhost_scsi_cmd, tvc_se_cmd);
330 	struct vhost_scsi_virtqueue *svq = container_of(tv_cmd->tvc_vq,
331 				struct vhost_scsi_virtqueue, vq);
332 	struct vhost_scsi_inflight *inflight = tv_cmd->inflight;
333 	int i;
334 
335 	if (tv_cmd->tvc_sgl_count) {
336 		for (i = 0; i < tv_cmd->tvc_sgl_count; i++) {
337 			if (tv_cmd->copied_iov)
338 				__free_page(sg_page(&tv_cmd->tvc_sgl[i]));
339 			else
340 				put_page(sg_page(&tv_cmd->tvc_sgl[i]));
341 		}
342 		kfree(tv_cmd->saved_iter_addr);
343 	}
344 	if (tv_cmd->tvc_prot_sgl_count) {
345 		for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
346 			put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
347 	}
348 
349 	sbitmap_clear_bit(&svq->scsi_tags, se_cmd->map_tag);
350 	vhost_scsi_put_inflight(inflight);
351 }
352 
353 static void vhost_scsi_release_tmf_res(struct vhost_scsi_tmf *tmf)
354 {
355 	struct vhost_scsi_inflight *inflight = tmf->inflight;
356 
357 	kfree(tmf);
358 	vhost_scsi_put_inflight(inflight);
359 }
360 
361 static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
362 {
363 	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
364 		struct vhost_scsi_tmf *tmf = container_of(se_cmd,
365 					struct vhost_scsi_tmf, se_cmd);
366 		struct vhost_virtqueue *vq = &tmf->svq->vq;
367 
368 		vhost_vq_work_queue(vq, &tmf->vwork);
369 	} else {
370 		struct vhost_scsi_cmd *cmd = container_of(se_cmd,
371 					struct vhost_scsi_cmd, tvc_se_cmd);
372 		struct vhost_scsi_virtqueue *svq =  container_of(cmd->tvc_vq,
373 					struct vhost_scsi_virtqueue, vq);
374 
375 		llist_add(&cmd->tvc_completion_list, &svq->completion_list);
376 		vhost_vq_work_queue(&svq->vq, &svq->completion_work);
377 	}
378 }
379 
380 static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
381 {
382 	/* Go ahead and process the write immediately */
383 	target_execute_cmd(se_cmd);
384 	return 0;
385 }
386 
387 static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
388 {
389 	transport_generic_free_cmd(se_cmd, 0);
390 	return 0;
391 }
392 
393 static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
394 {
395 	transport_generic_free_cmd(se_cmd, 0);
396 	return 0;
397 }
398 
399 static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
400 {
401 	struct vhost_scsi_tmf *tmf = container_of(se_cmd, struct vhost_scsi_tmf,
402 						  se_cmd);
403 
404 	tmf->scsi_resp = se_cmd->se_tmr_req->response;
405 	transport_generic_free_cmd(&tmf->se_cmd, 0);
406 }
407 
408 static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
409 {
410 	return;
411 }
412 
413 static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
414 {
415 	vs->vs_events_nr--;
416 	kfree(evt);
417 }
418 
419 static struct vhost_scsi_evt *
420 vhost_scsi_allocate_evt(struct vhost_scsi *vs,
421 		       u32 event, u32 reason)
422 {
423 	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
424 	struct vhost_scsi_evt *evt;
425 
426 	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
427 		vs->vs_events_missed = true;
428 		return NULL;
429 	}
430 
431 	evt = kzalloc(sizeof(*evt), GFP_KERNEL);
432 	if (!evt) {
433 		vq_err(vq, "Failed to allocate vhost_scsi_evt\n");
434 		vs->vs_events_missed = true;
435 		return NULL;
436 	}
437 
438 	evt->event.event = cpu_to_vhost32(vq, event);
439 	evt->event.reason = cpu_to_vhost32(vq, reason);
440 	vs->vs_events_nr++;
441 
442 	return evt;
443 }
444 
445 static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
446 {
447 	return target_put_sess_cmd(se_cmd);
448 }
449 
450 static void
451 vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
452 {
453 	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
454 	struct virtio_scsi_event *event = &evt->event;
455 	struct virtio_scsi_event __user *eventp;
456 	unsigned out, in;
457 	int head, ret;
458 
459 	if (!vhost_vq_get_backend(vq)) {
460 		vs->vs_events_missed = true;
461 		return;
462 	}
463 
464 again:
465 	vhost_disable_notify(&vs->dev, vq);
466 	head = vhost_get_vq_desc(vq, vq->iov,
467 			ARRAY_SIZE(vq->iov), &out, &in,
468 			NULL, NULL);
469 	if (head < 0) {
470 		vs->vs_events_missed = true;
471 		return;
472 	}
473 	if (head == vq->num) {
474 		if (vhost_enable_notify(&vs->dev, vq))
475 			goto again;
476 		vs->vs_events_missed = true;
477 		return;
478 	}
479 
480 	if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
481 		vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
482 				vq->iov[out].iov_len);
483 		vs->vs_events_missed = true;
484 		return;
485 	}
486 
487 	if (vs->vs_events_missed) {
488 		event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
489 		vs->vs_events_missed = false;
490 	}
491 
492 	eventp = vq->iov[out].iov_base;
493 	ret = __copy_to_user(eventp, event, sizeof(*event));
494 	if (!ret)
495 		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
496 	else
497 		vq_err(vq, "Faulted on vhost_scsi_send_event\n");
498 }
499 
500 static void vhost_scsi_complete_events(struct vhost_scsi *vs, bool drop)
501 {
502 	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
503 	struct vhost_scsi_evt *evt, *t;
504 	struct llist_node *llnode;
505 
506 	mutex_lock(&vq->mutex);
507 	llnode = llist_del_all(&vs->vs_event_list);
508 	llist_for_each_entry_safe(evt, t, llnode, list) {
509 		if (!drop)
510 			vhost_scsi_do_evt_work(vs, evt);
511 		vhost_scsi_free_evt(vs, evt);
512 	}
513 	mutex_unlock(&vq->mutex);
514 }
515 
516 static void vhost_scsi_evt_work(struct vhost_work *work)
517 {
518 	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
519 					     vs_event_work);
520 	vhost_scsi_complete_events(vs, false);
521 }
522 
523 static int vhost_scsi_copy_sgl_to_iov(struct vhost_scsi_cmd *cmd)
524 {
525 	struct iov_iter *iter = &cmd->saved_iter;
526 	struct scatterlist *sg = cmd->tvc_sgl;
527 	struct page *page;
528 	size_t len;
529 	int i;
530 
531 	for (i = 0; i < cmd->tvc_sgl_count; i++) {
532 		page = sg_page(&sg[i]);
533 		len = sg[i].length;
534 
535 		if (copy_page_to_iter(page, 0, len, iter) != len) {
536 			pr_err("Could not copy data while handling misaligned cmd. Error %zu\n",
537 			       len);
538 			return -1;
539 		}
540 	}
541 
542 	return 0;
543 }
544 
545 /* Fill in status and signal that we are done processing this command
546  *
547  * This is scheduled in the vhost work queue so we are called with the owner
548  * process mm and can access the vring.
549  */
550 static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
551 {
552 	struct vhost_scsi_virtqueue *svq = container_of(work,
553 				struct vhost_scsi_virtqueue, completion_work);
554 	struct virtio_scsi_cmd_resp v_rsp;
555 	struct vhost_scsi_cmd *cmd, *t;
556 	struct llist_node *llnode;
557 	struct se_cmd *se_cmd;
558 	struct iov_iter iov_iter;
559 	bool signal = false;
560 	int ret;
561 
562 	llnode = llist_del_all(&svq->completion_list);
563 	llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) {
564 		se_cmd = &cmd->tvc_se_cmd;
565 
566 		pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
567 			cmd, se_cmd->residual_count, se_cmd->scsi_status);
568 		memset(&v_rsp, 0, sizeof(v_rsp));
569 
570 		if (cmd->saved_iter_addr && vhost_scsi_copy_sgl_to_iov(cmd)) {
571 			v_rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
572 		} else {
573 			v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq,
574 						     se_cmd->residual_count);
575 			/* TODO is status_qualifier field needed? */
576 			v_rsp.status = se_cmd->scsi_status;
577 			v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
578 							 se_cmd->scsi_sense_length);
579 			memcpy(v_rsp.sense, cmd->tvc_sense_buf,
580 			       se_cmd->scsi_sense_length);
581 		}
582 
583 		iov_iter_init(&iov_iter, ITER_DEST, cmd->tvc_resp_iov,
584 			      cmd->tvc_in_iovs, sizeof(v_rsp));
585 		ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
586 		if (likely(ret == sizeof(v_rsp))) {
587 			signal = true;
588 
589 			vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
590 		} else
591 			pr_err("Faulted on virtio_scsi_cmd_resp\n");
592 
593 		vhost_scsi_release_cmd_res(se_cmd);
594 	}
595 
596 	if (signal)
597 		vhost_signal(&svq->vs->dev, &svq->vq);
598 }
599 
600 static struct vhost_scsi_cmd *
601 vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
602 		   unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
603 		   u32 exp_data_len, int data_direction)
604 {
605 	struct vhost_scsi_virtqueue *svq = container_of(vq,
606 					struct vhost_scsi_virtqueue, vq);
607 	struct vhost_scsi_cmd *cmd;
608 	struct vhost_scsi_nexus *tv_nexus;
609 	struct scatterlist *sg, *prot_sg;
610 	struct iovec *tvc_resp_iov;
611 	struct page **pages;
612 	int tag;
613 
614 	tv_nexus = tpg->tpg_nexus;
615 	if (!tv_nexus) {
616 		pr_err("Unable to locate active struct vhost_scsi_nexus\n");
617 		return ERR_PTR(-EIO);
618 	}
619 
620 	tag = sbitmap_get(&svq->scsi_tags);
621 	if (tag < 0) {
622 		pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
623 		return ERR_PTR(-ENOMEM);
624 	}
625 
626 	cmd = &svq->scsi_cmds[tag];
627 	sg = cmd->tvc_sgl;
628 	prot_sg = cmd->tvc_prot_sgl;
629 	pages = cmd->tvc_upages;
630 	tvc_resp_iov = cmd->tvc_resp_iov;
631 	memset(cmd, 0, sizeof(*cmd));
632 	cmd->tvc_sgl = sg;
633 	cmd->tvc_prot_sgl = prot_sg;
634 	cmd->tvc_upages = pages;
635 	cmd->tvc_se_cmd.map_tag = tag;
636 	cmd->tvc_tag = scsi_tag;
637 	cmd->tvc_lun = lun;
638 	cmd->tvc_task_attr = task_attr;
639 	cmd->tvc_exp_data_len = exp_data_len;
640 	cmd->tvc_data_direction = data_direction;
641 	cmd->tvc_nexus = tv_nexus;
642 	cmd->inflight = vhost_scsi_get_inflight(vq);
643 	cmd->tvc_resp_iov = tvc_resp_iov;
644 
645 	memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);
646 
647 	return cmd;
648 }
649 
650 /*
651  * Map a user memory range into a scatterlist
652  *
653  * Returns the number of scatterlist entries used or -errno on error.
654  */
655 static int
656 vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
657 		      struct iov_iter *iter,
658 		      struct scatterlist *sgl,
659 		      bool is_prot)
660 {
661 	struct page **pages = cmd->tvc_upages;
662 	struct scatterlist *sg = sgl;
663 	ssize_t bytes, mapped_bytes;
664 	size_t offset, mapped_offset;
665 	unsigned int npages = 0;
666 
667 	bytes = iov_iter_get_pages2(iter, pages, LONG_MAX,
668 				VHOST_SCSI_PREALLOC_UPAGES, &offset);
669 	/* No pages were pinned */
670 	if (bytes <= 0)
671 		return bytes < 0 ? bytes : -EFAULT;
672 
673 	mapped_bytes = bytes;
674 	mapped_offset = offset;
675 
676 	while (bytes) {
677 		unsigned n = min_t(unsigned, PAGE_SIZE - offset, bytes);
678 		/*
679 		 * The block layer requires bios/requests to be a multiple of
680 		 * 512 bytes, but Windows can send us vecs that are misaligned.
681 		 * This can result in bios and later requests with misaligned
682 		 * sizes if we have to break up a cmd/scatterlist into multiple
683 		 * bios.
684 		 *
685 		 * We currently only break up a command into multiple bios if
686 		 * we hit the vec/seg limit, so check if our sgl_count is
687 		 * greater than the max and if a vec in the cmd has a
688 		 * misaligned offset/size.
689 		 */
690 		if (!is_prot &&
691 		    (offset & (SECTOR_SIZE - 1) || n & (SECTOR_SIZE - 1)) &&
692 		    cmd->tvc_sgl_count > BIO_MAX_VECS) {
693 			WARN_ONCE(true,
694 				  "vhost-scsi detected misaligned IO. Performance may be degraded.");
695 			goto revert_iter_get_pages;
696 		}
697 
698 		sg_set_page(sg++, pages[npages++], n, offset);
699 		bytes -= n;
700 		offset = 0;
701 	}
702 
703 	return npages;
704 
705 revert_iter_get_pages:
706 	iov_iter_revert(iter, mapped_bytes);
707 
708 	npages = 0;
709 	while (mapped_bytes) {
710 		unsigned int n = min_t(unsigned int, PAGE_SIZE - mapped_offset,
711 				       mapped_bytes);
712 
713 		put_page(pages[npages++]);
714 
715 		mapped_bytes -= n;
716 		mapped_offset = 0;
717 	}
718 
719 	return -EINVAL;
720 }
721 
722 static int
723 vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
724 {
725 	int sgl_count = 0;
726 
727 	if (!iter || !iter_iov(iter)) {
728 		pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
729 		       " present\n", __func__, bytes);
730 		return -EINVAL;
731 	}
732 
733 	sgl_count = iov_iter_npages(iter, 0xffff);
734 	if (sgl_count > max_sgls) {
735 		pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
736 		       " max_sgls: %d\n", __func__, sgl_count, max_sgls);
737 		return -EINVAL;
738 	}
739 	return sgl_count;
740 }
741 
742 static int
743 vhost_scsi_copy_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter,
744 			   struct scatterlist *sg, int sg_count)
745 {
746 	size_t len = iov_iter_count(iter);
747 	unsigned int nbytes = 0;
748 	struct page *page;
749 	int i;
750 
751 	if (cmd->tvc_data_direction == DMA_FROM_DEVICE) {
752 		cmd->saved_iter_addr = dup_iter(&cmd->saved_iter, iter,
753 						GFP_KERNEL);
754 		if (!cmd->saved_iter_addr)
755 			return -ENOMEM;
756 	}
757 
758 	for (i = 0; i < sg_count; i++) {
759 		page = alloc_page(GFP_KERNEL);
760 		if (!page) {
761 			i--;
762 			goto err;
763 		}
764 
765 		nbytes = min_t(unsigned int, PAGE_SIZE, len);
766 		sg_set_page(&sg[i], page, nbytes, 0);
767 
768 		if (cmd->tvc_data_direction == DMA_TO_DEVICE &&
769 		    copy_page_from_iter(page, 0, nbytes, iter) != nbytes)
770 			goto err;
771 
772 		len -= nbytes;
773 	}
774 
775 	cmd->copied_iov = 1;
776 	return 0;
777 
778 err:
779 	pr_err("Could not read %u bytes while handling misaligned cmd\n",
780 	       nbytes);
781 
782 	for (; i >= 0; i--)
783 		__free_page(sg_page(&sg[i]));
784 	kfree(cmd->saved_iter_addr);
785 	return -ENOMEM;
786 }
787 
788 static int
789 vhost_scsi_map_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter,
790 			  struct scatterlist *sg, int sg_count, bool is_prot)
791 {
792 	struct scatterlist *p = sg;
793 	size_t revert_bytes;
794 	int ret;
795 
796 	while (iov_iter_count(iter)) {
797 		ret = vhost_scsi_map_to_sgl(cmd, iter, sg, is_prot);
798 		if (ret < 0) {
799 			revert_bytes = 0;
800 
801 			while (p < sg) {
802 				struct page *page = sg_page(p);
803 
804 				if (page) {
805 					put_page(page);
806 					revert_bytes += p->length;
807 				}
808 				p++;
809 			}
810 
811 			iov_iter_revert(iter, revert_bytes);
812 			return ret;
813 		}
814 		sg += ret;
815 	}
816 
817 	return 0;
818 }
819 
820 static int
821 vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
822 		 size_t prot_bytes, struct iov_iter *prot_iter,
823 		 size_t data_bytes, struct iov_iter *data_iter)
824 {
825 	int sgl_count, ret;
826 
827 	if (prot_bytes) {
828 		sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
829 						 VHOST_SCSI_PREALLOC_PROT_SGLS);
830 		if (sgl_count < 0)
831 			return sgl_count;
832 
833 		sg_init_table(cmd->tvc_prot_sgl, sgl_count);
834 		cmd->tvc_prot_sgl_count = sgl_count;
835 		pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
836 			 cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);
837 
838 		ret = vhost_scsi_map_iov_to_sgl(cmd, prot_iter,
839 						cmd->tvc_prot_sgl,
840 						cmd->tvc_prot_sgl_count, true);
841 		if (ret < 0) {
842 			cmd->tvc_prot_sgl_count = 0;
843 			return ret;
844 		}
845 	}
846 	sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
847 					 VHOST_SCSI_PREALLOC_SGLS);
848 	if (sgl_count < 0)
849 		return sgl_count;
850 
851 	sg_init_table(cmd->tvc_sgl, sgl_count);
852 	cmd->tvc_sgl_count = sgl_count;
853 	pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
854 		  cmd->tvc_sgl, cmd->tvc_sgl_count);
855 
856 	ret = vhost_scsi_map_iov_to_sgl(cmd, data_iter, cmd->tvc_sgl,
857 					cmd->tvc_sgl_count, false);
858 	if (ret == -EINVAL) {
859 		sg_init_table(cmd->tvc_sgl, cmd->tvc_sgl_count);
860 		ret = vhost_scsi_copy_iov_to_sgl(cmd, data_iter, cmd->tvc_sgl,
861 						 cmd->tvc_sgl_count);
862 	}
863 
864 	if (ret < 0) {
865 		cmd->tvc_sgl_count = 0;
866 		return ret;
867 	}
868 	return 0;
869 }
870 
871 static int vhost_scsi_to_tcm_attr(int attr)
872 {
873 	switch (attr) {
874 	case VIRTIO_SCSI_S_SIMPLE:
875 		return TCM_SIMPLE_TAG;
876 	case VIRTIO_SCSI_S_ORDERED:
877 		return TCM_ORDERED_TAG;
878 	case VIRTIO_SCSI_S_HEAD:
879 		return TCM_HEAD_TAG;
880 	case VIRTIO_SCSI_S_ACA:
881 		return TCM_ACA_TAG;
882 	default:
883 		break;
884 	}
885 	return TCM_SIMPLE_TAG;
886 }
887 
888 static void vhost_scsi_target_queue_cmd(struct vhost_scsi_cmd *cmd)
889 {
890 	struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
891 	struct vhost_scsi_nexus *tv_nexus;
892 	struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
893 
894 	/* FIXME: BIDI operation */
895 	if (cmd->tvc_sgl_count) {
896 		sg_ptr = cmd->tvc_sgl;
897 
898 		if (cmd->tvc_prot_sgl_count)
899 			sg_prot_ptr = cmd->tvc_prot_sgl;
900 		else
901 			se_cmd->prot_pto = true;
902 	} else {
903 		sg_ptr = NULL;
904 	}
905 	tv_nexus = cmd->tvc_nexus;
906 
907 	se_cmd->tag = 0;
908 	target_init_cmd(se_cmd, tv_nexus->tvn_se_sess, &cmd->tvc_sense_buf[0],
909 			cmd->tvc_lun, cmd->tvc_exp_data_len,
910 			vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
911 			cmd->tvc_data_direction, TARGET_SCF_ACK_KREF);
912 
913 	if (target_submit_prep(se_cmd, cmd->tvc_cdb, sg_ptr,
914 			       cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
915 			       cmd->tvc_prot_sgl_count, GFP_KERNEL))
916 		return;
917 
918 	target_queue_submission(se_cmd);
919 }
920 
921 static void
922 vhost_scsi_send_bad_target(struct vhost_scsi *vs,
923 			   struct vhost_virtqueue *vq,
924 			   int head, unsigned out)
925 {
926 	struct virtio_scsi_cmd_resp __user *resp;
927 	struct virtio_scsi_cmd_resp rsp;
928 	int ret;
929 
930 	memset(&rsp, 0, sizeof(rsp));
931 	rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
932 	resp = vq->iov[out].iov_base;
933 	ret = __copy_to_user(resp, &rsp, sizeof(rsp));
934 	if (!ret)
935 		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
936 	else
937 		pr_err("Faulted on virtio_scsi_cmd_resp\n");
938 }
939 
940 static int
941 vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
942 		    struct vhost_scsi_ctx *vc)
943 {
944 	int ret = -ENXIO;
945 
946 	vc->head = vhost_get_vq_desc(vq, vq->iov,
947 				     ARRAY_SIZE(vq->iov), &vc->out, &vc->in,
948 				     NULL, NULL);
949 
950 	pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
951 		 vc->head, vc->out, vc->in);
952 
953 	/* On error, stop handling until the next kick. */
954 	if (unlikely(vc->head < 0))
955 		goto done;
956 
957 	/* Nothing new?  Wait for eventfd to tell us they refilled. */
958 	if (vc->head == vq->num) {
959 		if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
960 			vhost_disable_notify(&vs->dev, vq);
961 			ret = -EAGAIN;
962 		}
963 		goto done;
964 	}
965 
966 	/*
967 	 * Get the size of request and response buffers.
968 	 * FIXME: Not correct for BIDI operation
969 	 */
970 	vc->out_size = iov_length(vq->iov, vc->out);
971 	vc->in_size = iov_length(&vq->iov[vc->out], vc->in);
972 
973 	/*
974 	 * Copy over the virtio-scsi request header, which for a
975 	 * ANY_LAYOUT enabled guest may span multiple iovecs, or a
976 	 * single iovec may contain both the header + outgoing
977 	 * WRITE payloads.
978 	 *
979 	 * copy_from_iter() will advance out_iter, so that it will
980 	 * point at the start of the outgoing WRITE payload, if
981 	 * DMA_TO_DEVICE is set.
982 	 */
983 	iov_iter_init(&vc->out_iter, ITER_SOURCE, vq->iov, vc->out, vc->out_size);
984 	ret = 0;
985 
986 done:
987 	return ret;
988 }
989 
990 static int
991 vhost_scsi_chk_size(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc)
992 {
993 	if (unlikely(vc->in_size < vc->rsp_size)) {
994 		vq_err(vq,
995 		       "Response buf too small, need min %zu bytes got %zu",
996 		       vc->rsp_size, vc->in_size);
997 		return -EINVAL;
998 	} else if (unlikely(vc->out_size < vc->req_size)) {
999 		vq_err(vq,
1000 		       "Request buf too small, need min %zu bytes got %zu",
1001 		       vc->req_size, vc->out_size);
1002 		return -EIO;
1003 	}
1004 
1005 	return 0;
1006 }
1007 
1008 static int
1009 vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc,
1010 		   struct vhost_scsi_tpg **tpgp)
1011 {
1012 	int ret = -EIO;
1013 
1014 	if (unlikely(!copy_from_iter_full(vc->req, vc->req_size,
1015 					  &vc->out_iter))) {
1016 		vq_err(vq, "Faulted on copy_from_iter_full\n");
1017 	} else if (unlikely(*vc->lunp != 1)) {
1018 		/* virtio-scsi spec requires byte 0 of the lun to be 1 */
1019 		vq_err(vq, "Illegal virtio-scsi lun: %u\n", *vc->lunp);
1020 	} else {
1021 		struct vhost_scsi_tpg **vs_tpg, *tpg;
1022 
1023 		vs_tpg = vhost_vq_get_backend(vq);	/* validated at handler entry */
1024 
1025 		tpg = READ_ONCE(vs_tpg[*vc->target]);
1026 		if (unlikely(!tpg)) {
1027 			vq_err(vq, "Target 0x%x does not exist\n", *vc->target);
1028 		} else {
1029 			if (tpgp)
1030 				*tpgp = tpg;
1031 			ret = 0;
1032 		}
1033 	}
1034 
1035 	return ret;
1036 }
1037 
1038 static u16 vhost_buf_to_lun(u8 *lun_buf)
1039 {
1040 	return ((lun_buf[2] << 8) | lun_buf[3]) & 0x3FFF;
1041 }
1042 
1043 static void
1044 vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1045 {
1046 	struct vhost_scsi_tpg **vs_tpg, *tpg;
1047 	struct virtio_scsi_cmd_req v_req;
1048 	struct virtio_scsi_cmd_req_pi v_req_pi;
1049 	struct vhost_scsi_ctx vc;
1050 	struct vhost_scsi_cmd *cmd;
1051 	struct iov_iter in_iter, prot_iter, data_iter;
1052 	u64 tag;
1053 	u32 exp_data_len, data_direction;
1054 	int ret, prot_bytes, i, c = 0;
1055 	u16 lun;
1056 	u8 task_attr;
1057 	bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
1058 	void *cdb;
1059 
1060 	mutex_lock(&vq->mutex);
1061 	/*
1062 	 * We can handle the vq only after the endpoint is setup by calling the
1063 	 * VHOST_SCSI_SET_ENDPOINT ioctl.
1064 	 */
1065 	vs_tpg = vhost_vq_get_backend(vq);
1066 	if (!vs_tpg)
1067 		goto out;
1068 
1069 	memset(&vc, 0, sizeof(vc));
1070 	vc.rsp_size = sizeof(struct virtio_scsi_cmd_resp);
1071 
1072 	vhost_disable_notify(&vs->dev, vq);
1073 
1074 	do {
1075 		ret = vhost_scsi_get_desc(vs, vq, &vc);
1076 		if (ret)
1077 			goto err;
1078 
1079 		/*
1080 		 * Setup pointers and values based upon different virtio-scsi
1081 		 * request header if T10_PI is enabled in KVM guest.
1082 		 */
1083 		if (t10_pi) {
1084 			vc.req = &v_req_pi;
1085 			vc.req_size = sizeof(v_req_pi);
1086 			vc.lunp = &v_req_pi.lun[0];
1087 			vc.target = &v_req_pi.lun[1];
1088 		} else {
1089 			vc.req = &v_req;
1090 			vc.req_size = sizeof(v_req);
1091 			vc.lunp = &v_req.lun[0];
1092 			vc.target = &v_req.lun[1];
1093 		}
1094 
1095 		/*
1096 		 * Validate the size of request and response buffers.
1097 		 * Check for a sane response buffer so we can report
1098 		 * early errors back to the guest.
1099 		 */
1100 		ret = vhost_scsi_chk_size(vq, &vc);
1101 		if (ret)
1102 			goto err;
1103 
1104 		ret = vhost_scsi_get_req(vq, &vc, &tpg);
1105 		if (ret)
1106 			goto err;
1107 
1108 		ret = -EIO;	/* bad target on any error from here on */
1109 
1110 		/*
1111 		 * Determine data_direction by calculating the total outgoing
1112 		 * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
1113 		 * response headers respectively.
1114 		 *
1115 		 * For DMA_TO_DEVICE this is out_iter, which is already pointing
1116 		 * to the right place.
1117 		 *
1118 		 * For DMA_FROM_DEVICE, the iovec will be just past the end
1119 		 * of the virtio-scsi response header in either the same
1120 		 * or immediately following iovec.
1121 		 *
1122 		 * Any associated T10_PI bytes for the outgoing / incoming
1123 		 * payloads are included in calculation of exp_data_len here.
1124 		 */
1125 		prot_bytes = 0;
1126 
1127 		if (vc.out_size > vc.req_size) {
1128 			data_direction = DMA_TO_DEVICE;
1129 			exp_data_len = vc.out_size - vc.req_size;
1130 			data_iter = vc.out_iter;
1131 		} else if (vc.in_size > vc.rsp_size) {
1132 			data_direction = DMA_FROM_DEVICE;
1133 			exp_data_len = vc.in_size - vc.rsp_size;
1134 
1135 			iov_iter_init(&in_iter, ITER_DEST, &vq->iov[vc.out], vc.in,
1136 				      vc.rsp_size + exp_data_len);
1137 			iov_iter_advance(&in_iter, vc.rsp_size);
1138 			data_iter = in_iter;
1139 		} else {
1140 			data_direction = DMA_NONE;
1141 			exp_data_len = 0;
1142 		}
1143 		/*
1144 		 * If T10_PI header + payload is present, setup prot_iter values
1145 		 * and recalculate data_iter for vhost_scsi_mapal() mapping to
1146 		 * host scatterlists via get_user_pages_fast().
1147 		 */
1148 		if (t10_pi) {
1149 			if (v_req_pi.pi_bytesout) {
1150 				if (data_direction != DMA_TO_DEVICE) {
1151 					vq_err(vq, "Received non zero pi_bytesout,"
1152 						" but wrong data_direction\n");
1153 					goto err;
1154 				}
1155 				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
1156 			} else if (v_req_pi.pi_bytesin) {
1157 				if (data_direction != DMA_FROM_DEVICE) {
1158 					vq_err(vq, "Received non zero pi_bytesin,"
1159 						" but wrong data_direction\n");
1160 					goto err;
1161 				}
1162 				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
1163 			}
1164 			/*
1165 			 * Set prot_iter to data_iter and truncate it to
1166 			 * prot_bytes, and advance data_iter past any
1167 			 * preceeding prot_bytes that may be present.
1168 			 *
1169 			 * Also fix up the exp_data_len to reflect only the
1170 			 * actual data payload length.
1171 			 */
1172 			if (prot_bytes) {
1173 				exp_data_len -= prot_bytes;
1174 				prot_iter = data_iter;
1175 				iov_iter_truncate(&prot_iter, prot_bytes);
1176 				iov_iter_advance(&data_iter, prot_bytes);
1177 			}
1178 			tag = vhost64_to_cpu(vq, v_req_pi.tag);
1179 			task_attr = v_req_pi.task_attr;
1180 			cdb = &v_req_pi.cdb[0];
1181 			lun = vhost_buf_to_lun(v_req_pi.lun);
1182 		} else {
1183 			tag = vhost64_to_cpu(vq, v_req.tag);
1184 			task_attr = v_req.task_attr;
1185 			cdb = &v_req.cdb[0];
1186 			lun = vhost_buf_to_lun(v_req.lun);
1187 		}
1188 		/*
1189 		 * Check that the received CDB size does not exceeded our
1190 		 * hardcoded max for vhost-scsi, then get a pre-allocated
1191 		 * cmd descriptor for the new virtio-scsi tag.
1192 		 *
1193 		 * TODO what if cdb was too small for varlen cdb header?
1194 		 */
1195 		if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
1196 			vq_err(vq, "Received SCSI CDB with command_size: %d that"
1197 				" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1198 				scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
1199 				goto err;
1200 		}
1201 		cmd = vhost_scsi_get_cmd(vq, tpg, cdb, tag, lun, task_attr,
1202 					 exp_data_len + prot_bytes,
1203 					 data_direction);
1204 		if (IS_ERR(cmd)) {
1205 			vq_err(vq, "vhost_scsi_get_cmd failed %ld\n",
1206 			       PTR_ERR(cmd));
1207 			goto err;
1208 		}
1209 		cmd->tvc_vhost = vs;
1210 		cmd->tvc_vq = vq;
1211 		for (i = 0; i < vc.in ; i++)
1212 			cmd->tvc_resp_iov[i] = vq->iov[vc.out + i];
1213 		cmd->tvc_in_iovs = vc.in;
1214 
1215 		pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
1216 			 cmd->tvc_cdb[0], cmd->tvc_lun);
1217 		pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
1218 			 " %d\n", cmd, exp_data_len, prot_bytes, data_direction);
1219 
1220 		if (data_direction != DMA_NONE) {
1221 			if (unlikely(vhost_scsi_mapal(cmd, prot_bytes,
1222 						      &prot_iter, exp_data_len,
1223 						      &data_iter))) {
1224 				vq_err(vq, "Failed to map iov to sgl\n");
1225 				vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
1226 				goto err;
1227 			}
1228 		}
1229 		/*
1230 		 * Save the descriptor from vhost_get_vq_desc() to be used to
1231 		 * complete the virtio-scsi request in TCM callback context via
1232 		 * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
1233 		 */
1234 		cmd->tvc_vq_desc = vc.head;
1235 		vhost_scsi_target_queue_cmd(cmd);
1236 		ret = 0;
1237 err:
1238 		/*
1239 		 * ENXIO:  No more requests, or read error, wait for next kick
1240 		 * EINVAL: Invalid response buffer, drop the request
1241 		 * EIO:    Respond with bad target
1242 		 * EAGAIN: Pending request
1243 		 */
1244 		if (ret == -ENXIO)
1245 			break;
1246 		else if (ret == -EIO)
1247 			vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
1248 	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
1249 out:
1250 	mutex_unlock(&vq->mutex);
1251 }
1252 
1253 static void
1254 vhost_scsi_send_tmf_resp(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
1255 			 int in_iovs, int vq_desc, struct iovec *resp_iov,
1256 			 int tmf_resp_code)
1257 {
1258 	struct virtio_scsi_ctrl_tmf_resp rsp;
1259 	struct iov_iter iov_iter;
1260 	int ret;
1261 
1262 	pr_debug("%s\n", __func__);
1263 	memset(&rsp, 0, sizeof(rsp));
1264 	rsp.response = tmf_resp_code;
1265 
1266 	iov_iter_init(&iov_iter, ITER_DEST, resp_iov, in_iovs, sizeof(rsp));
1267 
1268 	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
1269 	if (likely(ret == sizeof(rsp)))
1270 		vhost_add_used_and_signal(&vs->dev, vq, vq_desc, 0);
1271 	else
1272 		pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n");
1273 }
1274 
1275 static void vhost_scsi_tmf_resp_work(struct vhost_work *work)
1276 {
1277 	struct vhost_scsi_tmf *tmf = container_of(work, struct vhost_scsi_tmf,
1278 						  vwork);
1279 	struct vhost_virtqueue *ctl_vq, *vq;
1280 	int resp_code, i;
1281 
1282 	if (tmf->scsi_resp == TMR_FUNCTION_COMPLETE) {
1283 		/*
1284 		 * Flush IO vqs that don't share a worker with the ctl to make
1285 		 * sure they have sent their responses before us.
1286 		 */
1287 		ctl_vq = &tmf->vhost->vqs[VHOST_SCSI_VQ_CTL].vq;
1288 		for (i = VHOST_SCSI_VQ_IO; i < tmf->vhost->dev.nvqs; i++) {
1289 			vq = &tmf->vhost->vqs[i].vq;
1290 
1291 			if (vhost_vq_is_setup(vq) &&
1292 			    vq->worker != ctl_vq->worker)
1293 				vhost_vq_flush(vq);
1294 		}
1295 
1296 		resp_code = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
1297 	} else {
1298 		resp_code = VIRTIO_SCSI_S_FUNCTION_REJECTED;
1299 	}
1300 
1301 	vhost_scsi_send_tmf_resp(tmf->vhost, &tmf->svq->vq, tmf->in_iovs,
1302 				 tmf->vq_desc, &tmf->resp_iov, resp_code);
1303 	vhost_scsi_release_tmf_res(tmf);
1304 }
1305 
1306 static void
1307 vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg,
1308 		      struct vhost_virtqueue *vq,
1309 		      struct virtio_scsi_ctrl_tmf_req *vtmf,
1310 		      struct vhost_scsi_ctx *vc)
1311 {
1312 	struct vhost_scsi_virtqueue *svq = container_of(vq,
1313 					struct vhost_scsi_virtqueue, vq);
1314 	struct vhost_scsi_tmf *tmf;
1315 
1316 	if (vhost32_to_cpu(vq, vtmf->subtype) !=
1317 	    VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET)
1318 		goto send_reject;
1319 
1320 	if (!tpg->tpg_nexus || !tpg->tpg_nexus->tvn_se_sess) {
1321 		pr_err("Unable to locate active struct vhost_scsi_nexus for LUN RESET.\n");
1322 		goto send_reject;
1323 	}
1324 
1325 	tmf = kzalloc(sizeof(*tmf), GFP_KERNEL);
1326 	if (!tmf)
1327 		goto send_reject;
1328 
1329 	vhost_work_init(&tmf->vwork, vhost_scsi_tmf_resp_work);
1330 	tmf->vhost = vs;
1331 	tmf->svq = svq;
1332 	tmf->resp_iov = vq->iov[vc->out];
1333 	tmf->vq_desc = vc->head;
1334 	tmf->in_iovs = vc->in;
1335 	tmf->inflight = vhost_scsi_get_inflight(vq);
1336 
1337 	if (target_submit_tmr(&tmf->se_cmd, tpg->tpg_nexus->tvn_se_sess, NULL,
1338 			      vhost_buf_to_lun(vtmf->lun), NULL,
1339 			      TMR_LUN_RESET, GFP_KERNEL, 0,
1340 			      TARGET_SCF_ACK_KREF) < 0) {
1341 		vhost_scsi_release_tmf_res(tmf);
1342 		goto send_reject;
1343 	}
1344 
1345 	return;
1346 
1347 send_reject:
1348 	vhost_scsi_send_tmf_resp(vs, vq, vc->in, vc->head, &vq->iov[vc->out],
1349 				 VIRTIO_SCSI_S_FUNCTION_REJECTED);
1350 }
1351 
1352 static void
1353 vhost_scsi_send_an_resp(struct vhost_scsi *vs,
1354 			struct vhost_virtqueue *vq,
1355 			struct vhost_scsi_ctx *vc)
1356 {
1357 	struct virtio_scsi_ctrl_an_resp rsp;
1358 	struct iov_iter iov_iter;
1359 	int ret;
1360 
1361 	pr_debug("%s\n", __func__);
1362 	memset(&rsp, 0, sizeof(rsp));	/* event_actual = 0 */
1363 	rsp.response = VIRTIO_SCSI_S_OK;
1364 
1365 	iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[vc->out], vc->in, sizeof(rsp));
1366 
1367 	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
1368 	if (likely(ret == sizeof(rsp)))
1369 		vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
1370 	else
1371 		pr_err("Faulted on virtio_scsi_ctrl_an_resp\n");
1372 }
1373 
1374 static void
1375 vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1376 {
1377 	struct vhost_scsi_tpg *tpg;
1378 	union {
1379 		__virtio32 type;
1380 		struct virtio_scsi_ctrl_an_req an;
1381 		struct virtio_scsi_ctrl_tmf_req tmf;
1382 	} v_req;
1383 	struct vhost_scsi_ctx vc;
1384 	size_t typ_size;
1385 	int ret, c = 0;
1386 
1387 	mutex_lock(&vq->mutex);
1388 	/*
1389 	 * We can handle the vq only after the endpoint is setup by calling the
1390 	 * VHOST_SCSI_SET_ENDPOINT ioctl.
1391 	 */
1392 	if (!vhost_vq_get_backend(vq))
1393 		goto out;
1394 
1395 	memset(&vc, 0, sizeof(vc));
1396 
1397 	vhost_disable_notify(&vs->dev, vq);
1398 
1399 	do {
1400 		ret = vhost_scsi_get_desc(vs, vq, &vc);
1401 		if (ret)
1402 			goto err;
1403 
1404 		/*
1405 		 * Get the request type first in order to setup
1406 		 * other parameters dependent on the type.
1407 		 */
1408 		vc.req = &v_req.type;
1409 		typ_size = sizeof(v_req.type);
1410 
1411 		if (unlikely(!copy_from_iter_full(vc.req, typ_size,
1412 						  &vc.out_iter))) {
1413 			vq_err(vq, "Faulted on copy_from_iter tmf type\n");
1414 			/*
1415 			 * The size of the response buffer depends on the
1416 			 * request type and must be validated against it.
1417 			 * Since the request type is not known, don't send
1418 			 * a response.
1419 			 */
1420 			continue;
1421 		}
1422 
1423 		switch (vhost32_to_cpu(vq, v_req.type)) {
1424 		case VIRTIO_SCSI_T_TMF:
1425 			vc.req = &v_req.tmf;
1426 			vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req);
1427 			vc.rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp);
1428 			vc.lunp = &v_req.tmf.lun[0];
1429 			vc.target = &v_req.tmf.lun[1];
1430 			break;
1431 		case VIRTIO_SCSI_T_AN_QUERY:
1432 		case VIRTIO_SCSI_T_AN_SUBSCRIBE:
1433 			vc.req = &v_req.an;
1434 			vc.req_size = sizeof(struct virtio_scsi_ctrl_an_req);
1435 			vc.rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp);
1436 			vc.lunp = &v_req.an.lun[0];
1437 			vc.target = NULL;
1438 			break;
1439 		default:
1440 			vq_err(vq, "Unknown control request %d", v_req.type);
1441 			continue;
1442 		}
1443 
1444 		/*
1445 		 * Validate the size of request and response buffers.
1446 		 * Check for a sane response buffer so we can report
1447 		 * early errors back to the guest.
1448 		 */
1449 		ret = vhost_scsi_chk_size(vq, &vc);
1450 		if (ret)
1451 			goto err;
1452 
1453 		/*
1454 		 * Get the rest of the request now that its size is known.
1455 		 */
1456 		vc.req += typ_size;
1457 		vc.req_size -= typ_size;
1458 
1459 		ret = vhost_scsi_get_req(vq, &vc, &tpg);
1460 		if (ret)
1461 			goto err;
1462 
1463 		if (v_req.type == VIRTIO_SCSI_T_TMF)
1464 			vhost_scsi_handle_tmf(vs, tpg, vq, &v_req.tmf, &vc);
1465 		else
1466 			vhost_scsi_send_an_resp(vs, vq, &vc);
1467 err:
1468 		/*
1469 		 * ENXIO:  No more requests, or read error, wait for next kick
1470 		 * EINVAL: Invalid response buffer, drop the request
1471 		 * EIO:    Respond with bad target
1472 		 * EAGAIN: Pending request
1473 		 */
1474 		if (ret == -ENXIO)
1475 			break;
1476 		else if (ret == -EIO)
1477 			vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
1478 	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
1479 out:
1480 	mutex_unlock(&vq->mutex);
1481 }
1482 
1483 static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
1484 {
1485 	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1486 						poll.work);
1487 	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1488 
1489 	pr_debug("%s: The handling func for control queue.\n", __func__);
1490 	vhost_scsi_ctl_handle_vq(vs, vq);
1491 }
1492 
1493 static void
1494 vhost_scsi_send_evt(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
1495 		    struct vhost_scsi_tpg *tpg, struct se_lun *lun,
1496 		    u32 event, u32 reason)
1497 {
1498 	struct vhost_scsi_evt *evt;
1499 
1500 	evt = vhost_scsi_allocate_evt(vs, event, reason);
1501 	if (!evt)
1502 		return;
1503 
1504 	if (tpg && lun) {
1505 		/* TODO: share lun setup code with virtio-scsi.ko */
1506 		/*
1507 		 * Note: evt->event is zeroed when we allocate it and
1508 		 * lun[4-7] need to be zero according to virtio-scsi spec.
1509 		 */
1510 		evt->event.lun[0] = 0x01;
1511 		evt->event.lun[1] = tpg->tport_tpgt;
1512 		if (lun->unpacked_lun >= 256)
1513 			evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40 ;
1514 		evt->event.lun[3] = lun->unpacked_lun & 0xFF;
1515 	}
1516 
1517 	llist_add(&evt->list, &vs->vs_event_list);
1518 	if (!vhost_vq_work_queue(vq, &vs->vs_event_work))
1519 		vhost_scsi_complete_events(vs, true);
1520 }
1521 
1522 static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
1523 {
1524 	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1525 						poll.work);
1526 	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1527 
1528 	mutex_lock(&vq->mutex);
1529 	if (!vhost_vq_get_backend(vq))
1530 		goto out;
1531 
1532 	if (vs->vs_events_missed)
1533 		vhost_scsi_send_evt(vs, vq, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT,
1534 				    0);
1535 out:
1536 	mutex_unlock(&vq->mutex);
1537 }
1538 
1539 static void vhost_scsi_handle_kick(struct vhost_work *work)
1540 {
1541 	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1542 						poll.work);
1543 	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1544 
1545 	vhost_scsi_handle_vq(vs, vq);
1546 }
1547 
1548 /* Callers must hold dev mutex */
1549 static void vhost_scsi_flush(struct vhost_scsi *vs)
1550 {
1551 	int i;
1552 
1553 	/* Init new inflight and remember the old inflight */
1554 	vhost_scsi_init_inflight(vs, vs->old_inflight);
1555 
1556 	/*
1557 	 * The inflight->kref was initialized to 1. We decrement it here to
1558 	 * indicate the start of the flush operation so that it will reach 0
1559 	 * when all the reqs are finished.
1560 	 */
1561 	for (i = 0; i < vs->dev.nvqs; i++)
1562 		kref_put(&vs->old_inflight[i]->kref, vhost_scsi_done_inflight);
1563 
1564 	/* Flush both the vhost poll and vhost work */
1565 	vhost_dev_flush(&vs->dev);
1566 
1567 	/* Wait for all reqs issued before the flush to be finished */
1568 	for (i = 0; i < vs->dev.nvqs; i++)
1569 		wait_for_completion(&vs->old_inflight[i]->comp);
1570 }
1571 
1572 static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq)
1573 {
1574 	struct vhost_scsi_virtqueue *svq = container_of(vq,
1575 					struct vhost_scsi_virtqueue, vq);
1576 	struct vhost_scsi_cmd *tv_cmd;
1577 	unsigned int i;
1578 
1579 	if (!svq->scsi_cmds)
1580 		return;
1581 
1582 	for (i = 0; i < svq->max_cmds; i++) {
1583 		tv_cmd = &svq->scsi_cmds[i];
1584 
1585 		kfree(tv_cmd->tvc_sgl);
1586 		kfree(tv_cmd->tvc_prot_sgl);
1587 		kfree(tv_cmd->tvc_upages);
1588 		kfree(tv_cmd->tvc_resp_iov);
1589 	}
1590 
1591 	sbitmap_free(&svq->scsi_tags);
1592 	kfree(svq->scsi_cmds);
1593 	svq->scsi_cmds = NULL;
1594 }
1595 
1596 static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds)
1597 {
1598 	struct vhost_scsi_virtqueue *svq = container_of(vq,
1599 					struct vhost_scsi_virtqueue, vq);
1600 	struct vhost_scsi_cmd *tv_cmd;
1601 	unsigned int i;
1602 
1603 	if (svq->scsi_cmds)
1604 		return 0;
1605 
1606 	if (sbitmap_init_node(&svq->scsi_tags, max_cmds, -1, GFP_KERNEL,
1607 			      NUMA_NO_NODE, false, true))
1608 		return -ENOMEM;
1609 	svq->max_cmds = max_cmds;
1610 
1611 	svq->scsi_cmds = kcalloc(max_cmds, sizeof(*tv_cmd), GFP_KERNEL);
1612 	if (!svq->scsi_cmds) {
1613 		sbitmap_free(&svq->scsi_tags);
1614 		return -ENOMEM;
1615 	}
1616 
1617 	for (i = 0; i < max_cmds; i++) {
1618 		tv_cmd = &svq->scsi_cmds[i];
1619 
1620 		tv_cmd->tvc_sgl = kcalloc(VHOST_SCSI_PREALLOC_SGLS,
1621 					  sizeof(struct scatterlist),
1622 					  GFP_KERNEL);
1623 		if (!tv_cmd->tvc_sgl) {
1624 			pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
1625 			goto out;
1626 		}
1627 
1628 		tv_cmd->tvc_upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES,
1629 					     sizeof(struct page *),
1630 					     GFP_KERNEL);
1631 		if (!tv_cmd->tvc_upages) {
1632 			pr_err("Unable to allocate tv_cmd->tvc_upages\n");
1633 			goto out;
1634 		}
1635 
1636 		tv_cmd->tvc_resp_iov = kcalloc(UIO_MAXIOV,
1637 					       sizeof(struct iovec),
1638 					       GFP_KERNEL);
1639 		if (!tv_cmd->tvc_resp_iov) {
1640 			pr_err("Unable to allocate tv_cmd->tvc_resp_iov\n");
1641 			goto out;
1642 		}
1643 
1644 		tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
1645 					       sizeof(struct scatterlist),
1646 					       GFP_KERNEL);
1647 		if (!tv_cmd->tvc_prot_sgl) {
1648 			pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
1649 			goto out;
1650 		}
1651 	}
1652 	return 0;
1653 out:
1654 	vhost_scsi_destroy_vq_cmds(vq);
1655 	return -ENOMEM;
1656 }
1657 
1658 /*
1659  * Called from vhost_scsi_ioctl() context to walk the list of available
1660  * vhost_scsi_tpg with an active struct vhost_scsi_nexus
1661  *
1662  *  The lock nesting rule is:
1663  *    vs->dev.mutex -> vhost_scsi_mutex -> tpg->tv_tpg_mutex -> vq->mutex
1664  */
1665 static int
1666 vhost_scsi_set_endpoint(struct vhost_scsi *vs,
1667 			struct vhost_scsi_target *t)
1668 {
1669 	struct se_portal_group *se_tpg;
1670 	struct vhost_scsi_tport *tv_tport;
1671 	struct vhost_scsi_tpg *tpg;
1672 	struct vhost_scsi_tpg **vs_tpg;
1673 	struct vhost_virtqueue *vq;
1674 	int index, ret, i, len;
1675 	bool match = false;
1676 
1677 	mutex_lock(&vs->dev.mutex);
1678 
1679 	/* Verify that ring has been setup correctly. */
1680 	for (index = 0; index < vs->dev.nvqs; ++index) {
1681 		/* Verify that ring has been setup correctly. */
1682 		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1683 			ret = -EFAULT;
1684 			goto out;
1685 		}
1686 	}
1687 
1688 	len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
1689 	vs_tpg = kzalloc(len, GFP_KERNEL);
1690 	if (!vs_tpg) {
1691 		ret = -ENOMEM;
1692 		goto out;
1693 	}
1694 	if (vs->vs_tpg)
1695 		memcpy(vs_tpg, vs->vs_tpg, len);
1696 
1697 	mutex_lock(&vhost_scsi_mutex);
1698 	list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
1699 		mutex_lock(&tpg->tv_tpg_mutex);
1700 		if (!tpg->tpg_nexus) {
1701 			mutex_unlock(&tpg->tv_tpg_mutex);
1702 			continue;
1703 		}
1704 		if (tpg->tv_tpg_vhost_count != 0) {
1705 			mutex_unlock(&tpg->tv_tpg_mutex);
1706 			continue;
1707 		}
1708 		tv_tport = tpg->tport;
1709 
1710 		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1711 			if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
1712 				mutex_unlock(&tpg->tv_tpg_mutex);
1713 				mutex_unlock(&vhost_scsi_mutex);
1714 				ret = -EEXIST;
1715 				goto undepend;
1716 			}
1717 			/*
1718 			 * In order to ensure individual vhost-scsi configfs
1719 			 * groups cannot be removed while in use by vhost ioctl,
1720 			 * go ahead and take an explicit se_tpg->tpg_group.cg_item
1721 			 * dependency now.
1722 			 */
1723 			se_tpg = &tpg->se_tpg;
1724 			ret = target_depend_item(&se_tpg->tpg_group.cg_item);
1725 			if (ret) {
1726 				pr_warn("target_depend_item() failed: %d\n", ret);
1727 				mutex_unlock(&tpg->tv_tpg_mutex);
1728 				mutex_unlock(&vhost_scsi_mutex);
1729 				goto undepend;
1730 			}
1731 			tpg->tv_tpg_vhost_count++;
1732 			tpg->vhost_scsi = vs;
1733 			vs_tpg[tpg->tport_tpgt] = tpg;
1734 			match = true;
1735 		}
1736 		mutex_unlock(&tpg->tv_tpg_mutex);
1737 	}
1738 	mutex_unlock(&vhost_scsi_mutex);
1739 
1740 	if (match) {
1741 		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
1742 		       sizeof(vs->vs_vhost_wwpn));
1743 
1744 		for (i = VHOST_SCSI_VQ_IO; i < vs->dev.nvqs; i++) {
1745 			vq = &vs->vqs[i].vq;
1746 			if (!vhost_vq_is_setup(vq))
1747 				continue;
1748 
1749 			ret = vhost_scsi_setup_vq_cmds(vq, vq->num);
1750 			if (ret)
1751 				goto destroy_vq_cmds;
1752 		}
1753 
1754 		for (i = 0; i < vs->dev.nvqs; i++) {
1755 			vq = &vs->vqs[i].vq;
1756 			mutex_lock(&vq->mutex);
1757 			vhost_vq_set_backend(vq, vs_tpg);
1758 			vhost_vq_init_access(vq);
1759 			mutex_unlock(&vq->mutex);
1760 		}
1761 		ret = 0;
1762 	} else {
1763 		ret = -EEXIST;
1764 	}
1765 
1766 	/*
1767 	 * Act as synchronize_rcu to make sure access to
1768 	 * old vs->vs_tpg is finished.
1769 	 */
1770 	vhost_scsi_flush(vs);
1771 	kfree(vs->vs_tpg);
1772 	vs->vs_tpg = vs_tpg;
1773 	goto out;
1774 
1775 destroy_vq_cmds:
1776 	for (i--; i >= VHOST_SCSI_VQ_IO; i--) {
1777 		if (!vhost_vq_get_backend(&vs->vqs[i].vq))
1778 			vhost_scsi_destroy_vq_cmds(&vs->vqs[i].vq);
1779 	}
1780 undepend:
1781 	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
1782 		tpg = vs_tpg[i];
1783 		if (tpg) {
1784 			mutex_lock(&tpg->tv_tpg_mutex);
1785 			tpg->vhost_scsi = NULL;
1786 			tpg->tv_tpg_vhost_count--;
1787 			mutex_unlock(&tpg->tv_tpg_mutex);
1788 			target_undepend_item(&tpg->se_tpg.tpg_group.cg_item);
1789 		}
1790 	}
1791 	kfree(vs_tpg);
1792 out:
1793 	mutex_unlock(&vs->dev.mutex);
1794 	return ret;
1795 }
1796 
1797 static int
1798 vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
1799 			  struct vhost_scsi_target *t)
1800 {
1801 	struct se_portal_group *se_tpg;
1802 	struct vhost_scsi_tport *tv_tport;
1803 	struct vhost_scsi_tpg *tpg;
1804 	struct vhost_virtqueue *vq;
1805 	bool match = false;
1806 	int index, ret, i;
1807 	u8 target;
1808 
1809 	mutex_lock(&vs->dev.mutex);
1810 	/* Verify that ring has been setup correctly. */
1811 	for (index = 0; index < vs->dev.nvqs; ++index) {
1812 		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1813 			ret = -EFAULT;
1814 			goto err_dev;
1815 		}
1816 	}
1817 
1818 	if (!vs->vs_tpg) {
1819 		ret = 0;
1820 		goto err_dev;
1821 	}
1822 
1823 	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
1824 		target = i;
1825 		tpg = vs->vs_tpg[target];
1826 		if (!tpg)
1827 			continue;
1828 
1829 		tv_tport = tpg->tport;
1830 		if (!tv_tport) {
1831 			ret = -ENODEV;
1832 			goto err_dev;
1833 		}
1834 
1835 		if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1836 			pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
1837 				" does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
1838 				tv_tport->tport_name, tpg->tport_tpgt,
1839 				t->vhost_wwpn, t->vhost_tpgt);
1840 			ret = -EINVAL;
1841 			goto err_dev;
1842 		}
1843 		match = true;
1844 	}
1845 	if (!match)
1846 		goto free_vs_tpg;
1847 
1848 	/* Prevent new cmds from starting and accessing the tpgs/sessions */
1849 	for (i = 0; i < vs->dev.nvqs; i++) {
1850 		vq = &vs->vqs[i].vq;
1851 		mutex_lock(&vq->mutex);
1852 		vhost_vq_set_backend(vq, NULL);
1853 		mutex_unlock(&vq->mutex);
1854 	}
1855 	/* Make sure cmds are not running before tearing them down. */
1856 	vhost_scsi_flush(vs);
1857 
1858 	for (i = 0; i < vs->dev.nvqs; i++) {
1859 		vq = &vs->vqs[i].vq;
1860 		vhost_scsi_destroy_vq_cmds(vq);
1861 	}
1862 
1863 	/*
1864 	 * We can now release our hold on the tpg and sessions and userspace
1865 	 * can free them after this point.
1866 	 */
1867 	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
1868 		target = i;
1869 		tpg = vs->vs_tpg[target];
1870 		if (!tpg)
1871 			continue;
1872 
1873 		mutex_lock(&tpg->tv_tpg_mutex);
1874 
1875 		tpg->tv_tpg_vhost_count--;
1876 		tpg->vhost_scsi = NULL;
1877 		vs->vs_tpg[target] = NULL;
1878 
1879 		mutex_unlock(&tpg->tv_tpg_mutex);
1880 
1881 		se_tpg = &tpg->se_tpg;
1882 		target_undepend_item(&se_tpg->tpg_group.cg_item);
1883 	}
1884 
1885 free_vs_tpg:
1886 	/*
1887 	 * Act as synchronize_rcu to make sure access to
1888 	 * old vs->vs_tpg is finished.
1889 	 */
1890 	vhost_scsi_flush(vs);
1891 	kfree(vs->vs_tpg);
1892 	vs->vs_tpg = NULL;
1893 	WARN_ON(vs->vs_events_nr);
1894 	mutex_unlock(&vs->dev.mutex);
1895 	return 0;
1896 
1897 err_dev:
1898 	mutex_unlock(&vs->dev.mutex);
1899 	return ret;
1900 }
1901 
1902 static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
1903 {
1904 	struct vhost_virtqueue *vq;
1905 	int i;
1906 
1907 	if (features & ~VHOST_SCSI_FEATURES)
1908 		return -EOPNOTSUPP;
1909 
1910 	mutex_lock(&vs->dev.mutex);
1911 	if ((features & (1 << VHOST_F_LOG_ALL)) &&
1912 	    !vhost_log_access_ok(&vs->dev)) {
1913 		mutex_unlock(&vs->dev.mutex);
1914 		return -EFAULT;
1915 	}
1916 
1917 	for (i = 0; i < vs->dev.nvqs; i++) {
1918 		vq = &vs->vqs[i].vq;
1919 		mutex_lock(&vq->mutex);
1920 		vq->acked_features = features;
1921 		mutex_unlock(&vq->mutex);
1922 	}
1923 	mutex_unlock(&vs->dev.mutex);
1924 	return 0;
1925 }
1926 
1927 static int vhost_scsi_open(struct inode *inode, struct file *f)
1928 {
1929 	struct vhost_scsi_virtqueue *svq;
1930 	struct vhost_scsi *vs;
1931 	struct vhost_virtqueue **vqs;
1932 	int r = -ENOMEM, i, nvqs = vhost_scsi_max_io_vqs;
1933 
1934 	vs = kvzalloc(sizeof(*vs), GFP_KERNEL);
1935 	if (!vs)
1936 		goto err_vs;
1937 
1938 	if (nvqs > VHOST_SCSI_MAX_IO_VQ) {
1939 		pr_err("Invalid max_io_vqs of %d. Using %d.\n", nvqs,
1940 		       VHOST_SCSI_MAX_IO_VQ);
1941 		nvqs = VHOST_SCSI_MAX_IO_VQ;
1942 	} else if (nvqs == 0) {
1943 		pr_err("Invalid max_io_vqs of %d. Using 1.\n", nvqs);
1944 		nvqs = 1;
1945 	}
1946 	nvqs += VHOST_SCSI_VQ_IO;
1947 
1948 	vs->old_inflight = kmalloc_array(nvqs, sizeof(*vs->old_inflight),
1949 					 GFP_KERNEL | __GFP_ZERO);
1950 	if (!vs->old_inflight)
1951 		goto err_inflight;
1952 
1953 	vs->vqs = kmalloc_array(nvqs, sizeof(*vs->vqs),
1954 				GFP_KERNEL | __GFP_ZERO);
1955 	if (!vs->vqs)
1956 		goto err_vqs;
1957 
1958 	vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
1959 	if (!vqs)
1960 		goto err_local_vqs;
1961 
1962 	vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);
1963 
1964 	vs->vs_events_nr = 0;
1965 	vs->vs_events_missed = false;
1966 
1967 	vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
1968 	vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1969 	vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
1970 	vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
1971 	for (i = VHOST_SCSI_VQ_IO; i < nvqs; i++) {
1972 		svq = &vs->vqs[i];
1973 
1974 		vqs[i] = &svq->vq;
1975 		svq->vs = vs;
1976 		init_llist_head(&svq->completion_list);
1977 		vhost_work_init(&svq->completion_work,
1978 				vhost_scsi_complete_cmd_work);
1979 		svq->vq.handle_kick = vhost_scsi_handle_kick;
1980 	}
1981 	vhost_dev_init(&vs->dev, vqs, nvqs, UIO_MAXIOV,
1982 		       VHOST_SCSI_WEIGHT, 0, true, NULL);
1983 
1984 	vhost_scsi_init_inflight(vs, NULL);
1985 
1986 	f->private_data = vs;
1987 	return 0;
1988 
1989 err_local_vqs:
1990 	kfree(vs->vqs);
1991 err_vqs:
1992 	kfree(vs->old_inflight);
1993 err_inflight:
1994 	kvfree(vs);
1995 err_vs:
1996 	return r;
1997 }
1998 
1999 static int vhost_scsi_release(struct inode *inode, struct file *f)
2000 {
2001 	struct vhost_scsi *vs = f->private_data;
2002 	struct vhost_scsi_target t;
2003 
2004 	mutex_lock(&vs->dev.mutex);
2005 	memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
2006 	mutex_unlock(&vs->dev.mutex);
2007 	vhost_scsi_clear_endpoint(vs, &t);
2008 	vhost_dev_stop(&vs->dev);
2009 	vhost_dev_cleanup(&vs->dev);
2010 	kfree(vs->dev.vqs);
2011 	kfree(vs->vqs);
2012 	kfree(vs->old_inflight);
2013 	kvfree(vs);
2014 	return 0;
2015 }
2016 
2017 static long
2018 vhost_scsi_ioctl(struct file *f,
2019 		 unsigned int ioctl,
2020 		 unsigned long arg)
2021 {
2022 	struct vhost_scsi *vs = f->private_data;
2023 	struct vhost_scsi_target backend;
2024 	void __user *argp = (void __user *)arg;
2025 	u64 __user *featurep = argp;
2026 	u32 __user *eventsp = argp;
2027 	u32 events_missed;
2028 	u64 features;
2029 	int r, abi_version = VHOST_SCSI_ABI_VERSION;
2030 	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
2031 
2032 	switch (ioctl) {
2033 	case VHOST_SCSI_SET_ENDPOINT:
2034 		if (copy_from_user(&backend, argp, sizeof backend))
2035 			return -EFAULT;
2036 		if (backend.reserved != 0)
2037 			return -EOPNOTSUPP;
2038 
2039 		return vhost_scsi_set_endpoint(vs, &backend);
2040 	case VHOST_SCSI_CLEAR_ENDPOINT:
2041 		if (copy_from_user(&backend, argp, sizeof backend))
2042 			return -EFAULT;
2043 		if (backend.reserved != 0)
2044 			return -EOPNOTSUPP;
2045 
2046 		return vhost_scsi_clear_endpoint(vs, &backend);
2047 	case VHOST_SCSI_GET_ABI_VERSION:
2048 		if (copy_to_user(argp, &abi_version, sizeof abi_version))
2049 			return -EFAULT;
2050 		return 0;
2051 	case VHOST_SCSI_SET_EVENTS_MISSED:
2052 		if (get_user(events_missed, eventsp))
2053 			return -EFAULT;
2054 		mutex_lock(&vq->mutex);
2055 		vs->vs_events_missed = events_missed;
2056 		mutex_unlock(&vq->mutex);
2057 		return 0;
2058 	case VHOST_SCSI_GET_EVENTS_MISSED:
2059 		mutex_lock(&vq->mutex);
2060 		events_missed = vs->vs_events_missed;
2061 		mutex_unlock(&vq->mutex);
2062 		if (put_user(events_missed, eventsp))
2063 			return -EFAULT;
2064 		return 0;
2065 	case VHOST_GET_FEATURES:
2066 		features = VHOST_SCSI_FEATURES;
2067 		if (copy_to_user(featurep, &features, sizeof features))
2068 			return -EFAULT;
2069 		return 0;
2070 	case VHOST_SET_FEATURES:
2071 		if (copy_from_user(&features, featurep, sizeof features))
2072 			return -EFAULT;
2073 		return vhost_scsi_set_features(vs, features);
2074 	case VHOST_NEW_WORKER:
2075 	case VHOST_FREE_WORKER:
2076 	case VHOST_ATTACH_VRING_WORKER:
2077 	case VHOST_GET_VRING_WORKER:
2078 		mutex_lock(&vs->dev.mutex);
2079 		r = vhost_worker_ioctl(&vs->dev, ioctl, argp);
2080 		mutex_unlock(&vs->dev.mutex);
2081 		return r;
2082 	default:
2083 		mutex_lock(&vs->dev.mutex);
2084 		r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
2085 		/* TODO: flush backend after dev ioctl. */
2086 		if (r == -ENOIOCTLCMD)
2087 			r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
2088 		mutex_unlock(&vs->dev.mutex);
2089 		return r;
2090 	}
2091 }
2092 
2093 static const struct file_operations vhost_scsi_fops = {
2094 	.owner          = THIS_MODULE,
2095 	.release        = vhost_scsi_release,
2096 	.unlocked_ioctl = vhost_scsi_ioctl,
2097 	.compat_ioctl	= compat_ptr_ioctl,
2098 	.open           = vhost_scsi_open,
2099 	.llseek		= noop_llseek,
2100 };
2101 
2102 static struct miscdevice vhost_scsi_misc = {
2103 	MISC_DYNAMIC_MINOR,
2104 	"vhost-scsi",
2105 	&vhost_scsi_fops,
2106 };
2107 
2108 static int __init vhost_scsi_register(void)
2109 {
2110 	return misc_register(&vhost_scsi_misc);
2111 }
2112 
2113 static void vhost_scsi_deregister(void)
2114 {
2115 	misc_deregister(&vhost_scsi_misc);
2116 }
2117 
2118 static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
2119 {
2120 	switch (tport->tport_proto_id) {
2121 	case SCSI_PROTOCOL_SAS:
2122 		return "SAS";
2123 	case SCSI_PROTOCOL_FCP:
2124 		return "FCP";
2125 	case SCSI_PROTOCOL_ISCSI:
2126 		return "iSCSI";
2127 	default:
2128 		break;
2129 	}
2130 
2131 	return "Unknown";
2132 }
2133 
2134 static void
2135 vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
2136 		  struct se_lun *lun, bool plug)
2137 {
2138 
2139 	struct vhost_scsi *vs = tpg->vhost_scsi;
2140 	struct vhost_virtqueue *vq;
2141 	u32 reason;
2142 
2143 	if (!vs)
2144 		return;
2145 
2146 	if (plug)
2147 		reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
2148 	else
2149 		reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
2150 
2151 	vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
2152 	mutex_lock(&vq->mutex);
2153 	/*
2154 	 * We can't queue events if the backend has been cleared, because
2155 	 * we could end up queueing an event after the flush.
2156 	 */
2157 	if (!vhost_vq_get_backend(vq))
2158 		goto unlock;
2159 
2160 	if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
2161 		vhost_scsi_send_evt(vs, vq, tpg, lun,
2162 				   VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
2163 unlock:
2164 	mutex_unlock(&vq->mutex);
2165 }
2166 
2167 static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
2168 {
2169 	vhost_scsi_do_plug(tpg, lun, true);
2170 }
2171 
2172 static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
2173 {
2174 	vhost_scsi_do_plug(tpg, lun, false);
2175 }
2176 
2177 static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
2178 			       struct se_lun *lun)
2179 {
2180 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2181 				struct vhost_scsi_tpg, se_tpg);
2182 
2183 	mutex_lock(&tpg->tv_tpg_mutex);
2184 	tpg->tv_tpg_port_count++;
2185 	vhost_scsi_hotplug(tpg, lun);
2186 	mutex_unlock(&tpg->tv_tpg_mutex);
2187 
2188 	return 0;
2189 }
2190 
2191 static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
2192 				  struct se_lun *lun)
2193 {
2194 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2195 				struct vhost_scsi_tpg, se_tpg);
2196 
2197 	mutex_lock(&tpg->tv_tpg_mutex);
2198 	tpg->tv_tpg_port_count--;
2199 	vhost_scsi_hotunplug(tpg, lun);
2200 	mutex_unlock(&tpg->tv_tpg_mutex);
2201 }
2202 
2203 static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_store(
2204 		struct config_item *item, const char *page, size_t count)
2205 {
2206 	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2207 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2208 				struct vhost_scsi_tpg, se_tpg);
2209 	unsigned long val;
2210 	int ret = kstrtoul(page, 0, &val);
2211 
2212 	if (ret) {
2213 		pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
2214 		return ret;
2215 	}
2216 	if (val != 0 && val != 1 && val != 3) {
2217 		pr_err("Invalid vhost_scsi fabric_prot_type: %lu\n", val);
2218 		return -EINVAL;
2219 	}
2220 	tpg->tv_fabric_prot_type = val;
2221 
2222 	return count;
2223 }
2224 
2225 static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_show(
2226 		struct config_item *item, char *page)
2227 {
2228 	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2229 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2230 				struct vhost_scsi_tpg, se_tpg);
2231 
2232 	return sysfs_emit(page, "%d\n", tpg->tv_fabric_prot_type);
2233 }
2234 
2235 CONFIGFS_ATTR(vhost_scsi_tpg_attrib_, fabric_prot_type);
2236 
2237 static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
2238 	&vhost_scsi_tpg_attrib_attr_fabric_prot_type,
2239 	NULL,
2240 };
2241 
2242 static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
2243 				const char *name)
2244 {
2245 	struct vhost_scsi_nexus *tv_nexus;
2246 
2247 	mutex_lock(&tpg->tv_tpg_mutex);
2248 	if (tpg->tpg_nexus) {
2249 		mutex_unlock(&tpg->tv_tpg_mutex);
2250 		pr_debug("tpg->tpg_nexus already exists\n");
2251 		return -EEXIST;
2252 	}
2253 
2254 	tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL);
2255 	if (!tv_nexus) {
2256 		mutex_unlock(&tpg->tv_tpg_mutex);
2257 		pr_err("Unable to allocate struct vhost_scsi_nexus\n");
2258 		return -ENOMEM;
2259 	}
2260 	/*
2261 	 * Since we are running in 'demo mode' this call with generate a
2262 	 * struct se_node_acl for the vhost_scsi struct se_portal_group with
2263 	 * the SCSI Initiator port name of the passed configfs group 'name'.
2264 	 */
2265 	tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg, 0, 0,
2266 					TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
2267 					(unsigned char *)name, tv_nexus, NULL);
2268 	if (IS_ERR(tv_nexus->tvn_se_sess)) {
2269 		mutex_unlock(&tpg->tv_tpg_mutex);
2270 		kfree(tv_nexus);
2271 		return -ENOMEM;
2272 	}
2273 	tpg->tpg_nexus = tv_nexus;
2274 
2275 	mutex_unlock(&tpg->tv_tpg_mutex);
2276 	return 0;
2277 }
2278 
2279 static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
2280 {
2281 	struct se_session *se_sess;
2282 	struct vhost_scsi_nexus *tv_nexus;
2283 
2284 	mutex_lock(&tpg->tv_tpg_mutex);
2285 	tv_nexus = tpg->tpg_nexus;
2286 	if (!tv_nexus) {
2287 		mutex_unlock(&tpg->tv_tpg_mutex);
2288 		return -ENODEV;
2289 	}
2290 
2291 	se_sess = tv_nexus->tvn_se_sess;
2292 	if (!se_sess) {
2293 		mutex_unlock(&tpg->tv_tpg_mutex);
2294 		return -ENODEV;
2295 	}
2296 
2297 	if (tpg->tv_tpg_port_count != 0) {
2298 		mutex_unlock(&tpg->tv_tpg_mutex);
2299 		pr_err("Unable to remove TCM_vhost I_T Nexus with"
2300 			" active TPG port count: %d\n",
2301 			tpg->tv_tpg_port_count);
2302 		return -EBUSY;
2303 	}
2304 
2305 	if (tpg->tv_tpg_vhost_count != 0) {
2306 		mutex_unlock(&tpg->tv_tpg_mutex);
2307 		pr_err("Unable to remove TCM_vhost I_T Nexus with"
2308 			" active TPG vhost count: %d\n",
2309 			tpg->tv_tpg_vhost_count);
2310 		return -EBUSY;
2311 	}
2312 
2313 	pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
2314 		" %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
2315 		tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
2316 
2317 	/*
2318 	 * Release the SCSI I_T Nexus to the emulated vhost Target Port
2319 	 */
2320 	target_remove_session(se_sess);
2321 	tpg->tpg_nexus = NULL;
2322 	mutex_unlock(&tpg->tv_tpg_mutex);
2323 
2324 	kfree(tv_nexus);
2325 	return 0;
2326 }
2327 
2328 static ssize_t vhost_scsi_tpg_nexus_show(struct config_item *item, char *page)
2329 {
2330 	struct se_portal_group *se_tpg = to_tpg(item);
2331 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2332 				struct vhost_scsi_tpg, se_tpg);
2333 	struct vhost_scsi_nexus *tv_nexus;
2334 	ssize_t ret;
2335 
2336 	mutex_lock(&tpg->tv_tpg_mutex);
2337 	tv_nexus = tpg->tpg_nexus;
2338 	if (!tv_nexus) {
2339 		mutex_unlock(&tpg->tv_tpg_mutex);
2340 		return -ENODEV;
2341 	}
2342 	ret = sysfs_emit(page, "%s\n",
2343 			tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
2344 	mutex_unlock(&tpg->tv_tpg_mutex);
2345 
2346 	return ret;
2347 }
2348 
2349 static ssize_t vhost_scsi_tpg_nexus_store(struct config_item *item,
2350 		const char *page, size_t count)
2351 {
2352 	struct se_portal_group *se_tpg = to_tpg(item);
2353 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2354 				struct vhost_scsi_tpg, se_tpg);
2355 	struct vhost_scsi_tport *tport_wwn = tpg->tport;
2356 	unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
2357 	int ret;
2358 	/*
2359 	 * Shutdown the active I_T nexus if 'NULL' is passed..
2360 	 */
2361 	if (!strncmp(page, "NULL", 4)) {
2362 		ret = vhost_scsi_drop_nexus(tpg);
2363 		return (!ret) ? count : ret;
2364 	}
2365 	/*
2366 	 * Otherwise make sure the passed virtual Initiator port WWN matches
2367 	 * the fabric protocol_id set in vhost_scsi_make_tport(), and call
2368 	 * vhost_scsi_make_nexus().
2369 	 */
2370 	if (strlen(page) >= VHOST_SCSI_NAMELEN) {
2371 		pr_err("Emulated NAA Sas Address: %s, exceeds"
2372 				" max: %d\n", page, VHOST_SCSI_NAMELEN);
2373 		return -EINVAL;
2374 	}
2375 	snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);
2376 
2377 	ptr = strstr(i_port, "naa.");
2378 	if (ptr) {
2379 		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
2380 			pr_err("Passed SAS Initiator Port %s does not"
2381 				" match target port protoid: %s\n", i_port,
2382 				vhost_scsi_dump_proto_id(tport_wwn));
2383 			return -EINVAL;
2384 		}
2385 		port_ptr = &i_port[0];
2386 		goto check_newline;
2387 	}
2388 	ptr = strstr(i_port, "fc.");
2389 	if (ptr) {
2390 		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
2391 			pr_err("Passed FCP Initiator Port %s does not"
2392 				" match target port protoid: %s\n", i_port,
2393 				vhost_scsi_dump_proto_id(tport_wwn));
2394 			return -EINVAL;
2395 		}
2396 		port_ptr = &i_port[3]; /* Skip over "fc." */
2397 		goto check_newline;
2398 	}
2399 	ptr = strstr(i_port, "iqn.");
2400 	if (ptr) {
2401 		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
2402 			pr_err("Passed iSCSI Initiator Port %s does not"
2403 				" match target port protoid: %s\n", i_port,
2404 				vhost_scsi_dump_proto_id(tport_wwn));
2405 			return -EINVAL;
2406 		}
2407 		port_ptr = &i_port[0];
2408 		goto check_newline;
2409 	}
2410 	pr_err("Unable to locate prefix for emulated Initiator Port:"
2411 			" %s\n", i_port);
2412 	return -EINVAL;
2413 	/*
2414 	 * Clear any trailing newline for the NAA WWN
2415 	 */
2416 check_newline:
2417 	if (i_port[strlen(i_port)-1] == '\n')
2418 		i_port[strlen(i_port)-1] = '\0';
2419 
2420 	ret = vhost_scsi_make_nexus(tpg, port_ptr);
2421 	if (ret < 0)
2422 		return ret;
2423 
2424 	return count;
2425 }
2426 
2427 CONFIGFS_ATTR(vhost_scsi_tpg_, nexus);
2428 
2429 static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
2430 	&vhost_scsi_tpg_attr_nexus,
2431 	NULL,
2432 };
2433 
2434 static struct se_portal_group *
2435 vhost_scsi_make_tpg(struct se_wwn *wwn, const char *name)
2436 {
2437 	struct vhost_scsi_tport *tport = container_of(wwn,
2438 			struct vhost_scsi_tport, tport_wwn);
2439 
2440 	struct vhost_scsi_tpg *tpg;
2441 	u16 tpgt;
2442 	int ret;
2443 
2444 	if (strstr(name, "tpgt_") != name)
2445 		return ERR_PTR(-EINVAL);
2446 	if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
2447 		return ERR_PTR(-EINVAL);
2448 
2449 	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
2450 	if (!tpg) {
2451 		pr_err("Unable to allocate struct vhost_scsi_tpg");
2452 		return ERR_PTR(-ENOMEM);
2453 	}
2454 	mutex_init(&tpg->tv_tpg_mutex);
2455 	INIT_LIST_HEAD(&tpg->tv_tpg_list);
2456 	tpg->tport = tport;
2457 	tpg->tport_tpgt = tpgt;
2458 
2459 	ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id);
2460 	if (ret < 0) {
2461 		kfree(tpg);
2462 		return NULL;
2463 	}
2464 	mutex_lock(&vhost_scsi_mutex);
2465 	list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
2466 	mutex_unlock(&vhost_scsi_mutex);
2467 
2468 	return &tpg->se_tpg;
2469 }
2470 
2471 static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg)
2472 {
2473 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2474 				struct vhost_scsi_tpg, se_tpg);
2475 
2476 	mutex_lock(&vhost_scsi_mutex);
2477 	list_del(&tpg->tv_tpg_list);
2478 	mutex_unlock(&vhost_scsi_mutex);
2479 	/*
2480 	 * Release the virtual I_T Nexus for this vhost TPG
2481 	 */
2482 	vhost_scsi_drop_nexus(tpg);
2483 	/*
2484 	 * Deregister the se_tpg from TCM..
2485 	 */
2486 	core_tpg_deregister(se_tpg);
2487 	kfree(tpg);
2488 }
2489 
2490 static struct se_wwn *
2491 vhost_scsi_make_tport(struct target_fabric_configfs *tf,
2492 		     struct config_group *group,
2493 		     const char *name)
2494 {
2495 	struct vhost_scsi_tport *tport;
2496 	char *ptr;
2497 	u64 wwpn = 0;
2498 	int off = 0;
2499 
2500 	/* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
2501 		return ERR_PTR(-EINVAL); */
2502 
2503 	tport = kzalloc(sizeof(*tport), GFP_KERNEL);
2504 	if (!tport) {
2505 		pr_err("Unable to allocate struct vhost_scsi_tport");
2506 		return ERR_PTR(-ENOMEM);
2507 	}
2508 	tport->tport_wwpn = wwpn;
2509 	/*
2510 	 * Determine the emulated Protocol Identifier and Target Port Name
2511 	 * based on the incoming configfs directory name.
2512 	 */
2513 	ptr = strstr(name, "naa.");
2514 	if (ptr) {
2515 		tport->tport_proto_id = SCSI_PROTOCOL_SAS;
2516 		goto check_len;
2517 	}
2518 	ptr = strstr(name, "fc.");
2519 	if (ptr) {
2520 		tport->tport_proto_id = SCSI_PROTOCOL_FCP;
2521 		off = 3; /* Skip over "fc." */
2522 		goto check_len;
2523 	}
2524 	ptr = strstr(name, "iqn.");
2525 	if (ptr) {
2526 		tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
2527 		goto check_len;
2528 	}
2529 
2530 	pr_err("Unable to locate prefix for emulated Target Port:"
2531 			" %s\n", name);
2532 	kfree(tport);
2533 	return ERR_PTR(-EINVAL);
2534 
2535 check_len:
2536 	if (strlen(name) >= VHOST_SCSI_NAMELEN) {
2537 		pr_err("Emulated %s Address: %s, exceeds"
2538 			" max: %d\n", name, vhost_scsi_dump_proto_id(tport),
2539 			VHOST_SCSI_NAMELEN);
2540 		kfree(tport);
2541 		return ERR_PTR(-EINVAL);
2542 	}
2543 	snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]);
2544 
2545 	pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
2546 		" %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name);
2547 
2548 	return &tport->tport_wwn;
2549 }
2550 
2551 static void vhost_scsi_drop_tport(struct se_wwn *wwn)
2552 {
2553 	struct vhost_scsi_tport *tport = container_of(wwn,
2554 				struct vhost_scsi_tport, tport_wwn);
2555 
2556 	pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
2557 		" %s Address: %s\n", vhost_scsi_dump_proto_id(tport),
2558 		tport->tport_name);
2559 
2560 	kfree(tport);
2561 }
2562 
2563 static ssize_t
2564 vhost_scsi_wwn_version_show(struct config_item *item, char *page)
2565 {
2566 	return sysfs_emit(page, "TCM_VHOST fabric module %s on %s/%s"
2567 		"on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2568 		utsname()->machine);
2569 }
2570 
2571 CONFIGFS_ATTR_RO(vhost_scsi_wwn_, version);
2572 
2573 static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
2574 	&vhost_scsi_wwn_attr_version,
2575 	NULL,
2576 };
2577 
2578 static const struct target_core_fabric_ops vhost_scsi_ops = {
2579 	.module				= THIS_MODULE,
2580 	.fabric_name			= "vhost",
2581 	.max_data_sg_nents		= VHOST_SCSI_PREALLOC_SGLS,
2582 	.tpg_get_wwn			= vhost_scsi_get_fabric_wwn,
2583 	.tpg_get_tag			= vhost_scsi_get_tpgt,
2584 	.tpg_check_demo_mode		= vhost_scsi_check_true,
2585 	.tpg_check_demo_mode_cache	= vhost_scsi_check_true,
2586 	.tpg_check_prot_fabric_only	= vhost_scsi_check_prot_fabric_only,
2587 	.release_cmd			= vhost_scsi_release_cmd,
2588 	.check_stop_free		= vhost_scsi_check_stop_free,
2589 	.sess_get_initiator_sid		= NULL,
2590 	.write_pending			= vhost_scsi_write_pending,
2591 	.queue_data_in			= vhost_scsi_queue_data_in,
2592 	.queue_status			= vhost_scsi_queue_status,
2593 	.queue_tm_rsp			= vhost_scsi_queue_tm_rsp,
2594 	.aborted_task			= vhost_scsi_aborted_task,
2595 	/*
2596 	 * Setup callers for generic logic in target_core_fabric_configfs.c
2597 	 */
2598 	.fabric_make_wwn		= vhost_scsi_make_tport,
2599 	.fabric_drop_wwn		= vhost_scsi_drop_tport,
2600 	.fabric_make_tpg		= vhost_scsi_make_tpg,
2601 	.fabric_drop_tpg		= vhost_scsi_drop_tpg,
2602 	.fabric_post_link		= vhost_scsi_port_link,
2603 	.fabric_pre_unlink		= vhost_scsi_port_unlink,
2604 
2605 	.tfc_wwn_attrs			= vhost_scsi_wwn_attrs,
2606 	.tfc_tpg_base_attrs		= vhost_scsi_tpg_attrs,
2607 	.tfc_tpg_attrib_attrs		= vhost_scsi_tpg_attrib_attrs,
2608 };
2609 
2610 static int __init vhost_scsi_init(void)
2611 {
2612 	int ret = -ENOMEM;
2613 
2614 	pr_debug("TCM_VHOST fabric module %s on %s/%s"
2615 		" on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2616 		utsname()->machine);
2617 
2618 	ret = vhost_scsi_register();
2619 	if (ret < 0)
2620 		goto out;
2621 
2622 	ret = target_register_template(&vhost_scsi_ops);
2623 	if (ret < 0)
2624 		goto out_vhost_scsi_deregister;
2625 
2626 	return 0;
2627 
2628 out_vhost_scsi_deregister:
2629 	vhost_scsi_deregister();
2630 out:
2631 	return ret;
2632 };
2633 
2634 static void vhost_scsi_exit(void)
2635 {
2636 	target_unregister_template(&vhost_scsi_ops);
2637 	vhost_scsi_deregister();
2638 };
2639 
2640 MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
2641 MODULE_ALIAS("tcm_vhost");
2642 MODULE_LICENSE("GPL");
2643 module_init(vhost_scsi_init);
2644 module_exit(vhost_scsi_exit);
2645