xref: /openbmc/linux/drivers/vhost/scsi.c (revision fa840ba4)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*******************************************************************************
3  * Vhost kernel TCM fabric driver for virtio SCSI initiators
4  *
5  * (C) Copyright 2010-2013 Datera, Inc.
6  * (C) Copyright 2010-2012 IBM Corp.
7  *
8  * Authors: Nicholas A. Bellinger <nab@daterainc.com>
9  *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
10  ****************************************************************************/
11 
12 #include <linux/module.h>
13 #include <linux/moduleparam.h>
14 #include <generated/utsrelease.h>
15 #include <linux/utsname.h>
16 #include <linux/init.h>
17 #include <linux/slab.h>
18 #include <linux/kthread.h>
19 #include <linux/types.h>
20 #include <linux/string.h>
21 #include <linux/configfs.h>
22 #include <linux/ctype.h>
23 #include <linux/compat.h>
24 #include <linux/eventfd.h>
25 #include <linux/fs.h>
26 #include <linux/vmalloc.h>
27 #include <linux/miscdevice.h>
28 #include <linux/blk_types.h>
29 #include <linux/bio.h>
30 #include <asm/unaligned.h>
31 #include <scsi/scsi_common.h>
32 #include <scsi/scsi_proto.h>
33 #include <target/target_core_base.h>
34 #include <target/target_core_fabric.h>
35 #include <linux/vhost.h>
36 #include <linux/virtio_scsi.h>
37 #include <linux/llist.h>
38 #include <linux/bitmap.h>
39 
40 #include "vhost.h"
41 
42 #define VHOST_SCSI_VERSION  "v0.1"
43 #define VHOST_SCSI_NAMELEN 256
44 #define VHOST_SCSI_MAX_CDB_SIZE 32
45 #define VHOST_SCSI_PREALLOC_SGLS 2048
46 #define VHOST_SCSI_PREALLOC_UPAGES 2048
47 #define VHOST_SCSI_PREALLOC_PROT_SGLS 2048
48 
49 /* Max number of requests before requeueing the job.
50  * Using this limit prevents one virtqueue from starving others with
51  * requests.
52  */
53 #define VHOST_SCSI_WEIGHT 256
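
/*
 * Illustration (editor's note): the queue handlers below loop with
 * "} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));", so once
 * VHOST_SCSI_WEIGHT (256) descriptors have been consumed from a single
 * kick the work item requeues itself and other virtqueues get a turn.
 */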
54 
55 struct vhost_scsi_inflight {
56 	/* Wait for the flush operation to finish */
57 	struct completion comp;
58 	/* Refcount for the inflight reqs */
59 	struct kref kref;
60 };
61 
62 struct vhost_scsi_cmd {
63 	/* Descriptor from vhost_get_vq_desc() for virt_queue segment */
64 	int tvc_vq_desc;
65 	/* virtio-scsi initiator task attribute */
66 	int tvc_task_attr;
67 	/* virtio-scsi response incoming iovecs */
68 	int tvc_in_iovs;
69 	/* virtio-scsi initiator data direction */
70 	enum dma_data_direction tvc_data_direction;
71 	/* Expected data transfer length from virtio-scsi header */
72 	u32 tvc_exp_data_len;
73 	/* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
74 	u64 tvc_tag;
75 	/* The number of scatterlists associated with this cmd */
76 	u32 tvc_sgl_count;
77 	u32 tvc_prot_sgl_count;
78 	/* Saved unpacked SCSI LUN for vhost_scsi_target_queue_cmd() */
79 	u32 tvc_lun;
80 	u32 copied_iov:1;
81 	const void *saved_iter_addr;
82 	struct iov_iter saved_iter;
83 	/* Pointer to the SGL formatted memory from virtio-scsi */
84 	struct scatterlist *tvc_sgl;
85 	struct scatterlist *tvc_prot_sgl;
86 	struct page **tvc_upages;
87 	/* Pointer to response header iovec */
88 	struct iovec *tvc_resp_iov;
89 	/* Pointer to vhost_scsi for our device */
90 	struct vhost_scsi *tvc_vhost;
91 	/* Pointer to vhost_virtqueue for the cmd */
92 	struct vhost_virtqueue *tvc_vq;
93 	/* Pointer to vhost nexus memory */
94 	struct vhost_scsi_nexus *tvc_nexus;
95 	/* The TCM I/O descriptor that is accessed via container_of() */
96 	struct se_cmd tvc_se_cmd;
97 	/* Copy of the incoming SCSI command descriptor block (CDB) */
98 	unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE];
99 	/* Sense buffer that will be mapped into outgoing status */
100 	unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
101 	/* Completed commands list, serviced from vhost worker thread */
102 	struct llist_node tvc_completion_list;
103 	/* Used to track inflight cmd */
104 	struct vhost_scsi_inflight *inflight;
105 };
106 
107 struct vhost_scsi_nexus {
108 	/* Pointer to TCM session for I_T Nexus */
109 	struct se_session *tvn_se_sess;
110 };
111 
112 struct vhost_scsi_tpg {
113 	/* Vhost port target portal group tag for TCM */
114 	u16 tport_tpgt;
115 	/* Used to track the number of TPG Port/LUN links w.r.t. explicit I_T Nexus shutdown */
116 	int tv_tpg_port_count;
117 	/* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
118 	int tv_tpg_vhost_count;
119 	/* Used for enabling T10-PI with legacy devices */
120 	int tv_fabric_prot_type;
121 	/* list for vhost_scsi_list */
122 	struct list_head tv_tpg_list;
123 	/* Used to protect access to tpg_nexus */
124 	struct mutex tv_tpg_mutex;
125 	/* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
126 	struct vhost_scsi_nexus *tpg_nexus;
127 	/* Pointer back to vhost_scsi_tport */
128 	struct vhost_scsi_tport *tport;
129 	/* Returned by vhost_scsi_make_tpg() */
130 	struct se_portal_group se_tpg;
131 	/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
132 	struct vhost_scsi *vhost_scsi;
133 };
134 
135 struct vhost_scsi_tport {
136 	/* SCSI protocol the tport is providing */
137 	u8 tport_proto_id;
138 	/* Binary World Wide unique Port Name for Vhost Target port */
139 	u64 tport_wwpn;
140 	/* ASCII formatted WWPN for Vhost Target port */
141 	char tport_name[VHOST_SCSI_NAMELEN];
142 	/* Returned by vhost_scsi_make_tport() */
143 	struct se_wwn tport_wwn;
144 };
145 
146 struct vhost_scsi_evt {
147 	/* event to be sent to guest */
148 	struct virtio_scsi_event event;
149 	/* event list, serviced from vhost worker thread */
150 	struct llist_node list;
151 };
152 
153 enum {
154 	VHOST_SCSI_VQ_CTL = 0,
155 	VHOST_SCSI_VQ_EVT = 1,
156 	VHOST_SCSI_VQ_IO = 2,
157 };
158 
159 /* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
160 enum {
161 	VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
162 					       (1ULL << VIRTIO_SCSI_F_T10_PI)
163 };
164 
165 #define VHOST_SCSI_MAX_TARGET	256
166 #define VHOST_SCSI_MAX_IO_VQ	1024
167 #define VHOST_SCSI_MAX_EVENT	128
168 
169 static unsigned vhost_scsi_max_io_vqs = 128;
170 module_param_named(max_io_vqs, vhost_scsi_max_io_vqs, uint, 0644);
171 MODULE_PARM_DESC(max_io_vqs, "Set the max number of IO virtqueues a vhost scsi device can support. The default is 128. The max is 1024.");
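
/*
 * Usage sketch (editor's note, illustrative value): the limit can be set
 * at load time, e.g. "modprobe vhost_scsi max_io_vqs=16", or through
 * /sys/module/vhost_scsi/parameters/max_io_vqs since the mode is 0644.
 * vhost_scsi_open() samples it, so a change only affects devices opened
 * afterwards.
 */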
172 
173 struct vhost_scsi_virtqueue {
174 	struct vhost_virtqueue vq;
175 	struct vhost_scsi *vs;
176 	/*
177 	 * Reference counting for inflight reqs, used for the flush operation.
178 	 * At any time, one of the two entries tracks newly submitted commands,
179 	 * while we wait for the other one to drop to 0.
180 	 */
181 	struct vhost_scsi_inflight inflights[2];
182 	/*
183 	 * Indicates the inflight entry currently in use, protected by vq->mutex.
184 	 * Writers must also take dev mutex and flush under it.
185 	 */
186 	int inflight_idx;
187 	struct vhost_scsi_cmd *scsi_cmds;
188 	struct sbitmap scsi_tags;
189 	int max_cmds;
190 
191 	struct vhost_work completion_work;
192 	struct llist_head completion_list;
193 };
194 
195 struct vhost_scsi {
196 	/* Protected by vhost_scsi->dev.mutex */
197 	struct vhost_scsi_tpg **vs_tpg;
198 	char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
199 
200 	struct vhost_dev dev;
201 	struct vhost_scsi_virtqueue *vqs;
202 	struct vhost_scsi_inflight **old_inflight;
203 
204 	struct vhost_work vs_event_work; /* evt injection work item */
205 	struct llist_head vs_event_list; /* evt injection queue */
206 
207 	bool vs_events_missed; /* any missed events, protected by vq->mutex */
208 	int vs_events_nr; /* num of pending events, protected by vq->mutex */
209 };
210 
211 struct vhost_scsi_tmf {
212 	struct vhost_work vwork;
213 	struct vhost_scsi *vhost;
214 	struct vhost_scsi_virtqueue *svq;
215 
216 	struct se_cmd se_cmd;
217 	u8 scsi_resp;
218 	struct vhost_scsi_inflight *inflight;
219 	struct iovec resp_iov;
220 	int in_iovs;
221 	int vq_desc;
222 };
223 
224 /*
225  * Context for processing request and control queue operations.
226  */
227 struct vhost_scsi_ctx {
228 	int head;
229 	unsigned int out, in;
230 	size_t req_size, rsp_size;
231 	size_t out_size, in_size;
232 	u8 *target, *lunp;
233 	void *req;
234 	struct iov_iter out_iter;
235 };
236 
237 /*
238  * Global mutex to protect vhost_scsi TPG list for vhost IOCTLs and LIO
239  * configfs management operations.
240  */
241 static DEFINE_MUTEX(vhost_scsi_mutex);
242 static LIST_HEAD(vhost_scsi_list);
243 
244 static void vhost_scsi_done_inflight(struct kref *kref)
245 {
246 	struct vhost_scsi_inflight *inflight;
247 
248 	inflight = container_of(kref, struct vhost_scsi_inflight, kref);
249 	complete(&inflight->comp);
250 }
251 
252 static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
253 				    struct vhost_scsi_inflight *old_inflight[])
254 {
255 	struct vhost_scsi_inflight *new_inflight;
256 	struct vhost_virtqueue *vq;
257 	int idx, i;
258 
259 	for (i = 0; i < vs->dev.nvqs;  i++) {
260 		vq = &vs->vqs[i].vq;
261 
262 		mutex_lock(&vq->mutex);
263 
264 		/* store the old inflight */
265 		idx = vs->vqs[i].inflight_idx;
266 		if (old_inflight)
267 			old_inflight[i] = &vs->vqs[i].inflights[idx];
268 
269 		/* set up the new inflight */
270 		vs->vqs[i].inflight_idx = idx ^ 1;
271 		new_inflight = &vs->vqs[i].inflights[idx ^ 1];
272 		kref_init(&new_inflight->kref);
273 		init_completion(&new_inflight->comp);
274 
275 		mutex_unlock(&vq->mutex);
276 	}
277 }
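
/*
 * Editor's note on the scheme: each vq owns the two counters in
 * svq->inflights[].  Commands take a reference on whichever entry
 * inflight_idx selects (see vhost_scsi_get_inflight() below), and a
 * flush flips inflight_idx so that new commands charge the fresh entry
 * while the old one drains to zero.
 */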
278 
279 static struct vhost_scsi_inflight *
280 vhost_scsi_get_inflight(struct vhost_virtqueue *vq)
281 {
282 	struct vhost_scsi_inflight *inflight;
283 	struct vhost_scsi_virtqueue *svq;
284 
285 	svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
286 	inflight = &svq->inflights[svq->inflight_idx];
287 	kref_get(&inflight->kref);
288 
289 	return inflight;
290 }
291 
292 static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)
293 {
294 	kref_put(&inflight->kref, vhost_scsi_done_inflight);
295 }
296 
297 static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
298 {
299 	return 1;
300 }
301 
302 static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
303 {
304 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
305 				struct vhost_scsi_tpg, se_tpg);
306 	struct vhost_scsi_tport *tport = tpg->tport;
307 
308 	return &tport->tport_name[0];
309 }
310 
311 static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
312 {
313 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
314 				struct vhost_scsi_tpg, se_tpg);
315 	return tpg->tport_tpgt;
316 }
317 
318 static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
319 {
320 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
321 				struct vhost_scsi_tpg, se_tpg);
322 
323 	return tpg->tv_fabric_prot_type;
324 }
325 
326 static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
327 {
328 	struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
329 				struct vhost_scsi_cmd, tvc_se_cmd);
330 	struct vhost_scsi_virtqueue *svq = container_of(tv_cmd->tvc_vq,
331 				struct vhost_scsi_virtqueue, vq);
332 	struct vhost_scsi_inflight *inflight = tv_cmd->inflight;
333 	int i;
334 
335 	if (tv_cmd->tvc_sgl_count) {
336 		for (i = 0; i < tv_cmd->tvc_sgl_count; i++) {
337 			if (tv_cmd->copied_iov)
338 				__free_page(sg_page(&tv_cmd->tvc_sgl[i]));
339 			else
340 				put_page(sg_page(&tv_cmd->tvc_sgl[i]));
341 		}
342 		kfree(tv_cmd->saved_iter_addr);
343 	}
344 	if (tv_cmd->tvc_prot_sgl_count) {
345 		for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
346 			put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
347 	}
348 
349 	sbitmap_clear_bit(&svq->scsi_tags, se_cmd->map_tag);
350 	vhost_scsi_put_inflight(inflight);
351 }
352 
353 static void vhost_scsi_release_tmf_res(struct vhost_scsi_tmf *tmf)
354 {
355 	struct vhost_scsi_inflight *inflight = tmf->inflight;
356 
357 	kfree(tmf);
358 	vhost_scsi_put_inflight(inflight);
359 }
360 
361 static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
362 {
363 	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
364 		struct vhost_scsi_tmf *tmf = container_of(se_cmd,
365 					struct vhost_scsi_tmf, se_cmd);
366 		struct vhost_virtqueue *vq = &tmf->svq->vq;
367 
368 		vhost_vq_work_queue(vq, &tmf->vwork);
369 	} else {
370 		struct vhost_scsi_cmd *cmd = container_of(se_cmd,
371 					struct vhost_scsi_cmd, tvc_se_cmd);
372 		struct vhost_scsi_virtqueue *svq =  container_of(cmd->tvc_vq,
373 					struct vhost_scsi_virtqueue, vq);
374 
375 		llist_add(&cmd->tvc_completion_list, &svq->completion_list);
376 		vhost_vq_work_queue(&svq->vq, &svq->completion_work);
377 	}
378 }
379 
380 static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
381 {
382 	/* Go ahead and process the write immediately */
383 	target_execute_cmd(se_cmd);
384 	return 0;
385 }
386 
387 static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
388 {
389 	transport_generic_free_cmd(se_cmd, 0);
390 	return 0;
391 }
392 
393 static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
394 {
395 	transport_generic_free_cmd(se_cmd, 0);
396 	return 0;
397 }
398 
399 static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
400 {
401 	struct vhost_scsi_tmf *tmf = container_of(se_cmd, struct vhost_scsi_tmf,
402 						  se_cmd);
403 
404 	tmf->scsi_resp = se_cmd->se_tmr_req->response;
405 	transport_generic_free_cmd(&tmf->se_cmd, 0);
406 }
407 
408 static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
409 {
410 	return;
411 }
412 
413 static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
414 {
415 	vs->vs_events_nr--;
416 	kfree(evt);
417 }
418 
419 static struct vhost_scsi_evt *
420 vhost_scsi_allocate_evt(struct vhost_scsi *vs,
421 		       u32 event, u32 reason)
422 {
423 	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
424 	struct vhost_scsi_evt *evt;
425 
426 	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
427 		vs->vs_events_missed = true;
428 		return NULL;
429 	}
430 
431 	evt = kzalloc(sizeof(*evt), GFP_KERNEL);
432 	if (!evt) {
433 		vq_err(vq, "Failed to allocate vhost_scsi_evt\n");
434 		vs->vs_events_missed = true;
435 		return NULL;
436 	}
437 
438 	evt->event.event = cpu_to_vhost32(vq, event);
439 	evt->event.reason = cpu_to_vhost32(vq, reason);
440 	vs->vs_events_nr++;
441 
442 	return evt;
443 }
444 
445 static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
446 {
447 	return target_put_sess_cmd(se_cmd);
448 }
449 
450 static void
451 vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
452 {
453 	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
454 	struct virtio_scsi_event *event = &evt->event;
455 	struct virtio_scsi_event __user *eventp;
456 	unsigned out, in;
457 	int head, ret;
458 
459 	if (!vhost_vq_get_backend(vq)) {
460 		vs->vs_events_missed = true;
461 		return;
462 	}
463 
464 again:
465 	vhost_disable_notify(&vs->dev, vq);
466 	head = vhost_get_vq_desc(vq, vq->iov,
467 			ARRAY_SIZE(vq->iov), &out, &in,
468 			NULL, NULL);
469 	if (head < 0) {
470 		vs->vs_events_missed = true;
471 		return;
472 	}
473 	if (head == vq->num) {
474 		if (vhost_enable_notify(&vs->dev, vq))
475 			goto again;
476 		vs->vs_events_missed = true;
477 		return;
478 	}
479 
480 	if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
481 		vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
482 				vq->iov[out].iov_len);
483 		vs->vs_events_missed = true;
484 		return;
485 	}
486 
487 	if (vs->vs_events_missed) {
488 		event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
489 		vs->vs_events_missed = false;
490 	}
491 
492 	eventp = vq->iov[out].iov_base;
493 	ret = __copy_to_user(eventp, event, sizeof(*event));
494 	if (!ret)
495 		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
496 	else
497 		vq_err(vq, "Faulted on vhost_scsi_send_event\n");
498 }
499 
500 static void vhost_scsi_complete_events(struct vhost_scsi *vs, bool drop)
501 {
502 	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
503 	struct vhost_scsi_evt *evt, *t;
504 	struct llist_node *llnode;
505 
506 	mutex_lock(&vq->mutex);
507 	llnode = llist_del_all(&vs->vs_event_list);
508 	llist_for_each_entry_safe(evt, t, llnode, list) {
509 		if (!drop)
510 			vhost_scsi_do_evt_work(vs, evt);
511 		vhost_scsi_free_evt(vs, evt);
512 	}
513 	mutex_unlock(&vq->mutex);
514 }
515 
516 static void vhost_scsi_evt_work(struct vhost_work *work)
517 {
518 	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
519 					     vs_event_work);
520 	vhost_scsi_complete_events(vs, false);
521 }
522 
523 static int vhost_scsi_copy_sgl_to_iov(struct vhost_scsi_cmd *cmd)
524 {
525 	struct iov_iter *iter = &cmd->saved_iter;
526 	struct scatterlist *sg = cmd->tvc_sgl;
527 	struct page *page;
528 	size_t len;
529 	int i;
530 
531 	for (i = 0; i < cmd->tvc_sgl_count; i++) {
532 		page = sg_page(&sg[i]);
533 		len = sg[i].length;
534 
535 		if (copy_page_to_iter(page, 0, len, iter) != len) {
536 			pr_err("Could not copy data while handling misaligned cmd. Error %zu\n",
537 			       len);
538 			return -1;
539 		}
540 	}
541 
542 	return 0;
543 }
544 
545 /* Fill in status and signal that we are done processing this command
546  *
547  * This is scheduled in the vhost work queue so we are called with the owner
548  * process mm and can access the vring.
549  */
550 static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
551 {
552 	struct vhost_scsi_virtqueue *svq = container_of(work,
553 				struct vhost_scsi_virtqueue, completion_work);
554 	struct virtio_scsi_cmd_resp v_rsp;
555 	struct vhost_scsi_cmd *cmd, *t;
556 	struct llist_node *llnode;
557 	struct se_cmd *se_cmd;
558 	struct iov_iter iov_iter;
559 	bool signal = false;
560 	int ret;
561 
562 	llnode = llist_del_all(&svq->completion_list);
563 	llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) {
564 		se_cmd = &cmd->tvc_se_cmd;
565 
566 		pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
567 			cmd, se_cmd->residual_count, se_cmd->scsi_status);
568 		memset(&v_rsp, 0, sizeof(v_rsp));
569 
570 		if (cmd->saved_iter_addr && vhost_scsi_copy_sgl_to_iov(cmd)) {
571 			v_rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
572 		} else {
573 			v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq,
574 						     se_cmd->residual_count);
575 			/* TODO is status_qualifier field needed? */
576 			v_rsp.status = se_cmd->scsi_status;
577 			v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
578 							 se_cmd->scsi_sense_length);
579 			memcpy(v_rsp.sense, cmd->tvc_sense_buf,
580 			       se_cmd->scsi_sense_length);
581 		}
582 
583 		iov_iter_init(&iov_iter, ITER_DEST, cmd->tvc_resp_iov,
584 			      cmd->tvc_in_iovs, sizeof(v_rsp));
585 		ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
586 		if (likely(ret == sizeof(v_rsp))) {
587 			signal = true;
588 
589 			vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
590 		} else
591 			pr_err("Faulted on virtio_scsi_cmd_resp\n");
592 
593 		vhost_scsi_release_cmd_res(se_cmd);
594 	}
595 
596 	if (signal)
597 		vhost_signal(&svq->vs->dev, &svq->vq);
598 }
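
/*
 * Editor's note: the single vhost_signal() above, gated on "signal",
 * batches the guest interrupt for every completion harvested from the
 * llist in one pass instead of signalling per command.
 */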
599 
600 static struct vhost_scsi_cmd *
601 vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
602 		   unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
603 		   u32 exp_data_len, int data_direction)
604 {
605 	struct vhost_scsi_virtqueue *svq = container_of(vq,
606 					struct vhost_scsi_virtqueue, vq);
607 	struct vhost_scsi_cmd *cmd;
608 	struct vhost_scsi_nexus *tv_nexus;
609 	struct scatterlist *sg, *prot_sg;
610 	struct iovec *tvc_resp_iov;
611 	struct page **pages;
612 	int tag;
613 
614 	tv_nexus = tpg->tpg_nexus;
615 	if (!tv_nexus) {
616 		pr_err("Unable to locate active struct vhost_scsi_nexus\n");
617 		return ERR_PTR(-EIO);
618 	}
619 
620 	tag = sbitmap_get(&svq->scsi_tags);
621 	if (tag < 0) {
622 		pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
623 		return ERR_PTR(-ENOMEM);
624 	}
625 
626 	cmd = &svq->scsi_cmds[tag];
627 	sg = cmd->tvc_sgl;
628 	prot_sg = cmd->tvc_prot_sgl;
629 	pages = cmd->tvc_upages;
630 	tvc_resp_iov = cmd->tvc_resp_iov;
631 	memset(cmd, 0, sizeof(*cmd));
632 	cmd->tvc_sgl = sg;
633 	cmd->tvc_prot_sgl = prot_sg;
634 	cmd->tvc_upages = pages;
635 	cmd->tvc_se_cmd.map_tag = tag;
636 	cmd->tvc_tag = scsi_tag;
637 	cmd->tvc_lun = lun;
638 	cmd->tvc_task_attr = task_attr;
639 	cmd->tvc_exp_data_len = exp_data_len;
640 	cmd->tvc_data_direction = data_direction;
641 	cmd->tvc_nexus = tv_nexus;
642 	cmd->inflight = vhost_scsi_get_inflight(vq);
643 	cmd->tvc_resp_iov = tvc_resp_iov;
644 
645 	memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);
646 
647 	return cmd;
648 }
649 
650 /*
651  * Map a user memory range into a scatterlist
652  *
653  * Returns the number of scatterlist entries used or -errno on error.
654  */
655 static int
656 vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
657 		      struct iov_iter *iter,
658 		      struct scatterlist *sgl,
659 		      bool is_prot)
660 {
661 	struct page **pages = cmd->tvc_upages;
662 	struct scatterlist *sg = sgl;
663 	ssize_t bytes, mapped_bytes;
664 	size_t offset, mapped_offset;
665 	unsigned int npages = 0;
666 
667 	bytes = iov_iter_get_pages2(iter, pages, LONG_MAX,
668 				VHOST_SCSI_PREALLOC_UPAGES, &offset);
669 	/* No pages were pinned */
670 	if (bytes <= 0)
671 		return bytes < 0 ? bytes : -EFAULT;
672 
673 	mapped_bytes = bytes;
674 	mapped_offset = offset;
675 
676 	while (bytes) {
677 		unsigned n = min_t(unsigned, PAGE_SIZE - offset, bytes);
678 		/*
679 		 * The block layer requires bios/requests to be a multiple of
680 		 * 512 bytes, but Windows can send us vecs that are misaligned.
681 		 * This can result in bios and later requests with misaligned
682 		 * sizes if we have to break up a cmd/scatterlist into multiple
683 		 * bios.
684 		 *
685 		 * We currently only break up a command into multiple bios if
686 		 * we hit the vec/seg limit, so check if our sgl_count is
687 		 * greater than the max and if a vec in the cmd has a
688 		 * misaligned offset/size.
689 		 */
690 		if (!is_prot &&
691 		    (offset & (SECTOR_SIZE - 1) || n & (SECTOR_SIZE - 1)) &&
692 		    cmd->tvc_sgl_count > BIO_MAX_VECS) {
693 			WARN_ONCE(true,
694 				  "vhost-scsi detected misaligned IO. Performance may be degraded.");
695 			goto revert_iter_get_pages;
696 		}
697 
698 		sg_set_page(sg++, pages[npages++], n, offset);
699 		bytes -= n;
700 		offset = 0;
701 	}
702 
703 	return npages;
704 
705 revert_iter_get_pages:
706 	iov_iter_revert(iter, mapped_bytes);
707 
708 	npages = 0;
709 	while (mapped_bytes) {
710 		unsigned int n = min_t(unsigned int, PAGE_SIZE - mapped_offset,
711 				       mapped_bytes);
712 
713 		put_page(pages[npages++]);
714 
715 		mapped_bytes -= n;
716 		mapped_offset = 0;
717 	}
718 
719 	return -EINVAL;
720 }
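
/*
 * Worked example of the alignment check above (editor's note, assuming
 * 4K pages): a guest vec pinned at page offset 100 produces a first
 * chunk of n = 4096 - 100 = 3996 bytes.  Neither 100 nor 3996 is a
 * multiple of SECTOR_SIZE (512), so if the command also spans more than
 * BIO_MAX_VECS segments the pinned pages are released and the -EINVAL
 * return makes vhost_scsi_mapal() fall back to the bounce-buffer path
 * in vhost_scsi_copy_iov_to_sgl().
 */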
721 
722 static int
723 vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
724 {
725 	int sgl_count = 0;
726 
727 	if (!iter || !iter_iov(iter)) {
728 		pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
729 		       " present\n", __func__, bytes);
730 		return -EINVAL;
731 	}
732 
733 	sgl_count = iov_iter_npages(iter, 0xffff);
734 	if (sgl_count > max_sgls) {
735 		pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
736 		       " max_sgls: %d\n", __func__, sgl_count, max_sgls);
737 		return -EINVAL;
738 	}
739 	return sgl_count;
740 }
741 
742 static int
743 vhost_scsi_copy_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter,
744 			   struct scatterlist *sg, int sg_count)
745 {
746 	size_t len = iov_iter_count(iter);
747 	unsigned int nbytes = 0;
748 	struct page *page;
749 	int i;
750 
751 	if (cmd->tvc_data_direction == DMA_FROM_DEVICE) {
752 		cmd->saved_iter_addr = dup_iter(&cmd->saved_iter, iter,
753 						GFP_KERNEL);
754 		if (!cmd->saved_iter_addr)
755 			return -ENOMEM;
756 	}
757 
758 	for (i = 0; i < sg_count; i++) {
759 		page = alloc_page(GFP_KERNEL);
760 		if (!page) {
761 			i--;
762 			goto err;
763 		}
764 
765 		nbytes = min_t(unsigned int, PAGE_SIZE, len);
766 		sg_set_page(&sg[i], page, nbytes, 0);
767 
768 		if (cmd->tvc_data_direction == DMA_TO_DEVICE &&
769 		    copy_page_from_iter(page, 0, nbytes, iter) != nbytes)
770 			goto err;
771 
772 		len -= nbytes;
773 	}
774 
775 	cmd->copied_iov = 1;
776 	return 0;
777 
778 err:
779 	pr_err("Could not read %u bytes while handling misaligned cmd\n",
780 	       nbytes);
781 
782 	for (; i >= 0; i--)
783 		__free_page(sg_page(&sg[i]));
784 	kfree(cmd->saved_iter_addr);
785 	return -ENOMEM;
786 }
787 
788 static int
789 vhost_scsi_map_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter,
790 			  struct scatterlist *sg, int sg_count, bool is_prot)
791 {
792 	struct scatterlist *p = sg;
793 	size_t revert_bytes;
794 	int ret;
795 
796 	while (iov_iter_count(iter)) {
797 		ret = vhost_scsi_map_to_sgl(cmd, iter, sg, is_prot);
798 		if (ret < 0) {
799 			revert_bytes = 0;
800 
801 			while (p < sg) {
802 				struct page *page = sg_page(p);
803 
804 				if (page) {
805 					put_page(page);
806 					revert_bytes += p->length;
807 				}
808 				p++;
809 			}
810 
811 			iov_iter_revert(iter, revert_bytes);
812 			return ret;
813 		}
814 		sg += ret;
815 	}
816 
817 	return 0;
818 }
819 
820 static int
821 vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
822 		 size_t prot_bytes, struct iov_iter *prot_iter,
823 		 size_t data_bytes, struct iov_iter *data_iter)
824 {
825 	int sgl_count, ret;
826 
827 	if (prot_bytes) {
828 		sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
829 						 VHOST_SCSI_PREALLOC_PROT_SGLS);
830 		if (sgl_count < 0)
831 			return sgl_count;
832 
833 		sg_init_table(cmd->tvc_prot_sgl, sgl_count);
834 		cmd->tvc_prot_sgl_count = sgl_count;
835 		pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
836 			 cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);
837 
838 		ret = vhost_scsi_map_iov_to_sgl(cmd, prot_iter,
839 						cmd->tvc_prot_sgl,
840 						cmd->tvc_prot_sgl_count, true);
841 		if (ret < 0) {
842 			cmd->tvc_prot_sgl_count = 0;
843 			return ret;
844 		}
845 	}
846 	sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
847 					 VHOST_SCSI_PREALLOC_SGLS);
848 	if (sgl_count < 0)
849 		return sgl_count;
850 
851 	sg_init_table(cmd->tvc_sgl, sgl_count);
852 	cmd->tvc_sgl_count = sgl_count;
853 	pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
854 		  cmd->tvc_sgl, cmd->tvc_sgl_count);
855 
856 	ret = vhost_scsi_map_iov_to_sgl(cmd, data_iter, cmd->tvc_sgl,
857 					cmd->tvc_sgl_count, false);
858 	if (ret == -EINVAL) {
859 		sg_init_table(cmd->tvc_sgl, cmd->tvc_sgl_count);
860 		ret = vhost_scsi_copy_iov_to_sgl(cmd, data_iter, cmd->tvc_sgl,
861 						 cmd->tvc_sgl_count);
862 	}
863 
864 	if (ret < 0) {
865 		cmd->tvc_sgl_count = 0;
866 		return ret;
867 	}
868 	return 0;
869 }
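
/*
 * Data-path summary (editor's note): vhost_scsi_mapal() first tries the
 * zero-copy route, pinning guest pages straight into tvc_sgl and
 * tvc_prot_sgl; only a misaligned data payload (-EINVAL above) is routed
 * through freshly allocated bounce pages, and for reads
 * vhost_scsi_copy_sgl_to_iov() copies the result back at completion time.
 */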
870 
871 static int vhost_scsi_to_tcm_attr(int attr)
872 {
873 	switch (attr) {
874 	case VIRTIO_SCSI_S_SIMPLE:
875 		return TCM_SIMPLE_TAG;
876 	case VIRTIO_SCSI_S_ORDERED:
877 		return TCM_ORDERED_TAG;
878 	case VIRTIO_SCSI_S_HEAD:
879 		return TCM_HEAD_TAG;
880 	case VIRTIO_SCSI_S_ACA:
881 		return TCM_ACA_TAG;
882 	default:
883 		break;
884 	}
885 	return TCM_SIMPLE_TAG;
886 }
887 
888 static void vhost_scsi_target_queue_cmd(struct vhost_scsi_cmd *cmd)
889 {
890 	struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
891 	struct vhost_scsi_nexus *tv_nexus;
892 	struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
893 
894 	/* FIXME: BIDI operation */
895 	if (cmd->tvc_sgl_count) {
896 		sg_ptr = cmd->tvc_sgl;
897 
898 		if (cmd->tvc_prot_sgl_count)
899 			sg_prot_ptr = cmd->tvc_prot_sgl;
900 		else
901 			se_cmd->prot_pto = true;
902 	} else {
903 		sg_ptr = NULL;
904 	}
905 	tv_nexus = cmd->tvc_nexus;
906 
907 	se_cmd->tag = 0;
908 	target_init_cmd(se_cmd, tv_nexus->tvn_se_sess, &cmd->tvc_sense_buf[0],
909 			cmd->tvc_lun, cmd->tvc_exp_data_len,
910 			vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
911 			cmd->tvc_data_direction, TARGET_SCF_ACK_KREF);
912 
913 	if (target_submit_prep(se_cmd, cmd->tvc_cdb, sg_ptr,
914 			       cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
915 			       cmd->tvc_prot_sgl_count, GFP_KERNEL))
916 		return;
917 
918 	target_queue_submission(se_cmd);
919 }
920 
921 static void
922 vhost_scsi_send_bad_target(struct vhost_scsi *vs,
923 			   struct vhost_virtqueue *vq,
924 			   int head, unsigned out)
925 {
926 	struct virtio_scsi_cmd_resp __user *resp;
927 	struct virtio_scsi_cmd_resp rsp;
928 	int ret;
929 
930 	memset(&rsp, 0, sizeof(rsp));
931 	rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
932 	resp = vq->iov[out].iov_base;
933 	ret = __copy_to_user(resp, &rsp, sizeof(rsp));
934 	if (!ret)
935 		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
936 	else
937 		pr_err("Faulted on virtio_scsi_cmd_resp\n");
938 }
939 
940 static int
941 vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
942 		    struct vhost_scsi_ctx *vc)
943 {
944 	int ret = -ENXIO;
945 
946 	vc->head = vhost_get_vq_desc(vq, vq->iov,
947 				     ARRAY_SIZE(vq->iov), &vc->out, &vc->in,
948 				     NULL, NULL);
949 
950 	pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
951 		 vc->head, vc->out, vc->in);
952 
953 	/* On error, stop handling until the next kick. */
954 	if (unlikely(vc->head < 0))
955 		goto done;
956 
957 	/* Nothing new?  Wait for eventfd to tell us they refilled. */
958 	if (vc->head == vq->num) {
959 		if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
960 			vhost_disable_notify(&vs->dev, vq);
961 			ret = -EAGAIN;
962 		}
963 		goto done;
964 	}
965 
966 	/*
967 	 * Get the size of request and response buffers.
968 	 * FIXME: Not correct for BIDI operation
969 	 */
970 	vc->out_size = iov_length(vq->iov, vc->out);
971 	vc->in_size = iov_length(&vq->iov[vc->out], vc->in);
972 
973 	/*
974 	 * Copy over the virtio-scsi request header, which for an
975 	 * ANY_LAYOUT-enabled guest may span multiple iovecs, or a
976 	 * single iovec may contain both the header + outgoing
977 	 * WRITE payloads.
978 	 *
979 	 * copy_from_iter() will advance out_iter, so that it will
980 	 * point at the start of the outgoing WRITE payload, if
981 	 * DMA_TO_DEVICE is set.
982 	 */
983 	iov_iter_init(&vc->out_iter, ITER_SOURCE, vq->iov, vc->out, vc->out_size);
984 	ret = 0;
985 
986 done:
987 	return ret;
988 }
989 
990 static int
991 vhost_scsi_chk_size(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc)
992 {
993 	if (unlikely(vc->in_size < vc->rsp_size)) {
994 		vq_err(vq,
995 		       "Response buf too small, need min %zu bytes got %zu",
996 		       vc->rsp_size, vc->in_size);
997 		return -EINVAL;
998 	} else if (unlikely(vc->out_size < vc->req_size)) {
999 		vq_err(vq,
1000 		       "Request buf too small, need min %zu bytes got %zu",
1001 		       vc->req_size, vc->out_size);
1002 		return -EIO;
1003 	}
1004 
1005 	return 0;
1006 }
1007 
1008 static int
1009 vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc,
1010 		   struct vhost_scsi_tpg **tpgp)
1011 {
1012 	int ret = -EIO;
1013 
1014 	if (unlikely(!copy_from_iter_full(vc->req, vc->req_size,
1015 					  &vc->out_iter))) {
1016 		vq_err(vq, "Faulted on copy_from_iter_full\n");
1017 	} else if (unlikely(*vc->lunp != 1)) {
1018 		/* virtio-scsi spec requires byte 0 of the lun to be 1 */
1019 		vq_err(vq, "Illegal virtio-scsi lun: %u\n", *vc->lunp);
1020 	} else {
1021 		struct vhost_scsi_tpg **vs_tpg, *tpg = NULL;
1022 
1023 		if (vc->target) {
1024 			/* validated at handler entry */
1025 			vs_tpg = vhost_vq_get_backend(vq);
1026 			tpg = READ_ONCE(vs_tpg[*vc->target]);
1027 			if (unlikely(!tpg)) {
1028 				vq_err(vq, "Target 0x%x does not exist\n", *vc->target);
1029 				goto out;
1030 			}
1031 		}
1032 
1033 		if (tpgp)
1034 			*tpgp = tpg;
1035 		ret = 0;
1036 	}
1037 out:
1038 	return ret;
1039 }
1040 
1041 static u16 vhost_buf_to_lun(u8 *lun_buf)
1042 {
1043 	return ((lun_buf[2] << 8) | lun_buf[3]) & 0x3FFF;
1044 }
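
/*
 * Worked example (editor's note): virtio-scsi uses the single-level LUN
 * format, i.e. byte 0 is 0x01, byte 1 the target, and bytes 2-3 the LUN
 * with 0x40 OR'd into the high byte.  So lun_buf = {0x01, 0x00, 0x40, 0x05}
 * decodes as ((0x40 << 8) | 0x05) & 0x3FFF = 5.
 */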
1045 
1046 static void
1047 vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1048 {
1049 	struct vhost_scsi_tpg **vs_tpg, *tpg;
1050 	struct virtio_scsi_cmd_req v_req;
1051 	struct virtio_scsi_cmd_req_pi v_req_pi;
1052 	struct vhost_scsi_ctx vc;
1053 	struct vhost_scsi_cmd *cmd;
1054 	struct iov_iter in_iter, prot_iter, data_iter;
1055 	u64 tag;
1056 	u32 exp_data_len, data_direction;
1057 	int ret, prot_bytes, i, c = 0;
1058 	u16 lun;
1059 	u8 task_attr;
1060 	bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
1061 	void *cdb;
1062 
1063 	mutex_lock(&vq->mutex);
1064 	/*
1065 	 * We can handle the vq only after the endpoint is set up by calling the
1066 	 * VHOST_SCSI_SET_ENDPOINT ioctl.
1067 	 */
1068 	vs_tpg = vhost_vq_get_backend(vq);
1069 	if (!vs_tpg)
1070 		goto out;
1071 
1072 	memset(&vc, 0, sizeof(vc));
1073 	vc.rsp_size = sizeof(struct virtio_scsi_cmd_resp);
1074 
1075 	vhost_disable_notify(&vs->dev, vq);
1076 
1077 	do {
1078 		ret = vhost_scsi_get_desc(vs, vq, &vc);
1079 		if (ret)
1080 			goto err;
1081 
1082 		/*
1083 		 * Setup pointers and values based upon different virtio-scsi
1084 		 * request header if T10_PI is enabled in KVM guest.
1085 		 */
1086 		if (t10_pi) {
1087 			vc.req = &v_req_pi;
1088 			vc.req_size = sizeof(v_req_pi);
1089 			vc.lunp = &v_req_pi.lun[0];
1090 			vc.target = &v_req_pi.lun[1];
1091 		} else {
1092 			vc.req = &v_req;
1093 			vc.req_size = sizeof(v_req);
1094 			vc.lunp = &v_req.lun[0];
1095 			vc.target = &v_req.lun[1];
1096 		}
1097 
1098 		/*
1099 		 * Validate the size of request and response buffers.
1100 		 * Check for a sane response buffer so we can report
1101 		 * early errors back to the guest.
1102 		 */
1103 		ret = vhost_scsi_chk_size(vq, &vc);
1104 		if (ret)
1105 			goto err;
1106 
1107 		ret = vhost_scsi_get_req(vq, &vc, &tpg);
1108 		if (ret)
1109 			goto err;
1110 
1111 		ret = -EIO;	/* bad target on any error from here on */
1112 
1113 		/*
1114 		 * Determine data_direction by calculating the total outgoing
1115 		 * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
1116 		 * response headers respectively.
1117 		 *
1118 		 * For DMA_TO_DEVICE this is out_iter, which is already pointing
1119 		 * to the right place.
1120 		 *
1121 		 * For DMA_FROM_DEVICE, the iovec will be just past the end
1122 		 * of the virtio-scsi response header in either the same
1123 		 * or immediately following iovec.
1124 		 *
1125 		 * Any associated T10_PI bytes for the outgoing / incoming
1126 		 * payloads are included in calculation of exp_data_len here.
1127 		 */
1128 		prot_bytes = 0;
1129 
1130 		if (vc.out_size > vc.req_size) {
1131 			data_direction = DMA_TO_DEVICE;
1132 			exp_data_len = vc.out_size - vc.req_size;
1133 			data_iter = vc.out_iter;
1134 		} else if (vc.in_size > vc.rsp_size) {
1135 			data_direction = DMA_FROM_DEVICE;
1136 			exp_data_len = vc.in_size - vc.rsp_size;
1137 
1138 			iov_iter_init(&in_iter, ITER_DEST, &vq->iov[vc.out], vc.in,
1139 				      vc.rsp_size + exp_data_len);
1140 			iov_iter_advance(&in_iter, vc.rsp_size);
1141 			data_iter = in_iter;
1142 		} else {
1143 			data_direction = DMA_NONE;
1144 			exp_data_len = 0;
1145 		}
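		/*
		 * Worked example (editor's note), using the non-T10_PI header
		 * sizes from <linux/virtio_scsi.h> (51-byte cmd_req and
		 * 108-byte cmd_resp with the default 32-byte CDB and 96-byte
		 * sense buffer): a 4 KiB guest write arrives as
		 * out_size = 51 + 4096 and in_size = 108, so the
		 * DMA_TO_DEVICE branch above runs with exp_data_len = 4096.
		 */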
1146 		/*
1147 		 * If T10_PI header + payload is present, setup prot_iter values
1148 		 * and recalculate data_iter for vhost_scsi_mapal() mapping to
1149 		 * host scatterlists via get_user_pages_fast().
1150 		 */
1151 		if (t10_pi) {
1152 			if (v_req_pi.pi_bytesout) {
1153 				if (data_direction != DMA_TO_DEVICE) {
1154 					vq_err(vq, "Received non zero pi_bytesout,"
1155 						" but wrong data_direction\n");
1156 					goto err;
1157 				}
1158 				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
1159 			} else if (v_req_pi.pi_bytesin) {
1160 				if (data_direction != DMA_FROM_DEVICE) {
1161 					vq_err(vq, "Received non zero pi_bytesin,"
1162 						" but wrong data_direction\n");
1163 					goto err;
1164 				}
1165 				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
1166 			}
1167 			/*
1168 			 * Set prot_iter to data_iter and truncate it to
1169 			 * prot_bytes, and advance data_iter past any
1170 			 * preceding prot_bytes that may be present.
1171 			 *
1172 			 * Also fix up the exp_data_len to reflect only the
1173 			 * actual data payload length.
1174 			 */
1175 			if (prot_bytes) {
1176 				exp_data_len -= prot_bytes;
1177 				prot_iter = data_iter;
1178 				iov_iter_truncate(&prot_iter, prot_bytes);
1179 				iov_iter_advance(&data_iter, prot_bytes);
1180 			}
1181 			tag = vhost64_to_cpu(vq, v_req_pi.tag);
1182 			task_attr = v_req_pi.task_attr;
1183 			cdb = &v_req_pi.cdb[0];
1184 			lun = vhost_buf_to_lun(v_req_pi.lun);
1185 		} else {
1186 			tag = vhost64_to_cpu(vq, v_req.tag);
1187 			task_attr = v_req.task_attr;
1188 			cdb = &v_req.cdb[0];
1189 			lun = vhost_buf_to_lun(v_req.lun);
1190 		}
1191 		/*
1192 		 * Check that the received CDB size does not exceed our
1193 		 * hardcoded max for vhost-scsi, then get a pre-allocated
1194 		 * cmd descriptor for the new virtio-scsi tag.
1195 		 *
1196 		 * TODO what if cdb was too small for varlen cdb header?
1197 		 */
1198 		if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
1199 			vq_err(vq, "Received SCSI CDB with command_size: %d that"
1200 				" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1201 				scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
1202 				goto err;
1203 		}
1204 		cmd = vhost_scsi_get_cmd(vq, tpg, cdb, tag, lun, task_attr,
1205 					 exp_data_len + prot_bytes,
1206 					 data_direction);
1207 		if (IS_ERR(cmd)) {
1208 			vq_err(vq, "vhost_scsi_get_cmd failed %ld\n",
1209 			       PTR_ERR(cmd));
1210 			goto err;
1211 		}
1212 		cmd->tvc_vhost = vs;
1213 		cmd->tvc_vq = vq;
1214 		for (i = 0; i < vc.in ; i++)
1215 			cmd->tvc_resp_iov[i] = vq->iov[vc.out + i];
1216 		cmd->tvc_in_iovs = vc.in;
1217 
1218 		pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
1219 			 cmd->tvc_cdb[0], cmd->tvc_lun);
1220 		pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
1221 			 " %d\n", cmd, exp_data_len, prot_bytes, data_direction);
1222 
1223 		if (data_direction != DMA_NONE) {
1224 			if (unlikely(vhost_scsi_mapal(cmd, prot_bytes,
1225 						      &prot_iter, exp_data_len,
1226 						      &data_iter))) {
1227 				vq_err(vq, "Failed to map iov to sgl\n");
1228 				vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
1229 				goto err;
1230 			}
1231 		}
1232 		/*
1233 		 * Save the descriptor from vhost_get_vq_desc() to be used to
1234 		 * complete the virtio-scsi request in TCM callback context via
1235 		 * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
1236 		 */
1237 		cmd->tvc_vq_desc = vc.head;
1238 		vhost_scsi_target_queue_cmd(cmd);
1239 		ret = 0;
1240 err:
1241 		/*
1242 		 * ENXIO:  No more requests, or read error, wait for next kick
1243 		 * EINVAL: Invalid response buffer, drop the request
1244 		 * EIO:    Respond with bad target
1245 		 * EAGAIN: Pending request
1246 		 */
1247 		if (ret == -ENXIO)
1248 			break;
1249 		else if (ret == -EIO)
1250 			vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
1251 	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
1252 out:
1253 	mutex_unlock(&vq->mutex);
1254 }
1255 
1256 static void
1257 vhost_scsi_send_tmf_resp(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
1258 			 int in_iovs, int vq_desc, struct iovec *resp_iov,
1259 			 int tmf_resp_code)
1260 {
1261 	struct virtio_scsi_ctrl_tmf_resp rsp;
1262 	struct iov_iter iov_iter;
1263 	int ret;
1264 
1265 	pr_debug("%s\n", __func__);
1266 	memset(&rsp, 0, sizeof(rsp));
1267 	rsp.response = tmf_resp_code;
1268 
1269 	iov_iter_init(&iov_iter, ITER_DEST, resp_iov, in_iovs, sizeof(rsp));
1270 
1271 	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
1272 	if (likely(ret == sizeof(rsp)))
1273 		vhost_add_used_and_signal(&vs->dev, vq, vq_desc, 0);
1274 	else
1275 		pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n");
1276 }
1277 
1278 static void vhost_scsi_tmf_resp_work(struct vhost_work *work)
1279 {
1280 	struct vhost_scsi_tmf *tmf = container_of(work, struct vhost_scsi_tmf,
1281 						  vwork);
1282 	struct vhost_virtqueue *ctl_vq, *vq;
1283 	int resp_code, i;
1284 
1285 	if (tmf->scsi_resp == TMR_FUNCTION_COMPLETE) {
1286 		/*
1287 		 * Flush IO vqs that don't share a worker with the ctl to make
1288 		 * sure they have sent their responses before us.
1289 		 */
1290 		ctl_vq = &tmf->vhost->vqs[VHOST_SCSI_VQ_CTL].vq;
1291 		for (i = VHOST_SCSI_VQ_IO; i < tmf->vhost->dev.nvqs; i++) {
1292 			vq = &tmf->vhost->vqs[i].vq;
1293 
1294 			if (vhost_vq_is_setup(vq) &&
1295 			    vq->worker != ctl_vq->worker)
1296 				vhost_vq_flush(vq);
1297 		}
1298 
1299 		resp_code = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
1300 	} else {
1301 		resp_code = VIRTIO_SCSI_S_FUNCTION_REJECTED;
1302 	}
1303 
1304 	vhost_scsi_send_tmf_resp(tmf->vhost, &tmf->svq->vq, tmf->in_iovs,
1305 				 tmf->vq_desc, &tmf->resp_iov, resp_code);
1306 	vhost_scsi_release_tmf_res(tmf);
1307 }
1308 
1309 static void
1310 vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg,
1311 		      struct vhost_virtqueue *vq,
1312 		      struct virtio_scsi_ctrl_tmf_req *vtmf,
1313 		      struct vhost_scsi_ctx *vc)
1314 {
1315 	struct vhost_scsi_virtqueue *svq = container_of(vq,
1316 					struct vhost_scsi_virtqueue, vq);
1317 	struct vhost_scsi_tmf *tmf;
1318 
1319 	if (vhost32_to_cpu(vq, vtmf->subtype) !=
1320 	    VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET)
1321 		goto send_reject;
1322 
1323 	if (!tpg->tpg_nexus || !tpg->tpg_nexus->tvn_se_sess) {
1324 		pr_err("Unable to locate active struct vhost_scsi_nexus for LUN RESET.\n");
1325 		goto send_reject;
1326 	}
1327 
1328 	tmf = kzalloc(sizeof(*tmf), GFP_KERNEL);
1329 	if (!tmf)
1330 		goto send_reject;
1331 
1332 	vhost_work_init(&tmf->vwork, vhost_scsi_tmf_resp_work);
1333 	tmf->vhost = vs;
1334 	tmf->svq = svq;
1335 	tmf->resp_iov = vq->iov[vc->out];
1336 	tmf->vq_desc = vc->head;
1337 	tmf->in_iovs = vc->in;
1338 	tmf->inflight = vhost_scsi_get_inflight(vq);
1339 
1340 	if (target_submit_tmr(&tmf->se_cmd, tpg->tpg_nexus->tvn_se_sess, NULL,
1341 			      vhost_buf_to_lun(vtmf->lun), NULL,
1342 			      TMR_LUN_RESET, GFP_KERNEL, 0,
1343 			      TARGET_SCF_ACK_KREF) < 0) {
1344 		vhost_scsi_release_tmf_res(tmf);
1345 		goto send_reject;
1346 	}
1347 
1348 	return;
1349 
1350 send_reject:
1351 	vhost_scsi_send_tmf_resp(vs, vq, vc->in, vc->head, &vq->iov[vc->out],
1352 				 VIRTIO_SCSI_S_FUNCTION_REJECTED);
1353 }
1354 
1355 static void
1356 vhost_scsi_send_an_resp(struct vhost_scsi *vs,
1357 			struct vhost_virtqueue *vq,
1358 			struct vhost_scsi_ctx *vc)
1359 {
1360 	struct virtio_scsi_ctrl_an_resp rsp;
1361 	struct iov_iter iov_iter;
1362 	int ret;
1363 
1364 	pr_debug("%s\n", __func__);
1365 	memset(&rsp, 0, sizeof(rsp));	/* event_actual = 0 */
1366 	rsp.response = VIRTIO_SCSI_S_OK;
1367 
1368 	iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[vc->out], vc->in, sizeof(rsp));
1369 
1370 	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
1371 	if (likely(ret == sizeof(rsp)))
1372 		vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
1373 	else
1374 		pr_err("Faulted on virtio_scsi_ctrl_an_resp\n");
1375 }
1376 
1377 static void
1378 vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1379 {
1380 	struct vhost_scsi_tpg *tpg;
1381 	union {
1382 		__virtio32 type;
1383 		struct virtio_scsi_ctrl_an_req an;
1384 		struct virtio_scsi_ctrl_tmf_req tmf;
1385 	} v_req;
1386 	struct vhost_scsi_ctx vc;
1387 	size_t typ_size;
1388 	int ret, c = 0;
1389 
1390 	mutex_lock(&vq->mutex);
1391 	/*
1392 	 * We can handle the vq only after the endpoint is set up by calling the
1393 	 * VHOST_SCSI_SET_ENDPOINT ioctl.
1394 	 */
1395 	if (!vhost_vq_get_backend(vq))
1396 		goto out;
1397 
1398 	memset(&vc, 0, sizeof(vc));
1399 
1400 	vhost_disable_notify(&vs->dev, vq);
1401 
1402 	do {
1403 		ret = vhost_scsi_get_desc(vs, vq, &vc);
1404 		if (ret)
1405 			goto err;
1406 
1407 		/*
1408 		 * Get the request type first in order to setup
1409 		 * other parameters dependent on the type.
1410 		 */
1411 		vc.req = &v_req.type;
1412 		typ_size = sizeof(v_req.type);
1413 
1414 		if (unlikely(!copy_from_iter_full(vc.req, typ_size,
1415 						  &vc.out_iter))) {
1416 			vq_err(vq, "Faulted on copy_from_iter tmf type\n");
1417 			/*
1418 			 * The size of the response buffer depends on the
1419 			 * request type and must be validated against it.
1420 			 * Since the request type is not known, don't send
1421 			 * a response.
1422 			 */
1423 			continue;
1424 		}
1425 
1426 		switch (vhost32_to_cpu(vq, v_req.type)) {
1427 		case VIRTIO_SCSI_T_TMF:
1428 			vc.req = &v_req.tmf;
1429 			vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req);
1430 			vc.rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp);
1431 			vc.lunp = &v_req.tmf.lun[0];
1432 			vc.target = &v_req.tmf.lun[1];
1433 			break;
1434 		case VIRTIO_SCSI_T_AN_QUERY:
1435 		case VIRTIO_SCSI_T_AN_SUBSCRIBE:
1436 			vc.req = &v_req.an;
1437 			vc.req_size = sizeof(struct virtio_scsi_ctrl_an_req);
1438 			vc.rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp);
1439 			vc.lunp = &v_req.an.lun[0];
1440 			vc.target = NULL;
1441 			break;
1442 		default:
1443 			vq_err(vq, "Unknown control request %d", v_req.type);
1444 			continue;
1445 		}
1446 
1447 		/*
1448 		 * Validate the size of request and response buffers.
1449 		 * Check for a sane response buffer so we can report
1450 		 * early errors back to the guest.
1451 		 */
1452 		ret = vhost_scsi_chk_size(vq, &vc);
1453 		if (ret)
1454 			goto err;
1455 
1456 		/*
1457 		 * Get the rest of the request now that its size is known.
1458 		 */
1459 		vc.req += typ_size;
1460 		vc.req_size -= typ_size;
1461 
1462 		ret = vhost_scsi_get_req(vq, &vc, &tpg);
1463 		if (ret)
1464 			goto err;
1465 
1466 		if (v_req.type == VIRTIO_SCSI_T_TMF)
1467 			vhost_scsi_handle_tmf(vs, tpg, vq, &v_req.tmf, &vc);
1468 		else
1469 			vhost_scsi_send_an_resp(vs, vq, &vc);
1470 err:
1471 		/*
1472 		 * ENXIO:  No more requests, or read error, wait for next kick
1473 		 * EINVAL: Invalid response buffer, drop the request
1474 		 * EIO:    Respond with bad target
1475 		 * EAGAIN: Pending request
1476 		 */
1477 		if (ret == -ENXIO)
1478 			break;
1479 		else if (ret == -EIO)
1480 			vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
1481 	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
1482 out:
1483 	mutex_unlock(&vq->mutex);
1484 }
1485 
1486 static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
1487 {
1488 	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1489 						poll.work);
1490 	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1491 
1492 	pr_debug("%s: The handling func for control queue.\n", __func__);
1493 	vhost_scsi_ctl_handle_vq(vs, vq);
1494 }
1495 
1496 static void
1497 vhost_scsi_send_evt(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
1498 		    struct vhost_scsi_tpg *tpg, struct se_lun *lun,
1499 		    u32 event, u32 reason)
1500 {
1501 	struct vhost_scsi_evt *evt;
1502 
1503 	evt = vhost_scsi_allocate_evt(vs, event, reason);
1504 	if (!evt)
1505 		return;
1506 
1507 	if (tpg && lun) {
1508 		/* TODO: share lun setup code with virtio-scsi.ko */
1509 		/*
1510 		 * Note: evt->event is zeroed when we allocate it and
1511 		 * lun[4-7] need to be zero according to virtio-scsi spec.
1512 		 */
1513 		evt->event.lun[0] = 0x01;
1514 		evt->event.lun[1] = tpg->tport_tpgt;
1515 		if (lun->unpacked_lun >= 256)
1516 			evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
1517 		evt->event.lun[3] = lun->unpacked_lun & 0xFF;
1518 	}
1519 
1520 	llist_add(&evt->list, &vs->vs_event_list);
1521 	if (!vhost_vq_work_queue(vq, &vs->vs_event_work))
1522 		vhost_scsi_complete_events(vs, true);
1523 }
1524 
1525 static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
1526 {
1527 	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1528 						poll.work);
1529 	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1530 
1531 	mutex_lock(&vq->mutex);
1532 	if (!vhost_vq_get_backend(vq))
1533 		goto out;
1534 
1535 	if (vs->vs_events_missed)
1536 		vhost_scsi_send_evt(vs, vq, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT,
1537 				    0);
1538 out:
1539 	mutex_unlock(&vq->mutex);
1540 }
1541 
1542 static void vhost_scsi_handle_kick(struct vhost_work *work)
1543 {
1544 	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1545 						poll.work);
1546 	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1547 
1548 	vhost_scsi_handle_vq(vs, vq);
1549 }
1550 
1551 /* Callers must hold dev mutex */
1552 static void vhost_scsi_flush(struct vhost_scsi *vs)
1553 {
1554 	int i;
1555 
1556 	/* Init new inflight and remember the old inflight */
1557 	vhost_scsi_init_inflight(vs, vs->old_inflight);
1558 
1559 	/*
1560 	 * The inflight->kref was initialized to 1. We decrement it here to
1561 	 * indicate the start of the flush operation so that it will reach 0
1562 	 * when all the reqs are finished.
1563 	 */
1564 	for (i = 0; i < vs->dev.nvqs; i++)
1565 		kref_put(&vs->old_inflight[i]->kref, vhost_scsi_done_inflight);
1566 
1567 	/* Flush both the vhost poll and vhost work */
1568 	vhost_dev_flush(&vs->dev);
1569 
1570 	/* Wait for all reqs issued before the flush to be finished */
1571 	for (i = 0; i < vs->dev.nvqs; i++)
1572 		wait_for_completion(&vs->old_inflight[i]->comp);
1573 }
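
/*
 * Editor's note, the flush sequence in short: (1) point every vq at a
 * fresh inflight counter, (2) drop the initial kref on each old counter,
 * (3) flush pending vhost work, then (4) wait for each old counter's
 * completion, which fires once the last pre-flush command has called
 * vhost_scsi_put_inflight().
 */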
1574 
1575 static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq)
1576 {
1577 	struct vhost_scsi_virtqueue *svq = container_of(vq,
1578 					struct vhost_scsi_virtqueue, vq);
1579 	struct vhost_scsi_cmd *tv_cmd;
1580 	unsigned int i;
1581 
1582 	if (!svq->scsi_cmds)
1583 		return;
1584 
1585 	for (i = 0; i < svq->max_cmds; i++) {
1586 		tv_cmd = &svq->scsi_cmds[i];
1587 
1588 		kfree(tv_cmd->tvc_sgl);
1589 		kfree(tv_cmd->tvc_prot_sgl);
1590 		kfree(tv_cmd->tvc_upages);
1591 		kfree(tv_cmd->tvc_resp_iov);
1592 	}
1593 
1594 	sbitmap_free(&svq->scsi_tags);
1595 	kfree(svq->scsi_cmds);
1596 	svq->scsi_cmds = NULL;
1597 }
1598 
1599 static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds)
1600 {
1601 	struct vhost_scsi_virtqueue *svq = container_of(vq,
1602 					struct vhost_scsi_virtqueue, vq);
1603 	struct vhost_scsi_cmd *tv_cmd;
1604 	unsigned int i;
1605 
1606 	if (svq->scsi_cmds)
1607 		return 0;
1608 
1609 	if (sbitmap_init_node(&svq->scsi_tags, max_cmds, -1, GFP_KERNEL,
1610 			      NUMA_NO_NODE, false, true))
1611 		return -ENOMEM;
1612 	svq->max_cmds = max_cmds;
1613 
1614 	svq->scsi_cmds = kcalloc(max_cmds, sizeof(*tv_cmd), GFP_KERNEL);
1615 	if (!svq->scsi_cmds) {
1616 		sbitmap_free(&svq->scsi_tags);
1617 		return -ENOMEM;
1618 	}
1619 
1620 	for (i = 0; i < max_cmds; i++) {
1621 		tv_cmd = &svq->scsi_cmds[i];
1622 
1623 		tv_cmd->tvc_sgl = kcalloc(VHOST_SCSI_PREALLOC_SGLS,
1624 					  sizeof(struct scatterlist),
1625 					  GFP_KERNEL);
1626 		if (!tv_cmd->tvc_sgl) {
1627 			pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
1628 			goto out;
1629 		}
1630 
1631 		tv_cmd->tvc_upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES,
1632 					     sizeof(struct page *),
1633 					     GFP_KERNEL);
1634 		if (!tv_cmd->tvc_upages) {
1635 			pr_err("Unable to allocate tv_cmd->tvc_upages\n");
1636 			goto out;
1637 		}
1638 
1639 		tv_cmd->tvc_resp_iov = kcalloc(UIO_MAXIOV,
1640 					       sizeof(struct iovec),
1641 					       GFP_KERNEL);
1642 		if (!tv_cmd->tvc_resp_iov) {
1643 			pr_err("Unable to allocate tv_cmd->tvc_resp_iov\n");
1644 			goto out;
1645 		}
1646 
1647 		tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
1648 					       sizeof(struct scatterlist),
1649 					       GFP_KERNEL);
1650 		if (!tv_cmd->tvc_prot_sgl) {
1651 			pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
1652 			goto out;
1653 		}
1654 	}
1655 	return 0;
1656 out:
1657 	vhost_scsi_destroy_vq_cmds(vq);
1658 	return -ENOMEM;
1659 }
1660 
1661 /*
1662  * Called from vhost_scsi_ioctl() context to walk the list of available
1663  * vhost_scsi_tpg with an active struct vhost_scsi_nexus
1664  *
1665  *  The lock nesting rule is:
1666  *    vs->dev.mutex -> vhost_scsi_mutex -> tpg->tv_tpg_mutex -> vq->mutex
1667  */
1668 static int
1669 vhost_scsi_set_endpoint(struct vhost_scsi *vs,
1670 			struct vhost_scsi_target *t)
1671 {
1672 	struct se_portal_group *se_tpg;
1673 	struct vhost_scsi_tport *tv_tport;
1674 	struct vhost_scsi_tpg *tpg;
1675 	struct vhost_scsi_tpg **vs_tpg;
1676 	struct vhost_virtqueue *vq;
1677 	int index, ret, i, len;
1678 	bool match = false;
1679 
1680 	mutex_lock(&vs->dev.mutex);
1681 
1682 	/* Verify that the ring has been set up correctly. */
1683 	for (index = 0; index < vs->dev.nvqs; ++index) {
1685 		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1686 			ret = -EFAULT;
1687 			goto out;
1688 		}
1689 	}
1690 
1691 	len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
1692 	vs_tpg = kzalloc(len, GFP_KERNEL);
1693 	if (!vs_tpg) {
1694 		ret = -ENOMEM;
1695 		goto out;
1696 	}
1697 	if (vs->vs_tpg)
1698 		memcpy(vs_tpg, vs->vs_tpg, len);
1699 
1700 	mutex_lock(&vhost_scsi_mutex);
1701 	list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
1702 		mutex_lock(&tpg->tv_tpg_mutex);
1703 		if (!tpg->tpg_nexus) {
1704 			mutex_unlock(&tpg->tv_tpg_mutex);
1705 			continue;
1706 		}
1707 		if (tpg->tv_tpg_vhost_count != 0) {
1708 			mutex_unlock(&tpg->tv_tpg_mutex);
1709 			continue;
1710 		}
1711 		tv_tport = tpg->tport;
1712 
1713 		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1714 			if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
1715 				mutex_unlock(&tpg->tv_tpg_mutex);
1716 				mutex_unlock(&vhost_scsi_mutex);
1717 				ret = -EEXIST;
1718 				goto undepend;
1719 			}
1720 			/*
1721 			 * In order to ensure individual vhost-scsi configfs
1722 			 * groups cannot be removed while in use by vhost ioctl,
1723 			 * go ahead and take an explicit se_tpg->tpg_group.cg_item
1724 			 * dependency now.
1725 			 */
1726 			se_tpg = &tpg->se_tpg;
1727 			ret = target_depend_item(&se_tpg->tpg_group.cg_item);
1728 			if (ret) {
1729 				pr_warn("target_depend_item() failed: %d\n", ret);
1730 				mutex_unlock(&tpg->tv_tpg_mutex);
1731 				mutex_unlock(&vhost_scsi_mutex);
1732 				goto undepend;
1733 			}
1734 			tpg->tv_tpg_vhost_count++;
1735 			tpg->vhost_scsi = vs;
1736 			vs_tpg[tpg->tport_tpgt] = tpg;
1737 			match = true;
1738 		}
1739 		mutex_unlock(&tpg->tv_tpg_mutex);
1740 	}
1741 	mutex_unlock(&vhost_scsi_mutex);
1742 
1743 	if (match) {
1744 		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
1745 		       sizeof(vs->vs_vhost_wwpn));
1746 
1747 		for (i = VHOST_SCSI_VQ_IO; i < vs->dev.nvqs; i++) {
1748 			vq = &vs->vqs[i].vq;
1749 			if (!vhost_vq_is_setup(vq))
1750 				continue;
1751 
1752 			ret = vhost_scsi_setup_vq_cmds(vq, vq->num);
1753 			if (ret)
1754 				goto destroy_vq_cmds;
1755 		}
1756 
1757 		for (i = 0; i < vs->dev.nvqs; i++) {
1758 			vq = &vs->vqs[i].vq;
1759 			mutex_lock(&vq->mutex);
1760 			vhost_vq_set_backend(vq, vs_tpg);
1761 			vhost_vq_init_access(vq);
1762 			mutex_unlock(&vq->mutex);
1763 		}
1764 		ret = 0;
1765 	} else {
1766 		ret = -EEXIST;
1767 	}
1768 
1769 	/*
1770 	 * Act as synchronize_rcu to make sure access to
1771 	 * old vs->vs_tpg is finished.
1772 	 */
1773 	vhost_scsi_flush(vs);
1774 	kfree(vs->vs_tpg);
1775 	vs->vs_tpg = vs_tpg;
1776 	goto out;
1777 
1778 destroy_vq_cmds:
1779 	for (i--; i >= VHOST_SCSI_VQ_IO; i--) {
1780 		if (!vhost_vq_get_backend(&vs->vqs[i].vq))
1781 			vhost_scsi_destroy_vq_cmds(&vs->vqs[i].vq);
1782 	}
1783 undepend:
1784 	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
1785 		tpg = vs_tpg[i];
1786 		if (tpg) {
1787 			mutex_lock(&tpg->tv_tpg_mutex);
1788 			tpg->vhost_scsi = NULL;
1789 			tpg->tv_tpg_vhost_count--;
1790 			mutex_unlock(&tpg->tv_tpg_mutex);
1791 			target_undepend_item(&tpg->se_tpg.tpg_group.cg_item);
1792 		}
1793 	}
1794 	kfree(vs_tpg);
1795 out:
1796 	mutex_unlock(&vs->dev.mutex);
1797 	return ret;
1798 }
1799 
1800 static int
1801 vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
1802 			  struct vhost_scsi_target *t)
1803 {
1804 	struct se_portal_group *se_tpg;
1805 	struct vhost_scsi_tport *tv_tport;
1806 	struct vhost_scsi_tpg *tpg;
1807 	struct vhost_virtqueue *vq;
1808 	bool match = false;
1809 	int index, ret, i;
1810 	u8 target;
1811 
1812 	mutex_lock(&vs->dev.mutex);
1813 	/* Verify that the ring has been set up correctly. */
1814 	for (index = 0; index < vs->dev.nvqs; ++index) {
1815 		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1816 			ret = -EFAULT;
1817 			goto err_dev;
1818 		}
1819 	}
1820 
1821 	if (!vs->vs_tpg) {
1822 		ret = 0;
1823 		goto err_dev;
1824 	}
1825 
1826 	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
1827 		target = i;
1828 		tpg = vs->vs_tpg[target];
1829 		if (!tpg)
1830 			continue;
1831 
1832 		tv_tport = tpg->tport;
1833 		if (!tv_tport) {
1834 			ret = -ENODEV;
1835 			goto err_dev;
1836 		}
1837 
1838 		if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1839 			pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
1840 				" does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
1841 				tv_tport->tport_name, tpg->tport_tpgt,
1842 				t->vhost_wwpn, t->vhost_tpgt);
1843 			ret = -EINVAL;
1844 			goto err_dev;
1845 		}
1846 		match = true;
1847 	}
1848 	if (!match)
1849 		goto free_vs_tpg;
1850 
1851 	/* Prevent new cmds from starting and accessing the tpgs/sessions */
1852 	for (i = 0; i < vs->dev.nvqs; i++) {
1853 		vq = &vs->vqs[i].vq;
1854 		mutex_lock(&vq->mutex);
1855 		vhost_vq_set_backend(vq, NULL);
1856 		mutex_unlock(&vq->mutex);
1857 	}
1858 	/* Make sure cmds are not running before tearing them down. */
1859 	vhost_scsi_flush(vs);
1860 
1861 	for (i = 0; i < vs->dev.nvqs; i++) {
1862 		vq = &vs->vqs[i].vq;
1863 		vhost_scsi_destroy_vq_cmds(vq);
1864 	}
1865 
1866 	/*
1867 	 * We can now release our hold on the tpg and sessions and userspace
1868 	 * can free them after this point.
1869 	 */
1870 	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
1871 		target = i;
1872 		tpg = vs->vs_tpg[target];
1873 		if (!tpg)
1874 			continue;
1875 
1876 		mutex_lock(&tpg->tv_tpg_mutex);
1877 
1878 		tpg->tv_tpg_vhost_count--;
1879 		tpg->vhost_scsi = NULL;
1880 		vs->vs_tpg[target] = NULL;
1881 
1882 		mutex_unlock(&tpg->tv_tpg_mutex);
1883 
1884 		se_tpg = &tpg->se_tpg;
1885 		target_undepend_item(&se_tpg->tpg_group.cg_item);
1886 	}
1887 
1888 free_vs_tpg:
1889 	/*
1890 	 * Act as synchronize_rcu to make sure all accesses to
1891 	 * the old vs->vs_tpg have finished.
1892 	 */
1893 	vhost_scsi_flush(vs);
1894 	kfree(vs->vs_tpg);
1895 	vs->vs_tpg = NULL;
1896 	WARN_ON(vs->vs_events_nr);
1897 	mutex_unlock(&vs->dev.mutex);
1898 	return 0;
1899 
1900 err_dev:
1901 	mutex_unlock(&vs->dev.mutex);
1902 	return ret;
1903 }
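
/*
 * Editor's note: tearing the endpoint down from userspace is the mirror of
 * the set path; a sketch reusing the backend struct (and placeholder WWPN)
 * from the earlier sketch:
 *
 *	ioctl(vhost_fd, VHOST_SCSI_CLEAR_ENDPOINT, &backend);
 */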
1904 
1905 static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
1906 {
1907 	struct vhost_virtqueue *vq;
1908 	int i;
1909 
1910 	if (features & ~VHOST_SCSI_FEATURES)
1911 		return -EOPNOTSUPP;
1912 
1913 	mutex_lock(&vs->dev.mutex);
1914 	if ((features & (1 << VHOST_F_LOG_ALL)) &&
1915 	    !vhost_log_access_ok(&vs->dev)) {
1916 		mutex_unlock(&vs->dev.mutex);
1917 		return -EFAULT;
1918 	}
1919 
1920 	for (i = 0; i < vs->dev.nvqs; i++) {
1921 		vq = &vs->vqs[i].vq;
1922 		mutex_lock(&vq->mutex);
1923 		vq->acked_features = features;
1924 		mutex_unlock(&vq->mutex);
1925 	}
1926 	mutex_unlock(&vs->dev.mutex);
1927 	return 0;
1928 }
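
/*
 * Editor's note: a hedged userspace sketch of negotiating with the function
 * above; the driver offers back only the bits it understands, here using
 * VIRTIO_SCSI_F_HOTPLUG as an example. vhost_fd is the placeholder
 * /dev/vhost-scsi descriptor from the earlier sketch:
 *
 *	__u64 features;
 *
 *	ioctl(vhost_fd, VHOST_GET_FEATURES, &features);
 *	features &= (1ULL << VIRTIO_SCSI_F_HOTPLUG);	// keep only wanted bits
 *	ioctl(vhost_fd, VHOST_SET_FEATURES, &features);
 */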
1929 
1930 static int vhost_scsi_open(struct inode *inode, struct file *f)
1931 {
1932 	struct vhost_scsi_virtqueue *svq;
1933 	struct vhost_scsi *vs;
1934 	struct vhost_virtqueue **vqs;
1935 	int r = -ENOMEM, i, nvqs = vhost_scsi_max_io_vqs;
1936 
1937 	vs = kvzalloc(sizeof(*vs), GFP_KERNEL);
1938 	if (!vs)
1939 		goto err_vs;
1940 
1941 	if (nvqs > VHOST_SCSI_MAX_IO_VQ) {
1942 		pr_err("Invalid max_io_vqs of %d. Using %d.\n", nvqs,
1943 		       VHOST_SCSI_MAX_IO_VQ);
1944 		nvqs = VHOST_SCSI_MAX_IO_VQ;
1945 	} else if (nvqs == 0) {
1946 		pr_err("Invalid max_io_vqs of %d. Using 1.\n", nvqs);
1947 		nvqs = 1;
1948 	}
1949 	nvqs += VHOST_SCSI_VQ_IO;
1950 
1951 	vs->old_inflight = kmalloc_array(nvqs, sizeof(*vs->old_inflight),
1952 					 GFP_KERNEL | __GFP_ZERO);
1953 	if (!vs->old_inflight)
1954 		goto err_inflight;
1955 
1956 	vs->vqs = kmalloc_array(nvqs, sizeof(*vs->vqs),
1957 				GFP_KERNEL | __GFP_ZERO);
1958 	if (!vs->vqs)
1959 		goto err_vqs;
1960 
1961 	vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
1962 	if (!vqs)
1963 		goto err_local_vqs;
1964 
1965 	vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);
1966 
1967 	vs->vs_events_nr = 0;
1968 	vs->vs_events_missed = false;
1969 
1970 	vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
1971 	vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1972 	vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
1973 	vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
1974 	for (i = VHOST_SCSI_VQ_IO; i < nvqs; i++) {
1975 		svq = &vs->vqs[i];
1976 
1977 		vqs[i] = &svq->vq;
1978 		svq->vs = vs;
1979 		init_llist_head(&svq->completion_list);
1980 		vhost_work_init(&svq->completion_work,
1981 				vhost_scsi_complete_cmd_work);
1982 		svq->vq.handle_kick = vhost_scsi_handle_kick;
1983 	}
1984 	vhost_dev_init(&vs->dev, vqs, nvqs, UIO_MAXIOV,
1985 		       VHOST_SCSI_WEIGHT, 0, true, NULL);
1986 
1987 	vhost_scsi_init_inflight(vs, NULL);
1988 
1989 	f->private_data = vs;
1990 	return 0;
1991 
1992 err_local_vqs:
1993 	kfree(vs->vqs);
1994 err_vqs:
1995 	kfree(vs->old_inflight);
1996 err_inflight:
1997 	kvfree(vs);
1998 err_vs:
1999 	return r;
2000 }
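
/*
 * Editor's note: the vhost_scsi_max_io_vqs value clamped above comes from
 * the max_io_vqs module parameter (see the pr_err strings), so a host can
 * size the I/O virtqueues at load time, e.g.:
 *
 *	modprobe vhost_scsi max_io_vqs=8
 */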
2001 
2002 static int vhost_scsi_release(struct inode *inode, struct file *f)
2003 {
2004 	struct vhost_scsi *vs = f->private_data;
2005 	struct vhost_scsi_target t;
2006 
2007 	mutex_lock(&vs->dev.mutex);
2008 	memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
2009 	mutex_unlock(&vs->dev.mutex);
2010 	vhost_scsi_clear_endpoint(vs, &t);
2011 	vhost_dev_stop(&vs->dev);
2012 	vhost_dev_cleanup(&vs->dev);
2013 	kfree(vs->dev.vqs);
2014 	kfree(vs->vqs);
2015 	kfree(vs->old_inflight);
2016 	kvfree(vs);
2017 	return 0;
2018 }
2019 
2020 static long
2021 vhost_scsi_ioctl(struct file *f,
2022 		 unsigned int ioctl,
2023 		 unsigned long arg)
2024 {
2025 	struct vhost_scsi *vs = f->private_data;
2026 	struct vhost_scsi_target backend;
2027 	void __user *argp = (void __user *)arg;
2028 	u64 __user *featurep = argp;
2029 	u32 __user *eventsp = argp;
2030 	u32 events_missed;
2031 	u64 features;
2032 	int r, abi_version = VHOST_SCSI_ABI_VERSION;
2033 	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
2034 
2035 	switch (ioctl) {
2036 	case VHOST_SCSI_SET_ENDPOINT:
2037 		if (copy_from_user(&backend, argp, sizeof backend))
2038 			return -EFAULT;
2039 		if (backend.reserved != 0)
2040 			return -EOPNOTSUPP;
2041 
2042 		return vhost_scsi_set_endpoint(vs, &backend);
2043 	case VHOST_SCSI_CLEAR_ENDPOINT:
2044 		if (copy_from_user(&backend, argp, sizeof backend))
2045 			return -EFAULT;
2046 		if (backend.reserved != 0)
2047 			return -EOPNOTSUPP;
2048 
2049 		return vhost_scsi_clear_endpoint(vs, &backend);
2050 	case VHOST_SCSI_GET_ABI_VERSION:
2051 		if (copy_to_user(argp, &abi_version, sizeof abi_version))
2052 			return -EFAULT;
2053 		return 0;
2054 	case VHOST_SCSI_SET_EVENTS_MISSED:
2055 		if (get_user(events_missed, eventsp))
2056 			return -EFAULT;
2057 		mutex_lock(&vq->mutex);
2058 		vs->vs_events_missed = events_missed;
2059 		mutex_unlock(&vq->mutex);
2060 		return 0;
2061 	case VHOST_SCSI_GET_EVENTS_MISSED:
2062 		mutex_lock(&vq->mutex);
2063 		events_missed = vs->vs_events_missed;
2064 		mutex_unlock(&vq->mutex);
2065 		if (put_user(events_missed, eventsp))
2066 			return -EFAULT;
2067 		return 0;
2068 	case VHOST_GET_FEATURES:
2069 		features = VHOST_SCSI_FEATURES;
2070 		if (copy_to_user(featurep, &features, sizeof features))
2071 			return -EFAULT;
2072 		return 0;
2073 	case VHOST_SET_FEATURES:
2074 		if (copy_from_user(&features, featurep, sizeof features))
2075 			return -EFAULT;
2076 		return vhost_scsi_set_features(vs, features);
2077 	case VHOST_NEW_WORKER:
2078 	case VHOST_FREE_WORKER:
2079 	case VHOST_ATTACH_VRING_WORKER:
2080 	case VHOST_GET_VRING_WORKER:
2081 		mutex_lock(&vs->dev.mutex);
2082 		r = vhost_worker_ioctl(&vs->dev, ioctl, argp);
2083 		mutex_unlock(&vs->dev.mutex);
2084 		return r;
2085 	default:
2086 		mutex_lock(&vs->dev.mutex);
2087 		r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
2088 		/* TODO: flush backend after dev ioctl. */
2089 		if (r == -ENOIOCTLCMD)
2090 			r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
2091 		mutex_unlock(&vs->dev.mutex);
2092 		return r;
2093 	}
2094 }
2095 
2096 static const struct file_operations vhost_scsi_fops = {
2097 	.owner          = THIS_MODULE,
2098 	.release        = vhost_scsi_release,
2099 	.unlocked_ioctl = vhost_scsi_ioctl,
2100 	.compat_ioctl	= compat_ptr_ioctl,
2101 	.open           = vhost_scsi_open,
2102 	.llseek		= noop_llseek,
2103 };
2104 
2105 static struct miscdevice vhost_scsi_misc = {
2106 	MISC_DYNAMIC_MINOR,
2107 	"vhost-scsi",
2108 	&vhost_scsi_fops,
2109 };
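
/*
 * Editor's note: MISC_DYNAMIC_MINOR registers a character device that
 * devtmpfs/udev exposes as /dev/vhost-scsi. The usual management order is:
 * provision the target through configfs, open /dev/vhost-scsi, issue
 * VHOST_SET_OWNER, set up the memory table and vrings, then
 * VHOST_SCSI_SET_ENDPOINT (sketched earlier in this file).
 */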
2110 
2111 static int __init vhost_scsi_register(void)
2112 {
2113 	return misc_register(&vhost_scsi_misc);
2114 }
2115 
2116 static void vhost_scsi_deregister(void)
2117 {
2118 	misc_deregister(&vhost_scsi_misc);
2119 }
2120 
2121 static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
2122 {
2123 	switch (tport->tport_proto_id) {
2124 	case SCSI_PROTOCOL_SAS:
2125 		return "SAS";
2126 	case SCSI_PROTOCOL_FCP:
2127 		return "FCP";
2128 	case SCSI_PROTOCOL_ISCSI:
2129 		return "iSCSI";
2130 	default:
2131 		break;
2132 	}
2133 
2134 	return "Unknown";
2135 }
2136 
2137 static void
2138 vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
2139 		  struct se_lun *lun, bool plug)
2140 {
2142 	struct vhost_scsi *vs = tpg->vhost_scsi;
2143 	struct vhost_virtqueue *vq;
2144 	u32 reason;
2145 
2146 	if (!vs)
2147 		return;
2148 
2149 	if (plug)
2150 		reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
2151 	else
2152 		reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
2153 
2154 	vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
2155 	mutex_lock(&vq->mutex);
2156 	/*
2157 	 * We can't queue events if the backend has been cleared, because
2158 	 * we could end up queueing an event after the flush.
2159 	 */
2160 	if (!vhost_vq_get_backend(vq))
2161 		goto unlock;
2162 
2163 	if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
2164 		vhost_scsi_send_evt(vs, vq, tpg, lun,
2165 				   VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
2166 unlock:
2167 	mutex_unlock(&vq->mutex);
2168 }
2169 
2170 static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
2171 {
2172 	vhost_scsi_do_plug(tpg, lun, true);
2173 }
2174 
2175 static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
2176 {
2177 	vhost_scsi_do_plug(tpg, lun, false);
2178 }
2179 
2180 static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
2181 			       struct se_lun *lun)
2182 {
2183 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2184 				struct vhost_scsi_tpg, se_tpg);
2185 
2186 	mutex_lock(&tpg->tv_tpg_mutex);
2187 	tpg->tv_tpg_port_count++;
2188 	vhost_scsi_hotplug(tpg, lun);
2189 	mutex_unlock(&tpg->tv_tpg_mutex);
2190 
2191 	return 0;
2192 }
2193 
2194 static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
2195 				  struct se_lun *lun)
2196 {
2197 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2198 				struct vhost_scsi_tpg, se_tpg);
2199 
2200 	mutex_lock(&tpg->tv_tpg_mutex);
2201 	tpg->tv_tpg_port_count--;
2202 	vhost_scsi_hotunplug(tpg, lun);
2203 	mutex_unlock(&tpg->tv_tpg_mutex);
2204 }
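
/*
 * Editor's note: fabric_post_link/fabric_pre_unlink fire when a LUN is
 * created or removed under a TPG in configfs, which is what queues the
 * hotplug/hotunplug events above. A hedged shell sketch, all names
 * placeholders (iblock_0/disk0 must be an existing backstore):
 *
 *	CONFIGFS=/sys/kernel/config/target
 *	mkdir $CONFIGFS/vhost/naa.500140512345678a/tpgt_1/lun/lun_0
 *	ln -s $CONFIGFS/core/iblock_0/disk0 \
 *	      $CONFIGFS/vhost/naa.500140512345678a/tpgt_1/lun/lun_0/disk0_port
 */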
2205 
2206 static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_store(
2207 		struct config_item *item, const char *page, size_t count)
2208 {
2209 	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2210 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2211 				struct vhost_scsi_tpg, se_tpg);
2212 	unsigned long val;
2213 	int ret = kstrtoul(page, 0, &val);
2214 
2215 	if (ret) {
2216 		pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
2217 		return ret;
2218 	}
2219 	if (val != 0 && val != 1 && val != 3) {
2220 		pr_err("Invalid vhost_scsi fabric_prot_type: %lu\n", val);
2221 		return -EINVAL;
2222 	}
2223 	tpg->tv_fabric_prot_type = val;
2224 
2225 	return count;
2226 }
2227 
2228 static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_show(
2229 		struct config_item *item, char *page)
2230 {
2231 	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2232 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2233 				struct vhost_scsi_tpg, se_tpg);
2234 
2235 	return sysfs_emit(page, "%d\n", tpg->tv_fabric_prot_type);
2236 }
2237 
2238 CONFIGFS_ATTR(vhost_scsi_tpg_attrib_, fabric_prot_type);
2239 
2240 static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
2241 	&vhost_scsi_tpg_attrib_attr_fabric_prot_type,
2242 	NULL,
2243 };
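
/*
 * Editor's note: the store handler above only accepts 0, 1 or 3 (matching
 * the target core's fabric protection type values). Example write, assuming
 * the placeholder target below already exists:
 *
 *	echo 1 > /sys/kernel/config/target/vhost/naa.500140512345678a/tpgt_1/attrib/fabric_prot_type
 */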
2244 
2245 static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
2246 				const char *name)
2247 {
2248 	struct vhost_scsi_nexus *tv_nexus;
2249 
2250 	mutex_lock(&tpg->tv_tpg_mutex);
2251 	if (tpg->tpg_nexus) {
2252 		mutex_unlock(&tpg->tv_tpg_mutex);
2253 		pr_debug("tpg->tpg_nexus already exists\n");
2254 		return -EEXIST;
2255 	}
2256 
2257 	tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL);
2258 	if (!tv_nexus) {
2259 		mutex_unlock(&tpg->tv_tpg_mutex);
2260 		pr_err("Unable to allocate struct vhost_scsi_nexus\n");
2261 		return -ENOMEM;
2262 	}
2263 	/*
2264 	 * Since we are running in 'demo mode' this call will generate a
2265 	 * struct se_node_acl for the vhost_scsi struct se_portal_group with
2266 	 * the SCSI Initiator port name of the passed configfs group 'name'.
2267 	 */
2268 	tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg, 0, 0,
2269 					TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
2270 					(unsigned char *)name, tv_nexus, NULL);
2271 	if (IS_ERR(tv_nexus->tvn_se_sess)) {
2272 		mutex_unlock(&tpg->tv_tpg_mutex);
2273 		kfree(tv_nexus);
2274 		return -ENOMEM;
2275 	}
2276 	tpg->tpg_nexus = tv_nexus;
2277 
2278 	mutex_unlock(&tpg->tv_tpg_mutex);
2279 	return 0;
2280 }
2281 
2282 static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
2283 {
2284 	struct se_session *se_sess;
2285 	struct vhost_scsi_nexus *tv_nexus;
2286 
2287 	mutex_lock(&tpg->tv_tpg_mutex);
2288 	tv_nexus = tpg->tpg_nexus;
2289 	if (!tv_nexus) {
2290 		mutex_unlock(&tpg->tv_tpg_mutex);
2291 		return -ENODEV;
2292 	}
2293 
2294 	se_sess = tv_nexus->tvn_se_sess;
2295 	if (!se_sess) {
2296 		mutex_unlock(&tpg->tv_tpg_mutex);
2297 		return -ENODEV;
2298 	}
2299 
2300 	if (tpg->tv_tpg_port_count != 0) {
2301 		mutex_unlock(&tpg->tv_tpg_mutex);
2302 		pr_err("Unable to remove TCM_vhost I_T Nexus with"
2303 			" active TPG port count: %d\n",
2304 			tpg->tv_tpg_port_count);
2305 		return -EBUSY;
2306 	}
2307 
2308 	if (tpg->tv_tpg_vhost_count != 0) {
2309 		mutex_unlock(&tpg->tv_tpg_mutex);
2310 		pr_err("Unable to remove TCM_vhost I_T Nexus with"
2311 			" active TPG vhost count: %d\n",
2312 			tpg->tv_tpg_vhost_count);
2313 		return -EBUSY;
2314 	}
2315 
2316 	pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
2317 		" %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
2318 		tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
2319 
2320 	/*
2321 	 * Release the SCSI I_T Nexus to the emulated vhost Target Port
2322 	 */
2323 	target_remove_session(se_sess);
2324 	tpg->tpg_nexus = NULL;
2325 	mutex_unlock(&tpg->tv_tpg_mutex);
2326 
2327 	kfree(tv_nexus);
2328 	return 0;
2329 }
2330 
2331 static ssize_t vhost_scsi_tpg_nexus_show(struct config_item *item, char *page)
2332 {
2333 	struct se_portal_group *se_tpg = to_tpg(item);
2334 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2335 				struct vhost_scsi_tpg, se_tpg);
2336 	struct vhost_scsi_nexus *tv_nexus;
2337 	ssize_t ret;
2338 
2339 	mutex_lock(&tpg->tv_tpg_mutex);
2340 	tv_nexus = tpg->tpg_nexus;
2341 	if (!tv_nexus) {
2342 		mutex_unlock(&tpg->tv_tpg_mutex);
2343 		return -ENODEV;
2344 	}
2345 	ret = sysfs_emit(page, "%s\n",
2346 			tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
2347 	mutex_unlock(&tpg->tv_tpg_mutex);
2348 
2349 	return ret;
2350 }
2351 
2352 static ssize_t vhost_scsi_tpg_nexus_store(struct config_item *item,
2353 		const char *page, size_t count)
2354 {
2355 	struct se_portal_group *se_tpg = to_tpg(item);
2356 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2357 				struct vhost_scsi_tpg, se_tpg);
2358 	struct vhost_scsi_tport *tport_wwn = tpg->tport;
2359 	unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
2360 	int ret;
2361 	/*
2362 	 * Shut down the active I_T nexus if 'NULL' is passed.
2363 	 */
2364 	if (!strncmp(page, "NULL", 4)) {
2365 		ret = vhost_scsi_drop_nexus(tpg);
2366 		return (!ret) ? count : ret;
2367 	}
2368 	/*
2369 	 * Otherwise make sure the passed virtual Initiator port WWN matches
2370 	 * the fabric protocol_id set in vhost_scsi_make_tport(), and call
2371 	 * vhost_scsi_make_nexus().
2372 	 */
2373 	if (strlen(page) >= VHOST_SCSI_NAMELEN) {
2374 		pr_err("Emulated NAA Sas Address: %s, exceeds"
2375 				" max: %d\n", page, VHOST_SCSI_NAMELEN);
2376 		return -EINVAL;
2377 	}
2378 	snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);
2379 
2380 	ptr = strstr(i_port, "naa.");
2381 	if (ptr) {
2382 		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
2383 			pr_err("Passed SAS Initiator Port %s does not"
2384 				" match target port protoid: %s\n", i_port,
2385 				vhost_scsi_dump_proto_id(tport_wwn));
2386 			return -EINVAL;
2387 		}
2388 		port_ptr = &i_port[0];
2389 		goto check_newline;
2390 	}
2391 	ptr = strstr(i_port, "fc.");
2392 	if (ptr) {
2393 		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
2394 			pr_err("Passed FCP Initiator Port %s does not"
2395 				" match target port protoid: %s\n", i_port,
2396 				vhost_scsi_dump_proto_id(tport_wwn));
2397 			return -EINVAL;
2398 		}
2399 		port_ptr = &i_port[3]; /* Skip over "fc." */
2400 		goto check_newline;
2401 	}
2402 	ptr = strstr(i_port, "iqn.");
2403 	if (ptr) {
2404 		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
2405 			pr_err("Passed iSCSI Initiator Port %s does not"
2406 				" match target port protoid: %s\n", i_port,
2407 				vhost_scsi_dump_proto_id(tport_wwn));
2408 			return -EINVAL;
2409 		}
2410 		port_ptr = &i_port[0];
2411 		goto check_newline;
2412 	}
2413 	pr_err("Unable to locate prefix for emulated Initiator Port:"
2414 			" %s\n", i_port);
2415 	return -EINVAL;
2416 	/*
2417 	 * Clear any trailing newline for the NAA WWN
2418 	 */
2419 check_newline:
2420 	if (i_port[strlen(i_port)-1] == '\n')
2421 		i_port[strlen(i_port)-1] = '\0';
2422 
2423 	ret = vhost_scsi_make_nexus(tpg, port_ptr);
2424 	if (ret < 0)
2425 		return ret;
2426 
2427 	return count;
2428 }
2429 
2430 CONFIGFS_ATTR(vhost_scsi_tpg_, nexus);
2431 
2432 static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
2433 	&vhost_scsi_tpg_attr_nexus,
2434 	NULL,
2435 };
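
/*
 * Editor's note: writing an initiator-port WWN to this attribute calls
 * vhost_scsi_make_nexus(), and writing "NULL" calls vhost_scsi_drop_nexus();
 * the prefix (naa./fc./iqn.) must match the tport's protocol, per the checks
 * above. A sketch with placeholder names:
 *
 *	echo naa.60014051234567ff > \
 *		/sys/kernel/config/target/vhost/naa.500140512345678a/tpgt_1/nexus
 */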
2436 
2437 static struct se_portal_group *
2438 vhost_scsi_make_tpg(struct se_wwn *wwn, const char *name)
2439 {
2440 	struct vhost_scsi_tport *tport = container_of(wwn,
2441 			struct vhost_scsi_tport, tport_wwn);
2442 
2443 	struct vhost_scsi_tpg *tpg;
2444 	u16 tpgt;
2445 	int ret;
2446 
2447 	if (strstr(name, "tpgt_") != name)
2448 		return ERR_PTR(-EINVAL);
2449 	if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
2450 		return ERR_PTR(-EINVAL);
2451 
2452 	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
2453 	if (!tpg) {
2454 		pr_err("Unable to allocate struct vhost_scsi_tpg");
2455 		return ERR_PTR(-ENOMEM);
2456 	}
2457 	mutex_init(&tpg->tv_tpg_mutex);
2458 	INIT_LIST_HEAD(&tpg->tv_tpg_list);
2459 	tpg->tport = tport;
2460 	tpg->tport_tpgt = tpgt;
2461 
2462 	ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id);
2463 	if (ret < 0) {
2464 		kfree(tpg);
2465 		return NULL;
2466 	}
2467 	mutex_lock(&vhost_scsi_mutex);
2468 	list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
2469 	mutex_unlock(&vhost_scsi_mutex);
2470 
2471 	return &tpg->se_tpg;
2472 }
2473 
2474 static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg)
2475 {
2476 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2477 				struct vhost_scsi_tpg, se_tpg);
2478 
2479 	mutex_lock(&vhost_scsi_mutex);
2480 	list_del(&tpg->tv_tpg_list);
2481 	mutex_unlock(&vhost_scsi_mutex);
2482 	/*
2483 	 * Release the virtual I_T Nexus for this vhost TPG
2484 	 */
2485 	vhost_scsi_drop_nexus(tpg);
2486 	/*
2487 	 * Deregister the se_tpg from TCM.
2488 	 */
2489 	core_tpg_deregister(se_tpg);
2490 	kfree(tpg);
2491 }
2492 
2493 static struct se_wwn *
2494 vhost_scsi_make_tport(struct target_fabric_configfs *tf,
2495 		     struct config_group *group,
2496 		     const char *name)
2497 {
2498 	struct vhost_scsi_tport *tport;
2499 	char *ptr;
2500 	u64 wwpn = 0;
2501 	int off = 0;
2502 
2503 	/* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
2504 		return ERR_PTR(-EINVAL); */
2505 
2506 	tport = kzalloc(sizeof(*tport), GFP_KERNEL);
2507 	if (!tport) {
2508 		pr_err("Unable to allocate struct vhost_scsi_tport");
2509 		return ERR_PTR(-ENOMEM);
2510 	}
2511 	tport->tport_wwpn = wwpn;
2512 	/*
2513 	 * Determine the emulated Protocol Identifier and Target Port Name
2514 	 * based on the incoming configfs directory name.
2515 	 */
2516 	ptr = strstr(name, "naa.");
2517 	if (ptr) {
2518 		tport->tport_proto_id = SCSI_PROTOCOL_SAS;
2519 		goto check_len;
2520 	}
2521 	ptr = strstr(name, "fc.");
2522 	if (ptr) {
2523 		tport->tport_proto_id = SCSI_PROTOCOL_FCP;
2524 		off = 3; /* Skip over "fc." */
2525 		goto check_len;
2526 	}
2527 	ptr = strstr(name, "iqn.");
2528 	if (ptr) {
2529 		tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
2530 		goto check_len;
2531 	}
2532 
2533 	pr_err("Unable to locate prefix for emulated Target Port:"
2534 			" %s\n", name);
2535 	kfree(tport);
2536 	return ERR_PTR(-EINVAL);
2537 
2538 check_len:
2539 	if (strlen(name) >= VHOST_SCSI_NAMELEN) {
2540 		pr_err("Emulated %s Address: %s, exceeds"
2541 			" max: %d\n", name, vhost_scsi_dump_proto_id(tport),
2542 			VHOST_SCSI_NAMELEN);
2543 		kfree(tport);
2544 		return ERR_PTR(-EINVAL);
2545 	}
2546 	snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]);
2547 
2548 	pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
2549 		" %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name);
2550 
2551 	return &tport->tport_wwn;
2552 }
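
/*
 * Editor's note: make_tport() and make_tpg() above are driven by configfs
 * directory creation; a hedged end-to-end provisioning sketch, with all
 * WWNs as placeholders (CONFIGFS = /sys/kernel/config/target):
 *
 *	mkdir $CONFIGFS/vhost/naa.500140512345678a
 *	mkdir $CONFIGFS/vhost/naa.500140512345678a/tpgt_1
 *	echo naa.60014051234567ff > \
 *		$CONFIGFS/vhost/naa.500140512345678a/tpgt_1/nexus
 *	mkdir $CONFIGFS/vhost/naa.500140512345678a/tpgt_1/lun/lun_0
 */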
2553 
2554 static void vhost_scsi_drop_tport(struct se_wwn *wwn)
2555 {
2556 	struct vhost_scsi_tport *tport = container_of(wwn,
2557 				struct vhost_scsi_tport, tport_wwn);
2558 
2559 	pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
2560 		" %s Address: %s\n", vhost_scsi_dump_proto_id(tport),
2561 		tport->tport_name);
2562 
2563 	kfree(tport);
2564 }
2565 
2566 static ssize_t
2567 vhost_scsi_wwn_version_show(struct config_item *item, char *page)
2568 {
2569 	return sysfs_emit(page, "TCM_VHOST fabric module %s on %s/%s"
2570 		"on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2571 		utsname()->machine);
2572 }
2573 
2574 CONFIGFS_ATTR_RO(vhost_scsi_wwn_, version);
2575 
2576 static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
2577 	&vhost_scsi_wwn_attr_version,
2578 	NULL,
2579 };
2580 
2581 static const struct target_core_fabric_ops vhost_scsi_ops = {
2582 	.module				= THIS_MODULE,
2583 	.fabric_name			= "vhost",
2584 	.max_data_sg_nents		= VHOST_SCSI_PREALLOC_SGLS,
2585 	.tpg_get_wwn			= vhost_scsi_get_fabric_wwn,
2586 	.tpg_get_tag			= vhost_scsi_get_tpgt,
2587 	.tpg_check_demo_mode		= vhost_scsi_check_true,
2588 	.tpg_check_demo_mode_cache	= vhost_scsi_check_true,
2589 	.tpg_check_prot_fabric_only	= vhost_scsi_check_prot_fabric_only,
2590 	.release_cmd			= vhost_scsi_release_cmd,
2591 	.check_stop_free		= vhost_scsi_check_stop_free,
2592 	.sess_get_initiator_sid		= NULL,
2593 	.write_pending			= vhost_scsi_write_pending,
2594 	.queue_data_in			= vhost_scsi_queue_data_in,
2595 	.queue_status			= vhost_scsi_queue_status,
2596 	.queue_tm_rsp			= vhost_scsi_queue_tm_rsp,
2597 	.aborted_task			= vhost_scsi_aborted_task,
2598 	/*
2599 	 * Setup callers for generic logic in target_core_fabric_configfs.c
2600 	 */
2601 	.fabric_make_wwn		= vhost_scsi_make_tport,
2602 	.fabric_drop_wwn		= vhost_scsi_drop_tport,
2603 	.fabric_make_tpg		= vhost_scsi_make_tpg,
2604 	.fabric_drop_tpg		= vhost_scsi_drop_tpg,
2605 	.fabric_post_link		= vhost_scsi_port_link,
2606 	.fabric_pre_unlink		= vhost_scsi_port_unlink,
2607 
2608 	.tfc_wwn_attrs			= vhost_scsi_wwn_attrs,
2609 	.tfc_tpg_base_attrs		= vhost_scsi_tpg_attrs,
2610 	.tfc_tpg_attrib_attrs		= vhost_scsi_tpg_attrib_attrs,
2611 };
2612 
2613 static int __init vhost_scsi_init(void)
2614 {
2615 	int ret = -ENOMEM;
2616 
2617 	pr_debug("TCM_VHOST fabric module %s on %s/%s"
2618 		" on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2619 		utsname()->machine);
2620 
2621 	ret = vhost_scsi_register();
2622 	if (ret < 0)
2623 		goto out;
2624 
2625 	ret = target_register_template(&vhost_scsi_ops);
2626 	if (ret < 0)
2627 		goto out_vhost_scsi_deregister;
2628 
2629 	return 0;
2630 
2631 out_vhost_scsi_deregister:
2632 	vhost_scsi_deregister();
2633 out:
2634 	return ret;
2635 }
2636 
2637 static void vhost_scsi_exit(void)
2638 {
2639 	target_unregister_template(&vhost_scsi_ops);
2640 	vhost_scsi_deregister();
2641 }
2642 
2643 MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
2644 MODULE_ALIAS("tcm_vhost");
2645 MODULE_LICENSE("GPL");
2646 module_init(vhost_scsi_init);
2647 module_exit(vhost_scsi_exit);
2648