xref: /openbmc/linux/drivers/vhost/scsi.c (revision 2fa5ebe3)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*******************************************************************************
3  * Vhost kernel TCM fabric driver for virtio SCSI initiators
4  *
5  * (C) Copyright 2010-2013 Datera, Inc.
6  * (C) Copyright 2010-2012 IBM Corp.
7  *
8  * Authors: Nicholas A. Bellinger <nab@daterainc.com>
9  *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
10  ****************************************************************************/
11 
12 #include <linux/module.h>
13 #include <linux/moduleparam.h>
14 #include <generated/utsrelease.h>
15 #include <linux/utsname.h>
16 #include <linux/init.h>
17 #include <linux/slab.h>
18 #include <linux/kthread.h>
19 #include <linux/types.h>
20 #include <linux/string.h>
21 #include <linux/configfs.h>
22 #include <linux/ctype.h>
23 #include <linux/compat.h>
24 #include <linux/eventfd.h>
25 #include <linux/fs.h>
26 #include <linux/vmalloc.h>
27 #include <linux/miscdevice.h>
28 #include <asm/unaligned.h>
29 #include <scsi/scsi_common.h>
30 #include <scsi/scsi_proto.h>
31 #include <target/target_core_base.h>
32 #include <target/target_core_fabric.h>
33 #include <linux/vhost.h>
34 #include <linux/virtio_scsi.h>
35 #include <linux/llist.h>
36 #include <linux/bitmap.h>
37 
38 #include "vhost.h"
39 
40 #define VHOST_SCSI_VERSION  "v0.1"
41 #define VHOST_SCSI_NAMELEN 256
42 #define VHOST_SCSI_MAX_CDB_SIZE 32
43 #define VHOST_SCSI_PREALLOC_SGLS 2048
44 #define VHOST_SCSI_PREALLOC_UPAGES 2048
45 #define VHOST_SCSI_PREALLOC_PROT_SGLS 2048
46 
47 /* Max number of requests before requeueing the job.
48  * Using this limit prevents one virtqueue from starving others with
49  * requests.
50  */
51 #define VHOST_SCSI_WEIGHT 256
52 
53 struct vhost_scsi_inflight {
54 	/* Wait for the flush operation to finish */
55 	struct completion comp;
56 	/* Refcount for the inflight reqs */
57 	struct kref kref;
58 };
59 
60 struct vhost_scsi_cmd {
61 	/* Descriptor from vhost_get_vq_desc() for virt_queue segment */
62 	int tvc_vq_desc;
63 	/* virtio-scsi initiator task attribute */
64 	int tvc_task_attr;
65 	/* virtio-scsi response incoming iovecs */
66 	int tvc_in_iovs;
67 	/* virtio-scsi initiator data direction */
68 	enum dma_data_direction tvc_data_direction;
69 	/* Expected data transfer length from virtio-scsi header */
70 	u32 tvc_exp_data_len;
71 	/* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
72 	u64 tvc_tag;
73 	/* The number of scatterlists associated with this cmd */
74 	u32 tvc_sgl_count;
75 	u32 tvc_prot_sgl_count;
76 	/* Saved unpacked SCSI LUN for vhost_scsi_target_queue_cmd() */
77 	u32 tvc_lun;
78 	/* Pointer to the SGL formatted memory from virtio-scsi */
79 	struct scatterlist *tvc_sgl;
80 	struct scatterlist *tvc_prot_sgl;
81 	struct page **tvc_upages;
82 	/* Pointer to response header iovec */
83 	struct iovec *tvc_resp_iov;
84 	/* Pointer to vhost_scsi for our device */
85 	struct vhost_scsi *tvc_vhost;
86 	/* Pointer to vhost_virtqueue for the cmd */
87 	struct vhost_virtqueue *tvc_vq;
88 	/* Pointer to vhost nexus memory */
89 	struct vhost_scsi_nexus *tvc_nexus;
90 	/* The TCM I/O descriptor that is accessed via container_of() */
91 	struct se_cmd tvc_se_cmd;
92 	/* Copy of the incoming SCSI command descriptor block (CDB) */
93 	unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE];
94 	/* Sense buffer that will be mapped into outgoing status */
95 	unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
96 	/* Completed commands list, serviced from vhost worker thread */
97 	struct llist_node tvc_completion_list;
98 	/* Used to track inflight cmd */
99 	struct vhost_scsi_inflight *inflight;
100 };
101 
102 struct vhost_scsi_nexus {
103 	/* Pointer to TCM session for I_T Nexus */
104 	struct se_session *tvn_se_sess;
105 };
106 
107 struct vhost_scsi_tpg {
108 	/* Vhost port target portal group tag for TCM */
109 	u16 tport_tpgt;
110 	/* Used to track the number of TPG Port/LUN links with respect to explicit I_T Nexus shutdown */
111 	int tv_tpg_port_count;
112 	/* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
113 	int tv_tpg_vhost_count;
114 	/* Used for enabling T10-PI with legacy devices */
115 	int tv_fabric_prot_type;
116 	/* list for vhost_scsi_list */
117 	struct list_head tv_tpg_list;
118 	/* Used to protect access to tpg_nexus */
119 	struct mutex tv_tpg_mutex;
120 	/* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
121 	struct vhost_scsi_nexus *tpg_nexus;
122 	/* Pointer back to vhost_scsi_tport */
123 	struct vhost_scsi_tport *tport;
124 	/* Returned by vhost_scsi_make_tpg() */
125 	struct se_portal_group se_tpg;
126 	/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
127 	struct vhost_scsi *vhost_scsi;
128 };
129 
130 struct vhost_scsi_tport {
131 	/* SCSI protocol the tport is providing */
132 	u8 tport_proto_id;
133 	/* Binary World Wide unique Port Name for Vhost Target port */
134 	u64 tport_wwpn;
135 	/* ASCII formatted WWPN for Vhost Target port */
136 	char tport_name[VHOST_SCSI_NAMELEN];
137 	/* Returned by vhost_scsi_make_tport() */
138 	struct se_wwn tport_wwn;
139 };
140 
141 struct vhost_scsi_evt {
142 	/* event to be sent to guest */
143 	struct virtio_scsi_event event;
144 	/* event list, serviced from vhost worker thread */
145 	struct llist_node list;
146 };
147 
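/*
 * Fixed virtqueue layout, per the virtio-scsi spec: queue 0 is the
 * control queue, queue 1 is the event queue, and queues 2 and up carry
 * SCSI requests.
 */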
148 enum {
149 	VHOST_SCSI_VQ_CTL = 0,
150 	VHOST_SCSI_VQ_EVT = 1,
151 	VHOST_SCSI_VQ_IO = 2,
152 };
153 
154 /* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
155 enum {
156 	VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
157 					       (1ULL << VIRTIO_SCSI_F_T10_PI)
158 };
159 
160 #define VHOST_SCSI_MAX_TARGET	256
161 #define VHOST_SCSI_MAX_IO_VQ	1024
162 #define VHOST_SCSI_MAX_EVENT	128
163 
164 static unsigned vhost_scsi_max_io_vqs = 128;
165 module_param_named(max_io_vqs, vhost_scsi_max_io_vqs, uint, 0644);
166 MODULE_PARM_DESC(max_io_vqs, "Set the max number of IO virtqueues a vhost scsi device can support. The default is 128. The max is 1024.");
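
/*
 * Usage sketch (hypothetical value): the limit is set at module load time,
 * e.g.
 *
 *	modprobe vhost_scsi max_io_vqs=16
 *
 * and, with the 0644 mode above, can be read back from
 * /sys/module/vhost_scsi/parameters/max_io_vqs. A new value only affects
 * devices opened after the change, since nvqs is read in vhost_scsi_open().
 */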
167 
168 struct vhost_scsi_virtqueue {
169 	struct vhost_virtqueue vq;
170 	/*
171 	 * Reference counting for inflight reqs, used for the flush operation.
172 	 * At any given time one counter tracks newly submitted commands while
173 	 * we wait for the other one to drop to 0.
174 	 */
175 	struct vhost_scsi_inflight inflights[2];
176 	/*
177 	 * Indicates which inflight counter is currently in use, protected by
178 	 * vq->mutex. Writers must also take the dev mutex and flush under it.
179 	 */
180 	int inflight_idx;
181 	struct vhost_scsi_cmd *scsi_cmds;
182 	struct sbitmap scsi_tags;
183 	int max_cmds;
184 };
185 
186 struct vhost_scsi {
187 	/* Protected by vhost_scsi->dev.mutex */
188 	struct vhost_scsi_tpg **vs_tpg;
189 	char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
190 
191 	struct vhost_dev dev;
192 	struct vhost_scsi_virtqueue *vqs;
193 	unsigned long *compl_bitmap;
194 	struct vhost_scsi_inflight **old_inflight;
195 
196 	struct vhost_work vs_completion_work; /* cmd completion work item */
197 	struct llist_head vs_completion_list; /* cmd completion queue */
198 
199 	struct vhost_work vs_event_work; /* evt injection work item */
200 	struct llist_head vs_event_list; /* evt injection queue */
201 
202 	bool vs_events_missed; /* any missed events, protected by vq->mutex */
203 	int vs_events_nr; /* num of pending events, protected by vq->mutex */
204 };
205 
206 struct vhost_scsi_tmf {
207 	struct vhost_work vwork;
208 	struct vhost_scsi *vhost;
209 	struct vhost_scsi_virtqueue *svq;
210 
211 	struct se_cmd se_cmd;
212 	u8 scsi_resp;
213 	struct vhost_scsi_inflight *inflight;
214 	struct iovec resp_iov;
215 	int in_iovs;
216 	int vq_desc;
217 };
218 
219 /*
220  * Context for processing request and control queue operations.
221  */
222 struct vhost_scsi_ctx {
223 	int head;
224 	unsigned int out, in;
225 	size_t req_size, rsp_size;
226 	size_t out_size, in_size;
227 	u8 *target, *lunp;
228 	void *req;
229 	struct iov_iter out_iter;
230 };
231 
232 /* Global mutex protecting the vhost_scsi TPG list for vhost ioctl access */
233 static DEFINE_MUTEX(vhost_scsi_mutex);
234 static LIST_HEAD(vhost_scsi_list);
235 
236 static void vhost_scsi_done_inflight(struct kref *kref)
237 {
238 	struct vhost_scsi_inflight *inflight;
239 
240 	inflight = container_of(kref, struct vhost_scsi_inflight, kref);
241 	complete(&inflight->comp);
242 }
243 
244 static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
245 				    struct vhost_scsi_inflight *old_inflight[])
246 {
247 	struct vhost_scsi_inflight *new_inflight;
248 	struct vhost_virtqueue *vq;
249 	int idx, i;
250 
251 	for (i = 0; i < vs->dev.nvqs;  i++) {
252 		vq = &vs->vqs[i].vq;
253 
254 		mutex_lock(&vq->mutex);
255 
256 		/* store the old inflight */
257 		idx = vs->vqs[i].inflight_idx;
258 		if (old_inflight)
259 			old_inflight[i] = &vs->vqs[i].inflights[idx];
260 
261 		/* set up the new inflight */
262 		vs->vqs[i].inflight_idx = idx ^ 1;
263 		new_inflight = &vs->vqs[i].inflights[idx ^ 1];
264 		kref_init(&new_inflight->kref);
265 		init_completion(&new_inflight->comp);
266 
267 		mutex_unlock(&vq->mutex);
268 	}
269 }
270 
271 static struct vhost_scsi_inflight *
272 vhost_scsi_get_inflight(struct vhost_virtqueue *vq)
273 {
274 	struct vhost_scsi_inflight *inflight;
275 	struct vhost_scsi_virtqueue *svq;
276 
277 	svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
278 	inflight = &svq->inflights[svq->inflight_idx];
279 	kref_get(&inflight->kref);
280 
281 	return inflight;
282 }
283 
284 static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)
285 {
286 	kref_put(&inflight->kref, vhost_scsi_done_inflight);
287 }
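
/*
 * Summary of the inflight lifecycle implemented by the helpers above and
 * by vhost_scsi_flush() below:
 *
 *	submit:   cmd->inflight = vhost_scsi_get_inflight(vq);	(kref++)
 *	complete: vhost_scsi_put_inflight(cmd->inflight);	(kref--)
 *	flush:    vhost_scsi_init_inflight() flips inflight_idx so new
 *	          commands charge the other slot, then the flusher drops
 *	          the initial reference and waits in wait_for_completion()
 *	          until the old slot's kref reaches zero.
 */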
288 
289 static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
290 {
291 	return 1;
292 }
293 
294 static int vhost_scsi_check_false(struct se_portal_group *se_tpg)
295 {
296 	return 0;
297 }
298 
299 static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
300 {
301 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
302 				struct vhost_scsi_tpg, se_tpg);
303 	struct vhost_scsi_tport *tport = tpg->tport;
304 
305 	return &tport->tport_name[0];
306 }
307 
308 static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
309 {
310 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
311 				struct vhost_scsi_tpg, se_tpg);
312 	return tpg->tport_tpgt;
313 }
314 
315 static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
316 {
317 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
318 				struct vhost_scsi_tpg, se_tpg);
319 
320 	return tpg->tv_fabric_prot_type;
321 }
322 
323 static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
324 {
325 	return 1;
326 }
327 
328 static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
329 {
330 	struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
331 				struct vhost_scsi_cmd, tvc_se_cmd);
332 	struct vhost_scsi_virtqueue *svq = container_of(tv_cmd->tvc_vq,
333 				struct vhost_scsi_virtqueue, vq);
334 	struct vhost_scsi_inflight *inflight = tv_cmd->inflight;
335 	int i;
336 
337 	if (tv_cmd->tvc_sgl_count) {
338 		for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
339 			put_page(sg_page(&tv_cmd->tvc_sgl[i]));
340 	}
341 	if (tv_cmd->tvc_prot_sgl_count) {
342 		for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
343 			put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
344 	}
345 
346 	sbitmap_clear_bit(&svq->scsi_tags, se_cmd->map_tag);
347 	vhost_scsi_put_inflight(inflight);
348 }
349 
350 static void vhost_scsi_release_tmf_res(struct vhost_scsi_tmf *tmf)
351 {
352 	struct vhost_scsi_inflight *inflight = tmf->inflight;
353 
354 	kfree(tmf);
355 	vhost_scsi_put_inflight(inflight);
356 }
357 
358 static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
359 {
360 	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
361 		struct vhost_scsi_tmf *tmf = container_of(se_cmd,
362 					struct vhost_scsi_tmf, se_cmd);
363 
364 		vhost_work_queue(&tmf->vhost->dev, &tmf->vwork);
365 	} else {
366 		struct vhost_scsi_cmd *cmd = container_of(se_cmd,
367 					struct vhost_scsi_cmd, tvc_se_cmd);
368 		struct vhost_scsi *vs = cmd->tvc_vhost;
369 
370 		llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
371 		vhost_work_queue(&vs->dev, &vs->vs_completion_work);
372 	}
373 }
374 
375 static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
376 {
377 	return 0;
378 }
379 
380 static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
381 {
382 	/* Go ahead and process the write immediately */
383 	target_execute_cmd(se_cmd);
384 	return 0;
385 }
386 
387 static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl)
388 {
389 	return;
390 }
391 
392 static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
393 {
394 	return 0;
395 }
396 
397 static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
398 {
399 	transport_generic_free_cmd(se_cmd, 0);
400 	return 0;
401 }
402 
403 static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
404 {
405 	transport_generic_free_cmd(se_cmd, 0);
406 	return 0;
407 }
408 
409 static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
410 {
411 	struct vhost_scsi_tmf *tmf = container_of(se_cmd, struct vhost_scsi_tmf,
412 						  se_cmd);
413 
414 	tmf->scsi_resp = se_cmd->se_tmr_req->response;
415 	transport_generic_free_cmd(&tmf->se_cmd, 0);
416 }
417 
418 static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
419 {
420 	return;
421 }
422 
423 static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
424 {
425 	vs->vs_events_nr--;
426 	kfree(evt);
427 }
428 
429 static struct vhost_scsi_evt *
430 vhost_scsi_allocate_evt(struct vhost_scsi *vs,
431 		       u32 event, u32 reason)
432 {
433 	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
434 	struct vhost_scsi_evt *evt;
435 
436 	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
437 		vs->vs_events_missed = true;
438 		return NULL;
439 	}
440 
441 	evt = kzalloc(sizeof(*evt), GFP_KERNEL);
442 	if (!evt) {
443 		vq_err(vq, "Failed to allocate vhost_scsi_evt\n");
444 		vs->vs_events_missed = true;
445 		return NULL;
446 	}
447 
448 	evt->event.event = cpu_to_vhost32(vq, event);
449 	evt->event.reason = cpu_to_vhost32(vq, reason);
450 	vs->vs_events_nr++;
451 
452 	return evt;
453 }
454 
455 static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
456 {
457 	return target_put_sess_cmd(se_cmd);
458 }
459 
460 static void
461 vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
462 {
463 	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
464 	struct virtio_scsi_event *event = &evt->event;
465 	struct virtio_scsi_event __user *eventp;
466 	unsigned out, in;
467 	int head, ret;
468 
469 	if (!vhost_vq_get_backend(vq)) {
470 		vs->vs_events_missed = true;
471 		return;
472 	}
473 
474 again:
475 	vhost_disable_notify(&vs->dev, vq);
476 	head = vhost_get_vq_desc(vq, vq->iov,
477 			ARRAY_SIZE(vq->iov), &out, &in,
478 			NULL, NULL);
479 	if (head < 0) {
480 		vs->vs_events_missed = true;
481 		return;
482 	}
483 	if (head == vq->num) {
484 		if (vhost_enable_notify(&vs->dev, vq))
485 			goto again;
486 		vs->vs_events_missed = true;
487 		return;
488 	}
489 
490 	if (vq->iov[out].iov_len != sizeof(struct virtio_scsi_event)) {
491 		vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
492 				vq->iov[out].iov_len);
493 		vs->vs_events_missed = true;
494 		return;
495 	}
496 
497 	if (vs->vs_events_missed) {
498 		event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
499 		vs->vs_events_missed = false;
500 	}
501 
502 	eventp = vq->iov[out].iov_base;
503 	ret = __copy_to_user(eventp, event, sizeof(*event));
504 	if (!ret)
505 		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
506 	else
507 		vq_err(vq, "Faulted on vhost_scsi_send_event\n");
508 }
509 
510 static void vhost_scsi_evt_work(struct vhost_work *work)
511 {
512 	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
513 					vs_event_work);
514 	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
515 	struct vhost_scsi_evt *evt, *t;
516 	struct llist_node *llnode;
517 
518 	mutex_lock(&vq->mutex);
519 	llnode = llist_del_all(&vs->vs_event_list);
520 	llist_for_each_entry_safe(evt, t, llnode, list) {
521 		vhost_scsi_do_evt_work(vs, evt);
522 		vhost_scsi_free_evt(vs, evt);
523 	}
524 	mutex_unlock(&vq->mutex);
525 }
526 
527 /* Fill in status and signal that we are done processing this command
528  *
529  * This is scheduled in the vhost work queue so we are called with the owner
530  * process mm and can access the vring.
531  */
532 static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
533 {
534 	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
535 					vs_completion_work);
536 	struct virtio_scsi_cmd_resp v_rsp;
537 	struct vhost_scsi_cmd *cmd, *t;
538 	struct llist_node *llnode;
539 	struct se_cmd *se_cmd;
540 	struct iov_iter iov_iter;
541 	int ret, vq;
542 
543 	bitmap_zero(vs->compl_bitmap, vs->dev.nvqs);
544 	llnode = llist_del_all(&vs->vs_completion_list);
545 	llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) {
546 		se_cmd = &cmd->tvc_se_cmd;
547 
548 		pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
549 			cmd, se_cmd->residual_count, se_cmd->scsi_status);
550 
551 		memset(&v_rsp, 0, sizeof(v_rsp));
552 		v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq, se_cmd->residual_count);
553 		/* TODO is status_qualifier field needed? */
554 		v_rsp.status = se_cmd->scsi_status;
555 		v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
556 						 se_cmd->scsi_sense_length);
557 		memcpy(v_rsp.sense, cmd->tvc_sense_buf,
558 		       se_cmd->scsi_sense_length);
559 
560 		iov_iter_init(&iov_iter, ITER_DEST, cmd->tvc_resp_iov,
561 			      cmd->tvc_in_iovs, sizeof(v_rsp));
562 		ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
563 		if (likely(ret == sizeof(v_rsp))) {
564 			struct vhost_scsi_virtqueue *q;
565 			vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
566 			q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
567 			vq = q - vs->vqs;
568 			__set_bit(vq, vs->compl_bitmap);
569 		} else
570 			pr_err("Faulted on virtio_scsi_cmd_resp\n");
571 
572 		vhost_scsi_release_cmd_res(se_cmd);
573 	}
574 
575 	vq = -1;
576 	while ((vq = find_next_bit(vs->compl_bitmap, vs->dev.nvqs, vq + 1))
577 		< vs->dev.nvqs)
578 		vhost_signal(&vs->dev, &vs->vqs[vq].vq);
579 }
580 
581 static struct vhost_scsi_cmd *
582 vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
583 		   unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
584 		   u32 exp_data_len, int data_direction)
585 {
586 	struct vhost_scsi_virtqueue *svq = container_of(vq,
587 					struct vhost_scsi_virtqueue, vq);
588 	struct vhost_scsi_cmd *cmd;
589 	struct vhost_scsi_nexus *tv_nexus;
590 	struct scatterlist *sg, *prot_sg;
591 	struct iovec *tvc_resp_iov;
592 	struct page **pages;
593 	int tag;
594 
595 	tv_nexus = tpg->tpg_nexus;
596 	if (!tv_nexus) {
597 		pr_err("Unable to locate active struct vhost_scsi_nexus\n");
598 		return ERR_PTR(-EIO);
599 	}
600 
601 	tag = sbitmap_get(&svq->scsi_tags);
602 	if (tag < 0) {
603 		pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
604 		return ERR_PTR(-ENOMEM);
605 	}
606 
607 	cmd = &svq->scsi_cmds[tag];
608 	sg = cmd->tvc_sgl;
609 	prot_sg = cmd->tvc_prot_sgl;
610 	pages = cmd->tvc_upages;
611 	tvc_resp_iov = cmd->tvc_resp_iov;
612 	memset(cmd, 0, sizeof(*cmd));
613 	cmd->tvc_sgl = sg;
614 	cmd->tvc_prot_sgl = prot_sg;
615 	cmd->tvc_upages = pages;
616 	cmd->tvc_se_cmd.map_tag = tag;
617 	cmd->tvc_tag = scsi_tag;
618 	cmd->tvc_lun = lun;
619 	cmd->tvc_task_attr = task_attr;
620 	cmd->tvc_exp_data_len = exp_data_len;
621 	cmd->tvc_data_direction = data_direction;
622 	cmd->tvc_nexus = tv_nexus;
623 	cmd->inflight = vhost_scsi_get_inflight(vq);
624 	cmd->tvc_resp_iov = tvc_resp_iov;
625 
626 	memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);
627 
628 	return cmd;
629 }
630 
631 /*
632  * Map a user memory range into a scatterlist
633  *
634  * Returns the number of scatterlist entries used or -errno on error.
635  */
636 static int
637 vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
638 		      struct iov_iter *iter,
639 		      struct scatterlist *sgl,
640 		      bool write)
641 {
642 	struct page **pages = cmd->tvc_upages;
643 	struct scatterlist *sg = sgl;
644 	ssize_t bytes;
645 	size_t offset;
646 	unsigned int npages = 0;
647 
648 	bytes = iov_iter_get_pages2(iter, pages, LONG_MAX,
649 				VHOST_SCSI_PREALLOC_UPAGES, &offset);
650 	/* No pages were pinned */
651 	if (bytes <= 0)
652 		return bytes < 0 ? bytes : -EFAULT;
653 
654 	while (bytes) {
655 		unsigned n = min_t(unsigned, PAGE_SIZE - offset, bytes);
656 		sg_set_page(sg++, pages[npages++], n, offset);
657 		bytes -= n;
658 		offset = 0;
659 	}
660 	return npages;
661 }
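
/*
 * Worked example for the loop above (hypothetical numbers, 4K pages):
 * an iter of 0x300 bytes starting at page offset 0xf00 pins two pages;
 * the first sg entry covers the final 0x100 bytes of page 0, the second
 * covers 0x200 bytes of page 1 at offset 0, and 2 is returned.
 */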
662 
663 static int
664 vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
665 {
666 	int sgl_count = 0;
667 
668 	if (!iter || !iter->iov) {
669 		pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
670 		       " present\n", __func__, bytes);
671 		return -EINVAL;
672 	}
673 
674 	sgl_count = iov_iter_npages(iter, 0xffff);
675 	if (sgl_count > max_sgls) {
676 		pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
677 		       " max_sgls: %d\n", __func__, sgl_count, max_sgls);
678 		return -EINVAL;
679 	}
680 	return sgl_count;
681 }
682 
683 static int
684 vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
685 		      struct iov_iter *iter,
686 		      struct scatterlist *sg, int sg_count)
687 {
688 	struct scatterlist *p = sg;
689 	int ret;
690 
691 	while (iov_iter_count(iter)) {
692 		ret = vhost_scsi_map_to_sgl(cmd, iter, sg, write);
693 		if (ret < 0) {
694 			while (p < sg) {
695 				struct page *page = sg_page(p++);
696 				if (page)
697 					put_page(page);
698 			}
699 			return ret;
700 		}
701 		sg += ret;
702 	}
703 	return 0;
704 }
705 
706 static int
707 vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
708 		 size_t prot_bytes, struct iov_iter *prot_iter,
709 		 size_t data_bytes, struct iov_iter *data_iter)
710 {
711 	int sgl_count, ret;
712 	bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE);
713 
714 	if (prot_bytes) {
715 		sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
716 						 VHOST_SCSI_PREALLOC_PROT_SGLS);
717 		if (sgl_count < 0)
718 			return sgl_count;
719 
720 		sg_init_table(cmd->tvc_prot_sgl, sgl_count);
721 		cmd->tvc_prot_sgl_count = sgl_count;
722 		pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
723 			 cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);
724 
725 		ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iter,
726 					    cmd->tvc_prot_sgl,
727 					    cmd->tvc_prot_sgl_count);
728 		if (ret < 0) {
729 			cmd->tvc_prot_sgl_count = 0;
730 			return ret;
731 		}
732 	}
733 	sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
734 					 VHOST_SCSI_PREALLOC_SGLS);
735 	if (sgl_count < 0)
736 		return sgl_count;
737 
738 	sg_init_table(cmd->tvc_sgl, sgl_count);
739 	cmd->tvc_sgl_count = sgl_count;
740 	pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
741 		  cmd->tvc_sgl, cmd->tvc_sgl_count);
742 
743 	ret = vhost_scsi_iov_to_sgl(cmd, write, data_iter,
744 				    cmd->tvc_sgl, cmd->tvc_sgl_count);
745 	if (ret < 0) {
746 		cmd->tvc_sgl_count = 0;
747 		return ret;
748 	}
749 	return 0;
750 }
751 
752 static int vhost_scsi_to_tcm_attr(int attr)
753 {
754 	switch (attr) {
755 	case VIRTIO_SCSI_S_SIMPLE:
756 		return TCM_SIMPLE_TAG;
757 	case VIRTIO_SCSI_S_ORDERED:
758 		return TCM_ORDERED_TAG;
759 	case VIRTIO_SCSI_S_HEAD:
760 		return TCM_HEAD_TAG;
761 	case VIRTIO_SCSI_S_ACA:
762 		return TCM_ACA_TAG;
763 	default:
764 		break;
765 	}
766 	return TCM_SIMPLE_TAG;
767 }
768 
769 static void vhost_scsi_target_queue_cmd(struct vhost_scsi_cmd *cmd)
770 {
771 	struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
772 	struct vhost_scsi_nexus *tv_nexus;
773 	struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
774 
775 	/* FIXME: BIDI operation */
776 	if (cmd->tvc_sgl_count) {
777 		sg_ptr = cmd->tvc_sgl;
778 
779 		if (cmd->tvc_prot_sgl_count)
780 			sg_prot_ptr = cmd->tvc_prot_sgl;
781 		else
782 			se_cmd->prot_pto = true;
783 	} else {
784 		sg_ptr = NULL;
785 	}
786 	tv_nexus = cmd->tvc_nexus;
787 
788 	se_cmd->tag = 0;
789 	target_init_cmd(se_cmd, tv_nexus->tvn_se_sess, &cmd->tvc_sense_buf[0],
790 			cmd->tvc_lun, cmd->tvc_exp_data_len,
791 			vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
792 			cmd->tvc_data_direction, TARGET_SCF_ACK_KREF);
793 
794 	if (target_submit_prep(se_cmd, cmd->tvc_cdb, sg_ptr,
795 			       cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
796 			       cmd->tvc_prot_sgl_count, GFP_KERNEL))
797 		return;
798 
799 	target_queue_submission(se_cmd);
800 }
801 
802 static void
803 vhost_scsi_send_bad_target(struct vhost_scsi *vs,
804 			   struct vhost_virtqueue *vq,
805 			   int head, unsigned out)
806 {
807 	struct virtio_scsi_cmd_resp __user *resp;
808 	struct virtio_scsi_cmd_resp rsp;
809 	int ret;
810 
811 	memset(&rsp, 0, sizeof(rsp));
812 	rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
813 	resp = vq->iov[out].iov_base;
814 	ret = __copy_to_user(resp, &rsp, sizeof(rsp));
815 	if (!ret)
816 		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
817 	else
818 		pr_err("Faulted on virtio_scsi_cmd_resp\n");
819 }
820 
821 static int
822 vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
823 		    struct vhost_scsi_ctx *vc)
824 {
825 	int ret = -ENXIO;
826 
827 	vc->head = vhost_get_vq_desc(vq, vq->iov,
828 				     ARRAY_SIZE(vq->iov), &vc->out, &vc->in,
829 				     NULL, NULL);
830 
831 	pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
832 		 vc->head, vc->out, vc->in);
833 
834 	/* On error, stop handling until the next kick. */
835 	if (unlikely(vc->head < 0))
836 		goto done;
837 
838 	/* Nothing new?  Wait for eventfd to tell us they refilled. */
839 	if (vc->head == vq->num) {
840 		if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
841 			vhost_disable_notify(&vs->dev, vq);
842 			ret = -EAGAIN;
843 		}
844 		goto done;
845 	}
846 
847 	/*
848 	 * Get the size of request and response buffers.
849 	 * FIXME: Not correct for BIDI operation
850 	 */
851 	vc->out_size = iov_length(vq->iov, vc->out);
852 	vc->in_size = iov_length(&vq->iov[vc->out], vc->in);
853 
854 	/*
855 	 * Copy over the virtio-scsi request header, which for an
856 	 * ANY_LAYOUT enabled guest may span multiple iovecs, or a
857 	 * single iovec may contain both the header + outgoing
858 	 * WRITE payloads.
859 	 *
860 	 * copy_from_iter() will advance out_iter, so that it will
861 	 * point at the start of the outgoing WRITE payload, if
862 	 * DMA_TO_DEVICE is set.
863 	 */
864 	iov_iter_init(&vc->out_iter, ITER_SOURCE, vq->iov, vc->out, vc->out_size);
865 	ret = 0;
866 
867 done:
868 	return ret;
869 }
870 
871 static int
872 vhost_scsi_chk_size(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc)
873 {
874 	if (unlikely(vc->in_size < vc->rsp_size)) {
875 		vq_err(vq,
876 		       "Response buf too small, need min %zu bytes got %zu",
877 		       vc->rsp_size, vc->in_size);
878 		return -EINVAL;
879 	} else if (unlikely(vc->out_size < vc->req_size)) {
880 		vq_err(vq,
881 		       "Request buf too small, need min %zu bytes got %zu",
882 		       vc->req_size, vc->out_size);
883 		return -EIO;
884 	}
885 
886 	return 0;
887 }
888 
889 static int
890 vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc,
891 		   struct vhost_scsi_tpg **tpgp)
892 {
893 	int ret = -EIO;
894 
895 	if (unlikely(!copy_from_iter_full(vc->req, vc->req_size,
896 					  &vc->out_iter))) {
897 		vq_err(vq, "Faulted on copy_from_iter_full\n");
898 	} else if (unlikely(*vc->lunp != 1)) {
899 		/* virtio-scsi spec requires byte 0 of the lun to be 1 */
900 		vq_err(vq, "Illegal virtio-scsi lun: %u\n", *vc->lunp);
901 	} else {
902 		struct vhost_scsi_tpg **vs_tpg, *tpg;
903 
904 		vs_tpg = vhost_vq_get_backend(vq);	/* validated at handler entry */
905 
906 		tpg = READ_ONCE(vs_tpg[*vc->target]);
907 		if (unlikely(!tpg)) {
908 			vq_err(vq, "Target 0x%x does not exist\n", *vc->target);
909 		} else {
910 			if (tpgp)
911 				*tpgp = tpg;
912 			ret = 0;
913 		}
914 	}
915 
916 	return ret;
917 }
918 
919 static u16 vhost_buf_to_lun(u8 *lun_buf)
920 {
921 	return ((lun_buf[2] << 8) | lun_buf[3]) & 0x3FFF;
922 }
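
/*
 * Example: for LUN 5 the guest encodes lun[2..3] as { 0x40, 0x05 }
 * (0x4000 ORed into the single-level LUN, per the virtio-scsi spec), so
 * ((0x40 << 8) | 0x05) & 0x3FFF == 5.
 */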
923 
924 static void
925 vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
926 {
927 	struct vhost_scsi_tpg **vs_tpg, *tpg;
928 	struct virtio_scsi_cmd_req v_req;
929 	struct virtio_scsi_cmd_req_pi v_req_pi;
930 	struct vhost_scsi_ctx vc;
931 	struct vhost_scsi_cmd *cmd;
932 	struct iov_iter in_iter, prot_iter, data_iter;
933 	u64 tag;
934 	u32 exp_data_len, data_direction;
935 	int ret, prot_bytes, i, c = 0;
936 	u16 lun;
937 	u8 task_attr;
938 	bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
939 	void *cdb;
940 
941 	mutex_lock(&vq->mutex);
942 	/*
943 	 * We can handle the vq only after the endpoint is setup by calling the
944 	 * VHOST_SCSI_SET_ENDPOINT ioctl.
945 	 */
946 	vs_tpg = vhost_vq_get_backend(vq);
947 	if (!vs_tpg)
948 		goto out;
949 
950 	memset(&vc, 0, sizeof(vc));
951 	vc.rsp_size = sizeof(struct virtio_scsi_cmd_resp);
952 
953 	vhost_disable_notify(&vs->dev, vq);
954 
955 	do {
956 		ret = vhost_scsi_get_desc(vs, vq, &vc);
957 		if (ret)
958 			goto err;
959 
960 		/*
961 		 * Set up pointers and values for the virtio-scsi request
962 		 * header, which differs when T10_PI is enabled in the guest.
963 		 */
964 		if (t10_pi) {
965 			vc.req = &v_req_pi;
966 			vc.req_size = sizeof(v_req_pi);
967 			vc.lunp = &v_req_pi.lun[0];
968 			vc.target = &v_req_pi.lun[1];
969 		} else {
970 			vc.req = &v_req;
971 			vc.req_size = sizeof(v_req);
972 			vc.lunp = &v_req.lun[0];
973 			vc.target = &v_req.lun[1];
974 		}
975 
976 		/*
977 		 * Validate the size of request and response buffers.
978 		 * Check for a sane response buffer so we can report
979 		 * early errors back to the guest.
980 		 */
981 		ret = vhost_scsi_chk_size(vq, &vc);
982 		if (ret)
983 			goto err;
984 
985 		ret = vhost_scsi_get_req(vq, &vc, &tpg);
986 		if (ret)
987 			goto err;
988 
989 		ret = -EIO;	/* bad target on any error from here on */
990 
991 		/*
992 		 * Determine data_direction by calculating the total outgoing
993 		 * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
994 		 * response headers respectively.
995 		 *
996 		 * For DMA_TO_DEVICE this is out_iter, which is already pointing
997 		 * to the right place.
998 		 *
999 		 * For DMA_FROM_DEVICE, the iovec will be just past the end
1000 		 * of the virtio-scsi response header in either the same
1001 		 * or immediately following iovec.
1002 		 *
1003 		 * Any associated T10_PI bytes for the outgoing / incoming
1004 		 * payloads are included in calculation of exp_data_len here.
1005 		 */
1006 		prot_bytes = 0;
1007 
1008 		if (vc.out_size > vc.req_size) {
1009 			data_direction = DMA_TO_DEVICE;
1010 			exp_data_len = vc.out_size - vc.req_size;
1011 			data_iter = vc.out_iter;
1012 		} else if (vc.in_size > vc.rsp_size) {
1013 			data_direction = DMA_FROM_DEVICE;
1014 			exp_data_len = vc.in_size - vc.rsp_size;
1015 
1016 			iov_iter_init(&in_iter, ITER_DEST, &vq->iov[vc.out], vc.in,
1017 				      vc.rsp_size + exp_data_len);
1018 			iov_iter_advance(&in_iter, vc.rsp_size);
1019 			data_iter = in_iter;
1020 		} else {
1021 			data_direction = DMA_NONE;
1022 			exp_data_len = 0;
1023 		}
1024 		/*
1025 		 * If T10_PI header + payload is present, setup prot_iter values
1026 		 * and recalculate data_iter for vhost_scsi_mapal() mapping to
1027 		 * host scatterlists via iov_iter_get_pages2().
1028 		 */
1029 		if (t10_pi) {
1030 			if (v_req_pi.pi_bytesout) {
1031 				if (data_direction != DMA_TO_DEVICE) {
1032 					vq_err(vq, "Received non zero pi_bytesout,"
1033 						" but wrong data_direction\n");
1034 					goto err;
1035 				}
1036 				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
1037 			} else if (v_req_pi.pi_bytesin) {
1038 				if (data_direction != DMA_FROM_DEVICE) {
1039 					vq_err(vq, "Received non zero pi_bytesin,"
1040 						" but wrong data_direction\n");
1041 					goto err;
1042 				}
1043 				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
1044 			}
1045 			/*
1046 			 * Set prot_iter to data_iter and truncate it to
1047 			 * prot_bytes, and advance data_iter past any
1048 		 * preceding prot_bytes that may be present.
1049 			 *
1050 			 * Also fix up the exp_data_len to reflect only the
1051 			 * actual data payload length.
1052 			 */
1053 			if (prot_bytes) {
1054 				exp_data_len -= prot_bytes;
1055 				prot_iter = data_iter;
1056 				iov_iter_truncate(&prot_iter, prot_bytes);
1057 				iov_iter_advance(&data_iter, prot_bytes);
1058 			}
1059 			tag = vhost64_to_cpu(vq, v_req_pi.tag);
1060 			task_attr = v_req_pi.task_attr;
1061 			cdb = &v_req_pi.cdb[0];
1062 			lun = vhost_buf_to_lun(v_req_pi.lun);
1063 		} else {
1064 			tag = vhost64_to_cpu(vq, v_req.tag);
1065 			task_attr = v_req.task_attr;
1066 			cdb = &v_req.cdb[0];
1067 			lun = vhost_buf_to_lun(v_req.lun);
1068 		}
1069 		/*
1070 		 * Check that the received CDB size does not exceed our
1071 		 * hardcoded max for vhost-scsi, then get a pre-allocated
1072 		 * cmd descriptor for the new virtio-scsi tag.
1073 		 *
1074 		 * TODO what if cdb was too small for varlen cdb header?
1075 		 */
1076 		if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
1077 			vq_err(vq, "Received SCSI CDB with command_size: %d that"
1078 				" exceeds VHOST_SCSI_MAX_CDB_SIZE: %d\n",
1079 				scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
1080 			goto err;
1081 		}
1082 		cmd = vhost_scsi_get_cmd(vq, tpg, cdb, tag, lun, task_attr,
1083 					 exp_data_len + prot_bytes,
1084 					 data_direction);
1085 		if (IS_ERR(cmd)) {
1086 			vq_err(vq, "vhost_scsi_get_cmd failed %ld\n",
1087 			       PTR_ERR(cmd));
1088 			goto err;
1089 		}
1090 		cmd->tvc_vhost = vs;
1091 		cmd->tvc_vq = vq;
1092 		for (i = 0; i < vc.in; i++)
1093 			cmd->tvc_resp_iov[i] = vq->iov[vc.out + i];
1094 		cmd->tvc_in_iovs = vc.in;
1095 
1096 		pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
1097 			 cmd->tvc_cdb[0], cmd->tvc_lun);
1098 		pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
1099 			 " %d\n", cmd, exp_data_len, prot_bytes, data_direction);
1100 
1101 		if (data_direction != DMA_NONE) {
1102 			if (unlikely(vhost_scsi_mapal(cmd, prot_bytes,
1103 						      &prot_iter, exp_data_len,
1104 						      &data_iter))) {
1105 				vq_err(vq, "Failed to map iov to sgl\n");
1106 				vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
1107 				goto err;
1108 			}
1109 		}
1110 		/*
1111 		 * Save the descriptor from vhost_get_vq_desc() to be used to
1112 		 * complete the virtio-scsi request in TCM callback context via
1113 		 * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
1114 		 */
1115 		cmd->tvc_vq_desc = vc.head;
1116 		vhost_scsi_target_queue_cmd(cmd);
1117 		ret = 0;
1118 err:
1119 		/*
1120 		 * ENXIO:  No more requests, or read error, wait for next kick
1121 		 * EINVAL: Invalid response buffer, drop the request
1122 		 * EIO:    Respond with bad target
1123 		 * EAGAIN: Pending request
1124 		 */
1125 		if (ret == -ENXIO)
1126 			break;
1127 		else if (ret == -EIO)
1128 			vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
1129 	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
1130 out:
1131 	mutex_unlock(&vq->mutex);
1132 }
1133 
1134 static void
1135 vhost_scsi_send_tmf_resp(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
1136 			 int in_iovs, int vq_desc, struct iovec *resp_iov,
1137 			 int tmf_resp_code)
1138 {
1139 	struct virtio_scsi_ctrl_tmf_resp rsp;
1140 	struct iov_iter iov_iter;
1141 	int ret;
1142 
1143 	pr_debug("%s\n", __func__);
1144 	memset(&rsp, 0, sizeof(rsp));
1145 	rsp.response = tmf_resp_code;
1146 
1147 	iov_iter_init(&iov_iter, ITER_DEST, resp_iov, in_iovs, sizeof(rsp));
1148 
1149 	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
1150 	if (likely(ret == sizeof(rsp)))
1151 		vhost_add_used_and_signal(&vs->dev, vq, vq_desc, 0);
1152 	else
1153 		pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n");
1154 }
1155 
1156 static void vhost_scsi_tmf_resp_work(struct vhost_work *work)
1157 {
1158 	struct vhost_scsi_tmf *tmf = container_of(work, struct vhost_scsi_tmf,
1159 						  vwork);
1160 	int resp_code;
1161 
1162 	if (tmf->scsi_resp == TMR_FUNCTION_COMPLETE)
1163 		resp_code = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
1164 	else
1165 		resp_code = VIRTIO_SCSI_S_FUNCTION_REJECTED;
1166 
1167 	vhost_scsi_send_tmf_resp(tmf->vhost, &tmf->svq->vq, tmf->in_iovs,
1168 				 tmf->vq_desc, &tmf->resp_iov, resp_code);
1169 	vhost_scsi_release_tmf_res(tmf);
1170 }
1171 
1172 static void
1173 vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg,
1174 		      struct vhost_virtqueue *vq,
1175 		      struct virtio_scsi_ctrl_tmf_req *vtmf,
1176 		      struct vhost_scsi_ctx *vc)
1177 {
1178 	struct vhost_scsi_virtqueue *svq = container_of(vq,
1179 					struct vhost_scsi_virtqueue, vq);
1180 	struct vhost_scsi_tmf *tmf;
1181 
1182 	if (vhost32_to_cpu(vq, vtmf->subtype) !=
1183 	    VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET)
1184 		goto send_reject;
1185 
1186 	if (!tpg->tpg_nexus || !tpg->tpg_nexus->tvn_se_sess) {
1187 		pr_err("Unable to locate active struct vhost_scsi_nexus for LUN RESET.\n");
1188 		goto send_reject;
1189 	}
1190 
1191 	tmf = kzalloc(sizeof(*tmf), GFP_KERNEL);
1192 	if (!tmf)
1193 		goto send_reject;
1194 
1195 	vhost_work_init(&tmf->vwork, vhost_scsi_tmf_resp_work);
1196 	tmf->vhost = vs;
1197 	tmf->svq = svq;
1198 	tmf->resp_iov = vq->iov[vc->out];
1199 	tmf->vq_desc = vc->head;
1200 	tmf->in_iovs = vc->in;
1201 	tmf->inflight = vhost_scsi_get_inflight(vq);
1202 
1203 	if (target_submit_tmr(&tmf->se_cmd, tpg->tpg_nexus->tvn_se_sess, NULL,
1204 			      vhost_buf_to_lun(vtmf->lun), NULL,
1205 			      TMR_LUN_RESET, GFP_KERNEL, 0,
1206 			      TARGET_SCF_ACK_KREF) < 0) {
1207 		vhost_scsi_release_tmf_res(tmf);
1208 		goto send_reject;
1209 	}
1210 
1211 	return;
1212 
1213 send_reject:
1214 	vhost_scsi_send_tmf_resp(vs, vq, vc->in, vc->head, &vq->iov[vc->out],
1215 				 VIRTIO_SCSI_S_FUNCTION_REJECTED);
1216 }
1217 
1218 static void
1219 vhost_scsi_send_an_resp(struct vhost_scsi *vs,
1220 			struct vhost_virtqueue *vq,
1221 			struct vhost_scsi_ctx *vc)
1222 {
1223 	struct virtio_scsi_ctrl_an_resp rsp;
1224 	struct iov_iter iov_iter;
1225 	int ret;
1226 
1227 	pr_debug("%s\n", __func__);
1228 	memset(&rsp, 0, sizeof(rsp));	/* event_actual = 0 */
1229 	rsp.response = VIRTIO_SCSI_S_OK;
1230 
1231 	iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[vc->out], vc->in, sizeof(rsp));
1232 
1233 	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
1234 	if (likely(ret == sizeof(rsp)))
1235 		vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
1236 	else
1237 		pr_err("Faulted on virtio_scsi_ctrl_an_resp\n");
1238 }
1239 
1240 static void
1241 vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1242 {
1243 	struct vhost_scsi_tpg *tpg;
1244 	union {
1245 		__virtio32 type;
1246 		struct virtio_scsi_ctrl_an_req an;
1247 		struct virtio_scsi_ctrl_tmf_req tmf;
1248 	} v_req;
1249 	struct vhost_scsi_ctx vc;
1250 	size_t typ_size;
1251 	int ret, c = 0;
1252 
1253 	mutex_lock(&vq->mutex);
1254 	/*
1255 	 * We can handle the vq only after the endpoint is setup by calling the
1256 	 * VHOST_SCSI_SET_ENDPOINT ioctl.
1257 	 */
1258 	if (!vhost_vq_get_backend(vq))
1259 		goto out;
1260 
1261 	memset(&vc, 0, sizeof(vc));
1262 
1263 	vhost_disable_notify(&vs->dev, vq);
1264 
1265 	do {
1266 		ret = vhost_scsi_get_desc(vs, vq, &vc);
1267 		if (ret)
1268 			goto err;
1269 
1270 		/*
1271 		 * Get the request type first in order to set up
1272 		 * other parameters dependent on the type.
1273 		 */
1274 		vc.req = &v_req.type;
1275 		typ_size = sizeof(v_req.type);
1276 
1277 		if (unlikely(!copy_from_iter_full(vc.req, typ_size,
1278 						  &vc.out_iter))) {
1279 			vq_err(vq, "Faulted on copy_from_iter tmf type\n");
1280 			/*
1281 			 * The size of the response buffer depends on the
1282 			 * request type and must be validated against it.
1283 			 * Since the request type is not known, don't send
1284 			 * a response.
1285 			 */
1286 			continue;
1287 		}
1288 
1289 		switch (vhost32_to_cpu(vq, v_req.type)) {
1290 		case VIRTIO_SCSI_T_TMF:
1291 			vc.req = &v_req.tmf;
1292 			vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req);
1293 			vc.rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp);
1294 			vc.lunp = &v_req.tmf.lun[0];
1295 			vc.target = &v_req.tmf.lun[1];
1296 			break;
1297 		case VIRTIO_SCSI_T_AN_QUERY:
1298 		case VIRTIO_SCSI_T_AN_SUBSCRIBE:
1299 			vc.req = &v_req.an;
1300 			vc.req_size = sizeof(struct virtio_scsi_ctrl_an_req);
1301 			vc.rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp);
1302 			vc.lunp = &v_req.an.lun[0];
1303 			vc.target = NULL;
1304 			break;
1305 		default:
1306 			vq_err(vq, "Unknown control request %d\n", vhost32_to_cpu(vq, v_req.type));
1307 			continue;
1308 		}
1309 
1310 		/*
1311 		 * Validate the size of request and response buffers.
1312 		 * Check for a sane response buffer so we can report
1313 		 * early errors back to the guest.
1314 		 */
1315 		ret = vhost_scsi_chk_size(vq, &vc);
1316 		if (ret)
1317 			goto err;
1318 
1319 		/*
1320 		 * Get the rest of the request now that its size is known.
1321 		 */
1322 		vc.req += typ_size;
1323 		vc.req_size -= typ_size;
1324 
1325 		ret = vhost_scsi_get_req(vq, &vc, &tpg);
1326 		if (ret)
1327 			goto err;
1328 
1329 		if (vhost32_to_cpu(vq, v_req.type) == VIRTIO_SCSI_T_TMF)
1330 			vhost_scsi_handle_tmf(vs, tpg, vq, &v_req.tmf, &vc);
1331 		else
1332 			vhost_scsi_send_an_resp(vs, vq, &vc);
1333 err:
1334 		/*
1335 		 * ENXIO:  No more requests, or read error, wait for next kick
1336 		 * EINVAL: Invalid response buffer, drop the request
1337 		 * EIO:    Respond with bad target
1338 		 * EAGAIN: Pending request
1339 		 */
1340 		if (ret == -ENXIO)
1341 			break;
1342 		else if (ret == -EIO)
1343 			vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
1344 	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
1345 out:
1346 	mutex_unlock(&vq->mutex);
1347 }
1348 
1349 static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
1350 {
1351 	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1352 						poll.work);
1353 	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1354 
1355 	pr_debug("%s: handling control queue kick\n", __func__);
1356 	vhost_scsi_ctl_handle_vq(vs, vq);
1357 }
1358 
1359 static void
1360 vhost_scsi_send_evt(struct vhost_scsi *vs,
1361 		   struct vhost_scsi_tpg *tpg,
1362 		   struct se_lun *lun,
1363 		   u32 event,
1364 		   u32 reason)
1365 {
1366 	struct vhost_scsi_evt *evt;
1367 
1368 	evt = vhost_scsi_allocate_evt(vs, event, reason);
1369 	if (!evt)
1370 		return;
1371 
1372 	if (tpg && lun) {
1373 		/* TODO: share lun setup code with virtio-scsi.ko */
1374 		/*
1375 		 * Note: evt->event is zeroed when we allocate it and
1376 		 * lun[4-7] need to be zero according to virtio-scsi spec.
1377 		 */
1378 		evt->event.lun[0] = 0x01;
1379 		evt->event.lun[1] = tpg->tport_tpgt;
1380 		if (lun->unpacked_lun >= 256)
1381 			evt->event.lun[2] = (lun->unpacked_lun >> 8) | 0x40;
1382 		evt->event.lun[3] = lun->unpacked_lun & 0xFF;
1383 	}
1384 
1385 	llist_add(&evt->list, &vs->vs_event_list);
1386 	vhost_work_queue(&vs->dev, &vs->vs_event_work);
1387 }
1388 
1389 static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
1390 {
1391 	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1392 						poll.work);
1393 	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1394 
1395 	mutex_lock(&vq->mutex);
1396 	if (!vhost_vq_get_backend(vq))
1397 		goto out;
1398 
1399 	if (vs->vs_events_missed)
1400 		vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
1401 out:
1402 	mutex_unlock(&vq->mutex);
1403 }
1404 
1405 static void vhost_scsi_handle_kick(struct vhost_work *work)
1406 {
1407 	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1408 						poll.work);
1409 	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1410 
1411 	vhost_scsi_handle_vq(vs, vq);
1412 }
1413 
1414 /* Callers must hold dev mutex */
1415 static void vhost_scsi_flush(struct vhost_scsi *vs)
1416 {
1417 	int i;
1418 
1419 	/* Init new inflight and remember the old inflight */
1420 	vhost_scsi_init_inflight(vs, vs->old_inflight);
1421 
1422 	/*
1423 	 * The inflight->kref was initialized to 1. We decrement it here to
1424 	 * indicate the start of the flush operation so that it will reach 0
1425 	 * when all the reqs are finished.
1426 	 */
1427 	for (i = 0; i < vs->dev.nvqs; i++)
1428 		kref_put(&vs->old_inflight[i]->kref, vhost_scsi_done_inflight);
1429 
1430 	/* Flush both the vhost poll and vhost work */
1431 	vhost_dev_flush(&vs->dev);
1432 
1433 	/* Wait for all reqs issued before the flush to be finished */
1434 	for (i = 0; i < vs->dev.nvqs; i++)
1435 		wait_for_completion(&vs->old_inflight[i]->comp);
1436 }
1437 
1438 static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq)
1439 {
1440 	struct vhost_scsi_virtqueue *svq = container_of(vq,
1441 					struct vhost_scsi_virtqueue, vq);
1442 	struct vhost_scsi_cmd *tv_cmd;
1443 	unsigned int i;
1444 
1445 	if (!svq->scsi_cmds)
1446 		return;
1447 
1448 	for (i = 0; i < svq->max_cmds; i++) {
1449 		tv_cmd = &svq->scsi_cmds[i];
1450 
1451 		kfree(tv_cmd->tvc_sgl);
1452 		kfree(tv_cmd->tvc_prot_sgl);
1453 		kfree(tv_cmd->tvc_upages);
1454 		kfree(tv_cmd->tvc_resp_iov);
1455 	}
1456 
1457 	sbitmap_free(&svq->scsi_tags);
1458 	kfree(svq->scsi_cmds);
1459 	svq->scsi_cmds = NULL;
1460 }
1461 
1462 static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds)
1463 {
1464 	struct vhost_scsi_virtqueue *svq = container_of(vq,
1465 					struct vhost_scsi_virtqueue, vq);
1466 	struct vhost_scsi_cmd *tv_cmd;
1467 	unsigned int i;
1468 
1469 	if (svq->scsi_cmds)
1470 		return 0;
1471 
1472 	if (sbitmap_init_node(&svq->scsi_tags, max_cmds, -1, GFP_KERNEL,
1473 			      NUMA_NO_NODE, false, true))
1474 		return -ENOMEM;
1475 	svq->max_cmds = max_cmds;
1476 
1477 	svq->scsi_cmds = kcalloc(max_cmds, sizeof(*tv_cmd), GFP_KERNEL);
1478 	if (!svq->scsi_cmds) {
1479 		sbitmap_free(&svq->scsi_tags);
1480 		return -ENOMEM;
1481 	}
1482 
1483 	for (i = 0; i < max_cmds; i++) {
1484 		tv_cmd = &svq->scsi_cmds[i];
1485 
1486 		tv_cmd->tvc_sgl = kcalloc(VHOST_SCSI_PREALLOC_SGLS,
1487 					  sizeof(struct scatterlist),
1488 					  GFP_KERNEL);
1489 		if (!tv_cmd->tvc_sgl) {
1490 			pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
1491 			goto out;
1492 		}
1493 
1494 		tv_cmd->tvc_upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES,
1495 					     sizeof(struct page *),
1496 					     GFP_KERNEL);
1497 		if (!tv_cmd->tvc_upages) {
1498 			pr_err("Unable to allocate tv_cmd->tvc_upages\n");
1499 			goto out;
1500 		}
1501 
1502 		tv_cmd->tvc_resp_iov = kcalloc(UIO_MAXIOV,
1503 					       sizeof(struct iovec),
1504 					       GFP_KERNEL);
1505 		if (!tv_cmd->tvc_resp_iov) {
1506 			pr_err("Unable to allocate tv_cmd->tvc_resp_iov\n");
1507 			goto out;
1508 		}
1509 
1510 		tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
1511 					       sizeof(struct scatterlist),
1512 					       GFP_KERNEL);
1513 		if (!tv_cmd->tvc_prot_sgl) {
1514 			pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
1515 			goto out;
1516 		}
1517 	}
1518 	return 0;
1519 out:
1520 	vhost_scsi_destroy_vq_cmds(vq);
1521 	return -ENOMEM;
1522 }
1523 
1524 /*
1525  * Called from vhost_scsi_ioctl() context to walk the list of available
1526  * vhost_scsi_tpg with an active struct vhost_scsi_nexus
1527  *
1528  *  The lock nesting rule is:
1529  *    vhost_scsi_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
1530  */
1531 static int
1532 vhost_scsi_set_endpoint(struct vhost_scsi *vs,
1533 			struct vhost_scsi_target *t)
1534 {
1535 	struct se_portal_group *se_tpg;
1536 	struct vhost_scsi_tport *tv_tport;
1537 	struct vhost_scsi_tpg *tpg;
1538 	struct vhost_scsi_tpg **vs_tpg;
1539 	struct vhost_virtqueue *vq;
1540 	int index, ret, i, len;
1541 	bool match = false;
1542 
1543 	mutex_lock(&vhost_scsi_mutex);
1544 	mutex_lock(&vs->dev.mutex);
1545 
1546 	/* Verify that ring has been setup correctly. */
1547 	for (index = 0; index < vs->dev.nvqs; ++index) {
1549 		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1550 			ret = -EFAULT;
1551 			goto out;
1552 		}
1553 	}
1554 
1555 	len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
1556 	vs_tpg = kzalloc(len, GFP_KERNEL);
1557 	if (!vs_tpg) {
1558 		ret = -ENOMEM;
1559 		goto out;
1560 	}
1561 	if (vs->vs_tpg)
1562 		memcpy(vs_tpg, vs->vs_tpg, len);
1563 
1564 	list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
1565 		mutex_lock(&tpg->tv_tpg_mutex);
1566 		if (!tpg->tpg_nexus) {
1567 			mutex_unlock(&tpg->tv_tpg_mutex);
1568 			continue;
1569 		}
1570 		if (tpg->tv_tpg_vhost_count != 0) {
1571 			mutex_unlock(&tpg->tv_tpg_mutex);
1572 			continue;
1573 		}
1574 		tv_tport = tpg->tport;
1575 
1576 		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1577 			if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
1578 				mutex_unlock(&tpg->tv_tpg_mutex);
1579 				ret = -EEXIST;
1580 				goto undepend;
1581 			}
1582 			/*
1583 			 * In order to ensure individual vhost-scsi configfs
1584 			 * groups cannot be removed while in use by vhost ioctl,
1585 			 * go ahead and take an explicit se_tpg->tpg_group.cg_item
1586 			 * dependency now.
1587 			 */
1588 			se_tpg = &tpg->se_tpg;
1589 			ret = target_depend_item(&se_tpg->tpg_group.cg_item);
1590 			if (ret) {
1591 				pr_warn("target_depend_item() failed: %d\n", ret);
1592 				mutex_unlock(&tpg->tv_tpg_mutex);
1593 				goto undepend;
1594 			}
1595 			tpg->tv_tpg_vhost_count++;
1596 			tpg->vhost_scsi = vs;
1597 			vs_tpg[tpg->tport_tpgt] = tpg;
1598 			match = true;
1599 		}
1600 		mutex_unlock(&tpg->tv_tpg_mutex);
1601 	}
1602 
1603 	if (match) {
1604 		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
1605 		       sizeof(vs->vs_vhost_wwpn));
1606 
1607 		for (i = VHOST_SCSI_VQ_IO; i < vs->dev.nvqs; i++) {
1608 			vq = &vs->vqs[i].vq;
1609 			if (!vhost_vq_is_setup(vq))
1610 				continue;
1611 
1612 			ret = vhost_scsi_setup_vq_cmds(vq, vq->num);
1613 			if (ret)
1614 				goto destroy_vq_cmds;
1615 		}
1616 
1617 		for (i = 0; i < vs->dev.nvqs; i++) {
1618 			vq = &vs->vqs[i].vq;
1619 			mutex_lock(&vq->mutex);
1620 			vhost_vq_set_backend(vq, vs_tpg);
1621 			vhost_vq_init_access(vq);
1622 			mutex_unlock(&vq->mutex);
1623 		}
1624 		ret = 0;
1625 	} else {
1626 		ret = -EEXIST;
1627 	}
1628 
1629 	/*
1630 	 * Act as synchronize_rcu to make sure access to
1631 	 * old vs->vs_tpg is finished.
1632 	 */
1633 	vhost_scsi_flush(vs);
1634 	kfree(vs->vs_tpg);
1635 	vs->vs_tpg = vs_tpg;
1636 	goto out;
1637 
1638 destroy_vq_cmds:
1639 	for (i--; i >= VHOST_SCSI_VQ_IO; i--) {
1640 		if (!vhost_vq_get_backend(&vs->vqs[i].vq))
1641 			vhost_scsi_destroy_vq_cmds(&vs->vqs[i].vq);
1642 	}
1643 undepend:
1644 	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
1645 		tpg = vs_tpg[i];
1646 		if (tpg) {
1647 			mutex_lock(&tpg->tv_tpg_mutex);
1648 			tpg->vhost_scsi = NULL;
1649 			tpg->tv_tpg_vhost_count--;
1650 			mutex_unlock(&tpg->tv_tpg_mutex);
1651 			target_undepend_item(&tpg->se_tpg.tpg_group.cg_item);
1652 		}
1653 	}
1654 	kfree(vs_tpg);
1655 out:
1656 	mutex_unlock(&vs->dev.mutex);
1657 	mutex_unlock(&vhost_scsi_mutex);
1658 	return ret;
1659 }
1660 
1661 static int
1662 vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
1663 			  struct vhost_scsi_target *t)
1664 {
1665 	struct se_portal_group *se_tpg;
1666 	struct vhost_scsi_tport *tv_tport;
1667 	struct vhost_scsi_tpg *tpg;
1668 	struct vhost_virtqueue *vq;
1669 	bool match = false;
1670 	int index, ret, i;
1671 	u8 target;
1672 
1673 	mutex_lock(&vhost_scsi_mutex);
1674 	mutex_lock(&vs->dev.mutex);
1675 	/* Verify that ring has been setup correctly. */
1676 	for (index = 0; index < vs->dev.nvqs; ++index) {
1677 		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1678 			ret = -EFAULT;
1679 			goto err_dev;
1680 		}
1681 	}
1682 
1683 	if (!vs->vs_tpg) {
1684 		ret = 0;
1685 		goto err_dev;
1686 	}
1687 
1688 	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
1689 		target = i;
1690 		tpg = vs->vs_tpg[target];
1691 		if (!tpg)
1692 			continue;
1693 
1694 		mutex_lock(&tpg->tv_tpg_mutex);
1695 		tv_tport = tpg->tport;
1696 		if (!tv_tport) {
1697 			ret = -ENODEV;
1698 			goto err_tpg;
1699 		}
1700 
1701 		if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1702 			pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
1703 				" does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
1704 				tv_tport->tport_name, tpg->tport_tpgt,
1705 				t->vhost_wwpn, t->vhost_tpgt);
1706 			ret = -EINVAL;
1707 			goto err_tpg;
1708 		}
1709 		tpg->tv_tpg_vhost_count--;
1710 		tpg->vhost_scsi = NULL;
1711 		vs->vs_tpg[target] = NULL;
1712 		match = true;
1713 		mutex_unlock(&tpg->tv_tpg_mutex);
1714 		/*
1715 		 * Release se_tpg->tpg_group.cg_item configfs dependency now
1716 		 * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
1717 		 */
1718 		se_tpg = &tpg->se_tpg;
1719 		target_undepend_item(&se_tpg->tpg_group.cg_item);
1720 	}
1721 	if (match) {
1722 		for (i = 0; i < vs->dev.nvqs; i++) {
1723 			vq = &vs->vqs[i].vq;
1724 			mutex_lock(&vq->mutex);
1725 			vhost_vq_set_backend(vq, NULL);
1726 			mutex_unlock(&vq->mutex);
1727 		}
1728 		/* Make sure cmds are not running before tearing them down. */
1729 		vhost_scsi_flush(vs);
1730 
1731 		for (i = 0; i < vs->dev.nvqs; i++) {
1732 			vq = &vs->vqs[i].vq;
1733 			vhost_scsi_destroy_vq_cmds(vq);
1734 		}
1735 	}
1736 	/*
1737 	 * Act as synchronize_rcu to make sure access to
1738 	 * old vs->vs_tpg is finished.
1739 	 */
1740 	vhost_scsi_flush(vs);
1741 	kfree(vs->vs_tpg);
1742 	vs->vs_tpg = NULL;
1743 	WARN_ON(vs->vs_events_nr);
1744 	mutex_unlock(&vs->dev.mutex);
1745 	mutex_unlock(&vhost_scsi_mutex);
1746 	return 0;
1747 
1748 err_tpg:
1749 	mutex_unlock(&tpg->tv_tpg_mutex);
1750 err_dev:
1751 	mutex_unlock(&vs->dev.mutex);
1752 	mutex_unlock(&vhost_scsi_mutex);
1753 	return ret;
1754 }
1755 
1756 static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
1757 {
1758 	struct vhost_virtqueue *vq;
1759 	int i;
1760 
1761 	if (features & ~VHOST_SCSI_FEATURES)
1762 		return -EOPNOTSUPP;
1763 
1764 	mutex_lock(&vs->dev.mutex);
1765 	if ((features & (1 << VHOST_F_LOG_ALL)) &&
1766 	    !vhost_log_access_ok(&vs->dev)) {
1767 		mutex_unlock(&vs->dev.mutex);
1768 		return -EFAULT;
1769 	}
1770 
1771 	for (i = 0; i < vs->dev.nvqs; i++) {
1772 		vq = &vs->vqs[i].vq;
1773 		mutex_lock(&vq->mutex);
1774 		vq->acked_features = features;
1775 		mutex_unlock(&vq->mutex);
1776 	}
1777 	mutex_unlock(&vs->dev.mutex);
1778 	return 0;
1779 }
1780 
1781 static int vhost_scsi_open(struct inode *inode, struct file *f)
1782 {
1783 	struct vhost_scsi *vs;
1784 	struct vhost_virtqueue **vqs;
1785 	int r = -ENOMEM, i, nvqs = vhost_scsi_max_io_vqs;
1786 
1787 	vs = kvzalloc(sizeof(*vs), GFP_KERNEL);
1788 	if (!vs)
1789 		goto err_vs;
1790 
1791 	if (nvqs > VHOST_SCSI_MAX_IO_VQ) {
1792 		pr_err("Invalid max_io_vqs of %d. Using %d.\n", nvqs,
1793 		       VHOST_SCSI_MAX_IO_VQ);
1794 		nvqs = VHOST_SCSI_MAX_IO_VQ;
1795 	} else if (nvqs == 0) {
1796 		pr_err("Invalid max_io_vqs of %d. Using 1.\n", nvqs);
1797 		nvqs = 1;
1798 	}
1799 	nvqs += VHOST_SCSI_VQ_IO;
1800 
1801 	vs->compl_bitmap = bitmap_alloc(nvqs, GFP_KERNEL);
1802 	if (!vs->compl_bitmap)
1803 		goto err_compl_bitmap;
1804 
1805 	vs->old_inflight = kmalloc_array(nvqs, sizeof(*vs->old_inflight),
1806 					 GFP_KERNEL | __GFP_ZERO);
1807 	if (!vs->old_inflight)
1808 		goto err_inflight;
1809 
1810 	vs->vqs = kmalloc_array(nvqs, sizeof(*vs->vqs),
1811 				GFP_KERNEL | __GFP_ZERO);
1812 	if (!vs->vqs)
1813 		goto err_vqs;
1814 
1815 	vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
1816 	if (!vqs)
1817 		goto err_local_vqs;
1818 
1819 	vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
1820 	vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);
1821 
1822 	vs->vs_events_nr = 0;
1823 	vs->vs_events_missed = false;
1824 
1825 	vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
1826 	vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1827 	vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
1828 	vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
1829 	for (i = VHOST_SCSI_VQ_IO; i < nvqs; i++) {
1830 		vqs[i] = &vs->vqs[i].vq;
1831 		vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
1832 	}
1833 	vhost_dev_init(&vs->dev, vqs, nvqs, UIO_MAXIOV,
1834 		       VHOST_SCSI_WEIGHT, 0, true, NULL);
1835 
1836 	vhost_scsi_init_inflight(vs, NULL);
1837 
1838 	f->private_data = vs;
1839 	return 0;
1840 
1841 err_local_vqs:
1842 	kfree(vs->vqs);
1843 err_vqs:
1844 	kfree(vs->old_inflight);
1845 err_inflight:
1846 	bitmap_free(vs->compl_bitmap);
1847 err_compl_bitmap:
1848 	kvfree(vs);
1849 err_vs:
1850 	return r;
1851 }
1852 
1853 static int vhost_scsi_release(struct inode *inode, struct file *f)
1854 {
1855 	struct vhost_scsi *vs = f->private_data;
1856 	struct vhost_scsi_target t;
1857 
1858 	mutex_lock(&vs->dev.mutex);
1859 	memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
1860 	mutex_unlock(&vs->dev.mutex);
1861 	vhost_scsi_clear_endpoint(vs, &t);
1862 	vhost_dev_stop(&vs->dev);
1863 	vhost_dev_cleanup(&vs->dev);
1864 	kfree(vs->dev.vqs);
1865 	kfree(vs->vqs);
1866 	kfree(vs->old_inflight);
1867 	bitmap_free(vs->compl_bitmap);
1868 	kvfree(vs);
1869 	return 0;
1870 }
1871 
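/*
 * Illustrative sketch of the ioctl sequence a VMM would issue (not
 * actual VMM code; the WWPN is made up and error handling is omitted):
 *
 *	int vhost_fd = open("/dev/vhost-scsi", O_RDWR);
 *	struct vhost_scsi_target t = {
 *		.vhost_wwpn = "naa.600140554cf3a18e",
 *		.vhost_tpgt = 1,
 *	};
 *
 *	ioctl(vhost_fd, VHOST_SET_OWNER);
 *	ioctl(vhost_fd, VHOST_SCSI_SET_ENDPOINT, &t);
 *	...
 *	ioctl(vhost_fd, VHOST_SCSI_CLEAR_ENDPOINT, &t);
 */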
1872 static long
1873 vhost_scsi_ioctl(struct file *f,
1874 		 unsigned int ioctl,
1875 		 unsigned long arg)
1876 {
1877 	struct vhost_scsi *vs = f->private_data;
1878 	struct vhost_scsi_target backend;
1879 	void __user *argp = (void __user *)arg;
1880 	u64 __user *featurep = argp;
1881 	u32 __user *eventsp = argp;
1882 	u32 events_missed;
1883 	u64 features;
1884 	int r, abi_version = VHOST_SCSI_ABI_VERSION;
1885 	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1886 
1887 	switch (ioctl) {
1888 	case VHOST_SCSI_SET_ENDPOINT:
1889 		if (copy_from_user(&backend, argp, sizeof backend))
1890 			return -EFAULT;
1891 		if (backend.reserved != 0)
1892 			return -EOPNOTSUPP;
1893 
1894 		return vhost_scsi_set_endpoint(vs, &backend);
1895 	case VHOST_SCSI_CLEAR_ENDPOINT:
1896 		if (copy_from_user(&backend, argp, sizeof backend))
1897 			return -EFAULT;
1898 		if (backend.reserved != 0)
1899 			return -EOPNOTSUPP;
1900 
1901 		return vhost_scsi_clear_endpoint(vs, &backend);
1902 	case VHOST_SCSI_GET_ABI_VERSION:
1903 		if (copy_to_user(argp, &abi_version, sizeof abi_version))
1904 			return -EFAULT;
1905 		return 0;
1906 	case VHOST_SCSI_SET_EVENTS_MISSED:
1907 		if (get_user(events_missed, eventsp))
1908 			return -EFAULT;
1909 		mutex_lock(&vq->mutex);
1910 		vs->vs_events_missed = events_missed;
1911 		mutex_unlock(&vq->mutex);
1912 		return 0;
1913 	case VHOST_SCSI_GET_EVENTS_MISSED:
1914 		mutex_lock(&vq->mutex);
1915 		events_missed = vs->vs_events_missed;
1916 		mutex_unlock(&vq->mutex);
1917 		if (put_user(events_missed, eventsp))
1918 			return -EFAULT;
1919 		return 0;
1920 	case VHOST_GET_FEATURES:
1921 		features = VHOST_SCSI_FEATURES;
1922 		if (copy_to_user(featurep, &features, sizeof features))
1923 			return -EFAULT;
1924 		return 0;
1925 	case VHOST_SET_FEATURES:
1926 		if (copy_from_user(&features, featurep, sizeof features))
1927 			return -EFAULT;
1928 		return vhost_scsi_set_features(vs, features);
1929 	default:
1930 		mutex_lock(&vs->dev.mutex);
1931 		r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
1932 		/* TODO: flush backend after dev ioctl. */
1933 		if (r == -ENOIOCTLCMD)
1934 			r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
1935 		mutex_unlock(&vs->dev.mutex);
1936 		return r;
1937 	}
1938 }
1939 
1940 static const struct file_operations vhost_scsi_fops = {
1941 	.owner          = THIS_MODULE,
1942 	.release        = vhost_scsi_release,
1943 	.unlocked_ioctl = vhost_scsi_ioctl,
1944 	.compat_ioctl	= compat_ptr_ioctl,
1945 	.open           = vhost_scsi_open,
1946 	.llseek		= noop_llseek,
1947 };
1948 
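/*
 * Registers the "vhost-scsi" misc device; with udev this typically
 * appears as /dev/vhost-scsi with a dynamically assigned minor.
 */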
1949 static struct miscdevice vhost_scsi_misc = {
1950 	MISC_DYNAMIC_MINOR,
1951 	"vhost-scsi",
1952 	&vhost_scsi_fops,
1953 };
1954 
1955 static int __init vhost_scsi_register(void)
1956 {
1957 	return misc_register(&vhost_scsi_misc);
1958 }
1959 
1960 static void vhost_scsi_deregister(void)
1961 {
1962 	misc_deregister(&vhost_scsi_misc);
1963 }
1964 
1965 static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
1966 {
1967 	switch (tport->tport_proto_id) {
1968 	case SCSI_PROTOCOL_SAS:
1969 		return "SAS";
1970 	case SCSI_PROTOCOL_FCP:
1971 		return "FCP";
1972 	case SCSI_PROTOCOL_ISCSI:
1973 		return "iSCSI";
1974 	default:
1975 		break;
1976 	}
1977 
1978 	return "Unknown";
1979 }
1980 
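/*
 * Queue a VIRTIO_SCSI_T_TRANSPORT_RESET event so the guest rescans
 * (plug) or removes (unplug) the LUN. The event is only sent when the
 * guest negotiated VIRTIO_SCSI_F_HOTPLUG; otherwise the change stays
 * invisible until the guest rescans on its own.
 */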
1981 static void
1982 vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
1983 		  struct se_lun *lun, bool plug)
1984 {
1986 	struct vhost_scsi *vs = tpg->vhost_scsi;
1987 	struct vhost_virtqueue *vq;
1988 	u32 reason;
1989 
1990 	if (!vs)
1991 		return;
1992 
1993 	mutex_lock(&vs->dev.mutex);
1994 
1995 	if (plug)
1996 		reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
1997 	else
1998 		reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
1999 
2000 	vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
2001 	mutex_lock(&vq->mutex);
2002 	if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
2003 		vhost_scsi_send_evt(vs, tpg, lun,
2004 				   VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
2005 	mutex_unlock(&vq->mutex);
2006 	mutex_unlock(&vs->dev.mutex);
2007 }
2008 
2009 static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
2010 {
2011 	vhost_scsi_do_plug(tpg, lun, true);
2012 }
2013 
2014 static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
2015 {
2016 	vhost_scsi_do_plug(tpg, lun, false);
2017 }
2018 
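/*
 * Called when a backstore LUN is linked under this TPG in configfs.
 * Illustrative shell sketch (paths assume the standard target configfs
 * layout; the backstore name is made up):
 *
 *	ln -s /sys/kernel/config/target/core/iblock_0/disk0 \
 *	    /sys/kernel/config/target/vhost/$WWPN/tpgt_1/lun/lun_0
 */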
2019 static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
2020 			       struct se_lun *lun)
2021 {
2022 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2023 				struct vhost_scsi_tpg, se_tpg);
2024 
2025 	mutex_lock(&vhost_scsi_mutex);
2026 
2027 	mutex_lock(&tpg->tv_tpg_mutex);
2028 	tpg->tv_tpg_port_count++;
2029 	mutex_unlock(&tpg->tv_tpg_mutex);
2030 
2031 	vhost_scsi_hotplug(tpg, lun);
2032 
2033 	mutex_unlock(&vhost_scsi_mutex);
2034 
2035 	return 0;
2036 }
2037 
2038 static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
2039 				  struct se_lun *lun)
2040 {
2041 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2042 				struct vhost_scsi_tpg, se_tpg);
2043 
2044 	mutex_lock(&vhost_scsi_mutex);
2045 
2046 	mutex_lock(&tpg->tv_tpg_mutex);
2047 	tpg->tv_tpg_port_count--;
2048 	mutex_unlock(&tpg->tv_tpg_mutex);
2049 
2050 	vhost_scsi_hotunplug(tpg, lun);
2051 
2052 	mutex_unlock(&vhost_scsi_mutex);
2053 }
2054 
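/*
 * The accepted values appear to mirror the T10-PI DIF types (0 = off,
 * 1 = TYPE1, 3 = TYPE3; TYPE2 is not supported). Illustrative configfs
 * usage:
 *
 *	echo 1 > /sys/kernel/config/target/vhost/$WWPN/tpgt_1/attrib/fabric_prot_type
 */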
2055 static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_store(
2056 		struct config_item *item, const char *page, size_t count)
2057 {
2058 	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2059 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2060 				struct vhost_scsi_tpg, se_tpg);
2061 	unsigned long val;
2062 	int ret = kstrtoul(page, 0, &val);
2063 
2064 	if (ret) {
2065 		pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
2066 		return ret;
2067 	}
2068 	if (val != 0 && val != 1 && val != 3) {
2069 		pr_err("Invalid vhost_scsi fabric_prot_type: %lu\n", val);
2070 		return -EINVAL;
2071 	}
2072 	tpg->tv_fabric_prot_type = val;
2073 
2074 	return count;
2075 }
2076 
2077 static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_show(
2078 		struct config_item *item, char *page)
2079 {
2080 	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2081 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2082 				struct vhost_scsi_tpg, se_tpg);
2083 
2084 	return sysfs_emit(page, "%d\n", tpg->tv_fabric_prot_type);
2085 }
2086 
2087 CONFIGFS_ATTR(vhost_scsi_tpg_attrib_, fabric_prot_type);
2088 
2089 static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
2090 	&vhost_scsi_tpg_attrib_attr_fabric_prot_type,
2091 	NULL,
2092 };
2093 
2094 static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
2095 				const char *name)
2096 {
2097 	struct vhost_scsi_nexus *tv_nexus;
2098 
2099 	mutex_lock(&tpg->tv_tpg_mutex);
2100 	if (tpg->tpg_nexus) {
2101 		mutex_unlock(&tpg->tv_tpg_mutex);
2102 		pr_debug("tpg->tpg_nexus already exists\n");
2103 		return -EEXIST;
2104 	}
2105 
2106 	tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL);
2107 	if (!tv_nexus) {
2108 		mutex_unlock(&tpg->tv_tpg_mutex);
2109 		pr_err("Unable to allocate struct vhost_scsi_nexus\n");
2110 		return -ENOMEM;
2111 	}
2112 	/*
2113 	 * Since we are running in 'demo mode' this call will generate a
2114 	 * struct se_node_acl for the vhost_scsi struct se_portal_group with
2115 	 * the SCSI Initiator port name of the passed configfs group 'name'.
2116 	 */
2117 	tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg, 0, 0,
2118 					TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
2119 					(unsigned char *)name, tv_nexus, NULL);
2120 	if (IS_ERR(tv_nexus->tvn_se_sess)) {
2121 		mutex_unlock(&tpg->tv_tpg_mutex);
2122 		kfree(tv_nexus);
2123 		return -ENOMEM;
2124 	}
2125 	tpg->tpg_nexus = tv_nexus;
2126 
2127 	mutex_unlock(&tpg->tv_tpg_mutex);
2128 	return 0;
2129 }
2130 
2131 static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
2132 {
2133 	struct se_session *se_sess;
2134 	struct vhost_scsi_nexus *tv_nexus;
2135 
2136 	mutex_lock(&tpg->tv_tpg_mutex);
2137 	tv_nexus = tpg->tpg_nexus;
2138 	if (!tv_nexus) {
2139 		mutex_unlock(&tpg->tv_tpg_mutex);
2140 		return -ENODEV;
2141 	}
2142 
2143 	se_sess = tv_nexus->tvn_se_sess;
2144 	if (!se_sess) {
2145 		mutex_unlock(&tpg->tv_tpg_mutex);
2146 		return -ENODEV;
2147 	}
2148 
2149 	if (tpg->tv_tpg_port_count != 0) {
2150 		mutex_unlock(&tpg->tv_tpg_mutex);
2151 		pr_err("Unable to remove TCM_vhost I_T Nexus with"
2152 			" active TPG port count: %d\n",
2153 			tpg->tv_tpg_port_count);
2154 		return -EBUSY;
2155 	}
2156 
2157 	if (tpg->tv_tpg_vhost_count != 0) {
2158 		mutex_unlock(&tpg->tv_tpg_mutex);
2159 		pr_err("Unable to remove TCM_vhost I_T Nexus with"
2160 			" active TPG vhost count: %d\n",
2161 			tpg->tv_tpg_vhost_count);
2162 		return -EBUSY;
2163 	}
2164 
2165 	pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
2166 		" %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
2167 		tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
2168 
2169 	/*
2170 	 * Release the SCSI I_T Nexus to the emulated vhost Target Port
2171 	 */
2172 	target_remove_session(se_sess);
2173 	tpg->tpg_nexus = NULL;
2174 	mutex_unlock(&tpg->tv_tpg_mutex);
2175 
2176 	kfree(tv_nexus);
2177 	return 0;
2178 }
2179 
2180 static ssize_t vhost_scsi_tpg_nexus_show(struct config_item *item, char *page)
2181 {
2182 	struct se_portal_group *se_tpg = to_tpg(item);
2183 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2184 				struct vhost_scsi_tpg, se_tpg);
2185 	struct vhost_scsi_nexus *tv_nexus;
2186 	ssize_t ret;
2187 
2188 	mutex_lock(&tpg->tv_tpg_mutex);
2189 	tv_nexus = tpg->tpg_nexus;
2190 	if (!tv_nexus) {
2191 		mutex_unlock(&tpg->tv_tpg_mutex);
2192 		return -ENODEV;
2193 	}
2194 	ret = sysfs_emit(page, "%s\n",
2195 			tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
2196 	mutex_unlock(&tpg->tv_tpg_mutex);
2197 
2198 	return ret;
2199 }
2200 
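/*
 * Illustrative configfs sketch for establishing the I_T nexus (the
 * initiator WWN is made up; its prefix must match the target port's
 * protocol, see the checks below):
 *
 *	echo -n naa.60014054cf3a18e1 > \
 *	    /sys/kernel/config/target/vhost/$WWPN/tpgt_1/nexus
 */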
2201 static ssize_t vhost_scsi_tpg_nexus_store(struct config_item *item,
2202 		const char *page, size_t count)
2203 {
2204 	struct se_portal_group *se_tpg = to_tpg(item);
2205 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2206 				struct vhost_scsi_tpg, se_tpg);
2207 	struct vhost_scsi_tport *tport_wwn = tpg->tport;
2208 	unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
2209 	int ret;
2210 	/*
2211 	 * Shutdown the active I_T nexus if 'NULL' is passed..
2212 	 */
2213 	if (!strncmp(page, "NULL", 4)) {
2214 		ret = vhost_scsi_drop_nexus(tpg);
2215 		return (!ret) ? count : ret;
2216 	}
2217 	/*
2218 	 * Otherwise make sure the passed virtual Initiator port WWN matches
2219 	 * the fabric protocol_id set in vhost_scsi_make_tport(), and call
2220 	 * vhost_scsi_make_nexus().
2221 	 */
2222 	if (strlen(page) >= VHOST_SCSI_NAMELEN) {
2223 		pr_err("Emulated NAA Sas Address: %s, exceeds"
2224 				" max: %d\n", page, VHOST_SCSI_NAMELEN);
2225 		return -EINVAL;
2226 	}
2227 	snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);
2228 
2229 	ptr = strstr(i_port, "naa.");
2230 	if (ptr) {
2231 		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
2232 			pr_err("Passed SAS Initiator Port %s does not"
2233 				" match target port protoid: %s\n", i_port,
2234 				vhost_scsi_dump_proto_id(tport_wwn));
2235 			return -EINVAL;
2236 		}
2237 		port_ptr = &i_port[0];
2238 		goto check_newline;
2239 	}
2240 	ptr = strstr(i_port, "fc.");
2241 	if (ptr) {
2242 		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
2243 			pr_err("Passed FCP Initiator Port %s does not"
2244 				" match target port protoid: %s\n", i_port,
2245 				vhost_scsi_dump_proto_id(tport_wwn));
2246 			return -EINVAL;
2247 		}
2248 		port_ptr = &i_port[3]; /* Skip over "fc." */
2249 		goto check_newline;
2250 	}
2251 	ptr = strstr(i_port, "iqn.");
2252 	if (ptr) {
2253 		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
2254 			pr_err("Passed iSCSI Initiator Port %s does not"
2255 				" match target port protoid: %s\n", i_port,
2256 				vhost_scsi_dump_proto_id(tport_wwn));
2257 			return -EINVAL;
2258 		}
2259 		port_ptr = &i_port[0];
2260 		goto check_newline;
2261 	}
2262 	pr_err("Unable to locate prefix for emulated Initiator Port:"
2263 			" %s\n", i_port);
2264 	return -EINVAL;
2265 	/*
2266 	 * Clear any trailing newline for the NAA WWN
2267 	 */
2268 check_newline:
2269 	if (i_port[strlen(i_port)-1] == '\n')
2270 		i_port[strlen(i_port)-1] = '\0';
2271 
2272 	ret = vhost_scsi_make_nexus(tpg, port_ptr);
2273 	if (ret < 0)
2274 		return ret;
2275 
2276 	return count;
2277 }
2278 
2279 CONFIGFS_ATTR(vhost_scsi_tpg_, nexus);
2280 
2281 static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
2282 	&vhost_scsi_tpg_attr_nexus,
2283 	NULL,
2284 };
2285 
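/*
 * Invoked for mkdir of a tpgt_<N> directory, for example (illustrative,
 * WWPN made up):
 *
 *	mkdir /sys/kernel/config/target/vhost/naa.600140554cf3a18e/tpgt_1
 *
 * <N> must parse as a base-10 u16 below VHOST_SCSI_MAX_TARGET.
 */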
2286 static struct se_portal_group *
2287 vhost_scsi_make_tpg(struct se_wwn *wwn, const char *name)
2288 {
2289 	struct vhost_scsi_tport *tport = container_of(wwn,
2290 			struct vhost_scsi_tport, tport_wwn);
2291 
2292 	struct vhost_scsi_tpg *tpg;
2293 	u16 tpgt;
2294 	int ret;
2295 
2296 	if (strstr(name, "tpgt_") != name)
2297 		return ERR_PTR(-EINVAL);
2298 	if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
2299 		return ERR_PTR(-EINVAL);
2300 
2301 	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
2302 	if (!tpg) {
2303 		pr_err("Unable to allocate struct vhost_scsi_tpg");
2304 		return ERR_PTR(-ENOMEM);
2305 	}
2306 	mutex_init(&tpg->tv_tpg_mutex);
2307 	INIT_LIST_HEAD(&tpg->tv_tpg_list);
2308 	tpg->tport = tport;
2309 	tpg->tport_tpgt = tpgt;
2310 
2311 	ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id);
2312 	if (ret < 0) {
2313 		kfree(tpg);
2314 		return ERR_PTR(ret);
2315 	}
2316 	mutex_lock(&vhost_scsi_mutex);
2317 	list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
2318 	mutex_unlock(&vhost_scsi_mutex);
2319 
2320 	return &tpg->se_tpg;
2321 }
2322 
2323 static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg)
2324 {
2325 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2326 				struct vhost_scsi_tpg, se_tpg);
2327 
2328 	mutex_lock(&vhost_scsi_mutex);
2329 	list_del(&tpg->tv_tpg_list);
2330 	mutex_unlock(&vhost_scsi_mutex);
2331 	/*
2332 	 * Release the virtual I_T Nexus for this vhost TPG
2333 	 */
2334 	vhost_scsi_drop_nexus(tpg);
2335 	/*
2336 	 * Deregister the se_tpg from TCM..
2337 	 */
2338 	core_tpg_deregister(se_tpg);
2339 	kfree(tpg);
2340 }
2341 
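/*
 * Invoked for mkdir of a WWN directory under the fabric; the name's
 * prefix selects the protocol, for example (illustrative, WWNs made up):
 *
 *	mkdir .../target/vhost/naa.600140554cf3a18e       -> SAS
 *	mkdir .../target/vhost/fc.20000000c9f0e1d2        -> FCP
 *	mkdir .../target/vhost/iqn.2015-01.org.example:t1 -> iSCSI
 */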
2342 static struct se_wwn *
2343 vhost_scsi_make_tport(struct target_fabric_configfs *tf,
2344 		     struct config_group *group,
2345 		     const char *name)
2346 {
2347 	struct vhost_scsi_tport *tport;
2348 	char *ptr;
2349 	u64 wwpn = 0;
2350 	int off = 0;
2351 
2352 	/* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
2353 		return ERR_PTR(-EINVAL); */
2354 
2355 	tport = kzalloc(sizeof(*tport), GFP_KERNEL);
2356 	if (!tport) {
2357 		pr_err("Unable to allocate struct vhost_scsi_tport");
2358 		return ERR_PTR(-ENOMEM);
2359 	}
2360 	tport->tport_wwpn = wwpn;
2361 	/*
2362 	 * Determine the emulated Protocol Identifier and Target Port Name
2363 	 * based on the incoming configfs directory name.
2364 	 */
2365 	ptr = strstr(name, "naa.");
2366 	if (ptr) {
2367 		tport->tport_proto_id = SCSI_PROTOCOL_SAS;
2368 		goto check_len;
2369 	}
2370 	ptr = strstr(name, "fc.");
2371 	if (ptr) {
2372 		tport->tport_proto_id = SCSI_PROTOCOL_FCP;
2373 		off = 3; /* Skip over "fc." */
2374 		goto check_len;
2375 	}
2376 	ptr = strstr(name, "iqn.");
2377 	if (ptr) {
2378 		tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
2379 		goto check_len;
2380 	}
2381 
2382 	pr_err("Unable to locate prefix for emulated Target Port:"
2383 			" %s\n", name);
2384 	kfree(tport);
2385 	return ERR_PTR(-EINVAL);
2386 
2387 check_len:
2388 	if (strlen(name) >= VHOST_SCSI_NAMELEN) {
2389 		pr_err("Emulated %s Address: %s, exceeds"
2390 			" max: %d\n", name, vhost_scsi_dump_proto_id(tport),
2391 			VHOST_SCSI_NAMELEN);
2392 		kfree(tport);
2393 		return ERR_PTR(-EINVAL);
2394 	}
2395 	snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]);
2396 
2397 	pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
2398 		" %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name);
2399 
2400 	return &tport->tport_wwn;
2401 }
2402 
2403 static void vhost_scsi_drop_tport(struct se_wwn *wwn)
2404 {
2405 	struct vhost_scsi_tport *tport = container_of(wwn,
2406 				struct vhost_scsi_tport, tport_wwn);
2407 
2408 	pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
2409 		" %s Address: %s\n", vhost_scsi_dump_proto_id(tport),
2410 		tport->tport_name);
2411 
2412 	kfree(tport);
2413 }
2414 
2415 static ssize_t
2416 vhost_scsi_wwn_version_show(struct config_item *item, char *page)
2417 {
2418 	return sysfs_emit(page, "TCM_VHOST fabric module %s on %s/%s"
2419 		"on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2420 		utsname()->machine);
2421 }
2422 
2423 CONFIGFS_ATTR_RO(vhost_scsi_wwn_, version);
2424 
2425 static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
2426 	&vhost_scsi_wwn_attr_version,
2427 	NULL,
2428 };
2429 
2430 static const struct target_core_fabric_ops vhost_scsi_ops = {
2431 	.module				= THIS_MODULE,
2432 	.fabric_name			= "vhost",
2433 	.max_data_sg_nents		= VHOST_SCSI_PREALLOC_SGLS,
2434 	.tpg_get_wwn			= vhost_scsi_get_fabric_wwn,
2435 	.tpg_get_tag			= vhost_scsi_get_tpgt,
2436 	.tpg_check_demo_mode		= vhost_scsi_check_true,
2437 	.tpg_check_demo_mode_cache	= vhost_scsi_check_true,
2438 	.tpg_check_demo_mode_write_protect = vhost_scsi_check_false,
2439 	.tpg_check_prod_mode_write_protect = vhost_scsi_check_false,
2440 	.tpg_check_prot_fabric_only	= vhost_scsi_check_prot_fabric_only,
2441 	.tpg_get_inst_index		= vhost_scsi_tpg_get_inst_index,
2442 	.release_cmd			= vhost_scsi_release_cmd,
2443 	.check_stop_free		= vhost_scsi_check_stop_free,
2444 	.sess_get_index			= vhost_scsi_sess_get_index,
2445 	.sess_get_initiator_sid		= NULL,
2446 	.write_pending			= vhost_scsi_write_pending,
2447 	.set_default_node_attributes	= vhost_scsi_set_default_node_attrs,
2448 	.get_cmd_state			= vhost_scsi_get_cmd_state,
2449 	.queue_data_in			= vhost_scsi_queue_data_in,
2450 	.queue_status			= vhost_scsi_queue_status,
2451 	.queue_tm_rsp			= vhost_scsi_queue_tm_rsp,
2452 	.aborted_task			= vhost_scsi_aborted_task,
2453 	/*
2454 	 * Setup callers for generic logic in target_core_fabric_configfs.c
2455 	 */
2456 	.fabric_make_wwn		= vhost_scsi_make_tport,
2457 	.fabric_drop_wwn		= vhost_scsi_drop_tport,
2458 	.fabric_make_tpg		= vhost_scsi_make_tpg,
2459 	.fabric_drop_tpg		= vhost_scsi_drop_tpg,
2460 	.fabric_post_link		= vhost_scsi_port_link,
2461 	.fabric_pre_unlink		= vhost_scsi_port_unlink,
2462 
2463 	.tfc_wwn_attrs			= vhost_scsi_wwn_attrs,
2464 	.tfc_tpg_base_attrs		= vhost_scsi_tpg_attrs,
2465 	.tfc_tpg_attrib_attrs		= vhost_scsi_tpg_attrib_attrs,
2466 };
2467 
2468 static int __init vhost_scsi_init(void)
2469 {
2470 	int ret;
2471 
2472 	pr_debug("TCM_VHOST fabric module %s on %s/%s"
2473 		" on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2474 		utsname()->machine);
2475 
2476 	ret = vhost_scsi_register();
2477 	if (ret < 0)
2478 		goto out;
2479 
2480 	ret = target_register_template(&vhost_scsi_ops);
2481 	if (ret < 0)
2482 		goto out_vhost_scsi_deregister;
2483 
2484 	return 0;
2485 
2486 out_vhost_scsi_deregister:
2487 	vhost_scsi_deregister();
2488 out:
2489 	return ret;
2490 }
2491 
2492 static void vhost_scsi_exit(void)
2493 {
2494 	target_unregister_template(&vhost_scsi_ops);
2495 	vhost_scsi_deregister();
2496 }
2497 
2498 MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
2499 MODULE_ALIAS("tcm_vhost");
2500 MODULE_LICENSE("GPL");
2501 module_init(vhost_scsi_init);
2502 module_exit(vhost_scsi_exit);
2503