xref: /openbmc/linux/drivers/vhost/scsi.c (revision 35267cea)
1 /*******************************************************************************
2  * Vhost kernel TCM fabric driver for virtio SCSI initiators
3  *
4  * (C) Copyright 2010-2013 Datera, Inc.
5  * (C) Copyright 2010-2012 IBM Corp.
6  *
7  * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
8  *
9  * Authors: Nicholas A. Bellinger <nab@daterainc.com>
10  *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License as published by
14  * the Free Software Foundation; either version 2 of the License, or
15  * (at your option) any later version.
16  *
17  * This program is distributed in the hope that it will be useful,
18  * but WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  * GNU General Public License for more details.
21  *
22  ****************************************************************************/
23 
24 #include <linux/module.h>
25 #include <linux/moduleparam.h>
26 #include <generated/utsrelease.h>
27 #include <linux/utsname.h>
28 #include <linux/init.h>
29 #include <linux/slab.h>
30 #include <linux/kthread.h>
31 #include <linux/types.h>
32 #include <linux/string.h>
33 #include <linux/configfs.h>
34 #include <linux/ctype.h>
35 #include <linux/compat.h>
36 #include <linux/eventfd.h>
37 #include <linux/fs.h>
38 #include <linux/vmalloc.h>
39 #include <linux/miscdevice.h>
40 #include <asm/unaligned.h>
41 #include <scsi/scsi_common.h>
42 #include <scsi/scsi_proto.h>
43 #include <target/target_core_base.h>
44 #include <target/target_core_fabric.h>
45 #include <linux/vhost.h>
46 #include <linux/virtio_scsi.h>
47 #include <linux/llist.h>
48 #include <linux/bitmap.h>
49 
50 #include "vhost.h"
51 
52 #define VHOST_SCSI_VERSION  "v0.1"
53 #define VHOST_SCSI_NAMELEN 256
54 #define VHOST_SCSI_MAX_CDB_SIZE 32
55 #define VHOST_SCSI_PREALLOC_SGLS 2048
56 #define VHOST_SCSI_PREALLOC_UPAGES 2048
57 #define VHOST_SCSI_PREALLOC_PROT_SGLS 2048
58 
59 /* Max number of requests before requeueing the job.
60  * Using this limit prevents one virtqueue from starving others with
61  * requests.
62  */
63 #define VHOST_SCSI_WEIGHT 256
64 
65 struct vhost_scsi_inflight {
66 	/* Wait for the flush operation to finish */
67 	struct completion comp;
68 	/* Refcount for the inflight reqs */
69 	struct kref kref;
70 };
71 
72 struct vhost_scsi_cmd {
73 	/* Descriptor from vhost_get_vq_desc() for virt_queue segment */
74 	int tvc_vq_desc;
75 	/* virtio-scsi initiator task attribute */
76 	int tvc_task_attr;
77 	/* virtio-scsi response incoming iovecs */
78 	int tvc_in_iovs;
79 	/* virtio-scsi initiator data direction */
80 	enum dma_data_direction tvc_data_direction;
81 	/* Expected data transfer length from virtio-scsi header */
82 	u32 tvc_exp_data_len;
83 	/* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
84 	u64 tvc_tag;
85 	/* The number of scatterlists associated with this cmd */
86 	u32 tvc_sgl_count;
87 	u32 tvc_prot_sgl_count;
88 	/* Saved unpacked SCSI LUN for vhost_scsi_target_queue_cmd() */
89 	u32 tvc_lun;
90 	/* Pointer to the SGL formatted memory from virtio-scsi */
91 	struct scatterlist *tvc_sgl;
92 	struct scatterlist *tvc_prot_sgl;
93 	struct page **tvc_upages;
94 	/* Copy of the response header iovec */
95 	struct iovec tvc_resp_iov;
96 	/* Pointer to vhost_scsi for our device */
97 	struct vhost_scsi *tvc_vhost;
98 	/* Pointer to vhost_virtqueue for the cmd */
99 	struct vhost_virtqueue *tvc_vq;
100 	/* Pointer to vhost nexus memory */
101 	struct vhost_scsi_nexus *tvc_nexus;
102 	/* The TCM I/O descriptor that is accessed via container_of() */
103 	struct se_cmd tvc_se_cmd;
104 	/* Copy of the incoming SCSI command descriptor block (CDB) */
105 	unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE];
106 	/* Sense buffer that will be mapped into outgoing status */
107 	unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
108 	/* Completed commands list, serviced from vhost worker thread */
109 	struct llist_node tvc_completion_list;
110 	/* Used to track inflight cmd */
111 	struct vhost_scsi_inflight *inflight;
112 };
113 
114 struct vhost_scsi_nexus {
115 	/* Pointer to TCM session for I_T Nexus */
116 	struct se_session *tvn_se_sess;
117 };
118 
119 struct vhost_scsi_tpg {
120 	/* Vhost port target portal group tag for TCM */
121 	u16 tport_tpgt;
122 	/* Used to track number of TPG Port/LUN links with regard to explicit I_T Nexus shutdown */
123 	int tv_tpg_port_count;
124 	/* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
125 	int tv_tpg_vhost_count;
126 	/* Used for enabling T10-PI with legacy devices */
127 	int tv_fabric_prot_type;
128 	/* list for vhost_scsi_list */
129 	struct list_head tv_tpg_list;
130 	/* Used to protect access for tpg_nexus */
131 	struct mutex tv_tpg_mutex;
132 	/* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
133 	struct vhost_scsi_nexus *tpg_nexus;
134 	/* Pointer back to vhost_scsi_tport */
135 	struct vhost_scsi_tport *tport;
136 	/* Returned by vhost_scsi_make_tpg() */
137 	struct se_portal_group se_tpg;
138 	/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
139 	struct vhost_scsi *vhost_scsi;
140 	struct list_head tmf_queue;
141 };
142 
143 struct vhost_scsi_tport {
144 	/* SCSI protocol the tport is providing */
145 	u8 tport_proto_id;
146 	/* Binary World Wide unique Port Name for Vhost Target port */
147 	u64 tport_wwpn;
148 	/* ASCII formatted WWPN for Vhost Target port */
149 	char tport_name[VHOST_SCSI_NAMELEN];
150 	/* Returned by vhost_scsi_make_tport() */
151 	struct se_wwn tport_wwn;
152 };
153 
154 struct vhost_scsi_evt {
155 	/* event to be sent to guest */
156 	struct virtio_scsi_event event;
157 	/* event list, serviced from vhost worker thread */
158 	struct llist_node list;
159 };
160 
161 enum {
162 	VHOST_SCSI_VQ_CTL = 0,
163 	VHOST_SCSI_VQ_EVT = 1,
164 	VHOST_SCSI_VQ_IO = 2,
165 };
166 
167 /* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
168 enum {
169 	VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
170 					       (1ULL << VIRTIO_SCSI_F_T10_PI)
171 };
172 
173 #define VHOST_SCSI_MAX_TARGET	256
174 #define VHOST_SCSI_MAX_VQ	128
175 #define VHOST_SCSI_MAX_EVENT	128
176 
177 struct vhost_scsi_virtqueue {
178 	struct vhost_virtqueue vq;
179 	/*
180 	 * Reference counting for inflight reqs, used for flush operation. At
181 	 * any time, one counter tracks newly submitted commands, while we
182 	 * wait for the other one to reach 0.
183 	 */
184 	struct vhost_scsi_inflight inflights[2];
185 	/*
186 	 * Indicate current inflight in use, protected by vq->mutex.
187 	 * Writers must also take dev mutex and flush under it.
188 	 */
189 	int inflight_idx;
190 	struct vhost_scsi_cmd *scsi_cmds;
191 	struct sbitmap scsi_tags;
192 	int max_cmds;
193 };
194 
195 struct vhost_scsi {
196 	/* Protected by vhost_scsi->dev.mutex */
197 	struct vhost_scsi_tpg **vs_tpg;
198 	char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
199 
200 	struct vhost_dev dev;
201 	struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];
202 
203 	struct vhost_work vs_completion_work; /* cmd completion work item */
204 	struct llist_head vs_completion_list; /* cmd completion queue */
205 
206 	struct vhost_work vs_event_work; /* evt injection work item */
207 	struct llist_head vs_event_list; /* evt injection queue */
208 
209 	bool vs_events_missed; /* any missed events, protected by vq->mutex */
210 	int vs_events_nr; /* num of pending events, protected by vq->mutex */
211 };
212 
213 struct vhost_scsi_tmf {
214 	struct vhost_work vwork;
215 	struct vhost_scsi_tpg *tpg;
216 	struct vhost_scsi *vhost;
217 	struct vhost_scsi_virtqueue *svq;
218 	struct list_head queue_entry;
219 
220 	struct se_cmd se_cmd;
221 	u8 scsi_resp;
222 	struct vhost_scsi_inflight *inflight;
223 	struct iovec resp_iov;
224 	int in_iovs;
225 	int vq_desc;
226 };
227 
228 /*
229  * Context for processing request and control queue operations.
230  */
231 struct vhost_scsi_ctx {
232 	int head;
233 	unsigned int out, in;
234 	size_t req_size, rsp_size;
235 	size_t out_size, in_size;
236 	u8 *target, *lunp;
237 	void *req;
238 	struct iov_iter out_iter;
239 };
240 
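/*
 * Typical flow shared by the request and control queue handlers below:
 *   vhost_scsi_get_desc() - pops a descriptor and fills head/out/in,
 *                           out_size/in_size and out_iter
 *   vhost_scsi_chk_size() - validates those sizes against req_size/rsp_size
 *   vhost_scsi_get_req()  - copies the request header from out_iter and
 *                           resolves the target tpg from the LUN bytes
 */
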
241 /* Global mutex to protect the vhost_scsi TPG list for vhost IOCTL access */
242 static DEFINE_MUTEX(vhost_scsi_mutex);
243 static LIST_HEAD(vhost_scsi_list);
244 
245 static void vhost_scsi_done_inflight(struct kref *kref)
246 {
247 	struct vhost_scsi_inflight *inflight;
248 
249 	inflight = container_of(kref, struct vhost_scsi_inflight, kref);
250 	complete(&inflight->comp);
251 }
252 
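/*
 * Each vq keeps two inflight counters. New commands take a reference on
 * the current one via vhost_scsi_get_inflight() and drop it on completion
 * via vhost_scsi_put_inflight(). vhost_scsi_flush() flips inflight_idx
 * here, drops the initial kref and waits on ->comp until every request
 * started on the old counter has finished.
 */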
253 static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
254 				    struct vhost_scsi_inflight *old_inflight[])
255 {
256 	struct vhost_scsi_inflight *new_inflight;
257 	struct vhost_virtqueue *vq;
258 	int idx, i;
259 
260 	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
261 		vq = &vs->vqs[i].vq;
262 
263 		mutex_lock(&vq->mutex);
264 
265 		/* store the old inflight */
266 		idx = vs->vqs[i].inflight_idx;
267 		if (old_inflight)
268 			old_inflight[i] = &vs->vqs[i].inflights[idx];
269 
270 		/* set up the new inflight */
271 		vs->vqs[i].inflight_idx = idx ^ 1;
272 		new_inflight = &vs->vqs[i].inflights[idx ^ 1];
273 		kref_init(&new_inflight->kref);
274 		init_completion(&new_inflight->comp);
275 
276 		mutex_unlock(&vq->mutex);
277 	}
278 }
279 
280 static struct vhost_scsi_inflight *
281 vhost_scsi_get_inflight(struct vhost_virtqueue *vq)
282 {
283 	struct vhost_scsi_inflight *inflight;
284 	struct vhost_scsi_virtqueue *svq;
285 
286 	svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
287 	inflight = &svq->inflights[svq->inflight_idx];
288 	kref_get(&inflight->kref);
289 
290 	return inflight;
291 }
292 
293 static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)
294 {
295 	kref_put(&inflight->kref, vhost_scsi_done_inflight);
296 }
297 
298 static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
299 {
300 	return 1;
301 }
302 
303 static int vhost_scsi_check_false(struct se_portal_group *se_tpg)
304 {
305 	return 0;
306 }
307 
308 static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
309 {
310 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
311 				struct vhost_scsi_tpg, se_tpg);
312 	struct vhost_scsi_tport *tport = tpg->tport;
313 
314 	return &tport->tport_name[0];
315 }
316 
317 static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
318 {
319 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
320 				struct vhost_scsi_tpg, se_tpg);
321 	return tpg->tport_tpgt;
322 }
323 
324 static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
325 {
326 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
327 				struct vhost_scsi_tpg, se_tpg);
328 
329 	return tpg->tv_fabric_prot_type;
330 }
331 
332 static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
333 {
334 	return 1;
335 }
336 
337 static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
338 {
339 	struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
340 				struct vhost_scsi_cmd, tvc_se_cmd);
341 	struct vhost_scsi_virtqueue *svq = container_of(tv_cmd->tvc_vq,
342 				struct vhost_scsi_virtqueue, vq);
343 	struct vhost_scsi_inflight *inflight = tv_cmd->inflight;
344 	int i;
345 
346 	if (tv_cmd->tvc_sgl_count) {
347 		for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
348 			put_page(sg_page(&tv_cmd->tvc_sgl[i]));
349 	}
350 	if (tv_cmd->tvc_prot_sgl_count) {
351 		for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
352 			put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
353 	}
354 
355 	sbitmap_clear_bit(&svq->scsi_tags, se_cmd->map_tag);
356 	vhost_scsi_put_inflight(inflight);
357 }
358 
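/*
 * TMF descriptors live on a per-tpg free list (tpg->tmf_queue):
 * vhost_scsi_handle_tmf() pops one to service a LUN RESET, and releasing
 * it here simply returns it to the tpg for reuse.
 */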
359 static void vhost_scsi_release_tmf_res(struct vhost_scsi_tmf *tmf)
360 {
361 	struct vhost_scsi_tpg *tpg = tmf->tpg;
362 	struct vhost_scsi_inflight *inflight = tmf->inflight;
363 
364 	mutex_lock(&tpg->tv_tpg_mutex);
365 	list_add_tail(&tmf->queue_entry, &tpg->tmf_queue);
366 	mutex_unlock(&tpg->tv_tpg_mutex);
367 	vhost_scsi_put_inflight(inflight);
368 }
369 
370 static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
371 {
372 	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
373 		struct vhost_scsi_tmf *tmf = container_of(se_cmd,
374 					struct vhost_scsi_tmf, se_cmd);
375 
376 		vhost_work_queue(&tmf->vhost->dev, &tmf->vwork);
377 	} else {
378 		struct vhost_scsi_cmd *cmd = container_of(se_cmd,
379 					struct vhost_scsi_cmd, tvc_se_cmd);
380 		struct vhost_scsi *vs = cmd->tvc_vhost;
381 
382 		llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
383 		vhost_work_queue(&vs->dev, &vs->vs_completion_work);
384 	}
385 }
386 
387 static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
388 {
389 	return 0;
390 }
391 
392 static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
393 {
394 	/* Go ahead and process the write immediately */
395 	target_execute_cmd(se_cmd);
396 	return 0;
397 }
398 
399 static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl)
400 {
401 	return;
402 }
403 
404 static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
405 {
406 	return 0;
407 }
408 
409 static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
410 {
411 	transport_generic_free_cmd(se_cmd, 0);
412 	return 0;
413 }
414 
415 static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
416 {
417 	transport_generic_free_cmd(se_cmd, 0);
418 	return 0;
419 }
420 
421 static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
422 {
423 	struct vhost_scsi_tmf *tmf = container_of(se_cmd, struct vhost_scsi_tmf,
424 						  se_cmd);
425 
426 	tmf->scsi_resp = se_cmd->se_tmr_req->response;
427 	transport_generic_free_cmd(&tmf->se_cmd, 0);
428 }
429 
430 static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
431 {
432 	return;
433 }
434 
435 static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
436 {
437 	vs->vs_events_nr--;
438 	kfree(evt);
439 }
440 
441 static struct vhost_scsi_evt *
442 vhost_scsi_allocate_evt(struct vhost_scsi *vs,
443 		       u32 event, u32 reason)
444 {
445 	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
446 	struct vhost_scsi_evt *evt;
447 
448 	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
449 		vs->vs_events_missed = true;
450 		return NULL;
451 	}
452 
453 	evt = kzalloc(sizeof(*evt), GFP_KERNEL);
454 	if (!evt) {
455 		vq_err(vq, "Failed to allocate vhost_scsi_evt\n");
456 		vs->vs_events_missed = true;
457 		return NULL;
458 	}
459 
460 	evt->event.event = cpu_to_vhost32(vq, event);
461 	evt->event.reason = cpu_to_vhost32(vq, reason);
462 	vs->vs_events_nr++;
463 
464 	return evt;
465 }
466 
467 static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
468 {
469 	return target_put_sess_cmd(se_cmd);
470 }
471 
472 static void
473 vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
474 {
475 	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
476 	struct virtio_scsi_event *event = &evt->event;
477 	struct virtio_scsi_event __user *eventp;
478 	unsigned out, in;
479 	int head, ret;
480 
481 	if (!vhost_vq_get_backend(vq)) {
482 		vs->vs_events_missed = true;
483 		return;
484 	}
485 
486 again:
487 	vhost_disable_notify(&vs->dev, vq);
488 	head = vhost_get_vq_desc(vq, vq->iov,
489 			ARRAY_SIZE(vq->iov), &out, &in,
490 			NULL, NULL);
491 	if (head < 0) {
492 		vs->vs_events_missed = true;
493 		return;
494 	}
495 	if (head == vq->num) {
496 		if (vhost_enable_notify(&vs->dev, vq))
497 			goto again;
498 		vs->vs_events_missed = true;
499 		return;
500 	}
501 
502 	if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
503 		vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
504 				vq->iov[out].iov_len);
505 		vs->vs_events_missed = true;
506 		return;
507 	}
508 
509 	if (vs->vs_events_missed) {
510 		event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
511 		vs->vs_events_missed = false;
512 	}
513 
514 	eventp = vq->iov[out].iov_base;
515 	ret = __copy_to_user(eventp, event, sizeof(*event));
516 	if (!ret)
517 		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
518 	else
519 		vq_err(vq, "Faulted on vhost_scsi_send_event\n");
520 }
521 
522 static void vhost_scsi_evt_work(struct vhost_work *work)
523 {
524 	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
525 					vs_event_work);
526 	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
527 	struct vhost_scsi_evt *evt, *t;
528 	struct llist_node *llnode;
529 
530 	mutex_lock(&vq->mutex);
531 	llnode = llist_del_all(&vs->vs_event_list);
532 	llist_for_each_entry_safe(evt, t, llnode, list) {
533 		vhost_scsi_do_evt_work(vs, evt);
534 		vhost_scsi_free_evt(vs, evt);
535 	}
536 	mutex_unlock(&vq->mutex);
537 }
538 
539 /* Fill in status and signal that we are done processing this command
540  *
541  * This is scheduled in the vhost work queue so we are called with the owner
542  * process mm and can access the vring.
543  */
544 static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
545 {
546 	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
547 					vs_completion_work);
548 	DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
549 	struct virtio_scsi_cmd_resp v_rsp;
550 	struct vhost_scsi_cmd *cmd, *t;
551 	struct llist_node *llnode;
552 	struct se_cmd *se_cmd;
553 	struct iov_iter iov_iter;
554 	int ret, vq;
555 
556 	bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
557 	llnode = llist_del_all(&vs->vs_completion_list);
558 	llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) {
559 		se_cmd = &cmd->tvc_se_cmd;
560 
561 		pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
562 			cmd, se_cmd->residual_count, se_cmd->scsi_status);
563 
564 		memset(&v_rsp, 0, sizeof(v_rsp));
565 		v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq, se_cmd->residual_count);
566 		/* TODO is status_qualifier field needed? */
567 		v_rsp.status = se_cmd->scsi_status;
568 		v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
569 						 se_cmd->scsi_sense_length);
570 		memcpy(v_rsp.sense, cmd->tvc_sense_buf,
571 		       se_cmd->scsi_sense_length);
572 
573 		iov_iter_init(&iov_iter, READ, &cmd->tvc_resp_iov,
574 			      cmd->tvc_in_iovs, sizeof(v_rsp));
575 		ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
576 		if (likely(ret == sizeof(v_rsp))) {
577 			struct vhost_scsi_virtqueue *q;
578 			vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
579 			q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
580 			vq = q - vs->vqs;
581 			__set_bit(vq, signal);
582 		} else
583 			pr_err("Faulted on virtio_scsi_cmd_resp\n");
584 
585 		vhost_scsi_release_cmd_res(se_cmd);
586 	}
587 
588 	vq = -1;
589 	while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
590 		< VHOST_SCSI_MAX_VQ)
591 		vhost_signal(&vs->dev, &vs->vqs[vq].vq);
592 }
593 
594 static struct vhost_scsi_cmd *
595 vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
596 		   unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
597 		   u32 exp_data_len, int data_direction)
598 {
599 	struct vhost_scsi_virtqueue *svq = container_of(vq,
600 					struct vhost_scsi_virtqueue, vq);
601 	struct vhost_scsi_cmd *cmd;
602 	struct vhost_scsi_nexus *tv_nexus;
603 	struct scatterlist *sg, *prot_sg;
604 	struct page **pages;
605 	int tag;
606 
607 	tv_nexus = tpg->tpg_nexus;
608 	if (!tv_nexus) {
609 		pr_err("Unable to locate active struct vhost_scsi_nexus\n");
610 		return ERR_PTR(-EIO);
611 	}
612 
613 	tag = sbitmap_get(&svq->scsi_tags);
614 	if (tag < 0) {
615 		pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
616 		return ERR_PTR(-ENOMEM);
617 	}
618 
619 	cmd = &svq->scsi_cmds[tag];
620 	sg = cmd->tvc_sgl;
621 	prot_sg = cmd->tvc_prot_sgl;
622 	pages = cmd->tvc_upages;
623 	memset(cmd, 0, sizeof(*cmd));
624 	cmd->tvc_sgl = sg;
625 	cmd->tvc_prot_sgl = prot_sg;
626 	cmd->tvc_upages = pages;
627 	cmd->tvc_se_cmd.map_tag = tag;
628 	cmd->tvc_tag = scsi_tag;
629 	cmd->tvc_lun = lun;
630 	cmd->tvc_task_attr = task_attr;
631 	cmd->tvc_exp_data_len = exp_data_len;
632 	cmd->tvc_data_direction = data_direction;
633 	cmd->tvc_nexus = tv_nexus;
634 	cmd->inflight = vhost_scsi_get_inflight(vq);
635 
636 	memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);
637 
638 	return cmd;
639 }
640 
641 /*
642  * Map a user memory range into a scatterlist
643  *
644  * Returns the number of scatterlist entries used or -errno on error.
645  */
646 static int
647 vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
648 		      struct iov_iter *iter,
649 		      struct scatterlist *sgl,
650 		      bool write)
651 {
652 	struct page **pages = cmd->tvc_upages;
653 	struct scatterlist *sg = sgl;
654 	ssize_t bytes;
655 	size_t offset;
656 	unsigned int npages = 0;
657 
658 	bytes = iov_iter_get_pages(iter, pages, LONG_MAX,
659 				VHOST_SCSI_PREALLOC_UPAGES, &offset);
660 	/* No pages were pinned */
661 	if (bytes <= 0)
662 		return bytes < 0 ? bytes : -EFAULT;
663 
664 	iov_iter_advance(iter, bytes);
665 
666 	while (bytes) {
667 		unsigned n = min_t(unsigned, PAGE_SIZE - offset, bytes);
668 		sg_set_page(sg++, pages[npages++], n, offset);
669 		bytes -= n;
670 		offset = 0;
671 	}
672 	return npages;
673 }
674 
675 static int
676 vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
677 {
678 	int sgl_count = 0;
679 
680 	if (!iter || !iter->iov) {
681 		pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
682 		       " present\n", __func__, bytes);
683 		return -EINVAL;
684 	}
685 
686 	sgl_count = iov_iter_npages(iter, 0xffff);
687 	if (sgl_count > max_sgls) {
688 		pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
689 		       " max_sgls: %d\n", __func__, sgl_count, max_sgls);
690 		return -EINVAL;
691 	}
692 	return sgl_count;
693 }
694 
695 static int
696 vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
697 		      struct iov_iter *iter,
698 		      struct scatterlist *sg, int sg_count)
699 {
700 	struct scatterlist *p = sg;
701 	int ret;
702 
703 	while (iov_iter_count(iter)) {
704 		ret = vhost_scsi_map_to_sgl(cmd, iter, sg, write);
705 		if (ret < 0) {
706 			while (p < sg) {
707 				struct page *page = sg_page(p++);
708 				if (page)
709 					put_page(page);
710 			}
711 			return ret;
712 		}
713 		sg += ret;
714 	}
715 	return 0;
716 }
717 
718 static int
719 vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
720 		 size_t prot_bytes, struct iov_iter *prot_iter,
721 		 size_t data_bytes, struct iov_iter *data_iter)
722 {
723 	int sgl_count, ret;
724 	bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE);
725 
726 	if (prot_bytes) {
727 		sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
728 						 VHOST_SCSI_PREALLOC_PROT_SGLS);
729 		if (sgl_count < 0)
730 			return sgl_count;
731 
732 		sg_init_table(cmd->tvc_prot_sgl, sgl_count);
733 		cmd->tvc_prot_sgl_count = sgl_count;
734 		pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
735 			 cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);
736 
737 		ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iter,
738 					    cmd->tvc_prot_sgl,
739 					    cmd->tvc_prot_sgl_count);
740 		if (ret < 0) {
741 			cmd->tvc_prot_sgl_count = 0;
742 			return ret;
743 		}
744 	}
745 	sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
746 					 VHOST_SCSI_PREALLOC_SGLS);
747 	if (sgl_count < 0)
748 		return sgl_count;
749 
750 	sg_init_table(cmd->tvc_sgl, sgl_count);
751 	cmd->tvc_sgl_count = sgl_count;
752 	pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
753 		  cmd->tvc_sgl, cmd->tvc_sgl_count);
754 
755 	ret = vhost_scsi_iov_to_sgl(cmd, write, data_iter,
756 				    cmd->tvc_sgl, cmd->tvc_sgl_count);
757 	if (ret < 0) {
758 		cmd->tvc_sgl_count = 0;
759 		return ret;
760 	}
761 	return 0;
762 }
763 
764 static int vhost_scsi_to_tcm_attr(int attr)
765 {
766 	switch (attr) {
767 	case VIRTIO_SCSI_S_SIMPLE:
768 		return TCM_SIMPLE_TAG;
769 	case VIRTIO_SCSI_S_ORDERED:
770 		return TCM_ORDERED_TAG;
771 	case VIRTIO_SCSI_S_HEAD:
772 		return TCM_HEAD_TAG;
773 	case VIRTIO_SCSI_S_ACA:
774 		return TCM_ACA_TAG;
775 	default:
776 		break;
777 	}
778 	return TCM_SIMPLE_TAG;
779 }
780 
781 static void vhost_scsi_target_queue_cmd(struct vhost_scsi_cmd *cmd)
782 {
783 	struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
784 	struct vhost_scsi_nexus *tv_nexus;
785 	struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
786 
787 	/* FIXME: BIDI operation */
788 	if (cmd->tvc_sgl_count) {
789 		sg_ptr = cmd->tvc_sgl;
790 
791 		if (cmd->tvc_prot_sgl_count)
792 			sg_prot_ptr = cmd->tvc_prot_sgl;
793 		else
794 			se_cmd->prot_pto = true;
795 	} else {
796 		sg_ptr = NULL;
797 	}
798 	tv_nexus = cmd->tvc_nexus;
799 
800 	se_cmd->tag = 0;
801 	target_init_cmd(se_cmd, tv_nexus->tvn_se_sess, &cmd->tvc_sense_buf[0],
802 			cmd->tvc_lun, cmd->tvc_exp_data_len,
803 			vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
804 			cmd->tvc_data_direction, TARGET_SCF_ACK_KREF);
805 
806 	if (target_submit_prep(se_cmd, cmd->tvc_cdb, sg_ptr,
807 			       cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
808 			       cmd->tvc_prot_sgl_count, GFP_KERNEL))
809 		return;
810 
811 	target_queue_submission(se_cmd);
812 }
813 
814 static void
815 vhost_scsi_send_bad_target(struct vhost_scsi *vs,
816 			   struct vhost_virtqueue *vq,
817 			   int head, unsigned out)
818 {
819 	struct virtio_scsi_cmd_resp __user *resp;
820 	struct virtio_scsi_cmd_resp rsp;
821 	int ret;
822 
823 	memset(&rsp, 0, sizeof(rsp));
824 	rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
825 	resp = vq->iov[out].iov_base;
826 	ret = __copy_to_user(resp, &rsp, sizeof(rsp));
827 	if (!ret)
828 		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
829 	else
830 		pr_err("Faulted on virtio_scsi_cmd_resp\n");
831 }
832 
833 static int
834 vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
835 		    struct vhost_scsi_ctx *vc)
836 {
837 	int ret = -ENXIO;
838 
839 	vc->head = vhost_get_vq_desc(vq, vq->iov,
840 				     ARRAY_SIZE(vq->iov), &vc->out, &vc->in,
841 				     NULL, NULL);
842 
843 	pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
844 		 vc->head, vc->out, vc->in);
845 
846 	/* On error, stop handling until the next kick. */
847 	if (unlikely(vc->head < 0))
848 		goto done;
849 
850 	/* Nothing new?  Wait for eventfd to tell us they refilled. */
851 	if (vc->head == vq->num) {
852 		if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
853 			vhost_disable_notify(&vs->dev, vq);
854 			ret = -EAGAIN;
855 		}
856 		goto done;
857 	}
858 
859 	/*
860 	 * Get the size of request and response buffers.
861 	 * FIXME: Not correct for BIDI operation
862 	 */
863 	vc->out_size = iov_length(vq->iov, vc->out);
864 	vc->in_size = iov_length(&vq->iov[vc->out], vc->in);
865 
866 	/*
867 	 * Copy over the virtio-scsi request header, which for an
868 	 * ANY_LAYOUT-enabled guest may span multiple iovecs, or a
869 	 * single iovec may contain both the header + outgoing
870 	 * WRITE payloads.
871 	 *
872 	 * copy_from_iter() will advance out_iter, so that it will
873 	 * point at the start of the outgoing WRITE payload, if
874 	 * DMA_TO_DEVICE is set.
875 	 */
876 	iov_iter_init(&vc->out_iter, WRITE, vq->iov, vc->out, vc->out_size);
877 	ret = 0;
878 
879 done:
880 	return ret;
881 }
882 
883 static int
884 vhost_scsi_chk_size(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc)
885 {
886 	if (unlikely(vc->in_size < vc->rsp_size)) {
887 		vq_err(vq,
888 		       "Response buf too small, need min %zu bytes got %zu",
889 		       vc->rsp_size, vc->in_size);
890 		return -EINVAL;
891 	} else if (unlikely(vc->out_size < vc->req_size)) {
892 		vq_err(vq,
893 		       "Request buf too small, need min %zu bytes got %zu",
894 		       vc->req_size, vc->out_size);
895 		return -EIO;
896 	}
897 
898 	return 0;
899 }
900 
901 static int
902 vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc,
903 		   struct vhost_scsi_tpg **tpgp)
904 {
905 	int ret = -EIO;
906 
907 	if (unlikely(!copy_from_iter_full(vc->req, vc->req_size,
908 					  &vc->out_iter))) {
909 		vq_err(vq, "Faulted on copy_from_iter_full\n");
910 	} else if (unlikely(*vc->lunp != 1)) {
911 		/* virtio-scsi spec requires byte 0 of the lun to be 1 */
912 		vq_err(vq, "Illegal virtio-scsi lun: %u\n", *vc->lunp);
913 	} else {
914 		struct vhost_scsi_tpg **vs_tpg, *tpg;
915 
916 		vs_tpg = vhost_vq_get_backend(vq);	/* validated at handler entry */
917 
918 		tpg = READ_ONCE(vs_tpg[*vc->target]);
919 		if (unlikely(!tpg)) {
920 			vq_err(vq, "Target 0x%x does not exist\n", *vc->target);
921 		} else {
922 			if (tpgp)
923 				*tpgp = tpg;
924 			ret = 0;
925 		}
926 	}
927 
928 	return ret;
929 }
930 
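/*
 * virtio-scsi encodes a single-level LUN in 8 bytes: byte 0 is 1, byte 1
 * is the target, bytes 2-3 carry the LUN (with 0x40 set in byte 2 for
 * LUNs >= 256, mirroring the encode side in vhost_scsi_send_evt()) and
 * bytes 4-7 are zero. Recover the 14-bit LUN from bytes 2 and 3.
 */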
931 static u16 vhost_buf_to_lun(u8 *lun_buf)
932 {
933 	return ((lun_buf[2] << 8) | lun_buf[3]) & 0x3FFF;
934 }
935 
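/*
 * Request queue kick handler: pop descriptors, parse the virtio-scsi
 * command header (with or without T10-PI fields), map any data payload
 * into scatterlists and hand the command to the TCM core.
 */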
936 static void
937 vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
938 {
939 	struct vhost_scsi_tpg **vs_tpg, *tpg;
940 	struct virtio_scsi_cmd_req v_req;
941 	struct virtio_scsi_cmd_req_pi v_req_pi;
942 	struct vhost_scsi_ctx vc;
943 	struct vhost_scsi_cmd *cmd;
944 	struct iov_iter in_iter, prot_iter, data_iter;
945 	u64 tag;
946 	u32 exp_data_len, data_direction;
947 	int ret, prot_bytes, c = 0;
948 	u16 lun;
949 	u8 task_attr;
950 	bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
951 	void *cdb;
952 
953 	mutex_lock(&vq->mutex);
954 	/*
955 	 * We can handle the vq only after the endpoint is set up by calling the
956 	 * VHOST_SCSI_SET_ENDPOINT ioctl.
957 	 */
958 	vs_tpg = vhost_vq_get_backend(vq);
959 	if (!vs_tpg)
960 		goto out;
961 
962 	memset(&vc, 0, sizeof(vc));
963 	vc.rsp_size = sizeof(struct virtio_scsi_cmd_resp);
964 
965 	vhost_disable_notify(&vs->dev, vq);
966 
967 	do {
968 		ret = vhost_scsi_get_desc(vs, vq, &vc);
969 		if (ret)
970 			goto err;
971 
972 		/*
973 		 * Setup pointers and values based upon different virtio-scsi
974 		 * request header if T10_PI is enabled in KVM guest.
975 		 */
976 		if (t10_pi) {
977 			vc.req = &v_req_pi;
978 			vc.req_size = sizeof(v_req_pi);
979 			vc.lunp = &v_req_pi.lun[0];
980 			vc.target = &v_req_pi.lun[1];
981 		} else {
982 			vc.req = &v_req;
983 			vc.req_size = sizeof(v_req);
984 			vc.lunp = &v_req.lun[0];
985 			vc.target = &v_req.lun[1];
986 		}
987 
988 		/*
989 		 * Validate the size of request and response buffers.
990 		 * Check for a sane response buffer so we can report
991 		 * early errors back to the guest.
992 		 */
993 		ret = vhost_scsi_chk_size(vq, &vc);
994 		if (ret)
995 			goto err;
996 
997 		ret = vhost_scsi_get_req(vq, &vc, &tpg);
998 		if (ret)
999 			goto err;
1000 
1001 		ret = -EIO;	/* bad target on any error from here on */
1002 
1003 		/*
1004 		 * Determine data_direction by calculating the total outgoing
1005 		 * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
1006 		 * response headers respectively.
1007 		 *
1008 		 * For DMA_TO_DEVICE this is out_iter, which is already pointing
1009 		 * to the right place.
1010 		 *
1011 		 * For DMA_FROM_DEVICE, the iovec will be just past the end
1012 		 * of the virtio-scsi response header in either the same
1013 		 * or immediately following iovec.
1014 		 *
1015 		 * Any associated T10_PI bytes for the outgoing / incoming
1016 		 * payloads are included in calculation of exp_data_len here.
1017 		 */
1018 		prot_bytes = 0;
1019 
1020 		if (vc.out_size > vc.req_size) {
1021 			data_direction = DMA_TO_DEVICE;
1022 			exp_data_len = vc.out_size - vc.req_size;
1023 			data_iter = vc.out_iter;
1024 		} else if (vc.in_size > vc.rsp_size) {
1025 			data_direction = DMA_FROM_DEVICE;
1026 			exp_data_len = vc.in_size - vc.rsp_size;
1027 
1028 			iov_iter_init(&in_iter, READ, &vq->iov[vc.out], vc.in,
1029 				      vc.rsp_size + exp_data_len);
1030 			iov_iter_advance(&in_iter, vc.rsp_size);
1031 			data_iter = in_iter;
1032 		} else {
1033 			data_direction = DMA_NONE;
1034 			exp_data_len = 0;
1035 		}
1036 		/*
1037 		 * If T10_PI header + payload is present, setup prot_iter values
1038 		 * and recalculate data_iter for vhost_scsi_mapal() mapping to
1039 		 * host scatterlists via get_user_pages_fast().
1040 		 */
1041 		if (t10_pi) {
1042 			if (v_req_pi.pi_bytesout) {
1043 				if (data_direction != DMA_TO_DEVICE) {
1044 					vq_err(vq, "Received non zero pi_bytesout,"
1045 						" but wrong data_direction\n");
1046 					goto err;
1047 				}
1048 				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
1049 			} else if (v_req_pi.pi_bytesin) {
1050 				if (data_direction != DMA_FROM_DEVICE) {
1051 					vq_err(vq, "Received non zero pi_bytesin,"
1052 						" but wrong data_direction\n");
1053 					goto err;
1054 				}
1055 				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
1056 			}
1057 			/*
1058 			 * Set prot_iter to data_iter and truncate it to
1059 			 * prot_bytes, and advance data_iter past any
1060 			 * preceding prot_bytes that may be present.
1061 			 *
1062 			 * Also fix up the exp_data_len to reflect only the
1063 			 * actual data payload length.
1064 			 */
1065 			if (prot_bytes) {
1066 				exp_data_len -= prot_bytes;
1067 				prot_iter = data_iter;
1068 				iov_iter_truncate(&prot_iter, prot_bytes);
1069 				iov_iter_advance(&data_iter, prot_bytes);
1070 			}
1071 			tag = vhost64_to_cpu(vq, v_req_pi.tag);
1072 			task_attr = v_req_pi.task_attr;
1073 			cdb = &v_req_pi.cdb[0];
1074 			lun = vhost_buf_to_lun(v_req_pi.lun);
1075 		} else {
1076 			tag = vhost64_to_cpu(vq, v_req.tag);
1077 			task_attr = v_req.task_attr;
1078 			cdb = &v_req.cdb[0];
1079 			lun = vhost_buf_to_lun(v_req.lun);
1080 		}
1081 		/*
1082 		 * Check that the received CDB size does not exceed our
1083 		 * hardcoded max for vhost-scsi, then get a pre-allocated
1084 		 * cmd descriptor for the new virtio-scsi tag.
1085 		 *
1086 		 * TODO what if cdb was too small for varlen cdb header?
1087 		 */
1088 		if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
1089 			vq_err(vq, "Received SCSI CDB with command_size: %d that"
1090 				" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1091 				scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
1092 			goto err;
1093 		}
1094 		cmd = vhost_scsi_get_cmd(vq, tpg, cdb, tag, lun, task_attr,
1095 					 exp_data_len + prot_bytes,
1096 					 data_direction);
1097 		if (IS_ERR(cmd)) {
1098 			vq_err(vq, "vhost_scsi_get_cmd failed %ld\n",
1099 			       PTR_ERR(cmd));
1100 			goto err;
1101 		}
1102 		cmd->tvc_vhost = vs;
1103 		cmd->tvc_vq = vq;
1104 		cmd->tvc_resp_iov = vq->iov[vc.out];
1105 		cmd->tvc_in_iovs = vc.in;
1106 
1107 		pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
1108 			 cmd->tvc_cdb[0], cmd->tvc_lun);
1109 		pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
1110 			 " %d\n", cmd, exp_data_len, prot_bytes, data_direction);
1111 
1112 		if (data_direction != DMA_NONE) {
1113 			if (unlikely(vhost_scsi_mapal(cmd, prot_bytes,
1114 						      &prot_iter, exp_data_len,
1115 						      &data_iter))) {
1116 				vq_err(vq, "Failed to map iov to sgl\n");
1117 				vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
1118 				goto err;
1119 			}
1120 		}
1121 		/*
1122 		 * Save the descriptor from vhost_get_vq_desc() to be used to
1123 		 * complete the virtio-scsi request in TCM callback context via
1124 		 * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
1125 		 */
1126 		cmd->tvc_vq_desc = vc.head;
1127 		vhost_scsi_target_queue_cmd(cmd);
1128 		ret = 0;
1129 err:
1130 		/*
1131 		 * ENXIO:  No more requests, or read error, wait for next kick
1132 		 * EINVAL: Invalid response buffer, drop the request
1133 		 * EIO:    Respond with bad target
1134 		 * EAGAIN: Pending request
1135 		 */
1136 		if (ret == -ENXIO)
1137 			break;
1138 		else if (ret == -EIO)
1139 			vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
1140 	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
1141 out:
1142 	mutex_unlock(&vq->mutex);
1143 }
1144 
1145 static void
1146 vhost_scsi_send_tmf_resp(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
1147 			 int in_iovs, int vq_desc, struct iovec *resp_iov,
1148 			 int tmf_resp_code)
1149 {
1150 	struct virtio_scsi_ctrl_tmf_resp rsp;
1151 	struct iov_iter iov_iter;
1152 	int ret;
1153 
1154 	pr_debug("%s\n", __func__);
1155 	memset(&rsp, 0, sizeof(rsp));
1156 	rsp.response = tmf_resp_code;
1157 
1158 	iov_iter_init(&iov_iter, READ, resp_iov, in_iovs, sizeof(rsp));
1159 
1160 	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
1161 	if (likely(ret == sizeof(rsp)))
1162 		vhost_add_used_and_signal(&vs->dev, vq, vq_desc, 0);
1163 	else
1164 		pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n");
1165 }
1166 
1167 static void vhost_scsi_tmf_resp_work(struct vhost_work *work)
1168 {
1169 	struct vhost_scsi_tmf *tmf = container_of(work, struct vhost_scsi_tmf,
1170 						  vwork);
1171 	int resp_code;
1172 
1173 	if (tmf->scsi_resp == TMR_FUNCTION_COMPLETE)
1174 		resp_code = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
1175 	else
1176 		resp_code = VIRTIO_SCSI_S_FUNCTION_REJECTED;
1177 
1178 	vhost_scsi_send_tmf_resp(tmf->vhost, &tmf->svq->vq, tmf->in_iovs,
1179 				 tmf->vq_desc, &tmf->resp_iov, resp_code);
1180 	vhost_scsi_release_tmf_res(tmf);
1181 }
1182 
1183 static void
1184 vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg,
1185 		      struct vhost_virtqueue *vq,
1186 		      struct virtio_scsi_ctrl_tmf_req *vtmf,
1187 		      struct vhost_scsi_ctx *vc)
1188 {
1189 	struct vhost_scsi_virtqueue *svq = container_of(vq,
1190 					struct vhost_scsi_virtqueue, vq);
1191 	struct vhost_scsi_tmf *tmf;
1192 
1193 	if (vhost32_to_cpu(vq, vtmf->subtype) !=
1194 	    VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET)
1195 		goto send_reject;
1196 
1197 	if (!tpg->tpg_nexus || !tpg->tpg_nexus->tvn_se_sess) {
1198 		pr_err("Unable to locate active struct vhost_scsi_nexus for LUN RESET.\n");
1199 		goto send_reject;
1200 	}
1201 
1202 	mutex_lock(&tpg->tv_tpg_mutex);
1203 	if (list_empty(&tpg->tmf_queue)) {
1204 		pr_err("Missing reserve TMF. Could not handle LUN RESET.\n");
1205 		mutex_unlock(&tpg->tv_tpg_mutex);
1206 		goto send_reject;
1207 	}
1208 
1209 	tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf,
1210 			       queue_entry);
1211 	list_del_init(&tmf->queue_entry);
1212 	mutex_unlock(&tpg->tv_tpg_mutex);
1213 
1214 	tmf->tpg = tpg;
1215 	tmf->vhost = vs;
1216 	tmf->svq = svq;
1217 	tmf->resp_iov = vq->iov[vc->out];
1218 	tmf->vq_desc = vc->head;
1219 	tmf->in_iovs = vc->in;
1220 	tmf->inflight = vhost_scsi_get_inflight(vq);
1221 
1222 	if (target_submit_tmr(&tmf->se_cmd, tpg->tpg_nexus->tvn_se_sess, NULL,
1223 			      vhost_buf_to_lun(vtmf->lun), NULL,
1224 			      TMR_LUN_RESET, GFP_KERNEL, 0,
1225 			      TARGET_SCF_ACK_KREF) < 0) {
1226 		vhost_scsi_release_tmf_res(tmf);
1227 		goto send_reject;
1228 	}
1229 
1230 	return;
1231 
1232 send_reject:
1233 	vhost_scsi_send_tmf_resp(vs, vq, vc->in, vc->head, &vq->iov[vc->out],
1234 				 VIRTIO_SCSI_S_FUNCTION_REJECTED);
1235 }
1236 
1237 static void
1238 vhost_scsi_send_an_resp(struct vhost_scsi *vs,
1239 			struct vhost_virtqueue *vq,
1240 			struct vhost_scsi_ctx *vc)
1241 {
1242 	struct virtio_scsi_ctrl_an_resp rsp;
1243 	struct iov_iter iov_iter;
1244 	int ret;
1245 
1246 	pr_debug("%s\n", __func__);
1247 	memset(&rsp, 0, sizeof(rsp));	/* event_actual = 0 */
1248 	rsp.response = VIRTIO_SCSI_S_OK;
1249 
1250 	iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp));
1251 
1252 	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
1253 	if (likely(ret == sizeof(rsp)))
1254 		vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
1255 	else
1256 		pr_err("Faulted on virtio_scsi_ctrl_an_resp\n");
1257 }
1258 
1259 static void
1260 vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1261 {
1262 	struct vhost_scsi_tpg *tpg;
1263 	union {
1264 		__virtio32 type;
1265 		struct virtio_scsi_ctrl_an_req an;
1266 		struct virtio_scsi_ctrl_tmf_req tmf;
1267 	} v_req;
1268 	struct vhost_scsi_ctx vc;
1269 	size_t typ_size;
1270 	int ret, c = 0;
1271 
1272 	mutex_lock(&vq->mutex);
1273 	/*
1274 	 * We can handle the vq only after the endpoint is set up by calling the
1275 	 * VHOST_SCSI_SET_ENDPOINT ioctl.
1276 	 */
1277 	if (!vhost_vq_get_backend(vq))
1278 		goto out;
1279 
1280 	memset(&vc, 0, sizeof(vc));
1281 
1282 	vhost_disable_notify(&vs->dev, vq);
1283 
1284 	do {
1285 		ret = vhost_scsi_get_desc(vs, vq, &vc);
1286 		if (ret)
1287 			goto err;
1288 
1289 		/*
1290 		 * Get the request type first in order to setup
1291 		 * other parameters dependent on the type.
1292 		 */
1293 		vc.req = &v_req.type;
1294 		typ_size = sizeof(v_req.type);
1295 
1296 		if (unlikely(!copy_from_iter_full(vc.req, typ_size,
1297 						  &vc.out_iter))) {
1298 			vq_err(vq, "Faulted on copy_from_iter tmf type\n");
1299 			/*
1300 			 * The size of the response buffer depends on the
1301 			 * request type and must be validated against it.
1302 			 * Since the request type is not known, don't send
1303 			 * a response.
1304 			 */
1305 			continue;
1306 		}
1307 
1308 		switch (vhost32_to_cpu(vq, v_req.type)) {
1309 		case VIRTIO_SCSI_T_TMF:
1310 			vc.req = &v_req.tmf;
1311 			vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req);
1312 			vc.rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp);
1313 			vc.lunp = &v_req.tmf.lun[0];
1314 			vc.target = &v_req.tmf.lun[1];
1315 			break;
1316 		case VIRTIO_SCSI_T_AN_QUERY:
1317 		case VIRTIO_SCSI_T_AN_SUBSCRIBE:
1318 			vc.req = &v_req.an;
1319 			vc.req_size = sizeof(struct virtio_scsi_ctrl_an_req);
1320 			vc.rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp);
1321 			vc.lunp = &v_req.an.lun[0];
1322 			vc.target = NULL;
1323 			break;
1324 		default:
1325 			vq_err(vq, "Unknown control request %d", v_req.type);
1326 			continue;
1327 		}
1328 
1329 		/*
1330 		 * Validate the size of request and response buffers.
1331 		 * Check for a sane response buffer so we can report
1332 		 * early errors back to the guest.
1333 		 */
1334 		ret = vhost_scsi_chk_size(vq, &vc);
1335 		if (ret)
1336 			goto err;
1337 
1338 		/*
1339 		 * Get the rest of the request now that its size is known.
1340 		 */
1341 		vc.req += typ_size;
1342 		vc.req_size -= typ_size;
1343 
1344 		ret = vhost_scsi_get_req(vq, &vc, &tpg);
1345 		if (ret)
1346 			goto err;
1347 
1348 		if (v_req.type == VIRTIO_SCSI_T_TMF)
1349 			vhost_scsi_handle_tmf(vs, tpg, vq, &v_req.tmf, &vc);
1350 		else
1351 			vhost_scsi_send_an_resp(vs, vq, &vc);
1352 err:
1353 		/*
1354 		 * ENXIO:  No more requests, or read error, wait for next kick
1355 		 * EINVAL: Invalid response buffer, drop the request
1356 		 * EIO:    Respond with bad target
1357 		 * EAGAIN: Pending request
1358 		 */
1359 		if (ret == -ENXIO)
1360 			break;
1361 		else if (ret == -EIO)
1362 			vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
1363 	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
1364 out:
1365 	mutex_unlock(&vq->mutex);
1366 }
1367 
1368 static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
1369 {
1370 	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1371 						poll.work);
1372 	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1373 
1374 	pr_debug("%s: The handling func for control queue.\n", __func__);
1375 	vhost_scsi_ctl_handle_vq(vs, vq);
1376 }
1377 
1378 static void
1379 vhost_scsi_send_evt(struct vhost_scsi *vs,
1380 		   struct vhost_scsi_tpg *tpg,
1381 		   struct se_lun *lun,
1382 		   u32 event,
1383 		   u32 reason)
1384 {
1385 	struct vhost_scsi_evt *evt;
1386 
1387 	evt = vhost_scsi_allocate_evt(vs, event, reason);
1388 	if (!evt)
1389 		return;
1390 
1391 	if (tpg && lun) {
1392 		/* TODO: share lun setup code with virtio-scsi.ko */
1393 		/*
1394 		 * Note: evt->event is zeroed when we allocate it and
1395 		 * lun[4-7] need to be zero according to virtio-scsi spec.
1396 		 */
1397 		evt->event.lun[0] = 0x01;
1398 		evt->event.lun[1] = tpg->tport_tpgt;
1399 		if (lun->unpacked_lun >= 256)
1400 			evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
1401 		evt->event.lun[3] = lun->unpacked_lun & 0xFF;
1402 	}
1403 
1404 	llist_add(&evt->list, &vs->vs_event_list);
1405 	vhost_work_queue(&vs->dev, &vs->vs_event_work);
1406 }
1407 
1408 static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
1409 {
1410 	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1411 						poll.work);
1412 	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1413 
1414 	mutex_lock(&vq->mutex);
1415 	if (!vhost_vq_get_backend(vq))
1416 		goto out;
1417 
1418 	if (vs->vs_events_missed)
1419 		vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
1420 out:
1421 	mutex_unlock(&vq->mutex);
1422 }
1423 
1424 static void vhost_scsi_handle_kick(struct vhost_work *work)
1425 {
1426 	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1427 						poll.work);
1428 	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1429 
1430 	vhost_scsi_handle_vq(vs, vq);
1431 }
1432 
1433 /* Callers must hold dev mutex */
1434 static void vhost_scsi_flush(struct vhost_scsi *vs)
1435 {
1436 	struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
1437 	int i;
1438 
1439 	/* Init new inflight and remember the old inflight */
1440 	vhost_scsi_init_inflight(vs, old_inflight);
1441 
1442 	/*
1443 	 * The inflight->kref was initialized to 1. We decrement it here to
1444 	 * indicate the start of the flush operation so that it will reach 0
1445 	 * when all the reqs are finished.
1446 	 */
1447 	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1448 		kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);
1449 
1450 	/* Flush both the vhost poll and vhost work */
1451 	vhost_work_dev_flush(&vs->dev);
1452 
1453 	/* Wait for all reqs issued before the flush to be finished */
1454 	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1455 		wait_for_completion(&old_inflight[i]->comp);
1456 }
1457 
1458 static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq)
1459 {
1460 	struct vhost_scsi_virtqueue *svq = container_of(vq,
1461 					struct vhost_scsi_virtqueue, vq);
1462 	struct vhost_scsi_cmd *tv_cmd;
1463 	unsigned int i;
1464 
1465 	if (!svq->scsi_cmds)
1466 		return;
1467 
1468 	for (i = 0; i < svq->max_cmds; i++) {
1469 		tv_cmd = &svq->scsi_cmds[i];
1470 
1471 		kfree(tv_cmd->tvc_sgl);
1472 		kfree(tv_cmd->tvc_prot_sgl);
1473 		kfree(tv_cmd->tvc_upages);
1474 	}
1475 
1476 	sbitmap_free(&svq->scsi_tags);
1477 	kfree(svq->scsi_cmds);
1478 	svq->scsi_cmds = NULL;
1479 }
1480 
1481 static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds)
1482 {
1483 	struct vhost_scsi_virtqueue *svq = container_of(vq,
1484 					struct vhost_scsi_virtqueue, vq);
1485 	struct vhost_scsi_cmd *tv_cmd;
1486 	unsigned int i;
1487 
1488 	if (svq->scsi_cmds)
1489 		return 0;
1490 
1491 	if (sbitmap_init_node(&svq->scsi_tags, max_cmds, -1, GFP_KERNEL,
1492 			      NUMA_NO_NODE, false, true))
1493 		return -ENOMEM;
1494 	svq->max_cmds = max_cmds;
1495 
1496 	svq->scsi_cmds = kcalloc(max_cmds, sizeof(*tv_cmd), GFP_KERNEL);
1497 	if (!svq->scsi_cmds) {
1498 		sbitmap_free(&svq->scsi_tags);
1499 		return -ENOMEM;
1500 	}
1501 
1502 	for (i = 0; i < max_cmds; i++) {
1503 		tv_cmd = &svq->scsi_cmds[i];
1504 
1505 		tv_cmd->tvc_sgl = kcalloc(VHOST_SCSI_PREALLOC_SGLS,
1506 					  sizeof(struct scatterlist),
1507 					  GFP_KERNEL);
1508 		if (!tv_cmd->tvc_sgl) {
1509 			pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
1510 			goto out;
1511 		}
1512 
1513 		tv_cmd->tvc_upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES,
1514 					     sizeof(struct page *),
1515 					     GFP_KERNEL);
1516 		if (!tv_cmd->tvc_upages) {
1517 			pr_err("Unable to allocate tv_cmd->tvc_upages\n");
1518 			goto out;
1519 		}
1520 
1521 		tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
1522 					       sizeof(struct scatterlist),
1523 					       GFP_KERNEL);
1524 		if (!tv_cmd->tvc_prot_sgl) {
1525 			pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
1526 			goto out;
1527 		}
1528 	}
1529 	return 0;
1530 out:
1531 	vhost_scsi_destroy_vq_cmds(vq);
1532 	return -ENOMEM;
1533 }
1534 
1535 /*
1536  * Called from vhost_scsi_ioctl() context to walk the list of available
1537  * vhost_scsi_tpg with an active struct vhost_scsi_nexus
1538  *
1539  *  The lock nesting rule is:
1540  *    vhost_scsi_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
1541  */
1542 static int
1543 vhost_scsi_set_endpoint(struct vhost_scsi *vs,
1544 			struct vhost_scsi_target *t)
1545 {
1546 	struct se_portal_group *se_tpg;
1547 	struct vhost_scsi_tport *tv_tport;
1548 	struct vhost_scsi_tpg *tpg;
1549 	struct vhost_scsi_tpg **vs_tpg;
1550 	struct vhost_virtqueue *vq;
1551 	int index, ret, i, len;
1552 	bool match = false;
1553 
1554 	mutex_lock(&vhost_scsi_mutex);
1555 	mutex_lock(&vs->dev.mutex);
1556 
1557 	/* Verify that the rings have been set up correctly. */
1558 	for (index = 0; index < vs->dev.nvqs; ++index) {
1560 		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1561 			ret = -EFAULT;
1562 			goto out;
1563 		}
1564 	}
1565 
1566 	len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
1567 	vs_tpg = kzalloc(len, GFP_KERNEL);
1568 	if (!vs_tpg) {
1569 		ret = -ENOMEM;
1570 		goto out;
1571 	}
1572 	if (vs->vs_tpg)
1573 		memcpy(vs_tpg, vs->vs_tpg, len);
1574 
1575 	list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
1576 		mutex_lock(&tpg->tv_tpg_mutex);
1577 		if (!tpg->tpg_nexus) {
1578 			mutex_unlock(&tpg->tv_tpg_mutex);
1579 			continue;
1580 		}
1581 		if (tpg->tv_tpg_vhost_count != 0) {
1582 			mutex_unlock(&tpg->tv_tpg_mutex);
1583 			continue;
1584 		}
1585 		tv_tport = tpg->tport;
1586 
1587 		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1588 			if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
1589 				mutex_unlock(&tpg->tv_tpg_mutex);
1590 				ret = -EEXIST;
1591 				goto undepend;
1592 			}
1593 			/*
1594 			 * In order to ensure individual vhost-scsi configfs
1595 			 * groups cannot be removed while in use by vhost ioctl,
1596 			 * go ahead and take an explicit se_tpg->tpg_group.cg_item
1597 			 * dependency now.
1598 			 */
1599 			se_tpg = &tpg->se_tpg;
1600 			ret = target_depend_item(&se_tpg->tpg_group.cg_item);
1601 			if (ret) {
1602 				pr_warn("target_depend_item() failed: %d\n", ret);
1603 				mutex_unlock(&tpg->tv_tpg_mutex);
1604 				goto undepend;
1605 			}
1606 			tpg->tv_tpg_vhost_count++;
1607 			tpg->vhost_scsi = vs;
1608 			vs_tpg[tpg->tport_tpgt] = tpg;
1609 			match = true;
1610 		}
1611 		mutex_unlock(&tpg->tv_tpg_mutex);
1612 	}
1613 
1614 	if (match) {
1615 		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
1616 		       sizeof(vs->vs_vhost_wwpn));
1617 
1618 		for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
1619 			vq = &vs->vqs[i].vq;
1620 			if (!vhost_vq_is_setup(vq))
1621 				continue;
1622 
1623 			ret = vhost_scsi_setup_vq_cmds(vq, vq->num);
1624 			if (ret)
1625 				goto destroy_vq_cmds;
1626 		}
1627 
1628 		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1629 			vq = &vs->vqs[i].vq;
1630 			mutex_lock(&vq->mutex);
1631 			vhost_vq_set_backend(vq, vs_tpg);
1632 			vhost_vq_init_access(vq);
1633 			mutex_unlock(&vq->mutex);
1634 		}
1635 		ret = 0;
1636 	} else {
1637 		ret = -EEXIST;
1638 	}
1639 
1640 	/*
1641 	 * Act as synchronize_rcu to make sure access to
1642 	 * old vs->vs_tpg is finished.
1643 	 */
1644 	vhost_scsi_flush(vs);
1645 	kfree(vs->vs_tpg);
1646 	vs->vs_tpg = vs_tpg;
1647 	goto out;
1648 
1649 destroy_vq_cmds:
1650 	for (i--; i >= VHOST_SCSI_VQ_IO; i--) {
1651 		if (!vhost_vq_get_backend(&vs->vqs[i].vq))
1652 			vhost_scsi_destroy_vq_cmds(&vs->vqs[i].vq);
1653 	}
1654 undepend:
1655 	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
1656 		tpg = vs_tpg[i];
1657 		if (tpg) {
1658 			tpg->tv_tpg_vhost_count--;
1659 			target_undepend_item(&tpg->se_tpg.tpg_group.cg_item);
1660 		}
1661 	}
1662 	kfree(vs_tpg);
1663 out:
1664 	mutex_unlock(&vs->dev.mutex);
1665 	mutex_unlock(&vhost_scsi_mutex);
1666 	return ret;
1667 }
1668 
1669 static int
1670 vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
1671 			  struct vhost_scsi_target *t)
1672 {
1673 	struct se_portal_group *se_tpg;
1674 	struct vhost_scsi_tport *tv_tport;
1675 	struct vhost_scsi_tpg *tpg;
1676 	struct vhost_virtqueue *vq;
1677 	bool match = false;
1678 	int index, ret, i;
1679 	u8 target;
1680 
1681 	mutex_lock(&vhost_scsi_mutex);
1682 	mutex_lock(&vs->dev.mutex);
1683 	/* Verify that the rings have been set up correctly. */
1684 	for (index = 0; index < vs->dev.nvqs; ++index) {
1685 		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1686 			ret = -EFAULT;
1687 			goto err_dev;
1688 		}
1689 	}
1690 
1691 	if (!vs->vs_tpg) {
1692 		ret = 0;
1693 		goto err_dev;
1694 	}
1695 
1696 	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
1697 		target = i;
1698 		tpg = vs->vs_tpg[target];
1699 		if (!tpg)
1700 			continue;
1701 
1702 		mutex_lock(&tpg->tv_tpg_mutex);
1703 		tv_tport = tpg->tport;
1704 		if (!tv_tport) {
1705 			ret = -ENODEV;
1706 			goto err_tpg;
1707 		}
1708 
1709 		if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1710 			pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
1711 				" does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
1712 				tv_tport->tport_name, tpg->tport_tpgt,
1713 				t->vhost_wwpn, t->vhost_tpgt);
1714 			ret = -EINVAL;
1715 			goto err_tpg;
1716 		}
1717 		tpg->tv_tpg_vhost_count--;
1718 		tpg->vhost_scsi = NULL;
1719 		vs->vs_tpg[target] = NULL;
1720 		match = true;
1721 		mutex_unlock(&tpg->tv_tpg_mutex);
1722 		/*
1723 		 * Release se_tpg->tpg_group.cg_item configfs dependency now
1724 		 * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
1725 		 */
1726 		se_tpg = &tpg->se_tpg;
1727 		target_undepend_item(&se_tpg->tpg_group.cg_item);
1728 	}
1729 	if (match) {
1730 		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1731 			vq = &vs->vqs[i].vq;
1732 			mutex_lock(&vq->mutex);
1733 			vhost_vq_set_backend(vq, NULL);
1734 			mutex_unlock(&vq->mutex);
1735 		}
1736 		/* Make sure cmds are not running before tearing them down. */
1737 		vhost_scsi_flush(vs);
1738 
1739 		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1740 			vq = &vs->vqs[i].vq;
1741 			vhost_scsi_destroy_vq_cmds(vq);
1742 		}
1743 	}
1744 	/*
1745 	 * Act as synchronize_rcu to make sure access to
1746 	 * old vs->vs_tpg is finished.
1747 	 */
1748 	vhost_scsi_flush(vs);
1749 	kfree(vs->vs_tpg);
1750 	vs->vs_tpg = NULL;
1751 	WARN_ON(vs->vs_events_nr);
1752 	mutex_unlock(&vs->dev.mutex);
1753 	mutex_unlock(&vhost_scsi_mutex);
1754 	return 0;
1755 
1756 err_tpg:
1757 	mutex_unlock(&tpg->tv_tpg_mutex);
1758 err_dev:
1759 	mutex_unlock(&vs->dev.mutex);
1760 	mutex_unlock(&vhost_scsi_mutex);
1761 	return ret;
1762 }
1763 
1764 static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
1765 {
1766 	struct vhost_virtqueue *vq;
1767 	int i;
1768 
1769 	if (features & ~VHOST_SCSI_FEATURES)
1770 		return -EOPNOTSUPP;
1771 
1772 	mutex_lock(&vs->dev.mutex);
1773 	if ((features & (1 << VHOST_F_LOG_ALL)) &&
1774 	    !vhost_log_access_ok(&vs->dev)) {
1775 		mutex_unlock(&vs->dev.mutex);
1776 		return -EFAULT;
1777 	}
1778 
1779 	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1780 		vq = &vs->vqs[i].vq;
1781 		mutex_lock(&vq->mutex);
1782 		vq->acked_features = features;
1783 		mutex_unlock(&vq->mutex);
1784 	}
1785 	mutex_unlock(&vs->dev.mutex);
1786 	return 0;
1787 }
1788 
1789 static int vhost_scsi_open(struct inode *inode, struct file *f)
1790 {
1791 	struct vhost_scsi *vs;
1792 	struct vhost_virtqueue **vqs;
1793 	int r = -ENOMEM, i;
1794 
1795 	vs = kvzalloc(sizeof(*vs), GFP_KERNEL);
1796 	if (!vs)
1797 		goto err_vs;
1798 
1799 	vqs = kmalloc_array(VHOST_SCSI_MAX_VQ, sizeof(*vqs), GFP_KERNEL);
1800 	if (!vqs)
1801 		goto err_vqs;
1802 
1803 	vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
1804 	vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);
1805 
1806 	vs->vs_events_nr = 0;
1807 	vs->vs_events_missed = false;
1808 
1809 	vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
1810 	vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1811 	vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
1812 	vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
1813 	for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
1814 		vqs[i] = &vs->vqs[i].vq;
1815 		vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
1816 	}
1817 	vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV,
1818 		       VHOST_SCSI_WEIGHT, 0, true, NULL);
1819 
1820 	vhost_scsi_init_inflight(vs, NULL);
1821 
1822 	f->private_data = vs;
1823 	return 0;
1824 
1825 err_vqs:
1826 	kvfree(vs);
1827 err_vs:
1828 	return r;
1829 }
1830 
1831 static int vhost_scsi_release(struct inode *inode, struct file *f)
1832 {
1833 	struct vhost_scsi *vs = f->private_data;
1834 	struct vhost_scsi_target t;
1835 
1836 	mutex_lock(&vs->dev.mutex);
1837 	memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
1838 	mutex_unlock(&vs->dev.mutex);
1839 	vhost_scsi_clear_endpoint(vs, &t);
1840 	vhost_dev_stop(&vs->dev);
1841 	vhost_dev_cleanup(&vs->dev);
1842 	/* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
1843 	vhost_scsi_flush(vs);
1844 	kfree(vs->dev.vqs);
1845 	kvfree(vs);
1846 	return 0;
1847 }
1848 
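/*
 * Dispatch the vhost-scsi specific ioctls; anything unrecognized falls
 * through to the generic vhost_dev_ioctl()/vhost_vring_ioctl()
 * handlers.  A userspace caller drives this roughly as follows, after
 * the usual VHOST_SET_OWNER and vring setup (illustrative sketch only;
 * error handling and a real WWPN are omitted):
 *
 *	int vhost_fd = open("/dev/vhost-scsi", O_RDWR);
 *	struct vhost_scsi_target t = { .vhost_tpgt = 1 };
 *	strncpy(t.vhost_wwpn, "naa.<wwpn>", sizeof(t.vhost_wwpn) - 1);
 *	ioctl(vhost_fd, VHOST_SCSI_SET_ENDPOINT, &t);
 */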
1849 static long
1850 vhost_scsi_ioctl(struct file *f,
1851 		 unsigned int ioctl,
1852 		 unsigned long arg)
1853 {
1854 	struct vhost_scsi *vs = f->private_data;
1855 	struct vhost_scsi_target backend;
1856 	void __user *argp = (void __user *)arg;
1857 	u64 __user *featurep = argp;
1858 	u32 __user *eventsp = argp;
1859 	u32 events_missed;
1860 	u64 features;
1861 	int r, abi_version = VHOST_SCSI_ABI_VERSION;
1862 	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1863 
1864 	switch (ioctl) {
1865 	case VHOST_SCSI_SET_ENDPOINT:
1866 		if (copy_from_user(&backend, argp, sizeof backend))
1867 			return -EFAULT;
1868 		if (backend.reserved != 0)
1869 			return -EOPNOTSUPP;
1870 
1871 		return vhost_scsi_set_endpoint(vs, &backend);
1872 	case VHOST_SCSI_CLEAR_ENDPOINT:
1873 		if (copy_from_user(&backend, argp, sizeof backend))
1874 			return -EFAULT;
1875 		if (backend.reserved != 0)
1876 			return -EOPNOTSUPP;
1877 
1878 		return vhost_scsi_clear_endpoint(vs, &backend);
1879 	case VHOST_SCSI_GET_ABI_VERSION:
1880 		if (copy_to_user(argp, &abi_version, sizeof abi_version))
1881 			return -EFAULT;
1882 		return 0;
1883 	case VHOST_SCSI_SET_EVENTS_MISSED:
1884 		if (get_user(events_missed, eventsp))
1885 			return -EFAULT;
1886 		mutex_lock(&vq->mutex);
1887 		vs->vs_events_missed = events_missed;
1888 		mutex_unlock(&vq->mutex);
1889 		return 0;
1890 	case VHOST_SCSI_GET_EVENTS_MISSED:
1891 		mutex_lock(&vq->mutex);
1892 		events_missed = vs->vs_events_missed;
1893 		mutex_unlock(&vq->mutex);
1894 		if (put_user(events_missed, eventsp))
1895 			return -EFAULT;
1896 		return 0;
1897 	case VHOST_GET_FEATURES:
1898 		features = VHOST_SCSI_FEATURES;
1899 		if (copy_to_user(featurep, &features, sizeof features))
1900 			return -EFAULT;
1901 		return 0;
1902 	case VHOST_SET_FEATURES:
1903 		if (copy_from_user(&features, featurep, sizeof features))
1904 			return -EFAULT;
1905 		return vhost_scsi_set_features(vs, features);
1906 	default:
1907 		mutex_lock(&vs->dev.mutex);
1908 		r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
1909 		/* TODO: flush backend after dev ioctl. */
1910 		if (r == -ENOIOCTLCMD)
1911 			r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
1912 		mutex_unlock(&vs->dev.mutex);
1913 		return r;
1914 	}
1915 }
1916 
1917 static const struct file_operations vhost_scsi_fops = {
1918 	.owner          = THIS_MODULE,
1919 	.release        = vhost_scsi_release,
1920 	.unlocked_ioctl = vhost_scsi_ioctl,
1921 	.compat_ioctl	= compat_ptr_ioctl,
1922 	.open           = vhost_scsi_open,
1923 	.llseek		= noop_llseek,
1924 };
1925 
1926 static struct miscdevice vhost_scsi_misc = {
1927 	MISC_DYNAMIC_MINOR,
1928 	"vhost-scsi",
1929 	&vhost_scsi_fops,
1930 };
1931 
1932 static int __init vhost_scsi_register(void)
1933 {
1934 	return misc_register(&vhost_scsi_misc);
1935 }
1936 
1937 static void vhost_scsi_deregister(void)
1938 {
1939 	misc_deregister(&vhost_scsi_misc);
1940 }
1941 
1942 static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
1943 {
1944 	switch (tport->tport_proto_id) {
1945 	case SCSI_PROTOCOL_SAS:
1946 		return "SAS";
1947 	case SCSI_PROTOCOL_FCP:
1948 		return "FCP";
1949 	case SCSI_PROTOCOL_ISCSI:
1950 		return "iSCSI";
1951 	default:
1952 		break;
1953 	}
1954 
1955 	return "Unknown";
1956 }
1957 
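/*
 * Queue a VIRTIO_SCSI_T_TRANSPORT_RESET event toward the guest, with
 * reason RESCAN on plug and REMOVED on unplug, but only when the guest
 * negotiated VIRTIO_SCSI_F_HOTPLUG; otherwise the event is dropped.
 */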
1958 static void
1959 vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
1960 		  struct se_lun *lun, bool plug)
1961 {
1962 
1964 	struct vhost_virtqueue *vq;
1965 	u32 reason;
1966 
1967 	if (!vs)
1968 		return;
1969 
1970 	mutex_lock(&vs->dev.mutex);
1971 
1972 	if (plug)
1973 		reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
1974 	else
1975 		reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
1976 
1977 	vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1978 	mutex_lock(&vq->mutex);
1979 	if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
1980 		vhost_scsi_send_evt(vs, tpg, lun,
1981 				   VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
1982 	mutex_unlock(&vq->mutex);
1983 	mutex_unlock(&vs->dev.mutex);
1984 }
1985 
1986 static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
1987 {
1988 	vhost_scsi_do_plug(tpg, lun, true);
1989 }
1990 
1991 static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
1992 {
1993 	vhost_scsi_do_plug(tpg, lun, false);
1994 }
1995 
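/*
 * Called when a LUN is linked into this TPG via configfs: preallocate
 * one TMF descriptor for the port, bump the port count and notify the
 * guest of the new LUN through a hotplug event.
 */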
1996 static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
1997 			       struct se_lun *lun)
1998 {
1999 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2000 				struct vhost_scsi_tpg, se_tpg);
2001 	struct vhost_scsi_tmf *tmf;
2002 
2003 	tmf = kzalloc(sizeof(*tmf), GFP_KERNEL);
2004 	if (!tmf)
2005 		return -ENOMEM;
2006 	INIT_LIST_HEAD(&tmf->queue_entry);
2007 	vhost_work_init(&tmf->vwork, vhost_scsi_tmf_resp_work);
2008 
2009 	mutex_lock(&vhost_scsi_mutex);
2010 
2011 	mutex_lock(&tpg->tv_tpg_mutex);
2012 	tpg->tv_tpg_port_count++;
2013 	list_add_tail(&tmf->queue_entry, &tpg->tmf_queue);
2014 	mutex_unlock(&tpg->tv_tpg_mutex);
2015 
2016 	vhost_scsi_hotplug(tpg, lun);
2017 
2018 	mutex_unlock(&vhost_scsi_mutex);
2019 
2020 	return 0;
2021 }
2022 
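/*
 * Inverse of vhost_scsi_port_link(): drop the port count, retire one
 * preallocated TMF descriptor and tell the guest the LUN is gone.
 */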
2023 static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
2024 				  struct se_lun *lun)
2025 {
2026 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2027 				struct vhost_scsi_tpg, se_tpg);
2028 	struct vhost_scsi_tmf *tmf;
2029 
2030 	mutex_lock(&vhost_scsi_mutex);
2031 
2032 	mutex_lock(&tpg->tv_tpg_mutex);
2033 	tpg->tv_tpg_port_count--;
2034 	tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf,
2035 			       queue_entry);
2036 	list_del(&tmf->queue_entry);
2037 	kfree(tmf);
2038 	mutex_unlock(&tpg->tv_tpg_mutex);
2039 
2040 	vhost_scsi_hotunplug(tpg, lun);
2041 
2042 	mutex_unlock(&vhost_scsi_mutex);
2043 }
2044 
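/*
 * The fabric_prot_type attribute selects the emulated T10-PI protection
 * type: 0 disables it, while 1 and 3 select DIF TYPE1/TYPE3.  Type 2 is
 * rejected, presumably because this fabric does not support it.
 */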
2045 static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_store(
2046 		struct config_item *item, const char *page, size_t count)
2047 {
2048 	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2049 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2050 				struct vhost_scsi_tpg, se_tpg);
2051 	unsigned long val;
2052 	int ret = kstrtoul(page, 0, &val);
2053 
2054 	if (ret) {
2055 		pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
2056 		return ret;
2057 	}
2058 	if (val != 0 && val != 1 && val != 3) {
2059 		pr_err("Invalid vhost_scsi fabric_prot_type: %lu\n", val);
2060 		return -EINVAL;
2061 	}
2062 	tpg->tv_fabric_prot_type = val;
2063 
2064 	return count;
2065 }
2066 
2067 static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_show(
2068 		struct config_item *item, char *page)
2069 {
2070 	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2071 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2072 				struct vhost_scsi_tpg, se_tpg);
2073 
2074 	return sprintf(page, "%d\n", tpg->tv_fabric_prot_type);
2075 }
2076 
2077 CONFIGFS_ATTR(vhost_scsi_tpg_attrib_, fabric_prot_type);
2078 
2079 static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
2080 	&vhost_scsi_tpg_attrib_attr_fabric_prot_type,
2081 	NULL,
2082 };
2083 
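/*
 * Create the I_T nexus for a TPG.  Only a single nexus may exist per
 * TPG; a second attempt fails with -EEXIST until the current one is
 * dropped through the nexus configfs attribute.
 */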
2084 static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
2085 				const char *name)
2086 {
2087 	struct vhost_scsi_nexus *tv_nexus;
2088 
2089 	mutex_lock(&tpg->tv_tpg_mutex);
2090 	if (tpg->tpg_nexus) {
2091 		mutex_unlock(&tpg->tv_tpg_mutex);
2092 		pr_debug("tpg->tpg_nexus already exists\n");
2093 		return -EEXIST;
2094 	}
2095 
2096 	tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL);
2097 	if (!tv_nexus) {
2098 		mutex_unlock(&tpg->tv_tpg_mutex);
2099 		pr_err("Unable to allocate struct vhost_scsi_nexus\n");
2100 		return -ENOMEM;
2101 	}
2102 	/*
2103 	 * Since we are running in 'demo mode' this call will generate a
2104 	 * struct se_node_acl for the vhost_scsi struct se_portal_group with
2105 	 * the SCSI Initiator port name of the passed configfs group 'name'.
2106 	 */
2107 	tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg, 0, 0,
2108 					TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
2109 					(unsigned char *)name, tv_nexus, NULL);
2110 	if (IS_ERR(tv_nexus->tvn_se_sess)) {
2111 		mutex_unlock(&tpg->tv_tpg_mutex);
2112 		kfree(tv_nexus);
2113 		return -ENOMEM;
2114 	}
2115 	tpg->tpg_nexus = tv_nexus;
2116 
2117 	mutex_unlock(&tpg->tv_tpg_mutex);
2118 	return 0;
2119 }
2120 
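/*
 * Drop the I_T nexus.  This is refused with -EBUSY while the TPG still
 * has linked ports or an active vhost endpoint referencing it.
 */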
2121 static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
2122 {
2123 	struct se_session *se_sess;
2124 	struct vhost_scsi_nexus *tv_nexus;
2125 
2126 	mutex_lock(&tpg->tv_tpg_mutex);
2127 	tv_nexus = tpg->tpg_nexus;
2128 	if (!tv_nexus) {
2129 		mutex_unlock(&tpg->tv_tpg_mutex);
2130 		return -ENODEV;
2131 	}
2132 
2133 	se_sess = tv_nexus->tvn_se_sess;
2134 	if (!se_sess) {
2135 		mutex_unlock(&tpg->tv_tpg_mutex);
2136 		return -ENODEV;
2137 	}
2138 
2139 	if (tpg->tv_tpg_port_count != 0) {
2140 		mutex_unlock(&tpg->tv_tpg_mutex);
2141 		pr_err("Unable to remove TCM_vhost I_T Nexus with"
2142 			" active TPG port count: %d\n",
2143 			tpg->tv_tpg_port_count);
2144 		return -EBUSY;
2145 	}
2146 
2147 	if (tpg->tv_tpg_vhost_count != 0) {
2148 		mutex_unlock(&tpg->tv_tpg_mutex);
2149 		pr_err("Unable to remove TCM_vhost I_T Nexus with"
2150 			" active TPG vhost count: %d\n",
2151 			tpg->tv_tpg_vhost_count);
2152 		return -EBUSY;
2153 	}
2154 
2155 	pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
2156 		" %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
2157 		tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
2158 
2159 	/*
2160 	 * Release the SCSI I_T Nexus to the emulated vhost Target Port
2161 	 */
2162 	target_remove_session(se_sess);
2163 	tpg->tpg_nexus = NULL;
2164 	mutex_unlock(&tpg->tv_tpg_mutex);
2165 
2166 	kfree(tv_nexus);
2167 	return 0;
2168 }
2169 
2170 static ssize_t vhost_scsi_tpg_nexus_show(struct config_item *item, char *page)
2171 {
2172 	struct se_portal_group *se_tpg = to_tpg(item);
2173 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2174 				struct vhost_scsi_tpg, se_tpg);
2175 	struct vhost_scsi_nexus *tv_nexus;
2176 	ssize_t ret;
2177 
2178 	mutex_lock(&tpg->tv_tpg_mutex);
2179 	tv_nexus = tpg->tpg_nexus;
2180 	if (!tv_nexus) {
2181 		mutex_unlock(&tpg->tv_tpg_mutex);
2182 		return -ENODEV;
2183 	}
2184 	ret = snprintf(page, PAGE_SIZE, "%s\n",
2185 			tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
2186 	mutex_unlock(&tpg->tv_tpg_mutex);
2187 
2188 	return ret;
2189 }
2190 
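/*
 * Writing "NULL" drops the current nexus; any other value must be an
 * initiator port name whose prefix (naa., fc. or iqn.) matches the
 * protocol of the enclosing tport, e.g. (illustrative configfs path):
 *
 *	echo naa.<initiator wwn> > .../tpgt_1/nexus
 */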
2191 static ssize_t vhost_scsi_tpg_nexus_store(struct config_item *item,
2192 		const char *page, size_t count)
2193 {
2194 	struct se_portal_group *se_tpg = to_tpg(item);
2195 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2196 				struct vhost_scsi_tpg, se_tpg);
2197 	struct vhost_scsi_tport *tport_wwn = tpg->tport;
2198 	unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
2199 	int ret;
2200 	/*
2201 	 * Shut down the active I_T nexus if 'NULL' is passed.
2202 	 */
2203 	if (!strncmp(page, "NULL", 4)) {
2204 		ret = vhost_scsi_drop_nexus(tpg);
2205 		return (!ret) ? count : ret;
2206 	}
2207 	/*
2208 	 * Otherwise make sure the passed virtual Initiator port WWN matches
2209 	 * the fabric protocol_id set in vhost_scsi_make_tport(), and call
2210 	 * vhost_scsi_make_nexus().
2211 	 */
2212 	if (strlen(page) >= VHOST_SCSI_NAMELEN) {
2213 		pr_err("Emulated NAA SAS Address: %s, exceeds"
2214 				" max: %d\n", page, VHOST_SCSI_NAMELEN);
2215 		return -EINVAL;
2216 	}
2217 	snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);
2218 
2219 	ptr = strstr(i_port, "naa.");
2220 	if (ptr) {
2221 		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
2222 			pr_err("Passed SAS Initiator Port %s does not"
2223 				" match target port protoid: %s\n", i_port,
2224 				vhost_scsi_dump_proto_id(tport_wwn));
2225 			return -EINVAL;
2226 		}
2227 		port_ptr = &i_port[0];
2228 		goto check_newline;
2229 	}
2230 	ptr = strstr(i_port, "fc.");
2231 	if (ptr) {
2232 		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
2233 			pr_err("Passed FCP Initiator Port %s does not"
2234 				" match target port protoid: %s\n", i_port,
2235 				vhost_scsi_dump_proto_id(tport_wwn));
2236 			return -EINVAL;
2237 		}
2238 		port_ptr = &i_port[3]; /* Skip over "fc." */
2239 		goto check_newline;
2240 	}
2241 	ptr = strstr(i_port, "iqn.");
2242 	if (ptr) {
2243 		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
2244 			pr_err("Passed iSCSI Initiator Port %s does not"
2245 				" match target port protoid: %s\n", i_port,
2246 				vhost_scsi_dump_proto_id(tport_wwn));
2247 			return -EINVAL;
2248 		}
2249 		port_ptr = &i_port[0];
2250 		goto check_newline;
2251 	}
2252 	pr_err("Unable to locate prefix for emulated Initiator Port:"
2253 			" %s\n", i_port);
2254 	return -EINVAL;
2255 	/*
2256 	 * Clear any trailing newline for the NAA WWN
2257 	 */
2258 check_newline:
2259 	if (i_port[strlen(i_port)-1] == '\n')
2260 		i_port[strlen(i_port)-1] = '\0';
2261 
2262 	ret = vhost_scsi_make_nexus(tpg, port_ptr);
2263 	if (ret < 0)
2264 		return ret;
2265 
2266 	return count;
2267 }
2268 
2269 CONFIGFS_ATTR(vhost_scsi_tpg_, nexus);
2270 
2271 static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
2272 	&vhost_scsi_tpg_attr_nexus,
2273 	NULL,
2274 };
2275 
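/*
 * Create a TPG from a configfs mkdir of "tpgt_<n>", where <n> must
 * parse as a decimal u16 below VHOST_SCSI_MAX_TARGET.
 */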
2276 static struct se_portal_group *
2277 vhost_scsi_make_tpg(struct se_wwn *wwn, const char *name)
2278 {
2279 	struct vhost_scsi_tport *tport = container_of(wwn,
2280 			struct vhost_scsi_tport, tport_wwn);
2281 
2282 	struct vhost_scsi_tpg *tpg;
2283 	u16 tpgt;
2284 	int ret;
2285 
2286 	if (strstr(name, "tpgt_") != name)
2287 		return ERR_PTR(-EINVAL);
2288 	if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
2289 		return ERR_PTR(-EINVAL);
2290 
2291 	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
2292 	if (!tpg) {
2293 		pr_err("Unable to allocate struct vhost_scsi_tpg\n");
2294 		return ERR_PTR(-ENOMEM);
2295 	}
2296 	mutex_init(&tpg->tv_tpg_mutex);
2297 	INIT_LIST_HEAD(&tpg->tv_tpg_list);
2298 	INIT_LIST_HEAD(&tpg->tmf_queue);
2299 	tpg->tport = tport;
2300 	tpg->tport_tpgt = tpgt;
2301 
2302 	ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id);
2303 	if (ret < 0) {
2304 		kfree(tpg);
2305 		return NULL;
2306 	}
2307 	mutex_lock(&vhost_scsi_mutex);
2308 	list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
2309 	mutex_unlock(&vhost_scsi_mutex);
2310 
2311 	return &tpg->se_tpg;
2312 }
2313 
2314 static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg)
2315 {
2316 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2317 				struct vhost_scsi_tpg, se_tpg);
2318 
2319 	mutex_lock(&vhost_scsi_mutex);
2320 	list_del(&tpg->tv_tpg_list);
2321 	mutex_unlock(&vhost_scsi_mutex);
2322 	/*
2323 	 * Release the virtual I_T Nexus for this vhost TPG
2324 	 */
2325 	vhost_scsi_drop_nexus(tpg);
2326 	/*
2327 	 * Deregister the se_tpg from TCM.
2328 	 */
2329 	core_tpg_deregister(se_tpg);
2330 	kfree(tpg);
2331 }
2332 
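/*
 * Create a tport from a configfs mkdir under the fabric WWN directory.
 * The name prefix selects the emulated transport: "naa." maps to SAS,
 * "fc." to FCP (with the prefix stripped) and "iqn." to iSCSI.
 */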
2333 static struct se_wwn *
2334 vhost_scsi_make_tport(struct target_fabric_configfs *tf,
2335 		     struct config_group *group,
2336 		     const char *name)
2337 {
2338 	struct vhost_scsi_tport *tport;
2339 	char *ptr;
2340 	u64 wwpn = 0;
2341 	int off = 0;
2342 
2343 	/* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
2344 		return ERR_PTR(-EINVAL); */
2345 
2346 	tport = kzalloc(sizeof(*tport), GFP_KERNEL);
2347 	if (!tport) {
2348 		pr_err("Unable to allocate struct vhost_scsi_tport\n");
2349 		return ERR_PTR(-ENOMEM);
2350 	}
2351 	tport->tport_wwpn = wwpn;
2352 	/*
2353 	 * Determine the emulated Protocol Identifier and Target Port Name
2354 	 * based on the incoming configfs directory name.
2355 	 */
2356 	ptr = strstr(name, "naa.");
2357 	if (ptr) {
2358 		tport->tport_proto_id = SCSI_PROTOCOL_SAS;
2359 		goto check_len;
2360 	}
2361 	ptr = strstr(name, "fc.");
2362 	if (ptr) {
2363 		tport->tport_proto_id = SCSI_PROTOCOL_FCP;
2364 		off = 3; /* Skip over "fc." */
2365 		goto check_len;
2366 	}
2367 	ptr = strstr(name, "iqn.");
2368 	if (ptr) {
2369 		tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
2370 		goto check_len;
2371 	}
2372 
2373 	pr_err("Unable to locate prefix for emulated Target Port:"
2374 			" %s\n", name);
2375 	kfree(tport);
2376 	return ERR_PTR(-EINVAL);
2377 
2378 check_len:
2379 	if (strlen(name) >= VHOST_SCSI_NAMELEN) {
2380 		pr_err("Emulated %s Address: %s, exceeds"
2381 			" max: %d\n", vhost_scsi_dump_proto_id(tport), name,
2382 			VHOST_SCSI_NAMELEN);
2383 		kfree(tport);
2384 		return ERR_PTR(-EINVAL);
2385 	}
2386 	snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]);
2387 
2388 	pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
2389 		" %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name);
2390 
2391 	return &tport->tport_wwn;
2392 }
2393 
2394 static void vhost_scsi_drop_tport(struct se_wwn *wwn)
2395 {
2396 	struct vhost_scsi_tport *tport = container_of(wwn,
2397 				struct vhost_scsi_tport, tport_wwn);
2398 
2399 	pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
2400 		" %s Address: %s\n", vhost_scsi_dump_proto_id(tport),
2401 		tport->tport_name);
2402 
2403 	kfree(tport);
2404 }
2405 
2406 static ssize_t
2407 vhost_scsi_wwn_version_show(struct config_item *item, char *page)
2408 {
2409 	return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
2410 		" on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2411 		utsname()->machine);
2412 }
2413 
2414 CONFIGFS_ATTR_RO(vhost_scsi_wwn_, version);
2415 
2416 static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
2417 	&vhost_scsi_wwn_attr_version,
2418 	NULL,
2419 };
2420 
2421 static const struct target_core_fabric_ops vhost_scsi_ops = {
2422 	.module				= THIS_MODULE,
2423 	.fabric_name			= "vhost",
2424 	.max_data_sg_nents		= VHOST_SCSI_PREALLOC_SGLS,
2425 	.tpg_get_wwn			= vhost_scsi_get_fabric_wwn,
2426 	.tpg_get_tag			= vhost_scsi_get_tpgt,
2427 	.tpg_check_demo_mode		= vhost_scsi_check_true,
2428 	.tpg_check_demo_mode_cache	= vhost_scsi_check_true,
2429 	.tpg_check_demo_mode_write_protect = vhost_scsi_check_false,
2430 	.tpg_check_prod_mode_write_protect = vhost_scsi_check_false,
2431 	.tpg_check_prot_fabric_only	= vhost_scsi_check_prot_fabric_only,
2432 	.tpg_get_inst_index		= vhost_scsi_tpg_get_inst_index,
2433 	.release_cmd			= vhost_scsi_release_cmd,
2434 	.check_stop_free		= vhost_scsi_check_stop_free,
2435 	.sess_get_index			= vhost_scsi_sess_get_index,
2436 	.sess_get_initiator_sid		= NULL,
2437 	.write_pending			= vhost_scsi_write_pending,
2438 	.set_default_node_attributes	= vhost_scsi_set_default_node_attrs,
2439 	.get_cmd_state			= vhost_scsi_get_cmd_state,
2440 	.queue_data_in			= vhost_scsi_queue_data_in,
2441 	.queue_status			= vhost_scsi_queue_status,
2442 	.queue_tm_rsp			= vhost_scsi_queue_tm_rsp,
2443 	.aborted_task			= vhost_scsi_aborted_task,
2444 	/*
2445 	 * Setup callers for generic logic in target_core_fabric_configfs.c
2446 	 */
2447 	.fabric_make_wwn		= vhost_scsi_make_tport,
2448 	.fabric_drop_wwn		= vhost_scsi_drop_tport,
2449 	.fabric_make_tpg		= vhost_scsi_make_tpg,
2450 	.fabric_drop_tpg		= vhost_scsi_drop_tpg,
2451 	.fabric_post_link		= vhost_scsi_port_link,
2452 	.fabric_pre_unlink		= vhost_scsi_port_unlink,
2453 
2454 	.tfc_wwn_attrs			= vhost_scsi_wwn_attrs,
2455 	.tfc_tpg_base_attrs		= vhost_scsi_tpg_attrs,
2456 	.tfc_tpg_attrib_attrs		= vhost_scsi_tpg_attrib_attrs,
2457 };
2458 
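/*
 * Module bring-up: register the misc device first so userspace can open
 * /dev/vhost-scsi, then register the TCM fabric template that exposes
 * this driver under configfs as target/vhost/.
 */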
2459 static int __init vhost_scsi_init(void)
2460 {
2461 	int ret = -ENOMEM;
2462 
2463 	pr_debug("TCM_VHOST fabric module %s on %s/%s"
2464 		" on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2465 		utsname()->machine);
2466 
2467 	ret = vhost_scsi_register();
2468 	if (ret < 0)
2469 		goto out;
2470 
2471 	ret = target_register_template(&vhost_scsi_ops);
2472 	if (ret < 0)
2473 		goto out_vhost_scsi_deregister;
2474 
2475 	return 0;
2476 
2477 out_vhost_scsi_deregister:
2478 	vhost_scsi_deregister();
2479 out:
2480 	return ret;
2481 }
2482 
2483 static void vhost_scsi_exit(void)
2484 {
2485 	target_unregister_template(&vhost_scsi_ops);
2486 	vhost_scsi_deregister();
2487 }
2488 
2489 MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
2490 MODULE_ALIAS("tcm_vhost");
2491 MODULE_LICENSE("GPL");
2492 module_init(vhost_scsi_init);
2493 module_exit(vhost_scsi_exit);
2494