// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio SCSI HBA driver
 *
 * Copyright IBM Corp. 2010
 * Copyright Red Hat, Inc. 2011
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Paolo Bonzini   <pbonzini@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/virtio_scsi.h>
#include <linux/cpu.h>
#include <linux/blkdev.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_devinfo.h>
#include <linux/seqlock.h>
#include <linux/blk-mq-virtio.h>

#include "sd.h"

#define VIRTIO_SCSI_MEMPOOL_SZ 64	/* mempool of TMF/EH commands */
#define VIRTIO_SCSI_EVENT_LEN 8		/* event buffers kept in flight */
#define VIRTIO_SCSI_VQ_BASE 2		/* vq 0 = control, vq 1 = event */

/* Command queue element */
struct virtio_scsi_cmd {
	struct scsi_cmnd *sc;
	struct completion *comp;
	union {
		struct virtio_scsi_cmd_req       cmd;
		struct virtio_scsi_cmd_req_pi    cmd_pi;
		struct virtio_scsi_ctrl_tmf_req  tmf;
		struct virtio_scsi_ctrl_an_req   an;
	} req;
	union {
		struct virtio_scsi_cmd_resp      cmd;
		struct virtio_scsi_ctrl_tmf_resp tmf;
		struct virtio_scsi_ctrl_an_resp  an;
		struct virtio_scsi_event         evt;
	} resp;
} ____cacheline_aligned_in_smp;

struct virtio_scsi_event_node {
	struct virtio_scsi *vscsi;
	struct virtio_scsi_event event;
	struct work_struct work;
};

struct virtio_scsi_vq {
	/* Protects vq */
	spinlock_t vq_lock;

	struct virtqueue *vq;
};

/* Driver instance state */
struct virtio_scsi {
	struct virtio_device *vdev;

	/* Get some buffers ready for event vq */
	struct virtio_scsi_event_node event_list[VIRTIO_SCSI_EVENT_LEN];

	u32 num_queues;

	struct hlist_node node;

	/* Protected by event_vq lock */
	bool stop_events;

	struct virtio_scsi_vq ctrl_vq;
	struct virtio_scsi_vq event_vq;
	struct virtio_scsi_vq req_vqs[];
};

static struct kmem_cache *virtscsi_cmd_cache;
static mempool_t *virtscsi_cmd_pool;

static inline struct Scsi_Host *virtio_scsi_host(struct virtio_device *vdev)
{
	return vdev->priv;
}

static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid)
{
	if (resid)
		scsi_set_resid(sc, resid);
}

/**
 * virtscsi_complete_cmd - finish a scsi_cmd and invoke scsi_done
 * @vscsi: the virtio_scsi instance
 * @buf: the virtio_scsi_cmd completed by the host
 *
 * Called with vq_lock held.
 */
static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
{
	struct virtio_scsi_cmd *cmd = buf;
	struct scsi_cmnd *sc = cmd->sc;
	struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd;

	dev_dbg(&sc->device->sdev_gendev,
		"cmd %p response %u status %#02x sense_len %u\n",
		sc, resp->response, resp->status, resp->sense_len);

	sc->result = resp->status;
	virtscsi_compute_resid(sc, virtio32_to_cpu(vscsi->vdev, resp->resid));
	switch (resp->response) {
	case VIRTIO_SCSI_S_OK:
		set_host_byte(sc, DID_OK);
		break;
	case VIRTIO_SCSI_S_OVERRUN:
		set_host_byte(sc, DID_ERROR);
		break;
	case VIRTIO_SCSI_S_ABORTED:
		set_host_byte(sc, DID_ABORT);
		break;
	case VIRTIO_SCSI_S_BAD_TARGET:
		set_host_byte(sc, DID_BAD_TARGET);
		break;
	case VIRTIO_SCSI_S_RESET:
		set_host_byte(sc, DID_RESET);
		break;
	case VIRTIO_SCSI_S_BUSY:
		set_host_byte(sc, DID_BUS_BUSY);
		break;
	case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
		break;
	case VIRTIO_SCSI_S_TARGET_FAILURE:
		set_host_byte(sc, DID_TARGET_FAILURE);
		break;
	case VIRTIO_SCSI_S_NEXUS_FAILURE:
		set_host_byte(sc, DID_NEXUS_FAILURE);
		break;
	default:
		scmd_printk(KERN_WARNING, sc, "Unknown response %d\n",
			    resp->response);
		/* fall through */
	case VIRTIO_SCSI_S_FAILURE:
		set_host_byte(sc, DID_ERROR);
		break;
	}

	WARN_ON(virtio32_to_cpu(vscsi->vdev, resp->sense_len) >
		VIRTIO_SCSI_SENSE_SIZE);
	if (sc->sense_buffer) {
		memcpy(sc->sense_buffer, resp->sense,
		       min_t(u32,
			     virtio32_to_cpu(vscsi->vdev, resp->sense_len),
			     VIRTIO_SCSI_SENSE_SIZE));
		if (resp->sense_len)
			set_driver_byte(sc, DRIVER_SENSE);
	}

	sc->scsi_done(sc);
}

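/*
 * Drain a virtqueue under its lock.  virtqueue_enable_cb() returns
 * false when more buffers arrived while callbacks were disabled, so
 * looping until it returns true closes the race between the last
 * virtqueue_get_buf() and the next interrupt.
 */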
static void virtscsi_vq_done(struct virtio_scsi *vscsi,
			     struct virtio_scsi_vq *virtscsi_vq,
			     void (*fn)(struct virtio_scsi *vscsi, void *buf))
{
	void *buf;
	unsigned int len;
	unsigned long flags;
	struct virtqueue *vq = virtscsi_vq->vq;

	spin_lock_irqsave(&virtscsi_vq->vq_lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
			fn(vscsi, buf);

		if (unlikely(virtqueue_is_broken(vq)))
			break;
	} while (!virtqueue_enable_cb(vq));
	spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags);
}

static void virtscsi_req_done(struct virtqueue *vq)
{
	struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);
	int index = vq->index - VIRTIO_SCSI_VQ_BASE;
	struct virtio_scsi_vq *req_vq = &vscsi->req_vqs[index];

	virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd);
}

static void virtscsi_poll_requests(struct virtio_scsi *vscsi)
{
	int i, num_vqs;

	num_vqs = vscsi->num_queues;
	for (i = 0; i < num_vqs; i++)
		virtscsi_vq_done(vscsi, &vscsi->req_vqs[i],
				 virtscsi_complete_cmd);
}

static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf)
{
	struct virtio_scsi_cmd *cmd = buf;

	if (cmd->comp)
		complete(cmd->comp);
}

static void virtscsi_ctrl_done(struct virtqueue *vq)
{
	struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);

	virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free);
}

static void virtscsi_handle_event(struct work_struct *work);

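/*
 * Event handling: VIRTIO_SCSI_EVENT_LEN buffers are posted to the event
 * virtqueue up front.  When the host fills one in, the completion path
 * queues virtscsi_handle_event() on a workqueue, and the handler
 * re-posts the buffer once the event has been processed.
 */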
static int virtscsi_kick_event(struct virtio_scsi *vscsi,
			       struct virtio_scsi_event_node *event_node)
{
	int err;
	struct scatterlist sg;
	unsigned long flags;

	INIT_WORK(&event_node->work, virtscsi_handle_event);
	sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event));

	spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);

	err = virtqueue_add_inbuf(vscsi->event_vq.vq, &sg, 1, event_node,
				  GFP_ATOMIC);
	if (!err)
		virtqueue_kick(vscsi->event_vq.vq);

	spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags);

	return err;
}

static int virtscsi_kick_event_all(struct virtio_scsi *vscsi)
{
	int i;

	for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++) {
		vscsi->event_list[i].vscsi = vscsi;
		virtscsi_kick_event(vscsi, &vscsi->event_list[i]);
	}

	return 0;
}

static void virtscsi_cancel_event_work(struct virtio_scsi *vscsi)
{
	int i;

	/* Stop scheduling work before calling cancel_work_sync.  */
	spin_lock_irq(&vscsi->event_vq.vq_lock);
	vscsi->stop_events = true;
	spin_unlock_irq(&vscsi->event_vq.vq_lock);

	for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++)
		cancel_work_sync(&vscsi->event_list[i].work);
}

static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
					    struct virtio_scsi_event *event)
{
	struct scsi_device *sdev;
	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
	unsigned int target = event->lun[1];
	unsigned int lun = (event->lun[2] << 8) | event->lun[3];

	switch (virtio32_to_cpu(vscsi->vdev, event->reason)) {
	case VIRTIO_SCSI_EVT_RESET_RESCAN:
		scsi_add_device(shost, 0, target, lun);
		break;
	case VIRTIO_SCSI_EVT_RESET_REMOVED:
		sdev = scsi_device_lookup(shost, 0, target, lun);
		if (sdev) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			pr_err("SCSI device %d 0 %d %d not found\n",
				shost->host_no, target, lun);
		}
		break;
	default:
		pr_info("Unsupported virtio scsi event reason %x\n", event->reason);
	}
}

static void virtscsi_handle_param_change(struct virtio_scsi *vscsi,
					 struct virtio_scsi_event *event)
{
	struct scsi_device *sdev;
	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
	unsigned int target = event->lun[1];
	unsigned int lun = (event->lun[2] << 8) | event->lun[3];
	u8 asc = virtio32_to_cpu(vscsi->vdev, event->reason) & 255;
	u8 ascq = virtio32_to_cpu(vscsi->vdev, event->reason) >> 8;

	sdev = scsi_device_lookup(shost, 0, target, lun);
	if (!sdev) {
		pr_err("SCSI device %d 0 %d %d not found\n",
			shost->host_no, target, lun);
		return;
	}

	/*
	 * Handle "Parameters changed", "Mode parameters changed", and
	 * "Capacity data has changed".
	 */
	if (asc == 0x2a && (ascq == 0x00 || ascq == 0x01 || ascq == 0x09))
		scsi_rescan_device(&sdev->sdev_gendev);

	scsi_device_put(sdev);
}

static void virtscsi_rescan_hotunplug(struct virtio_scsi *vscsi)
{
	struct scsi_device *sdev;
	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
	unsigned char scsi_cmd[MAX_COMMAND_SIZE];
	int result, inquiry_len, inq_result_len = 256;
	char *inq_result = kmalloc(inq_result_len, GFP_KERNEL);

	if (!inq_result)
		return;

	shost_for_each_device(sdev, shost) {
		inquiry_len = sdev->inquiry_len ? sdev->inquiry_len : 36;

		memset(scsi_cmd, 0, sizeof(scsi_cmd));
		scsi_cmd[0] = INQUIRY;
		scsi_cmd[4] = (unsigned char) inquiry_len;

		memset(inq_result, 0, inq_result_len);

		result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
					  inq_result, inquiry_len, NULL,
					  SD_TIMEOUT, SD_MAX_RETRIES, NULL);

		if (result == 0 && inq_result[0] >> 5) {
			/* PQ indicates the LUN is not attached */
			scsi_remove_device(sdev);
		}
	}

	kfree(inq_result);
}

static void virtscsi_handle_event(struct work_struct *work)
{
	struct virtio_scsi_event_node *event_node =
		container_of(work, struct virtio_scsi_event_node, work);
	struct virtio_scsi *vscsi = event_node->vscsi;
	struct virtio_scsi_event *event = &event_node->event;

	if (event->event &
	    cpu_to_virtio32(vscsi->vdev, VIRTIO_SCSI_T_EVENTS_MISSED)) {
		event->event &= ~cpu_to_virtio32(vscsi->vdev,
						 VIRTIO_SCSI_T_EVENTS_MISSED);
		virtscsi_rescan_hotunplug(vscsi);
		scsi_scan_host(virtio_scsi_host(vscsi->vdev));
	}

	switch (virtio32_to_cpu(vscsi->vdev, event->event)) {
	case VIRTIO_SCSI_T_NO_EVENT:
		break;
	case VIRTIO_SCSI_T_TRANSPORT_RESET:
		virtscsi_handle_transport_reset(vscsi, event);
		break;
	case VIRTIO_SCSI_T_PARAM_CHANGE:
		virtscsi_handle_param_change(vscsi, event);
		break;
	default:
		pr_err("Unsupported virtio scsi event %x\n", event->event);
	}
	virtscsi_kick_event(vscsi, event_node);
}

static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf)
{
	struct virtio_scsi_event_node *event_node = buf;

	if (!vscsi->stop_events)
		queue_work(system_freezable_wq, &event_node->work);
}

static void virtscsi_event_done(struct virtqueue *vq)
{
	struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);

	virtscsi_vq_done(vscsi, &vscsi->event_vq, virtscsi_complete_event);
}

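/*
 * A command needs at most six scatterlist entries, in virtqueue order:
 * buffers the device reads first (request header, data-out protection
 * SGLs, data-out payload), then buffers the device writes (response
 * header, data-in protection SGLs, data-in payload).
 */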
static int __virtscsi_add_cmd(struct virtqueue *vq,
			    struct virtio_scsi_cmd *cmd,
			    size_t req_size, size_t resp_size)
{
	struct scsi_cmnd *sc = cmd->sc;
	struct scatterlist *sgs[6], req, resp;
	struct sg_table *out, *in;
	unsigned out_num = 0, in_num = 0;

	out = in = NULL;

	if (sc && sc->sc_data_direction != DMA_NONE) {
		if (sc->sc_data_direction != DMA_FROM_DEVICE)
			out = &sc->sdb.table;
		if (sc->sc_data_direction != DMA_TO_DEVICE)
			in = &sc->sdb.table;
	}

	/* Request header.  */
	sg_init_one(&req, &cmd->req, req_size);
	sgs[out_num++] = &req;

	/* Data-out buffer.  */
	if (out) {
		/* Place WRITE protection SGLs before Data OUT payload */
		if (scsi_prot_sg_count(sc))
			sgs[out_num++] = scsi_prot_sglist(sc);
		sgs[out_num++] = out->sgl;
	}

	/* Response header.  */
	sg_init_one(&resp, &cmd->resp, resp_size);
	sgs[out_num + in_num++] = &resp;

	/* Data-in buffer */
	if (in) {
		/* Place READ protection SGLs before Data IN payload */
		if (scsi_prot_sg_count(sc))
			sgs[out_num + in_num++] = scsi_prot_sglist(sc);
		sgs[out_num + in_num++] = in->sgl;
	}

	return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_ATOMIC);
}

static void virtscsi_kick_vq(struct virtio_scsi_vq *vq)
{
	bool needs_kick;
	unsigned long flags;

	spin_lock_irqsave(&vq->vq_lock, flags);
	needs_kick = virtqueue_kick_prepare(vq->vq);
	spin_unlock_irqrestore(&vq->vq_lock, flags);

	if (needs_kick)
		virtqueue_notify(vq->vq);
}

/**
 * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue, optionally kick it
 * @vq: the struct virtqueue we're talking about
 * @cmd: command structure
 * @req_size: size of the request buffer
 * @resp_size: size of the response buffer
 * @kick: whether to kick the virtqueue immediately
 */
static int virtscsi_add_cmd(struct virtio_scsi_vq *vq,
			     struct virtio_scsi_cmd *cmd,
			     size_t req_size, size_t resp_size,
			     bool kick)
{
	unsigned long flags;
	int err;
	bool needs_kick = false;

	spin_lock_irqsave(&vq->vq_lock, flags);
	err = __virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size);
	if (!err && kick)
		needs_kick = virtqueue_kick_prepare(vq->vq);

	spin_unlock_irqrestore(&vq->vq_lock, flags);

	if (needs_kick)
		virtqueue_notify(vq->vq);
	return err;
}

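/*
 * virtio-scsi addresses devices with an 8-byte LUN field: byte 0 is
 * always 1, byte 1 is the target ID, and bytes 2-3 carry the LUN in
 * SAM flat-space format (top bits 01, hence the 0x40).  For example,
 * LUN 300 (0x12c) becomes lun[2] = 0x41, lun[3] = 0x2c; the remaining
 * bytes are unused.
 */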
static void virtio_scsi_init_hdr(struct virtio_device *vdev,
				 struct virtio_scsi_cmd_req *cmd,
				 struct scsi_cmnd *sc)
{
	cmd->lun[0] = 1;
	cmd->lun[1] = sc->device->id;
	cmd->lun[2] = (sc->device->lun >> 8) | 0x40;
	cmd->lun[3] = sc->device->lun & 0xff;
	cmd->tag = cpu_to_virtio64(vdev, (unsigned long)sc);
	cmd->task_attr = VIRTIO_SCSI_S_SIMPLE;
	cmd->prio = 0;
	cmd->crn = 0;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev,
				    struct virtio_scsi_cmd_req_pi *cmd_pi,
				    struct scsi_cmnd *sc)
{
	struct request *rq = sc->request;
	struct blk_integrity *bi;

	virtio_scsi_init_hdr(vdev, (struct virtio_scsi_cmd_req *)cmd_pi, sc);

	if (!rq || !scsi_prot_sg_count(sc))
		return;

	bi = blk_get_integrity(rq->rq_disk);

	if (sc->sc_data_direction == DMA_TO_DEVICE)
		cmd_pi->pi_bytesout = cpu_to_virtio32(vdev,
						      bio_integrity_bytes(bi,
							blk_rq_sectors(rq)));
	else if (sc->sc_data_direction == DMA_FROM_DEVICE)
		cmd_pi->pi_bytesin = cpu_to_virtio32(vdev,
						     bio_integrity_bytes(bi,
							blk_rq_sectors(rq)));
}
#endif

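/*
 * blk_mq_unique_tag() encodes the hardware queue index in the upper 16
 * bits of the tag; blk_mq_unique_tag_to_hwq() recovers it, so each
 * command is submitted on the request vq matching the blk-mq hw queue
 * it was issued from.
 */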
static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi,
						  struct scsi_cmnd *sc)
{
	u32 tag = blk_mq_unique_tag(sc->request);
	u16 hwq = blk_mq_unique_tag_to_hwq(tag);

	return &vscsi->req_vqs[hwq];
}

static int virtscsi_queuecommand(struct Scsi_Host *shost,
				 struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(shost);
	struct virtio_scsi_vq *req_vq = virtscsi_pick_vq_mq(vscsi, sc);
	struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
	bool kick;
	unsigned long flags;
	int req_size;
	int ret;

	BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);

	/* TODO: check feature bit and fail if unsupported?  */
	BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL);

	dev_dbg(&sc->device->sdev_gendev,
		"cmd %p CDB: %#02x\n", sc, sc->cmnd[0]);

	cmd->sc = sc;

	BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (virtio_has_feature(vscsi->vdev, VIRTIO_SCSI_F_T10_PI)) {
		virtio_scsi_init_hdr_pi(vscsi->vdev, &cmd->req.cmd_pi, sc);
		memcpy(cmd->req.cmd_pi.cdb, sc->cmnd, sc->cmd_len);
		req_size = sizeof(cmd->req.cmd_pi);
	} else
#endif
	{
		virtio_scsi_init_hdr(vscsi->vdev, &cmd->req.cmd, sc);
		memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
		req_size = sizeof(cmd->req.cmd);
	}

	kick = (sc->flags & SCMD_LAST) != 0;
	ret = virtscsi_add_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd), kick);
	if (ret == -EIO) {
		cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
		spin_lock_irqsave(&req_vq->vq_lock, flags);
		virtscsi_complete_cmd(vscsi, cmd);
		spin_unlock_irqrestore(&req_vq->vq_lock, flags);
	} else if (ret != 0) {
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	return 0;
}

static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
{
	DECLARE_COMPLETION_ONSTACK(comp);
	int ret = FAILED;

	cmd->comp = &comp;
	if (virtscsi_add_cmd(&vscsi->ctrl_vq, cmd,
			     sizeof(cmd->req.tmf), sizeof(cmd->resp.tmf), true) < 0)
		goto out;

	wait_for_completion(&comp);
	if (cmd->resp.tmf.response == VIRTIO_SCSI_S_OK ||
	    cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)
		ret = SUCCESS;

	/*
	 * The spec guarantees that all requests related to the TMF have
	 * been completed, but the callback might not have run yet if
	 * we're using independent interrupts (e.g. MSI).  Poll the
	 * virtqueues once.
	 *
	 * In the abort case, sc->scsi_done will do nothing, because
	 * the block layer must have detected a timeout and as a result
	 * REQ_ATOM_COMPLETE has been set.
	 */
	virtscsi_poll_requests(vscsi);

out:
	mempool_free(cmd, virtscsi_cmd_pool);
	return ret;
}

static int virtscsi_device_reset(struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(sc->device->host);
	struct virtio_scsi_cmd *cmd;

	sdev_printk(KERN_INFO, sc->device, "device reset\n");
	cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
	if (!cmd)
		return FAILED;

	memset(cmd, 0, sizeof(*cmd));
	cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
		.type = VIRTIO_SCSI_T_TMF,
		.subtype = cpu_to_virtio32(vscsi->vdev,
					   VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET),
		.lun[0] = 1,
		.lun[1] = sc->device->id,
		.lun[2] = (sc->device->lun >> 8) | 0x40,
		.lun[3] = sc->device->lun & 0xff,
	};
	return virtscsi_tmf(vscsi, cmd);
}

static int virtscsi_device_alloc(struct scsi_device *sdevice)
{
	/*
	 * Passed through SCSI targets (e.g. with qemu's 'scsi-block')
	 * may have transfer limits which come from the host SCSI
	 * controller or something on the host side other than the
	 * target itself.
	 *
	 * To make this work properly, the hypervisor can adjust the
	 * target's VPD information to advertise these limits.  But
	 * for that to work, the guest has to look at the VPD pages,
	 * which we won't do by default if it is an SPC-2 device, even
	 * if it does actually support it.
	 *
	 * So, set the blist to always try to read the VPD pages.
	 */
	sdevice->sdev_bflags = BLIST_TRY_VPD_PAGES;

	return 0;
}

/**
 * virtscsi_change_queue_depth() - Change a virtscsi target's queue depth
 * @sdev:	Virtscsi target whose queue depth to change
 * @qdepth:	New queue depth
 */
static int virtscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct Scsi_Host *shost = sdev->host;
	int max_depth = shost->cmd_per_lun;

	return scsi_change_queue_depth(sdev, min(max_depth, qdepth));
}

static int virtscsi_abort(struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(sc->device->host);
	struct virtio_scsi_cmd *cmd;

	scmd_printk(KERN_INFO, sc, "abort\n");
	cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
	if (!cmd)
		return FAILED;

	memset(cmd, 0, sizeof(*cmd));
	cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
		.type = VIRTIO_SCSI_T_TMF,
		.subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK,
		.lun[0] = 1,
		.lun[1] = sc->device->id,
		.lun[2] = (sc->device->lun >> 8) | 0x40,
		.lun[3] = sc->device->lun & 0xff,
		.tag = cpu_to_virtio64(vscsi->vdev, (unsigned long)sc),
	};
	return virtscsi_tmf(vscsi, cmd);
}

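/*
 * The first two MSI-X vectors (the irq_affinity pre_vectors set up in
 * virtscsi_init) belong to the control and event virtqueues, so the
 * request virtqueues start at offset VIRTIO_SCSI_VQ_BASE.
 */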
static int virtscsi_map_queues(struct Scsi_Host *shost)
{
	struct virtio_scsi *vscsi = shost_priv(shost);
	struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];

	return blk_mq_virtio_map_queues(qmap, vscsi->vdev, VIRTIO_SCSI_VQ_BASE);
}

static void virtscsi_commit_rqs(struct Scsi_Host *shost, u16 hwq)
{
	struct virtio_scsi *vscsi = shost_priv(shost);

	virtscsi_kick_vq(&vscsi->req_vqs[hwq]);
}

/*
 * The host guarantees to respond to each command, although I/O
 * latencies might be higher than on bare metal.  Reset the timer
 * unconditionally to give the host a chance to perform EH.
 */
static enum blk_eh_timer_return virtscsi_eh_timed_out(struct scsi_cmnd *scmnd)
{
	return BLK_EH_RESET_TIMER;
}

static struct scsi_host_template virtscsi_host_template = {
	.module = THIS_MODULE,
	.name = "Virtio SCSI HBA",
	.proc_name = "virtio_scsi",
	.this_id = -1,
	.cmd_size = sizeof(struct virtio_scsi_cmd),
	.queuecommand = virtscsi_queuecommand,
	.commit_rqs = virtscsi_commit_rqs,
	.change_queue_depth = virtscsi_change_queue_depth,
	.eh_abort_handler = virtscsi_abort,
	.eh_device_reset_handler = virtscsi_device_reset,
	.eh_timed_out = virtscsi_eh_timed_out,
	.slave_alloc = virtscsi_device_alloc,

	.dma_boundary = UINT_MAX,
	.map_queues = virtscsi_map_queues,
	.track_queue_depth = 1,
};

#define virtscsi_config_get(vdev, fld) \
	({ \
		__virtio_native_type(struct virtio_scsi_config, fld) __val; \
		virtio_cread(vdev, struct virtio_scsi_config, fld, &__val); \
		__val; \
	})

#define virtscsi_config_set(vdev, fld, val) \
	do { \
		__virtio_native_type(struct virtio_scsi_config, fld) __val = (val); \
		virtio_cwrite(vdev, struct virtio_scsi_config, fld, &__val); \
	} while (0)
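
/* e.g. num_queues = virtscsi_config_get(vdev, num_queues); */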

static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
			     struct virtqueue *vq)
{
	spin_lock_init(&virtscsi_vq->vq_lock);
	virtscsi_vq->vq = vq;
}

static void virtscsi_remove_vqs(struct virtio_device *vdev)
{
	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);
	vdev->config->del_vqs(vdev);
}

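/*
 * Virtqueue layout: vq 0 is the control queue, vq 1 the event queue,
 * and vqs 2..n the request queues (hence VIRTIO_SCSI_VQ_BASE).
 */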
static int virtscsi_init(struct virtio_device *vdev,
			 struct virtio_scsi *vscsi)
{
	int err;
	u32 i;
	u32 num_vqs;
	vq_callback_t **callbacks;
	const char **names;
	struct virtqueue **vqs;
	struct irq_affinity desc = { .pre_vectors = 2 };

	num_vqs = vscsi->num_queues + VIRTIO_SCSI_VQ_BASE;
	vqs = kmalloc_array(num_vqs, sizeof(struct virtqueue *), GFP_KERNEL);
	callbacks = kmalloc_array(num_vqs, sizeof(vq_callback_t *),
				  GFP_KERNEL);
	names = kmalloc_array(num_vqs, sizeof(char *), GFP_KERNEL);

	if (!callbacks || !vqs || !names) {
		err = -ENOMEM;
		goto out;
	}

	callbacks[0] = virtscsi_ctrl_done;
	callbacks[1] = virtscsi_event_done;
	names[0] = "control";
	names[1] = "event";
	for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++) {
		callbacks[i] = virtscsi_req_done;
		names[i] = "request";
	}

	/* Discover virtqueues and write information to configuration.  */
	err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
	if (err)
		goto out;

	virtscsi_init_vq(&vscsi->ctrl_vq, vqs[0]);
	virtscsi_init_vq(&vscsi->event_vq, vqs[1]);
	for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++)
		virtscsi_init_vq(&vscsi->req_vqs[i - VIRTIO_SCSI_VQ_BASE],
				 vqs[i]);

	virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE);
	virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE);

	err = 0;

out:
	kfree(names);
	kfree(callbacks);
	kfree(vqs);
	if (err)
		virtscsi_remove_vqs(vdev);
	return err;
}

static int virtscsi_probe(struct virtio_device *vdev)
{
	struct Scsi_Host *shost;
	struct virtio_scsi *vscsi;
	int err;
	u32 sg_elems, num_targets;
	u32 cmd_per_lun;
	u32 num_queues;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	/* We need to know how many queues before we allocate. */
	num_queues = virtscsi_config_get(vdev, num_queues) ?: 1;
	num_queues = min_t(unsigned int, nr_cpu_ids, num_queues);

	num_targets = virtscsi_config_get(vdev, max_target) + 1;

	shost = scsi_host_alloc(&virtscsi_host_template,
				struct_size(vscsi, req_vqs, num_queues));
	if (!shost)
		return -ENOMEM;

	sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;
	shost->sg_tablesize = sg_elems;
	vscsi = shost_priv(shost);
	vscsi->vdev = vdev;
	vscsi->num_queues = num_queues;
	vdev->priv = shost;

	err = virtscsi_init(vdev, vscsi);
	if (err)
		goto virtscsi_init_failed;

	shost->can_queue = virtqueue_get_vring_size(vscsi->req_vqs[0].vq);

	cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
	shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
	shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF;

	/*
	 * LUNs >= 256 are reported with format 1 (flat space), so they go
	 * in the range 16640-32767.
	 */
	shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1 + 0x4000;
	shost->max_id = num_targets;
	shost->max_channel = 0;
	shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
	shost->nr_hw_queues = num_queues;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) {
		int host_prot;

		host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
			    SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
			    SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;

		scsi_host_set_prot(shost, host_prot);
		scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
	}
#endif

	err = scsi_add_host(shost, &vdev->dev);
	if (err)
		goto scsi_add_host_failed;

	virtio_device_ready(vdev);

	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
		virtscsi_kick_event_all(vscsi);

	scsi_scan_host(shost);
	return 0;

scsi_add_host_failed:
	vdev->config->del_vqs(vdev);
virtscsi_init_failed:
	scsi_host_put(shost);
	return err;
}

913 
914 static void virtscsi_remove(struct virtio_device *vdev)
915 {
916 	struct Scsi_Host *shost = virtio_scsi_host(vdev);
917 	struct virtio_scsi *vscsi = shost_priv(shost);
918 
919 	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
920 		virtscsi_cancel_event_work(vscsi);
921 
922 	scsi_remove_host(shost);
923 	virtscsi_remove_vqs(vdev);
924 	scsi_host_put(shost);
925 }
926 
#ifdef CONFIG_PM_SLEEP
static int virtscsi_freeze(struct virtio_device *vdev)
{
	virtscsi_remove_vqs(vdev);
	return 0;
}

static int virtscsi_restore(struct virtio_device *vdev)
{
	struct Scsi_Host *sh = virtio_scsi_host(vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);
	int err;

	err = virtscsi_init(vdev, vscsi);
	if (err)
		return err;

	virtio_device_ready(vdev);

	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
		virtscsi_kick_event_all(vscsi);

	return err;
}
#endif

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_SCSI, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_SCSI_F_HOTPLUG,
	VIRTIO_SCSI_F_CHANGE,
#ifdef CONFIG_BLK_DEV_INTEGRITY
	VIRTIO_SCSI_F_T10_PI,
#endif
};

static struct virtio_driver virtio_scsi_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtscsi_probe,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtscsi_freeze,
	.restore = virtscsi_restore,
#endif
	.remove = virtscsi_remove,
};

static int __init init(void)
{
	int ret = -ENOMEM;

	virtscsi_cmd_cache = KMEM_CACHE(virtio_scsi_cmd, 0);
	if (!virtscsi_cmd_cache) {
		pr_err("kmem_cache_create() for virtscsi_cmd_cache failed\n");
		goto error;
	}

	virtscsi_cmd_pool =
		mempool_create_slab_pool(VIRTIO_SCSI_MEMPOOL_SZ,
					 virtscsi_cmd_cache);
	if (!virtscsi_cmd_pool) {
		pr_err("mempool_create() for virtscsi_cmd_pool failed\n");
		goto error;
	}
	ret = register_virtio_driver(&virtio_scsi_driver);
	if (ret < 0)
		goto error;

	return 0;

error:
	if (virtscsi_cmd_pool) {
		mempool_destroy(virtscsi_cmd_pool);
		virtscsi_cmd_pool = NULL;
	}
	if (virtscsi_cmd_cache) {
		kmem_cache_destroy(virtscsi_cmd_cache);
		virtscsi_cmd_cache = NULL;
	}
	return ret;
}

static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_scsi_driver);
	mempool_destroy(virtscsi_cmd_pool);
	kmem_cache_destroy(virtscsi_cmd_cache);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio SCSI HBA driver");
MODULE_LICENSE("GPL");