xref: /openbmc/linux/drivers/block/virtio_blk.c (revision 93d90ad7)
1 //#define DEBUG
2 #include <linux/spinlock.h>
3 #include <linux/slab.h>
4 #include <linux/blkdev.h>
5 #include <linux/hdreg.h>
6 #include <linux/module.h>
7 #include <linux/mutex.h>
8 #include <linux/virtio.h>
9 #include <linux/virtio_blk.h>
10 #include <linux/scatterlist.h>
11 #include <linux/string_helpers.h>
12 #include <scsi/scsi_cmnd.h>
13 #include <linux/idr.h>
14 #include <linux/blk-mq.h>
15 #include <linux/numa.h>
16 
17 #define PART_BITS 4
18 #define VQ_NAME_LEN 16
19 
20 static int major;
21 static DEFINE_IDA(vd_index_ida);
22 
23 static struct workqueue_struct *virtblk_wq;
24 
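/* Per-virtqueue state: the queue itself, a lock serialising access to it, and its name. */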
25 struct virtio_blk_vq {
26 	struct virtqueue *vq;
27 	spinlock_t lock;
28 	char name[VQ_NAME_LEN];
29 } ____cacheline_aligned_in_smp;
30 
31 struct virtio_blk
32 {
33 	struct virtio_device *vdev;
34 
35 	/* The disk structure for the kernel. */
36 	struct gendisk *disk;
37 
38 	/* Block layer tags. */
39 	struct blk_mq_tag_set tag_set;
40 
41 	/* Process context for config space updates */
42 	struct work_struct config_work;
43 
44 	/* What host tells us, plus 2 for header & trailer. */
45 	unsigned int sg_elems;
46 
47 	/* Ida index - used to track minor number allocations. */
48 	int index;
49 
50 	/* Number of request virtqueues in use. */
51 	int num_vqs;
52 	struct virtio_blk_vq *vqs;
53 };
54 
55 struct virtblk_req
56 {
57 	struct request *req;
58 	struct virtio_blk_outhdr out_hdr;
59 	struct virtio_scsi_inhdr in_hdr;
60 	u8 status;
61 	struct scatterlist sg[];
62 };
63 
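/* Map the one-byte status the device wrote back to a Linux errno. */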
64 static inline int virtblk_result(struct virtblk_req *vbr)
65 {
66 	switch (vbr->status) {
67 	case VIRTIO_BLK_S_OK:
68 		return 0;
69 	case VIRTIO_BLK_S_UNSUPP:
70 		return -ENOTTY;
71 	default:
72 		return -EIO;
73 	}
74 }
75 
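/*
 * Build the scatterlist chain for one request and add it to the virtqueue:
 * the out-header first, then (for SCSI packet commands) the command block,
 * then the data buffers, then (again for SCSI) the sense buffer and SCSI
 * in-header, and finally the one-byte status the device writes back.
 */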
76 static int __virtblk_add_req(struct virtqueue *vq,
77 			     struct virtblk_req *vbr,
78 			     struct scatterlist *data_sg,
79 			     bool have_data)
80 {
81 	struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6];
82 	unsigned int num_out = 0, num_in = 0;
83 	__virtio32 type = vbr->out_hdr.type & ~cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT);
84 
85 	sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
86 	sgs[num_out++] = &hdr;
87 
88 	/*
89 	 * If this is a packet command we need a couple of additional headers.
90 	 * Behind the normal outhdr we put a segment with the scsi command
91 	 * block, and before the normal inhdr we put the sense data and the
92 	 * inhdr with additional status information.
93 	 */
94 	if (type == cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_SCSI_CMD)) {
95 		sg_init_one(&cmd, vbr->req->cmd, vbr->req->cmd_len);
96 		sgs[num_out++] = &cmd;
97 	}
98 
99 	if (have_data) {
100 		if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
101 			sgs[num_out++] = data_sg;
102 		else
103 			sgs[num_out + num_in++] = data_sg;
104 	}
105 
106 	if (type == cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_SCSI_CMD)) {
107 		sg_init_one(&sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
108 		sgs[num_out + num_in++] = &sense;
109 		sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
110 		sgs[num_out + num_in++] = &inhdr;
111 	}
112 
113 	sg_init_one(&status, &vbr->status, sizeof(vbr->status));
114 	sgs[num_out + num_in++] = &status;
115 
116 	return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
117 }
118 
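/*
 * blk-mq ->complete handler: copy the SCSI/ID result fields back into the
 * request and end it with the errno derived from the virtio status byte.
 */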
119 static inline void virtblk_request_done(struct request *req)
120 {
121 	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
122 	struct virtio_blk *vblk = req->q->queuedata;
123 	int error = virtblk_result(vbr);
124 
125 	if (req->cmd_type == REQ_TYPE_BLOCK_PC) {
126 		req->resid_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.residual);
127 		req->sense_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.sense_len);
128 		req->errors = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.errors);
129 	} else if (req->cmd_type == REQ_TYPE_SPECIAL) {
130 		req->errors = (error != 0);
131 	}
132 
133 	blk_mq_end_request(req, error);
134 }
135 
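/*
 * Virtqueue callback: reap every completed descriptor, hand each request to
 * blk-mq for completion, and restart the hardware queue if it had been
 * stopped while the ring was full.
 */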
136 static void virtblk_done(struct virtqueue *vq)
137 {
138 	struct virtio_blk *vblk = vq->vdev->priv;
139 	bool req_done = false;
140 	int qid = vq->index;
141 	struct virtblk_req *vbr;
142 	unsigned long flags;
143 	unsigned int len;
144 
145 	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
146 	do {
147 		virtqueue_disable_cb(vq);
148 		while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
149 			blk_mq_complete_request(vbr->req);
150 			req_done = true;
151 		}
152 		if (unlikely(virtqueue_is_broken(vq)))
153 			break;
154 	} while (!virtqueue_enable_cb(vq));
155 
156 	/* In case the queue was stopped while waiting for more buffers. */
157 	if (req_done)
158 		blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
159 	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
160 }
161 
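/*
 * blk-mq ->queue_rq: translate one request into a virtio-blk out-header, map
 * its data into the preallocated scatterlist and post it on the virtqueue
 * matching this hardware context; if the ring is full, stop the queue and
 * report BLK_MQ_RQ_QUEUE_BUSY so the request is retried later.
 */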
162 static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
163 			   const struct blk_mq_queue_data *bd)
164 {
165 	struct virtio_blk *vblk = hctx->queue->queuedata;
166 	struct request *req = bd->rq;
167 	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
168 	unsigned long flags;
169 	unsigned int num;
170 	int qid = hctx->queue_num;
171 	int err;
172 	bool notify = false;
173 
174 	BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
175 
176 	vbr->req = req;
177 	if (req->cmd_flags & REQ_FLUSH) {
178 		vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_FLUSH);
179 		vbr->out_hdr.sector = 0;
180 		vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
181 	} else {
182 		switch (req->cmd_type) {
183 		case REQ_TYPE_FS:
184 			vbr->out_hdr.type = 0;
185 			vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, blk_rq_pos(vbr->req));
186 			vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
187 			break;
188 		case REQ_TYPE_BLOCK_PC:
189 			vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_SCSI_CMD);
190 			vbr->out_hdr.sector = 0;
191 			vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
192 			break;
193 		case REQ_TYPE_SPECIAL:
194 			vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID);
195 			vbr->out_hdr.sector = 0;
196 			vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
197 			break;
198 		default:
199 			/* We don't put anything else in the queue. */
200 			BUG();
201 		}
202 	}
203 
204 	blk_mq_start_request(req);
205 
206 	num = blk_rq_map_sg(hctx->queue, vbr->req, vbr->sg);
207 	if (num) {
208 		if (rq_data_dir(vbr->req) == WRITE)
209 			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
210 		else
211 			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
212 	}
213 
214 	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
215 	err = __virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
216 	if (err) {
217 		virtqueue_kick(vblk->vqs[qid].vq);
218 		blk_mq_stop_hw_queue(hctx);
219 		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
220 		/* Out of mem doesn't actually happen, since we fall back
221 		 * to direct descriptors */
222 		if (err == -ENOMEM || err == -ENOSPC)
223 			return BLK_MQ_RQ_QUEUE_BUSY;
224 		return BLK_MQ_RQ_QUEUE_ERROR;
225 	}
226 
227 	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
228 		notify = true;
229 	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
230 
231 	if (notify)
232 		virtqueue_notify(vblk->vqs[qid].vq);
233 	return BLK_MQ_RQ_QUEUE_OK;
234 }
235 
236 /* Return the device ID (serial number) string for *disk in *id_str.
237  */
238 static int virtblk_get_id(struct gendisk *disk, char *id_str)
239 {
240 	struct virtio_blk *vblk = disk->private_data;
241 	struct request *req;
242 	struct bio *bio;
243 	int err;
244 
245 	bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES,
246 			   GFP_KERNEL);
247 	if (IS_ERR(bio))
248 		return PTR_ERR(bio);
249 
250 	req = blk_make_request(vblk->disk->queue, bio, GFP_KERNEL);
251 	if (IS_ERR(req)) {
252 		bio_put(bio);
253 		return PTR_ERR(req);
254 	}
255 
256 	req->cmd_type = REQ_TYPE_SPECIAL;
257 	err = blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
258 	blk_put_request(req);
259 
260 	return err;
261 }
262 
263 static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
264 			     unsigned int cmd, unsigned long data)
265 {
266 	struct gendisk *disk = bdev->bd_disk;
267 	struct virtio_blk *vblk = disk->private_data;
268 
269 	/*
270 	 * Only allow the generic SCSI ioctls if the host can support it.
271 	 */
272 	if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
273 		return -ENOTTY;
274 
275 	return scsi_cmd_blk_ioctl(bdev, mode, cmd,
276 				  (void __user *)data);
277 }
278 
279 /* We provide getgeo only to please some old bootloader/partitioning tools */
280 static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
281 {
282 	struct virtio_blk *vblk = bd->bd_disk->private_data;
283 
284 	/* see if the host passed in geometry config */
285 	if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
286 		virtio_cread(vblk->vdev, struct virtio_blk_config,
287 			     geometry.cylinders, &geo->cylinders);
288 		virtio_cread(vblk->vdev, struct virtio_blk_config,
289 			     geometry.heads, &geo->heads);
290 		virtio_cread(vblk->vdev, struct virtio_blk_config,
291 			     geometry.sectors, &geo->sectors);
292 	} else {
293 		/* some standard values, similar to sd */
294 		geo->heads = 1 << 6;
295 		geo->sectors = 1 << 5;
296 		geo->cylinders = get_capacity(bd->bd_disk) >> 11;
297 	}
298 	return 0;
299 }
300 
301 static const struct block_device_operations virtblk_fops = {
302 	.ioctl  = virtblk_ioctl,
303 	.owner  = THIS_MODULE,
304 	.getgeo = virtblk_getgeo,
305 };
306 
307 static int index_to_minor(int index)
308 {
309 	return index << PART_BITS;
310 }
311 
312 static int minor_to_index(int minor)
313 {
314 	return minor >> PART_BITS;
315 }
316 
317 static ssize_t virtblk_serial_show(struct device *dev,
318 				struct device_attribute *attr, char *buf)
319 {
320 	struct gendisk *disk = dev_to_disk(dev);
321 	int err;
322 
323 	/* sysfs gives us a PAGE_SIZE buffer */
324 	BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);
325 
326 	buf[VIRTIO_BLK_ID_BYTES] = '\0';
327 	err = virtblk_get_id(disk, buf);
328 	if (!err)
329 		return strlen(buf);
330 
331 	if (err == -EIO) /* Unsupported? Make it empty. */
332 		return 0;
333 
334 	return err;
335 }
336 
337 static DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL);
338 
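/*
 * Process-context handler for config-space changes: re-read the capacity,
 * log the new size, update the gendisk and emit a RESIZE=1 uevent.
 */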
339 static void virtblk_config_changed_work(struct work_struct *work)
340 {
341 	struct virtio_blk *vblk =
342 		container_of(work, struct virtio_blk, config_work);
343 	struct virtio_device *vdev = vblk->vdev;
344 	struct request_queue *q = vblk->disk->queue;
345 	char cap_str_2[10], cap_str_10[10];
346 	char *envp[] = { "RESIZE=1", NULL };
347 	u64 capacity, size;
348 
349 	/* Host must always specify the capacity. */
350 	virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);
351 
352 	/* If capacity is too big, truncate with warning. */
353 	if ((sector_t)capacity != capacity) {
354 		dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
355 			 (unsigned long long)capacity);
356 		capacity = (sector_t)-1;
357 	}
358 
359 	size = capacity * queue_logical_block_size(q);
360 	string_get_size(size, STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
361 	string_get_size(size, STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));
362 
363 	dev_notice(&vdev->dev,
364 		  "new size: %llu %d-byte logical blocks (%s/%s)\n",
365 		  (unsigned long long)capacity,
366 		  queue_logical_block_size(q),
367 		  cap_str_10, cap_str_2);
368 
369 	set_capacity(vblk->disk, capacity);
370 	revalidate_disk(vblk->disk);
371 	kobject_uevent_env(&disk_to_dev(vblk->disk)->kobj, KOBJ_CHANGE, envp);
372 }
373 
374 static void virtblk_config_changed(struct virtio_device *vdev)
375 {
376 	struct virtio_blk *vblk = vdev->priv;
377 
378 	queue_work(virtblk_wq, &vblk->config_work);
379 }
380 
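/*
 * Allocate the per-queue state and ask the transport for the request
 * virtqueues: as many as the device advertises via VIRTIO_BLK_F_MQ
 * (num_queues), or a single queue otherwise.
 */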
381 static int init_vq(struct virtio_blk *vblk)
382 {
383 	int err = 0;
384 	int i;
385 	vq_callback_t **callbacks;
386 	const char **names;
387 	struct virtqueue **vqs;
388 	unsigned short num_vqs;
389 	struct virtio_device *vdev = vblk->vdev;
390 
391 	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ,
392 				   struct virtio_blk_config, num_queues,
393 				   &num_vqs);
394 	if (err)
395 		num_vqs = 1;
396 
397 	vblk->vqs = kmalloc(sizeof(*vblk->vqs) * num_vqs, GFP_KERNEL);
398 	if (!vblk->vqs) {
399 		err = -ENOMEM;
400 		goto out;
401 	}
402 	err = -ENOMEM;	/* covers any allocation failure below */
403 	names = kmalloc(sizeof(*names) * num_vqs, GFP_KERNEL);
404 	if (!names)
405 		goto err_names;
406 
407 	callbacks = kmalloc(sizeof(*callbacks) * num_vqs, GFP_KERNEL);
408 	if (!callbacks)
409 		goto err_callbacks;
410 
411 	vqs = kmalloc(sizeof(*vqs) * num_vqs, GFP_KERNEL);
412 	if (!vqs)
413 		goto err_vqs;
414 
415 	for (i = 0; i < num_vqs; i++) {
416 		callbacks[i] = virtblk_done;
417 		snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
418 		names[i] = vblk->vqs[i].name;
419 	}
420 
421 	/* Ask the transport to discover and set up the virtqueues. */
422 	err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names);
423 	if (err)
424 		goto err_find_vqs;
425 
426 	for (i = 0; i < num_vqs; i++) {
427 		spin_lock_init(&vblk->vqs[i].lock);
428 		vblk->vqs[i].vq = vqs[i];
429 	}
430 	vblk->num_vqs = num_vqs;
431 
432  err_find_vqs:
433 	kfree(vqs);
434  err_vqs:
435 	kfree(callbacks);
436  err_callbacks:
437 	kfree(names);
438  err_names:
439 	if (err)
440 		kfree(vblk->vqs);
441  out:
442 	return err;
443 }
444 
445 /*
446  * Legacy naming scheme used for virtio devices.  We are stuck with it for
447  * virtio blk but don't ever use it for any new driver.
448  */
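/* For example: index 0 maps to "vda", index 25 to "vdz", index 26 to "vdaa". */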
449 static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
450 {
451 	const int base = 'z' - 'a' + 1;
452 	char *begin = buf + strlen(prefix);
453 	char *end = buf + buflen;
454 	char *p;
455 	int unit;
456 
457 	p = end - 1;
458 	*p = '\0';
459 	unit = base;
460 	do {
461 		if (p == begin)
462 			return -EINVAL;
463 		*--p = 'a' + (index % unit);
464 		index = (index / unit) - 1;
465 	} while (index >= 0);
466 
467 	memmove(begin, p, end - p);
468 	memcpy(buf, prefix, strlen(prefix));
469 
470 	return 0;
471 }
472 
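/*
 * Report whether the device cache is in writeback mode.  Prefer the value
 * in config space (VIRTIO_BLK_F_CONFIG_WCE); without it, fall back to the
 * VIRTIO_BLK_F_WCE feature bit, and virtio 1.0 devices are always writeback.
 */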
473 static int virtblk_get_cache_mode(struct virtio_device *vdev)
474 {
475 	u8 writeback;
476 	int err;
477 
478 	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
479 				   struct virtio_blk_config, wce,
480 				   &writeback);
481 	if (err)
482 		writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_WCE) ||
483 		            virtio_has_feature(vdev, VIRTIO_F_VERSION_1);
484 
485 	return writeback;
486 }
487 
488 static void virtblk_update_cache_mode(struct virtio_device *vdev)
489 {
490 	u8 writeback = virtblk_get_cache_mode(vdev);
491 	struct virtio_blk *vblk = vdev->priv;
492 
493 	if (writeback)
494 		blk_queue_flush(vblk->disk->queue, REQ_FLUSH);
495 	else
496 		blk_queue_flush(vblk->disk->queue, 0);
497 
498 	revalidate_disk(vblk->disk);
499 }
500 
501 static const char *const virtblk_cache_types[] = {
502 	"write through", "write back"
503 };
504 
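/*
 * sysfs "cache_type" store: accept "write through" or "write back", write
 * the selected mode into the wce config field and update the queue's flush
 * capability to match.
 */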
505 static ssize_t
506 virtblk_cache_type_store(struct device *dev, struct device_attribute *attr,
507 			 const char *buf, size_t count)
508 {
509 	struct gendisk *disk = dev_to_disk(dev);
510 	struct virtio_blk *vblk = disk->private_data;
511 	struct virtio_device *vdev = vblk->vdev;
512 	int i;
513 
514 	BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
515 	for (i = ARRAY_SIZE(virtblk_cache_types); --i >= 0; )
516 		if (sysfs_streq(buf, virtblk_cache_types[i]))
517 			break;
518 
519 	if (i < 0)
520 		return -EINVAL;
521 
522 	virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
523 	virtblk_update_cache_mode(vdev);
524 	return count;
525 }
526 
527 static ssize_t
528 virtblk_cache_type_show(struct device *dev, struct device_attribute *attr,
529 			 char *buf)
530 {
531 	struct gendisk *disk = dev_to_disk(dev);
532 	struct virtio_blk *vblk = disk->private_data;
533 	u8 writeback = virtblk_get_cache_mode(vblk->vdev);
534 
535 	BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
536 	return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]);
537 }
538 
539 static const struct device_attribute dev_attr_cache_type_ro =
540 	__ATTR(cache_type, S_IRUGO,
541 	       virtblk_cache_type_show, NULL);
542 static const struct device_attribute dev_attr_cache_type_rw =
543 	__ATTR(cache_type, S_IRUGO|S_IWUSR,
544 	       virtblk_cache_type_show, virtblk_cache_type_store);
545 
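/* blk-mq ->init_request: initialise each request's embedded scatterlist once, when the tag set is created. */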
546 static int virtblk_init_request(void *data, struct request *rq,
547 		unsigned int hctx_idx, unsigned int request_idx,
548 		unsigned int numa_node)
549 {
550 	struct virtio_blk *vblk = data;
551 	struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);
552 
553 	sg_init_table(vbr->sg, vblk->sg_elems);
554 	return 0;
555 }
556 
557 static struct blk_mq_ops virtio_mq_ops = {
558 	.queue_rq	= virtio_queue_rq,
559 	.map_queue	= blk_mq_map_queue,
560 	.complete	= virtblk_request_done,
561 	.init_request	= virtblk_init_request,
562 };
563 
564 static unsigned int virtblk_queue_depth;
565 module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);
566 
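/*
 * Device probe: allocate an index and the driver state, set up the
 * virtqueues and the blk-mq tag set, create the gendisk, apply whatever
 * limits and topology the device advertises, and finally register the disk
 * and its sysfs attributes.
 */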
567 static int virtblk_probe(struct virtio_device *vdev)
568 {
569 	struct virtio_blk *vblk;
570 	struct request_queue *q;
571 	int err, index;
572 
573 	u64 cap;
574 	u32 v, blk_size, sg_elems, opt_io_size;
575 	u16 min_io_size;
576 	u8 physical_block_exp, alignment_offset;
577 
578 	err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
579 			     GFP_KERNEL);
580 	if (err < 0)
581 		goto out;
582 	index = err;
583 
584 	/* We need to know how many segments before we allocate. */
585 	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
586 				   struct virtio_blk_config, seg_max,
587 				   &sg_elems);
588 
589 	/* We need at least one SG element, whatever they say. */
590 	if (err || !sg_elems)
591 		sg_elems = 1;
592 
593 	/* We need one extra sg element at the head and one at the tail. */
594 	sg_elems += 2;
595 	vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
596 	if (!vblk) {
597 		err = -ENOMEM;
598 		goto out_free_index;
599 	}
600 
601 	vblk->vdev = vdev;
602 	vblk->sg_elems = sg_elems;
603 
604 	INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
605 
606 	err = init_vq(vblk);
607 	if (err)
608 		goto out_free_vblk;
609 
610 	/* FIXME: How many partitions?  How long is a piece of string? */
611 	vblk->disk = alloc_disk(1 << PART_BITS);
612 	if (!vblk->disk) {
613 		err = -ENOMEM;
614 		goto out_free_vq;
615 	}
616 
617 	/* Default queue sizing is to fill the ring. */
618 	if (!virtblk_queue_depth) {
619 		virtblk_queue_depth = vblk->vqs[0].vq->num_free;
620 		/* ... but without indirect descs, we use 2 descs per req */
621 		if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
622 			virtblk_queue_depth /= 2;
623 	}
624 
625 	memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
626 	vblk->tag_set.ops = &virtio_mq_ops;
627 	vblk->tag_set.queue_depth = virtblk_queue_depth;
628 	vblk->tag_set.numa_node = NUMA_NO_NODE;
629 	vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
630 	vblk->tag_set.cmd_size =
631 		sizeof(struct virtblk_req) +
632 		sizeof(struct scatterlist) * sg_elems;
633 	vblk->tag_set.driver_data = vblk;
634 	vblk->tag_set.nr_hw_queues = vblk->num_vqs;
635 
636 	err = blk_mq_alloc_tag_set(&vblk->tag_set);
637 	if (err)
638 		goto out_put_disk;
639 
640 	q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set);
641 	if (IS_ERR(q)) {
642 		err = -ENOMEM;
643 		goto out_free_tags;
644 	}
645 
646 	q->queuedata = vblk;
647 
648 	virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);
649 
650 	vblk->disk->major = major;
651 	vblk->disk->first_minor = index_to_minor(index);
652 	vblk->disk->private_data = vblk;
653 	vblk->disk->fops = &virtblk_fops;
654 	vblk->disk->driverfs_dev = &vdev->dev;
655 	vblk->index = index;
656 
657 	/* configure queue flush support */
658 	virtblk_update_cache_mode(vdev);
659 
660 	/* If disk is read-only in the host, the guest should obey */
661 	if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
662 		set_disk_ro(vblk->disk, 1);
663 
664 	/* Host must always specify the capacity. */
665 	virtio_cread(vdev, struct virtio_blk_config, capacity, &cap);
666 
667 	/* If capacity is too big, truncate with warning. */
668 	if ((sector_t)cap != cap) {
669 		dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
670 			 (unsigned long long)cap);
671 		cap = (sector_t)-1;
672 	}
673 	set_capacity(vblk->disk, cap);
674 
675 	/* We can handle whatever the host told us to handle. */
676 	blk_queue_max_segments(q, vblk->sg_elems-2);
677 
678 	/* No need to bounce any requests */
679 	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
680 
681 	/* No real sector limit. */
682 	blk_queue_max_hw_sectors(q, -1U);
683 
684 	/* Host can optionally specify maximum segment size and number of
685 	 * segments. */
686 	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
687 				   struct virtio_blk_config, size_max, &v);
688 	if (!err)
689 		blk_queue_max_segment_size(q, v);
690 	else
691 		blk_queue_max_segment_size(q, -1U);
692 
693 	/* Host can optionally specify the block size of the device */
694 	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
695 				   struct virtio_blk_config, blk_size,
696 				   &blk_size);
697 	if (!err)
698 		blk_queue_logical_block_size(q, blk_size);
699 	else
700 		blk_size = queue_logical_block_size(q);
701 
702 	/* Use topology information if available */
703 	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
704 				   struct virtio_blk_config, physical_block_exp,
705 				   &physical_block_exp);
706 	if (!err && physical_block_exp)
707 		blk_queue_physical_block_size(q,
708 				blk_size * (1 << physical_block_exp));
709 
710 	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
711 				   struct virtio_blk_config, alignment_offset,
712 				   &alignment_offset);
713 	if (!err && alignment_offset)
714 		blk_queue_alignment_offset(q, blk_size * alignment_offset);
715 
716 	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
717 				   struct virtio_blk_config, min_io_size,
718 				   &min_io_size);
719 	if (!err && min_io_size)
720 		blk_queue_io_min(q, blk_size * min_io_size);
721 
722 	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
723 				   struct virtio_blk_config, opt_io_size,
724 				   &opt_io_size);
725 	if (!err && opt_io_size)
726 		blk_queue_io_opt(q, blk_size * opt_io_size);
727 
728 	virtio_device_ready(vdev);
729 
730 	add_disk(vblk->disk);
731 	err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_serial);
732 	if (err)
733 		goto out_del_disk;
734 
735 	if (virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
736 		err = device_create_file(disk_to_dev(vblk->disk),
737 					 &dev_attr_cache_type_rw);
738 	else
739 		err = device_create_file(disk_to_dev(vblk->disk),
740 					 &dev_attr_cache_type_ro);
741 	if (err)
742 		goto out_del_disk;
743 	return 0;
744 
745 out_del_disk:
746 	del_gendisk(vblk->disk);
747 	blk_cleanup_queue(vblk->disk->queue);
748 out_free_tags:
749 	blk_mq_free_tag_set(&vblk->tag_set);
750 out_put_disk:
751 	put_disk(vblk->disk);
752 out_free_vq:
753 	vdev->config->del_vqs(vdev);
754 out_free_vblk:
755 	kfree(vblk);
756 out_free_index:
757 	ida_simple_remove(&vd_index_ida, index);
758 out:
759 	return err;
760 }
761 
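/*
 * Device removal: tear everything down in the reverse order of probe and
 * release the minor-number index once no one else holds a reference to the
 * disk.
 */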
762 static void virtblk_remove(struct virtio_device *vdev)
763 {
764 	struct virtio_blk *vblk = vdev->priv;
765 	int index = vblk->index;
766 	int refc;
767 
768 	/* Make sure no work handler is accessing the device. */
769 	flush_work(&vblk->config_work);
770 
771 	del_gendisk(vblk->disk);
772 	blk_cleanup_queue(vblk->disk->queue);
773 
774 	blk_mq_free_tag_set(&vblk->tag_set);
775 
776 	/* Stop all the virtqueues. */
777 	vdev->config->reset(vdev);
778 
779 	refc = atomic_read(&disk_to_dev(vblk->disk)->kobj.kref.refcount);
780 	put_disk(vblk->disk);
781 	vdev->config->del_vqs(vdev);
782 	kfree(vblk->vqs);
783 	kfree(vblk);
784 
785 	/* Only free the device index if there are no other users of the disk. */
786 	if (refc == 1)
787 		ida_simple_remove(&vd_index_ida, index);
788 }
789 
790 #ifdef CONFIG_PM_SLEEP
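/*
 * Suspend/resume support: freeze resets the device, quiesces the hardware
 * queues and tears down the virtqueues; restore rebuilds the virtqueues and
 * restarts the stopped queues.
 */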
791 static int virtblk_freeze(struct virtio_device *vdev)
792 {
793 	struct virtio_blk *vblk = vdev->priv;
794 
795 	/* Ensure we don't receive any more interrupts */
796 	vdev->config->reset(vdev);
797 
798 	/* Make sure no work handler is accessing the device. */
799 	flush_work(&vblk->config_work);
800 
801 	blk_mq_stop_hw_queues(vblk->disk->queue);
802 
803 	vdev->config->del_vqs(vdev);
804 	return 0;
805 }
806 
807 static int virtblk_restore(struct virtio_device *vdev)
808 {
809 	struct virtio_blk *vblk = vdev->priv;
810 	int ret;
811 
812 	ret = init_vq(vdev->priv);
813 	if (ret)
814 		return ret;
815 
816 	virtio_device_ready(vdev);
817 
818 	blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
819 	return 0;
820 }
821 #endif
822 
823 static const struct virtio_device_id id_table[] = {
824 	{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
825 	{ 0 },
826 };
827 
828 static unsigned int features_legacy[] = {
829 	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
830 	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, VIRTIO_BLK_F_SCSI,
831 	VIRTIO_BLK_F_WCE, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
832 	VIRTIO_BLK_F_MQ,
833 };
834 
835 static unsigned int features[] = {
836 	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
837 	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
838 	VIRTIO_BLK_F_TOPOLOGY,
839 	VIRTIO_BLK_F_MQ,
840 };
841 
842 static struct virtio_driver virtio_blk = {
843 	.feature_table			= features,
844 	.feature_table_size		= ARRAY_SIZE(features),
845 	.feature_table_legacy		= features_legacy,
846 	.feature_table_size_legacy	= ARRAY_SIZE(features_legacy),
847 	.driver.name			= KBUILD_MODNAME,
848 	.driver.owner			= THIS_MODULE,
849 	.id_table			= id_table,
850 	.probe				= virtblk_probe,
851 	.remove				= virtblk_remove,
852 	.config_changed			= virtblk_config_changed,
853 #ifdef CONFIG_PM_SLEEP
854 	.freeze				= virtblk_freeze,
855 	.restore			= virtblk_restore,
856 #endif
857 };
858 
859 static int __init init(void)
860 {
861 	int error;
862 
863 	virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
864 	if (!virtblk_wq)
865 		return -ENOMEM;
866 
867 	major = register_blkdev(0, "virtblk");
868 	if (major < 0) {
869 		error = major;
870 		goto out_destroy_workqueue;
871 	}
872 
873 	error = register_virtio_driver(&virtio_blk);
874 	if (error)
875 		goto out_unregister_blkdev;
876 	return 0;
877 
878 out_unregister_blkdev:
879 	unregister_blkdev(major, "virtblk");
880 out_destroy_workqueue:
881 	destroy_workqueue(virtblk_wq);
882 	return error;
883 }
884 
885 static void __exit fini(void)
886 {
887 	unregister_virtio_driver(&virtio_blk);
888 	unregister_blkdev(major, "virtblk");
889 	destroy_workqueue(virtblk_wq);
890 }
891 module_init(init);
892 module_exit(fini);
893 
894 MODULE_DEVICE_TABLE(virtio, id_table);
895 MODULE_DESCRIPTION("Virtio block driver");
896 MODULE_LICENSE("GPL");
897