/* xref: /openbmc/linux/drivers/block/virtio_blk.c (revision 4a44a19b) */
1 //#define DEBUG
2 #include <linux/spinlock.h>
3 #include <linux/slab.h>
4 #include <linux/blkdev.h>
5 #include <linux/hdreg.h>
6 #include <linux/module.h>
7 #include <linux/mutex.h>
8 #include <linux/virtio.h>
9 #include <linux/virtio_blk.h>
10 #include <linux/scatterlist.h>
11 #include <linux/string_helpers.h>
12 #include <scsi/scsi_cmnd.h>
13 #include <linux/idr.h>
14 #include <linux/blk-mq.h>
15 #include <linux/numa.h>
16 
17 #define PART_BITS 4
18 #define VQ_NAME_LEN 16
19 
20 static int major;
21 static DEFINE_IDA(vd_index_ida);
22 
23 static struct workqueue_struct *virtblk_wq;
24 
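/*
 * Per-virtqueue state: the virtqueue itself, a spinlock serializing all
 * access to it, and the name handed to find_vqs().  Cacheline-aligned so
 * that queues serviced on different CPUs do not false-share.
 */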
25 struct virtio_blk_vq {
26 	struct virtqueue *vq;
27 	spinlock_t lock;
28 	char name[VQ_NAME_LEN];
29 } ____cacheline_aligned_in_smp;
30 
31 struct virtio_blk
32 {
33 	struct virtio_device *vdev;
34 
35 	/* The disk structure for the kernel. */
36 	struct gendisk *disk;
37 
38 	/* Block layer tags. */
39 	struct blk_mq_tag_set tag_set;
40 
41 	/* Process context for config space updates */
42 	struct work_struct config_work;
43 
44 	/* What the host tells us, plus 2 for the request header and status trailer. */
45 	unsigned int sg_elems;
46 
47 	/* Ida index - used to track minor number allocations. */
48 	int index;
49 
50 	/* number of virtqueues */
51 	int num_vqs;
52 	struct virtio_blk_vq *vqs;
53 };
54 
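/*
 * Per-request driver data, allocated by blk-mq as the request PDU (see
 * tag_set.cmd_size in virtblk_probe()): the virtio request header, the
 * SCSI in-header used for packet commands, the status byte the device
 * writes back, and a scatterlist sized for sg_elems entries.
 */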
55 struct virtblk_req
56 {
57 	struct request *req;
58 	struct virtio_blk_outhdr out_hdr;
59 	struct virtio_scsi_inhdr in_hdr;
60 	u8 status;
61 	struct scatterlist sg[];
62 };
63 
64 static inline int virtblk_result(struct virtblk_req *vbr)
65 {
66 	switch (vbr->status) {
67 	case VIRTIO_BLK_S_OK:
68 		return 0;
69 	case VIRTIO_BLK_S_UNSUPP:
70 		return -ENOTTY;
71 	default:
72 		return -EIO;
73 	}
74 }
75 
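/*
 * Build the descriptor chain for one request and add it to the virtqueue:
 * out_hdr, then (for SCSI packet commands) the command block, then the
 * data scatterlist in the direction implied by out_hdr.type, then (again
 * for SCSI) the sense buffer and in_hdr, and finally the one-byte status
 * the device fills in.  Called with the per-queue lock held.
 */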
76 static int __virtblk_add_req(struct virtqueue *vq,
77 			     struct virtblk_req *vbr,
78 			     struct scatterlist *data_sg,
79 			     bool have_data)
80 {
81 	struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6];
82 	unsigned int num_out = 0, num_in = 0;
83 	int type = vbr->out_hdr.type & ~VIRTIO_BLK_T_OUT;
84 
85 	sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
86 	sgs[num_out++] = &hdr;
87 
88 	/*
89 	 * If this is a packet command we need a couple of additional headers.
90 	 * Behind the normal outhdr we put a segment with the scsi command
91 	 * block, and before the normal inhdr we put the sense data and the
92 	 * inhdr with additional status information.
93 	 */
94 	if (type == VIRTIO_BLK_T_SCSI_CMD) {
95 		sg_init_one(&cmd, vbr->req->cmd, vbr->req->cmd_len);
96 		sgs[num_out++] = &cmd;
97 	}
98 
99 	if (have_data) {
100 		if (vbr->out_hdr.type & VIRTIO_BLK_T_OUT)
101 			sgs[num_out++] = data_sg;
102 		else
103 			sgs[num_out + num_in++] = data_sg;
104 	}
105 
106 	if (type == VIRTIO_BLK_T_SCSI_CMD) {
107 		sg_init_one(&sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
108 		sgs[num_out + num_in++] = &sense;
109 		sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
110 		sgs[num_out + num_in++] = &inhdr;
111 	}
112 
113 	sg_init_one(&status, &vbr->status, sizeof(vbr->status));
114 	sgs[num_out + num_in++] = &status;
115 
116 	return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
117 }
118 
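/*
 * blk-mq .complete handler, run once virtblk_done() has marked the request
 * complete: copy back residual/sense/errors for SCSI packet commands and
 * end the request with the errno derived from the status byte.
 */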
119 static inline void virtblk_request_done(struct request *req)
120 {
121 	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
122 	int error = virtblk_result(vbr);
123 
124 	if (req->cmd_type == REQ_TYPE_BLOCK_PC) {
125 		req->resid_len = vbr->in_hdr.residual;
126 		req->sense_len = vbr->in_hdr.sense_len;
127 		req->errors = vbr->in_hdr.errors;
128 	} else if (req->cmd_type == REQ_TYPE_SPECIAL) {
129 		req->errors = (error != 0);
130 	}
131 
132 	blk_mq_end_request(req, error);
133 }
134 
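/*
 * Virtqueue callback, run in interrupt context.  Disable further callbacks,
 * drain all completed buffers, then re-enable; looping until the enable
 * succeeds closes the race with completions that arrive in between.  If
 * anything completed, restart hardware queues that were stopped because the
 * ring was full.
 */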
135 static void virtblk_done(struct virtqueue *vq)
136 {
137 	struct virtio_blk *vblk = vq->vdev->priv;
138 	bool req_done = false;
139 	int qid = vq->index;
140 	struct virtblk_req *vbr;
141 	unsigned long flags;
142 	unsigned int len;
143 
144 	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
145 	do {
146 		virtqueue_disable_cb(vq);
147 		while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
148 			blk_mq_complete_request(vbr->req);
149 			req_done = true;
150 		}
151 		if (unlikely(virtqueue_is_broken(vq)))
152 			break;
153 	} while (!virtqueue_enable_cb(vq));
154 
155 	/* In case queue is stopped waiting for more buffers. */
156 	if (req_done)
157 		blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
158 	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
159 }
160 
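/*
 * blk-mq .queue_rq hook.  Fill out_hdr according to the request type
 * (flush, filesystem I/O, SCSI packet command or GET_ID), map the data into
 * vbr->sg, and add the request to this hardware queue's virtqueue under its
 * lock.  If the ring is full, stop the hw queue and return
 * BLK_MQ_RQ_QUEUE_BUSY so blk-mq retries later; the host is kicked only for
 * the last request of a batch, and only if a notification is needed.
 */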
161 static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req,
162 		bool last)
163 {
164 	struct virtio_blk *vblk = hctx->queue->queuedata;
165 	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
166 	unsigned long flags;
167 	unsigned int num;
168 	int qid = hctx->queue_num;
169 	int err;
170 	bool notify = false;
171 
172 	BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
173 
174 	vbr->req = req;
175 	if (req->cmd_flags & REQ_FLUSH) {
176 		vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
177 		vbr->out_hdr.sector = 0;
178 		vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
179 	} else {
180 		switch (req->cmd_type) {
181 		case REQ_TYPE_FS:
182 			vbr->out_hdr.type = 0;
183 			vbr->out_hdr.sector = blk_rq_pos(vbr->req);
184 			vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
185 			break;
186 		case REQ_TYPE_BLOCK_PC:
187 			vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
188 			vbr->out_hdr.sector = 0;
189 			vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
190 			break;
191 		case REQ_TYPE_SPECIAL:
192 			vbr->out_hdr.type = VIRTIO_BLK_T_GET_ID;
193 			vbr->out_hdr.sector = 0;
194 			vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
195 			break;
196 		default:
197 			/* We don't put anything else in the queue. */
198 			BUG();
199 		}
200 	}
201 
202 	blk_mq_start_request(req);
203 
204 	num = blk_rq_map_sg(hctx->queue, vbr->req, vbr->sg);
205 	if (num) {
206 		if (rq_data_dir(vbr->req) == WRITE)
207 			vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
208 		else
209 			vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
210 	}
211 
212 	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
213 	err = __virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
214 	if (err) {
215 		virtqueue_kick(vblk->vqs[qid].vq);
216 		blk_mq_stop_hw_queue(hctx);
217 		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
218 		/* Out of mem doesn't actually happen, since we fall back
219 		 * to direct descriptors */
220 		if (err == -ENOMEM || err == -ENOSPC)
221 			return BLK_MQ_RQ_QUEUE_BUSY;
222 		return BLK_MQ_RQ_QUEUE_ERROR;
223 	}
224 
225 	if (last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
226 		notify = true;
227 	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
228 
229 	if (notify)
230 		virtqueue_notify(vblk->vqs[qid].vq);
231 	return BLK_MQ_RQ_QUEUE_OK;
232 }
233 
234 /* Return the ID (serial number) string for *disk in *id_str by issuing
235  * a VIRTIO_BLK_T_GET_ID request of VIRTIO_BLK_ID_BYTES bytes. */
236 static int virtblk_get_id(struct gendisk *disk, char *id_str)
237 {
238 	struct virtio_blk *vblk = disk->private_data;
239 	struct request *req;
240 	struct bio *bio;
241 	int err;
242 
243 	bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES,
244 			   GFP_KERNEL);
245 	if (IS_ERR(bio))
246 		return PTR_ERR(bio);
247 
248 	req = blk_make_request(vblk->disk->queue, bio, GFP_KERNEL);
249 	if (IS_ERR(req)) {
250 		bio_put(bio);
251 		return PTR_ERR(req);
252 	}
253 
254 	req->cmd_type = REQ_TYPE_SPECIAL;
255 	err = blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
256 	blk_put_request(req);
257 
258 	return err;
259 }
260 
261 static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
262 			     unsigned int cmd, unsigned long data)
263 {
264 	struct gendisk *disk = bdev->bd_disk;
265 	struct virtio_blk *vblk = disk->private_data;
266 
267 	/*
268 	 * Only allow the generic SCSI ioctls if the host supports them.
269 	 */
270 	if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
271 		return -ENOTTY;
272 
273 	return scsi_cmd_blk_ioctl(bdev, mode, cmd,
274 				  (void __user *)data);
275 }
276 
277 /* We provide getgeo only to please some old bootloader/partitioning tools */
278 static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
279 {
280 	struct virtio_blk *vblk = bd->bd_disk->private_data;
281 
282 	/* see if the host passed in geometry config */
283 	if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
284 		virtio_cread(vblk->vdev, struct virtio_blk_config,
285 			     geometry.cylinders, &geo->cylinders);
286 		virtio_cread(vblk->vdev, struct virtio_blk_config,
287 			     geometry.heads, &geo->heads);
288 		virtio_cread(vblk->vdev, struct virtio_blk_config,
289 			     geometry.sectors, &geo->sectors);
290 	} else {
291 		/* some standard values, similar to sd */
292 		geo->heads = 1 << 6;
293 		geo->sectors = 1 << 5;
294 		geo->cylinders = get_capacity(bd->bd_disk) >> 11;
295 	}
296 	return 0;
297 }
298 
299 static const struct block_device_operations virtblk_fops = {
300 	.ioctl  = virtblk_ioctl,
301 	.owner  = THIS_MODULE,
302 	.getgeo = virtblk_getgeo,
303 };
304 
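/*
 * Each disk spans 1 << PART_BITS minor numbers; these helpers convert
 * between the ida-allocated disk index and the first minor of that range.
 */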
305 static int index_to_minor(int index)
306 {
307 	return index << PART_BITS;
308 }
309 
310 static int minor_to_index(int minor)
311 {
312 	return minor >> PART_BITS;
313 }
314 
315 static ssize_t virtblk_serial_show(struct device *dev,
316 				struct device_attribute *attr, char *buf)
317 {
318 	struct gendisk *disk = dev_to_disk(dev);
319 	int err;
320 
321 	/* sysfs gives us a PAGE_SIZE buffer */
322 	BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);
323 
324 	buf[VIRTIO_BLK_ID_BYTES] = '\0';
325 	err = virtblk_get_id(disk, buf);
326 	if (!err)
327 		return strlen(buf);
328 
329 	if (err == -EIO) /* Unsupported? Make it empty. */
330 		return 0;
331 
332 	return err;
333 }
334 DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL);
335 
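/*
 * Process-context handler queued on virtblk_wq when the device signals a
 * config change: re-read the capacity, clamp it if it does not fit in a
 * sector_t, update the gendisk and emit a RESIZE=1 uevent for userspace.
 */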
336 static void virtblk_config_changed_work(struct work_struct *work)
337 {
338 	struct virtio_blk *vblk =
339 		container_of(work, struct virtio_blk, config_work);
340 	struct virtio_device *vdev = vblk->vdev;
341 	struct request_queue *q = vblk->disk->queue;
342 	char cap_str_2[10], cap_str_10[10];
343 	char *envp[] = { "RESIZE=1", NULL };
344 	u64 capacity, size;
345 
346 	/* Host must always specify the capacity. */
347 	virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);
348 
349 	/* If capacity is too big, truncate with warning. */
350 	if ((sector_t)capacity != capacity) {
351 		dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
352 			 (unsigned long long)capacity);
353 		capacity = (sector_t)-1;
354 	}
355 
356 	size = capacity * queue_logical_block_size(q);
357 	string_get_size(size, STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
358 	string_get_size(size, STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));
359 
360 	dev_notice(&vdev->dev,
361 		  "new size: %llu %d-byte logical blocks (%s/%s)\n",
362 		  (unsigned long long)capacity,
363 		  queue_logical_block_size(q),
364 		  cap_str_10, cap_str_2);
365 
366 	set_capacity(vblk->disk, capacity);
367 	revalidate_disk(vblk->disk);
368 	kobject_uevent_env(&disk_to_dev(vblk->disk)->kobj, KOBJ_CHANGE, envp);
369 }
370 
371 static void virtblk_config_changed(struct virtio_device *vdev)
372 {
373 	struct virtio_blk *vblk = vdev->priv;
374 
375 	queue_work(virtblk_wq, &vblk->config_work);
376 }
377 
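/*
 * Read num_queues if VIRTIO_BLK_F_MQ was offered (fall back to a single
 * queue otherwise), allocate the per-queue state plus the temporary
 * names/callbacks/vqs arrays that find_vqs() expects, and store the
 * discovered virtqueues.  The temporary arrays are freed on both the
 * success and the error path.
 */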
378 static int init_vq(struct virtio_blk *vblk)
379 {
380 	int err = 0;
381 	int i;
382 	vq_callback_t **callbacks;
383 	const char **names;
384 	struct virtqueue **vqs;
385 	unsigned short num_vqs;
386 	struct virtio_device *vdev = vblk->vdev;
387 
388 	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ,
389 				   struct virtio_blk_config, num_queues,
390 				   &num_vqs);
391 	if (err)
392 		num_vqs = 1;
393 
394 	vblk->vqs = kmalloc(sizeof(*vblk->vqs) * num_vqs, GFP_KERNEL);
395 	if (!vblk->vqs) {
396 		err = -ENOMEM;
397 		goto out;
398 	}
399 
400 	names = kmalloc(sizeof(*names) * num_vqs, GFP_KERNEL);
	/* Set err explicitly: the labels below are shared with the success path. */
401 	if (!names) {
		err = -ENOMEM;
402 		goto err_names;
	}
403 
404 	callbacks = kmalloc(sizeof(*callbacks) * num_vqs, GFP_KERNEL);
405 	if (!callbacks) {
		err = -ENOMEM;
406 		goto err_callbacks;
	}
407 
408 	vqs = kmalloc(sizeof(*vqs) * num_vqs, GFP_KERNEL);
409 	if (!vqs) {
		err = -ENOMEM;
410 		goto err_vqs;
	}
411 
412 	for (i = 0; i < num_vqs; i++) {
413 		callbacks[i] = virtblk_done;
414 		snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
415 		names[i] = vblk->vqs[i].name;
416 	}
417 
418 	/* Discover virtqueues and write information to configuration.  */
419 	err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names);
420 	if (err)
421 		goto err_find_vqs;
422 
423 	for (i = 0; i < num_vqs; i++) {
424 		spin_lock_init(&vblk->vqs[i].lock);
425 		vblk->vqs[i].vq = vqs[i];
426 	}
427 	vblk->num_vqs = num_vqs;
428 
429  err_find_vqs:
430 	kfree(vqs);
431  err_vqs:
432 	kfree(callbacks);
433  err_callbacks:
434 	kfree(names);
435  err_names:
436 	if (err)
437 		kfree(vblk->vqs);
438  out:
439 	return err;
440 }
441 
442 /*
443  * Legacy naming scheme used for virtio devices.  We are stuck with it for
444  * virtio blk but don't ever use it for any new driver.
445  */
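/* e.g. with prefix "vd": index 0 -> "vda", 25 -> "vdz", 26 -> "vdaa", 701 -> "vdzz". */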
446 static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
447 {
448 	const int base = 'z' - 'a' + 1;
449 	char *begin = buf + strlen(prefix);
450 	char *end = buf + buflen;
451 	char *p;
452 	int unit;
453 
454 	p = end - 1;
455 	*p = '\0';
456 	unit = base;
457 	do {
458 		if (p == begin)
459 			return -EINVAL;
460 		*--p = 'a' + (index % unit);
461 		index = (index / unit) - 1;
462 	} while (index >= 0);
463 
464 	memmove(begin, p, end - p);
465 	memcpy(buf, prefix, strlen(prefix));
466 
467 	return 0;
468 }
469 
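/*
 * Report whether the device cache is writeback: prefer the writeback bit
 * from config space when VIRTIO_BLK_F_CONFIG_WCE is available, otherwise
 * fall back to whether VIRTIO_BLK_F_WCE was negotiated at all.
 */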
470 static int virtblk_get_cache_mode(struct virtio_device *vdev)
471 {
472 	u8 writeback;
473 	int err;
474 
475 	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
476 				   struct virtio_blk_config, wce,
477 				   &writeback);
478 	if (err)
479 		writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_WCE);
480 
481 	return writeback;
482 }
483 
484 static void virtblk_update_cache_mode(struct virtio_device *vdev)
485 {
486 	u8 writeback = virtblk_get_cache_mode(vdev);
487 	struct virtio_blk *vblk = vdev->priv;
488 
489 	if (writeback)
490 		blk_queue_flush(vblk->disk->queue, REQ_FLUSH);
491 	else
492 		blk_queue_flush(vblk->disk->queue, 0);
493 
494 	revalidate_disk(vblk->disk);
495 }
496 
497 static const char *const virtblk_cache_types[] = {
498 	"write through", "write back"
499 };
500 
501 static ssize_t
502 virtblk_cache_type_store(struct device *dev, struct device_attribute *attr,
503 			 const char *buf, size_t count)
504 {
505 	struct gendisk *disk = dev_to_disk(dev);
506 	struct virtio_blk *vblk = disk->private_data;
507 	struct virtio_device *vdev = vblk->vdev;
508 	int i;
509 
510 	BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
511 	for (i = ARRAY_SIZE(virtblk_cache_types); --i >= 0; )
512 		if (sysfs_streq(buf, virtblk_cache_types[i]))
513 			break;
514 
515 	if (i < 0)
516 		return -EINVAL;
517 
518 	virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
519 	virtblk_update_cache_mode(vdev);
520 	return count;
521 }
522 
523 static ssize_t
524 virtblk_cache_type_show(struct device *dev, struct device_attribute *attr,
525 			 char *buf)
526 {
527 	struct gendisk *disk = dev_to_disk(dev);
528 	struct virtio_blk *vblk = disk->private_data;
529 	u8 writeback = virtblk_get_cache_mode(vblk->vdev);
530 
531 	BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
532 	return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]);
533 }
534 
535 static const struct device_attribute dev_attr_cache_type_ro =
536 	__ATTR(cache_type, S_IRUGO,
537 	       virtblk_cache_type_show, NULL);
538 static const struct device_attribute dev_attr_cache_type_rw =
539 	__ATTR(cache_type, S_IRUGO|S_IWUSR,
540 	       virtblk_cache_type_show, virtblk_cache_type_store);
541 
542 static int virtblk_init_request(void *data, struct request *rq,
543 		unsigned int hctx_idx, unsigned int request_idx,
544 		unsigned int numa_node)
545 {
546 	struct virtio_blk *vblk = data;
547 	struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);
548 
549 	sg_init_table(vbr->sg, vblk->sg_elems);
550 	return 0;
551 }
552 
553 static struct blk_mq_ops virtio_mq_ops = {
554 	.queue_rq	= virtio_queue_rq,
555 	.map_queue	= blk_mq_map_queue,
556 	.complete	= virtblk_request_done,
557 	.init_request	= virtblk_init_request,
558 };
559 
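/*
 * Module parameter; 0 (the default) means "fill the ring": virtblk_probe()
 * sizes the queue to the virtqueue, halved when indirect descriptors are
 * not available.
 */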
560 static unsigned int virtblk_queue_depth;
561 module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);
562 
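/*
 * Device probe: allocate a disk index, read seg_max, set up the virtqueues
 * and the blk-mq tag set and queue, apply the optional size, geometry and
 * topology config fields as queue limits, then register the disk and its
 * sysfs attributes (serial, cache_type).
 */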
563 static int virtblk_probe(struct virtio_device *vdev)
564 {
565 	struct virtio_blk *vblk;
566 	struct request_queue *q;
567 	int err, index;
568 
569 	u64 cap;
570 	u32 v, blk_size, sg_elems, opt_io_size;
571 	u16 min_io_size;
572 	u8 physical_block_exp, alignment_offset;
573 
574 	err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
575 			     GFP_KERNEL);
576 	if (err < 0)
577 		goto out;
578 	index = err;
579 
580 	/* We need to know how many segments before we allocate. */
581 	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
582 				   struct virtio_blk_config, seg_max,
583 				   &sg_elems);
584 
585 	/* We need at least one SG element, whatever they say. */
586 	if (err || !sg_elems)
587 		sg_elems = 1;
588 
589 	/* We need one extra sg element at the head and one at the tail. */
590 	sg_elems += 2;
591 	vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
592 	if (!vblk) {
593 		err = -ENOMEM;
594 		goto out_free_index;
595 	}
596 
597 	vblk->vdev = vdev;
598 	vblk->sg_elems = sg_elems;
599 
600 	INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
601 
602 	err = init_vq(vblk);
603 	if (err)
604 		goto out_free_vblk;
605 
606 	/* FIXME: How many partitions?  How long is a piece of string? */
607 	vblk->disk = alloc_disk(1 << PART_BITS);
608 	if (!vblk->disk) {
609 		err = -ENOMEM;
610 		goto out_free_vq;
611 	}
612 
613 	/* Default queue sizing is to fill the ring. */
614 	if (!virtblk_queue_depth) {
615 		virtblk_queue_depth = vblk->vqs[0].vq->num_free;
616 		/* ... but without indirect descs, we use 2 descs per req */
617 		if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
618 			virtblk_queue_depth /= 2;
619 	}
620 
621 	memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
622 	vblk->tag_set.ops = &virtio_mq_ops;
623 	vblk->tag_set.queue_depth = virtblk_queue_depth;
624 	vblk->tag_set.numa_node = NUMA_NO_NODE;
625 	vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
626 	vblk->tag_set.cmd_size =
627 		sizeof(struct virtblk_req) +
628 		sizeof(struct scatterlist) * sg_elems;
629 	vblk->tag_set.driver_data = vblk;
630 	vblk->tag_set.nr_hw_queues = vblk->num_vqs;
631 
632 	err = blk_mq_alloc_tag_set(&vblk->tag_set);
633 	if (err)
634 		goto out_put_disk;
635 
636 	q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set);
637 	if (!q) {
638 		err = -ENOMEM;
639 		goto out_free_tags;
640 	}
641 
642 	q->queuedata = vblk;
643 
644 	virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);
645 
646 	vblk->disk->major = major;
647 	vblk->disk->first_minor = index_to_minor(index);
648 	vblk->disk->private_data = vblk;
649 	vblk->disk->fops = &virtblk_fops;
650 	vblk->disk->driverfs_dev = &vdev->dev;
651 	vblk->index = index;
652 
653 	/* configure queue flush support */
654 	virtblk_update_cache_mode(vdev);
655 
656 	/* If disk is read-only in the host, the guest should obey */
657 	if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
658 		set_disk_ro(vblk->disk, 1);
659 
660 	/* Host must always specify the capacity. */
661 	virtio_cread(vdev, struct virtio_blk_config, capacity, &cap);
662 
663 	/* If capacity is too big, truncate with warning. */
664 	if ((sector_t)cap != cap) {
665 		dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
666 			 (unsigned long long)cap);
667 		cap = (sector_t)-1;
668 	}
669 	set_capacity(vblk->disk, cap);
670 
671 	/* We can handle whatever the host told us to handle. */
672 	blk_queue_max_segments(q, vblk->sg_elems-2);
673 
674 	/* No need to bounce any requests */
675 	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
676 
677 	/* No real sector limit. */
678 	blk_queue_max_hw_sectors(q, -1U);
679 
680 	/* Host can optionally specify maximum segment size and number of
681 	 * segments. */
682 	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
683 				   struct virtio_blk_config, size_max, &v);
684 	if (!err)
685 		blk_queue_max_segment_size(q, v);
686 	else
687 		blk_queue_max_segment_size(q, -1U);
688 
689 	/* Host can optionally specify the block size of the device */
690 	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
691 				   struct virtio_blk_config, blk_size,
692 				   &blk_size);
693 	if (!err)
694 		blk_queue_logical_block_size(q, blk_size);
695 	else
696 		blk_size = queue_logical_block_size(q);
697 
698 	/* Use topology information if available */
699 	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
700 				   struct virtio_blk_config, physical_block_exp,
701 				   &physical_block_exp);
702 	if (!err && physical_block_exp)
703 		blk_queue_physical_block_size(q,
704 				blk_size * (1 << physical_block_exp));
705 
706 	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
707 				   struct virtio_blk_config, alignment_offset,
708 				   &alignment_offset);
709 	if (!err && alignment_offset)
710 		blk_queue_alignment_offset(q, blk_size * alignment_offset);
711 
712 	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
713 				   struct virtio_blk_config, min_io_size,
714 				   &min_io_size);
715 	if (!err && min_io_size)
716 		blk_queue_io_min(q, blk_size * min_io_size);
717 
718 	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
719 				   struct virtio_blk_config, opt_io_size,
720 				   &opt_io_size);
721 	if (!err && opt_io_size)
722 		blk_queue_io_opt(q, blk_size * opt_io_size);
723 
724 	virtio_device_ready(vdev);
725 
726 	add_disk(vblk->disk);
727 	err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_serial);
728 	if (err)
729 		goto out_del_disk;
730 
731 	if (virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
732 		err = device_create_file(disk_to_dev(vblk->disk),
733 					 &dev_attr_cache_type_rw);
734 	else
735 		err = device_create_file(disk_to_dev(vblk->disk),
736 					 &dev_attr_cache_type_ro);
737 	if (err)
738 		goto out_del_disk;
739 	return 0;
740 
741 out_del_disk:
742 	del_gendisk(vblk->disk);
743 	blk_cleanup_queue(vblk->disk->queue);
744 out_free_tags:
745 	blk_mq_free_tag_set(&vblk->tag_set);
746 out_put_disk:
747 	put_disk(vblk->disk);
748 out_free_vq:
749 	vdev->config->del_vqs(vdev);
750 out_free_vblk:
751 	kfree(vblk);
752 out_free_index:
753 	ida_simple_remove(&vd_index_ida, index);
754 out:
755 	return err;
756 }
757 
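/*
 * Device removal: flush outstanding config work, tear down the gendisk and
 * request queue, reset the device to stop the virtqueues, and drop our disk
 * reference.  The ida index is released only if that was the last
 * reference, i.e. nobody still holds the disk open.
 */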
758 static void virtblk_remove(struct virtio_device *vdev)
759 {
760 	struct virtio_blk *vblk = vdev->priv;
761 	int index = vblk->index;
762 	int refc;
763 
764 	/* Make sure no work handler is accessing the device. */
765 	flush_work(&vblk->config_work);
766 
767 	del_gendisk(vblk->disk);
768 	blk_cleanup_queue(vblk->disk->queue);
769 
770 	blk_mq_free_tag_set(&vblk->tag_set);
771 
772 	/* Stop all the virtqueues. */
773 	vdev->config->reset(vdev);
774 
775 	refc = atomic_read(&disk_to_dev(vblk->disk)->kobj.kref.refcount);
776 	put_disk(vblk->disk);
777 	vdev->config->del_vqs(vdev);
778 	kfree(vblk->vqs);
779 	kfree(vblk);
780 
781 	/* Only free device id if we don't have any users */
782 	if (refc == 1)
783 		ida_simple_remove(&vd_index_ida, index);
784 }
785 
786 #ifdef CONFIG_PM_SLEEP
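/*
 * Suspend/resume: freeze resets the device (so no further interrupts
 * arrive), flushes config work, stops the hw queues and deletes the
 * virtqueues; restore re-creates the virtqueues, marks the device ready and
 * restarts the stopped queues.
 */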
787 static int virtblk_freeze(struct virtio_device *vdev)
788 {
789 	struct virtio_blk *vblk = vdev->priv;
790 
791 	/* Ensure we don't receive any more interrupts */
792 	vdev->config->reset(vdev);
793 
794 	/* Make sure no work handler is accessing the device. */
795 	flush_work(&vblk->config_work);
796 
797 	blk_mq_stop_hw_queues(vblk->disk->queue);
798 
799 	vdev->config->del_vqs(vdev);
800 	return 0;
801 }
802 
803 static int virtblk_restore(struct virtio_device *vdev)
804 {
805 	struct virtio_blk *vblk = vdev->priv;
806 	int ret;
807 
808 	ret = init_vq(vdev->priv);
809 	if (ret)
810 		return ret;
811 
812 	virtio_device_ready(vdev);
813 
814 	blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
815 	return 0;
816 }
817 #endif
818 
819 static const struct virtio_device_id id_table[] = {
820 	{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
821 	{ 0 },
822 };
823 
824 static unsigned int features[] = {
825 	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
826 	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, VIRTIO_BLK_F_SCSI,
827 	VIRTIO_BLK_F_WCE, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
828 	VIRTIO_BLK_F_MQ,
829 };
830 
831 static struct virtio_driver virtio_blk = {
832 	.feature_table		= features,
833 	.feature_table_size	= ARRAY_SIZE(features),
834 	.driver.name		= KBUILD_MODNAME,
835 	.driver.owner		= THIS_MODULE,
836 	.id_table		= id_table,
837 	.probe			= virtblk_probe,
838 	.remove			= virtblk_remove,
839 	.config_changed		= virtblk_config_changed,
840 #ifdef CONFIG_PM_SLEEP
841 	.freeze			= virtblk_freeze,
842 	.restore		= virtblk_restore,
843 #endif
844 };
845 
846 static int __init init(void)
847 {
848 	int error;
849 
850 	virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
851 	if (!virtblk_wq)
852 		return -ENOMEM;
853 
854 	major = register_blkdev(0, "virtblk");
855 	if (major < 0) {
856 		error = major;
857 		goto out_destroy_workqueue;
858 	}
859 
860 	error = register_virtio_driver(&virtio_blk);
861 	if (error)
862 		goto out_unregister_blkdev;
863 	return 0;
864 
865 out_unregister_blkdev:
866 	unregister_blkdev(major, "virtblk");
867 out_destroy_workqueue:
868 	destroy_workqueue(virtblk_wq);
869 	return error;
870 }
871 
872 static void __exit fini(void)
873 {
874 	unregister_blkdev(major, "virtblk");
875 	unregister_virtio_driver(&virtio_blk);
876 	destroy_workqueue(virtblk_wq);
877 }
878 module_init(init);
879 module_exit(fini);
880 
881 MODULE_DEVICE_TABLE(virtio, id_table);
882 MODULE_DESCRIPTION("Virtio block driver");
883 MODULE_LICENSE("GPL");
884