xref: /openbmc/linux/drivers/block/virtio_blk.c (revision 7bcae826)
//#define DEBUG
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <scsi/scsi_cmnd.h>
#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <linux/numa.h>

#define PART_BITS 4
#define VQ_NAME_LEN 16

static int major;
static DEFINE_IDA(vd_index_ida);

static struct workqueue_struct *virtblk_wq;

struct virtio_blk_vq {
	struct virtqueue *vq;
	spinlock_t lock;
	char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;

struct virtio_blk {
	struct virtio_device *vdev;

	/* The disk structure for the kernel. */
	struct gendisk *disk;

	/* Block layer tags. */
	struct blk_mq_tag_set tag_set;

	/* Process context for config space updates */
	struct work_struct config_work;

	/* What the host tells us, plus 2 for header & trailer. */
	unsigned int sg_elems;

	/* Ida index - used to track minor number allocations. */
	int index;

	/* num of vqs */
	int num_vqs;
	struct virtio_blk_vq *vqs;
};

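/*
 * Per-request driver state.  blk-mq allocates this behind every request
 * (see tag_set.cmd_size in virtblk_probe()), so nothing is allocated in
 * the I/O path.
 */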
struct virtblk_req {
#ifdef CONFIG_VIRTIO_BLK_SCSI
	struct scsi_request sreq;	/* for SCSI passthrough, must be first */
	u8 sense[SCSI_SENSE_BUFFERSIZE];
	struct virtio_scsi_inhdr in_hdr;
#endif
	struct virtio_blk_outhdr out_hdr;
	u8 status;
	struct scatterlist sg[];
};

static inline int virtblk_result(struct virtblk_req *vbr)
{
	switch (vbr->status) {
	case VIRTIO_BLK_S_OK:
		return 0;
	case VIRTIO_BLK_S_UNSUPP:
		return -ENOTTY;
	default:
		return -EIO;
	}
}

/*
 * If this is a packet command we need a couple of additional headers.  Behind
 * the normal outhdr we put a segment with the scsi command block, and before
 * the normal inhdr we put the sense data and the inhdr with additional status
 * information.
 */
#ifdef CONFIG_VIRTIO_BLK_SCSI
static int virtblk_add_req_scsi(struct virtqueue *vq, struct virtblk_req *vbr,
		struct scatterlist *data_sg, bool have_data)
{
	struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6];
	unsigned int num_out = 0, num_in = 0;

	sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
	sgs[num_out++] = &hdr;
	sg_init_one(&cmd, vbr->sreq.cmd, vbr->sreq.cmd_len);
	sgs[num_out++] = &cmd;

	if (have_data) {
		if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
			sgs[num_out++] = data_sg;
		else
			sgs[num_out + num_in++] = data_sg;
	}

	sg_init_one(&sense, vbr->sense, SCSI_SENSE_BUFFERSIZE);
	sgs[num_out + num_in++] = &sense;
	sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
	sgs[num_out + num_in++] = &inhdr;
	sg_init_one(&status, &vbr->status, sizeof(vbr->status));
	sgs[num_out + num_in++] = &status;

	return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}

static inline void virtblk_scsi_request_done(struct request *req)
{
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	struct virtio_blk *vblk = req->q->queuedata;
	struct scsi_request *sreq = &vbr->sreq;

	sreq->resid_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.residual);
	sreq->sense_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.sense_len);
	req->errors = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.errors);
}

static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
			     unsigned int cmd, unsigned long data)
{
	struct gendisk *disk = bdev->bd_disk;
	struct virtio_blk *vblk = disk->private_data;

	/*
	 * Only allow the generic SCSI ioctls if the host can support it.
	 */
	if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
		return -ENOTTY;

	return scsi_cmd_blk_ioctl(bdev, mode, cmd,
				  (void __user *)data);
}
#else
static inline int virtblk_add_req_scsi(struct virtqueue *vq,
		struct virtblk_req *vbr, struct scatterlist *data_sg,
		bool have_data)
{
	return -EIO;
}
static inline void virtblk_scsi_request_done(struct request *req)
{
}
#define virtblk_ioctl	NULL
#endif /* CONFIG_VIRTIO_BLK_SCSI */

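/*
 * Regular (non-SCSI) request layout on the virtqueue: one driver-to-device
 * segment for the out header, then the data scatterlist (driver-to-device
 * for writes, device-to-driver for reads), and finally a single
 * device-to-driver status byte that the host fills in on completion.
 */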
static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr,
		struct scatterlist *data_sg, bool have_data)
{
	struct scatterlist hdr, status, *sgs[3];
	unsigned int num_out = 0, num_in = 0;

	sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
	sgs[num_out++] = &hdr;

	if (have_data) {
		if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
			sgs[num_out++] = data_sg;
		else
			sgs[num_out + num_in++] = data_sg;
	}

	sg_init_one(&status, &vbr->status, sizeof(vbr->status));
	sgs[num_out + num_in++] = &status;

	return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}

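/*
 * Completion handler, run via the blk-mq ->complete hook: translate the
 * status byte written by the host into an errno and finish the request.
 */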
static inline void virtblk_request_done(struct request *req)
{
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	int error = virtblk_result(vbr);

	switch (req_op(req)) {
	case REQ_OP_SCSI_IN:
	case REQ_OP_SCSI_OUT:
		virtblk_scsi_request_done(req);
		break;
	case REQ_OP_DRV_IN:
		req->errors = (error != 0);
		break;
	}

	blk_mq_end_request(req, error);
}

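/*
 * Virtqueue interrupt callback, one per request queue.  The
 * disable_cb/get_buf/enable_cb loop closes the race where the host adds a
 * completion after the final get_buf but before callbacks are re-enabled:
 * virtqueue_enable_cb() returns false in that case and we poll again.
 */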
static void virtblk_done(struct virtqueue *vq)
{
	struct virtio_blk *vblk = vq->vdev->priv;
	bool req_done = false;
	int qid = vq->index;
	struct virtblk_req *vbr;
	unsigned long flags;
	unsigned int len;

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
			struct request *req = blk_mq_rq_from_pdu(vbr);

			blk_mq_complete_request(req, req->errors);
			req_done = true;
		}
		if (unlikely(virtqueue_is_broken(vq)))
			break;
	} while (!virtqueue_enable_cb(vq));

	/* In case queue is stopped waiting for more buffers. */
	if (req_done)
		blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}

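/*
 * blk-mq ->queue_rq: build the out header for the request type, map the
 * request into the per-request scatterlist, add it to this hw queue's
 * virtqueue, and kick the host on the last request of a batch.  A full
 * ring stops the hw queue and returns BLK_MQ_RQ_QUEUE_BUSY so the block
 * layer retries later.
 */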
static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
			   const struct blk_mq_queue_data *bd)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct request *req = bd->rq;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	unsigned long flags;
	unsigned int num;
	int qid = hctx->queue_num;
	int err;
	bool notify = false;
	u32 type;

	BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);

	switch (req_op(req)) {
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		type = 0;
		break;
	case REQ_OP_FLUSH:
		type = VIRTIO_BLK_T_FLUSH;
		break;
	case REQ_OP_SCSI_IN:
	case REQ_OP_SCSI_OUT:
		type = VIRTIO_BLK_T_SCSI_CMD;
		break;
	case REQ_OP_DRV_IN:
		type = VIRTIO_BLK_T_GET_ID;
		break;
	default:
		WARN_ON_ONCE(1);
		return BLK_MQ_RQ_QUEUE_ERROR;
	}

	vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type);
	vbr->out_hdr.sector = type ?
		0 : cpu_to_virtio64(vblk->vdev, blk_rq_pos(req));
	vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req));

	blk_mq_start_request(req);

	num = blk_rq_map_sg(hctx->queue, req, vbr->sg);
	if (num) {
		if (rq_data_dir(req) == WRITE)
			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
		else
			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
	}

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	if (req_op(req) == REQ_OP_SCSI_IN || req_op(req) == REQ_OP_SCSI_OUT)
		err = virtblk_add_req_scsi(vblk->vqs[qid].vq, vbr, vbr->sg, num);
	else
		err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
	if (err) {
		virtqueue_kick(vblk->vqs[qid].vq);
		blk_mq_stop_hw_queue(hctx);
		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
		/* Out of mem doesn't actually happen, since we fall back
		 * to direct descriptors */
		if (err == -ENOMEM || err == -ENOSPC)
			return BLK_MQ_RQ_QUEUE_BUSY;
		return BLK_MQ_RQ_QUEUE_ERROR;
	}

	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
		notify = true;
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

	if (notify)
		virtqueue_notify(vblk->vqs[qid].vq);
	return BLK_MQ_RQ_QUEUE_OK;
}

/*
 * Return the device ID (serial number) string for *disk in *id_str.  This
 * issues a driver-internal request that virtio_queue_rq() turns into a
 * VIRTIO_BLK_T_GET_ID command.
 */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
	struct virtio_blk *vblk = disk->private_data;
	struct request_queue *q = vblk->disk->queue;
	struct request *req;
	int err;

	req = blk_get_request(q, REQ_OP_DRV_IN, GFP_KERNEL);
	if (IS_ERR(req))
		return PTR_ERR(req);

	err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
	if (err)
		goto out;

	err = blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
out:
	blk_put_request(req);
	return err;
}

/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	struct virtio_blk *vblk = bd->bd_disk->private_data;

	/* see if the host passed in geometry config */
	if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.cylinders, &geo->cylinders);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.heads, &geo->heads);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.sectors, &geo->sectors);
	} else {
		/* some standard values, similar to sd */
		geo->heads = 1 << 6;
		geo->sectors = 1 << 5;
		geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	}
	return 0;
}

static const struct block_device_operations virtblk_fops = {
	.ioctl  = virtblk_ioctl,
	.owner  = THIS_MODULE,
	.getgeo = virtblk_getgeo,
};

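/*
 * Each disk claims 1 << PART_BITS (16) minor numbers; partitions beyond
 * that are handled via extended dev_t (GENHD_FL_EXT_DEVT).
 */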
static int index_to_minor(int index)
{
	return index << PART_BITS;
}

static int minor_to_index(int minor)
{
	return minor >> PART_BITS;
}

static ssize_t virtblk_serial_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	int err;

	/* sysfs gives us a PAGE_SIZE buffer */
	BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);

	buf[VIRTIO_BLK_ID_BYTES] = '\0';
	err = virtblk_get_id(disk, buf);
	if (!err)
		return strlen(buf);

	if (err == -EIO) /* Unsupported? Make it empty. */
		return 0;

	return err;
}

static DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL);

static void virtblk_config_changed_work(struct work_struct *work)
{
	struct virtio_blk *vblk =
		container_of(work, struct virtio_blk, config_work);
	struct virtio_device *vdev = vblk->vdev;
	struct request_queue *q = vblk->disk->queue;
	char cap_str_2[10], cap_str_10[10];
	char *envp[] = { "RESIZE=1", NULL };
	u64 capacity;

	/* Host must always specify the capacity. */
	virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);

	/* If capacity is too big, truncate with warning. */
	if ((sector_t)capacity != capacity) {
		dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
			 (unsigned long long)capacity);
		capacity = (sector_t)-1;
	}

	string_get_size(capacity, queue_logical_block_size(q),
			STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
	string_get_size(capacity, queue_logical_block_size(q),
			STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));

	dev_notice(&vdev->dev,
		  "new size: %llu %d-byte logical blocks (%s/%s)\n",
		  (unsigned long long)capacity,
		  queue_logical_block_size(q),
		  cap_str_10, cap_str_2);

	set_capacity(vblk->disk, capacity);
	revalidate_disk(vblk->disk);
	kobject_uevent_env(&disk_to_dev(vblk->disk)->kobj, KOBJ_CHANGE, envp);
}

static void virtblk_config_changed(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	queue_work(virtblk_wq, &vblk->config_work);
}

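/*
 * Discover and set up the request virtqueues: use the num_queues value
 * from the config space when VIRTIO_BLK_F_MQ was negotiated, otherwise
 * fall back to a single queue.  Queues are named "req.0" .. "req.N-1".
 */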
static int init_vq(struct virtio_blk *vblk)
{
	int err;
	int i;
	vq_callback_t **callbacks;
	const char **names;
	struct virtqueue **vqs;
	unsigned short num_vqs;
	struct virtio_device *vdev = vblk->vdev;

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ,
				   struct virtio_blk_config, num_queues,
				   &num_vqs);
	if (err)
		num_vqs = 1;

	vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
	if (!vblk->vqs)
		return -ENOMEM;

	names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
	callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL);
	vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
	if (!names || !callbacks || !vqs) {
		err = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_vqs; i++) {
		callbacks[i] = virtblk_done;
		snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
		names[i] = vblk->vqs[i].name;
	}

	/* Discover virtqueues and write information to configuration.  */
	err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names);
	if (err)
		goto out;

	for (i = 0; i < num_vqs; i++) {
		spin_lock_init(&vblk->vqs[i].lock);
		vblk->vqs[i].vq = vqs[i];
	}
	vblk->num_vqs = num_vqs;

out:
	kfree(vqs);
	kfree(callbacks);
	kfree(names);
	if (err)
		kfree(vblk->vqs);
	return err;
}

/*
 * Legacy naming scheme used for virtio devices.  We are stuck with it for
 * virtio blk but don't ever use it for any new driver.
 */
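/* Examples: index 0 -> "vda", 25 -> "vdz", 26 -> "vdaa", 701 -> "vdzz". */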
static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
{
	const int base = 'z' - 'a' + 1;
	char *begin = buf + strlen(prefix);
	char *end = buf + buflen;
	char *p;
	int unit;

	p = end - 1;
	*p = '\0';
	unit = base;
	do {
		if (p == begin)
			return -EINVAL;
		*--p = 'a' + (index % unit);
		index = (index / unit) - 1;
	} while (index >= 0);

	memmove(begin, p, end - p);
	memcpy(buf, prefix, strlen(prefix));

	return 0;
}

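/*
 * Cache mode: prefer the writeback byte in the config space when
 * VIRTIO_BLK_F_CONFIG_WCE is available; otherwise infer writeback from
 * whether VIRTIO_BLK_F_FLUSH was negotiated.
 */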
static int virtblk_get_cache_mode(struct virtio_device *vdev)
{
	u8 writeback;
	int err;

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
				   struct virtio_blk_config, wce,
				   &writeback);

	/*
	 * If WCE is not configurable and flush is not available,
	 * assume no writeback cache is in use.
	 */
	if (err)
		writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH);

	return writeback;
}

static void virtblk_update_cache_mode(struct virtio_device *vdev)
{
	u8 writeback = virtblk_get_cache_mode(vdev);
	struct virtio_blk *vblk = vdev->priv;

	blk_queue_write_cache(vblk->disk->queue, writeback, false);
	revalidate_disk(vblk->disk);
}

static const char *const virtblk_cache_types[] = {
	"write through", "write back"
};

static ssize_t
virtblk_cache_type_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	struct virtio_device *vdev = vblk->vdev;
	int i;

	BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
	for (i = ARRAY_SIZE(virtblk_cache_types); --i >= 0; )
		if (sysfs_streq(buf, virtblk_cache_types[i]))
			break;

	if (i < 0)
		return -EINVAL;

	virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
	virtblk_update_cache_mode(vdev);
	return count;
}

static ssize_t
virtblk_cache_type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	u8 writeback = virtblk_get_cache_mode(vblk->vdev);

	BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
	return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]);
}

static const struct device_attribute dev_attr_cache_type_ro =
	__ATTR(cache_type, S_IRUGO,
	       virtblk_cache_type_show, NULL);
static const struct device_attribute dev_attr_cache_type_rw =
	__ATTR(cache_type, S_IRUGO|S_IWUSR,
	       virtblk_cache_type_show, virtblk_cache_type_store);

static int virtblk_init_request(void *data, struct request *rq,
		unsigned int hctx_idx, unsigned int request_idx,
		unsigned int numa_node)
{
	struct virtio_blk *vblk = data;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);

#ifdef CONFIG_VIRTIO_BLK_SCSI
	vbr->sreq.sense = vbr->sense;
#endif
	sg_init_table(vbr->sg, vblk->sg_elems);
	return 0;
}

static struct blk_mq_ops virtio_mq_ops = {
	.queue_rq	= virtio_queue_rq,
	.complete	= virtblk_request_done,
	.init_request	= virtblk_init_request,
};

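/* 0 (the default) sizes the queue to the virtqueue ring; see virtblk_probe(). */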
static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);

static int virtblk_probe(struct virtio_device *vdev)
{
	struct virtio_blk *vblk;
	struct request_queue *q;
	int err, index;

	u64 cap;
	u32 v, blk_size, sg_elems, opt_io_size;
	u16 min_io_size;
	u8 physical_block_exp, alignment_offset;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
			     GFP_KERNEL);
	if (err < 0)
		goto out;
	index = err;

	/* We need to know how many segments before we allocate. */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
				   struct virtio_blk_config, seg_max,
				   &sg_elems);

	/* We need at least one SG element, whatever they say. */
	if (err || !sg_elems)
		sg_elems = 1;

	/* We need extra sg elements at head and tail. */
	sg_elems += 2;
	vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
	if (!vblk) {
		err = -ENOMEM;
		goto out_free_index;
	}

	vblk->vdev = vdev;
	vblk->sg_elems = sg_elems;

	INIT_WORK(&vblk->config_work, virtblk_config_changed_work);

	err = init_vq(vblk);
	if (err)
		goto out_free_vblk;

	/* FIXME: How many partitions?  How long is a piece of string? */
	vblk->disk = alloc_disk(1 << PART_BITS);
	if (!vblk->disk) {
		err = -ENOMEM;
		goto out_free_vq;
	}

	/* Default queue sizing is to fill the ring. */
	if (!virtblk_queue_depth) {
		virtblk_queue_depth = vblk->vqs[0].vq->num_free;
		/* ... but without indirect descs, we use 2 descs per req */
		if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
			virtblk_queue_depth /= 2;
	}

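	/*
	 * Set up the blk-mq tag set.  cmd_size reserves space behind every
	 * request for the virtblk_req header plus sg_elems scatterlist
	 * entries, initialized by virtblk_init_request().
	 */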
	memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
	vblk->tag_set.ops = &virtio_mq_ops;
	vblk->tag_set.queue_depth = virtblk_queue_depth;
	vblk->tag_set.numa_node = NUMA_NO_NODE;
	vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	vblk->tag_set.cmd_size =
		sizeof(struct virtblk_req) +
		sizeof(struct scatterlist) * sg_elems;
	vblk->tag_set.driver_data = vblk;
	vblk->tag_set.nr_hw_queues = vblk->num_vqs;

	err = blk_mq_alloc_tag_set(&vblk->tag_set);
	if (err)
		goto out_put_disk;

	q = blk_mq_init_queue(&vblk->tag_set);
	if (IS_ERR(q)) {
		err = -ENOMEM;
		goto out_free_tags;
	}
	vblk->disk->queue = q;

	q->queuedata = vblk;

	virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);

	vblk->disk->major = major;
	vblk->disk->first_minor = index_to_minor(index);
	vblk->disk->private_data = vblk;
	vblk->disk->fops = &virtblk_fops;
	vblk->disk->flags |= GENHD_FL_EXT_DEVT;
	vblk->index = index;

	/* configure queue flush support */
	virtblk_update_cache_mode(vdev);

	/* If disk is read-only in the host, the guest should obey */
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
		set_disk_ro(vblk->disk, 1);

	/* Host must always specify the capacity. */
	virtio_cread(vdev, struct virtio_blk_config, capacity, &cap);

	/* If capacity is too big, truncate with warning. */
	if ((sector_t)cap != cap) {
		dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
			 (unsigned long long)cap);
		cap = (sector_t)-1;
	}
	set_capacity(vblk->disk, cap);

	/* We can handle whatever the host told us to handle. */
	blk_queue_max_segments(q, vblk->sg_elems-2);

	/* No need to bounce any requests */
	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);

	/* No real sector limit. */
	blk_queue_max_hw_sectors(q, -1U);

	/* Host can optionally specify maximum segment size and number of
	 * segments. */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
				   struct virtio_blk_config, size_max, &v);
	if (!err)
		blk_queue_max_segment_size(q, v);
	else
		blk_queue_max_segment_size(q, -1U);

	/* Host can optionally specify the block size of the device */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
				   struct virtio_blk_config, blk_size,
				   &blk_size);
	if (!err)
		blk_queue_logical_block_size(q, blk_size);
	else
		blk_size = queue_logical_block_size(q);

	/* Use topology information if available */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, physical_block_exp,
				   &physical_block_exp);
	if (!err && physical_block_exp)
		blk_queue_physical_block_size(q,
				blk_size * (1 << physical_block_exp));

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, alignment_offset,
				   &alignment_offset);
	if (!err && alignment_offset)
		blk_queue_alignment_offset(q, blk_size * alignment_offset);

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, min_io_size,
				   &min_io_size);
	if (!err && min_io_size)
		blk_queue_io_min(q, blk_size * min_io_size);

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, opt_io_size,
				   &opt_io_size);
	if (!err && opt_io_size)
		blk_queue_io_opt(q, blk_size * opt_io_size);

	virtio_device_ready(vdev);

	device_add_disk(&vdev->dev, vblk->disk);
	err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_serial);
	if (err)
		goto out_del_disk;

	if (virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
		err = device_create_file(disk_to_dev(vblk->disk),
					 &dev_attr_cache_type_rw);
	else
		err = device_create_file(disk_to_dev(vblk->disk),
					 &dev_attr_cache_type_ro);
	if (err)
		goto out_del_disk;
	return 0;

out_del_disk:
	del_gendisk(vblk->disk);
	blk_cleanup_queue(vblk->disk->queue);
out_free_tags:
	blk_mq_free_tag_set(&vblk->tag_set);
out_put_disk:
	put_disk(vblk->disk);
out_free_vq:
	vdev->config->del_vqs(vdev);
out_free_vblk:
	kfree(vblk);
out_free_index:
	ida_simple_remove(&vd_index_ida, index);
out:
	return err;
}

static void virtblk_remove(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;
	int index = vblk->index;
	int refc;

	/* Make sure no work handler is accessing the device. */
	flush_work(&vblk->config_work);

	del_gendisk(vblk->disk);
	blk_cleanup_queue(vblk->disk->queue);

	blk_mq_free_tag_set(&vblk->tag_set);

	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

	refc = kref_read(&disk_to_dev(vblk->disk)->kobj.kref);
	put_disk(vblk->disk);
	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);
	kfree(vblk);

	/* Only free device id if we don't have any users */
	if (refc == 1)
		ida_simple_remove(&vd_index_ida, index);
}

#ifdef CONFIG_PM_SLEEP
static int virtblk_freeze(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Ensure we don't receive any more interrupts */
	vdev->config->reset(vdev);

	/* Make sure no work handler is accessing the device. */
	flush_work(&vblk->config_work);

	blk_mq_stop_hw_queues(vblk->disk->queue);

	vdev->config->del_vqs(vdev);
	return 0;
}

static int virtblk_restore(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;
	int ret;

	ret = init_vq(vdev->priv);
	if (ret)
		return ret;

	virtio_device_ready(vdev);

	blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
	return 0;
}
#endif

static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features_legacy[] = {
	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
#ifdef CONFIG_VIRTIO_BLK_SCSI
	VIRTIO_BLK_F_SCSI,
#endif
	VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
	VIRTIO_BLK_F_MQ,
};

static unsigned int features[] = {
	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
	VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
	VIRTIO_BLK_F_MQ,
};

static struct virtio_driver virtio_blk = {
	.feature_table			= features,
	.feature_table_size		= ARRAY_SIZE(features),
	.feature_table_legacy		= features_legacy,
	.feature_table_size_legacy	= ARRAY_SIZE(features_legacy),
	.driver.name			= KBUILD_MODNAME,
	.driver.owner			= THIS_MODULE,
	.id_table			= id_table,
	.probe				= virtblk_probe,
	.remove				= virtblk_remove,
	.config_changed			= virtblk_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze				= virtblk_freeze,
	.restore			= virtblk_restore,
#endif
};

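/*
 * Module init: create the config-change workqueue, grab a dynamic block
 * major, then register the virtio driver; unwind in reverse on failure.
 */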
static int __init init(void)
{
	int error;

	virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
	if (!virtblk_wq)
		return -ENOMEM;

	major = register_blkdev(0, "virtblk");
	if (major < 0) {
		error = major;
		goto out_destroy_workqueue;
	}

	error = register_virtio_driver(&virtio_blk);
	if (error)
		goto out_unregister_blkdev;
	return 0;

out_unregister_blkdev:
	unregister_blkdev(major, "virtblk");
out_destroy_workqueue:
	destroy_workqueue(virtblk_wq);
	return error;
}

static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_blk);
	unregister_blkdev(major, "virtblk");
	destroy_workqueue(virtblk_wq);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");