1 //#define DEBUG 2 #include <linux/spinlock.h> 3 #include <linux/slab.h> 4 #include <linux/blkdev.h> 5 #include <linux/hdreg.h> 6 #include <linux/module.h> 7 #include <linux/mutex.h> 8 #include <linux/virtio.h> 9 #include <linux/virtio_blk.h> 10 #include <linux/scatterlist.h> 11 #include <linux/string_helpers.h> 12 #include <scsi/scsi_cmnd.h> 13 #include <linux/idr.h> 14 #include <linux/blk-mq.h> 15 #include <linux/numa.h> 16 17 #define PART_BITS 4 18 #define VQ_NAME_LEN 16 19 20 static int major; 21 static DEFINE_IDA(vd_index_ida); 22 23 static struct workqueue_struct *virtblk_wq; 24 25 struct virtio_blk_vq { 26 struct virtqueue *vq; 27 spinlock_t lock; 28 char name[VQ_NAME_LEN]; 29 } ____cacheline_aligned_in_smp; 30 31 struct virtio_blk { 32 struct virtio_device *vdev; 33 34 /* The disk structure for the kernel. */ 35 struct gendisk *disk; 36 37 /* Block layer tags. */ 38 struct blk_mq_tag_set tag_set; 39 40 /* Process context for config space updates */ 41 struct work_struct config_work; 42 43 /* What host tells us, plus 2 for header & tailer. */ 44 unsigned int sg_elems; 45 46 /* Ida index - used to track minor number allocations. */ 47 int index; 48 49 /* num of vqs */ 50 int num_vqs; 51 struct virtio_blk_vq *vqs; 52 }; 53 54 struct virtblk_req { 55 struct request *req; 56 struct virtio_blk_outhdr out_hdr; 57 struct virtio_scsi_inhdr in_hdr; 58 u8 status; 59 u8 sense[SCSI_SENSE_BUFFERSIZE]; 60 struct scatterlist sg[]; 61 }; 62 63 static inline int virtblk_result(struct virtblk_req *vbr) 64 { 65 switch (vbr->status) { 66 case VIRTIO_BLK_S_OK: 67 return 0; 68 case VIRTIO_BLK_S_UNSUPP: 69 return -ENOTTY; 70 default: 71 return -EIO; 72 } 73 } 74 75 static int __virtblk_add_req(struct virtqueue *vq, 76 struct virtblk_req *vbr, 77 struct scatterlist *data_sg, 78 bool have_data) 79 { 80 struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6]; 81 unsigned int num_out = 0, num_in = 0; 82 __virtio32 type = vbr->out_hdr.type & ~cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT); 83 84 sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr)); 85 sgs[num_out++] = &hdr; 86 87 /* 88 * If this is a packet command we need a couple of additional headers. 89 * Behind the normal outhdr we put a segment with the scsi command 90 * block, and before the normal inhdr we put the sense data and the 91 * inhdr with additional status information. 92 */ 93 if (type == cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_SCSI_CMD)) { 94 sg_init_one(&cmd, vbr->req->cmd, vbr->req->cmd_len); 95 sgs[num_out++] = &cmd; 96 } 97 98 if (have_data) { 99 if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT)) 100 sgs[num_out++] = data_sg; 101 else 102 sgs[num_out + num_in++] = data_sg; 103 } 104 105 if (type == cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_SCSI_CMD)) { 106 memcpy(vbr->sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE); 107 sg_init_one(&sense, vbr->sense, SCSI_SENSE_BUFFERSIZE); 108 sgs[num_out + num_in++] = &sense; 109 sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr)); 110 sgs[num_out + num_in++] = &inhdr; 111 } 112 113 sg_init_one(&status, &vbr->status, sizeof(vbr->status)); 114 sgs[num_out + num_in++] = &status; 115 116 return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC); 117 } 118 119 static inline void virtblk_request_done(struct request *req) 120 { 121 struct virtblk_req *vbr = blk_mq_rq_to_pdu(req); 122 struct virtio_blk *vblk = req->q->queuedata; 123 int error = virtblk_result(vbr); 124 125 if (req->cmd_type == REQ_TYPE_BLOCK_PC) { 126 req->resid_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.residual); 127 req->sense_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.sense_len); 128 req->errors = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.errors); 129 } else if (req->cmd_type == REQ_TYPE_DRV_PRIV) { 130 req->errors = (error != 0); 131 } 132 133 blk_mq_end_request(req, error); 134 } 135 136 static void virtblk_done(struct virtqueue *vq) 137 { 138 struct virtio_blk *vblk = vq->vdev->priv; 139 bool req_done = false; 140 int qid = vq->index; 141 struct virtblk_req *vbr; 142 unsigned long flags; 143 unsigned int len; 144 145 spin_lock_irqsave(&vblk->vqs[qid].lock, flags); 146 do { 147 virtqueue_disable_cb(vq); 148 while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) { 149 blk_mq_complete_request(vbr->req, vbr->req->errors); 150 req_done = true; 151 } 152 if (unlikely(virtqueue_is_broken(vq))) 153 break; 154 } while (!virtqueue_enable_cb(vq)); 155 156 /* In case queue is stopped waiting for more buffers. */ 157 if (req_done) 158 blk_mq_start_stopped_hw_queues(vblk->disk->queue, true); 159 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); 160 } 161 162 static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, 163 const struct blk_mq_queue_data *bd) 164 { 165 struct virtio_blk *vblk = hctx->queue->queuedata; 166 struct request *req = bd->rq; 167 struct virtblk_req *vbr = blk_mq_rq_to_pdu(req); 168 unsigned long flags; 169 unsigned int num; 170 int qid = hctx->queue_num; 171 int err; 172 bool notify = false; 173 174 BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems); 175 176 vbr->req = req; 177 if (req_op(req) == REQ_OP_FLUSH) { 178 vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_FLUSH); 179 vbr->out_hdr.sector = 0; 180 vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req)); 181 } else { 182 switch (req->cmd_type) { 183 case REQ_TYPE_FS: 184 vbr->out_hdr.type = 0; 185 vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, blk_rq_pos(vbr->req)); 186 vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req)); 187 break; 188 case REQ_TYPE_BLOCK_PC: 189 vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_SCSI_CMD); 190 vbr->out_hdr.sector = 0; 191 vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req)); 192 break; 193 case REQ_TYPE_DRV_PRIV: 194 vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID); 195 vbr->out_hdr.sector = 0; 196 vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req)); 197 break; 198 default: 199 /* We don't put anything else in the queue. */ 200 BUG(); 201 } 202 } 203 204 blk_mq_start_request(req); 205 206 num = blk_rq_map_sg(hctx->queue, vbr->req, vbr->sg); 207 if (num) { 208 if (rq_data_dir(vbr->req) == WRITE) 209 vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT); 210 else 211 vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN); 212 } 213 214 spin_lock_irqsave(&vblk->vqs[qid].lock, flags); 215 err = __virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num); 216 if (err) { 217 virtqueue_kick(vblk->vqs[qid].vq); 218 blk_mq_stop_hw_queue(hctx); 219 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); 220 /* Out of mem doesn't actually happen, since we fall back 221 * to direct descriptors */ 222 if (err == -ENOMEM || err == -ENOSPC) 223 return BLK_MQ_RQ_QUEUE_BUSY; 224 return BLK_MQ_RQ_QUEUE_ERROR; 225 } 226 227 if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq)) 228 notify = true; 229 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); 230 231 if (notify) 232 virtqueue_notify(vblk->vqs[qid].vq); 233 return BLK_MQ_RQ_QUEUE_OK; 234 } 235 236 /* return id (s/n) string for *disk to *id_str 237 */ 238 static int virtblk_get_id(struct gendisk *disk, char *id_str) 239 { 240 struct virtio_blk *vblk = disk->private_data; 241 struct request_queue *q = vblk->disk->queue; 242 struct request *req; 243 int err; 244 245 req = blk_get_request(q, READ, GFP_KERNEL); 246 if (IS_ERR(req)) 247 return PTR_ERR(req); 248 req->cmd_type = REQ_TYPE_DRV_PRIV; 249 250 err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL); 251 if (err) 252 goto out; 253 254 err = blk_execute_rq(vblk->disk->queue, vblk->disk, req, false); 255 out: 256 blk_put_request(req); 257 return err; 258 } 259 260 static int virtblk_ioctl(struct block_device *bdev, fmode_t mode, 261 unsigned int cmd, unsigned long data) 262 { 263 struct gendisk *disk = bdev->bd_disk; 264 struct virtio_blk *vblk = disk->private_data; 265 266 /* 267 * Only allow the generic SCSI ioctls if the host can support it. 268 */ 269 if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI)) 270 return -ENOTTY; 271 272 return scsi_cmd_blk_ioctl(bdev, mode, cmd, 273 (void __user *)data); 274 } 275 276 /* We provide getgeo only to please some old bootloader/partitioning tools */ 277 static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo) 278 { 279 struct virtio_blk *vblk = bd->bd_disk->private_data; 280 281 /* see if the host passed in geometry config */ 282 if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) { 283 virtio_cread(vblk->vdev, struct virtio_blk_config, 284 geometry.cylinders, &geo->cylinders); 285 virtio_cread(vblk->vdev, struct virtio_blk_config, 286 geometry.heads, &geo->heads); 287 virtio_cread(vblk->vdev, struct virtio_blk_config, 288 geometry.sectors, &geo->sectors); 289 } else { 290 /* some standard values, similar to sd */ 291 geo->heads = 1 << 6; 292 geo->sectors = 1 << 5; 293 geo->cylinders = get_capacity(bd->bd_disk) >> 11; 294 } 295 return 0; 296 } 297 298 static const struct block_device_operations virtblk_fops = { 299 .ioctl = virtblk_ioctl, 300 .owner = THIS_MODULE, 301 .getgeo = virtblk_getgeo, 302 }; 303 304 static int index_to_minor(int index) 305 { 306 return index << PART_BITS; 307 } 308 309 static int minor_to_index(int minor) 310 { 311 return minor >> PART_BITS; 312 } 313 314 static ssize_t virtblk_serial_show(struct device *dev, 315 struct device_attribute *attr, char *buf) 316 { 317 struct gendisk *disk = dev_to_disk(dev); 318 int err; 319 320 /* sysfs gives us a PAGE_SIZE buffer */ 321 BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES); 322 323 buf[VIRTIO_BLK_ID_BYTES] = '\0'; 324 err = virtblk_get_id(disk, buf); 325 if (!err) 326 return strlen(buf); 327 328 if (err == -EIO) /* Unsupported? Make it empty. */ 329 return 0; 330 331 return err; 332 } 333 334 static DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL); 335 336 static void virtblk_config_changed_work(struct work_struct *work) 337 { 338 struct virtio_blk *vblk = 339 container_of(work, struct virtio_blk, config_work); 340 struct virtio_device *vdev = vblk->vdev; 341 struct request_queue *q = vblk->disk->queue; 342 char cap_str_2[10], cap_str_10[10]; 343 char *envp[] = { "RESIZE=1", NULL }; 344 u64 capacity; 345 346 /* Host must always specify the capacity. */ 347 virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity); 348 349 /* If capacity is too big, truncate with warning. */ 350 if ((sector_t)capacity != capacity) { 351 dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n", 352 (unsigned long long)capacity); 353 capacity = (sector_t)-1; 354 } 355 356 string_get_size(capacity, queue_logical_block_size(q), 357 STRING_UNITS_2, cap_str_2, sizeof(cap_str_2)); 358 string_get_size(capacity, queue_logical_block_size(q), 359 STRING_UNITS_10, cap_str_10, sizeof(cap_str_10)); 360 361 dev_notice(&vdev->dev, 362 "new size: %llu %d-byte logical blocks (%s/%s)\n", 363 (unsigned long long)capacity, 364 queue_logical_block_size(q), 365 cap_str_10, cap_str_2); 366 367 set_capacity(vblk->disk, capacity); 368 revalidate_disk(vblk->disk); 369 kobject_uevent_env(&disk_to_dev(vblk->disk)->kobj, KOBJ_CHANGE, envp); 370 } 371 372 static void virtblk_config_changed(struct virtio_device *vdev) 373 { 374 struct virtio_blk *vblk = vdev->priv; 375 376 queue_work(virtblk_wq, &vblk->config_work); 377 } 378 379 static int init_vq(struct virtio_blk *vblk) 380 { 381 int err; 382 int i; 383 vq_callback_t **callbacks; 384 const char **names; 385 struct virtqueue **vqs; 386 unsigned short num_vqs; 387 struct virtio_device *vdev = vblk->vdev; 388 389 err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ, 390 struct virtio_blk_config, num_queues, 391 &num_vqs); 392 if (err) 393 num_vqs = 1; 394 395 vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL); 396 if (!vblk->vqs) 397 return -ENOMEM; 398 399 names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL); 400 callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL); 401 vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL); 402 if (!names || !callbacks || !vqs) { 403 err = -ENOMEM; 404 goto out; 405 } 406 407 for (i = 0; i < num_vqs; i++) { 408 callbacks[i] = virtblk_done; 409 snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i); 410 names[i] = vblk->vqs[i].name; 411 } 412 413 /* Discover virtqueues and write information to configuration. */ 414 err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names); 415 if (err) 416 goto out; 417 418 for (i = 0; i < num_vqs; i++) { 419 spin_lock_init(&vblk->vqs[i].lock); 420 vblk->vqs[i].vq = vqs[i]; 421 } 422 vblk->num_vqs = num_vqs; 423 424 out: 425 kfree(vqs); 426 kfree(callbacks); 427 kfree(names); 428 if (err) 429 kfree(vblk->vqs); 430 return err; 431 } 432 433 /* 434 * Legacy naming scheme used for virtio devices. We are stuck with it for 435 * virtio blk but don't ever use it for any new driver. 436 */ 437 static int virtblk_name_format(char *prefix, int index, char *buf, int buflen) 438 { 439 const int base = 'z' - 'a' + 1; 440 char *begin = buf + strlen(prefix); 441 char *end = buf + buflen; 442 char *p; 443 int unit; 444 445 p = end - 1; 446 *p = '\0'; 447 unit = base; 448 do { 449 if (p == begin) 450 return -EINVAL; 451 *--p = 'a' + (index % unit); 452 index = (index / unit) - 1; 453 } while (index >= 0); 454 455 memmove(begin, p, end - p); 456 memcpy(buf, prefix, strlen(prefix)); 457 458 return 0; 459 } 460 461 static int virtblk_get_cache_mode(struct virtio_device *vdev) 462 { 463 u8 writeback; 464 int err; 465 466 err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE, 467 struct virtio_blk_config, wce, 468 &writeback); 469 470 /* 471 * If WCE is not configurable and flush is not available, 472 * assume no writeback cache is in use. 473 */ 474 if (err) 475 writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH); 476 477 return writeback; 478 } 479 480 static void virtblk_update_cache_mode(struct virtio_device *vdev) 481 { 482 u8 writeback = virtblk_get_cache_mode(vdev); 483 struct virtio_blk *vblk = vdev->priv; 484 485 blk_queue_write_cache(vblk->disk->queue, writeback, false); 486 revalidate_disk(vblk->disk); 487 } 488 489 static const char *const virtblk_cache_types[] = { 490 "write through", "write back" 491 }; 492 493 static ssize_t 494 virtblk_cache_type_store(struct device *dev, struct device_attribute *attr, 495 const char *buf, size_t count) 496 { 497 struct gendisk *disk = dev_to_disk(dev); 498 struct virtio_blk *vblk = disk->private_data; 499 struct virtio_device *vdev = vblk->vdev; 500 int i; 501 502 BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE)); 503 for (i = ARRAY_SIZE(virtblk_cache_types); --i >= 0; ) 504 if (sysfs_streq(buf, virtblk_cache_types[i])) 505 break; 506 507 if (i < 0) 508 return -EINVAL; 509 510 virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i); 511 virtblk_update_cache_mode(vdev); 512 return count; 513 } 514 515 static ssize_t 516 virtblk_cache_type_show(struct device *dev, struct device_attribute *attr, 517 char *buf) 518 { 519 struct gendisk *disk = dev_to_disk(dev); 520 struct virtio_blk *vblk = disk->private_data; 521 u8 writeback = virtblk_get_cache_mode(vblk->vdev); 522 523 BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types)); 524 return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]); 525 } 526 527 static const struct device_attribute dev_attr_cache_type_ro = 528 __ATTR(cache_type, S_IRUGO, 529 virtblk_cache_type_show, NULL); 530 static const struct device_attribute dev_attr_cache_type_rw = 531 __ATTR(cache_type, S_IRUGO|S_IWUSR, 532 virtblk_cache_type_show, virtblk_cache_type_store); 533 534 static int virtblk_init_request(void *data, struct request *rq, 535 unsigned int hctx_idx, unsigned int request_idx, 536 unsigned int numa_node) 537 { 538 struct virtio_blk *vblk = data; 539 struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq); 540 541 sg_init_table(vbr->sg, vblk->sg_elems); 542 return 0; 543 } 544 545 static struct blk_mq_ops virtio_mq_ops = { 546 .queue_rq = virtio_queue_rq, 547 .complete = virtblk_request_done, 548 .init_request = virtblk_init_request, 549 }; 550 551 static unsigned int virtblk_queue_depth; 552 module_param_named(queue_depth, virtblk_queue_depth, uint, 0444); 553 554 static int virtblk_probe(struct virtio_device *vdev) 555 { 556 struct virtio_blk *vblk; 557 struct request_queue *q; 558 int err, index; 559 560 u64 cap; 561 u32 v, blk_size, sg_elems, opt_io_size; 562 u16 min_io_size; 563 u8 physical_block_exp, alignment_offset; 564 565 if (!vdev->config->get) { 566 dev_err(&vdev->dev, "%s failure: config access disabled\n", 567 __func__); 568 return -EINVAL; 569 } 570 571 err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS), 572 GFP_KERNEL); 573 if (err < 0) 574 goto out; 575 index = err; 576 577 /* We need to know how many segments before we allocate. */ 578 err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX, 579 struct virtio_blk_config, seg_max, 580 &sg_elems); 581 582 /* We need at least one SG element, whatever they say. */ 583 if (err || !sg_elems) 584 sg_elems = 1; 585 586 /* We need an extra sg elements at head and tail. */ 587 sg_elems += 2; 588 vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL); 589 if (!vblk) { 590 err = -ENOMEM; 591 goto out_free_index; 592 } 593 594 vblk->vdev = vdev; 595 vblk->sg_elems = sg_elems; 596 597 INIT_WORK(&vblk->config_work, virtblk_config_changed_work); 598 599 err = init_vq(vblk); 600 if (err) 601 goto out_free_vblk; 602 603 /* FIXME: How many partitions? How long is a piece of string? */ 604 vblk->disk = alloc_disk(1 << PART_BITS); 605 if (!vblk->disk) { 606 err = -ENOMEM; 607 goto out_free_vq; 608 } 609 610 /* Default queue sizing is to fill the ring. */ 611 if (!virtblk_queue_depth) { 612 virtblk_queue_depth = vblk->vqs[0].vq->num_free; 613 /* ... but without indirect descs, we use 2 descs per req */ 614 if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC)) 615 virtblk_queue_depth /= 2; 616 } 617 618 memset(&vblk->tag_set, 0, sizeof(vblk->tag_set)); 619 vblk->tag_set.ops = &virtio_mq_ops; 620 vblk->tag_set.queue_depth = virtblk_queue_depth; 621 vblk->tag_set.numa_node = NUMA_NO_NODE; 622 vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; 623 vblk->tag_set.cmd_size = 624 sizeof(struct virtblk_req) + 625 sizeof(struct scatterlist) * sg_elems; 626 vblk->tag_set.driver_data = vblk; 627 vblk->tag_set.nr_hw_queues = vblk->num_vqs; 628 629 err = blk_mq_alloc_tag_set(&vblk->tag_set); 630 if (err) 631 goto out_put_disk; 632 633 q = blk_mq_init_queue(&vblk->tag_set); 634 if (IS_ERR(q)) { 635 err = -ENOMEM; 636 goto out_free_tags; 637 } 638 vblk->disk->queue = q; 639 640 q->queuedata = vblk; 641 642 virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN); 643 644 vblk->disk->major = major; 645 vblk->disk->first_minor = index_to_minor(index); 646 vblk->disk->private_data = vblk; 647 vblk->disk->fops = &virtblk_fops; 648 vblk->disk->flags |= GENHD_FL_EXT_DEVT; 649 vblk->index = index; 650 651 /* configure queue flush support */ 652 virtblk_update_cache_mode(vdev); 653 654 /* If disk is read-only in the host, the guest should obey */ 655 if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO)) 656 set_disk_ro(vblk->disk, 1); 657 658 /* Host must always specify the capacity. */ 659 virtio_cread(vdev, struct virtio_blk_config, capacity, &cap); 660 661 /* If capacity is too big, truncate with warning. */ 662 if ((sector_t)cap != cap) { 663 dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n", 664 (unsigned long long)cap); 665 cap = (sector_t)-1; 666 } 667 set_capacity(vblk->disk, cap); 668 669 /* We can handle whatever the host told us to handle. */ 670 blk_queue_max_segments(q, vblk->sg_elems-2); 671 672 /* No need to bounce any requests */ 673 blk_queue_bounce_limit(q, BLK_BOUNCE_ANY); 674 675 /* No real sector limit. */ 676 blk_queue_max_hw_sectors(q, -1U); 677 678 /* Host can optionally specify maximum segment size and number of 679 * segments. */ 680 err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX, 681 struct virtio_blk_config, size_max, &v); 682 if (!err) 683 blk_queue_max_segment_size(q, v); 684 else 685 blk_queue_max_segment_size(q, -1U); 686 687 /* Host can optionally specify the block size of the device */ 688 err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE, 689 struct virtio_blk_config, blk_size, 690 &blk_size); 691 if (!err) 692 blk_queue_logical_block_size(q, blk_size); 693 else 694 blk_size = queue_logical_block_size(q); 695 696 /* Use topology information if available */ 697 err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY, 698 struct virtio_blk_config, physical_block_exp, 699 &physical_block_exp); 700 if (!err && physical_block_exp) 701 blk_queue_physical_block_size(q, 702 blk_size * (1 << physical_block_exp)); 703 704 err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY, 705 struct virtio_blk_config, alignment_offset, 706 &alignment_offset); 707 if (!err && alignment_offset) 708 blk_queue_alignment_offset(q, blk_size * alignment_offset); 709 710 err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY, 711 struct virtio_blk_config, min_io_size, 712 &min_io_size); 713 if (!err && min_io_size) 714 blk_queue_io_min(q, blk_size * min_io_size); 715 716 err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY, 717 struct virtio_blk_config, opt_io_size, 718 &opt_io_size); 719 if (!err && opt_io_size) 720 blk_queue_io_opt(q, blk_size * opt_io_size); 721 722 virtio_device_ready(vdev); 723 724 device_add_disk(&vdev->dev, vblk->disk); 725 err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_serial); 726 if (err) 727 goto out_del_disk; 728 729 if (virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE)) 730 err = device_create_file(disk_to_dev(vblk->disk), 731 &dev_attr_cache_type_rw); 732 else 733 err = device_create_file(disk_to_dev(vblk->disk), 734 &dev_attr_cache_type_ro); 735 if (err) 736 goto out_del_disk; 737 return 0; 738 739 out_del_disk: 740 del_gendisk(vblk->disk); 741 blk_cleanup_queue(vblk->disk->queue); 742 out_free_tags: 743 blk_mq_free_tag_set(&vblk->tag_set); 744 out_put_disk: 745 put_disk(vblk->disk); 746 out_free_vq: 747 vdev->config->del_vqs(vdev); 748 out_free_vblk: 749 kfree(vblk); 750 out_free_index: 751 ida_simple_remove(&vd_index_ida, index); 752 out: 753 return err; 754 } 755 756 static void virtblk_remove(struct virtio_device *vdev) 757 { 758 struct virtio_blk *vblk = vdev->priv; 759 int index = vblk->index; 760 int refc; 761 762 /* Make sure no work handler is accessing the device. */ 763 flush_work(&vblk->config_work); 764 765 del_gendisk(vblk->disk); 766 blk_cleanup_queue(vblk->disk->queue); 767 768 blk_mq_free_tag_set(&vblk->tag_set); 769 770 /* Stop all the virtqueues. */ 771 vdev->config->reset(vdev); 772 773 refc = atomic_read(&disk_to_dev(vblk->disk)->kobj.kref.refcount); 774 put_disk(vblk->disk); 775 vdev->config->del_vqs(vdev); 776 kfree(vblk->vqs); 777 kfree(vblk); 778 779 /* Only free device id if we don't have any users */ 780 if (refc == 1) 781 ida_simple_remove(&vd_index_ida, index); 782 } 783 784 #ifdef CONFIG_PM_SLEEP 785 static int virtblk_freeze(struct virtio_device *vdev) 786 { 787 struct virtio_blk *vblk = vdev->priv; 788 789 /* Ensure we don't receive any more interrupts */ 790 vdev->config->reset(vdev); 791 792 /* Make sure no work handler is accessing the device. */ 793 flush_work(&vblk->config_work); 794 795 blk_mq_stop_hw_queues(vblk->disk->queue); 796 797 vdev->config->del_vqs(vdev); 798 return 0; 799 } 800 801 static int virtblk_restore(struct virtio_device *vdev) 802 { 803 struct virtio_blk *vblk = vdev->priv; 804 int ret; 805 806 ret = init_vq(vdev->priv); 807 if (ret) 808 return ret; 809 810 virtio_device_ready(vdev); 811 812 blk_mq_start_stopped_hw_queues(vblk->disk->queue, true); 813 return 0; 814 } 815 #endif 816 817 static const struct virtio_device_id id_table[] = { 818 { VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID }, 819 { 0 }, 820 }; 821 822 static unsigned int features_legacy[] = { 823 VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY, 824 VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, VIRTIO_BLK_F_SCSI, 825 VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE, 826 VIRTIO_BLK_F_MQ, 827 } 828 ; 829 static unsigned int features[] = { 830 VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY, 831 VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, 832 VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE, 833 VIRTIO_BLK_F_MQ, 834 }; 835 836 static struct virtio_driver virtio_blk = { 837 .feature_table = features, 838 .feature_table_size = ARRAY_SIZE(features), 839 .feature_table_legacy = features_legacy, 840 .feature_table_size_legacy = ARRAY_SIZE(features_legacy), 841 .driver.name = KBUILD_MODNAME, 842 .driver.owner = THIS_MODULE, 843 .id_table = id_table, 844 .probe = virtblk_probe, 845 .remove = virtblk_remove, 846 .config_changed = virtblk_config_changed, 847 #ifdef CONFIG_PM_SLEEP 848 .freeze = virtblk_freeze, 849 .restore = virtblk_restore, 850 #endif 851 }; 852 853 static int __init init(void) 854 { 855 int error; 856 857 virtblk_wq = alloc_workqueue("virtio-blk", 0, 0); 858 if (!virtblk_wq) 859 return -ENOMEM; 860 861 major = register_blkdev(0, "virtblk"); 862 if (major < 0) { 863 error = major; 864 goto out_destroy_workqueue; 865 } 866 867 error = register_virtio_driver(&virtio_blk); 868 if (error) 869 goto out_unregister_blkdev; 870 return 0; 871 872 out_unregister_blkdev: 873 unregister_blkdev(major, "virtblk"); 874 out_destroy_workqueue: 875 destroy_workqueue(virtblk_wq); 876 return error; 877 } 878 879 static void __exit fini(void) 880 { 881 unregister_virtio_driver(&virtio_blk); 882 unregister_blkdev(major, "virtblk"); 883 destroy_workqueue(virtblk_wq); 884 } 885 module_init(init); 886 module_exit(fini); 887 888 MODULE_DEVICE_TABLE(virtio, id_table); 889 MODULE_DESCRIPTION("Virtio block driver"); 890 MODULE_LICENSE("GPL"); 891