1 // SPDX-License-Identifier: GPL-2.0-only 2 //#define DEBUG 3 #include <linux/spinlock.h> 4 #include <linux/slab.h> 5 #include <linux/blkdev.h> 6 #include <linux/hdreg.h> 7 #include <linux/module.h> 8 #include <linux/mutex.h> 9 #include <linux/interrupt.h> 10 #include <linux/virtio.h> 11 #include <linux/virtio_blk.h> 12 #include <linux/scatterlist.h> 13 #include <linux/string_helpers.h> 14 #include <linux/idr.h> 15 #include <linux/blk-mq.h> 16 #include <linux/blk-mq-virtio.h> 17 #include <linux/numa.h> 18 #include <uapi/linux/virtio_ring.h> 19 20 #define PART_BITS 4 21 #define VQ_NAME_LEN 16 22 #define MAX_DISCARD_SEGMENTS 256u 23 24 /* The maximum number of sg elements that fit into a virtqueue */ 25 #define VIRTIO_BLK_MAX_SG_ELEMS 32768 26 27 #ifdef CONFIG_ARCH_NO_SG_CHAIN 28 #define VIRTIO_BLK_INLINE_SG_CNT 0 29 #else 30 #define VIRTIO_BLK_INLINE_SG_CNT 2 31 #endif 32 33 static unsigned int num_request_queues; 34 module_param(num_request_queues, uint, 0644); 35 MODULE_PARM_DESC(num_request_queues, 36 "Limit the number of request queues to use for blk device. " 37 "0 for no limit. " 38 "Values > nr_cpu_ids truncated to nr_cpu_ids."); 39 40 static unsigned int poll_queues; 41 module_param(poll_queues, uint, 0644); 42 MODULE_PARM_DESC(poll_queues, "The number of dedicated virtqueues for polling I/O"); 43 44 static int major; 45 static DEFINE_IDA(vd_index_ida); 46 47 static struct workqueue_struct *virtblk_wq; 48 49 struct virtio_blk_vq { 50 struct virtqueue *vq; 51 spinlock_t lock; 52 char name[VQ_NAME_LEN]; 53 } ____cacheline_aligned_in_smp; 54 55 struct virtio_blk { 56 /* 57 * This mutex must be held by anything that may run after 58 * virtblk_remove() sets vblk->vdev to NULL. 59 * 60 * blk-mq, virtqueue processing, and sysfs attribute code paths are 61 * shut down before vblk->vdev is set to NULL and therefore do not need 62 * to hold this mutex. 63 */ 64 struct mutex vdev_mutex; 65 struct virtio_device *vdev; 66 67 /* The disk structure for the kernel. */ 68 struct gendisk *disk; 69 70 /* Block layer tags. */ 71 struct blk_mq_tag_set tag_set; 72 73 /* Process context for config space updates */ 74 struct work_struct config_work; 75 76 /* Ida index - used to track minor number allocations. */ 77 int index; 78 79 /* num of vqs */ 80 int num_vqs; 81 int io_queues[HCTX_MAX_TYPES]; 82 struct virtio_blk_vq *vqs; 83 }; 84 85 struct virtblk_req { 86 struct virtio_blk_outhdr out_hdr; 87 u8 status; 88 struct sg_table sg_table; 89 struct scatterlist sg[]; 90 }; 91 92 static inline blk_status_t virtblk_result(struct virtblk_req *vbr) 93 { 94 switch (vbr->status) { 95 case VIRTIO_BLK_S_OK: 96 return BLK_STS_OK; 97 case VIRTIO_BLK_S_UNSUPP: 98 return BLK_STS_NOTSUPP; 99 default: 100 return BLK_STS_IOERR; 101 } 102 } 103 104 static inline struct virtio_blk_vq *get_virtio_blk_vq(struct blk_mq_hw_ctx *hctx) 105 { 106 struct virtio_blk *vblk = hctx->queue->queuedata; 107 struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num]; 108 109 return vq; 110 } 111 112 static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr) 113 { 114 struct scatterlist hdr, status, *sgs[3]; 115 unsigned int num_out = 0, num_in = 0; 116 117 sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr)); 118 sgs[num_out++] = &hdr; 119 120 if (vbr->sg_table.nents) { 121 if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT)) 122 sgs[num_out++] = vbr->sg_table.sgl; 123 else 124 sgs[num_out + num_in++] = vbr->sg_table.sgl; 125 } 126 127 sg_init_one(&status, &vbr->status, sizeof(vbr->status)); 128 sgs[num_out + num_in++] = &status; 129 130 return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC); 131 } 132 133 static int virtblk_setup_discard_write_zeroes_erase(struct request *req, bool unmap) 134 { 135 unsigned short segments = blk_rq_nr_discard_segments(req); 136 unsigned short n = 0; 137 struct virtio_blk_discard_write_zeroes *range; 138 struct bio *bio; 139 u32 flags = 0; 140 141 if (unmap) 142 flags |= VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP; 143 144 range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC); 145 if (!range) 146 return -ENOMEM; 147 148 /* 149 * Single max discard segment means multi-range discard isn't 150 * supported, and block layer only runs contiguity merge like 151 * normal RW request. So we can't reply on bio for retrieving 152 * each range info. 153 */ 154 if (queue_max_discard_segments(req->q) == 1) { 155 range[0].flags = cpu_to_le32(flags); 156 range[0].num_sectors = cpu_to_le32(blk_rq_sectors(req)); 157 range[0].sector = cpu_to_le64(blk_rq_pos(req)); 158 n = 1; 159 } else { 160 __rq_for_each_bio(bio, req) { 161 u64 sector = bio->bi_iter.bi_sector; 162 u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT; 163 164 range[n].flags = cpu_to_le32(flags); 165 range[n].num_sectors = cpu_to_le32(num_sectors); 166 range[n].sector = cpu_to_le64(sector); 167 n++; 168 } 169 } 170 171 WARN_ON_ONCE(n != segments); 172 173 bvec_set_virt(&req->special_vec, range, sizeof(*range) * segments); 174 req->rq_flags |= RQF_SPECIAL_PAYLOAD; 175 176 return 0; 177 } 178 179 static void virtblk_unmap_data(struct request *req, struct virtblk_req *vbr) 180 { 181 if (blk_rq_nr_phys_segments(req)) 182 sg_free_table_chained(&vbr->sg_table, 183 VIRTIO_BLK_INLINE_SG_CNT); 184 } 185 186 static int virtblk_map_data(struct blk_mq_hw_ctx *hctx, struct request *req, 187 struct virtblk_req *vbr) 188 { 189 int err; 190 191 if (!blk_rq_nr_phys_segments(req)) 192 return 0; 193 194 vbr->sg_table.sgl = vbr->sg; 195 err = sg_alloc_table_chained(&vbr->sg_table, 196 blk_rq_nr_phys_segments(req), 197 vbr->sg_table.sgl, 198 VIRTIO_BLK_INLINE_SG_CNT); 199 if (unlikely(err)) 200 return -ENOMEM; 201 202 return blk_rq_map_sg(hctx->queue, req, vbr->sg_table.sgl); 203 } 204 205 static void virtblk_cleanup_cmd(struct request *req) 206 { 207 if (req->rq_flags & RQF_SPECIAL_PAYLOAD) 208 kfree(bvec_virt(&req->special_vec)); 209 } 210 211 static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev, 212 struct request *req, 213 struct virtblk_req *vbr) 214 { 215 bool unmap = false; 216 u32 type; 217 218 vbr->out_hdr.sector = 0; 219 220 switch (req_op(req)) { 221 case REQ_OP_READ: 222 type = VIRTIO_BLK_T_IN; 223 vbr->out_hdr.sector = cpu_to_virtio64(vdev, 224 blk_rq_pos(req)); 225 break; 226 case REQ_OP_WRITE: 227 type = VIRTIO_BLK_T_OUT; 228 vbr->out_hdr.sector = cpu_to_virtio64(vdev, 229 blk_rq_pos(req)); 230 break; 231 case REQ_OP_FLUSH: 232 type = VIRTIO_BLK_T_FLUSH; 233 break; 234 case REQ_OP_DISCARD: 235 type = VIRTIO_BLK_T_DISCARD; 236 break; 237 case REQ_OP_WRITE_ZEROES: 238 type = VIRTIO_BLK_T_WRITE_ZEROES; 239 unmap = !(req->cmd_flags & REQ_NOUNMAP); 240 break; 241 case REQ_OP_SECURE_ERASE: 242 type = VIRTIO_BLK_T_SECURE_ERASE; 243 break; 244 case REQ_OP_DRV_IN: 245 type = VIRTIO_BLK_T_GET_ID; 246 break; 247 default: 248 WARN_ON_ONCE(1); 249 return BLK_STS_IOERR; 250 } 251 252 vbr->out_hdr.type = cpu_to_virtio32(vdev, type); 253 vbr->out_hdr.ioprio = cpu_to_virtio32(vdev, req_get_ioprio(req)); 254 255 if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES || 256 type == VIRTIO_BLK_T_SECURE_ERASE) { 257 if (virtblk_setup_discard_write_zeroes_erase(req, unmap)) 258 return BLK_STS_RESOURCE; 259 } 260 261 return 0; 262 } 263 264 static inline void virtblk_request_done(struct request *req) 265 { 266 struct virtblk_req *vbr = blk_mq_rq_to_pdu(req); 267 268 virtblk_unmap_data(req, vbr); 269 virtblk_cleanup_cmd(req); 270 blk_mq_end_request(req, virtblk_result(vbr)); 271 } 272 273 static void virtblk_done(struct virtqueue *vq) 274 { 275 struct virtio_blk *vblk = vq->vdev->priv; 276 bool req_done = false; 277 int qid = vq->index; 278 struct virtblk_req *vbr; 279 unsigned long flags; 280 unsigned int len; 281 282 spin_lock_irqsave(&vblk->vqs[qid].lock, flags); 283 do { 284 virtqueue_disable_cb(vq); 285 while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) { 286 struct request *req = blk_mq_rq_from_pdu(vbr); 287 288 if (likely(!blk_should_fake_timeout(req->q))) 289 blk_mq_complete_request(req); 290 req_done = true; 291 } 292 if (unlikely(virtqueue_is_broken(vq))) 293 break; 294 } while (!virtqueue_enable_cb(vq)); 295 296 /* In case queue is stopped waiting for more buffers. */ 297 if (req_done) 298 blk_mq_start_stopped_hw_queues(vblk->disk->queue, true); 299 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); 300 } 301 302 static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx) 303 { 304 struct virtio_blk *vblk = hctx->queue->queuedata; 305 struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num]; 306 bool kick; 307 308 spin_lock_irq(&vq->lock); 309 kick = virtqueue_kick_prepare(vq->vq); 310 spin_unlock_irq(&vq->lock); 311 312 if (kick) 313 virtqueue_notify(vq->vq); 314 } 315 316 static blk_status_t virtblk_fail_to_queue(struct request *req, int rc) 317 { 318 virtblk_cleanup_cmd(req); 319 switch (rc) { 320 case -ENOSPC: 321 return BLK_STS_DEV_RESOURCE; 322 case -ENOMEM: 323 return BLK_STS_RESOURCE; 324 default: 325 return BLK_STS_IOERR; 326 } 327 } 328 329 static blk_status_t virtblk_prep_rq(struct blk_mq_hw_ctx *hctx, 330 struct virtio_blk *vblk, 331 struct request *req, 332 struct virtblk_req *vbr) 333 { 334 blk_status_t status; 335 int num; 336 337 status = virtblk_setup_cmd(vblk->vdev, req, vbr); 338 if (unlikely(status)) 339 return status; 340 341 num = virtblk_map_data(hctx, req, vbr); 342 if (unlikely(num < 0)) 343 return virtblk_fail_to_queue(req, -ENOMEM); 344 vbr->sg_table.nents = num; 345 346 blk_mq_start_request(req); 347 348 return BLK_STS_OK; 349 } 350 351 static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx, 352 const struct blk_mq_queue_data *bd) 353 { 354 struct virtio_blk *vblk = hctx->queue->queuedata; 355 struct request *req = bd->rq; 356 struct virtblk_req *vbr = blk_mq_rq_to_pdu(req); 357 unsigned long flags; 358 int qid = hctx->queue_num; 359 bool notify = false; 360 blk_status_t status; 361 int err; 362 363 status = virtblk_prep_rq(hctx, vblk, req, vbr); 364 if (unlikely(status)) 365 return status; 366 367 spin_lock_irqsave(&vblk->vqs[qid].lock, flags); 368 err = virtblk_add_req(vblk->vqs[qid].vq, vbr); 369 if (err) { 370 virtqueue_kick(vblk->vqs[qid].vq); 371 /* Don't stop the queue if -ENOMEM: we may have failed to 372 * bounce the buffer due to global resource outage. 373 */ 374 if (err == -ENOSPC) 375 blk_mq_stop_hw_queue(hctx); 376 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); 377 virtblk_unmap_data(req, vbr); 378 return virtblk_fail_to_queue(req, err); 379 } 380 381 if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq)) 382 notify = true; 383 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); 384 385 if (notify) 386 virtqueue_notify(vblk->vqs[qid].vq); 387 return BLK_STS_OK; 388 } 389 390 static bool virtblk_prep_rq_batch(struct request *req) 391 { 392 struct virtio_blk *vblk = req->mq_hctx->queue->queuedata; 393 struct virtblk_req *vbr = blk_mq_rq_to_pdu(req); 394 395 req->mq_hctx->tags->rqs[req->tag] = req; 396 397 return virtblk_prep_rq(req->mq_hctx, vblk, req, vbr) == BLK_STS_OK; 398 } 399 400 static bool virtblk_add_req_batch(struct virtio_blk_vq *vq, 401 struct request **rqlist) 402 { 403 unsigned long flags; 404 int err; 405 bool kick; 406 407 spin_lock_irqsave(&vq->lock, flags); 408 409 while (!rq_list_empty(*rqlist)) { 410 struct request *req = rq_list_pop(rqlist); 411 struct virtblk_req *vbr = blk_mq_rq_to_pdu(req); 412 413 err = virtblk_add_req(vq->vq, vbr); 414 if (err) { 415 virtblk_unmap_data(req, vbr); 416 virtblk_cleanup_cmd(req); 417 blk_mq_requeue_request(req, true); 418 } 419 } 420 421 kick = virtqueue_kick_prepare(vq->vq); 422 spin_unlock_irqrestore(&vq->lock, flags); 423 424 return kick; 425 } 426 427 static void virtio_queue_rqs(struct request **rqlist) 428 { 429 struct request *req, *next, *prev = NULL; 430 struct request *requeue_list = NULL; 431 432 rq_list_for_each_safe(rqlist, req, next) { 433 struct virtio_blk_vq *vq = get_virtio_blk_vq(req->mq_hctx); 434 bool kick; 435 436 if (!virtblk_prep_rq_batch(req)) { 437 rq_list_move(rqlist, &requeue_list, req, prev); 438 req = prev; 439 if (!req) 440 continue; 441 } 442 443 if (!next || req->mq_hctx != next->mq_hctx) { 444 req->rq_next = NULL; 445 kick = virtblk_add_req_batch(vq, rqlist); 446 if (kick) 447 virtqueue_notify(vq->vq); 448 449 *rqlist = next; 450 prev = NULL; 451 } else 452 prev = req; 453 } 454 455 *rqlist = requeue_list; 456 } 457 458 /* return id (s/n) string for *disk to *id_str 459 */ 460 static int virtblk_get_id(struct gendisk *disk, char *id_str) 461 { 462 struct virtio_blk *vblk = disk->private_data; 463 struct request_queue *q = vblk->disk->queue; 464 struct request *req; 465 int err; 466 467 req = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0); 468 if (IS_ERR(req)) 469 return PTR_ERR(req); 470 471 err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL); 472 if (err) 473 goto out; 474 475 blk_execute_rq(req, false); 476 err = blk_status_to_errno(virtblk_result(blk_mq_rq_to_pdu(req))); 477 out: 478 blk_mq_free_request(req); 479 return err; 480 } 481 482 /* We provide getgeo only to please some old bootloader/partitioning tools */ 483 static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo) 484 { 485 struct virtio_blk *vblk = bd->bd_disk->private_data; 486 int ret = 0; 487 488 mutex_lock(&vblk->vdev_mutex); 489 490 if (!vblk->vdev) { 491 ret = -ENXIO; 492 goto out; 493 } 494 495 /* see if the host passed in geometry config */ 496 if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) { 497 virtio_cread(vblk->vdev, struct virtio_blk_config, 498 geometry.cylinders, &geo->cylinders); 499 virtio_cread(vblk->vdev, struct virtio_blk_config, 500 geometry.heads, &geo->heads); 501 virtio_cread(vblk->vdev, struct virtio_blk_config, 502 geometry.sectors, &geo->sectors); 503 } else { 504 /* some standard values, similar to sd */ 505 geo->heads = 1 << 6; 506 geo->sectors = 1 << 5; 507 geo->cylinders = get_capacity(bd->bd_disk) >> 11; 508 } 509 out: 510 mutex_unlock(&vblk->vdev_mutex); 511 return ret; 512 } 513 514 static void virtblk_free_disk(struct gendisk *disk) 515 { 516 struct virtio_blk *vblk = disk->private_data; 517 518 ida_free(&vd_index_ida, vblk->index); 519 mutex_destroy(&vblk->vdev_mutex); 520 kfree(vblk); 521 } 522 523 static const struct block_device_operations virtblk_fops = { 524 .owner = THIS_MODULE, 525 .getgeo = virtblk_getgeo, 526 .free_disk = virtblk_free_disk, 527 }; 528 529 static int index_to_minor(int index) 530 { 531 return index << PART_BITS; 532 } 533 534 static int minor_to_index(int minor) 535 { 536 return minor >> PART_BITS; 537 } 538 539 static ssize_t serial_show(struct device *dev, 540 struct device_attribute *attr, char *buf) 541 { 542 struct gendisk *disk = dev_to_disk(dev); 543 int err; 544 545 /* sysfs gives us a PAGE_SIZE buffer */ 546 BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES); 547 548 buf[VIRTIO_BLK_ID_BYTES] = '\0'; 549 err = virtblk_get_id(disk, buf); 550 if (!err) 551 return strlen(buf); 552 553 if (err == -EIO) /* Unsupported? Make it empty. */ 554 return 0; 555 556 return err; 557 } 558 559 static DEVICE_ATTR_RO(serial); 560 561 /* The queue's logical block size must be set before calling this */ 562 static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize) 563 { 564 struct virtio_device *vdev = vblk->vdev; 565 struct request_queue *q = vblk->disk->queue; 566 char cap_str_2[10], cap_str_10[10]; 567 unsigned long long nblocks; 568 u64 capacity; 569 570 /* Host must always specify the capacity. */ 571 virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity); 572 573 nblocks = DIV_ROUND_UP_ULL(capacity, queue_logical_block_size(q) >> 9); 574 575 string_get_size(nblocks, queue_logical_block_size(q), 576 STRING_UNITS_2, cap_str_2, sizeof(cap_str_2)); 577 string_get_size(nblocks, queue_logical_block_size(q), 578 STRING_UNITS_10, cap_str_10, sizeof(cap_str_10)); 579 580 dev_notice(&vdev->dev, 581 "[%s] %s%llu %d-byte logical blocks (%s/%s)\n", 582 vblk->disk->disk_name, 583 resize ? "new size: " : "", 584 nblocks, 585 queue_logical_block_size(q), 586 cap_str_10, 587 cap_str_2); 588 589 set_capacity_and_notify(vblk->disk, capacity); 590 } 591 592 static void virtblk_config_changed_work(struct work_struct *work) 593 { 594 struct virtio_blk *vblk = 595 container_of(work, struct virtio_blk, config_work); 596 597 virtblk_update_capacity(vblk, true); 598 } 599 600 static void virtblk_config_changed(struct virtio_device *vdev) 601 { 602 struct virtio_blk *vblk = vdev->priv; 603 604 queue_work(virtblk_wq, &vblk->config_work); 605 } 606 607 static int init_vq(struct virtio_blk *vblk) 608 { 609 int err; 610 int i; 611 vq_callback_t **callbacks; 612 const char **names; 613 struct virtqueue **vqs; 614 unsigned short num_vqs; 615 unsigned int num_poll_vqs; 616 struct virtio_device *vdev = vblk->vdev; 617 struct irq_affinity desc = { 0, }; 618 619 err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ, 620 struct virtio_blk_config, num_queues, 621 &num_vqs); 622 if (err) 623 num_vqs = 1; 624 625 if (!err && !num_vqs) { 626 dev_err(&vdev->dev, "MQ advertised but zero queues reported\n"); 627 return -EINVAL; 628 } 629 630 num_vqs = min_t(unsigned int, 631 min_not_zero(num_request_queues, nr_cpu_ids), 632 num_vqs); 633 634 num_poll_vqs = min_t(unsigned int, poll_queues, num_vqs - 1); 635 636 vblk->io_queues[HCTX_TYPE_DEFAULT] = num_vqs - num_poll_vqs; 637 vblk->io_queues[HCTX_TYPE_READ] = 0; 638 vblk->io_queues[HCTX_TYPE_POLL] = num_poll_vqs; 639 640 dev_info(&vdev->dev, "%d/%d/%d default/read/poll queues\n", 641 vblk->io_queues[HCTX_TYPE_DEFAULT], 642 vblk->io_queues[HCTX_TYPE_READ], 643 vblk->io_queues[HCTX_TYPE_POLL]); 644 645 vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL); 646 if (!vblk->vqs) 647 return -ENOMEM; 648 649 names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL); 650 callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL); 651 vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL); 652 if (!names || !callbacks || !vqs) { 653 err = -ENOMEM; 654 goto out; 655 } 656 657 for (i = 0; i < num_vqs - num_poll_vqs; i++) { 658 callbacks[i] = virtblk_done; 659 snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i); 660 names[i] = vblk->vqs[i].name; 661 } 662 663 for (; i < num_vqs; i++) { 664 callbacks[i] = NULL; 665 snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req_poll.%d", i); 666 names[i] = vblk->vqs[i].name; 667 } 668 669 /* Discover virtqueues and write information to configuration. */ 670 err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc); 671 if (err) 672 goto out; 673 674 for (i = 0; i < num_vqs; i++) { 675 spin_lock_init(&vblk->vqs[i].lock); 676 vblk->vqs[i].vq = vqs[i]; 677 } 678 vblk->num_vqs = num_vqs; 679 680 out: 681 kfree(vqs); 682 kfree(callbacks); 683 kfree(names); 684 if (err) 685 kfree(vblk->vqs); 686 return err; 687 } 688 689 /* 690 * Legacy naming scheme used for virtio devices. We are stuck with it for 691 * virtio blk but don't ever use it for any new driver. 692 */ 693 static int virtblk_name_format(char *prefix, int index, char *buf, int buflen) 694 { 695 const int base = 'z' - 'a' + 1; 696 char *begin = buf + strlen(prefix); 697 char *end = buf + buflen; 698 char *p; 699 int unit; 700 701 p = end - 1; 702 *p = '\0'; 703 unit = base; 704 do { 705 if (p == begin) 706 return -EINVAL; 707 *--p = 'a' + (index % unit); 708 index = (index / unit) - 1; 709 } while (index >= 0); 710 711 memmove(begin, p, end - p); 712 memcpy(buf, prefix, strlen(prefix)); 713 714 return 0; 715 } 716 717 static int virtblk_get_cache_mode(struct virtio_device *vdev) 718 { 719 u8 writeback; 720 int err; 721 722 err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE, 723 struct virtio_blk_config, wce, 724 &writeback); 725 726 /* 727 * If WCE is not configurable and flush is not available, 728 * assume no writeback cache is in use. 729 */ 730 if (err) 731 writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH); 732 733 return writeback; 734 } 735 736 static void virtblk_update_cache_mode(struct virtio_device *vdev) 737 { 738 u8 writeback = virtblk_get_cache_mode(vdev); 739 struct virtio_blk *vblk = vdev->priv; 740 741 blk_queue_write_cache(vblk->disk->queue, writeback, false); 742 } 743 744 static const char *const virtblk_cache_types[] = { 745 "write through", "write back" 746 }; 747 748 static ssize_t 749 cache_type_store(struct device *dev, struct device_attribute *attr, 750 const char *buf, size_t count) 751 { 752 struct gendisk *disk = dev_to_disk(dev); 753 struct virtio_blk *vblk = disk->private_data; 754 struct virtio_device *vdev = vblk->vdev; 755 int i; 756 757 BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE)); 758 i = sysfs_match_string(virtblk_cache_types, buf); 759 if (i < 0) 760 return i; 761 762 virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i); 763 virtblk_update_cache_mode(vdev); 764 return count; 765 } 766 767 static ssize_t 768 cache_type_show(struct device *dev, struct device_attribute *attr, char *buf) 769 { 770 struct gendisk *disk = dev_to_disk(dev); 771 struct virtio_blk *vblk = disk->private_data; 772 u8 writeback = virtblk_get_cache_mode(vblk->vdev); 773 774 BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types)); 775 return sysfs_emit(buf, "%s\n", virtblk_cache_types[writeback]); 776 } 777 778 static DEVICE_ATTR_RW(cache_type); 779 780 static struct attribute *virtblk_attrs[] = { 781 &dev_attr_serial.attr, 782 &dev_attr_cache_type.attr, 783 NULL, 784 }; 785 786 static umode_t virtblk_attrs_are_visible(struct kobject *kobj, 787 struct attribute *a, int n) 788 { 789 struct device *dev = kobj_to_dev(kobj); 790 struct gendisk *disk = dev_to_disk(dev); 791 struct virtio_blk *vblk = disk->private_data; 792 struct virtio_device *vdev = vblk->vdev; 793 794 if (a == &dev_attr_cache_type.attr && 795 !virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE)) 796 return S_IRUGO; 797 798 return a->mode; 799 } 800 801 static const struct attribute_group virtblk_attr_group = { 802 .attrs = virtblk_attrs, 803 .is_visible = virtblk_attrs_are_visible, 804 }; 805 806 static const struct attribute_group *virtblk_attr_groups[] = { 807 &virtblk_attr_group, 808 NULL, 809 }; 810 811 static void virtblk_map_queues(struct blk_mq_tag_set *set) 812 { 813 struct virtio_blk *vblk = set->driver_data; 814 int i, qoff; 815 816 for (i = 0, qoff = 0; i < set->nr_maps; i++) { 817 struct blk_mq_queue_map *map = &set->map[i]; 818 819 map->nr_queues = vblk->io_queues[i]; 820 map->queue_offset = qoff; 821 qoff += map->nr_queues; 822 823 if (map->nr_queues == 0) 824 continue; 825 826 /* 827 * Regular queues have interrupts and hence CPU affinity is 828 * defined by the core virtio code, but polling queues have 829 * no interrupts so we let the block layer assign CPU affinity. 830 */ 831 if (i == HCTX_TYPE_POLL) 832 blk_mq_map_queues(&set->map[i]); 833 else 834 blk_mq_virtio_map_queues(&set->map[i], vblk->vdev, 0); 835 } 836 } 837 838 static void virtblk_complete_batch(struct io_comp_batch *iob) 839 { 840 struct request *req; 841 842 rq_list_for_each(&iob->req_list, req) { 843 virtblk_unmap_data(req, blk_mq_rq_to_pdu(req)); 844 virtblk_cleanup_cmd(req); 845 } 846 blk_mq_end_request_batch(iob); 847 } 848 849 static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) 850 { 851 struct virtio_blk *vblk = hctx->queue->queuedata; 852 struct virtio_blk_vq *vq = get_virtio_blk_vq(hctx); 853 struct virtblk_req *vbr; 854 unsigned long flags; 855 unsigned int len; 856 int found = 0; 857 858 spin_lock_irqsave(&vq->lock, flags); 859 860 while ((vbr = virtqueue_get_buf(vq->vq, &len)) != NULL) { 861 struct request *req = blk_mq_rq_from_pdu(vbr); 862 863 found++; 864 if (!blk_mq_add_to_batch(req, iob, vbr->status, 865 virtblk_complete_batch)) 866 blk_mq_complete_request(req); 867 } 868 869 if (found) 870 blk_mq_start_stopped_hw_queues(vblk->disk->queue, true); 871 872 spin_unlock_irqrestore(&vq->lock, flags); 873 874 return found; 875 } 876 877 static const struct blk_mq_ops virtio_mq_ops = { 878 .queue_rq = virtio_queue_rq, 879 .queue_rqs = virtio_queue_rqs, 880 .commit_rqs = virtio_commit_rqs, 881 .complete = virtblk_request_done, 882 .map_queues = virtblk_map_queues, 883 .poll = virtblk_poll, 884 }; 885 886 static unsigned int virtblk_queue_depth; 887 module_param_named(queue_depth, virtblk_queue_depth, uint, 0444); 888 889 static int virtblk_probe(struct virtio_device *vdev) 890 { 891 struct virtio_blk *vblk; 892 struct request_queue *q; 893 int err, index; 894 895 u32 v, blk_size, max_size, sg_elems, opt_io_size; 896 u32 max_discard_segs = 0; 897 u32 discard_granularity = 0; 898 u16 min_io_size; 899 u8 physical_block_exp, alignment_offset; 900 unsigned int queue_depth; 901 902 if (!vdev->config->get) { 903 dev_err(&vdev->dev, "%s failure: config access disabled\n", 904 __func__); 905 return -EINVAL; 906 } 907 908 err = ida_alloc_range(&vd_index_ida, 0, 909 minor_to_index(1 << MINORBITS) - 1, GFP_KERNEL); 910 if (err < 0) 911 goto out; 912 index = err; 913 914 /* We need to know how many segments before we allocate. */ 915 err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX, 916 struct virtio_blk_config, seg_max, 917 &sg_elems); 918 919 /* We need at least one SG element, whatever they say. */ 920 if (err || !sg_elems) 921 sg_elems = 1; 922 923 /* Prevent integer overflows and honor max vq size */ 924 sg_elems = min_t(u32, sg_elems, VIRTIO_BLK_MAX_SG_ELEMS - 2); 925 926 vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL); 927 if (!vblk) { 928 err = -ENOMEM; 929 goto out_free_index; 930 } 931 932 mutex_init(&vblk->vdev_mutex); 933 934 vblk->vdev = vdev; 935 936 INIT_WORK(&vblk->config_work, virtblk_config_changed_work); 937 938 err = init_vq(vblk); 939 if (err) 940 goto out_free_vblk; 941 942 /* Default queue sizing is to fill the ring. */ 943 if (!virtblk_queue_depth) { 944 queue_depth = vblk->vqs[0].vq->num_free; 945 /* ... but without indirect descs, we use 2 descs per req */ 946 if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC)) 947 queue_depth /= 2; 948 } else { 949 queue_depth = virtblk_queue_depth; 950 } 951 952 memset(&vblk->tag_set, 0, sizeof(vblk->tag_set)); 953 vblk->tag_set.ops = &virtio_mq_ops; 954 vblk->tag_set.queue_depth = queue_depth; 955 vblk->tag_set.numa_node = NUMA_NO_NODE; 956 vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; 957 vblk->tag_set.cmd_size = 958 sizeof(struct virtblk_req) + 959 sizeof(struct scatterlist) * VIRTIO_BLK_INLINE_SG_CNT; 960 vblk->tag_set.driver_data = vblk; 961 vblk->tag_set.nr_hw_queues = vblk->num_vqs; 962 vblk->tag_set.nr_maps = 1; 963 if (vblk->io_queues[HCTX_TYPE_POLL]) 964 vblk->tag_set.nr_maps = 3; 965 966 err = blk_mq_alloc_tag_set(&vblk->tag_set); 967 if (err) 968 goto out_free_vq; 969 970 vblk->disk = blk_mq_alloc_disk(&vblk->tag_set, vblk); 971 if (IS_ERR(vblk->disk)) { 972 err = PTR_ERR(vblk->disk); 973 goto out_free_tags; 974 } 975 q = vblk->disk->queue; 976 977 virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN); 978 979 vblk->disk->major = major; 980 vblk->disk->first_minor = index_to_minor(index); 981 vblk->disk->minors = 1 << PART_BITS; 982 vblk->disk->private_data = vblk; 983 vblk->disk->fops = &virtblk_fops; 984 vblk->index = index; 985 986 /* configure queue flush support */ 987 virtblk_update_cache_mode(vdev); 988 989 /* If disk is read-only in the host, the guest should obey */ 990 if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO)) 991 set_disk_ro(vblk->disk, 1); 992 993 /* We can handle whatever the host told us to handle. */ 994 blk_queue_max_segments(q, sg_elems); 995 996 /* No real sector limit. */ 997 blk_queue_max_hw_sectors(q, UINT_MAX); 998 999 max_size = virtio_max_dma_size(vdev); 1000 1001 /* Host can optionally specify maximum segment size and number of 1002 * segments. */ 1003 err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX, 1004 struct virtio_blk_config, size_max, &v); 1005 if (!err) 1006 max_size = min(max_size, v); 1007 1008 blk_queue_max_segment_size(q, max_size); 1009 1010 /* Host can optionally specify the block size of the device */ 1011 err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE, 1012 struct virtio_blk_config, blk_size, 1013 &blk_size); 1014 if (!err) { 1015 err = blk_validate_block_size(blk_size); 1016 if (err) { 1017 dev_err(&vdev->dev, 1018 "virtio_blk: invalid block size: 0x%x\n", 1019 blk_size); 1020 goto out_cleanup_disk; 1021 } 1022 1023 blk_queue_logical_block_size(q, blk_size); 1024 } else 1025 blk_size = queue_logical_block_size(q); 1026 1027 /* Use topology information if available */ 1028 err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY, 1029 struct virtio_blk_config, physical_block_exp, 1030 &physical_block_exp); 1031 if (!err && physical_block_exp) 1032 blk_queue_physical_block_size(q, 1033 blk_size * (1 << physical_block_exp)); 1034 1035 err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY, 1036 struct virtio_blk_config, alignment_offset, 1037 &alignment_offset); 1038 if (!err && alignment_offset) 1039 blk_queue_alignment_offset(q, blk_size * alignment_offset); 1040 1041 err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY, 1042 struct virtio_blk_config, min_io_size, 1043 &min_io_size); 1044 if (!err && min_io_size) 1045 blk_queue_io_min(q, blk_size * min_io_size); 1046 1047 err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY, 1048 struct virtio_blk_config, opt_io_size, 1049 &opt_io_size); 1050 if (!err && opt_io_size) 1051 blk_queue_io_opt(q, blk_size * opt_io_size); 1052 1053 if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) { 1054 virtio_cread(vdev, struct virtio_blk_config, 1055 discard_sector_alignment, &discard_granularity); 1056 1057 virtio_cread(vdev, struct virtio_blk_config, 1058 max_discard_sectors, &v); 1059 blk_queue_max_discard_sectors(q, v ? v : UINT_MAX); 1060 1061 virtio_cread(vdev, struct virtio_blk_config, max_discard_seg, 1062 &max_discard_segs); 1063 } 1064 1065 if (virtio_has_feature(vdev, VIRTIO_BLK_F_WRITE_ZEROES)) { 1066 virtio_cread(vdev, struct virtio_blk_config, 1067 max_write_zeroes_sectors, &v); 1068 blk_queue_max_write_zeroes_sectors(q, v ? v : UINT_MAX); 1069 } 1070 1071 /* The discard and secure erase limits are combined since the Linux 1072 * block layer uses the same limit for both commands. 1073 * 1074 * If both VIRTIO_BLK_F_SECURE_ERASE and VIRTIO_BLK_F_DISCARD features 1075 * are negotiated, we will use the minimum between the limits. 1076 * 1077 * discard sector alignment is set to the minimum between discard_sector_alignment 1078 * and secure_erase_sector_alignment. 1079 * 1080 * max discard sectors is set to the minimum between max_discard_seg and 1081 * max_secure_erase_seg. 1082 */ 1083 if (virtio_has_feature(vdev, VIRTIO_BLK_F_SECURE_ERASE)) { 1084 1085 virtio_cread(vdev, struct virtio_blk_config, 1086 secure_erase_sector_alignment, &v); 1087 1088 /* secure_erase_sector_alignment should not be zero, the device should set a 1089 * valid number of sectors. 1090 */ 1091 if (!v) { 1092 dev_err(&vdev->dev, 1093 "virtio_blk: secure_erase_sector_alignment can't be 0\n"); 1094 err = -EINVAL; 1095 goto out_cleanup_disk; 1096 } 1097 1098 discard_granularity = min_not_zero(discard_granularity, v); 1099 1100 virtio_cread(vdev, struct virtio_blk_config, 1101 max_secure_erase_sectors, &v); 1102 1103 /* max_secure_erase_sectors should not be zero, the device should set a 1104 * valid number of sectors. 1105 */ 1106 if (!v) { 1107 dev_err(&vdev->dev, 1108 "virtio_blk: max_secure_erase_sectors can't be 0\n"); 1109 err = -EINVAL; 1110 goto out_cleanup_disk; 1111 } 1112 1113 blk_queue_max_secure_erase_sectors(q, v); 1114 1115 virtio_cread(vdev, struct virtio_blk_config, 1116 max_secure_erase_seg, &v); 1117 1118 /* max_secure_erase_seg should not be zero, the device should set a 1119 * valid number of segments 1120 */ 1121 if (!v) { 1122 dev_err(&vdev->dev, 1123 "virtio_blk: max_secure_erase_seg can't be 0\n"); 1124 err = -EINVAL; 1125 goto out_cleanup_disk; 1126 } 1127 1128 max_discard_segs = min_not_zero(max_discard_segs, v); 1129 } 1130 1131 if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD) || 1132 virtio_has_feature(vdev, VIRTIO_BLK_F_SECURE_ERASE)) { 1133 /* max_discard_seg and discard_granularity will be 0 only 1134 * if max_discard_seg and discard_sector_alignment fields in the virtio 1135 * config are 0 and VIRTIO_BLK_F_SECURE_ERASE feature is not negotiated. 1136 * In this case, we use default values. 1137 */ 1138 if (!max_discard_segs) 1139 max_discard_segs = sg_elems; 1140 1141 blk_queue_max_discard_segments(q, 1142 min(max_discard_segs, MAX_DISCARD_SEGMENTS)); 1143 1144 if (discard_granularity) 1145 q->limits.discard_granularity = discard_granularity << SECTOR_SHIFT; 1146 else 1147 q->limits.discard_granularity = blk_size; 1148 } 1149 1150 virtblk_update_capacity(vblk, false); 1151 virtio_device_ready(vdev); 1152 1153 err = device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups); 1154 if (err) 1155 goto out_cleanup_disk; 1156 1157 return 0; 1158 1159 out_cleanup_disk: 1160 put_disk(vblk->disk); 1161 out_free_tags: 1162 blk_mq_free_tag_set(&vblk->tag_set); 1163 out_free_vq: 1164 vdev->config->del_vqs(vdev); 1165 kfree(vblk->vqs); 1166 out_free_vblk: 1167 kfree(vblk); 1168 out_free_index: 1169 ida_free(&vd_index_ida, index); 1170 out: 1171 return err; 1172 } 1173 1174 static void virtblk_remove(struct virtio_device *vdev) 1175 { 1176 struct virtio_blk *vblk = vdev->priv; 1177 1178 /* Make sure no work handler is accessing the device. */ 1179 flush_work(&vblk->config_work); 1180 1181 del_gendisk(vblk->disk); 1182 blk_mq_free_tag_set(&vblk->tag_set); 1183 1184 mutex_lock(&vblk->vdev_mutex); 1185 1186 /* Stop all the virtqueues. */ 1187 virtio_reset_device(vdev); 1188 1189 /* Virtqueues are stopped, nothing can use vblk->vdev anymore. */ 1190 vblk->vdev = NULL; 1191 1192 vdev->config->del_vqs(vdev); 1193 kfree(vblk->vqs); 1194 1195 mutex_unlock(&vblk->vdev_mutex); 1196 1197 put_disk(vblk->disk); 1198 } 1199 1200 #ifdef CONFIG_PM_SLEEP 1201 static int virtblk_freeze(struct virtio_device *vdev) 1202 { 1203 struct virtio_blk *vblk = vdev->priv; 1204 1205 /* Ensure we don't receive any more interrupts */ 1206 virtio_reset_device(vdev); 1207 1208 /* Make sure no work handler is accessing the device. */ 1209 flush_work(&vblk->config_work); 1210 1211 blk_mq_quiesce_queue(vblk->disk->queue); 1212 1213 vdev->config->del_vqs(vdev); 1214 kfree(vblk->vqs); 1215 1216 return 0; 1217 } 1218 1219 static int virtblk_restore(struct virtio_device *vdev) 1220 { 1221 struct virtio_blk *vblk = vdev->priv; 1222 int ret; 1223 1224 ret = init_vq(vdev->priv); 1225 if (ret) 1226 return ret; 1227 1228 virtio_device_ready(vdev); 1229 1230 blk_mq_unquiesce_queue(vblk->disk->queue); 1231 return 0; 1232 } 1233 #endif 1234 1235 static const struct virtio_device_id id_table[] = { 1236 { VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID }, 1237 { 0 }, 1238 }; 1239 1240 static unsigned int features_legacy[] = { 1241 VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY, 1242 VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, 1243 VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE, 1244 VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES, 1245 VIRTIO_BLK_F_SECURE_ERASE, 1246 } 1247 ; 1248 static unsigned int features[] = { 1249 VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY, 1250 VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, 1251 VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE, 1252 VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES, 1253 VIRTIO_BLK_F_SECURE_ERASE, 1254 }; 1255 1256 static struct virtio_driver virtio_blk = { 1257 .feature_table = features, 1258 .feature_table_size = ARRAY_SIZE(features), 1259 .feature_table_legacy = features_legacy, 1260 .feature_table_size_legacy = ARRAY_SIZE(features_legacy), 1261 .driver.name = KBUILD_MODNAME, 1262 .driver.owner = THIS_MODULE, 1263 .id_table = id_table, 1264 .probe = virtblk_probe, 1265 .remove = virtblk_remove, 1266 .config_changed = virtblk_config_changed, 1267 #ifdef CONFIG_PM_SLEEP 1268 .freeze = virtblk_freeze, 1269 .restore = virtblk_restore, 1270 #endif 1271 }; 1272 1273 static int __init virtio_blk_init(void) 1274 { 1275 int error; 1276 1277 virtblk_wq = alloc_workqueue("virtio-blk", 0, 0); 1278 if (!virtblk_wq) 1279 return -ENOMEM; 1280 1281 major = register_blkdev(0, "virtblk"); 1282 if (major < 0) { 1283 error = major; 1284 goto out_destroy_workqueue; 1285 } 1286 1287 error = register_virtio_driver(&virtio_blk); 1288 if (error) 1289 goto out_unregister_blkdev; 1290 return 0; 1291 1292 out_unregister_blkdev: 1293 unregister_blkdev(major, "virtblk"); 1294 out_destroy_workqueue: 1295 destroy_workqueue(virtblk_wq); 1296 return error; 1297 } 1298 1299 static void __exit virtio_blk_fini(void) 1300 { 1301 unregister_virtio_driver(&virtio_blk); 1302 unregister_blkdev(major, "virtblk"); 1303 destroy_workqueue(virtblk_wq); 1304 } 1305 module_init(virtio_blk_init); 1306 module_exit(virtio_blk_fini); 1307 1308 MODULE_DEVICE_TABLE(virtio, id_table); 1309 MODULE_DESCRIPTION("Virtio block driver"); 1310 MODULE_LICENSE("GPL"); 1311