1 // SPDX-License-Identifier: GPL-2.0-only 2 //#define DEBUG 3 #include <linux/spinlock.h> 4 #include <linux/slab.h> 5 #include <linux/blkdev.h> 6 #include <linux/hdreg.h> 7 #include <linux/module.h> 8 #include <linux/mutex.h> 9 #include <linux/interrupt.h> 10 #include <linux/virtio.h> 11 #include <linux/virtio_blk.h> 12 #include <linux/scatterlist.h> 13 #include <linux/string_helpers.h> 14 #include <linux/idr.h> 15 #include <linux/blk-mq.h> 16 #include <linux/blk-mq-virtio.h> 17 #include <linux/numa.h> 18 #include <linux/vmalloc.h> 19 #include <uapi/linux/virtio_ring.h> 20 21 #define PART_BITS 4 22 #define VQ_NAME_LEN 16 23 #define MAX_DISCARD_SEGMENTS 256u 24 25 /* The maximum number of sg elements that fit into a virtqueue */ 26 #define VIRTIO_BLK_MAX_SG_ELEMS 32768 27 28 #ifdef CONFIG_ARCH_NO_SG_CHAIN 29 #define VIRTIO_BLK_INLINE_SG_CNT 0 30 #else 31 #define VIRTIO_BLK_INLINE_SG_CNT 2 32 #endif 33 34 static unsigned int num_request_queues; 35 module_param(num_request_queues, uint, 0644); 36 MODULE_PARM_DESC(num_request_queues, 37 "Limit the number of request queues to use for blk device. " 38 "0 for no limit. " 39 "Values > nr_cpu_ids truncated to nr_cpu_ids."); 40 41 static unsigned int poll_queues; 42 module_param(poll_queues, uint, 0644); 43 MODULE_PARM_DESC(poll_queues, "The number of dedicated virtqueues for polling I/O"); 44 45 static int major; 46 static DEFINE_IDA(vd_index_ida); 47 48 static struct workqueue_struct *virtblk_wq; 49 50 struct virtio_blk_vq { 51 struct virtqueue *vq; 52 spinlock_t lock; 53 char name[VQ_NAME_LEN]; 54 } ____cacheline_aligned_in_smp; 55 56 struct virtio_blk { 57 /* 58 * This mutex must be held by anything that may run after 59 * virtblk_remove() sets vblk->vdev to NULL. 60 * 61 * blk-mq, virtqueue processing, and sysfs attribute code paths are 62 * shut down before vblk->vdev is set to NULL and therefore do not need 63 * to hold this mutex. 64 */ 65 struct mutex vdev_mutex; 66 struct virtio_device *vdev; 67 68 /* The disk structure for the kernel. */ 69 struct gendisk *disk; 70 71 /* Block layer tags. */ 72 struct blk_mq_tag_set tag_set; 73 74 /* Process context for config space updates */ 75 struct work_struct config_work; 76 77 /* Ida index - used to track minor number allocations. */ 78 int index; 79 80 /* num of vqs */ 81 int num_vqs; 82 int io_queues[HCTX_MAX_TYPES]; 83 struct virtio_blk_vq *vqs; 84 85 /* For zoned device */ 86 unsigned int zone_sectors; 87 }; 88 89 struct virtblk_req { 90 /* Out header */ 91 struct virtio_blk_outhdr out_hdr; 92 93 /* In header */ 94 union { 95 u8 status; 96 97 /* 98 * The zone append command has an extended in header. 99 * The status field in zone_append_in_hdr must always 100 * be the last byte. 101 */ 102 struct { 103 __virtio64 sector; 104 u8 status; 105 } zone_append; 106 } in_hdr; 107 108 size_t in_hdr_len; 109 110 struct sg_table sg_table; 111 struct scatterlist sg[]; 112 }; 113 114 static inline blk_status_t virtblk_result(u8 status) 115 { 116 switch (status) { 117 case VIRTIO_BLK_S_OK: 118 return BLK_STS_OK; 119 case VIRTIO_BLK_S_UNSUPP: 120 return BLK_STS_NOTSUPP; 121 case VIRTIO_BLK_S_ZONE_OPEN_RESOURCE: 122 return BLK_STS_ZONE_OPEN_RESOURCE; 123 case VIRTIO_BLK_S_ZONE_ACTIVE_RESOURCE: 124 return BLK_STS_ZONE_ACTIVE_RESOURCE; 125 case VIRTIO_BLK_S_IOERR: 126 case VIRTIO_BLK_S_ZONE_UNALIGNED_WP: 127 default: 128 return BLK_STS_IOERR; 129 } 130 } 131 132 static inline struct virtio_blk_vq *get_virtio_blk_vq(struct blk_mq_hw_ctx *hctx) 133 { 134 struct virtio_blk *vblk = hctx->queue->queuedata; 135 struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num]; 136 137 return vq; 138 } 139 140 static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr) 141 { 142 struct scatterlist out_hdr, in_hdr, *sgs[3]; 143 unsigned int num_out = 0, num_in = 0; 144 145 sg_init_one(&out_hdr, &vbr->out_hdr, sizeof(vbr->out_hdr)); 146 sgs[num_out++] = &out_hdr; 147 148 if (vbr->sg_table.nents) { 149 if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT)) 150 sgs[num_out++] = vbr->sg_table.sgl; 151 else 152 sgs[num_out + num_in++] = vbr->sg_table.sgl; 153 } 154 155 sg_init_one(&in_hdr, &vbr->in_hdr.status, vbr->in_hdr_len); 156 sgs[num_out + num_in++] = &in_hdr; 157 158 return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC); 159 } 160 161 static int virtblk_setup_discard_write_zeroes_erase(struct request *req, bool unmap) 162 { 163 unsigned short segments = blk_rq_nr_discard_segments(req); 164 unsigned short n = 0; 165 struct virtio_blk_discard_write_zeroes *range; 166 struct bio *bio; 167 u32 flags = 0; 168 169 if (unmap) 170 flags |= VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP; 171 172 range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC); 173 if (!range) 174 return -ENOMEM; 175 176 /* 177 * Single max discard segment means multi-range discard isn't 178 * supported, and block layer only runs contiguity merge like 179 * normal RW request. So we can't reply on bio for retrieving 180 * each range info. 181 */ 182 if (queue_max_discard_segments(req->q) == 1) { 183 range[0].flags = cpu_to_le32(flags); 184 range[0].num_sectors = cpu_to_le32(blk_rq_sectors(req)); 185 range[0].sector = cpu_to_le64(blk_rq_pos(req)); 186 n = 1; 187 } else { 188 __rq_for_each_bio(bio, req) { 189 u64 sector = bio->bi_iter.bi_sector; 190 u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT; 191 192 range[n].flags = cpu_to_le32(flags); 193 range[n].num_sectors = cpu_to_le32(num_sectors); 194 range[n].sector = cpu_to_le64(sector); 195 n++; 196 } 197 } 198 199 WARN_ON_ONCE(n != segments); 200 201 bvec_set_virt(&req->special_vec, range, sizeof(*range) * segments); 202 req->rq_flags |= RQF_SPECIAL_PAYLOAD; 203 204 return 0; 205 } 206 207 static void virtblk_unmap_data(struct request *req, struct virtblk_req *vbr) 208 { 209 if (blk_rq_nr_phys_segments(req)) 210 sg_free_table_chained(&vbr->sg_table, 211 VIRTIO_BLK_INLINE_SG_CNT); 212 } 213 214 static int virtblk_map_data(struct blk_mq_hw_ctx *hctx, struct request *req, 215 struct virtblk_req *vbr) 216 { 217 int err; 218 219 if (!blk_rq_nr_phys_segments(req)) 220 return 0; 221 222 vbr->sg_table.sgl = vbr->sg; 223 err = sg_alloc_table_chained(&vbr->sg_table, 224 blk_rq_nr_phys_segments(req), 225 vbr->sg_table.sgl, 226 VIRTIO_BLK_INLINE_SG_CNT); 227 if (unlikely(err)) 228 return -ENOMEM; 229 230 return blk_rq_map_sg(hctx->queue, req, vbr->sg_table.sgl); 231 } 232 233 static void virtblk_cleanup_cmd(struct request *req) 234 { 235 if (req->rq_flags & RQF_SPECIAL_PAYLOAD) 236 kfree(bvec_virt(&req->special_vec)); 237 } 238 239 static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev, 240 struct request *req, 241 struct virtblk_req *vbr) 242 { 243 size_t in_hdr_len = sizeof(vbr->in_hdr.status); 244 bool unmap = false; 245 u32 type; 246 u64 sector = 0; 247 248 if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) && op_is_zone_mgmt(req_op(req))) 249 return BLK_STS_NOTSUPP; 250 251 /* Set fields for all request types */ 252 vbr->out_hdr.ioprio = cpu_to_virtio32(vdev, req_get_ioprio(req)); 253 254 switch (req_op(req)) { 255 case REQ_OP_READ: 256 type = VIRTIO_BLK_T_IN; 257 sector = blk_rq_pos(req); 258 break; 259 case REQ_OP_WRITE: 260 type = VIRTIO_BLK_T_OUT; 261 sector = blk_rq_pos(req); 262 break; 263 case REQ_OP_FLUSH: 264 type = VIRTIO_BLK_T_FLUSH; 265 break; 266 case REQ_OP_DISCARD: 267 type = VIRTIO_BLK_T_DISCARD; 268 break; 269 case REQ_OP_WRITE_ZEROES: 270 type = VIRTIO_BLK_T_WRITE_ZEROES; 271 unmap = !(req->cmd_flags & REQ_NOUNMAP); 272 break; 273 case REQ_OP_SECURE_ERASE: 274 type = VIRTIO_BLK_T_SECURE_ERASE; 275 break; 276 case REQ_OP_ZONE_OPEN: 277 type = VIRTIO_BLK_T_ZONE_OPEN; 278 sector = blk_rq_pos(req); 279 break; 280 case REQ_OP_ZONE_CLOSE: 281 type = VIRTIO_BLK_T_ZONE_CLOSE; 282 sector = blk_rq_pos(req); 283 break; 284 case REQ_OP_ZONE_FINISH: 285 type = VIRTIO_BLK_T_ZONE_FINISH; 286 sector = blk_rq_pos(req); 287 break; 288 case REQ_OP_ZONE_APPEND: 289 type = VIRTIO_BLK_T_ZONE_APPEND; 290 sector = blk_rq_pos(req); 291 in_hdr_len = sizeof(vbr->in_hdr.zone_append); 292 break; 293 case REQ_OP_ZONE_RESET: 294 type = VIRTIO_BLK_T_ZONE_RESET; 295 sector = blk_rq_pos(req); 296 break; 297 case REQ_OP_ZONE_RESET_ALL: 298 type = VIRTIO_BLK_T_ZONE_RESET_ALL; 299 break; 300 case REQ_OP_DRV_IN: 301 /* 302 * Out header has already been prepared by the caller (virtblk_get_id() 303 * or virtblk_submit_zone_report()), nothing to do here. 304 */ 305 return 0; 306 default: 307 WARN_ON_ONCE(1); 308 return BLK_STS_IOERR; 309 } 310 311 /* Set fields for non-REQ_OP_DRV_IN request types */ 312 vbr->in_hdr_len = in_hdr_len; 313 vbr->out_hdr.type = cpu_to_virtio32(vdev, type); 314 vbr->out_hdr.sector = cpu_to_virtio64(vdev, sector); 315 316 if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES || 317 type == VIRTIO_BLK_T_SECURE_ERASE) { 318 if (virtblk_setup_discard_write_zeroes_erase(req, unmap)) 319 return BLK_STS_RESOURCE; 320 } 321 322 return 0; 323 } 324 325 /* 326 * The status byte is always the last byte of the virtblk request 327 * in-header. This helper fetches its value for all in-header formats 328 * that are currently defined. 329 */ 330 static inline u8 virtblk_vbr_status(struct virtblk_req *vbr) 331 { 332 return *((u8 *)&vbr->in_hdr + vbr->in_hdr_len - 1); 333 } 334 335 static inline void virtblk_request_done(struct request *req) 336 { 337 struct virtblk_req *vbr = blk_mq_rq_to_pdu(req); 338 blk_status_t status = virtblk_result(virtblk_vbr_status(vbr)); 339 struct virtio_blk *vblk = req->mq_hctx->queue->queuedata; 340 341 virtblk_unmap_data(req, vbr); 342 virtblk_cleanup_cmd(req); 343 344 if (req_op(req) == REQ_OP_ZONE_APPEND) 345 req->__sector = virtio64_to_cpu(vblk->vdev, 346 vbr->in_hdr.zone_append.sector); 347 348 blk_mq_end_request(req, status); 349 } 350 351 static void virtblk_done(struct virtqueue *vq) 352 { 353 struct virtio_blk *vblk = vq->vdev->priv; 354 bool req_done = false; 355 int qid = vq->index; 356 struct virtblk_req *vbr; 357 unsigned long flags; 358 unsigned int len; 359 360 spin_lock_irqsave(&vblk->vqs[qid].lock, flags); 361 do { 362 virtqueue_disable_cb(vq); 363 while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) { 364 struct request *req = blk_mq_rq_from_pdu(vbr); 365 366 if (likely(!blk_should_fake_timeout(req->q))) 367 blk_mq_complete_request(req); 368 req_done = true; 369 } 370 if (unlikely(virtqueue_is_broken(vq))) 371 break; 372 } while (!virtqueue_enable_cb(vq)); 373 374 /* In case queue is stopped waiting for more buffers. */ 375 if (req_done) 376 blk_mq_start_stopped_hw_queues(vblk->disk->queue, true); 377 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); 378 } 379 380 static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx) 381 { 382 struct virtio_blk *vblk = hctx->queue->queuedata; 383 struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num]; 384 bool kick; 385 386 spin_lock_irq(&vq->lock); 387 kick = virtqueue_kick_prepare(vq->vq); 388 spin_unlock_irq(&vq->lock); 389 390 if (kick) 391 virtqueue_notify(vq->vq); 392 } 393 394 static blk_status_t virtblk_fail_to_queue(struct request *req, int rc) 395 { 396 virtblk_cleanup_cmd(req); 397 switch (rc) { 398 case -ENOSPC: 399 return BLK_STS_DEV_RESOURCE; 400 case -ENOMEM: 401 return BLK_STS_RESOURCE; 402 default: 403 return BLK_STS_IOERR; 404 } 405 } 406 407 static blk_status_t virtblk_prep_rq(struct blk_mq_hw_ctx *hctx, 408 struct virtio_blk *vblk, 409 struct request *req, 410 struct virtblk_req *vbr) 411 { 412 blk_status_t status; 413 int num; 414 415 status = virtblk_setup_cmd(vblk->vdev, req, vbr); 416 if (unlikely(status)) 417 return status; 418 419 num = virtblk_map_data(hctx, req, vbr); 420 if (unlikely(num < 0)) 421 return virtblk_fail_to_queue(req, -ENOMEM); 422 vbr->sg_table.nents = num; 423 424 blk_mq_start_request(req); 425 426 return BLK_STS_OK; 427 } 428 429 static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx, 430 const struct blk_mq_queue_data *bd) 431 { 432 struct virtio_blk *vblk = hctx->queue->queuedata; 433 struct request *req = bd->rq; 434 struct virtblk_req *vbr = blk_mq_rq_to_pdu(req); 435 unsigned long flags; 436 int qid = hctx->queue_num; 437 bool notify = false; 438 blk_status_t status; 439 int err; 440 441 status = virtblk_prep_rq(hctx, vblk, req, vbr); 442 if (unlikely(status)) 443 return status; 444 445 spin_lock_irqsave(&vblk->vqs[qid].lock, flags); 446 err = virtblk_add_req(vblk->vqs[qid].vq, vbr); 447 if (err) { 448 virtqueue_kick(vblk->vqs[qid].vq); 449 /* Don't stop the queue if -ENOMEM: we may have failed to 450 * bounce the buffer due to global resource outage. 451 */ 452 if (err == -ENOSPC) 453 blk_mq_stop_hw_queue(hctx); 454 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); 455 virtblk_unmap_data(req, vbr); 456 return virtblk_fail_to_queue(req, err); 457 } 458 459 if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq)) 460 notify = true; 461 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); 462 463 if (notify) 464 virtqueue_notify(vblk->vqs[qid].vq); 465 return BLK_STS_OK; 466 } 467 468 static bool virtblk_prep_rq_batch(struct request *req) 469 { 470 struct virtio_blk *vblk = req->mq_hctx->queue->queuedata; 471 struct virtblk_req *vbr = blk_mq_rq_to_pdu(req); 472 473 req->mq_hctx->tags->rqs[req->tag] = req; 474 475 return virtblk_prep_rq(req->mq_hctx, vblk, req, vbr) == BLK_STS_OK; 476 } 477 478 static void virtblk_add_req_batch(struct virtio_blk_vq *vq, 479 struct request **rqlist) 480 { 481 struct request *req; 482 unsigned long flags; 483 bool kick; 484 485 spin_lock_irqsave(&vq->lock, flags); 486 487 while ((req = rq_list_pop(rqlist))) { 488 struct virtblk_req *vbr = blk_mq_rq_to_pdu(req); 489 int err; 490 491 err = virtblk_add_req(vq->vq, vbr); 492 if (err) { 493 virtblk_unmap_data(req, vbr); 494 virtblk_cleanup_cmd(req); 495 blk_mq_requeue_request(req, true); 496 } 497 } 498 499 kick = virtqueue_kick_prepare(vq->vq); 500 spin_unlock_irqrestore(&vq->lock, flags); 501 502 if (kick) 503 virtqueue_notify(vq->vq); 504 } 505 506 static void virtio_queue_rqs(struct request **rqlist) 507 { 508 struct request *submit_list = NULL; 509 struct request *requeue_list = NULL; 510 struct request **requeue_lastp = &requeue_list; 511 struct virtio_blk_vq *vq = NULL; 512 struct request *req; 513 514 while ((req = rq_list_pop(rqlist))) { 515 struct virtio_blk_vq *this_vq = get_virtio_blk_vq(req->mq_hctx); 516 517 if (vq && vq != this_vq) 518 virtblk_add_req_batch(vq, &submit_list); 519 vq = this_vq; 520 521 if (virtblk_prep_rq_batch(req)) 522 rq_list_add(&submit_list, req); /* reverse order */ 523 else 524 rq_list_add_tail(&requeue_lastp, req); 525 } 526 527 if (vq) 528 virtblk_add_req_batch(vq, &submit_list); 529 *rqlist = requeue_list; 530 } 531 532 #ifdef CONFIG_BLK_DEV_ZONED 533 static void *virtblk_alloc_report_buffer(struct virtio_blk *vblk, 534 unsigned int nr_zones, 535 size_t *buflen) 536 { 537 struct request_queue *q = vblk->disk->queue; 538 size_t bufsize; 539 void *buf; 540 541 nr_zones = min_t(unsigned int, nr_zones, 542 get_capacity(vblk->disk) >> ilog2(vblk->zone_sectors)); 543 544 bufsize = sizeof(struct virtio_blk_zone_report) + 545 nr_zones * sizeof(struct virtio_blk_zone_descriptor); 546 bufsize = min_t(size_t, bufsize, 547 queue_max_hw_sectors(q) << SECTOR_SHIFT); 548 bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT); 549 550 while (bufsize >= sizeof(struct virtio_blk_zone_report)) { 551 buf = __vmalloc(bufsize, GFP_KERNEL | __GFP_NORETRY); 552 if (buf) { 553 *buflen = bufsize; 554 return buf; 555 } 556 bufsize >>= 1; 557 } 558 559 return NULL; 560 } 561 562 static int virtblk_submit_zone_report(struct virtio_blk *vblk, 563 char *report_buf, size_t report_len, 564 sector_t sector) 565 { 566 struct request_queue *q = vblk->disk->queue; 567 struct request *req; 568 struct virtblk_req *vbr; 569 int err; 570 571 req = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0); 572 if (IS_ERR(req)) 573 return PTR_ERR(req); 574 575 vbr = blk_mq_rq_to_pdu(req); 576 vbr->in_hdr_len = sizeof(vbr->in_hdr.status); 577 vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_ZONE_REPORT); 578 vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, sector); 579 580 err = blk_rq_map_kern(q, req, report_buf, report_len, GFP_KERNEL); 581 if (err) 582 goto out; 583 584 blk_execute_rq(req, false); 585 err = blk_status_to_errno(virtblk_result(vbr->in_hdr.status)); 586 out: 587 blk_mq_free_request(req); 588 return err; 589 } 590 591 static int virtblk_parse_zone(struct virtio_blk *vblk, 592 struct virtio_blk_zone_descriptor *entry, 593 unsigned int idx, report_zones_cb cb, void *data) 594 { 595 struct blk_zone zone = { }; 596 597 zone.start = virtio64_to_cpu(vblk->vdev, entry->z_start); 598 if (zone.start + vblk->zone_sectors <= get_capacity(vblk->disk)) 599 zone.len = vblk->zone_sectors; 600 else 601 zone.len = get_capacity(vblk->disk) - zone.start; 602 zone.capacity = virtio64_to_cpu(vblk->vdev, entry->z_cap); 603 zone.wp = virtio64_to_cpu(vblk->vdev, entry->z_wp); 604 605 switch (entry->z_type) { 606 case VIRTIO_BLK_ZT_SWR: 607 zone.type = BLK_ZONE_TYPE_SEQWRITE_REQ; 608 break; 609 case VIRTIO_BLK_ZT_SWP: 610 zone.type = BLK_ZONE_TYPE_SEQWRITE_PREF; 611 break; 612 case VIRTIO_BLK_ZT_CONV: 613 zone.type = BLK_ZONE_TYPE_CONVENTIONAL; 614 break; 615 default: 616 dev_err(&vblk->vdev->dev, "zone %llu: invalid type %#x\n", 617 zone.start, entry->z_type); 618 return -EIO; 619 } 620 621 switch (entry->z_state) { 622 case VIRTIO_BLK_ZS_EMPTY: 623 zone.cond = BLK_ZONE_COND_EMPTY; 624 break; 625 case VIRTIO_BLK_ZS_CLOSED: 626 zone.cond = BLK_ZONE_COND_CLOSED; 627 break; 628 case VIRTIO_BLK_ZS_FULL: 629 zone.cond = BLK_ZONE_COND_FULL; 630 zone.wp = zone.start + zone.len; 631 break; 632 case VIRTIO_BLK_ZS_EOPEN: 633 zone.cond = BLK_ZONE_COND_EXP_OPEN; 634 break; 635 case VIRTIO_BLK_ZS_IOPEN: 636 zone.cond = BLK_ZONE_COND_IMP_OPEN; 637 break; 638 case VIRTIO_BLK_ZS_NOT_WP: 639 zone.cond = BLK_ZONE_COND_NOT_WP; 640 break; 641 case VIRTIO_BLK_ZS_RDONLY: 642 zone.cond = BLK_ZONE_COND_READONLY; 643 zone.wp = ULONG_MAX; 644 break; 645 case VIRTIO_BLK_ZS_OFFLINE: 646 zone.cond = BLK_ZONE_COND_OFFLINE; 647 zone.wp = ULONG_MAX; 648 break; 649 default: 650 dev_err(&vblk->vdev->dev, "zone %llu: invalid condition %#x\n", 651 zone.start, entry->z_state); 652 return -EIO; 653 } 654 655 /* 656 * The callback below checks the validity of the reported 657 * entry data, no need to further validate it here. 658 */ 659 return cb(&zone, idx, data); 660 } 661 662 static int virtblk_report_zones(struct gendisk *disk, sector_t sector, 663 unsigned int nr_zones, report_zones_cb cb, 664 void *data) 665 { 666 struct virtio_blk *vblk = disk->private_data; 667 struct virtio_blk_zone_report *report; 668 unsigned long long nz, i; 669 size_t buflen; 670 unsigned int zone_idx = 0; 671 int ret; 672 673 if (WARN_ON_ONCE(!vblk->zone_sectors)) 674 return -EOPNOTSUPP; 675 676 report = virtblk_alloc_report_buffer(vblk, nr_zones, &buflen); 677 if (!report) 678 return -ENOMEM; 679 680 mutex_lock(&vblk->vdev_mutex); 681 682 if (!vblk->vdev) { 683 ret = -ENXIO; 684 goto fail_report; 685 } 686 687 while (zone_idx < nr_zones && sector < get_capacity(vblk->disk)) { 688 memset(report, 0, buflen); 689 690 ret = virtblk_submit_zone_report(vblk, (char *)report, 691 buflen, sector); 692 if (ret) 693 goto fail_report; 694 695 nz = min_t(u64, virtio64_to_cpu(vblk->vdev, report->nr_zones), 696 nr_zones); 697 if (!nz) 698 break; 699 700 for (i = 0; i < nz && zone_idx < nr_zones; i++) { 701 ret = virtblk_parse_zone(vblk, &report->zones[i], 702 zone_idx, cb, data); 703 if (ret) 704 goto fail_report; 705 706 sector = virtio64_to_cpu(vblk->vdev, 707 report->zones[i].z_start) + 708 vblk->zone_sectors; 709 zone_idx++; 710 } 711 } 712 713 if (zone_idx > 0) 714 ret = zone_idx; 715 else 716 ret = -EINVAL; 717 fail_report: 718 mutex_unlock(&vblk->vdev_mutex); 719 kvfree(report); 720 return ret; 721 } 722 723 static void virtblk_revalidate_zones(struct virtio_blk *vblk) 724 { 725 u8 model; 726 727 virtio_cread(vblk->vdev, struct virtio_blk_config, 728 zoned.model, &model); 729 switch (model) { 730 default: 731 dev_err(&vblk->vdev->dev, "unknown zone model %d\n", model); 732 fallthrough; 733 case VIRTIO_BLK_Z_NONE: 734 case VIRTIO_BLK_Z_HA: 735 disk_set_zoned(vblk->disk, BLK_ZONED_NONE); 736 return; 737 case VIRTIO_BLK_Z_HM: 738 WARN_ON_ONCE(!vblk->zone_sectors); 739 if (!blk_revalidate_disk_zones(vblk->disk, NULL)) 740 set_capacity_and_notify(vblk->disk, 0); 741 } 742 } 743 744 static int virtblk_probe_zoned_device(struct virtio_device *vdev, 745 struct virtio_blk *vblk, 746 struct request_queue *q) 747 { 748 u32 v, wg; 749 u8 model; 750 751 virtio_cread(vdev, struct virtio_blk_config, 752 zoned.model, &model); 753 754 switch (model) { 755 case VIRTIO_BLK_Z_NONE: 756 case VIRTIO_BLK_Z_HA: 757 /* Present the host-aware device as non-zoned */ 758 return 0; 759 case VIRTIO_BLK_Z_HM: 760 break; 761 default: 762 dev_err(&vdev->dev, "unsupported zone model %d\n", model); 763 return -EINVAL; 764 } 765 766 dev_dbg(&vdev->dev, "probing host-managed zoned device\n"); 767 768 disk_set_zoned(vblk->disk, BLK_ZONED_HM); 769 blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q); 770 771 virtio_cread(vdev, struct virtio_blk_config, 772 zoned.max_open_zones, &v); 773 disk_set_max_open_zones(vblk->disk, v); 774 dev_dbg(&vdev->dev, "max open zones = %u\n", v); 775 776 virtio_cread(vdev, struct virtio_blk_config, 777 zoned.max_active_zones, &v); 778 disk_set_max_active_zones(vblk->disk, v); 779 dev_dbg(&vdev->dev, "max active zones = %u\n", v); 780 781 virtio_cread(vdev, struct virtio_blk_config, 782 zoned.write_granularity, &wg); 783 if (!wg) { 784 dev_warn(&vdev->dev, "zero write granularity reported\n"); 785 return -ENODEV; 786 } 787 blk_queue_physical_block_size(q, wg); 788 blk_queue_io_min(q, wg); 789 790 dev_dbg(&vdev->dev, "write granularity = %u\n", wg); 791 792 /* 793 * virtio ZBD specification doesn't require zones to be a power of 794 * two sectors in size, but the code in this driver expects that. 795 */ 796 virtio_cread(vdev, struct virtio_blk_config, zoned.zone_sectors, 797 &vblk->zone_sectors); 798 if (vblk->zone_sectors == 0 || !is_power_of_2(vblk->zone_sectors)) { 799 dev_err(&vdev->dev, 800 "zoned device with non power of two zone size %u\n", 801 vblk->zone_sectors); 802 return -ENODEV; 803 } 804 blk_queue_chunk_sectors(q, vblk->zone_sectors); 805 dev_dbg(&vdev->dev, "zone sectors = %u\n", vblk->zone_sectors); 806 807 if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) { 808 dev_warn(&vblk->vdev->dev, 809 "ignoring negotiated F_DISCARD for zoned device\n"); 810 blk_queue_max_discard_sectors(q, 0); 811 } 812 813 virtio_cread(vdev, struct virtio_blk_config, 814 zoned.max_append_sectors, &v); 815 if (!v) { 816 dev_warn(&vdev->dev, "zero max_append_sectors reported\n"); 817 return -ENODEV; 818 } 819 if ((v << SECTOR_SHIFT) < wg) { 820 dev_err(&vdev->dev, 821 "write granularity %u exceeds max_append_sectors %u limit\n", 822 wg, v); 823 return -ENODEV; 824 } 825 blk_queue_max_zone_append_sectors(q, v); 826 dev_dbg(&vdev->dev, "max append sectors = %u\n", v); 827 828 return blk_revalidate_disk_zones(vblk->disk, NULL); 829 } 830 831 #else 832 833 /* 834 * Zoned block device support is not configured in this kernel. 835 * Host-managed zoned devices can't be supported, but others are 836 * good to go as regular block devices. 837 */ 838 #define virtblk_report_zones NULL 839 840 static inline void virtblk_revalidate_zones(struct virtio_blk *vblk) 841 { 842 } 843 844 static inline int virtblk_probe_zoned_device(struct virtio_device *vdev, 845 struct virtio_blk *vblk, struct request_queue *q) 846 { 847 u8 model; 848 849 virtio_cread(vdev, struct virtio_blk_config, zoned.model, &model); 850 if (model == VIRTIO_BLK_Z_HM) { 851 dev_err(&vdev->dev, 852 "virtio_blk: zoned devices are not supported"); 853 return -EOPNOTSUPP; 854 } 855 856 return 0; 857 } 858 #endif /* CONFIG_BLK_DEV_ZONED */ 859 860 /* return id (s/n) string for *disk to *id_str 861 */ 862 static int virtblk_get_id(struct gendisk *disk, char *id_str) 863 { 864 struct virtio_blk *vblk = disk->private_data; 865 struct request_queue *q = vblk->disk->queue; 866 struct request *req; 867 struct virtblk_req *vbr; 868 int err; 869 870 req = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0); 871 if (IS_ERR(req)) 872 return PTR_ERR(req); 873 874 vbr = blk_mq_rq_to_pdu(req); 875 vbr->in_hdr_len = sizeof(vbr->in_hdr.status); 876 vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID); 877 vbr->out_hdr.sector = 0; 878 879 err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL); 880 if (err) 881 goto out; 882 883 blk_execute_rq(req, false); 884 err = blk_status_to_errno(virtblk_result(vbr->in_hdr.status)); 885 out: 886 blk_mq_free_request(req); 887 return err; 888 } 889 890 /* We provide getgeo only to please some old bootloader/partitioning tools */ 891 static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo) 892 { 893 struct virtio_blk *vblk = bd->bd_disk->private_data; 894 int ret = 0; 895 896 mutex_lock(&vblk->vdev_mutex); 897 898 if (!vblk->vdev) { 899 ret = -ENXIO; 900 goto out; 901 } 902 903 /* see if the host passed in geometry config */ 904 if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) { 905 virtio_cread(vblk->vdev, struct virtio_blk_config, 906 geometry.cylinders, &geo->cylinders); 907 virtio_cread(vblk->vdev, struct virtio_blk_config, 908 geometry.heads, &geo->heads); 909 virtio_cread(vblk->vdev, struct virtio_blk_config, 910 geometry.sectors, &geo->sectors); 911 } else { 912 /* some standard values, similar to sd */ 913 geo->heads = 1 << 6; 914 geo->sectors = 1 << 5; 915 geo->cylinders = get_capacity(bd->bd_disk) >> 11; 916 } 917 out: 918 mutex_unlock(&vblk->vdev_mutex); 919 return ret; 920 } 921 922 static void virtblk_free_disk(struct gendisk *disk) 923 { 924 struct virtio_blk *vblk = disk->private_data; 925 926 ida_free(&vd_index_ida, vblk->index); 927 mutex_destroy(&vblk->vdev_mutex); 928 kfree(vblk); 929 } 930 931 static const struct block_device_operations virtblk_fops = { 932 .owner = THIS_MODULE, 933 .getgeo = virtblk_getgeo, 934 .free_disk = virtblk_free_disk, 935 .report_zones = virtblk_report_zones, 936 }; 937 938 static int index_to_minor(int index) 939 { 940 return index << PART_BITS; 941 } 942 943 static int minor_to_index(int minor) 944 { 945 return minor >> PART_BITS; 946 } 947 948 static ssize_t serial_show(struct device *dev, 949 struct device_attribute *attr, char *buf) 950 { 951 struct gendisk *disk = dev_to_disk(dev); 952 int err; 953 954 /* sysfs gives us a PAGE_SIZE buffer */ 955 BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES); 956 957 buf[VIRTIO_BLK_ID_BYTES] = '\0'; 958 err = virtblk_get_id(disk, buf); 959 if (!err) 960 return strlen(buf); 961 962 if (err == -EIO) /* Unsupported? Make it empty. */ 963 return 0; 964 965 return err; 966 } 967 968 static DEVICE_ATTR_RO(serial); 969 970 /* The queue's logical block size must be set before calling this */ 971 static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize) 972 { 973 struct virtio_device *vdev = vblk->vdev; 974 struct request_queue *q = vblk->disk->queue; 975 char cap_str_2[10], cap_str_10[10]; 976 unsigned long long nblocks; 977 u64 capacity; 978 979 /* Host must always specify the capacity. */ 980 virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity); 981 982 nblocks = DIV_ROUND_UP_ULL(capacity, queue_logical_block_size(q) >> 9); 983 984 string_get_size(nblocks, queue_logical_block_size(q), 985 STRING_UNITS_2, cap_str_2, sizeof(cap_str_2)); 986 string_get_size(nblocks, queue_logical_block_size(q), 987 STRING_UNITS_10, cap_str_10, sizeof(cap_str_10)); 988 989 dev_notice(&vdev->dev, 990 "[%s] %s%llu %d-byte logical blocks (%s/%s)\n", 991 vblk->disk->disk_name, 992 resize ? "new size: " : "", 993 nblocks, 994 queue_logical_block_size(q), 995 cap_str_10, 996 cap_str_2); 997 998 set_capacity_and_notify(vblk->disk, capacity); 999 } 1000 1001 static void virtblk_config_changed_work(struct work_struct *work) 1002 { 1003 struct virtio_blk *vblk = 1004 container_of(work, struct virtio_blk, config_work); 1005 1006 virtblk_revalidate_zones(vblk); 1007 virtblk_update_capacity(vblk, true); 1008 } 1009 1010 static void virtblk_config_changed(struct virtio_device *vdev) 1011 { 1012 struct virtio_blk *vblk = vdev->priv; 1013 1014 queue_work(virtblk_wq, &vblk->config_work); 1015 } 1016 1017 static int init_vq(struct virtio_blk *vblk) 1018 { 1019 int err; 1020 unsigned short i; 1021 vq_callback_t **callbacks; 1022 const char **names; 1023 struct virtqueue **vqs; 1024 unsigned short num_vqs; 1025 unsigned short num_poll_vqs; 1026 struct virtio_device *vdev = vblk->vdev; 1027 struct irq_affinity desc = { 0, }; 1028 1029 err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ, 1030 struct virtio_blk_config, num_queues, 1031 &num_vqs); 1032 if (err) 1033 num_vqs = 1; 1034 1035 if (!err && !num_vqs) { 1036 dev_err(&vdev->dev, "MQ advertised but zero queues reported\n"); 1037 return -EINVAL; 1038 } 1039 1040 num_vqs = min_t(unsigned int, 1041 min_not_zero(num_request_queues, nr_cpu_ids), 1042 num_vqs); 1043 1044 num_poll_vqs = min_t(unsigned int, poll_queues, num_vqs - 1); 1045 1046 vblk->io_queues[HCTX_TYPE_DEFAULT] = num_vqs - num_poll_vqs; 1047 vblk->io_queues[HCTX_TYPE_READ] = 0; 1048 vblk->io_queues[HCTX_TYPE_POLL] = num_poll_vqs; 1049 1050 dev_info(&vdev->dev, "%d/%d/%d default/read/poll queues\n", 1051 vblk->io_queues[HCTX_TYPE_DEFAULT], 1052 vblk->io_queues[HCTX_TYPE_READ], 1053 vblk->io_queues[HCTX_TYPE_POLL]); 1054 1055 vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL); 1056 if (!vblk->vqs) 1057 return -ENOMEM; 1058 1059 names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL); 1060 callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL); 1061 vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL); 1062 if (!names || !callbacks || !vqs) { 1063 err = -ENOMEM; 1064 goto out; 1065 } 1066 1067 for (i = 0; i < num_vqs - num_poll_vqs; i++) { 1068 callbacks[i] = virtblk_done; 1069 snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%u", i); 1070 names[i] = vblk->vqs[i].name; 1071 } 1072 1073 for (; i < num_vqs; i++) { 1074 callbacks[i] = NULL; 1075 snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req_poll.%u", i); 1076 names[i] = vblk->vqs[i].name; 1077 } 1078 1079 /* Discover virtqueues and write information to configuration. */ 1080 err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc); 1081 if (err) 1082 goto out; 1083 1084 for (i = 0; i < num_vqs; i++) { 1085 spin_lock_init(&vblk->vqs[i].lock); 1086 vblk->vqs[i].vq = vqs[i]; 1087 } 1088 vblk->num_vqs = num_vqs; 1089 1090 out: 1091 kfree(vqs); 1092 kfree(callbacks); 1093 kfree(names); 1094 if (err) 1095 kfree(vblk->vqs); 1096 return err; 1097 } 1098 1099 /* 1100 * Legacy naming scheme used for virtio devices. We are stuck with it for 1101 * virtio blk but don't ever use it for any new driver. 1102 */ 1103 static int virtblk_name_format(char *prefix, int index, char *buf, int buflen) 1104 { 1105 const int base = 'z' - 'a' + 1; 1106 char *begin = buf + strlen(prefix); 1107 char *end = buf + buflen; 1108 char *p; 1109 int unit; 1110 1111 p = end - 1; 1112 *p = '\0'; 1113 unit = base; 1114 do { 1115 if (p == begin) 1116 return -EINVAL; 1117 *--p = 'a' + (index % unit); 1118 index = (index / unit) - 1; 1119 } while (index >= 0); 1120 1121 memmove(begin, p, end - p); 1122 memcpy(buf, prefix, strlen(prefix)); 1123 1124 return 0; 1125 } 1126 1127 static int virtblk_get_cache_mode(struct virtio_device *vdev) 1128 { 1129 u8 writeback; 1130 int err; 1131 1132 err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE, 1133 struct virtio_blk_config, wce, 1134 &writeback); 1135 1136 /* 1137 * If WCE is not configurable and flush is not available, 1138 * assume no writeback cache is in use. 1139 */ 1140 if (err) 1141 writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH); 1142 1143 return writeback; 1144 } 1145 1146 static void virtblk_update_cache_mode(struct virtio_device *vdev) 1147 { 1148 u8 writeback = virtblk_get_cache_mode(vdev); 1149 struct virtio_blk *vblk = vdev->priv; 1150 1151 blk_queue_write_cache(vblk->disk->queue, writeback, false); 1152 } 1153 1154 static const char *const virtblk_cache_types[] = { 1155 "write through", "write back" 1156 }; 1157 1158 static ssize_t 1159 cache_type_store(struct device *dev, struct device_attribute *attr, 1160 const char *buf, size_t count) 1161 { 1162 struct gendisk *disk = dev_to_disk(dev); 1163 struct virtio_blk *vblk = disk->private_data; 1164 struct virtio_device *vdev = vblk->vdev; 1165 int i; 1166 1167 BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE)); 1168 i = sysfs_match_string(virtblk_cache_types, buf); 1169 if (i < 0) 1170 return i; 1171 1172 virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i); 1173 virtblk_update_cache_mode(vdev); 1174 return count; 1175 } 1176 1177 static ssize_t 1178 cache_type_show(struct device *dev, struct device_attribute *attr, char *buf) 1179 { 1180 struct gendisk *disk = dev_to_disk(dev); 1181 struct virtio_blk *vblk = disk->private_data; 1182 u8 writeback = virtblk_get_cache_mode(vblk->vdev); 1183 1184 BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types)); 1185 return sysfs_emit(buf, "%s\n", virtblk_cache_types[writeback]); 1186 } 1187 1188 static DEVICE_ATTR_RW(cache_type); 1189 1190 static struct attribute *virtblk_attrs[] = { 1191 &dev_attr_serial.attr, 1192 &dev_attr_cache_type.attr, 1193 NULL, 1194 }; 1195 1196 static umode_t virtblk_attrs_are_visible(struct kobject *kobj, 1197 struct attribute *a, int n) 1198 { 1199 struct device *dev = kobj_to_dev(kobj); 1200 struct gendisk *disk = dev_to_disk(dev); 1201 struct virtio_blk *vblk = disk->private_data; 1202 struct virtio_device *vdev = vblk->vdev; 1203 1204 if (a == &dev_attr_cache_type.attr && 1205 !virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE)) 1206 return S_IRUGO; 1207 1208 return a->mode; 1209 } 1210 1211 static const struct attribute_group virtblk_attr_group = { 1212 .attrs = virtblk_attrs, 1213 .is_visible = virtblk_attrs_are_visible, 1214 }; 1215 1216 static const struct attribute_group *virtblk_attr_groups[] = { 1217 &virtblk_attr_group, 1218 NULL, 1219 }; 1220 1221 static void virtblk_map_queues(struct blk_mq_tag_set *set) 1222 { 1223 struct virtio_blk *vblk = set->driver_data; 1224 int i, qoff; 1225 1226 for (i = 0, qoff = 0; i < set->nr_maps; i++) { 1227 struct blk_mq_queue_map *map = &set->map[i]; 1228 1229 map->nr_queues = vblk->io_queues[i]; 1230 map->queue_offset = qoff; 1231 qoff += map->nr_queues; 1232 1233 if (map->nr_queues == 0) 1234 continue; 1235 1236 /* 1237 * Regular queues have interrupts and hence CPU affinity is 1238 * defined by the core virtio code, but polling queues have 1239 * no interrupts so we let the block layer assign CPU affinity. 1240 */ 1241 if (i == HCTX_TYPE_POLL) 1242 blk_mq_map_queues(&set->map[i]); 1243 else 1244 blk_mq_virtio_map_queues(&set->map[i], vblk->vdev, 0); 1245 } 1246 } 1247 1248 static void virtblk_complete_batch(struct io_comp_batch *iob) 1249 { 1250 struct request *req; 1251 1252 rq_list_for_each(&iob->req_list, req) { 1253 virtblk_unmap_data(req, blk_mq_rq_to_pdu(req)); 1254 virtblk_cleanup_cmd(req); 1255 } 1256 blk_mq_end_request_batch(iob); 1257 } 1258 1259 static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) 1260 { 1261 struct virtio_blk *vblk = hctx->queue->queuedata; 1262 struct virtio_blk_vq *vq = get_virtio_blk_vq(hctx); 1263 struct virtblk_req *vbr; 1264 unsigned long flags; 1265 unsigned int len; 1266 int found = 0; 1267 1268 spin_lock_irqsave(&vq->lock, flags); 1269 1270 while ((vbr = virtqueue_get_buf(vq->vq, &len)) != NULL) { 1271 struct request *req = blk_mq_rq_from_pdu(vbr); 1272 1273 found++; 1274 if (!blk_mq_complete_request_remote(req) && 1275 !blk_mq_add_to_batch(req, iob, virtblk_vbr_status(vbr), 1276 virtblk_complete_batch)) 1277 virtblk_request_done(req); 1278 } 1279 1280 if (found) 1281 blk_mq_start_stopped_hw_queues(vblk->disk->queue, true); 1282 1283 spin_unlock_irqrestore(&vq->lock, flags); 1284 1285 return found; 1286 } 1287 1288 static const struct blk_mq_ops virtio_mq_ops = { 1289 .queue_rq = virtio_queue_rq, 1290 .queue_rqs = virtio_queue_rqs, 1291 .commit_rqs = virtio_commit_rqs, 1292 .complete = virtblk_request_done, 1293 .map_queues = virtblk_map_queues, 1294 .poll = virtblk_poll, 1295 }; 1296 1297 static unsigned int virtblk_queue_depth; 1298 module_param_named(queue_depth, virtblk_queue_depth, uint, 0444); 1299 1300 static int virtblk_probe(struct virtio_device *vdev) 1301 { 1302 struct virtio_blk *vblk; 1303 struct request_queue *q; 1304 int err, index; 1305 1306 u32 v, blk_size, max_size, sg_elems, opt_io_size; 1307 u32 max_discard_segs = 0; 1308 u32 discard_granularity = 0; 1309 u16 min_io_size; 1310 u8 physical_block_exp, alignment_offset; 1311 unsigned int queue_depth; 1312 size_t max_dma_size; 1313 1314 if (!vdev->config->get) { 1315 dev_err(&vdev->dev, "%s failure: config access disabled\n", 1316 __func__); 1317 return -EINVAL; 1318 } 1319 1320 err = ida_alloc_range(&vd_index_ida, 0, 1321 minor_to_index(1 << MINORBITS) - 1, GFP_KERNEL); 1322 if (err < 0) 1323 goto out; 1324 index = err; 1325 1326 /* We need to know how many segments before we allocate. */ 1327 err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX, 1328 struct virtio_blk_config, seg_max, 1329 &sg_elems); 1330 1331 /* We need at least one SG element, whatever they say. */ 1332 if (err || !sg_elems) 1333 sg_elems = 1; 1334 1335 /* Prevent integer overflows and honor max vq size */ 1336 sg_elems = min_t(u32, sg_elems, VIRTIO_BLK_MAX_SG_ELEMS - 2); 1337 1338 vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL); 1339 if (!vblk) { 1340 err = -ENOMEM; 1341 goto out_free_index; 1342 } 1343 1344 mutex_init(&vblk->vdev_mutex); 1345 1346 vblk->vdev = vdev; 1347 1348 INIT_WORK(&vblk->config_work, virtblk_config_changed_work); 1349 1350 err = init_vq(vblk); 1351 if (err) 1352 goto out_free_vblk; 1353 1354 /* Default queue sizing is to fill the ring. */ 1355 if (!virtblk_queue_depth) { 1356 queue_depth = vblk->vqs[0].vq->num_free; 1357 /* ... but without indirect descs, we use 2 descs per req */ 1358 if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC)) 1359 queue_depth /= 2; 1360 } else { 1361 queue_depth = virtblk_queue_depth; 1362 } 1363 1364 memset(&vblk->tag_set, 0, sizeof(vblk->tag_set)); 1365 vblk->tag_set.ops = &virtio_mq_ops; 1366 vblk->tag_set.queue_depth = queue_depth; 1367 vblk->tag_set.numa_node = NUMA_NO_NODE; 1368 vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; 1369 vblk->tag_set.cmd_size = 1370 sizeof(struct virtblk_req) + 1371 sizeof(struct scatterlist) * VIRTIO_BLK_INLINE_SG_CNT; 1372 vblk->tag_set.driver_data = vblk; 1373 vblk->tag_set.nr_hw_queues = vblk->num_vqs; 1374 vblk->tag_set.nr_maps = 1; 1375 if (vblk->io_queues[HCTX_TYPE_POLL]) 1376 vblk->tag_set.nr_maps = 3; 1377 1378 err = blk_mq_alloc_tag_set(&vblk->tag_set); 1379 if (err) 1380 goto out_free_vq; 1381 1382 vblk->disk = blk_mq_alloc_disk(&vblk->tag_set, vblk); 1383 if (IS_ERR(vblk->disk)) { 1384 err = PTR_ERR(vblk->disk); 1385 goto out_free_tags; 1386 } 1387 q = vblk->disk->queue; 1388 1389 virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN); 1390 1391 vblk->disk->major = major; 1392 vblk->disk->first_minor = index_to_minor(index); 1393 vblk->disk->minors = 1 << PART_BITS; 1394 vblk->disk->private_data = vblk; 1395 vblk->disk->fops = &virtblk_fops; 1396 vblk->index = index; 1397 1398 /* configure queue flush support */ 1399 virtblk_update_cache_mode(vdev); 1400 1401 /* If disk is read-only in the host, the guest should obey */ 1402 if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO)) 1403 set_disk_ro(vblk->disk, 1); 1404 1405 /* We can handle whatever the host told us to handle. */ 1406 blk_queue_max_segments(q, sg_elems); 1407 1408 /* No real sector limit. */ 1409 blk_queue_max_hw_sectors(q, UINT_MAX); 1410 1411 max_dma_size = virtio_max_dma_size(vdev); 1412 max_size = max_dma_size > U32_MAX ? U32_MAX : max_dma_size; 1413 1414 /* Host can optionally specify maximum segment size and number of 1415 * segments. */ 1416 err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX, 1417 struct virtio_blk_config, size_max, &v); 1418 if (!err) 1419 max_size = min(max_size, v); 1420 1421 blk_queue_max_segment_size(q, max_size); 1422 1423 /* Host can optionally specify the block size of the device */ 1424 err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE, 1425 struct virtio_blk_config, blk_size, 1426 &blk_size); 1427 if (!err) { 1428 err = blk_validate_block_size(blk_size); 1429 if (err) { 1430 dev_err(&vdev->dev, 1431 "virtio_blk: invalid block size: 0x%x\n", 1432 blk_size); 1433 goto out_cleanup_disk; 1434 } 1435 1436 blk_queue_logical_block_size(q, blk_size); 1437 } else 1438 blk_size = queue_logical_block_size(q); 1439 1440 /* Use topology information if available */ 1441 err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY, 1442 struct virtio_blk_config, physical_block_exp, 1443 &physical_block_exp); 1444 if (!err && physical_block_exp) 1445 blk_queue_physical_block_size(q, 1446 blk_size * (1 << physical_block_exp)); 1447 1448 err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY, 1449 struct virtio_blk_config, alignment_offset, 1450 &alignment_offset); 1451 if (!err && alignment_offset) 1452 blk_queue_alignment_offset(q, blk_size * alignment_offset); 1453 1454 err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY, 1455 struct virtio_blk_config, min_io_size, 1456 &min_io_size); 1457 if (!err && min_io_size) 1458 blk_queue_io_min(q, blk_size * min_io_size); 1459 1460 err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY, 1461 struct virtio_blk_config, opt_io_size, 1462 &opt_io_size); 1463 if (!err && opt_io_size) 1464 blk_queue_io_opt(q, blk_size * opt_io_size); 1465 1466 if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) { 1467 virtio_cread(vdev, struct virtio_blk_config, 1468 discard_sector_alignment, &discard_granularity); 1469 1470 virtio_cread(vdev, struct virtio_blk_config, 1471 max_discard_sectors, &v); 1472 blk_queue_max_discard_sectors(q, v ? v : UINT_MAX); 1473 1474 virtio_cread(vdev, struct virtio_blk_config, max_discard_seg, 1475 &max_discard_segs); 1476 } 1477 1478 if (virtio_has_feature(vdev, VIRTIO_BLK_F_WRITE_ZEROES)) { 1479 virtio_cread(vdev, struct virtio_blk_config, 1480 max_write_zeroes_sectors, &v); 1481 blk_queue_max_write_zeroes_sectors(q, v ? v : UINT_MAX); 1482 } 1483 1484 /* The discard and secure erase limits are combined since the Linux 1485 * block layer uses the same limit for both commands. 1486 * 1487 * If both VIRTIO_BLK_F_SECURE_ERASE and VIRTIO_BLK_F_DISCARD features 1488 * are negotiated, we will use the minimum between the limits. 1489 * 1490 * discard sector alignment is set to the minimum between discard_sector_alignment 1491 * and secure_erase_sector_alignment. 1492 * 1493 * max discard sectors is set to the minimum between max_discard_seg and 1494 * max_secure_erase_seg. 1495 */ 1496 if (virtio_has_feature(vdev, VIRTIO_BLK_F_SECURE_ERASE)) { 1497 1498 virtio_cread(vdev, struct virtio_blk_config, 1499 secure_erase_sector_alignment, &v); 1500 1501 /* secure_erase_sector_alignment should not be zero, the device should set a 1502 * valid number of sectors. 1503 */ 1504 if (!v) { 1505 dev_err(&vdev->dev, 1506 "virtio_blk: secure_erase_sector_alignment can't be 0\n"); 1507 err = -EINVAL; 1508 goto out_cleanup_disk; 1509 } 1510 1511 discard_granularity = min_not_zero(discard_granularity, v); 1512 1513 virtio_cread(vdev, struct virtio_blk_config, 1514 max_secure_erase_sectors, &v); 1515 1516 /* max_secure_erase_sectors should not be zero, the device should set a 1517 * valid number of sectors. 1518 */ 1519 if (!v) { 1520 dev_err(&vdev->dev, 1521 "virtio_blk: max_secure_erase_sectors can't be 0\n"); 1522 err = -EINVAL; 1523 goto out_cleanup_disk; 1524 } 1525 1526 blk_queue_max_secure_erase_sectors(q, v); 1527 1528 virtio_cread(vdev, struct virtio_blk_config, 1529 max_secure_erase_seg, &v); 1530 1531 /* max_secure_erase_seg should not be zero, the device should set a 1532 * valid number of segments 1533 */ 1534 if (!v) { 1535 dev_err(&vdev->dev, 1536 "virtio_blk: max_secure_erase_seg can't be 0\n"); 1537 err = -EINVAL; 1538 goto out_cleanup_disk; 1539 } 1540 1541 max_discard_segs = min_not_zero(max_discard_segs, v); 1542 } 1543 1544 if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD) || 1545 virtio_has_feature(vdev, VIRTIO_BLK_F_SECURE_ERASE)) { 1546 /* max_discard_seg and discard_granularity will be 0 only 1547 * if max_discard_seg and discard_sector_alignment fields in the virtio 1548 * config are 0 and VIRTIO_BLK_F_SECURE_ERASE feature is not negotiated. 1549 * In this case, we use default values. 1550 */ 1551 if (!max_discard_segs) 1552 max_discard_segs = sg_elems; 1553 1554 blk_queue_max_discard_segments(q, 1555 min(max_discard_segs, MAX_DISCARD_SEGMENTS)); 1556 1557 if (discard_granularity) 1558 q->limits.discard_granularity = discard_granularity << SECTOR_SHIFT; 1559 else 1560 q->limits.discard_granularity = blk_size; 1561 } 1562 1563 virtblk_update_capacity(vblk, false); 1564 virtio_device_ready(vdev); 1565 1566 /* 1567 * All steps that follow use the VQs therefore they need to be 1568 * placed after the virtio_device_ready() call above. 1569 */ 1570 if (virtio_has_feature(vdev, VIRTIO_BLK_F_ZONED)) { 1571 err = virtblk_probe_zoned_device(vdev, vblk, q); 1572 if (err) 1573 goto out_cleanup_disk; 1574 } 1575 1576 err = device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups); 1577 if (err) 1578 goto out_cleanup_disk; 1579 1580 return 0; 1581 1582 out_cleanup_disk: 1583 put_disk(vblk->disk); 1584 out_free_tags: 1585 blk_mq_free_tag_set(&vblk->tag_set); 1586 out_free_vq: 1587 vdev->config->del_vqs(vdev); 1588 kfree(vblk->vqs); 1589 out_free_vblk: 1590 kfree(vblk); 1591 out_free_index: 1592 ida_free(&vd_index_ida, index); 1593 out: 1594 return err; 1595 } 1596 1597 static void virtblk_remove(struct virtio_device *vdev) 1598 { 1599 struct virtio_blk *vblk = vdev->priv; 1600 1601 /* Make sure no work handler is accessing the device. */ 1602 flush_work(&vblk->config_work); 1603 1604 del_gendisk(vblk->disk); 1605 blk_mq_free_tag_set(&vblk->tag_set); 1606 1607 mutex_lock(&vblk->vdev_mutex); 1608 1609 /* Stop all the virtqueues. */ 1610 virtio_reset_device(vdev); 1611 1612 /* Virtqueues are stopped, nothing can use vblk->vdev anymore. */ 1613 vblk->vdev = NULL; 1614 1615 vdev->config->del_vqs(vdev); 1616 kfree(vblk->vqs); 1617 1618 mutex_unlock(&vblk->vdev_mutex); 1619 1620 put_disk(vblk->disk); 1621 } 1622 1623 #ifdef CONFIG_PM_SLEEP 1624 static int virtblk_freeze(struct virtio_device *vdev) 1625 { 1626 struct virtio_blk *vblk = vdev->priv; 1627 1628 /* Ensure no requests in virtqueues before deleting vqs. */ 1629 blk_mq_freeze_queue(vblk->disk->queue); 1630 1631 /* Ensure we don't receive any more interrupts */ 1632 virtio_reset_device(vdev); 1633 1634 /* Make sure no work handler is accessing the device. */ 1635 flush_work(&vblk->config_work); 1636 1637 vdev->config->del_vqs(vdev); 1638 kfree(vblk->vqs); 1639 1640 return 0; 1641 } 1642 1643 static int virtblk_restore(struct virtio_device *vdev) 1644 { 1645 struct virtio_blk *vblk = vdev->priv; 1646 int ret; 1647 1648 ret = init_vq(vdev->priv); 1649 if (ret) 1650 return ret; 1651 1652 virtio_device_ready(vdev); 1653 1654 blk_mq_unfreeze_queue(vblk->disk->queue); 1655 return 0; 1656 } 1657 #endif 1658 1659 static const struct virtio_device_id id_table[] = { 1660 { VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID }, 1661 { 0 }, 1662 }; 1663 1664 static unsigned int features_legacy[] = { 1665 VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY, 1666 VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, 1667 VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE, 1668 VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES, 1669 VIRTIO_BLK_F_SECURE_ERASE, 1670 } 1671 ; 1672 static unsigned int features[] = { 1673 VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY, 1674 VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, 1675 VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE, 1676 VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES, 1677 VIRTIO_BLK_F_SECURE_ERASE, VIRTIO_BLK_F_ZONED, 1678 }; 1679 1680 static struct virtio_driver virtio_blk = { 1681 .feature_table = features, 1682 .feature_table_size = ARRAY_SIZE(features), 1683 .feature_table_legacy = features_legacy, 1684 .feature_table_size_legacy = ARRAY_SIZE(features_legacy), 1685 .driver.name = KBUILD_MODNAME, 1686 .driver.owner = THIS_MODULE, 1687 .id_table = id_table, 1688 .probe = virtblk_probe, 1689 .remove = virtblk_remove, 1690 .config_changed = virtblk_config_changed, 1691 #ifdef CONFIG_PM_SLEEP 1692 .freeze = virtblk_freeze, 1693 .restore = virtblk_restore, 1694 #endif 1695 }; 1696 1697 static int __init virtio_blk_init(void) 1698 { 1699 int error; 1700 1701 virtblk_wq = alloc_workqueue("virtio-blk", 0, 0); 1702 if (!virtblk_wq) 1703 return -ENOMEM; 1704 1705 major = register_blkdev(0, "virtblk"); 1706 if (major < 0) { 1707 error = major; 1708 goto out_destroy_workqueue; 1709 } 1710 1711 error = register_virtio_driver(&virtio_blk); 1712 if (error) 1713 goto out_unregister_blkdev; 1714 return 0; 1715 1716 out_unregister_blkdev: 1717 unregister_blkdev(major, "virtblk"); 1718 out_destroy_workqueue: 1719 destroy_workqueue(virtblk_wq); 1720 return error; 1721 } 1722 1723 static void __exit virtio_blk_fini(void) 1724 { 1725 unregister_virtio_driver(&virtio_blk); 1726 unregister_blkdev(major, "virtblk"); 1727 destroy_workqueue(virtblk_wq); 1728 } 1729 module_init(virtio_blk_init); 1730 module_exit(virtio_blk_fini); 1731 1732 MODULE_DEVICE_TABLE(virtio, id_table); 1733 MODULE_DESCRIPTION("Virtio block driver"); 1734 MODULE_LICENSE("GPL"); 1735