// SPDX-License-Identifier: GPL-2.0
/*
 * Zoned block device handling
 *
 * Copyright (c) 2015, Hannes Reinecke
 * Copyright (c) 2015, SUSE Linux GmbH
 *
 * Copyright (c) 2016, Damien Le Moal
 * Copyright (c) 2016, Western Digital
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>

#include "blk.h"

#define ZONE_COND_NAME(name) [BLK_ZONE_COND_##name] = #name
static const char *const zone_cond_name[] = {
	ZONE_COND_NAME(NOT_WP),
	ZONE_COND_NAME(EMPTY),
	ZONE_COND_NAME(IMP_OPEN),
	ZONE_COND_NAME(EXP_OPEN),
	ZONE_COND_NAME(CLOSED),
	ZONE_COND_NAME(READONLY),
	ZONE_COND_NAME(FULL),
	ZONE_COND_NAME(OFFLINE),
};
#undef ZONE_COND_NAME

/**
 * blk_zone_cond_str - Return string XXX in BLK_ZONE_COND_XXX.
 * @zone_cond: BLK_ZONE_COND_XXX.
 *
 * Description: Centralized block layer function to convert BLK_ZONE_COND_XXX
 * into its string format. Useful for debugging and tracing zone conditions.
 * For an invalid BLK_ZONE_COND_XXX it returns the string "UNKNOWN".
 */
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond)
{
	static const char *zone_cond_str = "UNKNOWN";

	if (zone_cond < ARRAY_SIZE(zone_cond_name) && zone_cond_name[zone_cond])
		zone_cond_str = zone_cond_name[zone_cond];

	return zone_cond_str;
}
EXPORT_SYMBOL_GPL(blk_zone_cond_str);

/*
 * Return true if a request is a write request that needs zone write locking.
 */
bool blk_req_needs_zone_write_lock(struct request *rq)
{
	if (!rq->q->seq_zones_wlock)
		return false;

	if (blk_rq_is_passthrough(rq))
		return false;

	switch (req_op(rq)) {
	case REQ_OP_WRITE_ZEROES:
	case REQ_OP_WRITE_SAME:
	case REQ_OP_WRITE:
		return blk_rq_zone_is_seq(rq);
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(blk_req_needs_zone_write_lock);

bool blk_req_zone_write_trylock(struct request *rq)
{
	unsigned int zno = blk_rq_zone_no(rq);

	if (test_and_set_bit(zno, rq->q->seq_zones_wlock))
		return false;

	WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);
	rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;

	return true;
}
EXPORT_SYMBOL_GPL(blk_req_zone_write_trylock);

void __blk_req_zone_write_lock(struct request *rq)
{
	if (WARN_ON_ONCE(test_and_set_bit(blk_rq_zone_no(rq),
					  rq->q->seq_zones_wlock)))
		return;

	WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);
	rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;
}
EXPORT_SYMBOL_GPL(__blk_req_zone_write_lock);

void __blk_req_zone_write_unlock(struct request *rq)
{
	rq->rq_flags &= ~RQF_ZONE_WRITE_LOCKED;
	if (rq->q->seq_zones_wlock)
		WARN_ON_ONCE(!test_and_clear_bit(blk_rq_zone_no(rq),
						 rq->q->seq_zones_wlock));
}
EXPORT_SYMBOL_GPL(__blk_req_zone_write_unlock);
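
/*
 * Example (illustrative sketch only, not part of this file): how a blk-mq
 * I/O scheduler could use the helpers above to allow at most one in-flight
 * write per sequential zone, similar in spirit to what mq-deadline does.
 * The function name example_try_dispatch() is hypothetical.
 *
 *	static bool example_try_dispatch(struct request *rq)
 *	{
 *		// Requests that do not write to a sequential zone need no
 *		// serialization and can always be dispatched.
 *		if (!blk_req_needs_zone_write_lock(rq))
 *			return true;
 *
 *		// Otherwise, dispatch only if the zone write lock is free.
 *		// blk_req_zone_write_unlock() releases the lock when the
 *		// request completes or is requeued.
 *		return blk_req_zone_write_trylock(rq);
 *	}
 */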

/**
 * blkdev_nr_zones - Get number of zones
 * @disk:	Target gendisk
 *
 * Return the total number of zones of a zoned block device. For a block
 * device without zone capabilities, the number of zones is always 0.
 */
unsigned int blkdev_nr_zones(struct gendisk *disk)
{
	sector_t zone_sectors = blk_queue_zone_sectors(disk->queue);

	if (!blk_queue_is_zoned(disk->queue))
		return 0;
	return (get_capacity(disk) + zone_sectors - 1) >> ilog2(zone_sectors);
}
EXPORT_SYMBOL_GPL(blkdev_nr_zones);

/**
 * blkdev_report_zones - Get zones information
 * @bdev:	Target block device
 * @sector:	Sector from which to report zones
 * @nr_zones:	Maximum number of zones to report
 * @cb:		Callback function called for each reported zone
 * @data:	Private data for the callback
 *
 * Description:
 *    Get zone information starting from the zone containing @sector for at
 *    most @nr_zones, and call @cb for each zone reported by the device.
 *    To report all zones in a device starting from @sector, the BLK_ALL_ZONES
 *    constant can be passed to @nr_zones.
 *    Returns the number of zones reported by the device, or a negative errno
 *    value in case of failure.
 *
 *    Note: The caller must use memalloc_noXX_save/restore() calls to control
 *    memory allocations done within this function.
 */
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct gendisk *disk = bdev->bd_disk;
	sector_t capacity = get_capacity(disk);

	if (!blk_queue_is_zoned(bdev_get_queue(bdev)) ||
	    WARN_ON_ONCE(!disk->fops->report_zones))
		return -EOPNOTSUPP;

	if (!nr_zones || sector >= capacity)
		return 0;

	return disk->fops->report_zones(disk, sector, nr_zones, cb, data);
}
EXPORT_SYMBOL_GPL(blkdev_report_zones);
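
/*
 * Example (illustrative sketch only, not part of this file): counting the
 * sequential zones of a zoned block device with blkdev_report_zones(). The
 * identifiers example_count_seq_cb() and example_nr_seq_zones() are
 * hypothetical.
 *
 *	static int example_count_seq_cb(struct blk_zone *zone,
 *					unsigned int idx, void *data)
 *	{
 *		unsigned int *nr_seq = data;
 *
 *		// Every zone that is not conventional is sequential.
 *		if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL)
 *			(*nr_seq)++;
 *		return 0;
 *	}
 *
 *	static int example_nr_seq_zones(struct block_device *bdev)
 *	{
 *		unsigned int nr_seq = 0;
 *		int ret;
 *
 *		// BLK_ALL_ZONES reports every zone from sector 0 onward.
 *		ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
 *					  example_count_seq_cb, &nr_seq);
 *		return ret < 0 ? ret : nr_seq;
 *	}
 */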

static inline unsigned long *blk_alloc_zone_bitmap(int node,
						   unsigned int nr_zones)
{
	return kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(unsigned long),
			    GFP_NOIO, node);
}

static int blk_zone_need_reset_cb(struct blk_zone *zone, unsigned int idx,
				  void *data)
{
	/*
	 * For an all-zones reset, ignore conventional, empty, read-only
	 * and offline zones.
	 */
	switch (zone->cond) {
	case BLK_ZONE_COND_NOT_WP:
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_READONLY:
	case BLK_ZONE_COND_OFFLINE:
		return 0;
	default:
		set_bit(idx, (unsigned long *)data);
		return 0;
	}
}

static int blkdev_zone_reset_all_emulated(struct block_device *bdev,
					  gfp_t gfp_mask)
{
	struct request_queue *q = bdev_get_queue(bdev);
	sector_t capacity = get_capacity(bdev->bd_disk);
	sector_t zone_sectors = blk_queue_zone_sectors(q);
	unsigned long *need_reset;
	struct bio *bio = NULL;
	sector_t sector = 0;
	int ret;

	need_reset = blk_alloc_zone_bitmap(q->node, q->nr_zones);
	if (!need_reset)
		return -ENOMEM;

	ret = bdev->bd_disk->fops->report_zones(bdev->bd_disk, 0,
				q->nr_zones, blk_zone_need_reset_cb,
				need_reset);
	if (ret < 0)
		goto out_free_need_reset;

	ret = 0;
	while (sector < capacity) {
		if (!test_bit(blk_queue_zone_no(q, sector), need_reset)) {
			sector += zone_sectors;
			continue;
		}

		bio = blk_next_bio(bio, 0, gfp_mask);
		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_ZONE_RESET | REQ_SYNC;
		bio->bi_iter.bi_sector = sector;
		sector += zone_sectors;

		/* This may take a while, so be nice to others */
		cond_resched();
	}

	if (bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}

out_free_need_reset:
	kfree(need_reset);
	return ret;
}

static int blkdev_zone_reset_all(struct block_device *bdev, gfp_t gfp_mask)
{
	struct bio bio;

	bio_init(&bio, NULL, 0);
	bio_set_dev(&bio, bdev);
	bio.bi_opf = REQ_OP_ZONE_RESET_ALL | REQ_SYNC;

	return submit_bio_wait(&bio);
}

/**
 * blkdev_zone_mgmt - Execute a zone management operation on a range of zones
 * @bdev:	Target block device
 * @op:		Operation to be performed on the zones
 * @sector:	Start sector of the first zone to operate on
 * @nr_sectors:	Number of sectors, should be at least the length of one zone
 *		and must be zone size aligned.
 * @gfp_mask:	Memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Perform the specified operation on the range of zones specified by
 *    @sector..@sector+@nr_sectors. Specifying the entire disk sector range
 *    is valid, but the specified range should not contain conventional zones.
 *    The operation to execute on each zone can be a zone reset, open, close
 *    or finish request.
 */
int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
		     sector_t sector, sector_t nr_sectors,
		     gfp_t gfp_mask)
{
	struct request_queue *q = bdev_get_queue(bdev);
	sector_t zone_sectors = blk_queue_zone_sectors(q);
	sector_t capacity = get_capacity(bdev->bd_disk);
	sector_t end_sector = sector + nr_sectors;
	struct bio *bio = NULL;
	int ret = 0;

	if (!blk_queue_is_zoned(q))
		return -EOPNOTSUPP;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (!op_is_zone_mgmt(op))
		return -EOPNOTSUPP;

	if (end_sector <= sector || end_sector > capacity)
		/* Out of range */
		return -EINVAL;

	/* Check alignment (handle eventual smaller last zone) */
	if (sector & (zone_sectors - 1))
		return -EINVAL;

	if ((nr_sectors & (zone_sectors - 1)) && end_sector != capacity)
		return -EINVAL;

	/*
	 * In the case of a zone reset operation over all zones,
	 * REQ_OP_ZONE_RESET_ALL can be used with devices supporting this
	 * command. For other devices, we emulate this command behavior by
	 * identifying the zones needing a reset.
	 */
	if (op == REQ_OP_ZONE_RESET && sector == 0 && nr_sectors == capacity) {
		if (!blk_queue_zone_resetall(q))
			return blkdev_zone_reset_all_emulated(bdev, gfp_mask);
		return blkdev_zone_reset_all(bdev, gfp_mask);
	}

	while (sector < end_sector) {
		bio = blk_next_bio(bio, 0, gfp_mask);
		bio_set_dev(bio, bdev);
		bio->bi_opf = op | REQ_SYNC;
		bio->bi_iter.bi_sector = sector;
		sector += zone_sectors;

		/* This may take a while, so be nice to others */
		cond_resched();
	}

	ret = submit_bio_wait(bio);
	bio_put(bio);

	return ret;
}
EXPORT_SYMBOL_GPL(blkdev_zone_mgmt);
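
/*
 * Example (illustrative sketch only, not part of this file): resetting the
 * single zone containing @sector with blkdev_zone_mgmt(). The function name
 * example_reset_zone() is hypothetical. Zone sizes are guaranteed to be a
 * power of two (enforced at revalidation time below), so masking rounds
 * @sector down to the start of its zone, satisfying the alignment checks
 * made by blkdev_zone_mgmt().
 *
 *	static int example_reset_zone(struct block_device *bdev,
 *				      sector_t sector)
 *	{
 *		sector_t zone_sectors =
 *			blk_queue_zone_sectors(bdev_get_queue(bdev));
 *
 *		// Round down to the first sector of the zone.
 *		sector &= ~(zone_sectors - 1);
 *		return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET, sector,
 *					zone_sectors, GFP_KERNEL);
 *	}
 */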

struct zone_report_args {
	struct blk_zone __user *zones;
};

static int blkdev_copy_zone_to_user(struct blk_zone *zone, unsigned int idx,
				    void *data)
{
	struct zone_report_args *args = data;

	if (copy_to_user(&args->zones[idx], zone, sizeof(struct blk_zone)))
		return -EFAULT;
	return 0;
}

/*
 * BLKREPORTZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
			      unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct zone_report_args args;
	struct request_queue *q;
	struct blk_zone_report rep;
	int ret;

	if (!argp)
		return -EINVAL;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	if (!blk_queue_is_zoned(q))
		return -ENOTTY;

	if (copy_from_user(&rep, argp, sizeof(struct blk_zone_report)))
		return -EFAULT;

	if (!rep.nr_zones)
		return -EINVAL;

	args.zones = argp + sizeof(struct blk_zone_report);
	ret = blkdev_report_zones(bdev, rep.sector, rep.nr_zones,
				  blkdev_copy_zone_to_user, &args);
	if (ret < 0)
		return ret;

	rep.nr_zones = ret;
	rep.flags = BLK_ZONE_REP_CAPACITY;
	if (copy_to_user(argp, &rep, sizeof(struct blk_zone_report)))
		return -EFAULT;
	return 0;
}

static int blkdev_truncate_zone_range(struct block_device *bdev, fmode_t mode,
				      const struct blk_zone_range *zrange)
{
	loff_t start, end;

	if (zrange->sector + zrange->nr_sectors <= zrange->sector ||
	    zrange->sector + zrange->nr_sectors > get_capacity(bdev->bd_disk))
		/* Out of range */
		return -EINVAL;

	start = zrange->sector << SECTOR_SHIFT;
	end = ((zrange->sector + zrange->nr_sectors) << SECTOR_SHIFT) - 1;

	return truncate_bdev_range(bdev, mode, start, end);
}

/*
 * BLKRESETZONE, BLKOPENZONE, BLKCLOSEZONE and BLKFINISHZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct request_queue *q;
	struct blk_zone_range zrange;
	enum req_opf op;
	int ret;

	if (!argp)
		return -EINVAL;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	if (!blk_queue_is_zoned(q))
		return -ENOTTY;

	if (!(mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&zrange, argp, sizeof(struct blk_zone_range)))
		return -EFAULT;

	switch (cmd) {
	case BLKRESETZONE:
		op = REQ_OP_ZONE_RESET;

		/* Invalidate the page cache, including dirty pages. */
		ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
		if (ret)
			return ret;
		break;
	case BLKOPENZONE:
		op = REQ_OP_ZONE_OPEN;
		break;
	case BLKCLOSEZONE:
		op = REQ_OP_ZONE_CLOSE;
		break;
	case BLKFINISHZONE:
		op = REQ_OP_ZONE_FINISH;
		break;
	default:
		return -ENOTTY;
	}

	ret = blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors,
			       GFP_KERNEL);

	/*
	 * Invalidate the page cache again for zone reset: writes can only be
	 * direct for zoned devices so concurrent writes would not add any page
	 * to the page cache after/during reset. The page cache may be filled
	 * again due to concurrent reads though and dropping the pages for
	 * these is fine.
	 */
	if (!ret && cmd == BLKRESETZONE)
		ret = blkdev_truncate_zone_range(bdev, mode, &zrange);

	return ret;
}
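
/*
 * Example (illustrative sketch only): the user space side of the
 * BLKRESETZONE ioctl handled above. This is application code, not kernel
 * code; the variables fd (a file descriptor opened for writing on the zoned
 * block device), zone_start and zone_len are assumed, and error handling is
 * omitted.
 *
 *	#include <linux/blkzoned.h>
 *	#include <sys/ioctl.h>
 *
 *	struct blk_zone_range zrange = {
 *		.sector = zone_start,	// first sector of the target zone
 *		.nr_sectors = zone_len,	// length of the zone in sectors
 *	};
 *	int ret = ioctl(fd, BLKRESETZONE, &zrange);
 */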

void blk_queue_free_zone_bitmaps(struct request_queue *q)
{
	kfree(q->conv_zones_bitmap);
	q->conv_zones_bitmap = NULL;
	kfree(q->seq_zones_wlock);
	q->seq_zones_wlock = NULL;
}

struct blk_revalidate_zone_args {
	struct gendisk *disk;
	unsigned long *conv_zones_bitmap;
	unsigned long *seq_zones_wlock;
	unsigned int nr_zones;
	sector_t zone_sectors;
	sector_t sector;
};

/*
 * Helper function to check the validity of zones of a zoned block device.
 */
static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
				  void *data)
{
	struct blk_revalidate_zone_args *args = data;
	struct gendisk *disk = args->disk;
	struct request_queue *q = disk->queue;
	sector_t capacity = get_capacity(disk);

	/*
	 * All zones must have the same size, with the exception of an
	 * eventual smaller last zone.
	 */
	if (zone->start == 0) {
		if (zone->len == 0 || !is_power_of_2(zone->len)) {
			pr_warn("%s: Invalid zoned device with non power of two zone size (%llu)\n",
				disk->disk_name, zone->len);
			return -ENODEV;
		}

		args->zone_sectors = zone->len;
		args->nr_zones = (capacity + zone->len - 1) >> ilog2(zone->len);
	} else if (zone->start + args->zone_sectors < capacity) {
		if (zone->len != args->zone_sectors) {
			pr_warn("%s: Invalid zoned device with non constant zone size\n",
				disk->disk_name);
			return -ENODEV;
		}
	} else {
		if (zone->len > args->zone_sectors) {
			pr_warn("%s: Invalid zoned device with larger last zone size\n",
				disk->disk_name);
			return -ENODEV;
		}
	}

	/* Check for holes in the zone report */
	if (zone->start != args->sector) {
		pr_warn("%s: Zone gap at sectors %llu..%llu\n",
			disk->disk_name, args->sector, zone->start);
		return -ENODEV;
	}

	/* Check zone type */
	switch (zone->type) {
	case BLK_ZONE_TYPE_CONVENTIONAL:
		if (!args->conv_zones_bitmap) {
			args->conv_zones_bitmap =
				blk_alloc_zone_bitmap(q->node, args->nr_zones);
			if (!args->conv_zones_bitmap)
				return -ENOMEM;
		}
		set_bit(idx, args->conv_zones_bitmap);
		break;
	case BLK_ZONE_TYPE_SEQWRITE_REQ:
	case BLK_ZONE_TYPE_SEQWRITE_PREF:
		if (!args->seq_zones_wlock) {
			args->seq_zones_wlock =
				blk_alloc_zone_bitmap(q->node, args->nr_zones);
			if (!args->seq_zones_wlock)
				return -ENOMEM;
		}
		break;
	default:
		pr_warn("%s: Invalid zone type 0x%x at sectors %llu\n",
			disk->disk_name, (int)zone->type, zone->start);
		return -ENODEV;
	}

	args->sector += zone->len;
	return 0;
}

/**
 * blk_revalidate_disk_zones - (re)allocate and initialize zone bitmaps
 * @disk:	Target disk
 * @update_driver_data:	Callback to update driver data on the frozen disk
 *
 * Helper function for low-level device drivers to (re)allocate and initialize
 * a disk request queue's zone bitmaps. This function should normally be called
 * within the disk ->revalidate method for blk-mq based drivers. For BIO based
 * drivers only q->nr_zones needs to be updated so that the sysfs exposed value
 * is correct.
 * If the @update_driver_data callback function is not NULL, the callback is
 * executed with the device request queue frozen after all zones have been
 * checked.
 */
int blk_revalidate_disk_zones(struct gendisk *disk,
			      void (*update_driver_data)(struct gendisk *disk))
{
	struct request_queue *q = disk->queue;
	struct blk_revalidate_zone_args args = {
		.disk = disk,
	};
	unsigned int noio_flag;
	int ret;

	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
		return -EIO;
	if (WARN_ON_ONCE(!queue_is_mq(q)))
		return -EIO;

	if (!get_capacity(disk))
		return -EIO;

	/*
	 * Ensure that all memory allocations in this context are done as if
	 * GFP_NOIO was specified.
	 */
	noio_flag = memalloc_noio_save();
	ret = disk->fops->report_zones(disk, 0, UINT_MAX,
				       blk_revalidate_zone_cb, &args);
	if (!ret) {
		pr_warn("%s: No zones reported\n", disk->disk_name);
		ret = -ENODEV;
	}
	memalloc_noio_restore(noio_flag);

	/*
	 * If zones were reported, make sure that the entire disk capacity
	 * has been checked.
	 */
	if (ret > 0 && args.sector != get_capacity(disk)) {
		pr_warn("%s: Missing zones from sector %llu\n",
			disk->disk_name, args.sector);
		ret = -ENODEV;
	}

	/*
	 * Install the new bitmaps and update nr_zones only once the queue is
	 * stopped and all I/Os are completed (i.e. a scheduler is not
	 * referencing the bitmaps).
	 */
	blk_mq_freeze_queue(q);
	if (ret > 0) {
		blk_queue_chunk_sectors(q, args.zone_sectors);
		q->nr_zones = args.nr_zones;
		swap(q->seq_zones_wlock, args.seq_zones_wlock);
		swap(q->conv_zones_bitmap, args.conv_zones_bitmap);
		if (update_driver_data)
			update_driver_data(disk);
		ret = 0;
	} else {
		pr_warn("%s: failed to revalidate zones\n", disk->disk_name);
		blk_queue_free_zone_bitmaps(q);
	}
	blk_mq_unfreeze_queue(q);

	kfree(args.seq_zones_wlock);
	kfree(args.conv_zones_bitmap);
	return ret;
}
EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones);
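
/*
 * Example (illustrative sketch only, not part of this file): how a blk-mq
 * based zoned driver might call blk_revalidate_disk_zones() once its
 * capacity and zone model are set up. Both example_update_data() and
 * example_driver_revalidate() are hypothetical.
 *
 *	static void example_update_data(struct gendisk *disk)
 *	{
 *		// Called with the request queue frozen, after all zones
 *		// have been checked: resize driver-private per-zone data
 *		// here based on the updated disk->queue->nr_zones.
 *	}
 *
 *	static int example_driver_revalidate(struct gendisk *disk)
 *	{
 *		return blk_revalidate_disk_zones(disk, example_update_data);
 *	}
 */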

void blk_queue_clear_zone_settings(struct request_queue *q)
{
	blk_mq_freeze_queue(q);

	blk_queue_free_zone_bitmaps(q);
	blk_queue_flag_clear(QUEUE_FLAG_ZONE_RESETALL, q);
	q->required_elevator_features &= ~ELEVATOR_F_ZBD_SEQ_WRITE;
	q->nr_zones = 0;
	q->max_open_zones = 0;
	q->max_active_zones = 0;
	q->limits.chunk_sectors = 0;
	q->limits.zone_write_granularity = 0;
	q->limits.max_zone_append_sectors = 0;

	blk_mq_unfreeze_queue(q);
}