// SPDX-License-Identifier: GPL-2.0

#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/sched/mm.h>
#include <linux/atomic.h>
#include <linux/vmalloc.h>
#include "ctree.h"
#include "volumes.h"
#include "zoned.h"
#include "rcu-string.h"
#include "disk-io.h"
#include "block-group.h"
#include "transaction.h"
#include "dev-replace.h"
#include "space-info.h"

/* Maximum number of zones to report per blkdev_report_zones() call */
#define BTRFS_REPORT_NR_ZONES		4096
/* Invalid allocation pointer value for missing devices */
#define WP_MISSING_DEV			((u64)-1)
/* Pseudo write pointer value for conventional zone */
#define WP_CONVENTIONAL			((u64)-2)

/*
 * Location of the first zone of superblock logging zone pairs.
 *
 * - primary superblock: 0B (zone 0)
 * - first copy: 512G (zone starting at that offset)
 * - second copy: 4T (zone starting at that offset)
 */
#define BTRFS_SB_LOG_PRIMARY_OFFSET	(0ULL)
#define BTRFS_SB_LOG_FIRST_OFFSET	(512ULL * SZ_1G)
#define BTRFS_SB_LOG_SECOND_OFFSET	(4096ULL * SZ_1G)

#define BTRFS_SB_LOG_FIRST_SHIFT	const_ilog2(BTRFS_SB_LOG_FIRST_OFFSET)
#define BTRFS_SB_LOG_SECOND_SHIFT	const_ilog2(BTRFS_SB_LOG_SECOND_OFFSET)

/* Number of superblock log zones */
#define BTRFS_NR_SB_LOG_ZONES		2

/*
 * Minimum number of active zones we need:
 *
 * - BTRFS_SUPER_MIRROR_MAX zones for superblock mirrors
 * - 3 zones to ensure at least one zone per SYSTEM, META and DATA block group
 * - 1 zone for tree-log dedicated block group
 * - 1 zone for relocation
 */
#define BTRFS_MIN_ACTIVE_ZONES		(BTRFS_SUPER_MIRROR_MAX + 5)

/*
 * Maximum supported zone size. Currently, SMR disks have a zone size of
 * 256MiB, and we are expecting ZNS drives to be in the 1-4GiB range. We do not
 * expect the zone size to become larger than 8GiB in the near future.
 */
#define BTRFS_MAX_ZONE_SIZE		SZ_8G

#define SUPER_INFO_SECTORS	((u64)BTRFS_SUPER_INFO_SIZE >> SECTOR_SHIFT)

static inline bool sb_zone_is_full(const struct blk_zone *zone)
{
	return (zone->cond == BLK_ZONE_COND_FULL) ||
		(zone->wp + SUPER_INFO_SECTORS > zone->start + zone->capacity);
}

static int copy_zone_info_cb(struct blk_zone *zone, unsigned int idx, void *data)
{
	struct blk_zone *zones = data;

	memcpy(&zones[idx], zone, sizeof(*zone));

	return 0;
}

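/*
 * Determine the write position of the latest superblock in a zone pair.
 *
 * Returns 0 and sets *wp_ret to the byte offset to use next, -ENOENT when
 * both zones are empty (no superblock written yet), or -EUCLEAN when the
 * pair is in a state the table below marks as invalid.
 */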
static int sb_write_pointer(struct block_device *bdev, struct blk_zone *zones,
			    u64 *wp_ret)
{
	bool empty[BTRFS_NR_SB_LOG_ZONES];
	bool full[BTRFS_NR_SB_LOG_ZONES];
	sector_t sector;
	int i;

	for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
		ASSERT(zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL);
		empty[i] = (zones[i].cond == BLK_ZONE_COND_EMPTY);
		full[i] = sb_zone_is_full(&zones[i]);
	}

	/*
	 * Possible states of log buffer zones
	 *
	 *           Empty[0]  In use[0]  Full[0]
	 * Empty[1]         *          x        0
	 * In use[1]        0          x        0
	 * Full[1]          1          1        C
	 *
	 * Log position:
	 *   *: Special case, no superblock is written
	 *   0: Use write pointer of zones[0]
	 *   1: Use write pointer of zones[1]
	 *   C: Compare super blocks from zones[0] and zones[1], use the latest
	 *      one determined by generation
	 *   x: Invalid state
	 */

	if (empty[0] && empty[1]) {
		/* Special case to distinguish no superblock to read */
		*wp_ret = zones[0].start << SECTOR_SHIFT;
		return -ENOENT;
	} else if (full[0] && full[1]) {
		/* Compare two super blocks */
		struct address_space *mapping = bdev->bd_inode->i_mapping;
		struct page *page[BTRFS_NR_SB_LOG_ZONES];
		struct btrfs_super_block *super[BTRFS_NR_SB_LOG_ZONES];
		int i;

		for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
			u64 bytenr;

			bytenr = ((zones[i].start + zones[i].len)
				   << SECTOR_SHIFT) - BTRFS_SUPER_INFO_SIZE;

			page[i] = read_cache_page_gfp(mapping,
					bytenr >> PAGE_SHIFT, GFP_NOFS);
			if (IS_ERR(page[i])) {
				if (i == 1)
					btrfs_release_disk_super(super[0]);
				return PTR_ERR(page[i]);
			}
			super[i] = page_address(page[i]);
		}

		if (super[0]->generation > super[1]->generation)
			sector = zones[1].start;
		else
			sector = zones[0].start;

		for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++)
			btrfs_release_disk_super(super[i]);
	} else if (!full[0] && (empty[1] || full[1])) {
		sector = zones[0].wp;
	} else if (full[0]) {
		sector = zones[1].wp;
	} else {
		return -EUCLEAN;
	}
	*wp_ret = sector << SECTOR_SHIFT;
	return 0;
}

/*
 * Get the first zone number of the superblock mirror
 */
static inline u32 sb_zone_number(int shift, int mirror)
{
	u64 zone = 0;

	ASSERT(mirror < BTRFS_SUPER_MIRROR_MAX);
	switch (mirror) {
	case 0: zone = 0; break;
	case 1: zone = 1ULL << (BTRFS_SB_LOG_FIRST_SHIFT - shift); break;
	case 2: zone = 1ULL << (BTRFS_SB_LOG_SECOND_SHIFT - shift); break;
	}

	ASSERT(zone <= U32_MAX);

	return (u32)zone;
}

static inline sector_t zone_start_sector(u32 zone_number,
					 struct block_device *bdev)
{
	return (sector_t)zone_number << ilog2(bdev_zone_sectors(bdev));
}

static inline u64 zone_start_physical(u32 zone_number,
				      struct btrfs_zoned_device_info *zone_info)
{
	return (u64)zone_number << zone_info->zone_size_shift;
}

/*
 * Emulate blkdev_report_zones() for a non-zoned device. It slices up the block
 * device into static sized chunks and fakes a conventional zone on each of
 * them.
 */
static int emulate_report_zones(struct btrfs_device *device, u64 pos,
				struct blk_zone *zones, unsigned int nr_zones)
{
	const sector_t zone_sectors = device->fs_info->zone_size >> SECTOR_SHIFT;
	sector_t bdev_size = bdev_nr_sectors(device->bdev);
	unsigned int i;

	pos >>= SECTOR_SHIFT;
	for (i = 0; i < nr_zones; i++) {
		zones[i].start = i * zone_sectors + pos;
		zones[i].len = zone_sectors;
		zones[i].capacity = zone_sectors;
		zones[i].wp = zones[i].start + zone_sectors;
		zones[i].type = BLK_ZONE_TYPE_CONVENTIONAL;
		zones[i].cond = BLK_ZONE_COND_NOT_WP;

		if (zones[i].wp >= bdev_size) {
			i++;
			break;
		}
	}

	return i;
}

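/*
 * Report zones starting at @pos. Zone descriptors are taken from the
 * device's zone cache when every requested zone is cached; otherwise they
 * are read from the device (or emulated for a non-zoned one) and the cache
 * is refreshed. On return, *nr_zones holds the number of zones reported.
 */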
static int btrfs_get_dev_zones(struct btrfs_device *device, u64 pos,
			       struct blk_zone *zones, unsigned int *nr_zones)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;
	u32 zno;
	int ret;

	if (!*nr_zones)
		return 0;

	if (!bdev_is_zoned(device->bdev)) {
		ret = emulate_report_zones(device, pos, zones, *nr_zones);
		*nr_zones = ret;
		return 0;
	}

	/* Check cache */
	if (zinfo->zone_cache) {
		unsigned int i;

		ASSERT(IS_ALIGNED(pos, zinfo->zone_size));
		zno = pos >> zinfo->zone_size_shift;
		/*
		 * We cannot report zones beyond the zone end. So, it is OK to
		 * cap *nr_zones to the number of zones left from zno.
		 */
		*nr_zones = min_t(u32, *nr_zones, zinfo->nr_zones - zno);

		for (i = 0; i < *nr_zones; i++) {
			struct blk_zone *zone_info;

			zone_info = &zinfo->zone_cache[zno + i];
			if (!zone_info->len)
				break;
		}

		if (i == *nr_zones) {
			/* Cache hit on all the zones */
			memcpy(zones, zinfo->zone_cache + zno,
			       sizeof(*zinfo->zone_cache) * *nr_zones);
			return 0;
		}
	}

	ret = blkdev_report_zones(device->bdev, pos >> SECTOR_SHIFT, *nr_zones,
				  copy_zone_info_cb, zones);
	if (ret < 0) {
		btrfs_err_in_rcu(device->fs_info,
				 "zoned: failed to read zone %llu on %s (devid %llu)",
				 pos, rcu_str_deref(device->name),
				 device->devid);
		return ret;
	}
	*nr_zones = ret;
	if (!ret)
		return -EIO;

	/* Populate cache */
	if (zinfo->zone_cache)
		memcpy(zinfo->zone_cache + zno, zones,
		       sizeof(*zinfo->zone_cache) * *nr_zones);

	return 0;
}

/* The emulated zone size is determined from the size of device extent */
static int calculate_emulated_zone_size(struct btrfs_fs_info *fs_info)
{
	struct btrfs_path *path;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_dev_extent *dext;
	int ret = 0;

	key.objectid = 1;
	key.type = BTRFS_DEV_EXTENT_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
		ret = btrfs_next_leaf(root, path);
		if (ret < 0)
			goto out;
		/* No dev extents at all? Not good */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}

	leaf = path->nodes[0];
	dext = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent);
	fs_info->zone_size = btrfs_dev_extent_length(leaf, dext);
	ret = 0;

out:
	btrfs_free_path(path);

	return ret;
}

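/*
 * Populate the zone information for every device of the filesystem. Missing
 * devices are skipped, as there is no block device to read zones from.
 */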
int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int ret = 0;

	/* fs_info->zone_size might not be set yet. Use the incompat flag here. */
	if (!btrfs_fs_incompat(fs_info, ZONED))
		return 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		/* We can skip reading of zone info for missing devices */
		if (!device->bdev)
			continue;

		ret = btrfs_get_dev_zone_info(device, true);
		if (ret)
			break;
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}

int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_zoned_device_info *zone_info = NULL;
	struct block_device *bdev = device->bdev;
	unsigned int max_active_zones;
	unsigned int nactive;
	sector_t nr_sectors;
	sector_t sector = 0;
	struct blk_zone *zones = NULL;
	unsigned int i, nreported = 0, nr_zones;
	sector_t zone_sectors;
	char *model, *emulated;
	int ret;

	/*
	 * Cannot use btrfs_is_zoned here, since fs_info::zone_size might not
	 * yet be set.
	 */
	if (!btrfs_fs_incompat(fs_info, ZONED))
		return 0;

	if (device->zone_info)
		return 0;

	zone_info = kzalloc(sizeof(*zone_info), GFP_KERNEL);
	if (!zone_info)
		return -ENOMEM;

	device->zone_info = zone_info;

	if (!bdev_is_zoned(bdev)) {
		if (!fs_info->zone_size) {
			ret = calculate_emulated_zone_size(fs_info);
			if (ret)
				goto out;
		}

		ASSERT(fs_info->zone_size);
		zone_sectors = fs_info->zone_size >> SECTOR_SHIFT;
	} else {
		zone_sectors = bdev_zone_sectors(bdev);
	}

	/* Check if it's power of 2 (see is_power_of_2) */
	ASSERT(zone_sectors != 0 && (zone_sectors & (zone_sectors - 1)) == 0);
	zone_info->zone_size = zone_sectors << SECTOR_SHIFT;

	/* We reject devices with a zone size larger than 8GiB */
	if (zone_info->zone_size > BTRFS_MAX_ZONE_SIZE) {
		btrfs_err_in_rcu(fs_info,
		"zoned: %s: zone size %llu larger than supported maximum %llu",
				 rcu_str_deref(device->name),
				 zone_info->zone_size, BTRFS_MAX_ZONE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	nr_sectors = bdev_nr_sectors(bdev);
	zone_info->zone_size_shift = ilog2(zone_info->zone_size);
	zone_info->nr_zones = nr_sectors >> ilog2(zone_sectors);
	if (!IS_ALIGNED(nr_sectors, zone_sectors))
		zone_info->nr_zones++;

	max_active_zones = bdev_max_active_zones(bdev);
	if (max_active_zones && max_active_zones < BTRFS_MIN_ACTIVE_ZONES) {
		btrfs_err_in_rcu(fs_info,
"zoned: %s: max active zones %u is too small, need at least %u active zones",
				 rcu_str_deref(device->name), max_active_zones,
				 BTRFS_MIN_ACTIVE_ZONES);
		ret = -EINVAL;
		goto out;
	}
	zone_info->max_active_zones = max_active_zones;

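	/*
	 * Track per-zone state in three bitmaps: zones that require
	 * sequential writes, zones that are currently empty, and zones
	 * that are active on the device.
	 */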
	zone_info->seq_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
	if (!zone_info->seq_zones) {
		ret = -ENOMEM;
		goto out;
	}

	zone_info->empty_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
	if (!zone_info->empty_zones) {
		ret = -ENOMEM;
		goto out;
	}

	zone_info->active_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
	if (!zone_info->active_zones) {
		ret = -ENOMEM;
		goto out;
	}

	zones = kcalloc(BTRFS_REPORT_NR_ZONES, sizeof(struct blk_zone), GFP_KERNEL);
	if (!zones) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Enable zone cache only for a zoned device. On a non-zoned device, we
	 * fill the zone info with emulated CONVENTIONAL zones, so no need to
	 * use the cache.
	 */
	if (populate_cache && bdev_is_zoned(device->bdev)) {
		zone_info->zone_cache = vzalloc(sizeof(struct blk_zone) *
						zone_info->nr_zones);
		if (!zone_info->zone_cache) {
			btrfs_err_in_rcu(device->fs_info,
				"zoned: failed to allocate zone cache for %s",
				rcu_str_deref(device->name));
			ret = -ENOMEM;
			goto out;
		}
	}

	/* Get zone types */
	nactive = 0;
	while (sector < nr_sectors) {
		nr_zones = BTRFS_REPORT_NR_ZONES;
		ret = btrfs_get_dev_zones(device, sector << SECTOR_SHIFT, zones,
					  &nr_zones);
		if (ret)
			goto out;

		for (i = 0; i < nr_zones; i++) {
			if (zones[i].type == BLK_ZONE_TYPE_SEQWRITE_REQ)
				__set_bit(nreported, zone_info->seq_zones);
			switch (zones[i].cond) {
			case BLK_ZONE_COND_EMPTY:
				__set_bit(nreported, zone_info->empty_zones);
				break;
			case BLK_ZONE_COND_IMP_OPEN:
			case BLK_ZONE_COND_EXP_OPEN:
			case BLK_ZONE_COND_CLOSED:
				__set_bit(nreported, zone_info->active_zones);
				nactive++;
				break;
			}
			nreported++;
		}
		sector = zones[nr_zones - 1].start + zones[nr_zones - 1].len;
	}

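	/*
	 * A report covering fewer zones than the device size implies means
	 * the zone information is inconsistent; treat it as an I/O error.
	 */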
	if (nreported != zone_info->nr_zones) {
		btrfs_err_in_rcu(device->fs_info,
				 "inconsistent number of zones on %s (%u/%u)",
				 rcu_str_deref(device->name), nreported,
				 zone_info->nr_zones);
		ret = -EIO;
		goto out;
	}

	if (max_active_zones) {
		if (nactive > max_active_zones) {
			btrfs_err_in_rcu(device->fs_info,
			"zoned: %u active zones on %s exceeds max_active_zones %u",
					 nactive, rcu_str_deref(device->name),
					 max_active_zones);
			ret = -EIO;
			goto out;
		}
		atomic_set(&zone_info->active_zones_left,
			   max_active_zones - nactive);
	}

	/* Validate superblock log */
	nr_zones = BTRFS_NR_SB_LOG_ZONES;
	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		u32 sb_zone;
		u64 sb_wp;
		int sb_pos = BTRFS_NR_SB_LOG_ZONES * i;

		sb_zone = sb_zone_number(zone_info->zone_size_shift, i);
		if (sb_zone + 1 >= zone_info->nr_zones)
			continue;

		ret = btrfs_get_dev_zones(device,
					  zone_start_physical(sb_zone, zone_info),
					  &zone_info->sb_zones[sb_pos],
					  &nr_zones);
		if (ret)
			goto out;

		if (nr_zones != BTRFS_NR_SB_LOG_ZONES) {
			btrfs_err_in_rcu(device->fs_info,
	"zoned: failed to read super block log zone info at devid %llu zone %u",
					 device->devid, sb_zone);
			ret = -EUCLEAN;
			goto out;
		}

		/*
		 * If zones[0] is conventional, always use the beginning of the
		 * zone to record superblock. No need to validate in that case.
		 */
		if (zone_info->sb_zones[BTRFS_NR_SB_LOG_ZONES * i].type ==
		    BLK_ZONE_TYPE_CONVENTIONAL)
			continue;

		ret = sb_write_pointer(device->bdev,
				       &zone_info->sb_zones[sb_pos], &sb_wp);
		if (ret != -ENOENT && ret) {
			btrfs_err_in_rcu(device->fs_info,
			"zoned: super block log zone corrupted devid %llu zone %u",
					 device->devid, sb_zone);
			ret = -EUCLEAN;
			goto out;
		}
	}

	kfree(zones);

	switch (bdev_zoned_model(bdev)) {
	case BLK_ZONED_HM:
		model = "host-managed zoned";
		emulated = "";
		break;
	case BLK_ZONED_HA:
		model = "host-aware zoned";
		emulated = "";
		break;
	case BLK_ZONED_NONE:
		model = "regular";
		emulated = "emulated ";
		break;
	default:
		/* Just in case */
		btrfs_err_in_rcu(fs_info, "zoned: unsupported model %d on %s",
				 bdev_zoned_model(bdev),
				 rcu_str_deref(device->name));
		ret = -EOPNOTSUPP;
		goto out_free_zone_info;
	}

	btrfs_info_in_rcu(fs_info,
		"%s block device %s, %u %szones of %llu bytes",
		model, rcu_str_deref(device->name), zone_info->nr_zones,
		emulated, zone_info->zone_size);

	return 0;

out:
	kfree(zones);
out_free_zone_info:
	btrfs_destroy_dev_zone_info(device);

	return ret;
}

void btrfs_destroy_dev_zone_info(struct btrfs_device *device)
{
	struct btrfs_zoned_device_info *zone_info = device->zone_info;

	if (!zone_info)
		return;

	bitmap_free(zone_info->active_zones);
	bitmap_free(zone_info->seq_zones);
	bitmap_free(zone_info->empty_zones);
	vfree(zone_info->zone_cache);
	kfree(zone_info);
	device->zone_info = NULL;
}

int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
		       struct blk_zone *zone)
{
	unsigned int nr_zones = 1;
	int ret;

	ret = btrfs_get_dev_zones(device, pos, zone, &nr_zones);
	if (ret != 0 || !nr_zones)
		return ret ? ret : -EIO;

	return 0;
}

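/*
 * Verify that the filesystem can run in zoned mode: all devices must share
 * one zone size, zoned and regular devices cannot be mixed, and the zone
 * size must be aligned to BTRFS_STRIPE_LEN.
 */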
int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	u64 zoned_devices = 0;
	u64 nr_devices = 0;
	u64 zone_size = 0;
	const bool incompat_zoned = btrfs_fs_incompat(fs_info, ZONED);
	int ret = 0;

	/* Count zoned devices */
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		enum blk_zoned_model model;

		if (!device->bdev)
			continue;

		model = bdev_zoned_model(device->bdev);
		/*
		 * A host-managed zoned device must be used as a zoned device.
		 * A host-aware zoned device and a non-zoned device can be
		 * treated as a zoned device, if the ZONED flag is enabled in
		 * the superblock.
		 */
		if (model == BLK_ZONED_HM ||
		    (model == BLK_ZONED_HA && incompat_zoned) ||
		    (model == BLK_ZONED_NONE && incompat_zoned)) {
			struct btrfs_zoned_device_info *zone_info;

			zone_info = device->zone_info;
			zoned_devices++;
			if (!zone_size) {
				zone_size = zone_info->zone_size;
			} else if (zone_info->zone_size != zone_size) {
				btrfs_err(fs_info,
		"zoned: unequal block device zone sizes: have %llu found %llu",
					  zone_size,
					  device->zone_info->zone_size);
				ret = -EINVAL;
				goto out;
			}
		}
		nr_devices++;
	}

	if (!zoned_devices && !incompat_zoned)
		goto out;

	if (!zoned_devices && incompat_zoned) {
		/* No zoned block device found on ZONED filesystem */
		btrfs_err(fs_info,
			  "zoned: no zoned devices found on a zoned filesystem");
		ret = -EINVAL;
		goto out;
	}

	if (zoned_devices && !incompat_zoned) {
		btrfs_err(fs_info,
			  "zoned: mode not enabled but zoned device found");
		ret = -EINVAL;
		goto out;
	}

	if (zoned_devices != nr_devices) {
		btrfs_err(fs_info,
			  "zoned: cannot mix zoned and regular devices");
		ret = -EINVAL;
		goto out;
	}

	/*
	 * stripe_size is always aligned to BTRFS_STRIPE_LEN in
	 * btrfs_create_chunk(). Since we want stripe_len == zone_size,
	 * check the alignment here.
	 */
	if (!IS_ALIGNED(zone_size, BTRFS_STRIPE_LEN)) {
		btrfs_err(fs_info,
			  "zoned: zone size %llu not aligned to stripe %u",
			  zone_size, BTRFS_STRIPE_LEN);
		ret = -EINVAL;
		goto out;
	}

	if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
		btrfs_err(fs_info, "zoned: mixed block groups not supported");
		ret = -EINVAL;
		goto out;
	}

	fs_info->zone_size = zone_size;
	fs_info->fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_ZONED;

	/*
	 * Check mount options here, because we might change fs_info->zoned
	 * from fs_info->zone_size.
	 */
	ret = btrfs_check_mountopts_zoned(fs_info);
	if (ret)
		goto out;

	btrfs_info(fs_info, "zoned mode enabled with zone size %llu", zone_size);
out:
	return ret;
}

int btrfs_check_mountopts_zoned(struct btrfs_fs_info *info)
{
	if (!btrfs_is_zoned(info))
		return 0;

	/*
	 * Space cache writing is not COWed. Disable that to avoid write errors
	 * in sequential zones.
	 */
	if (btrfs_test_opt(info, SPACE_CACHE)) {
		btrfs_err(info, "zoned: space cache v1 is not supported");
		return -EINVAL;
	}

	if (btrfs_test_opt(info, NODATACOW)) {
		btrfs_err(info, "zoned: NODATACOW not supported");
		return -EINVAL;
	}

	return 0;
}

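/*
 * Pick the byte offset for reading or writing a superblock in a zone pair.
 * For WRITE, a zone holding only a stale superblock copy is reset so logging
 * can continue; for READ, the position of the most recent copy is returned.
 */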
static int sb_log_location(struct block_device *bdev, struct blk_zone *zones,
			   int rw, u64 *bytenr_ret)
{
	u64 wp;
	int ret;

	if (zones[0].type == BLK_ZONE_TYPE_CONVENTIONAL) {
		*bytenr_ret = zones[0].start << SECTOR_SHIFT;
		return 0;
	}

	ret = sb_write_pointer(bdev, zones, &wp);
	if (ret != -ENOENT && ret < 0)
		return ret;

	if (rw == WRITE) {
		struct blk_zone *reset = NULL;

		if (wp == zones[0].start << SECTOR_SHIFT)
			reset = &zones[0];
		else if (wp == zones[1].start << SECTOR_SHIFT)
			reset = &zones[1];

		if (reset && reset->cond != BLK_ZONE_COND_EMPTY) {
			ASSERT(sb_zone_is_full(reset));

			ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
					       reset->start, reset->len,
					       GFP_NOFS);
			if (ret)
				return ret;

			reset->cond = BLK_ZONE_COND_EMPTY;
			reset->wp = reset->start;
		}
	} else if (ret != -ENOENT) {
		/*
		 * For READ, we want the previous one. Move write pointer to
		 * the end of a zone, if it is at the head of a zone.
		 */
		u64 zone_end = 0;

		if (wp == zones[0].start << SECTOR_SHIFT)
			zone_end = zones[1].start + zones[1].capacity;
		else if (wp == zones[1].start << SECTOR_SHIFT)
			zone_end = zones[0].start + zones[0].capacity;
		if (zone_end)
			wp = ALIGN_DOWN(zone_end << SECTOR_SHIFT,
					BTRFS_SUPER_INFO_SIZE);

		wp -= BTRFS_SUPER_INFO_SIZE;
	}

	*bytenr_ret = wp;
	return 0;
}

int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,
			       u64 *bytenr_ret)
{
	struct blk_zone zones[BTRFS_NR_SB_LOG_ZONES];
	sector_t zone_sectors;
	u32 sb_zone;
	int ret;
	u8 zone_sectors_shift;
	sector_t nr_sectors;
	u32 nr_zones;

	if (!bdev_is_zoned(bdev)) {
		*bytenr_ret = btrfs_sb_offset(mirror);
		return 0;
	}

	ASSERT(rw == READ || rw == WRITE);

	zone_sectors = bdev_zone_sectors(bdev);
	if (!is_power_of_2(zone_sectors))
		return -EINVAL;
	zone_sectors_shift = ilog2(zone_sectors);
	nr_sectors = bdev_nr_sectors(bdev);
	nr_zones = nr_sectors >> zone_sectors_shift;

	sb_zone = sb_zone_number(zone_sectors_shift + SECTOR_SHIFT, mirror);
	if (sb_zone + 1 >= nr_zones)
		return -ENOENT;

	ret = blkdev_report_zones(bdev, zone_start_sector(sb_zone, bdev),
				  BTRFS_NR_SB_LOG_ZONES, copy_zone_info_cb,
				  zones);
	if (ret < 0)
		return ret;
	if (ret != BTRFS_NR_SB_LOG_ZONES)
		return -EIO;

	return sb_log_location(bdev, zones, rw, bytenr_ret);
}

int btrfs_sb_log_location(struct btrfs_device *device, int mirror, int rw,
			  u64 *bytenr_ret)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;
	u32 zone_num;

	/*
	 * For a zoned filesystem on a non-zoned block device, use the same
	 * super block locations as a regular filesystem. Doing so, the super
	 * block can always be retrieved and the zoned flag of the volume
	 * detected from the super block information.
	 */
	if (!bdev_is_zoned(device->bdev)) {
		*bytenr_ret = btrfs_sb_offset(mirror);
		return 0;
	}

	zone_num = sb_zone_number(zinfo->zone_size_shift, mirror);
	if (zone_num + 1 >= zinfo->nr_zones)
		return -ENOENT;

	return sb_log_location(device->bdev,
			       &zinfo->sb_zones[BTRFS_NR_SB_LOG_ZONES * mirror],
			       rw, bytenr_ret);
}

static inline bool is_sb_log_zone(struct btrfs_zoned_device_info *zinfo,
				  int mirror)
{
	u32 zone_num;

	if (!zinfo)
		return false;

	zone_num = sb_zone_number(zinfo->zone_size_shift, mirror);
	if (zone_num + 1 >= zinfo->nr_zones)
		return false;

	if (!test_bit(zone_num, zinfo->seq_zones))
		return false;

	return true;
}

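/*
 * Advance the write pointer bookkeeping of the superblock log zones after a
 * superblock has been written, finishing a zone once there is no room left
 * for another superblock copy.
 */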
int btrfs_advance_sb_log(struct btrfs_device *device, int mirror)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;
	struct blk_zone *zone;
	int i;

	if (!is_sb_log_zone(zinfo, mirror))
		return 0;

	zone = &zinfo->sb_zones[BTRFS_NR_SB_LOG_ZONES * mirror];
	for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
		/* Advance the next zone */
		if (zone->cond == BLK_ZONE_COND_FULL) {
			zone++;
			continue;
		}

		if (zone->cond == BLK_ZONE_COND_EMPTY)
			zone->cond = BLK_ZONE_COND_IMP_OPEN;

		zone->wp += SUPER_INFO_SECTORS;

		if (sb_zone_is_full(zone)) {
			/*
			 * No room left to write new superblock. Since
			 * superblock is written with REQ_SYNC, it is safe to
			 * finish the zone now.
			 *
			 * If the write pointer is exactly at the capacity,
			 * explicit ZONE_FINISH is not necessary.
			 */
			if (zone->wp != zone->start + zone->capacity) {
				int ret;

				ret = blkdev_zone_mgmt(device->bdev,
						REQ_OP_ZONE_FINISH, zone->start,
						zone->len, GFP_NOFS);
				if (ret)
					return ret;
			}

			zone->wp = zone->start + zone->len;
			zone->cond = BLK_ZONE_COND_FULL;
		}
		return 0;
	}

	/* All the zones are FULL. Should not reach here. */
	ASSERT(0);
	return -EIO;
}

int btrfs_reset_sb_log_zones(struct block_device *bdev, int mirror)
{
	sector_t zone_sectors;
	sector_t nr_sectors;
	u8 zone_sectors_shift;
	u32 sb_zone;
	u32 nr_zones;

	zone_sectors = bdev_zone_sectors(bdev);
	zone_sectors_shift = ilog2(zone_sectors);
	nr_sectors = bdev_nr_sectors(bdev);
	nr_zones = nr_sectors >> zone_sectors_shift;

	sb_zone = sb_zone_number(zone_sectors_shift + SECTOR_SHIFT, mirror);
	if (sb_zone + 1 >= nr_zones)
		return -ENOENT;

	return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
				zone_start_sector(sb_zone, bdev),
				zone_sectors * BTRFS_NR_SB_LOG_ZONES, GFP_NOFS);
}

/**
 * btrfs_find_allocatable_zones - find allocatable zones within a given region
 *
 * @device:	the device to allocate a region on
 * @hole_start: the position of the hole to allocate the region
 * @hole_end:	the end of the hole
 * @num_bytes:	size of wanted region
 *
 * Return: position of allocatable zones
 *
 * Allocatable region should not contain any superblock locations.
 */
u64 btrfs_find_allocatable_zones(struct btrfs_device *device, u64 hole_start,
				 u64 hole_end, u64 num_bytes)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;
	const u8 shift = zinfo->zone_size_shift;
	u64 nzones = num_bytes >> shift;
	u64 pos = hole_start;
	u64 begin, end;
	bool have_sb;
	int i;

	ASSERT(IS_ALIGNED(hole_start, zinfo->zone_size));
	ASSERT(IS_ALIGNED(num_bytes, zinfo->zone_size));

	while (pos < hole_end) {
		begin = pos >> shift;
		end = begin + nzones;

		if (end > zinfo->nr_zones)
			return hole_end;

		/* Check if zones in the region are all empty */
		if (btrfs_dev_is_sequential(device, pos) &&
		    find_next_zero_bit(zinfo->empty_zones, end, begin) != end) {
			pos += zinfo->zone_size;
			continue;
		}

		have_sb = false;
		for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
			u32 sb_zone;
			u64 sb_pos;

			sb_zone = sb_zone_number(shift, i);
			if (!(end <= sb_zone ||
			      sb_zone + BTRFS_NR_SB_LOG_ZONES <= begin)) {
				have_sb = true;
				pos = zone_start_physical(
					sb_zone + BTRFS_NR_SB_LOG_ZONES, zinfo);
				break;
			}

			/* We also need to exclude regular superblock positions */
			sb_pos = btrfs_sb_offset(i);
			if (!(pos + num_bytes <= sb_pos ||
			      sb_pos + BTRFS_SUPER_INFO_SIZE <= pos)) {
				have_sb = true;
				pos = ALIGN(sb_pos + BTRFS_SUPER_INFO_SIZE,
					    zinfo->zone_size);
				break;
			}
		}
		if (!have_sb)
			break;
	}

	return pos;
}

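/*
 * Reserve one of the device's active zone slots for the zone at @pos.
 * Returns false when the device is out of active zone resources.
 */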
static bool btrfs_dev_set_active_zone(struct btrfs_device *device, u64 pos)
{
	struct btrfs_zoned_device_info *zone_info = device->zone_info;
	unsigned int zno = (pos >> zone_info->zone_size_shift);

	/* We can use any number of zones */
	if (zone_info->max_active_zones == 0)
		return true;

	if (!test_bit(zno, zone_info->active_zones)) {
		/* Active zone left? */
		if (atomic_dec_if_positive(&zone_info->active_zones_left) < 0)
			return false;
		if (test_and_set_bit(zno, zone_info->active_zones)) {
			/* Someone already set the bit */
			atomic_inc(&zone_info->active_zones_left);
		}
	}

	return true;
}

static void btrfs_dev_clear_active_zone(struct btrfs_device *device, u64 pos)
{
	struct btrfs_zoned_device_info *zone_info = device->zone_info;
	unsigned int zno = (pos >> zone_info->zone_size_shift);

	/* We can use any number of zones */
	if (zone_info->max_active_zones == 0)
		return;

	if (test_and_clear_bit(zno, zone_info->active_zones))
		atomic_inc(&zone_info->active_zones_left);
}

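/*
 * Reset every zone backing [@physical, @physical + @length) and mark the
 * zones empty and inactive. *bytes returns the number of bytes reset.
 */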
int btrfs_reset_device_zone(struct btrfs_device *device, u64 physical,
			    u64 length, u64 *bytes)
{
	int ret;

	*bytes = 0;
	ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_RESET,
			       physical >> SECTOR_SHIFT, length >> SECTOR_SHIFT,
			       GFP_NOFS);
	if (ret)
		return ret;

	*bytes = length;
	while (length) {
		btrfs_dev_set_zone_empty(device, physical);
		btrfs_dev_clear_active_zone(device, physical);
		physical += device->zone_info->zone_size;
		length -= device->zone_info->zone_size;
	}

	return 0;
}

int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;
	const u8 shift = zinfo->zone_size_shift;
	unsigned long begin = start >> shift;
	unsigned long end = (start + size) >> shift;
	u64 pos;
	int ret;

	ASSERT(IS_ALIGNED(start, zinfo->zone_size));
	ASSERT(IS_ALIGNED(size, zinfo->zone_size));

	if (end > zinfo->nr_zones)
		return -ERANGE;

	/*
	 * All the zones are conventional. Note that find_next_bit() takes
	 * (addr, size, offset), so the limit comes before the start bit.
	 */
	if (find_next_bit(zinfo->seq_zones, end, begin) == end)
		return 0;

	/* All the zones are sequential and empty */
	if (find_next_zero_bit(zinfo->seq_zones, end, begin) == end &&
	    find_next_zero_bit(zinfo->empty_zones, end, begin) == end)
		return 0;

	for (pos = start; pos < start + size; pos += zinfo->zone_size) {
		u64 reset_bytes;

		if (!btrfs_dev_is_sequential(device, pos) ||
		    btrfs_dev_is_empty_zone(device, pos))
			continue;

		/* Free regions should be empty */
		btrfs_warn_in_rcu(
			device->fs_info,
		"zoned: resetting device %s (devid %llu) zone %llu for allocation",
			rcu_str_deref(device->name), device->devid, pos >> shift);
		WARN_ON_ONCE(1);

		ret = btrfs_reset_device_zone(device, pos, zinfo->zone_size,
					      &reset_bytes);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Calculate an allocation pointer from the extent allocation information
 * for a block group consisting of conventional zones. It points at the end
 * of the highest addressed extent in the block group as the allocation
 * offset.
 */
static int calculate_alloc_pointer(struct btrfs_block_group *cache,
				   u64 *offset_ret)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	u64 length;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = cache->start + cache->length;
	key.type = 0;
	key.offset = 0;

	root = btrfs_extent_root(fs_info, key.objectid);
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	/* We should not find the exact match */
	if (!ret)
		ret = -EUCLEAN;
	if (ret < 0)
		goto out;

	ret = btrfs_previous_extent_item(root, path, cache->start);
	if (ret) {
		if (ret == 1) {
			ret = 0;
			*offset_ret = 0;
		}
		goto out;
	}

	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);

	if (found_key.type == BTRFS_EXTENT_ITEM_KEY)
		length = found_key.offset;
	else
		length = fs_info->nodesize;

	if (!(found_key.objectid >= cache->start &&
	      found_key.objectid + length <= cache->start + cache->length)) {
		ret = -EUCLEAN;
		goto out;
	}
	*offset_ret = found_key.objectid + length - cache->start;
	ret = 0;

out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_device *device;
	u64 logical = cache->start;
	u64 length = cache->length;
	int ret = 0;
	int i;
	unsigned int nofs_flag;
	u64 *alloc_offsets = NULL;
	u64 *caps = NULL;
	u64 *physical = NULL;
	unsigned long *active = NULL;
	u64 last_alloc = 0;
	u32 num_sequential = 0, num_conventional = 0;

	if (!btrfs_is_zoned(fs_info))
		return 0;

	/* Sanity check */
	if (!IS_ALIGNED(length, fs_info->zone_size)) {
		btrfs_err(fs_info,
		"zoned: block group %llu len %llu unaligned to zone size %llu",
			  logical, length, fs_info->zone_size);
		return -EIO;
	}

	/* Get the chunk mapping */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, length);
	read_unlock(&em_tree->lock);

	if (!em)
		return -EINVAL;

	map = em->map_lookup;

	cache->physical_map = kmemdup(map, map_lookup_size(map->num_stripes), GFP_NOFS);
	if (!cache->physical_map) {
		ret = -ENOMEM;
		goto out;
	}

	alloc_offsets = kcalloc(map->num_stripes, sizeof(*alloc_offsets), GFP_NOFS);
	if (!alloc_offsets) {
		ret = -ENOMEM;
		goto out;
	}

	caps = kcalloc(map->num_stripes, sizeof(*caps), GFP_NOFS);
	if (!caps) {
		ret = -ENOMEM;
		goto out;
	}

	physical = kcalloc(map->num_stripes, sizeof(*physical), GFP_NOFS);
	if (!physical) {
		ret = -ENOMEM;
		goto out;
	}

	active = bitmap_zalloc(map->num_stripes, GFP_NOFS);
	if (!active) {
		ret = -ENOMEM;
		goto out;
	}

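	/*
	 * Walk every stripe of the chunk and derive a per-device allocation
	 * offset from the write pointer of the backing zone.
	 */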
	for (i = 0; i < map->num_stripes; i++) {
		bool is_sequential;
		struct blk_zone zone;
		struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
		int dev_replace_is_ongoing = 0;

		device = map->stripes[i].dev;
		physical[i] = map->stripes[i].physical;

		if (device->bdev == NULL) {
			alloc_offsets[i] = WP_MISSING_DEV;
			continue;
		}

		is_sequential = btrfs_dev_is_sequential(device, physical[i]);
		if (is_sequential)
			num_sequential++;
		else
			num_conventional++;

		if (!is_sequential) {
			alloc_offsets[i] = WP_CONVENTIONAL;
			continue;
		}

		/*
		 * This zone will be used for allocation, so mark this zone
		 * non-empty.
		 */
		btrfs_dev_clear_zone_empty(device, physical[i]);

		down_read(&dev_replace->rwsem);
		dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
		if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
			btrfs_dev_clear_zone_empty(dev_replace->tgtdev, physical[i]);
		up_read(&dev_replace->rwsem);

		/*
		 * The group is mapped to a sequential zone. Get the zone write
		 * pointer to determine the allocation offset within the zone.
		 */
		WARN_ON(!IS_ALIGNED(physical[i], fs_info->zone_size));
		nofs_flag = memalloc_nofs_save();
		ret = btrfs_get_dev_zone(device, physical[i], &zone);
		memalloc_nofs_restore(nofs_flag);
		if (ret == -EIO || ret == -EOPNOTSUPP) {
			ret = 0;
			alloc_offsets[i] = WP_MISSING_DEV;
			continue;
		} else if (ret) {
			goto out;
		}

		if (zone.type == BLK_ZONE_TYPE_CONVENTIONAL) {
			btrfs_err_in_rcu(fs_info,
	"zoned: unexpected conventional zone %llu on device %s (devid %llu)",
				zone.start << SECTOR_SHIFT,
				rcu_str_deref(device->name), device->devid);
			ret = -EIO;
			goto out;
		}

		caps[i] = (zone.capacity << SECTOR_SHIFT);

		switch (zone.cond) {
		case BLK_ZONE_COND_OFFLINE:
		case BLK_ZONE_COND_READONLY:
			btrfs_err(fs_info,
		"zoned: offline/readonly zone %llu on device %s (devid %llu)",
				  physical[i] >> device->zone_info->zone_size_shift,
				  rcu_str_deref(device->name), device->devid);
			alloc_offsets[i] = WP_MISSING_DEV;
			break;
		case BLK_ZONE_COND_EMPTY:
			alloc_offsets[i] = 0;
			break;
		case BLK_ZONE_COND_FULL:
			alloc_offsets[i] = caps[i];
			break;
		default:
			/* Partially used zone */
			alloc_offsets[i] =
					((zone.wp - zone.start) << SECTOR_SHIFT);
			__set_bit(i, active);
			break;
		}

		/*
		 * Consider a zone as active if we can allow any number of
		 * active zones.
		 */
		if (!device->zone_info->max_active_zones)
			__set_bit(i, active);
	}

	if (num_sequential > 0)
		cache->seq_zone = true;

	if (num_conventional > 0) {
		/*
		 * Avoid calling calculate_alloc_pointer() for a new BG. It
		 * is no use for a new BG, as the allocation offset must
		 * always be 0 there.
		 *
		 * Also, we have a lock chain of extent buffer lock ->
		 * chunk mutex. For a new BG, this function is called from
		 * btrfs_make_block_group() which is already taking the
		 * chunk mutex. Thus, we cannot call
		 * calculate_alloc_pointer() which takes extent buffer
		 * locks to avoid deadlock.
		 */

		/* Zone capacity is always zone size in emulation */
		cache->zone_capacity = cache->length;
		if (new) {
			cache->alloc_offset = 0;
			goto out;
		}
		ret = calculate_alloc_pointer(cache, &last_alloc);
		if (ret || map->num_stripes == num_conventional) {
			if (!ret)
				cache->alloc_offset = last_alloc;
			else
				btrfs_err(fs_info,
			"zoned: failed to determine allocation offset of bg %llu",
					  cache->start);
			goto out;
		}
	}

	switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
	case 0: /* single */
		if (alloc_offsets[0] == WP_MISSING_DEV) {
			btrfs_err(fs_info,
			"zoned: cannot recover write pointer for zone %llu",
				  physical[0]);
			ret = -EIO;
			goto out;
		}
		cache->alloc_offset = alloc_offsets[0];
		cache->zone_capacity = caps[0];
		cache->zone_is_active = test_bit(0, active);
		break;
	case BTRFS_BLOCK_GROUP_DUP:
		if (map->type & BTRFS_BLOCK_GROUP_DATA) {
			btrfs_err(fs_info, "zoned: profile DUP not yet supported on data bg");
			ret = -EINVAL;
			goto out;
		}
		if (alloc_offsets[0] == WP_MISSING_DEV) {
			btrfs_err(fs_info,
			"zoned: cannot recover write pointer for zone %llu",
				  physical[0]);
			ret = -EIO;
			goto out;
		}
		if (alloc_offsets[1] == WP_MISSING_DEV) {
			btrfs_err(fs_info,
			"zoned: cannot recover write pointer for zone %llu",
				  physical[1]);
			ret = -EIO;
			goto out;
		}
		if (alloc_offsets[0] != alloc_offsets[1]) {
			btrfs_err(fs_info,
			"zoned: write pointer offset mismatch of zones in DUP profile");
			ret = -EIO;
			goto out;
		}
		if (test_bit(0, active) != test_bit(1, active)) {
			if (!btrfs_zone_activate(cache)) {
				ret = -EIO;
				goto out;
			}
		} else {
			cache->zone_is_active = test_bit(0, active);
		}
		cache->alloc_offset = alloc_offsets[0];
		cache->zone_capacity = min(caps[0], caps[1]);
		break;
	case BTRFS_BLOCK_GROUP_RAID1:
	case BTRFS_BLOCK_GROUP_RAID0:
	case BTRFS_BLOCK_GROUP_RAID10:
	case BTRFS_BLOCK_GROUP_RAID5:
	case BTRFS_BLOCK_GROUP_RAID6:
		/* non-single profiles are not supported yet */
	default:
		btrfs_err(fs_info, "zoned: profile %s not yet supported",
			  btrfs_bg_type_to_raid_name(map->type));
		ret = -EINVAL;
		goto out;
	}

	if (cache->zone_is_active) {
		btrfs_get_block_group(cache);
		spin_lock(&fs_info->zone_active_bgs_lock);
		list_add_tail(&cache->active_bg_list, &fs_info->zone_active_bgs);
		spin_unlock(&fs_info->zone_active_bgs_lock);
	}

out:
	if (cache->alloc_offset > fs_info->zone_size) {
		btrfs_err(fs_info,
			"zoned: invalid write pointer %llu in block group %llu",
			cache->alloc_offset, cache->start);
		ret = -EIO;
	}

	if (cache->alloc_offset > cache->zone_capacity) {
		btrfs_err(fs_info,
"zoned: invalid write pointer %llu (larger than zone capacity %llu) in block group %llu",
			  cache->alloc_offset, cache->zone_capacity,
			  cache->start);
		ret = -EIO;
	}

	/* An extent is allocated after the write pointer */
	if (!ret && num_conventional && last_alloc > cache->alloc_offset) {
		btrfs_err(fs_info,
			  "zoned: got wrong write pointer in BG %llu: %llu > %llu",
			  logical, last_alloc, cache->alloc_offset);
		ret = -EIO;
	}

	if (!ret)
		cache->meta_write_pointer = cache->alloc_offset + cache->start;

	if (ret) {
		kfree(cache->physical_map);
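/*
 * Initialize the free space accounting of a loaded zoned block group: the
 * gap between the used bytes and the allocation offset, plus the space
 * beyond the zone capacity, is unusable.
 */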
		cache->physical_map = NULL;
	}
	bitmap_free(active);
	kfree(physical);
	kfree(caps);
	kfree(alloc_offsets);
	free_extent_map(em);

	return ret;
}

void btrfs_calc_zone_unusable(struct btrfs_block_group *cache)
{
	u64 unusable, free;

	if (!btrfs_is_zoned(cache->fs_info))
		return;

	WARN_ON(cache->bytes_super != 0);
	unusable = (cache->alloc_offset - cache->used) +
		   (cache->length - cache->zone_capacity);
	free = cache->zone_capacity - cache->alloc_offset;

	/* We only need ->free_space in ALLOC_SEQ block groups */
	cache->last_byte_to_unpin = (u64)-1;
	cache->cached = BTRFS_CACHE_FINISHED;
	cache->free_space_ctl->free_space = free;
	cache->zone_unusable = unusable;
}

void btrfs_redirty_list_add(struct btrfs_transaction *trans,
			    struct extent_buffer *eb)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;

	if (!btrfs_is_zoned(fs_info) ||
	    btrfs_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN) ||
	    !list_empty(&eb->release_list))
		return;

	set_extent_buffer_dirty(eb);
	set_extent_bits_nowait(&trans->dirty_pages, eb->start,
			       eb->start + eb->len - 1, EXTENT_DIRTY);
	memzero_extent_buffer(eb, 0, eb->len);
	set_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags);

	spin_lock(&trans->releasing_ebs_lock);
	list_add_tail(&eb->release_list, &trans->releasing_ebs);
	spin_unlock(&trans->releasing_ebs_lock);
	atomic_inc(&eb->refs);
}

void btrfs_free_redirty_list(struct btrfs_transaction *trans)
{
	spin_lock(&trans->releasing_ebs_lock);
	while (!list_empty(&trans->releasing_ebs)) {
		struct extent_buffer *eb;

		eb = list_first_entry(&trans->releasing_ebs,
				      struct extent_buffer, release_list);
		list_del_init(&eb->release_list);
		free_extent_buffer(eb);
	}
	spin_unlock(&trans->releasing_ebs_lock);
}

bool btrfs_use_zone_append(struct btrfs_inode *inode, u64 start)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_block_group *cache;
	bool ret = false;

	if (!btrfs_is_zoned(fs_info))
		return false;

	if (!is_data_inode(&inode->vfs_inode))
		return false;

	/*
	 * Using REQ_OP_ZONE_APPEND for relocation can break assumptions on the
	 * extent layout the relocation code has.
	 * Furthermore, we have set aside our own block group from which only
	 * the relocation "process" can allocate, and we make sure only one
	 * process at a time can add pages to an extent that gets relocated,
	 * so it's safe to use regular REQ_OP_WRITE for this special case.
	 */
	if (btrfs_is_data_reloc_root(inode->root))
		return false;

	cache = btrfs_lookup_block_group(fs_info, start);
	ASSERT(cache);
	if (!cache)
		return false;

	ret = cache->seq_zone;
	btrfs_put_block_group(cache);

	return ret;
}

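/*
 * Record the physical location a ZONE_APPEND bio was actually written to in
 * the ordered extent, so the logical address can be rewritten at completion
 * time.
 */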
void btrfs_record_physical_zoned(struct inode *inode, u64 file_offset,
				 struct bio *bio)
{
	struct btrfs_ordered_extent *ordered;
	const u64 physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;

	if (bio_op(bio) != REQ_OP_ZONE_APPEND)
		return;

	ordered = btrfs_lookup_ordered_extent(BTRFS_I(inode), file_offset);
	if (WARN_ON(!ordered))
		return;

	ordered->physical = physical;
	ordered->bdev = bio->bi_bdev;

	btrfs_put_ordered_extent(ordered);
}

void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct btrfs_ordered_sum *sum;
	u64 orig_logical = ordered->disk_bytenr;
	u64 *logical = NULL;
	int nr, stripe_len;

	if (WARN_ON(!ordered->bdev))
		return;

	/* Zoned devices should not have partitions. So, we can assume it is 0 */
	ASSERT(!bdev_is_partition(ordered->bdev));

	if (WARN_ON(btrfs_rmap_block(fs_info, orig_logical, ordered->bdev,
				     ordered->physical, &logical, &nr,
				     &stripe_len)))
		goto out;

	WARN_ON(nr != 1);

	if (orig_logical == *logical)
		goto out;

	ordered->disk_bytenr = *logical;

	em_tree = &inode->extent_tree;
	write_lock(&em_tree->lock);
	em = search_extent_mapping(em_tree, ordered->file_offset,
				   ordered->num_bytes);
	em->block_start = *logical;
	free_extent_map(em);
	write_unlock(&em_tree->lock);

	list_for_each_entry(sum, &ordered->list, list) {
		if (*logical < orig_logical)
			sum->bytenr -= orig_logical - *logical;
		else
			sum->bytenr += *logical - orig_logical;
	}

out:
	kfree(logical);
}

bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb,
				    struct btrfs_block_group **cache_ret)
{
	struct btrfs_block_group *cache;
	bool ret = true;

	if (!btrfs_is_zoned(fs_info))
		return true;

	cache = btrfs_lookup_block_group(fs_info, eb->start);
	if (!cache)
		return true;

	if (cache->meta_write_pointer != eb->start) {
		btrfs_put_block_group(cache);
		cache = NULL;
		ret = false;
	} else {
		cache->meta_write_pointer = eb->start + eb->len;
	}

	*cache_ret = cache;

	return ret;
}

void btrfs_revert_meta_write_pointer(struct btrfs_block_group *cache,
				     struct extent_buffer *eb)
{
	if (!btrfs_is_zoned(eb->fs_info) || !cache)
		return;

	ASSERT(cache->meta_write_pointer == eb->start + eb->len);
	cache->meta_write_pointer = eb->start;
}

int btrfs_zoned_issue_zeroout(struct btrfs_device *device, u64 physical, u64 length)
{
	if (!btrfs_dev_is_sequential(device, physical))
		return -EOPNOTSUPP;

	return blkdev_issue_zeroout(device->bdev, physical >> SECTOR_SHIFT,
				    length >> SECTOR_SHIFT, GFP_NOFS, 0);
}

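/*
 * Read the zone descriptor backing @logical by trying each mirror in turn,
 * skipping missing and failing devices.
 */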
static int read_zone_info(struct btrfs_fs_info *fs_info, u64 logical,
			  struct blk_zone *zone)
{
	struct btrfs_io_context *bioc = NULL;
	u64 mapped_length = PAGE_SIZE;
	unsigned int nofs_flag;
	int nmirrors;
	int i, ret;

	ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
			       &mapped_length, &bioc);
	if (ret || !bioc || mapped_length < PAGE_SIZE) {
		ret = -EIO;
		goto out_put_bioc;
	}

	if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		ret = -EINVAL;
		goto out_put_bioc;
	}

	nofs_flag = memalloc_nofs_save();
	nmirrors = (int)bioc->num_stripes;
	for (i = 0; i < nmirrors; i++) {
		u64 physical = bioc->stripes[i].physical;
		struct btrfs_device *dev = bioc->stripes[i].dev;

		/* Missing device */
		if (!dev->bdev)
			continue;

		ret = btrfs_get_dev_zone(dev, physical, zone);
		/* Failing device */
		if (ret == -EIO || ret == -EOPNOTSUPP)
			continue;
		break;
	}
	memalloc_nofs_restore(nofs_flag);

out_put_bioc:
	btrfs_put_bioc(bioc);
	return ret;
}

/*
 * Synchronize the write pointer in a zone at @physical_start on @tgt_dev, by
 * filling zeros between @physical_pos and the write pointer of the
 * dev-replace source device.
 */
int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev, u64 logical,
				  u64 physical_start, u64 physical_pos)
{
	struct btrfs_fs_info *fs_info = tgt_dev->fs_info;
	struct blk_zone zone;
	u64 length;
	u64 wp;
	int ret;

	if (!btrfs_dev_is_sequential(tgt_dev, physical_pos))
		return 0;

	ret = read_zone_info(fs_info, logical, &zone);
	if (ret)
		return ret;

	wp = physical_start + ((zone.wp - zone.start) << SECTOR_SHIFT);

	if (physical_pos == wp)
		return 0;

	if (physical_pos > wp)
		return -EUCLEAN;

	length = wp - physical_pos;
	return btrfs_zoned_issue_zeroout(tgt_dev, physical_pos, length);
}

struct btrfs_device *btrfs_zoned_get_device(struct btrfs_fs_info *fs_info,
					    u64 logical, u64 length)
{
	struct btrfs_device *device;
	struct extent_map *em;
	struct map_lookup *map;

	em = btrfs_get_chunk_map(fs_info, logical, length);
	if (IS_ERR(em))
		return ERR_CAST(em);

	map = em->map_lookup;
	/* We only support single profile for now */
	device = map->stripes[0].dev;

	free_extent_map(em);

	return device;
}

/**
 * btrfs_zone_activate - activate a block group and its underlying device zones
 *
 * @block_group: the block group to activate
 *
 * Return: true on success, false otherwise
 */
bool btrfs_zone_activate(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct map_lookup *map;
	struct btrfs_device *device;
	u64 physical;
	bool ret;
	int i;

	if (!btrfs_is_zoned(block_group->fs_info))
		return true;

	map = block_group->physical_map;

	spin_lock(&block_group->lock);
	if (block_group->zone_is_active) {
		ret = true;
		goto out_unlock;
	}

	/* No space left */
	if (block_group->alloc_offset == block_group->zone_capacity) {
		ret = false;
		goto out_unlock;
	}

	for (i = 0; i < map->num_stripes; i++) {
		device = map->stripes[i].dev;
		physical = map->stripes[i].physical;

		if (device->zone_info->max_active_zones == 0)
			continue;

		if (!btrfs_dev_set_active_zone(device, physical)) {
			/* Cannot activate the zone */
			ret = false;
			goto out_unlock;
		}
	}

	/* Successfully activated all the zones */
	block_group->zone_is_active = 1;
	spin_unlock(&block_group->lock);

	/* For the active block group list */
	btrfs_get_block_group(block_group);

	spin_lock(&fs_info->zone_active_bgs_lock);
	list_add_tail(&block_group->active_bg_list, &fs_info->zone_active_bgs);
	spin_unlock(&fs_info->zone_active_bgs_lock);

	return true;

out_unlock:
	spin_unlock(&block_group->lock);
	return ret;
}

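/*
 * Finish a block group's zones: temporarily mark the group read-only, wait
 * for in-flight writes, issue ZONE_FINISH on every stripe and release the
 * device's active zone resources. Returns -EAGAIN while the group still has
 * unwritten allocated or reserved space.
 */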
int btrfs_zone_finish(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct map_lookup *map;
	struct btrfs_device *device;
	u64 physical;
	int ret = 0;
	int i;

	if (!btrfs_is_zoned(fs_info))
		return 0;

	map = block_group->physical_map;

	spin_lock(&block_group->lock);
	if (!block_group->zone_is_active) {
		spin_unlock(&block_group->lock);
		return 0;
	}

	/* Check if we have unwritten allocated space */
	if ((block_group->flags &
	     (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM)) &&
	    block_group->alloc_offset > block_group->meta_write_pointer) {
		spin_unlock(&block_group->lock);
		return -EAGAIN;
	}
	spin_unlock(&block_group->lock);

	ret = btrfs_inc_block_group_ro(block_group, false);
	if (ret)
		return ret;

	/* Ensure all writes in this block group finish */
	btrfs_wait_block_group_reservations(block_group);
	/* No need to wait for NOCOW writers. Zoned mode does not allow that. */
	btrfs_wait_ordered_roots(fs_info, U64_MAX, block_group->start,
				 block_group->length);

	spin_lock(&block_group->lock);

	/*
	 * Bail out if someone already deactivated the block group, or
	 * allocated space is left in the block group.
	 */
	if (!block_group->zone_is_active) {
		spin_unlock(&block_group->lock);
		btrfs_dec_block_group_ro(block_group);
		return 0;
	}

	if (block_group->reserved) {
		spin_unlock(&block_group->lock);
		btrfs_dec_block_group_ro(block_group);
		return -EAGAIN;
	}

	block_group->zone_is_active = 0;
	block_group->alloc_offset = block_group->zone_capacity;
	block_group->free_space_ctl->free_space = 0;
	btrfs_clear_treelog_bg(block_group);
	btrfs_clear_data_reloc_bg(block_group);
	spin_unlock(&block_group->lock);

	for (i = 0; i < map->num_stripes; i++) {
		device = map->stripes[i].dev;
		physical = map->stripes[i].physical;

		if (device->zone_info->max_active_zones == 0)
			continue;

		ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_FINISH,
				       physical >> SECTOR_SHIFT,
				       device->zone_info->zone_size >> SECTOR_SHIFT,
				       GFP_NOFS);

		if (ret)
			return ret;

		btrfs_dev_clear_active_zone(device, physical);
	}
	btrfs_dec_block_group_ro(block_group);

	spin_lock(&fs_info->zone_active_bgs_lock);
	ASSERT(!list_empty(&block_group->active_bg_list));
	list_del_init(&block_group->active_bg_list);
	spin_unlock(&fs_info->zone_active_bgs_lock);

	/* For active_bg_list */
	btrfs_put_block_group(block_group);

	return 0;
}

bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags)
{
	struct btrfs_fs_info *fs_info = fs_devices->fs_info;
	struct btrfs_device *device;
	bool ret = false;

	if (!btrfs_is_zoned(fs_info))
		return true;

	/* Check if there is a device with active zones left */
	mutex_lock(&fs_info->chunk_mutex);
	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
		struct btrfs_zoned_device_info *zinfo = device->zone_info;

		if (!device->bdev)
			continue;

		if (!zinfo->max_active_zones ||
		    atomic_read(&zinfo->active_zones_left)) {
			ret = true;
			break;
		}
	}
	mutex_unlock(&fs_info->chunk_mutex);

	return ret;
}

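/*
 * Deactivate a block group once a write completing at @logical + @length
 * has consumed the whole zone capacity.
 */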
void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical, u64 length)
{
	struct btrfs_block_group *block_group;
	struct map_lookup *map;
	struct btrfs_device *device;
	u64 physical;

	if (!btrfs_is_zoned(fs_info))
		return;

	block_group = btrfs_lookup_block_group(fs_info, logical);
	ASSERT(block_group);

	if (logical + length < block_group->start + block_group->zone_capacity)
		goto out;

	spin_lock(&block_group->lock);

	if (!block_group->zone_is_active) {
		spin_unlock(&block_group->lock);
		goto out;
	}

	block_group->zone_is_active = 0;
	/* We should have consumed all the free space */
	ASSERT(block_group->alloc_offset == block_group->zone_capacity);
	ASSERT(block_group->free_space_ctl->free_space == 0);
	btrfs_clear_treelog_bg(block_group);
	btrfs_clear_data_reloc_bg(block_group);
	spin_unlock(&block_group->lock);

	map = block_group->physical_map;
	device = map->stripes[0].dev;
	physical = map->stripes[0].physical;

	if (!device->zone_info->max_active_zones)
		goto out;

	btrfs_dev_clear_active_zone(device, physical);

	spin_lock(&fs_info->zone_active_bgs_lock);
	ASSERT(!list_empty(&block_group->active_bg_list));
	list_del_init(&block_group->active_bg_list);
	spin_unlock(&fs_info->zone_active_bgs_lock);

	btrfs_put_block_group(block_group);

out:
	btrfs_put_block_group(block_group);
}

void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg)
{
	struct btrfs_fs_info *fs_info = bg->fs_info;

	spin_lock(&fs_info->relocation_bg_lock);
	if (fs_info->data_reloc_bg == bg->start)
		fs_info->data_reloc_bg = 0;
	spin_unlock(&fs_info->relocation_bg_lock);
}

void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;

	if (!btrfs_is_zoned(fs_info))
		return;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (device->zone_info) {
			vfree(device->zone_info->zone_cache);
			device->zone_info->zone_cache = NULL;
		}
	}
	mutex_unlock(&fs_devices->device_list_mutex);
}