/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>
#include <linux/mount.h>
#include <linux/dax.h>

#define DM_MSG_PREFIX "table"

#define MAX_DEPTH 16
#define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)

struct dm_table {
	struct mapped_device *md;
	enum dm_queue_mode type;

	/* btree table */
	unsigned int depth;
	unsigned int counts[MAX_DEPTH];	/* in nodes */
	sector_t *index[MAX_DEPTH];

	unsigned int num_targets;
	unsigned int num_allocated;
	sector_t *highs;
	struct dm_target *targets;

	struct target_type *immutable_target_type;

	bool integrity_supported:1;
	bool singleton:1;
	bool all_blk_mq:1;
	unsigned integrity_added:1;

	/*
	 * Indicates the rw permissions for the new logical device.
	 * This should be a combination of FMODE_READ and FMODE_WRITE.
	 */
	fmode_t mode;

	/* a list of devices used by this table */
	struct list_head devices;

	/* events get handed up using this callback */
	void (*event_fn)(void *);
	void *event_context;

	struct dm_md_mempools *mempools;

	struct list_head target_callbacks;
};

/*
 * Similar to ceiling(log_base(n))
 */
static unsigned int int_log(unsigned int n, unsigned int base)
{
	int result = 0;

	while (n > 1) {
		n = dm_div_up(n, base);
		result++;
	}

	return result;
}

/*
 * Calculate the index of the child node for the k'th key of the n'th node.
 */
static inline unsigned int get_child(unsigned int n, unsigned int k)
{
	return (n * CHILDREN_PER_NODE) + k;
}

/*
 * Return the n'th node of level l from table t.
 */
static inline sector_t *get_node(struct dm_table *t,
				 unsigned int l, unsigned int n)
{
	return t->index[l] + (n * KEYS_PER_NODE);
}

/*
 * Return the highest key that you could lookup from the n'th
 * node on level l of the btree.
 */
static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
{
	for (; l < t->depth - 1; l++)
		n = get_child(n, CHILDREN_PER_NODE - 1);

	if (n >= t->counts[l])
		return (sector_t) -1;

	return get_node(t, l, n)[KEYS_PER_NODE - 1];
}

/*
 * Fills in a level of the btree based on the highs of the level
 * below it.
 */
static int setup_btree_index(unsigned int l, struct dm_table *t)
{
	unsigned int n, k;
	sector_t *node;

	for (n = 0U; n < t->counts[l]; n++) {
		node = get_node(t, l, n);

		for (k = 0U; k < KEYS_PER_NODE; k++)
			node[k] = high(t, l + 1, get_child(n, k));
	}

	return 0;
}
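/*
 * Illustrative geometry (a worked example, assuming a 64-byte L1 cache
 * line and an 8-byte sector_t): KEYS_PER_NODE is then 8 and
 * CHILDREN_PER_NODE is 9, so a table with 300 targets needs
 * dm_div_up(300, 8) = 38 leaf nodes and a depth of 1 + int_log(38, 9) = 3.
 */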
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
{
	unsigned long size;
	void *addr;

	/*
	 * Check that we're not going to overflow.
	 */
	if (nmemb > (ULONG_MAX / elem_size))
		return NULL;

	size = nmemb * elem_size;
	addr = vzalloc(size);

	return addr;
}
EXPORT_SYMBOL(dm_vcalloc);

/*
 * highs and targets are managed as dynamic arrays during a
 * table load.
 */
static int alloc_targets(struct dm_table *t, unsigned int num)
{
	sector_t *n_highs;
	struct dm_target *n_targets;

	/*
	 * Allocate both the target array and offset array at once.
	 * Append an empty entry to catch sectors beyond the end of
	 * the device.
	 */
	n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) +
					  sizeof(sector_t));
	if (!n_highs)
		return -ENOMEM;

	n_targets = (struct dm_target *) (n_highs + num);

	memset(n_highs, -1, sizeof(*n_highs) * num);
	vfree(t->highs);

	t->num_allocated = num;
	t->highs = n_highs;
	t->targets = n_targets;

	return 0;
}

int dm_table_create(struct dm_table **result, fmode_t mode,
		    unsigned num_targets, struct mapped_device *md)
{
	struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return -ENOMEM;

	INIT_LIST_HEAD(&t->devices);
	INIT_LIST_HEAD(&t->target_callbacks);

	if (!num_targets)
		num_targets = KEYS_PER_NODE;

	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);

	if (!num_targets) {
		kfree(t);
		return -ENOMEM;
	}

	if (alloc_targets(t, num_targets)) {
		kfree(t);
		return -ENOMEM;
	}

	t->type = DM_TYPE_NONE;
	t->mode = mode;
	t->md = md;
	*result = t;
	return 0;
}
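/*
 * Illustrative sizing, assuming KEYS_PER_NODE == 8: a caller requesting
 * room for 10 targets gets dm_round_up(10, 8) = 16 slots.  The
 * "!num_targets" re-check above guards against dm_round_up() overflowing
 * to 0 for absurdly large requests.
 */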
static void free_devices(struct list_head *devices, struct mapped_device *md)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct dm_dev_internal *dd =
		    list_entry(tmp, struct dm_dev_internal, list);
		DMWARN("%s: dm_table_destroy: dm_put_device call missing for %s",
		       dm_device_name(md), dd->dm_dev->name);
		dm_put_table_device(md, dd->dm_dev);
		kfree(dd);
	}
}

void dm_table_destroy(struct dm_table *t)
{
	unsigned int i;

	if (!t)
		return;

	/* free the indexes */
	if (t->depth >= 2)
		vfree(t->index[t->depth - 2]);

	/* free the targets */
	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *tgt = t->targets + i;

		if (tgt->type->dtr)
			tgt->type->dtr(tgt);

		dm_put_target_type(tgt->type);
	}

	vfree(t->highs);

	/* free the device list */
	free_devices(&t->devices, t->md);

	dm_free_md_mempools(t->mempools);

	kfree(t);
}

/*
 * See if we've already got a device in the list.
 */
static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
{
	struct dm_dev_internal *dd;

	list_for_each_entry(dd, l, list)
		if (dd->dm_dev->bdev->bd_dev == dev)
			return dd;

	return NULL;
}

/*
 * If possible, this checks whether an area of a destination device
 * is invalid.
 */
static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
				  sector_t start, sector_t len, void *data)
{
	struct request_queue *q;
	struct queue_limits *limits = data;
	struct block_device *bdev = dev->bdev;
	sector_t dev_size =
		i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
	unsigned short logical_block_size_sectors =
		limits->logical_block_size >> SECTOR_SHIFT;
	char b[BDEVNAME_SIZE];

	/*
	 * Some devices exist without request functions,
	 * such as loop devices not yet bound to backing files.
	 * Forbid the use of such devices.
	 */
	q = bdev_get_queue(bdev);
	if (!q || !q->make_request_fn) {
		DMWARN("%s: %s is not yet initialised: "
		       "start=%llu, len=%llu, dev_size=%llu",
		       dm_device_name(ti->table->md), bdevname(bdev, b),
		       (unsigned long long)start,
		       (unsigned long long)len,
		       (unsigned long long)dev_size);
		return 1;
	}

	if (!dev_size)
		return 0;

	if ((start >= dev_size) || (start + len > dev_size)) {
		DMWARN("%s: %s too small for target: "
		       "start=%llu, len=%llu, dev_size=%llu",
		       dm_device_name(ti->table->md), bdevname(bdev, b),
		       (unsigned long long)start,
		       (unsigned long long)len,
		       (unsigned long long)dev_size);
		return 1;
	}

	/*
	 * If the target is mapped to zoned block device(s), check
	 * that the zones are not partially mapped.
	 */
	if (bdev_zoned_model(bdev) != BLK_ZONED_NONE) {
		unsigned int zone_sectors = bdev_zone_sectors(bdev);

		if (start & (zone_sectors - 1)) {
			DMWARN("%s: start=%llu not aligned to h/w zone size %u of %s",
			       dm_device_name(ti->table->md),
			       (unsigned long long)start,
			       zone_sectors, bdevname(bdev, b));
			return 1;
		}

		/*
		 * Note: The last zone of a zoned block device may be smaller
		 * than other zones.  So for a target mapping the end of a
		 * zoned block device with such a zone, len would not be zone
		 * aligned.  We do not allow such a last smaller zone to be
		 * part of the mapping here to ensure that mappings with
		 * multiple devices do not end up with a smaller zone in the
		 * middle of the sector range.
		 */
		if (len & (zone_sectors - 1)) {
			DMWARN("%s: len=%llu not aligned to h/w zone size %u of %s",
			       dm_device_name(ti->table->md),
			       (unsigned long long)len,
			       zone_sectors, bdevname(bdev, b));
			return 1;
		}
	}

	if (logical_block_size_sectors <= 1)
		return 0;

	if (start & (logical_block_size_sectors - 1)) {
		DMWARN("%s: start=%llu not aligned to h/w "
		       "logical block size %u of %s",
		       dm_device_name(ti->table->md),
		       (unsigned long long)start,
		       limits->logical_block_size, bdevname(bdev, b));
		return 1;
	}

	if (len & (logical_block_size_sectors - 1)) {
		DMWARN("%s: len=%llu not aligned to h/w "
		       "logical block size %u of %s",
		       dm_device_name(ti->table->md),
		       (unsigned long long)len,
		       limits->logical_block_size, bdevname(bdev, b));
		return 1;
	}

	return 0;
}
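/*
 * A note on the masks above (illustrative, assuming power-of-2 zone and
 * block sizes, which the block layer requires): because zone_sectors is a
 * power of two, "start & (zone_sectors - 1)" equals "start % zone_sectors".
 * E.g. with 256 MiB zones (zone_sectors == 524288), start == 1048576 is
 * aligned while start == 1000000 is rejected.
 */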
/*
 * This upgrades the mode on an already open dm_dev, being
 * careful to leave things as they were if we fail to reopen the
 * device and not to touch the existing bdev field in case
 * it is accessed concurrently inside dm_table_any_congested().
 */
static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
			struct mapped_device *md)
{
	int r;
	struct dm_dev *old_dev, *new_dev;

	old_dev = dd->dm_dev;

	r = dm_get_table_device(md, dd->dm_dev->bdev->bd_dev,
				dd->dm_dev->mode | new_mode, &new_dev);
	if (r)
		return r;

	dd->dm_dev = new_dev;
	dm_put_table_device(md, old_dev);

	return 0;
}

/*
 * Convert the path to a device
 */
dev_t dm_get_dev_t(const char *path)
{
	dev_t dev;
	struct block_device *bdev;

	bdev = lookup_bdev(path);
	if (IS_ERR(bdev))
		dev = name_to_dev_t(path);
	else {
		dev = bdev->bd_dev;
		bdput(bdev);
	}

	return dev;
}
EXPORT_SYMBOL_GPL(dm_get_dev_t);

/*
 * Add a device to the list, or just increment the usage count if
 * it's already present.
 */
int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
		  struct dm_dev **result)
{
	int r;
	dev_t dev;
	struct dm_dev_internal *dd;
	struct dm_table *t = ti->table;

	BUG_ON(!t);

	dev = dm_get_dev_t(path);
	if (!dev)
		return -ENODEV;

	dd = find_device(&t->devices, dev);
	if (!dd) {
		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
		if (!dd)
			return -ENOMEM;

		r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev);
		if (r) {
			kfree(dd);
			return r;
		}

		refcount_set(&dd->count, 1);
		list_add(&dd->list, &t->devices);
		goto out;

	} else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
		r = upgrade_mode(dd, mode, t->md);
		if (r)
			return r;
	}
	/*
	 * The reference must also be taken when the device was already
	 * present and no mode upgrade was needed, so the refcount_inc()
	 * lives outside the else-if above.
	 */
	refcount_inc(&dd->count);
out:
	*result = dd->dm_dev;
	return 0;
}
EXPORT_SYMBOL(dm_get_device);

static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
				sector_t start, sector_t len, void *data)
{
	struct queue_limits *limits = data;
	struct block_device *bdev = dev->bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	char b[BDEVNAME_SIZE];

	if (unlikely(!q)) {
		DMWARN("%s: Cannot set limits for nonexistent device %s",
		       dm_device_name(ti->table->md), bdevname(bdev, b));
		return 0;
	}

	if (bdev_stack_limits(limits, bdev, start) < 0)
		DMWARN("%s: adding target device %s caused an alignment inconsistency: "
		       "physical_block_size=%u, logical_block_size=%u, "
		       "alignment_offset=%u, start=%llu",
		       dm_device_name(ti->table->md), bdevname(bdev, b),
		       q->limits.physical_block_size,
		       q->limits.logical_block_size,
		       q->limits.alignment_offset,
		       (unsigned long long) start << SECTOR_SHIFT);

	limits->zoned = blk_queue_zoned_model(q);

	return 0;
}

/*
 * Decrement a device's use count and remove it if necessary.
 */
void dm_put_device(struct dm_target *ti, struct dm_dev *d)
{
	int found = 0;
	struct list_head *devices = &ti->table->devices;
	struct dm_dev_internal *dd;

	list_for_each_entry(dd, devices, list) {
		if (dd->dm_dev == d) {
			found = 1;
			break;
		}
	}
	if (!found) {
		DMWARN("%s: device %s not in table devices list",
		       dm_device_name(ti->table->md), d->name);
		return;
	}
	if (refcount_dec_and_test(&dd->count)) {
		dm_put_table_device(ti->table->md, d);
		list_del(&dd->list);
		kfree(dd);
	}
}
EXPORT_SYMBOL(dm_put_device);
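/*
 * Typical use from a target constructor (hypothetical target "foo";
 * sketch only, the struct and field names are illustrative):
 *
 *	static int foo_ctr(struct dm_target *ti, unsigned argc, char **argv)
 *	{
 *		struct foo *fc = ...;
 *		int r = dm_get_device(ti, argv[0],
 *				      dm_table_get_mode(ti->table), &fc->dev);
 *		if (r) {
 *			ti->error = "Device lookup failed";
 *			return r;
 *		}
 *		...
 *	}
 *
 * with the matching dm_put_device(ti, fc->dev) in the destructor.
 */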
/*
 * Checks to see if the target joins onto the end of the table.
 */
static int adjoin(struct dm_table *table, struct dm_target *ti)
{
	struct dm_target *prev;

	if (!table->num_targets)
		return !ti->begin;

	prev = &table->targets[table->num_targets - 1];
	return (ti->begin == (prev->begin + prev->len));
}

/*
 * Used to dynamically allocate the arg array.
 *
 * We do first allocation with GFP_NOIO because dm-mpath and dm-thin must
 * process messages even if some device is suspended.  These messages have a
 * small fixed number of arguments.
 *
 * On the other hand, dm-switch needs to process bulk data using messages and
 * excessive use of GFP_NOIO could cause trouble.
 */
static char **realloc_argv(unsigned *array_size, char **old_argv)
{
	char **argv;
	unsigned new_size;
	gfp_t gfp;

	if (*array_size) {
		new_size = *array_size * 2;
		gfp = GFP_KERNEL;
	} else {
		new_size = 8;
		gfp = GFP_NOIO;
	}
	argv = kmalloc_array(new_size, sizeof(*argv), gfp);
	if (argv) {
		/* old_argv is NULL on the first allocation */
		if (old_argv)
			memcpy(argv, old_argv, *array_size * sizeof(*argv));
		*array_size = new_size;
	}

	kfree(old_argv);
	return argv;
}

/*
 * Destructively splits up the argument list to pass to ctr.
 */
int dm_split_args(int *argc, char ***argvp, char *input)
{
	char *start, *end = input, *out, **argv = NULL;
	unsigned array_size = 0;

	*argc = 0;

	if (!input) {
		*argvp = NULL;
		return 0;
	}

	argv = realloc_argv(&array_size, argv);
	if (!argv)
		return -ENOMEM;

	while (1) {
		/* Skip whitespace */
		start = skip_spaces(end);

		if (!*start)
			break;	/* success, we hit the end */

		/* 'out' is used to remove any back-quotes */
		end = out = start;
		while (*end) {
			/* Everything apart from '\0' can be quoted */
			if (*end == '\\' && *(end + 1)) {
				*out++ = *(end + 1);
				end += 2;
				continue;
			}

			if (isspace(*end))
				break;	/* end of token */

			*out++ = *end++;
		}

		/* have we already filled the array ? */
		if ((*argc + 1) > array_size) {
			argv = realloc_argv(&array_size, argv);
			if (!argv)
				return -ENOMEM;
		}

		/* we know this is whitespace */
		if (*end)
			end++;

		/* terminate the string and put it in the array */
		*out = '\0';
		argv[*argc] = start;
		(*argc)++;
	}

	*argvp = argv;
	return 0;
}
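/*
 * Example (illustrative): dm_split_args() on the writable string
 *
 *	"0 409600 linear /dev/sdb 8192"
 *
 * yields argc == 5 with argv = { "0", "409600", "linear", "/dev/sdb",
 * "8192" }.  A backslash quotes the next character, so "a\ b" becomes
 * the single argument "a b".
 */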
/*
 * Impose necessary and sufficient conditions on a device's table such
 * that any incoming bio which respects its logical_block_size can be
 * processed successfully.  If it falls across the boundary between
 * two or more targets, the size of each piece it gets split into must
 * be compatible with the logical_block_size of the target processing it.
 */
static int validate_hardware_logical_block_alignment(struct dm_table *table,
						     struct queue_limits *limits)
{
	/*
	 * This function uses arithmetic modulo the logical_block_size
	 * (in units of 512-byte sectors).
	 */
	unsigned short device_logical_block_size_sects =
		limits->logical_block_size >> SECTOR_SHIFT;

	/*
	 * Offset of the start of the next table entry, mod logical_block_size.
	 */
	unsigned short next_target_start = 0;

	/*
	 * Given an aligned bio that extends beyond the end of a
	 * target, how many sectors must the next target handle?
	 */
	unsigned short remaining = 0;

	struct dm_target *uninitialized_var(ti);
	struct queue_limits ti_limits;
	unsigned i;

	/*
	 * Check each entry in the table in turn.
	 */
	for (i = 0; i < dm_table_get_num_targets(table); i++) {
		ti = dm_table_get_target(table, i);

		blk_set_stacking_limits(&ti_limits);

		/* combine all target devices' limits */
		if (ti->type->iterate_devices)
			ti->type->iterate_devices(ti, dm_set_device_limits,
						  &ti_limits);

		/*
		 * If the remaining sectors fall entirely within this
		 * table entry, are they compatible with its logical_block_size?
		 */
		if (remaining < ti->len &&
		    remaining & ((ti_limits.logical_block_size >>
				  SECTOR_SHIFT) - 1))
			break;	/* Error */

		next_target_start =
		    (unsigned short) ((next_target_start + ti->len) &
				      (device_logical_block_size_sects - 1));
		remaining = next_target_start ?
		    device_logical_block_size_sects - next_target_start : 0;
	}

	if (remaining) {
		DMWARN("%s: table line %u (start sect %llu len %llu) "
		       "not aligned to h/w logical block size %u",
		       dm_device_name(table->md), i,
		       (unsigned long long) ti->begin,
		       (unsigned long long) ti->len,
		       limits->logical_block_size);
		return -EINVAL;
	}

	return 0;
}
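/*
 * Worked example (illustrative): with a 4096-byte logical block size,
 * device_logical_block_size_sects == 8.  A first target of len 100
 * sectors leaves next_target_start == 100 & 7 == 4, so remaining == 4:
 * a 4 KiB bio ending at sector 104 would hand its last 4 sectors to the
 * next target.  If that target's devices also require 4096-byte blocks,
 * 4 & 7 != 0 and the table is rejected.
 */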
int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params)
{
	int r = -EINVAL, argc;
	char **argv;
	struct dm_target *tgt;

	if (t->singleton) {
		DMERR("%s: target type %s must appear alone in table",
		      dm_device_name(t->md), t->targets->type->name);
		return -EINVAL;
	}

	BUG_ON(t->num_targets >= t->num_allocated);

	tgt = t->targets + t->num_targets;
	memset(tgt, 0, sizeof(*tgt));

	if (!len) {
		DMERR("%s: zero-length target", dm_device_name(t->md));
		return -EINVAL;
	}

	tgt->type = dm_get_target_type(type);
	if (!tgt->type) {
		DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
		return -EINVAL;
	}

	if (dm_target_needs_singleton(tgt->type)) {
		if (t->num_targets) {
			tgt->error = "singleton target type must appear alone in table";
			goto bad;
		}
		t->singleton = true;
	}

	if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
		tgt->error = "target type may not be included in a read-only table";
		goto bad;
	}

	if (t->immutable_target_type) {
		if (t->immutable_target_type != tgt->type) {
			tgt->error = "immutable target type cannot be mixed with other target types";
			goto bad;
		}
	} else if (dm_target_is_immutable(tgt->type)) {
		if (t->num_targets) {
			tgt->error = "immutable target type cannot be mixed with other target types";
			goto bad;
		}
		t->immutable_target_type = tgt->type;
	}

	if (dm_target_has_integrity(tgt->type))
		t->integrity_added = 1;

	tgt->table = t;
	tgt->begin = start;
	tgt->len = len;
	tgt->error = "Unknown error";

	/*
	 * Does this target adjoin the previous one?
	 */
	if (!adjoin(t, tgt)) {
		tgt->error = "Gap in table";
		goto bad;
	}

	r = dm_split_args(&argc, &argv, params);
	if (r) {
		tgt->error = "couldn't split parameters (insufficient memory)";
		goto bad;
	}

	r = tgt->type->ctr(tgt, argc, argv);
	kfree(argv);
	if (r)
		goto bad;

	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;

	if (!tgt->num_discard_bios && tgt->discards_supported)
		DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.",
		       dm_device_name(t->md), type);

	return 0;

bad:
	DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
	dm_put_target_type(tgt->type);
	return r;
}

/*
 * Target argument parsing helpers.
 */
static int validate_next_arg(const struct dm_arg *arg,
			     struct dm_arg_set *arg_set,
			     unsigned *value, char **error, unsigned grouped)
{
	const char *arg_str = dm_shift_arg(arg_set);
	char dummy;

	if (!arg_str ||
	    (sscanf(arg_str, "%u%c", value, &dummy) != 1) ||
	    (*value < arg->min) ||
	    (*value > arg->max) ||
	    (grouped && arg_set->argc < *value)) {
		*error = arg->error;
		return -EINVAL;
	}

	return 0;
}

int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		unsigned *value, char **error)
{
	return validate_next_arg(arg, arg_set, value, error, 0);
}
EXPORT_SYMBOL(dm_read_arg);

int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		      unsigned *value, char **error)
{
	return validate_next_arg(arg, arg_set, value, error, 1);
}
EXPORT_SYMBOL(dm_read_arg_group);

const char *dm_shift_arg(struct dm_arg_set *as)
{
	char *r;

	if (as->argc) {
		as->argc--;
		r = *as->argv;
		as->argv++;
		return r;
	}

	return NULL;
}
EXPORT_SYMBOL(dm_shift_arg);

void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
{
	BUG_ON(as->argc < num_args);
	as->argc -= num_args;
	as->argv += num_args;
}
EXPORT_SYMBOL(dm_consume_args);
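/*
 * Sketch of the dm_arg helpers from a target ctr (hypothetical feature
 * parsing; the names "_args" and "num_features" are illustrative):
 *
 *	static const struct dm_arg _args[] = {
 *		{0, 4, "Invalid number of feature args"},
 *	};
 *	unsigned num_features;
 *	struct dm_arg_set as = { .argc = argc, .argv = argv };
 *
 *	if (dm_read_arg_group(_args, &as, &num_features, &ti->error))
 *		return -EINVAL;
 *	while (num_features--) {
 *		const char *feature = dm_shift_arg(&as);
 *		...
 *	}
 */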
static bool __table_type_bio_based(enum dm_queue_mode table_type)
{
	return (table_type == DM_TYPE_BIO_BASED ||
		table_type == DM_TYPE_DAX_BIO_BASED);
}

static bool __table_type_request_based(enum dm_queue_mode table_type)
{
	return (table_type == DM_TYPE_REQUEST_BASED ||
		table_type == DM_TYPE_MQ_REQUEST_BASED);
}

void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
{
	t->type = type;
}
EXPORT_SYMBOL_GPL(dm_table_set_type);

static int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
			       sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && blk_queue_dax(q);
}

static bool dm_table_supports_dax(struct dm_table *t)
{
	struct dm_target *ti;
	unsigned i;

	/* Ensure that all targets support DAX. */
	for (i = 0; i < dm_table_get_num_targets(t); i++) {
		ti = dm_table_get_target(t, i);

		if (!ti->type->direct_access)
			return false;

		if (!ti->type->iterate_devices ||
		    !ti->type->iterate_devices(ti, device_supports_dax, NULL))
			return false;
	}

	return true;
}
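/*
 * The type resolution below is all-or-nothing.  For example (illustrative):
 * a table mixing a bio-based target such as "linear" with the
 * request-based "multipath" target is rejected, whereas a table whose
 * only target is hybrid (e.g. "error") inherits the type of the live
 * device it replaces.
 */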
static int dm_table_determine_type(struct dm_table *t)
{
	unsigned i;
	unsigned bio_based = 0, request_based = 0, hybrid = 0;
	unsigned sq_count = 0, mq_count = 0;
	struct dm_target *tgt;
	struct dm_dev_internal *dd;
	struct list_head *devices = dm_table_get_devices(t);
	enum dm_queue_mode live_md_type = dm_get_md_type(t->md);

	if (t->type != DM_TYPE_NONE) {
		/* target already set the table's type */
		if (t->type == DM_TYPE_BIO_BASED)
			return 0;
		BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
		goto verify_rq_based;
	}

	for (i = 0; i < t->num_targets; i++) {
		tgt = t->targets + i;
		if (dm_target_hybrid(tgt))
			hybrid = 1;
		else if (dm_target_request_based(tgt))
			request_based = 1;
		else
			bio_based = 1;

		if (bio_based && request_based) {
			DMWARN("Inconsistent table: different target types can't be mixed up");
			return -EINVAL;
		}
	}

	if (hybrid && !bio_based && !request_based) {
		/*
		 * The targets can work either way.
		 * Determine the type from the live device.
		 * Default to bio-based if device is new.
		 */
		if (__table_type_request_based(live_md_type))
			request_based = 1;
		else
			bio_based = 1;
	}

	if (bio_based) {
		/* We must use this table as bio-based */
		t->type = DM_TYPE_BIO_BASED;
		if (dm_table_supports_dax(t) ||
		    (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED))
			t->type = DM_TYPE_DAX_BIO_BASED;
		return 0;
	}

	BUG_ON(!request_based); /* No targets in this table */

	/*
	 * The only way to establish DM_TYPE_MQ_REQUEST_BASED is by
	 * having a compatible target use dm_table_set_type.
	 */
	t->type = DM_TYPE_REQUEST_BASED;

verify_rq_based:
	/*
	 * Request-based dm supports only tables that have a single target now.
	 * To support multiple targets, request splitting support is needed,
	 * and that needs lots of changes in the block-layer.
	 * (e.g. request completion process for partial completion.)
	 */
	if (t->num_targets > 1) {
		DMWARN("Request-based dm doesn't support multiple targets yet");
		return -EINVAL;
	}

	if (list_empty(devices)) {
		int srcu_idx;
		struct dm_table *live_table = dm_get_live_table(t->md, &srcu_idx);

		/* inherit live table's type and all_blk_mq */
		if (live_table) {
			t->type = live_table->type;
			t->all_blk_mq = live_table->all_blk_mq;
		}
		dm_put_live_table(t->md, srcu_idx);
		return 0;
	}

	/* Non-request-stackable devices can't be used for request-based dm */
	list_for_each_entry(dd, devices, list) {
		struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);

		if (!queue_is_rq_based(q)) {
			DMERR("table load rejected: including non-request-stackable devices");
			return -EINVAL;
		}

		if (q->mq_ops)
			mq_count++;
		else
			sq_count++;
	}
	if (sq_count && mq_count) {
		DMERR("table load rejected: not all devices are blk-mq request-stackable");
		return -EINVAL;
	}
	t->all_blk_mq = mq_count > 0;

	if (t->type == DM_TYPE_MQ_REQUEST_BASED && !t->all_blk_mq) {
		DMERR("table load rejected: all devices are not blk-mq request-stackable");
		return -EINVAL;
	}

	return 0;
}

enum dm_queue_mode dm_table_get_type(struct dm_table *t)
{
	return t->type;
}

struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
{
	return t->immutable_target_type;
}

struct dm_target *dm_table_get_immutable_target(struct dm_table *t)
{
	/* Immutable target is implicitly a singleton */
	if (t->num_targets > 1 ||
	    !dm_target_is_immutable(t->targets[0].type))
		return NULL;

	return t->targets;
}

struct dm_target *dm_table_get_wildcard_target(struct dm_table *t)
{
	struct dm_target *ti;
	unsigned i;

	for (i = 0; i < dm_table_get_num_targets(t); i++) {
		ti = dm_table_get_target(t, i);
		if (dm_target_is_wildcard(ti->type))
			return ti;
	}

	return NULL;
}

bool dm_table_bio_based(struct dm_table *t)
{
	return __table_type_bio_based(dm_table_get_type(t));
}

bool dm_table_request_based(struct dm_table *t)
{
	return __table_type_request_based(dm_table_get_type(t));
}

bool dm_table_all_blk_mq_devices(struct dm_table *t)
{
	return t->all_blk_mq;
}

static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
{
	enum dm_queue_mode type = dm_table_get_type(t);
	unsigned per_io_data_size = 0;
	struct dm_target *tgt;
	unsigned i;

	if (unlikely(type == DM_TYPE_NONE)) {
		DMWARN("no table type is set, can't allocate mempools");
		return -EINVAL;
	}

	if (__table_type_bio_based(type))
		for (i = 0; i < t->num_targets; i++) {
			tgt = t->targets + i;
			per_io_data_size = max(per_io_data_size, tgt->per_io_data_size);
		}

	t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported, per_io_data_size);
	if (!t->mempools)
		return -ENOMEM;

	return 0;
}

void dm_table_free_md_mempools(struct dm_table *t)
{
	dm_free_md_mempools(t->mempools);
	t->mempools = NULL;
}

struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t)
{
	return t->mempools;
}
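/*
 * Illustrative: for a bio-based table whose targets declare
 * per_io_data_size of 192 and 512 bytes, dm_table_alloc_md_mempools()
 * above sizes the pools for the maximum, 512, so any target's per-bio
 * data fits in the front-padded allocation.
 */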
static int setup_indexes(struct dm_table *t)
{
	int i;
	unsigned int total = 0;
	sector_t *indexes;

	/* allocate the space for *all* the indexes */
	for (i = t->depth - 2; i >= 0; i--) {
		t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
		total += t->counts[i];
	}

	indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
	if (!indexes)
		return -ENOMEM;

	/* set up internal nodes, bottom-up */
	for (i = t->depth - 2; i >= 0; i--) {
		t->index[i] = indexes;
		indexes += (KEYS_PER_NODE * t->counts[i]);
		setup_btree_index(i, t);
	}

	return 0;
}

/*
 * Builds the btree to index the map.
 */
static int dm_table_build_index(struct dm_table *t)
{
	int r = 0;
	unsigned int leaf_nodes;

	/* how many indexes will the btree have? */
	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
	t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);

	/* leaf layer has already been set up */
	t->counts[t->depth - 1] = leaf_nodes;
	t->index[t->depth - 1] = t->highs;

	if (t->depth >= 2)
		r = setup_indexes(t);

	return r;
}
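/*
 * Continuing the illustrative 300-target layout from above
 * (KEYS_PER_NODE == 8): the leaf level has 38 nodes, so setup_indexes()
 * allocates dm_div_up(38, 9) + dm_div_up(5, 9) = 5 + 1 = 6 internal
 * nodes, i.e. 6 * NODE_SIZE = 384 bytes of index, filled bottom-up so
 * each level's keys are the "high" sectors of the level below.
 */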
static bool integrity_profile_exists(struct gendisk *disk)
{
	return !!blk_get_integrity(disk);
}

/*
 * Get a disk whose integrity profile reflects the table's profile.
 * Returns NULL if integrity support was inconsistent or unavailable.
 */
static struct gendisk *dm_table_get_integrity_disk(struct dm_table *t)
{
	struct list_head *devices = dm_table_get_devices(t);
	struct dm_dev_internal *dd = NULL;
	struct gendisk *prev_disk = NULL, *template_disk = NULL;
	unsigned i;

	for (i = 0; i < dm_table_get_num_targets(t); i++) {
		struct dm_target *ti = dm_table_get_target(t, i);
		if (!dm_target_passes_integrity(ti->type))
			goto no_integrity;
	}

	list_for_each_entry(dd, devices, list) {
		template_disk = dd->dm_dev->bdev->bd_disk;
		if (!integrity_profile_exists(template_disk))
			goto no_integrity;
		else if (prev_disk &&
			 blk_integrity_compare(prev_disk, template_disk) < 0)
			goto no_integrity;
		prev_disk = template_disk;
	}

	return template_disk;

no_integrity:
	if (prev_disk)
		DMWARN("%s: integrity not set: %s and %s profile mismatch",
		       dm_device_name(t->md),
		       prev_disk->disk_name,
		       template_disk->disk_name);
	return NULL;
}

/*
 * Register the mapped device for blk_integrity support if the
 * underlying devices have an integrity profile.  But all devices may
 * not have matching profiles (checking all devices isn't reliable
 * during table load because this table may use other DM device(s) which
 * must be resumed before they will have an initialized integrity
 * profile).  Consequently, stacked DM devices force a 2 stage integrity
 * profile validation: First pass during table load, final pass during
 * resume.
 */
static int dm_table_register_integrity(struct dm_table *t)
{
	struct mapped_device *md = t->md;
	struct gendisk *template_disk = NULL;

	/* If target handles integrity itself do not register it here. */
	if (t->integrity_added)
		return 0;

	template_disk = dm_table_get_integrity_disk(t);
	if (!template_disk)
		return 0;

	if (!integrity_profile_exists(dm_disk(md))) {
		t->integrity_supported = true;
		/*
		 * Register integrity profile during table load; we can do
		 * this because the final profile must match during resume.
		 */
		blk_integrity_register(dm_disk(md),
				       blk_get_integrity(template_disk));
		return 0;
	}

	/*
	 * If DM device already has an initialized integrity
	 * profile the new profile should not conflict.
	 */
	if (blk_integrity_compare(dm_disk(md), template_disk) < 0) {
		DMWARN("%s: conflict with existing integrity profile: "
		       "%s profile mismatch",
		       dm_device_name(t->md),
		       template_disk->disk_name);
		return 1;
	}

	/* Preserve existing integrity profile */
	t->integrity_supported = true;
	return 0;
}

/*
 * Prepares the table for use by building the indices,
 * setting the type, and allocating mempools.
 */
int dm_table_complete(struct dm_table *t)
{
	int r;

	r = dm_table_determine_type(t);
	if (r) {
		DMERR("unable to determine table type");
		return r;
	}

	r = dm_table_build_index(t);
	if (r) {
		DMERR("unable to build btrees");
		return r;
	}

	r = dm_table_register_integrity(t);
	if (r) {
		DMERR("could not register integrity profile.");
		return r;
	}

	r = dm_table_alloc_md_mempools(t, t->md);
	if (r)
		DMERR("unable to allocate mempools");

	return r;
}

static DEFINE_MUTEX(_event_lock);
void dm_table_event_callback(struct dm_table *t,
			     void (*fn)(void *), void *context)
{
	mutex_lock(&_event_lock);
	t->event_fn = fn;
	t->event_context = context;
	mutex_unlock(&_event_lock);
}

void dm_table_event(struct dm_table *t)
{
	/*
	 * You can no longer call dm_table_event() from interrupt
	 * context, use a bottom half instead.
	 */
	BUG_ON(in_interrupt());

	mutex_lock(&_event_lock);
	if (t->event_fn)
		t->event_fn(t->event_context);
	mutex_unlock(&_event_lock);
}
EXPORT_SYMBOL(dm_table_event);

sector_t dm_table_get_size(struct dm_table *t)
{
	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}
EXPORT_SYMBOL(dm_table_get_size);

struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
{
	if (index >= t->num_targets)
		return NULL;

	return t->targets + index;
}

/*
 * Search the btree for the correct target.
 *
 * Caller should check returned pointer with dm_target_is_valid()
 * to trap I/O beyond end of device.
 */
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
{
	unsigned int l, n = 0, k = 0;
	sector_t *node;

	for (l = 0; l < t->depth; l++) {
		n = get_child(n, k);
		node = get_node(t, l, n);

		for (k = 0; k < KEYS_PER_NODE; k++)
			if (node[k] >= sector)
				break;
	}

	return &t->targets[(KEYS_PER_NODE * n) + k];
}
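/*
 * Illustrative walk of the lookup above, assuming KEYS_PER_NODE == 8 and
 * a table of three targets covering sectors 0-99, 100-199 and 200-299:
 * the tree has depth 1, so the single leaf holds highs {99, 199, 299}.
 * For sector == 150 the first key >= 150 is 199 at k == 1, and the
 * second target is returned.
 */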
static int count_device(struct dm_target *ti, struct dm_dev *dev,
			sector_t start, sector_t len, void *data)
{
	unsigned *num_devices = data;

	(*num_devices)++;

	return 0;
}

/*
 * Check whether a table has no data devices attached using each
 * target's iterate_devices method.
 * Returns false if the result is unknown because a target doesn't
 * support iterate_devices.
 */
bool dm_table_has_no_data_devices(struct dm_table *table)
{
	struct dm_target *ti;
	unsigned i, num_devices;

	for (i = 0; i < dm_table_get_num_targets(table); i++) {
		ti = dm_table_get_target(table, i);

		if (!ti->type->iterate_devices)
			return false;

		num_devices = 0;
		ti->type->iterate_devices(ti, count_device, &num_devices);
		if (num_devices)
			return false;
	}

	return true;
}

static int device_is_zoned_model(struct dm_target *ti, struct dm_dev *dev,
				 sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);
	enum blk_zoned_model *zoned_model = data;

	return q && blk_queue_zoned_model(q) == *zoned_model;
}

static bool dm_table_supports_zoned_model(struct dm_table *t,
					  enum blk_zoned_model zoned_model)
{
	struct dm_target *ti;
	unsigned i;

	for (i = 0; i < dm_table_get_num_targets(t); i++) {
		ti = dm_table_get_target(t, i);

		if (zoned_model == BLK_ZONED_HM &&
		    !dm_target_supports_zoned_hm(ti->type))
			return false;

		if (!ti->type->iterate_devices ||
		    !ti->type->iterate_devices(ti, device_is_zoned_model, &zoned_model))
			return false;
	}

	return true;
}

static int device_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
				       sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);
	unsigned int *zone_sectors = data;

	return q && blk_queue_zone_sectors(q) == *zone_sectors;
}

static bool dm_table_matches_zone_sectors(struct dm_table *t,
					  unsigned int zone_sectors)
{
	struct dm_target *ti;
	unsigned i;

	for (i = 0; i < dm_table_get_num_targets(t); i++) {
		ti = dm_table_get_target(t, i);

		if (!ti->type->iterate_devices ||
		    !ti->type->iterate_devices(ti, device_matches_zone_sectors, &zone_sectors))
			return false;
	}

	return true;
}
DMERR("%s: zone sectors is not consistent across all devices", 1461 dm_device_name(table->md)); 1462 return -EINVAL; 1463 } 1464 1465 return 0; 1466 } 1467 1468 /* 1469 * Establish the new table's queue_limits and validate them. 1470 */ 1471 int dm_calculate_queue_limits(struct dm_table *table, 1472 struct queue_limits *limits) 1473 { 1474 struct dm_target *ti; 1475 struct queue_limits ti_limits; 1476 unsigned i; 1477 enum blk_zoned_model zoned_model = BLK_ZONED_NONE; 1478 unsigned int zone_sectors = 0; 1479 1480 blk_set_stacking_limits(limits); 1481 1482 for (i = 0; i < dm_table_get_num_targets(table); i++) { 1483 blk_set_stacking_limits(&ti_limits); 1484 1485 ti = dm_table_get_target(table, i); 1486 1487 if (!ti->type->iterate_devices) 1488 goto combine_limits; 1489 1490 /* 1491 * Combine queue limits of all the devices this target uses. 1492 */ 1493 ti->type->iterate_devices(ti, dm_set_device_limits, 1494 &ti_limits); 1495 1496 if (zoned_model == BLK_ZONED_NONE && ti_limits.zoned != BLK_ZONED_NONE) { 1497 /* 1498 * After stacking all limits, validate all devices 1499 * in table support this zoned model and zone sectors. 1500 */ 1501 zoned_model = ti_limits.zoned; 1502 zone_sectors = ti_limits.chunk_sectors; 1503 } 1504 1505 /* Set I/O hints portion of queue limits */ 1506 if (ti->type->io_hints) 1507 ti->type->io_hints(ti, &ti_limits); 1508 1509 /* 1510 * Check each device area is consistent with the target's 1511 * overall queue limits. 1512 */ 1513 if (ti->type->iterate_devices(ti, device_area_is_invalid, 1514 &ti_limits)) 1515 return -EINVAL; 1516 1517 combine_limits: 1518 /* 1519 * Merge this target's queue limits into the overall limits 1520 * for the table. 1521 */ 1522 if (blk_stack_limits(limits, &ti_limits, 0) < 0) 1523 DMWARN("%s: adding target device " 1524 "(start sect %llu len %llu) " 1525 "caused an alignment inconsistency", 1526 dm_device_name(table->md), 1527 (unsigned long long) ti->begin, 1528 (unsigned long long) ti->len); 1529 1530 /* 1531 * FIXME: this should likely be moved to blk_stack_limits(), would 1532 * also eliminate limits->zoned stacking hack in dm_set_device_limits() 1533 */ 1534 if (limits->zoned == BLK_ZONED_NONE && ti_limits.zoned != BLK_ZONED_NONE) { 1535 /* 1536 * By default, the stacked limits zoned model is set to 1537 * BLK_ZONED_NONE in blk_set_stacking_limits(). Update 1538 * this model using the first target model reported 1539 * that is not BLK_ZONED_NONE. This will be either the 1540 * first target device zoned model or the model reported 1541 * by the target .io_hints. 1542 */ 1543 limits->zoned = ti_limits.zoned; 1544 } 1545 } 1546 1547 /* 1548 * Verify that the zoned model and zone sectors, as determined before 1549 * any .io_hints override, are the same across all devices in the table. 1550 * - this is especially relevant if .io_hints is emulating a disk-managed 1551 * zoned model (aka BLK_ZONED_NONE) on host-managed zoned block devices. 1552 * BUT... 1553 */ 1554 if (limits->zoned != BLK_ZONED_NONE) { 1555 /* 1556 * ...IF the above limits stacking determined a zoned model 1557 * validate that all of the table's devices conform to it. 1558 */ 1559 zoned_model = limits->zoned; 1560 zone_sectors = limits->chunk_sectors; 1561 } 1562 if (validate_hardware_zoned_model(table, zoned_model, zone_sectors)) 1563 return -EINVAL; 1564 1565 return validate_hardware_logical_block_alignment(table, limits); 1566 } 1567 1568 /* 1569 * Verify that all devices have an integrity profile that matches the 1570 * DM device's registered integrity profile. 
/*
 * Verify that all devices have an integrity profile that matches the
 * DM device's registered integrity profile.  If the profiles don't
 * match then unregister the DM device's integrity profile.
 */
static void dm_table_verify_integrity(struct dm_table *t)
{
	struct gendisk *template_disk = NULL;

	if (t->integrity_added)
		return;

	if (t->integrity_supported) {
		/*
		 * Verify that the original integrity profile
		 * matches all the devices in this table.
		 */
		template_disk = dm_table_get_integrity_disk(t);
		if (template_disk &&
		    blk_integrity_compare(dm_disk(t->md), template_disk) >= 0)
			return;
	}

	if (integrity_profile_exists(dm_disk(t->md))) {
		DMWARN("%s: unable to establish an integrity profile",
		       dm_device_name(t->md));
		blk_integrity_unregister(dm_disk(t->md));
	}
}

static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
				sector_t start, sector_t len, void *data)
{
	unsigned long flush = (unsigned long) data;
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && (q->queue_flags & flush);
}

static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
{
	struct dm_target *ti;
	unsigned i;

	/*
	 * Require at least one underlying device to support flushes.
	 * t->devices includes internal dm devices such as mirror logs
	 * so we need to use iterate_devices here, which targets
	 * supporting flushes must provide.
	 */
	for (i = 0; i < dm_table_get_num_targets(t); i++) {
		ti = dm_table_get_target(t, i);

		if (!ti->num_flush_bios)
			continue;

		if (ti->flush_supported)
			return true;

		if (ti->type->iterate_devices &&
		    ti->type->iterate_devices(ti, device_flush_capable, (void *) flush))
			return true;
	}

	return false;
}

static int device_dax_write_cache_enabled(struct dm_target *ti,
					  struct dm_dev *dev, sector_t start,
					  sector_t len, void *data)
{
	struct dax_device *dax_dev = dev->dax_dev;

	if (!dax_dev)
		return false;

	if (dax_write_cache_enabled(dax_dev))
		return true;
	return false;
}

static int dm_table_supports_dax_write_cache(struct dm_table *t)
{
	struct dm_target *ti;
	unsigned i;

	for (i = 0; i < dm_table_get_num_targets(t); i++) {
		ti = dm_table_get_target(t, i);

		if (ti->type->iterate_devices &&
		    ti->type->iterate_devices(ti,
				device_dax_write_cache_enabled, NULL))
			return true;
	}

	return false;
}

static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
			    sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && blk_queue_nonrot(q);
}

static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
				sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && !blk_queue_add_random(q);
}

static int queue_supports_sg_merge(struct dm_target *ti, struct dm_dev *dev,
				   sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && !test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
}
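/*
 * The iterate_devices callouts above all answer "does every device have
 * this attribute?" via dm_table_all_devices_attribute() below.  E.g.
 * (illustrative) a table striping an SSD with a spinning disk fails the
 * device_is_nonrot() check, so the DM device is reported as rotational.
 */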
static bool dm_table_all_devices_attribute(struct dm_table *t,
					   iterate_devices_callout_fn func)
{
	struct dm_target *ti;
	unsigned i;

	for (i = 0; i < dm_table_get_num_targets(t); i++) {
		ti = dm_table_get_target(t, i);

		if (!ti->type->iterate_devices ||
		    !ti->type->iterate_devices(ti, func, NULL))
			return false;
	}

	return true;
}

static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
					 sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && !q->limits.max_write_same_sectors;
}

static bool dm_table_supports_write_same(struct dm_table *t)
{
	struct dm_target *ti;
	unsigned i;

	for (i = 0; i < dm_table_get_num_targets(t); i++) {
		ti = dm_table_get_target(t, i);

		if (!ti->num_write_same_bios)
			return false;

		if (!ti->type->iterate_devices ||
		    ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
			return false;
	}

	return true;
}

static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *dev,
					   sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && !q->limits.max_write_zeroes_sectors;
}

static bool dm_table_supports_write_zeroes(struct dm_table *t)
{
	struct dm_target *ti;
	unsigned i = 0;

	while (i < dm_table_get_num_targets(t)) {
		ti = dm_table_get_target(t, i++);

		if (!ti->num_write_zeroes_bios)
			return false;

		if (!ti->type->iterate_devices ||
		    ti->type->iterate_devices(ti, device_not_write_zeroes_capable, NULL))
			return false;
	}

	return true;
}

static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
				      sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && !blk_queue_discard(q);
}

static bool dm_table_supports_discards(struct dm_table *t)
{
	struct dm_target *ti;
	unsigned i;

	for (i = 0; i < dm_table_get_num_targets(t); i++) {
		ti = dm_table_get_target(t, i);

		if (!ti->num_discard_bios)
			return false;

		/*
		 * Either the target provides discard support (as implied by setting
		 * 'discards_supported') or it relies on _all_ data devices having
		 * discard support.
		 */
		if (!ti->discards_supported &&
		    (!ti->type->iterate_devices ||
		     ti->type->iterate_devices(ti, device_not_discard_capable, NULL)))
			return false;
	}

	return true;
}
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
			       struct queue_limits *limits)
{
	bool wc = false, fua = false;

	/*
	 * Copy table's limits to the DM device's request_queue
	 */
	q->limits = *limits;

	if (!dm_table_supports_discards(t)) {
		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
		/* Must also clear discard limits... */
		q->limits.max_discard_sectors = 0;
		q->limits.max_hw_discard_sectors = 0;
		q->limits.discard_granularity = 0;
		q->limits.discard_alignment = 0;
		q->limits.discard_misaligned = 0;
	} else
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);

	if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
		wc = true;
		if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA)))
			fua = true;
	}
	blk_queue_write_cache(q, wc, fua);

	if (dm_table_supports_dax_write_cache(t))
		dax_write_cache(t->md->dax_dev, true);

	/* Ensure that all underlying devices are non-rotational. */
	if (dm_table_all_devices_attribute(t, device_is_nonrot))
		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
	else
		queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);

	if (!dm_table_supports_write_same(t))
		q->limits.max_write_same_sectors = 0;
	if (!dm_table_supports_write_zeroes(t))
		q->limits.max_write_zeroes_sectors = 0;

	if (dm_table_all_devices_attribute(t, queue_supports_sg_merge))
		queue_flag_clear_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
	else
		queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);

	dm_table_verify_integrity(t);

	/*
	 * Determine whether or not this queue's I/O timings contribute
	 * to the entropy pool.  Only request-based targets use this.
	 * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
	 * have it set.
	 */
	if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
}

unsigned int dm_table_get_num_targets(struct dm_table *t)
{
	return t->num_targets;
}

struct list_head *dm_table_get_devices(struct dm_table *t)
{
	return &t->devices;
}

fmode_t dm_table_get_mode(struct dm_table *t)
{
	return t->mode;
}
EXPORT_SYMBOL(dm_table_get_mode);

enum suspend_mode {
	PRESUSPEND,
	PRESUSPEND_UNDO,
	POSTSUSPEND,
};

static void suspend_targets(struct dm_table *t, enum suspend_mode mode)
{
	int i = t->num_targets;
	struct dm_target *ti = t->targets;

	lockdep_assert_held(&t->md->suspend_lock);

	while (i--) {
		switch (mode) {
		case PRESUSPEND:
			if (ti->type->presuspend)
				ti->type->presuspend(ti);
			break;
		case PRESUSPEND_UNDO:
			if (ti->type->presuspend_undo)
				ti->type->presuspend_undo(ti);
			break;
		case POSTSUSPEND:
			if (ti->type->postsuspend)
				ti->type->postsuspend(ti);
			break;
		}
		ti++;
	}
}

void dm_table_presuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, PRESUSPEND);
}

void dm_table_presuspend_undo_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, PRESUSPEND_UNDO);
}

void dm_table_postsuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, POSTSUSPEND);
}
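/*
 * Suspend/resume ordering, for reference (as driven by dm.c): presuspend
 * hooks run before I/O is flushed and the device is quiesced, postsuspend
 * hooks after; on resume, every target's preresume must succeed before
 * any resume hook runs, as implemented below.
 */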
int dm_table_resume_targets(struct dm_table *t)
{
	int i, r = 0;

	lockdep_assert_held(&t->md->suspend_lock);

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (!ti->type->preresume)
			continue;

		r = ti->type->preresume(ti);
		if (r) {
			DMERR("%s: %s: preresume failed, error = %d",
			      dm_device_name(t->md), ti->type->name, r);
			return r;
		}
	}

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (ti->type->resume)
			ti->type->resume(ti);
	}

	return 0;
}

void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb)
{
	list_add(&cb->list, &t->target_callbacks);
}
EXPORT_SYMBOL_GPL(dm_table_add_target_callbacks);

int dm_table_any_congested(struct dm_table *t, int bdi_bits)
{
	struct dm_dev_internal *dd;
	struct list_head *devices = dm_table_get_devices(t);
	struct dm_target_callbacks *cb;
	int r = 0;

	list_for_each_entry(dd, devices, list) {
		struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);
		char b[BDEVNAME_SIZE];

		if (likely(q))
			r |= bdi_congested(q->backing_dev_info, bdi_bits);
		else
			DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
				     dm_device_name(t->md),
				     bdevname(dd->dm_dev->bdev, b));
	}

	list_for_each_entry(cb, &t->target_callbacks, list)
		if (cb->congested_fn)
			r |= cb->congested_fn(cb, bdi_bits);

	return r;
}

struct mapped_device *dm_table_get_md(struct dm_table *t)
{
	return t->md;
}
EXPORT_SYMBOL(dm_table_get_md);

void dm_table_run_md_queue_async(struct dm_table *t)
{
	struct mapped_device *md;
	struct request_queue *queue;
	unsigned long flags;

	if (!dm_table_request_based(t))
		return;

	md = dm_table_get_md(t);
	queue = dm_get_md_queue(md);
	if (queue) {
		if (queue->mq_ops)
			blk_mq_run_hw_queues(queue, true);
		else {
			spin_lock_irqsave(queue->queue_lock, flags);
			blk_run_queue_async(queue);
			spin_unlock_irqrestore(queue->queue_lock, flags);
		}
	}
}
EXPORT_SYMBOL(dm_table_run_md_queue_async);