1 /* 2 * Copyright (C) 2001 Sistina Software (UK) Limited. 3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. 4 * 5 * This file is released under the GPL. 6 */ 7 8 #include "dm-core.h" 9 10 #include <linux/module.h> 11 #include <linux/vmalloc.h> 12 #include <linux/blkdev.h> 13 #include <linux/namei.h> 14 #include <linux/ctype.h> 15 #include <linux/string.h> 16 #include <linux/slab.h> 17 #include <linux/interrupt.h> 18 #include <linux/mutex.h> 19 #include <linux/delay.h> 20 #include <linux/atomic.h> 21 #include <linux/blk-mq.h> 22 #include <linux/mount.h> 23 24 #define DM_MSG_PREFIX "table" 25 26 #define MAX_DEPTH 16 27 #define NODE_SIZE L1_CACHE_BYTES 28 #define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t)) 29 #define CHILDREN_PER_NODE (KEYS_PER_NODE + 1) 30 31 struct dm_table { 32 struct mapped_device *md; 33 unsigned type; 34 35 /* btree table */ 36 unsigned int depth; 37 unsigned int counts[MAX_DEPTH]; /* in nodes */ 38 sector_t *index[MAX_DEPTH]; 39 40 unsigned int num_targets; 41 unsigned int num_allocated; 42 sector_t *highs; 43 struct dm_target *targets; 44 45 struct target_type *immutable_target_type; 46 47 bool integrity_supported:1; 48 bool singleton:1; 49 bool all_blk_mq:1; 50 51 /* 52 * Indicates the rw permissions for the new logical 53 * device. This should be a combination of FMODE_READ 54 * and FMODE_WRITE. 55 */ 56 fmode_t mode; 57 58 /* a list of devices used by this table */ 59 struct list_head devices; 60 61 /* events get handed up using this callback */ 62 void (*event_fn)(void *); 63 void *event_context; 64 65 struct dm_md_mempools *mempools; 66 67 struct list_head target_callbacks; 68 }; 69 70 /* 71 * Similar to ceiling(log_size(n)) 72 */ 73 static unsigned int int_log(unsigned int n, unsigned int base) 74 { 75 int result = 0; 76 77 while (n > 1) { 78 n = dm_div_up(n, base); 79 result++; 80 } 81 82 return result; 83 } 84 85 /* 86 * Calculate the index of the child node of the n'th node k'th key. 87 */ 88 static inline unsigned int get_child(unsigned int n, unsigned int k) 89 { 90 return (n * CHILDREN_PER_NODE) + k; 91 } 92 93 /* 94 * Return the n'th node of level l from table t. 95 */ 96 static inline sector_t *get_node(struct dm_table *t, 97 unsigned int l, unsigned int n) 98 { 99 return t->index[l] + (n * KEYS_PER_NODE); 100 } 101 102 /* 103 * Return the highest key that you could lookup from the n'th 104 * node on level l of the btree. 105 */ 106 static sector_t high(struct dm_table *t, unsigned int l, unsigned int n) 107 { 108 for (; l < t->depth - 1; l++) 109 n = get_child(n, CHILDREN_PER_NODE - 1); 110 111 if (n >= t->counts[l]) 112 return (sector_t) - 1; 113 114 return get_node(t, l, n)[KEYS_PER_NODE - 1]; 115 } 116 117 /* 118 * Fills in a level of the btree based on the highs of the level 119 * below it. 120 */ 121 static int setup_btree_index(unsigned int l, struct dm_table *t) 122 { 123 unsigned int n, k; 124 sector_t *node; 125 126 for (n = 0U; n < t->counts[l]; n++) { 127 node = get_node(t, l, n); 128 129 for (k = 0U; k < KEYS_PER_NODE; k++) 130 node[k] = high(t, l + 1, get_child(n, k)); 131 } 132 133 return 0; 134 } 135 136 void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size) 137 { 138 unsigned long size; 139 void *addr; 140 141 /* 142 * Check that we're not going to overflow. 143 */ 144 if (nmemb > (ULONG_MAX / elem_size)) 145 return NULL; 146 147 size = nmemb * elem_size; 148 addr = vzalloc(size); 149 150 return addr; 151 } 152 EXPORT_SYMBOL(dm_vcalloc); 153 154 /* 155 * highs, and targets are managed as dynamic arrays during a 156 * table load. 157 */ 158 static int alloc_targets(struct dm_table *t, unsigned int num) 159 { 160 sector_t *n_highs; 161 struct dm_target *n_targets; 162 163 /* 164 * Allocate both the target array and offset array at once. 165 * Append an empty entry to catch sectors beyond the end of 166 * the device. 167 */ 168 n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) + 169 sizeof(sector_t)); 170 if (!n_highs) 171 return -ENOMEM; 172 173 n_targets = (struct dm_target *) (n_highs + num); 174 175 memset(n_highs, -1, sizeof(*n_highs) * num); 176 vfree(t->highs); 177 178 t->num_allocated = num; 179 t->highs = n_highs; 180 t->targets = n_targets; 181 182 return 0; 183 } 184 185 int dm_table_create(struct dm_table **result, fmode_t mode, 186 unsigned num_targets, struct mapped_device *md) 187 { 188 struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL); 189 190 if (!t) 191 return -ENOMEM; 192 193 INIT_LIST_HEAD(&t->devices); 194 INIT_LIST_HEAD(&t->target_callbacks); 195 196 if (!num_targets) 197 num_targets = KEYS_PER_NODE; 198 199 num_targets = dm_round_up(num_targets, KEYS_PER_NODE); 200 201 if (!num_targets) { 202 kfree(t); 203 return -ENOMEM; 204 } 205 206 if (alloc_targets(t, num_targets)) { 207 kfree(t); 208 return -ENOMEM; 209 } 210 211 t->type = DM_TYPE_NONE; 212 t->mode = mode; 213 t->md = md; 214 *result = t; 215 return 0; 216 } 217 218 static void free_devices(struct list_head *devices, struct mapped_device *md) 219 { 220 struct list_head *tmp, *next; 221 222 list_for_each_safe(tmp, next, devices) { 223 struct dm_dev_internal *dd = 224 list_entry(tmp, struct dm_dev_internal, list); 225 DMWARN("%s: dm_table_destroy: dm_put_device call missing for %s", 226 dm_device_name(md), dd->dm_dev->name); 227 dm_put_table_device(md, dd->dm_dev); 228 kfree(dd); 229 } 230 } 231 232 void dm_table_destroy(struct dm_table *t) 233 { 234 unsigned int i; 235 236 if (!t) 237 return; 238 239 /* free the indexes */ 240 if (t->depth >= 2) 241 vfree(t->index[t->depth - 2]); 242 243 /* free the targets */ 244 for (i = 0; i < t->num_targets; i++) { 245 struct dm_target *tgt = t->targets + i; 246 247 if (tgt->type->dtr) 248 tgt->type->dtr(tgt); 249 250 dm_put_target_type(tgt->type); 251 } 252 253 vfree(t->highs); 254 255 /* free the device list */ 256 free_devices(&t->devices, t->md); 257 258 dm_free_md_mempools(t->mempools); 259 260 kfree(t); 261 } 262 263 /* 264 * See if we've already got a device in the list. 265 */ 266 static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev) 267 { 268 struct dm_dev_internal *dd; 269 270 list_for_each_entry (dd, l, list) 271 if (dd->dm_dev->bdev->bd_dev == dev) 272 return dd; 273 274 return NULL; 275 } 276 277 /* 278 * If possible, this checks an area of a destination device is invalid. 279 */ 280 static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev, 281 sector_t start, sector_t len, void *data) 282 { 283 struct request_queue *q; 284 struct queue_limits *limits = data; 285 struct block_device *bdev = dev->bdev; 286 sector_t dev_size = 287 i_size_read(bdev->bd_inode) >> SECTOR_SHIFT; 288 unsigned short logical_block_size_sectors = 289 limits->logical_block_size >> SECTOR_SHIFT; 290 char b[BDEVNAME_SIZE]; 291 292 /* 293 * Some devices exist without request functions, 294 * such as loop devices not yet bound to backing files. 295 * Forbid the use of such devices. 296 */ 297 q = bdev_get_queue(bdev); 298 if (!q || !q->make_request_fn) { 299 DMWARN("%s: %s is not yet initialised: " 300 "start=%llu, len=%llu, dev_size=%llu", 301 dm_device_name(ti->table->md), bdevname(bdev, b), 302 (unsigned long long)start, 303 (unsigned long long)len, 304 (unsigned long long)dev_size); 305 return 1; 306 } 307 308 if (!dev_size) 309 return 0; 310 311 if ((start >= dev_size) || (start + len > dev_size)) { 312 DMWARN("%s: %s too small for target: " 313 "start=%llu, len=%llu, dev_size=%llu", 314 dm_device_name(ti->table->md), bdevname(bdev, b), 315 (unsigned long long)start, 316 (unsigned long long)len, 317 (unsigned long long)dev_size); 318 return 1; 319 } 320 321 if (logical_block_size_sectors <= 1) 322 return 0; 323 324 if (start & (logical_block_size_sectors - 1)) { 325 DMWARN("%s: start=%llu not aligned to h/w " 326 "logical block size %u of %s", 327 dm_device_name(ti->table->md), 328 (unsigned long long)start, 329 limits->logical_block_size, bdevname(bdev, b)); 330 return 1; 331 } 332 333 if (len & (logical_block_size_sectors - 1)) { 334 DMWARN("%s: len=%llu not aligned to h/w " 335 "logical block size %u of %s", 336 dm_device_name(ti->table->md), 337 (unsigned long long)len, 338 limits->logical_block_size, bdevname(bdev, b)); 339 return 1; 340 } 341 342 return 0; 343 } 344 345 /* 346 * This upgrades the mode on an already open dm_dev, being 347 * careful to leave things as they were if we fail to reopen the 348 * device and not to touch the existing bdev field in case 349 * it is accessed concurrently inside dm_table_any_congested(). 350 */ 351 static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode, 352 struct mapped_device *md) 353 { 354 int r; 355 struct dm_dev *old_dev, *new_dev; 356 357 old_dev = dd->dm_dev; 358 359 r = dm_get_table_device(md, dd->dm_dev->bdev->bd_dev, 360 dd->dm_dev->mode | new_mode, &new_dev); 361 if (r) 362 return r; 363 364 dd->dm_dev = new_dev; 365 dm_put_table_device(md, old_dev); 366 367 return 0; 368 } 369 370 /* 371 * Convert the path to a device 372 */ 373 dev_t dm_get_dev_t(const char *path) 374 { 375 dev_t uninitialized_var(dev); 376 struct block_device *bdev; 377 378 bdev = lookup_bdev(path); 379 if (IS_ERR(bdev)) 380 dev = name_to_dev_t(path); 381 else { 382 dev = bdev->bd_dev; 383 bdput(bdev); 384 } 385 386 return dev; 387 } 388 EXPORT_SYMBOL_GPL(dm_get_dev_t); 389 390 /* 391 * Add a device to the list, or just increment the usage count if 392 * it's already present. 393 */ 394 int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode, 395 struct dm_dev **result) 396 { 397 int r; 398 dev_t dev; 399 struct dm_dev_internal *dd; 400 struct dm_table *t = ti->table; 401 402 BUG_ON(!t); 403 404 dev = dm_get_dev_t(path); 405 if (!dev) 406 return -ENODEV; 407 408 dd = find_device(&t->devices, dev); 409 if (!dd) { 410 dd = kmalloc(sizeof(*dd), GFP_KERNEL); 411 if (!dd) 412 return -ENOMEM; 413 414 if ((r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev))) { 415 kfree(dd); 416 return r; 417 } 418 419 atomic_set(&dd->count, 0); 420 list_add(&dd->list, &t->devices); 421 422 } else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) { 423 r = upgrade_mode(dd, mode, t->md); 424 if (r) 425 return r; 426 } 427 atomic_inc(&dd->count); 428 429 *result = dd->dm_dev; 430 return 0; 431 } 432 EXPORT_SYMBOL(dm_get_device); 433 434 static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, 435 sector_t start, sector_t len, void *data) 436 { 437 struct queue_limits *limits = data; 438 struct block_device *bdev = dev->bdev; 439 struct request_queue *q = bdev_get_queue(bdev); 440 char b[BDEVNAME_SIZE]; 441 442 if (unlikely(!q)) { 443 DMWARN("%s: Cannot set limits for nonexistent device %s", 444 dm_device_name(ti->table->md), bdevname(bdev, b)); 445 return 0; 446 } 447 448 if (bdev_stack_limits(limits, bdev, start) < 0) 449 DMWARN("%s: adding target device %s caused an alignment inconsistency: " 450 "physical_block_size=%u, logical_block_size=%u, " 451 "alignment_offset=%u, start=%llu", 452 dm_device_name(ti->table->md), bdevname(bdev, b), 453 q->limits.physical_block_size, 454 q->limits.logical_block_size, 455 q->limits.alignment_offset, 456 (unsigned long long) start << SECTOR_SHIFT); 457 458 return 0; 459 } 460 461 /* 462 * Decrement a device's use count and remove it if necessary. 463 */ 464 void dm_put_device(struct dm_target *ti, struct dm_dev *d) 465 { 466 int found = 0; 467 struct list_head *devices = &ti->table->devices; 468 struct dm_dev_internal *dd; 469 470 list_for_each_entry(dd, devices, list) { 471 if (dd->dm_dev == d) { 472 found = 1; 473 break; 474 } 475 } 476 if (!found) { 477 DMWARN("%s: device %s not in table devices list", 478 dm_device_name(ti->table->md), d->name); 479 return; 480 } 481 if (atomic_dec_and_test(&dd->count)) { 482 dm_put_table_device(ti->table->md, d); 483 list_del(&dd->list); 484 kfree(dd); 485 } 486 } 487 EXPORT_SYMBOL(dm_put_device); 488 489 /* 490 * Checks to see if the target joins onto the end of the table. 491 */ 492 static int adjoin(struct dm_table *table, struct dm_target *ti) 493 { 494 struct dm_target *prev; 495 496 if (!table->num_targets) 497 return !ti->begin; 498 499 prev = &table->targets[table->num_targets - 1]; 500 return (ti->begin == (prev->begin + prev->len)); 501 } 502 503 /* 504 * Used to dynamically allocate the arg array. 505 * 506 * We do first allocation with GFP_NOIO because dm-mpath and dm-thin must 507 * process messages even if some device is suspended. These messages have a 508 * small fixed number of arguments. 509 * 510 * On the other hand, dm-switch needs to process bulk data using messages and 511 * excessive use of GFP_NOIO could cause trouble. 512 */ 513 static char **realloc_argv(unsigned *array_size, char **old_argv) 514 { 515 char **argv; 516 unsigned new_size; 517 gfp_t gfp; 518 519 if (*array_size) { 520 new_size = *array_size * 2; 521 gfp = GFP_KERNEL; 522 } else { 523 new_size = 8; 524 gfp = GFP_NOIO; 525 } 526 argv = kmalloc(new_size * sizeof(*argv), gfp); 527 if (argv) { 528 memcpy(argv, old_argv, *array_size * sizeof(*argv)); 529 *array_size = new_size; 530 } 531 532 kfree(old_argv); 533 return argv; 534 } 535 536 /* 537 * Destructively splits up the argument list to pass to ctr. 538 */ 539 int dm_split_args(int *argc, char ***argvp, char *input) 540 { 541 char *start, *end = input, *out, **argv = NULL; 542 unsigned array_size = 0; 543 544 *argc = 0; 545 546 if (!input) { 547 *argvp = NULL; 548 return 0; 549 } 550 551 argv = realloc_argv(&array_size, argv); 552 if (!argv) 553 return -ENOMEM; 554 555 while (1) { 556 /* Skip whitespace */ 557 start = skip_spaces(end); 558 559 if (!*start) 560 break; /* success, we hit the end */ 561 562 /* 'out' is used to remove any back-quotes */ 563 end = out = start; 564 while (*end) { 565 /* Everything apart from '\0' can be quoted */ 566 if (*end == '\\' && *(end + 1)) { 567 *out++ = *(end + 1); 568 end += 2; 569 continue; 570 } 571 572 if (isspace(*end)) 573 break; /* end of token */ 574 575 *out++ = *end++; 576 } 577 578 /* have we already filled the array ? */ 579 if ((*argc + 1) > array_size) { 580 argv = realloc_argv(&array_size, argv); 581 if (!argv) 582 return -ENOMEM; 583 } 584 585 /* we know this is whitespace */ 586 if (*end) 587 end++; 588 589 /* terminate the string and put it in the array */ 590 *out = '\0'; 591 argv[*argc] = start; 592 (*argc)++; 593 } 594 595 *argvp = argv; 596 return 0; 597 } 598 599 /* 600 * Impose necessary and sufficient conditions on a devices's table such 601 * that any incoming bio which respects its logical_block_size can be 602 * processed successfully. If it falls across the boundary between 603 * two or more targets, the size of each piece it gets split into must 604 * be compatible with the logical_block_size of the target processing it. 605 */ 606 static int validate_hardware_logical_block_alignment(struct dm_table *table, 607 struct queue_limits *limits) 608 { 609 /* 610 * This function uses arithmetic modulo the logical_block_size 611 * (in units of 512-byte sectors). 612 */ 613 unsigned short device_logical_block_size_sects = 614 limits->logical_block_size >> SECTOR_SHIFT; 615 616 /* 617 * Offset of the start of the next table entry, mod logical_block_size. 618 */ 619 unsigned short next_target_start = 0; 620 621 /* 622 * Given an aligned bio that extends beyond the end of a 623 * target, how many sectors must the next target handle? 624 */ 625 unsigned short remaining = 0; 626 627 struct dm_target *uninitialized_var(ti); 628 struct queue_limits ti_limits; 629 unsigned i = 0; 630 631 /* 632 * Check each entry in the table in turn. 633 */ 634 while (i < dm_table_get_num_targets(table)) { 635 ti = dm_table_get_target(table, i++); 636 637 blk_set_stacking_limits(&ti_limits); 638 639 /* combine all target devices' limits */ 640 if (ti->type->iterate_devices) 641 ti->type->iterate_devices(ti, dm_set_device_limits, 642 &ti_limits); 643 644 /* 645 * If the remaining sectors fall entirely within this 646 * table entry are they compatible with its logical_block_size? 647 */ 648 if (remaining < ti->len && 649 remaining & ((ti_limits.logical_block_size >> 650 SECTOR_SHIFT) - 1)) 651 break; /* Error */ 652 653 next_target_start = 654 (unsigned short) ((next_target_start + ti->len) & 655 (device_logical_block_size_sects - 1)); 656 remaining = next_target_start ? 657 device_logical_block_size_sects - next_target_start : 0; 658 } 659 660 if (remaining) { 661 DMWARN("%s: table line %u (start sect %llu len %llu) " 662 "not aligned to h/w logical block size %u", 663 dm_device_name(table->md), i, 664 (unsigned long long) ti->begin, 665 (unsigned long long) ti->len, 666 limits->logical_block_size); 667 return -EINVAL; 668 } 669 670 return 0; 671 } 672 673 int dm_table_add_target(struct dm_table *t, const char *type, 674 sector_t start, sector_t len, char *params) 675 { 676 int r = -EINVAL, argc; 677 char **argv; 678 struct dm_target *tgt; 679 680 if (t->singleton) { 681 DMERR("%s: target type %s must appear alone in table", 682 dm_device_name(t->md), t->targets->type->name); 683 return -EINVAL; 684 } 685 686 BUG_ON(t->num_targets >= t->num_allocated); 687 688 tgt = t->targets + t->num_targets; 689 memset(tgt, 0, sizeof(*tgt)); 690 691 if (!len) { 692 DMERR("%s: zero-length target", dm_device_name(t->md)); 693 return -EINVAL; 694 } 695 696 tgt->type = dm_get_target_type(type); 697 if (!tgt->type) { 698 DMERR("%s: %s: unknown target type", dm_device_name(t->md), type); 699 return -EINVAL; 700 } 701 702 if (dm_target_needs_singleton(tgt->type)) { 703 if (t->num_targets) { 704 tgt->error = "singleton target type must appear alone in table"; 705 goto bad; 706 } 707 t->singleton = true; 708 } 709 710 if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) { 711 tgt->error = "target type may not be included in a read-only table"; 712 goto bad; 713 } 714 715 if (t->immutable_target_type) { 716 if (t->immutable_target_type != tgt->type) { 717 tgt->error = "immutable target type cannot be mixed with other target types"; 718 goto bad; 719 } 720 } else if (dm_target_is_immutable(tgt->type)) { 721 if (t->num_targets) { 722 tgt->error = "immutable target type cannot be mixed with other target types"; 723 goto bad; 724 } 725 t->immutable_target_type = tgt->type; 726 } 727 728 tgt->table = t; 729 tgt->begin = start; 730 tgt->len = len; 731 tgt->error = "Unknown error"; 732 733 /* 734 * Does this target adjoin the previous one ? 735 */ 736 if (!adjoin(t, tgt)) { 737 tgt->error = "Gap in table"; 738 goto bad; 739 } 740 741 r = dm_split_args(&argc, &argv, params); 742 if (r) { 743 tgt->error = "couldn't split parameters (insufficient memory)"; 744 goto bad; 745 } 746 747 r = tgt->type->ctr(tgt, argc, argv); 748 kfree(argv); 749 if (r) 750 goto bad; 751 752 t->highs[t->num_targets++] = tgt->begin + tgt->len - 1; 753 754 if (!tgt->num_discard_bios && tgt->discards_supported) 755 DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.", 756 dm_device_name(t->md), type); 757 758 return 0; 759 760 bad: 761 DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error); 762 dm_put_target_type(tgt->type); 763 return r; 764 } 765 766 /* 767 * Target argument parsing helpers. 768 */ 769 static int validate_next_arg(struct dm_arg *arg, struct dm_arg_set *arg_set, 770 unsigned *value, char **error, unsigned grouped) 771 { 772 const char *arg_str = dm_shift_arg(arg_set); 773 char dummy; 774 775 if (!arg_str || 776 (sscanf(arg_str, "%u%c", value, &dummy) != 1) || 777 (*value < arg->min) || 778 (*value > arg->max) || 779 (grouped && arg_set->argc < *value)) { 780 *error = arg->error; 781 return -EINVAL; 782 } 783 784 return 0; 785 } 786 787 int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set, 788 unsigned *value, char **error) 789 { 790 return validate_next_arg(arg, arg_set, value, error, 0); 791 } 792 EXPORT_SYMBOL(dm_read_arg); 793 794 int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set, 795 unsigned *value, char **error) 796 { 797 return validate_next_arg(arg, arg_set, value, error, 1); 798 } 799 EXPORT_SYMBOL(dm_read_arg_group); 800 801 const char *dm_shift_arg(struct dm_arg_set *as) 802 { 803 char *r; 804 805 if (as->argc) { 806 as->argc--; 807 r = *as->argv; 808 as->argv++; 809 return r; 810 } 811 812 return NULL; 813 } 814 EXPORT_SYMBOL(dm_shift_arg); 815 816 void dm_consume_args(struct dm_arg_set *as, unsigned num_args) 817 { 818 BUG_ON(as->argc < num_args); 819 as->argc -= num_args; 820 as->argv += num_args; 821 } 822 EXPORT_SYMBOL(dm_consume_args); 823 824 static bool __table_type_bio_based(unsigned table_type) 825 { 826 return (table_type == DM_TYPE_BIO_BASED || 827 table_type == DM_TYPE_DAX_BIO_BASED); 828 } 829 830 static bool __table_type_request_based(unsigned table_type) 831 { 832 return (table_type == DM_TYPE_REQUEST_BASED || 833 table_type == DM_TYPE_MQ_REQUEST_BASED); 834 } 835 836 void dm_table_set_type(struct dm_table *t, unsigned type) 837 { 838 t->type = type; 839 } 840 EXPORT_SYMBOL_GPL(dm_table_set_type); 841 842 static int device_supports_dax(struct dm_target *ti, struct dm_dev *dev, 843 sector_t start, sector_t len, void *data) 844 { 845 struct request_queue *q = bdev_get_queue(dev->bdev); 846 847 return q && blk_queue_dax(q); 848 } 849 850 static bool dm_table_supports_dax(struct dm_table *t) 851 { 852 struct dm_target *ti; 853 unsigned i = 0; 854 855 /* Ensure that all targets support DAX. */ 856 while (i < dm_table_get_num_targets(t)) { 857 ti = dm_table_get_target(t, i++); 858 859 if (!ti->type->direct_access) 860 return false; 861 862 if (!ti->type->iterate_devices || 863 !ti->type->iterate_devices(ti, device_supports_dax, NULL)) 864 return false; 865 } 866 867 return true; 868 } 869 870 static int dm_table_determine_type(struct dm_table *t) 871 { 872 unsigned i; 873 unsigned bio_based = 0, request_based = 0, hybrid = 0; 874 bool verify_blk_mq = false; 875 struct dm_target *tgt; 876 struct dm_dev_internal *dd; 877 struct list_head *devices = dm_table_get_devices(t); 878 unsigned live_md_type = dm_get_md_type(t->md); 879 880 if (t->type != DM_TYPE_NONE) { 881 /* target already set the table's type */ 882 if (t->type == DM_TYPE_BIO_BASED) 883 return 0; 884 BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED); 885 goto verify_rq_based; 886 } 887 888 for (i = 0; i < t->num_targets; i++) { 889 tgt = t->targets + i; 890 if (dm_target_hybrid(tgt)) 891 hybrid = 1; 892 else if (dm_target_request_based(tgt)) 893 request_based = 1; 894 else 895 bio_based = 1; 896 897 if (bio_based && request_based) { 898 DMWARN("Inconsistent table: different target types" 899 " can't be mixed up"); 900 return -EINVAL; 901 } 902 } 903 904 if (hybrid && !bio_based && !request_based) { 905 /* 906 * The targets can work either way. 907 * Determine the type from the live device. 908 * Default to bio-based if device is new. 909 */ 910 if (__table_type_request_based(live_md_type)) 911 request_based = 1; 912 else 913 bio_based = 1; 914 } 915 916 if (bio_based) { 917 /* We must use this table as bio-based */ 918 t->type = DM_TYPE_BIO_BASED; 919 if (dm_table_supports_dax(t) || 920 (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) 921 t->type = DM_TYPE_DAX_BIO_BASED; 922 return 0; 923 } 924 925 BUG_ON(!request_based); /* No targets in this table */ 926 927 if (list_empty(devices) && __table_type_request_based(live_md_type)) { 928 /* inherit live MD type */ 929 t->type = live_md_type; 930 return 0; 931 } 932 933 /* 934 * The only way to establish DM_TYPE_MQ_REQUEST_BASED is by 935 * having a compatible target use dm_table_set_type. 936 */ 937 t->type = DM_TYPE_REQUEST_BASED; 938 939 verify_rq_based: 940 /* 941 * Request-based dm supports only tables that have a single target now. 942 * To support multiple targets, request splitting support is needed, 943 * and that needs lots of changes in the block-layer. 944 * (e.g. request completion process for partial completion.) 945 */ 946 if (t->num_targets > 1) { 947 DMWARN("Request-based dm doesn't support multiple targets yet"); 948 return -EINVAL; 949 } 950 951 /* Non-request-stackable devices can't be used for request-based dm */ 952 list_for_each_entry(dd, devices, list) { 953 struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev); 954 955 if (!blk_queue_stackable(q)) { 956 DMERR("table load rejected: including" 957 " non-request-stackable devices"); 958 return -EINVAL; 959 } 960 961 if (q->mq_ops) 962 verify_blk_mq = true; 963 } 964 965 if (verify_blk_mq) { 966 /* verify _all_ devices in the table are blk-mq devices */ 967 list_for_each_entry(dd, devices, list) 968 if (!bdev_get_queue(dd->dm_dev->bdev)->mq_ops) { 969 DMERR("table load rejected: not all devices" 970 " are blk-mq request-stackable"); 971 return -EINVAL; 972 } 973 974 t->all_blk_mq = true; 975 } 976 977 return 0; 978 } 979 980 unsigned dm_table_get_type(struct dm_table *t) 981 { 982 return t->type; 983 } 984 985 struct target_type *dm_table_get_immutable_target_type(struct dm_table *t) 986 { 987 return t->immutable_target_type; 988 } 989 990 struct dm_target *dm_table_get_immutable_target(struct dm_table *t) 991 { 992 /* Immutable target is implicitly a singleton */ 993 if (t->num_targets > 1 || 994 !dm_target_is_immutable(t->targets[0].type)) 995 return NULL; 996 997 return t->targets; 998 } 999 1000 struct dm_target *dm_table_get_wildcard_target(struct dm_table *t) 1001 { 1002 struct dm_target *uninitialized_var(ti); 1003 unsigned i = 0; 1004 1005 while (i < dm_table_get_num_targets(t)) { 1006 ti = dm_table_get_target(t, i++); 1007 if (dm_target_is_wildcard(ti->type)) 1008 return ti; 1009 } 1010 1011 return NULL; 1012 } 1013 1014 bool dm_table_bio_based(struct dm_table *t) 1015 { 1016 return __table_type_bio_based(dm_table_get_type(t)); 1017 } 1018 1019 bool dm_table_request_based(struct dm_table *t) 1020 { 1021 return __table_type_request_based(dm_table_get_type(t)); 1022 } 1023 1024 bool dm_table_all_blk_mq_devices(struct dm_table *t) 1025 { 1026 return t->all_blk_mq; 1027 } 1028 1029 static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md) 1030 { 1031 unsigned type = dm_table_get_type(t); 1032 unsigned per_io_data_size = 0; 1033 struct dm_target *tgt; 1034 unsigned i; 1035 1036 if (unlikely(type == DM_TYPE_NONE)) { 1037 DMWARN("no table type is set, can't allocate mempools"); 1038 return -EINVAL; 1039 } 1040 1041 if (__table_type_bio_based(type)) 1042 for (i = 0; i < t->num_targets; i++) { 1043 tgt = t->targets + i; 1044 per_io_data_size = max(per_io_data_size, tgt->per_io_data_size); 1045 } 1046 1047 t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported, per_io_data_size); 1048 if (!t->mempools) 1049 return -ENOMEM; 1050 1051 return 0; 1052 } 1053 1054 void dm_table_free_md_mempools(struct dm_table *t) 1055 { 1056 dm_free_md_mempools(t->mempools); 1057 t->mempools = NULL; 1058 } 1059 1060 struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t) 1061 { 1062 return t->mempools; 1063 } 1064 1065 static int setup_indexes(struct dm_table *t) 1066 { 1067 int i; 1068 unsigned int total = 0; 1069 sector_t *indexes; 1070 1071 /* allocate the space for *all* the indexes */ 1072 for (i = t->depth - 2; i >= 0; i--) { 1073 t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE); 1074 total += t->counts[i]; 1075 } 1076 1077 indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE); 1078 if (!indexes) 1079 return -ENOMEM; 1080 1081 /* set up internal nodes, bottom-up */ 1082 for (i = t->depth - 2; i >= 0; i--) { 1083 t->index[i] = indexes; 1084 indexes += (KEYS_PER_NODE * t->counts[i]); 1085 setup_btree_index(i, t); 1086 } 1087 1088 return 0; 1089 } 1090 1091 /* 1092 * Builds the btree to index the map. 1093 */ 1094 static int dm_table_build_index(struct dm_table *t) 1095 { 1096 int r = 0; 1097 unsigned int leaf_nodes; 1098 1099 /* how many indexes will the btree have ? */ 1100 leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE); 1101 t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE); 1102 1103 /* leaf layer has already been set up */ 1104 t->counts[t->depth - 1] = leaf_nodes; 1105 t->index[t->depth - 1] = t->highs; 1106 1107 if (t->depth >= 2) 1108 r = setup_indexes(t); 1109 1110 return r; 1111 } 1112 1113 static bool integrity_profile_exists(struct gendisk *disk) 1114 { 1115 return !!blk_get_integrity(disk); 1116 } 1117 1118 /* 1119 * Get a disk whose integrity profile reflects the table's profile. 1120 * Returns NULL if integrity support was inconsistent or unavailable. 1121 */ 1122 static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t) 1123 { 1124 struct list_head *devices = dm_table_get_devices(t); 1125 struct dm_dev_internal *dd = NULL; 1126 struct gendisk *prev_disk = NULL, *template_disk = NULL; 1127 1128 list_for_each_entry(dd, devices, list) { 1129 template_disk = dd->dm_dev->bdev->bd_disk; 1130 if (!integrity_profile_exists(template_disk)) 1131 goto no_integrity; 1132 else if (prev_disk && 1133 blk_integrity_compare(prev_disk, template_disk) < 0) 1134 goto no_integrity; 1135 prev_disk = template_disk; 1136 } 1137 1138 return template_disk; 1139 1140 no_integrity: 1141 if (prev_disk) 1142 DMWARN("%s: integrity not set: %s and %s profile mismatch", 1143 dm_device_name(t->md), 1144 prev_disk->disk_name, 1145 template_disk->disk_name); 1146 return NULL; 1147 } 1148 1149 /* 1150 * Register the mapped device for blk_integrity support if the 1151 * underlying devices have an integrity profile. But all devices may 1152 * not have matching profiles (checking all devices isn't reliable 1153 * during table load because this table may use other DM device(s) which 1154 * must be resumed before they will have an initialized integity 1155 * profile). Consequently, stacked DM devices force a 2 stage integrity 1156 * profile validation: First pass during table load, final pass during 1157 * resume. 1158 */ 1159 static int dm_table_register_integrity(struct dm_table *t) 1160 { 1161 struct mapped_device *md = t->md; 1162 struct gendisk *template_disk = NULL; 1163 1164 template_disk = dm_table_get_integrity_disk(t); 1165 if (!template_disk) 1166 return 0; 1167 1168 if (!integrity_profile_exists(dm_disk(md))) { 1169 t->integrity_supported = true; 1170 /* 1171 * Register integrity profile during table load; we can do 1172 * this because the final profile must match during resume. 1173 */ 1174 blk_integrity_register(dm_disk(md), 1175 blk_get_integrity(template_disk)); 1176 return 0; 1177 } 1178 1179 /* 1180 * If DM device already has an initialized integrity 1181 * profile the new profile should not conflict. 1182 */ 1183 if (blk_integrity_compare(dm_disk(md), template_disk) < 0) { 1184 DMWARN("%s: conflict with existing integrity profile: " 1185 "%s profile mismatch", 1186 dm_device_name(t->md), 1187 template_disk->disk_name); 1188 return 1; 1189 } 1190 1191 /* Preserve existing integrity profile */ 1192 t->integrity_supported = true; 1193 return 0; 1194 } 1195 1196 /* 1197 * Prepares the table for use by building the indices, 1198 * setting the type, and allocating mempools. 1199 */ 1200 int dm_table_complete(struct dm_table *t) 1201 { 1202 int r; 1203 1204 r = dm_table_determine_type(t); 1205 if (r) { 1206 DMERR("unable to determine table type"); 1207 return r; 1208 } 1209 1210 r = dm_table_build_index(t); 1211 if (r) { 1212 DMERR("unable to build btrees"); 1213 return r; 1214 } 1215 1216 r = dm_table_register_integrity(t); 1217 if (r) { 1218 DMERR("could not register integrity profile."); 1219 return r; 1220 } 1221 1222 r = dm_table_alloc_md_mempools(t, t->md); 1223 if (r) 1224 DMERR("unable to allocate mempools"); 1225 1226 return r; 1227 } 1228 1229 static DEFINE_MUTEX(_event_lock); 1230 void dm_table_event_callback(struct dm_table *t, 1231 void (*fn)(void *), void *context) 1232 { 1233 mutex_lock(&_event_lock); 1234 t->event_fn = fn; 1235 t->event_context = context; 1236 mutex_unlock(&_event_lock); 1237 } 1238 1239 void dm_table_event(struct dm_table *t) 1240 { 1241 /* 1242 * You can no longer call dm_table_event() from interrupt 1243 * context, use a bottom half instead. 1244 */ 1245 BUG_ON(in_interrupt()); 1246 1247 mutex_lock(&_event_lock); 1248 if (t->event_fn) 1249 t->event_fn(t->event_context); 1250 mutex_unlock(&_event_lock); 1251 } 1252 EXPORT_SYMBOL(dm_table_event); 1253 1254 sector_t dm_table_get_size(struct dm_table *t) 1255 { 1256 return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0; 1257 } 1258 EXPORT_SYMBOL(dm_table_get_size); 1259 1260 struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index) 1261 { 1262 if (index >= t->num_targets) 1263 return NULL; 1264 1265 return t->targets + index; 1266 } 1267 1268 /* 1269 * Search the btree for the correct target. 1270 * 1271 * Caller should check returned pointer with dm_target_is_valid() 1272 * to trap I/O beyond end of device. 1273 */ 1274 struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector) 1275 { 1276 unsigned int l, n = 0, k = 0; 1277 sector_t *node; 1278 1279 for (l = 0; l < t->depth; l++) { 1280 n = get_child(n, k); 1281 node = get_node(t, l, n); 1282 1283 for (k = 0; k < KEYS_PER_NODE; k++) 1284 if (node[k] >= sector) 1285 break; 1286 } 1287 1288 return &t->targets[(KEYS_PER_NODE * n) + k]; 1289 } 1290 1291 static int count_device(struct dm_target *ti, struct dm_dev *dev, 1292 sector_t start, sector_t len, void *data) 1293 { 1294 unsigned *num_devices = data; 1295 1296 (*num_devices)++; 1297 1298 return 0; 1299 } 1300 1301 /* 1302 * Check whether a table has no data devices attached using each 1303 * target's iterate_devices method. 1304 * Returns false if the result is unknown because a target doesn't 1305 * support iterate_devices. 1306 */ 1307 bool dm_table_has_no_data_devices(struct dm_table *table) 1308 { 1309 struct dm_target *uninitialized_var(ti); 1310 unsigned i = 0, num_devices = 0; 1311 1312 while (i < dm_table_get_num_targets(table)) { 1313 ti = dm_table_get_target(table, i++); 1314 1315 if (!ti->type->iterate_devices) 1316 return false; 1317 1318 ti->type->iterate_devices(ti, count_device, &num_devices); 1319 if (num_devices) 1320 return false; 1321 } 1322 1323 return true; 1324 } 1325 1326 /* 1327 * Establish the new table's queue_limits and validate them. 1328 */ 1329 int dm_calculate_queue_limits(struct dm_table *table, 1330 struct queue_limits *limits) 1331 { 1332 struct dm_target *uninitialized_var(ti); 1333 struct queue_limits ti_limits; 1334 unsigned i = 0; 1335 1336 blk_set_stacking_limits(limits); 1337 1338 while (i < dm_table_get_num_targets(table)) { 1339 blk_set_stacking_limits(&ti_limits); 1340 1341 ti = dm_table_get_target(table, i++); 1342 1343 if (!ti->type->iterate_devices) 1344 goto combine_limits; 1345 1346 /* 1347 * Combine queue limits of all the devices this target uses. 1348 */ 1349 ti->type->iterate_devices(ti, dm_set_device_limits, 1350 &ti_limits); 1351 1352 /* Set I/O hints portion of queue limits */ 1353 if (ti->type->io_hints) 1354 ti->type->io_hints(ti, &ti_limits); 1355 1356 /* 1357 * Check each device area is consistent with the target's 1358 * overall queue limits. 1359 */ 1360 if (ti->type->iterate_devices(ti, device_area_is_invalid, 1361 &ti_limits)) 1362 return -EINVAL; 1363 1364 combine_limits: 1365 /* 1366 * Merge this target's queue limits into the overall limits 1367 * for the table. 1368 */ 1369 if (blk_stack_limits(limits, &ti_limits, 0) < 0) 1370 DMWARN("%s: adding target device " 1371 "(start sect %llu len %llu) " 1372 "caused an alignment inconsistency", 1373 dm_device_name(table->md), 1374 (unsigned long long) ti->begin, 1375 (unsigned long long) ti->len); 1376 } 1377 1378 return validate_hardware_logical_block_alignment(table, limits); 1379 } 1380 1381 /* 1382 * Verify that all devices have an integrity profile that matches the 1383 * DM device's registered integrity profile. If the profiles don't 1384 * match then unregister the DM device's integrity profile. 1385 */ 1386 static void dm_table_verify_integrity(struct dm_table *t) 1387 { 1388 struct gendisk *template_disk = NULL; 1389 1390 if (t->integrity_supported) { 1391 /* 1392 * Verify that the original integrity profile 1393 * matches all the devices in this table. 1394 */ 1395 template_disk = dm_table_get_integrity_disk(t); 1396 if (template_disk && 1397 blk_integrity_compare(dm_disk(t->md), template_disk) >= 0) 1398 return; 1399 } 1400 1401 if (integrity_profile_exists(dm_disk(t->md))) { 1402 DMWARN("%s: unable to establish an integrity profile", 1403 dm_device_name(t->md)); 1404 blk_integrity_unregister(dm_disk(t->md)); 1405 } 1406 } 1407 1408 static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev, 1409 sector_t start, sector_t len, void *data) 1410 { 1411 unsigned long flush = (unsigned long) data; 1412 struct request_queue *q = bdev_get_queue(dev->bdev); 1413 1414 return q && (q->queue_flags & flush); 1415 } 1416 1417 static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush) 1418 { 1419 struct dm_target *ti; 1420 unsigned i = 0; 1421 1422 /* 1423 * Require at least one underlying device to support flushes. 1424 * t->devices includes internal dm devices such as mirror logs 1425 * so we need to use iterate_devices here, which targets 1426 * supporting flushes must provide. 1427 */ 1428 while (i < dm_table_get_num_targets(t)) { 1429 ti = dm_table_get_target(t, i++); 1430 1431 if (!ti->num_flush_bios) 1432 continue; 1433 1434 if (ti->flush_supported) 1435 return true; 1436 1437 if (ti->type->iterate_devices && 1438 ti->type->iterate_devices(ti, device_flush_capable, (void *) flush)) 1439 return true; 1440 } 1441 1442 return false; 1443 } 1444 1445 static bool dm_table_discard_zeroes_data(struct dm_table *t) 1446 { 1447 struct dm_target *ti; 1448 unsigned i = 0; 1449 1450 /* Ensure that all targets supports discard_zeroes_data. */ 1451 while (i < dm_table_get_num_targets(t)) { 1452 ti = dm_table_get_target(t, i++); 1453 1454 if (ti->discard_zeroes_data_unsupported) 1455 return false; 1456 } 1457 1458 return true; 1459 } 1460 1461 static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev, 1462 sector_t start, sector_t len, void *data) 1463 { 1464 struct request_queue *q = bdev_get_queue(dev->bdev); 1465 1466 return q && blk_queue_nonrot(q); 1467 } 1468 1469 static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev, 1470 sector_t start, sector_t len, void *data) 1471 { 1472 struct request_queue *q = bdev_get_queue(dev->bdev); 1473 1474 return q && !blk_queue_add_random(q); 1475 } 1476 1477 static int queue_supports_sg_merge(struct dm_target *ti, struct dm_dev *dev, 1478 sector_t start, sector_t len, void *data) 1479 { 1480 struct request_queue *q = bdev_get_queue(dev->bdev); 1481 1482 return q && !test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags); 1483 } 1484 1485 static bool dm_table_all_devices_attribute(struct dm_table *t, 1486 iterate_devices_callout_fn func) 1487 { 1488 struct dm_target *ti; 1489 unsigned i = 0; 1490 1491 while (i < dm_table_get_num_targets(t)) { 1492 ti = dm_table_get_target(t, i++); 1493 1494 if (!ti->type->iterate_devices || 1495 !ti->type->iterate_devices(ti, func, NULL)) 1496 return false; 1497 } 1498 1499 return true; 1500 } 1501 1502 static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev, 1503 sector_t start, sector_t len, void *data) 1504 { 1505 struct request_queue *q = bdev_get_queue(dev->bdev); 1506 1507 return q && !q->limits.max_write_same_sectors; 1508 } 1509 1510 static bool dm_table_supports_write_same(struct dm_table *t) 1511 { 1512 struct dm_target *ti; 1513 unsigned i = 0; 1514 1515 while (i < dm_table_get_num_targets(t)) { 1516 ti = dm_table_get_target(t, i++); 1517 1518 if (!ti->num_write_same_bios) 1519 return false; 1520 1521 if (!ti->type->iterate_devices || 1522 ti->type->iterate_devices(ti, device_not_write_same_capable, NULL)) 1523 return false; 1524 } 1525 1526 return true; 1527 } 1528 1529 static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev, 1530 sector_t start, sector_t len, void *data) 1531 { 1532 struct request_queue *q = bdev_get_queue(dev->bdev); 1533 1534 return q && blk_queue_discard(q); 1535 } 1536 1537 static bool dm_table_supports_discards(struct dm_table *t) 1538 { 1539 struct dm_target *ti; 1540 unsigned i = 0; 1541 1542 /* 1543 * Unless any target used by the table set discards_supported, 1544 * require at least one underlying device to support discards. 1545 * t->devices includes internal dm devices such as mirror logs 1546 * so we need to use iterate_devices here, which targets 1547 * supporting discard selectively must provide. 1548 */ 1549 while (i < dm_table_get_num_targets(t)) { 1550 ti = dm_table_get_target(t, i++); 1551 1552 if (!ti->num_discard_bios) 1553 continue; 1554 1555 if (ti->discards_supported) 1556 return true; 1557 1558 if (ti->type->iterate_devices && 1559 ti->type->iterate_devices(ti, device_discard_capable, NULL)) 1560 return true; 1561 } 1562 1563 return false; 1564 } 1565 1566 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, 1567 struct queue_limits *limits) 1568 { 1569 bool wc = false, fua = false; 1570 1571 /* 1572 * Copy table's limits to the DM device's request_queue 1573 */ 1574 q->limits = *limits; 1575 1576 if (!dm_table_supports_discards(t)) 1577 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q); 1578 else 1579 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); 1580 1581 if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) { 1582 wc = true; 1583 if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA))) 1584 fua = true; 1585 } 1586 blk_queue_write_cache(q, wc, fua); 1587 1588 if (!dm_table_discard_zeroes_data(t)) 1589 q->limits.discard_zeroes_data = 0; 1590 1591 /* Ensure that all underlying devices are non-rotational. */ 1592 if (dm_table_all_devices_attribute(t, device_is_nonrot)) 1593 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); 1594 else 1595 queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q); 1596 1597 if (!dm_table_supports_write_same(t)) 1598 q->limits.max_write_same_sectors = 0; 1599 1600 if (dm_table_all_devices_attribute(t, queue_supports_sg_merge)) 1601 queue_flag_clear_unlocked(QUEUE_FLAG_NO_SG_MERGE, q); 1602 else 1603 queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q); 1604 1605 dm_table_verify_integrity(t); 1606 1607 /* 1608 * Determine whether or not this queue's I/O timings contribute 1609 * to the entropy pool, Only request-based targets use this. 1610 * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not 1611 * have it set. 1612 */ 1613 if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random)) 1614 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q); 1615 1616 /* 1617 * QUEUE_FLAG_STACKABLE must be set after all queue settings are 1618 * visible to other CPUs because, once the flag is set, incoming bios 1619 * are processed by request-based dm, which refers to the queue 1620 * settings. 1621 * Until the flag set, bios are passed to bio-based dm and queued to 1622 * md->deferred where queue settings are not needed yet. 1623 * Those bios are passed to request-based dm at the resume time. 1624 */ 1625 smp_mb(); 1626 if (dm_table_request_based(t)) 1627 queue_flag_set_unlocked(QUEUE_FLAG_STACKABLE, q); 1628 } 1629 1630 unsigned int dm_table_get_num_targets(struct dm_table *t) 1631 { 1632 return t->num_targets; 1633 } 1634 1635 struct list_head *dm_table_get_devices(struct dm_table *t) 1636 { 1637 return &t->devices; 1638 } 1639 1640 fmode_t dm_table_get_mode(struct dm_table *t) 1641 { 1642 return t->mode; 1643 } 1644 EXPORT_SYMBOL(dm_table_get_mode); 1645 1646 enum suspend_mode { 1647 PRESUSPEND, 1648 PRESUSPEND_UNDO, 1649 POSTSUSPEND, 1650 }; 1651 1652 static void suspend_targets(struct dm_table *t, enum suspend_mode mode) 1653 { 1654 int i = t->num_targets; 1655 struct dm_target *ti = t->targets; 1656 1657 while (i--) { 1658 switch (mode) { 1659 case PRESUSPEND: 1660 if (ti->type->presuspend) 1661 ti->type->presuspend(ti); 1662 break; 1663 case PRESUSPEND_UNDO: 1664 if (ti->type->presuspend_undo) 1665 ti->type->presuspend_undo(ti); 1666 break; 1667 case POSTSUSPEND: 1668 if (ti->type->postsuspend) 1669 ti->type->postsuspend(ti); 1670 break; 1671 } 1672 ti++; 1673 } 1674 } 1675 1676 void dm_table_presuspend_targets(struct dm_table *t) 1677 { 1678 if (!t) 1679 return; 1680 1681 suspend_targets(t, PRESUSPEND); 1682 } 1683 1684 void dm_table_presuspend_undo_targets(struct dm_table *t) 1685 { 1686 if (!t) 1687 return; 1688 1689 suspend_targets(t, PRESUSPEND_UNDO); 1690 } 1691 1692 void dm_table_postsuspend_targets(struct dm_table *t) 1693 { 1694 if (!t) 1695 return; 1696 1697 suspend_targets(t, POSTSUSPEND); 1698 } 1699 1700 int dm_table_resume_targets(struct dm_table *t) 1701 { 1702 int i, r = 0; 1703 1704 for (i = 0; i < t->num_targets; i++) { 1705 struct dm_target *ti = t->targets + i; 1706 1707 if (!ti->type->preresume) 1708 continue; 1709 1710 r = ti->type->preresume(ti); 1711 if (r) { 1712 DMERR("%s: %s: preresume failed, error = %d", 1713 dm_device_name(t->md), ti->type->name, r); 1714 return r; 1715 } 1716 } 1717 1718 for (i = 0; i < t->num_targets; i++) { 1719 struct dm_target *ti = t->targets + i; 1720 1721 if (ti->type->resume) 1722 ti->type->resume(ti); 1723 } 1724 1725 return 0; 1726 } 1727 1728 void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb) 1729 { 1730 list_add(&cb->list, &t->target_callbacks); 1731 } 1732 EXPORT_SYMBOL_GPL(dm_table_add_target_callbacks); 1733 1734 int dm_table_any_congested(struct dm_table *t, int bdi_bits) 1735 { 1736 struct dm_dev_internal *dd; 1737 struct list_head *devices = dm_table_get_devices(t); 1738 struct dm_target_callbacks *cb; 1739 int r = 0; 1740 1741 list_for_each_entry(dd, devices, list) { 1742 struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev); 1743 char b[BDEVNAME_SIZE]; 1744 1745 if (likely(q)) 1746 r |= bdi_congested(&q->backing_dev_info, bdi_bits); 1747 else 1748 DMWARN_LIMIT("%s: any_congested: nonexistent device %s", 1749 dm_device_name(t->md), 1750 bdevname(dd->dm_dev->bdev, b)); 1751 } 1752 1753 list_for_each_entry(cb, &t->target_callbacks, list) 1754 if (cb->congested_fn) 1755 r |= cb->congested_fn(cb, bdi_bits); 1756 1757 return r; 1758 } 1759 1760 struct mapped_device *dm_table_get_md(struct dm_table *t) 1761 { 1762 return t->md; 1763 } 1764 EXPORT_SYMBOL(dm_table_get_md); 1765 1766 void dm_table_run_md_queue_async(struct dm_table *t) 1767 { 1768 struct mapped_device *md; 1769 struct request_queue *queue; 1770 unsigned long flags; 1771 1772 if (!dm_table_request_based(t)) 1773 return; 1774 1775 md = dm_table_get_md(t); 1776 queue = dm_get_md_queue(md); 1777 if (queue) { 1778 if (queue->mq_ops) 1779 blk_mq_run_hw_queues(queue, true); 1780 else { 1781 spin_lock_irqsave(queue->queue_lock, flags); 1782 blk_run_queue_async(queue); 1783 spin_unlock_irqrestore(queue->queue_lock, flags); 1784 } 1785 } 1786 } 1787 EXPORT_SYMBOL(dm_table_run_md_queue_async); 1788 1789